author      James Morris <james.morris@microsoft.com>   2018-04-24 03:57:26 +1000
committer   James Morris <james.morris@microsoft.com>   2018-04-24 03:57:26 +1000
commit      b393a707c84bb56a7800c93849fd8b492f76ba42 (patch)
tree        0c752bbf4d28f47d29042df8fa0c7826c51c50fd /include/linux
parent      e59644b720aed4b9ec9d3818b483f97376fb31ed (diff)
parent      6d08b06e67cd117f6992c46611dfb4ce267cd71e (diff)
Merge tag 'v4.17-rc2' into next-general
Sync to Linux 4.17-rc2 for developers.
Diffstat (limited to 'include/linux')
317 files changed, 8911 insertions, 3250 deletions
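One pattern worth noting before the diff: the virtchnl.h hunk below documents macros (VIRTCHNL_CHECK_STRUCT_LEN and the new VIRTCHNL_CHECK_UNION_LEN) that force a compile-time error when a wire-format structure or union is not exactly the expected size, by dividing by zero inside a constant enum initializer. The following is a minimal standalone sketch of that idea, not part of this commit; the struct and macro names here are illustrative only.

    #include <stdint.h>

    /*
     * Compile-time size check: if sizeof(struct X) != n, the initializer
     * divides by zero in a constant expression and the build fails at the
     * declaration site; if the size matches, it merely defines an enum
     * constant that is never used. Sketch only; the kernel macro in
     * virtchnl.h follows the same idea with its own naming.
     */
    #define CHECK_STRUCT_LEN(n, X) \
            enum static_assert_enum_##X \
            { static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }

    struct example_msg {
            uint32_t id;
            uint32_t flags;
            uint64_t cookie;
    };

    /* Passes on common ABIs, where struct example_msg is exactly 16 bytes. */
    CHECK_STRUCT_LEN(16, example_msg);

If a field is later added or padding changes, the division by zero breaks compilation immediately instead of silently changing the PF/VF message layout on the wire.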
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 968173ec2726..15bfb15c2fa5 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -623,6 +623,13 @@ bool acpi_gtdt_c3stop(int type); int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count); #endif +#ifndef ACPI_HAVE_ARCH_GET_ROOT_POINTER +static inline u64 acpi_arch_get_root_pointer(void) +{ + return 0; +} +#endif + #else /* !CONFIG_ACPI */ #define acpi_disabled 1 diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 2f7a29242b87..38cd77b39a64 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -26,7 +26,8 @@ #define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL) #define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL) -int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node); +int iort_register_domain_token(int trans_id, phys_addr_t base, + struct fwnode_handle *fw_node); void iort_deregister_domain_token(int trans_id); struct fwnode_handle *iort_find_domain_token(int trans_id); #ifdef CONFIG_ACPI_IORT @@ -38,6 +39,7 @@ int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id); /* IOMMU interface */ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size); const struct iommu_ops *iort_iommu_configure(struct device *dev); +int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head); #else static inline void acpi_iort_init(void) { } static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id) @@ -52,6 +54,9 @@ static inline void iort_dma_setup(struct device *dev, u64 *dma_addr, static inline const struct iommu_ops *iort_iommu_configure( struct device *dev) { return NULL; } +static inline +int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) +{ return 0; } #endif #endif /* __ACPI_IORT_H__ */ diff --git a/include/linux/atalk.h b/include/linux/atalk.h index 4d356e168692..40373920ea58 100644 --- a/include/linux/atalk.h +++ b/include/linux/atalk.h @@ -113,10 +113,12 @@ extern void aarp_proto_init(void); /* Inter module exports */ /* Give a device find its atif control structure */ +#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK) static inline struct atalk_iface *atalk_find_dev(struct net_device *dev) { return dev->atalk_ptr; } +#endif extern struct atalk_addr *atalk_find_dev_addr(struct net_device *dev); extern struct net_device *atrtr_get_dev(struct atalk_addr *sa); diff --git a/include/linux/audit.h b/include/linux/audit.h index af410d9fbf2d..75d5b031e802 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -146,8 +146,7 @@ extern void audit_log_d_path(struct audit_buffer *ab, const struct path *path); extern void audit_log_key(struct audit_buffer *ab, char *key); -extern void audit_log_link_denied(const char *operation, - const struct path *link); +extern void audit_log_link_denied(const char *operation); extern void audit_log_lost(const char *message); extern int audit_log_task_context(struct audit_buffer *ab); @@ -194,8 +193,7 @@ static inline void audit_log_d_path(struct audit_buffer *ab, { } static inline void audit_log_key(struct audit_buffer *ab, char *key) { } -static inline void audit_log_link_denied(const char *string, - const struct path *link) +static inline void audit_log_link_denied(const char *string) { } static inline int audit_log_task_context(struct audit_buffer *ab) { diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 3ce61342fa31..b0a7f315bfbe 100644 --- a/include/linux/avf/virtchnl.h +++ 
b/include/linux/avf/virtchnl.h @@ -136,15 +136,21 @@ enum virtchnl_ops { VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28, VIRTCHNL_OP_REQUEST_QUEUES = 29, + VIRTCHNL_OP_ENABLE_CHANNELS = 30, + VIRTCHNL_OP_DISABLE_CHANNELS = 31, + VIRTCHNL_OP_ADD_CLOUD_FILTER = 32, + VIRTCHNL_OP_DEL_CLOUD_FILTER = 33, }; -/* This macro is used to generate a compilation error if a structure +/* These macros are used to generate compilation errors if a structure/union * is not exactly the correct length. It gives a divide by zero error if the - * structure is not of the correct size, otherwise it creates an enum that is - * never used. + * structure/union is not of the correct size, otherwise it creates an enum + * that is never used. */ #define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \ { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } +#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \ + { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) } /* Virtual channel message descriptor. This overlays the admin queue * descriptor. All other data is passed in external buffers. @@ -244,6 +250,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); #define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 #define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 +#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ VIRTCHNL_VF_OFFLOAD_VLAN | \ @@ -496,6 +503,81 @@ struct virtchnl_rss_hena { VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena); +/* VIRTCHNL_OP_ENABLE_CHANNELS + * VIRTCHNL_OP_DISABLE_CHANNELS + * VF sends these messages to enable or disable channels based on + * the user specified queue count and queue offset for each traffic class. + * This struct encompasses all the information that the PF needs from + * VF to create a channel. + */ +struct virtchnl_channel_info { + u16 count; /* number of queues in a channel */ + u16 offset; /* queues in a channel start from 'offset' */ + u32 pad; + u64 max_tx_rate; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info); + +struct virtchnl_tc_info { + u32 num_tc; + u32 pad; + struct virtchnl_channel_info list[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info); + +/* VIRTCHNL_ADD_CLOUD_FILTER + * VIRTCHNL_DEL_CLOUD_FILTER + * VF sends these messages to add or delete a cloud filter based on the + * user specified match and action filters. These structures encompass + * all the information that the PF needs from the VF to add/delete a + * cloud filter. 
+ */ + +struct virtchnl_l4_spec { + u8 src_mac[ETH_ALEN]; + u8 dst_mac[ETH_ALEN]; + __be16 vlan_id; + __be16 pad; /* reserved for future use */ + __be32 src_ip[4]; + __be32 dst_ip[4]; + __be16 src_port; + __be16 dst_port; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec); + +union virtchnl_flow_spec { + struct virtchnl_l4_spec tcp_spec; + u8 buffer[128]; /* reserved for future use */ +}; + +VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec); + +enum virtchnl_action { + /* action types */ + VIRTCHNL_ACTION_DROP = 0, + VIRTCHNL_ACTION_TC_REDIRECT, +}; + +enum virtchnl_flow_type { + /* flow types */ + VIRTCHNL_TCP_V4_FLOW = 0, + VIRTCHNL_TCP_V6_FLOW, +}; + +struct virtchnl_filter { + union virtchnl_flow_spec data; + union virtchnl_flow_spec mask; + enum virtchnl_flow_type flow_type; + enum virtchnl_action action; + u32 action_meta; + __u8 field_flags; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter); + /* VIRTCHNL_OP_EVENT * PF sends this message to inform the VF driver of events that may affect it. * No direct response is expected from the VF, though it may generate other @@ -711,6 +793,25 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, case VIRTCHNL_OP_REQUEST_QUEUES: valid_len = sizeof(struct virtchnl_vf_res_request); break; + case VIRTCHNL_OP_ENABLE_CHANNELS: + valid_len = sizeof(struct virtchnl_tc_info); + if (msglen >= valid_len) { + struct virtchnl_tc_info *vti = + (struct virtchnl_tc_info *)msg; + valid_len += vti->num_tc * + sizeof(struct virtchnl_channel_info); + if (vti->num_tc == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_DISABLE_CHANNELS: + break; + case VIRTCHNL_OP_ADD_CLOUD_FILTER: + valid_len = sizeof(struct virtchnl_filter); + break; + case VIRTCHNL_OP_DEL_CLOUD_FILTER: + valid_len = sizeof(struct virtchnl_filter); + break; /* These are always errors coming from the VF. */ case VIRTCHNL_OP_EVENT: case VIRTCHNL_OP_UNKNOWN: diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index bfe86b54f6c1..0bd432a4d7bd 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -223,6 +223,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync) set_wb_congested(bdi->wb.congested, sync); } +struct wb_lock_cookie { + bool locked; + unsigned long flags; +}; + #ifdef CONFIG_CGROUP_WRITEBACK /** diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 3e4ce54d84ab..72ca0f3d39f3 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -28,6 +28,7 @@ void bdi_put(struct backing_dev_info *bdi); __printf(2, 3) int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...); +__printf(2, 0) int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args); int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner); @@ -175,7 +176,7 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) } long congestion_wait(int sync, long timeout); -long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout); +long wait_iff_congested(int sync, long timeout); static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi) { @@ -329,7 +330,7 @@ static inline bool inode_to_wb_is_valid(struct inode *inode) * @inode: inode of interest * * Returns the wb @inode is currently associated with. 
The caller must be - * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the + * holding either @inode->i_lock, the i_pages lock, or the * associated wb's list_lock. */ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) @@ -337,7 +338,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(debug_locks && (!lockdep_is_held(&inode->i_lock) && - !lockdep_is_held(&inode->i_mapping->tree_lock) && + !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) && !lockdep_is_held(&inode->i_wb->list_lock))); #endif return inode->i_wb; @@ -346,20 +347,20 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) /** * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction * @inode: target inode - * @lockedp: temp bool output param, to be passed to the end function + * @cookie: output param, to be passed to the end function * * The caller wants to access the wb associated with @inode but isn't - * holding inode->i_lock, mapping->tree_lock or wb->list_lock. This + * holding inode->i_lock, the i_pages lock or wb->list_lock. This * function determines the wb associated with @inode and ensures that the * association doesn't change until the transaction is finished with * unlocked_inode_to_wb_end(). * - * The caller must call unlocked_inode_to_wb_end() with *@lockdep - * afterwards and can't sleep during transaction. IRQ may or may not be - * disabled on return. + * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and + * can't sleep during the transaction. IRQs may or may not be disabled on + * return. */ static inline struct bdi_writeback * -unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) +unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) { rcu_read_lock(); @@ -367,14 +368,14 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) * Paired with store_release in inode_switch_wb_work_fn() and * ensures that we see the new wb if we see cleared I_WB_SWITCH. */ - *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; + cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; - if (unlikely(*lockedp)) - spin_lock_irq(&inode->i_mapping->tree_lock); + if (unlikely(cookie->locked)) + xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags); /* - * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock. - * inode_to_wb() will bark. Deref directly. + * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages + * lock. inode_to_wb() will bark. Deref directly. 
*/ return inode->i_wb; } @@ -382,12 +383,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) /** * unlocked_inode_to_wb_end - end inode wb access transaction * @inode: target inode - * @locked: *@lockedp from unlocked_inode_to_wb_begin() + * @cookie: @cookie from unlocked_inode_to_wb_begin() */ -static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) +static inline void unlocked_inode_to_wb_end(struct inode *inode, + struct wb_lock_cookie *cookie) { - if (unlikely(locked)) - spin_unlock_irq(&inode->i_mapping->tree_lock); + if (unlikely(cookie->locked)) + xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags); rcu_read_unlock(); } @@ -434,12 +436,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode) } static inline struct bdi_writeback * -unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) +unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) { return inode_to_wb(inode); } -static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) +static inline void unlocked_inode_to_wb_end(struct inode *inode, + struct wb_lock_cookie *cookie) { } diff --git a/include/linux/backlight.h b/include/linux/backlight.h index af7003548593..2baab6f3861d 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -130,6 +130,48 @@ static inline int backlight_update_status(struct backlight_device *bd) return ret; } +/** + * backlight_enable - Enable backlight + * @bd: the backlight device to enable + */ +static inline int backlight_enable(struct backlight_device *bd) +{ + if (!bd) + return 0; + + bd->props.power = FB_BLANK_UNBLANK; + bd->props.fb_blank = FB_BLANK_UNBLANK; + bd->props.state &= ~BL_CORE_FBBLANK; + + return backlight_update_status(bd); +} + +/** + * backlight_disable - Disable backlight + * @bd: the backlight device to disable + */ +static inline int backlight_disable(struct backlight_device *bd) +{ + if (!bd) + return 0; + + bd->props.power = FB_BLANK_POWERDOWN; + bd->props.fb_blank = FB_BLANK_POWERDOWN; + bd->props.state |= BL_CORE_FBBLANK; + + return backlight_update_status(bd); +} + +/** + * backlight_put - Drop backlight reference + * @bd: the backlight device to put + */ +static inline void backlight_put(struct backlight_device *bd) +{ + if (bd) + put_device(&bd->dev); +} + extern struct backlight_device *backlight_device_register(const char *name, struct device *dev, void *devdata, const struct backlight_ops *ops, const struct backlight_properties *props); @@ -173,4 +215,20 @@ of_find_backlight_by_node(struct device_node *node) } #endif +#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) +struct backlight_device *of_find_backlight(struct device *dev); +struct backlight_device *devm_of_find_backlight(struct device *dev); +#else +static inline struct backlight_device *of_find_backlight(struct device *dev) +{ + return NULL; +} + +static inline struct backlight_device * +devm_of_find_backlight(struct device *dev) +{ + return NULL; +} +#endif + #endif diff --git a/include/linux/bfin_mac.h b/include/linux/bfin_mac.h deleted file mode 100644 index a69554ef8476..000000000000 --- a/include/linux/bfin_mac.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Blackfin On-Chip MAC Driver - * - * Copyright 2004-2010 Analog Devices Inc. - * - * Enter bugs at http://blackfin.uclinux.org/ - * - * Licensed under the GPL-2 or later. 
- */ - -#ifndef _LINUX_BFIN_MAC_H_ -#define _LINUX_BFIN_MAC_H_ - -#include <linux/phy.h> - -struct bfin_phydev_platform_data { - unsigned short addr; - int irq; -}; - -struct bfin_mii_bus_platform_data { - int phydev_number; - struct bfin_phydev_platform_data *phydev_data; - const unsigned short *mac_peripherals; - int phy_mode; - unsigned int phy_mask; - unsigned short vlan1_mask, vlan2_mask; -}; - -#endif diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index b0abe21d6cc9..4955e0863b83 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -61,6 +61,8 @@ struct linux_binprm { unsigned interp_flags; unsigned interp_data; unsigned long loader, exec; + + struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. */ } __randomize_layout; #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 @@ -118,6 +120,7 @@ extern int __must_check remove_arg_zero(struct linux_binprm *); extern int search_binary_handler(struct linux_binprm *); extern int flush_old_exec(struct linux_binprm * bprm); extern void setup_new_exec(struct linux_binprm * bprm); +extern void finalize_exec(struct linux_binprm *bprm); extern void would_dump(struct linux_binprm *, struct file *); extern int suid_dumpable; diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 5f11fbdc27f8..1ee46f492267 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -302,12 +302,20 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr __bitmap_complement(dst, src, nbits); } +#ifdef __LITTLE_ENDIAN +#define BITMAP_MEM_ALIGNMENT 8 +#else +#define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long)) +#endif +#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1) + static inline int bitmap_equal(const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { if (small_const_nbits(nbits)) return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); - if (__builtin_constant_p(nbits & 7) && IS_ALIGNED(nbits, 8)) + if (__builtin_constant_p(nbits & BITMAP_MEM_MASK) && + IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT)) return !memcmp(src1, src2, nbits / 8); return __bitmap_equal(src1, src2, nbits); } @@ -358,8 +366,10 @@ static __always_inline void bitmap_set(unsigned long *map, unsigned int start, { if (__builtin_constant_p(nbits) && nbits == 1) __set_bit(start, map); - else if (__builtin_constant_p(start & 7) && IS_ALIGNED(start, 8) && - __builtin_constant_p(nbits & 7) && IS_ALIGNED(nbits, 8)) + else if (__builtin_constant_p(start & BITMAP_MEM_MASK) && + IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) && + __builtin_constant_p(nbits & BITMAP_MEM_MASK) && + IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT)) memset((char *)map + start / 8, 0xff, nbits / 8); else __bitmap_set(map, start, nbits); @@ -370,8 +380,10 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start, { if (__builtin_constant_p(nbits) && nbits == 1) __clear_bit(start, map); - else if (__builtin_constant_p(start & 7) && IS_ALIGNED(start, 8) && - __builtin_constant_p(nbits & 7) && IS_ALIGNED(nbits, 8)) + else if (__builtin_constant_p(start & BITMAP_MEM_MASK) && + IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) && + __builtin_constant_p(nbits & BITMAP_MEM_MASK) && + IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT)) memset((char *)map + start / 8, 0, nbits / 8); else __bitmap_clear(map, start, nbits); diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 69bea82ebeb1..6c666fd7de3c 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -88,6 +88,7 @@ struct blkg_policy_data { /* the blkg 
and policy id this per-policy data belongs to */ struct blkcg_gq *blkg; int plid; + bool offline; }; /* diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h index 6338551e0fb9..9f4c17f0d2d8 100644 --- a/include/linux/blk-mq-pci.h +++ b/include/linux/blk-mq-pci.h @@ -5,6 +5,7 @@ struct blk_mq_tag_set; struct pci_dev; -int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev); +int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev, + int offset); #endif /* _LINUX_BLK_MQ_PCI_H */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 8efcf49796a3..e3986f4b3461 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -183,7 +183,6 @@ enum { BLK_MQ_S_STOPPED = 0, BLK_MQ_S_TAG_ACTIVE = 1, BLK_MQ_S_SCHED_RESTART = 2, - BLK_MQ_S_START_ON_RUN = 3, BLK_MQ_MAX_DEPTH = 10240, @@ -270,7 +269,6 @@ void blk_mq_unquiesce_queue(struct request_queue *q); void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_run_hw_queues(struct request_queue *q, bool async); -void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, busy_tag_iter_fn *fn, void *priv); void blk_mq_freeze_queue(struct request_queue *q); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index bf18b95ed92d..17b18b91ebac 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -20,8 +20,13 @@ typedef void (bio_end_io_t) (struct bio *); /* * Block error status values. See block/blk-core:blk_errors for the details. + * Alpha cannot write a byte atomically, so we need to use 32-bit value. */ +#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__) +typedef u32 __bitwise blk_status_t; +#else typedef u8 __bitwise blk_status_t; +#endif #define BLK_STS_OK 0 #define BLK_STS_NOTSUPP ((__force blk_status_t)1) #define BLK_STS_TIMEOUT ((__force blk_status_t)2) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ed63f3b69c12..9af3e0f430bc 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -707,73 +707,10 @@ struct request_queue { (1 << QUEUE_FLAG_SAME_COMP) | \ (1 << QUEUE_FLAG_POLL)) -/* - * @q->queue_lock is set while a queue is being initialized. Since we know - * that no other threads access the queue object before @q->queue_lock has - * been set, it is safe to manipulate queue flags without holding the - * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and - * blk_init_allocated_queue(). 
- */ -static inline void queue_lockdep_assert_held(struct request_queue *q) -{ - if (q->queue_lock) - lockdep_assert_held(q->queue_lock); -} - -static inline void queue_flag_set_unlocked(unsigned int flag, - struct request_queue *q) -{ - __set_bit(flag, &q->queue_flags); -} - -static inline int queue_flag_test_and_clear(unsigned int flag, - struct request_queue *q) -{ - queue_lockdep_assert_held(q); - - if (test_bit(flag, &q->queue_flags)) { - __clear_bit(flag, &q->queue_flags); - return 1; - } - - return 0; -} - -static inline int queue_flag_test_and_set(unsigned int flag, - struct request_queue *q) -{ - queue_lockdep_assert_held(q); - - if (!test_bit(flag, &q->queue_flags)) { - __set_bit(flag, &q->queue_flags); - return 0; - } - - return 1; -} - -static inline void queue_flag_set(unsigned int flag, struct request_queue *q) -{ - queue_lockdep_assert_held(q); - __set_bit(flag, &q->queue_flags); -} - -static inline void queue_flag_clear_unlocked(unsigned int flag, - struct request_queue *q) -{ - __clear_bit(flag, &q->queue_flags); -} - -static inline int queue_in_flight(struct request_queue *q) -{ - return q->in_flight[0] + q->in_flight[1]; -} - -static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) -{ - queue_lockdep_assert_held(q); - __clear_bit(flag, &q->queue_flags); -} +void blk_queue_flag_set(unsigned int flag, struct request_queue *q); +void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); +bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); +bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q); #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) @@ -804,6 +741,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) extern int blk_set_preempt_only(struct request_queue *q); extern void blk_clear_preempt_only(struct request_queue *q); +static inline int queue_in_flight(struct request_queue *q) +{ + return q->in_flight[0] + q->in_flight[1]; +} + static inline bool blk_account_rq(struct request *rq) { return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); @@ -1080,6 +1022,19 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev) } /* + * The basic unit of block I/O is a sector. It is used in a number of contexts + * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9 + * bytes. Variables of type sector_t represent an offset or size that is a + * multiple of 512 bytes. Hence these two constants. 
+ */ +#ifndef SECTOR_SHIFT +#define SECTOR_SHIFT 9 +#endif +#ifndef SECTOR_SIZE +#define SECTOR_SIZE (1 << SECTOR_SHIFT) +#endif + +/* * blk_rq_pos() : the current sector * blk_rq_bytes() : bytes left in the entire request * blk_rq_cur_bytes() : bytes left in the current segment @@ -1106,12 +1061,12 @@ extern unsigned int blk_rq_err_bytes(const struct request *rq); static inline unsigned int blk_rq_sectors(const struct request *rq) { - return blk_rq_bytes(rq) >> 9; + return blk_rq_bytes(rq) >> SECTOR_SHIFT; } static inline unsigned int blk_rq_cur_sectors(const struct request *rq) { - return blk_rq_cur_bytes(rq) >> 9; + return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; } static inline unsigned int blk_rq_zone_no(struct request *rq) @@ -1141,7 +1096,8 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, int op) { if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)) - return min(q->limits.max_discard_sectors, UINT_MAX >> 9); + return min(q->limits.max_discard_sectors, + UINT_MAX >> SECTOR_SHIFT); if (unlikely(op == REQ_OP_WRITE_SAME)) return q->limits.max_write_same_sectors; @@ -1321,7 +1277,8 @@ extern long nr_blockdev_pages(void); bool __must_check blk_get_queue(struct request_queue *); struct request_queue *blk_alloc_queue(gfp_t); -struct request_queue *blk_alloc_queue_node(gfp_t, int); +struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, + spinlock_t *lock); extern void blk_put_queue(struct request_queue *); extern void blk_set_queue_dying(struct request_queue *); @@ -1452,16 +1409,21 @@ extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, static inline int sb_issue_discard(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) { - return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), - nr_blocks << (sb->s_blocksize_bits - 9), + return blkdev_issue_discard(sb->s_bdev, + block << (sb->s_blocksize_bits - + SECTOR_SHIFT), + nr_blocks << (sb->s_blocksize_bits - + SECTOR_SHIFT), gfp_mask, flags); } static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask) { return blkdev_issue_zeroout(sb->s_bdev, - block << (sb->s_blocksize_bits - 9), - nr_blocks << (sb->s_blocksize_bits - 9), + block << (sb->s_blocksize_bits - + SECTOR_SHIFT), + nr_blocks << (sb->s_blocksize_bits - + SECTOR_SHIFT), gfp_mask, 0); } @@ -1568,7 +1530,8 @@ static inline int queue_alignment_offset(struct request_queue *q) static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) { unsigned int granularity = max(lim->physical_block_size, lim->io_min); - unsigned int alignment = sector_div(sector, granularity >> 9) << 9; + unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT) + << SECTOR_SHIFT; return (granularity + lim->alignment_offset - alignment) % granularity; } @@ -1602,8 +1565,8 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector return 0; /* Why are these in bytes, not sectors? 
*/ - alignment = lim->discard_alignment >> 9; - granularity = lim->discard_granularity >> 9; + alignment = lim->discard_alignment >> SECTOR_SHIFT; + granularity = lim->discard_granularity >> SECTOR_SHIFT; if (!granularity) return 0; @@ -1614,7 +1577,7 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector offset = (granularity + alignment - offset) % granularity; /* Turn it back into bytes, gaah */ - return offset << 9; + return offset << SECTOR_SHIFT; } static inline int bdev_discard_alignment(struct block_device *bdev) diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index a53063e9d7d8..7942a96b1a9d 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -364,15 +364,6 @@ static inline void __init memblock_free_late( } #endif /* defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) */ -#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP -extern void *alloc_remap(int nid, unsigned long size); -#else -static inline void *alloc_remap(int nid, unsigned long size) -{ - return NULL; -} -#endif /* CONFIG_HAVE_ARCH_ALLOC_REMAP */ - extern void *alloc_large_system_hash(const char *tablename, unsigned long bucketsize, unsigned long numentries, diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index a7f16e0f8d68..30d15e64b993 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -6,6 +6,7 @@ #include <uapi/linux/bpf.h> struct sock; +struct sockaddr; struct cgroup; struct sk_buff; struct bpf_sock_ops_kern; @@ -63,6 +64,10 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, int __cgroup_bpf_run_filter_sk(struct sock *sk, enum bpf_attach_type type); +int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, + struct sockaddr *uaddr, + enum bpf_attach_type type); + int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, struct bpf_sock_ops_kern *sock_ops, enum bpf_attach_type type); @@ -93,16 +98,64 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, __ret; \ }) +#define BPF_CGROUP_RUN_SK_PROG(sk, type) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) { \ + __ret = __cgroup_bpf_run_filter_sk(sk, type); \ + } \ + __ret; \ +}) + #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ + BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE) + +#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \ + BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND) + +#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \ + BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND) + +#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type); \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled && sk) { \ - __ret = __cgroup_bpf_run_filter_sk(sk, \ - BPF_CGROUP_INET_SOCK_CREATE); \ + if (cgroup_bpf_enabled) { \ + lock_sock(sk); \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type); \ + release_sock(sk); \ } \ __ret; \ }) +#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND) + +#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND) + +#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \ + sk->sk_prot->pre_connect) + +#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT) + +#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, 
BPF_CGROUP_INET6_CONNECT) + +#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT) + +#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT) + #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \ ({ \ int __ret = 0; \ @@ -132,9 +185,18 @@ struct cgroup_bpf {}; static inline void cgroup_bpf_put(struct cgroup *cgrp) {} static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } +#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 66df387106de..95a7abd0ee92 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -21,6 +21,7 @@ struct bpf_verifier_env; struct perf_event; struct bpf_prog; struct bpf_map; +struct sock; /* map is generic key/value storage optionally accesible by eBPF programs */ struct bpf_map_ops { @@ -207,12 +208,15 @@ struct bpf_prog_ops { struct bpf_verifier_ops { /* return eBPF function prototype for verification */ - const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id); + const struct bpf_func_proto * + (*get_func_proto)(enum bpf_func_id func_id, + const struct bpf_prog *prog); /* return true if 'size' wide access at offset 'off' within bpf_context * with 'type' (read or write) is allowed */ bool (*is_valid_access)(int off, int size, enum bpf_access_type type, + const struct bpf_prog *prog, struct bpf_insn_access_aux *info); int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, const struct bpf_prog *prog); diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 19b8349a3809..2b28fcf6f6ae 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -8,16 +8,19 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act) BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp) BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb) BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr) BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout) BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout) BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit) BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops) BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb) +BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg) #endif #ifdef CONFIG_BPF_EVENTS BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe) BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint) BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event) +BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint) #endif #ifdef CONFIG_CGROUP_BPF BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 6b66cd1aa0b9..7e61c395fddf 100644 --- 
a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -153,7 +153,7 @@ struct bpf_insn_aux_data { #define BPF_VERIFIER_TMP_LOG_SIZE 1024 -struct bpf_verifer_log { +struct bpf_verifier_log { u32 level; char kbuf[BPF_VERIFIER_TMP_LOG_SIZE]; char __user *ubuf; @@ -161,11 +161,16 @@ struct bpf_verifer_log { u32 len_total; }; -static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log) +static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log) { return log->len_used >= log->len_total - 1; } +static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) +{ + return log->level && log->ubuf && !bpf_verifier_log_full(log); +} + #define BPF_MAX_SUBPROGS 256 /* single container for all structs @@ -185,13 +190,15 @@ struct bpf_verifier_env { bool allow_ptr_leaks; bool seen_direct_write; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ - struct bpf_verifer_log log; + struct bpf_verifier_log log; u32 subprog_starts[BPF_MAX_SUBPROGS]; /* computes the stack depth of each bpf function */ u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1]; u32 subprog_cnt; }; +void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, + va_list args); __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, const char *fmt, ...); diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index b1be0233ce35..28a7ccc55c89 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -38,12 +38,12 @@ struct bsg_buffer { }; struct bsg_job { - struct scsi_request sreq; struct device *dev; - struct request *req; struct kref kref; + unsigned int timeout; + /* Transport/driver specific request/reply structs */ void *request; void *reply; @@ -63,6 +63,9 @@ struct bsg_job { struct bsg_buffer request_payload; struct bsg_buffer reply_payload; + int result; + unsigned int reply_payload_rcv_len; + void *dd_data; /* Used for driver-specific storage */ }; diff --git a/include/linux/bsg.h b/include/linux/bsg.h index 2a202e41a3af..0c7dd9ceb139 100644 --- a/include/linux/bsg.h +++ b/include/linux/bsg.h @@ -1,34 +1,43 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef BSG_H -#define BSG_H +#ifndef _LINUX_BSG_H +#define _LINUX_BSG_H #include <uapi/linux/bsg.h> +struct request; + +#ifdef CONFIG_BLK_DEV_BSG +struct bsg_ops { + int (*check_proto)(struct sg_io_v4 *hdr); + int (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr, + fmode_t mode); + int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr); + void (*free_rq)(struct request *rq); +}; -#if defined(CONFIG_BLK_DEV_BSG) struct bsg_class_device { struct device *class_dev; struct device *parent; int minor; struct request_queue *queue; struct kref ref; + const struct bsg_ops *ops; void (*release)(struct device *); }; -extern int bsg_register_queue(struct request_queue *q, - struct device *parent, const char *name, - void (*release)(struct device *)); -extern void bsg_unregister_queue(struct request_queue *); +int bsg_register_queue(struct request_queue *q, struct device *parent, + const char *name, const struct bsg_ops *ops, + void (*release)(struct device *)); +int bsg_scsi_register_queue(struct request_queue *q, struct device *parent); +void bsg_unregister_queue(struct request_queue *q); #else -static inline int bsg_register_queue(struct request_queue *q, - struct device *parent, const char *name, - void (*release)(struct device *)) +static inline int bsg_scsi_register_queue(struct request_queue *q, + struct device *parent) { return 0; } static inline void 
bsg_unregister_queue(struct request_queue *q) { } -#endif - -#endif +#endif /* CONFIG_BLK_DEV_BSG */ +#endif /* _LINUX_BSG_H */ diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h index 451aaa0786ae..4b13e0a3e15b 100644 --- a/include/linux/byteorder/generic.h +++ b/include/linux/byteorder/generic.h @@ -156,6 +156,23 @@ static inline void le64_add_cpu(__le64 *var, u64 val) *var = cpu_to_le64(le64_to_cpu(*var) + val); } +/* XXX: this stuff can be optimized */ +static inline void le32_to_cpu_array(u32 *buf, unsigned int words) +{ + while (words--) { + __le32_to_cpus(buf); + buf++; + } +} + +static inline void cpu_to_le32_array(u32 *buf, unsigned int words) +{ + while (words--) { + __cpu_to_le32s(buf); + buf++; + } +} + static inline void be16_add_cpu(__be16 *var, u16 val) { *var = cpu_to_be16(be16_to_cpu(*var) + val); diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 59042d5ac520..3901927cf6a0 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -204,6 +204,7 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \ CEPH_FEATURE_MSGR_KEEPALIVE2 | \ CEPH_FEATURE_OSD_POOLRESEND | \ + CEPH_FEATURE_MDS_QUOTA | \ CEPH_FEATURE_CRUSH_V4 | \ CEPH_FEATURE_NEW_OSDOP_ENCODING | \ CEPH_FEATURE_SERVER_JEWEL | \ diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 88dd51381aaf..7ecfc88314d8 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -134,6 +134,7 @@ struct ceph_dir_layout { #define CEPH_MSG_CLIENT_LEASE 0x311 #define CEPH_MSG_CLIENT_SNAP 0x312 #define CEPH_MSG_CLIENT_CAPRELEASE 0x313 +#define CEPH_MSG_CLIENT_QUOTA 0x314 /* pool ops */ #define CEPH_MSG_POOLOP_REPLY 48 @@ -807,4 +808,20 @@ struct ceph_mds_snap_realm { } __attribute__ ((packed)); /* followed by my snap list, then prior parent snap list */ +/* + * quotas + */ +struct ceph_mds_quota { + __le64 ino; /* ino */ + struct ceph_timespec rctime; + __le64 rbytes; /* dir stats */ + __le64 rfiles; + __le64 rsubdirs; + __u8 struct_v; /* compat */ + __u8 struct_compat; + __le32 struct_len; + __le64 max_bytes; /* quota max. bytes */ + __le64 max_files; /* quota max. 
files */ +} __attribute__ ((packed)); + #endif diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index c2ec44cf5098..49c93b9308d7 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -262,6 +262,7 @@ extern struct kmem_cache *ceph_cap_cachep; extern struct kmem_cache *ceph_cap_flush_cachep; extern struct kmem_cache *ceph_dentry_cachep; extern struct kmem_cache *ceph_file_cachep; +extern struct kmem_cache *ceph_dir_file_cachep; /* ceph_common.c */ extern bool libceph_compatible(void *data); diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index ead9d85f1c11..c7dfcb8a1fb2 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -76,6 +76,7 @@ enum ceph_msg_data_type { #ifdef CONFIG_BLOCK CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */ #endif /* CONFIG_BLOCK */ + CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */ }; static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) @@ -87,22 +88,106 @@ static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) #ifdef CONFIG_BLOCK case CEPH_MSG_DATA_BIO: #endif /* CONFIG_BLOCK */ + case CEPH_MSG_DATA_BVECS: return true; default: return false; } } +#ifdef CONFIG_BLOCK + +struct ceph_bio_iter { + struct bio *bio; + struct bvec_iter iter; +}; + +#define __ceph_bio_iter_advance_step(it, n, STEP) do { \ + unsigned int __n = (n), __cur_n; \ + \ + while (__n) { \ + BUG_ON(!(it)->iter.bi_size); \ + __cur_n = min((it)->iter.bi_size, __n); \ + (void)(STEP); \ + bio_advance_iter((it)->bio, &(it)->iter, __cur_n); \ + if (!(it)->iter.bi_size && (it)->bio->bi_next) { \ + dout("__ceph_bio_iter_advance_step next bio\n"); \ + (it)->bio = (it)->bio->bi_next; \ + (it)->iter = (it)->bio->bi_iter; \ + } \ + __n -= __cur_n; \ + } \ +} while (0) + +/* + * Advance @it by @n bytes. + */ +#define ceph_bio_iter_advance(it, n) \ + __ceph_bio_iter_advance_step(it, n, 0) + +/* + * Advance @it by @n bytes, executing BVEC_STEP for each bio_vec. + */ +#define ceph_bio_iter_advance_step(it, n, BVEC_STEP) \ + __ceph_bio_iter_advance_step(it, n, ({ \ + struct bio_vec bv; \ + struct bvec_iter __cur_iter; \ + \ + __cur_iter = (it)->iter; \ + __cur_iter.bi_size = __cur_n; \ + __bio_for_each_segment(bv, (it)->bio, __cur_iter, __cur_iter) \ + (void)(BVEC_STEP); \ + })) + +#endif /* CONFIG_BLOCK */ + +struct ceph_bvec_iter { + struct bio_vec *bvecs; + struct bvec_iter iter; +}; + +#define __ceph_bvec_iter_advance_step(it, n, STEP) do { \ + BUG_ON((n) > (it)->iter.bi_size); \ + (void)(STEP); \ + bvec_iter_advance((it)->bvecs, &(it)->iter, (n)); \ +} while (0) + +/* + * Advance @it by @n bytes. + */ +#define ceph_bvec_iter_advance(it, n) \ + __ceph_bvec_iter_advance_step(it, n, 0) + +/* + * Advance @it by @n bytes, executing BVEC_STEP for each bio_vec. 
+ */ +#define ceph_bvec_iter_advance_step(it, n, BVEC_STEP) \ + __ceph_bvec_iter_advance_step(it, n, ({ \ + struct bio_vec bv; \ + struct bvec_iter __cur_iter; \ + \ + __cur_iter = (it)->iter; \ + __cur_iter.bi_size = (n); \ + for_each_bvec(bv, (it)->bvecs, __cur_iter, __cur_iter) \ + (void)(BVEC_STEP); \ + })) + +#define ceph_bvec_iter_shorten(it, n) do { \ + BUG_ON((n) > (it)->iter.bi_size); \ + (it)->iter.bi_size = (n); \ +} while (0) + struct ceph_msg_data { struct list_head links; /* ceph_msg->data */ enum ceph_msg_data_type type; union { #ifdef CONFIG_BLOCK struct { - struct bio *bio; - size_t bio_length; + struct ceph_bio_iter bio_pos; + u32 bio_length; }; #endif /* CONFIG_BLOCK */ + struct ceph_bvec_iter bvec_pos; struct { struct page **pages; /* NOT OWNER. */ size_t length; /* total # bytes */ @@ -122,11 +207,9 @@ struct ceph_msg_data_cursor { bool need_crc; /* crc update needed */ union { #ifdef CONFIG_BLOCK - struct { /* bio */ - struct bio *bio; /* bio from list */ - struct bvec_iter bvec_iter; - }; + struct ceph_bio_iter bio_iter; #endif /* CONFIG_BLOCK */ + struct bvec_iter bvec_iter; struct { /* pages */ unsigned int page_offset; /* offset in page */ unsigned short page_index; /* index in array */ @@ -290,9 +373,11 @@ extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg, struct ceph_pagelist *pagelist); #ifdef CONFIG_BLOCK -extern void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio, - size_t length); +void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos, + u32 length); #endif /* CONFIG_BLOCK */ +void ceph_msg_data_add_bvecs(struct ceph_msg *msg, + struct ceph_bvec_iter *bvec_pos); extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, bool can_fail); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 52fb37d1c2a5..528ccc943cee 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -57,6 +57,7 @@ enum ceph_osd_data_type { #ifdef CONFIG_BLOCK CEPH_OSD_DATA_TYPE_BIO, #endif /* CONFIG_BLOCK */ + CEPH_OSD_DATA_TYPE_BVECS, }; struct ceph_osd_data { @@ -72,10 +73,11 @@ struct ceph_osd_data { struct ceph_pagelist *pagelist; #ifdef CONFIG_BLOCK struct { - struct bio *bio; /* list of bios */ - size_t bio_length; /* total in list */ + struct ceph_bio_iter bio_pos; + u32 bio_length; }; #endif /* CONFIG_BLOCK */ + struct ceph_bvec_iter bvec_pos; }; }; @@ -405,10 +407,14 @@ extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *, unsigned int which, struct ceph_pagelist *pagelist); #ifdef CONFIG_BLOCK -extern void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *, - unsigned int which, - struct bio *bio, size_t bio_length); +void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, + unsigned int which, + struct ceph_bio_iter *bio_pos, + u32 bio_length); #endif /* CONFIG_BLOCK */ +void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, + unsigned int which, + struct ceph_bvec_iter *bvec_pos); extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *, unsigned int which, @@ -418,6 +424,9 @@ extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages); +void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req, + unsigned int which, + struct bio_vec *bvecs, u32 bytes); extern void 
osd_req_op_cls_response_data_pages(struct ceph_osd_request *, unsigned int which, struct page **pages, u64 length, diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index d41fad99c0fa..e71fb222c7c3 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -5,7 +5,6 @@ #include <linux/rbtree.h> #include <linux/ceph/types.h> #include <linux/ceph/decode.h> -#include <linux/ceph/ceph_fs.h> #include <linux/crush/crush.h> /* @@ -280,11 +279,6 @@ bool ceph_osds_changed(const struct ceph_osds *old_acting, const struct ceph_osds *new_acting, bool any_change); -/* calculate mapping of a file extent to an object */ -extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, - u64 off, u64 len, - u64 *bno, u64 *oxoff, u64 *oxlen); - int __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi, const struct ceph_object_id *oid, const struct ceph_object_locator *oloc, diff --git a/include/linux/ceph/striper.h b/include/linux/ceph/striper.h new file mode 100644 index 000000000000..cbd0d24b7148 --- /dev/null +++ b/include/linux/ceph/striper.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CEPH_STRIPER_H +#define _LINUX_CEPH_STRIPER_H + +#include <linux/list.h> +#include <linux/types.h> + +struct ceph_file_layout; + +void ceph_calc_file_object_mapping(struct ceph_file_layout *l, + u64 off, u64 len, + u64 *objno, u64 *objoff, u32 *xlen); + +struct ceph_object_extent { + struct list_head oe_item; + u64 oe_objno; + u64 oe_off; + u64 oe_len; +}; + +static inline void ceph_object_extent_init(struct ceph_object_extent *ex) +{ + INIT_LIST_HEAD(&ex->oe_item); +} + +/* + * Called for each mapped stripe unit. + * + * @bytes: number of bytes mapped, i.e. the minimum of the full length + * requested (file extent length) or the remainder of the stripe + * unit within an object + */ +typedef void (*ceph_object_extent_fn_t)(struct ceph_object_extent *ex, + u32 bytes, void *arg); + +int ceph_file_to_extents(struct ceph_file_layout *l, u64 off, u64 len, + struct list_head *object_extents, + struct ceph_object_extent *alloc_fn(void *arg), + void *alloc_arg, + ceph_object_extent_fn_t action_fn, + void *action_arg); +int ceph_iterate_extents(struct ceph_file_layout *l, u64 off, u64 len, + struct list_head *object_extents, + ceph_object_extent_fn_t action_fn, + void *action_arg); + +struct ceph_file_extent { + u64 fe_off; + u64 fe_len; +}; + +static inline u64 ceph_file_extents_bytes(struct ceph_file_extent *file_extents, + u32 num_file_extents) +{ + u64 bytes = 0; + u32 i; + + for (i = 0; i < num_file_extents; i++) + bytes += file_extents[i].fe_len; + + return bytes; +} + +int ceph_extent_to_file(struct ceph_file_layout *l, + u64 objno, u64 objoff, u64 objlen, + struct ceph_file_extent **file_extents, + u32 *num_file_extents); + +#endif diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 9f242b876fde..dc5b70449dc6 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -151,8 +151,8 @@ struct cgroup_subsys_state { atomic_t online_cnt; /* percpu_ref killing and RCU release */ - struct rcu_head rcu_head; struct work_struct destroy_work; + struct rcu_work destroy_rwork; /* * PI: the parent css. Placed here for cache proximity to following @@ -755,13 +755,13 @@ struct sock_cgroup_data { * updaters and return part of the previous pointer as the prioidx or * classid. Such races are short-lived and the result isn't critical. 
*/ -static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd) +static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd) { /* fallback to 1 which is always the ID of the root cgroup */ return (skcd->is_data & 1) ? skcd->prioidx : 1; } -static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd) +static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd) { /* fallback to 0 which is the unconfigured default classid */ return (skcd->is_data & 1) ? skcd->classid : 0; diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index f711be6e8c44..210a890008f9 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -399,6 +399,7 @@ struct clk_divider { spinlock_t *lock; }; +#define clk_div_mask(width) ((1 << (width)) - 1) #define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw) #define CLK_DIVIDER_ONE_BASED BIT(0) @@ -419,6 +420,10 @@ long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, unsigned long rate, unsigned long *prate, const struct clk_div_table *table, u8 width, unsigned long flags); +long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, + unsigned long rate, unsigned long *prate, + const struct clk_div_table *table, u8 width, + unsigned long flags, unsigned int val); int divider_get_val(unsigned long rate, unsigned long parent_rate, const struct clk_div_table *table, u8 width, unsigned long flags); @@ -449,8 +454,9 @@ void clk_hw_unregister_divider(struct clk_hw *hw); * * @hw: handle between common and hardware-specific interfaces * @reg: register controlling multiplexer + * @table: array of register values corresponding to the parent index * @shift: shift to multiplexer bit field - * @width: width of mutliplexer bit field + * @mask: mask of mutliplexer bit field * @flags: hardware-specific flags * @lock: register lock * @@ -510,6 +516,10 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name, void __iomem *reg, u8 shift, u32 mask, u8 clk_mux_flags, u32 *table, spinlock_t *lock); +int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags, + unsigned int val); +unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index); + void clk_unregister_mux(struct clk *clk); void clk_hw_unregister_mux(struct clk_hw *hw); @@ -774,6 +784,17 @@ static inline long divider_round_rate(struct clk_hw *hw, unsigned long rate, rate, prate, table, width, flags); } +static inline long divider_ro_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate, + const struct clk_div_table *table, + u8 width, unsigned long flags, + unsigned int val) +{ + return divider_ro_round_rate_parent(hw, clk_hw_get_parent(hw), + rate, prate, table, width, flags, + val); +} + /* * FIXME clock api without lock protection */ diff --git a/include/linux/clk.h b/include/linux/clk.h index 4c4ef9f34db3..0dbd0885b2c2 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -209,7 +209,7 @@ static inline int clk_prepare(struct clk *clk) return 0; } -static inline int clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks) +static inline int __must_check clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks) { might_sleep(); return 0; @@ -603,8 +603,8 @@ static inline struct clk *clk_get(struct device *dev, const char *id) return NULL; } -static inline int clk_bulk_get(struct device *dev, int num_clks, - struct clk_bulk_data *clks) +static inline int __must_check clk_bulk_get(struct device *dev, int num_clks, + 
struct clk_bulk_data *clks) { return 0; } @@ -614,8 +614,8 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id) return NULL; } -static inline int devm_clk_bulk_get(struct device *dev, int num_clks, - struct clk_bulk_data *clks) +static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, + struct clk_bulk_data *clks) { return 0; } @@ -645,7 +645,7 @@ static inline int clk_enable(struct clk *clk) return 0; } -static inline int clk_bulk_enable(int num_clks, struct clk_bulk_data *clks) +static inline int __must_check clk_bulk_enable(int num_clks, struct clk_bulk_data *clks) { return 0; } @@ -719,8 +719,8 @@ static inline void clk_disable_unprepare(struct clk *clk) clk_unprepare(clk); } -static inline int clk_bulk_prepare_enable(int num_clks, - struct clk_bulk_data *clks) +static inline int __must_check clk_bulk_prepare_enable(int num_clks, + struct clk_bulk_data *clks) { int ret; diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index d23c9cf26993..afb9edfa5d58 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -128,5 +128,6 @@ extern void tegra210_sata_pll_hw_sequence_start(void); extern void tegra210_set_sata_pll_seq_sw(bool state); extern void tegra210_put_utmipll_in_iddq(void); extern void tegra210_put_utmipll_out_iddq(void); +extern int tegra210_clk_handle_mbist_war(unsigned int id); #endif /* __LINUX_CLK_TEGRA_H_ */ diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index d18da839b810..a8faa38b1ed6 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -203,6 +203,7 @@ enum { TI_CLKM_PRM, TI_CLKM_SCRM, TI_CLKM_CTRL, + TI_CLKM_CTRL_AUX, TI_CLKM_PLLSS, CLK_MAX_MEMMAPS }; @@ -211,6 +212,7 @@ enum { * struct ti_clk_ll_ops - low-level ops for clocks * @clk_readl: pointer to register read function * @clk_writel: pointer to register write function + * @clk_rmw: pointer to register read-modify-write function * @clkdm_clk_enable: pointer to clockdomain enable function * @clkdm_clk_disable: pointer to clockdomain disable function * @clkdm_lookup: pointer to clockdomain lookup function @@ -226,6 +228,7 @@ enum { struct ti_clk_ll_ops { u32 (*clk_readl)(const struct clk_omap_reg *reg); void (*clk_writel)(u32 val, const struct clk_omap_reg *reg); + void (*clk_rmw)(u32 val, u32 mask, const struct clk_omap_reg *reg); int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk); int (*clkdm_clk_disable)(struct clockdomain *clkdm, struct clk *clk); diff --git a/include/linux/compat.h b/include/linux/compat.h index 16c3027074a2..081281ad5772 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -24,6 +24,17 @@ #include <asm/siginfo.h> #include <asm/signal.h> +#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER +/* + * It may be useful for an architecture to override the definitions of the + * COMPAT_SYSCALL_DEFINE0 and COMPAT_SYSCALL_DEFINEx() macros, in particular + * to use a different calling convention for syscalls. To allow for that, + + the prototypes for the compat_sys_*() functions below will *not* be included + * if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled. 
+ */ +#include <asm/syscall_wrapper.h> +#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ + #ifndef COMPAT_USE_64BIT_TIME #define COMPAT_USE_64BIT_TIME 0 #endif @@ -32,8 +43,12 @@ #define __SC_DELOUSE(t,v) ((__force t)(unsigned long)(v)) #endif +#ifndef COMPAT_SYSCALL_DEFINE0 #define COMPAT_SYSCALL_DEFINE0(name) \ + asmlinkage long compat_sys_##name(void); \ + ALLOW_ERROR_INJECTION(compat_sys_##name, ERRNO); \ asmlinkage long compat_sys_##name(void) +#endif /* COMPAT_SYSCALL_DEFINE0 */ #define COMPAT_SYSCALL_DEFINE1(name, ...) \ COMPAT_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) @@ -48,16 +63,25 @@ #define COMPAT_SYSCALL_DEFINE6(name, ...) \ COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__) -#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ - asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))\ - __attribute__((alias(__stringify(compat_SyS##name)))); \ - static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ - asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));\ - asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))\ - { \ - return C_SYSC##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \ - } \ - static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)) +/* + * The asmlinkage stub is aliased to a function named __se_compat_sys_*() which + * sign-extends 32-bit ints to longs whenever needed. The actual work is + * done within __do_compat_sys_*(). + */ +#ifndef COMPAT_SYSCALL_DEFINEx +#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ + asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ + __attribute__((alias(__stringify(__se_compat_sys##name)))); \ + ALLOW_ERROR_INJECTION(compat_sys##name, ERRNO); \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ + asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + { \ + return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\ + } \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) +#endif /* COMPAT_SYSCALL_DEFINEx */ #ifndef compat_user_stack_pointer #define compat_user_stack_pointer() current_user_stack_pointer() @@ -222,6 +246,8 @@ typedef struct compat_siginfo { #ifdef __ARCH_SI_TRAPNO int _trapno; /* TRAP # which caused the signal */ #endif +#define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? \ + sizeof(short) : __alignof__(compat_uptr_t)) union { /* * used when si_code=BUS_MCEERR_AR or @@ -230,13 +256,13 @@ typedef struct compat_siginfo { short int _addr_lsb; /* Valid LSB of the reported address. 
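With this rework, a definition such as COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_ulong_t, length) expands roughly as sketched below; ftruncate is only a convenient example, and the ALLOW_ERROR_INJECTION() annotation plus the exact __MAP()/__SC_DELOUSE() casts are omitted for brevity:

asmlinkage long compat_sys_ftruncate(unsigned int fd, compat_ulong_t length)
        __attribute__((alias("__se_compat_sys_ftruncate")));
static inline long __do_compat_sys_ftruncate(unsigned int fd, compat_ulong_t length);

/* Takes sign-extended longs from the syscall entry path ... */
asmlinkage long __se_compat_sys_ftruncate(long fd, long length)
{
        /* ... narrows them back to the declared types and calls the body. */
        return __do_compat_sys_ftruncate((unsigned int)fd, (compat_ulong_t)length);
}

static inline long __do_compat_sys_ftruncate(unsigned int fd, compat_ulong_t length)
/* ... followed directly by the body written after the macro invocation */

Architectures that enable CONFIG_ARCH_HAS_SYSCALL_WRAPPER can replace this whole expansion with their own calling convention, which is why the definition is wrapped in #ifndef COMPAT_SYSCALL_DEFINEx.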
*/ /* used when si_code=SEGV_BNDERR */ struct { - compat_uptr_t _dummy_bnd; + char _dummy_bnd[__COMPAT_ADDR_BND_PKEY_PAD]; compat_uptr_t _lower; compat_uptr_t _upper; } _addr_bnd; /* used when si_code=SEGV_PKUERR */ struct { - compat_uptr_t _dummy_pkey; + char _dummy_pkey[__COMPAT_ADDR_BND_PKEY_PAD]; u32 _pkey; } _addr_pkey; }; @@ -305,10 +331,6 @@ extern int put_compat_rusage(const struct rusage *, struct compat_siginfo; -extern asmlinkage long compat_sys_waitid(int, compat_pid_t, - struct compat_siginfo __user *, int, - struct compat_rusage __user *); - struct compat_dirent { u32 d_ino; compat_off_t d_off; @@ -422,87 +444,6 @@ struct compat_msgbuf; extern void compat_exit_robust_list(struct task_struct *curr); -asmlinkage long -compat_sys_set_robust_list(struct compat_robust_list_head __user *head, - compat_size_t len); -asmlinkage long -compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, - compat_size_t __user *len_ptr); - -asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32); -asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg); -asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg); -asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp, - compat_ssize_t msgsz, int msgflg); -asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp, - compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg); -long compat_sys_msgctl(int first, int second, void __user *uptr); -long compat_sys_shmctl(int first, int second, void __user *uptr); -long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, - unsigned nsems, const struct compat_timespec __user *timeout); -asmlinkage long compat_sys_keyctl(u32 option, - u32 arg2, u32 arg3, u32 arg4, u32 arg5); -asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32); - -asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd, - const struct compat_iovec __user *vec, compat_ulong_t vlen); -asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd, - const struct compat_iovec __user *vec, compat_ulong_t vlen); -asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd, - const struct compat_iovec __user *vec, - compat_ulong_t vlen, u32 pos_low, u32 pos_high); -asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd, - const struct compat_iovec __user *vec, - compat_ulong_t vlen, u32 pos_low, u32 pos_high); -asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd, - const struct compat_iovec __user *vec, - compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags); -asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd, - const struct compat_iovec __user *vec, - compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags); - -#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64 -asmlinkage long compat_sys_preadv64(unsigned long fd, - const struct compat_iovec __user *vec, - unsigned long vlen, loff_t pos); -#endif - -#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64 -asmlinkage long compat_sys_pwritev64(unsigned long fd, - const struct compat_iovec __user *vec, - unsigned long vlen, loff_t pos); -#endif - -#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2 -asmlinkage long compat_sys_readv64v2(unsigned long fd, - const struct compat_iovec __user *vec, - unsigned long vlen, loff_t pos, rwf_t flags); -#endif - -#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2 -asmlinkage long compat_sys_pwritev64v2(unsigned long fd, - const struct compat_iovec __user *vec, - unsigned long vlen, loff_t pos, rwf_t flags); -#endif - -asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned 
int); - -asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, - const compat_uptr_t __user *envp); -asmlinkage long compat_sys_execveat(int dfd, const char __user *filename, - const compat_uptr_t __user *argv, - const compat_uptr_t __user *envp, int flags); - -asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, - compat_ulong_t __user *outp, compat_ulong_t __user *exp, - struct compat_timeval __user *tvp); - -asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg); - -asmlinkage long compat_sys_wait4(compat_pid_t pid, - compat_uint_t __user *stat_addr, int options, - struct compat_rusage __user *ru); - #define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) #define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG) @@ -515,13 +456,6 @@ int copy_siginfo_from_user32(siginfo_t *to, const struct compat_siginfo __user * int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from); int get_compat_sigevent(struct sigevent *event, const struct compat_sigevent __user *u_event); -long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig, - struct compat_siginfo __user *uinfo); -#ifdef CONFIG_COMPAT_OLD_SIGACTION -asmlinkage long compat_sys_sigaction(int sig, - const struct compat_old_sigaction __user *act, - struct compat_old_sigaction __user *oact); -#endif static inline int compat_timeval_compare(struct compat_timeval *lhs, struct compat_timeval *rhs) @@ -543,13 +477,6 @@ static inline int compat_timespec_compare(struct compat_timespec *lhs, return lhs->tv_nsec - rhs->tv_nsec; } -asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, - struct timezone __user *tz); -asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, - struct timezone __user *tz); - -asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); - extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat); /* @@ -575,110 +502,137 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, #endif } -asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, - compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, - const compat_ulong_t __user *new_nodes); - extern int compat_ptrace_request(struct task_struct *child, compat_long_t request, compat_ulong_t addr, compat_ulong_t data); extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t addr, compat_ulong_t data); -asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, - compat_long_t addr, compat_long_t data); -asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t); +struct epoll_event; /* fortunately, this one is fixed-layout */ + +extern ssize_t compat_rw_copy_check_uvector(int type, + const struct compat_iovec __user *uvector, + unsigned long nr_segs, + unsigned long fast_segs, struct iovec *fast_pointer, + struct iovec **ret_pointer); + +extern void __user *compat_alloc_user_space(unsigned long len); + +int compat_restore_altstack(const compat_stack_t __user *uss); +int __compat_save_altstack(compat_stack_t __user *, unsigned long); +#define compat_save_altstack_ex(uss, sp) do { \ + compat_stack_t __user *__uss = uss; \ + struct task_struct *t = current; \ + put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \ + put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ + put_user_ex(t->sas_ss_size, &__uss->ss_size); \ + if (t->sas_ss_flags & 
SS_AUTODISARM) \ + sas_ss_reset(t); \ +} while (0); + /* - * epoll (fs/eventpoll.c) compat bits follow ... + * These syscall function prototypes are kept in the same order as + * include/uapi/asm-generic/unistd.h. Deprecated or obsolete system calls + * go below. + * + * Please note that these prototypes here are only provided for information + * purposes, for static analysis, and for linking from the syscall table. + * These functions should not be called elsewhere from kernel code. + * + * As the syscall calling convention may be different from the default + * for architectures overriding the syscall calling convention, do not + * include the prototypes if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled. */ -struct epoll_event; /* fortunately, this one is fixed-layout */ +#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER +asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p); +asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr, + u32 __user *iocb); +asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id, + compat_long_t min_nr, + compat_long_t nr, + struct io_event __user *events, + struct compat_timespec __user *timeout); + +/* fs/cookies.c */ +asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t); + +/* fs/eventpoll.c */ asmlinkage long compat_sys_epoll_pwait(int epfd, struct epoll_event __user *events, int maxevents, int timeout, const compat_sigset_t __user *sigmask, compat_size_t sigsetsize); -asmlinkage long compat_sys_utime(const char __user *filename, - struct compat_utimbuf __user *t); -asmlinkage long compat_sys_utimensat(unsigned int dfd, - const char __user *filename, - struct compat_timespec __user *t, - int flags); +/* fs/fcntl.c */ +asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd, + compat_ulong_t arg); +asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd, + compat_ulong_t arg); -asmlinkage long compat_sys_time(compat_time_t __user *tloc); -asmlinkage long compat_sys_stime(compat_time_t __user *tptr); -asmlinkage long compat_sys_signalfd(int ufd, - const compat_sigset_t __user *sigmask, - compat_size_t sigsetsize); -asmlinkage long compat_sys_timerfd_settime(int ufd, int flags, - const struct compat_itimerspec __user *utmr, - struct compat_itimerspec __user *otmr); -asmlinkage long compat_sys_timerfd_gettime(int ufd, - struct compat_itimerspec __user *otmr); +/* fs/ioctl.c */ +asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, + compat_ulong_t arg); -asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages, - __u32 __user *pages, - const int __user *nodes, - int __user *status, - int flags); -asmlinkage long compat_sys_futimesat(unsigned int dfd, - const char __user *filename, - struct compat_timeval __user *t); -asmlinkage long compat_sys_utimes(const char __user *filename, - struct compat_timeval __user *t); -asmlinkage long compat_sys_newstat(const char __user *filename, - struct compat_stat __user *statbuf); -asmlinkage long compat_sys_newlstat(const char __user *filename, - struct compat_stat __user *statbuf); -asmlinkage long compat_sys_newfstatat(unsigned int dfd, - const char __user *filename, - struct compat_stat __user *statbuf, - int flag); -asmlinkage long compat_sys_newfstat(unsigned int fd, - struct compat_stat __user *statbuf); +/* fs/namespace.c */ +asmlinkage long compat_sys_mount(const char __user *dev_name, + const char __user *dir_name, + const char __user *type, compat_ulong_t flags, + const void __user *data); + +/* 
fs/open.c */ asmlinkage long compat_sys_statfs(const char __user *pathname, struct compat_statfs __user *buf); -asmlinkage long compat_sys_fstatfs(unsigned int fd, - struct compat_statfs __user *buf); asmlinkage long compat_sys_statfs64(const char __user *pathname, compat_size_t sz, struct compat_statfs64 __user *buf); +asmlinkage long compat_sys_fstatfs(unsigned int fd, + struct compat_statfs __user *buf); asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct compat_statfs64 __user *buf); -asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd, - compat_ulong_t arg); -asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd, - compat_ulong_t arg); -asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p); -asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id, - compat_long_t min_nr, - compat_long_t nr, - struct io_event __user *events, - struct compat_timespec __user *timeout); -asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr, - u32 __user *iocb); -asmlinkage long compat_sys_mount(const char __user *dev_name, - const char __user *dir_name, - const char __user *type, compat_ulong_t flags, - const void __user *data); -asmlinkage long compat_sys_old_readdir(unsigned int fd, - struct compat_old_linux_dirent __user *, - unsigned int count); +asmlinkage long compat_sys_truncate(const char __user *, compat_off_t); +asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t); +/* No generic prototype for truncate64, ftruncate64, fallocate */ +asmlinkage long compat_sys_openat(int dfd, const char __user *filename, + int flags, umode_t mode); + +/* fs/readdir.c */ asmlinkage long compat_sys_getdents(unsigned int fd, struct compat_linux_dirent __user *dirent, unsigned int count); -asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *, - unsigned int nr_segs, unsigned int flags); -asmlinkage long compat_sys_open(const char __user *filename, int flags, - umode_t mode); -asmlinkage long compat_sys_openat(int dfd, const char __user *filename, - int flags, umode_t mode); -asmlinkage long compat_sys_open_by_handle_at(int mountdirfd, - struct file_handle __user *handle, - int flags); -asmlinkage long compat_sys_truncate(const char __user *, compat_off_t); -asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t); + +/* fs/read_write.c */ +asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int); +asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd, + const struct compat_iovec __user *vec, compat_ulong_t vlen); +asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd, + const struct compat_iovec __user *vec, compat_ulong_t vlen); +/* No generic prototype for pread64 and pwrite64 */ +asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd, + const struct compat_iovec __user *vec, + compat_ulong_t vlen, u32 pos_low, u32 pos_high); +asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd, + const struct compat_iovec __user *vec, + compat_ulong_t vlen, u32 pos_low, u32 pos_high); +#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64 +asmlinkage long compat_sys_preadv64(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, loff_t pos); +#endif + +#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64 +asmlinkage long compat_sys_pwritev64(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, loff_t pos); +#endif + +/* fs/sendfile.c */ +asmlinkage long compat_sys_sendfile(int out_fd, int in_fd, + compat_off_t __user *offset, 
compat_size_t count); +asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd, + compat_loff_t __user *offset, compat_size_t count); + +/* fs/select.c */ asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, @@ -689,110 +643,149 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, struct compat_timespec __user *tsp, const compat_sigset_t __user *sigmask, compat_size_t sigsetsize); + +/* fs/signalfd.c */ asmlinkage long compat_sys_signalfd4(int ufd, const compat_sigset_t __user *sigmask, compat_size_t sigsetsize, int flags); -asmlinkage long compat_sys_get_mempolicy(int __user *policy, - compat_ulong_t __user *nmask, - compat_ulong_t maxnode, - compat_ulong_t addr, - compat_ulong_t flags); -asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, - compat_ulong_t maxnode); -asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, - compat_ulong_t mode, - compat_ulong_t __user *nmask, - compat_ulong_t maxnode, compat_ulong_t flags); -asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, - char __user *optval, unsigned int optlen); -asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, - unsigned flags); -asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, - unsigned vlen, unsigned int flags); -asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, - unsigned int flags); -asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len, - unsigned flags); -asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len, - unsigned flags, struct sockaddr __user *addr, - int __user *addrlen); -asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, - unsigned vlen, unsigned int flags, - struct compat_timespec __user *timeout); +/* fs/splice.c */ +asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *, + unsigned int nr_segs, unsigned int flags); + +/* fs/stat.c */ +asmlinkage long compat_sys_newfstatat(unsigned int dfd, + const char __user *filename, + struct compat_stat __user *statbuf, + int flag); +asmlinkage long compat_sys_newfstat(unsigned int fd, + struct compat_stat __user *statbuf); + +/* fs/sync.c: No generic prototype for sync_file_range and sync_file_range2 */ + +/* fs/timerfd.c */ +asmlinkage long compat_sys_timerfd_gettime(int ufd, + struct compat_itimerspec __user *otmr); +asmlinkage long compat_sys_timerfd_settime(int ufd, int flags, + const struct compat_itimerspec __user *utmr, + struct compat_itimerspec __user *otmr); + +/* fs/utimes.c */ +asmlinkage long compat_sys_utimensat(unsigned int dfd, + const char __user *filename, + struct compat_timespec __user *t, + int flags); + +/* kernel/exit.c */ +asmlinkage long compat_sys_waitid(int, compat_pid_t, + struct compat_siginfo __user *, int, + struct compat_rusage __user *); + + + +/* kernel/futex.c */ +asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, + struct compat_timespec __user *utime, u32 __user *uaddr2, + u32 val3); +asmlinkage long +compat_sys_set_robust_list(struct compat_robust_list_head __user *head, + compat_size_t len); +asmlinkage long +compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, + compat_size_t __user *len_ptr); + +/* kernel/hrtimer.c */ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, struct compat_timespec __user *rmtp); + +/* kernel/itimer.c */ asmlinkage 
long compat_sys_getitimer(int which, struct compat_itimerval __user *it); asmlinkage long compat_sys_setitimer(int which, struct compat_itimerval __user *in, struct compat_itimerval __user *out); -asmlinkage long compat_sys_times(struct compat_tms __user *tbuf); -asmlinkage long compat_sys_setrlimit(unsigned int resource, - struct compat_rlimit __user *rlim); -asmlinkage long compat_sys_getrlimit(unsigned int resource, - struct compat_rlimit __user *rlim); -asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru); -asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, - unsigned int len, - compat_ulong_t __user *user_mask_ptr); -asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, - unsigned int len, - compat_ulong_t __user *user_mask_ptr); + +/* kernel/kexec.c */ +asmlinkage long compat_sys_kexec_load(compat_ulong_t entry, + compat_ulong_t nr_segments, + struct compat_kexec_segment __user *, + compat_ulong_t flags); + +/* kernel/posix-timers.c */ asmlinkage long compat_sys_timer_create(clockid_t which_clock, struct compat_sigevent __user *timer_event_spec, timer_t __user *created_timer_id); +asmlinkage long compat_sys_timer_gettime(timer_t timer_id, + struct compat_itimerspec __user *setting); asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags, struct compat_itimerspec __user *new, struct compat_itimerspec __user *old); -asmlinkage long compat_sys_timer_gettime(timer_t timer_id, - struct compat_itimerspec __user *setting); asmlinkage long compat_sys_clock_settime(clockid_t which_clock, struct compat_timespec __user *tp); asmlinkage long compat_sys_clock_gettime(clockid_t which_clock, struct compat_timespec __user *tp); -asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock, - struct compat_timex __user *tp); asmlinkage long compat_sys_clock_getres(clockid_t which_clock, struct compat_timespec __user *tp); asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, struct compat_timespec __user *rqtp, struct compat_timespec __user *rmtp); -asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese, - struct compat_siginfo __user *uinfo, - struct compat_timespec __user *uts, compat_size_t sigsetsize); + +/* kernel/ptrace.c */ +asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, + compat_long_t addr, compat_long_t data); + +/* kernel/sched/core.c */ +asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, + unsigned int len, + compat_ulong_t __user *user_mask_ptr); +asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, + unsigned int len, + compat_ulong_t __user *user_mask_ptr); +asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, + struct compat_timespec __user *interval); + +/* kernel/signal.c */ +asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, + compat_stack_t __user *uoss_ptr); asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat_size_t sigsetsize); -asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set, - compat_sigset_t __user *oset, - compat_size_t sigsetsize); -asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset, - compat_size_t sigsetsize); #ifndef CONFIG_ODD_RT_SIGACTION asmlinkage long compat_sys_rt_sigaction(int, const struct compat_sigaction __user *, struct compat_sigaction __user *, compat_size_t); #endif +asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set, + compat_sigset_t __user *oset, + 
compat_size_t sigsetsize); +asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset, + compat_size_t sigsetsize); +asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese, + struct compat_siginfo __user *uinfo, + struct compat_timespec __user *uts, compat_size_t sigsetsize); asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig, struct compat_siginfo __user *uinfo); +/* No generic prototype for rt_sigreturn */ + +/* kernel/sys.c */ +asmlinkage long compat_sys_times(struct compat_tms __user *tbuf); +asmlinkage long compat_sys_getrlimit(unsigned int resource, + struct compat_rlimit __user *rlim); +asmlinkage long compat_sys_setrlimit(unsigned int resource, + struct compat_rlimit __user *rlim); +asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru); + +/* kernel/time.c */ +asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, + struct timezone __user *tz); +asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, + struct timezone __user *tz); +asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); + +/* kernel/timer.c */ asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info); -asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, - compat_ulong_t arg); -asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, - struct compat_timespec __user *utime, u32 __user *uaddr2, - u32 val3); -asmlinkage long compat_sys_getsockopt(int fd, int level, int optname, - char __user *optval, int __user *optlen); -asmlinkage long compat_sys_kexec_load(compat_ulong_t entry, - compat_ulong_t nr_segments, - struct compat_kexec_segment __user *, - compat_ulong_t flags); -asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, - const struct compat_mq_attr __user *u_mqstat, - struct compat_mq_attr __user *u_omqstat); -asmlinkage long compat_sys_mq_notify(mqd_t mqdes, - const struct compat_sigevent __user *u_notification); + +/* ipc/mqueue.c */ asmlinkage long compat_sys_mq_open(const char __user *u_name, int oflag, compat_mode_t mode, struct compat_mq_attr __user *u_attr); @@ -804,17 +797,92 @@ asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, compat_size_t msg_len, unsigned int __user *u_msg_prio, const struct compat_timespec __user *u_abs_timeout); -asmlinkage long compat_sys_socketcall(int call, u32 __user *args); -asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args); +asmlinkage long compat_sys_mq_notify(mqd_t mqdes, + const struct compat_sigevent __user *u_notification); +asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, + const struct compat_mq_attr __user *u_mqstat, + struct compat_mq_attr __user *u_omqstat); -extern ssize_t compat_rw_copy_check_uvector(int type, - const struct compat_iovec __user *uvector, - unsigned long nr_segs, - unsigned long fast_segs, struct iovec *fast_pointer, - struct iovec **ret_pointer); +/* ipc/msg.c */ +asmlinkage long compat_sys_msgctl(int first, int second, void __user *uptr); +asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp, + compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg); +asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp, + compat_ssize_t msgsz, int msgflg); -extern void __user *compat_alloc_user_space(unsigned long len); +/* ipc/sem.c */ +asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg); +asmlinkage long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, + unsigned nsems, const 
struct compat_timespec __user *timeout); + +/* ipc/shm.c */ +asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr); +asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg); +/* net/socket.c */ +asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len, + unsigned flags, struct sockaddr __user *addr, + int __user *addrlen); +asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, + char __user *optval, unsigned int optlen); +asmlinkage long compat_sys_getsockopt(int fd, int level, int optname, + char __user *optval, int __user *optlen); +asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, + unsigned flags); +asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, + unsigned int flags); + +/* mm/filemap.c: No generic prototype for readahead */ + +/* security/keys/keyctl.c */ +asmlinkage long compat_sys_keyctl(u32 option, + u32 arg2, u32 arg3, u32 arg4, u32 arg5); + +/* arch/example/kernel/sys_example.c */ +asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp); + +/* mm/fadvise.c: No generic prototype for fadvise64_64 */ + +/* mm/, CONFIG_MMU only */ +asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, + compat_ulong_t mode, + compat_ulong_t __user *nmask, + compat_ulong_t maxnode, compat_ulong_t flags); +asmlinkage long compat_sys_get_mempolicy(int __user *policy, + compat_ulong_t __user *nmask, + compat_ulong_t maxnode, + compat_ulong_t addr, + compat_ulong_t flags); +asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, + compat_ulong_t maxnode); +asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, + compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, + const compat_ulong_t __user *new_nodes); +asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages, + __u32 __user *pages, + const int __user *nodes, + int __user *status, + int flags); + +asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, + compat_pid_t pid, int sig, + struct compat_siginfo __user *uinfo); +asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, + unsigned vlen, unsigned int flags, + struct compat_timespec __user *timeout); +asmlinkage long compat_sys_wait4(compat_pid_t pid, + compat_uint_t __user *stat_addr, int options, + struct compat_rusage __user *ru); +asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32, + int, const char __user *); +asmlinkage long compat_sys_open_by_handle_at(int mountdirfd, + struct file_handle __user *handle, + int flags); +asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock, + struct compat_timex __user *tp); +asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, + unsigned vlen, unsigned int flags); asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid, const struct compat_iovec __user *lvec, compat_ulong_t liovcnt, const struct compat_iovec __user *rvec, @@ -823,14 +891,77 @@ asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid, const struct compat_iovec __user *lvec, compat_ulong_t liovcnt, const struct compat_iovec __user *rvec, compat_ulong_t riovcnt, compat_ulong_t flags); +asmlinkage long compat_sys_execveat(int dfd, const char __user *filename, + const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp, int flags); +asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd, + 
const struct compat_iovec __user *vec, + compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags); +asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd, + const struct compat_iovec __user *vec, + compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags); +#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2 +asmlinkage long compat_sys_readv64v2(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, loff_t pos, rwf_t flags); +#endif -asmlinkage long compat_sys_sendfile(int out_fd, int in_fd, - compat_off_t __user *offset, compat_size_t count); -asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd, - compat_loff_t __user *offset, compat_size_t count); -asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, - compat_stack_t __user *uoss_ptr); +#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2 +asmlinkage long compat_sys_pwritev64v2(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, loff_t pos, rwf_t flags); +#endif + + +/* + * Deprecated system calls which are still defined in + * include/uapi/asm-generic/unistd.h and wanted by >= 1 arch + */ + +/* __ARCH_WANT_SYSCALL_NO_AT */ +asmlinkage long compat_sys_open(const char __user *filename, int flags, + umode_t mode); +asmlinkage long compat_sys_utimes(const char __user *filename, + struct compat_timeval __user *t); +/* __ARCH_WANT_SYSCALL_NO_FLAGS */ +asmlinkage long compat_sys_signalfd(int ufd, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize); + +/* __ARCH_WANT_SYSCALL_OFF_T */ +asmlinkage long compat_sys_newstat(const char __user *filename, + struct compat_stat __user *statbuf); +asmlinkage long compat_sys_newlstat(const char __user *filename, + struct compat_stat __user *statbuf); + +/* __ARCH_WANT_SYSCALL_DEPRECATED */ +asmlinkage long compat_sys_time(compat_time_t __user *tloc); +asmlinkage long compat_sys_utime(const char __user *filename, + struct compat_utimbuf __user *t); +asmlinkage long compat_sys_futimesat(unsigned int dfd, + const char __user *filename, + struct compat_timeval __user *t); +asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, + compat_ulong_t __user *outp, compat_ulong_t __user *exp, + struct compat_timeval __user *tvp); +asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32); +asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len, + unsigned flags); +asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args); + +/* obsolete: fs/readdir.c */ +asmlinkage long compat_sys_old_readdir(unsigned int fd, + struct compat_old_linux_dirent __user *, + unsigned int count); + +/* obsolete: fs/select.c */ +asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg); + +/* obsolete: ipc */ +asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32); + +/* obsolete: kernel/signal.c */ #ifdef __ARCH_WANT_SYS_SIGPENDING asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set); #endif @@ -839,26 +970,20 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set); asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *nset, compat_old_sigset_t __user *oset); #endif +#ifdef CONFIG_COMPAT_OLD_SIGACTION +asmlinkage long compat_sys_sigaction(int sig, + const struct compat_old_sigaction __user *act, + struct compat_old_sigaction __user *oact); +#endif -int compat_restore_altstack(const compat_stack_t __user *uss); -int __compat_save_altstack(compat_stack_t __user *, unsigned long); 
-#define compat_save_altstack_ex(uss, sp) do { \ - compat_stack_t __user *__uss = uss; \ - struct task_struct *t = current; \ - put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \ - put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ - put_user_ex(t->sas_ss_size, &__uss->ss_size); \ - if (t->sas_ss_flags & SS_AUTODISARM) \ - sas_ss_reset(t); \ -} while (0); +/* obsolete: kernel/time/time.c */ +asmlinkage long compat_sys_stime(compat_time_t __user *tptr); -asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, - struct compat_timespec __user *interval); +/* obsolete: net/socket.c */ +asmlinkage long compat_sys_socketcall(int call, u32 __user *args); -asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32, - int, const char __user *); +#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ -asmlinkage long compat_sys_arch_prctl(int option, unsigned long arg2); /* * For most but not all architectures, "am I in a compat syscall?" and diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index d3f264a5b04d..7d98e263e048 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -17,9 +17,6 @@ */ #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) -#define randomized_struct_fields_start struct { -#define randomized_struct_fields_end }; - /* all clang versions usable with the kernel support KASAN ABI version 5 */ #define KASAN_ABI_VERSION 5 @@ -28,6 +25,9 @@ #define __SANITIZE_ADDRESS__ #endif +#undef __no_sanitize_address +#define __no_sanitize_address __attribute__((no_sanitize("address"))) + /* Clang doesn't have a way to turn it off per-function, yet. */ #ifdef __noretpoline #undef __noretpoline diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index e2c7f4369eff..b4bf73f5e38f 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -242,6 +242,9 @@ #if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__) #define __randomize_layout __attribute__((randomize_layout)) #define __no_randomize_layout __attribute__((no_randomize_layout)) +/* This anon struct can add padding, so only enable it under randstruct. */ +#define randomized_struct_fields_start struct { +#define randomized_struct_fields_end } __randomize_layout; #endif #endif /* GCC_VERSION >= 40500 */ @@ -256,15 +259,6 @@ */ #define __visible __attribute__((externally_visible)) -/* - * RANDSTRUCT_PLUGIN wants to use an anonymous struct, but it is only - * possible since GCC 4.6. To provide as much build testing coverage - * as possible, this is used for all GCC 4.6+ builds, and not just on - * RANDSTRUCT_PLUGIN builds. 
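randomized_struct_fields_start/randomized_struct_fields_end now expand to the anonymous struct (and its __randomize_layout attribute) only when the randstruct plugin is active, since the extra anonymous struct can introduce padding. In a structure definition the markers are used roughly as follows; struct foo_state and its fields are invented for illustration:

struct foo_state {
        unsigned long flags;            /* stays at a fixed, ABI-visible offset */

        randomized_struct_fields_start
        void *owner;                    /* fields between the markers may be   */
        int refcount;                   /* reordered when the plugin is active */
        randomized_struct_fields_end
};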
- */ -#define randomized_struct_fields_start struct { -#define randomized_struct_fields_end } __randomize_layout; - #endif /* GCC_VERSION >= 40600 */ diff --git a/include/linux/console.h b/include/linux/console.h index b8920a031a3e..dfd6b0e97855 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -46,46 +46,52 @@ enum con_scroll { struct consw { struct module *owner; const char *(*con_startup)(void); - void (*con_init)(struct vc_data *, int); - void (*con_deinit)(struct vc_data *); - void (*con_clear)(struct vc_data *, int, int, int, int); - void (*con_putc)(struct vc_data *, int, int, int); - void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int); - void (*con_cursor)(struct vc_data *, int); - bool (*con_scroll)(struct vc_data *, unsigned int top, + void (*con_init)(struct vc_data *vc, int init); + void (*con_deinit)(struct vc_data *vc); + void (*con_clear)(struct vc_data *vc, int sy, int sx, int height, + int width); + void (*con_putc)(struct vc_data *vc, int c, int ypos, int xpos); + void (*con_putcs)(struct vc_data *vc, const unsigned short *s, + int count, int ypos, int xpos); + void (*con_cursor)(struct vc_data *vc, int mode); + bool (*con_scroll)(struct vc_data *vc, unsigned int top, unsigned int bottom, enum con_scroll dir, unsigned int lines); - int (*con_switch)(struct vc_data *); - int (*con_blank)(struct vc_data *, int, int); - int (*con_font_set)(struct vc_data *, struct console_font *, unsigned); - int (*con_font_get)(struct vc_data *, struct console_font *); - int (*con_font_default)(struct vc_data *, struct console_font *, char *); - int (*con_font_copy)(struct vc_data *, int); - int (*con_resize)(struct vc_data *, unsigned int, unsigned int, - unsigned int); - void (*con_set_palette)(struct vc_data *, + int (*con_switch)(struct vc_data *vc); + int (*con_blank)(struct vc_data *vc, int blank, int mode_switch); + int (*con_font_set)(struct vc_data *vc, struct console_font *font, + unsigned int flags); + int (*con_font_get)(struct vc_data *vc, struct console_font *font); + int (*con_font_default)(struct vc_data *vc, + struct console_font *font, char *name); + int (*con_font_copy)(struct vc_data *vc, int con); + int (*con_resize)(struct vc_data *vc, unsigned int width, + unsigned int height, unsigned int user); + void (*con_set_palette)(struct vc_data *vc, const unsigned char *table); - void (*con_scrolldelta)(struct vc_data *, int lines); - int (*con_set_origin)(struct vc_data *); - void (*con_save_screen)(struct vc_data *); - u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8); - void (*con_invert_region)(struct vc_data *, u16 *, int); - u16 *(*con_screen_pos)(struct vc_data *, int); - unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *); + void (*con_scrolldelta)(struct vc_data *vc, int lines); + int (*con_set_origin)(struct vc_data *vc); + void (*con_save_screen)(struct vc_data *vc); + u8 (*con_build_attr)(struct vc_data *vc, u8 color, u8 intensity, + u8 blink, u8 underline, u8 reverse, u8 italic); + void (*con_invert_region)(struct vc_data *vc, u16 *p, int count); + u16 *(*con_screen_pos)(struct vc_data *vc, int offset); + unsigned long (*con_getxy)(struct vc_data *vc, unsigned long position, + int *px, int *py); /* * Flush the video console driver's scrollback buffer */ - void (*con_flush_scrollback)(struct vc_data *); + void (*con_flush_scrollback)(struct vc_data *vc); /* * Prepare the console for the debugger. 
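The new <linux/const.h> simply re-exports the uapi _UL()/_ULL() helpers as UL()/ULL(); through the underlying _AC() helper the UL/ULL suffix is appended only when compiled as C, so one constant definition can be shared with assembly. A small hedged example; the constant name is made up:

#include <linux/const.h>

/* 0x40000000UL in C, plain 0x40000000 when included from assembly. */
#define FOO_REGION_SIZE         UL(0x40000000)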
This includes, but is not * limited to, unblanking the console, loading an appropriate * palette, and allowing debugger generated output. */ - int (*con_debug_enter)(struct vc_data *); + int (*con_debug_enter)(struct vc_data *vc); /* * Restore the console to its pre-debug state as closely as possible. */ - int (*con_debug_leave)(struct vc_data *); + int (*con_debug_leave)(struct vc_data *vc); }; extern const struct consw *conswitchp; diff --git a/include/linux/const.h b/include/linux/const.h new file mode 100644 index 000000000000..7b55a55f5911 --- /dev/null +++ b/include/linux/const.h @@ -0,0 +1,9 @@ +#ifndef _LINUX_CONST_H +#define _LINUX_CONST_H + +#include <uapi/linux/const.h> + +#define UL(x) (_UL(x)) +#define ULL(x) (_ULL(x)) + +#endif /* _LINUX_CONST_H */ diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h index edfeaba95429..a1a959ba24ff 100644 --- a/include/linux/coresight-pmu.h +++ b/include/linux/coresight-pmu.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier <mathieu.poirier@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef _LINUX_CORESIGHT_PMU_H diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 21e8d248d956..87f48dd932eb 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -960,8 +960,7 @@ extern void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs; extern struct freq_attr *cpufreq_generic_attr[]; -int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, - struct cpufreq_frequency_table *table); +int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy); unsigned int cpufreq_generic_get(unsigned int cpu); int cpufreq_generic_init(struct cpufreq_policy *policy, diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 5172ad0daa7c..8796ba387152 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -29,7 +29,6 @@ enum cpuhp_state { CPUHP_PERF_PREPARE, CPUHP_PERF_X86_PREPARE, CPUHP_PERF_X86_AMD_UNCORE_PREP, - CPUHP_PERF_BFIN, CPUHP_PERF_POWER, CPUHP_PERF_SUPERH, CPUHP_X86_HPET_DEAD, @@ -108,7 +107,6 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_CQM_STARTING, CPUHP_AP_PERF_X86_CSTATE_STARTING, CPUHP_AP_PERF_XTENSA_STARTING, - CPUHP_AP_PERF_METAG_STARTING, CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, CPUHP_AP_ARM_SDEI_STARTING, CPUHP_AP_ARM_VFP_STARTING, @@ -122,7 +120,6 @@ enum cpuhp_state { CPUHP_AP_JCORE_TIMER_STARTING, CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_ARM_TWD_STARTING, - CPUHP_AP_METAG_TIMER_STARTING, CPUHP_AP_QCOM_TIMER_STARTING, CPUHP_AP_ARMADA_TIMER_STARTING, CPUHP_AP_MARCO_TIMER_STARTING, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 0b3fc229086c..1eefabf1621f 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -33,6 +33,10 @@ 
struct cpuidle_state_usage { unsigned long long disable; unsigned long long usage; unsigned long long time; /* in US */ +#ifdef CONFIG_SUSPEND + unsigned long long s2idle_usage; + unsigned long long s2idle_time; /* in US */ +#endif }; struct cpuidle_state { @@ -131,7 +135,8 @@ extern bool cpuidle_not_available(struct cpuidle_driver *drv, struct cpuidle_device *dev); extern int cpuidle_select(struct cpuidle_driver *drv, - struct cpuidle_device *dev); + struct cpuidle_device *dev, + bool *stop_tick); extern int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index); extern void cpuidle_reflect(struct cpuidle_device *dev, int index); @@ -163,7 +168,7 @@ static inline bool cpuidle_not_available(struct cpuidle_driver *drv, struct cpuidle_device *dev) {return true; } static inline int cpuidle_select(struct cpuidle_driver *drv, - struct cpuidle_device *dev) + struct cpuidle_device *dev, bool *stop_tick) {return -ENODEV; } static inline int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index) @@ -246,7 +251,8 @@ struct cpuidle_governor { struct cpuidle_device *dev); int (*select) (struct cpuidle_driver *drv, - struct cpuidle_device *dev); + struct cpuidle_device *dev, + bool *stop_tick); void (*reflect) (struct cpuidle_device *dev, int index); }; diff --git a/include/linux/crc32c.h b/include/linux/crc32c.h index 357ae4611a45..bd21af828ff6 100644 --- a/include/linux/crc32c.h +++ b/include/linux/crc32c.h @@ -5,6 +5,7 @@ #include <linux/types.h> extern u32 crc32c(u32 crc, const void *address, unsigned int length); +extern const char *crc32c_impl(void); /* This macro exists for backwards-compatibility. */ #define crc32c_le crc32c diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 7e6e84cf6383..6eb06101089f 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -435,6 +435,14 @@ struct compress_alg { * @cra_exit: Deinitialize the cryptographic transformation object. This is a * counterpart to @cra_init, used to remove various changes set in * @cra_init. + * @cra_u.ablkcipher: Union member which contains an asynchronous block cipher + * definition. See @struct @ablkcipher_alg. + * @cra_u.blkcipher: Union member which contains a synchronous block cipher + * definition See @struct @blkcipher_alg. + * @cra_u.cipher: Union member which contains a single-block symmetric cipher + * definition. See @struct @cipher_alg. + * @cra_u.compress: Union member which contains a (de)compression algorithm. + * See @struct @compress_alg. * @cra_module: Owner of this transformation implementation. 
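The new crc32c_impl() export lets a crc32c() user report which backing implementation (generic table-based or hardware-accelerated) ended up being used. A hedged usage sketch; the wrapper function is invented:

#include <linux/crc32c.h>
#include <linux/printk.h>

static u32 foo_checksum(const void *buf, unsigned int len)
{
        u32 crc = crc32c(~0U, buf, len);        /* seed convention is the caller's choice */

        pr_debug("crc32c via %s: 0x%08x\n", crc32c_impl(), crc);
        return crc;
}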
Set to THIS_MODULE * @cra_list: internally used * @cra_users: internally used diff --git a/include/linux/dax.h b/include/linux/dax.h index 0185ecdae135..f9eb22ad341e 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -26,18 +26,42 @@ extern struct attribute_group dax_attribute_group; #if IS_ENABLED(CONFIG_DAX) struct dax_device *dax_get_by_host(const char *host); +struct dax_device *alloc_dax(void *private, const char *host, + const struct dax_operations *ops); void put_dax(struct dax_device *dax_dev); +void kill_dax(struct dax_device *dax_dev); +void dax_write_cache(struct dax_device *dax_dev, bool wc); +bool dax_write_cache_enabled(struct dax_device *dax_dev); #else static inline struct dax_device *dax_get_by_host(const char *host) { return NULL; } - +static inline struct dax_device *alloc_dax(void *private, const char *host, + const struct dax_operations *ops) +{ + /* + * Callers should check IS_ENABLED(CONFIG_DAX) to know if this + * NULL is an error or expected. + */ + return NULL; +} static inline void put_dax(struct dax_device *dax_dev) { } +static inline void kill_dax(struct dax_device *dax_dev) +{ +} +static inline void dax_write_cache(struct dax_device *dax_dev, bool wc) +{ +} +static inline bool dax_write_cache_enabled(struct dax_device *dax_dev) +{ + return false; +} #endif +struct writeback_control; int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); #if IS_ENABLED(CONFIG_FS_DAX) int __bdev_dax_supported(struct super_block *sb, int blocksize); @@ -57,6 +81,8 @@ static inline void fs_put_dax(struct dax_device *dax_dev) } struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev); +int dax_writeback_mapping_range(struct address_space *mapping, + struct block_device *bdev, struct writeback_control *wbc); #else static inline int bdev_dax_supported(struct super_block *sb, int blocksize) { @@ -76,22 +102,23 @@ static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev) { return NULL; } + +static inline int dax_writeback_mapping_range(struct address_space *mapping, + struct block_device *bdev, struct writeback_control *wbc) +{ + return -EOPNOTSUPP; +} #endif int dax_read_lock(void); void dax_read_unlock(int id); -struct dax_device *alloc_dax(void *private, const char *host, - const struct dax_operations *ops); bool dax_alive(struct dax_device *dax_dev); -void kill_dax(struct dax_device *dax_dev); void *dax_get_private(struct dax_device *dax_dev); long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn); size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i); void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); -void dax_write_cache(struct dax_device *dax_dev, bool wc); -bool dax_write_cache_enabled(struct dax_device *dax_dev); ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops); @@ -121,7 +148,4 @@ static inline bool dax_mapping(struct address_space *mapping) return mapping->host && IS_DAX(mapping->host); } -struct writeback_control; -int dax_writeback_mapping_range(struct address_space *mapping, - struct block_device *bdev, struct writeback_control *wbc); #endif diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 82a99d366aec..94acbde17bb1 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -56,9 +56,7 @@ struct qstr { #define QSTR_INIT(n,l) { { { .len = l } }, .name = n } -extern const char empty_string[]; extern const struct 
qstr empty_name; -extern const char slash_string[]; extern const struct qstr slash_name; struct dentry_stat_t { @@ -361,7 +359,7 @@ static inline void dont_mount(struct dentry *dentry) extern void __d_lookup_done(struct dentry *); -static inline int d_in_lookup(struct dentry *dentry) +static inline int d_in_lookup(const struct dentry *dentry) { return dentry->d_flags & DCACHE_PAR_LOOKUP; } @@ -489,7 +487,7 @@ static inline bool d_really_is_positive(const struct dentry *dentry) return dentry->d_inode != NULL; } -static inline int simple_positive(struct dentry *dentry) +static inline int simple_positive(const struct dentry *dentry) { return d_really_is_positive(dentry) && !d_unhashed(dentry); } diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index da83f64952e7..31fef7c34185 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -87,10 +87,10 @@ typedef void (*dm_resume_fn) (struct dm_target *ti); typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type, unsigned status_flags, char *result, unsigned maxlen); -typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv); +typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv, + char *result, unsigned maxlen); -typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, - struct block_device **bdev, fmode_t *mode); +typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev); /* * These iteration functions are typically used to check (and combine) @@ -267,6 +267,12 @@ struct dm_target { unsigned num_discard_bios; /* + * The number of secure erase bios that will be submitted to the target. + * The bio number can be accessed with dm_bio_get_target_bio_nr. + */ + unsigned num_secure_erase_bios; + + /* * The number of WRITE SAME bios that will be submitted to the target. * The bio number can be accessed with dm_bio_get_target_bio_nr. */ @@ -542,8 +548,6 @@ do { \ #define DMEMIT(x...) sz += ((sz >= maxlen) ? \ 0 : scnprintf(result + sz, maxlen - sz, x)) -#define SECTOR_SHIFT 9 - /* * Definitions of return values from target end_io function. */ diff --git a/include/linux/device.h b/include/linux/device.h index b093405ed525..0059b99e1f25 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -256,6 +256,7 @@ enum probe_type { * automatically. * @pm: Power management operations of the device which matched * this driver. + * @coredump: Called through sysfs to initiate a device coredump. * @p: Driver core's private data, no one other than the driver * core can touch this. * @@ -730,6 +731,28 @@ struct device_dma_parameters { }; /** + * struct device_connection - Device Connection Descriptor + * @endpoint: The names of the two devices connected together + * @id: Unique identifier for the connection + * @list: List head, private, for internal use only + */ +struct device_connection { + const char *endpoint[2]; + const char *id; + struct list_head list; +}; + +void *device_connection_find_match(struct device *dev, const char *con_id, + void *data, + void *(*match)(struct device_connection *con, + int ep, void *data)); + +struct device *device_connection_find(struct device *dev, const char *con_id); + +void device_connection_add(struct device_connection *con); +void device_connection_remove(struct device_connection *con); + +/** * enum device_link_state - Device link states. * @DL_STATE_NONE: The presence of the drivers is not being tracked. 
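struct device_connection describes a link between two named endpoints so that one driver can find its counterpart by connection id via device_connection_find_match() or device_connection_find(). A minimal, hypothetical registration; the endpoint strings (typically the dev_name() of each side) and the id are invented:

static struct device_connection foo_mux_conn = {
        .endpoint = { "usb-port0", "typec-mux0" },
        .id       = "typec-mux",
};

static void foo_register_connection(void)
{
        device_connection_add(&foo_mux_conn);
}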
* @DL_STATE_DORMANT: None of the supplier/consumer drivers is present. @@ -769,6 +792,7 @@ enum device_link_state { * @status: The state of the link (with respect to the presence of drivers). * @flags: Link flags. * @rpm_active: Whether or not the consumer device is runtime-PM-active. + * @kref: Count repeated addition of the same link. * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks. */ struct device_link { @@ -779,6 +803,7 @@ struct device_link { enum device_link_state status; u32 flags; bool rpm_active; + struct kref kref; #ifdef CONFIG_SRCU struct rcu_head rcu_head; #endif diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h new file mode 100644 index 000000000000..3c8b7d274bd9 --- /dev/null +++ b/include/linux/dm-bufio.h @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2009-2011 Red Hat, Inc. + * + * Author: Mikulas Patocka <mpatocka@redhat.com> + * + * This file is released under the GPL. + */ + +#ifndef _LINUX_DM_BUFIO_H +#define _LINUX_DM_BUFIO_H + +#include <linux/blkdev.h> +#include <linux/types.h> + +/*----------------------------------------------------------------*/ + +struct dm_bufio_client; +struct dm_buffer; + +/* + * Create a buffered IO cache on a given device + */ +struct dm_bufio_client * +dm_bufio_client_create(struct block_device *bdev, unsigned block_size, + unsigned reserved_buffers, unsigned aux_size, + void (*alloc_callback)(struct dm_buffer *), + void (*write_callback)(struct dm_buffer *)); + +/* + * Release a buffered IO cache. + */ +void dm_bufio_client_destroy(struct dm_bufio_client *c); + +/* + * Set the sector range. + * When this function is called, there must be no I/O in progress on the bufio + * client. + */ +void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start); + +/* + * WARNING: to avoid deadlocks, these conditions are observed: + * + * - At most one thread can hold at most "reserved_buffers" simultaneously. + * - Each other threads can hold at most one buffer. + * - Threads which call only dm_bufio_get can hold unlimited number of + * buffers. + */ + +/* + * Read a given block from disk. Returns pointer to data. Returns a + * pointer to dm_buffer that can be used to release the buffer or to make + * it dirty. + */ +void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, + struct dm_buffer **bp); + +/* + * Like dm_bufio_read, but return buffer from cache, don't read + * it. If the buffer is not in the cache, return NULL. + */ +void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, + struct dm_buffer **bp); + +/* + * Like dm_bufio_read, but don't read anything from the disk. It is + * expected that the caller initializes the buffer and marks it dirty. + */ +void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, + struct dm_buffer **bp); + +/* + * Prefetch the specified blocks to the cache. + * The function starts to read the blocks and returns without waiting for + * I/O to finish. + */ +void dm_bufio_prefetch(struct dm_bufio_client *c, + sector_t block, unsigned n_blocks); + +/* + * Release a reference obtained with dm_bufio_{read,get,new}. The data + * pointer and dm_buffer pointer is no longer valid after this call. + */ +void dm_bufio_release(struct dm_buffer *b); + +/* + * Mark a buffer dirty. It should be called after the buffer is modified. + * + * In case of memory pressure, the buffer may be written after + * dm_bufio_mark_buffer_dirty, but before dm_bufio_write_dirty_buffers. 
So + * dm_bufio_write_dirty_buffers guarantees that the buffer is on-disk but + * the actual writing may occur earlier. + */ +void dm_bufio_mark_buffer_dirty(struct dm_buffer *b); + +/* + * Mark a part of the buffer dirty. + * + * The specified part of the buffer is scheduled to be written. dm-bufio may + * write the specified part of the buffer or it may write a larger superset. + */ +void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, + unsigned start, unsigned end); + +/* + * Initiate writing of dirty buffers, without waiting for completion. + */ +void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c); + +/* + * Write all dirty buffers. Guarantees that all dirty buffers created prior + * to this call are on disk when this call exits. + */ +int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c); + +/* + * Send an empty write barrier to the device to flush hardware disk cache. + */ +int dm_bufio_issue_flush(struct dm_bufio_client *c); + +/* + * Like dm_bufio_release but also move the buffer to the new + * block. dm_bufio_write_dirty_buffers is needed to commit the new block. + */ +void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block); + +/* + * Free the given buffer. + * This is just a hint, if the buffer is in use or dirty, this function + * does nothing. + */ +void dm_bufio_forget(struct dm_bufio_client *c, sector_t block); + +/* + * Set the minimum number of buffers before cleanup happens. + */ +void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n); + +unsigned dm_bufio_get_block_size(struct dm_bufio_client *c); +sector_t dm_bufio_get_device_size(struct dm_bufio_client *c); +sector_t dm_bufio_get_block_number(struct dm_buffer *b); +void *dm_bufio_get_block_data(struct dm_buffer *b); +void *dm_bufio_get_aux_data(struct dm_buffer *b); +struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b); + +/*----------------------------------------------------------------*/ + +#endif diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index bcdb1a3e4b1f..53ad6a47f513 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -3,18 +3,19 @@ #define _LINUX_DMA_DIRECT_H 1 #include <linux/dma-mapping.h> +#include <linux/mem_encrypt.h> #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA #include <asm/dma-direct.h> #else -static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) { dma_addr_t dev_addr = (dma_addr_t)paddr; return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT); } -static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) +static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr) { phys_addr_t paddr = (phys_addr_t)dev_addr; @@ -30,6 +31,22 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) } #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */ +/* + * If memory encryption is supported, phys_to_dma will set the memory encryption + * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma + * and __dma_to_phys versions should only be used on non-encrypted memory for + * special occasions like DMA coherent buffers. 
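A compact read-modify-write sketch of the buffered-IO interface exported by the new include/linux/dm-bufio.h above. The 4 KiB block size is arbitrary, and the ERR_PTR-style error handling follows the pattern of in-tree callers rather than anything the header itself spells out.

#include <linux/dm-bufio.h>

static int update_block_zero(struct block_device *bdev)
{
	struct dm_bufio_client *c;
	struct dm_buffer *b;
	u8 *data;
	int ret = 0;

	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
	if (IS_ERR(c))
		return PTR_ERR(c);

	data = dm_bufio_read(c, 0, &b);		/* read block 0, pin the buffer */
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto out;
	}

	data[0] ^= 0xff;			/* modify the cached copy */
	dm_bufio_mark_buffer_dirty(b);		/* schedule it for writeback */
	dm_bufio_release(b);			/* drop the reference */

	ret = dm_bufio_write_dirty_buffers(c);	/* wait until it is on disk */
out:
	dm_bufio_client_destroy(c);
	return ret;
}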
+ */ +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + return __sme_set(__phys_to_dma(dev, paddr)); +} + +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) +{ + return __sme_clr(__dma_to_phys(dev, daddr)); +} + #ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN void dma_mark_clean(void *addr, size_t size); #else diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index eb9eab4ecd6d..f8ab1c0f589e 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -212,14 +212,14 @@ static inline void set_dma_ops(struct device *dev, } #else /* - * Define the dma api to allow compilation but not linking of - * dma dependent code. Code that depends on the dma-mapping - * API needs to set 'depends on HAS_DMA' in its Kconfig + * Define the dma api to allow compilation of dma dependent code. + * Code that depends on the dma-mapping API needs to set 'depends on HAS_DMA' + * in its Kconfig, unless it already depends on <something> || COMPILE_TEST, + * where <something> guarantuees the availability of the dma-mapping API. */ -extern const struct dma_map_ops bad_dma_ops; static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { - return &bad_dma_ops; + return NULL; } #endif @@ -518,12 +518,8 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size, if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) return cpu_addr; - /* - * Let the implementation decide on the zone to allocate from, and - * decide on the way of zeroing the memory given that the memory - * returned should always be zeroed. - */ - flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_ZERO); + /* let the implementation decide on the zone to allocate from: */ + flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); if (!arch_dma_alloc_attrs(&dev, &flag)) return NULL; @@ -776,10 +772,19 @@ static inline void dma_deconfigure(struct device *dev) {} /* * Managed DMA API */ +#ifdef CONFIG_HAS_DMA extern void *dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp); extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); +#else /* !CONFIG_HAS_DMA */ +static inline void *dmam_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) +{ return NULL; } +static inline void dmam_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle) { } +#endif /* !CONFIG_HAS_DMA */ + extern void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index f838764993eb..861be5cab1df 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -470,7 +470,11 @@ typedef void (*dma_async_tx_callback_result)(void *dma_async_param, const struct dmaengine_result *result); struct dmaengine_unmap_data { +#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) + u16 map_cnt; +#else u8 map_cnt; +#endif u8 to_cnt; u8 from_cnt; u8 bidi_cnt; diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h index 53ba737505df..f632ecfb4238 100644 --- a/include/linux/dmapool.h +++ b/include/linux/dmapool.h @@ -16,6 +16,8 @@ struct device; +#ifdef CONFIG_HAS_DMA + struct dma_pool *dma_pool_create(const char *name, struct device *dev, size_t size, size_t align, size_t allocation); @@ -23,13 +25,6 @@ void dma_pool_destroy(struct dma_pool *pool); void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t 
*handle); - -static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags, - dma_addr_t *handle) -{ - return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle); -} - void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); /* @@ -39,5 +34,26 @@ struct dma_pool *dmam_pool_create(const char *name, struct device *dev, size_t size, size_t align, size_t allocation); void dmam_pool_destroy(struct dma_pool *pool); +#else /* !CONFIG_HAS_DMA */ +static inline struct dma_pool *dma_pool_create(const char *name, + struct device *dev, size_t size, size_t align, size_t allocation) +{ return NULL; } +static inline void dma_pool_destroy(struct dma_pool *pool) { } +static inline void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, + dma_addr_t *handle) { return NULL; } +static inline void dma_pool_free(struct dma_pool *pool, void *vaddr, + dma_addr_t addr) { } +static inline struct dma_pool *dmam_pool_create(const char *name, + struct device *dev, size_t size, size_t align, size_t allocation) +{ return NULL; } +static inline void dmam_pool_destroy(struct dma_pool *pool) { } +#endif /* !CONFIG_HAS_DMA */ + +static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags, + dma_addr_t *handle) +{ + return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle); +} + #endif diff --git a/include/linux/dmi.h b/include/linux/dmi.h index 46e151172d95..c46fdb36700b 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h @@ -106,6 +106,7 @@ extern void dmi_scan_machine(void); extern void dmi_memdev_walk(void); extern void dmi_set_dump_stack_arch_desc(void); extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp); +extern int dmi_get_bios_year(void); extern int dmi_name_in_vendors(const char *str); extern int dmi_name_in_serial(const char *str); extern int dmi_available; @@ -113,6 +114,7 @@ extern int dmi_walk(void (*decode)(const struct dmi_header *, void *), void *private_data); extern bool dmi_match(enum dmi_field f, const char *str); extern void dmi_memdev_name(u16 handle, const char **bank, const char **device); +extern u64 dmi_memdev_size(u16 handle); #else @@ -133,6 +135,7 @@ static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp) *dayp = 0; return false; } +static inline int dmi_get_bios_year(void) { return -ENXIO; } static inline int dmi_name_in_vendors(const char *s) { return 0; } static inline int dmi_name_in_serial(const char *s) { return 0; } #define dmi_available 0 @@ -142,6 +145,7 @@ static inline bool dmi_match(enum dmi_field f, const char *str) { return false; } static inline void dmi_memdev_name(u16 handle, const char **bank, const char **device) { } +static inline u64 dmi_memdev_size(u16 handle) { return ~0ul; } static inline const struct dmi_system_id * dmi_first_match(const struct dmi_system_id *list) { return NULL; } diff --git a/include/linux/edac.h b/include/linux/edac.h index cd75c173fd00..bffb97828ed6 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -186,6 +186,7 @@ static inline char *mc_event_error_type(const unsigned int err_type) * @MEM_RDDR4: Registered DDR4 RAM * This is a variant of the DDR4 memories. * @MEM_LRDDR4: Load-Reduced DDR4 memory. 
+ * @MEM_NVDIMM: Non-volatile RAM */ enum mem_type { MEM_EMPTY = 0, @@ -209,6 +210,7 @@ enum mem_type { MEM_DDR4, MEM_RDDR4, MEM_LRDDR4, + MEM_NVDIMM, }; #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) @@ -231,6 +233,7 @@ enum mem_type { #define MEM_FLAG_DDR4 BIT(MEM_DDR4) #define MEM_FLAG_RDDR4 BIT(MEM_RDDR4) #define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4) +#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM) /** * enum edac-type - Error Detection and Correction capabilities and mode diff --git a/include/linux/efi.h b/include/linux/efi.h index f5083aa72eae..f1b7d68ac460 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -966,6 +966,8 @@ extern struct efi { unsigned long flags; } efi; +extern struct mm_struct efi_mm; + static inline int efi_guidcmp (efi_guid_t left, efi_guid_t right) { diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 2ec41a7eb54f..ebe41811ed34 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -371,6 +371,11 @@ struct ethtool_ops { u8 *hfunc); int (*set_rxfh)(struct net_device *, const u32 *indir, const u8 *key, const u8 hfunc); + int (*get_rxfh_context)(struct net_device *, u32 *indir, u8 *key, + u8 *hfunc, u32 rss_context); + int (*set_rxfh_context)(struct net_device *, const u32 *indir, + const u8 *key, const u8 hfunc, + u32 *rss_context, bool delete); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); diff --git a/include/linux/extcon.h b/include/linux/extcon.h index 6d94e82c8ad9..7f033b1ea568 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -230,6 +230,7 @@ extern void devm_extcon_unregister_notifier_all(struct device *dev, * Following APIs get the extcon_dev from devicetree or by through extcon name. */ extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name); +extern struct extcon_dev *extcon_find_edev_by_node(struct device_node *node); extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index); @@ -283,6 +284,11 @@ static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) return ERR_PTR(-ENODEV); } +static inline struct extcon_dev *extcon_find_edev_by_node(struct device_node *node) +{ + return ERR_PTR(-ENODEV); +} + static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index) { diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h deleted file mode 100644 index 7cacafb78b09..000000000000 --- a/include/linux/extcon/extcon-gpio.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Single-state GPIO extcon driver based on extcon class - * - * Copyright (C) 2012 Samsung Electronics - * Author: MyungJoo Ham <myungjoo.ham@samsung.com> - * - * based on switch class driver - * Copyright (C) 2008 Google, Inc. - * Author: Mike Lockwood <lockwood@android.com> - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
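The new extcon_find_edev_by_node() gives consumers a devicetree-based lookup alongside the existing name- and phandle-index-based ones. A hedged sketch, assuming a binding that uses an "extcon" phandle property:

#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/of.h>

static struct extcon_dev *lookup_extcon(struct device *dev)
{
	struct device_node *np;
	struct extcon_dev *edev;

	np = of_parse_phandle(dev->of_node, "extcon", 0);
	if (!np)
		return ERR_PTR(-ENODEV);

	edev = extcon_find_edev_by_node(np);	/* ERR_PTR(-ENODEV) when CONFIG_EXTCON is off */
	of_node_put(np);
	return edev;
}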
- */ -#ifndef __EXTCON_GPIO_H__ -#define __EXTCON_GPIO_H__ __FILE__ - -#include <linux/extcon.h> - -/** - * struct gpio_extcon_pdata - A simple GPIO-controlled extcon device. - * @extcon_id: The unique id of specific external connector. - * @gpio: Corresponding GPIO. - * @gpio_active_low: Boolean describing whether gpio active state is 1 or 0 - * If true, low state of gpio means active. - * If false, high state of gpio means active. - * @debounce: Debounce time for GPIO IRQ in ms. - * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW). - * @check_on_resume: Boolean describing whether to check the state of gpio - * while resuming from sleep. - */ -struct gpio_extcon_pdata { - unsigned int extcon_id; - unsigned gpio; - bool gpio_active_low; - unsigned long debounce; - unsigned long irq_flags; - - bool check_on_resume; -}; - -#endif /* __EXTCON_GPIO_H__ */ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 58aecb60ea51..aa5db8b5521a 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -21,6 +21,7 @@ #define F2FS_BLKSIZE 4096 /* support only 4KB block */ #define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */ #define F2FS_MAX_EXTENSION 64 /* # of extension entries */ +#define F2FS_EXTENSION_LEN 8 /* max size of extension */ #define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS) #define NULL_ADDR ((block_t)0) /* used as block_t addresses */ @@ -38,15 +39,14 @@ #define F2FS_MAX_QUOTAS 3 -#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */ -#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */ -#define F2FS_IO_SIZE_BYTES(sbi) (1 << ((sbi)->write_io_size_bits + 12)) /* B */ -#define F2FS_IO_SIZE_BITS(sbi) ((sbi)->write_io_size_bits) /* power of 2 */ +#define F2FS_IO_SIZE(sbi) (1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */ +#define F2FS_IO_SIZE_KB(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */ +#define F2FS_IO_SIZE_BYTES(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */ +#define F2FS_IO_SIZE_BITS(sbi) (F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */ #define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1) /* This flag is used by node and meta inodes, and by recovery */ #define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) -#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM) /* * For further optimization on multi-head logs, on-disk layout supports maximum @@ -102,7 +102,7 @@ struct f2fs_super_block { __u8 uuid[16]; /* 128-bit uuid for volume */ __le16 volume_name[MAX_VOLUME_NAME]; /* volume name */ __le32 extension_count; /* # of extensions below */ - __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */ + __u8 extension_list[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];/* extension array */ __le32 cp_payload; __u8 version[VERSION_LEN]; /* the kernel version */ __u8 init_version[VERSION_LEN]; /* the initial kernel version */ @@ -111,12 +111,14 @@ struct f2fs_super_block { __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */ struct f2fs_device devs[MAX_DEVICES]; /* device list */ __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */ - __u8 reserved[315]; /* valid reserved region */ + __u8 hot_ext_count; /* # of hot file extension */ + __u8 reserved[314]; /* valid reserved region */ } __packed; /* * For checkpoint */ +#define CP_LARGE_NAT_BITMAP_FLAG 0x00000400 #define CP_NOCRC_RECOVERY_FLAG 0x00000200 #define CP_TRIMMED_FLAG 0x00000100 #define CP_NAT_BITS_FLAG 0x00000080 @@ -303,6 +305,10 @@ struct f2fs_node { */ #define 
NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry)) #define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8) +#define NAT_ENTRY_BITMAP_SIZE_ALIGNED \ + ((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) / \ + BITS_PER_LONG * BITS_PER_LONG) + struct f2fs_nat_entry { __u8 version; /* latest version of cached nat entry */ diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index c3c95d18bf43..7e6c77740413 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -64,10 +64,11 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name, struct kmem_cache; +int should_failslab(struct kmem_cache *s, gfp_t gfpflags); #ifdef CONFIG_FAILSLAB -extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags); +extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags); #else -static inline bool should_failslab(struct kmem_cache *s, gfp_t gfpflags) +static inline bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags) { return false; } diff --git a/include/linux/fb.h b/include/linux/fb.h index f577d3c89618..aa74a228bb92 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -571,8 +571,7 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { #elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || \ defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || \ - defined(__avr32__) || defined(__bfin__) || defined(__arm__) || \ - defined(__aarch64__) + defined(__arm__) || defined(__aarch64__) #define fb_readb __raw_readb #define fb_readw __raw_readw diff --git a/include/linux/filter.h b/include/linux/filter.h index 276932d75975..fc4e8f91b03d 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -20,7 +20,6 @@ #include <linux/set_memory.h> #include <linux/kallsyms.h> -#include <net/xdp.h> #include <net/sch_generic.h> #include <uapi/linux/filter.h> @@ -30,6 +29,7 @@ struct sk_buff; struct sock; struct seccomp_data; struct bpf_prog_aux; +struct xdp_rxq_info; /* ArgX, context and stack frame pointer register positions. Note, * Arg1, Arg2, Arg3, etc are used as argument mappings of function @@ -372,7 +372,7 @@ struct bpf_prog_aux; #define BPF_LDST_BYTES(insn) \ ({ \ - const int __size = bpf_size_to_bytes(BPF_SIZE(insn->code)); \ + const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \ WARN_ON(__size < 0); \ __size; \ }) @@ -469,6 +469,7 @@ struct bpf_prog { is_func:1, /* program is a bpf function */ kprobe_override:1; /* Do we override a kprobe? */ enum bpf_prog_type type; /* Type of BPF program */ + enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ u32 jited_len; /* Size of jited insns in bytes */ u8 tag[BPF_TAG_SIZE]; @@ -507,6 +508,24 @@ struct xdp_buff { struct xdp_rxq_info *rxq; }; +struct sk_msg_buff { + void *data; + void *data_end; + __u32 apply_bytes; + __u32 cork_bytes; + int sg_copybreak; + int sg_start; + int sg_curr; + int sg_end; + struct scatterlist sg_data[MAX_SKB_FRAGS]; + bool sg_copy[MAX_SKB_FRAGS]; + __u32 key; + __u32 flags; + struct bpf_map *map; + struct sk_buff *skb; + struct list_head list; +}; + /* Compute the linear packet data range [data, data_end) which * will be accessed by various program types (cls_bpf, act_bpf, * lwt, ...). Subsystems allowing direct data access must (!) 
@@ -771,6 +790,7 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp) void bpf_warn_invalid_xdp_action(u32 act); struct sock *do_sk_redirect_map(struct sk_buff *skb); +struct sock *do_msg_redirect_map(struct sk_msg_buff *md); #ifdef CONFIG_BPF_JIT extern int bpf_jit_enable; @@ -1001,6 +1021,16 @@ static inline int bpf_tell_extensions(void) return SKF_AD_MAX; } +struct bpf_sock_addr_kern { + struct sock *sk; + struct sockaddr *uaddr; + /* Temporary "register" to make indirect stores to nested structures + * defined above. We need three registers to make such a store, but + * only two (src and dst) are available at convert_ctx_access time + */ + u64 tmp_reg; +}; + struct bpf_sock_ops_kern { struct sock *sk; u32 op; diff --git a/include/linux/firmware.h b/include/linux/firmware.h index d4508080348d..41050417cafb 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -85,4 +85,7 @@ static inline int request_firmware_into_buf(const struct firmware **firmware_p, } #endif + +int firmware_request_cache(struct device *device, const char *name); + #endif diff --git a/include/linux/fs.h b/include/linux/fs.h index c6baf767619e..760d8da1b6c7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -13,6 +13,7 @@ #include <linux/list_lru.h> #include <linux/llist.h> #include <linux/radix-tree.h> +#include <linux/xarray.h> #include <linux/rbtree.h> #include <linux/init.h> #include <linux/pid.h> @@ -390,12 +391,11 @@ int pagecache_write_end(struct file *, struct address_space *mapping, struct address_space { struct inode *host; /* owner: inode, block_device */ - struct radix_tree_root page_tree; /* radix tree of all pages */ - spinlock_t tree_lock; /* and lock protecting it */ + struct radix_tree_root i_pages; /* cached pages */ atomic_t i_mmap_writable;/* count VM_SHARED mappings */ struct rb_root_cached i_mmap; /* tree of private and shared mappings */ struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */ - /* Protected by tree_lock together with the radix tree */ + /* Protected by the i_pages lock */ unsigned long nrpages; /* number of total pages */ /* number of shadow or DAX exceptional entries */ unsigned long nrexceptional; @@ -1321,6 +1321,8 @@ extern int send_sigurg(struct fown_struct *fown); /* sb->s_iflags to limit user namespace mounts */ #define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */ +#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020 +#define SB_I_UNTRUSTED_MOUNTER 0x00000040 /* Possible states of 'frozen' field */ enum { @@ -1665,7 +1667,7 @@ typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, unsigned); struct dir_context { - const filldir_t actor; + filldir_t actor; loff_t pos; }; @@ -1987,7 +1989,7 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) * * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to * synchronize competing switching instances and to tell - * wb stat updates to grab mapping->tree_lock. See + * wb stat updates to grab the i_pages lock. See * inode_switch_wb_work_fn() for details. 
* * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper @@ -2015,7 +2017,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) #define I_WB_SWITCH (1 << 13) #define I_OVL_INUSE (1 << 14) -#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) +#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) +#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) #define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME) extern void __mark_inode_dirty(struct inode *, int); @@ -2381,8 +2384,8 @@ struct audit_names; struct filename { const char *name; /* pointer to actual string */ const __user char *uptr; /* original userland pointer */ - struct audit_names *aname; int refcnt; + struct audit_names *aname; const char iname[]; }; @@ -2442,6 +2445,7 @@ extern int sync_blockdev(struct block_device *bdev); extern void kill_bdev(struct block_device *); extern struct super_block *freeze_bdev(struct block_device *); extern void emergency_thaw_all(void); +extern void emergency_thaw_bdev(struct super_block *sb); extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); extern int fsync_bdev(struct block_device *); @@ -2467,6 +2471,11 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) return 0; } +static inline int emergency_thaw_bdev(struct super_block *sb) +{ + return 0; +} + static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg) { } @@ -2977,12 +2986,6 @@ enum { /* filesystem does not support filling holes */ DIO_SKIP_HOLES = 0x02, - - /* filesystem can handle aio writes beyond i_size */ - DIO_ASYNC_EXTEND = 0x04, - - /* inode/fs/bdev does not need truncate protection */ - DIO_SKIP_DIO_COUNT = 0x08, }; void dio_end_io(struct bio *bio); @@ -3130,6 +3133,10 @@ extern int simple_rmdir(struct inode *, struct dentry *); extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); extern int noop_fsync(struct file *, loff_t, loff_t, int); +extern int noop_set_page_dirty(struct page *page); +extern void noop_invalidatepage(struct page *page, unsigned int offset, + unsigned int length); +extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter); extern int simple_empty(struct dentry *); extern int simple_readpage(struct file *file, struct page *page); extern int simple_write_begin(struct file *file, struct address_space *mapping, diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 4c467ef50159..34cf0fdd7dc7 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -29,6 +29,18 @@ struct fscache_cache_ops; struct fscache_object; struct fscache_operation; +enum fscache_obj_ref_trace { + fscache_obj_get_add_to_deps, + fscache_obj_get_queue, + fscache_obj_put_alloc_fail, + fscache_obj_put_attach_fail, + fscache_obj_put_drop_obj, + fscache_obj_put_enq_dep, + fscache_obj_put_queue, + fscache_obj_put_work, + fscache_obj_ref__nr_traces +}; + /* * cache tag definition */ @@ -123,7 +135,8 @@ extern void fscache_op_work_func(struct work_struct *work); extern void fscache_enqueue_operation(struct fscache_operation *); extern void fscache_op_complete(struct fscache_operation *, bool); extern void fscache_put_operation(struct fscache_operation *); -extern void fscache_operation_init(struct fscache_operation *, +extern void fscache_operation_init(struct fscache_cookie *, + struct fscache_operation *, fscache_operation_processor_t, fscache_operation_cancel_t, fscache_operation_release_t); @@ -185,7 +198,7 @@ static 
inline void fscache_retrieval_complete(struct fscache_retrieval *op, { atomic_sub(n_pages, &op->n_pages); if (atomic_read(&op->n_pages) <= 0) - fscache_op_complete(&op->op, true); + fscache_op_complete(&op->op, false); } /** @@ -231,7 +244,8 @@ struct fscache_cache_ops { void (*lookup_complete)(struct fscache_object *object); /* increment the usage count on this object (may fail if unmounting) */ - struct fscache_object *(*grab_object)(struct fscache_object *object); + struct fscache_object *(*grab_object)(struct fscache_object *object, + enum fscache_obj_ref_trace why); /* pin an object in the cache */ int (*pin_object)(struct fscache_object *object); @@ -254,7 +268,8 @@ struct fscache_cache_ops { void (*drop_object)(struct fscache_object *object); /* dispose of a reference to an object */ - void (*put_object)(struct fscache_object *object); + void (*put_object)(struct fscache_object *object, + enum fscache_obj_ref_trace why); /* sync a cache */ void (*sync_cache)(struct fscache_cache *cache); @@ -496,7 +511,7 @@ static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie) static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie) { - wake_up_atomic_t(&cookie->n_active); + wake_up_var(&cookie->n_active); } /** @@ -538,7 +553,8 @@ extern bool fscache_object_sleep_till_congested(signed long *timeoutp); extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, const void *data, - uint16_t datalen); + uint16_t datalen, + loff_t object_size); extern void fscache_object_retrying_stale(struct fscache_object *object); diff --git a/include/linux/fscache.h b/include/linux/fscache.h index fe0c349684fa..84b90a79d75a 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -22,6 +22,7 @@ #include <linux/list.h> #include <linux/pagemap.h> #include <linux/pagevec.h> +#include <linux/list_bl.h> #if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE) #define fscache_available() (1) @@ -83,45 +84,15 @@ struct fscache_cookie_def { const void *parent_netfs_data, const void *cookie_netfs_data); - /* get an index key - * - should store the key data in the buffer - * - should return the amount of data stored - * - not permitted to return an error - * - the netfs data from the cookie being used as the source is - * presented - */ - uint16_t (*get_key)(const void *cookie_netfs_data, - void *buffer, - uint16_t bufmax); - - /* get certain file attributes from the netfs data - * - this function can be absent for an index - * - not permitted to return an error - * - the netfs data from the cookie being used as the source is - * presented - */ - void (*get_attr)(const void *cookie_netfs_data, uint64_t *size); - - /* get the auxiliary data from netfs data - * - this function can be absent if the index carries no state data - * - should store the auxiliary data in the buffer - * - should return the amount of amount stored - * - not permitted to return an error - * - the netfs data from the cookie being used as the source is - * presented - */ - uint16_t (*get_aux)(const void *cookie_netfs_data, - void *buffer, - uint16_t bufmax); - /* consult the netfs about the state of an object * - this function can be absent if the index carries no state data * - the netfs data from the cookie being used as the target is - * presented, as is the auxiliary data + * presented, as is the auxiliary data and the object size */ enum fscache_checkaux (*check_aux)(void *cookie_netfs_data, const void *data, - uint16_t datalen); + uint16_t datalen, + loff_t object_size); 
/* get an extra reference on a read context * - this function can be absent if the completion function doesn't @@ -154,7 +125,6 @@ struct fscache_netfs { uint32_t version; /* indexing version */ const char *name; /* filesystem name */ struct fscache_cookie *primary_index; - struct list_head link; /* internal link */ }; /* @@ -173,6 +143,7 @@ struct fscache_cookie { struct hlist_head backing_objects; /* object(s) backing this file/index */ const struct fscache_cookie_def *def; /* definition */ struct fscache_cookie *parent; /* parent of this entry */ + struct hlist_bl_node hash_link; /* Link in hash table */ void *netfs_data; /* back pointer to netfs */ struct radix_tree_root stores; /* pages to be stored on this cookie */ #define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */ @@ -186,6 +157,22 @@ struct fscache_cookie { #define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */ #define FSCACHE_COOKIE_ENABLED 5 /* T if cookie is enabled */ #define FSCACHE_COOKIE_ENABLEMENT_LOCK 6 /* T if cookie is being en/disabled */ +#define FSCACHE_COOKIE_AUX_UPDATED 8 /* T if the auxiliary data was updated */ +#define FSCACHE_COOKIE_ACQUIRED 9 /* T if cookie is in use */ +#define FSCACHE_COOKIE_RELINQUISHING 10 /* T if cookie is being relinquished */ + + u8 type; /* Type of object */ + u8 key_len; /* Length of index key */ + u8 aux_len; /* Length of auxiliary data */ + u32 key_hash; /* Hash of parent, type, key, len */ + union { + void *key; /* Index key */ + u8 inline_key[16]; /* - If the key is short enough */ + }; + union { + void *aux; /* Auxiliary data */ + u8 inline_aux[8]; /* - If the aux data is short enough */ + }; }; static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie) @@ -208,10 +195,12 @@ extern void __fscache_release_cache_tag(struct fscache_cache_tag *); extern struct fscache_cookie *__fscache_acquire_cookie( struct fscache_cookie *, const struct fscache_cookie_def *, - void *, bool); -extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool); -extern int __fscache_check_consistency(struct fscache_cookie *); -extern void __fscache_update_cookie(struct fscache_cookie *); + const void *, size_t, + const void *, size_t, + void *, loff_t, bool); +extern void __fscache_relinquish_cookie(struct fscache_cookie *, const void *, bool); +extern int __fscache_check_consistency(struct fscache_cookie *, const void *); +extern void __fscache_update_cookie(struct fscache_cookie *, const void *); extern int __fscache_attr_changed(struct fscache_cookie *); extern void __fscache_invalidate(struct fscache_cookie *); extern void __fscache_wait_on_invalidate(struct fscache_cookie *); @@ -228,7 +217,7 @@ extern int __fscache_read_or_alloc_pages(struct fscache_cookie *, void *, gfp_t); extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t); -extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t); +extern int __fscache_write_page(struct fscache_cookie *, struct page *, loff_t, gfp_t); extern void __fscache_uncache_page(struct fscache_cookie *, struct page *); extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *); extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *); @@ -238,8 +227,8 @@ extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *, struct inode *); extern void __fscache_readpages_cancel(struct fscache_cookie *cookie, struct list_head *pages); -extern void __fscache_disable_cookie(struct fscache_cookie *, bool); 
-extern void __fscache_enable_cookie(struct fscache_cookie *, +extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool); +extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t, bool (*)(void *), void *); /** @@ -317,8 +306,13 @@ void fscache_release_cache_tag(struct fscache_cache_tag *tag) * fscache_acquire_cookie - Acquire a cookie to represent a cache object * @parent: The cookie that's to be the parent of this one * @def: A description of the cache object, including callback operations + * @index_key: The index key for this cookie + * @index_key_len: Size of the index key + * @aux_data: The auxiliary data for the cookie (may be NULL) + * @aux_data_len: Size of the auxiliary data buffer * @netfs_data: An arbitrary piece of data to be kept in the cookie to * represent the cache object to the netfs + * @object_size: The initial size of object * @enable: Whether or not to enable a data cookie immediately * * This function is used to inform FS-Cache about part of an index hierarchy @@ -332,12 +326,19 @@ static inline struct fscache_cookie *fscache_acquire_cookie( struct fscache_cookie *parent, const struct fscache_cookie_def *def, + const void *index_key, + size_t index_key_len, + const void *aux_data, + size_t aux_data_len, void *netfs_data, + loff_t object_size, bool enable) { if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent)) - return __fscache_acquire_cookie(parent, def, netfs_data, - enable); + return __fscache_acquire_cookie(parent, def, + index_key, index_key_len, + aux_data, aux_data_len, + netfs_data, object_size, enable); else return NULL; } @@ -346,36 +347,44 @@ struct fscache_cookie *fscache_acquire_cookie( * fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding * it * @cookie: The cookie being returned + * @aux_data: The updated auxiliary data for the cookie (may be NULL) * @retire: True if the cache object the cookie represents is to be discarded * * This function returns a cookie to the cache, forcibly discarding the - * associated cache object if retire is set to true. + * associated cache object if retire is set to true. The opportunity is + * provided to update the auxiliary data in the cache before the object is + * disconnected. * * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. */ static inline -void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire) +void fscache_relinquish_cookie(struct fscache_cookie *cookie, + const void *aux_data, + bool retire) { if (fscache_cookie_valid(cookie)) - __fscache_relinquish_cookie(cookie, retire); + __fscache_relinquish_cookie(cookie, aux_data, retire); } /** - * fscache_check_consistency - Request that if the cache is updated + * fscache_check_consistency - Request validation of a cache's auxiliary data * @cookie: The cookie representing the cache object + * @aux_data: The updated auxiliary data for the cookie (may be NULL) * - * Request an consistency check from fscache, which passes the request - * to the backing cache. + * Request an consistency check from fscache, which passes the request to the + * backing cache. The auxiliary data on the cookie will be updated first if + * @aux_data is set. * * Returns 0 if consistent and -ESTALE if inconsistent. May also * return -ENOMEM and -ERESTARTSYS. 
*/ static inline -int fscache_check_consistency(struct fscache_cookie *cookie) +int fscache_check_consistency(struct fscache_cookie *cookie, + const void *aux_data) { if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) - return __fscache_check_consistency(cookie); + return __fscache_check_consistency(cookie, aux_data); else return 0; } @@ -383,18 +392,20 @@ int fscache_check_consistency(struct fscache_cookie *cookie) /** * fscache_update_cookie - Request that a cache object be updated * @cookie: The cookie representing the cache object + * @aux_data: The updated auxiliary data for the cookie (may be NULL) * * Request an update of the index data for the cache object associated with the - * cookie. + * cookie. The auxiliary data on the cookie will be updated first if @aux_data + * is set. * * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. */ static inline -void fscache_update_cookie(struct fscache_cookie *cookie) +void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data) { if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) - __fscache_update_cookie(cookie); + __fscache_update_cookie(cookie, aux_data); } /** @@ -648,6 +659,7 @@ void fscache_readpages_cancel(struct fscache_cookie *cookie, * fscache_write_page - Request storage of a page in the cache * @cookie: The cookie representing the cache object * @page: The netfs page to store + * @object_size: Updated size of object * @gfp: The conditions under which memory allocation should be made * * Request the contents of the netfs page be written into the cache. This @@ -665,10 +677,11 @@ void fscache_readpages_cancel(struct fscache_cookie *cookie, static inline int fscache_write_page(struct fscache_cookie *cookie, struct page *page, + loff_t object_size, gfp_t gfp) { if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) - return __fscache_write_page(cookie, page, gfp); + return __fscache_write_page(cookie, page, object_size, gfp); else return -ENOBUFS; } @@ -780,6 +793,7 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, /** * fscache_disable_cookie - Disable a cookie * @cookie: The cookie representing the cache object + * @aux_data: The updated auxiliary data for the cookie (may be NULL) * @invalidate: Invalidate the backing object * * Disable a cookie from accepting further alloc, read, write, invalidate, @@ -790,34 +804,44 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, * * If @invalidate is set, then the backing object will be invalidated and * detached, otherwise it will just be detached. + * + * If @aux_data is set, then auxiliary data will be updated from that. */ static inline -void fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate) +void fscache_disable_cookie(struct fscache_cookie *cookie, + const void *aux_data, + bool invalidate) { if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) - __fscache_disable_cookie(cookie, invalidate); + __fscache_disable_cookie(cookie, aux_data, invalidate); } /** * fscache_enable_cookie - Reenable a cookie * @cookie: The cookie representing the cache object + * @aux_data: The updated auxiliary data for the cookie (may be NULL) + * @object_size: Current size of object * @can_enable: A function to permit enablement once lock is held * @data: Data for can_enable() * * Reenable a previously disabled cookie, allowing it to accept further alloc, * read, write, invalidate, update or acquire operations. 
An attempt will be - * made to immediately reattach the cookie to a backing object. + * made to immediately reattach the cookie to a backing object. If @aux_data + * is set, the auxiliary data attached to the cookie will be updated. * * The can_enable() function is called (if not NULL) once the enablement lock * is held to rule on whether enablement is still permitted to go ahead. */ static inline void fscache_enable_cookie(struct fscache_cookie *cookie, + const void *aux_data, + loff_t object_size, bool (*can_enable)(void *data), void *data) { if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie)) - __fscache_enable_cookie(cookie, can_enable, data); + __fscache_enable_cookie(cookie, aux_data, object_size, + can_enable, data); } #endif /* _LINUX_FSCACHE_H */ diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h new file mode 100644 index 000000000000..f27cb14088a4 --- /dev/null +++ b/include/linux/fsl/mc.h @@ -0,0 +1,562 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Freescale Management Complex (MC) bus public interface + * + * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. + * Author: German Rivera <German.Rivera@freescale.com> + * + */ +#ifndef _FSL_MC_H_ +#define _FSL_MC_H_ + +#include <linux/device.h> +#include <linux/mod_devicetable.h> +#include <linux/interrupt.h> + +#define FSL_MC_VENDOR_FREESCALE 0x1957 + +struct irq_domain; +struct msi_domain_info; + +struct fsl_mc_device; +struct fsl_mc_io; + +/** + * struct fsl_mc_driver - MC object device driver object + * @driver: Generic device driver + * @match_id_table: table of supported device matching Ids + * @probe: Function called when a device is added + * @remove: Function called when a device is removed + * @shutdown: Function called at shutdown time to quiesce the device + * @suspend: Function called when a device is stopped + * @resume: Function called when a device is resumed + * + * Generic DPAA device driver object for device drivers that are registered + * with a DPRC bus. This structure is to be embedded in each device-specific + * driver structure. + */ +struct fsl_mc_driver { + struct device_driver driver; + const struct fsl_mc_device_id *match_id_table; + int (*probe)(struct fsl_mc_device *dev); + int (*remove)(struct fsl_mc_device *dev); + void (*shutdown)(struct fsl_mc_device *dev); + int (*suspend)(struct fsl_mc_device *dev, pm_message_t state); + int (*resume)(struct fsl_mc_device *dev); +}; + +#define to_fsl_mc_driver(_drv) \ + container_of(_drv, struct fsl_mc_driver, driver) + +/** + * enum fsl_mc_pool_type - Types of allocatable MC bus resources + * + * Entries in these enum are used as indices in the array of resource + * pools of an fsl_mc_bus object. + */ +enum fsl_mc_pool_type { + FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */ + FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */ + FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */ + FSL_MC_POOL_IRQ, + + /* + * NOTE: New resource pool types must be added before this entry + */ + FSL_MC_NUM_POOL_TYPES +}; + +/** + * struct fsl_mc_resource - MC generic resource + * @type: type of resource + * @id: unique MC resource Id within the resources of the same type + * @data: pointer to resource-specific data if the resource is currently + * allocated, or NULL if the resource is not currently allocated. + * @parent_pool: pointer to the parent resource pool from which this + * resource is allocated from. 
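To make the cookie changes above concrete, here is a hedged sketch of how a network filesystem would now acquire a data cookie, passing the index key, auxiliary data and object size explicitly instead of supplying get_key/get_attr/get_aux callbacks. The my_fs_* types and the cookie definition are invented for the example.

#include <linux/fscache.h>

/* Hypothetical per-inode state of an imaginary netfs. */
struct my_fs_inode {
	u64	fid;		/* file identifier, used as the index key */
	u64	mtime;
	u32	data_version;
	loff_t	size;
};

struct my_fs_aux {
	__be64	mtime;
	__be32	data_version;
} __packed;

static const struct fscache_cookie_def my_fs_inode_def = {
	.name	= "my_fs.inode",
	.type	= FSCACHE_COOKIE_TYPE_DATAFILE,
	/* .check_aux would normally validate a struct my_fs_aux blob */
};

static struct fscache_cookie *my_fs_get_cookie(struct fscache_cookie *parent,
					       struct my_fs_inode *inode)
{
	struct my_fs_aux aux = {
		.mtime		= cpu_to_be64(inode->mtime),
		.data_version	= cpu_to_be32(inode->data_version),
	};

	return fscache_acquire_cookie(parent, &my_fs_inode_def,
				      &inode->fid, sizeof(inode->fid),	/* index key */
				      &aux, sizeof(aux),		/* auxiliary data */
				      inode,				/* netfs_data */
				      inode->size,			/* object_size */
				      true);				/* enable immediately */
}

Relinquishing follows the same pattern: fscache_relinquish_cookie(cookie, &aux, false) hands the cache a final copy of the auxiliary data before the object is disconnected.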
+ * @node: Node in the free list of the corresponding resource pool + * + * NOTE: This structure is to be embedded as a field of specific + * MC resource structures. + */ +struct fsl_mc_resource { + enum fsl_mc_pool_type type; + s32 id; + void *data; + struct fsl_mc_resource_pool *parent_pool; + struct list_head node; +}; + +/** + * struct fsl_mc_device_irq - MC object device message-based interrupt + * @msi_desc: pointer to MSI descriptor allocated by fsl_mc_msi_alloc_descs() + * @mc_dev: MC object device that owns this interrupt + * @dev_irq_index: device-relative IRQ index + * @resource: MC generic resource associated with the interrupt + */ +struct fsl_mc_device_irq { + struct msi_desc *msi_desc; + struct fsl_mc_device *mc_dev; + u8 dev_irq_index; + struct fsl_mc_resource resource; +}; + +#define to_fsl_mc_irq(_mc_resource) \ + container_of(_mc_resource, struct fsl_mc_device_irq, resource) + +/* Opened state - Indicates that an object is open by at least one owner */ +#define FSL_MC_OBJ_STATE_OPEN 0x00000001 +/* Plugged state - Indicates that the object is plugged */ +#define FSL_MC_OBJ_STATE_PLUGGED 0x00000002 + +/** + * Shareability flag - Object flag indicating no memory shareability. + * the object generates memory accesses that are non coherent with other + * masters; + * user is responsible for proper memory handling through IOMMU configuration. + */ +#define FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001 + +/** + * struct fsl_mc_obj_desc - Object descriptor + * @type: Type of object: NULL terminated string + * @id: ID of logical object resource + * @vendor: Object vendor identifier + * @ver_major: Major version number + * @ver_minor: Minor version number + * @irq_count: Number of interrupts supported by the object + * @region_count: Number of mappable regions supported by the object + * @state: Object state: combination of FSL_MC_OBJ_STATE_ states + * @label: Object label: NULL terminated string + * @flags: Object's flags + */ +struct fsl_mc_obj_desc { + char type[16]; + int id; + u16 vendor; + u16 ver_major; + u16 ver_minor; + u8 irq_count; + u8 region_count; + u32 state; + char label[16]; + u16 flags; +}; + +/** + * Bit masks for a MC object device (struct fsl_mc_device) flags + */ +#define FSL_MC_IS_DPRC 0x0001 + +/** + * struct fsl_mc_device - MC object device object + * @dev: Linux driver model device object + * @dma_mask: Default DMA mask + * @flags: MC object device flags + * @icid: Isolation context ID for the device + * @mc_handle: MC handle for the corresponding MC object opened + * @mc_io: Pointer to MC IO object assigned to this device or + * NULL if none. + * @obj_desc: MC description of the DPAA device + * @regions: pointer to array of MMIO region entries + * @irqs: pointer to array of pointers to interrupts allocated to this device + * @resource: generic resource associated with this MC object device, if any. + * + * Generic device object for MC object devices that are "attached" to a + * MC bus. + * + * NOTES: + * - For a non-DPRC object its icid is the same as its parent DPRC's icid. + * - The SMMU notifier callback gets invoked after device_add() has been + * called for an MC object device, but before the device-specific probe + * callback gets called. + * - DP_OBJ_DPRC objects are the only MC objects that have built-in MC + * portals. For all other MC objects, their device drivers are responsible for + * allocating MC portals for them by calling fsl_mc_portal_allocate(). 
+ * - Some types of MC objects (e.g., DP_OBJ_DPBP, DP_OBJ_DPCON) are + * treated as resources that can be allocated/deallocated from the + * corresponding resource pool in the object's parent DPRC, using the + * fsl_mc_object_allocate()/fsl_mc_object_free() functions. These MC objects + * are known as "allocatable" objects. For them, the corresponding + * fsl_mc_device's 'resource' points to the associated resource object. + * For MC objects that are not allocatable (e.g., DP_OBJ_DPRC, DP_OBJ_DPNI), + * 'resource' is NULL. + */ +struct fsl_mc_device { + struct device dev; + u64 dma_mask; + u16 flags; + u16 icid; + u16 mc_handle; + struct fsl_mc_io *mc_io; + struct fsl_mc_obj_desc obj_desc; + struct resource *regions; + struct fsl_mc_device_irq **irqs; + struct fsl_mc_resource *resource; +}; + +#define to_fsl_mc_device(_dev) \ + container_of(_dev, struct fsl_mc_device, dev) + +#define MC_CMD_NUM_OF_PARAMS 7 + +struct mc_cmd_header { + u8 src_id; + u8 flags_hw; + u8 status; + u8 flags_sw; + __le16 token; + __le16 cmd_id; +}; + +struct fsl_mc_command { + u64 header; + u64 params[MC_CMD_NUM_OF_PARAMS]; +}; + +enum mc_cmd_status { + MC_CMD_STATUS_OK = 0x0, /* Completed successfully */ + MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */ + MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */ + MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */ + MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */ + MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */ + MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */ + MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */ + MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */ + MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */ + MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */ + MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */ +}; + +/* + * MC command flags + */ + +/* High priority flag */ +#define MC_CMD_FLAG_PRI 0x80 +/* Command completion flag */ +#define MC_CMD_FLAG_INTR_DIS 0x01 + +static inline u64 mc_encode_cmd_header(u16 cmd_id, + u32 cmd_flags, + u16 token) +{ + u64 header = 0; + struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header; + + hdr->cmd_id = cpu_to_le16(cmd_id); + hdr->token = cpu_to_le16(token); + hdr->status = MC_CMD_STATUS_READY; + if (cmd_flags & MC_CMD_FLAG_PRI) + hdr->flags_hw = MC_CMD_FLAG_PRI; + if (cmd_flags & MC_CMD_FLAG_INTR_DIS) + hdr->flags_sw = MC_CMD_FLAG_INTR_DIS; + + return header; +} + +static inline u16 mc_cmd_hdr_read_token(struct fsl_mc_command *cmd) +{ + struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header; + u16 token = le16_to_cpu(hdr->token); + + return token; +} + +struct mc_rsp_create { + __le32 object_id; +}; + +struct mc_rsp_api_ver { + __le16 major_ver; + __le16 minor_ver; +}; + +static inline u32 mc_cmd_read_object_id(struct fsl_mc_command *cmd) +{ + struct mc_rsp_create *rsp_params; + + rsp_params = (struct mc_rsp_create *)cmd->params; + return le32_to_cpu(rsp_params->object_id); +} + +static inline void mc_cmd_read_api_version(struct fsl_mc_command *cmd, + u16 *major_ver, + u16 *minor_ver) +{ + struct mc_rsp_api_ver *rsp_params; + + rsp_params = (struct mc_rsp_api_ver *)cmd->params; + *major_ver = le16_to_cpu(rsp_params->major_ver); + *minor_ver = le16_to_cpu(rsp_params->minor_ver); +} + +/** + * Bit masks for a MC I/O object (struct fsl_mc_io) flags + */ +#define FSL_MC_IO_ATOMIC_CONTEXT_PORTAL 0x0001 + +/** + * struct fsl_mc_io - MC I/O object to be passed-in to mc_send_command() + * @dev: device associated with this Mc I/O object + * @flags: flags for 
mc_send_command() + * @portal_size: MC command portal size in bytes + * @portal_phys_addr: MC command portal physical address + * @portal_virt_addr: MC command portal virtual address + * @dpmcp_dev: pointer to the DPMCP device associated with the MC portal. + * + * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not + * set: + * @mutex: Mutex to serialize mc_send_command() calls that use the same MC + * portal, if the fsl_mc_io object was created with the + * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag off. mc_send_command() calls for this + * fsl_mc_io object must be made only from non-atomic context. + * + * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is + * set: + * @spinlock: Spinlock to serialize mc_send_command() calls that use the same MC + * portal, if the fsl_mc_io object was created with the + * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag on. mc_send_command() calls for this + * fsl_mc_io object can be made from atomic or non-atomic context. + */ +struct fsl_mc_io { + struct device *dev; + u16 flags; + u32 portal_size; + phys_addr_t portal_phys_addr; + void __iomem *portal_virt_addr; + struct fsl_mc_device *dpmcp_dev; + union { + /* + * This field is only meaningful if the + * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not set + */ + struct mutex mutex; /* serializes mc_send_command() */ + + /* + * This field is only meaningful if the + * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set + */ + spinlock_t spinlock; /* serializes mc_send_command() */ + }; +}; + +int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd); + +#ifdef CONFIG_FSL_MC_BUS +#define dev_is_fsl_mc(_dev) ((_dev)->bus == &fsl_mc_bus_type) +#else +/* If fsl-mc bus is not present device cannot belong to fsl-mc bus */ +#define dev_is_fsl_mc(_dev) (0) +#endif + +/* + * module_fsl_mc_driver() - Helper macro for drivers that don't do + * anything special in module init/exit. This eliminates a lot of + * boilerplate. 
Each module may only use this macro once, and + * calling it replaces module_init() and module_exit() + */ +#define module_fsl_mc_driver(__fsl_mc_driver) \ + module_driver(__fsl_mc_driver, fsl_mc_driver_register, \ + fsl_mc_driver_unregister) + +/* + * Macro to avoid include chaining to get THIS_MODULE + */ +#define fsl_mc_driver_register(drv) \ + __fsl_mc_driver_register(drv, THIS_MODULE) + +int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver, + struct module *owner); + +void fsl_mc_driver_unregister(struct fsl_mc_driver *driver); + +int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, + u16 mc_io_flags, + struct fsl_mc_io **new_mc_io); + +void fsl_mc_portal_free(struct fsl_mc_io *mc_io); + +int fsl_mc_portal_reset(struct fsl_mc_io *mc_io); + +int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, + enum fsl_mc_pool_type pool_type, + struct fsl_mc_device **new_mc_adev); + +void fsl_mc_object_free(struct fsl_mc_device *mc_adev); + +struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode, + struct msi_domain_info *info, + struct irq_domain *parent); + +int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev); + +void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev); + +extern struct bus_type fsl_mc_bus_type; + +extern struct device_type fsl_mc_bus_dprc_type; +extern struct device_type fsl_mc_bus_dpni_type; +extern struct device_type fsl_mc_bus_dpio_type; +extern struct device_type fsl_mc_bus_dpsw_type; +extern struct device_type fsl_mc_bus_dpbp_type; +extern struct device_type fsl_mc_bus_dpcon_type; +extern struct device_type fsl_mc_bus_dpmcp_type; +extern struct device_type fsl_mc_bus_dpmac_type; +extern struct device_type fsl_mc_bus_dprtc_type; + +static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dprc_type; +} + +static inline bool is_fsl_mc_bus_dpni(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpni_type; +} + +static inline bool is_fsl_mc_bus_dpio(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpio_type; +} + +static inline bool is_fsl_mc_bus_dpsw(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpsw_type; +} + +static inline bool is_fsl_mc_bus_dpbp(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpbp_type; +} + +static inline bool is_fsl_mc_bus_dpcon(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpcon_type; +} + +static inline bool is_fsl_mc_bus_dpmcp(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpmcp_type; +} + +static inline bool is_fsl_mc_bus_dpmac(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpmac_type; +} + +static inline bool is_fsl_mc_bus_dprtc(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dprtc_type; +} + +/* + * Data Path Buffer Pool (DPBP) API + * Contains initialization APIs and runtime control APIs for DPBP + */ + +int dpbp_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dpbp_id, + u16 *token); + +int dpbp_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpbp_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpbp_disable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpbp_reset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +/** + * struct dpbp_attr - Structure representing DPBP 
attributes + * @id: DPBP object ID + * @bpid: Hardware buffer pool ID; should be used as an argument in + * acquire/release operations on buffers + */ +struct dpbp_attr { + int id; + u16 bpid; +}; + +int dpbp_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpbp_attr *attr); + +/* Data Path Concentrator (DPCON) API + * Contains initialization APIs and runtime control APIs for DPCON + */ + +/** + * Use it to disable notifications; see dpcon_set_notification() + */ +#define DPCON_INVALID_DPIO_ID (int)(-1) + +int dpcon_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dpcon_id, + u16 *token); + +int dpcon_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpcon_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpcon_disable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpcon_reset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +/** + * struct dpcon_attr - Structure representing DPCON attributes + * @id: DPCON object ID + * @qbman_ch_id: Channel ID to be used by dequeue operation + * @num_priorities: Number of priorities for the DPCON channel (1-8) + */ +struct dpcon_attr { + int id; + u16 qbman_ch_id; + u8 num_priorities; +}; + +int dpcon_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpcon_attr *attr); + +/** + * struct dpcon_notification_cfg - Structure representing notification params + * @dpio_id: DPIO object ID; must be configured with a notification channel; + * to disable notifications set it to 'DPCON_INVALID_DPIO_ID'; + * @priority: Priority selection within the DPIO channel; valid values + * are 0-7, depending on the number of priorities in that channel + * @user_ctx: User context value provided with each CDAN message + */ +struct dpcon_notification_cfg { + int dpio_id; + u8 priority; + u64 user_ctx; +}; + +int dpcon_set_notification(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpcon_notification_cfg *cfg); + +#endif /* _FSL_MC_H_ */ diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h index c332f0a45607..3fdfede2f0f3 100644 --- a/include/linux/fsl_ifc.h +++ b/include/linux/fsl_ifc.h @@ -734,11 +734,7 @@ struct fsl_ifc_nand { u32 res19[0x10]; __be32 nand_fsr; u32 res20; - /* The V1 nand_eccstat is actually 4 words that overlaps the - * V2 nand_eccstat. - */ - __be32 v1_nand_eccstat[2]; - __be32 v2_nand_eccstat[6]; + __be32 nand_eccstat[8]; u32 res21[0x1c]; __be32 nanndcr; u32 res22[0x2]; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 067d52e95f02..e0c95c9f1e29 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -248,7 +248,7 @@ struct fsnotify_mark { /* Group this mark is for. Set on mark creation, stable until last ref * is dropped */ struct fsnotify_group *group; - /* List of marks by group->i_fsnotify_marks. Also reused for queueing + /* List of marks by group->marks_list. Also reused for queueing * mark into destroy_list when it's waiting for the end of SRCU period * before it can be freed. 
[group->mark_mutex] */ struct list_head g_list; @@ -331,6 +331,12 @@ extern int fsnotify_add_event(struct fsnotify_group *group, struct fsnotify_event *event, int (*merge)(struct list_head *, struct fsnotify_event *)); +/* Queue overflow event to a notification group */ +static inline void fsnotify_queue_overflow(struct fsnotify_group *group) +{ + fsnotify_add_event(group, group->overflow_event, NULL); +} + /* true if the group notification queue is empty */ extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); /* return, but do not dequeue the first event on the notification queue */ diff --git a/include/linux/futex.h b/include/linux/futex.h index c0fb9a24bbd2..821ae502d3d8 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -9,9 +9,6 @@ struct inode; struct mm_struct; struct task_struct; -long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, - u32 __user *uaddr2, u32 val2, u32 val3); - extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); @@ -55,6 +52,9 @@ union futex_key { #ifdef CONFIG_FUTEX extern void exit_robust_list(struct task_struct *curr); + +long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, + u32 __user *uaddr2, u32 val2, u32 val3); #ifdef CONFIG_HAVE_FUTEX_CMPXCHG #define futex_cmpxchg_enabled 1 #else @@ -64,6 +64,13 @@ extern int futex_cmpxchg_enabled; static inline void exit_robust_list(struct task_struct *curr) { } + +static inline long do_futex(u32 __user *uaddr, int op, u32 val, + ktime_t *timeout, u32 __user *uaddr2, + u32 val2, u32 val3) +{ + return -EINVAL; +} #endif #ifdef CONFIG_FUTEX_PI diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 1ba9a331ec51..5382b5183b7e 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -288,6 +288,21 @@ struct gpio_chip { struct gpio_irq_chip irq; #endif + /** + * @need_valid_mask: + * + * If set core allocates @valid_mask with all bits set to one. + */ + bool need_valid_mask; + + /** + * @valid_mask: + * + * If not %NULL holds bitmask of GPIOs which are valid to be used + * from the chip. 
+ */ + unsigned long *valid_mask; + #if defined(CONFIG_OF_GPIO) /* * If CONFIG_OF is enabled, then all GPIO controllers described in the @@ -384,6 +399,7 @@ bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset); /* Sleep persistence inquiry for drivers */ bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset); +bool gpiochip_line_is_valid(const struct gpio_chip *chip, unsigned int offset); /* get driver data */ void *gpiochip_get_data(struct gpio_chip *chip); diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h index d06bf77400f1..7160df54a6fe 100644 --- a/include/linux/gpio_keys.h +++ b/include/linux/gpio_keys.h @@ -13,6 +13,7 @@ struct device; * @desc: label that will be attached to button's gpio * @type: input event type (%EV_KEY, %EV_SW, %EV_ABS) * @wakeup: configure the button as a wake-up source + * @wakeup_event_action: event action to trigger wakeup * @debounce_interval: debounce ticks interval in msecs * @can_disable: %true indicates that userspace is allowed to * disable button via sysfs @@ -26,6 +27,7 @@ struct gpio_keys_button { const char *desc; unsigned int type; int wakeup; + int wakeup_event_action; int debounce_interval; bool can_disable; int value; diff --git a/include/linux/hid.h b/include/linux/hid.h index 091a81cf330f..26240a22978a 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -26,6 +26,7 @@ #define __HID_H +#include <linux/bitops.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/list.h> @@ -310,13 +311,13 @@ struct hid_item { * HID connect requests */ -#define HID_CONNECT_HIDINPUT 0x01 -#define HID_CONNECT_HIDINPUT_FORCE 0x02 -#define HID_CONNECT_HIDRAW 0x04 -#define HID_CONNECT_HIDDEV 0x08 -#define HID_CONNECT_HIDDEV_FORCE 0x10 -#define HID_CONNECT_FF 0x20 -#define HID_CONNECT_DRIVER 0x40 +#define HID_CONNECT_HIDINPUT BIT(0) +#define HID_CONNECT_HIDINPUT_FORCE BIT(1) +#define HID_CONNECT_HIDRAW BIT(2) +#define HID_CONNECT_HIDDEV BIT(3) +#define HID_CONNECT_HIDDEV_FORCE BIT(4) +#define HID_CONNECT_FF BIT(5) +#define HID_CONNECT_DRIVER BIT(6) #define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \ HID_CONNECT_HIDDEV|HID_CONNECT_FF) @@ -329,25 +330,25 @@ struct hid_item { */ #define MAX_USBHID_BOOT_QUIRKS 4 -#define HID_QUIRK_INVERT 0x00000001 -#define HID_QUIRK_NOTOUCH 0x00000002 -#define HID_QUIRK_IGNORE 0x00000004 -#define HID_QUIRK_NOGET 0x00000008 -#define HID_QUIRK_HIDDEV_FORCE 0x00000010 -#define HID_QUIRK_BADPAD 0x00000020 -#define HID_QUIRK_MULTI_INPUT 0x00000040 -#define HID_QUIRK_HIDINPUT_FORCE 0x00000080 -#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100 -/* 0x00000200 reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */ -#define HID_QUIRK_ALWAYS_POLL 0x00000400 -#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 -#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000 -#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000 -#define HID_QUIRK_HAVE_SPECIAL_DRIVER 0x00080000 -#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 -#define HID_QUIRK_NO_INIT_REPORTS 0x20000000 -#define HID_QUIRK_NO_IGNORE 0x40000000 -#define HID_QUIRK_NO_INPUT_SYNC 0x80000000 +#define HID_QUIRK_INVERT BIT(0) +#define HID_QUIRK_NOTOUCH BIT(1) +#define HID_QUIRK_IGNORE BIT(2) +#define HID_QUIRK_NOGET BIT(3) +#define HID_QUIRK_HIDDEV_FORCE BIT(4) +#define HID_QUIRK_BADPAD BIT(5) +#define HID_QUIRK_MULTI_INPUT BIT(6) +#define HID_QUIRK_HIDINPUT_FORCE BIT(7) +/* BIT(8) reserved for backward compatibility, was HID_QUIRK_NO_EMPTY_INPUT */ +/* BIT(9) reserved for backward 
compatibility, was NO_INIT_INPUT_REPORTS */ +#define HID_QUIRK_ALWAYS_POLL BIT(10) +#define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16) +#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17) +#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18) +#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19) +#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28) +#define HID_QUIRK_NO_INIT_REPORTS BIT(29) +#define HID_QUIRK_NO_IGNORE BIT(30) +#define HID_QUIRK_NO_INPUT_SYNC BIT(31) /* * HID device groups @@ -494,13 +495,13 @@ struct hid_output_fifo { char *raw_report; }; -#define HID_CLAIMED_INPUT 1 -#define HID_CLAIMED_HIDDEV 2 -#define HID_CLAIMED_HIDRAW 4 -#define HID_CLAIMED_DRIVER 8 +#define HID_CLAIMED_INPUT BIT(0) +#define HID_CLAIMED_HIDDEV BIT(1) +#define HID_CLAIMED_HIDRAW BIT(2) +#define HID_CLAIMED_DRIVER BIT(3) -#define HID_STAT_ADDED 1 -#define HID_STAT_PARSED 2 +#define HID_STAT_ADDED BIT(0) +#define HID_STAT_PARSED BIT(1) struct hid_input { struct list_head list; @@ -515,6 +516,12 @@ enum hid_type { HID_TYPE_USBNONE }; +enum hid_battery_status { + HID_BATTERY_UNKNOWN = 0, + HID_BATTERY_QUERIED, /* Kernel explicitly queried battery strength */ + HID_BATTERY_REPORTED, /* Device sent unsolicited battery strength report */ +}; + struct hid_driver; struct hid_ll_driver; @@ -557,7 +564,8 @@ struct hid_device { /* device report descriptor */ __s32 battery_max; __s32 battery_report_type; __s32 battery_report_id; - bool battery_reported; + enum hid_battery_status battery_status; + bool battery_avoid_query; #endif unsigned int status; /* see STAT flags above */ @@ -686,8 +694,6 @@ struct hid_usage_id { * @input_mapped: invoked on input registering after mapping an usage * @input_configured: invoked just before the device is registered * @feature_mapping: invoked on feature registering - * @bus_add_driver: invoked when a HID driver is about to be added - * @bus_removed_driver: invoked when a HID driver has been removed * @suspend: invoked on suspend (NULL means nop) * @resume: invoked on resume if device was not reset (NULL means nop) * @reset_resume: invoked on resume if device was reset (NULL means nop) @@ -742,8 +748,6 @@ struct hid_driver { void (*feature_mapping)(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage); - void (*bus_add_driver)(struct hid_driver *driver); - void (*bus_removed_driver)(struct hid_driver *driver); #ifdef CONFIG_PM int (*suspend)(struct hid_device *hdev, pm_message_t message); int (*resume)(struct hid_device *hdev); @@ -851,7 +855,7 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force); extern void hidinput_disconnect(struct hid_device *); int hid_set_field(struct hid_field *, unsigned, __s32); -int hid_input_report(struct hid_device *, int type, u8 *, int, int); +int hid_input_report(struct hid_device *, int type, u8 *, u32, int); int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field); struct hid_field *hidinput_get_led_field(struct hid_device *hid); unsigned int hidinput_count_leds(struct hid_device *hid); @@ -1102,13 +1106,13 @@ static inline void hid_hw_wait(struct hid_device *hdev) * * @report: the report we want to know the length */ -static inline int hid_report_len(struct hid_report *report) +static inline u32 hid_report_len(struct hid_report *report) { /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ return ((report->size - 1) >> 3) + 1 + (report->id > 0); } -int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, +int 
hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt); /* HID quirks API */ diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 776f90f3a1cd..0690679832d4 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -237,6 +237,8 @@ static inline void copy_user_highpage(struct page *to, struct page *from, #endif +#ifndef __HAVE_ARCH_COPY_HIGHPAGE + static inline void copy_highpage(struct page *to, struct page *from) { char *vfrom, *vto; @@ -248,4 +250,6 @@ static inline void copy_highpage(struct page *to, struct page *from) kunmap_atomic(vfrom); } +#endif + #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 325017ad9311..39988924de3a 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -80,76 +80,145 @@ struct hmm; /* - * hmm_pfn_t - HMM uses its own pfn type to keep several flags per page + * hmm_pfn_flag_e - HMM flag enums * * Flags: - * HMM_PFN_VALID: pfn is valid - * HMM_PFN_READ: CPU page table has read permission set + * HMM_PFN_VALID: pfn is valid. It has, at least, read permission. * HMM_PFN_WRITE: CPU page table has write permission set + * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE) + * + * The driver provide a flags array, if driver valid bit for an entry is bit + * 3 ie (entry & (1 << 3)) is true if entry is valid then driver must provide + * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3. + * Same logic apply to all flags. This is same idea as vm_page_prot in vma + * except that this is per device driver rather than per architecture. + */ +enum hmm_pfn_flag_e { + HMM_PFN_VALID = 0, + HMM_PFN_WRITE, + HMM_PFN_DEVICE_PRIVATE, + HMM_PFN_FLAG_MAX +}; + +/* + * hmm_pfn_value_e - HMM pfn special value + * + * Flags: * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory - * HMM_PFN_EMPTY: corresponding CPU page table entry is pte_none() + * HMM_PFN_NONE: corresponding CPU page table entry is pte_none() * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the * result of vm_insert_pfn() or vm_insert_page(). Therefore, it should not * be mirrored by a device, because the entry will never have HMM_PFN_VALID * set and the pfn value is undefined. - * HMM_PFN_DEVICE_UNADDRESSABLE: unaddressable device memory (ZONE_DEVICE) + * + * Driver provide entry value for none entry, error entry and special entry, + * driver can alias (ie use same value for error and special for instance). It + * should not alias none and error or special. 
+ * + * HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be: + * hmm_range.values[HMM_PFN_ERROR] if CPU page table entry is poisonous, + * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table + * hmm_range.values[HMM_PFN_SPECIAL] if CPU page table entry is a special one */ -typedef unsigned long hmm_pfn_t; +enum hmm_pfn_value_e { + HMM_PFN_ERROR, + HMM_PFN_NONE, + HMM_PFN_SPECIAL, + HMM_PFN_VALUE_MAX +}; -#define HMM_PFN_VALID (1 << 0) -#define HMM_PFN_READ (1 << 1) -#define HMM_PFN_WRITE (1 << 2) -#define HMM_PFN_ERROR (1 << 3) -#define HMM_PFN_EMPTY (1 << 4) -#define HMM_PFN_SPECIAL (1 << 5) -#define HMM_PFN_DEVICE_UNADDRESSABLE (1 << 6) -#define HMM_PFN_SHIFT 7 +/* + * struct hmm_range - track invalidation lock on virtual address range + * + * @vma: the vm area struct for the range + * @list: all range lock are on a list + * @start: range virtual start address (inclusive) + * @end: range virtual end address (exclusive) + * @pfns: array of pfns (big enough for the range) + * @flags: pfn flags to match device driver page table + * @values: pfn value for some special case (none, special, error, ...) + * @pfn_shifts: pfn shift value (should be <= PAGE_SHIFT) + * @valid: pfns array did not change since it has been fill by an HMM function + */ +struct hmm_range { + struct vm_area_struct *vma; + struct list_head list; + unsigned long start; + unsigned long end; + uint64_t *pfns; + const uint64_t *flags; + const uint64_t *values; + uint8_t pfn_shift; + bool valid; +}; /* - * hmm_pfn_t_to_page() - return struct page pointed to by a valid hmm_pfn_t - * @pfn: hmm_pfn_t to convert to struct page - * Returns: struct page pointer if pfn is a valid hmm_pfn_t, NULL otherwise + * hmm_pfn_to_page() - return struct page pointed to by a valid HMM pfn + * @range: range use to decode HMM pfn value + * @pfn: HMM pfn value to get corresponding struct page from + * Returns: struct page pointer if pfn is a valid HMM pfn, NULL otherwise * - * If the hmm_pfn_t is valid (ie valid flag set) then return the struct page - * matching the pfn value stored in the hmm_pfn_t. Otherwise return NULL. + * If the HMM pfn is valid (ie valid flag set) then return the struct page + * matching the pfn value stored in the HMM pfn. Otherwise return NULL. 
*/ -static inline struct page *hmm_pfn_t_to_page(hmm_pfn_t pfn) +static inline struct page *hmm_pfn_to_page(const struct hmm_range *range, + uint64_t pfn) { - if (!(pfn & HMM_PFN_VALID)) + if (pfn == range->values[HMM_PFN_NONE]) + return NULL; + if (pfn == range->values[HMM_PFN_ERROR]) return NULL; - return pfn_to_page(pfn >> HMM_PFN_SHIFT); + if (pfn == range->values[HMM_PFN_SPECIAL]) + return NULL; + if (!(pfn & range->flags[HMM_PFN_VALID])) + return NULL; + return pfn_to_page(pfn >> range->pfn_shift); } /* - * hmm_pfn_t_to_pfn() - return pfn value store in a hmm_pfn_t - * @pfn: hmm_pfn_t to extract pfn from - * Returns: pfn value if hmm_pfn_t is valid, -1UL otherwise + * hmm_pfn_to_pfn() - return pfn value store in a HMM pfn + * @range: range use to decode HMM pfn value + * @pfn: HMM pfn value to extract pfn from + * Returns: pfn value if HMM pfn is valid, -1UL otherwise */ -static inline unsigned long hmm_pfn_t_to_pfn(hmm_pfn_t pfn) +static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range, + uint64_t pfn) { - if (!(pfn & HMM_PFN_VALID)) + if (pfn == range->values[HMM_PFN_NONE]) + return -1UL; + if (pfn == range->values[HMM_PFN_ERROR]) + return -1UL; + if (pfn == range->values[HMM_PFN_SPECIAL]) + return -1UL; + if (!(pfn & range->flags[HMM_PFN_VALID])) return -1UL; - return (pfn >> HMM_PFN_SHIFT); + return (pfn >> range->pfn_shift); } /* - * hmm_pfn_t_from_page() - create a valid hmm_pfn_t value from struct page - * @page: struct page pointer for which to create the hmm_pfn_t - * Returns: valid hmm_pfn_t for the page + * hmm_pfn_from_page() - create a valid HMM pfn value from struct page + * @range: range use to encode HMM pfn value + * @page: struct page pointer for which to create the HMM pfn + * Returns: valid HMM pfn for the page */ -static inline hmm_pfn_t hmm_pfn_t_from_page(struct page *page) +static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range, + struct page *page) { - return (page_to_pfn(page) << HMM_PFN_SHIFT) | HMM_PFN_VALID; + return (page_to_pfn(page) << range->pfn_shift) | + range->flags[HMM_PFN_VALID]; } /* - * hmm_pfn_t_from_pfn() - create a valid hmm_pfn_t value from pfn - * @pfn: pfn value for which to create the hmm_pfn_t - * Returns: valid hmm_pfn_t for the pfn + * hmm_pfn_from_pfn() - create a valid HMM pfn value from pfn + * @range: range use to encode HMM pfn value + * @pfn: pfn value for which to create the HMM pfn + * Returns: valid HMM pfn for the pfn */ -static inline hmm_pfn_t hmm_pfn_t_from_pfn(unsigned long pfn) +static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range, + unsigned long pfn) { - return (pfn << HMM_PFN_SHIFT) | HMM_PFN_VALID; + return (pfn << range->pfn_shift) | + range->flags[HMM_PFN_VALID]; } @@ -218,6 +287,16 @@ enum hmm_update_type { * @update: callback to update range on a device */ struct hmm_mirror_ops { + /* release() - release hmm_mirror + * + * @mirror: pointer to struct hmm_mirror + * + * This is called when the mm_struct is being released. + * The callback should make sure no references to the mirror occur + * after the callback returns. 
+ */ + void (*release)(struct hmm_mirror *mirror); + /* sync_cpu_device_pagetables() - synchronize page tables * * @mirror: pointer to struct hmm_mirror @@ -262,23 +341,6 @@ void hmm_mirror_unregister(struct hmm_mirror *mirror); /* - * struct hmm_range - track invalidation lock on virtual address range - * - * @list: all range lock are on a list - * @start: range virtual start address (inclusive) - * @end: range virtual end address (exclusive) - * @pfns: array of pfns (big enough for the range) - * @valid: pfns array did not change since it has been fill by an HMM function - */ -struct hmm_range { - struct list_head list; - unsigned long start; - unsigned long end; - hmm_pfn_t *pfns; - bool valid; -}; - -/* * To snapshot the CPU page table, call hmm_vma_get_pfns(), then take a device * driver lock that serializes device page table updates, then call * hmm_vma_range_done(), to check if the snapshot is still valid. The same @@ -291,17 +353,13 @@ struct hmm_range { * * IF YOU DO NOT FOLLOW THE ABOVE RULE THE SNAPSHOT CONTENT MIGHT BE INVALID ! */ -int hmm_vma_get_pfns(struct vm_area_struct *vma, - struct hmm_range *range, - unsigned long start, - unsigned long end, - hmm_pfn_t *pfns); -bool hmm_vma_range_done(struct vm_area_struct *vma, struct hmm_range *range); +int hmm_vma_get_pfns(struct hmm_range *range); +bool hmm_vma_range_done(struct hmm_range *range); /* * Fault memory on behalf of device driver. Unlike handle_mm_fault(), this will - * not migrate any device memory back to system memory. The hmm_pfn_t array will + * not migrate any device memory back to system memory. The HMM pfn array will * be updated with the fault result and current snapshot of the CPU page table * for the range. * @@ -310,22 +368,26 @@ bool hmm_vma_range_done(struct vm_area_struct *vma, struct hmm_range *range); * function returns -EAGAIN. * * Return value does not reflect if the fault was successful for every single - * address or not. Therefore, the caller must to inspect the hmm_pfn_t array to + * address or not. Therefore, the caller must to inspect the HMM pfn array to * determine fault status for each address. * * Trying to fault inside an invalid vma will result in -EINVAL. * * See the function description in mm/hmm.c for further documentation. */ -int hmm_vma_fault(struct vm_area_struct *vma, - struct hmm_range *range, - unsigned long start, - unsigned long end, - hmm_pfn_t *pfns, - bool write, - bool block); -#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */ +int hmm_vma_fault(struct hmm_range *range, bool block); +/* Below are for HMM internal use only! Not to be used by device driver! */ +void hmm_mm_destroy(struct mm_struct *mm); + +static inline void hmm_mm_init(struct mm_struct *mm) +{ + mm->hmm = NULL; +} +#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */ +static inline void hmm_mm_destroy(struct mm_struct *mm) {} +static inline void hmm_mm_init(struct mm_struct *mm) {} +#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */ #if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC) struct hmm_devmem; @@ -498,23 +560,9 @@ struct hmm_device { struct hmm_device *hmm_device_new(void *drvdata); void hmm_device_put(struct hmm_device *hmm_device); #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */ -#endif /* IS_ENABLED(CONFIG_HMM) */ - -/* Below are for HMM internal use only! Not to be used by device driver! 
*/ -#if IS_ENABLED(CONFIG_HMM_MIRROR) -void hmm_mm_destroy(struct mm_struct *mm); - -static inline void hmm_mm_init(struct mm_struct *mm) -{ - mm->hmm = NULL; -} -#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */ -static inline void hmm_mm_destroy(struct mm_struct *mm) {} -static inline void hmm_mm_init(struct mm_struct *mm) {} -#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */ - - #else /* IS_ENABLED(CONFIG_HMM) */ static inline void hmm_mm_destroy(struct mm_struct *mm) {} static inline void hmm_mm_init(struct mm_struct *mm) {} +#endif /* IS_ENABLED(CONFIG_HMM) */ + #endif /* LINUX_HMM_H */ diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index c7902ca7c9f4..a2656c3ebe81 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -161,11 +161,9 @@ struct hrtimer_clock_base { enum hrtimer_base_type { HRTIMER_BASE_MONOTONIC, HRTIMER_BASE_REALTIME, - HRTIMER_BASE_BOOTTIME, HRTIMER_BASE_TAI, HRTIMER_BASE_MONOTONIC_SOFT, HRTIMER_BASE_REALTIME_SOFT, - HRTIMER_BASE_BOOTTIME_SOFT, HRTIMER_BASE_TAI_SOFT, HRTIMER_MAX_CLOCK_BASES, }; @@ -426,6 +424,7 @@ static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer) } extern u64 hrtimer_get_next_event(void); +extern u64 hrtimer_next_event_without(const struct hrtimer *exclude); extern bool hrtimer_active(const struct hrtimer *timer); diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index cf045885a499..6058c3844a76 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -53,6 +53,9 @@ register_user_hw_breakpoint(struct perf_event_attr *attr, /* FIXME: only change from the attr, and don't unregister */ extern int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr); +extern int +modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr, + bool check); /* * Kernel breakpoints are not associated with any particular thread. @@ -97,6 +100,10 @@ register_user_hw_breakpoint(struct perf_event_attr *attr, static inline int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) { return -ENOSYS; } +static inline int +modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr, + bool check) { return -ENOSYS; } + static inline struct perf_event * register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, perf_overflow_handler_t triggered, diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index ceb751987c40..e5fd2707b6df 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -29,6 +29,7 @@ enum hwmon_sensor_types { hwmon_humidity, hwmon_fan, hwmon_pwm, + hwmon_max, }; enum hwmon_chip_attributes { diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 93bd6fcd6e62..192ed8fbc403 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -26,7 +26,6 @@ #define _HYPERV_H #include <uapi/linux/hyperv.h> -#include <uapi/asm/hyperv.h> #include <linux/types.h> #include <linux/scatterlist.h> @@ -844,7 +843,7 @@ struct vmbus_channel { /* * NUMA distribution policy: - * We support teo policies: + * We support two policies: * 1) Balanced: Here all performance critical channels are * distributed evenly amongst all the NUMA nodes. * This policy will be the default policy. 
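The hmm.h changes above replace the old hmm_pfn_t bit layout with driver-supplied flags[] and values[] arrays inside struct hmm_range, and move the snapshot entry points to hmm_vma_get_pfns()/hmm_vma_range_done() taking only the range. Below is a minimal sketch of how a mirror driver might describe its own encoding and take a snapshot under the new convention; the bit positions and all example_* names are illustrative assumptions, not taken from any in-tree driver.

#include <linux/hmm.h>

/* Driver-chosen layout: bits 0-2 carry flags, the pfn starts at bit 3. */
static const uint64_t example_flags[HMM_PFN_FLAG_MAX] = {
	[HMM_PFN_VALID]          = 1ULL << 0,
	[HMM_PFN_WRITE]          = 1ULL << 1,
	[HMM_PFN_DEVICE_PRIVATE] = 1ULL << 2,
};

/* Special values never set the VALID bit, so they cannot alias a real entry. */
static const uint64_t example_values[HMM_PFN_VALUE_MAX] = {
	[HMM_PFN_NONE]    = 0,
	[HMM_PFN_ERROR]   = 1ULL << 1,
	[HMM_PFN_SPECIAL] = 1ULL << 2,
};

static int example_snapshot(struct vm_area_struct *vma, uint64_t *pfns,
			    unsigned long start, unsigned long end)
{
	struct hmm_range range = {
		.vma       = vma,
		.start     = start,
		.end       = end,
		.pfns      = pfns,
		.flags     = example_flags,
		.values    = example_values,
		.pfn_shift = 3,
	};
	int ret;

	ret = hmm_vma_get_pfns(&range);
	if (ret)
		return ret;

	/*
	 * Take the driver lock that serializes device page table updates,
	 * then confirm the snapshot is still valid before using it.
	 */
	if (!hmm_vma_range_done(&range))
		return -EAGAIN;	/* CPU page tables changed; caller retries */

	/* Snapshot is stable: decode entries with hmm_pfn_to_pfn(&range, ...). */
	return 0;
}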
diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h index b19563f9a8eb..fc08b433c856 100644 --- a/include/linux/hypervisor.h +++ b/include/linux/hypervisor.h @@ -8,15 +8,28 @@ */ #ifdef CONFIG_X86 + +#include <asm/jailhouse_para.h> #include <asm/x86_init.h> + static inline void hypervisor_pin_vcpu(int cpu) { x86_platform.hyper.pin_vcpu(cpu); } -#else + +#else /* !CONFIG_X86 */ + +#include <linux/of.h> + static inline void hypervisor_pin_vcpu(int cpu) { } -#endif + +static inline bool jailhouse_paravirt(void) +{ + return of_find_compatible_node(NULL, NULL, "jailhouse,cell"); +} + +#endif /* !CONFIG_X86 */ #endif /* __LINUX_HYPEVISOR_H */ diff --git a/include/linux/i2c-pca-platform.h b/include/linux/i2c-pca-platform.h index 0e5f7c77d1d8..c37329432a8e 100644 --- a/include/linux/i2c-pca-platform.h +++ b/include/linux/i2c-pca-platform.h @@ -3,9 +3,6 @@ #define I2C_PCA9564_PLATFORM_H struct i2c_pca9564_pf_platform_data { - int gpio; /* pin to reset chip. driver will work when - * not supplied (negative value), but it - * cannot exit some error conditions then */ int i2c_clock_speed; /* values are defined in linux/i2c-algo-pca.h */ int timeout; /* timeout in jiffies */ }; diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 419a38e7c315..44ad14e016b5 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -47,6 +47,7 @@ struct i2c_algorithm; struct i2c_adapter; struct i2c_client; struct i2c_driver; +struct i2c_device_identity; union i2c_smbus_data; struct i2c_board_info; enum i2c_slave_event; @@ -186,8 +187,37 @@ extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, extern s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client, u8 command, u8 length, u8 *values); +int i2c_get_device_id(const struct i2c_client *client, + struct i2c_device_identity *id); #endif /* I2C */ +/** + * struct i2c_device_identity - i2c client device identification + * @manufacturer_id: 0 - 4095, database maintained by NXP + * @part_id: 0 - 511, according to manufacturer + * @die_revision: 0 - 7, according to manufacturer + */ +struct i2c_device_identity { + u16 manufacturer_id; +#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS 0 +#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_1 1 +#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_2 2 +#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_3 3 +#define I2C_DEVICE_ID_RAMTRON_INTERNATIONAL 4 +#define I2C_DEVICE_ID_ANALOG_DEVICES 5 +#define I2C_DEVICE_ID_STMICROELECTRONICS 6 +#define I2C_DEVICE_ID_ON_SEMICONDUCTOR 7 +#define I2C_DEVICE_ID_SPRINTEK_CORPORATION 8 +#define I2C_DEVICE_ID_ESPROS_PHOTONICS_AG 9 +#define I2C_DEVICE_ID_FUJITSU_SEMICONDUCTOR 10 +#define I2C_DEVICE_ID_FLIR 11 +#define I2C_DEVICE_ID_O2MICRO 12 +#define I2C_DEVICE_ID_ATMEL 13 +#define I2C_DEVICE_ID_NONE 0xffff + u16 part_id; + u8 die_revision; +}; + enum i2c_alert_protocol { I2C_PROTOCOL_SMBUS_ALERT, I2C_PROTOCOL_SMBUS_HOST_NOTIFY, diff --git a/include/linux/ide.h b/include/linux/ide.h index 771989d25ef8..ca9d34feb572 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -25,15 +25,10 @@ #include <asm/byteorder.h> #include <asm/io.h> -#if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) -# define SUPPORT_VLB_SYNC 0 -#else -# define SUPPORT_VLB_SYNC 1 -#endif - /* * Probably not wise to fiddle with these */ +#define SUPPORT_VLB_SYNC 1 #define IDE_DEFAULT_MAX_FAILURES 1 #define ERROR_MAX 8 /* Max read/write errors per sector */ #define ERROR_RESET 3 /* Reset controller every 4th retry */ @@ -165,7 +160,6 @@ struct ide_io_ports { */ #define 
PARTN_BITS 6 /* number of minor dev bits for partitions */ #define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */ -#define SECTOR_SIZE 512 /* * Timeouts for various operations: diff --git a/include/linux/idr.h b/include/linux/idr.h index 7d6a6313f0ab..e856f4e0ab35 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -29,29 +29,31 @@ struct idr { #define IDR_FREE 0 /* Set the IDR flag and the IDR_FREE tag */ -#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT)) +#define IDR_RT_MARKER (ROOT_IS_IDR | (__force gfp_t) \ + (1 << (ROOT_TAG_SHIFT + IDR_FREE))) -#define IDR_INIT_BASE(base) { \ - .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER), \ +#define IDR_INIT_BASE(name, base) { \ + .idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER), \ .idr_base = (base), \ .idr_next = 0, \ } /** * IDR_INIT() - Initialise an IDR. + * @name: Name of IDR. * * A freshly-initialised IDR contains no IDs. */ -#define IDR_INIT IDR_INIT_BASE(0) +#define IDR_INIT(name) IDR_INIT_BASE(name, 0) /** - * DEFINE_IDR() - Define a statically-allocated IDR - * @name: Name of IDR + * DEFINE_IDR() - Define a statically-allocated IDR. + * @name: Name of IDR. * * An IDR defined using this macro is ready for use with no additional * initialisation required. It contains no IDs. */ -#define DEFINE_IDR(name) struct idr name = IDR_INIT +#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) /** * idr_get_cursor - Return the current position of the cyclic allocator @@ -218,10 +220,10 @@ struct ida { struct radix_tree_root ida_rt; }; -#define IDA_INIT { \ - .ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT), \ +#define IDA_INIT(name) { \ + .ida_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER | GFP_NOWAIT), \ } -#define DEFINE_IDA(name) struct ida name = IDA_INIT +#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) int ida_pre_get(struct ida *ida, gfp_t gfp_mask); int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index ee6657a0ed69..8fe7e4306816 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -8,6 +8,7 @@ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -2111,7 +2112,7 @@ enum ieee80211_key_len { #define FILS_ERP_MAX_REALM_LEN 253 #define FILS_ERP_MAX_RRK_LEN 64 -#define PMK_MAX_LEN 48 +#define PMK_MAX_LEN 64 /* Public action codes (IEEE Std 802.11-2016, 9.6.8.1, Table 9-307) */ enum ieee80211_pub_actioncode { @@ -2502,6 +2503,17 @@ static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr) } /** + * ieee80211_get_tid - get qos TID + * @hdr: the frame + */ +static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr) +{ + u8 *qc = ieee80211_get_qos_ctl(hdr); + + return qc[0] & IEEE80211_QOS_CTL_TID_MASK; +} + +/** * ieee80211_get_SA - get pointer to SA * @hdr: the frame * diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index c5b0a75a7812..fd00170b494f 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -25,6 +25,7 @@ struct ptr_ring *tun_get_tx_ring(struct file *file); bool tun_is_xdp_buff(void *ptr); void *tun_xdp_to_ptr(void *ptr); void *tun_ptr_to_xdp(void *ptr); +void tun_ptr_free(void *ptr); #else #include <linux/err.h> #include <linux/errno.h> @@ -50,5 +51,8 @@ 
static inline void *tun_ptr_to_xdp(void *ptr) { return NULL; } +static inline void tun_ptr_free(void *ptr) +{ +} #endif /* CONFIG_TUN */ #endif /* __IF_TUN_H */ diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 5e6a2d4dc366..78a5a90b4267 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -83,6 +83,30 @@ static inline bool is_vlan_dev(const struct net_device *dev) #define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) #define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK) +static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev) +{ + ASSERT_RTNL(); + return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev)); +} + +static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev) +{ + ASSERT_RTNL(); + call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev); +} + +static inline int vlan_get_rx_stag_filter_info(struct net_device *dev) +{ + ASSERT_RTNL(); + return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev)); +} + +static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev) +{ + ASSERT_RTNL(); + call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev); +} + /** * struct vlan_pcpu_stats - VLAN percpu rx/tx stats * @rx_packets: number of received packets @@ -300,32 +324,47 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features, } /** - * __vlan_insert_tag - regular VLAN tag inserting + * __vlan_insert_inner_tag - inner VLAN tag inserting * @skb: skbuff to tag * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert + * @mac_len: MAC header length including outer vlan headers * - * Inserts the VLAN tag into @skb as part of the payload + * Inserts the VLAN tag into @skb as part of the payload at offset mac_len * Returns error if skb_cow_head failes. * * Does not change skb->protocol so this function can be used during receive. */ -static inline int __vlan_insert_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci) +static inline int __vlan_insert_inner_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci, + unsigned int mac_len) { struct vlan_ethhdr *veth; if (skb_cow_head(skb, VLAN_HLEN) < 0) return -ENOMEM; - veth = skb_push(skb, VLAN_HLEN); + skb_push(skb, VLAN_HLEN); - /* Move the mac addresses to the beginning of the new header. */ - memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); + /* Move the mac header sans proto to the beginning of the new header. 
*/ + if (likely(mac_len > ETH_TLEN)) + memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN); skb->mac_header -= VLAN_HLEN; + veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN); + /* first, the ethernet type */ - veth->h_vlan_proto = vlan_proto; + if (likely(mac_len >= ETH_TLEN)) { + /* h_vlan_encapsulated_proto should already be populated, and + * skb->data has space for h_vlan_proto + */ + veth->h_vlan_proto = vlan_proto; + } else { + /* h_vlan_encapsulated_proto should not be populated, and + * skb->data has no space for h_vlan_proto + */ + veth->h_vlan_encapsulated_proto = skb->protocol; + } /* now, the TCI */ veth->h_vlan_TCI = htons(vlan_tci); @@ -334,12 +373,30 @@ static inline int __vlan_insert_tag(struct sk_buff *skb, } /** - * vlan_insert_tag - regular VLAN tag inserting + * __vlan_insert_tag - regular VLAN tag inserting * @skb: skbuff to tag * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload + * Returns error if skb_cow_head failes. + * + * Does not change skb->protocol so this function can be used during receive. + */ +static inline int __vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) +{ + return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN); +} + +/** + * vlan_insert_inner_tag - inner VLAN tag inserting + * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol + * @vlan_tci: VLAN TCI to insert + * @mac_len: MAC header length including outer vlan headers + * + * Inserts the VLAN tag into @skb as part of the payload at offset mac_len * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. * * Following the skb_unshare() example, in case of error, the calling function @@ -347,12 +404,14 @@ static inline int __vlan_insert_tag(struct sk_buff *skb, * * Does not change skb->protocol so this function can be used during receive. */ -static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci) +static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb, + __be16 vlan_proto, + u16 vlan_tci, + unsigned int mac_len) { int err; - err = __vlan_insert_tag(skb, vlan_proto, vlan_tci); + err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len); if (err) { dev_kfree_skb_any(skb); return NULL; @@ -361,6 +420,26 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, } /** + * vlan_insert_tag - regular VLAN tag inserting + * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol + * @vlan_tci: VLAN TCI to insert + * + * Inserts the VLAN tag into @skb as part of the payload + * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + * + * Does not change skb->protocol so this function can be used during receive. + */ +static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) +{ + return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN); +} + +/** * vlan_insert_tag_set_proto - regular VLAN tag inserting * @skb: skbuff to tag * @vlan_proto: VLAN encapsulation protocol @@ -584,7 +663,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb) * Returns true if the skb is tagged with multiple vlan headers, regardless * of whether it is hardware accelerated or not. 
*/ -static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) +static inline bool skb_vlan_tagged_multi(struct sk_buff *skb) { __be16 protocol = skb->protocol; @@ -594,6 +673,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) if (likely(!eth_type_vlan(protocol))) return false; + if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) + return false; + veh = (struct vlan_ethhdr *)skb->data; protocol = veh->h_vlan_encapsulated_proto; } @@ -611,7 +693,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) * * Returns features without unsafe ones if the skb has multiple tags. */ -static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, +static inline netdev_features_t vlan_features_check(struct sk_buff *skb, netdev_features_t features) { if (skb_vlan_tagged_multi(skb)) { diff --git a/include/linux/inet.h b/include/linux/inet.h index 636ebe87e6f8..97defc1139e9 100644 --- a/include/linux/inet.h +++ b/include/linux/inet.h @@ -59,5 +59,6 @@ extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char extern int inet_pton_with_scope(struct net *net, unsigned short af, const char *src, const char *port, struct sockaddr_storage *addr); +extern bool inet_addr_is_any(struct sockaddr *addr); #endif /* _LINUX_INET_H */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 8dad3dd26eae..ef169d67df92 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -209,12 +209,12 @@ #define DMA_FECTL_IM (((u32)1) << 31) /* FSTS_REG */ -#define DMA_FSTS_PPF ((u32)2) -#define DMA_FSTS_PFO ((u32)1) -#define DMA_FSTS_IQE (1 << 4) -#define DMA_FSTS_ICE (1 << 5) -#define DMA_FSTS_ITE (1 << 6) -#define DMA_FSTS_PRO (1 << 7) +#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */ +#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */ +#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */ +#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */ +#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */ +#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */ #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff) /* FRCD_REG, 32 bits access */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 69c238210325..5426627f9c55 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -4,9 +4,7 @@ #define _LINUX_INTERRUPT_H #include <linux/kernel.h> -#include <linux/linkage.h> #include <linux/bitops.h> -#include <linux/preempt.h> #include <linux/cpumask.h> #include <linux/irqreturn.h> #include <linux/irqnr.h> diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 41b8c5757859..19938ee6eb31 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -465,23 +465,23 @@ static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, return -ENODEV; } -static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size) +static inline size_t iommu_unmap(struct iommu_domain *domain, + unsigned long iova, size_t size) { - return -ENODEV; + return 0; } -static inline int iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova, - int gfp_order) +static inline size_t iommu_unmap_fast(struct iommu_domain *domain, + unsigned long iova, int gfp_order) { - return -ENODEV; + return 0; } static inline size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot) { - return -ENODEV; + return 0; } static inline void 
iommu_flush_tlb_all(struct iommu_domain *domain) diff --git a/include/linux/ipc.h b/include/linux/ipc.h index 821b2f260992..6cc2df7f7ac9 100644 --- a/include/linux/ipc.h +++ b/include/linux/ipc.h @@ -8,8 +8,6 @@ #include <uapi/linux/ipc.h> #include <linux/refcount.h> -#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */ - /* used by in-kernel data structures */ struct kern_ipc_perm { spinlock_t lock; diff --git a/include/linux/ipmi-fru.h b/include/linux/ipmi-fru.h index 4d3a76380e32..05c9422624c6 100644 --- a/include/linux/ipmi-fru.h +++ b/include/linux/ipmi-fru.h @@ -1,9 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2012 CERN (www.cern.ch) * Author: Alessandro Rubini <rubini@gnudd.com> * - * Released according to the GNU GPL, version 2 or any later version. - * * This work is part of the White Rabbit project, a research effort led * by CERN, the European Institute for Nuclear Research. */ diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index f4ffacf4fe9d..8b0626cec980 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * ipmi.h * @@ -9,26 +10,6 @@ * * Copyright 2002 MontaVista Software Inc. * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS - * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR - * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __LINUX_IPMI_H #define __LINUX_IPMI_H diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index 5be51281e14d..af457b5a689e 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * ipmi_smi.h * @@ -9,26 +10,6 @@ * * Copyright 2002 MontaVista Software Inc. * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS - * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR - * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __LINUX_IPMI_SMI_H diff --git a/include/linux/irq.h b/include/linux/irq.h index a0231e96a578..65916a305f3d 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -10,18 +10,13 @@ * Thanks. --rmk */ -#include <linux/smp.h> -#include <linux/linkage.h> #include <linux/cache.h> #include <linux/spinlock.h> #include <linux/cpumask.h> -#include <linux/gfp.h> #include <linux/irqhandler.h> #include <linux/irqreturn.h> #include <linux/irqnr.h> -#include <linux/errno.h> #include <linux/topology.h> -#include <linux/wait.h> #include <linux/io.h> #include <linux/slab.h> @@ -1170,4 +1165,22 @@ int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest); int ipi_send_single(unsigned int virq, unsigned int cpu); int ipi_send_mask(unsigned int virq, const struct cpumask *dest); +#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER +/* + * Registers a generic IRQ handling function as the top-level IRQ handler in + * the system, which is generally the first C code called from an assembly + * architecture-specific interrupt handler. + * + * Returns 0 on success, or -EBUSY if an IRQ handler has already been + * registered. + */ +int __init set_handle_irq(void (*handle_irq)(struct pt_regs *)); + +/* + * Allows interrupt handlers to find the irqchip that's been registered as the + * top-level IRQ handler. + */ +extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init; +#endif + #endif /* _LINUX_IRQ_H */ diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index b26eccc78fb1..f5af3b594e6e 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -106,6 +106,7 @@ #define GICR_PIDR2 GICD_PIDR2 #define GICR_CTLR_ENABLE_LPIS (1UL << 0) +#define GICR_CTLR_RWP (1UL << 3) #define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) @@ -312,7 +313,8 @@ #define GITS_TYPER_DEVBITS_SHIFT 13 #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) #define GITS_TYPER_PTA (1UL << 19) -#define GITS_TYPER_HWCOLLCNT_SHIFT 24 +#define GITS_TYPER_HCC_SHIFT 24 +#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff) #define GITS_TYPER_VMOVP (1ULL << 37) #define GITS_IIDR_REV_SHIFT 12 diff --git a/include/linux/irqchip/metag-ext.h b/include/linux/irqchip/metag-ext.h deleted file mode 100644 index d120496370b9..000000000000 --- a/include/linux/irqchip/metag-ext.h +++ /dev/null @@ -1,34 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2012 Imagination Technologies - */ - -#ifndef _LINUX_IRQCHIP_METAG_EXT_H_ -#define _LINUX_IRQCHIP_METAG_EXT_H_ - -struct irq_data; -struct platform_device; - -/* called from core irq code at init */ -int init_external_IRQ(void); - -/* - * called from SoC init_irq() callback to dynamically indicate the lack of - * HWMASKEXT registers. 
- */ -void meta_intc_no_mask(void); - -/* - * These allow SoCs to specialise the interrupt controller from their init_irq - * callbacks. - */ - -extern struct irq_chip meta_intc_edge_chip; -extern struct irq_chip meta_intc_level_chip; - -/* this should be called in the mask callback */ -void meta_intc_mask_irq_simple(struct irq_data *data); -/* this should be called in the unmask callback */ -void meta_intc_unmask_irq_simple(struct irq_data *data); - -#endif /* _LINUX_IRQCHIP_METAG_EXT_H_ */ diff --git a/include/linux/irqchip/metag.h b/include/linux/irqchip/metag.h deleted file mode 100644 index 0adcf449e4e4..000000000000 --- a/include/linux/irqchip/metag.h +++ /dev/null @@ -1,25 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2011 Imagination Technologies - */ - -#ifndef _LINUX_IRQCHIP_METAG_H_ -#define _LINUX_IRQCHIP_METAG_H_ - -#include <linux/errno.h> - -#ifdef CONFIG_METAG_PERFCOUNTER_IRQS -extern int init_internal_IRQ(void); -extern int internal_irq_map(unsigned int hw); -#else -static inline int init_internal_IRQ(void) -{ - return 0; -} -static inline int internal_irq_map(unsigned int hw) -{ - return -EINVAL; -} -#endif - -#endif /* _LINUX_IRQCHIP_METAG_H_ */ diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 9385aa57497b..a27cf6652327 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -62,8 +62,11 @@ extern int register_refined_jiffies(long clock_tick_rate); /* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */ #define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ) -/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ -#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) +/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */ +#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ) + +/* USER_TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ +#define USER_TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) #ifndef __jiffy_arch_data #define __jiffy_arch_data diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 2168cc6b8b30..b46b541c67c4 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -151,7 +151,7 @@ extern struct jump_entry __start___jump_table[]; extern struct jump_entry __stop___jump_table[]; extern void jump_label_init(void); -extern void jump_label_invalidate_init(void); +extern void jump_label_invalidate_initmem(void); extern void jump_label_lock(void); extern void jump_label_unlock(void); extern void arch_jump_label_transform(struct jump_entry *entry, @@ -199,7 +199,7 @@ static __always_inline void jump_label_init(void) static_key_initialized = true; } -static inline void jump_label_invalidate_init(void) {} +static inline void jump_label_invalidate_initmem(void) {} static __always_inline bool static_key_false(struct static_key *key) { diff --git a/include/linux/kasan.h b/include/linux/kasan.h index adc13474a53b..de784fd11d12 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -18,7 +18,7 @@ extern unsigned char kasan_zero_page[PAGE_SIZE]; extern pte_t kasan_zero_pte[PTRS_PER_PTE]; extern pmd_t kasan_zero_pmd[PTRS_PER_PMD]; extern pud_t kasan_zero_pud[PTRS_PER_PUD]; -extern p4d_t kasan_zero_p4d[PTRS_PER_P4D]; +extern p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D]; void kasan_populate_zero_shadow(const void *shadow_start, const void *shadow_end); @@ -43,7 +43,7 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark); void kasan_alloc_pages(struct page *page, unsigned int order); void kasan_free_pages(struct page 
*page, unsigned int order); -void kasan_cache_create(struct kmem_cache *cache, size_t *size, +void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags); void kasan_cache_shrink(struct kmem_cache *cache); void kasan_cache_shutdown(struct kmem_cache *cache); @@ -92,7 +92,7 @@ static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} static inline void kasan_free_pages(struct page *page, unsigned int order) {} static inline void kasan_cache_create(struct kmem_cache *cache, - size_t *size, + unsigned int *size, slab_flags_t *flags) {} static inline void kasan_cache_shrink(struct kmem_cache *cache) {} static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index dcde9471897d..cc8fa109cfa3 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h @@ -70,7 +70,4 @@ */ #define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option)) -/* Make sure we always have all types and struct attributes defined. */ -#include <linux/compiler_types.h> - #endif /* __LINUX_KCONFIG_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 3fd291503576..6a1eb0b0aad9 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -439,7 +439,8 @@ extern long simple_strtol(const char *,char **,unsigned int); extern unsigned long long simple_strtoull(const char *,char **,unsigned int); extern long long simple_strtoll(const char *,char **,unsigned int); -extern int num_to_str(char *buf, int size, unsigned long long num); +extern int num_to_str(char *buf, int size, + unsigned long long num, unsigned int width); /* lib/printf utilities */ @@ -480,6 +481,15 @@ extern int func_ptr_is_kernel_text(void *ptr); unsigned long int_sqrt(unsigned long); +#if BITS_PER_LONG < 64 +u32 int_sqrt64(u64 x); +#else +static inline u32 int_sqrt64(u64 x) +{ + return (u32)int_sqrt(x); +} +#endif + extern void bust_spinlocks(int yes); extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ extern int panic_timeout; @@ -534,6 +544,7 @@ extern enum system_states { SYSTEM_RESTART, } system_state; +/* This cannot be an enum because some may be used in assembly source. */ #define TAINT_PROPRIETARY_MODULE 0 #define TAINT_FORCED_MODULE 1 #define TAINT_CPU_OUT_OF_SPEC 2 @@ -551,7 +562,8 @@ extern enum system_states { #define TAINT_SOFTLOCKUP 14 #define TAINT_LIVEPATCH 15 #define TAINT_AUX 16 -#define TAINT_FLAGS_COUNT 17 +#define TAINT_RANDSTRUCT 17 +#define TAINT_FLAGS_COUNT 18 struct taint_flag { char c_true; /* character printed when tainted */ @@ -783,41 +795,59 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } #endif /* CONFIG_TRACING */ /* - * min()/max()/clamp() macros that also do - * strict type-checking.. See the - * "unnecessary" pointer comparison. + * min()/max()/clamp() macros must accomplish three things: + * + * - avoid multiple evaluations of the arguments (so side-effects like + * "x++" happen only once) when non-constant. + * - perform strict type-checking (to generate warnings instead of + * nasty runtime surprises). See the "unnecessary" pointer comparison + * in __typecheck(). + * - retain result as a constant expressions when called with only + * constant expressions (to avoid tripping VLA warnings in stack + * allocation usage). 
+ */ +#define __typecheck(x, y) \ + (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1))) + +/* + * This returns a constant expression while determining if an argument is + * a constant expression, most importantly without evaluating the argument. + * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de> */ -#define __min(t1, t2, min1, min2, x, y) ({ \ - t1 min1 = (x); \ - t2 min2 = (y); \ - (void) (&min1 == &min2); \ - min1 < min2 ? min1 : min2; }) +#define __is_constexpr(x) \ + (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8))) + +#define __no_side_effects(x, y) \ + (__is_constexpr(x) && __is_constexpr(y)) + +#define __safe_cmp(x, y) \ + (__typecheck(x, y) && __no_side_effects(x, y)) + +#define __cmp(x, y, op) ((x) op (y) ? (x) : (y)) + +#define __cmp_once(x, y, unique_x, unique_y, op) ({ \ + typeof(x) unique_x = (x); \ + typeof(y) unique_y = (y); \ + __cmp(unique_x, unique_y, op); }) + +#define __careful_cmp(x, y, op) \ + __builtin_choose_expr(__safe_cmp(x, y), \ + __cmp(x, y, op), \ + __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op)) /** * min - return minimum of two values of the same or compatible types * @x: first value * @y: second value */ -#define min(x, y) \ - __min(typeof(x), typeof(y), \ - __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ - x, y) - -#define __max(t1, t2, max1, max2, x, y) ({ \ - t1 max1 = (x); \ - t2 max2 = (y); \ - (void) (&max1 == &max2); \ - max1 > max2 ? max1 : max2; }) +#define min(x, y) __careful_cmp(x, y, <) /** * max - return maximum of two values of the same or compatible types * @x: first value * @y: second value */ -#define max(x, y) \ - __max(typeof(x), typeof(y), \ - __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \ - x, y) +#define max(x, y) __careful_cmp(x, y, >) /** * min3 - return minimum of three values @@ -869,10 +899,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } * @x: first value * @y: second value */ -#define min_t(type, x, y) \ - __min(type, type, \ - __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ - x, y) +#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <) /** * max_t - return maximum of two values, using the specified type @@ -880,10 +907,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } * @x: first value * @y: second value */ -#define max_t(type, x, y) \ - __max(type, type, \ - __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ - x, y) +#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >) /** * clamp_t - return a value clamped to a given range using a given type @@ -919,6 +943,13 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } #define swap(a, b) \ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) +/* This counts to 12. Any more, it will return 13th argument. */ +#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n +#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) + +#define __CONCAT(a, b) a ## b +#define CONCATENATE(a, b) __CONCAT(a, b) + /** * container_of - cast a member of a structure out to the containing structure * @ptr: the pointer to the member. 
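[Editorial note, not part of the patch] The kernel.h hunk above replaces the old statement-expression __min()/__max() helpers with __careful_cmp(), whose key trick is __is_constexpr(): when both arguments are integer constant expressions the macro expands to a plain conditional expression (so the result stays a constant expression and cannot trip VLA warnings), and otherwise it falls back to a statement expression that evaluates each argument exactly once. The following is a minimal, userspace-compilable sketch of that mechanism, assuming GCC or clang (statement expressions, __builtin_choose_expr). It mirrors the macros from the hunk but substitutes fixed temporary names for __UNIQUE_ID(), so unlike the kernel version it cannot nest min() calls without shadow warnings; the demo main() is invented for illustration.

/*
 * Sketch of the constant-preserving min() from the kernel.h hunk above.
 * Build with e.g. "cc -Wall -Wvla demo.c" (GCC or clang).
 */
#include <stdio.h>

#define __typecheck(x, y) \
	(!!(sizeof((__typeof__(x) *)1 == (__typeof__(y) *)1)))

/* Compile-time test: is x an integer constant expression? (never evaluates x) */
#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))

#define __no_side_effects(x, y)	(__is_constexpr(x) && __is_constexpr(y))
#define __safe_cmp(x, y)	(__typecheck(x, y) && __no_side_effects(x, y))

#define __cmp(x, y, op)		((x) op (y) ? (x) : (y))

/* Fallback path: evaluate each argument exactly once. */
#define __cmp_once(x, y, op) ({			\
	__typeof__(x) __ux = (x);		\
	__typeof__(y) __uy = (y);		\
	__cmp(__ux, __uy, op); })

#define __careful_cmp(x, y, op)				\
	__builtin_choose_expr(__safe_cmp(x, y),		\
			      __cmp(x, y, op),		\
			      __cmp_once(x, y, op))

#define min(x, y)	__careful_cmp(x, y, <)

int main(void)
{
	int calls = 0;
	int a = 5, b = 3;

	/* Both arguments constant: min() stays a constant expression, no VLA. */
	char buf[min(16, 32)];

	/* Non-constant argument with a side effect: evaluated exactly once. */
	int m = min(a, (calls++, b));

	printf("min=%d calls=%d sizeof(buf)=%zu\n", m, calls, sizeof(buf));
	return 0;
}

Routing both cases through __builtin_choose_expr() is what makes the constant-argument path avoid statement expressions entirely, so the result remains usable where an integer constant expression is required (array bounds, case labels); that is exactly the "avoid tripping VLA warnings in stack allocation usage" goal stated in the comment added by the patch.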
diff --git a/include/linux/kexec.h b/include/linux/kexec.h index f16f6ceb3875..9e4e638fb505 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -99,21 +99,25 @@ struct compat_kexec_segment { #ifdef CONFIG_KEXEC_FILE struct purgatory_info { - /* Pointer to elf header of read only purgatory */ - Elf_Ehdr *ehdr; - - /* Pointer to purgatory sechdrs which are modifiable */ + /* + * Pointer to elf header at the beginning of kexec_purgatory. + * Note: kexec_purgatory is read only + */ + const Elf_Ehdr *ehdr; + /* + * Temporary, modifiable buffer for sechdrs used for relocation. + * This memory can be freed post image load. + */ Elf_Shdr *sechdrs; /* - * Temporary buffer location where purgatory is loaded and relocated - * This memory can be freed post image load + * Temporary, modifiable buffer for stripped purgatory used for + * relocation. This memory can be freed post image load. */ void *purgatory_buf; - - /* Address where purgatory is finally loaded and is executed from */ - unsigned long purgatory_load_addr; }; +struct kimage; + typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size); typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf, unsigned long kernel_len, char *initrd, @@ -135,6 +139,11 @@ struct kexec_file_ops { #endif }; +extern const struct kexec_file_ops * const kexec_file_loaders[]; + +int kexec_image_probe_default(struct kimage *image, void *buf, + unsigned long buf_len); + /** * struct kexec_buf - parameters for finding a place for a buffer in memory * @image: kexec image in which memory to search. @@ -159,10 +168,44 @@ struct kexec_buf { bool top_down; }; +int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf); +int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, + void *buf, unsigned int size, + bool get_value); +void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); + +int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); +int __weak arch_kexec_apply_relocations(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); + int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf, int (*func)(struct resource *, void *)); extern int kexec_add_buffer(struct kexec_buf *kbuf); int kexec_locate_mem_hole(struct kexec_buf *kbuf); + +/* Alignment required for elf header segment */ +#define ELF_CORE_HEADER_ALIGN 4096 + +struct crash_mem_range { + u64 start, end; +}; + +struct crash_mem { + unsigned int max_nr_ranges; + unsigned int nr_ranges; + struct crash_mem_range ranges[0]; +}; + +extern int crash_exclude_mem_range(struct crash_mem *mem, + unsigned long long mstart, + unsigned long long mend); +extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map, + void **addr, unsigned long *sz); #endif /* CONFIG_KEXEC_FILE */ struct kimage { @@ -209,7 +252,7 @@ struct kimage { unsigned long cmdline_buf_len; /* File operations provided by image loader */ - struct kexec_file_ops *fops; + const struct kexec_file_ops *fops; /* Image loader handling the kernel can store a pointer here */ void *image_loader_data; @@ -223,21 +266,9 @@ struct kimage { extern void machine_kexec(struct kimage *image); extern int machine_kexec_prepare(struct kimage *image); extern void machine_kexec_cleanup(struct kimage *image); -extern asmlinkage long sys_kexec_load(unsigned long entry, - unsigned long nr_segments, - struct kexec_segment __user *segments, 
- unsigned long flags); extern int kernel_kexec(void); extern struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order); -extern int kexec_load_purgatory(struct kimage *image, unsigned long min, - unsigned long max, int top_down, - unsigned long *load_addr); -extern int kexec_purgatory_get_set_symbol(struct kimage *image, - const char *name, void *buf, - unsigned int size, bool get_value); -extern void *kexec_purgatory_get_symbol_addr(struct kimage *image, - const char *name); extern void __crash_kexec(struct pt_regs *); extern void crash_kexec(struct pt_regs *); int kexec_should_crash(struct task_struct *); @@ -277,16 +308,6 @@ int crash_shrink_memory(unsigned long new_size); size_t crash_get_memory_size(void); void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); -int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, - unsigned long buf_len); -void * __weak arch_kexec_kernel_image_load(struct kimage *image); -int __weak arch_kimage_file_post_load_cleanup(struct kimage *image); -int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, - unsigned long buf_len); -int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, - Elf_Shdr *sechdrs, unsigned int relsec); -int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, - unsigned int relsec); void arch_kexec_protect_crashkres(void); void arch_kexec_unprotect_crashkres(void); diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index e251533a5939..89fc8dc7bf38 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -41,11 +41,11 @@ */ /* - * Note about locking : There is no locking required until only * one reader - * and one writer is using the fifo and no kfifo_reset() will be * called - * kfifo_reset_out() can be safely used, until it will be only called + * Note about locking: There is no locking required until only one reader + * and one writer is using the fifo and no kfifo_reset() will be called. + * kfifo_reset_out() can be safely used, until it will be only called * in the reader thread. - * For multiple writer and one reader there is only a need to lock the writer. + * For multiple writer and one reader there is only a need to lock the writer. * And vice versa for only one writer and multiple reader there is only a need * to lock the reader. 
*/ diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h index 51f6ef2c2ff4..f23b90b02898 100644 --- a/include/linux/kvm_para.h +++ b/include/linux/kvm_para.h @@ -9,4 +9,9 @@ static inline bool kvm_para_has_feature(unsigned int feature) { return !!(kvm_arch_para_features() & (1UL << feature)); } + +static inline bool kvm_para_has_hint(unsigned int feature) +{ + return !!(kvm_arch_para_hints() & (1UL << feature)); +} #endif /* __LINUX_KVM_PARA_H */ diff --git a/include/linux/leds.h b/include/linux/leds.h index 5579c64c8fd6..b7e82550e655 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -346,9 +346,9 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev) /* Trigger specific functions */ #ifdef CONFIG_LEDS_TRIGGER_DISK -extern void ledtrig_disk_activity(void); +extern void ledtrig_disk_activity(bool write); #else -static inline void ledtrig_disk_activity(void) {} +static inline void ledtrig_disk_activity(bool write) {} #endif #ifdef CONFIG_LEDS_TRIGGER_MTD diff --git a/include/linux/libata.h b/include/linux/libata.h index ed9826b21c5e..1795fecdea17 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -617,6 +617,7 @@ struct ata_host { void *private_data; struct ata_port_operations *ops; unsigned long flags; + struct kref kref; struct mutex eh_mutex; struct task_struct *eh_owner; diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h index 14997285e53d..c6ac1fe7ec68 100644 --- a/include/linux/libfdt_env.h +++ b/include/linux/libfdt_env.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBFDT_ENV_H -#define _LIBFDT_ENV_H +#ifndef LIBFDT_ENV_H +#define LIBFDT_ENV_H #include <linux/string.h> @@ -15,4 +15,4 @@ typedef __be64 fdt64_t; #define fdt64_to_cpu(x) be64_to_cpu(x) #define cpu_to_fdt64(x) cpu_to_be64(x) -#endif /* _LIBFDT_ENV_H */ +#endif /* LIBFDT_ENV_H */ diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index ff855ed965fb..097072c5a852 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -76,12 +76,14 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc); +struct device_node; struct nvdimm_bus_descriptor { const struct attribute_group **attr_groups; unsigned long bus_dsm_mask; unsigned long cmd_mask; struct module *module; char *provider_name; + struct device_node *of_node; ndctl_fn ndctl; int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc); int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc, @@ -123,6 +125,7 @@ struct nd_region_desc { int num_lanes; int numa_node; unsigned long flags; + struct device_node *of_node; }; struct device; @@ -164,6 +167,7 @@ void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus); struct nvdimm_bus *to_nvdimm_bus(struct device *dev); struct nvdimm *to_nvdimm(struct device *dev); struct nd_region *to_nd_region(struct device *dev); +struct device *nd_region_dev(struct nd_region *nd_region); struct nd_blk_region *to_nd_blk_region(struct device *dev); struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus); struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus); diff --git a/include/linux/libps2.h b/include/linux/libps2.h index 4ad06e824f76..5f18fe02ae37 100644 --- a/include/linux/libps2.h +++ b/include/linux/libps2.h @@ -10,7 +10,13 @@ * the Free Software Foundation. 
*/ +#include <linux/bitops.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/wait.h> +#define PS2_CMD_SETSCALE11 0x00e6 +#define PS2_CMD_SETRES 0x10e8 #define PS2_CMD_GETID 0x02f2 #define PS2_CMD_RESET_BAT 0x02ff @@ -20,11 +26,12 @@ #define PS2_RET_NAK 0xfe #define PS2_RET_ERR 0xfc -#define PS2_FLAG_ACK 1 /* Waiting for ACK/NAK */ -#define PS2_FLAG_CMD 2 /* Waiting for command to finish */ -#define PS2_FLAG_CMD1 4 /* Waiting for the first byte of command response */ -#define PS2_FLAG_WAITID 8 /* Command execiting is GET ID */ -#define PS2_FLAG_NAK 16 /* Last transmission was NAKed */ +#define PS2_FLAG_ACK BIT(0) /* Waiting for ACK/NAK */ +#define PS2_FLAG_CMD BIT(1) /* Waiting for a command to finish */ +#define PS2_FLAG_CMD1 BIT(2) /* Waiting for the first byte of command response */ +#define PS2_FLAG_WAITID BIT(3) /* Command executing is GET ID */ +#define PS2_FLAG_NAK BIT(4) /* Last transmission was NAKed */ +#define PS2_FLAG_ACK_CMD BIT(5) /* Waiting to ACK the command (first) byte */ struct ps2dev { struct serio *serio; @@ -36,21 +43,22 @@ struct ps2dev { wait_queue_head_t wait; unsigned long flags; - unsigned char cmdbuf[8]; - unsigned char cmdcnt; - unsigned char nak; + u8 cmdbuf[8]; + u8 cmdcnt; + u8 nak; }; void ps2_init(struct ps2dev *ps2dev, struct serio *serio); -int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout); -void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout); +int ps2_sendbyte(struct ps2dev *ps2dev, u8 byte, unsigned int timeout); +void ps2_drain(struct ps2dev *ps2dev, size_t maxbytes, unsigned int timeout); void ps2_begin_command(struct ps2dev *ps2dev); void ps2_end_command(struct ps2dev *ps2dev); -int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command); -int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command); -int ps2_handle_ack(struct ps2dev *ps2dev, unsigned char data); -int ps2_handle_response(struct ps2dev *ps2dev, unsigned char data); +int __ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command); +int ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command); +int ps2_sliced_command(struct ps2dev *ps2dev, u8 command); +bool ps2_handle_ack(struct ps2dev *ps2dev, u8 data); +bool ps2_handle_response(struct ps2dev *ps2dev, u8 data); void ps2_cmd_aborted(struct ps2dev *ps2dev); -int ps2_is_keyboard_id(char id); +bool ps2_is_keyboard_id(u8 id); #endif /* _LIBPS2_H */ diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 7f4b60abdf27..6e0859b9d4d2 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -16,26 +16,58 @@ enum { NVM_IOTYPE_GC = 1, }; -#define NVM_BLK_BITS (16) -#define NVM_PG_BITS (16) -#define NVM_SEC_BITS (8) -#define NVM_PL_BITS (8) -#define NVM_LUN_BITS (8) -#define NVM_CH_BITS (7) +/* common format */ +#define NVM_GEN_CH_BITS (8) +#define NVM_GEN_LUN_BITS (8) +#define NVM_GEN_BLK_BITS (16) +#define NVM_GEN_RESERVED (32) + +/* 1.2 format */ +#define NVM_12_PG_BITS (16) +#define NVM_12_PL_BITS (4) +#define NVM_12_SEC_BITS (4) +#define NVM_12_RESERVED (8) + +/* 2.0 format */ +#define NVM_20_SEC_BITS (24) +#define NVM_20_RESERVED (8) + +enum { + NVM_OCSSD_SPEC_12 = 12, + NVM_OCSSD_SPEC_20 = 20, +}; struct ppa_addr { /* Generic structure for all addresses */ union { + /* generic device format */ + struct { + u64 ch : NVM_GEN_CH_BITS; + u64 lun : NVM_GEN_LUN_BITS; + u64 blk : NVM_GEN_BLK_BITS; + u64 reserved : NVM_GEN_RESERVED; + } a; + + /* 1.2 device format */ struct { - u64 blk : NVM_BLK_BITS; - u64 pg : 
NVM_PG_BITS; - u64 sec : NVM_SEC_BITS; - u64 pl : NVM_PL_BITS; - u64 lun : NVM_LUN_BITS; - u64 ch : NVM_CH_BITS; - u64 reserved : 1; + u64 ch : NVM_GEN_CH_BITS; + u64 lun : NVM_GEN_LUN_BITS; + u64 blk : NVM_GEN_BLK_BITS; + u64 pg : NVM_12_PG_BITS; + u64 pl : NVM_12_PL_BITS; + u64 sec : NVM_12_SEC_BITS; + u64 reserved : NVM_12_RESERVED; } g; + /* 2.0 device format */ + struct { + u64 grp : NVM_GEN_CH_BITS; + u64 pu : NVM_GEN_LUN_BITS; + u64 chk : NVM_GEN_BLK_BITS; + u64 sec : NVM_20_SEC_BITS; + u64 reserved : NVM_20_RESERVED; + } m; + struct { u64 line : 63; u64 is_cached : 1; @@ -49,10 +81,13 @@ struct nvm_rq; struct nvm_id; struct nvm_dev; struct nvm_tgt_dev; +struct nvm_chk_meta; -typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); +typedef int (nvm_id_fn)(struct nvm_dev *); typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); +typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, struct nvm_chk_meta *, + sector_t, int); typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *); typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); @@ -66,6 +101,8 @@ struct nvm_dev_ops { nvm_op_bb_tbl_fn *get_bb_tbl; nvm_op_set_bb_fn *set_bb_tbl; + nvm_get_chk_meta_fn *get_chk_meta; + nvm_submit_io_fn *submit_io; nvm_submit_io_sync_fn *submit_io_sync; @@ -73,8 +110,6 @@ struct nvm_dev_ops { nvm_destroy_dma_pool_fn *destroy_dma_pool; nvm_dev_dma_alloc_fn *dev_dma_alloc; nvm_dev_dma_free_fn *dev_dma_free; - - unsigned int max_phys_sect; }; #ifdef CONFIG_NVM @@ -154,60 +189,75 @@ struct nvm_id_lp_tbl { struct nvm_id_lp_mlc mlc; }; -struct nvm_id_group { - u8 mtype; - u8 fmtype; - u8 num_ch; - u8 num_lun; - u16 num_chk; - u16 clba; - u16 csecs; - u16 sos; - - u16 ws_min; - u16 ws_opt; - u16 ws_seq; - u16 ws_per_chk; - - u32 trdt; - u32 trdm; - u32 tprt; - u32 tprm; - u32 tbet; - u32 tbem; - u32 mpos; - u32 mccap; - u16 cpar; - - /* 1.2 compatibility */ - u8 num_pln; - u16 num_pg; - u16 fpg_sz; -}; - -struct nvm_addr_format { - u8 ch_offset; +struct nvm_addrf_12 { u8 ch_len; - u8 lun_offset; u8 lun_len; - u8 pln_offset; + u8 blk_len; + u8 pg_len; u8 pln_len; + u8 sec_len; + + u8 ch_offset; + u8 lun_offset; u8 blk_offset; - u8 blk_len; u8 pg_offset; - u8 pg_len; - u8 sect_offset; - u8 sect_len; + u8 pln_offset; + u8 sec_offset; + + u64 ch_mask; + u64 lun_mask; + u64 blk_mask; + u64 pg_mask; + u64 pln_mask; + u64 sec_mask; }; -struct nvm_id { - u8 ver_id; - u8 vmnt; - u32 cap; - u32 dom; - struct nvm_addr_format ppaf; - struct nvm_id_group grp; -} __packed; +struct nvm_addrf { + u8 ch_len; + u8 lun_len; + u8 chk_len; + u8 sec_len; + u8 rsv_len[2]; + + u8 ch_offset; + u8 lun_offset; + u8 chk_offset; + u8 sec_offset; + u8 rsv_off[2]; + + u64 ch_mask; + u64 lun_mask; + u64 chk_mask; + u64 sec_mask; + u64 rsv_mask[2]; +}; + +enum { + /* Chunk states */ + NVM_CHK_ST_FREE = 1 << 0, + NVM_CHK_ST_CLOSED = 1 << 1, + NVM_CHK_ST_OPEN = 1 << 2, + NVM_CHK_ST_OFFLINE = 1 << 3, + + /* Chunk types */ + NVM_CHK_TP_W_SEQ = 1 << 0, + NVM_CHK_TP_W_RAN = 1 << 1, + NVM_CHK_TP_SZ_SPEC = 1 << 4, +}; + +/* + * Note: The structure size is linked to nvme_nvm_chk_meta such that the same + * buffer can be used when converting from little endian to cpu addressing. 
+ */ +struct nvm_chk_meta { + u8 state; + u8 type; + u8 wi; + u8 rsvd[5]; + u64 slba; + u64 cnlb; + u64 wp; +}; struct nvm_target { struct list_head list; @@ -226,6 +276,8 @@ struct nvm_target { #define NVM_VERSION_MINOR 0 #define NVM_VERSION_PATCH 0 +#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */ + struct nvm_rq; typedef void (nvm_end_io_fn)(struct nvm_rq *); @@ -272,38 +324,69 @@ enum { NVM_BLK_ST_BAD = 0x8, /* Bad block */ }; - -/* Device generic information */ +/* Instance geometry */ struct nvm_geo { - /* generic geometry */ - int nr_chnls; - int all_luns; /* across channels */ - int nr_luns; /* per channel */ - int nr_chks; /* per lun */ + /* device reported version */ + u8 major_ver_id; + u8 minor_ver_id; + + /* kernel short version */ + u8 version; - int sec_size; - int oob_size; - int mccap; + /* instance specific geometry */ + int num_ch; + int num_lun; /* per channel */ - int sec_per_chk; - int sec_per_lun; + /* calculated values */ + int all_luns; /* across channels */ + int all_chunks; /* across channels */ - int ws_min; - int ws_opt; - int ws_seq; - int ws_per_chk; + int op; /* over-provision in instance */ - int max_rq_size; + sector_t total_secs; /* across channels */ + + /* chunk geometry */ + u32 num_chk; /* chunks per lun */ + u32 clba; /* sectors per chunk */ + u16 csecs; /* sector size */ + u16 sos; /* out-of-band area size */ + + /* device write constrains */ + u32 ws_min; /* minimum write size */ + u32 ws_opt; /* optimal write size */ + u32 mw_cunits; /* distance required for successful read */ + u32 maxoc; /* maximum open chunks */ + u32 maxocpu; /* maximum open chunks per parallel unit */ + + /* device capabilities */ + u32 mccap; + + /* device timings */ + u32 trdt; /* Avg. Tread (ns) */ + u32 trdm; /* Max Tread (ns) */ + u32 tprt; /* Avg. Tprog (ns) */ + u32 tprm; /* Max Tprog (ns) */ + u32 tbet; /* Avg. 
Terase (ns) */ + u32 tbem; /* Max Terase (ns) */ + + /* generic address format */ + struct nvm_addrf addrf; + + /* 1.2 compatibility */ + u8 vmnt; + u32 cap; + u32 dom; - int op; + u8 mtype; + u8 fmtype; - struct nvm_addr_format ppaf; + u16 cpar; + u32 mpos; - /* Legacy 1.2 specific geometry */ - int plane_mode; /* drive device in single, double or quad mode */ - int nr_planes; - int sec_per_pg; /* only sectors for a single page */ - int sec_per_pl; /* all sectors across planes */ + u8 num_pln; + u8 pln_mode; + u16 num_pg; + u16 fpg_sz; }; /* sub-device structure */ @@ -314,9 +397,6 @@ struct nvm_tgt_dev { /* Base ppas for target LUNs */ struct ppa_addr *luns; - sector_t total_secs; - - struct nvm_id identity; struct request_queue *q; struct nvm_dev *parent; @@ -331,13 +411,9 @@ struct nvm_dev { /* Device information */ struct nvm_geo geo; - unsigned long total_secs; - unsigned long *lun_map; void *dma_pool; - struct nvm_id identity; - /* Backend device */ struct request_queue *q; char name[DISK_NAME_LEN]; @@ -353,44 +429,58 @@ struct nvm_dev { struct list_head targets; }; -static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev, +static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, struct ppa_addr r) { - struct nvm_geo *geo = &tgt_dev->geo; + struct nvm_geo *geo = &dev->geo; struct ppa_addr l; - l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset; - l.ppa |= ((u64)r.g.pg) << geo->ppaf.pg_offset; - l.ppa |= ((u64)r.g.sec) << geo->ppaf.sect_offset; - l.ppa |= ((u64)r.g.pl) << geo->ppaf.pln_offset; - l.ppa |= ((u64)r.g.lun) << geo->ppaf.lun_offset; - l.ppa |= ((u64)r.g.ch) << geo->ppaf.ch_offset; + if (geo->version == NVM_OCSSD_SPEC_12) { + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; + + l.ppa = ((u64)r.g.ch) << ppaf->ch_offset; + l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset; + l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset; + l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset; + l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset; + l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset; + } else { + struct nvm_addrf *lbaf = &geo->addrf; + + l.ppa = ((u64)r.m.grp) << lbaf->ch_offset; + l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset; + l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset; + l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset; + } return l; } -static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev, +static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, struct ppa_addr r) { - struct nvm_geo *geo = &tgt_dev->geo; + struct nvm_geo *geo = &dev->geo; struct ppa_addr l; l.ppa = 0; - /* - * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. 
- */ - l.g.blk = (r.ppa >> geo->ppaf.blk_offset) & - (((1 << geo->ppaf.blk_len) - 1)); - l.g.pg |= (r.ppa >> geo->ppaf.pg_offset) & - (((1 << geo->ppaf.pg_len) - 1)); - l.g.sec |= (r.ppa >> geo->ppaf.sect_offset) & - (((1 << geo->ppaf.sect_len) - 1)); - l.g.pl |= (r.ppa >> geo->ppaf.pln_offset) & - (((1 << geo->ppaf.pln_len) - 1)); - l.g.lun |= (r.ppa >> geo->ppaf.lun_offset) & - (((1 << geo->ppaf.lun_len) - 1)); - l.g.ch |= (r.ppa >> geo->ppaf.ch_offset) & - (((1 << geo->ppaf.ch_len) - 1)); + + if (geo->version == NVM_OCSSD_SPEC_12) { + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; + + l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset; + l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset; + l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset; + l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset; + l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset; + l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset; + } else { + struct nvm_addrf *lbaf = &geo->addrf; + + l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset; + l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset; + l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset; + l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset; + } return l; } @@ -434,9 +524,13 @@ extern struct nvm_dev *nvm_alloc_dev(int); extern int nvm_register(struct nvm_dev *); extern void nvm_unregister(struct nvm_dev *); + +extern int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, + struct nvm_chk_meta *meta, struct ppa_addr ppa, + int nchks); + extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *, int, int); -extern int nvm_max_phys_sects(struct nvm_tgt_dev *); extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *); extern void nvm_end_io(struct nvm_rq *); diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h index 5e3581d76c7f..d4d5b93efe84 100644 --- a/include/linux/linux_logo.h +++ b/include/linux/linux_logo.h @@ -36,8 +36,6 @@ struct linux_logo { extern const struct linux_logo logo_linux_mono; extern const struct linux_logo logo_linux_vga16; extern const struct linux_logo logo_linux_clut224; -extern const struct linux_logo logo_blackfin_vga16; -extern const struct linux_logo logo_blackfin_clut224; extern const struct linux_logo logo_dec_clut224; extern const struct linux_logo logo_mac_clut224; extern const struct linux_logo logo_parisc_clut224; @@ -46,7 +44,6 @@ extern const struct linux_logo logo_sun_clut224; extern const struct linux_logo logo_superh_mono; extern const struct linux_logo logo_superh_vga16; extern const struct linux_logo logo_superh_clut224; -extern const struct linux_logo logo_m32r_clut224; extern const struct linux_logo logo_spe_clut224; extern const struct linux_logo *fb_find_logo(int depth); diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index bb8129a3474d..96def9d15b1b 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -32,6 +32,7 @@ struct list_lru_one { }; struct list_lru_memcg { + struct rcu_head rcu; /* array of per cgroup lists, indexed by memcg_cache_id */ struct list_lru_one *lru[0]; }; @@ -43,7 +44,7 @@ struct list_lru_node { struct list_lru_one lru; #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */ - struct list_lru_memcg *memcg_lrus; + struct list_lru_memcg __rcu *memcg_lrus; #endif long nr_items; } ____cacheline_aligned_in_smp; diff --git a/include/linux/livepatch.h 
b/include/linux/livepatch.h index 4754f01c1abb..aec44b1d9582 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -186,13 +186,20 @@ static inline bool klp_have_reliable_stack(void) IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE); } +typedef int (*klp_shadow_ctor_t)(void *obj, + void *shadow_data, + void *ctor_data); +typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data); + void *klp_shadow_get(void *obj, unsigned long id); -void *klp_shadow_alloc(void *obj, unsigned long id, void *data, - size_t size, gfp_t gfp_flags); -void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, - size_t size, gfp_t gfp_flags); -void klp_shadow_free(void *obj, unsigned long id); -void klp_shadow_free_all(unsigned long id); +void *klp_shadow_alloc(void *obj, unsigned long id, + size_t size, gfp_t gfp_flags, + klp_shadow_ctor_t ctor, void *ctor_data); +void *klp_shadow_get_or_alloc(void *obj, unsigned long id, + size_t size, gfp_t gfp_flags, + klp_shadow_ctor_t ctor, void *ctor_data); +void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor); +void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor); #else /* !CONFIG_LIVEPATCH */ diff --git a/include/linux/lockref.h b/include/linux/lockref.h index 2eac32095113..99f17cc8e163 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h @@ -37,6 +37,7 @@ struct lockref { extern void lockref_get(struct lockref *); extern int lockref_put_return(struct lockref *); extern int lockref_get_not_zero(struct lockref *); +extern int lockref_put_not_zero(struct lockref *); extern int lockref_get_or_lock(struct lockref *); extern int lockref_put_or_lock(struct lockref *); diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h new file mode 100644 index 000000000000..cbd9d8495690 --- /dev/null +++ b/include/linux/logic_pio.h @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved. 
+ * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com> + * Author: Zhichang Yuan <yuanzhichang@hisilicon.com> + */ + +#ifndef __LINUX_LOGIC_PIO_H +#define __LINUX_LOGIC_PIO_H + +#include <linux/fwnode.h> + +enum { + LOGIC_PIO_INDIRECT, /* Indirect IO flag */ + LOGIC_PIO_CPU_MMIO, /* Memory-mapped IO flag */ +}; + +struct logic_pio_hwaddr { + struct list_head list; + struct fwnode_handle *fwnode; + resource_size_t hw_start; + resource_size_t io_start; + resource_size_t size; /* range size populated */ + unsigned long flags; + + void *hostdata; + const struct logic_pio_host_ops *ops; +}; + +struct logic_pio_host_ops { + u32 (*in)(void *hostdata, unsigned long addr, size_t dwidth); + void (*out)(void *hostdata, unsigned long addr, u32 val, + size_t dwidth); + u32 (*ins)(void *hostdata, unsigned long addr, void *buffer, + size_t dwidth, unsigned int count); + void (*outs)(void *hostdata, unsigned long addr, const void *buffer, + size_t dwidth, unsigned int count); +}; + +#ifdef CONFIG_INDIRECT_PIO +u8 logic_inb(unsigned long addr); +void logic_outb(u8 value, unsigned long addr); +void logic_outw(u16 value, unsigned long addr); +void logic_outl(u32 value, unsigned long addr); +u16 logic_inw(unsigned long addr); +u32 logic_inl(unsigned long addr); +void logic_outb(u8 value, unsigned long addr); +void logic_outw(u16 value, unsigned long addr); +void logic_outl(u32 value, unsigned long addr); +void logic_insb(unsigned long addr, void *buffer, unsigned int count); +void logic_insl(unsigned long addr, void *buffer, unsigned int count); +void logic_insw(unsigned long addr, void *buffer, unsigned int count); +void logic_outsb(unsigned long addr, const void *buffer, unsigned int count); +void logic_outsw(unsigned long addr, const void *buffer, unsigned int count); +void logic_outsl(unsigned long addr, const void *buffer, unsigned int count); + +#ifndef inb +#define inb logic_inb +#endif + +#ifndef inw +#define inw logic_inw +#endif + +#ifndef inl +#define inl logic_inl +#endif + +#ifndef outb +#define outb logic_outb +#endif + +#ifndef outw +#define outw logic_outw +#endif + +#ifndef outl +#define outl logic_outl +#endif + +#ifndef insb +#define insb logic_insb +#endif + +#ifndef insw +#define insw logic_insw +#endif + +#ifndef insl +#define insl logic_insl +#endif + +#ifndef outsb +#define outsb logic_outsb +#endif + +#ifndef outsw +#define outsw logic_outsw +#endif + +#ifndef outsl +#define outsl logic_outsl +#endif + +/* + * We reserve 0x4000 bytes for Indirect IO as so far this library is only + * used by the HiSilicon LPC Host. If needed, we can reserve a wider IO + * area by redefining the macro below. + */ +#define PIO_INDIRECT_SIZE 0x4000 +#define MMIO_UPPER_LIMIT (IO_SPACE_LIMIT - PIO_INDIRECT_SIZE) +#else +#define MMIO_UPPER_LIMIT IO_SPACE_LIMIT +#endif /* CONFIG_INDIRECT_PIO */ + +struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode); +unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode, + resource_size_t hw_addr, resource_size_t size); +int logic_pio_register_range(struct logic_pio_hwaddr *newrange); +resource_size_t logic_pio_to_hwaddr(unsigned long pio); +unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr); + +#endif /* __LINUX_LOGIC_PIO_H */ diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index ac491137b10a..9d0b286f3dba 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -554,6 +554,10 @@ * @new points to the new credentials. * @old points to the original credentials. 
* Transfer data from original creds to new creds + * @cred_getsecid: + * Retrieve the security identifier of the cred structure @c + * @c contains the credentials, secid will be placed into @secid. + * In case of failure, @secid will be set to zero. * @kernel_act_as: * Set the credentials for a kernel service to act as (subjective context). * @new points to the credentials to be modified. @@ -907,6 +911,33 @@ * associated with the TUN device's security structure. * @security pointer to the TUN devices's security structure. * + * Security hooks for SCTP + * + * @sctp_assoc_request: + * Passes the @ep and @chunk->skb of the association INIT packet to + * the security module. + * @ep pointer to sctp endpoint structure. + * @skb pointer to skbuff of association packet. + * Return 0 on success, error on failure. + * @sctp_bind_connect: + * Validiate permissions required for each address associated with sock + * @sk. Depending on @optname, the addresses will be treated as either + * for a connect or bind service. The @addrlen is calculated on each + * ipv4 and ipv6 address using sizeof(struct sockaddr_in) or + * sizeof(struct sockaddr_in6). + * @sk pointer to sock structure. + * @optname name of the option to validate. + * @address list containing one or more ipv4/ipv6 addresses. + * @addrlen total length of address(s). + * Return 0 on success, error on failure. + * @sctp_sk_clone: + * Called whenever a new socket is created by accept(2) (i.e. a TCP + * style socket) or when a socket is 'peeled off' e.g userspace + * calls sctp_peeloff(3). + * @ep pointer to current sctp endpoint structure. + * @sk pointer to current sock structure. + * @sk pointer to new sock structure. + * * Security hooks for Infiniband * * @ib_pkey_access: @@ -1542,6 +1573,7 @@ union security_list_options { int (*cred_prepare)(struct cred *new, const struct cred *old, gfp_t gfp); void (*cred_transfer)(struct cred *new, const struct cred *old); + void (*cred_getsecid)(const struct cred *c, u32 *secid); int (*kernel_act_as)(struct cred *new, u32 secid); int (*kernel_create_files_as)(struct cred *new, struct inode *inode); int (*kernel_module_request)(char *kmod_name); @@ -1576,28 +1608,28 @@ union security_list_options { int (*msg_msg_alloc_security)(struct msg_msg *msg); void (*msg_msg_free_security)(struct msg_msg *msg); - int (*msg_queue_alloc_security)(struct msg_queue *msq); - void (*msg_queue_free_security)(struct msg_queue *msq); - int (*msg_queue_associate)(struct msg_queue *msq, int msqflg); - int (*msg_queue_msgctl)(struct msg_queue *msq, int cmd); - int (*msg_queue_msgsnd)(struct msg_queue *msq, struct msg_msg *msg, + int (*msg_queue_alloc_security)(struct kern_ipc_perm *msq); + void (*msg_queue_free_security)(struct kern_ipc_perm *msq); + int (*msg_queue_associate)(struct kern_ipc_perm *msq, int msqflg); + int (*msg_queue_msgctl)(struct kern_ipc_perm *msq, int cmd); + int (*msg_queue_msgsnd)(struct kern_ipc_perm *msq, struct msg_msg *msg, int msqflg); - int (*msg_queue_msgrcv)(struct msg_queue *msq, struct msg_msg *msg, + int (*msg_queue_msgrcv)(struct kern_ipc_perm *msq, struct msg_msg *msg, struct task_struct *target, long type, int mode); - int (*shm_alloc_security)(struct shmid_kernel *shp); - void (*shm_free_security)(struct shmid_kernel *shp); - int (*shm_associate)(struct shmid_kernel *shp, int shmflg); - int (*shm_shmctl)(struct shmid_kernel *shp, int cmd); - int (*shm_shmat)(struct shmid_kernel *shp, char __user *shmaddr, + int (*shm_alloc_security)(struct kern_ipc_perm *shp); + void 
(*shm_free_security)(struct kern_ipc_perm *shp); + int (*shm_associate)(struct kern_ipc_perm *shp, int shmflg); + int (*shm_shmctl)(struct kern_ipc_perm *shp, int cmd); + int (*shm_shmat)(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg); - int (*sem_alloc_security)(struct sem_array *sma); - void (*sem_free_security)(struct sem_array *sma); - int (*sem_associate)(struct sem_array *sma, int semflg); - int (*sem_semctl)(struct sem_array *sma, int cmd); - int (*sem_semop)(struct sem_array *sma, struct sembuf *sops, + int (*sem_alloc_security)(struct kern_ipc_perm *sma); + void (*sem_free_security)(struct kern_ipc_perm *sma); + int (*sem_associate)(struct kern_ipc_perm *sma, int semflg); + int (*sem_semctl)(struct kern_ipc_perm *sma, int cmd); + int (*sem_semop)(struct kern_ipc_perm *sma, struct sembuf *sops, unsigned nsops, int alter); int (*netlink_send)(struct sock *sk, struct sk_buff *skb); @@ -1666,6 +1698,12 @@ union security_list_options { int (*tun_dev_attach_queue)(void *security); int (*tun_dev_attach)(struct sock *sk, void *security); int (*tun_dev_open)(void *security); + int (*sctp_assoc_request)(struct sctp_endpoint *ep, + struct sk_buff *skb); + int (*sctp_bind_connect)(struct sock *sk, int optname, + struct sockaddr *address, int addrlen); + void (*sctp_sk_clone)(struct sctp_endpoint *ep, struct sock *sk, + struct sock *newsk); #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND @@ -1825,6 +1863,7 @@ struct security_hook_heads { struct hlist_head cred_free; struct hlist_head cred_prepare; struct hlist_head cred_transfer; + struct hlist_head cred_getsecid; struct hlist_head kernel_act_as; struct hlist_head kernel_create_files_as; struct hlist_head kernel_read_file; @@ -1915,6 +1954,9 @@ struct security_hook_heads { struct hlist_head tun_dev_attach_queue; struct hlist_head tun_dev_attach; struct hlist_head tun_dev_open; + struct hlist_head sctp_assoc_request; + struct hlist_head sctp_bind_connect; + struct hlist_head sctp_sk_clone; #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND struct hlist_head ib_pkey_access; diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 8be5077efb5f..ca59883c8364 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -187,7 +187,6 @@ int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, unsigned long *end_pfn); void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, unsigned long *out_end_pfn, int *out_nid); -unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn); /** * for_each_mem_pfn_range - early memory pfn range iterator @@ -319,6 +318,9 @@ static inline bool memblock_bottom_up(void) phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, ulong flags); +phys_addr_t memblock_alloc_base_nid(phys_addr_t size, + phys_addr_t align, phys_addr_t max_addr, + int nid, ulong flags); phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr); phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, @@ -417,21 +419,11 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end) { } #endif - -extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr, - phys_addr_t end_addr); #else static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) { return 0; } - -static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr, - phys_addr_t end_addr) -{ 
- return 0; -} - #endif /* CONFIG_HAVE_MEMBLOCK */ #endif /* __KERNEL__ */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index c46016bb25eb..d99b71bc2c66 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -48,13 +48,12 @@ enum memcg_stat_item { MEMCG_NR_STAT, }; -/* Cgroup-specific events, on top of universal VM events */ -enum memcg_event_item { - MEMCG_LOW = NR_VM_EVENT_ITEMS, +enum memcg_memory_event { + MEMCG_LOW, MEMCG_HIGH, MEMCG_MAX, MEMCG_OOM, - MEMCG_NR_EVENTS, + MEMCG_NR_MEMORY_EVENTS, }; struct mem_cgroup_reclaim_cookie { @@ -88,7 +87,7 @@ enum mem_cgroup_events_target { struct mem_cgroup_stat_cpu { long count[MEMCG_NR_STAT]; - unsigned long events[MEMCG_NR_EVENTS]; + unsigned long events[NR_VM_EVENT_ITEMS]; unsigned long nr_page_events; unsigned long targets[MEM_CGROUP_NTARGETS]; }; @@ -120,6 +119,9 @@ struct mem_cgroup_per_node { unsigned long usage_in_excess;/* Set to the value by which */ /* the soft limit is exceeded*/ bool on_tree; + bool congested; /* memcg has many dirty pages */ + /* backed by a congested BDI */ + struct mem_cgroup *memcg; /* Back pointer, we cannot */ /* use container_of */ }; @@ -202,7 +204,8 @@ struct mem_cgroup { /* OOM-Killer disable */ int oom_kill_disable; - /* handle for "memory.events" */ + /* memory.events */ + atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS]; struct cgroup_file events_file; /* protect arrays of thresholds */ @@ -231,9 +234,10 @@ struct mem_cgroup { struct task_struct *move_lock_task; unsigned long move_lock_flags; + /* memory.stat */ struct mem_cgroup_stat_cpu __percpu *stat_cpu; atomic_long_t stat[MEMCG_NR_STAT]; - atomic_long_t events[MEMCG_NR_EVENTS]; + atomic_long_t events[NR_VM_EVENT_ITEMS]; unsigned long socket_pressure; @@ -645,9 +649,9 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned); -/* idx can be of type enum memcg_event_item or vm_event_item */ static inline void __count_memcg_events(struct mem_cgroup *memcg, - int idx, unsigned long count) + enum vm_event_item idx, + unsigned long count) { unsigned long x; @@ -663,7 +667,8 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg, } static inline void count_memcg_events(struct mem_cgroup *memcg, - int idx, unsigned long count) + enum vm_event_item idx, + unsigned long count) { unsigned long flags; @@ -672,9 +677,8 @@ static inline void count_memcg_events(struct mem_cgroup *memcg, local_irq_restore(flags); } -/* idx can be of type enum memcg_event_item or vm_event_item */ static inline void count_memcg_page_event(struct page *page, - int idx) + enum vm_event_item idx) { if (page->mem_cgroup) count_memcg_events(page->mem_cgroup, idx, 1); @@ -698,10 +702,10 @@ static inline void count_memcg_event_mm(struct mm_struct *mm, rcu_read_unlock(); } -static inline void mem_cgroup_event(struct mem_cgroup *memcg, - enum memcg_event_item event) +static inline void memcg_memory_event(struct mem_cgroup *memcg, + enum memcg_memory_event event) { - count_memcg_events(memcg, event, 1); + atomic_long_inc(&memcg->memory_events[event]); cgroup_file_notify(&memcg->events_file); } @@ -721,8 +725,8 @@ static inline bool mem_cgroup_disabled(void) return true; } -static inline void mem_cgroup_event(struct mem_cgroup *memcg, - enum memcg_event_item event) +static inline void memcg_memory_event(struct mem_cgroup *memcg, + enum memcg_memory_event event) { } diff --git a/include/linux/memory.h b/include/linux/memory.h index f71e732c77b2..31ca3e28b0eb 100644 --- 
a/include/linux/memory.h +++ b/include/linux/memory.h @@ -33,6 +33,7 @@ struct memory_block { void *hw; /* optional pointer to fw/hw data */ int (*phys_callback)(struct memory_block *); struct device dev; + int nid; /* NID for this memory block */ }; int arch_get_memory_phys_device(unsigned long start_pfn); @@ -109,7 +110,7 @@ extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); extern int register_memory_isolate_notifier(struct notifier_block *nb); extern void unregister_memory_isolate_notifier(struct notifier_block *nb); -extern int register_new_memory(int, struct mem_section *); +int hotplug_memory_register(int nid, struct mem_section *section); #ifdef CONFIG_MEMORY_HOTREMOVE extern int unregister_memory_section(struct mem_section *); #endif diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index aba5f86eb038..e0e49b5b1ee1 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -52,24 +52,6 @@ enum { }; /* - * pgdat resizing functions - */ -static inline -void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags) -{ - spin_lock_irqsave(&pgdat->node_size_lock, *flags); -} -static inline -void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) -{ - spin_unlock_irqrestore(&pgdat->node_size_lock, *flags); -} -static inline -void pgdat_resize_init(struct pglist_data *pgdat) -{ - spin_lock_init(&pgdat->node_size_lock); -} -/* * Zone resizing functions * * Note: any attempt to resize a zone should has pgdat_resize_lock() @@ -234,9 +216,6 @@ void put_online_mems(void); void mem_hotplug_begin(void); void mem_hotplug_done(void); -extern void set_zone_contiguous(struct zone *zone); -extern void clear_zone_contiguous(struct zone *zone); - #else /* ! CONFIG_MEMORY_HOTPLUG */ #define pfn_to_online_page(pfn) \ ({ \ @@ -246,13 +225,6 @@ extern void clear_zone_contiguous(struct zone *zone); ___page; \ }) -/* - * Stub functions for when hotplug is off - */ -static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} -static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} -static inline void pgdat_resize_init(struct pglist_data *pgdat) {} - static inline unsigned zone_span_seqbegin(struct zone *zone) { return 0; @@ -293,6 +265,34 @@ static inline bool movable_node_is_enabled(void) } #endif /* ! 
CONFIG_MEMORY_HOTPLUG */ +#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) +/* + * pgdat resizing functions + */ +static inline +void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags) +{ + spin_lock_irqsave(&pgdat->node_size_lock, *flags); +} +static inline +void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) +{ + spin_unlock_irqrestore(&pgdat->node_size_lock, *flags); +} +static inline +void pgdat_resize_init(struct pglist_data *pgdat) +{ + spin_lock_init(&pgdat->node_size_lock); +} +#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */ +/* + * Stub functions for when hotplug is off + */ +static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} +static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} +static inline void pgdat_resize_init(struct pglist_data *pgdat) {} +#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */ + #ifdef CONFIG_MEMORY_HOTREMOVE extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index 080798f17ece..82bf7747b312 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -266,6 +266,8 @@ enum axp20x_variants { #define AXP288_RT_BATT_V_H 0xa0 #define AXP288_RT_BATT_V_L 0xa1 +#define AXP813_ADC_RATE 0x85 + /* Fuel Gauge */ #define AXP288_FG_RDC1_REG 0xba #define AXP288_FG_RDC0_REG 0xbb diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index c61535979b8f..2d4e23c9ea0a 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -183,6 +183,7 @@ struct cros_ec_debugfs; * @ec_dev: cros_ec_device structure to talk to the physical device * @dev: pointer to the platform device * @debug_info: cros_ec_debugfs structure for debugging information + * @has_kb_wake_angle: true if at least 2 accelerometer are connected to the EC. * @cmd_offset: offset to apply for each command. */ struct cros_ec_dev { @@ -191,6 +192,7 @@ struct cros_ec_dev { struct cros_ec_device *ec_dev; struct device *dev; struct cros_ec_debugfs *debug_info; + bool has_kb_wake_angle; u16 cmd_offset; u32 features[2]; }; diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index 2b96e630e3b6..f2edd9969b40 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h @@ -2948,6 +2948,9 @@ struct ec_response_usb_pd_control_v1 { #define EC_CMD_USB_PD_PORTS 0x102 +/* Maximum number of PD ports on a device, num_ports will be <= this */ +#define EC_USB_PD_MAX_PORTS 8 + struct ec_response_usb_pd_ports { uint8_t num_ports; } __packed; diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h index 04e092be4b07..1a94fa2ac309 100644 --- a/include/linux/mfd/da9055/pdata.h +++ b/include/linux/mfd/da9055/pdata.h @@ -12,6 +12,7 @@ #define DA9055_MAX_REGULATORS 8 struct da9055; +struct gpio_desc; enum gpio_select { NO_GPIO = 0, @@ -47,7 +48,7 @@ struct da9055_pdata { * controls the regulator set A/B, 0 if not available. 
*/ enum gpio_select *reg_rsel; - /* GPIOs to enable regulator, 0 if not available */ - int *ena_gpio; + /* GPIO descriptors to enable regulator, NULL if not available */ + struct gpio_desc **ena_gpiods; }; #endif /* __DA9055_PDATA_H */ diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h index 48c3c5be7eb1..9ed2871ea335 100644 --- a/include/linux/mfd/samsung/rtc.h +++ b/include/linux/mfd/samsung/rtc.h @@ -141,15 +141,4 @@ enum s2mps_rtc_reg { #define WTSR_ENABLE_SHIFT 6 #define WTSR_ENABLE_MASK (1 << WTSR_ENABLE_SHIFT) -enum { - RTC_SEC = 0, - RTC_MIN, - RTC_HOUR, - RTC_WEEKDAY, - RTC_DATE, - RTC_MONTH, - RTC_YEAR1, - RTC_YEAR2, -}; - #endif /* __LINUX_MFD_SEC_RTC_H */ diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h index c8e0164c5423..e06f5f79eaef 100644 --- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h +++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h @@ -243,6 +243,8 @@ #define IMX6Q_GPR4_IPU_RD_CACHE_CTL BIT(0) #define IMX6Q_GPR5_L2_CLK_STOP BIT(8) +#define IMX6Q_GPR5_SATA_SW_PD BIT(10) +#define IMX6Q_GPR5_SATA_SW_RST BIT(11) #define IMX6Q_GPR6_IPU1_ID00_WR_QOS_MASK (0xf << 0) #define IMX6Q_GPR6_IPU1_ID01_WR_QOS_MASK (0xf << 4) diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 396a103c8bc6..91f92215ca74 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -36,7 +36,6 @@ } while (0) /* tmio MMC platform flags */ -#define TMIO_MMC_WRPROTECT_DISABLE BIT(0) /* * Some controllers can support a 2-byte block size when the bus width * is configured in 4-bit mode. diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h index eb492d47f717..8f9c90379732 100644 --- a/include/linux/microchipphy.h +++ b/include/linux/microchipphy.h @@ -70,4 +70,12 @@ #define LAN88XX_MMD3_CHIP_ID (32877) #define LAN88XX_MMD3_CHIP_REV (32878) +/* DSP registers */ +#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG (0x806A) +#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_ (0x2000) +#define LAN88XX_EXT_PAGE_ACCESS_TR (0x52B5) +#define LAN88XX_EXT_PAGE_TR_CR 16 +#define LAN88XX_EXT_PAGE_TR_LOW_DATA 17 +#define LAN88XX_EXT_PAGE_TR_HIGH_DATA 18 + #endif /* _MICROCHIPPHY_H */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index a2246cf670ba..f2b4abbca55e 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -7,8 +7,7 @@ #include <linux/migrate_mode.h> #include <linux/hugetlb.h> -typedef struct page *new_page_t(struct page *page, unsigned long private, - int **reason); +typedef struct page *new_page_t(struct page *page, unsigned long private); typedef void free_page_t(struct page *page, unsigned long private); /* @@ -25,7 +24,7 @@ enum migrate_reason { MR_SYSCALL, /* also applies to cpusets */ MR_MEMPOLICY_MBIND, MR_NUMA_MISPLACED, - MR_CMA, + MR_CONTIG_RANGE, MR_TYPES }; @@ -43,9 +42,9 @@ static inline struct page *new_page_nodemask(struct page *page, return alloc_huge_page_nodemask(page_hstate(compound_head(page)), preferred_nid, nodemask); - if (thp_migration_supported() && PageTransHuge(page)) { - order = HPAGE_PMD_ORDER; + if (PageTransHuge(page)) { gfp_mask |= GFP_TRANSHUGE; + order = HPAGE_PMD_ORDER; } if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE)) diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index a9b5fed8f7c6..81d0799b6091 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -257,10 +257,6 @@ enum { }; enum { - MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0 -}; - -enum { 
MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1, MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2 diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h new file mode 100644 index 000000000000..70e7e5673ce9 --- /dev/null +++ b/include/linux/mlx5/accel.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef __MLX5_ACCEL_H__ +#define __MLX5_ACCEL_H__ + +#include <linux/mlx5/driver.h> + +enum mlx5_accel_esp_aes_gcm_keymat_iv_algo { + MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ, +}; + +enum mlx5_accel_esp_flags { + MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */ + MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0, + MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1, + MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2, +}; + +enum mlx5_accel_esp_action { + MLX5_ACCEL_ESP_ACTION_DECRYPT, + MLX5_ACCEL_ESP_ACTION_ENCRYPT, +}; + +enum mlx5_accel_esp_keymats { + MLX5_ACCEL_ESP_KEYMAT_AES_NONE, + MLX5_ACCEL_ESP_KEYMAT_AES_GCM, +}; + +enum mlx5_accel_esp_replay { + MLX5_ACCEL_ESP_REPLAY_NONE, + MLX5_ACCEL_ESP_REPLAY_BMP, +}; + +struct aes_gcm_keymat { + u64 seq_iv; + enum mlx5_accel_esp_aes_gcm_keymat_iv_algo iv_algo; + + u32 salt; + u32 icv_len; + + u32 key_len; + u32 aes_key[256 / 32]; +}; + +struct mlx5_accel_esp_xfrm_attrs { + enum mlx5_accel_esp_action action; + u32 esn; + u32 spi; + u32 seq; + u32 tfc_pad; + u32 flags; + u32 sa_handle; + enum mlx5_accel_esp_replay replay_type; + union { + struct { + u32 size; + + } bmp; + } replay; + enum mlx5_accel_esp_keymats keymat_type; + union { + struct aes_gcm_keymat aes_gcm; + } keymat; +}; + +struct mlx5_accel_esp_xfrm { + struct mlx5_core_dev *mdev; + struct mlx5_accel_esp_xfrm_attrs attrs; +}; + +enum { + MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA = 1UL << 0, +}; + +enum mlx5_accel_ipsec_cap { + MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0, + MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1, + MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2, + MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3, + MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4, + MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5, + MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 6, + MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7, +}; + +#ifdef CONFIG_MLX5_ACCEL + +u32 
mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev); + +struct mlx5_accel_esp_xfrm * +mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 flags); +void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm); +int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs); + +#else + +static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; } + +static inline struct mlx5_accel_esp_xfrm * +mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 flags) { return ERR_PTR(-EOPNOTSUPP); } +static inline void +mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {} +static inline int +mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; } + +#endif +#endif diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 48c181a2acc9..0ef6138eca49 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -60,6 +60,7 @@ struct mlx5_core_cq { } tasklet_ctx; int reset_notify_added; struct list_head reset_notify; + struct mlx5_eq *eq; }; @@ -171,8 +172,17 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL); } -int mlx5_init_cq_table(struct mlx5_core_dev *dev); -void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev); +static inline void mlx5_cq_hold(struct mlx5_core_cq *cq) +{ + refcount_inc(&cq->refcount); +} + +static inline void mlx5_cq_put(struct mlx5_core_cq *cq) +{ + if (refcount_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); @@ -183,6 +193,12 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u16 cq_period, u16 cq_max_count); +static inline void mlx5_dump_err_cqe(struct mlx5_core_dev *dev, + struct mlx5_err_cqe *err_cqe) +{ + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, + sizeof(*err_cqe), false); +} int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index e5258ee4e38b..2bc27f8c5b87 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -782,6 +782,9 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) return (u64)lo | ((u64)hi << 32); } +#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE (9) +#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE (6) + struct mpwrq_cqe_bc { __be16 filler_consumed_strides; __be16 byte_cnt; @@ -1013,6 +1016,9 @@ enum mlx5_cap_type { MLX5_CAP_RESERVED, MLX5_CAP_VECTOR_CALC, MLX5_CAP_QOS, + MLX5_CAP_DEBUG, + MLX5_CAP_RESERVED_14, + MLX5_CAP_DEV_MEM, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1140,6 +1146,9 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_QOS(mdev, cap)\ MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap) +#define MLX5_CAP_DEBUG(mdev, cap)\ + MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap) + #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \ MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld) @@ -1161,6 +1170,12 @@ enum mlx5_qcam_feature_groups { 
#define MLX5_CAP64_FPGA(mdev, cap) \ MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap) +#define MLX5_CAP_DEV_MEM(mdev, cap)\ + MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) + +#define MLX5_CAP64_DEV_MEM(mdev, cap)\ + MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, @@ -1204,8 +1219,8 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; } -#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8 -#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8 +#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16 +#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16 #define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1 #define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\ MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 6ed79a8a8318..767d193c269a 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -345,13 +345,6 @@ struct mlx5_buf_list { dma_addr_t map; }; -struct mlx5_buf { - struct mlx5_buf_list direct; - int npages; - int size; - u8 page_shift; -}; - struct mlx5_frag_buf { struct mlx5_buf_list *frags; int npages; @@ -359,6 +352,15 @@ struct mlx5_frag_buf { u8 page_shift; }; +struct mlx5_frag_buf_ctrl { + struct mlx5_frag_buf frag_buf; + u32 sz_m1; + u32 frag_sz_m1; + u8 log_sz; + u8 log_stride; + u8 log_frag_strides; +}; + struct mlx5_eq_tasklet { struct list_head list; struct list_head process_list; @@ -375,11 +377,18 @@ struct mlx5_eq_pagefault { mempool_t *pool; }; +struct mlx5_cq_table { + /* protect radix tree */ + spinlock_t lock; + struct radix_tree_root tree; +}; + struct mlx5_eq { struct mlx5_core_dev *dev; + struct mlx5_cq_table cq_table; __be32 __iomem *doorbell; u32 cons_index; - struct mlx5_buf buf; + struct mlx5_frag_buf buf; int size; unsigned int irqn; u8 eqn; @@ -453,8 +462,8 @@ struct mlx5_core_srq { struct mlx5_core_rsc_common common; /* must be first */ u32 srqn; int max; - int max_gs; - int max_avail_gather; + size_t max_gs; + size_t max_avail_gather; int wqe_shift; void (*event) (struct mlx5_core_srq *, enum mlx5_event); @@ -526,13 +535,6 @@ struct mlx5_core_health { struct delayed_work recover_work; }; -struct mlx5_cq_table { - /* protect radix tree - */ - spinlock_t lock; - struct radix_tree_root tree; -}; - struct mlx5_qp_table { /* protect radix tree */ @@ -589,8 +591,14 @@ struct mlx5_eswitch; struct mlx5_lag; struct mlx5_pagefault; +struct mlx5_rate_limit { + u32 rate; + u32 max_burst_sz; + u16 typical_pkt_sz; +}; + struct mlx5_rl_entry { - u32 rate; + struct mlx5_rate_limit rl; u16 index; u16 refcount; }; @@ -654,10 +662,6 @@ struct mlx5_priv { struct dentry *cmdif_debugfs; /* end: qp staff */ - /* start: cq staff */ - struct mlx5_cq_table cq_table; - /* end: cq staff */ - /* start: mkey staff */ struct mlx5_mkey_table mkey_table; /* end: mkey staff */ @@ -936,9 +940,9 @@ struct mlx5_hca_vport_context { bool grh_required; }; -static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset) +static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset) { - return buf->direct.buf + offset; + return buf->frags->buf + offset; } #define STRUCT_FIELD(header, field) \ @@ -977,6 +981,25 @@ static inline u32 mlx5_base_mkey(const u32 key) return key & 0xffffff00u; } +static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc, + void *cqc) +{ + fbc->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz); + fbc->log_sz = MLX5_GET(cqc, cqc, log_cq_size); + fbc->sz_m1 = (1 
<< fbc->log_sz) - 1; + fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride; + fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1; +} + +static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc, + u32 ix) +{ + unsigned int frag = (ix >> fbc->log_frag_strides); + + return fbc->frag_buf.frags[frag].buf + + ((fbc->frag_sz_m1 & ix) << fbc->log_stride); +} + int mlx5_cmd_init(struct mlx5_core_dev *dev); void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); void mlx5_cmd_use_events(struct mlx5_core_dev *dev); @@ -1002,9 +1025,10 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev); void mlx5_trigger_health_work(struct mlx5_core_dev *dev); void mlx5_drain_health_recovery(struct mlx5_core_dev *dev); int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, - struct mlx5_buf *buf, int node); -int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); -void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); + struct mlx5_frag_buf *buf, int node); +int mlx5_buf_alloc(struct mlx5_core_dev *dev, + int size, struct mlx5_frag_buf *buf); +void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_frag_buf *buf, int node); void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); @@ -1049,22 +1073,12 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); void mlx5_unregister_debugfs(void); -int mlx5_eq_init(struct mlx5_core_dev *dev); -void mlx5_eq_cleanup(struct mlx5_core_dev *dev); -void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); + +void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); -void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); -void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced); -void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); -int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, - int nent, u64 mask, const char *name, - enum mlx5_eq_type type); -int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); -int mlx5_start_eqs(struct mlx5_core_dev *dev); -void mlx5_stop_eqs(struct mlx5_core_dev *dev); int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, unsigned int *irqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); @@ -1076,14 +1090,6 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); -int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); -void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); -int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, - u32 *out, int outlen); -int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); -void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); -int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); -void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); int mlx5_db_alloc_node(struct mlx5_core_dev *dev, 
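To make the fragment indexing in mlx5_core_init_cq_frag_buf() and mlx5_frag_buf_get_wqe() above concrete, the arithmetic for one plausible configuration (4 KiB pages, 64-byte CQEs, i.e. a cqe_sz field of 0) works out as follows; the numbers are illustrative only:

/* log_stride       = 6 + 0  = 6    -> 64 bytes per CQE
 * log_frag_strides = 12 - 6 = 6    -> 64 CQEs per page-sized fragment
 * frag_sz_m1       = 63
 *
 * mlx5_frag_buf_get_wqe(fbc, 150) then resolves to:
 *   fragment index = 150 >> 6        = 2
 *   byte offset    = (150 & 63) << 6 = 22 * 64 = 1408
 * i.e. entry 150 lives 1408 bytes into the third fragment.
 */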
struct mlx5_db *db, int node); @@ -1107,9 +1113,12 @@ int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token, int mlx5_init_rl_table(struct mlx5_core_dev *dev); void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); -int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index); -void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate); +int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index, + struct mlx5_rate_limit *rl); +void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl); bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate); +bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0, + struct mlx5_rate_limit *rl_1); int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, bool map_wc, bool fast_path); void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg); @@ -1224,6 +1233,12 @@ static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev) return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF); } +#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev)) +#define MLX5_VPORT_MANAGER(mdev) \ + (MLX5_CAP_GEN(mdev, vport_group_manager) && \ + (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ + mlx5_core_is_pf(mdev)) + static inline int mlx5_get_gid_table_len(u16 param) { if (param > 4) { diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h new file mode 100644 index 000000000000..d3c9db492b30 --- /dev/null +++ b/include/linux/mlx5/eswitch.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + */ + +#ifndef _MLX5_ESWITCH_ +#define _MLX5_ESWITCH_ + +#include <linux/mlx5/driver.h> + +enum { + SRIOV_NONE, + SRIOV_LEGACY, + SRIOV_OFFLOADS +}; + +enum { + REP_ETH, + REP_IB, + NUM_REP_TYPES, +}; + +struct mlx5_eswitch_rep; +struct mlx5_eswitch_rep_if { + int (*load)(struct mlx5_core_dev *dev, + struct mlx5_eswitch_rep *rep); + void (*unload)(struct mlx5_eswitch_rep *rep); + void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep); + void *priv; + bool valid; +}; + +struct mlx5_eswitch_rep { + struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES]; + u16 vport; + u8 hw_id[ETH_ALEN]; + u16 vlan; + u32 vlan_refcount; +}; + +void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, + int vport_index, + struct mlx5_eswitch_rep_if *rep_if, + u8 rep_type); +void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, + int vport_index, + u8 rep_type); +void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, + int vport, + u8 rep_type); +struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, + int vport); +void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type); +u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); +struct mlx5_flow_handle * +mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, + int vport, u32 sqn); +#endif diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index a0b48afcb422..47aecc4fa8c2 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -40,6 +40,8 @@ enum { MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16, + MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17, + MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 18, }; enum { @@ -69,6 +71,7 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_ESW_INGRESS, MLX5_FLOW_NAMESPACE_SNIFFER_RX, MLX5_FLOW_NAMESPACE_SNIFFER_TX, + MLX5_FLOW_NAMESPACE_EGRESS, }; struct mlx5_flow_table; @@ -139,11 +142,20 @@ struct mlx5_flow_group * 
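A rough sketch of registering a representor through the new include/linux/mlx5/eswitch.h interface shown above; the demo_ callbacks and the choice of REP_IB are illustrative, not the actual mlx5_ib or mlx5e code:

#include <linux/mlx5/eswitch.h>

static int demo_rep_load(struct mlx5_core_dev *dev,
			 struct mlx5_eswitch_rep *rep)
{
	/* allocate and publish the per-representor protocol device here */
	return 0;
}

static void demo_rep_unload(struct mlx5_eswitch_rep *rep)
{
	/* undo whatever demo_rep_load() set up */
}

static void *demo_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	return rep->rep_if[REP_IB].priv;
}

static void demo_register_rep(struct mlx5_eswitch *esw, int vport_index)
{
	struct mlx5_eswitch_rep_if rep_if = {
		.load = demo_rep_load,
		.unload = demo_rep_unload,
		.get_proto_dev = demo_rep_get_proto_dev,
	};

	mlx5_eswitch_register_vport_rep(esw, vport_index, &rep_if, REP_IB);
	/* symmetric teardown:
	 * mlx5_eswitch_unregister_vport_rep(esw, vport_index, REP_IB);
	 */
}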
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in); void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); +struct mlx5_fs_vlan { + u16 ethtype; + u16 vid; + u8 prio; +}; + struct mlx5_flow_act { u32 action; + bool has_flow_tag; u32 flow_tag; u32 encap_id; u32 modify_id; + uintptr_t esp_id; + struct mlx5_fs_vlan vlan; }; #define MLX5_DECLARE_FLOW_ACT(name) \ diff --git a/include/linux/mlx5/fs_helpers.h b/include/linux/mlx5/fs_helpers.h new file mode 100644 index 000000000000..9db21cd0e92c --- /dev/null +++ b/include/linux/mlx5/fs_helpers.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _MLX5_FS_HELPERS_ +#define _MLX5_FS_HELPERS_ + +#include <linux/mlx5/mlx5_ifc.h> + +#define MLX5_FS_IPV4_VERSION 4 +#define MLX5_FS_IPV6_VERSION 6 + +static inline bool mlx5_fs_is_ipsec_flow(const u32 *match_c) +{ + void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, + misc_parameters); + + return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi); +} + +static inline bool _mlx5_fs_is_outer_ipproto_flow(const u32 *match_c, + const u32 *match_v, u8 match) +{ + const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, + outer_headers); + const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, + outer_headers); + + return MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_protocol) == 0xff && + MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol) == match; +} + +static inline bool mlx5_fs_is_outer_tcp_flow(const u32 *match_c, + const u32 *match_v) +{ + return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_TCP); +} + +static inline bool mlx5_fs_is_outer_udp_flow(const u32 *match_c, + const u32 *match_v) +{ + return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_UDP); +} + +static inline bool mlx5_fs_is_vxlan_flow(const u32 *match_c) +{ + void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, + misc_parameters); + + return MLX5_GET(fte_match_set_misc, misc_params_c, vxlan_vni); +} + +static inline bool _mlx5_fs_is_outer_ipv_flow(struct mlx5_core_dev *mdev, + const u32 *match_c, + const u32 *match_v, int version) +{ + int match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, + ft_field_support.outer_ip_version); + const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, + outer_headers); + const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, + outer_headers); + + if (!match_ipv) { + u16 ethertype; + + switch (version) { + case MLX5_FS_IPV4_VERSION: + ethertype = ETH_P_IP; + break; + case MLX5_FS_IPV6_VERSION: + ethertype = ETH_P_IPV6; + break; + default: + return false; + } + + return MLX5_GET(fte_match_set_lyr_2_4, headers_c, + ethertype) == 0xffff && + MLX5_GET(fte_match_set_lyr_2_4, headers_v, + ethertype) == ethertype; + } + + return MLX5_GET(fte_match_set_lyr_2_4, headers_c, + ip_version) == 0xf && + MLX5_GET(fte_match_set_lyr_2_4, headers_v, + ip_version) == version; +} + +static inline bool +mlx5_fs_is_outer_ipv4_flow(struct mlx5_core_dev *mdev, const u32 *match_c, + const u32 *match_v) +{ + return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v, + MLX5_FS_IPV4_VERSION); +} + +static inline bool +mlx5_fs_is_outer_ipv6_flow(struct mlx5_core_dev *mdev, const u32 *match_c, + const u32 *match_v) +{ + return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v, + MLX5_FS_IPV6_VERSION); +} + +static inline bool mlx5_fs_is_outer_ipsec_flow(const u32 *match_c) +{ + void *misc_params_c = + MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters); + + return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi); +} + +#endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index f4e417686f62..1aad455538f4 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -92,6 +92,8 @@ enum { MLX5_CMD_OP_DESTROY_MKEY = 0x202, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204, + MLX5_CMD_OP_ALLOC_MEMIC = 0x205, + MLX5_CMD_OP_DEALLOC_MEMIC = 0x206, MLX5_CMD_OP_CREATE_EQ = 0x301, MLX5_CMD_OP_DESTROY_EQ = 0x302, MLX5_CMD_OP_QUERY_EQ = 0x303, @@ -143,6 +145,7 @@ enum { MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763, MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764, 
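A short sketch of how the fs_helpers predicates above might be combined when deciding whether a steering rule can be offloaded; demo_flow_is_offloadable and its policy are made up for illustration:

#include <linux/mlx5/fs_helpers.h>

static bool demo_flow_is_offloadable(struct mlx5_core_dev *mdev,
				     const u32 *match_c, const u32 *match_v)
{
	/* example policy: outer IPv4 only, and either ESP or VXLAN-over-UDP */
	if (!mlx5_fs_is_outer_ipv4_flow(mdev, match_c, match_v))
		return false;

	return mlx5_fs_is_ipsec_flow(match_c) ||
	       (mlx5_fs_is_outer_udp_flow(match_c, match_v) &&
		mlx5_fs_is_vxlan_flow(match_c));
}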
MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765, + MLX5_CMD_OP_QUERY_VNIC_ENV = 0x76f, MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770, MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, @@ -295,7 +298,9 @@ struct mlx5_ifc_flow_table_fields_supported_bits { u8 inner_tcp_dport[0x1]; u8 inner_tcp_flags[0x1]; u8 reserved_at_37[0x9]; - u8 reserved_at_40[0x1a]; + u8 reserved_at_40[0x17]; + u8 outer_esp_spi[0x1]; + u8 reserved_at_58[0x2]; u8 bth_dst_qp[0x1]; u8 reserved_at_5b[0x25]; @@ -311,7 +316,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 flow_table_modify[0x1]; u8 encap[0x1]; u8 decap[0x1]; - u8 reserved_at_9[0x17]; + u8 reserved_at_9[0x1]; + u8 pop_vlan[0x1]; + u8 push_vlan[0x1]; + u8 reserved_at_c[0x14]; u8 reserved_at_20[0x2]; u8 log_max_ft_size[0x6]; @@ -437,7 +445,9 @@ struct mlx5_ifc_fte_match_set_misc_bits { u8 reserved_at_120[0x28]; u8 bth_dst_qp[0x18]; - u8 reserved_at_160[0xa0]; + u8 reserved_at_160[0x20]; + u8 outer_esp_spi[0x20]; + u8 reserved_at_1a0[0x60]; }; struct mlx5_ifc_cmd_pas_bits { @@ -567,7 +577,10 @@ struct mlx5_ifc_qos_cap_bits { u8 esw_scheduling[0x1]; u8 esw_bw_share[0x1]; u8 esw_rate_limit[0x1]; - u8 reserved_at_4[0x1c]; + u8 reserved_at_4[0x1]; + u8 packet_pacing_burst_bound[0x1]; + u8 packet_pacing_typical_size[0x1]; + u8 reserved_at_7[0x19]; u8 reserved_at_20[0x20]; @@ -589,6 +602,16 @@ struct mlx5_ifc_qos_cap_bits { u8 reserved_at_100[0x700]; }; +struct mlx5_ifc_debug_cap_bits { + u8 reserved_at_0[0x20]; + + u8 reserved_at_20[0x2]; + u8 stall_detect[0x1]; + u8 reserved_at_23[0x1d]; + + u8 reserved_at_40[0x7c0]; +}; + struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 csum_cap[0x1]; u8 vlan_cap[0x1]; @@ -651,6 +674,24 @@ struct mlx5_ifc_roce_cap_bits { u8 reserved_at_100[0x700]; }; +struct mlx5_ifc_device_mem_cap_bits { + u8 memic[0x1]; + u8 reserved_at_1[0x1f]; + + u8 reserved_at_20[0xb]; + u8 log_min_memic_alloc_size[0x5]; + u8 reserved_at_30[0x8]; + u8 log_max_memic_addr_alignment[0x8]; + + u8 memic_bar_start_addr[0x40]; + + u8 memic_bar_size[0x20]; + + u8 max_memic_size[0x20]; + + u8 reserved_at_c0[0x740]; +}; + enum { MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2, @@ -851,7 +892,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 out_of_seq_cnt[0x1]; u8 vport_counters[0x1]; u8 retransmission_q_counters[0x1]; - u8 reserved_at_183[0x1]; + u8 debug[0x1]; u8 modify_rq_counter_set_id[0x1]; u8 rq_delay_drop[0x1]; u8 max_qp_cnt[0xa]; @@ -861,11 +902,11 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 vhca_group_manager[0x1]; u8 ib_virt[0x1]; u8 eth_virt[0x1]; - u8 reserved_at_1a4[0x1]; + u8 vnic_env_queue_counters[0x1]; u8 ets[0x1]; u8 nic_flow_table[0x1]; u8 eswitch_flow_table[0x1]; - u8 early_vf_enable[0x1]; + u8 device_memory[0x1]; u8 mcam_reg[0x1]; u8 pcam_reg[0x1]; u8 local_ca_ack_delay[0x5]; @@ -909,7 +950,11 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_202[0x1]; u8 ipoib_enhanced_offloads[0x1]; u8 ipoib_basic_offloads[0x1]; - u8 reserved_at_205[0x5]; + u8 reserved_at_205[0x1]; + u8 repeated_block_disabled[0x1]; + u8 umr_modify_entity_size_disabled[0x1]; + u8 umr_modify_atomic_disabled[0x1]; + u8 umr_indirect_mkey_disabled[0x1]; u8 umr_fence[0x2]; u8 reserved_at_20c[0x3]; u8 drain_sigerr[0x1]; @@ -993,7 +1038,10 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_330[0xb]; u8 log_max_xrcd[0x5]; - u8 reserved_at_340[0x8]; + u8 nic_receive_steering_discard[0x1]; + u8 receive_discard_vport_down[0x1]; + u8 transmit_discard_vport_down[0x1]; + u8 reserved_at_343[0x5]; u8 
log_max_flow_counter_bulk[0x8]; u8 max_flow_counter_15_0[0x10]; @@ -1017,7 +1065,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_398[0x3]; u8 log_max_tis_per_sq[0x5]; - u8 reserved_at_3a0[0x3]; + u8 ext_stride_num_range[0x1]; + u8 reserved_at_3a1[0x2]; u8 log_max_stride_sz_rq[0x5]; u8 reserved_at_3a8[0x3]; u8 log_min_stride_sz_rq[0x5]; @@ -1091,6 +1140,7 @@ enum mlx5_flow_destination_type { MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, + MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99, MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100, }; @@ -1183,9 +1233,9 @@ struct mlx5_ifc_wq_bits { u8 log_hairpin_num_packets[0x5]; u8 reserved_at_128[0x3]; u8 log_hairpin_data_sz[0x5]; - u8 reserved_at_130[0x5]; - u8 log_wqe_num_of_strides[0x3]; + u8 reserved_at_130[0x4]; + u8 log_wqe_num_of_strides[0x4]; u8 two_byte_shift_en[0x1]; u8 reserved_at_139[0x4]; u8 log_wqe_stride_size[0x3]; @@ -1567,7 +1617,17 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { u8 rx_pause_transition_low[0x20]; - u8 reserved_at_3c0[0x400]; + u8 reserved_at_3c0[0x40]; + + u8 device_stall_minor_watermark_cnt_high[0x20]; + + u8 device_stall_minor_watermark_cnt_low[0x20]; + + u8 device_stall_critical_watermark_cnt_high[0x20]; + + u8 device_stall_critical_watermark_cnt_low[0x20]; + + u8 reserved_at_480[0x340]; }; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { @@ -2282,10 +2342,19 @@ enum { MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10, MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20, MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40, + MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80, + MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100, +}; + +struct mlx5_ifc_vlan_bits { + u8 ethtype[0x10]; + u8 prio[0x3]; + u8 cfi[0x1]; + u8 vid[0xc]; }; struct mlx5_ifc_flow_context_bits { - u8 reserved_at_0[0x20]; + struct mlx5_ifc_vlan_bits push_vlan; u8 group_id[0x20]; @@ -2361,6 +2430,24 @@ struct mlx5_ifc_xrc_srqc_bits { u8 reserved_at_180[0x80]; }; +struct mlx5_ifc_vnic_diagnostic_statistics_bits { + u8 counter_error_queues[0x20]; + + u8 total_error_queues[0x20]; + + u8 send_queue_priority_update_flow[0x20]; + + u8 reserved_at_60[0x20]; + + u8 nic_receive_steering_discard[0x40]; + + u8 receive_discard_vport_down[0x40]; + + u8 transmit_discard_vport_down[0x40]; + + u8 reserved_at_140[0xec0]; +}; + struct mlx5_ifc_traffic_counter_bits { u8 packets[0x40]; @@ -2688,12 +2775,17 @@ enum { MLX5_MKC_ACCESS_MODE_MTT = 0x1, MLX5_MKC_ACCESS_MODE_KLMS = 0x2, MLX5_MKC_ACCESS_MODE_KSM = 0x3, + MLX5_MKC_ACCESS_MODE_MEMIC = 0x5, }; struct mlx5_ifc_mkc_bits { u8 reserved_at_0[0x1]; u8 free[0x1]; - u8 reserved_at_2[0xd]; + u8 reserved_at_2[0x1]; + u8 access_mode_4_2[0x3]; + u8 reserved_at_6[0x7]; + u8 relaxed_ordering_write[0x1]; + u8 reserved_at_e[0x1]; u8 small_fence_on_rdma_read_response[0x1]; u8 umr_en[0x1]; u8 a[0x1]; @@ -2701,7 +2793,7 @@ struct mlx5_ifc_mkc_bits { u8 rr[0x1]; u8 lw[0x1]; u8 lr[0x1]; - u8 access_mode[0x2]; + u8 access_mode_1_0[0x2]; u8 reserved_at_18[0x8]; u8 qpn[0x18]; @@ -3636,6 +3728,35 @@ struct mlx5_ifc_query_vport_state_in_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_query_vnic_env_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_vnic_diagnostic_statistics_bits vport_env; +}; + +enum { + MLX5_QUERY_VNIC_ENV_IN_OP_MOD_VPORT_DIAG_STATISTICS = 0x0, +}; + +struct mlx5_ifc_query_vnic_env_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 
vport_number[0x10]; + + u8 reserved_at_60[0x20]; +}; + struct mlx5_ifc_query_vport_counter_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -7308,7 +7429,12 @@ struct mlx5_ifc_set_pp_rate_limit_in_bits { u8 rate_limit[0x20]; - u8 reserved_at_a0[0x160]; + u8 burst_upper_bound[0x20]; + + u8 reserved_at_c0[0x10]; + u8 typical_packet_size[0x10]; + + u8 reserved_at_e0[0x120]; }; struct mlx5_ifc_access_register_out_bits { @@ -7808,7 +7934,11 @@ struct mlx5_ifc_pifr_reg_bits { struct mlx5_ifc_pfcc_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_at_10[0x10]; + u8 reserved_at_10[0xb]; + u8 ppan_mask_n[0x1]; + u8 minor_stall_mask[0x1]; + u8 critical_stall_mask[0x1]; + u8 reserved_at_1e[0x2]; u8 ppan[0x4]; u8 reserved_at_24[0x4]; @@ -7818,17 +7948,22 @@ struct mlx5_ifc_pfcc_reg_bits { u8 pptx[0x1]; u8 aptx[0x1]; - u8 reserved_at_42[0x6]; + u8 pptx_mask_n[0x1]; + u8 reserved_at_43[0x5]; u8 pfctx[0x8]; u8 reserved_at_50[0x10]; u8 pprx[0x1]; u8 aprx[0x1]; - u8 reserved_at_62[0x6]; + u8 pprx_mask_n[0x1]; + u8 reserved_at_63[0x5]; u8 pfcrx[0x8]; u8 reserved_at_70[0x10]; - u8 reserved_at_80[0x80]; + u8 device_stall_minor_watermark[0x10]; + u8 device_stall_critical_watermark[0x10]; + + u8 reserved_at_a0[0x60]; }; struct mlx5_ifc_pelc_reg_bits { @@ -7869,8 +8004,10 @@ struct mlx5_ifc_peir_reg_bits { }; struct mlx5_ifc_pcam_enhanced_features_bits { - u8 reserved_at_0[0x7b]; + u8 reserved_at_0[0x76]; + u8 pfcc_mask[0x1]; + u8 reserved_at_77[0x4]; u8 rx_buffer_fullness_counters[0x1]; u8 ptys_connector_type[0x1]; u8 reserved_at_7d[0x1]; @@ -8851,4 +8988,57 @@ struct mlx5_ifc_destroy_vport_lag_in_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_alloc_memic_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_30[0x20]; + + u8 reserved_at_40[0x18]; + u8 log_memic_addr_alignment[0x8]; + + u8 range_start_addr[0x40]; + + u8 range_size[0x20]; + + u8 memic_size[0x20]; +}; + +struct mlx5_ifc_alloc_memic_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 memic_start_addr[0x40]; +}; + +struct mlx5_ifc_dealloc_memic_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + u8 memic_start_addr[0x40]; + + u8 memic_size[0x20]; + + u8 reserved_at_e0[0x20]; +}; + +struct mlx5_ifc_dealloc_memic_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h index 255a88d08078..ec052491ba3d 100644 --- a/include/linux/mlx5/mlx5_ifc_fpga.h +++ b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -373,7 +373,10 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits { struct mlx5_ifc_ipsec_extended_cap_bits { u8 encapsulation[0x20]; - u8 reserved_0[0x15]; + u8 reserved_0[0x12]; + u8 v2_command[0x1]; + u8 udp_encap[0x1]; + u8 rx_no_trailer[0x1]; u8 ipv4_fragment[0x1]; u8 ipv6[0x1]; u8 esn[0x1]; @@ -429,4 +432,91 @@ struct mlx5_ifc_ipsec_counters_bits { u8 dropped_cmd[0x40]; }; +enum mlx5_ifc_fpga_ipsec_response_syndrome { + MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0, + MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1, + MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE = 2, + MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3, +}; + +struct mlx5_ifc_fpga_ipsec_cmd_resp { + __be32 syndrome; + union { + __be32 sw_sa_handle; + __be32 flags; + }; + u8 reserved[24]; +} __packed; + +enum mlx5_ifc_fpga_ipsec_cmd_opcode { + 
MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0, + MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1, + MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2, + MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3, + MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4, + MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5, +}; + +enum mlx5_ifc_fpga_ipsec_cap { + MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0), +}; + +struct mlx5_ifc_fpga_ipsec_cmd_cap { + __be32 cmd; + __be32 flags; + u8 reserved[24]; +} __packed; + +enum mlx5_ifc_fpga_ipsec_sa_flags { + MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0), + MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1), + MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2), + MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3), + MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4), + MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5), + MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6), + MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7), +}; + +enum mlx5_ifc_fpga_ipsec_sa_enc_mode { + MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0, + MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1, + MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3, +}; + +struct mlx5_ifc_fpga_ipsec_sa_v1 { + __be32 cmd; + u8 key_enc[32]; + u8 key_auth[32]; + __be32 sip[4]; + __be32 dip[4]; + union { + struct { + __be32 reserved; + u8 salt_iv[8]; + __be32 salt; + } __packed gcm; + struct { + u8 salt[16]; + } __packed cbc; + }; + __be32 spi; + __be32 sw_sa_handle; + __be16 tfclen; + u8 enc_mode; + u8 reserved1[2]; + u8 flags; + u8 reserved2[2]; +}; + +struct mlx5_ifc_fpga_ipsec_sa { + struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1; + __be16 udp_sp; + __be16 udp_dp; + u8 reserved1[4]; + __be32 esn; + __be16 vid; /* only 12 bits, rest is reserved */ + __be16 reserved2; +} __packed; + #endif /* MLX5_IFC_FPGA_H */ diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 035f0d4dc9fe..34aed6032f86 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -151,6 +151,12 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx); int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx); +int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev, + u16 stall_critical_watermark, + u16 stall_minor_watermark); +int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev, + u16 *stall_critical_watermark, u16 *stall_minor_watermark); + int mlx5_max_tc(struct mlx5_core_dev *mdev); int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc); diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index 7e8f281f8c00..83a33a1873a6 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h @@ -47,6 +47,7 @@ int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out); +int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state); int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn); int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, @@ -66,7 +67,6 @@ int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rmpn); int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn); -int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out); int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, diff --git a/include/linux/mlx5/vport.h 
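As a rough illustration of the ALLOC_MEMIC plumbing added to mlx5_ifc.h above, a caller could build the command with the usual MLX5_SET/MLX5_GET64 accessors and mlx5_cmd_exec(); the alignment and range policy below is an assumption for the sketch, not the real mlx5_ib device-memory logic:

static int demo_alloc_memic(struct mlx5_core_dev *dev, u32 length, u64 *addr)
{
	u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
	u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
	int err;

	if (!MLX5_CAP_GEN(dev, device_memory))
		return -EOPNOTSUPP;

	MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
	MLX5_SET(alloc_memic_in, in, memic_size, length);
	MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment, PAGE_SHIFT);
	MLX5_SET64(alloc_memic_in, in, range_start_addr,
		   MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr));
	MLX5_SET(alloc_memic_in, in, range_size,
		 MLX5_CAP_DEV_MEM(dev, memic_bar_size));

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*addr = MLX5_GET64(alloc_memic_out, out, memic_start_addr);
	return 0;
}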
b/include/linux/mlx5/vport.h index 64e193e87394..9208cb8809ac 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -107,6 +107,9 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev); int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev); +int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, + u64 *rx_discard_vport_down, + u64 *tx_discard_vport_down); int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, int vf, u8 port_num, void *out, size_t out_sz); diff --git a/include/linux/mm.h b/include/linux/mm.h index ad06d42adb1a..1ac1f06a4be6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -241,10 +241,11 @@ extern unsigned int kobjsize(const void *objp); # define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ #elif defined(CONFIG_PARISC) # define VM_GROWSUP VM_ARCH_1 -#elif defined(CONFIG_METAG) -# define VM_GROWSUP VM_ARCH_1 #elif defined(CONFIG_IA64) # define VM_GROWSUP VM_ARCH_1 +#elif defined(CONFIG_SPARC64) +# define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */ +# define VM_ARCH_CLEAR VM_SPARC_ADI #elif !defined(CONFIG_MMU) # define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ #endif @@ -287,6 +288,12 @@ extern unsigned int kobjsize(const void *objp); /* This mask is used to clear all the VMA flags used by mlock */ #define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT)) +/* Arch-specific flags to clear when updating VM flags on protection change */ +#ifndef VM_ARCH_CLEAR +# define VM_ARCH_CLEAR VM_NONE +#endif +#define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR) + /* * mapping from the currently active vm_flags protection bits (the * low four bits) to a page protection mask.. @@ -379,17 +386,19 @@ struct vm_operations_struct { void (*close)(struct vm_area_struct * area); int (*split)(struct vm_area_struct * area, unsigned long addr); int (*mremap)(struct vm_area_struct * area); - int (*fault)(struct vm_fault *vmf); - int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size); + vm_fault_t (*fault)(struct vm_fault *vmf); + vm_fault_t (*huge_fault)(struct vm_fault *vmf, + enum page_entry_size pe_size); void (*map_pages)(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); + unsigned long (*pagesize)(struct vm_area_struct * area); /* notification that a previously read-only page is about to become * writable, if an error is returned it will cause a SIGBUS */ - int (*page_mkwrite)(struct vm_fault *vmf); + vm_fault_t (*page_mkwrite)(struct vm_fault *vmf); /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */ - int (*pfn_mkwrite)(struct vm_fault *vmf); + vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf); /* called by access_process_vm when get_user_pages() fails, typically * for use by special VMAs that can switch between memory and hardware @@ -738,7 +747,7 @@ int finish_mkwrite_fault(struct vm_fault *vmf); * refcount. The each user mapping also has a reference to the page. * * The pagecache pages are stored in a per-mapping radix tree, which is - * rooted at mapping->page_tree, and indexed by offset. + * rooted at mapping->i_pages, and indexed by offset. * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space * lists, we instead now tag pages as dirty/writeback in the radix tree. 
* @@ -896,7 +905,9 @@ extern int page_to_nid(const struct page *page); #else static inline int page_to_nid(const struct page *page) { - return (page->flags >> NODES_PGSHIFT) & NODES_MASK; + struct page *p = (struct page *)page; + + return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK; } #endif @@ -1145,6 +1156,7 @@ static inline pgoff_t page_index(struct page *page) bool page_mapped(struct page *page); struct address_space *page_mapping(struct page *page); +struct address_space *page_mapping_file(struct page *page); /* * Return true only if the page has been allocated with @@ -1454,6 +1466,7 @@ extern int try_to_release_page(struct page * page, gfp_t gfp_mask); extern void do_invalidatepage(struct page *page, unsigned int offset, unsigned int length); +void __set_page_dirty(struct page *, struct address_space *, int warn); int __set_page_dirty_nobuffers(struct page *page); int __set_page_dirty_no_writeback(struct page *page); int redirty_page_for_writepage(struct writeback_control *wbc, @@ -2096,6 +2109,7 @@ extern void setup_per_cpu_pageset(void); extern void zone_pcp_update(struct zone *zone); extern void zone_pcp_reset(struct zone *zone); +extern void setup_zone_pageset(struct zone *zone); /* page_alloc.c */ extern int min_free_kbytes; @@ -2413,6 +2427,44 @@ int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn); int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); +static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, + unsigned long addr, struct page *page) +{ + int err = vm_insert_page(vma, addr, page); + + if (err == -ENOMEM) + return VM_FAULT_OOM; + if (err < 0 && err != -EBUSY) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} + +static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, + unsigned long addr, pfn_t pfn) +{ + int err = vm_insert_mixed(vma, addr, pfn); + + if (err == -ENOMEM) + return VM_FAULT_OOM; + if (err < 0 && err != -EBUSY) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} + +static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn) +{ + int err = vm_insert_pfn(vma, addr, pfn); + + if (err == -ENOMEM) + return VM_FAULT_OOM; + if (err < 0 && err != -EBUSY) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} struct page *follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int foll_flags, @@ -2582,7 +2634,7 @@ extern int get_hwpoison_page(struct page *page); extern int sysctl_memory_failure_early_kill; extern int sysctl_memory_failure_recovery; extern void shake_page(struct page *p, int access); -extern atomic_long_t num_poisoned_pages; +extern atomic_long_t num_poisoned_pages __read_mostly; extern int soft_offline_page(struct page *page, int flags); @@ -2604,6 +2656,7 @@ enum mf_action_page_type { MF_MSG_POISONED_HUGE, MF_MSG_HUGE, MF_MSG_FREE_HUGE, + MF_MSG_NON_PMD_HUGE, MF_MSG_UNMAP_FAILED, MF_MSG_DIRTY_SWAPCACHE, MF_MSG_CLEAN_SWAPCACHE, diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index fd1af6b9591d..21612347d311 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -22,6 +22,8 @@ #endif #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) +typedef int vm_fault_t; + struct address_space; struct mem_cgroup; struct hmm; diff --git a/include/linux/mman.h b/include/linux/mman.h index 6a4d1caaff5c..4b08e9c9c538 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -92,7 +92,7 @@ static inline void 
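A minimal sketch of a driver ->fault handler written against the new vm_fault_t wrappers above; demo_priv and its base_pfn field are invented for the example:

#include <linux/mm.h>

struct demo_priv {
	unsigned long base_pfn;
};

static vm_fault_t demo_vm_fault(struct vm_fault *vmf)
{
	struct demo_priv *priv = vmf->vma->vm_private_data;

	/* vmf_insert_pfn() folds in the errno -> VM_FAULT_* translation that
	 * callers of vm_insert_pfn() previously open-coded.
	 */
	return vmf_insert_pfn(vmf->vma, vmf->address,
			      priv->base_pfn + vmf->pgoff);
}

static const struct vm_operations_struct demo_vm_ops = {
	.fault = demo_vm_fault,
};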
vm_unacct_memory(long pages) * * Returns true if the prot flags are valid */ -static inline bool arch_validate_prot(unsigned long prot) +static inline bool arch_validate_prot(unsigned long prot, unsigned long addr) { return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0; } diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h index 91f1ba0663c8..06607c59c4d0 100644 --- a/include/linux/mmc/slot-gpio.h +++ b/include/linux/mmc/slot-gpio.h @@ -31,6 +31,7 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, unsigned int debounce, bool *gpio_invert); void mmc_gpio_set_cd_isr(struct mmc_host *host, irqreturn_t (*isr)(int irq, void *dev_id)); +int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on); void mmc_gpiod_request_cd_irq(struct mmc_host *host); bool mmc_can_gpio_cd(struct mmc_host *host); bool mmc_can_gpio_ro(struct mmc_host *host); diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 57b0030d3800..2ad72d2c8cc5 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -37,10 +37,10 @@ void dump_mm(const struct mm_struct *mm); BUG(); \ } \ } while (0) -#define VM_WARN_ON(cond) WARN_ON(cond) -#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) -#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) -#define VM_WARN(cond, format...) WARN(cond, format) +#define VM_WARN_ON(cond) (void)WARN_ON(cond) +#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond) +#define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format) +#define VM_WARN(cond, format...) (void)WARN(cond, format) #else #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 7522a6987595..32699b2dc52a 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -180,6 +180,7 @@ enum node_stat_item { NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ NR_DIRTIED, /* page dirtyings since bootup */ NR_WRITTEN, /* page writings since bootup */ + NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */ NR_VM_NODE_STAT_ITEMS }; @@ -633,14 +634,15 @@ typedef struct pglist_data { #ifndef CONFIG_NO_BOOTMEM struct bootmem_data *bdata; #endif -#ifdef CONFIG_MEMORY_HOTPLUG +#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) /* * Must be held any time you expect node_start_pfn, node_present_pages * or node_spanned_pages stay constant. Holding this will also * guarantee that any pfn_valid() stays that way. * * pgdat_resize_lock() and pgdat_resize_unlock() are provided to - * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG. + * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG + * or CONFIG_DEFERRED_STRUCT_PAGE_INIT. 
* * Nests above zone->lock and zone->span_seqlock */ @@ -775,7 +777,8 @@ static inline bool is_dev_zone(const struct zone *zone) #include <linux/memory_hotplug.h> void build_all_zonelists(pg_data_t *pgdat); -void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); +void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order, + enum zone_type classzone_idx); bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, int classzone_idx, unsigned int alloc_flags, long free_pages); @@ -816,10 +819,6 @@ int local_memory_node(int node_id); static inline int local_memory_node(int node_id) { return node_id; }; #endif -#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE -unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); -#endif - /* * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc. */ @@ -886,7 +885,7 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); -extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; +extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES]; int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, @@ -1289,7 +1288,6 @@ struct mminit_pfnnid_cache { #endif void memory_present(int nid, unsigned long start, unsigned long end); -unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); /* * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 48fb2b43c35a..7d361be2e24f 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -502,6 +502,7 @@ enum dmi_field { DMI_CHASSIS_SERIAL, DMI_CHASSIS_ASSET_TAG, DMI_STRING_MAX, + DMI_OEM_STRING, /* special case - will not be in dmi_ident */ }; struct dmi_strmatch { diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 5396521a776a..9a36fad9e068 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -4,11 +4,10 @@ #include <linux/in.h> #include <linux/pim.h> -#include <linux/rhashtable.h> -#include <net/sock.h> #include <net/fib_rules.h> #include <net/fib_notifier.h> #include <uapi/linux/mroute.h> +#include <linux/mroute_base.h> #ifdef CONFIG_IP_MROUTE static inline int ip_mroute_opt(int opt) @@ -56,56 +55,8 @@ static inline bool ipmr_rule_default(const struct fib_rule *rule) } #endif -struct vif_device { - struct net_device *dev; /* Device we are using */ - struct netdev_phys_item_id dev_parent_id; /* Device parent ID */ - unsigned long bytes_in,bytes_out; - unsigned long pkt_in,pkt_out; /* Statistics */ - unsigned long rate_limit; /* Traffic shaping (NI) */ - unsigned char threshold; /* TTL threshold */ - unsigned short flags; /* Control flags */ - __be32 local,remote; /* Addresses(remote for tunnels)*/ - int link; /* Physical interface index */ -}; - -struct vif_entry_notifier_info { - struct fib_notifier_info info; - struct net_device *dev; - vifi_t vif_index; - unsigned short vif_flags; - u32 tb_id; -}; - #define VIFF_STATIC 0x8000 -#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) - -struct mr_table { - struct list_head list; - possible_net_t net; - u32 id; - struct sock __rcu *mroute_sk; - struct timer_list ipmr_expire_timer; - struct list_head mfc_unres_queue; - struct vif_device 
vif_table[MAXVIFS]; - struct rhltable mfc_hash; - struct list_head mfc_cache_list; - int maxvif; - atomic_t cache_resolve_queue_len; - bool mroute_do_assert; - bool mroute_do_pim; - int mroute_reg_vif_num; -}; - -/* mfc_flags: - * MFC_STATIC - the entry was added statically (not by a routing daemon) - * MFC_OFFLOAD - the entry was offloaded to the hardware - */ -enum { - MFC_STATIC = BIT(0), - MFC_OFFLOAD = BIT(1), -}; - struct mfc_cache_cmp_arg { __be32 mfc_mcastgrp; __be32 mfc_origin; @@ -113,28 +64,13 @@ struct mfc_cache_cmp_arg { /** * struct mfc_cache - multicast routing entries - * @mnode: rhashtable list + * @_c: Common multicast routing information; has to be first [for casting] * @mfc_mcastgrp: destination multicast group address * @mfc_origin: source address * @cmparg: used for rhashtable comparisons - * @mfc_parent: source interface (iif) - * @mfc_flags: entry flags - * @expires: unresolved entry expire time - * @unresolved: unresolved cached skbs - * @last_assert: time of last assert - * @minvif: minimum VIF id - * @maxvif: maximum VIF id - * @bytes: bytes that have passed for this entry - * @pkt: packets that have passed for this entry - * @wrong_if: number of wrong source interface hits - * @lastuse: time of last use of the group (traffic or update) - * @ttls: OIF TTL threshold array - * @refcount: reference count for this entry - * @list: global entry list - * @rcu: used for entry destruction */ struct mfc_cache { - struct rhlist_head mnode; + struct mr_mfc _c; union { struct { __be32 mfc_mcastgrp; @@ -142,57 +78,10 @@ struct mfc_cache { }; struct mfc_cache_cmp_arg cmparg; }; - vifi_t mfc_parent; - int mfc_flags; - - union { - struct { - unsigned long expires; - struct sk_buff_head unresolved; - } unres; - struct { - unsigned long last_assert; - int minvif; - int maxvif; - unsigned long bytes; - unsigned long pkt; - unsigned long wrong_if; - unsigned long lastuse; - unsigned char ttls[MAXVIFS]; - refcount_t refcount; - } res; - } mfc_un; - struct list_head list; - struct rcu_head rcu; -}; - -struct mfc_entry_notifier_info { - struct fib_notifier_info info; - struct mfc_cache *mfc; - u32 tb_id; }; struct rtmsg; int ipmr_get_route(struct net *net, struct sk_buff *skb, __be32 saddr, __be32 daddr, struct rtmsg *rtm, u32 portid); - -#ifdef CONFIG_IP_MROUTE -void ipmr_cache_free(struct mfc_cache *mfc_cache); -#else -static inline void ipmr_cache_free(struct mfc_cache *mfc_cache) -{ -} -#endif - -static inline void ipmr_cache_put(struct mfc_cache *c) -{ - if (refcount_dec_and_test(&c->mfc_un.res.refcount)) - ipmr_cache_free(c); -} -static inline void ipmr_cache_hold(struct mfc_cache *c) -{ - refcount_inc(&c->mfc_un.res.refcount); -} - #endif diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index 3014c52bfd86..c4a45859f586 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -7,6 +7,8 @@ #include <linux/skbuff.h> /* for struct sk_buff_head */ #include <net/net_namespace.h> #include <uapi/linux/mroute6.h> +#include <linux/mroute_base.h> +#include <net/fib_rules.h> #ifdef CONFIG_IPV6_MROUTE static inline int ip6_mroute_opt(int opt) @@ -62,57 +64,33 @@ static inline void ip6_mr_cleanup(void) } #endif -struct mif_device { - struct net_device *dev; /* Device we are using */ - unsigned long bytes_in,bytes_out; - unsigned long pkt_in,pkt_out; /* Statistics */ - unsigned long rate_limit; /* Traffic shaping (NI) */ - unsigned char threshold; /* TTL threshold */ - unsigned short flags; /* Control flags */ - int link; /* Physical interface index */ -}; +#ifdef 
CONFIG_IPV6_MROUTE_MULTIPLE_TABLES +bool ip6mr_rule_default(const struct fib_rule *rule); +#else +static inline bool ip6mr_rule_default(const struct fib_rule *rule) +{ + return true; +} +#endif #define VIFF_STATIC 0x8000 -struct mfc6_cache { - struct list_head list; - struct in6_addr mf6c_mcastgrp; /* Group the entry belongs to */ - struct in6_addr mf6c_origin; /* Source of packet */ - mifi_t mf6c_parent; /* Source interface */ - int mfc_flags; /* Flags on line */ +struct mfc6_cache_cmp_arg { + struct in6_addr mf6c_mcastgrp; + struct in6_addr mf6c_origin; +}; +struct mfc6_cache { + struct mr_mfc _c; union { struct { - unsigned long expires; - struct sk_buff_head unresolved; /* Unresolved buffers */ - } unres; - struct { - unsigned long last_assert; - int minvif; - int maxvif; - unsigned long bytes; - unsigned long pkt; - unsigned long wrong_if; - unsigned long lastuse; - unsigned char ttls[MAXMIFS]; /* TTL thresholds */ - } res; - } mfc_un; + struct in6_addr mf6c_mcastgrp; + struct in6_addr mf6c_origin; + }; + struct mfc6_cache_cmp_arg cmparg; + }; }; -#define MFC_STATIC 1 -#define MFC_NOTIFY 2 - -#define MFC6_LINES 64 - -#define MFC6_HASH(a, g) (((__force u32)(a)->s6_addr32[0] ^ \ - (__force u32)(a)->s6_addr32[1] ^ \ - (__force u32)(a)->s6_addr32[2] ^ \ - (__force u32)(a)->s6_addr32[3] ^ \ - (__force u32)(g)->s6_addr32[0] ^ \ - (__force u32)(g)->s6_addr32[1] ^ \ - (__force u32)(g)->s6_addr32[2] ^ \ - (__force u32)(g)->s6_addr32[3]) % MFC6_LINES) - #define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. of asserts */ struct rtmsg; @@ -120,12 +98,12 @@ extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, u32 portid); #ifdef CONFIG_IPV6_MROUTE -extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb); +bool mroute6_is_socket(struct net *net, struct sk_buff *skb); extern int ip6mr_sk_done(struct sock *sk); #else -static inline struct sock *mroute6_socket(struct net *net, struct sk_buff *skb) +static inline bool mroute6_is_socket(struct net *net, struct sk_buff *skb) { - return NULL; + return false; } static inline int ip6mr_sk_done(struct sock *sk) { diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h new file mode 100644 index 000000000000..d617fe45543e --- /dev/null +++ b/include/linux/mroute_base.h @@ -0,0 +1,474 @@ +#ifndef __LINUX_MROUTE_BASE_H +#define __LINUX_MROUTE_BASE_H + +#include <linux/netdevice.h> +#include <linux/rhashtable.h> +#include <linux/spinlock.h> +#include <net/net_namespace.h> +#include <net/sock.h> +#include <net/fib_notifier.h> + +/** + * struct vif_device - interface representor for multicast routing + * @dev: network device being used + * @bytes_in: statistic; bytes ingressing + * @bytes_out: statistic; bytes egresing + * @pkt_in: statistic; packets ingressing + * @pkt_out: statistic; packets egressing + * @rate_limit: Traffic shaping (NI) + * @threshold: TTL threshold + * @flags: Control flags + * @link: Physical interface index + * @dev_parent_id: device parent id + * @local: Local address + * @remote: Remote address for tunnels + */ +struct vif_device { + struct net_device *dev; + unsigned long bytes_in, bytes_out; + unsigned long pkt_in, pkt_out; + unsigned long rate_limit; + unsigned char threshold; + unsigned short flags; + int link; + + /* Currently only used by ipmr */ + struct netdev_phys_item_id dev_parent_id; + __be32 local, remote; +}; + +struct vif_entry_notifier_info { + struct fib_notifier_info info; + struct net_device *dev; + unsigned short vif_index; + unsigned short vif_flags; + u32 
tb_id; +}; + +static inline int mr_call_vif_notifier(struct notifier_block *nb, + struct net *net, + unsigned short family, + enum fib_event_type event_type, + struct vif_device *vif, + unsigned short vif_index, u32 tb_id) +{ + struct vif_entry_notifier_info info = { + .info = { + .family = family, + .net = net, + }, + .dev = vif->dev, + .vif_index = vif_index, + .vif_flags = vif->flags, + .tb_id = tb_id, + }; + + return call_fib_notifier(nb, net, event_type, &info.info); +} + +static inline int mr_call_vif_notifiers(struct net *net, + unsigned short family, + enum fib_event_type event_type, + struct vif_device *vif, + unsigned short vif_index, u32 tb_id, + unsigned int *ipmr_seq) +{ + struct vif_entry_notifier_info info = { + .info = { + .family = family, + .net = net, + }, + .dev = vif->dev, + .vif_index = vif_index, + .vif_flags = vif->flags, + .tb_id = tb_id, + }; + + ASSERT_RTNL(); + (*ipmr_seq)++; + return call_fib_notifiers(net, event_type, &info.info); +} + +#ifndef MAXVIFS +/* This one is nasty; value is defined in uapi using different symbols for + * mroute and morute6 but both map into same 32. + */ +#define MAXVIFS 32 +#endif + +#define VIF_EXISTS(_mrt, _idx) (!!((_mrt)->vif_table[_idx].dev)) + +/* mfc_flags: + * MFC_STATIC - the entry was added statically (not by a routing daemon) + * MFC_OFFLOAD - the entry was offloaded to the hardware + */ +enum { + MFC_STATIC = BIT(0), + MFC_OFFLOAD = BIT(1), +}; + +/** + * struct mr_mfc - common multicast routing entries + * @mnode: rhashtable list + * @mfc_parent: source interface (iif) + * @mfc_flags: entry flags + * @expires: unresolved entry expire time + * @unresolved: unresolved cached skbs + * @last_assert: time of last assert + * @minvif: minimum VIF id + * @maxvif: maximum VIF id + * @bytes: bytes that have passed for this entry + * @pkt: packets that have passed for this entry + * @wrong_if: number of wrong source interface hits + * @lastuse: time of last use of the group (traffic or update) + * @ttls: OIF TTL threshold array + * @refcount: reference count for this entry + * @list: global entry list + * @rcu: used for entry destruction + * @free: Operation used for freeing an entry under RCU + */ +struct mr_mfc { + struct rhlist_head mnode; + unsigned short mfc_parent; + int mfc_flags; + + union { + struct { + unsigned long expires; + struct sk_buff_head unresolved; + } unres; + struct { + unsigned long last_assert; + int minvif; + int maxvif; + unsigned long bytes; + unsigned long pkt; + unsigned long wrong_if; + unsigned long lastuse; + unsigned char ttls[MAXVIFS]; + refcount_t refcount; + } res; + } mfc_un; + struct list_head list; + struct rcu_head rcu; + void (*free)(struct rcu_head *head); +}; + +static inline void mr_cache_put(struct mr_mfc *c) +{ + if (refcount_dec_and_test(&c->mfc_un.res.refcount)) + call_rcu(&c->rcu, c->free); +} + +static inline void mr_cache_hold(struct mr_mfc *c) +{ + refcount_inc(&c->mfc_un.res.refcount); +} + +struct mfc_entry_notifier_info { + struct fib_notifier_info info; + struct mr_mfc *mfc; + u32 tb_id; +}; + +static inline int mr_call_mfc_notifier(struct notifier_block *nb, + struct net *net, + unsigned short family, + enum fib_event_type event_type, + struct mr_mfc *mfc, u32 tb_id) +{ + struct mfc_entry_notifier_info info = { + .info = { + .family = family, + .net = net, + }, + .mfc = mfc, + .tb_id = tb_id + }; + + return call_fib_notifier(nb, net, event_type, &info.info); +} + +static inline int mr_call_mfc_notifiers(struct net *net, + unsigned short family, + enum fib_event_type 
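A sketch of how a protocol is expected to wire the shared refcounting above: embed struct mr_mfc as the first member (as mfc_cache and mfc6_cache now do), set ->free once at allocation, then rely on mr_cache_hold()/mr_cache_put(). The demo_ names are illustrative:

#include <linux/slab.h>
#include <linux/mroute_base.h>

struct demo_entry {
	struct mr_mfc _c;	/* must stay first so casts keep working */
	__be32 group;
	__be32 origin;
};

static void demo_cache_free(struct rcu_head *head)
{
	struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);

	kfree(container_of(c, struct demo_entry, _c));
}

static struct demo_entry *demo_entry_alloc(void)
{
	struct demo_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return NULL;
	e->_c.free = demo_cache_free;
	refcount_set(&e->_c.mfc_un.res.refcount, 1);
	return e;
}
/* a later mr_cache_put(&e->_c) drops the last reference and frees via RCU */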
event_type, + struct mr_mfc *mfc, u32 tb_id, + unsigned int *ipmr_seq) +{ + struct mfc_entry_notifier_info info = { + .info = { + .family = family, + .net = net, + }, + .mfc = mfc, + .tb_id = tb_id + }; + + ASSERT_RTNL(); + (*ipmr_seq)++; + return call_fib_notifiers(net, event_type, &info.info); +} + +struct mr_table; + +/** + * struct mr_table_ops - callbacks and info for protocol-specific ops + * @rht_params: parameters for accessing the MFC hash + * @cmparg_any: a hash key to be used for matching on (*,*) routes + */ +struct mr_table_ops { + const struct rhashtable_params *rht_params; + void *cmparg_any; +}; + +/** + * struct mr_table - a multicast routing table + * @list: entry within a list of multicast routing tables + * @net: net where this table belongs + * @ops: protocol specific operations + * @id: identifier of the table + * @mroute_sk: socket associated with the table + * @ipmr_expire_timer: timer for handling unresolved routes + * @mfc_unres_queue: list of unresolved MFC entries + * @vif_table: array containing all possible vifs + * @mfc_hash: Hash table of all resolved routes for easy lookup + * @mfc_cache_list: list of resovled routes for possible traversal + * @maxvif: Identifier of highest value vif currently in use + * @cache_resolve_queue_len: current size of unresolved queue + * @mroute_do_assert: Whether to inform userspace on wrong ingress + * @mroute_do_pim: Whether to receive IGMP PIMv1 + * @mroute_reg_vif_num: PIM-device vif index + */ +struct mr_table { + struct list_head list; + possible_net_t net; + struct mr_table_ops ops; + u32 id; + struct sock __rcu *mroute_sk; + struct timer_list ipmr_expire_timer; + struct list_head mfc_unres_queue; + struct vif_device vif_table[MAXVIFS]; + struct rhltable mfc_hash; + struct list_head mfc_cache_list; + int maxvif; + atomic_t cache_resolve_queue_len; + bool mroute_do_assert; + bool mroute_do_pim; + int mroute_reg_vif_num; +}; + +#ifdef CONFIG_IP_MROUTE_COMMON +void vif_device_init(struct vif_device *v, + struct net_device *dev, + unsigned long rate_limit, + unsigned char threshold, + unsigned short flags, + unsigned short get_iflink_mask); + +struct mr_table * +mr_table_alloc(struct net *net, u32 id, + struct mr_table_ops *ops, + void (*expire_func)(struct timer_list *t), + void (*table_set)(struct mr_table *mrt, + struct net *net)); + +/* These actually return 'struct mr_mfc *', but to avoid need for explicit + * castings they simply return void. 
+ */ +void *mr_mfc_find_parent(struct mr_table *mrt, + void *hasharg, int parent); +void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi); +void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg); + +int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, + struct mr_mfc *c, struct rtmsg *rtm); +int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb, + struct mr_table *(*iter)(struct net *net, + struct mr_table *mrt), + int (*fill)(struct mr_table *mrt, + struct sk_buff *skb, + u32 portid, u32 seq, struct mr_mfc *c, + int cmd, int flags), + spinlock_t *lock); + +int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family, + int (*rules_dump)(struct net *net, + struct notifier_block *nb), + struct mr_table *(*mr_iter)(struct net *net, + struct mr_table *mrt), + rwlock_t *mrt_lock); +#else +static inline void vif_device_init(struct vif_device *v, + struct net_device *dev, + unsigned long rate_limit, + unsigned char threshold, + unsigned short flags, + unsigned short get_iflink_mask) +{ +} + +static inline void * +mr_table_alloc(struct net *net, u32 id, + struct mr_table_ops *ops, + void (*expire_func)(struct timer_list *t), + void (*table_set)(struct mr_table *mrt, + struct net *net)) +{ + return NULL; +} + +static inline void *mr_mfc_find_parent(struct mr_table *mrt, + void *hasharg, int parent) +{ + return NULL; +} + +static inline void *mr_mfc_find_any_parent(struct mr_table *mrt, + int vifi) +{ + return NULL; +} + +static inline struct mr_mfc *mr_mfc_find_any(struct mr_table *mrt, + int vifi, void *hasharg) +{ + return NULL; +} + +static inline int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, + struct mr_mfc *c, struct rtmsg *rtm) +{ + return -EINVAL; +} + +static inline int +mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb, + struct mr_table *(*iter)(struct net *net, + struct mr_table *mrt), + int (*fill)(struct mr_table *mrt, + struct sk_buff *skb, + u32 portid, u32 seq, struct mr_mfc *c, + int cmd, int flags), + spinlock_t *lock) +{ + return -EINVAL; +} + +static inline int mr_dump(struct net *net, struct notifier_block *nb, + unsigned short family, + int (*rules_dump)(struct net *net, + struct notifier_block *nb), + struct mr_table *(*mr_iter)(struct net *net, + struct mr_table *mrt), + rwlock_t *mrt_lock) +{ + return -EINVAL; +} +#endif + +static inline void *mr_mfc_find(struct mr_table *mrt, void *hasharg) +{ + return mr_mfc_find_parent(mrt, hasharg, -1); +} + +#ifdef CONFIG_PROC_FS +struct mr_vif_iter { + struct seq_net_private p; + struct mr_table *mrt; + int ct; +}; + +struct mr_mfc_iter { + struct seq_net_private p; + struct mr_table *mrt; + struct list_head *cache; + + /* Lock protecting the mr_table's unresolved queue */ + spinlock_t *lock; +}; + +#ifdef CONFIG_IP_MROUTE_COMMON +void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos); +void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos); + +static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos) +{ + return *pos ? mr_vif_seq_idx(seq_file_net(seq), + seq->private, *pos - 1) + : SEQ_START_TOKEN; +} + +/* These actually return 'struct mr_mfc *', but to avoid need for explicit + * castings they simply return void. 
+ */ +void *mr_mfc_seq_idx(struct net *net, + struct mr_mfc_iter *it, loff_t pos); +void *mr_mfc_seq_next(struct seq_file *seq, void *v, + loff_t *pos); + +static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos, + struct mr_table *mrt, spinlock_t *lock) +{ + struct mr_mfc_iter *it = seq->private; + + it->mrt = mrt; + it->cache = NULL; + it->lock = lock; + + return *pos ? mr_mfc_seq_idx(seq_file_net(seq), + seq->private, *pos - 1) + : SEQ_START_TOKEN; +} + +static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v) +{ + struct mr_mfc_iter *it = seq->private; + struct mr_table *mrt = it->mrt; + + if (it->cache == &mrt->mfc_unres_queue) + spin_unlock_bh(it->lock); + else if (it->cache == &mrt->mfc_cache_list) + rcu_read_unlock(); +} +#else +static inline void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, + loff_t pos) +{ + return NULL; +} + +static inline void *mr_vif_seq_next(struct seq_file *seq, + void *v, loff_t *pos) +{ + return NULL; +} + +static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos) +{ + return NULL; +} + +static inline void *mr_mfc_seq_idx(struct net *net, + struct mr_mfc_iter *it, loff_t pos) +{ + return NULL; +} + +static inline void *mr_mfc_seq_next(struct seq_file *seq, void *v, + loff_t *pos) +{ + return NULL; +} + +static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos, + struct mr_table *mrt, spinlock_t *lock) +{ + return NULL; +} + +static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v) +{ +} +#endif +#endif +#endif diff --git a/include/linux/msg.h b/include/linux/msg.h index 0a7eefeee0d1..9a972a296b95 100644 --- a/include/linux/msg.h +++ b/include/linux/msg.h @@ -3,7 +3,6 @@ #define _LINUX_MSG_H #include <linux/list.h> -#include <linux/time64.h> #include <uapi/linux/msg.h> /* one msg_msg structure for each message */ @@ -16,21 +15,4 @@ struct msg_msg { /* the actual message follows immediately */ }; -/* one msq_queue structure for each present queue on the system */ -struct msg_queue { - struct kern_ipc_perm q_perm; - time64_t q_stime; /* last msgsnd time */ - time64_t q_rtime; /* last msgrcv time */ - time64_t q_ctime; /* last change time */ - unsigned long q_cbytes; /* current number of bytes on queue */ - unsigned long q_qnum; /* number of messages in queue */ - unsigned long q_qbytes; /* max number of bytes on queue */ - pid_t q_lspid; /* pid of last msgsnd */ - pid_t q_lrpid; /* last receive pid */ - - struct list_head q_messages; - struct list_head q_receivers; - struct list_head q_senders; -} __randomize_layout; - #endif /* _LINUX_MSG_H */ diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h index 3bf8f954b642..3102bd754d18 100644 --- a/include/linux/mtd/bbm.h +++ b/include/linux/mtd/bbm.h @@ -1,6 +1,4 @@ /* - * linux/include/linux/mtd/bbm.h - * * NAND family Bad Block Management (BBM) header file * - Bad Block Table (BBT) implementation * diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 205ededccc60..a86c4fa93115 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -30,32 +30,19 @@ #include <asm/div64.h> -#define MTD_ERASE_PENDING 0x01 -#define MTD_ERASING 0x02 -#define MTD_ERASE_SUSPEND 0x04 -#define MTD_ERASE_DONE 0x08 -#define MTD_ERASE_FAILED 0x10 - #define MTD_FAIL_ADDR_UNKNOWN -1LL +struct mtd_info; + /* * If the erase fails, fail_addr might indicate exactly which block failed. If * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level * or was not specific to any particular block. 
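With the async state machine and callback fields dropped from struct erase_info (shown just below), an erase request becomes a plain synchronous call and fail_addr can be inspected directly on error. A minimal sketch, assuming the usual mtd_erase() entry point:

#include <linux/mtd/mtd.h>

static int demo_erase_range(struct mtd_info *mtd, uint64_t addr, uint64_t len)
{
	struct erase_info instr = {
		.addr = addr,
		.len  = len,
	};
	int ret;

	ret = mtd_erase(mtd, &instr);
	if (ret && instr.fail_addr != MTD_FAIL_ADDR_UNKNOWN)
		pr_err("erase failed at 0x%llx\n",
		       (unsigned long long)instr.fail_addr);

	return ret;
}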
*/ struct erase_info { - struct mtd_info *mtd; uint64_t addr; uint64_t len; uint64_t fail_addr; - u_long time; - u_long retries; - unsigned dev; - unsigned cell; - void (*callback) (struct erase_info *self); - u_long priv; - u_char state; - struct erase_info *next; }; struct mtd_erase_region_info { @@ -595,8 +582,6 @@ extern void register_mtd_user (struct mtd_notifier *new); extern int unregister_mtd_user (struct mtd_notifier *old); void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size); -void mtd_erase_callback(struct erase_info *instr); - static inline int mtd_is_bitflip(int err) { return err == -EUCLEAN; } diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h new file mode 100644 index 000000000000..792ea5c26329 --- /dev/null +++ b/include/linux/mtd/nand.h @@ -0,0 +1,731 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2017 - Free Electrons + * + * Authors: + * Boris Brezillon <boris.brezillon@free-electrons.com> + * Peter Pan <peterpandong@micron.com> + */ + +#ifndef __LINUX_MTD_NAND_H +#define __LINUX_MTD_NAND_H + +#include <linux/mtd/mtd.h> + +/** + * struct nand_memory_organization - Memory organization structure + * @bits_per_cell: number of bits per NAND cell + * @pagesize: page size + * @oobsize: OOB area size + * @pages_per_eraseblock: number of pages per eraseblock + * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number) + * @planes_per_lun: number of planes per LUN + * @luns_per_target: number of LUN per target (target is a synonym for die) + * @ntargets: total number of targets exposed by the NAND device + */ +struct nand_memory_organization { + unsigned int bits_per_cell; + unsigned int pagesize; + unsigned int oobsize; + unsigned int pages_per_eraseblock; + unsigned int eraseblocks_per_lun; + unsigned int planes_per_lun; + unsigned int luns_per_target; + unsigned int ntargets; +}; + +#define NAND_MEMORG(bpc, ps, os, ppe, epl, ppl, lpt, nt) \ + { \ + .bits_per_cell = (bpc), \ + .pagesize = (ps), \ + .oobsize = (os), \ + .pages_per_eraseblock = (ppe), \ + .eraseblocks_per_lun = (epl), \ + .planes_per_lun = (ppl), \ + .luns_per_target = (lpt), \ + .ntargets = (nt), \ + } + +/** + * struct nand_row_converter - Information needed to convert an absolute offset + * into a row address + * @lun_addr_shift: position of the LUN identifier in the row address + * @eraseblock_addr_shift: position of the eraseblock identifier in the row + * address + */ +struct nand_row_converter { + unsigned int lun_addr_shift; + unsigned int eraseblock_addr_shift; +}; + +/** + * struct nand_pos - NAND position object + * @target: the NAND target/die + * @lun: the LUN identifier + * @plane: the plane within the LUN + * @eraseblock: the eraseblock within the LUN + * @page: the page within the LUN + * + * These information are usually used by specific sub-layers to select the + * appropriate target/die and generate a row address to pass to the device. 
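As an illustration of the NAND_MEMORG() initializer above, a hypothetical SLC chip with 4 KiB pages, 64 pages per eraseblock, two planes and a single die could be described as follows; the values are invented for the example, not taken from any real part.

#include <linux/mtd/nand.h>

/* Illustrative values only; a real driver fills these from the chip's
 * parameter page or ID data at detection time. */
static const struct nand_memory_organization demo_memorg =
	NAND_MEMORG(1,		/* bits_per_cell (SLC) */
		    4096,	/* pagesize */
		    256,	/* oobsize */
		    64,		/* pages_per_eraseblock */
		    2048,	/* eraseblocks_per_lun */
		    2,		/* planes_per_lun */
		    1,		/* luns_per_target */
		    1);		/* ntargets */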
+ */ +struct nand_pos { + unsigned int target; + unsigned int lun; + unsigned int plane; + unsigned int eraseblock; + unsigned int page; +}; + +/** + * struct nand_page_io_req - NAND I/O request object + * @pos: the position this I/O request is targeting + * @dataoffs: the offset within the page + * @datalen: number of data bytes to read from/write to this page + * @databuf: buffer to store data in or get data from + * @ooboffs: the OOB offset within the page + * @ooblen: the number of OOB bytes to read from/write to this page + * @oobbuf: buffer to store OOB data in or get OOB data from + * + * This object is used to pass per-page I/O requests to NAND sub-layers. This + * way all useful information are already formatted in a useful way and + * specific NAND layers can focus on translating these information into + * specific commands/operations. + */ +struct nand_page_io_req { + struct nand_pos pos; + unsigned int dataoffs; + unsigned int datalen; + union { + const void *out; + void *in; + } databuf; + unsigned int ooboffs; + unsigned int ooblen; + union { + const void *out; + void *in; + } oobbuf; +}; + +/** + * struct nand_ecc_req - NAND ECC requirements + * @strength: ECC strength + * @step_size: ECC step/block size + */ +struct nand_ecc_req { + unsigned int strength; + unsigned int step_size; +}; + +#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) } + +/** + * struct nand_bbt - bad block table object + * @cache: in memory BBT cache + */ +struct nand_bbt { + unsigned long *cache; +}; + +struct nand_device; + +/** + * struct nand_ops - NAND operations + * @erase: erase a specific block. No need to check if the block is bad before + * erasing, this has been taken care of by the generic NAND layer + * @markbad: mark a specific block bad. No need to check if the block is + * already marked bad, this has been taken care of by the generic + * NAND layer. This method should just write the BBM (Bad Block + * Marker) so that future call to struct_nand_ops->isbad() return + * true + * @isbad: check whether a block is bad or not. This method should just read + * the BBM and return whether the block is bad or not based on what it + * reads + * + * These are all low level operations that should be implemented by specialized + * NAND layers (SPI NAND, raw NAND, ...). + */ +struct nand_ops { + int (*erase)(struct nand_device *nand, const struct nand_pos *pos); + int (*markbad)(struct nand_device *nand, const struct nand_pos *pos); + bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos); +}; + +/** + * struct nand_device - NAND device + * @mtd: MTD instance attached to the NAND device + * @memorg: memory layout + * @eccreq: ECC requirements + * @rowconv: position to row address converter + * @bbt: bad block table info + * @ops: NAND operations attached to the NAND device + * + * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND) + * should declare their own NAND object embedding a nand_device struct (that's + * how inheritance is done). + * struct_nand_device->memorg and struct_nand_device->eccreq should be filled + * at device detection time to reflect the NAND device + * capabilities/requirements. Once this is done nanddev_init() can be called. + * It will take care of converting NAND information into MTD ones, which means + * the specialized NAND layers should never manually tweak + * struct_nand_device->mtd except for the ->_read/write() hooks. 
+ */ +struct nand_device { + struct mtd_info mtd; + struct nand_memory_organization memorg; + struct nand_ecc_req eccreq; + struct nand_row_converter rowconv; + struct nand_bbt bbt; + const struct nand_ops *ops; +}; + +/** + * struct nand_io_iter - NAND I/O iterator + * @req: current I/O request + * @oobbytes_per_page: maximum number of OOB bytes per page + * @dataleft: remaining number of data bytes to read/write + * @oobleft: remaining number of OOB bytes to read/write + * + * Can be used by specialized NAND layers to iterate over all pages covered + * by an MTD I/O request, which should greatly simplifies the boiler-plate + * code needed to read/write data from/to a NAND device. + */ +struct nand_io_iter { + struct nand_page_io_req req; + unsigned int oobbytes_per_page; + unsigned int dataleft; + unsigned int oobleft; +}; + +/** + * mtd_to_nanddev() - Get the NAND device attached to the MTD instance + * @mtd: MTD instance + * + * Return: the NAND device embedding @mtd. + */ +static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd) +{ + return container_of(mtd, struct nand_device, mtd); +} + +/** + * nanddev_to_mtd() - Get the MTD device attached to a NAND device + * @nand: NAND device + * + * Return: the MTD device embedded in @nand. + */ +static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand) +{ + return &nand->mtd; +} + +/* + * nanddev_bits_per_cell() - Get the number of bits per cell + * @nand: NAND device + * + * Return: the number of bits per cell. + */ +static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand) +{ + return nand->memorg.bits_per_cell; +} + +/** + * nanddev_page_size() - Get NAND page size + * @nand: NAND device + * + * Return: the page size. + */ +static inline size_t nanddev_page_size(const struct nand_device *nand) +{ + return nand->memorg.pagesize; +} + +/** + * nanddev_per_page_oobsize() - Get NAND OOB size + * @nand: NAND device + * + * Return: the OOB size. + */ +static inline unsigned int +nanddev_per_page_oobsize(const struct nand_device *nand) +{ + return nand->memorg.oobsize; +} + +/** + * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock + * @nand: NAND device + * + * Return: the number of pages per eraseblock. + */ +static inline unsigned int +nanddev_pages_per_eraseblock(const struct nand_device *nand) +{ + return nand->memorg.pages_per_eraseblock; +} + +/** + * nanddev_per_page_oobsize() - Get NAND erase block size + * @nand: NAND device + * + * Return: the eraseblock size. + */ +static inline size_t nanddev_eraseblock_size(const struct nand_device *nand) +{ + return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock; +} + +/** + * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN + * @nand: NAND device + * + * Return: the number of eraseblocks per LUN. + */ +static inline unsigned int +nanddev_eraseblocks_per_lun(const struct nand_device *nand) +{ + return nand->memorg.eraseblocks_per_lun; +} + +/** + * nanddev_target_size() - Get the total size provided by a single target/die + * @nand: NAND device + * + * Return: the total size exposed by a single target/die in bytes. + */ +static inline u64 nanddev_target_size(const struct nand_device *nand) +{ + return (u64)nand->memorg.luns_per_target * + nand->memorg.eraseblocks_per_lun * + nand->memorg.pages_per_eraseblock * + nand->memorg.pagesize; +} + +/** + * nanddev_ntarget() - Get the total of targets + * @nand: NAND device + * + * Return: the number of targets/dies exposed by @nand. 
+ */ +static inline unsigned int nanddev_ntargets(const struct nand_device *nand) +{ + return nand->memorg.ntargets; +} + +/** + * nanddev_neraseblocks() - Get the total number of erasablocks + * @nand: NAND device + * + * Return: the total number of eraseblocks exposed by @nand. + */ +static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand) +{ + return (u64)nand->memorg.luns_per_target * + nand->memorg.eraseblocks_per_lun * + nand->memorg.pages_per_eraseblock; +} + +/** + * nanddev_size() - Get NAND size + * @nand: NAND device + * + * Return: the total size (in bytes) exposed by @nand. + */ +static inline u64 nanddev_size(const struct nand_device *nand) +{ + return nanddev_target_size(nand) * nanddev_ntargets(nand); +} + +/** + * nanddev_get_memorg() - Extract memory organization info from a NAND device + * @nand: NAND device + * + * This can be used by the upper layer to fill the memorg info before calling + * nanddev_init(). + * + * Return: the memorg object embedded in the NAND device. + */ +static inline struct nand_memory_organization * +nanddev_get_memorg(struct nand_device *nand) +{ + return &nand->memorg; +} + +int nanddev_init(struct nand_device *nand, const struct nand_ops *ops, + struct module *owner); +void nanddev_cleanup(struct nand_device *nand); + +/** + * nanddev_register() - Register a NAND device + * @nand: NAND device + * + * Register a NAND device. + * This function is just a wrapper around mtd_device_register() + * registering the MTD device embedded in @nand. + * + * Return: 0 in case of success, a negative error code otherwise. + */ +static inline int nanddev_register(struct nand_device *nand) +{ + return mtd_device_register(&nand->mtd, NULL, 0); +} + +/** + * nanddev_unregister() - Unregister a NAND device + * @nand: NAND device + * + * Unregister a NAND device. + * This function is just a wrapper around mtd_device_unregister() + * unregistering the MTD device embedded in @nand. + * + * Return: 0 in case of success, a negative error code otherwise. + */ +static inline int nanddev_unregister(struct nand_device *nand) +{ + return mtd_device_unregister(&nand->mtd); +} + +/** + * nanddev_set_of_node() - Attach a DT node to a NAND device + * @nand: NAND device + * @np: DT node + * + * Attach a DT node to a NAND device. + */ +static inline void nanddev_set_of_node(struct nand_device *nand, + struct device_node *np) +{ + mtd_set_of_node(&nand->mtd, np); +} + +/** + * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device + * @nand: NAND device + * + * Return: the DT node attached to @nand. + */ +static inline struct device_node *nanddev_get_of_node(struct nand_device *nand) +{ + return mtd_get_of_node(&nand->mtd); +} + +/** + * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position + * @nand: NAND device + * @offs: absolute NAND offset (usually passed by the MTD layer) + * @pos: a NAND position object to fill in + * + * Converts @offs into a nand_pos representation. + * + * Return: the offset within the NAND page pointed by @pos. 
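To make the inheritance model described above concrete, here is a minimal, hypothetical specialized layer embedding struct nand_device, providing struct nand_ops and registering the resulting MTD device. None of the demo_* symbols exist in the kernel; this is only a sketch of the intended flow.

#include <linux/module.h>
#include <linux/mtd/nand.h>

struct demo_nand {
	struct nand_device base;
	/* controller/bus specific state would live here */
};

static int demo_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	/* issue the block erase for the eraseblock described by @pos */
	return 0;
}

static int demo_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	/* write a bad block marker for the eraseblock described by @pos */
	return 0;
}

static bool demo_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	/* read back the bad block marker */
	return false;
}

static const struct nand_ops demo_nand_ops = {
	.erase = demo_erase,
	.markbad = demo_markbad,
	.isbad = demo_isbad,
};

static int demo_nand_probe(struct demo_nand *dnand)
{
	int ret;

	/* fill dnand->base.memorg and dnand->base.eccreq from the detected
	 * chip before handing control to the generic layer */
	ret = nanddev_init(&dnand->base, &demo_nand_ops, THIS_MODULE);
	if (ret)
		return ret;

	return nanddev_register(&dnand->base);
}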
+ */ +static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand, + loff_t offs, + struct nand_pos *pos) +{ + unsigned int pageoffs; + u64 tmp = offs; + + pageoffs = do_div(tmp, nand->memorg.pagesize); + pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock); + pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun); + pos->plane = pos->eraseblock % nand->memorg.planes_per_lun; + pos->lun = do_div(tmp, nand->memorg.luns_per_target); + pos->target = tmp; + + return pageoffs; +} + +/** + * nanddev_pos_cmp() - Compare two NAND positions + * @a: First NAND position + * @b: Second NAND position + * + * Compares two NAND positions. + * + * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b. + */ +static inline int nanddev_pos_cmp(const struct nand_pos *a, + const struct nand_pos *b) +{ + if (a->target != b->target) + return a->target < b->target ? -1 : 1; + + if (a->lun != b->lun) + return a->lun < b->lun ? -1 : 1; + + if (a->eraseblock != b->eraseblock) + return a->eraseblock < b->eraseblock ? -1 : 1; + + if (a->page != b->page) + return a->page < b->page ? -1 : 1; + + return 0; +} + +/** + * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset + * @nand: NAND device + * @pos: the NAND position to convert + * + * Converts @pos NAND position into an absolute offset. + * + * Return: the absolute offset. Note that @pos points to the beginning of a + * page, if one wants to point to a specific offset within this page + * the returned offset has to be adjusted manually. + */ +static inline loff_t nanddev_pos_to_offs(struct nand_device *nand, + const struct nand_pos *pos) +{ + unsigned int npages; + + npages = pos->page + + ((pos->eraseblock + + (pos->lun + + (pos->target * nand->memorg.luns_per_target)) * + nand->memorg.eraseblocks_per_lun) * + nand->memorg.pages_per_eraseblock); + + return (loff_t)npages * nand->memorg.pagesize; +} + +/** + * nanddev_pos_to_row() - Extract a row address from a NAND position + * @nand: NAND device + * @pos: the position to convert + * + * Converts a NAND position into a row address that can then be passed to the + * device. + * + * Return: the row address extracted from @pos. + */ +static inline unsigned int nanddev_pos_to_row(struct nand_device *nand, + const struct nand_pos *pos) +{ + return (pos->lun << nand->rowconv.lun_addr_shift) | + (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) | + pos->page; +} + +/** + * nanddev_pos_next_target() - Move a position to the next target/die + * @nand: NAND device + * @pos: the position to update + * + * Updates @pos to point to the start of the next target/die. Useful when you + * want to iterate over all targets/dies of a NAND device. + */ +static inline void nanddev_pos_next_target(struct nand_device *nand, + struct nand_pos *pos) +{ + pos->page = 0; + pos->plane = 0; + pos->eraseblock = 0; + pos->lun = 0; + pos->target++; +} + +/** + * nanddev_pos_next_lun() - Move a position to the next LUN + * @nand: NAND device + * @pos: the position to update + * + * Updates @pos to point to the start of the next LUN. Useful when you want to + * iterate over all LUNs of a NAND device. 
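A small illustrative helper combining the two converters above, turning an absolute MTD offset into the row address that would be sent to the device; demo_offs_to_row() is hypothetical.

static unsigned int demo_offs_to_row(struct nand_device *nand, loff_t offs)
{
	struct nand_pos pos;

	/* the return value is the byte offset within the page, unused here */
	nanddev_offs_to_pos(nand, offs, &pos);

	return nanddev_pos_to_row(nand, &pos);
}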
+ */ +static inline void nanddev_pos_next_lun(struct nand_device *nand, + struct nand_pos *pos) +{ + if (pos->lun >= nand->memorg.luns_per_target - 1) + return nanddev_pos_next_target(nand, pos); + + pos->lun++; + pos->page = 0; + pos->plane = 0; + pos->eraseblock = 0; +} + +/** + * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock + * @nand: NAND device + * @pos: the position to update + * + * Updates @pos to point to the start of the next eraseblock. Useful when you + * want to iterate over all eraseblocks of a NAND device. + */ +static inline void nanddev_pos_next_eraseblock(struct nand_device *nand, + struct nand_pos *pos) +{ + if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1) + return nanddev_pos_next_lun(nand, pos); + + pos->eraseblock++; + pos->page = 0; + pos->plane = pos->eraseblock % nand->memorg.planes_per_lun; +} + +/** + * nanddev_pos_next_eraseblock() - Move a position to the next page + * @nand: NAND device + * @pos: the position to update + * + * Updates @pos to point to the start of the next page. Useful when you want to + * iterate over all pages of a NAND device. + */ +static inline void nanddev_pos_next_page(struct nand_device *nand, + struct nand_pos *pos) +{ + if (pos->page >= nand->memorg.pages_per_eraseblock - 1) + return nanddev_pos_next_eraseblock(nand, pos); + + pos->page++; +} + +/** + * nand_io_iter_init - Initialize a NAND I/O iterator + * @nand: NAND device + * @offs: absolute offset + * @req: MTD request + * @iter: NAND I/O iterator + * + * Initializes a NAND iterator based on the information passed by the MTD + * layer. + */ +static inline void nanddev_io_iter_init(struct nand_device *nand, + loff_t offs, struct mtd_oob_ops *req, + struct nand_io_iter *iter) +{ + struct mtd_info *mtd = nanddev_to_mtd(nand); + + iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos); + iter->req.ooboffs = req->ooboffs; + iter->oobbytes_per_page = mtd_oobavail(mtd, req); + iter->dataleft = req->len; + iter->oobleft = req->ooblen; + iter->req.databuf.in = req->datbuf; + iter->req.datalen = min_t(unsigned int, + nand->memorg.pagesize - iter->req.dataoffs, + iter->dataleft); + iter->req.oobbuf.in = req->oobbuf; + iter->req.ooblen = min_t(unsigned int, + iter->oobbytes_per_page - iter->req.ooboffs, + iter->oobleft); +} + +/** + * nand_io_iter_next_page - Move to the next page + * @nand: NAND device + * @iter: NAND I/O iterator + * + * Updates the @iter to point to the next page. + */ +static inline void nanddev_io_iter_next_page(struct nand_device *nand, + struct nand_io_iter *iter) +{ + nanddev_pos_next_page(nand, &iter->req.pos); + iter->dataleft -= iter->req.datalen; + iter->req.databuf.in += iter->req.datalen; + iter->oobleft -= iter->req.ooblen; + iter->req.oobbuf.in += iter->req.ooblen; + iter->req.dataoffs = 0; + iter->req.ooboffs = 0; + iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize, + iter->dataleft); + iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page, + iter->oobleft); +} + +/** + * nand_io_iter_end - Should end iteration or not + * @nand: NAND device + * @iter: NAND I/O iterator + * + * Check whether @iter has reached the end of the NAND portion it was asked to + * iterate on or not. + * + * Return: true if @iter has reached the end of the iteration request, false + * otherwise. 
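As a usage illustration of this iterator (and of the nanddev_io_for_each_page() convenience macro defined just below), a specialized layer's read path might look roughly like this; demo_read_page() is a hypothetical per-page helper, not a real kernel function.

static int demo_mtd_read_oob(struct mtd_info *mtd, loff_t from,
			     struct mtd_oob_ops *ops)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	int ret;

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = demo_read_page(nand, &iter.req);
		if (ret)
			return ret;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	return 0;
}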
+ */ +static inline bool nanddev_io_iter_end(struct nand_device *nand, + const struct nand_io_iter *iter) +{ + if (iter->dataleft || iter->oobleft) + return false; + + return true; +} + +/** + * nand_io_for_each_page - Iterate over all NAND pages contained in an MTD I/O + * request + * @nand: NAND device + * @start: start address to read/write from + * @req: MTD I/O request + * @iter: NAND I/O iterator + * + * Should be used for iterate over pages that are contained in an MTD request. + */ +#define nanddev_io_for_each_page(nand, start, req, iter) \ + for (nanddev_io_iter_init(nand, start, req, iter); \ + !nanddev_io_iter_end(nand, iter); \ + nanddev_io_iter_next_page(nand, iter)) + +bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos); +bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos); +int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos); +int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos); + +/* BBT related functions */ +enum nand_bbt_block_status { + NAND_BBT_BLOCK_STATUS_UNKNOWN, + NAND_BBT_BLOCK_GOOD, + NAND_BBT_BLOCK_WORN, + NAND_BBT_BLOCK_RESERVED, + NAND_BBT_BLOCK_FACTORY_BAD, + NAND_BBT_BLOCK_NUM_STATUS, +}; + +int nanddev_bbt_init(struct nand_device *nand); +void nanddev_bbt_cleanup(struct nand_device *nand); +int nanddev_bbt_update(struct nand_device *nand); +int nanddev_bbt_get_block_status(const struct nand_device *nand, + unsigned int entry); +int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry, + enum nand_bbt_block_status status); +int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block); + +/** + * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry + * @nand: NAND device + * @pos: the NAND position we want to get BBT entry for + * + * Return the BBT entry used to store information about the eraseblock pointed + * by @pos. + * + * Return: the BBT entry storing information about eraseblock pointed by @pos. + */ +static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand, + const struct nand_pos *pos) +{ + return pos->eraseblock + + ((pos->lun + (pos->target * nand->memorg.luns_per_target)) * + nand->memorg.eraseblocks_per_lun); +} + +/** + * nanddev_bbt_is_initialized() - Check if the BBT has been initialized + * @nand: NAND device + * + * Return: true if the BBT has been initialized, false otherwise. + */ +static inline bool nanddev_bbt_is_initialized(struct nand_device *nand) +{ + return !!nand->bbt.cache; +} + +/* MTD -> NAND helper functions. */ +int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo); + +#endif /* __LINUX_MTD_NAND_H */ diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h index 4d8406c81652..8a2decf7462c 100644 --- a/include/linux/mtd/nand_ecc.h +++ b/include/linux/mtd/nand_ecc.h @@ -1,6 +1,4 @@ /* - * drivers/mtd/nand_ecc.h - * * Copyright (C) 2000-2010 Steven J. 
Hill <sjhill@realitydiluted.com> * David Woodhouse <dwmw2@infradead.org> * Thomas Gleixner <tglx@linutronix.de> diff --git a/include/linux/mtd/ndfc.h b/include/linux/mtd/ndfc.h index d0558a982628..357e88b3263a 100644 --- a/include/linux/mtd/ndfc.h +++ b/include/linux/mtd/ndfc.h @@ -1,6 +1,4 @@ /* - * linux/include/linux/mtd/ndfc.h - * * Copyright (c) 2006 Thomas Gleixner <tglx@linutronix.de> * * This program is free software; you can redistribute it and/or modify diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index c4beb70dacbd..11cb0c50cd84 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -77,6 +77,7 @@ struct mtd_part_parser { struct list_head list; struct module *owner; const char *name; + const struct of_device_id *of_match_table; int (*parse_fn)(struct mtd_info *, const struct mtd_partition **, struct mtd_part_parser_data *); void (*cleanup)(const struct mtd_partition *pparts, int nr_parts); diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 56c5570aadbe..5dad59b31244 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -21,6 +21,7 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/flashchip.h> #include <linux/mtd/bbm.h> +#include <linux/types.h> struct mtd_info; struct nand_flash_dev; @@ -235,7 +236,8 @@ struct nand_chip; #define ONFI_TIMING_MODE_5 (1 << 5) #define ONFI_TIMING_MODE_UNKNOWN (1 << 6) -/* ONFI feature address */ +/* ONFI feature number/address */ +#define ONFI_FEATURE_NUMBER 256 #define ONFI_FEATURE_ADDR_TIMING_MODE 0x1 /* Vendor-specific feature address (Micron) */ @@ -429,6 +431,47 @@ struct nand_jedec_params { __le16 crc; } __packed; +/** + * struct onfi_params - ONFI specific parameters that will be reused + * @version: ONFI version (BCD encoded), 0 if ONFI is not supported + * @tPROG: Page program time + * @tBERS: Block erase time + * @tR: Page read time + * @tCCS: Change column setup time + * @async_timing_mode: Supported asynchronous timing mode + * @vendor_revision: Vendor specific revision number + * @vendor: Vendor specific data + */ +struct onfi_params { + int version; + u16 tPROG; + u16 tBERS; + u16 tR; + u16 tCCS; + u16 async_timing_mode; + u16 vendor_revision; + u8 vendor[88]; +}; + +/** + * struct nand_parameters - NAND generic parameters from the parameter page + * @model: Model name + * @supports_set_get_features: The NAND chip supports setting/getting features + * @set_feature_list: Bitmap of features that can be set + * @get_feature_list: Bitmap of features that can be get + * @onfi: ONFI specific parameters + */ +struct nand_parameters { + /* Generic parameters */ + char model[100]; + bool supports_set_get_features; + DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER); + DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER); + + /* ONFI parameters */ + struct onfi_params onfi; +}; + /* The maximum expected count of bytes in the NAND ID sequence */ #define NAND_MAX_ID_LEN 8 @@ -1157,21 +1200,15 @@ int nand_op_parser_exec_op(struct nand_chip *chip, * currently in data_buf. * @subpagesize: [INTERN] holds the subpagesize * @id: [INTERN] holds NAND ID - * @onfi_version: [INTERN] holds the chip ONFI version (BCD encoded), - * non 0 if ONFI supported. - * @jedec_version: [INTERN] holds the chip JEDEC version (BCD encoded), - * non 0 if JEDEC supported. - * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is - * supported, 0 otherwise. 
- * @jedec_params: [INTERN] holds the JEDEC parameter page when JEDEC is - * supported, 0 otherwise. + * @parameters: [INTERN] holds generic parameters under an easily + * readable form. * @max_bb_per_die: [INTERN] the max number of bad blocks each die of a * this nand device will encounter their life times. * @blocks_per_die: [INTERN] The number of PEBs in a die * @data_interface: [INTERN] NAND interface timing information * @read_retries: [INTERN] the number of read retry modes supported - * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand - * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand + * @set_features: [REPLACEABLE] set the NAND chip features + * @get_features: [REPLACEABLE] get the NAND chip features * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this * means the configuration should not be applied but @@ -1212,10 +1249,10 @@ struct nand_chip { bool check_only); int (*erase)(struct mtd_info *mtd, int page); int (*scan_bbt)(struct mtd_info *mtd); - int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip, - int feature_addr, uint8_t *subfeature_para); - int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip, - int feature_addr, uint8_t *subfeature_para); + int (*set_features)(struct mtd_info *mtd, struct nand_chip *chip, + int feature_addr, uint8_t *subfeature_para); + int (*get_features)(struct mtd_info *mtd, struct nand_chip *chip, + int feature_addr, uint8_t *subfeature_para); int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode); int (*setup_data_interface)(struct mtd_info *mtd, int chipnr, const struct nand_data_interface *conf); @@ -1243,12 +1280,7 @@ struct nand_chip { int badblockbits; struct nand_id id; - int onfi_version; - int jedec_version; - union { - struct nand_onfi_params onfi_params; - struct nand_jedec_params jedec_params; - }; + struct nand_parameters parameters; u16 max_bb_per_die; u32 blocks_per_die; @@ -1535,26 +1567,13 @@ struct platform_nand_data { struct platform_nand_ctrl ctrl; }; -/* return the supported features. */ -static inline int onfi_feature(struct nand_chip *chip) -{ - return chip->onfi_version ? le16_to_cpu(chip->onfi_params.features) : 0; -} - /* return the supported asynchronous timing mode. */ static inline int onfi_get_async_timing_mode(struct nand_chip *chip) { - if (!chip->onfi_version) + if (!chip->parameters.onfi.version) return ONFI_TIMING_MODE_UNKNOWN; - return le16_to_cpu(chip->onfi_params.async_timing_mode); -} -/* return the supported synchronous timing mode. */ -static inline int onfi_get_sync_timing_mode(struct nand_chip *chip) -{ - if (!chip->onfi_version) - return ONFI_TIMING_MODE_UNKNOWN; - return le16_to_cpu(chip->onfi_params.src_sync_timing_mode); + return chip->parameters.onfi.async_timing_mode; } int onfi_fill_data_interface(struct nand_chip *chip, @@ -1591,13 +1610,6 @@ static inline int nand_opcode_8bits(unsigned int command) return 0; } -/* return the supported JEDEC features. */ -static inline int jedec_feature(struct nand_chip *chip) -{ - return chip->jedec_version ? le16_to_cpu(chip->jedec_params.features) - : 0; -} - /* get timing characteristics from ONFI timing mode. 
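To illustrate the reshuffle above (ONFI data now under chip->parameters.onfi, vendor-neutral ->set_features()/->get_features() hooks), a vendor helper could select a timing mode through the generic wrapper declared a little further below. This is only a sketch; demo_apply_timing_mode() is hypothetical.

#include <linux/mtd/rawnand.h>

static int demo_apply_timing_mode(struct nand_chip *chip, u8 mode)
{
	u8 param[4] = { mode, };	/* ONFI subfeature parameters are 4 bytes */

	if (!chip->parameters.onfi.version)
		return -ENOTSUPP;

	if (!(chip->parameters.onfi.async_timing_mode & BIT(mode)))
		return -EINVAL;

	return nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE, param);
}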
*/ const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode); @@ -1629,10 +1641,12 @@ int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, int page); +/* Wrapper to use in order for controllers/vendors to GET/SET FEATURES */ +int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param); +int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param); /* Stub used by drivers that do not support GET/SET FEATURES operations */ -int nand_onfi_get_set_features_notsupp(struct mtd_info *mtd, - struct nand_chip *chip, int addr, - u8 *subfeature_param); +int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip, + int addr, u8 *subfeature_param); /* Default read_page_raw implementation */ int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, diff --git a/include/linux/mutex.h b/include/linux/mutex.h index cb3bbed4e633..14bc0d5d0ee5 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -14,7 +14,6 @@ #include <asm/current.h> #include <linux/list.h> #include <linux/spinlock_types.h> -#include <linux/linkage.h> #include <linux/lockdep.h> #include <linux/atomic.h> #include <asm/processor.h> diff --git a/include/linux/nd.h b/include/linux/nd.h index 5dc6b695437d..43c181a6add5 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -180,6 +180,12 @@ struct nd_region; void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event); int __must_check __nd_driver_register(struct nd_device_driver *nd_drv, struct module *module, const char *mod_name); +static inline void nd_driver_unregister(struct nd_device_driver *drv) +{ + driver_unregister(&drv->drv); +} #define nd_driver_register(driver) \ __nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) +#define module_nd_driver(driver) \ + module_driver(driver, nd_driver_register, nd_driver_unregister) #endif /* __LINUX_ND_H__ */ diff --git a/include/linux/net.h b/include/linux/net.h index 91216b16feb7..2248a052061d 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -146,7 +146,7 @@ struct proto_ops { struct socket *newsock, int flags, bool kern); int (*getname) (struct socket *sock, struct sockaddr *addr, - int *sockaddr_len, int peer); + int peer); __poll_t (*poll) (struct file *file, struct socket *sock, struct poll_table_struct *wait); int (*ioctl) (struct socket *sock, unsigned int cmd, @@ -222,6 +222,7 @@ enum { int sock_wake_async(struct socket_wq *sk_wq, int how, int band); int sock_register(const struct net_proto_family *fam); void sock_unregister(int family); +bool sock_is_registered(int family); int __sock_create(struct net *net, int family, int type, int proto, struct socket **res, int kern); int sock_create(int family, int type, int proto, struct socket **res); @@ -294,10 +295,8 @@ int kernel_listen(struct socket *sock, int backlog); int kernel_accept(struct socket *sock, struct socket **newsock, int flags); int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, int flags); -int kernel_getsockname(struct socket *sock, struct sockaddr *addr, - int *addrlen); -int kernel_getpeername(struct socket *sock, struct sockaddr *addr, - int *addrlen); +int kernel_getsockname(struct socket *sock, struct sockaddr *addr); +int kernel_getpeername(struct socket *sock, struct sockaddr *addr); int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval, int *optlen); int kernel_setsockopt(struct 
socket *sock, int level, int optname, char *optval, diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h index bebeaad897cc..29ed8fd6379a 100644 --- a/include/linux/net_dim.h +++ b/include/linux/net_dim.h @@ -231,7 +231,7 @@ static inline void net_dim_exit_parking(struct net_dim *dim) } #define IS_SIGNIFICANT_DIFF(val, ref) \ - (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ + (((100UL * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ static inline int net_dim_stats_compare(struct net_dim_stats *curr, struct net_dim_stats *prev) diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index db84c516bcfb..35b79f47a13d 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -79,6 +79,7 @@ enum { NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */ NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */ + NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */ /* * Add your fresh new feature above and remember to update @@ -145,6 +146,7 @@ enum { #define NETIF_F_HW_ESP __NETIF_F(HW_ESP) #define NETIF_F_HW_ESP_TX_CSUM __NETIF_F(HW_ESP_TX_CSUM) #define NETIF_F_RX_UDP_TUNNEL_PORT __NETIF_F(RX_UDP_TUNNEL_PORT) +#define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD) #define for_each_netdev_feature(mask_addr, bit) \ for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5eef6c8e2741..cf44503ea81a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -58,6 +58,7 @@ struct device; struct phy_device; struct dsa_port; +struct sfp_bus; /* 802.11 specific */ struct wireless_dev; /* 802.15.4 specific */ @@ -585,6 +586,15 @@ struct netdev_queue { #endif } ____cacheline_aligned_in_smp; +extern int sysctl_fb_tunnels_only_for_init_net; + +static inline bool net_has_fallback_tunnels(const struct net *net) +{ + return net == &init_net || + !IS_ENABLED(CONFIG_SYSCTL) || + !sysctl_fb_tunnels_only_for_init_net; +} + static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) { #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) @@ -1381,8 +1391,6 @@ struct net_device_ops { * @IFF_MACVLAN: Macvlan device * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account * underlying stacked devices - * @IFF_IPVLAN_MASTER: IPvlan master device - * @IFF_IPVLAN_SLAVE: IPvlan slave device * @IFF_L3MDEV_MASTER: device is an L3 master device * @IFF_NO_QUEUE: device can run without qdisc attached * @IFF_OPENVSWITCH: device is a Open vSwitch master @@ -1392,6 +1400,7 @@ struct net_device_ops { * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external * entity (i.e. 
the master device for bridged veth) * @IFF_MACSEC: device is a MACsec device + * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook */ enum netdev_priv_flags { IFF_802_1Q_VLAN = 1<<0, @@ -1412,16 +1421,15 @@ enum netdev_priv_flags { IFF_LIVE_ADDR_CHANGE = 1<<15, IFF_MACVLAN = 1<<16, IFF_XMIT_DST_RELEASE_PERM = 1<<17, - IFF_IPVLAN_MASTER = 1<<18, - IFF_IPVLAN_SLAVE = 1<<19, - IFF_L3MDEV_MASTER = 1<<20, - IFF_NO_QUEUE = 1<<21, - IFF_OPENVSWITCH = 1<<22, - IFF_L3MDEV_SLAVE = 1<<23, - IFF_TEAM = 1<<24, - IFF_RXFH_CONFIGURED = 1<<25, - IFF_PHONY_HEADROOM = 1<<26, - IFF_MACSEC = 1<<27, + IFF_L3MDEV_MASTER = 1<<18, + IFF_NO_QUEUE = 1<<19, + IFF_OPENVSWITCH = 1<<20, + IFF_L3MDEV_SLAVE = 1<<21, + IFF_TEAM = 1<<22, + IFF_RXFH_CONFIGURED = 1<<23, + IFF_PHONY_HEADROOM = 1<<24, + IFF_MACSEC = 1<<25, + IFF_NO_RX_HANDLER = 1<<26, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1442,8 +1450,6 @@ enum netdev_priv_flags { #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE #define IFF_MACVLAN IFF_MACVLAN #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM -#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER -#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE #define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER #define IFF_NO_QUEUE IFF_NO_QUEUE #define IFF_OPENVSWITCH IFF_OPENVSWITCH @@ -1451,6 +1457,7 @@ enum netdev_priv_flags { #define IFF_TEAM IFF_TEAM #define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED #define IFF_MACSEC IFF_MACSEC +#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER /** * struct net_device - The DEVICE structure. @@ -1656,6 +1663,7 @@ enum netdev_priv_flags { * @priomap: XXX: need comments on this one * @phydev: Physical device may attach itself * for hardware timestamping + * @sfp_bus: attached &struct sfp_bus structure. * * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount @@ -1798,11 +1806,17 @@ struct net_device { #if IS_ENABLED(CONFIG_TIPC) struct tipc_bearer __rcu *tipc_ptr; #endif +#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK) void *atalk_ptr; +#endif struct in_device __rcu *ip_ptr; +#if IS_ENABLED(CONFIG_DECNET) struct dn_dev __rcu *dn_ptr; +#endif struct inet6_dev __rcu *ip6_ptr; +#if IS_ENABLED(CONFIG_AX25) void *ax25_ptr; +#endif struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; #if IS_ENABLED(CONFIG_MPLS_ROUTING) @@ -1933,6 +1947,7 @@ struct net_device { struct netprio_map __rcu *priomap; #endif struct phy_device *phydev; + struct sfp_bus *sfp_bus; struct lock_class_key *qdisc_tx_busylock; struct lock_class_key *qdisc_running_key; bool proto_down; @@ -2300,43 +2315,49 @@ struct netdev_lag_lower_state_info { #include <linux/notifier.h> -/* netdevice notifier chain. Please remember to update the rtnetlink - * notification exclusion list in rtnetlink_event() when adding new - * types. +/* netdevice notifier chain. Please remember to update netdev_cmd_to_name() + * and the rtnetlink notification exclusion list in rtnetlink_event() when + * adding new types. 
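With the notifier commands converted to enum netdev_cmd (just below) and netdev_cmd_to_name() exported, a notifier can log events by name. An illustrative sketch only; demo_* symbols are hypothetical.

static int demo_netdev_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	pr_debug("%s: event %s\n", dev->name,
		 netdev_cmd_to_name((enum netdev_cmd)event));

	return NOTIFY_DONE;
}

static struct notifier_block demo_netdev_nb = {
	.notifier_call = demo_netdev_event,
};

/* register_netdevice_notifier(&demo_netdev_nb) at module init,
 * unregister_netdevice_notifier(&demo_netdev_nb) at module exit */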
*/ -#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */ -#define NETDEV_DOWN 0x0002 -#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface +enum netdev_cmd { + NETDEV_UP = 1, /* For now you can't veto a device up/down */ + NETDEV_DOWN, + NETDEV_REBOOT, /* Tell a protocol stack a network interface detected a hardware crash and restarted - we can use this eg to kick tcp sessions once done */ -#define NETDEV_CHANGE 0x0004 /* Notify device state change */ -#define NETDEV_REGISTER 0x0005 -#define NETDEV_UNREGISTER 0x0006 -#define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */ -#define NETDEV_CHANGEADDR 0x0008 -#define NETDEV_GOING_DOWN 0x0009 -#define NETDEV_CHANGENAME 0x000A -#define NETDEV_FEAT_CHANGE 0x000B -#define NETDEV_BONDING_FAILOVER 0x000C -#define NETDEV_PRE_UP 0x000D -#define NETDEV_PRE_TYPE_CHANGE 0x000E -#define NETDEV_POST_TYPE_CHANGE 0x000F -#define NETDEV_POST_INIT 0x0010 -#define NETDEV_UNREGISTER_FINAL 0x0011 -#define NETDEV_RELEASE 0x0012 -#define NETDEV_NOTIFY_PEERS 0x0013 -#define NETDEV_JOIN 0x0014 -#define NETDEV_CHANGEUPPER 0x0015 -#define NETDEV_RESEND_IGMP 0x0016 -#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ -#define NETDEV_CHANGEINFODATA 0x0018 -#define NETDEV_BONDING_INFO 0x0019 -#define NETDEV_PRECHANGEUPPER 0x001A -#define NETDEV_CHANGELOWERSTATE 0x001B -#define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C -#define NETDEV_UDP_TUNNEL_DROP_INFO 0x001D -#define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E + NETDEV_CHANGE, /* Notify device state change */ + NETDEV_REGISTER, + NETDEV_UNREGISTER, + NETDEV_CHANGEMTU, /* notify after mtu change happened */ + NETDEV_CHANGEADDR, + NETDEV_GOING_DOWN, + NETDEV_CHANGENAME, + NETDEV_FEAT_CHANGE, + NETDEV_BONDING_FAILOVER, + NETDEV_PRE_UP, + NETDEV_PRE_TYPE_CHANGE, + NETDEV_POST_TYPE_CHANGE, + NETDEV_POST_INIT, + NETDEV_RELEASE, + NETDEV_NOTIFY_PEERS, + NETDEV_JOIN, + NETDEV_CHANGEUPPER, + NETDEV_RESEND_IGMP, + NETDEV_PRECHANGEMTU, /* notify before mtu change happened */ + NETDEV_CHANGEINFODATA, + NETDEV_BONDING_INFO, + NETDEV_PRECHANGEUPPER, + NETDEV_CHANGELOWERSTATE, + NETDEV_UDP_TUNNEL_PUSH_INFO, + NETDEV_UDP_TUNNEL_DROP_INFO, + NETDEV_CHANGE_TX_QUEUE_LEN, + NETDEV_CVLAN_FILTER_PUSH_INFO, + NETDEV_CVLAN_FILTER_DROP_INFO, + NETDEV_SVLAN_FILTER_PUSH_INFO, + NETDEV_SVLAN_FILTER_DROP_INFO, +}; +const char *netdev_cmd_to_name(enum netdev_cmd cmd); int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); @@ -4217,16 +4238,6 @@ static inline bool netif_is_macvlan_port(const struct net_device *dev) return dev->priv_flags & IFF_MACVLAN_PORT; } -static inline bool netif_is_ipvlan(const struct net_device *dev) -{ - return dev->priv_flags & IFF_IPVLAN_SLAVE; -} - -static inline bool netif_is_ipvlan_port(const struct net_device *dev) -{ - return dev->priv_flags & IFF_IPVLAN_MASTER; -} - static inline bool netif_is_bond_master(const struct net_device *dev) { return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h index b4d741195c28..beee8bffe49e 100644 --- a/include/linux/netfilter/nfnetlink_acct.h +++ b/include/linux/netfilter/nfnetlink_acct.h @@ -16,6 +16,5 @@ struct nf_acct; struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name); void nfnl_acct_put(struct nf_acct *acct); void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct); -int nfnl_acct_overquota(struct net *net, 
const struct sk_buff *skb, - struct nf_acct *nfacct); +int nfnl_acct_overquota(struct net *net, struct nf_acct *nfacct); #endif /* _NFNL_ACCT_H */ diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 1313b35c3ab7..9077b3ebea08 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -281,10 +281,14 @@ int xt_check_entry_offsets(const void *base, const char *elems, unsigned int target_offset, unsigned int next_offset); +int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks); + unsigned int *xt_alloc_entry_offsets(unsigned int size); bool xt_find_jump_offset(const unsigned int *offsets, unsigned int target, unsigned int size); +int xt_check_proc_name(const char *name, unsigned int size); + int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, @@ -299,6 +303,7 @@ int xt_data_to_user(void __user *dst, const void *src, void *xt_copy_counters_from_user(const void __user *user, unsigned int len, struct xt_counters_info *info, bool compat); +struct xt_counters *xt_counters_alloc(unsigned int counters); struct xt_table *xt_register_table(struct net *net, const struct xt_table *table, @@ -507,7 +512,7 @@ void xt_compat_unlock(u_int8_t af); int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta); void xt_compat_flush_offsets(u_int8_t af); -void xt_compat_init_offsets(u_int8_t af, unsigned int number); +int xt_compat_init_offsets(u8 af, unsigned int number); int xt_compat_calc_jump(u_int8_t af, unsigned int offset); int xt_compat_match_offset(const struct xt_match *match); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 38187c68063d..2f129bbfaae8 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -198,14 +198,24 @@ struct nfs_inode { /* * Cache validity bit flags */ -#define NFS_INO_INVALID_ATTR 0x0001 /* cached attrs are invalid */ -#define NFS_INO_INVALID_DATA 0x0002 /* cached data is invalid */ -#define NFS_INO_INVALID_ATIME 0x0004 /* cached atime is invalid */ -#define NFS_INO_INVALID_ACCESS 0x0008 /* cached access cred invalid */ -#define NFS_INO_INVALID_ACL 0x0010 /* cached acls are invalid */ -#define NFS_INO_REVAL_PAGECACHE 0x0020 /* must revalidate pagecache */ -#define NFS_INO_REVAL_FORCED 0x0040 /* force revalidation ignoring a delegation */ -#define NFS_INO_INVALID_LABEL 0x0080 /* cached label is invalid */ +#define NFS_INO_INVALID_DATA BIT(1) /* cached data is invalid */ +#define NFS_INO_INVALID_ATIME BIT(2) /* cached atime is invalid */ +#define NFS_INO_INVALID_ACCESS BIT(3) /* cached access cred invalid */ +#define NFS_INO_INVALID_ACL BIT(4) /* cached acls are invalid */ +#define NFS_INO_REVAL_PAGECACHE BIT(5) /* must revalidate pagecache */ +#define NFS_INO_REVAL_FORCED BIT(6) /* force revalidation ignoring a delegation */ +#define NFS_INO_INVALID_LABEL BIT(7) /* cached label is invalid */ +#define NFS_INO_INVALID_CHANGE BIT(8) /* cached change is invalid */ +#define NFS_INO_INVALID_CTIME BIT(9) /* cached ctime is invalid */ +#define NFS_INO_INVALID_MTIME BIT(10) /* cached mtime is invalid */ +#define NFS_INO_INVALID_SIZE BIT(11) /* cached size is invalid */ +#define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */ + +#define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ + | NFS_INO_INVALID_CTIME \ + | NFS_INO_INVALID_MTIME \ + | NFS_INO_INVALID_SIZE \ + | NFS_INO_INVALID_OTHER) /* inode metadata is 
invalid */ /* * Bit offsets in flags field @@ -292,10 +302,11 @@ static inline void nfs_mark_for_revalidate(struct inode *inode) struct nfs_inode *nfsi = NFS_I(inode); spin_lock(&inode->i_lock); - nfsi->cache_validity |= NFS_INO_INVALID_ATTR | - NFS_INO_REVAL_PAGECACHE | - NFS_INO_INVALID_ACCESS | - NFS_INO_INVALID_ACL; + nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE + | NFS_INO_INVALID_ACCESS + | NFS_INO_INVALID_ACL + | NFS_INO_INVALID_CHANGE + | NFS_INO_INVALID_CTIME; if (S_ISDIR(inode->i_mode)) nfsi->cache_validity |= NFS_INO_INVALID_DATA; spin_unlock(&inode->i_lock); diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 6959968dc36a..34d28564ecf3 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1590,11 +1590,13 @@ struct nfs_rpc_ops { unsigned int); int (*create) (struct inode *, struct dentry *, struct iattr *, int); - int (*remove) (struct inode *, const struct qstr *); - void (*unlink_setup) (struct rpc_message *, struct inode *dir); + int (*remove) (struct inode *, struct dentry *); + void (*unlink_setup) (struct rpc_message *, struct dentry *); void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *); int (*unlink_done) (struct rpc_task *, struct inode *); - void (*rename_setup) (struct rpc_message *msg, struct inode *dir); + void (*rename_setup) (struct rpc_message *msg, + struct dentry *old_dentry, + struct dentry *new_dentry); void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *); int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir); int (*link) (struct inode *, struct inode *, const struct qstr *); @@ -1633,7 +1635,6 @@ struct nfs_rpc_ops { struct iattr *iattr, int *); int (*have_delegation)(struct inode *, fmode_t); - int (*return_delegation)(struct inode *); struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *); struct nfs_client *(*init_client) (struct nfs_client *, const struct nfs_client_initdata *); diff --git a/include/linux/node.h b/include/linux/node.h index 4ece0fee0ffc..41f171861dcc 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -67,7 +67,7 @@ extern void unregister_one_node(int nid); extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); extern int register_mem_sect_under_node(struct memory_block *mem_blk, - int nid); + int nid, bool check_nid); extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, unsigned long phys_index); @@ -97,7 +97,7 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) return 0; } static inline int register_mem_sect_under_node(struct memory_block *mem_blk, - int nid) + int nid, bool check_nid) { return 0; } diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 497706f5adca..f89598bc4e1c 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -22,6 +22,31 @@ typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset, typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset, void *val, size_t bytes); +/** + * struct nvmem_config - NVMEM device configuration + * + * @dev: Parent device. + * @name: Optional name. + * @id: Optional device ID used in full name. Ignored if name is NULL. + * @owner: Pointer to exporter module. Used for refcounting. + * @cells: Optional array of pre-defined NVMEM cells. + * @ncells: Number of elements in cells. + * @read_only: Device is read-only. 
+ * @root_only: Device is accessibly to root only. + * @reg_read: Callback to read data. + * @reg_write: Callback to write data. + * @size: Device size. + * @word_size: Minimum read/write access granularity. + * @stride: Minimum read/write access stride. + * @priv: User context passed to read/write callbacks. + * + * Note: A default "nvmem<id>" name will be assigned to the device if + * no name is specified in its configuration. In such case "<id>" is + * generated with ida_simple_get() and provided id field is ignored. + * + * Note: Specifying name and setting id to -1 implies a unique device + * whose name is provided as-is (kept unaltered). + */ struct nvmem_config { struct device *dev; const char *name; @@ -47,6 +72,11 @@ struct nvmem_config { struct nvmem_device *nvmem_register(const struct nvmem_config *cfg); int nvmem_unregister(struct nvmem_device *nvmem); +struct nvmem_device *devm_nvmem_register(struct device *dev, + const struct nvmem_config *cfg); + +int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem); + #else static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c) @@ -59,5 +89,17 @@ static inline int nvmem_unregister(struct nvmem_device *nvmem) return -ENOSYS; } +static inline struct nvmem_device * +devm_nvmem_register(struct device *dev, const struct nvmem_config *c) +{ + return nvmem_register(c); +} + +static inline int +devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem) +{ + return nvmem_unregister(nvmem); +} + #endif /* CONFIG_NVMEM */ #endif /* ifndef _LINUX_NVMEM_PROVIDER_H */ diff --git a/include/linux/of.h b/include/linux/of.h index da1ee95241c1..4d25e4f952d9 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -363,6 +363,9 @@ extern struct device_node *of_parse_phandle(const struct device_node *np, extern int of_parse_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name, int index, struct of_phandle_args *out_args); +extern int of_parse_phandle_with_args_map(const struct device_node *np, + const char *list_name, const char *stem_name, int index, + struct of_phandle_args *out_args); extern int of_parse_phandle_with_fixed_args(const struct device_node *np, const char *list_name, int cells_count, int index, struct of_phandle_args *out_args); @@ -815,6 +818,15 @@ static inline int of_parse_phandle_with_args(const struct device_node *np, return -ENOSYS; } +static inline int of_parse_phandle_with_args_map(const struct device_node *np, + const char *list_name, + const char *stem_name, + int index, + struct of_phandle_args *out_args) +{ + return -ENOSYS; +} + static inline int of_parse_phandle_with_fixed_args(const struct device_node *np, const char *list_name, int cells_count, int index, struct of_phandle_args *out_args) @@ -1359,8 +1371,8 @@ struct of_overlay_notify_data { #ifdef CONFIG_OF_OVERLAY -/* ID based overlays; the API for external users */ -int of_overlay_apply(struct device_node *tree, int *ovcs_id); +int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, + int *ovcs_id); int of_overlay_remove(int *ovcs_id); int of_overlay_remove_all(void); @@ -1369,7 +1381,7 @@ int of_overlay_notifier_unregister(struct notifier_block *nb); #else -static inline int of_overlay_apply(struct device_node *tree, int *ovcs_id) +static inline int of_overlay_fdt_apply(void *overlay_fdt, int *ovcs_id) { return -ENOTSUPP; } diff --git a/include/linux/of_net.h b/include/linux/of_net.h index 9cd72aab76fe..90d81ee9e6a0 100644 --- a/include/linux/of_net.h +++ 
b/include/linux/of_net.h @@ -13,6 +13,7 @@ struct net_device; extern int of_get_phy_mode(struct device_node *np); extern const void *of_get_mac_address(struct device_node *np); +extern int of_get_nvmem_mac_address(struct device_node *np, void *addr); extern struct net_device *of_find_net_device_by_node(struct device_node *np); #else static inline int of_get_phy_mode(struct device_node *np) @@ -25,6 +26,11 @@ static inline const void *of_get_mac_address(struct device_node *np) return NULL; } +static inline int of_get_nvmem_mac_address(struct device_node *np, void *addr) +{ + return -ENODEV; +} + static inline struct net_device *of_find_net_device_by_node(struct device_node *np) { return NULL; diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 50c2b8786831..e34a27727b9a 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -156,9 +156,18 @@ static __always_inline int PageCompound(struct page *page) return test_bit(PG_head, &page->flags) || PageTail(page); } +#define PAGE_POISON_PATTERN -1l +static inline int PagePoisoned(const struct page *page) +{ + return page->flags == PAGE_POISON_PATTERN; +} + /* * Page flags policies wrt compound pages * + * PF_POISONED_CHECK + * check if this struct page poisoned/uninitialized + * * PF_ANY: * the page flag is relevant for small, head and tail pages. * @@ -176,17 +185,20 @@ static __always_inline int PageCompound(struct page *page) * PF_NO_COMPOUND: * the page flag is not relevant for compound pages. */ -#define PF_ANY(page, enforce) page -#define PF_HEAD(page, enforce) compound_head(page) +#define PF_POISONED_CHECK(page) ({ \ + VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \ + page; }) +#define PF_ANY(page, enforce) PF_POISONED_CHECK(page) +#define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page)) #define PF_ONLY_HEAD(page, enforce) ({ \ VM_BUG_ON_PGFLAGS(PageTail(page), page); \ - page;}) + PF_POISONED_CHECK(page); }) #define PF_NO_TAIL(page, enforce) ({ \ VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \ - compound_head(page);}) + PF_POISONED_CHECK(compound_head(page)); }) #define PF_NO_COMPOUND(page, enforce) ({ \ VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \ - page;}) + PF_POISONED_CHECK(page); }) /* * Macros to create function definitions for page flags diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index cdad58bbfd8b..4ae347cbc36d 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -63,7 +63,6 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, bool skip_hwpoisoned_pages); -struct page *alloc_migrate_target(struct page *page, unsigned long private, - int **resultp); +struct page *alloc_migrate_target(struct page *page, unsigned long private); #endif diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h index 760d74a0e9a9..14d14beb1f7f 100644 --- a/include/linux/page_ref.h +++ b/include/linux/page_ref.h @@ -175,8 +175,7 @@ static inline void page_ref_unfreeze(struct page *page, int count) VM_BUG_ON_PAGE(page_count(page) != 0, page); VM_BUG_ON(count == 0); - smp_mb(); - atomic_set(&page->_refcount, count); + atomic_set_release(&page->_refcount, count); if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze)) __page_ref_unfreeze(page, count); } diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 34ce3ebf97d5..b1bd2186e6d2 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h 
@@ -144,7 +144,7 @@ void release_pages(struct page **pages, int nr); * 3. check the page is still in pagecache (if no, goto 1) * * Remove-side that cares about stability of _refcount (eg. reclaim) has the - * following (with tree_lock held for write): + * following (with the i_pages lock held): * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg) * B. remove page from pagecache * C. free the page @@ -157,7 +157,7 @@ void release_pages(struct page **pages, int nr); * * It is possible that between 1 and 2, the page is removed then the exact same * page is inserted into the same position in pagecache. That's OK: the - * old find_get_page using tree_lock could equally have run before or after + * old find_get_page using a lock could equally have run before or after * such a re-insertion, depending on order that locks are granted. * * Lookups racing against pagecache insertion isn't a big problem: either 1 diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index a1a5e5df0f66..af657ca58b70 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -39,10 +39,9 @@ struct pci_epc_ops { int (*write_header)(struct pci_epc *epc, u8 func_no, struct pci_epf_header *hdr); int (*set_bar)(struct pci_epc *epc, u8 func_no, - enum pci_barno bar, - dma_addr_t bar_phys, size_t size, int flags); + struct pci_epf_bar *epf_bar); void (*clear_bar)(struct pci_epc *epc, u8 func_no, - enum pci_barno bar); + struct pci_epf_bar *epf_bar); int (*map_addr)(struct pci_epc *epc, u8 func_no, phys_addr_t addr, u64 pci_addr, size_t size); void (*unmap_addr)(struct pci_epc *epc, u8 func_no, @@ -127,9 +126,9 @@ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf); int pci_epc_write_header(struct pci_epc *epc, u8 func_no, struct pci_epf_header *hdr); int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, - enum pci_barno bar, - dma_addr_t bar_phys, size_t size, int flags); -void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, int bar); + struct pci_epf_bar *epf_bar); +void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, + struct pci_epf_bar *epf_bar); int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, phys_addr_t phys_addr, u64 pci_addr, size_t size); diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index e897bf076701..f7d6f4883f8b 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -97,6 +97,8 @@ struct pci_epf_driver { struct pci_epf_bar { dma_addr_t phys_addr; size_t size; + enum pci_barno barno; + int flags; }; /** diff --git a/include/linux/pci.h b/include/linux/pci.h index 024a1beda008..73178a2fcee0 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -256,6 +256,7 @@ enum pci_bus_speed { PCIE_SPEED_2_5GT = 0x14, PCIE_SPEED_5_0GT = 0x15, PCIE_SPEED_8_0GT = 0x16, + PCIE_SPEED_16_0GT = 0x17, PCI_SPEED_UNKNOWN = 0xff, }; @@ -469,6 +470,9 @@ struct pci_host_bridge { struct msi_controller *msi; unsigned int ignore_reset_delay:1; /* For entire hierarchy */ unsigned int no_ext_tags:1; /* No Extended Tags */ + unsigned int native_aer:1; /* OS may use PCIe AER */ + unsigned int native_hotplug:1; /* OS may use PCIe hotplug */ + unsigned int native_pme:1; /* OS may use PCIe PME */ /* Resource alignment requirements */ resource_size_t (*align_resource)(struct pci_dev *dev, const struct resource *res, @@ -949,11 +953,6 @@ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int 
bus, unsigned int devfn); -static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, - unsigned int devfn) -{ - return pci_get_domain_bus_and_slot(0, bus, devfn); -} struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); int pci_dev_present(const struct pci_device_id *ids); @@ -1082,7 +1081,11 @@ int pcie_get_mps(struct pci_dev *dev); int pcie_set_mps(struct pci_dev *dev, int mps); int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, enum pcie_link_width *width); -void pcie_flr(struct pci_dev *dev); +u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width); +void pcie_print_link_status(struct pci_dev *dev); +int pcie_flr(struct pci_dev *dev); int __pci_reset_function_locked(struct pci_dev *dev); int pci_reset_function(struct pci_dev *dev); int pci_reset_function_locked(struct pci_dev *dev); @@ -1095,7 +1098,7 @@ int pci_reset_bus(struct pci_bus *bus); int pci_try_reset_bus(struct pci_bus *bus); void pci_reset_secondary_bus(struct pci_dev *dev); void pcibios_reset_secondary_bus(struct pci_dev *dev); -void pci_reset_bridge_secondary_bus(struct pci_dev *dev); +int pci_reset_bridge_secondary_bus(struct pci_dev *dev); void pci_update_resource(struct pci_dev *dev, int resno); int __must_check pci_assign_resource(struct pci_dev *dev, int i); int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); @@ -1147,6 +1150,8 @@ void pci_pme_wakeup_bus(struct pci_bus *bus); void pci_d3cold_enable(struct pci_dev *dev); void pci_d3cold_disable(struct pci_dev *dev); bool pcie_relaxed_ordering_enabled(struct pci_dev *dev); +void pci_wakeup_bus(struct pci_bus *bus); +void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state); /* PCI Virtual Channel */ int pci_save_vc_state(struct pci_dev *dev); @@ -1226,7 +1231,8 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus, void *alignf_data); -int pci_register_io_range(phys_addr_t addr, resource_size_t size); +int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, + resource_size_t size); unsigned long pci_address_to_pio(phys_addr_t addr); phys_addr_t pci_pio_to_address(unsigned long pio); int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); @@ -1295,7 +1301,6 @@ unsigned char pci_bus_max_busnr(struct pci_bus *bus); void pci_setup_bridge(struct pci_bus *bus); resource_size_t pcibios_window_alignment(struct pci_bus *bus, unsigned long type); -resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno); #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0) #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1) @@ -1446,10 +1451,8 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d, #ifdef CONFIG_PCIEPORTBUS extern bool pcie_ports_disabled; -extern bool pcie_ports_auto; #else #define pcie_ports_disabled true -#define pcie_ports_auto false #endif #ifdef CONFIG_PCIEASPM @@ -1661,9 +1664,6 @@ static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn) { return NULL; } -static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, - unsigned int devfn) -{ return NULL; } static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, unsigned int devfn) { return NULL; } @@ -1923,6 +1923,7 @@ void pcibios_release_device(struct pci_dev *dev); void pcibios_penalize_isa_irq(int irq, int 
active); int pcibios_alloc_irq(struct pci_dev *dev); void pcibios_free_irq(struct pci_dev *dev); +resource_size_t pcibios_default_alignment(void); #ifdef CONFIG_HIBERNATE_CALLBACKS extern struct dev_pm_ops pcibios_pm_ops; @@ -1955,6 +1956,11 @@ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); int pci_sriov_get_totalvfs(struct pci_dev *dev); resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno); void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe); + +/* Arch may override these (weak) */ +int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs); +int pcibios_sriov_disable(struct pci_dev *pdev); +resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno); #else static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id) { @@ -2182,24 +2188,11 @@ int pci_parse_request_of_pci_ranges(struct device *dev, /* Arch may override this (weak) */ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); -static inline struct device_node * -pci_device_to_OF_node(const struct pci_dev *pdev) -{ - return pdev ? pdev->dev.of_node : NULL; -} - -static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) -{ - return bus ? bus->dev.of_node : NULL; -} - #else /* CONFIG_OF */ static inline void pci_set_of_node(struct pci_dev *dev) { } static inline void pci_release_of_node(struct pci_dev *dev) { } static inline void pci_set_bus_of_node(struct pci_bus *bus) { } static inline void pci_release_bus_of_node(struct pci_bus *bus) { } -static inline struct device_node * -pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; } static inline struct irq_domain * pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } static inline int pci_parse_request_of_pci_ranges(struct device *dev, @@ -2210,6 +2203,17 @@ static inline int pci_parse_request_of_pci_ranges(struct device *dev, } #endif /* CONFIG_OF */ +static inline struct device_node * +pci_device_to_OF_node(const struct pci_dev *pdev) +{ + return pdev ? pdev->dev.of_node : NULL; +} + +static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) +{ + return bus ? 
bus->dev.of_node : NULL; +} + #ifdef CONFIG_ACPI struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus); @@ -2280,41 +2284,9 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) return false; } -/** - * pci_uevent_ers - emit a uevent during recovery path of pci device - * @pdev: pci device to check - * @err_type: type of error event - * - */ -static inline void pci_uevent_ers(struct pci_dev *pdev, - enum pci_ers_result err_type) -{ - int idx = 0; - char *envp[3]; - - switch (err_type) { - case PCI_ERS_RESULT_NONE: - case PCI_ERS_RESULT_CAN_RECOVER: - envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY"; - envp[idx++] = "DEVICE_ONLINE=0"; - break; - case PCI_ERS_RESULT_RECOVERED: - envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY"; - envp[idx++] = "DEVICE_ONLINE=1"; - break; - case PCI_ERS_RESULT_DISCONNECT: - envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY"; - envp[idx++] = "DEVICE_ONLINE=0"; - break; - default: - break; - } - - if (idx > 0) { - envp[idx++] = NULL; - kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp); - } -} +#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH) +void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); +#endif /* Provide the legacy pci_dma_* API */ #include <linux/pci-dma-compat.h> diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index a6b30667a331..cc608fc55334 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -45,6 +45,7 @@ #define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400 #define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401 #define PCI_CLASS_MULTIMEDIA_PHONE 0x0402 +#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403 #define PCI_CLASS_MULTIMEDIA_OTHER 0x0480 #define PCI_BASE_CLASS_MEMORY 0x05 @@ -1333,6 +1334,7 @@ #define PCI_DEVICE_ID_IMS_TT3D 0x9135 #define PCI_VENDOR_ID_AMCC 0x10e8 +#define PCI_VENDOR_ID_AMPERE 0x1def #define PCI_VENDOR_ID_INTERG 0x10ea #define PCI_DEVICE_ID_INTERG_1682 0x1682 @@ -1561,6 +1563,8 @@ #define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227 #define PCI_DEVICE_ID_SERVERWORKS_HT1100LD 0x0408 +#define PCI_VENDOR_ID_ALTERA 0x1172 + #define PCI_VENDOR_ID_SBE 0x1176 #define PCI_DEVICE_ID_SBE_WANXL100 0x0301 #define PCI_DEVICE_ID_SBE_WANXL200 0x0302 @@ -2556,6 +2560,9 @@ #define PCI_DEVICE_ID_TEHUTI_3010 0x3010 #define PCI_DEVICE_ID_TEHUTI_3014 0x3014 +#define PCI_VENDOR_ID_SUNIX 0x1fd4 +#define PCI_DEVICE_ID_SUNIX_1999 0x1999 + #define PCI_VENDOR_ID_HINT 0x3388 #define PCI_DEVICE_ID_HINT_VXPROII_IDE 0x8013 diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h deleted file mode 100644 index b69769dbf659..000000000000 --- a/include/linux/pcieport_if.h +++ /dev/null @@ -1,71 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * File: pcieport_if.h - * Purpose: PCI Express Port Bus Driver's IF Data Structure - * - * Copyright (C) 2004 Intel - * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) - */ - -#ifndef _PCIEPORT_IF_H_ -#define _PCIEPORT_IF_H_ - -/* Port Type */ -#define PCIE_ANY_PORT (~0) - -/* Service Type */ -#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */ -#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT) -#define PCIE_PORT_SERVICE_AER_SHIFT 1 /* Advanced Error Reporting */ -#define PCIE_PORT_SERVICE_AER (1 << PCIE_PORT_SERVICE_AER_SHIFT) -#define PCIE_PORT_SERVICE_HP_SHIFT 2 /* Native Hotplug */ -#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT) -#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */ -#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT) -#define PCIE_PORT_SERVICE_DPC_SHIFT 4 /* 
Downstream Port Containment */ -#define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT) - -struct pcie_device { - int irq; /* Service IRQ/MSI/MSI-X Vector */ - struct pci_dev *port; /* Root/Upstream/Downstream Port */ - u32 service; /* Port service this device represents */ - void *priv_data; /* Service Private Data */ - struct device device; /* Generic Device Interface */ -}; -#define to_pcie_device(d) container_of(d, struct pcie_device, device) - -static inline void set_service_data(struct pcie_device *dev, void *data) -{ - dev->priv_data = data; -} - -static inline void *get_service_data(struct pcie_device *dev) -{ - return dev->priv_data; -} - -struct pcie_port_service_driver { - const char *name; - int (*probe) (struct pcie_device *dev); - void (*remove) (struct pcie_device *dev); - int (*suspend) (struct pcie_device *dev); - int (*resume) (struct pcie_device *dev); - - /* Device driver may resume normal operations */ - void (*error_resume)(struct pci_dev *dev); - - /* Link Reset Capability - AER service driver specific */ - pci_ers_result_t (*reset_link) (struct pci_dev *dev); - - int port_type; /* Type of the port this driver can handle */ - u32 service; /* Port service this device represents */ - - struct device_driver driver; -}; -#define to_service_driver(d) \ - container_of(d, struct pcie_port_service_driver, driver) - -int pcie_port_service_register(struct pcie_port_service_driver *new); -void pcie_port_service_unregister(struct pcie_port_service_driver *new); - -#endif /* _PCIEPORT_IF_H_ */ diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 864d167a1073..009cdf3d65b6 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -30,10 +30,14 @@ * calls io_destroy() or the process exits. * * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it - * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove - * the kioctx from the proccess's list of kioctxs - after that, there can't be - * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop - * the initial ref with percpu_ref_put(). + * removes the kioctx from the proccess's table of kioctxs and kills percpu_ref. + * After that, there can't be any new users of the kioctx (from lookup_ioctx()) + * and it's then safe to drop the initial ref with percpu_ref_put(). + * + * Note that the free path, free_ioctx(), needs to go through explicit call_rcu() + * to synchronize with RCU protected lookup_ioctx(). percpu_ref operations don't + * imply RCU grace periods of any kind and if a user wants to combine percpu_ref + * with RCU protection, it must be done explicitly. * * Code that does a two stage shutdown like this often needs some kind of * explicit synchronization to ensure the initial refcount can only be dropped @@ -113,8 +117,10 @@ void percpu_ref_reinit(struct percpu_ref *ref); * Must be used to drop the initial ref on a percpu refcount; must be called * precisely once before shutdown. * - * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the - * percpu counters and dropping the initial ref. + * Switches @ref into atomic mode before gathering up the percpu counters + * and dropping the initial ref. + * + * There are no implied RCU grace periods between kill and release. 
*/ static inline void percpu_ref_kill(struct percpu_ref *ref) { diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 7546822a1d74..e71e99eb9a4e 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -449,14 +449,19 @@ struct pmu { int (*filter_match) (struct perf_event *event); /* optional */ }; +enum perf_addr_filter_action_t { + PERF_ADDR_FILTER_ACTION_STOP = 0, + PERF_ADDR_FILTER_ACTION_START, + PERF_ADDR_FILTER_ACTION_FILTER, +}; + /** * struct perf_addr_filter - address range filter definition * @entry: event's filter list linkage * @inode: object file's inode for file-based filters * @offset: filter range offset - * @size: filter range size - * @range: 1: range, 0: address - * @filter: 1: filter/start, 0: stop + * @size: filter range size (size==0 means single address trigger) + * @action: filter/start/stop * * This is a hardware-agnostic filter configuration as specified by the user. */ @@ -465,8 +470,7 @@ struct perf_addr_filter { struct inode *inode; unsigned long offset; unsigned long size; - unsigned int range : 1, - filter : 1; + enum perf_addr_filter_action_t action; }; /** @@ -536,6 +540,10 @@ struct pmu_event_list { struct list_head list; }; +#define for_each_sibling_event(sibling, event) \ + if ((event)->group_leader == (event)) \ + list_for_each_entry((sibling), &(event)->sibling_list, sibling_list) + /** * struct perf_event - performance event kernel representation: */ @@ -549,16 +557,16 @@ struct perf_event { struct list_head event_entry; /* - * XXX: group_entry and sibling_list should be mutually exclusive; - * either you're a sibling on a group, or you're the group leader. - * Rework the code to always use the same list element. - * * Locked for modification by both ctx->mutex and ctx->lock; holding * either sufficies for read. 
*/ - struct list_head group_entry; struct list_head sibling_list; - + struct list_head active_list; + /* + * Node on the pinned or flexible tree located at the event context; + */ + struct rb_node group_node; + u64 group_index; /* * We need storage to track the entries in perf_pmu_migrate_context; we * cannot use the event_entry because of RCU and we want to keep the @@ -690,6 +698,12 @@ struct perf_event { #endif /* CONFIG_PERF_EVENTS */ }; + +struct perf_event_groups { + struct rb_root tree; + u64 index; +}; + /** * struct perf_event_context - event context structure * @@ -710,9 +724,13 @@ struct perf_event_context { struct mutex mutex; struct list_head active_ctx_list; - struct list_head pinned_groups; - struct list_head flexible_groups; + struct perf_event_groups pinned_groups; + struct perf_event_groups flexible_groups; struct list_head event_list; + + struct list_head pinned_active; + struct list_head flexible_active; + int nr_events; int nr_active; int is_active; diff --git a/include/linux/phy.h b/include/linux/phy.h index d7069539f351..f0b5870a6d40 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -984,6 +984,10 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev) { return 0; } +int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, + u16 regnum); +int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, + u16 regnum, u16 val); /* Clause 45 PHY */ int genphy_c45_restart_aneg(struct phy_device *phydev); @@ -995,6 +999,14 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev); int genphy_c45_an_disable_aneg(struct phy_device *phydev); int genphy_c45_read_mdix(struct phy_device *phydev); +/* The gen10g_* functions are the old Clause 45 stub */ +int gen10g_config_aneg(struct phy_device *phydev); +int gen10g_read_status(struct phy_device *phydev); +int gen10g_no_soft_reset(struct phy_device *phydev); +int gen10g_config_init(struct phy_device *phydev); +int gen10g_suspend(struct phy_device *phydev); +int gen10g_resume(struct phy_device *phydev); + static inline int phy_read_status(struct phy_device *phydev) { if (!phydev->drv) @@ -1012,7 +1024,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner); int phy_drivers_register(struct phy_driver *new_driver, int n, struct module *owner); void phy_state_machine(struct work_struct *work); -void phy_change(struct phy_device *phydev); void phy_change_work(struct work_struct *work); void phy_mac_interrupt(struct phy_device *phydev); void phy_start_machine(struct phy_device *phydev); diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index 4f8423a948d5..c9d14eeee7f5 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -25,7 +25,15 @@ struct phy; enum phy_mode { PHY_MODE_INVALID, PHY_MODE_USB_HOST, + PHY_MODE_USB_HOST_LS, + PHY_MODE_USB_HOST_FS, + PHY_MODE_USB_HOST_HS, + PHY_MODE_USB_HOST_SS, PHY_MODE_USB_DEVICE, + PHY_MODE_USB_DEVICE_LS, + PHY_MODE_USB_DEVICE_FS, + PHY_MODE_USB_DEVICE_HS, + PHY_MODE_USB_DEVICE_SS, PHY_MODE_USB_OTG, PHY_MODE_SGMII, PHY_MODE_10GKR, @@ -61,6 +69,7 @@ struct phy_ops { */ struct phy_attrs { u32 bus_width; + enum phy_mode mode; }; /** @@ -72,7 +81,8 @@ struct phy_attrs { * @mutex: mutex to protect phy_ops * @init_count: used to protect when the PHY is used by multiple consumers * @power_count: used to protect when the PHY is used by multiple consumers - * @phy_attrs: used to specify PHY specific attributes + * @attrs: used to specify PHY specific attributes + * @pwr: power regulator associated with the 
phy */ struct phy { struct device dev; @@ -88,9 +98,10 @@ struct phy { /** * struct phy_provider - represents the phy provider * @dev: phy provider device + * @children: can be used to override the default (dev->of_node) child node * @owner: the module owner having of_xlate - * @of_xlate: function pointer to obtain phy instance from phy pointer * @list: to maintain a linked list of PHY providers + * @of_xlate: function pointer to obtain phy instance from phy pointer */ struct phy_provider { struct device *dev; @@ -101,6 +112,13 @@ struct phy_provider { struct of_phandle_args *args); }; +/** + * struct phy_lookup - PHY association in list of phys managed by the phy driver + * @node: list node + * @dev_id: the device of the association + * @con_id: connection ID string on device + * @phy: the phy of the association + */ struct phy_lookup { struct list_head node; const char *dev_id; @@ -144,6 +162,10 @@ int phy_exit(struct phy *phy); int phy_power_on(struct phy *phy); int phy_power_off(struct phy *phy); int phy_set_mode(struct phy *phy, enum phy_mode mode); +static inline enum phy_mode phy_get_mode(struct phy *phy) +{ + return phy->attrs.mode; +} int phy_reset(struct phy *phy); int phy_calibrate(struct phy *phy); static inline int phy_get_bus_width(struct phy *phy) @@ -260,6 +282,11 @@ static inline int phy_set_mode(struct phy *phy, enum phy_mode mode) return -ENOSYS; } +static inline enum phy_mode phy_get_mode(struct phy *phy) +{ + return PHY_MODE_INVALID; +} + static inline int phy_reset(struct phy *phy) { if (!phy) diff --git a/include/linux/phylink.h b/include/linux/phylink.h index bd137c273d38..50eeae025f1e 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -73,8 +73,10 @@ struct phylink_mac_ops { void (*mac_config)(struct net_device *ndev, unsigned int mode, const struct phylink_link_state *state); void (*mac_an_restart)(struct net_device *ndev); - void (*mac_link_down)(struct net_device *ndev, unsigned int mode); + void (*mac_link_down)(struct net_device *ndev, unsigned int mode, + phy_interface_t interface); void (*mac_link_up)(struct net_device *ndev, unsigned int mode, + phy_interface_t interface, struct phy_device *phy); }; @@ -161,25 +163,31 @@ void mac_an_restart(struct net_device *ndev); * mac_link_down() - take the link down * @ndev: a pointer to a &struct net_device for the MAC. * @mode: link autonegotiation mode + * @interface: link &typedef phy_interface_t mode * * If @mode is not an in-band negotiation mode (as defined by * phylink_autoneg_inband()), force the link down and disable any - * Energy Efficient Ethernet MAC configuration. + * Energy Efficient Ethernet MAC configuration. Interface type + * selection must be done in mac_config(). */ -void mac_link_down(struct net_device *ndev, unsigned int mode); +void mac_link_down(struct net_device *ndev, unsigned int mode, + phy_interface_t interface); /** * mac_link_up() - allow the link to come up * @ndev: a pointer to a &struct net_device for the MAC. * @mode: link autonegotiation mode + * @interface: link &typedef phy_interface_t mode * @phy: any attached phy * * If @mode is not an in-band negotiation mode (as defined by * phylink_autoneg_inband()), allow the link to come up. If @phy * is non-%NULL, configure Energy Efficient Ethernet by calling * phy_init_eee() and perform appropriate MAC configuration for EEE. + * Interface type selection must be done in mac_config(). 
*/ void mac_link_up(struct net_device *ndev, unsigned int mode, + phy_interface_t interface, struct phy_device *phy); #endif @@ -211,9 +219,6 @@ void phylink_ethtool_get_pauseparam(struct phylink *, struct ethtool_pauseparam *); int phylink_ethtool_set_pauseparam(struct phylink *, struct ethtool_pauseparam *); -int phylink_ethtool_get_module_info(struct phylink *, struct ethtool_modinfo *); -int phylink_ethtool_get_module_eeprom(struct phylink *, - struct ethtool_eeprom *, u8 *); int phylink_get_eee_err(struct phylink *); int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *); int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *); diff --git a/include/linux/platform_data/asoc-ti-mcbsp.h b/include/linux/platform_data/asoc-ti-mcbsp.h index e684543254f3..e319d0a2ec82 100644 --- a/include/linux/platform_data/asoc-ti-mcbsp.h +++ b/include/linux/platform_data/asoc-ti-mcbsp.h @@ -25,10 +25,6 @@ #include <linux/spinlock.h> #include <linux/clk.h> -#define MCBSP_CONFIG_TYPE2 0x2 -#define MCBSP_CONFIG_TYPE3 0x3 -#define MCBSP_CONFIG_TYPE4 0x4 - /* Platform specific configuration */ struct omap_mcbsp_ops { void (*request)(unsigned int); @@ -47,14 +43,6 @@ struct omap_mcbsp_platform_data { int (*force_ick_on)(struct clk *clk, bool force_on); }; -/** - * omap_mcbsp_dev_attr - OMAP McBSP device attributes for omap_hwmod - * @sidetone: name of the sidetone device - */ -struct omap_mcbsp_dev_attr { - const char *sidetone; -}; - void omap3_mcbsp_init_pdata_callback(struct omap_mcbsp_platform_data *pdata); #endif diff --git a/include/linux/platform_data/atmel_mxt_ts.h b/include/linux/platform_data/atmel_mxt_ts.h deleted file mode 100644 index 695035a8d7fb..000000000000 --- a/include/linux/platform_data/atmel_mxt_ts.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Atmel maXTouch Touchscreen driver - * - * Copyright (C) 2010 Samsung Electronics Co.Ltd - * Author: Joonyoung Shim <jy0922.shim@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#ifndef __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H -#define __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H - -#include <linux/types.h> - -enum mxt_suspend_mode { - MXT_SUSPEND_DEEP_SLEEP = 0, - MXT_SUSPEND_T9_CTRL = 1, -}; - -/* The platform data for the Atmel maXTouch touchscreen driver */ -struct mxt_platform_data { - unsigned long irqflags; - u8 t19_num_keys; - const unsigned int *t19_keymap; - enum mxt_suspend_mode suspend_mode; -}; - -#endif /* __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H */ diff --git a/include/linux/platform_data/bfin_rotary.h b/include/linux/platform_data/bfin_rotary.h deleted file mode 100644 index 98829370fee2..000000000000 --- a/include/linux/platform_data/bfin_rotary.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * board initialization should put one of these structures into platform_data - * and place the bfin-rotary onto platform_bus named "bfin-rotary". - * - * Copyright 2008-2010 Analog Devices Inc. - * - * Licensed under the GPL-2 or later. 
- */ - -#ifndef _BFIN_ROTARY_H -#define _BFIN_ROTARY_H - -/* mode bitmasks */ -#define ROT_QUAD_ENC CNTMODE_QUADENC /* quadrature/grey code encoder mode */ -#define ROT_BIN_ENC CNTMODE_BINENC /* binary encoder mode */ -#define ROT_UD_CNT CNTMODE_UDCNT /* rotary counter mode */ -#define ROT_DIR_CNT CNTMODE_DIRCNT /* direction counter mode */ - -#define ROT_DEBE DEBE /* Debounce Enable */ - -#define ROT_CDGINV CDGINV /* CDG Pin Polarity Invert */ -#define ROT_CUDINV CUDINV /* CUD Pin Polarity Invert */ -#define ROT_CZMINV CZMINV /* CZM Pin Polarity Invert */ - -struct bfin_rotary_platform_data { - /* set rotary UP KEY_### or BTN_### in case you prefer - * bfin-rotary to send EV_KEY otherwise set 0 - */ - unsigned int rotary_up_key; - /* set rotary DOWN KEY_### or BTN_### in case you prefer - * bfin-rotary to send EV_KEY otherwise set 0 - */ - unsigned int rotary_down_key; - /* set rotary BUTTON KEY_### or BTN_### */ - unsigned int rotary_button_key; - /* set rotary Relative Axis REL_### in case you prefer - * bfin-rotary to send EV_REL otherwise set 0 - */ - unsigned int rotary_rel_code; - unsigned short debounce; /* 0..17 */ - unsigned short mode; - unsigned short pm_wakeup; - unsigned short *pin_list; -}; - -/* CNT_CONFIG bitmasks */ -#define CNTE (1 << 0) /* Counter Enable */ -#define DEBE (1 << 1) /* Debounce Enable */ -#define CDGINV (1 << 4) /* CDG Pin Polarity Invert */ -#define CUDINV (1 << 5) /* CUD Pin Polarity Invert */ -#define CZMINV (1 << 6) /* CZM Pin Polarity Invert */ -#define CNTMODE_SHIFT 8 -#define CNTMODE (0x7 << CNTMODE_SHIFT) /* Counter Operating Mode */ -#define ZMZC (1 << 1) /* CZM Zeroes Counter Enable */ -#define BNDMODE_SHIFT 12 -#define BNDMODE (0x3 << BNDMODE_SHIFT) /* Boundary register Mode */ -#define INPDIS (1 << 15) /* CUG and CDG Input Disable */ - -#define CNTMODE_QUADENC (0 << CNTMODE_SHIFT) /* quadrature encoder mode */ -#define CNTMODE_BINENC (1 << CNTMODE_SHIFT) /* binary encoder mode */ -#define CNTMODE_UDCNT (2 << CNTMODE_SHIFT) /* up/down counter mode */ -#define CNTMODE_DIRCNT (4 << CNTMODE_SHIFT) /* direction counter mode */ -#define CNTMODE_DIRTMR (5 << CNTMODE_SHIFT) /* direction timer mode */ - -#define BNDMODE_COMP (0 << BNDMODE_SHIFT) /* boundary compare mode */ -#define BNDMODE_ZERO (1 << BNDMODE_SHIFT) /* boundary compare and zero mode */ -#define BNDMODE_CAPT (2 << BNDMODE_SHIFT) /* boundary capture mode */ -#define BNDMODE_AEXT (3 << BNDMODE_SHIFT) /* boundary auto-extend mode */ - -/* CNT_IMASK bitmasks */ -#define ICIE (1 << 0) /* Illegal Gray/Binary Code Interrupt Enable */ -#define UCIE (1 << 1) /* Up count Interrupt Enable */ -#define DCIE (1 << 2) /* Down count Interrupt Enable */ -#define MINCIE (1 << 3) /* Min Count Interrupt Enable */ -#define MAXCIE (1 << 4) /* Max Count Interrupt Enable */ -#define COV31IE (1 << 5) /* Bit 31 Overflow Interrupt Enable */ -#define COV15IE (1 << 6) /* Bit 15 Overflow Interrupt Enable */ -#define CZEROIE (1 << 7) /* Count to Zero Interrupt Enable */ -#define CZMIE (1 << 8) /* CZM Pin Interrupt Enable */ -#define CZMEIE (1 << 9) /* CZM Error Interrupt Enable */ -#define CZMZIE (1 << 10) /* CZM Zeroes Counter Interrupt Enable */ - -/* CNT_STATUS bitmasks */ -#define ICII (1 << 0) /* Illegal Gray/Binary Code Interrupt Identifier */ -#define UCII (1 << 1) /* Up count Interrupt Identifier */ -#define DCII (1 << 2) /* Down count Interrupt Identifier */ -#define MINCII (1 << 3) /* Min Count Interrupt Identifier */ -#define MAXCII (1 << 4) /* Max Count Interrupt Identifier */ -#define COV31II (1 << 5) /* 
Bit 31 Overflow Interrupt Identifier */ -#define COV15II (1 << 6) /* Bit 15 Overflow Interrupt Identifier */ -#define CZEROII (1 << 7) /* Count to Zero Interrupt Identifier */ -#define CZMII (1 << 8) /* CZM Pin Interrupt Identifier */ -#define CZMEII (1 << 9) /* CZM Error Interrupt Identifier */ -#define CZMZII (1 << 10) /* CZM Zeroes Counter Interrupt Identifier */ - -/* CNT_COMMAND bitmasks */ -#define W1LCNT 0xf /* Load Counter Register */ -#define W1LMIN 0xf0 /* Load Min Register */ -#define W1LMAX 0xf00 /* Load Max Register */ -#define W1ZMONCE (1 << 12) /* Enable CZM Clear Counter Once */ - -#define W1LCNT_ZERO (1 << 0) /* write 1 to load CNT_COUNTER with zero */ -#define W1LCNT_MIN (1 << 2) /* write 1 to load CNT_COUNTER from CNT_MIN */ -#define W1LCNT_MAX (1 << 3) /* write 1 to load CNT_COUNTER from CNT_MAX */ - -#define W1LMIN_ZERO (1 << 4) /* write 1 to load CNT_MIN with zero */ -#define W1LMIN_CNT (1 << 5) /* write 1 to load CNT_MIN from CNT_COUNTER */ -#define W1LMIN_MAX (1 << 7) /* write 1 to load CNT_MIN from CNT_MAX */ - -#define W1LMAX_ZERO (1 << 8) /* write 1 to load CNT_MAX with zero */ -#define W1LMAX_CNT (1 << 9) /* write 1 to load CNT_MAX from CNT_COUNTER */ -#define W1LMAX_MIN (1 << 10) /* write 1 to load CNT_MAX from CNT_MIN */ - -/* CNT_DEBOUNCE bitmasks */ -#define DPRESCALE 0xf /* Load Counter Register */ - -#endif diff --git a/include/linux/platform_data/clk-da8xx-cfgchip.h b/include/linux/platform_data/clk-da8xx-cfgchip.h new file mode 100644 index 000000000000..de0f77d38669 --- /dev/null +++ b/include/linux/platform_data/clk-da8xx-cfgchip.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * clk-da8xx-cfgchip - TI DaVinci DA8xx CFGCHIP clock driver + * + * Copyright (C) 2018 David Lechner <david@lechnology.com> + */ + +#ifndef __LINUX_PLATFORM_DATA_CLK_DA8XX_CFGCHIP_H__ +#define __LINUX_PLATFORM_DATA_CLK_DA8XX_CFGCHIP_H__ + +#include <linux/regmap.h> + +/** + * da8xx_cfgchip_clk_platform_data + * @cfgchip: CFGCHIP syscon regmap + */ +struct da8xx_cfgchip_clk_platform_data { + struct regmap *cfgchip; +}; + +#endif /* __LINUX_PLATFORM_DATA_CLK_DA8XX_CFGCHIP_H__ */ diff --git a/include/linux/platform_data/clk-davinci-pll.h b/include/linux/platform_data/clk-davinci-pll.h new file mode 100644 index 000000000000..e55dab1d578b --- /dev/null +++ b/include/linux/platform_data/clk-davinci-pll.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PLL clock driver for TI Davinci SoCs + * + * Copyright (C) 2018 David Lechner <david@lechnology.com> + */ + +#ifndef __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__ +#define __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__ + +#include <linux/regmap.h> + +/** + * davinci_pll_platform_data + * @cfgchip: CFGCHIP syscon regmap + */ +struct davinci_pll_platform_data { + struct regmap *cfgchip; +}; + +#endif /* __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__ */ diff --git a/include/linux/platform_data/dmtimer-omap.h b/include/linux/platform_data/dmtimer-omap.h index a19b78d826e9..757a0f9e26f9 100644 --- a/include/linux/platform_data/dmtimer-omap.h +++ b/include/linux/platform_data/dmtimer-omap.h @@ -20,12 +20,50 @@ #ifndef __PLATFORM_DATA_DMTIMER_OMAP_H__ #define __PLATFORM_DATA_DMTIMER_OMAP_H__ +struct omap_dm_timer_ops { + struct omap_dm_timer *(*request_by_node)(struct device_node *np); + struct omap_dm_timer *(*request_specific)(int timer_id); + struct omap_dm_timer *(*request)(void); + + int (*free)(struct omap_dm_timer *timer); + + void (*enable)(struct omap_dm_timer *timer); + void (*disable)(struct omap_dm_timer *timer); + + 
int (*get_irq)(struct omap_dm_timer *timer); + int (*set_int_enable)(struct omap_dm_timer *timer, + unsigned int value); + int (*set_int_disable)(struct omap_dm_timer *timer, u32 mask); + + struct clk *(*get_fclk)(struct omap_dm_timer *timer); + + int (*start)(struct omap_dm_timer *timer); + int (*stop)(struct omap_dm_timer *timer); + int (*set_source)(struct omap_dm_timer *timer, int source); + + int (*set_load)(struct omap_dm_timer *timer, int autoreload, + unsigned int value); + int (*set_match)(struct omap_dm_timer *timer, int enable, + unsigned int match); + int (*set_pwm)(struct omap_dm_timer *timer, int def_on, + int toggle, int trigger); + int (*set_prescaler)(struct omap_dm_timer *timer, int prescaler); + + unsigned int (*read_counter)(struct omap_dm_timer *timer); + int (*write_counter)(struct omap_dm_timer *timer, + unsigned int value); + unsigned int (*read_status)(struct omap_dm_timer *timer); + int (*write_status)(struct omap_dm_timer *timer, + unsigned int value); +}; + struct dmtimer_platform_data { /* set_timer_src - Only used for OMAP1 devices */ int (*set_timer_src)(struct platform_device *pdev, int source); u32 timer_capability; u32 timer_errata; int (*get_context_loss_count)(struct device *); + const struct omap_dm_timer_ops *timer_ops; }; #endif /* __PLATFORM_DATA_DMTIMER_OMAP_H__ */ diff --git a/include/linux/platform_data/gpio-htc-egpio.h b/include/linux/platform_data/gpio-htc-egpio.h index b7baf1e42c55..9a3e78082883 100644 --- a/include/linux/platform_data/gpio-htc-egpio.h +++ b/include/linux/platform_data/gpio-htc-egpio.h @@ -6,8 +6,6 @@ #ifndef __HTC_EGPIO_H__ #define __HTC_EGPIO_H__ -#include <linux/gpio.h> - /* Descriptive values for all-in or all-out htc_egpio_chip descriptors. */ #define HTC_EGPIO_OUTPUT (~0) #define HTC_EGPIO_INPUT 0 diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h index cb2618147c34..8612855691b2 100644 --- a/include/linux/platform_data/gpio-omap.h +++ b/include/linux/platform_data/gpio-omap.h @@ -157,11 +157,6 @@ #define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr)) #define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES) -struct omap_gpio_dev_attr { - int bank_width; /* GPIO bank width */ - bool dbck_flag; /* dbck required or not - True for OMAP3&4 */ -}; - struct omap_gpio_reg_offs { u16 revision; u16 direction; diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h index fcdc707eab99..2744cff1b297 100644 --- a/include/linux/platform_data/mlxreg.h +++ b/include/linux/platform_data/mlxreg.h @@ -129,6 +129,8 @@ struct mlxreg_core_platform_data { * @mask: top aggregation interrupt common mask; * @cell_low: location of low aggregation interrupt register; * @mask_low: low aggregation interrupt common mask; + * @deferred_nr: I2C adapter number must be exist prior probing execution; + * @shift_nr: I2C adapter numbers must be incremented by this value; */ struct mlxreg_core_hotplug_platform_data { struct mlxreg_core_item *items; @@ -139,6 +141,8 @@ struct mlxreg_core_hotplug_platform_data { u32 mask; u32 cell_low; u32 mask_low; + int deferred_nr; + int shift_nr; }; #endif /* __LINUX_PLATFORM_DATA_MLXREG_H */ diff --git a/include/linux/platform_data/mtd-nand-pxa3xx.h b/include/linux/platform_data/mtd-nand-pxa3xx.h index b42ad83cbc20..4fd0f592a2d2 100644 --- a/include/linux/platform_data/mtd-nand-pxa3xx.h +++ b/include/linux/platform_data/mtd-nand-pxa3xx.h @@ -6,41 +6,22 @@ #include <linux/mtd/partitions.h> /* - * Current pxa3xx_nand controller has two chip select 
which - * both be workable. - * - * Notice should be taken that: - * When you want to use this feature, you should not enable the - * keep configuration feature, for two chip select could be - * attached with different nand chip. The different page size - * and timing requirement make the keep configuration impossible. + * Current pxa3xx_nand controller has two chip select which both be workable but + * historically all platforms remaining on platform data used only one. Switch + * to device tree if you need more. */ - -/* The max num of chip select current support */ -#define NUM_CHIP_SELECT (2) struct pxa3xx_nand_platform_data { - - /* the data flash bus is shared between the Static Memory - * Controller and the Data Flash Controller, the arbiter - * controls the ownership of the bus - */ - int enable_arbiter; - - /* allow platform code to keep OBM/bootloader defined NFC config */ - int keep_config; - - /* indicate how many chip selects will be used */ - int num_cs; - - /* use an flash-based bad block table */ - bool flash_bbt; - - /* requested ECC strength and ECC step size */ + /* Keep OBM/bootloader NFC timing configuration */ + bool keep_config; + /* Use a flash-based bad block table */ + bool flash_bbt; + /* Requested ECC strength and ECC step size */ int ecc_strength, ecc_step_size; - - const struct mtd_partition *parts[NUM_CHIP_SELECT]; - unsigned int nr_parts[NUM_CHIP_SELECT]; + /* Partitions */ + const struct mtd_partition *parts; + unsigned int nr_parts; }; extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info); + #endif /* __ASM_ARCH_PXA3XX_NAND_H */ diff --git a/include/linux/platform_data/phy-da8xx-usb.h b/include/linux/platform_data/phy-da8xx-usb.h new file mode 100644 index 000000000000..85c2b99381b2 --- /dev/null +++ b/include/linux/platform_data/phy-da8xx-usb.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * phy-da8xx-usb - TI DaVinci DA8xx USB PHY driver + * + * Copyright (C) 2018 David Lechner <david@lechnology.com> + */ + +#ifndef __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__ +#define __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__ + +#include <linux/regmap.h> + +/** + * da8xx_usb_phy_platform_data + * @cfgchip: CFGCHIP syscon regmap + */ +struct da8xx_usb_phy_platform_data { + struct regmap *cfgchip; +}; + +#endif /* __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__ */ diff --git a/include/linux/platform_data/pinctrl-adi2.h b/include/linux/platform_data/pinctrl-adi2.h deleted file mode 100644 index 8f91300617ec..000000000000 --- a/include/linux/platform_data/pinctrl-adi2.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Pinctrl Driver for ADI GPIO2 controller - * - * Copyright 2007-2013 Analog Devices Inc. - * - * Licensed under the GPLv2 or later - */ - - -#ifndef PINCTRL_ADI2_H -#define PINCTRL_ADI2_H - -#include <linux/io.h> -#include <linux/platform_device.h> - -/** - * struct adi_pinctrl_gpio_platform_data - Pinctrl gpio platform data - * for ADI GPIO2 device. - * - * @port_gpio_base: Optional global GPIO index of the GPIO bank. - * 0 means driver decides. - * @port_pin_base: Pin index of the pin controller device. - * @port_width: PIN number of the GPIO bank device - * @pint_id: GPIO PINT device id that this GPIO bank should map to. - * @pint_assign: The 32-bit GPIO PINT registers can be divided into 2 parts. A - * GPIO bank can be mapped into either low 16 bits[0] or high 16 - * bits[1] of each PINT register. 
- * @pint_map: GIOP bank mapping code in PINT device - */ -struct adi_pinctrl_gpio_platform_data { - unsigned int port_gpio_base; - unsigned int port_pin_base; - unsigned int port_width; - u8 pinctrl_id; - u8 pint_id; - bool pint_assign; - u8 pint_map; -}; - -#endif diff --git a/include/linux/platform_data/pm33xx.h b/include/linux/platform_data/pm33xx.h new file mode 100644 index 000000000000..f9bed2a0af9d --- /dev/null +++ b/include/linux/platform_data/pm33xx.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * TI pm33xx platform data + * + * Copyright (C) 2016-2018 Texas Instruments, Inc. + * Dave Gerlach <d-gerlach@ti.com> + */ + +#ifndef _LINUX_PLATFORM_DATA_PM33XX_H +#define _LINUX_PLATFORM_DATA_PM33XX_H + +#include <linux/kbuild.h> +#include <linux/types.h> + +#ifndef __ASSEMBLER__ +struct am33xx_pm_sram_addr { + void (*do_wfi)(void); + unsigned long *do_wfi_sz; + unsigned long *resume_offset; + unsigned long *emif_sram_table; + unsigned long *ro_sram_data; +}; + +struct am33xx_pm_platform_data { + int (*init)(void); + int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long)); + struct am33xx_pm_sram_addr *(*get_sram_addrs)(void); +}; + +struct am33xx_pm_sram_data { + u32 wfi_flags; + u32 l2_aux_ctrl_val; + u32 l2_prefetch_ctrl_val; +} __packed __aligned(8); + +struct am33xx_pm_ro_sram_data { + u32 amx3_pm_sram_data_virt; + u32 amx3_pm_sram_data_phys; +} __packed __aligned(8); + +#endif /* __ASSEMBLER__ */ +#endif /* _LINUX_PLATFORM_DATA_PM33XX_H */ diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h index 13c83a25958a..0bf9fddb8306 100644 --- a/include/linux/platform_data/spi-omap2-mcspi.h +++ b/include/linux/platform_data/spi-omap2-mcspi.h @@ -2,10 +2,6 @@ #ifndef _OMAP2_MCSPI_H #define _OMAP2_MCSPI_H -#define OMAP2_MCSPI_REV 0 -#define OMAP3_MCSPI_REV 1 -#define OMAP4_MCSPI_REV 2 - #define OMAP4_MCSPI_REG_OFFSET 0x100 #define MCSPI_PINDIR_D0_IN_D1_OUT 0 @@ -17,10 +13,6 @@ struct omap2_mcspi_platform_config { unsigned int pin_dir:1; }; -struct omap2_mcspi_dev_attr { - unsigned short num_chipselect; -}; - struct omap2_mcspi_device_config { unsigned turbo_mode:1; diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 1be356330b96..80ce28d40832 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -16,6 +16,10 @@ enum ti_sysc_module_type { TI_SYSC_OMAP4_USB_HOST_FS, }; +struct ti_sysc_cookie { + void *data; +}; + /** * struct sysc_regbits - TI OCP_SYSCONFIG register field offsets * @midle_shift: Offset of the midle bit @@ -41,6 +45,7 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_QUIRK_LEGACY_IDLE BIT(8) #define SYSC_QUIRK_RESET_STATUS BIT(7) #define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6) #define SYSC_QUIRK_NO_RESET_ON_INIT BIT(5) @@ -83,4 +88,49 @@ struct sysc_config { u32 quirks; }; +enum sysc_registers { + SYSC_REVISION, + SYSC_SYSCONFIG, + SYSC_SYSSTATUS, + SYSC_MAX_REGS, +}; + +/** + * struct ti_sysc_module_data - ti-sysc to hwmod translation data for a module + * @name: legacy "ti,hwmods" module name + * @module_pa: physical address of the interconnect target module + * @module_size: size of the interconnect target module + * @offsets: array of register offsets as listed in enum sysc_registers + * @nr_offsets: number of registers + * @cap: interconnect target module capabilities + * @cfg: interconnect target module configuration + * + * This data is enough to allocate a new struct omap_hwmod_class_sysconfig + * 
based on device tree data parsed by ti-sysc driver. + */ +struct ti_sysc_module_data { + const char *name; + u64 module_pa; + u32 module_size; + int *offsets; + int nr_offsets; + const struct sysc_capabilities *cap; + struct sysc_config *cfg; +}; + +struct device; + +struct ti_sysc_platform_data { + struct of_dev_auxdata *auxdata; + int (*init_module)(struct device *dev, + const struct ti_sysc_module_data *data, + struct ti_sysc_cookie *cookie); + int (*enable_module)(struct device *dev, + const struct ti_sysc_cookie *cookie); + int (*idle_module)(struct device *dev, + const struct ti_sysc_cookie *cookie); + int (*shutdown_module)(struct device *dev, + const struct ti_sysc_cookie *cookie); +}; + #endif /* __TI_SYSC_DATA_H__ */ diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h index d8b187c3925d..7b81dad712de 100644 --- a/include/linux/power/smartreflex.h +++ b/include/linux/power/smartreflex.h @@ -143,6 +143,13 @@ #define OMAP3430_SR_ERRWEIGHT 0x04 #define OMAP3430_SR_ERRMAXLIMIT 0x02 +enum sr_instance { + OMAP_SR_MPU, /* shared with iva on omap3 */ + OMAP_SR_CORE, + OMAP_SR_IVA, + OMAP_SR_NR, +}; + struct omap_sr { char *name; struct list_head node; @@ -207,7 +214,6 @@ struct omap_smartreflex_dev_attr { const char *sensor_voltdm_name; }; -#ifdef CONFIG_POWER_AVS_OMAP /* * The smart reflex driver supports CLASS1 CLASS2 and CLASS3 SR. * The smartreflex class driver should pass the class type. @@ -290,6 +296,8 @@ struct omap_sr_data { struct voltagedomain *voltdm; }; +#ifdef CONFIG_POWER_AVS_OMAP + /* Smartreflex module enable/disable interface */ void omap_sr_enable(struct voltagedomain *voltdm); void omap_sr_disable(struct voltagedomain *voltdm); diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 79e90b3d3288..f0139b460a72 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -371,6 +371,8 @@ devm_power_supply_register_no_ws(struct device *parent, extern void power_supply_unregister(struct power_supply *psy); extern int power_supply_powers(struct power_supply *psy, struct device *dev); +#define to_power_supply(device) container_of(device, struct power_supply, dev) + extern void *power_supply_get_drvdata(struct power_supply *psy); /* For APM emulation, think legacy userspace. 
*/ extern struct class *power_supply_class; diff --git a/include/linux/printk.h b/include/linux/printk.h index e9b603ee9953..6d7e800affd8 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -201,6 +201,7 @@ void __init setup_log_buf(int early); __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...); void dump_stack_print_info(const char *log_lvl); void show_regs_print_info(const char *log_lvl); +extern asmlinkage void dump_stack(void) __cold; extern void printk_safe_init(void); extern void printk_safe_flush(void); extern void printk_safe_flush_on_panic(void); @@ -264,6 +265,10 @@ static inline void show_regs_print_info(const char *log_lvl) { } +static inline asmlinkage void dump_stack(void) +{ +} + static inline void printk_safe_init(void) { } @@ -279,8 +284,6 @@ static inline void printk_safe_flush_on_panic(void) extern int kptr_restrict; -extern asmlinkage void dump_stack(void) __cold; - #ifndef pr_fmt #define pr_fmt(fmt) fmt #endif diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 9395f06e8372..e6d226464838 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -39,6 +39,7 @@ struct persistent_ram_ecc_info { int ecc_size; int symsize; int poly; + uint16_t *par; }; struct persistent_ram_zone { diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h index a079656b614c..059242030631 100644 --- a/include/linux/ptp_classify.h +++ b/include/linux/ptp_classify.h @@ -75,5 +75,9 @@ void __init ptp_classifier_init(void); static inline void ptp_classifier_init(void) { } +static inline unsigned int ptp_classify_raw(struct sk_buff *skb) +{ + return PTP_CLASS_NONE; +} #endif #endif /* _PTP_CLASSIFY_H_ */ diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index e6335227b844..6894976b54e3 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -296,13 +296,14 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) { void *ptr; + /* The READ_ONCE in __ptr_ring_peek guarantees that anyone + * accessing data through the pointer is up to date. Pairs + * with smp_wmb in __ptr_ring_produce. + */ ptr = __ptr_ring_peek(r); if (ptr) __ptr_ring_discard_one(r); - /* Make sure anyone accessing data through the pointer is up to date. */ - /* Pairs with smp_wmb in __ptr_ring_produce. 
*/ - smp_read_barrier_depends(); return ptr; } diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 2b3b350e07b7..13c8ab171437 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -110,7 +110,7 @@ #define FW_MAJOR_VERSION 8 #define FW_MINOR_VERSION 33 -#define FW_REVISION_VERSION 1 +#define FW_REVISION_VERSION 11 #define FW_ENGINEERING_VERSION 0 /***********************/ diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index 9db02856623b..d9416ad5ef59 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -105,7 +105,7 @@ #define ETH_CTL_FRAME_ETH_TYPE_NUM 4 /* GFS constants */ -#define ETH_GFT_TRASH_CAN_VPORT 0x1FF +#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */ /* Destination port mode */ enum dest_port_mode { diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 4cc9b37b8d95..938df614cb6a 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -753,8 +753,8 @@ struct e4_ystorm_iscsi_task_ag_ctx { #define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 #define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 #define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 -#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */ +#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7 u8 flags1; #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 15e398c7230e..b5b2bc9eacca 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -483,6 +483,15 @@ struct qed_int_info { u8 used_cnt; }; +#define QED_NVM_SIGNATURE 0x12435687 + +enum qed_nvm_flash_cmd { + QED_NVM_FLASH_CMD_FILE_DATA = 0x2, + QED_NVM_FLASH_CMD_FILE_START = 0x3, + QED_NVM_FLASH_CMD_NVM_CHANGE = 0x4, + QED_NVM_FLASH_CMD_NVM_MAX, +}; + struct qed_common_cb_ops { void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc); void (*link_update)(void *dev, @@ -658,6 +667,16 @@ struct qed_common_ops { struct qed_chain *p_chain); /** + * @brief nvm_flash - Flash nvm data. + * + * @param cdev + * @param name - file containing the data + * + * @return 0 on success, error otherwise. 
+ */ + int (*nvm_flash)(struct qed_dev *cdev, const char *name); + +/** * @brief nvm_get_image - reads an entire image from nvram * * @param cdev diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index c1a446ebe362..480a57eb36cc 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -51,6 +51,8 @@ #define RDMA_MAX_CQS (64 * 1024) #define RDMA_MAX_TIDS (128 * 1024 - 1) #define RDMA_MAX_PDS (64 * 1024) +#define RDMA_MAX_XRC_SRQS (1024) +#define RDMA_MAX_SRQS (32 * 1024) #define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS #define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index e15e0da71240..193bcef302e1 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -59,6 +59,9 @@ enum roce_async_events_type { ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR, ROCE_ASYNC_EVENT_SRQ_EMPTY, ROCE_ASYNC_EVENT_DESTROY_QP_DONE, + ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR, + ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR, + ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR, MAX_ROCE_ASYNC_EVENTS_TYPE }; diff --git a/include/linux/quota.h b/include/linux/quota.h index 5ac9de4fcd6f..ca9772c8e48b 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -267,7 +267,6 @@ struct dqstats { struct percpu_counter counter[_DQST_DQSTAT_LAST]; }; -extern struct dqstats *dqstats_pcpu; extern struct dqstats dqstats; static inline void dqstats_inc(unsigned int type) diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 2fb6fb11132e..dc905a4ff8d7 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -27,6 +27,9 @@ static inline bool is_quota_modification(struct inode *inode, struct iattr *ia) (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid)); } +int kernel_quotactl(unsigned int cmd, const char __user *special, + qid_t id, void __user *addr); + #if defined(CONFIG_QUOTA) #define quota_error(sb, fmt, args...) 
\ diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index fc55ff31eca7..34149e8b5f73 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -104,25 +104,29 @@ struct radix_tree_node { unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; }; -/* The top bits of gfp_mask are used to store the root tags and the IDR flag */ -#define ROOT_IS_IDR ((__force gfp_t)(1 << __GFP_BITS_SHIFT)) -#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT + 1) +/* The IDR tag is stored in the low bits of the GFP flags */ +#define ROOT_IS_IDR ((__force gfp_t)4) +/* The top bits of gfp_mask are used to store the root tags */ +#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT) struct radix_tree_root { + spinlock_t xa_lock; gfp_t gfp_mask; struct radix_tree_node __rcu *rnode; }; -#define RADIX_TREE_INIT(mask) { \ +#define RADIX_TREE_INIT(name, mask) { \ + .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \ .gfp_mask = (mask), \ .rnode = NULL, \ } #define RADIX_TREE(name, mask) \ - struct radix_tree_root name = RADIX_TREE_INIT(mask) + struct radix_tree_root name = RADIX_TREE_INIT(name, mask) #define INIT_RADIX_TREE(root, mask) \ do { \ + spin_lock_init(&(root)->xa_lock); \ (root)->gfp_mask = (mask); \ (root)->rnode = NULL; \ } while (0) diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index 583cdd3d49ca..ea8505204fdf 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -105,8 +105,11 @@ extern const struct raid6_calls raid6_avx2x4; extern const struct raid6_calls raid6_avx512x1; extern const struct raid6_calls raid6_avx512x2; extern const struct raid6_calls raid6_avx512x4; -extern const struct raid6_calls raid6_tilegx8; extern const struct raid6_calls raid6_s390vx8; +extern const struct raid6_calls raid6_vpermxor1; +extern const struct raid6_calls raid6_vpermxor2; +extern const struct raid6_calls raid6_vpermxor4; +extern const struct raid6_calls raid6_vpermxor8; struct raid6_recov_calls { void (*data2)(int, size_t, int, int, void **); diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h index 31e1ff69efc8..ec8655514283 100644 --- a/include/linux/raid_class.h +++ b/include/linux/raid_class.h @@ -38,6 +38,7 @@ enum raid_level { RAID_LEVEL_5, RAID_LEVEL_50, RAID_LEVEL_6, + RAID_LEVEL_JBOD, }; struct raid_data { diff --git a/include/linux/random.h b/include/linux/random.h index 4024f7d9c77d..2ddf13b4281e 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -85,10 +85,8 @@ static inline unsigned long get_random_canary(void) static inline int get_random_bytes_wait(void *buf, int nbytes) { int ret = wait_for_random_bytes(); - if (unlikely(ret)) - return ret; get_random_bytes(buf, nbytes); - return 0; + return ret; } #define declare_get_random_var_wait(var) \ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 043d04784675..36360d07f25b 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -214,10 +214,12 @@ do { \ #endif /* - * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic - * initialization and destruction of rcu_head on the stack. rcu_head structures - * allocated dynamically in the heap or defined statically don't need any - * initialization. + * The init_rcu_head_on_stack() and destroy_rcu_head_on_stack() calls + * are needed for dynamic initialization and destruction of rcu_head + * on the stack, and init_rcu_head()/destroy_rcu_head() are needed for + * dynamic initialization and destruction of statically allocated rcu_head + * structures. 
However, rcu_head structures allocated dynamically in the + * heap don't need any initialization. */ #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD void init_rcu_head(struct rcu_head *head); diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 6a3aeba40e9e..5f7ad0552c03 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -21,6 +21,7 @@ #include <linux/lockdep.h> struct module; +struct clk; struct device; struct i2c_client; struct irq_domain; @@ -905,6 +906,8 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); __regmap_lockdep_wrapper(__devm_regmap_init_sdw, #config, \ sdw, config) +int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk); +void regmap_mmio_detach_clk(struct regmap *map); void regmap_exit(struct regmap *map); int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config); diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h index f2fd2d3bf58f..d1f2073e4d5f 100644 --- a/include/linux/regulator/da9211.h +++ b/include/linux/regulator/da9211.h @@ -21,6 +21,8 @@ #define DA9211_MAX_REGULATORS 2 +struct gpio_desc; + enum da9211_chip_id { DA9211, DA9212, @@ -39,7 +41,7 @@ struct da9211_pdata { * 2 : 2 phase 2 buck */ int num_buck; - int gpio_ren[DA9211_MAX_REGULATORS]; + struct gpio_desc *gpiod_ren[DA9211_MAX_REGULATORS]; struct device_node *reg_node[DA9211_MAX_REGULATORS]; struct regulator_init_data *init_data[DA9211_MAX_REGULATORS]; }; diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 4c00486b7a78..4fc96cb8e5d7 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -19,6 +19,7 @@ #include <linux/notifier.h> #include <linux/regulator/consumer.h> +struct gpio_desc; struct regmap; struct regulator_dev; struct regulator_config; @@ -387,6 +388,7 @@ struct regulator_desc { * initialized, meaning that >= 0 is a valid gpio * identifier and < 0 is a non existent gpio. * @ena_gpio: GPIO controlling regulator enable. + * @ena_gpiod: GPIO descriptor controlling regulator enable. * @ena_gpio_invert: Sense for GPIO enable control. 
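The new @ena_gpiod field documented above lets a regulator driver hand the core a GPIO descriptor instead of a legacy GPIO number. A minimal sketch of a probe path doing that, assuming a hypothetical foo_desc regulator_desc and an "enable" GPIO mapping; not taken from any in-tree driver:

static int foo_regulator_probe(struct platform_device *pdev)
{
	struct regulator_config config = { };
	struct regulator_dev *rdev;

	config.dev = &pdev->dev;
	/* Descriptor-based enable GPIO; replaces the legacy ena_gpio number */
	config.ena_gpiod = devm_gpiod_get_optional(&pdev->dev, "enable",
						   GPIOD_OUT_LOW);
	if (IS_ERR(config.ena_gpiod))
		return PTR_ERR(config.ena_gpiod);

	rdev = devm_regulator_register(&pdev->dev, &foo_desc, &config);
	return PTR_ERR_OR_ZERO(rdev);
}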
* @ena_gpio_flags: Flags to use when calling gpio_request_one() */ @@ -399,6 +401,7 @@ struct regulator_config { bool ena_gpio_initialized; int ena_gpio; + struct gpio_desc *ena_gpiod; unsigned int ena_gpio_invert:1; unsigned int ena_gpio_flags; }; diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 728d421fffe9..d09a9c7af109 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -344,7 +344,7 @@ struct rproc_ops { int (*stop)(struct rproc *rproc); void (*kick)(struct rproc *rproc, int vqid); void * (*da_to_va)(struct rproc *rproc, u64 da, int len); - int (*load_rsc_table)(struct rproc *rproc, const struct firmware *fw); + int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); struct resource_table *(*find_loaded_rsc_table)( struct rproc *rproc, const struct firmware *fw); int (*load)(struct rproc *rproc, const struct firmware *fw); @@ -395,6 +395,21 @@ enum rproc_crash_type { }; /** + * struct rproc_dump_segment - segment info from ELF header + * @node: list node related to the rproc segment list + * @da: device address of the segment + * @size: size of the segment + */ +struct rproc_dump_segment { + struct list_head node; + + dma_addr_t da; + size_t size; + + loff_t offset; +}; + +/** * struct rproc - represents a physical remote processor device * @node: list node of this rproc object * @domain: iommu domain @@ -424,6 +439,7 @@ enum rproc_crash_type { * @cached_table: copy of the resource table * @table_sz: size of @cached_table * @has_iommu: flag to indicate if remote processor is behind an MMU + * @dump_segments: list of segments in the firmware */ struct rproc { struct list_head node; @@ -455,19 +471,21 @@ struct rproc { size_t table_sz; bool has_iommu; bool auto_boot; + struct list_head dump_segments; }; /** * struct rproc_subdev - subdevice tied to a remoteproc * @node: list node related to the rproc subdevs list * @probe: probe function, called as the rproc is started - * @remove: remove function, called as the rproc is stopped + * @remove: remove function, called as the rproc is being stopped, the @crashed + * parameter indicates if this originates from the a recovery */ struct rproc_subdev { struct list_head node; int (*probe)(struct rproc_subdev *subdev); - void (*remove)(struct rproc_subdev *subdev); + void (*remove)(struct rproc_subdev *subdev, bool crashed); }; /* we currently support only two vrings per rvdev */ @@ -534,6 +552,7 @@ void rproc_free(struct rproc *rproc); int rproc_boot(struct rproc *rproc); void rproc_shutdown(struct rproc *rproc); void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type); +int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size); static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev) { @@ -550,7 +569,7 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev) void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev, int (*probe)(struct rproc_subdev *subdev), - void (*remove)(struct rproc_subdev *subdev)); + void (*remove)(struct rproc_subdev *subdev, bool graceful)); void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev); diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h index adb88f8cefbc..9326d671b6e6 100644 --- a/include/linux/reset-controller.h +++ b/include/linux/reset-controller.h @@ -27,12 +27,38 @@ struct device_node; struct of_phandle_args; /** + * struct reset_control_lookup - represents a single lookup entry + * + * @list: internal list of 
all reset lookup entries + * @provider: name of the reset controller device controlling this reset line + * @index: ID of the reset controller in the reset controller device + * @dev_id: name of the device associated with this reset line + * @con_id name of the reset line (can be NULL) + */ +struct reset_control_lookup { + struct list_head list; + const char *provider; + unsigned int index; + const char *dev_id; + const char *con_id; +}; + +#define RESET_LOOKUP(_provider, _index, _dev_id, _con_id) \ + { \ + .provider = _provider, \ + .index = _index, \ + .dev_id = _dev_id, \ + .con_id = _con_id, \ + } + +/** * struct reset_controller_dev - reset controller entity that might * provide multiple reset controls * @ops: a pointer to device specific struct reset_control_ops * @owner: kernel module of the reset controller driver * @list: internal list of reset controller devices * @reset_control_head: head of internal list of requested reset controls + * @dev: corresponding driver model device struct * @of_node: corresponding device tree node as phandle target * @of_reset_n_cells: number of cells in reset line specifiers * @of_xlate: translation function to translate from specifier as found in the @@ -44,6 +70,7 @@ struct reset_controller_dev { struct module *owner; struct list_head list; struct list_head reset_control_head; + struct device *dev; struct device_node *of_node; int of_reset_n_cells; int (*of_xlate)(struct reset_controller_dev *rcdev, @@ -58,4 +85,7 @@ struct device; int devm_reset_controller_register(struct device *dev, struct reset_controller_dev *rcdev); +void reset_controller_add_lookup(struct reset_control_lookup *lookup, + unsigned int num_entries); + #endif diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index c9df2527e0cd..1f8ad121eb43 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -152,25 +152,25 @@ struct rhashtable_params { /** * struct rhashtable - Hash table handle * @tbl: Bucket table - * @nelems: Number of elements in table * @key_len: Key length for hashfn - * @p: Configuration parameters * @max_elems: Maximum number of elements in table + * @p: Configuration parameters * @rhlist: True if this is an rhltable * @run_work: Deferred worker to expand/shrink asynchronously * @mutex: Mutex to protect current/future table swapping * @lock: Spin lock to protect walker list + * @nelems: Number of elements in table */ struct rhashtable { struct bucket_table __rcu *tbl; - atomic_t nelems; unsigned int key_len; - struct rhashtable_params p; unsigned int max_elems; + struct rhashtable_params p; bool rhlist; struct work_struct run_work; struct mutex mutex; spinlock_t lock; + atomic_t nelems; }; /** @@ -766,8 +766,10 @@ slow_path: if (!key || (params.obj_cmpfn ? params.obj_cmpfn(&arg, rht_obj(ht, head)) : - rhashtable_compare(&arg, rht_obj(ht, head)))) + rhashtable_compare(&arg, rht_obj(ht, head)))) { + pprev = &head->next; continue; + } data = rht_obj(ht, head); diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 7d9eb39fa76a..a0233edc0718 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -34,10 +34,12 @@ struct ring_buffer_event { * array[0] = time delta (28 .. 
59) * size = 8 bytes * - * @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock - * array[0] = tv_nsec - * array[1..2] = tv_sec - * size = 16 bytes + * @RINGBUF_TYPE_TIME_STAMP: Absolute timestamp + * Same format as TIME_EXTEND except that the + * value is an absolute timestamp, not a delta + * event.time_delta contains bottom 27 bits + * array[0] = top (28 .. 59) bits + * size = 8 bytes * * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX: * Data record @@ -54,12 +56,12 @@ enum ring_buffer_type { RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, RINGBUF_TYPE_PADDING, RINGBUF_TYPE_TIME_EXTEND, - /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */ RINGBUF_TYPE_TIME_STAMP, }; unsigned ring_buffer_event_length(struct ring_buffer_event *event); void *ring_buffer_event_data(struct ring_buffer_event *event); +u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event); /* * ring_buffer_discard_commit will remove an event that has not @@ -115,6 +117,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer, int ring_buffer_write(struct ring_buffer *buffer, unsigned long length, void *data); +void ring_buffer_nest_start(struct ring_buffer *buffer); +void ring_buffer_nest_end(struct ring_buffer *buffer); + struct ring_buffer_event * ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, unsigned long *lost_events); @@ -178,6 +183,8 @@ void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, int cpu, u64 *ts); void ring_buffer_set_clock(struct ring_buffer *buffer, u64 (*clock)(void)); +void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs); +bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer); size_t ring_buffer_page_len(void *page); diff --git a/include/linux/rtc.h b/include/linux/rtc.h index fc6c90b57be0..4c007f69082f 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -145,12 +145,17 @@ struct rtc_device { bool registered; - struct nvmem_config *nvmem_config; struct nvmem_device *nvmem; /* Old ABI support */ bool nvram_old_abi; struct bin_attribute *nvram; + time64_t range_min; + timeu64_t range_max; + time64_t start_secs; + time64_t offset_secs; + bool set_start_time; + #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL struct work_struct uie_task; struct timer_list uie_timer; @@ -164,6 +169,11 @@ struct rtc_device { }; #define to_rtc_device(d) container_of(d, struct rtc_device, dev) +/* useful timestamps */ +#define RTC_TIMESTAMP_BEGIN_1900 -2208989361LL /* 1900-01-01 00:00:00 */ +#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */ +#define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */ + extern struct rtc_device *rtc_device_register(const char *name, struct device *dev, const struct rtc_class_ops *ops, @@ -212,10 +222,6 @@ void rtc_aie_update_irq(void *private); void rtc_uie_update_irq(void *private); enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); -int rtc_register(rtc_task_t *task); -int rtc_unregister(rtc_task_t *task); -int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); - void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data); int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer, ktime_t expires, ktime_t period); @@ -271,4 +277,17 @@ extern int rtc_hctosys_ret; #define rtc_hctosys_ret -ENODEV #endif +#ifdef CONFIG_RTC_NVMEM +int rtc_nvmem_register(struct rtc_device *rtc, + struct nvmem_config *nvmem_config); +void rtc_nvmem_unregister(struct rtc_device *rtc); +#else +static inline int rtc_nvmem_register(struct rtc_device *rtc, + struct nvmem_config 
*nvmem_config) +{ + return -ENODEV; +} +static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {} +#endif + #endif /* _LINUX_RTC_H_ */ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 1fdcde96eb65..5225832bd6ff 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -33,9 +33,11 @@ extern void rtnl_lock(void); extern void rtnl_unlock(void); extern int rtnl_trylock(void); extern int rtnl_is_locked(void); +extern int rtnl_lock_killable(void); extern wait_queue_head_t netdev_unregistering_wq; -extern struct mutex net_mutex; +extern struct rw_semaphore pernet_ops_rwsem; +extern struct rw_semaphore net_rwsem; #ifdef CONFIG_PROVE_LOCKING extern bool lockdep_rtnl_is_held(void); diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index 478acf6efac6..e964bbd03fc2 100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -36,12 +36,12 @@ #define CHECK_REG_CMD 2 #define RTSX_HDBAR 0x08 -#define SG_INT 0x04 -#define SG_END 0x02 -#define SG_VALID 0x01 -#define SG_NO_OP 0x00 -#define SG_TRANS_DATA (0x02 << 4) -#define SG_LINK_DESC (0x03 << 4) +#define RTSX_SG_INT 0x04 +#define RTSX_SG_END 0x02 +#define RTSX_SG_VALID 0x01 +#define RTSX_SG_NO_OP 0x00 +#define RTSX_SG_TRANS_DATA (0x02 << 4) +#define RTSX_SG_LINK_DESC (0x03 << 4) #define RTSX_HDBCTLR 0x0C #define SDMA_MODE 0x00 #define ADMA_MODE (0x02 << 26) diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index 0dcc60e820de..841585f6e5f2 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -171,6 +171,8 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth); * starting from the last allocated bit. This is less efficient * than the default behavior (false). * + * This operation provides acquire barrier semantics if it succeeds. + * * Return: Non-negative allocated bit number if successful, -1 otherwise. */ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin); @@ -300,6 +302,12 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr) clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); } +static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb, + unsigned int bitnr) +{ + clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); +} + static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr) { return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 22b2131bcdcd..51f52020ad5f 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -65,16 +65,18 @@ struct sg_table { */ #define SG_MAGIC 0x87654321 +#define SG_CHAIN 0x01UL +#define SG_END 0x02UL /* * We overload the LSB of the page pointer to indicate whether it's * a valid sg entry, or whether it points to the start of a new scatterlist. * Those low bits are there for everyone! 
(thanks mason :-) */ -#define sg_is_chain(sg) ((sg)->page_link & 0x01) -#define sg_is_last(sg) ((sg)->page_link & 0x02) +#define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN) +#define sg_is_last(sg) ((sg)->page_link & SG_END) #define sg_chain_ptr(sg) \ - ((struct scatterlist *) ((sg)->page_link & ~0x03)) + ((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END))) /** * sg_assign_page - Assign a given page to an SG entry @@ -88,13 +90,13 @@ struct sg_table { **/ static inline void sg_assign_page(struct scatterlist *sg, struct page *page) { - unsigned long page_link = sg->page_link & 0x3; + unsigned long page_link = sg->page_link & (SG_CHAIN | SG_END); /* * In order for the low bit stealing approach to work, pages * must be aligned at a 32-bit boundary as a minimum. */ - BUG_ON((unsigned long) page & 0x03); + BUG_ON((unsigned long) page & (SG_CHAIN | SG_END)); #ifdef CONFIG_DEBUG_SG BUG_ON(sg->sg_magic != SG_MAGIC); BUG_ON(sg_is_chain(sg)); @@ -130,7 +132,7 @@ static inline struct page *sg_page(struct scatterlist *sg) BUG_ON(sg->sg_magic != SG_MAGIC); BUG_ON(sg_is_chain(sg)); #endif - return (struct page *)((sg)->page_link & ~0x3); + return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END)); } /** @@ -178,7 +180,8 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, * Set lowest bit to indicate a link pointer, and make sure to clear * the termination bit if it happens to be set. */ - prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02; + prv[prv_nents - 1].page_link = ((unsigned long) sgl | SG_CHAIN) + & ~SG_END; } /** @@ -198,8 +201,8 @@ static inline void sg_mark_end(struct scatterlist *sg) /* * Set termination bit, clear potential chain bit */ - sg->page_link |= 0x02; - sg->page_link &= ~0x01; + sg->page_link |= SG_END; + sg->page_link &= ~SG_CHAIN; } /** @@ -215,7 +218,7 @@ static inline void sg_unmark_end(struct scatterlist *sg) #ifdef CONFIG_DEBUG_SG BUG_ON(sg->sg_magic != SG_MAGIC); #endif - sg->page_link &= ~0x02; + sg->page_link &= ~SG_END; } /** @@ -248,6 +251,24 @@ static inline void *sg_virt(struct scatterlist *sg) return page_address(sg_page(sg)) + sg->offset; } +/** + * sg_init_marker - Initialize markers in sg table + * @sgl: The SG table + * @nents: Number of entries in table + * + **/ +static inline void sg_init_marker(struct scatterlist *sgl, + unsigned int nents) +{ +#ifdef CONFIG_DEBUG_SG + unsigned int i; + + for (i = 0; i < nents; i++) + sgl[i].sg_magic = SG_MAGIC; +#endif + sg_mark_end(&sgl[nents - 1]); +} + int sg_nents(struct scatterlist *sg); int sg_nents_for_len(struct scatterlist *sg, u64 len); struct scatterlist *sg_next(struct scatterlist *); diff --git a/include/linux/sched.h b/include/linux/sched.h index b161ef8a902e..b3d697f3b573 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -93,7 +93,6 @@ struct task_group; /* Convenience macros for the sake of wake_up(): */ #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) -#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) /* get_task_state(): */ #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ @@ -275,6 +274,34 @@ struct load_weight { u32 inv_weight; }; +/** + * struct util_est - Estimation utilization of FAIR tasks + * @enqueued: instantaneous estimated utilization of a task/cpu + * @ewma: the Exponential Weighted Moving Average (EWMA) + * utilization of a task + * + * Support data structure to track an Exponential Weighted Moving Average + * (EWMA) of a FAIR task's utilization. 
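The weighting implied by UTIL_EST_WEIGHT_SHIFT (defined as 2 inside the struct below) corresponds to ewma' = ewma + (sample - ewma) / 4. A rough, illustrative sketch of that update rule, not the scheduler's actual code:

/* Illustrative only: EWMA with weight w = 1 / (1 << UTIL_EST_WEIGHT_SHIFT) */
static unsigned int util_est_ewma_sketch(unsigned int ewma, unsigned int sample)
{
	int diff = sample - ewma;

	/*
	 * ewma' = ewma + w * (sample - ewma), here w = 1/4.
	 * Relies on arithmetic right shift of the signed difference,
	 * as kernel code conventionally does.
	 */
	return ewma + (diff >> UTIL_EST_WEIGHT_SHIFT);
}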
New samples are added to the moving + * average each time a task completes an activation. Sample's weight is chosen + * so that the EWMA will be relatively insensitive to transient changes to the + * task's workload. + * + * The enqueued attribute has a slightly different meaning for tasks and cpus: + * - task: the task's util_avg at last task dequeue time + * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU + * Thus, the util_est.enqueued of a task represents the contribution on the + * estimated utilization of the CPU where that task is currently enqueued. + * + * Only for tasks we track a moving average of the past instantaneous + * estimated utilization. This allows to absorb sporadic drops in utilization + * of an otherwise almost periodic task. + */ +struct util_est { + unsigned int enqueued; + unsigned int ewma; +#define UTIL_EST_WEIGHT_SHIFT 2 +} __attribute__((__aligned__(sizeof(u64)))); + /* * The load_avg/util_avg accumulates an infinite geometric series * (see __update_load_avg() in kernel/sched/fair.c). @@ -336,7 +363,8 @@ struct sched_avg { unsigned long load_avg; unsigned long runnable_load_avg; unsigned long util_avg; -}; + struct util_est util_est; +} ____cacheline_aligned; struct sched_statistics { #ifdef CONFIG_SCHEDSTATS @@ -407,7 +435,7 @@ struct sched_entity { * Put into separate cache line so it does not * collide with read-mostly values above. */ - struct sched_avg avg ____cacheline_aligned_in_smp; + struct sched_avg avg; #endif }; diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h index 0b55834efd46..59667444669f 100644 --- a/include/linux/sched/cpufreq.h +++ b/include/linux/sched/cpufreq.h @@ -8,9 +8,8 @@ * Interface between cpufreq drivers and the scheduler: */ -#define SCHED_CPUFREQ_RT (1U << 0) -#define SCHED_CPUFREQ_DL (1U << 1) -#define SCHED_CPUFREQ_IOWAIT (1U << 2) +#define SCHED_CPUFREQ_IOWAIT (1U << 0) +#define SCHED_CPUFREQ_MIGRATION (1U << 1) #ifdef CONFIG_CPU_FREQ struct update_util_data { diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index a5bc8728ead7..0cb034331cbb 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -1,8 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_SCHED_DEADLINE_H -#define _LINUX_SCHED_DEADLINE_H - -#include <linux/sched.h> /* * SCHED_DEADLINE tasks has negative priorities, reflecting @@ -28,5 +24,3 @@ static inline bool dl_time_before(u64 a, u64 b) { return (s64)(a - b) < 0; } - -#endif /* _LINUX_SCHED_DEADLINE_H */ diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index d849431c8060..4a6582c27dea 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -12,6 +12,7 @@ enum hk_flags { HK_FLAG_SCHED = (1 << 3), HK_FLAG_TICK = (1 << 4), HK_FLAG_DOMAIN = (1 << 5), + HK_FLAG_WQ = (1 << 6), }; #ifdef CONFIG_CPU_ISOLATION diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 9806184bb3d5..2c570cd934af 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -104,7 +104,8 @@ static inline void mm_update_next_owner(struct mm_struct *mm) #endif /* CONFIG_MEMCG */ #ifdef CONFIG_MMU -extern void arch_pick_mmap_layout(struct mm_struct *mm); +extern void arch_pick_mmap_layout(struct mm_struct *mm, + struct rlimit *rlim_stack); extern unsigned long arch_get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); @@ -113,7 +114,8 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long 
addr, unsigned long len, unsigned long pgoff, unsigned long flags); #else -static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} +static inline void arch_pick_mmap_layout(struct mm_struct *mm, + struct rlimit *rlim_stack) {} #endif static inline bool in_vfork(struct task_struct *tsk) diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h index 3d3a97d9399d..b36f4cf38111 100644 --- a/include/linux/sched/nohz.h +++ b/include/linux/sched/nohz.h @@ -16,11 +16,9 @@ static inline void cpu_load_update_nohz_stop(void) { } #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) extern void nohz_balance_enter_idle(int cpu); -extern void set_cpu_sd_state_idle(void); extern int get_nohz_timer_target(void); #else static inline void nohz_balance_enter_idle(int cpu) { } -static inline void set_cpu_sd_state_idle(void) { } #endif #ifdef CONFIG_NO_HZ_COMMON @@ -37,8 +35,4 @@ extern void wake_up_nohz_cpu(int cpu); static inline void wake_up_nohz_cpu(int cpu) { } #endif -#ifdef CONFIG_NO_HZ_FULL -extern u64 scheduler_tick_max_deferment(void); -#endif - #endif /* _LINUX_SCHED_NOHZ_H */ diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h new file mode 100644 index 000000000000..b458c87b866c --- /dev/null +++ b/include/linux/scmi_protocol.h @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SCMI Message Protocol driver header + * + * Copyright (C) 2018 ARM Ltd. + */ +#include <linux/device.h> +#include <linux/types.h> + +#define SCMI_MAX_STR_SIZE 16 +#define SCMI_MAX_NUM_RATES 16 + +/** + * struct scmi_revision_info - version information structure + * + * @major_ver: Major ABI version. Change here implies risk of backward + * compatibility break. + * @minor_ver: Minor ABI version. Change here implies new feature addition, + * or compatible change in ABI. + * @num_protocols: Number of protocols that are implemented, excluding the + * base protocol. + * @num_agents: Number of agents in the system. + * @impl_ver: A vendor-specific implementation version. 
+ * @vendor_id: A vendor identifier(Null terminated ASCII string) + * @sub_vendor_id: A sub-vendor identifier(Null terminated ASCII string) + */ +struct scmi_revision_info { + u16 major_ver; + u16 minor_ver; + u8 num_protocols; + u8 num_agents; + u32 impl_ver; + char vendor_id[SCMI_MAX_STR_SIZE]; + char sub_vendor_id[SCMI_MAX_STR_SIZE]; +}; + +struct scmi_clock_info { + char name[SCMI_MAX_STR_SIZE]; + bool rate_discrete; + union { + struct { + int num_rates; + u64 rates[SCMI_MAX_NUM_RATES]; + } list; + struct { + u64 min_rate; + u64 max_rate; + u64 step_size; + } range; + }; +}; + +struct scmi_handle; + +/** + * struct scmi_clk_ops - represents the various operations provided + * by SCMI Clock Protocol + * + * @count_get: get the count of clocks provided by SCMI + * @info_get: get the information of the specified clock + * @rate_get: request the current clock rate of a clock + * @rate_set: set the clock rate of a clock + * @enable: enables the specified clock + * @disable: disables the specified clock + */ +struct scmi_clk_ops { + int (*count_get)(const struct scmi_handle *handle); + + const struct scmi_clock_info *(*info_get) + (const struct scmi_handle *handle, u32 clk_id); + int (*rate_get)(const struct scmi_handle *handle, u32 clk_id, + u64 *rate); + int (*rate_set)(const struct scmi_handle *handle, u32 clk_id, + u32 config, u64 rate); + int (*enable)(const struct scmi_handle *handle, u32 clk_id); + int (*disable)(const struct scmi_handle *handle, u32 clk_id); +}; + +/** + * struct scmi_perf_ops - represents the various operations provided + * by SCMI Performance Protocol + * + * @limits_set: sets limits on the performance level of a domain + * @limits_get: gets limits on the performance level of a domain + * @level_set: sets the performance level of a domain + * @level_get: gets the performance level of a domain + * @device_domain_id: gets the scmi domain id for a given device + * @get_transition_latency: gets the DVFS transition latency for a given device + * @add_opps_to_device: adds all the OPPs for a given device + * @freq_set: sets the frequency for a given device using sustained frequency + * to sustained performance level mapping + * @freq_get: gets the frequency for a given device using sustained frequency + * to sustained performance level mapping + */ +struct scmi_perf_ops { + int (*limits_set)(const struct scmi_handle *handle, u32 domain, + u32 max_perf, u32 min_perf); + int (*limits_get)(const struct scmi_handle *handle, u32 domain, + u32 *max_perf, u32 *min_perf); + int (*level_set)(const struct scmi_handle *handle, u32 domain, + u32 level, bool poll); + int (*level_get)(const struct scmi_handle *handle, u32 domain, + u32 *level, bool poll); + int (*device_domain_id)(struct device *dev); + int (*get_transition_latency)(const struct scmi_handle *handle, + struct device *dev); + int (*add_opps_to_device)(const struct scmi_handle *handle, + struct device *dev); + int (*freq_set)(const struct scmi_handle *handle, u32 domain, + unsigned long rate, bool poll); + int (*freq_get)(const struct scmi_handle *handle, u32 domain, + unsigned long *rate, bool poll); +}; + +/** + * struct scmi_power_ops - represents the various operations provided + * by SCMI Power Protocol + * + * @num_domains_get: get the count of power domains provided by SCMI + * @name_get: gets the name of a power domain + * @state_set: sets the power state of a power domain + * @state_get: gets the power state of a power domain + */ +struct scmi_power_ops { + int (*num_domains_get)(const struct scmi_handle 
*handle); + char *(*name_get)(const struct scmi_handle *handle, u32 domain); +#define SCMI_POWER_STATE_TYPE_SHIFT 30 +#define SCMI_POWER_STATE_ID_MASK (BIT(28) - 1) +#define SCMI_POWER_STATE_PARAM(type, id) \ + ((((type) & BIT(0)) << SCMI_POWER_STATE_TYPE_SHIFT) | \ + ((id) & SCMI_POWER_STATE_ID_MASK)) +#define SCMI_POWER_STATE_GENERIC_ON SCMI_POWER_STATE_PARAM(0, 0) +#define SCMI_POWER_STATE_GENERIC_OFF SCMI_POWER_STATE_PARAM(1, 0) + int (*state_set)(const struct scmi_handle *handle, u32 domain, + u32 state); + int (*state_get)(const struct scmi_handle *handle, u32 domain, + u32 *state); +}; + +struct scmi_sensor_info { + u32 id; + u8 type; + char name[SCMI_MAX_STR_SIZE]; +}; + +/* + * Partial list from Distributed Management Task Force (DMTF) specification: + * DSP0249 (Platform Level Data Model specification) + */ +enum scmi_sensor_class { + NONE = 0x0, + TEMPERATURE_C = 0x2, + VOLTAGE = 0x5, + CURRENT = 0x6, + POWER = 0x7, + ENERGY = 0x8, +}; + +/** + * struct scmi_sensor_ops - represents the various operations provided + * by SCMI Sensor Protocol + * + * @count_get: get the count of sensors provided by SCMI + * @info_get: get the information of the specified sensor + * @configuration_set: control notifications on cross-over events for + * the trip-points + * @trip_point_set: selects and configures a trip-point of interest + * @reading_get: gets the current value of the sensor + */ +struct scmi_sensor_ops { + int (*count_get)(const struct scmi_handle *handle); + + const struct scmi_sensor_info *(*info_get) + (const struct scmi_handle *handle, u32 sensor_id); + int (*configuration_set)(const struct scmi_handle *handle, + u32 sensor_id); + int (*trip_point_set)(const struct scmi_handle *handle, u32 sensor_id, + u8 trip_id, u64 trip_value); + int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id, + bool async, u64 *value); +}; + +/** + * struct scmi_handle - Handle returned to ARM SCMI clients for usage. 
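A minimal sketch of how an SCMI client driver might consume this handle through the per-protocol ops, using the scmi_device and scmi_clk_ops definitions from this header; the foo_ naming and the clock id of 0 are assumptions for illustration:

static int foo_scmi_probe(struct scmi_device *sdev)
{
	const struct scmi_handle *handle = sdev->handle;
	u64 rate;
	int ret;

	if (!handle || !handle->clk_ops)
		return -ENODEV;

	/* Query the rate of clock 0 via the clock protocol ops */
	ret = handle->clk_ops->rate_get(handle, 0, &rate);
	if (ret)
		return ret;

	dev_info(&sdev->dev, "SCMI clock 0 runs at %llu Hz\n",
		 (unsigned long long)rate);
	return 0;
}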
+ * + * @dev: pointer to the SCMI device + * @version: pointer to the structure containing SCMI version information + * @power_ops: pointer to set of power protocol operations + * @perf_ops: pointer to set of performance protocol operations + * @clk_ops: pointer to set of clock protocol operations + * @sensor_ops: pointer to set of sensor protocol operations + */ +struct scmi_handle { + struct device *dev; + struct scmi_revision_info *version; + struct scmi_perf_ops *perf_ops; + struct scmi_clk_ops *clk_ops; + struct scmi_power_ops *power_ops; + struct scmi_sensor_ops *sensor_ops; + /* for protocol internal use */ + void *perf_priv; + void *clk_priv; + void *power_priv; + void *sensor_priv; +}; + +enum scmi_std_protocol { + SCMI_PROTOCOL_BASE = 0x10, + SCMI_PROTOCOL_POWER = 0x11, + SCMI_PROTOCOL_SYSTEM = 0x12, + SCMI_PROTOCOL_PERF = 0x13, + SCMI_PROTOCOL_CLOCK = 0x14, + SCMI_PROTOCOL_SENSOR = 0x15, +}; + +struct scmi_device { + u32 id; + u8 protocol_id; + struct device dev; + struct scmi_handle *handle; +}; + +#define to_scmi_dev(d) container_of(d, struct scmi_device, dev) + +struct scmi_device * +scmi_device_create(struct device_node *np, struct device *parent, int protocol); +void scmi_device_destroy(struct scmi_device *scmi_dev); + +struct scmi_device_id { + u8 protocol_id; +}; + +struct scmi_driver { + const char *name; + int (*probe)(struct scmi_device *sdev); + void (*remove)(struct scmi_device *sdev); + const struct scmi_device_id *id_table; + + struct device_driver driver; +}; + +#define to_scmi_driver(d) container_of(d, struct scmi_driver, driver) + +#ifdef CONFIG_ARM_SCMI_PROTOCOL +int scmi_driver_register(struct scmi_driver *driver, + struct module *owner, const char *mod_name); +void scmi_driver_unregister(struct scmi_driver *driver); +#else +static inline int +scmi_driver_register(struct scmi_driver *driver, struct module *owner, + const char *mod_name) +{ + return -EINVAL; +} + +static inline void scmi_driver_unregister(struct scmi_driver *driver) {} +#endif /* CONFIG_ARM_SCMI_PROTOCOL */ + +#define scmi_register(driver) \ + scmi_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) +#define scmi_unregister(driver) \ + scmi_driver_unregister(driver) + +/** + * module_scmi_driver() - Helper macro for registering a scmi driver + * @__scmi_driver: scmi_driver structure + * + * Helper macro for scmi drivers to set up proper module init / exit + * functions. Replaces module_init() and module_exit() and keeps people from + * printing pointless things to the kernel log when their driver is loaded. 
+ */ +#define module_scmi_driver(__scmi_driver) \ + module_driver(__scmi_driver, scmi_register, scmi_unregister) + +typedef int (*scmi_prot_init_fn_t)(struct scmi_handle *); +int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn); +void scmi_protocol_unregister(int protocol_id); diff --git a/include/linux/security.h b/include/linux/security.h index 5111fe8159ce..ecb06e1357dd 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -36,7 +36,6 @@ struct linux_binprm; struct cred; struct rlimit; struct siginfo; -struct sem_array; struct sembuf; struct kern_ipc_perm; struct audit_context; @@ -50,9 +49,7 @@ struct qstr; struct iattr; struct fown_struct; struct file_operations; -struct shmid_kernel; struct msg_msg; -struct msg_queue; struct xattr; struct xfrm_sec_ctx; struct mm_struct; @@ -115,6 +112,7 @@ struct xfrm_policy; struct xfrm_state; struct xfrm_user_sec_ctx; struct seq_file; +struct sctp_endpoint; #ifdef CONFIG_MMU extern unsigned long mmap_min_addr; @@ -318,6 +316,7 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp); void security_cred_free(struct cred *cred); int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); void security_transfer_creds(struct cred *new, const struct cred *old); +void security_cred_getsecid(const struct cred *c, u32 *secid); int security_kernel_act_as(struct cred *new, u32 secid); int security_kernel_create_files_as(struct cred *new, struct inode *inode); int security_kernel_module_request(char *kmod_name); @@ -349,24 +348,24 @@ int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag); void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid); int security_msg_msg_alloc(struct msg_msg *msg); void security_msg_msg_free(struct msg_msg *msg); -int security_msg_queue_alloc(struct msg_queue *msq); -void security_msg_queue_free(struct msg_queue *msq); -int security_msg_queue_associate(struct msg_queue *msq, int msqflg); -int security_msg_queue_msgctl(struct msg_queue *msq, int cmd); -int security_msg_queue_msgsnd(struct msg_queue *msq, +int security_msg_queue_alloc(struct kern_ipc_perm *msq); +void security_msg_queue_free(struct kern_ipc_perm *msq); +int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg); +int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd); +int security_msg_queue_msgsnd(struct kern_ipc_perm *msq, struct msg_msg *msg, int msqflg); -int security_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg, +int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg, struct task_struct *target, long type, int mode); -int security_shm_alloc(struct shmid_kernel *shp); -void security_shm_free(struct shmid_kernel *shp); -int security_shm_associate(struct shmid_kernel *shp, int shmflg); -int security_shm_shmctl(struct shmid_kernel *shp, int cmd); -int security_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr, int shmflg); -int security_sem_alloc(struct sem_array *sma); -void security_sem_free(struct sem_array *sma); -int security_sem_associate(struct sem_array *sma, int semflg); -int security_sem_semctl(struct sem_array *sma, int cmd); -int security_sem_semop(struct sem_array *sma, struct sembuf *sops, +int security_shm_alloc(struct kern_ipc_perm *shp); +void security_shm_free(struct kern_ipc_perm *shp); +int security_shm_associate(struct kern_ipc_perm *shp, int shmflg); +int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd); +int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg); 
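With this conversion the msg, shm and (below) sem hooks all operate on the embedded struct kern_ipc_perm instead of the ipc-private container types, so callers simply pass the permission member through. A minimal sketch, with foo_shmid standing in for whatever container structure the ipc layer now keeps private:

struct foo_shmid {
	struct kern_ipc_perm shm_perm;
	/* ... other ipc-private fields ... */
};

static int foo_new_segment(struct foo_shmid *shp, int shmflg)
{
	int err;

	/* The LSM sees only the generic permission structure */
	err = security_shm_alloc(&shp->shm_perm);
	if (err)
		return err;

	err = security_shm_associate(&shp->shm_perm, shmflg);
	if (err)
		security_shm_free(&shp->shm_perm);

	return err;
}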
+int security_sem_alloc(struct kern_ipc_perm *sma); +void security_sem_free(struct kern_ipc_perm *sma); +int security_sem_associate(struct kern_ipc_perm *sma, int semflg); +int security_sem_semctl(struct kern_ipc_perm *sma, int cmd); +int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops, unsigned nsops, int alter); void security_d_instantiate(struct dentry *dentry, struct inode *inode); int security_getprocattr(struct task_struct *p, char *name, char **value); @@ -1031,32 +1030,32 @@ static inline int security_msg_msg_alloc(struct msg_msg *msg) static inline void security_msg_msg_free(struct msg_msg *msg) { } -static inline int security_msg_queue_alloc(struct msg_queue *msq) +static inline int security_msg_queue_alloc(struct kern_ipc_perm *msq) { return 0; } -static inline void security_msg_queue_free(struct msg_queue *msq) +static inline void security_msg_queue_free(struct kern_ipc_perm *msq) { } -static inline int security_msg_queue_associate(struct msg_queue *msq, +static inline int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg) { return 0; } -static inline int security_msg_queue_msgctl(struct msg_queue *msq, int cmd) +static inline int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd) { return 0; } -static inline int security_msg_queue_msgsnd(struct msg_queue *msq, +static inline int security_msg_queue_msgsnd(struct kern_ipc_perm *msq, struct msg_msg *msg, int msqflg) { return 0; } -static inline int security_msg_queue_msgrcv(struct msg_queue *msq, +static inline int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg, struct task_struct *target, long type, int mode) @@ -1064,50 +1063,50 @@ static inline int security_msg_queue_msgrcv(struct msg_queue *msq, return 0; } -static inline int security_shm_alloc(struct shmid_kernel *shp) +static inline int security_shm_alloc(struct kern_ipc_perm *shp) { return 0; } -static inline void security_shm_free(struct shmid_kernel *shp) +static inline void security_shm_free(struct kern_ipc_perm *shp) { } -static inline int security_shm_associate(struct shmid_kernel *shp, +static inline int security_shm_associate(struct kern_ipc_perm *shp, int shmflg) { return 0; } -static inline int security_shm_shmctl(struct shmid_kernel *shp, int cmd) +static inline int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd) { return 0; } -static inline int security_shm_shmat(struct shmid_kernel *shp, +static inline int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg) { return 0; } -static inline int security_sem_alloc(struct sem_array *sma) +static inline int security_sem_alloc(struct kern_ipc_perm *sma) { return 0; } -static inline void security_sem_free(struct sem_array *sma) +static inline void security_sem_free(struct kern_ipc_perm *sma) { } -static inline int security_sem_associate(struct sem_array *sma, int semflg) +static inline int security_sem_associate(struct kern_ipc_perm *sma, int semflg) { return 0; } -static inline int security_sem_semctl(struct sem_array *sma, int cmd) +static inline int security_sem_semctl(struct kern_ipc_perm *sma, int cmd) { return 0; } -static inline int security_sem_semop(struct sem_array *sma, +static inline int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops, unsigned nsops, int alter) { @@ -1215,6 +1214,11 @@ int security_tun_dev_create(void); int security_tun_dev_attach_queue(void *security); int security_tun_dev_attach(struct sock *sk, void *security); int security_tun_dev_open(void *security); +int 
security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb); +int security_sctp_bind_connect(struct sock *sk, int optname, + struct sockaddr *address, int addrlen); +void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk, + struct sock *newsk); #else /* CONFIG_SECURITY_NETWORK */ static inline int security_unix_stream_connect(struct sock *sock, @@ -1407,6 +1411,25 @@ static inline int security_tun_dev_open(void *security) { return 0; } + +static inline int security_sctp_assoc_request(struct sctp_endpoint *ep, + struct sk_buff *skb) +{ + return 0; +} + +static inline int security_sctp_bind_connect(struct sock *sk, int optname, + struct sockaddr *address, + int addrlen) +{ + return 0; +} + +static inline void security_sctp_sk_clone(struct sctp_endpoint *ep, + struct sock *sk, + struct sock *newsk) +{ +} #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND diff --git a/include/linux/sem.h b/include/linux/sem.h index 9badd322dcee..5608a500c43e 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h @@ -2,48 +2,10 @@ #ifndef _LINUX_SEM_H #define _LINUX_SEM_H -#include <linux/atomic.h> -#include <linux/rcupdate.h> -#include <linux/cache.h> -#include <linux/time64.h> #include <uapi/linux/sem.h> struct task_struct; - -/* One semaphore structure for each semaphore in the system. */ -struct sem { - int semval; /* current value */ - /* - * PID of the process that last modified the semaphore. For - * Linux, specifically these are: - * - semop - * - semctl, via SETVAL and SETALL. - * - at task exit when performing undo adjustments (see exit_sem). - */ - int sempid; - spinlock_t lock; /* spinlock for fine-grained semtimedop */ - struct list_head pending_alter; /* pending single-sop operations */ - /* that alter the semaphore */ - struct list_head pending_const; /* pending single-sop operations */ - /* that do not alter the semaphore*/ - time_t sem_otime; /* candidate for sem_otime */ -} ____cacheline_aligned_in_smp; - -/* One sem_array data structure for each set of semaphores in the system. */ -struct sem_array { - struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */ - time64_t sem_ctime; /* create/last semctl() time */ - struct list_head pending_alter; /* pending operations */ - /* that alter the array */ - struct list_head pending_const; /* pending complex operations */ - /* that do not alter semvals */ - struct list_head list_id; /* undo requests on this array */ - int sem_nsems; /* no. 
of semaphores in array */ - int complex_count; /* pending complex operations */ - unsigned int use_global_lock;/* >0: global lock required */ - - struct sem sems[]; -} __randomize_layout; +struct sem_undo_list; #ifdef CONFIG_SYSVIPC diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index ab437dd2e3b9..a121982af0f5 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -118,9 +118,14 @@ __printf(2, 3) void seq_printf(struct seq_file *m, const char *fmt, ...); void seq_putc(struct seq_file *m, char c); void seq_puts(struct seq_file *m, const char *s); +void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter, + unsigned long long num, unsigned int width); void seq_put_decimal_ull(struct seq_file *m, const char *delimiter, unsigned long long num); void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num); +void seq_put_hex_ll(struct seq_file *m, const char *delimiter, + unsigned long long v, unsigned int width); + void seq_escape(struct seq_file *m, const char *s, const char *esc); void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, @@ -235,4 +240,5 @@ extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *hea extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, int *cpu, loff_t *pos); +void seq_file_init(void); #endif diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index b32df49a3bd5..1d356105f25a 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -379,7 +379,7 @@ extern int of_setup_earlycon(const struct earlycon_id *match, extern bool earlycon_acpi_spcr_enable __initdata; int setup_earlycon(char *buf); #else -static const bool earlycon_acpi_spcr_enable; +static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED; static inline int setup_earlycon(char *buf) { return 0; } #endif diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h index a7f004a3c177..463ed28d2b27 100644 --- a/include/linux/serial_s3c.h +++ b/include/linux/serial_s3c.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Internal header file for Samsung S3C2410 serial ports (UART0-2) * @@ -10,21 +11,7 @@ * Internal header file for MX1ADS serial ports (UART1 & 2) * * Copyright (C) 2002 Shane Nay (shane@minirl.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ + */ #ifndef __ASM_ARM_REGS_SERIAL_H #define __ASM_ARM_REGS_SERIAL_H diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index e5140648f638..da5178216da5 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -17,4 +17,16 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } #endif +#ifndef CONFIG_ARCH_HAS_MEM_ENCRYPT +static inline int set_memory_encrypted(unsigned long addr, int numpages) +{ + return 0; +} + +static inline int set_memory_decrypted(unsigned long addr, int numpages) +{ + return 0; +} +#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */ + #endif /* _LINUX_SET_MEMORY_H_ */ diff --git a/include/linux/sfp.h b/include/linux/sfp.h index e724d5a3dd80..ebce9e24906a 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -422,10 +422,11 @@ struct sfp_upstream_ops { #if IS_ENABLED(CONFIG_SFP) int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, unsigned long *support); -phy_interface_t sfp_parse_interface(struct sfp_bus *bus, - const struct sfp_eeprom_id *id); void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, unsigned long *support); +phy_interface_t sfp_select_interface(struct sfp_bus *bus, + const struct sfp_eeprom_id *id, + unsigned long *link_modes); int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo); int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, @@ -444,18 +445,19 @@ static inline int sfp_parse_port(struct sfp_bus *bus, return PORT_OTHER; } -static inline phy_interface_t sfp_parse_interface(struct sfp_bus *bus, - const struct sfp_eeprom_id *id) -{ - return PHY_INTERFACE_MODE_NA; -} - static inline void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, unsigned long *support) { } +static inline phy_interface_t sfp_select_interface(struct sfp_bus *bus, + const struct sfp_eeprom_id *id, + unsigned long *link_modes) +{ + return PHY_INTERFACE_MODE_NA; +} + static inline int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo) { diff --git a/include/linux/sha256.h b/include/linux/sha256.h new file mode 100644 index 000000000000..244fe01a65fb --- /dev/null +++ b/include/linux/sha256.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2014 Red Hat Inc. + * + * Author: Vivek Goyal <vgoyal@redhat.com> + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#ifndef SHA256_H +#define SHA256_H + +#include <linux/types.h> +#include <crypto/sha.h> + +/* + * Stand-alone implementation of the SHA256 algorithm. It is designed to + * have as little dependencies as possible so it can be used in the + * kexec_file purgatory. In other cases you should use the implementation in + * crypto/. 
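A minimal sketch of hashing a buffer with the helpers this header declares just below; foo_digest is an assumed caller, and SHA256_DIGEST_SIZE comes from crypto/sha.h, which this header includes:

static void foo_digest(const u8 *data, unsigned int len,
		       u8 out[SHA256_DIGEST_SIZE])
{
	struct sha256_state sctx;

	sha256_init(&sctx);
	sha256_update(&sctx, data, len);
	sha256_final(&sctx, out);
}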
+ * + * For details see lib/sha256.c + */ + +extern int sha256_init(struct sha256_state *sctx); +extern int sha256_update(struct sha256_state *sctx, const u8 *input, + unsigned int length); +extern int sha256_final(struct sha256_state *sctx, u8 *hash); + +#endif /* SHA256_H */ diff --git a/include/linux/shm.h b/include/linux/shm.h index 2bbafacfbfc9..d8e69aed3d32 100644 --- a/include/linux/shm.h +++ b/include/linux/shm.h @@ -7,27 +7,7 @@ #include <uapi/linux/shm.h> #include <asm/shmparam.h> -struct shmid_kernel /* private to the kernel */ -{ - struct kern_ipc_perm shm_perm; - struct file *shm_file; - unsigned long shm_nattch; - unsigned long shm_segsz; - time64_t shm_atim; - time64_t shm_dtim; - time64_t shm_ctim; - pid_t shm_cprid; - pid_t shm_lprid; - struct user_struct *mlock_user; - - /* The task created the shm object. NULL if the task is dead. */ - struct task_struct *shm_creator; - struct list_head shm_clist; /* list by creator */ -} __randomize_layout; - -/* shm_mode upper byte flags */ -#define SHM_DEST 01000 /* segment will be destroyed on last detach */ -#define SHM_LOCKED 02000 /* segment will not be swapped */ +struct file; #ifdef CONFIG_SYSVIPC struct sysv_shm { diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 388ff2936a87..6794490f25b2 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -75,6 +75,9 @@ struct shrinker { #define SHRINKER_NUMA_AWARE (1 << 0) #define SHRINKER_MEMCG_AWARE (1 << 1) -extern int register_shrinker(struct shrinker *); -extern void unregister_shrinker(struct shrinker *); +extern int prealloc_shrinker(struct shrinker *shrinker); +extern void register_shrinker_prepared(struct shrinker *shrinker); +extern int register_shrinker(struct shrinker *shrinker); +extern void unregister_shrinker(struct shrinker *shrinker); +extern void free_prealloced_shrinker(struct shrinker *shrinker); #endif diff --git a/include/linux/sizes.h b/include/linux/sizes.h index ce3e8150c174..fbde0bc7e882 100644 --- a/include/linux/sizes.h +++ b/include/linux/sizes.h @@ -8,6 +8,8 @@ #ifndef __LINUX_SIZES_H__ #define __LINUX_SIZES_H__ +#include <linux/const.h> + #define SZ_1 0x00000001 #define SZ_2 0x00000002 #define SZ_4 0x00000004 @@ -44,4 +46,6 @@ #define SZ_1G 0x40000000 #define SZ_2G 0x80000000 +#define SZ_4G _AC(0x100000000, ULL) + #endif /* __LINUX_SIZES_H__ */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index ddf77cf4ff2d..9065477ed255 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -466,6 +466,9 @@ struct ubuf_info { #define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg)) +int mm_account_pinned_pages(struct mmpin *mmp, size_t size); +void mm_unaccount_pinned_pages(struct mmpin *mmp); + struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size); struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size, struct ubuf_info *uarg); @@ -669,6 +672,7 @@ struct sk_buff { * UDP receive path is one user. 
*/ unsigned long dev_scratch; + int ip_defrag_offset; }; }; struct rb_node rbnode; /* used in netem & tcp stack */ @@ -4037,6 +4041,12 @@ static inline bool skb_is_gso_v6(const struct sk_buff *skb) return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; } +/* Note: Should be called only if skb_is_gso(skb) is true */ +static inline bool skb_is_gso_sctp(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; +} + static inline void skb_gso_reset(struct sk_buff *skb) { skb_shinfo(skb)->gso_size = 0; @@ -4044,6 +4054,22 @@ static inline void skb_gso_reset(struct sk_buff *skb) skb_shinfo(skb)->gso_type = 0; } +static inline void skb_increase_gso_size(struct skb_shared_info *shinfo, + u16 increment) +{ + if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) + return; + shinfo->gso_size += increment; +} + +static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo, + u16 decrement) +{ + if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) + return; + shinfo->gso_size -= decrement; +} + void __skb_warn_lro_forwarding(const struct sk_buff *skb); static inline bool skb_warn_if_lro(const struct sk_buff *skb) diff --git a/include/linux/slab.h b/include/linux/slab.h index 231abc8976c5..81ebd71f8c03 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -125,7 +125,6 @@ #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ (unsigned long)ZERO_SIZE_PTR) -#include <linux/kmemleak.h> #include <linux/kasan.h> struct mem_cgroup; @@ -137,12 +136,13 @@ bool slab_is_available(void); extern bool usercopy_fallback; -struct kmem_cache *kmem_cache_create(const char *name, size_t size, - size_t align, slab_flags_t flags, +struct kmem_cache *kmem_cache_create(const char *name, unsigned int size, + unsigned int align, slab_flags_t flags, void (*ctor)(void *)); struct kmem_cache *kmem_cache_create_usercopy(const char *name, - size_t size, size_t align, slab_flags_t flags, - size_t useroffset, size_t usersize, + unsigned int size, unsigned int align, + slab_flags_t flags, + unsigned int useroffset, unsigned int usersize, void (*ctor)(void *)); void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); @@ -308,7 +308,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; * 2 = 129 .. 192 bytes * n = 2^(n-1)+1 .. 
2^n */ -static __always_inline int kmalloc_index(size_t size) +static __always_inline unsigned int kmalloc_index(size_t size) { if (!size) return 0; @@ -504,7 +504,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) return kmalloc_large(size, flags); #ifndef CONFIG_SLOB if (!(flags & GFP_DMA)) { - int index = kmalloc_index(size); + unsigned int index = kmalloc_index(size); if (!index) return ZERO_SIZE_PTR; @@ -522,11 +522,11 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) * return size or 0 if a kmalloc cache for that * size does not exist */ -static __always_inline int kmalloc_size(int n) +static __always_inline unsigned int kmalloc_size(unsigned int n) { #ifndef CONFIG_SLOB if (n > 2) - return 1 << n; + return 1U << n; if (n == 1 && KMALLOC_MIN_SIZE <= 32) return 96; @@ -542,7 +542,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) #ifndef CONFIG_SLOB if (__builtin_constant_p(size) && size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) { - int i = kmalloc_index(size); + unsigned int i = kmalloc_index(size); if (!i) return ZERO_SIZE_PTR; diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 7385547c04b1..d9228e4d0320 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -85,8 +85,8 @@ struct kmem_cache { unsigned int *random_seq; #endif - size_t useroffset; /* Usercopy region offset */ - size_t usersize; /* Usercopy region size */ + unsigned int useroffset; /* Usercopy region offset */ + unsigned int usersize; /* Usercopy region size */ struct kmem_cache_node *node[MAX_NUMNODES]; }; diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 8ad99c47b19c..3773e26c08c1 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -73,7 +73,7 @@ struct kmem_cache_cpu { * given order would contain. */ struct kmem_cache_order_objects { - unsigned long x; + unsigned int x; }; /* @@ -84,11 +84,12 @@ struct kmem_cache { /* Used for retriving partial slabs etc */ slab_flags_t flags; unsigned long min_partial; - int size; /* The size of an object including meta data */ - int object_size; /* The size of an object without meta data */ - int offset; /* Free pointer offset. */ + unsigned int size; /* The size of an object including meta data */ + unsigned int object_size;/* The size of an object without meta data */ + unsigned int offset; /* Free pointer offset. */ #ifdef CONFIG_SLUB_CPU_PARTIAL - int cpu_partial; /* Number of per cpu partial objects to keep around */ + /* Number of per cpu partial objects to keep around */ + unsigned int cpu_partial; #endif struct kmem_cache_order_objects oo; @@ -98,10 +99,10 @@ struct kmem_cache { gfp_t allocflags; /* gfp flags to use on each alloc */ int refcount; /* Refcount for slab cache destroy */ void (*ctor)(void *); - int inuse; /* Offset to metadata */ - int align; /* Alignment */ - int reserved; /* Reserved bytes at the end of slabs */ - int red_left_pad; /* Left redzone padding size */ + unsigned int inuse; /* Offset to metadata */ + unsigned int align; /* Alignment */ + unsigned int reserved; /* Reserved bytes at the end of slabs */ + unsigned int red_left_pad; /* Left redzone padding size */ const char *name; /* Name (only for display!) 
*/ struct list_head list; /* List of slab caches */ #ifdef CONFIG_SYSFS @@ -110,7 +111,8 @@ struct kmem_cache { #endif #ifdef CONFIG_MEMCG struct memcg_cache_params memcg_params; - int max_attr_size; /* for propagation, maximum size of a stored attr */ + /* for propagation, maximum size of a stored attr */ + unsigned int max_attr_size; #ifdef CONFIG_SYSFS struct kset *memcg_kset; #endif @@ -124,7 +126,7 @@ struct kmem_cache { /* * Defragmentation by allocating from a remote node. */ - int remote_node_defrag_ratio; + unsigned int remote_node_defrag_ratio; #endif #ifdef CONFIG_SLAB_FREELIST_RANDOM @@ -135,8 +137,8 @@ struct kmem_cache { struct kasan_cache kasan_info; #endif - size_t useroffset; /* Usercopy region offset */ - size_t usersize; /* Usercopy region size */ + unsigned int useroffset; /* Usercopy region offset */ + unsigned int usersize; /* Usercopy region size */ struct kmem_cache_node *node[MAX_NUMNODES]; }; diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index b0a507d356ef..fd25f0148566 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -21,6 +21,10 @@ #define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22) #define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23) +#define MT2701_TOP_AXI_PROT_EN_MM_M0 BIT(1) +#define MT2701_TOP_AXI_PROT_EN_CONN_M BIT(2) +#define MT2701_TOP_AXI_PROT_EN_CONN_S BIT(8) + #define MT7622_TOP_AXI_PROT_EN_ETHSYS (BIT(3) | BIT(17)) #define MT7622_TOP_AXI_PROT_EN_HIF0 (BIT(24) | BIT(25)) #define MT7622_TOP_AXI_PROT_EN_HIF1 (BIT(26) | BIT(27) | \ diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h index bd8e0864b059..5b98bbdabc25 100644 --- a/include/linux/soc/qcom/mdt_loader.h +++ b/include/linux/soc/qcom/mdt_loader.h @@ -14,6 +14,7 @@ struct firmware; ssize_t qcom_mdt_get_size(const struct firmware *fw); int qcom_mdt_load(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, - phys_addr_t mem_phys, size_t mem_size); + phys_addr_t mem_phys, size_t mem_size, + phys_addr_t *reloc_base); #endif diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h index 9f5c6e53f3a5..9e4fdd861a51 100644 --- a/include/linux/soc/qcom/smd-rpm.h +++ b/include/linux/soc/qcom/smd-rpm.h @@ -10,6 +10,7 @@ struct qcom_smd_rpm; /* * Constants used for addressing resources in the RPM. */ +#define QCOM_SMD_RPM_BOBB 0x62626f62 #define QCOM_SMD_RPM_BOOST 0x61747362 #define QCOM_SMD_RPM_BUS_CLK 0x316b6c63 #define QCOM_SMD_RPM_BUS_MASTER 0x73616d62 diff --git a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h index e57eb4b6cc5a..fc0b445bb36b 100644 --- a/include/linux/soc/samsung/exynos-pmu.h +++ b/include/linux/soc/samsung/exynos-pmu.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * http://www.samsung.com * * Header for EXYNOS PMU Driver support - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
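The qcom_mdt_load() change a few hunks above grows a reloc_base out-parameter reporting where the firmware image ended up after relocation. A minimal sketch of a caller; the firmware name and pas_id value are made-up assumptions:

static int foo_mdt_load(struct device *dev, const struct firmware *fw,
			void *mem_region, phys_addr_t mem_phys,
			size_t mem_size)
{
	phys_addr_t reloc_base;
	int ret;

	/* "modem.mdt" and pas_id 1 are illustrative only */
	ret = qcom_mdt_load(dev, fw, "modem.mdt", 1, mem_region,
			    mem_phys, mem_size, &reloc_base);
	if (ret)
		return ret;

	dev_dbg(dev, "firmware relocated to %pa\n", &reloc_base);
	return 0;
}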
*/ #ifndef __LINUX_SOC_EXYNOS_PMU_H diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h index bebdde5dccd6..66dcb9ec273a 100644 --- a/include/linux/soc/samsung/exynos-regs-pmu.h +++ b/include/linux/soc/samsung/exynos-regs-pmu.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2010-2015 Samsung Electronics Co., Ltd. * http://www.samsung.com * * EXYNOS - Power management unit definition * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * * Notice: * This is not a list of all Exynos Power Management Unit SFRs. * There are too many of them, not mentioning subtle differences diff --git a/include/linux/socket.h b/include/linux/socket.h index 9286a5a8c60c..ea50f4a65816 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -287,6 +287,7 @@ struct ucred { #define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */ #define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ #define MSG_EOF MSG_FIN +#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */ #define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */ #define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ @@ -346,11 +347,40 @@ extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); struct timespec; -/* The __sys_...msg variants allow MSG_CMSG_COMPAT */ -extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); -extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); +/* The __sys_...msg variants allow MSG_CMSG_COMPAT iff + * forbid_cmsg_compat==false + */ +extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, + unsigned int flags, bool forbid_cmsg_compat); +extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, + unsigned int flags, bool forbid_cmsg_compat); extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, - unsigned int vlen, unsigned int flags); + unsigned int vlen, unsigned int flags, + bool forbid_cmsg_compat); + +/* helpers which do the actual work for syscalls */ +extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size, + unsigned int flags, struct sockaddr __user *addr, + int __user *addr_len); +extern int __sys_sendto(int fd, void __user *buff, size_t len, + unsigned int flags, struct sockaddr __user *addr, + int addr_len); +extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, + int __user *upeer_addrlen, int flags); +extern int __sys_socket(int family, int type, int protocol); +extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen); +extern int __sys_connect(int fd, struct sockaddr __user *uservaddr, + int addrlen); +extern int __sys_listen(int fd, int backlog); +extern int __sys_getsockname(int fd, struct sockaddr __user *usockaddr, + int __user *usockaddr_len); +extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr, + int __user *usockaddr_len); +extern int __sys_socketpair(int family, int type, int protocol, + int __user *usockvec); +extern int __sys_shutdown(int fd, int how); + +extern struct ns_common *get_net_ns(struct ns_common *ns); #endif /* _LINUX_SOCKET_H */ diff --git a/include/linux/spi/spi_gpio.h 
b/include/linux/spi/spi_gpio.h index e7bd89a59cd1..9e7e83d8645b 100644 --- a/include/linux/spi/spi_gpio.h +++ b/include/linux/spi/spi_gpio.h @@ -8,64 +8,17 @@ * - id the same as the SPI bus number it implements * - dev.platform data pointing to a struct spi_gpio_platform_data * - * Or, see the driver code for information about speedups that are - * possible on platforms that support inlined access for GPIOs (no - * spi_gpio_platform_data is used). - * - * Use spi_board_info with these busses in the usual way, being sure - * that the controller_data being the GPIO used for each device's - * chipselect: - * - * static struct spi_board_info ... [] = { - * ... - * // this slave uses GPIO 42 for its chipselect - * .controller_data = (void *) 42, - * ... - * // this one uses GPIO 86 for its chipselect - * .controller_data = (void *) 86, - * ... - * }; - * - * If chipselect is not used (there's only one device on the bus), assign - * SPI_GPIO_NO_CHIPSELECT to the controller_data: - * .controller_data = (void *) SPI_GPIO_NO_CHIPSELECT; - * - * If the MISO or MOSI pin is not available then it should be set to - * SPI_GPIO_NO_MISO or SPI_GPIO_NO_MOSI. + * Use spi_board_info with these busses in the usual way. * * If the bitbanged bus is later switched to a "native" controller, * that platform_device and controller_data should be removed. */ -#define SPI_GPIO_NO_CHIPSELECT ((unsigned long)-1l) -#define SPI_GPIO_NO_MISO ((unsigned long)-1l) -#define SPI_GPIO_NO_MOSI ((unsigned long)-1l) - /** * struct spi_gpio_platform_data - parameter for bitbanged SPI master - * @sck: number of the GPIO used for clock output - * @mosi: number of the GPIO used for Master Output, Slave In (MOSI) data - * @miso: number of the GPIO used for Master Input, Slave Output (MISO) data * @num_chipselect: how many slaves to allow - * - * All GPIO signals used with the SPI bus managed through this driver - * (chipselects, MOSI, MISO, SCK) must be configured as GPIOs, instead - * of some alternate function. - * - * It can be convenient to use this driver with pins that have alternate - * functions associated with a "native" SPI controller if a driver for that - * controller is not available, or is missing important functionality. - * - * On platforms which can do so, configure MISO with a weak pullup unless - * there's an external pullup on that signal. That saves power by avoiding - * floating signals. (A weak pulldown would save power too, but many - * drivers expect to see all-ones data as the no slave "response".) */ struct spi_gpio_platform_data { - unsigned sck; - unsigned long mosi; - unsigned long miso; - u16 num_chipselect; }; diff --git a/include/linux/stm.h b/include/linux/stm.h index 210ff2292361..c6f577ab6f21 100644 --- a/include/linux/stm.h +++ b/include/linux/stm.h @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * System Trace Module (STM) infrastructure apis * Copyright (C) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
*/ #ifndef _STM_H_ diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index ed761f751ecb..9b11b6a0978c 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -217,5 +217,12 @@ void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *); bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, const struct sockaddr *sap); void rpc_cleanup_clids(void); + +static inline int rpc_reply_expected(struct rpc_task *task) +{ + return (task->tk_msg.rpc_proc != NULL) && + (task->tk_msg.rpc_proc->p_decode != NULL); +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_CLNT_H */ diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 786ae2255f05..574368e8a16f 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -272,6 +272,7 @@ struct svc_rqst { #define RQ_BUSY (6) /* request is busy */ #define RQ_DATA (7) /* request has data */ unsigned long rq_flags; /* flags field */ + ktime_t rq_qtime; /* enqueue time */ void * rq_argp; /* decoded arguments */ void * rq_resp; /* xdr'd results */ @@ -283,6 +284,7 @@ struct svc_rqst { int rq_reserved; /* space on socket outq * reserved for this request */ + ktime_t rq_stime; /* start time */ struct cache_req rq_chandle; /* handle passed to caches for * request delaying @@ -493,6 +495,10 @@ void svc_wake_up(struct svc_serv *); void svc_reserve(struct svc_rqst *rqstp, int space); struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu); char * svc_print_addr(struct svc_rqst *, char *, size_t); +unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, + struct kvec *first, size_t total); +char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, + struct kvec *first, size_t total); #define RPC_MAX_ADDRBUFLEN (63U) diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 4b731b046bcd..7337e1221590 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -132,9 +132,6 @@ struct svcxprt_rdma { #define RDMAXPRT_CONN_PENDING 3 #define RPCRDMA_LISTEN_BACKLOG 10 -/* The default ORD value is based on two outstanding full-size writes with a - * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */ -#define RPCRDMA_ORD (64/4) #define RPCRDMA_MAX_REQUESTS 32 /* Typical ULP usage of BC requests is NFSv4.1 backchannel. 
Our diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 1caf7bc83306..c3d72066d4b1 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -25,7 +25,7 @@ struct svc_xprt_ops { void (*xpo_release_rqst)(struct svc_rqst *); void (*xpo_detach)(struct svc_xprt *); void (*xpo_free)(struct svc_xprt *); - int (*xpo_secure_port)(struct svc_rqst *); + void (*xpo_secure_port)(struct svc_rqst *rqstp); void (*xpo_kill_temp_xprt)(struct svc_xprt *); }; @@ -83,6 +83,7 @@ struct svc_xprt { size_t xpt_locallen; /* length of address */ struct sockaddr_storage xpt_remote; /* remote peer's address */ size_t xpt_remotelen; /* length of address */ + char xpt_remotebuf[INET6_ADDRSTRLEN + 10]; struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */ struct list_head xpt_users; /* callbacks on free */ @@ -152,7 +153,10 @@ static inline void svc_xprt_set_remote(struct svc_xprt *xprt, { memcpy(&xprt->xpt_remote, sa, salen); xprt->xpt_remotelen = salen; + snprintf(xprt->xpt_remotebuf, sizeof(xprt->xpt_remotebuf) - 1, + "%pISpc", sa); } + static inline unsigned short svc_addr_port(const struct sockaddr *sa) { const struct sockaddr_in *sin = (const struct sockaddr_in *)sa; diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index d950223c64b1..2bd68177a442 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -253,6 +253,12 @@ xdr_stream_remaining(const struct xdr_stream *xdr) return xdr->nwords << 2; } +ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, + size_t size); +ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr, + size_t maxlen, gfp_t gfp_flags); +ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, + size_t size); ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, size_t maxlen, gfp_t gfp_flags); /** @@ -313,6 +319,31 @@ xdr_stream_encode_u64(struct xdr_stream *xdr, __u64 n) } /** + * xdr_stream_encode_opaque_inline - Encode opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: pointer to void pointer + * @len: size of object + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t len) +{ + size_t count = sizeof(__u32) + xdr_align_size(len); + __be32 *p = xdr_reserve_space(xdr, count); + + if (unlikely(!p)) { + *ptr = NULL; + return -EMSGSIZE; + } + xdr_encode_opaque(p, NULL, len); + *ptr = ++p; + return count; +} + +/** * xdr_stream_encode_opaque_fixed - Encode fixed length opaque xdr data * @xdr: pointer to xdr_stream * @ptr: pointer to opaque data object @@ -356,6 +387,31 @@ xdr_stream_encode_opaque(struct xdr_stream *xdr, const void *ptr, size_t len) } /** + * xdr_stream_encode_uint32_array - Encode variable length array of integers + * @xdr: pointer to xdr_stream + * @array: array of integers + * @array_size: number of elements in @array + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_uint32_array(struct xdr_stream *xdr, + const __u32 *array, size_t array_size) +{ + ssize_t ret = (array_size+1) * sizeof(__u32); + __be32 *p = xdr_reserve_space(xdr, ret); + + if (unlikely(!p)) + return -EMSGSIZE; + *p++ = cpu_to_be32(array_size); + for (; array_size > 0; p++, array++, array_size--) + *p = cpu_to_be32p(array); + return ret; +} + 
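The xdr_stream_encode_uint32_array() helper added above, together with the matching xdr_stream_decode_uint32_array() below, handles a length-prefixed XDR array of 32-bit words: a count word followed by that many big-endian words. A minimal usage sketch, not part of this patch — the encode_ids()/decode_ids() callers are hypothetical and the struct xdr_stream is assumed to be already set up by the caller:

static ssize_t encode_ids(struct xdr_stream *xdr, const __u32 *ids,
			  size_t nr_ids)
{
	/* Emits a 32-bit element count followed by nr_ids 32-bit words,
	 * or -EMSGSIZE if the stream has no room left. */
	return xdr_stream_encode_uint32_array(xdr, ids, nr_ids);
}

static ssize_t decode_ids(struct xdr_stream *xdr, __u32 *ids, size_t max_ids)
{
	/* Returns the number of elements stored in ids[], -EBADMSG if the
	 * XDR buffer is too short, or -EMSGSIZE if the on-the-wire array
	 * has more than max_ids elements. */
	return xdr_stream_decode_uint32_array(xdr, ids, max_ids);
}

On the decode side, any trailing slots of the destination array are zero-filled when the received array is shorter than max_ids, so callers get a fully initialized buffer on success.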
+/** * xdr_stream_decode_u32 - Decode a 32-bit integer * @xdr: pointer to xdr_stream * @ptr: location to store integer @@ -432,6 +488,44 @@ xdr_stream_decode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t maxle } return len; } + +/** + * xdr_stream_decode_uint32_array - Decode variable length array of integers + * @xdr: pointer to xdr_stream + * @array: location to store the integer array or NULL + * @array_size: number of elements to store + * + * Return values: + * On success, returns number of elements stored in @array + * %-EBADMSG on XDR buffer overflow + * %-EMSGSIZE if the size of the array exceeds @array_size + */ +static inline ssize_t +xdr_stream_decode_uint32_array(struct xdr_stream *xdr, + __u32 *array, size_t array_size) +{ + __be32 *p; + __u32 len; + ssize_t retval; + + if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) + return -EBADMSG; + p = xdr_inline_decode(xdr, len * sizeof(*p)); + if (unlikely(!p)) + return -EBADMSG; + if (array == NULL) + return len; + if (len <= array_size) { + if (len < array_size) + memset(array+len, 0, (array_size-len)*sizeof(*array)); + array_size = len; + retval = len; + } else + retval = -EMSGSIZE; + for (; array_size > 0; p++, array++, array_size--) + *array = be32_to_cpup(p); + return retval; +} #endif /* __KERNEL__ */ #endif /* _SUNRPC_XDR_H_ */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 7fad83881ce1..5fea0fb420df 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -197,7 +197,7 @@ struct rpc_xprt { struct list_head free; /* free slots */ unsigned int max_reqs; /* max number of slots */ unsigned int min_reqs; /* min number of slots */ - atomic_t num_reqs; /* total slots */ + unsigned int num_reqs; /* total slots */ unsigned long state; /* transport state */ unsigned char resvport : 1; /* use a reserved port */ atomic_t swapper; /* we're swapping over this @@ -373,6 +373,7 @@ void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action); void xprt_write_space(struct rpc_xprt *xprt); void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result); struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid); +void xprt_update_rtt(struct rpc_task *task); void xprt_complete_rqst(struct rpc_task *task, int copied); void xprt_pin_rqst(struct rpc_rqst *req); void xprt_unpin_rqst(struct rpc_rqst *req); diff --git a/include/linux/swap.h b/include/linux/swap.h index a1a3f4ed94ce..2417d288e016 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -400,7 +400,6 @@ int generic_swapfile_activate(struct swap_info_struct *, struct file *, #define SWAP_ADDRESS_SPACE_SHIFT 14 #define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT) extern struct address_space *swapper_spaces[]; -extern bool swap_vma_readahead; #define swap_address_space(entry) \ (&swapper_spaces[swp_type(entry)][swp_offset(entry) \ >> SWAP_ADDRESS_SPACE_SHIFT]) @@ -422,14 +421,10 @@ extern struct page *read_swap_cache_async(swp_entry_t, gfp_t, extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t, struct vm_area_struct *vma, unsigned long addr, bool *new_page_allocated); -extern struct page *swapin_readahead(swp_entry_t, gfp_t, - struct vm_area_struct *vma, unsigned long addr); - -extern struct page *swap_readahead_detect(struct vm_fault *vmf, - struct vma_swap_readahead *swap_ra); -extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask, - struct vm_fault *vmf, - struct vma_swap_readahead *swap_ra); +extern struct page 
*swap_cluster_readahead(swp_entry_t entry, gfp_t flag, + struct vm_fault *vmf); +extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag, + struct vm_fault *vmf); /* linux/mm/swapfile.c */ extern atomic_long_t nr_swap_pages; @@ -437,11 +432,6 @@ extern long total_swap_pages; extern atomic_t nr_rotate_swap; extern bool has_usable_swap(void); -static inline bool swap_use_vma_readahead(void) -{ - return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap); -} - /* Swap 50% full? Release swapcache more aggressively.. */ static inline bool vm_swap_full(void) { @@ -537,26 +527,14 @@ static inline void put_swap_page(struct page *page, swp_entry_t swp) { } -static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, - struct vm_area_struct *vma, unsigned long addr) +static inline struct page *swap_cluster_readahead(swp_entry_t entry, + gfp_t gfp_mask, struct vm_fault *vmf) { return NULL; } -static inline bool swap_use_vma_readahead(void) -{ - return false; -} - -static inline struct page *swap_readahead_detect( - struct vm_fault *vmf, struct vma_swap_readahead *swap_ra) -{ - return NULL; -} - -static inline struct page *do_swap_page_readahead( - swp_entry_t fentry, gfp_t gfp_mask, - struct vm_fault *vmf, struct vma_swap_readahead *swap_ra) +static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, + struct vm_fault *vmf) { return NULL; } diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 5b1f2a00491c..965be92c33b5 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -72,14 +72,6 @@ void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, void swiotlb_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, unsigned long attrs); -extern void -*swiotlb_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags); - -extern void -swiotlb_free_coherent(struct device *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle); - extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a78186d826d7..70fcda1a9049 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -81,6 +81,17 @@ union bpf_attr; #include <linux/key.h> #include <trace/syscall.h> +#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER +/* + * It may be useful for an architecture to override the definitions of the + * SYSCALL_DEFINE0() and __SYSCALL_DEFINEx() macros, in particular to use a + * different calling convention for syscalls. To allow for that, the prototypes + * for the sys_*() functions below will *not* be included if + * CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled. + */ +#include <asm/syscall_wrapper.h> +#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ + /* * __MAP - apply a macro to syscall arguments * __MAP(n, m, t1, a1, t2, a2, ..., tn, an) will expand to @@ -91,7 +102,7 @@ union bpf_attr; * for SYSCALL_DEFINE<n>/COMPAT_SYSCALL_DEFINE<n> */ #define __MAP0(m,...) -#define __MAP1(m,t,a) m(t,a) +#define __MAP1(m,t,a,...) m(t,a) #define __MAP2(m,t,a,...) m(t,a), __MAP1(m,__VA_ARGS__) #define __MAP3(m,t,a,...) m(t,a), __MAP2(m,__VA_ARGS__) #define __MAP4(m,t,a,...) 
m(t,a), __MAP3(m,__VA_ARGS__) @@ -189,9 +200,13 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event) } #endif +#ifndef SYSCALL_DEFINE0 #define SYSCALL_DEFINE0(sname) \ SYSCALL_METADATA(_##sname, 0); \ + asmlinkage long sys_##sname(void); \ + ALLOW_ERROR_INJECTION(sys_##sname, ERRNO); \ asmlinkage long sys_##sname(void) +#endif /* SYSCALL_DEFINE0 */ #define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) #define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) @@ -207,19 +222,28 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event) __SYSCALL_DEFINEx(x, sname, __VA_ARGS__) #define __PROTECT(...) asmlinkage_protect(__VA_ARGS__) + +/* + * The asmlinkage stub is aliased to a function named __se_sys_*() which + * sign-extends 32-bit ints to longs whenever needed. The actual work is + * done within __do_sys_*(). + */ +#ifndef __SYSCALL_DEFINEx #define __SYSCALL_DEFINEx(x, name, ...) \ asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ - __attribute__((alias(__stringify(SyS##name)))); \ - static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ - asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ - asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + __attribute__((alias(__stringify(__se_sys##name)))); \ + ALLOW_ERROR_INJECTION(sys##name, ERRNO); \ + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ + asmlinkage long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + asmlinkage long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ { \ - long ret = SYSC##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \ + long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__));\ __MAP(x,__SC_TEST,__VA_ARGS__); \ __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \ return ret; \ } \ - static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)) + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) +#endif /* __SYSCALL_DEFINEx */ /* * Called before coming back to user-mode. Returning to user-mode with an @@ -241,63 +265,297 @@ static inline void addr_limit_user_check(void) #endif } -asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, - qid_t id, void __user *addr); -asmlinkage long sys_time(time_t __user *tloc); -asmlinkage long sys_stime(time_t __user *tptr); -asmlinkage long sys_gettimeofday(struct timeval __user *tv, - struct timezone __user *tz); -asmlinkage long sys_settimeofday(struct timeval __user *tv, - struct timezone __user *tz); -asmlinkage long sys_adjtimex(struct timex __user *txc_p); +/* + * These syscall function prototypes are kept in the same order as + * include/uapi/asm-generic/unistd.h. Architecture specific entries go below, + * followed by deprecated or obsolete system calls. + * + * Please note that these prototypes here are only provided for information + * purposes, for static analysis, and for linking from the syscall table. + * These functions should not be called elsewhere from kernel code. + * + * As the syscall calling convention may be different from the default + * for architectures overriding the syscall calling convention, do not + * include the prototypes if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled. 
+ */ +#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER +asmlinkage long sys_io_setup(unsigned nr_reqs, aio_context_t __user *ctx); +asmlinkage long sys_io_destroy(aio_context_t ctx); +asmlinkage long sys_io_submit(aio_context_t, long, + struct iocb __user * __user *); +asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb, + struct io_event __user *result); +asmlinkage long sys_io_getevents(aio_context_t ctx_id, + long min_nr, + long nr, + struct io_event __user *events, + struct timespec __user *timeout); -asmlinkage long sys_times(struct tms __user *tbuf); +/* fs/xattr.c */ +asmlinkage long sys_setxattr(const char __user *path, const char __user *name, + const void __user *value, size_t size, int flags); +asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name, + const void __user *value, size_t size, int flags); +asmlinkage long sys_fsetxattr(int fd, const char __user *name, + const void __user *value, size_t size, int flags); +asmlinkage long sys_getxattr(const char __user *path, const char __user *name, + void __user *value, size_t size); +asmlinkage long sys_lgetxattr(const char __user *path, const char __user *name, + void __user *value, size_t size); +asmlinkage long sys_fgetxattr(int fd, const char __user *name, + void __user *value, size_t size); +asmlinkage long sys_listxattr(const char __user *path, char __user *list, + size_t size); +asmlinkage long sys_llistxattr(const char __user *path, char __user *list, + size_t size); +asmlinkage long sys_flistxattr(int fd, char __user *list, size_t size); +asmlinkage long sys_removexattr(const char __user *path, + const char __user *name); +asmlinkage long sys_lremovexattr(const char __user *path, + const char __user *name); +asmlinkage long sys_fremovexattr(int fd, const char __user *name); -asmlinkage long sys_gettid(void); -asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp); -asmlinkage long sys_alarm(unsigned int seconds); -asmlinkage long sys_getpid(void); -asmlinkage long sys_getppid(void); -asmlinkage long sys_getuid(void); -asmlinkage long sys_geteuid(void); -asmlinkage long sys_getgid(void); -asmlinkage long sys_getegid(void); -asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid); -asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid); -asmlinkage long sys_getpgid(pid_t pid); -asmlinkage long sys_getpgrp(void); -asmlinkage long sys_getsid(pid_t pid); -asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist); +/* fs/dcache.c */ +asmlinkage long sys_getcwd(char __user *buf, unsigned long size); -asmlinkage long sys_setregid(gid_t rgid, gid_t egid); -asmlinkage long sys_setgid(gid_t gid); -asmlinkage long sys_setreuid(uid_t ruid, uid_t euid); -asmlinkage long sys_setuid(uid_t uid); -asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); -asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); -asmlinkage long sys_setfsuid(uid_t uid); -asmlinkage long sys_setfsgid(gid_t gid); -asmlinkage long sys_setpgid(pid_t pid, pid_t pgid); -asmlinkage long sys_setsid(void); -asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist); +/* fs/cookies.c */ +asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user *buf, size_t len); + +/* fs/eventfd.c */ +asmlinkage long sys_eventfd2(unsigned int count, int flags); + +/* fs/eventpoll.c */ +asmlinkage long sys_epoll_create1(int flags); +asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, + struct 
epoll_event __user *event); +asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events, + int maxevents, int timeout, + const sigset_t __user *sigmask, + size_t sigsetsize); + +/* fs/fcntl.c */ +asmlinkage long sys_dup(unsigned int fildes); +asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags); +asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg); +#if BITS_PER_LONG == 32 +asmlinkage long sys_fcntl64(unsigned int fd, + unsigned int cmd, unsigned long arg); +#endif + +/* fs/inotify_user.c */ +asmlinkage long sys_inotify_init1(int flags); +asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, + u32 mask); +asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd); + +/* fs/ioctl.c */ +asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, + unsigned long arg); + +/* fs/ioprio.c */ +asmlinkage long sys_ioprio_set(int which, int who, int ioprio); +asmlinkage long sys_ioprio_get(int which, int who); + +/* fs/locks.c */ +asmlinkage long sys_flock(unsigned int fd, unsigned int cmd); + +/* fs/namei.c */ +asmlinkage long sys_mknodat(int dfd, const char __user * filename, umode_t mode, + unsigned dev); +asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, umode_t mode); +asmlinkage long sys_unlinkat(int dfd, const char __user * pathname, int flag); +asmlinkage long sys_symlinkat(const char __user * oldname, + int newdfd, const char __user * newname); +asmlinkage long sys_linkat(int olddfd, const char __user *oldname, + int newdfd, const char __user *newname, int flags); +asmlinkage long sys_renameat(int olddfd, const char __user * oldname, + int newdfd, const char __user * newname); +/* fs/namespace.c */ +asmlinkage long sys_umount(char __user *name, int flags); +asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name, + char __user *type, unsigned long flags, + void __user *data); +asmlinkage long sys_pivot_root(const char __user *new_root, + const char __user *put_old); + +/* fs/nfsctl.c */ + +/* fs/open.c */ +asmlinkage long sys_statfs(const char __user * path, + struct statfs __user *buf); +asmlinkage long sys_statfs64(const char __user *path, size_t sz, + struct statfs64 __user *buf); +asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf); +asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz, + struct statfs64 __user *buf); +asmlinkage long sys_truncate(const char __user *path, long length); +asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length); +#if BITS_PER_LONG == 32 +asmlinkage long sys_truncate64(const char __user *path, loff_t length); +asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length); +#endif +asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); +asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode); +asmlinkage long sys_chdir(const char __user *filename); +asmlinkage long sys_fchdir(unsigned int fd); +asmlinkage long sys_chroot(const char __user *filename); +asmlinkage long sys_fchmod(unsigned int fd, umode_t mode); +asmlinkage long sys_fchmodat(int dfd, const char __user * filename, + umode_t mode); +asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user, + gid_t group, int flag); +asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group); +asmlinkage long sys_openat(int dfd, const char __user *filename, int flags, + umode_t mode); +asmlinkage long sys_close(unsigned int fd); +asmlinkage long sys_vhangup(void); + +/* fs/pipe.c */ 
+asmlinkage long sys_pipe2(int __user *fildes, int flags); + +/* fs/quota.c */ +asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special, + qid_t id, void __user *addr); + +/* fs/readdir.c */ +asmlinkage long sys_getdents64(unsigned int fd, + struct linux_dirent64 __user *dirent, + unsigned int count); + +/* fs/read_write.c */ +asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high, + unsigned long offset_low, loff_t __user *result, + unsigned int whence); +asmlinkage long sys_lseek(unsigned int fd, off_t offset, + unsigned int whence); +asmlinkage long sys_read(unsigned int fd, char __user *buf, size_t count); +asmlinkage long sys_write(unsigned int fd, const char __user *buf, + size_t count); +asmlinkage long sys_readv(unsigned long fd, + const struct iovec __user *vec, + unsigned long vlen); +asmlinkage long sys_writev(unsigned long fd, + const struct iovec __user *vec, + unsigned long vlen); +asmlinkage long sys_pread64(unsigned int fd, char __user *buf, + size_t count, loff_t pos); +asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf, + size_t count, loff_t pos); +asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec, + unsigned long vlen, unsigned long pos_l, unsigned long pos_h); +asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec, + unsigned long vlen, unsigned long pos_l, unsigned long pos_h); + +/* fs/sendfile.c */ +asmlinkage long sys_sendfile64(int out_fd, int in_fd, + loff_t __user *offset, size_t count); + +/* fs/select.c */ +asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *, + fd_set __user *, struct timespec __user *, + void __user *); +asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int, + struct timespec __user *, const sigset_t __user *, + size_t); + +/* fs/signalfd.c */ +asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, int flags); + +/* fs/splice.c */ +asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov, + unsigned long nr_segs, unsigned int flags); +asmlinkage long sys_splice(int fd_in, loff_t __user *off_in, + int fd_out, loff_t __user *off_out, + size_t len, unsigned int flags); +asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags); + +/* fs/stat.c */ +asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf, + int bufsiz); +asmlinkage long sys_newfstatat(int dfd, const char __user *filename, + struct stat __user *statbuf, int flag); +asmlinkage long sys_newfstat(unsigned int fd, struct stat __user *statbuf); +#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64) +asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf); +asmlinkage long sys_fstatat64(int dfd, const char __user *filename, + struct stat64 __user *statbuf, int flag); +#endif + +/* fs/sync.c */ +asmlinkage long sys_sync(void); +asmlinkage long sys_fsync(unsigned int fd); +asmlinkage long sys_fdatasync(unsigned int fd); +asmlinkage long sys_sync_file_range2(int fd, unsigned int flags, + loff_t offset, loff_t nbytes); +asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes, + unsigned int flags); + +/* fs/timerfd.c */ +asmlinkage long sys_timerfd_create(int clockid, int flags); +asmlinkage long sys_timerfd_settime(int ufd, int flags, + const struct itimerspec __user *utmr, + struct itimerspec __user *otmr); +asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr); + +/* fs/utimes.c */ +asmlinkage long 
sys_utimensat(int dfd, const char __user *filename, + struct timespec __user *utimes, int flags); + +/* kernel/acct.c */ asmlinkage long sys_acct(const char __user *name); + +/* kernel/capability.c */ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr); asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data); + +/* kernel/exec_domain.c */ asmlinkage long sys_personality(unsigned int personality); -asmlinkage long sys_sigpending(old_sigset_t __user *set); -asmlinkage long sys_sigprocmask(int how, old_sigset_t __user *set, - old_sigset_t __user *oset); -asmlinkage long sys_sigaltstack(const struct sigaltstack __user *uss, - struct sigaltstack __user *uoss); +/* kernel/exit.c */ +asmlinkage long sys_exit(int error_code); +asmlinkage long sys_exit_group(int error_code); +asmlinkage long sys_waitid(int which, pid_t pid, + struct siginfo __user *infop, + int options, struct rusage __user *ru); + +/* kernel/fork.c */ +asmlinkage long sys_set_tid_address(int __user *tidptr); +asmlinkage long sys_unshare(unsigned long unshare_flags); + +/* kernel/futex.c */ +asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, + struct timespec __user *utime, u32 __user *uaddr2, + u32 val3); +asmlinkage long sys_get_robust_list(int pid, + struct robust_list_head __user * __user *head_ptr, + size_t __user *len_ptr); +asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, + size_t len); + +/* kernel/hrtimer.c */ +asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp); +/* kernel/itimer.c */ asmlinkage long sys_getitimer(int which, struct itimerval __user *value); asmlinkage long sys_setitimer(int which, struct itimerval __user *value, struct itimerval __user *ovalue); + +/* kernel/kexec.c */ +asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, + struct kexec_segment __user *segments, + unsigned long flags); + +/* kernel/module.c */ +asmlinkage long sys_init_module(void __user *umod, unsigned long len, + const char __user *uargs); +asmlinkage long sys_delete_module(const char __user *name_user, + unsigned int flags); + +/* kernel/posix-timers.c */ asmlinkage long sys_timer_create(clockid_t which_clock, struct sigevent __user *timer_event_spec, timer_t __user * created_timer_id); @@ -312,29 +570,27 @@ asmlinkage long sys_clock_settime(clockid_t which_clock, const struct timespec __user *tp); asmlinkage long sys_clock_gettime(clockid_t which_clock, struct timespec __user *tp); -asmlinkage long sys_clock_adjtime(clockid_t which_clock, - struct timex __user *tx); asmlinkage long sys_clock_getres(clockid_t which_clock, struct timespec __user *tp); asmlinkage long sys_clock_nanosleep(clockid_t which_clock, int flags, const struct timespec __user *rqtp, struct timespec __user *rmtp); -asmlinkage long sys_nice(int increment); -asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, - struct sched_param __user *param); +/* kernel/printk.c */ +asmlinkage long sys_syslog(int type, char __user *buf, int len); + +/* kernel/ptrace.c */ +asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, + unsigned long data); +/* kernel/sched/core.c */ + asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param); -asmlinkage long sys_sched_setattr(pid_t pid, - struct sched_attr __user *attr, - unsigned int flags); +asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, + struct sched_param __user *param); asmlinkage long sys_sched_getscheduler(pid_t pid); 
asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param); -asmlinkage long sys_sched_getattr(pid_t pid, - struct sched_attr __user *attr, - unsigned int size, - unsigned int flags); asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, unsigned long __user *user_mask_ptr); asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, @@ -344,54 +600,15 @@ asmlinkage long sys_sched_get_priority_max(int policy); asmlinkage long sys_sched_get_priority_min(int policy); asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval); -asmlinkage long sys_setpriority(int which, int who, int niceval); -asmlinkage long sys_getpriority(int which, int who); -asmlinkage long sys_shutdown(int, int); -asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, - void __user *arg); +/* kernel/signal.c */ asmlinkage long sys_restart_syscall(void); -asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, - struct kexec_segment __user *segments, - unsigned long flags); -asmlinkage long sys_kexec_file_load(int kernel_fd, int initrd_fd, - unsigned long cmdline_len, - const char __user *cmdline_ptr, - unsigned long flags); - -asmlinkage long sys_exit(int error_code); -asmlinkage long sys_exit_group(int error_code); -asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr, - int options, struct rusage __user *ru); -asmlinkage long sys_waitid(int which, pid_t pid, - struct siginfo __user *infop, - int options, struct rusage __user *ru); -asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options); -asmlinkage long sys_set_tid_address(int __user *tidptr); -asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, - struct timespec __user *utime, u32 __user *uaddr2, - u32 val3); - -asmlinkage long sys_init_module(void __user *umod, unsigned long len, - const char __user *uargs); -asmlinkage long sys_delete_module(const char __user *name_user, - unsigned int flags); - -#ifdef CONFIG_OLD_SIGSUSPEND -asmlinkage long sys_sigsuspend(old_sigset_t mask); -#endif - -#ifdef CONFIG_OLD_SIGSUSPEND3 -asmlinkage long sys_sigsuspend(int unused1, int unused2, old_sigset_t mask); -#endif - +asmlinkage long sys_kill(pid_t pid, int sig); +asmlinkage long sys_tkill(pid_t pid, int sig); +asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig); +asmlinkage long sys_sigaltstack(const struct sigaltstack __user *uss, + struct sigaltstack __user *uoss); asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize); - -#ifdef CONFIG_OLD_SIGACTION -asmlinkage long sys_sigaction(int, const struct old_sigaction __user *, - struct old_sigaction __user *); -#endif - #ifndef CONFIG_ODD_RT_SIGACTION asmlinkage long sys_rt_sigaction(int, const struct sigaction __user *, @@ -405,470 +622,136 @@ asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese, siginfo_t __user *uinfo, const struct timespec __user *uts, size_t sigsetsize); -asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, - siginfo_t __user *uinfo); -asmlinkage long sys_kill(pid_t pid, int sig); -asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig); -asmlinkage long sys_tkill(pid_t pid, int sig); asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo); -asmlinkage long sys_sgetmask(void); -asmlinkage long sys_ssetmask(int newmask); -asmlinkage long sys_signal(int sig, __sighandler_t handler); -asmlinkage long sys_pause(void); - -asmlinkage long sys_sync(void); -asmlinkage long sys_fsync(unsigned 
int fd); -asmlinkage long sys_fdatasync(unsigned int fd); -asmlinkage long sys_bdflush(int func, long data); -asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name, - char __user *type, unsigned long flags, - void __user *data); -asmlinkage long sys_umount(char __user *name, int flags); -asmlinkage long sys_oldumount(char __user *name); -asmlinkage long sys_truncate(const char __user *path, long length); -asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length); -asmlinkage long sys_stat(const char __user *filename, - struct __old_kernel_stat __user *statbuf); -asmlinkage long sys_statfs(const char __user * path, - struct statfs __user *buf); -asmlinkage long sys_statfs64(const char __user *path, size_t sz, - struct statfs64 __user *buf); -asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf); -asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz, - struct statfs64 __user *buf); -asmlinkage long sys_lstat(const char __user *filename, - struct __old_kernel_stat __user *statbuf); -asmlinkage long sys_fstat(unsigned int fd, - struct __old_kernel_stat __user *statbuf); -asmlinkage long sys_newstat(const char __user *filename, - struct stat __user *statbuf); -asmlinkage long sys_newlstat(const char __user *filename, - struct stat __user *statbuf); -asmlinkage long sys_newfstat(unsigned int fd, struct stat __user *statbuf); -asmlinkage long sys_ustat(unsigned dev, struct ustat __user *ubuf); -#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64) -asmlinkage long sys_stat64(const char __user *filename, - struct stat64 __user *statbuf); -asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf); -asmlinkage long sys_lstat64(const char __user *filename, - struct stat64 __user *statbuf); -asmlinkage long sys_fstatat64(int dfd, const char __user *filename, - struct stat64 __user *statbuf, int flag); -#endif -#if BITS_PER_LONG == 32 -asmlinkage long sys_truncate64(const char __user *path, loff_t length); -asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length); -#endif - -asmlinkage long sys_setxattr(const char __user *path, const char __user *name, - const void __user *value, size_t size, int flags); -asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name, - const void __user *value, size_t size, int flags); -asmlinkage long sys_fsetxattr(int fd, const char __user *name, - const void __user *value, size_t size, int flags); -asmlinkage long sys_getxattr(const char __user *path, const char __user *name, - void __user *value, size_t size); -asmlinkage long sys_lgetxattr(const char __user *path, const char __user *name, - void __user *value, size_t size); -asmlinkage long sys_fgetxattr(int fd, const char __user *name, - void __user *value, size_t size); -asmlinkage long sys_listxattr(const char __user *path, char __user *list, - size_t size); -asmlinkage long sys_llistxattr(const char __user *path, char __user *list, - size_t size); -asmlinkage long sys_flistxattr(int fd, char __user *list, size_t size); -asmlinkage long sys_removexattr(const char __user *path, - const char __user *name); -asmlinkage long sys_lremovexattr(const char __user *path, - const char __user *name); -asmlinkage long sys_fremovexattr(int fd, const char __user *name); - -asmlinkage long sys_brk(unsigned long brk); -asmlinkage long sys_mprotect(unsigned long start, size_t len, - unsigned long prot); -asmlinkage long sys_mremap(unsigned long addr, - unsigned long old_len, unsigned long new_len, - unsigned long flags, 
unsigned long new_addr); -asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, - unsigned long prot, unsigned long pgoff, - unsigned long flags); -asmlinkage long sys_msync(unsigned long start, size_t len, int flags); -asmlinkage long sys_fadvise64(int fd, loff_t offset, size_t len, int advice); -asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice); -asmlinkage long sys_munmap(unsigned long addr, size_t len); -asmlinkage long sys_mlock(unsigned long start, size_t len); -asmlinkage long sys_munlock(unsigned long start, size_t len); -asmlinkage long sys_mlockall(int flags); -asmlinkage long sys_munlockall(void); -asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior); -asmlinkage long sys_mincore(unsigned long start, size_t len, - unsigned char __user * vec); - -asmlinkage long sys_pivot_root(const char __user *new_root, - const char __user *put_old); -asmlinkage long sys_chroot(const char __user *filename); -asmlinkage long sys_mknod(const char __user *filename, umode_t mode, - unsigned dev); -asmlinkage long sys_link(const char __user *oldname, - const char __user *newname); -asmlinkage long sys_symlink(const char __user *old, const char __user *new); -asmlinkage long sys_unlink(const char __user *pathname); -asmlinkage long sys_rename(const char __user *oldname, - const char __user *newname); -asmlinkage long sys_chmod(const char __user *filename, umode_t mode); -asmlinkage long sys_fchmod(unsigned int fd, umode_t mode); - -asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg); -#if BITS_PER_LONG == 32 -asmlinkage long sys_fcntl64(unsigned int fd, - unsigned int cmd, unsigned long arg); -#endif -asmlinkage long sys_pipe(int __user *fildes); -asmlinkage long sys_pipe2(int __user *fildes, int flags); -asmlinkage long sys_dup(unsigned int fildes); -asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd); -asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags); -asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on); -asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, - unsigned long arg); -asmlinkage long sys_flock(unsigned int fd, unsigned int cmd); -asmlinkage long sys_io_setup(unsigned nr_reqs, aio_context_t __user *ctx); -asmlinkage long sys_io_destroy(aio_context_t ctx); -asmlinkage long sys_io_getevents(aio_context_t ctx_id, - long min_nr, - long nr, - struct io_event __user *events, - struct timespec __user *timeout); -asmlinkage long sys_io_submit(aio_context_t, long, - struct iocb __user * __user *); -asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb, - struct io_event __user *result); -asmlinkage long sys_sendfile(int out_fd, int in_fd, - off_t __user *offset, size_t count); -asmlinkage long sys_sendfile64(int out_fd, int in_fd, - loff_t __user *offset, size_t count); -asmlinkage long sys_readlink(const char __user *path, - char __user *buf, int bufsiz); -asmlinkage long sys_creat(const char __user *pathname, umode_t mode); -asmlinkage long sys_open(const char __user *filename, - int flags, umode_t mode); -asmlinkage long sys_close(unsigned int fd); -asmlinkage long sys_access(const char __user *filename, int mode); -asmlinkage long sys_vhangup(void); -asmlinkage long sys_chown(const char __user *filename, - uid_t user, gid_t group); -asmlinkage long sys_lchown(const char __user *filename, - uid_t user, gid_t group); -asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group); -#ifdef 
CONFIG_HAVE_UID16 -asmlinkage long sys_chown16(const char __user *filename, - old_uid_t user, old_gid_t group); -asmlinkage long sys_lchown16(const char __user *filename, - old_uid_t user, old_gid_t group); -asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group); -asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid); -asmlinkage long sys_setgid16(old_gid_t gid); -asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid); -asmlinkage long sys_setuid16(old_uid_t uid); -asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid); -asmlinkage long sys_getresuid16(old_uid_t __user *ruid, - old_uid_t __user *euid, old_uid_t __user *suid); -asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid); -asmlinkage long sys_getresgid16(old_gid_t __user *rgid, - old_gid_t __user *egid, old_gid_t __user *sgid); -asmlinkage long sys_setfsuid16(old_uid_t uid); -asmlinkage long sys_setfsgid16(old_gid_t gid); -asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist); -asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist); -asmlinkage long sys_getuid16(void); -asmlinkage long sys_geteuid16(void); -asmlinkage long sys_getgid16(void); -asmlinkage long sys_getegid16(void); -#endif - -asmlinkage long sys_utime(char __user *filename, - struct utimbuf __user *times); -asmlinkage long sys_utimes(char __user *filename, - struct timeval __user *utimes); -asmlinkage long sys_lseek(unsigned int fd, off_t offset, - unsigned int whence); -asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high, - unsigned long offset_low, loff_t __user *result, - unsigned int whence); -asmlinkage long sys_read(unsigned int fd, char __user *buf, size_t count); -asmlinkage long sys_readahead(int fd, loff_t offset, size_t count); -asmlinkage long sys_readv(unsigned long fd, - const struct iovec __user *vec, - unsigned long vlen); -asmlinkage long sys_write(unsigned int fd, const char __user *buf, - size_t count); -asmlinkage long sys_writev(unsigned long fd, - const struct iovec __user *vec, - unsigned long vlen); -asmlinkage long sys_pread64(unsigned int fd, char __user *buf, - size_t count, loff_t pos); -asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf, - size_t count, loff_t pos); -asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec, - unsigned long vlen, unsigned long pos_l, unsigned long pos_h); -asmlinkage long sys_preadv2(unsigned long fd, const struct iovec __user *vec, - unsigned long vlen, unsigned long pos_l, unsigned long pos_h, - rwf_t flags); -asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec, - unsigned long vlen, unsigned long pos_l, unsigned long pos_h); -asmlinkage long sys_pwritev2(unsigned long fd, const struct iovec __user *vec, - unsigned long vlen, unsigned long pos_l, unsigned long pos_h, - rwf_t flags); -asmlinkage long sys_getcwd(char __user *buf, unsigned long size); -asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode); -asmlinkage long sys_chdir(const char __user *filename); -asmlinkage long sys_fchdir(unsigned int fd); -asmlinkage long sys_rmdir(const char __user *pathname); -asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user *buf, size_t len); -asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special, - qid_t id, void __user *addr); -asmlinkage long sys_getdents(unsigned int fd, - struct linux_dirent __user *dirent, - unsigned int count); -asmlinkage long 
sys_getdents64(unsigned int fd, - struct linux_dirent64 __user *dirent, - unsigned int count); -asmlinkage long sys_setsockopt(int fd, int level, int optname, - char __user *optval, int optlen); -asmlinkage long sys_getsockopt(int fd, int level, int optname, - char __user *optval, int __user *optlen); -asmlinkage long sys_bind(int, struct sockaddr __user *, int); -asmlinkage long sys_connect(int, struct sockaddr __user *, int); -asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *); -asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int); -asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *); -asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *); -asmlinkage long sys_send(int, void __user *, size_t, unsigned); -asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, - struct sockaddr __user *, int); -asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); -asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, - unsigned int vlen, unsigned flags); -asmlinkage long sys_recv(int, void __user *, size_t, unsigned); -asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned, - struct sockaddr __user *, int __user *); -asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); -asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, - unsigned int vlen, unsigned flags, - struct timespec __user *timeout); -asmlinkage long sys_socket(int, int, int); -asmlinkage long sys_socketpair(int, int, int, int __user *); -asmlinkage long sys_socketcall(int call, unsigned long __user *args); -asmlinkage long sys_listen(int, int); -asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds, - int timeout); -asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, - fd_set __user *exp, struct timeval __user *tvp); -asmlinkage long sys_old_select(struct sel_arg_struct __user *arg); -asmlinkage long sys_epoll_create(int size); -asmlinkage long sys_epoll_create1(int flags); -asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, - struct epoll_event __user *event); -asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events, - int maxevents, int timeout); -asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events, - int maxevents, int timeout, - const sigset_t __user *sigmask, - size_t sigsetsize); -asmlinkage long sys_gethostname(char __user *name, int len); +/* kernel/sys.c */ +asmlinkage long sys_setpriority(int which, int who, int niceval); +asmlinkage long sys_getpriority(int which, int who); +asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, + void __user *arg); +asmlinkage long sys_setregid(gid_t rgid, gid_t egid); +asmlinkage long sys_setgid(gid_t gid); +asmlinkage long sys_setreuid(uid_t ruid, uid_t euid); +asmlinkage long sys_setuid(uid_t uid); +asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); +asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid); +asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); +asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid); +asmlinkage long sys_setfsuid(uid_t uid); +asmlinkage long sys_setfsgid(gid_t gid); +asmlinkage long sys_times(struct tms __user *tbuf); +asmlinkage long sys_setpgid(pid_t pid, pid_t pgid); +asmlinkage long sys_getpgid(pid_t pid); +asmlinkage long sys_getsid(pid_t pid); +asmlinkage long 
sys_setsid(void); +asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist); +asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist); +asmlinkage long sys_newuname(struct new_utsname __user *name); asmlinkage long sys_sethostname(char __user *name, int len); asmlinkage long sys_setdomainname(char __user *name, int len); -asmlinkage long sys_newuname(struct new_utsname __user *name); -asmlinkage long sys_uname(struct old_utsname __user *); -asmlinkage long sys_olduname(struct oldold_utsname __user *); - asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim); -#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT -asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim); -#endif asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim); -asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource, - const struct rlimit64 __user *new_rlim, - struct rlimit64 __user *old_rlim); asmlinkage long sys_getrusage(int who, struct rusage __user *ru); asmlinkage long sys_umask(int mask); +asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5); +asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache); + +/* kernel/time.c */ +asmlinkage long sys_gettimeofday(struct timeval __user *tv, + struct timezone __user *tz); +asmlinkage long sys_settimeofday(struct timeval __user *tv, + struct timezone __user *tz); +asmlinkage long sys_adjtimex(struct timex __user *txc_p); +/* kernel/timer.c */ +asmlinkage long sys_getpid(void); +asmlinkage long sys_getppid(void); +asmlinkage long sys_getuid(void); +asmlinkage long sys_geteuid(void); +asmlinkage long sys_getgid(void); +asmlinkage long sys_getegid(void); +asmlinkage long sys_gettid(void); +asmlinkage long sys_sysinfo(struct sysinfo __user *info); + +/* ipc/mqueue.c */ +asmlinkage long sys_mq_open(const char __user *name, int oflag, umode_t mode, struct mq_attr __user *attr); +asmlinkage long sys_mq_unlink(const char __user *name); +asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec __user *abs_timeout); +asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct timespec __user *abs_timeout); +asmlinkage long sys_mq_notify(mqd_t mqdes, const struct sigevent __user *notification); +asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqstat, struct mq_attr __user *omqstat); + +/* ipc/msg.c */ asmlinkage long sys_msgget(key_t key, int msgflg); -asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp, - size_t msgsz, int msgflg); +asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, long msgtyp, int msgflg); -asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); +asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp, + size_t msgsz, int msgflg); +/* ipc/sem.c */ asmlinkage long sys_semget(key_t key, int nsems, int semflg); -asmlinkage long sys_semop(int semid, struct sembuf __user *sops, - unsigned nsops); asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg); asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops, unsigned nsops, const struct timespec __user *timeout); -asmlinkage long sys_shmat(int shmid, 
char __user *shmaddr, int shmflg); +asmlinkage long sys_semop(int semid, struct sembuf __user *sops, + unsigned nsops); + +/* ipc/shm.c */ asmlinkage long sys_shmget(key_t key, size_t size, int flag); -asmlinkage long sys_shmdt(char __user *shmaddr); asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); -asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second, - unsigned long third, void __user *ptr, long fifth); +asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg); +asmlinkage long sys_shmdt(char __user *shmaddr); -asmlinkage long sys_mq_open(const char __user *name, int oflag, umode_t mode, struct mq_attr __user *attr); -asmlinkage long sys_mq_unlink(const char __user *name); -asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec __user *abs_timeout); -asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct timespec __user *abs_timeout); -asmlinkage long sys_mq_notify(mqd_t mqdes, const struct sigevent __user *notification); -asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqstat, struct mq_attr __user *omqstat); +/* net/socket.c */ +asmlinkage long sys_socket(int, int, int); +asmlinkage long sys_socketpair(int, int, int, int __user *); +asmlinkage long sys_bind(int, struct sockaddr __user *, int); +asmlinkage long sys_listen(int, int); +asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *); +asmlinkage long sys_connect(int, struct sockaddr __user *, int); +asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *); +asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *); +asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, + struct sockaddr __user *, int); +asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned, + struct sockaddr __user *, int __user *); +asmlinkage long sys_setsockopt(int fd, int level, int optname, + char __user *optval, int optlen); +asmlinkage long sys_getsockopt(int fd, int level, int optname, + char __user *optval, int __user *optlen); +asmlinkage long sys_shutdown(int, int); +asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); +asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); -asmlinkage long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn); -asmlinkage long sys_pciconfig_read(unsigned long bus, unsigned long dfn, - unsigned long off, unsigned long len, - void __user *buf); -asmlinkage long sys_pciconfig_write(unsigned long bus, unsigned long dfn, - unsigned long off, unsigned long len, - void __user *buf); +/* mm/filemap.c */ +asmlinkage long sys_readahead(int fd, loff_t offset, size_t count); -asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, - unsigned long arg4, unsigned long arg5); -asmlinkage long sys_swapon(const char __user *specialfile, int swap_flags); -asmlinkage long sys_swapoff(const char __user *specialfile); -asmlinkage long sys_sysctl(struct __sysctl_args __user *args); -asmlinkage long sys_sysinfo(struct sysinfo __user *info); -asmlinkage long sys_sysfs(int option, - unsigned long arg1, unsigned long arg2); -asmlinkage long sys_syslog(int type, char __user *buf, int len); -asmlinkage long sys_uselib(const char __user *library); -asmlinkage long sys_ni_syscall(void); -asmlinkage long sys_ptrace(long request, 
long pid, unsigned long addr, - unsigned long data); +/* mm/nommu.c, also with MMU */ +asmlinkage long sys_brk(unsigned long brk); +asmlinkage long sys_munmap(unsigned long addr, size_t len); +asmlinkage long sys_mremap(unsigned long addr, + unsigned long old_len, unsigned long new_len, + unsigned long flags, unsigned long new_addr); +/* security/keys/keyctl.c */ asmlinkage long sys_add_key(const char __user *_type, const char __user *_description, const void __user *_payload, size_t plen, key_serial_t destringid); - asmlinkage long sys_request_key(const char __user *_type, const char __user *_description, const char __user *_callout_info, key_serial_t destringid); - asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5); -asmlinkage long sys_ioprio_set(int which, int who, int ioprio); -asmlinkage long sys_ioprio_get(int which, int who); -asmlinkage long sys_set_mempolicy(int mode, const unsigned long __user *nmask, - unsigned long maxnode); -asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode, - const unsigned long __user *from, - const unsigned long __user *to); -asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages, - const void __user * __user *pages, - const int __user *nodes, - int __user *status, - int flags); -asmlinkage long sys_mbind(unsigned long start, unsigned long len, - unsigned long mode, - const unsigned long __user *nmask, - unsigned long maxnode, - unsigned flags); -asmlinkage long sys_get_mempolicy(int __user *policy, - unsigned long __user *nmask, - unsigned long maxnode, - unsigned long addr, unsigned long flags); - -asmlinkage long sys_inotify_init(void); -asmlinkage long sys_inotify_init1(int flags); -asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, - u32 mask); -asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd); - -asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, - __u32 __user *ustatus); -asmlinkage long sys_spu_create(const char __user *name, - unsigned int flags, umode_t mode, int fd); - -asmlinkage long sys_mknodat(int dfd, const char __user * filename, umode_t mode, - unsigned dev); -asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, umode_t mode); -asmlinkage long sys_unlinkat(int dfd, const char __user * pathname, int flag); -asmlinkage long sys_symlinkat(const char __user * oldname, - int newdfd, const char __user * newname); -asmlinkage long sys_linkat(int olddfd, const char __user *oldname, - int newdfd, const char __user *newname, int flags); -asmlinkage long sys_renameat(int olddfd, const char __user * oldname, - int newdfd, const char __user * newname); -asmlinkage long sys_renameat2(int olddfd, const char __user *oldname, - int newdfd, const char __user *newname, - unsigned int flags); -asmlinkage long sys_futimesat(int dfd, const char __user *filename, - struct timeval __user *utimes); -asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode); -asmlinkage long sys_fchmodat(int dfd, const char __user * filename, - umode_t mode); -asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user, - gid_t group, int flag); -asmlinkage long sys_openat(int dfd, const char __user *filename, int flags, - umode_t mode); -asmlinkage long sys_newfstatat(int dfd, const char __user *filename, - struct stat __user *statbuf, int flag); -asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf, - int bufsiz); -asmlinkage long sys_utimensat(int dfd, const char __user 
*filename, - struct timespec __user *utimes, int flags); -asmlinkage long sys_unshare(unsigned long unshare_flags); - -asmlinkage long sys_splice(int fd_in, loff_t __user *off_in, - int fd_out, loff_t __user *off_out, - size_t len, unsigned int flags); - -asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov, - unsigned long nr_segs, unsigned int flags); - -asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags); - -asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes, - unsigned int flags); -asmlinkage long sys_sync_file_range2(int fd, unsigned int flags, - loff_t offset, loff_t nbytes); -asmlinkage long sys_get_robust_list(int pid, - struct robust_list_head __user * __user *head_ptr, - size_t __user *len_ptr); -asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, - size_t len); -asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache); -asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask); -asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, int flags); -asmlinkage long sys_timerfd_create(int clockid, int flags); -asmlinkage long sys_timerfd_settime(int ufd, int flags, - const struct itimerspec __user *utmr, - struct itimerspec __user *otmr); -asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr); -asmlinkage long sys_eventfd(unsigned int count); -asmlinkage long sys_eventfd2(unsigned int count, int flags); -asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags); -asmlinkage long sys_userfaultfd(int flags); -asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); -asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int); -asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *, - fd_set __user *, struct timespec __user *, - void __user *); -asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int, - struct timespec __user *, const sigset_t __user *, - size_t); -asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags); -asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags, - u64 mask, int fd, - const char __user *pathname); -asmlinkage long sys_syncfs(int fd); - -asmlinkage long sys_fork(void); -asmlinkage long sys_vfork(void); +/* arch/example/kernel/sys_example.c */ #ifdef CONFIG_CLONE_BACKWARDS asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, unsigned long, int __user *); @@ -881,26 +764,80 @@ asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int __user *, unsigned long); #endif #endif - asmlinkage long sys_execve(const char __user *filename, const char __user *const __user *argv, const char __user *const __user *envp); +/* mm/fadvise.c */ +asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice); + +/* mm/, CONFIG_MMU only */ +asmlinkage long sys_swapon(const char __user *specialfile, int swap_flags); +asmlinkage long sys_swapoff(const char __user *specialfile); +asmlinkage long sys_mprotect(unsigned long start, size_t len, + unsigned long prot); +asmlinkage long sys_msync(unsigned long start, size_t len, int flags); +asmlinkage long sys_mlock(unsigned long start, size_t len); +asmlinkage long sys_munlock(unsigned long start, size_t len); +asmlinkage long sys_mlockall(int flags); +asmlinkage long sys_munlockall(void); +asmlinkage long sys_mincore(unsigned long start, size_t 
len, + unsigned char __user * vec); +asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior); +asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, + unsigned long prot, unsigned long pgoff, + unsigned long flags); +asmlinkage long sys_mbind(unsigned long start, unsigned long len, + unsigned long mode, + const unsigned long __user *nmask, + unsigned long maxnode, + unsigned flags); +asmlinkage long sys_get_mempolicy(int __user *policy, + unsigned long __user *nmask, + unsigned long maxnode, + unsigned long addr, unsigned long flags); +asmlinkage long sys_set_mempolicy(int mode, const unsigned long __user *nmask, + unsigned long maxnode); +asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode, + const unsigned long __user *from, + const unsigned long __user *to); +asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages, + const void __user * __user *pages, + const int __user *nodes, + int __user *status, + int flags); + +asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, + siginfo_t __user *uinfo); asmlinkage long sys_perf_event_open( struct perf_event_attr __user *attr_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags); +asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int); +asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, + unsigned int vlen, unsigned flags, + struct timespec __user *timeout); -asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff); -asmlinkage long sys_old_mmap(struct mmap_arg_struct __user *arg); +asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr, + int options, struct rusage __user *ru); +asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource, + const struct rlimit64 __user *new_rlim, + struct rlimit64 __user *old_rlim); +asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags); +asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags, + u64 mask, int fd, + const char __user *pathname); asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name, struct file_handle __user *handle, int __user *mnt_id, int flag); asmlinkage long sys_open_by_handle_at(int mountdirfd, struct file_handle __user *handle, int flags); +asmlinkage long sys_clock_adjtime(clockid_t which_clock, + struct timex __user *tx); +asmlinkage long sys_syncfs(int fd); asmlinkage long sys_setns(int fd, int nstype); +asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, + unsigned int vlen, unsigned flags); asmlinkage long sys_process_vm_readv(pid_t pid, const struct iovec __user *lvec, unsigned long liovcnt, @@ -913,27 +850,40 @@ asmlinkage long sys_process_vm_writev(pid_t pid, const struct iovec __user *rvec, unsigned long riovcnt, unsigned long flags); - asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type, unsigned long idx1, unsigned long idx2); asmlinkage long sys_finit_module(int fd, const char __user *uargs, int flags); +asmlinkage long sys_sched_setattr(pid_t pid, + struct sched_attr __user *attr, + unsigned int flags); +asmlinkage long sys_sched_getattr(pid_t pid, + struct sched_attr __user *attr, + unsigned int size, + unsigned int flags); +asmlinkage long sys_renameat2(int olddfd, const char __user *oldname, + int newdfd, const char __user *newname, + unsigned int flags); asmlinkage long sys_seccomp(unsigned int op, unsigned int flags, const char __user *uargs); asmlinkage long 
sys_getrandom(char __user *buf, size_t count, unsigned int flags); +asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags); asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); - asmlinkage long sys_execveat(int dfd, const char __user *filename, const char __user *const __user *argv, const char __user *const __user *envp, int flags); - +asmlinkage long sys_userfaultfd(int flags); asmlinkage long sys_membarrier(int cmd, int flags); +asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags); asmlinkage long sys_copy_file_range(int fd_in, loff_t __user *off_in, int fd_out, loff_t __user *off_out, size_t len, unsigned int flags); - -asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags); - +asmlinkage long sys_preadv2(unsigned long fd, const struct iovec __user *vec, + unsigned long vlen, unsigned long pos_l, unsigned long pos_h, + rwf_t flags); +asmlinkage long sys_pwritev2(unsigned long fd, const struct iovec __user *vec, + unsigned long vlen, unsigned long pos_l, unsigned long pos_h, + rwf_t flags); asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len, unsigned long prot, int pkey); asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val); @@ -941,4 +891,381 @@ asmlinkage long sys_pkey_free(int pkey); asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags, unsigned mask, struct statx __user *buffer); + +/* + * Architecture-specific system calls + */ + +/* arch/x86/kernel/ioport.c */ +asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on); + +/* pciconfig: alpha, arm, arm64, ia64, sparc */ +asmlinkage long sys_pciconfig_read(unsigned long bus, unsigned long dfn, + unsigned long off, unsigned long len, + void __user *buf); +asmlinkage long sys_pciconfig_write(unsigned long bus, unsigned long dfn, + unsigned long off, unsigned long len, + void __user *buf); +asmlinkage long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn); + +/* powerpc */ +asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, + __u32 __user *ustatus); +asmlinkage long sys_spu_create(const char __user *name, + unsigned int flags, umode_t mode, int fd); + + +/* + * Deprecated system calls which are still defined in + * include/uapi/asm-generic/unistd.h and wanted by >= 1 arch + */ + +/* __ARCH_WANT_SYSCALL_NO_AT */ +asmlinkage long sys_open(const char __user *filename, + int flags, umode_t mode); +asmlinkage long sys_link(const char __user *oldname, + const char __user *newname); +asmlinkage long sys_unlink(const char __user *pathname); +asmlinkage long sys_mknod(const char __user *filename, umode_t mode, + unsigned dev); +asmlinkage long sys_chmod(const char __user *filename, umode_t mode); +asmlinkage long sys_chown(const char __user *filename, + uid_t user, gid_t group); +asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode); +asmlinkage long sys_rmdir(const char __user *pathname); +asmlinkage long sys_lchown(const char __user *filename, + uid_t user, gid_t group); +asmlinkage long sys_access(const char __user *filename, int mode); +asmlinkage long sys_rename(const char __user *oldname, + const char __user *newname); +asmlinkage long sys_symlink(const char __user *old, const char __user *new); +asmlinkage long sys_utimes(char __user *filename, + struct timeval __user *utimes); +#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64) +asmlinkage long sys_stat64(const char __user *filename, + struct stat64 __user *statbuf); 
+asmlinkage long sys_lstat64(const char __user *filename, + struct stat64 __user *statbuf); +#endif + +/* __ARCH_WANT_SYSCALL_NO_FLAGS */ +asmlinkage long sys_pipe(int __user *fildes); +asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd); +asmlinkage long sys_epoll_create(int size); +asmlinkage long sys_inotify_init(void); +asmlinkage long sys_eventfd(unsigned int count); +asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask); + +/* __ARCH_WANT_SYSCALL_OFF_T */ +asmlinkage long sys_sendfile(int out_fd, int in_fd, + off_t __user *offset, size_t count); +asmlinkage long sys_newstat(const char __user *filename, + struct stat __user *statbuf); +asmlinkage long sys_newlstat(const char __user *filename, + struct stat __user *statbuf); +asmlinkage long sys_fadvise64(int fd, loff_t offset, size_t len, int advice); + +/* __ARCH_WANT_SYSCALL_DEPRECATED */ +asmlinkage long sys_alarm(unsigned int seconds); +asmlinkage long sys_getpgrp(void); +asmlinkage long sys_pause(void); +asmlinkage long sys_time(time_t __user *tloc); +asmlinkage long sys_utime(char __user *filename, + struct utimbuf __user *times); +asmlinkage long sys_creat(const char __user *pathname, umode_t mode); +asmlinkage long sys_getdents(unsigned int fd, + struct linux_dirent __user *dirent, + unsigned int count); +asmlinkage long sys_futimesat(int dfd, const char __user *filename, + struct timeval __user *utimes); +asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, + fd_set __user *exp, struct timeval __user *tvp); +asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds, + int timeout); +asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events, + int maxevents, int timeout); +asmlinkage long sys_ustat(unsigned dev, struct ustat __user *ubuf); +asmlinkage long sys_vfork(void); +asmlinkage long sys_recv(int, void __user *, size_t, unsigned); +asmlinkage long sys_send(int, void __user *, size_t, unsigned); +asmlinkage long sys_bdflush(int func, long data); +asmlinkage long sys_oldumount(char __user *name); +asmlinkage long sys_uselib(const char __user *library); +asmlinkage long sys_sysctl(struct __sysctl_args __user *args); +asmlinkage long sys_sysfs(int option, + unsigned long arg1, unsigned long arg2); +asmlinkage long sys_fork(void); + +/* obsolete: kernel/time/time.c */ +asmlinkage long sys_stime(time_t __user *tptr); + +/* obsolete: kernel/signal.c */ +asmlinkage long sys_sigpending(old_sigset_t __user *uset); +asmlinkage long sys_sigprocmask(int how, old_sigset_t __user *set, + old_sigset_t __user *oset); +#ifdef CONFIG_OLD_SIGSUSPEND +asmlinkage long sys_sigsuspend(old_sigset_t mask); +#endif + +#ifdef CONFIG_OLD_SIGSUSPEND3 +asmlinkage long sys_sigsuspend(int unused1, int unused2, old_sigset_t mask); +#endif + +#ifdef CONFIG_OLD_SIGACTION +asmlinkage long sys_sigaction(int, const struct old_sigaction __user *, + struct old_sigaction __user *); +#endif +asmlinkage long sys_sgetmask(void); +asmlinkage long sys_ssetmask(int newmask); +asmlinkage long sys_signal(int sig, __sighandler_t handler); + +/* obsolete: kernel/sched/core.c */ +asmlinkage long sys_nice(int increment); + +/* obsolete: kernel/kexec_file.c */ +asmlinkage long sys_kexec_file_load(int kernel_fd, int initrd_fd, + unsigned long cmdline_len, + const char __user *cmdline_ptr, + unsigned long flags); + +/* obsolete: kernel/exit.c */ +asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options); + +/* obsolete: kernel/uid16.c */ +#ifdef CONFIG_HAVE_UID16 
+asmlinkage long sys_chown16(const char __user *filename, + old_uid_t user, old_gid_t group); +asmlinkage long sys_lchown16(const char __user *filename, + old_uid_t user, old_gid_t group); +asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group); +asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid); +asmlinkage long sys_setgid16(old_gid_t gid); +asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid); +asmlinkage long sys_setuid16(old_uid_t uid); +asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid); +asmlinkage long sys_getresuid16(old_uid_t __user *ruid, + old_uid_t __user *euid, old_uid_t __user *suid); +asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid); +asmlinkage long sys_getresgid16(old_gid_t __user *rgid, + old_gid_t __user *egid, old_gid_t __user *sgid); +asmlinkage long sys_setfsuid16(old_uid_t uid); +asmlinkage long sys_setfsgid16(old_gid_t gid); +asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist); +asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist); +asmlinkage long sys_getuid16(void); +asmlinkage long sys_geteuid16(void); +asmlinkage long sys_getgid16(void); +asmlinkage long sys_getegid16(void); +#endif + +/* obsolete: net/socket.c */ +asmlinkage long sys_socketcall(int call, unsigned long __user *args); + +/* obsolete: fs/stat.c */ +asmlinkage long sys_stat(const char __user *filename, + struct __old_kernel_stat __user *statbuf); +asmlinkage long sys_lstat(const char __user *filename, + struct __old_kernel_stat __user *statbuf); +asmlinkage long sys_fstat(unsigned int fd, + struct __old_kernel_stat __user *statbuf); +asmlinkage long sys_readlink(const char __user *path, + char __user *buf, int bufsiz); + +/* obsolete: fs/select.c */ +asmlinkage long sys_old_select(struct sel_arg_struct __user *arg); + +/* obsolete: fs/readdir.c */ +asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int); + +/* obsolete: kernel/sys.c */ +asmlinkage long sys_gethostname(char __user *name, int len); +asmlinkage long sys_uname(struct old_utsname __user *); +asmlinkage long sys_olduname(struct oldold_utsname __user *); +#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT +asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim); +#endif + +/* obsolete: ipc */ +asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second, + unsigned long third, void __user *ptr, long fifth); + +/* obsolete: mm/ */ +asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); +asmlinkage long sys_old_mmap(struct mmap_arg_struct __user *arg); + + +/* + * Not a real system call, but a placeholder for syscalls which are + * not implemented -- see kernel/sys_ni.c + */ +asmlinkage long sys_ni_syscall(void); + +#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ + + +/* + * Kernel code should not call syscalls (i.e., sys_xyzyyz()) directly. + * Instead, use one of the functions which work equivalently, such as + * the ksys_xyzyyz() functions prototyped below. 
+ */ + +int ksys_mount(char __user *dev_name, char __user *dir_name, char __user *type, + unsigned long flags, void __user *data); +int ksys_umount(char __user *name, int flags); +int ksys_dup(unsigned int fildes); +int ksys_chroot(const char __user *filename); +ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count); +int ksys_chdir(const char __user *filename); +int ksys_fchmod(unsigned int fd, umode_t mode); +int ksys_fchown(unsigned int fd, uid_t user, gid_t group); +int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent, + unsigned int count); +int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); +off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence); +ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count); +void ksys_sync(void); +int ksys_unshare(unsigned long unshare_flags); +int ksys_setsid(void); +int ksys_sync_file_range(int fd, loff_t offset, loff_t nbytes, + unsigned int flags); +ssize_t ksys_pread64(unsigned int fd, char __user *buf, size_t count, + loff_t pos); +ssize_t ksys_pwrite64(unsigned int fd, const char __user *buf, + size_t count, loff_t pos); +int ksys_fallocate(int fd, int mode, loff_t offset, loff_t len); +#ifdef CONFIG_ADVISE_SYSCALLS +int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice); +#else +static inline int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, + int advice) +{ + return -EINVAL; +} +#endif +unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); +ssize_t ksys_readahead(int fd, loff_t offset, size_t count); + +/* + * The following kernel syscall equivalents are just wrappers to fs-internal + * functions. Therefore, provide stubs to be inlined at the callsites. 
+ */ +extern long do_unlinkat(int dfd, struct filename *name); + +static inline long ksys_unlink(const char __user *pathname) +{ + return do_unlinkat(AT_FDCWD, getname(pathname)); +} + +extern long do_rmdir(int dfd, const char __user *pathname); + +static inline long ksys_rmdir(const char __user *pathname) +{ + return do_rmdir(AT_FDCWD, pathname); +} + +extern long do_mkdirat(int dfd, const char __user *pathname, umode_t mode); + +static inline long ksys_mkdir(const char __user *pathname, umode_t mode) +{ + return do_mkdirat(AT_FDCWD, pathname, mode); +} + +extern long do_symlinkat(const char __user *oldname, int newdfd, + const char __user *newname); + +static inline long ksys_symlink(const char __user *oldname, + const char __user *newname) +{ + return do_symlinkat(oldname, AT_FDCWD, newname); +} + +extern long do_mknodat(int dfd, const char __user *filename, umode_t mode, + unsigned int dev); + +static inline long ksys_mknod(const char __user *filename, umode_t mode, + unsigned int dev) +{ + return do_mknodat(AT_FDCWD, filename, mode, dev); +} + +extern int do_linkat(int olddfd, const char __user *oldname, int newdfd, + const char __user *newname, int flags); + +static inline long ksys_link(const char __user *oldname, + const char __user *newname) +{ + return do_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0); +} + +extern int do_fchmodat(int dfd, const char __user *filename, umode_t mode); + +static inline int ksys_chmod(const char __user *filename, umode_t mode) +{ + return do_fchmodat(AT_FDCWD, filename, mode); +} + +extern long do_faccessat(int dfd, const char __user *filename, int mode); + +static inline long ksys_access(const char __user *filename, int mode) +{ + return do_faccessat(AT_FDCWD, filename, mode); +} + +extern int do_fchownat(int dfd, const char __user *filename, uid_t user, + gid_t group, int flag); + +static inline long ksys_chown(const char __user *filename, uid_t user, + gid_t group) +{ + return do_fchownat(AT_FDCWD, filename, user, group, 0); +} + +static inline long ksys_lchown(const char __user *filename, uid_t user, + gid_t group) +{ + return do_fchownat(AT_FDCWD, filename, user, group, + AT_SYMLINK_NOFOLLOW); +} + +extern long do_sys_ftruncate(unsigned int fd, loff_t length, int small); + +static inline long ksys_ftruncate(unsigned int fd, unsigned long length) +{ + return do_sys_ftruncate(fd, length, 1); +} + +extern int __close_fd(struct files_struct *files, unsigned int fd); + +/* + * In contrast to sys_close(), this stub does not check whether the syscall + * should or should not be restarted, but returns the raw error codes from + * __close_fd(). 
+ */ +static inline int ksys_close(unsigned int fd) +{ + return __close_fd(current->files, fd); +} + +extern long do_sys_open(int dfd, const char __user *filename, int flags, + umode_t mode); + +static inline long ksys_open(const char __user *filename, int flags, + umode_t mode) +{ + if (force_o_largefile()) + flags |= O_LARGEFILE; + return do_sys_open(AT_FDCWD, filename, flags, mode); +} + +extern long do_sys_truncate(const char __user *pathname, loff_t length); + +static inline long ksys_truncate(const char __user *pathname, loff_t length) +{ + return do_sys_truncate(pathname, length); +} + #endif diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h index 0494db3fd9e8..13770cfe33ad 100644 --- a/include/linux/textsearch.h +++ b/include/linux/textsearch.h @@ -62,7 +62,7 @@ struct ts_config int flags; /** - * get_next_block - fetch next block of data + * @get_next_block: fetch next block of data * @consumed: number of bytes consumed by the caller * @dst: destination buffer * @conf: search configuration @@ -79,7 +79,7 @@ struct ts_config struct ts_state *state); /** - * finish - finalize/clean a series of get_next_block() calls + * @finish: finalize/clean a series of get_next_block() calls * @conf: search configuration * @state: search state * diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 8c5302374eaa..7834be668d80 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -148,6 +148,7 @@ struct thermal_cooling_device { struct device device; struct device_node *np; void *devdata; + void *stats; const struct thermal_cooling_device_ops *ops; bool updated; /* true if the cooling device does not need update */ struct mutex lock; /* protect thermal_instances list */ diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 34f053a150a9..cf2862bd134a 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -43,11 +43,7 @@ enum { #define THREAD_ALIGN THREAD_SIZE #endif -#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK) -# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) -#else -# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT) -#endif +#define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) /* * flag set/clear/test wrappers diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index 7b69853188b1..a3ed26082bc1 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -45,12 +45,16 @@ enum tb_cfg_pkg_type { * @TB_SECURITY_USER: User approval required at minimum * @TB_SECURITY_SECURE: One time saved key required at minimum * @TB_SECURITY_DPONLY: Only tunnel Display port (and USB) + * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected + * Thunderbolt dock (and Display Port). All PCIe + * links downstream of the dock are removed. 
*/ enum tb_security_level { TB_SECURITY_NONE, TB_SECURITY_USER, TB_SECURITY_SECURE, TB_SECURITY_DPONLY, + TB_SECURITY_USBONLY, }; /** @@ -65,6 +69,7 @@ enum tb_security_level { * @cm_ops: Connection manager specific operations vector * @index: Linux assigned domain number * @security_level: Current security level + * @nboot_acl: Number of boot ACLs the domain supports * @privdata: Private connection manager specific data */ struct tb { @@ -77,6 +82,7 @@ struct tb { const struct tb_cm_ops *cm_ops; int index; enum tb_security_level security_level; + size_t nboot_acl; unsigned long privdata[0]; }; @@ -237,6 +243,7 @@ int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path, u16 receive_ring); int tb_xdomain_disable_paths(struct tb_xdomain *xd); struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid); +struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route); static inline struct tb_xdomain * tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid) @@ -250,6 +257,18 @@ tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid) return xd; } +static inline struct tb_xdomain * +tb_xdomain_find_by_route_locked(struct tb *tb, u64 route) +{ + struct tb_xdomain *xd; + + mutex_lock(&tb->lock); + xd = tb_xdomain_find_by_route(tb, route); + mutex_unlock(&tb->lock); + + return xd; +} + static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd) { if (xd) diff --git a/include/linux/tick.h b/include/linux/tick.h index 7cc35921218e..55388ab45fd4 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -113,27 +113,48 @@ enum tick_dep_bits { #ifdef CONFIG_NO_HZ_COMMON extern bool tick_nohz_enabled; -extern int tick_nohz_tick_stopped(void); +extern bool tick_nohz_tick_stopped(void); +extern bool tick_nohz_tick_stopped_cpu(int cpu); +extern void tick_nohz_idle_stop_tick(void); +extern void tick_nohz_idle_retain_tick(void); +extern void tick_nohz_idle_restart_tick(void); extern void tick_nohz_idle_enter(void); extern void tick_nohz_idle_exit(void); extern void tick_nohz_irq_exit(void); -extern ktime_t tick_nohz_get_sleep_length(void); +extern bool tick_nohz_idle_got_tick(void); +extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next); extern unsigned long tick_nohz_get_idle_calls(void); extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu); extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); + +static inline void tick_nohz_idle_stop_tick_protected(void) +{ + local_irq_disable(); + tick_nohz_idle_stop_tick(); + local_irq_enable(); +} + #else /* !CONFIG_NO_HZ_COMMON */ #define tick_nohz_enabled (0) static inline int tick_nohz_tick_stopped(void) { return 0; } +static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; } +static inline void tick_nohz_idle_stop_tick(void) { } +static inline void tick_nohz_idle_retain_tick(void) { } +static inline void tick_nohz_idle_restart_tick(void) { } static inline void tick_nohz_idle_enter(void) { } static inline void tick_nohz_idle_exit(void) { } +static inline bool tick_nohz_idle_got_tick(void) { return false; } -static inline ktime_t tick_nohz_get_sleep_length(void) +static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) { - return NSEC_PER_SEC / HZ; + *delta_next = TICK_NSEC; + return *delta_next; } static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } + +static inline void 
tick_nohz_idle_stop_tick_protected(void) { } #endif /* !CONFIG_NO_HZ_COMMON */ #ifdef CONFIG_NO_HZ_FULL diff --git a/include/linux/time32.h b/include/linux/time32.h index 65b1de25198d..d2bcd4377b56 100644 --- a/include/linux/time32.h +++ b/include/linux/time32.h @@ -217,5 +217,6 @@ static inline s64 timeval_to_ns(const struct timeval *tv) * Returns the timeval representation of the nsec parameter. */ extern struct timeval ns_to_timeval(const s64 nsec); +extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec); #endif diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index d315c3d6725c..4b3dca173e89 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -52,6 +52,7 @@ struct tk_read_base { * @offs_real: Offset clock monotonic -> clock realtime * @offs_boot: Offset clock monotonic -> clock boottime * @offs_tai: Offset clock monotonic -> clock tai + * @time_suspended: Accumulated suspend time * @tai_offset: The current UTC to TAI offset in seconds * @clock_was_set_seq: The sequence number of clock was set events * @cs_was_changed_seq: The sequence number of clocksource change events @@ -94,6 +95,7 @@ struct timekeeper { ktime_t offs_real; ktime_t offs_boot; ktime_t offs_tai; + ktime_t time_suspended; s32 tai_offset; unsigned int clock_was_set_seq; u8 cs_was_changed_seq; @@ -117,6 +119,8 @@ struct timekeeper { s64 ntp_error; u32 ntp_error_shift; u32 ntp_err_mult; + /* Flag used to avoid updating NTP twice with same second */ + u32 skip_second_overflow; #ifdef CONFIG_DEBUG_TIMEKEEPING long last_warning; /* diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index b17bcce58bc4..9737fbec7019 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -31,21 +31,27 @@ struct timespec64 get_monotonic_coarse64(void); extern void getrawmonotonic64(struct timespec64 *ts); extern void ktime_get_ts64(struct timespec64 *ts); extern time64_t ktime_get_seconds(void); +extern time64_t __ktime_get_real_seconds(void); extern time64_t ktime_get_real_seconds(void); +extern void ktime_get_active_ts64(struct timespec64 *ts); extern int __getnstimeofday64(struct timespec64 *tv); extern void getnstimeofday64(struct timespec64 *tv); extern void getboottime64(struct timespec64 *ts); -#define ktime_get_real_ts64(ts) getnstimeofday64(ts) +#define ktime_get_real_ts64(ts) getnstimeofday64(ts) + +/* Clock BOOTTIME compatibility wrappers */ +static inline void get_monotonic_boottime64(struct timespec64 *ts) +{ + ktime_get_ts64(ts); +} /* * ktime_t based interfaces */ - enum tk_offsets { TK_OFFS_REAL, - TK_OFFS_BOOT, TK_OFFS_TAI, TK_OFFS_MAX, }; @@ -56,6 +62,10 @@ extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs); extern ktime_t ktime_get_raw(void); extern u32 ktime_get_resolution_ns(void); +/* Clock BOOTTIME compatibility wrappers */ +static inline ktime_t ktime_get_boottime(void) { return ktime_get(); } +static inline u64 ktime_get_boot_ns(void) { return ktime_get(); } + /** * ktime_get_real - get the real (wall-) time in ktime_t format */ @@ -65,17 +75,6 @@ static inline ktime_t ktime_get_real(void) } /** - * ktime_get_boottime - Returns monotonic time since boot in ktime_t format - * - * This is similar to CLOCK_MONTONIC/ktime_get, but also includes the - * time spent in suspend. 
- */ -static inline ktime_t ktime_get_boottime(void) -{ - return ktime_get_with_offset(TK_OFFS_BOOT); -} - -/** * ktime_get_clocktai - Returns the TAI time of day in ktime_t format */ static inline ktime_t ktime_get_clocktai(void) @@ -101,11 +100,6 @@ static inline u64 ktime_get_real_ns(void) return ktime_to_ns(ktime_get_real()); } -static inline u64 ktime_get_boot_ns(void) -{ - return ktime_to_ns(ktime_get_boottime()); -} - static inline u64 ktime_get_tai_ns(void) { return ktime_to_ns(ktime_get_clocktai()); @@ -118,17 +112,11 @@ static inline u64 ktime_get_raw_ns(void) extern u64 ktime_get_mono_fast_ns(void); extern u64 ktime_get_raw_fast_ns(void); -extern u64 ktime_get_boot_fast_ns(void); extern u64 ktime_get_real_fast_ns(void); /* * timespec64 interfaces utilizing the ktime based ones */ -static inline void get_monotonic_boottime64(struct timespec64 *ts) -{ - *ts = ktime_to_timespec64(ktime_get_boottime()); -} - static inline void timekeeping_clocktai64(struct timespec64 *ts) { *ts = ktime_to_timespec64(ktime_get_clocktai()); diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h index af4114d5dc17..3616b4becb59 100644 --- a/include/linux/timekeeping32.h +++ b/include/linux/timekeeping32.h @@ -9,9 +9,6 @@ extern void do_gettimeofday(struct timeval *tv); unsigned long get_seconds(void); -/* does not take xtime_lock */ -struct timespec __current_kernel_time(void); - static inline struct timespec current_kernel_time(void) { struct timespec64 now = current_kernel_time64(); diff --git a/include/linux/timer.h b/include/linux/timer.h index 2448f9cc48a3..7b066fd38248 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -8,8 +8,6 @@ #include <linux/debugobjects.h> #include <linux/stringify.h> -struct tvec_base; - struct timer_list { /* * All fields that change during normal runtime grouped to the diff --git a/include/linux/tpm.h b/include/linux/tpm.h index bcdd3790e94d..06639fb6ab85 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -44,7 +44,7 @@ struct tpm_class_ops { bool (*update_timeouts)(struct tpm_chip *chip, unsigned long *timeout_cap); int (*request_locality)(struct tpm_chip *chip, int loc); - void (*relinquish_locality)(struct tpm_chip *chip, int loc); + int (*relinquish_locality)(struct tpm_chip *chip, int loc); void (*clk_enable)(struct tpm_chip *chip, bool value); }; diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 8a1442c4e513..2bde3eff564c 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -430,11 +430,13 @@ enum event_trigger_type { extern int filter_match_preds(struct event_filter *filter, void *rec); -extern enum event_trigger_type event_triggers_call(struct trace_event_file *file, - void *rec); -extern void event_triggers_post_call(struct trace_event_file *file, - enum event_trigger_type tt, - void *rec); +extern enum event_trigger_type +event_triggers_call(struct trace_event_file *file, void *rec, + struct ring_buffer_event *event); +extern void +event_triggers_post_call(struct trace_event_file *file, + enum event_trigger_type tt, + void *rec, struct ring_buffer_event *event); bool trace_event_ignore_this_pid(struct trace_event_file *trace_file); @@ -454,7 +456,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file) if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { if (eflags & EVENT_FILE_FL_TRIGGER_MODE) - event_triggers_call(file, NULL); + event_triggers_call(file, NULL, NULL); if (eflags & EVENT_FILE_FL_SOFT_DISABLED) return true; if (eflags & 
EVENT_FILE_FL_PID_FILTER) @@ -468,6 +470,9 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx); int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog); void perf_event_detach_bpf_prog(struct perf_event *event); int perf_event_query_prog_array(struct perf_event *event, void __user *info); +int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog); +int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog); +struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name); #else static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) { @@ -487,6 +492,18 @@ perf_event_query_prog_array(struct perf_event *event, void __user *info) { return -EOPNOTSUPP; } +static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p) +{ + return -EOPNOTSUPP; +} +static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p) +{ + return -EOPNOTSUPP; +} +static inline struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name) +{ + return NULL; +} #endif enum { @@ -540,12 +557,47 @@ extern int perf_trace_init(struct perf_event *event); extern void perf_trace_destroy(struct perf_event *event); extern int perf_trace_add(struct perf_event *event, int flags); extern void perf_trace_del(struct perf_event *event, int flags); +#ifdef CONFIG_KPROBE_EVENTS +extern int perf_kprobe_init(struct perf_event *event, bool is_retprobe); +extern void perf_kprobe_destroy(struct perf_event *event); +#endif +#ifdef CONFIG_UPROBE_EVENTS +extern int perf_uprobe_init(struct perf_event *event, bool is_retprobe); +extern void perf_uprobe_destroy(struct perf_event *event); +#endif extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); void perf_trace_buf_update(void *record, u16 type); void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp); +void bpf_trace_run1(struct bpf_prog *prog, u64 arg1); +void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2); +void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3); +void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4); +void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5); +void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6); +void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7); +void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, + u64 arg8); +void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, + u64 arg8, u64 arg9); +void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, + u64 arg8, u64 arg9, u64 arg10); +void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, + u64 arg8, u64 arg9, u64 arg10, u64 arg11); +void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, + u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12); void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx, struct trace_event_call *call, u64 count, struct pt_regs *regs, struct hlist_head *head, diff --git 
a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h index 64ed7064f1fa..22c5a46e9693 100644 --- a/include/linux/tracepoint-defs.h +++ b/include/linux/tracepoint-defs.h @@ -35,4 +35,10 @@ struct tracepoint { struct tracepoint_func __rcu *funcs; }; +struct bpf_raw_event_map { + struct tracepoint *tp; + void *bpf_func; + u32 num_args; +} __aligned(32); + #endif diff --git a/include/linux/types.h b/include/linux/types.h index c94d59ef96cc..ec13d02b3481 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -217,7 +217,7 @@ struct ustat { * * This guarantee is important for few reasons: * - future call_rcu_lazy() will make use of lower bits in the pointer; - * - the structure shares storage spacer in struct page with @compound_head, + * - the structure shares storage space in struct page with @compound_head, * which encode PageTail() in bit 0. The guarantee is needed to avoid * false-positive PageTail(). */ diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index 5bdbd9f49395..07ee0f84a46c 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -90,6 +90,28 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp) #endif } +static inline unsigned long +u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp) +{ + unsigned long flags = 0; + +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + local_irq_save(flags); + write_seqcount_begin(&syncp->seq); +#endif + return flags; +} + +static inline void +u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp, + unsigned long flags) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + write_seqcount_end(&syncp->seq); + local_irq_restore(flags); +#endif +} + static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h index 3119d0ace7aa..aaafecf073ff 100644 --- a/include/linux/usb/audio-v2.h +++ b/include/linux/usb/audio-v2.h @@ -34,14 +34,14 @@ * */ -static inline bool uac2_control_is_readable(u32 bmControls, u8 control) +static inline bool uac_v2v3_control_is_readable(u32 bmControls, u8 control) { - return (bmControls >> (control * 2)) & 0x1; + return (bmControls >> ((control - 1) * 2)) & 0x1; } -static inline bool uac2_control_is_writeable(u32 bmControls, u8 control) +static inline bool uac_v2v3_control_is_writeable(u32 bmControls, u8 control) { - return (bmControls >> (control * 2)) & 0x2; + return (bmControls >> ((control - 1) * 2)) & 0x2; } /* 4.7.2 Class-Specific AC Interface Descriptor */ diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h new file mode 100644 index 000000000000..a8959aaba0ae --- /dev/null +++ b/include/linux/usb/audio-v3.h @@ -0,0 +1,395 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2017 Ruslan Bilovol <ruslan.bilovol@gmail.com> + * + * This file holds USB constants and structures defined + * by the USB DEVICE CLASS DEFINITION FOR AUDIO DEVICES Release 3.0. + */ + +#ifndef __LINUX_USB_AUDIO_V3_H +#define __LINUX_USB_AUDIO_V3_H + +#include <linux/types.h> + +/* + * v1.0, v2.0 and v3.0 of this standard have many things in common. 
For the rest + * of the definitions, please refer to audio.h and audio-v2.h + */ + +/* All High Capability descriptors have these 2 fields at the beginning */ +struct uac3_hc_descriptor_header { + __le16 wLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __le16 wDescriptorID; +} __attribute__ ((packed)); + +/* 4.3.1 CLUSTER DESCRIPTOR HEADER */ +struct uac3_cluster_header_descriptor { + __le16 wLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __le16 wDescriptorID; + __u8 bNrChannels; +} __attribute__ ((packed)); + +/* 4.3.2.1 SEGMENTS */ +struct uac3_cluster_segment_descriptor { + __le16 wLength; + __u8 bSegmentType; + /* __u8[0]; segment-specific data */ +} __attribute__ ((packed)); + +/* 4.3.2.1.1 END SEGMENT */ +struct uac3_cluster_end_segment_descriptor { + __le16 wLength; + __u8 bSegmentType; /* Constant END_SEGMENT */ +} __attribute__ ((packed)); + +/* 4.3.2.1.3.1 INFORMATION SEGMENT */ +struct uac3_cluster_information_segment_descriptor { + __le16 wLength; + __u8 bSegmentType; + __u8 bChPurpose; + __u8 bChRelationship; + __u8 bChGroupID; +} __attribute__ ((packed)); + +/* 4.5.2 CLASS-SPECIFIC AC INTERFACE DESCRIPTOR */ +struct uac3_ac_header_descriptor { + __u8 bLength; /* 10 */ + __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ + __u8 bDescriptorSubtype; /* HEADER descriptor subtype */ + __u8 bCategory; + + /* includes Clock Source, Unit, Terminal, and Power Domain desc. */ + __le16 wTotalLength; + + __le32 bmControls; +} __attribute__ ((packed)); + +/* 4.5.2.1 INPUT TERMINAL DESCRIPTOR */ +struct uac3_input_terminal_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bTerminalID; + __le16 wTerminalType; + __u8 bAssocTerminal; + __u8 bCSourceID; + __le32 bmControls; + __le16 wClusterDescrID; + __le16 wExTerminalDescrID; + __le16 wConnectorsDescrID; + __le16 wTerminalDescrStr; +} __attribute__((packed)); + +/* 4.5.2.2 OUTPUT TERMINAL DESCRIPTOR */ +struct uac3_output_terminal_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bTerminalID; + __le16 wTerminalType; + __u8 bAssocTerminal; + __u8 bSourceID; + __u8 bCSourceID; + __le32 bmControls; + __le16 wExTerminalDescrID; + __le16 wConnectorsDescrID; + __le16 wTerminalDescrStr; +} __attribute__((packed)); + +/* 4.5.2.7 FEATURE UNIT DESCRIPTOR */ +struct uac3_feature_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bUnitID; + __u8 bSourceID; + /* bmaControls is actually u32, + * but u8 is needed for the hybrid parser */ + __u8 bmaControls[0]; /* variable length */ + /* wFeatureDescrStr omitted */ +} __attribute__((packed)); + +#define UAC3_DT_FEATURE_UNIT_SIZE(ch) (7 + ((ch) + 1) * 4) + +/* As above, but more useful for defining your own descriptors */ +#define DECLARE_UAC3_FEATURE_UNIT_DESCRIPTOR(ch) \ +struct uac3_feature_unit_descriptor_##ch { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubtype; \ + __u8 bUnitID; \ + __u8 bSourceID; \ + __le32 bmaControls[ch + 1]; \ + __le16 wFeatureDescrStr; \ +} __attribute__ ((packed)) + +/* 4.5.2.12 CLOCK SOURCE DESCRIPTOR */ +struct uac3_clock_source_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bClockID; + __u8 bmAttributes; + __le32 bmControls; + __u8 bReferenceTerminal; + __le16 wClockSourceStr; +} __attribute__((packed)); + +/* bmAttribute fields */ +#define UAC3_CLOCK_SOURCE_TYPE_EXT 0x0 +#define UAC3_CLOCK_SOURCE_TYPE_INT 0x1 +#define UAC3_CLOCK_SOURCE_ASYNC (0 << 2) +#define 
UAC3_CLOCK_SOURCE_SYNCED_TO_SOF (1 << 1) + +/* 4.5.2.13 CLOCK SELECTOR DESCRIPTOR */ +struct uac3_clock_selector_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bClockID; + __u8 bNrInPins; + __u8 baCSourceID[]; + /* bmControls and wCSelectorDescrStr omitted */ +} __attribute__((packed)); + +/* 4.5.2.14 CLOCK MULTIPLIER DESCRIPTOR */ +struct uac3_clock_multiplier_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bClockID; + __u8 bCSourceID; + __le32 bmControls; + __le16 wCMultiplierDescrStr; +} __attribute__((packed)); + +/* 4.5.2.15 POWER DOMAIN DESCRIPTOR */ +struct uac3_power_domain_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bPowerDomainID; + __le16 waRecoveryTime1; + __le16 waRecoveryTime2; + __u8 bNrEntities; + __u8 baEntityID[]; + /* wPDomainDescrStr omitted */ +} __attribute__((packed)); + +/* As above, but more useful for defining your own descriptors */ +#define DECLARE_UAC3_POWER_DOMAIN_DESCRIPTOR(n) \ +struct uac3_power_domain_descriptor_##n { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubtype; \ + __u8 bPowerDomainID; \ + __le16 waRecoveryTime1; \ + __le16 waRecoveryTime2; \ + __u8 bNrEntities; \ + __u8 baEntityID[n]; \ + __le16 wPDomainDescrStr; \ +} __attribute__ ((packed)) + +/* 4.7.2 CLASS-SPECIFIC AS INTERFACE DESCRIPTOR */ +struct uac3_as_header_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bTerminalLink; + __le32 bmControls; + __le16 wClusterDescrID; + __le64 bmFormats; + __u8 bSubslotSize; + __u8 bBitResolution; + __le16 bmAuxProtocols; + __u8 bControlSize; +} __attribute__((packed)); + +#define UAC3_FORMAT_TYPE_I_RAW_DATA (1 << 6) + +/* 4.8.1.2 CLASS-SPECIFIC AS ISOCHRONOUS AUDIO DATA ENDPOINT DESCRIPTOR */ +struct uac3_iso_endpoint_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __le32 bmControls; + __u8 bLockDelayUnits; + __le16 wLockDelay; +} __attribute__((packed)); + +/* 6.1 INTERRUPT DATA MESSAGE */ +struct uac3_interrupt_data_msg { + __u8 bInfo; + __u8 bSourceType; + __le16 wValue; + __le16 wIndex; +} __attribute__((packed)); + +/* A.2 AUDIO AUDIO FUNCTION SUBCLASS CODES */ +#define UAC3_FUNCTION_SUBCLASS_UNDEFINED 0x00 +#define UAC3_FUNCTION_SUBCLASS_FULL_ADC_3_0 0x01 +/* BADD profiles */ +#define UAC3_FUNCTION_SUBCLASS_GENERIC_IO 0x20 +#define UAC3_FUNCTION_SUBCLASS_HEADPHONE 0x21 +#define UAC3_FUNCTION_SUBCLASS_SPEAKER 0x22 +#define UAC3_FUNCTION_SUBCLASS_MICROPHONE 0x23 +#define UAC3_FUNCTION_SUBCLASS_HEADSET 0x24 +#define UAC3_FUNCTION_SUBCLASS_HEADSET_ADAPTER 0x25 +#define UAC3_FUNCTION_SUBCLASS_SPEAKERPHONE 0x26 + +/* A.7 AUDIO FUNCTION CATEGORY CODES */ +#define UAC3_FUNCTION_SUBCLASS_UNDEFINED 0x00 +#define UAC3_FUNCTION_DESKTOP_SPEAKER 0x01 +#define UAC3_FUNCTION_HOME_THEATER 0x02 +#define UAC3_FUNCTION_MICROPHONE 0x03 +#define UAC3_FUNCTION_HEADSET 0x04 +#define UAC3_FUNCTION_TELEPHONE 0x05 +#define UAC3_FUNCTION_CONVERTER 0x06 +#define UAC3_FUNCTION_SOUND_RECORDER 0x07 +#define UAC3_FUNCTION_IO_BOX 0x08 +#define UAC3_FUNCTION_MUSICAL_INSTRUMENT 0x09 +#define UAC3_FUNCTION_PRO_AUDIO 0x0a +#define UAC3_FUNCTION_AUDIO_VIDEO 0x0b +#define UAC3_FUNCTION_CONTROL_PANEL 0x0c +#define UAC3_FUNCTION_HEADPHONE 0x0d +#define UAC3_FUNCTION_GENERIC_SPEAKER 0x0e +#define UAC3_FUNCTION_HEADSET_ADAPTER 0x0f +#define UAC3_FUNCTION_SPEAKERPHONE 0x10 +#define UAC3_FUNCTION_OTHER 0xff + +/* A.8 AUDIO CLASS-SPECIFIC DESCRIPTOR TYPES */ +#define 
UAC3_CS_UNDEFINED 0x20 +#define UAC3_CS_DEVICE 0x21 +#define UAC3_CS_CONFIGURATION 0x22 +#define UAC3_CS_STRING 0x23 +#define UAC3_CS_INTERFACE 0x24 +#define UAC3_CS_ENDPOINT 0x25 +#define UAC3_CS_CLUSTER 0x26 + +/* A.10 CLUSTER DESCRIPTOR SEGMENT TYPES */ +#define UAC3_SEGMENT_UNDEFINED 0x00 +#define UAC3_CLUSTER_DESCRIPTION 0x01 +#define UAC3_CLUSTER_VENDOR_DEFINED 0x1F +#define UAC3_CHANNEL_INFORMATION 0x20 +#define UAC3_CHANNEL_AMBISONIC 0x21 +#define UAC3_CHANNEL_DESCRIPTION 0x22 +#define UAC3_CHANNEL_VENDOR_DEFINED 0xFE +#define UAC3_END_SEGMENT 0xFF + +/* A.11 CHANNEL PURPOSE DEFINITIONS */ +#define UAC3_PURPOSE_UNDEFINED 0x00 +#define UAC3_PURPOSE_GENERIC_AUDIO 0x01 +#define UAC3_PURPOSE_VOICE 0x02 +#define UAC3_PURPOSE_SPEECH 0x03 +#define UAC3_PURPOSE_AMBIENT 0x04 +#define UAC3_PURPOSE_REFERENCE 0x05 +#define UAC3_PURPOSE_ULTRASONIC 0x06 +#define UAC3_PURPOSE_VIBROKINETIC 0x07 +#define UAC3_PURPOSE_NON_AUDIO 0xFF + +/* A.12 CHANNEL RELATIONSHIP DEFINITIONS */ +#define UAC3_CH_RELATIONSHIP_UNDEFINED 0x00 +#define UAC3_CH_MONO 0x01 +#define UAC3_CH_LEFT 0x02 +#define UAC3_CH_RIGHT 0x03 +#define UAC3_CH_ARRAY 0x04 +#define UAC3_CH_PATTERN_X 0x20 +#define UAC3_CH_PATTERN_Y 0x21 +#define UAC3_CH_PATTERN_A 0x22 +#define UAC3_CH_PATTERN_B 0x23 +#define UAC3_CH_PATTERN_M 0x24 +#define UAC3_CH_PATTERN_S 0x25 +#define UAC3_CH_FRONT_LEFT 0x80 +#define UAC3_CH_FRONT_RIGHT 0x81 +#define UAC3_CH_FRONT_CENTER 0x82 +#define UAC3_CH_FRONT_LEFT_OF_CENTER 0x83 +#define UAC3_CH_FRONT_RIGHT_OF_CENTER 0x84 +#define UAC3_CH_FRONT_WIDE_LEFT 0x85 +#define UAC3_CH_FRONT_WIDE_RIGHT 0x86 +#define UAC3_CH_SIDE_LEFT 0x87 +#define UAC3_CH_SIDE_RIGHT 0x88 +#define UAC3_CH_SURROUND_ARRAY_LEFT 0x89 +#define UAC3_CH_SURROUND_ARRAY_RIGHT 0x8A +#define UAC3_CH_BACK_LEFT 0x8B +#define UAC3_CH_BACK_RIGHT 0x8C +#define UAC3_CH_BACK_CENTER 0x8D +#define UAC3_CH_BACK_LEFT_OF_CENTER 0x8E +#define UAC3_CH_BACK_RIGHT_OF_CENTER 0x8F +#define UAC3_CH_BACK_WIDE_LEFT 0x90 +#define UAC3_CH_BACK_WIDE_RIGHT 0x91 +#define UAC3_CH_TOP_CENTER 0x92 +#define UAC3_CH_TOP_FRONT_LEFT 0x93 +#define UAC3_CH_TOP_FRONT_RIGHT 0x94 +#define UAC3_CH_TOP_FRONT_CENTER 0x95 +#define UAC3_CH_TOP_FRONT_LOC 0x96 +#define UAC3_CH_TOP_FRONT_ROC 0x97 +#define UAC3_CH_TOP_FRONT_WIDE_LEFT 0x98 +#define UAC3_CH_TOP_FRONT_WIDE_RIGHT 0x99 +#define UAC3_CH_TOP_SIDE_LEFT 0x9A +#define UAC3_CH_TOP_SIDE_RIGHT 0x9B +#define UAC3_CH_TOP_SURR_ARRAY_LEFT 0x9C +#define UAC3_CH_TOP_SURR_ARRAY_RIGHT 0x9D +#define UAC3_CH_TOP_BACK_LEFT 0x9E +#define UAC3_CH_TOP_BACK_RIGHT 0x9F +#define UAC3_CH_TOP_BACK_CENTER 0xA0 +#define UAC3_CH_TOP_BACK_LOC 0xA1 +#define UAC3_CH_TOP_BACK_ROC 0xA2 +#define UAC3_CH_TOP_BACK_WIDE_LEFT 0xA3 +#define UAC3_CH_TOP_BACK_WIDE_RIGHT 0xA4 +#define UAC3_CH_BOTTOM_CENTER 0xA5 +#define UAC3_CH_BOTTOM_FRONT_LEFT 0xA6 +#define UAC3_CH_BOTTOM_FRONT_RIGHT 0xA7 +#define UAC3_CH_BOTTOM_FRONT_CENTER 0xA8 +#define UAC3_CH_BOTTOM_FRONT_LOC 0xA9 +#define UAC3_CH_BOTTOM_FRONT_ROC 0xAA +#define UAC3_CH_BOTTOM_FRONT_WIDE_LEFT 0xAB +#define UAC3_CH_BOTTOM_FRONT_WIDE_RIGHT 0xAC +#define UAC3_CH_BOTTOM_SIDE_LEFT 0xAD +#define UAC3_CH_BOTTOM_SIDE_RIGHT 0xAE +#define UAC3_CH_BOTTOM_SURR_ARRAY_LEFT 0xAF +#define UAC3_CH_BOTTOM_SURR_ARRAY_RIGHT 0xB0 +#define UAC3_CH_BOTTOM_BACK_LEFT 0xB1 +#define UAC3_CH_BOTTOM_BACK_RIGHT 0xB2 +#define UAC3_CH_BOTTOM_BACK_CENTER 0xB3 +#define UAC3_CH_BOTTOM_BACK_LOC 0xB4 +#define UAC3_CH_BOTTOM_BACK_ROC 0xB5 +#define UAC3_CH_BOTTOM_BACK_WIDE_LEFT 0xB6 +#define UAC3_CH_BOTTOM_BACK_WIDE_RIGHT 0xB7 +#define UAC3_CH_LOW_FREQUENCY_EFFECTS 
0xB8 +#define UAC3_CH_LFE_LEFT 0xB9 +#define UAC3_CH_LFE_RIGHT 0xBA +#define UAC3_CH_HEADPHONE_LEFT 0xBB +#define UAC3_CH_HEADPHONE_RIGHT 0xBC + +/* A.15 AUDIO CLASS-SPECIFIC AC INTERFACE DESCRIPTOR SUBTYPES */ +/* see audio.h for the rest, which is identical to v1 */ +#define UAC3_EXTENDED_TERMINAL 0x04 +#define UAC3_MIXER_UNIT 0x05 +#define UAC3_SELECTOR_UNIT 0x06 +#define UAC3_FEATURE_UNIT 0x07 +#define UAC3_EFFECT_UNIT 0x08 +#define UAC3_PROCESSING_UNIT 0x09 +#define UAC3_EXTENSION_UNIT 0x0a +#define UAC3_CLOCK_SOURCE 0x0b +#define UAC3_CLOCK_SELECTOR 0x0c +#define UAC3_CLOCK_MULTIPLIER 0x0d +#define UAC3_SAMPLE_RATE_CONVERTER 0x0e +#define UAC3_CONNECTORS 0x0f +#define UAC3_POWER_DOMAIN 0x10 + +/* A.22 AUDIO CLASS-SPECIFIC REQUEST CODES */ +/* see audio-v2.h for the rest, which is identical to v2 */ +#define UAC3_CS_REQ_INTEN 0x04 +#define UAC3_CS_REQ_STRING 0x05 +#define UAC3_CS_REQ_HIGH_CAPABILITY_DESCRIPTOR 0x06 + +/* A.23.1 AUDIOCONTROL INTERFACE CONTROL SELECTORS */ +#define UAC3_AC_CONTROL_UNDEFINED 0x00 +#define UAC3_AC_ACTIVE_INTERFACE_CONTROL 0x01 +#define UAC3_AC_POWER_DOMAIN_CONTROL 0x02 + +#endif /* __LINUX_USB_AUDIO_V3_H */ diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index cef0e44601f8..4b6b9283fa7b 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -54,6 +54,9 @@ /* big enough to hold our biggest descriptor */ #define USB_COMP_EP0_BUFSIZ 1024 +/* OS feature descriptor length <= 4kB */ +#define USB_COMP_EP0_OS_DESC_BUFSIZ 4096 + #define USB_MS_TO_HS_INTERVAL(x) (ilog2((x * 1000 / 125)) + 1) struct usb_configuration; diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 66a5cff7ee14..847f423ad9b3 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -129,6 +129,7 @@ struct usb_ep_ops { int (*enable) (struct usb_ep *ep, const struct usb_endpoint_descriptor *desc); int (*disable) (struct usb_ep *ep); + void (*dispose) (struct usb_ep *ep); struct usb_request *(*alloc_request) (struct usb_ep *ep, gfp_t gfp_flags); @@ -805,6 +806,7 @@ int usb_otg_descriptor_init(struct usb_gadget *gadget, /* utility to simplify map/unmap of usb_requests to/from DMA */ +#ifdef CONFIG_HAS_DMA extern int usb_gadget_map_request_by_dev(struct device *dev, struct usb_request *req, int is_in); extern int usb_gadget_map_request(struct usb_gadget *gadget, @@ -814,6 +816,17 @@ extern void usb_gadget_unmap_request_by_dev(struct device *dev, struct usb_request *req, int is_in); extern void usb_gadget_unmap_request(struct usb_gadget *gadget, struct usb_request *req, int is_in); +#else /* !CONFIG_HAS_DMA */ +static inline int usb_gadget_map_request_by_dev(struct device *dev, + struct usb_request *req, int is_in) { return -ENOSYS; } +static inline int usb_gadget_map_request(struct usb_gadget *gadget, + struct usb_request *req, int is_in) { return -ENOSYS; } + +static inline void usb_gadget_unmap_request_by_dev(struct device *dev, + struct usb_request *req, int is_in) { } +static inline void usb_gadget_unmap_request(struct usb_gadget *gadget, + struct usb_request *req, int is_in) { } +#endif /* !CONFIG_HAS_DMA */ /*-------------------------------------------------------------------------*/ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 176900528822..aef50cb2ed1b 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -103,7 +103,7 @@ struct usb_hcd { * other external phys should be software-transparent */ struct usb_phy *usb_phy; - struct phy *phy; + struct 
usb_phy_roothub *phy_roothub; /* Flags that need to be manipulated atomically because they can * change while the host controller is running. Always use @@ -151,6 +151,12 @@ struct usb_hcd { unsigned msix_enabled:1; /* driver has MSI-X enabled? */ unsigned msi_enabled:1; /* driver has MSI enabled? */ unsigned remove_phy:1; /* auto-remove USB phy */ + /* + * do not manage the PHY state in the HCD core, instead let the driver + * handle this (for example if the PHY can only be turned on after a + * specific event) + */ + unsigned skip_phy_initialization:1; /* The next flag is a stopgap, to be removed when all the HCDs * support the new root-hub polling mechanism. */ diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index 5d19e6730475..9eb908a98033 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h @@ -89,13 +89,6 @@ struct musb_hdrc_config { u8 ram_bits; /* ram address size */ struct musb_hdrc_eps_bits *eps_bits __deprecated; -#ifdef CONFIG_BLACKFIN - /* A GPIO controlling VRSEL in Blackfin */ - unsigned int gpio_vrsel; - unsigned int gpio_vrsel_active; - /* musb CLKIN in Blackfin in MHZ */ - unsigned char clkin; -#endif u32 maximum_speed; }; diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h index b3d41d7409b3..ff359bdfdc7b 100644 --- a/include/linux/usb/pd.h +++ b/include/linux/usb/pd.h @@ -35,6 +35,13 @@ enum pd_ctrl_msg_type { PD_CTRL_WAIT = 12, PD_CTRL_SOFT_RESET = 13, /* 14-15 Reserved */ + PD_CTRL_NOT_SUPP = 16, + PD_CTRL_GET_SOURCE_CAP_EXT = 17, + PD_CTRL_GET_STATUS = 18, + PD_CTRL_FR_SWAP = 19, + PD_CTRL_GET_PPS_STATUS = 20, + PD_CTRL_GET_COUNTRY_CODES = 21, + /* 22-31 Reserved */ }; enum pd_data_msg_type { @@ -43,13 +50,39 @@ enum pd_data_msg_type { PD_DATA_REQUEST = 2, PD_DATA_BIST = 3, PD_DATA_SINK_CAP = 4, - /* 5-14 Reserved */ + PD_DATA_BATT_STATUS = 5, + PD_DATA_ALERT = 6, + PD_DATA_GET_COUNTRY_INFO = 7, + /* 8-14 Reserved */ PD_DATA_VENDOR_DEF = 15, + /* 16-31 Reserved */ +}; + +enum pd_ext_msg_type { + /* 0 Reserved */ + PD_EXT_SOURCE_CAP_EXT = 1, + PD_EXT_STATUS = 2, + PD_EXT_GET_BATT_CAP = 3, + PD_EXT_GET_BATT_STATUS = 4, + PD_EXT_BATT_CAP = 5, + PD_EXT_GET_MANUFACTURER_INFO = 6, + PD_EXT_MANUFACTURER_INFO = 7, + PD_EXT_SECURITY_REQUEST = 8, + PD_EXT_SECURITY_RESPONSE = 9, + PD_EXT_FW_UPDATE_REQUEST = 10, + PD_EXT_FW_UPDATE_RESPONSE = 11, + PD_EXT_PPS_STATUS = 12, + PD_EXT_COUNTRY_INFO = 13, + PD_EXT_COUNTRY_CODES = 14, + /* 15-31 Reserved */ }; #define PD_REV10 0x0 #define PD_REV20 0x1 +#define PD_REV30 0x2 +#define PD_MAX_REV PD_REV30 +#define PD_HEADER_EXT_HDR BIT(15) #define PD_HEADER_CNT_SHIFT 12 #define PD_HEADER_CNT_MASK 0x7 #define PD_HEADER_ID_SHIFT 9 @@ -59,18 +92,19 @@ enum pd_data_msg_type { #define PD_HEADER_REV_MASK 0x3 #define PD_HEADER_DATA_ROLE BIT(5) #define PD_HEADER_TYPE_SHIFT 0 -#define PD_HEADER_TYPE_MASK 0xf +#define PD_HEADER_TYPE_MASK 0x1f -#define PD_HEADER(type, pwr, data, id, cnt) \ +#define PD_HEADER(type, pwr, data, rev, id, cnt, ext_hdr) \ ((((type) & PD_HEADER_TYPE_MASK) << PD_HEADER_TYPE_SHIFT) | \ ((pwr) == TYPEC_SOURCE ? PD_HEADER_PWR_ROLE : 0) | \ ((data) == TYPEC_HOST ? PD_HEADER_DATA_ROLE : 0) | \ - (PD_REV20 << PD_HEADER_REV_SHIFT) | \ + (rev << PD_HEADER_REV_SHIFT) | \ (((id) & PD_HEADER_ID_MASK) << PD_HEADER_ID_SHIFT) | \ - (((cnt) & PD_HEADER_CNT_MASK) << PD_HEADER_CNT_SHIFT)) + (((cnt) & PD_HEADER_CNT_MASK) << PD_HEADER_CNT_SHIFT) | \ + ((ext_hdr) ? 
PD_HEADER_EXT_HDR : 0)) #define PD_HEADER_LE(type, pwr, data, id, cnt) \ - cpu_to_le16(PD_HEADER((type), (pwr), (data), (id), (cnt))) + cpu_to_le16(PD_HEADER((type), (pwr), (data), PD_REV20, (id), (cnt), (0))) static inline unsigned int pd_header_cnt(u16 header) { @@ -102,16 +136,75 @@ static inline unsigned int pd_header_msgid_le(__le16 header) return pd_header_msgid(le16_to_cpu(header)); } +static inline unsigned int pd_header_rev(u16 header) +{ + return (header >> PD_HEADER_REV_SHIFT) & PD_HEADER_REV_MASK; +} + +static inline unsigned int pd_header_rev_le(__le16 header) +{ + return pd_header_rev(le16_to_cpu(header)); +} + +#define PD_EXT_HDR_CHUNKED BIT(15) +#define PD_EXT_HDR_CHUNK_NUM_SHIFT 11 +#define PD_EXT_HDR_CHUNK_NUM_MASK 0xf +#define PD_EXT_HDR_REQ_CHUNK BIT(10) +#define PD_EXT_HDR_DATA_SIZE_SHIFT 0 +#define PD_EXT_HDR_DATA_SIZE_MASK 0x1ff + +#define PD_EXT_HDR(data_size, req_chunk, chunk_num, chunked) \ + ((((data_size) & PD_EXT_HDR_DATA_SIZE_MASK) << PD_EXT_HDR_DATA_SIZE_SHIFT) | \ + ((req_chunk) ? PD_EXT_HDR_REQ_CHUNK : 0) | \ + (((chunk_num) & PD_EXT_HDR_CHUNK_NUM_MASK) << PD_EXT_HDR_CHUNK_NUM_SHIFT) | \ + ((chunked) ? PD_EXT_HDR_CHUNKED : 0)) + +#define PD_EXT_HDR_LE(data_size, req_chunk, chunk_num, chunked) \ + cpu_to_le16(PD_EXT_HDR((data_size), (req_chunk), (chunk_num), (chunked))) + +static inline unsigned int pd_ext_header_chunk_num(u16 ext_header) +{ + return (ext_header >> PD_EXT_HDR_CHUNK_NUM_SHIFT) & + PD_EXT_HDR_CHUNK_NUM_MASK; +} + +static inline unsigned int pd_ext_header_data_size(u16 ext_header) +{ + return (ext_header >> PD_EXT_HDR_DATA_SIZE_SHIFT) & + PD_EXT_HDR_DATA_SIZE_MASK; +} + +static inline unsigned int pd_ext_header_data_size_le(__le16 ext_header) +{ + return pd_ext_header_data_size(le16_to_cpu(ext_header)); +} + #define PD_MAX_PAYLOAD 7 +#define PD_EXT_MAX_CHUNK_DATA 26 /** - * struct pd_message - PD message as seen on wire - * @header: PD message header - * @payload: PD message payload - */ + * struct pd_chunked_ext_message_data - PD chunked extended message data as + * seen on wire + * @header: PD extended message header + * @data: PD extended message data + */ +struct pd_chunked_ext_message_data { + __le16 header; + u8 data[PD_EXT_MAX_CHUNK_DATA]; +} __packed; + +/** + * struct pd_message - PD message as seen on wire + * @header: PD message header + * @payload: PD message payload + * @ext_msg: PD message chunked extended message data + */ struct pd_message { __le16 header; - __le32 payload[PD_MAX_PAYLOAD]; + union { + __le32 payload[PD_MAX_PAYLOAD]; + struct pd_chunked_ext_message_data ext_msg; + }; } __packed; /* PDO: Power Data Object */ @@ -121,6 +214,7 @@ enum pd_pdo_type { PDO_TYPE_FIXED = 0, PDO_TYPE_BATT = 1, PDO_TYPE_VAR = 2, + PDO_TYPE_APDO = 3, }; #define PDO_TYPE_SHIFT 30 @@ -174,6 +268,34 @@ enum pd_pdo_type { (PDO_TYPE(PDO_TYPE_VAR) | PDO_VAR_MIN_VOLT(min_mv) | \ PDO_VAR_MAX_VOLT(max_mv) | PDO_VAR_MAX_CURR(max_ma)) +enum pd_apdo_type { + APDO_TYPE_PPS = 0, +}; + +#define PDO_APDO_TYPE_SHIFT 28 /* Only valid value currently is 0x0 - PPS */ +#define PDO_APDO_TYPE_MASK 0x3 + +#define PDO_APDO_TYPE(t) ((t) << PDO_APDO_TYPE_SHIFT) + +#define PDO_PPS_APDO_MAX_VOLT_SHIFT 17 /* 100mV units */ +#define PDO_PPS_APDO_MIN_VOLT_SHIFT 8 /* 100mV units */ +#define PDO_PPS_APDO_MAX_CURR_SHIFT 0 /* 50mA units */ + +#define PDO_PPS_APDO_VOLT_MASK 0xff +#define PDO_PPS_APDO_CURR_MASK 0x7f + +#define PDO_PPS_APDO_MIN_VOLT(mv) \ + ((((mv) / 100) & PDO_PPS_APDO_VOLT_MASK) << PDO_PPS_APDO_MIN_VOLT_SHIFT) +#define PDO_PPS_APDO_MAX_VOLT(mv) \ + ((((mv) / 
100) & PDO_PPS_APDO_VOLT_MASK) << PDO_PPS_APDO_MAX_VOLT_SHIFT) +#define PDO_PPS_APDO_MAX_CURR(ma) \ + ((((ma) / 50) & PDO_PPS_APDO_CURR_MASK) << PDO_PPS_APDO_MAX_CURR_SHIFT) + +#define PDO_PPS_APDO(min_mv, max_mv, max_ma) \ + (PDO_TYPE(PDO_TYPE_APDO) | PDO_APDO_TYPE(APDO_TYPE_PPS) | \ + PDO_PPS_APDO_MIN_VOLT(min_mv) | PDO_PPS_APDO_MAX_VOLT(max_mv) | \ + PDO_PPS_APDO_MAX_CURR(max_ma)) + static inline enum pd_pdo_type pdo_type(u32 pdo) { return (pdo >> PDO_TYPE_SHIFT) & PDO_TYPE_MASK; @@ -204,6 +326,29 @@ static inline unsigned int pdo_max_power(u32 pdo) return ((pdo >> PDO_BATT_MAX_PWR_SHIFT) & PDO_PWR_MASK) * 250; } +static inline enum pd_apdo_type pdo_apdo_type(u32 pdo) +{ + return (pdo >> PDO_APDO_TYPE_SHIFT) & PDO_APDO_TYPE_MASK; +} + +static inline unsigned int pdo_pps_apdo_min_voltage(u32 pdo) +{ + return ((pdo >> PDO_PPS_APDO_MIN_VOLT_SHIFT) & + PDO_PPS_APDO_VOLT_MASK) * 100; +} + +static inline unsigned int pdo_pps_apdo_max_voltage(u32 pdo) +{ + return ((pdo >> PDO_PPS_APDO_MAX_VOLT_SHIFT) & + PDO_PPS_APDO_VOLT_MASK) * 100; +} + +static inline unsigned int pdo_pps_apdo_max_current(u32 pdo) +{ + return ((pdo >> PDO_PPS_APDO_MAX_CURR_SHIFT) & + PDO_PPS_APDO_CURR_MASK) * 50; +} + /* RDO: Request Data Object */ #define RDO_OBJ_POS_SHIFT 28 #define RDO_OBJ_POS_MASK 0x7 @@ -237,6 +382,24 @@ static inline unsigned int pdo_max_power(u32 pdo) (RDO_OBJ(idx) | (flags) | \ RDO_BATT_OP_PWR(op_mw) | RDO_BATT_MAX_PWR(max_mw)) +#define RDO_PROG_VOLT_MASK 0x7ff +#define RDO_PROG_CURR_MASK 0x7f + +#define RDO_PROG_VOLT_SHIFT 9 +#define RDO_PROG_CURR_SHIFT 0 + +#define RDO_PROG_VOLT_MV_STEP 20 +#define RDO_PROG_CURR_MA_STEP 50 + +#define PDO_PROG_OUT_VOLT(mv) \ + ((((mv) / RDO_PROG_VOLT_MV_STEP) & RDO_PROG_VOLT_MASK) << RDO_PROG_VOLT_SHIFT) +#define PDO_PROG_OP_CURR(ma) \ + ((((ma) / RDO_PROG_CURR_MA_STEP) & RDO_PROG_CURR_MASK) << RDO_PROG_CURR_SHIFT) + +#define RDO_PROG(idx, out_mv, op_ma, flags) \ + (RDO_OBJ(idx) | (flags) | \ + PDO_PROG_OUT_VOLT(out_mv) | PDO_PROG_OP_CURR(op_ma)) + static inline unsigned int rdo_index(u32 rdo) { return (rdo >> RDO_OBJ_POS_SHIFT) & RDO_OBJ_POS_MASK; diff --git a/include/linux/usb/pd_ado.h b/include/linux/usb/pd_ado.h new file mode 100644 index 000000000000..9aa1cf31c93c --- /dev/null +++ b/include/linux/usb/pd_ado.h @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2017 Dialog Semiconductor + * + * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com> + */ + +#ifndef __LINUX_USB_PD_ADO_H +#define __LINUX_USB_PD_ADO_H + +/* ADO : Alert Data Object */ +#define USB_PD_ADO_TYPE_SHIFT 24 +#define USB_PD_ADO_TYPE_MASK 0xff +#define USB_PD_ADO_FIXED_BATT_SHIFT 20 +#define USB_PD_ADO_FIXED_BATT_MASK 0xf +#define USB_PD_ADO_HOT_SWAP_BATT_SHIFT 16 +#define USB_PD_ADO_HOT_SWAP_BATT_MASK 0xf + +#define USB_PD_ADO_TYPE_BATT_STATUS_CHANGE BIT(1) +#define USB_PD_ADO_TYPE_OCP BIT(2) +#define USB_PD_ADO_TYPE_OTP BIT(3) +#define USB_PD_ADO_TYPE_OP_COND_CHANGE BIT(4) +#define USB_PD_ADO_TYPE_SRC_INPUT_CHANGE BIT(5) +#define USB_PD_ADO_TYPE_OVP BIT(6) + +static inline unsigned int usb_pd_ado_type(u32 ado) +{ + return (ado >> USB_PD_ADO_TYPE_SHIFT) & USB_PD_ADO_TYPE_MASK; +} + +static inline unsigned int usb_pd_ado_fixed_batt(u32 ado) +{ + return (ado >> USB_PD_ADO_FIXED_BATT_SHIFT) & + USB_PD_ADO_FIXED_BATT_MASK; +} + +static inline unsigned int usb_pd_ado_hot_swap_batt(u32 ado) +{ + return (ado >> USB_PD_ADO_HOT_SWAP_BATT_SHIFT) & + USB_PD_ADO_HOT_SWAP_BATT_MASK; +} +#endif /* __LINUX_USB_PD_ADO_H */ diff --git a/include/linux/usb/pd_ext_sdb.h 
b/include/linux/usb/pd_ext_sdb.h new file mode 100644 index 000000000000..0eb83ce19597 --- /dev/null +++ b/include/linux/usb/pd_ext_sdb.h @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2017 Dialog Semiconductor + * + * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com> + */ + +#ifndef __LINUX_USB_PD_EXT_SDB_H +#define __LINUX_USB_PD_EXT_SDB_H + +/* SDB : Status Data Block */ +enum usb_pd_ext_sdb_fields { + USB_PD_EXT_SDB_INTERNAL_TEMP = 0, + USB_PD_EXT_SDB_PRESENT_INPUT, + USB_PD_EXT_SDB_PRESENT_BATT_INPUT, + USB_PD_EXT_SDB_EVENT_FLAGS, + USB_PD_EXT_SDB_TEMP_STATUS, + USB_PD_EXT_SDB_DATA_SIZE, +}; + +/* Event Flags */ +#define USB_PD_EXT_SDB_EVENT_OCP BIT(1) +#define USB_PD_EXT_SDB_EVENT_OTP BIT(2) +#define USB_PD_EXT_SDB_EVENT_OVP BIT(3) +#define USB_PD_EXT_SDB_EVENT_CF_CV_MODE BIT(4) + +#define USB_PD_EXT_SDB_PPS_EVENTS (USB_PD_EXT_SDB_EVENT_OCP | \ + USB_PD_EXT_SDB_EVENT_OTP | \ + USB_PD_EXT_SDB_EVENT_OVP) + +#endif /* __LINUX_USB_PD_EXT_SDB_H */ diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h new file mode 100644 index 000000000000..edc51be4a77c --- /dev/null +++ b/include/linux/usb/role.h @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 + +#ifndef __LINUX_USB_ROLE_H +#define __LINUX_USB_ROLE_H + +#include <linux/device.h> + +struct usb_role_switch; + +enum usb_role { + USB_ROLE_NONE, + USB_ROLE_HOST, + USB_ROLE_DEVICE, +}; + +typedef int (*usb_role_switch_set_t)(struct device *dev, enum usb_role role); +typedef enum usb_role (*usb_role_switch_get_t)(struct device *dev); + +/** + * struct usb_role_switch_desc - USB Role Switch Descriptor + * @usb2_port: Optional reference to the host controller port device (USB2) + * @usb3_port: Optional reference to the host controller port device (USB3) + * @udc: Optional reference to the peripheral controller device + * @set: Callback for setting the role + * @get: Callback for getting the role (optional) + * @allow_userspace_control: If true userspace may change the role through sysfs + * + * @usb2_port and @usb3_port will point to the USB host port and @udc to the USB + * device controller behind the USB connector with the role switch. If + * @usb2_port, @usb3_port and @udc are included in the description, the + * reference count for them should be incremented by the caller of + * usb_role_switch_register() before registering the switch. 
+ */ +struct usb_role_switch_desc { + struct device *usb2_port; + struct device *usb3_port; + struct device *udc; + usb_role_switch_set_t set; + usb_role_switch_get_t get; + bool allow_userspace_control; +}; + +int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role); +enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw); +struct usb_role_switch *usb_role_switch_get(struct device *dev); +void usb_role_switch_put(struct usb_role_switch *sw); + +struct usb_role_switch * +usb_role_switch_register(struct device *parent, + const struct usb_role_switch_desc *desc); +void usb_role_switch_unregister(struct usb_role_switch *sw); + +#endif /* __LINUX_USB_ROLE_H */ diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index ca1c0b57f03f..f0d839daeaea 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h @@ -91,17 +91,13 @@ struct tcpc_config { unsigned int operating_snk_mw; enum typec_port_type type; + enum typec_port_data data; enum typec_role default_role; bool try_role_hw; /* try.{src,snk} implemented in hardware */ const struct typec_altmode_desc *alt_modes; }; -enum tcpc_usb_switch { - TCPC_USB_SWITCH_CONNECT, - TCPC_USB_SWITCH_DISCONNECT, -}; - /* Mux state attributes */ #define TCPC_MUX_USB_ENABLED BIT(0) /* USB enabled */ #define TCPC_MUX_DP_ENABLED BIT(1) /* DP enabled */ @@ -116,14 +112,6 @@ enum tcpc_mux_mode { TCPC_MUX_DP_ENABLED, }; -struct tcpc_mux_dev { - int (*set)(struct tcpc_mux_dev *dev, enum tcpc_mux_mode mux_mode, - enum tcpc_usb_switch usb_config, - enum typec_cc_polarity polarity); - bool dfp_only; - void *priv_data; -}; - /** * struct tcpc_dev - Port configuration and callback functions * @config: Pointer to port configuration @@ -175,7 +163,6 @@ struct tcpc_dev { int (*try_role)(struct tcpc_dev *dev, int role); int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type, const struct pd_message *msg); - struct tcpc_mux_dev *mux; }; struct tcpm_port; diff --git a/include/linux/usb/tilegx.h b/include/linux/usb/tilegx.h deleted file mode 100644 index 817908573fe8..000000000000 --- a/include/linux/usb/tilegx.h +++ /dev/null @@ -1,35 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright 2012 Tilera Corporation. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation, version 2. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for - * more details. - * - * Structure to contain platform-specific data related to Tile-Gx USB - * controllers. - */ - -#ifndef _LINUX_USB_TILEGX_H -#define _LINUX_USB_TILEGX_H - -#include <gxio/usb_host.h> - -struct tilegx_usb_platform_data { - /* GXIO device index. */ - int dev_index; - - /* GXIO device context. */ - gxio_usb_host_context_t usb_ctx; - - /* Device IRQ. 
*/ - unsigned int irq; -}; - -#endif /* _LINUX_USB_TILEGX_H */ diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index 0d44ce6af08f..672b39bb0adc 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -22,9 +22,15 @@ struct typec_port; struct fwnode_handle; enum typec_port_type { + TYPEC_PORT_SRC, + TYPEC_PORT_SNK, + TYPEC_PORT_DRP, +}; + +enum typec_port_data { TYPEC_PORT_DFP, TYPEC_PORT_UFP, - TYPEC_PORT_DRP, + TYPEC_PORT_DRD, }; enum typec_plug_type { @@ -60,6 +66,12 @@ enum typec_accessory { #define TYPEC_MAX_ACCESSORY 3 +enum typec_orientation { + TYPEC_ORIENTATION_NONE, + TYPEC_ORIENTATION_NORMAL, + TYPEC_ORIENTATION_REVERSE, +}; + /* * struct usb_pd_identity - USB Power Delivery identity data * @id_header: ID Header VDO @@ -180,11 +192,14 @@ struct typec_partner_desc { /* * struct typec_capability - USB Type-C Port Capabilities - * @role: DFP (Host-only), UFP (Device-only) or DRP (Dual Role) + * @type: Supported power role of the port + * @data: Supported data role of the port * @revision: USB Type-C Specification release. Binary coded decimal * @pd_revision: USB Power Delivery Specification revision if supported - * @prefer_role: Initial role preference + * @prefer_role: Initial role preference (DRP ports). * @accessory: Supported Accessory Modes + * @sw: Cable plug orientation switch + * @mux: Multiplexer switch for Alternate/Accessory Modes * @fwnode: Optional fwnode of the port * @try_role: Set data role preference for DRP port * @dr_set: Set Data Role @@ -197,11 +212,14 @@ struct typec_partner_desc { */ struct typec_capability { enum typec_port_type type; + enum typec_port_data data; u16 revision; /* 0120H = "1.2" */ u16 pd_revision; /* 0300H = "3.0" */ int prefer_role; enum typec_accessory accessory[TYPEC_MAX_ACCESSORY]; + struct typec_switch *sw; + struct typec_mux *mux; struct fwnode_handle *fwnode; int (*try_role)(const struct typec_capability *, @@ -245,4 +263,8 @@ void typec_set_pwr_role(struct typec_port *port, enum typec_role role); void typec_set_vconn_role(struct typec_port *port, enum typec_role role); void typec_set_pwr_opmode(struct typec_port *port, enum typec_pwr_opmode mode); +int typec_set_orientation(struct typec_port *port, + enum typec_orientation orientation); +int typec_set_mode(struct typec_port *port, int mode); + #endif /* __LINUX_USB_TYPEC_H */ diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h new file mode 100644 index 000000000000..12c1b057834b --- /dev/null +++ b/include/linux/usb/typec_mux.h @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0 + +#ifndef __USB_TYPEC_MUX +#define __USB_TYPEC_MUX + +#include <linux/list.h> +#include <linux/usb/typec.h> + +struct device; + +/** + * struct typec_switch - USB Type-C cable orientation switch + * @dev: Switch device + * @entry: List entry + * @set: Callback to the driver for setting the orientation + * + * USB Type-C pin flipper switch routing the correct data pairs from the + * connector to the USB controller depending on the orientation of the cable + * plug. + */ +struct typec_switch { + struct device *dev; + struct list_head entry; + + int (*set)(struct typec_switch *sw, enum typec_orientation orientation); +}; + +/** + * struct typec_switch - USB Type-C connector pin mux + * @dev: Mux device + * @entry: List entry + * @set: Callback to the driver for setting the state of the mux + * + * Pin Multiplexer/DeMultiplexer switch routing the USB Type-C connector pins to + * different components depending on the requested mode of operation. 
Used with + * Accessory/Alternate modes. + */ +struct typec_mux { + struct device *dev; + struct list_head entry; + + int (*set)(struct typec_mux *mux, int state); +}; + +struct typec_switch *typec_switch_get(struct device *dev); +void typec_switch_put(struct typec_switch *sw); +int typec_switch_register(struct typec_switch *sw); +void typec_switch_unregister(struct typec_switch *sw); + +struct typec_mux *typec_mux_get(struct device *dev); +void typec_mux_put(struct typec_mux *mux); +int typec_mux_register(struct typec_mux *mux); +void typec_mux_unregister(struct typec_mux *mux); + +#endif /* __USB_TYPEC_MUX */ diff --git a/include/linux/utsname.h b/include/linux/utsname.h index c8060c2ecd04..44429d9142ca 100644 --- a/include/linux/utsname.h +++ b/include/linux/utsname.h @@ -44,6 +44,8 @@ static inline void put_uts_ns(struct uts_namespace *ns) { kref_put(&ns->kref, free_uts_ns); } + +void uts_ns_init(void); #else static inline void get_uts_ns(struct uts_namespace *ns) { @@ -61,6 +63,10 @@ static inline struct uts_namespace *copy_utsname(unsigned long flags, return old_ns; } + +static inline void uts_ns_init(void) +{ +} #endif #ifdef CONFIG_PROC_SYSCTL diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index 960bedbdec87..77f0f0af3a71 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h @@ -168,11 +168,8 @@ int vga_switcheroo_process_delayed_switch(void); bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev); enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev); -void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic); - int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain); void vga_switcheroo_fini_domain_pm_ops(struct device *dev); -int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain); #else static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} @@ -192,11 +189,8 @@ static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; } static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } -static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} - static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {} -static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } #endif #endif /* _LINUX_VGA_SWITCHEROO_H_ */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index a4c2317d8b9f..f25cef84b41d 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -20,6 +20,17 @@ extern int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos); #endif +struct reclaim_stat { + unsigned nr_dirty; + unsigned nr_unqueued_dirty; + unsigned nr_congested; + unsigned nr_writeback; + unsigned nr_immediate; + unsigned nr_activate; + unsigned nr_ref_keep; + unsigned nr_unmap_fail; +}; + #ifdef CONFIG_VM_EVENT_COUNTERS /* * Light weight per cpu counter implementation. 
diff --git a/include/linux/wait.h b/include/linux/wait.h index 55a611486bac..d9f131ecf708 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -599,6 +599,120 @@ do { \ __ret; \ }) +/** + * wait_event_idle - wait for a condition without contributing to system load + * @wq_head: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * + * The process is put to sleep (TASK_IDLE) until the + * @condition evaluates to true. + * The @condition is checked each time the waitqueue @wq_head is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + * + */ +#define wait_event_idle(wq_head, condition) \ +do { \ + might_sleep(); \ + if (!(condition)) \ + ___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \ +} while (0) + +/** + * wait_event_idle_exclusive - wait for a condition with contributing to system load + * @wq_head: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * + * The process is put to sleep (TASK_IDLE) until the + * @condition evaluates to true. + * The @condition is checked each time the waitqueue @wq_head is woken up. + * + * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag + * set thus if other processes wait on the same list, when this + * process is woken further processes are not considered. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + * + */ +#define wait_event_idle_exclusive(wq_head, condition) \ +do { \ + might_sleep(); \ + if (!(condition)) \ + ___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \ +} while (0) + +#define __wait_event_idle_timeout(wq_head, condition, timeout) \ + ___wait_event(wq_head, ___wait_cond_timeout(condition), \ + TASK_IDLE, 0, timeout, \ + __ret = schedule_timeout(__ret)) + +/** + * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses + * @wq_head: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * @timeout: timeout, in jiffies + * + * The process is put to sleep (TASK_IDLE) until the + * @condition evaluates to true. The @condition is checked each time + * the waitqueue @wq_head is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + * + * Returns: + * 0 if the @condition evaluated to %false after the @timeout elapsed, + * 1 if the @condition evaluated to %true after the @timeout elapsed, + * or the remaining jiffies (at least 1) if the @condition evaluated + * to %true before the @timeout elapsed. + */ +#define wait_event_idle_timeout(wq_head, condition, timeout) \ +({ \ + long __ret = timeout; \ + might_sleep(); \ + if (!___wait_cond_timeout(condition)) \ + __ret = __wait_event_idle_timeout(wq_head, condition, timeout); \ + __ret; \ +}) + +#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \ + ___wait_event(wq_head, ___wait_cond_timeout(condition), \ + TASK_IDLE, 1, timeout, \ + __ret = schedule_timeout(__ret)) + +/** + * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses + * @wq_head: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * @timeout: timeout, in jiffies + * + * The process is put to sleep (TASK_IDLE) until the + * @condition evaluates to true. 
The @condition is checked each time + * the waitqueue @wq_head is woken up. + * + * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag + * set thus if other processes wait on the same list, when this + * process is woken further processes are not considered. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + * + * Returns: + * 0 if the @condition evaluated to %false after the @timeout elapsed, + * 1 if the @condition evaluated to %true after the @timeout elapsed, + * or the remaining jiffies (at least 1) if the @condition evaluated + * to %true before the @timeout elapsed. + */ +#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \ +({ \ + long __ret = timeout; \ + might_sleep(); \ + if (!___wait_cond_timeout(condition)) \ + __ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\ + __ret; \ +}) + extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *); extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *); diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h index 61b39eaf7cad..9318b2166439 100644 --- a/include/linux/wait_bit.h +++ b/include/linux/wait_bit.h @@ -10,7 +10,6 @@ struct wait_bit_key { void *flags; int bit_nr; -#define WAIT_ATOMIC_T_BIT_NR -1 unsigned long timeout; }; @@ -22,21 +21,15 @@ struct wait_bit_queue_entry { #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ { .flags = word, .bit_nr = bit, } -#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \ - { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, } - typedef int wait_bit_action_f(struct wait_bit_key *key, int mode); -typedef int wait_atomic_t_action_f(atomic_t *counter, unsigned int mode); void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit); int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode); int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode); void wake_up_bit(void *word, int bit); -void wake_up_atomic_t(atomic_t *p); int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode); int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout); int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode); -int out_of_line_wait_on_atomic_t(atomic_t *p, wait_atomic_t_action_f action, unsigned int mode); struct wait_queue_head *bit_waitqueue(void *word, int bit); extern void __init wait_bit_init(void); @@ -57,7 +50,6 @@ extern int bit_wait(struct wait_bit_key *key, int mode); extern int bit_wait_io(struct wait_bit_key *key, int mode); extern int bit_wait_timeout(struct wait_bit_key *key, int mode); extern int bit_wait_io_timeout(struct wait_bit_key *key, int mode); -extern int atomic_t_wait(atomic_t *counter, unsigned int mode); /** * wait_on_bit - wait for a bit to be cleared @@ -243,23 +235,74 @@ wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action, return out_of_line_wait_on_bit_lock(word, bit, action, mode); } -/** - * wait_on_atomic_t - Wait for an atomic_t to become 0 - * @val: The atomic value being waited on, a kernel virtual address - * @action: the function used to sleep, which may take special actions - * @mode: the task state to sleep in - * - * Wait for an atomic_t to become 0. 
We abuse the bit-wait waitqueue table for - * the purpose of getting a waitqueue, but we set the key to a bit number - * outside of the target 'word'. - */ -static inline -int wait_on_atomic_t(atomic_t *val, wait_atomic_t_action_f action, unsigned mode) -{ - might_sleep(); - if (atomic_read(val) == 0) - return 0; - return out_of_line_wait_on_atomic_t(val, action, mode); -} +extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags); +extern void wake_up_var(void *var); +extern wait_queue_head_t *__var_waitqueue(void *p); + +#define ___wait_var_event(var, condition, state, exclusive, ret, cmd) \ +({ \ + __label__ __out; \ + struct wait_queue_head *__wq_head = __var_waitqueue(var); \ + struct wait_bit_queue_entry __wbq_entry; \ + long __ret = ret; /* explicit shadow */ \ + \ + init_wait_var_entry(&__wbq_entry, var, \ + exclusive ? WQ_FLAG_EXCLUSIVE : 0); \ + for (;;) { \ + long __int = prepare_to_wait_event(__wq_head, \ + &__wbq_entry.wq_entry, \ + state); \ + if (condition) \ + break; \ + \ + if (___wait_is_interruptible(state) && __int) { \ + __ret = __int; \ + goto __out; \ + } \ + \ + cmd; \ + } \ + finish_wait(__wq_head, &__wbq_entry.wq_entry); \ +__out: __ret; \ +}) + +#define __wait_var_event(var, condition) \ + ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ + schedule()) + +#define wait_var_event(var, condition) \ +do { \ + might_sleep(); \ + if (condition) \ + break; \ + __wait_var_event(var, condition); \ +} while (0) + +#define __wait_var_event_killable(var, condition) \ + ___wait_var_event(var, condition, TASK_KILLABLE, 0, 0, \ + schedule()) + +#define wait_var_event_killable(var, condition) \ +({ \ + int __ret = 0; \ + might_sleep(); \ + if (!(condition)) \ + __ret = __wait_var_event_killable(var, condition); \ + __ret; \ +}) + +#define __wait_var_event_timeout(var, condition, timeout) \ + ___wait_var_event(var, ___wait_cond_timeout(condition), \ + TASK_UNINTERRUPTIBLE, 0, timeout, \ + __ret = schedule_timeout(__ret)) + +#define wait_var_event_timeout(var, condition, timeout) \ +({ \ + long __ret = timeout; \ + might_sleep(); \ + if (!___wait_cond_timeout(condition)) \ + __ret = __wait_var_event_timeout(var, condition, timeout); \ + __ret; \ +}) #endif /* _LINUX_WAIT_BIT_H */ diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index bc0cda180c8b..39a0e215022a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -13,6 +13,7 @@ #include <linux/threads.h> #include <linux/atomic.h> #include <linux/cpumask.h> +#include <linux/rcupdate.h> struct workqueue_struct; @@ -120,6 +121,14 @@ struct delayed_work { int cpu; }; +struct rcu_work { + struct work_struct work; + struct rcu_head rcu; + + /* target workqueue ->rcu uses to queue ->work */ + struct workqueue_struct *wq; +}; + /** * struct workqueue_attrs - A struct for workqueue attributes. 
* @@ -151,6 +160,11 @@ static inline struct delayed_work *to_delayed_work(struct work_struct *work) return container_of(work, struct delayed_work, work); } +static inline struct rcu_work *to_rcu_work(struct work_struct *work) +{ + return container_of(work, struct rcu_work, work); +} + struct execute_work { struct work_struct work; }; @@ -266,6 +280,12 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } #define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \ __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE) +#define INIT_RCU_WORK(_work, _func) \ + INIT_WORK(&(_work)->work, (_func)) + +#define INIT_RCU_WORK_ONSTACK(_work, _func) \ + INIT_WORK_ONSTACK(&(_work)->work, (_func)) + /** * work_pending - Find out whether a work item is currently pending * @work: The work item in question @@ -447,6 +467,7 @@ extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay); extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay); +extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork); extern void flush_workqueue(struct workqueue_struct *wq); extern void drain_workqueue(struct workqueue_struct *wq); @@ -456,13 +477,14 @@ extern int schedule_on_each_cpu(work_func_t func); int execute_in_process_context(work_func_t fn, struct execute_work *); extern bool flush_work(struct work_struct *work); -extern bool cancel_work(struct work_struct *work); extern bool cancel_work_sync(struct work_struct *work); extern bool flush_delayed_work(struct delayed_work *dwork); extern bool cancel_delayed_work(struct delayed_work *dwork); extern bool cancel_delayed_work_sync(struct delayed_work *dwork); +extern bool flush_rcu_work(struct rcu_work *rwork); + extern void workqueue_set_max_active(struct workqueue_struct *wq, int max_active); extern struct work_struct *current_work(void); diff --git a/include/linux/xarray.h b/include/linux/xarray.h new file mode 100644 index 000000000000..2dfc8006fe64 --- /dev/null +++ b/include/linux/xarray.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef _LINUX_XARRAY_H +#define _LINUX_XARRAY_H +/* + * eXtensible Arrays + * Copyright (c) 2017 Microsoft Corporation + * Author: Matthew Wilcox <mawilcox@microsoft.com> + */ + +#include <linux/spinlock.h> + +#define xa_trylock(xa) spin_trylock(&(xa)->xa_lock) +#define xa_lock(xa) spin_lock(&(xa)->xa_lock) +#define xa_unlock(xa) spin_unlock(&(xa)->xa_lock) +#define xa_lock_bh(xa) spin_lock_bh(&(xa)->xa_lock) +#define xa_unlock_bh(xa) spin_unlock_bh(&(xa)->xa_lock) +#define xa_lock_irq(xa) spin_lock_irq(&(xa)->xa_lock) +#define xa_unlock_irq(xa) spin_unlock_irq(&(xa)->xa_lock) +#define xa_lock_irqsave(xa, flags) \ + spin_lock_irqsave(&(xa)->xa_lock, flags) +#define xa_unlock_irqrestore(xa, flags) \ + spin_unlock_irqrestore(&(xa)->xa_lock, flags) + +#endif /* _LINUX_XARRAY_H */ diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 57a8e98f2708..2219cce81ca4 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -47,6 +47,8 @@ void zs_destroy_pool(struct zs_pool *pool); unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags); void zs_free(struct zs_pool *pool, unsigned long obj); +size_t zs_huge_class_size(struct zs_pool *pool); + void *zs_map_object(struct zs_pool *pool, unsigned long handle, enum zs_mapmode mm); void zs_unmap_object(struct zs_pool *pool, unsigned long handle); |
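Finally, the workqueue.h hunk above adds rcu_work, a work item that is queued only after an RCU grace period has elapsed, together with INIT_RCU_WORK(), queue_rcu_work(), flush_rcu_work() and the to_rcu_work() helper. A minimal, hypothetical sketch of the intended pattern follows; the demo_obj structure and function names are invented for illustration, only the rcu_work symbols come from the diff.

/*
 * Free an RCU-protected object from a work item that only runs after an
 * RCU grace period, using the rcu_work API added in this merge.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_obj {
	struct rcu_work rwork;	/* work_struct plus the rcu_head used for deferral */
	/* ... RCU-protected payload ... */
};

static void demo_obj_free_workfn(struct work_struct *work)
{
	/* to_rcu_work() recovers the rcu_work from the embedded work_struct. */
	struct demo_obj *obj = container_of(to_rcu_work(work),
					    struct demo_obj, rwork);

	kfree(obj);	/* the grace period has already elapsed here */
}

static void demo_obj_release(struct demo_obj *obj)
{
	INIT_RCU_WORK(&obj->rwork, demo_obj_free_workfn);
	/* Queued on system_wq only once the RCU grace period completes. */
	queue_rcu_work(system_wq, &obj->rwork);
}

flush_rcu_work() from the same hunk waits for both the grace period and the work function to finish, which is useful before tearing down the owning module or workqueue.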