Diffstat (limited to 'include')
-rw-r--r-- | include/linux/cpu.h | 2
-rw-r--r-- | include/linux/f2fs_fs.h | 11
-rw-r--r-- | include/linux/fsnotify.h | 33
-rw-r--r-- | include/linux/fsnotify_backend.h | 4
-rw-r--r-- | include/linux/percpu.h | 12
-rw-r--r-- | include/net/dsa.h | 15
-rw-r--r-- | include/trace/events/f2fs.h | 57
-rw-r--r-- | include/uapi/linux/bpf.h | 145
-rw-r--r-- | include/uapi/linux/fuse.h | 22
-rw-r--r-- | include/uapi/linux/input-event-codes.h | 1
-rw-r--r-- | include/uapi/linux/netfilter/nf_tables.h | 4
11 files changed, 171 insertions, 135 deletions
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 732745f865b7..3813fe45effd 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -57,6 +57,8 @@ extern ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_mds(struct device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index f5740423b002..65559900d4d7 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -164,6 +164,10 @@ struct f2fs_checkpoint { unsigned char sit_nat_version_bitmap[1]; } __packed; +#define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */ +#define CP_MIN_CHKSUM_OFFSET \ + (offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap)) + /* * For orphan inode management */ @@ -198,11 +202,12 @@ struct f2fs_extent { get_extra_isize(inode)) #define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */ #define ADDRS_PER_INODE(inode) addrs_per_inode(inode) -#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */ +#define DEF_ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */ +#define ADDRS_PER_BLOCK(inode) addrs_per_block(inode) #define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */ #define ADDRS_PER_PAGE(page, inode) \ - (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK) + (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode)) #define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1) #define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2) @@ -267,7 +272,7 @@ struct f2fs_inode { } __packed; struct direct_node { - __le32 addr[ADDRS_PER_BLOCK]; /* array of data block address */ + __le32 addr[DEF_ADDRS_PER_BLOCK]; /* array of data block address */ } __packed; struct indirect_node { diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 0c0ef3078a22..94972e8eb6d1 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -152,39 +152,6 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt) } /* - * fsnotify_nameremove - a filename was removed from a directory - * - * This is mostly called under parent vfs inode lock so name and - * dentry->d_parent should be stable. However there are some corner cases where - * inode lock is not held. So to be on the safe side and be reselient to future - * callers and out of tree users of d_delete(), we do not assume that d_parent - * and d_name are stable and we use dget_parent() and - * take_dentry_name_snapshot() to grab stable references. - */ -static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) -{ - struct dentry *parent; - struct name_snapshot name; - __u32 mask = FS_DELETE; - - /* d_delete() of pseudo inode? (e.g. 
__ns_get_path() playing tricks) */ - if (IS_ROOT(dentry)) - return; - - if (isdir) - mask |= FS_ISDIR; - - parent = dget_parent(dentry); - take_dentry_name_snapshot(&name, dentry); - - fsnotify(d_inode(parent), mask, d_inode(dentry), FSNOTIFY_EVENT_INODE, - &name.name, 0); - - release_dentry_name_snapshot(&name); - dput(parent); -} - -/* * fsnotify_inoderemove - an inode is going away */ static inline void fsnotify_inoderemove(struct inode *inode) diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index c28f6ed1f59b..a9f9dcc1e515 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -355,6 +355,7 @@ extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u extern void __fsnotify_inode_delete(struct inode *inode); extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); extern void fsnotify_sb_delete(struct super_block *sb); +extern void fsnotify_nameremove(struct dentry *dentry, int isdir); extern u32 fsnotify_get_cookie(void); static inline int fsnotify_inode_watches_children(struct inode *inode) @@ -524,6 +525,9 @@ static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt) static inline void fsnotify_sb_delete(struct super_block *sb) {} +static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) +{} + static inline void fsnotify_update_flags(struct dentry *dentry) {} diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 70b7123f38c7..9909dc0e273a 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -26,16 +26,10 @@ #define PCPU_MIN_ALLOC_SHIFT 2 #define PCPU_MIN_ALLOC_SIZE (1 << PCPU_MIN_ALLOC_SHIFT) -/* number of bits per page, used to trigger a scan if blocks are > PAGE_SIZE */ -#define PCPU_BITS_PER_PAGE (PAGE_SIZE >> PCPU_MIN_ALLOC_SHIFT) - /* - * This determines the size of each metadata block. There are several subtle - * constraints around this constant. The reserved region must be a multiple of - * PCPU_BITMAP_BLOCK_SIZE. Additionally, PCPU_BITMAP_BLOCK_SIZE must be a - * multiple of PAGE_SIZE or PAGE_SIZE must be a multiple of - * PCPU_BITMAP_BLOCK_SIZE to align with the populated page map. The unit_size - * also has to be a multiple of PCPU_BITMAP_BLOCK_SIZE to ensure full blocks. + * The PCPU_BITMAP_BLOCK_SIZE must be the same size as PAGE_SIZE as the + * updating of hints is used to manage the nr_empty_pop_pages in both + * the chunk and globally. 
*/ #define PCPU_BITMAP_BLOCK_SIZE PAGE_SIZE #define PCPU_BITMAP_BLOCK_BITS (PCPU_BITMAP_BLOCK_SIZE >> \ diff --git a/include/net/dsa.h b/include/net/dsa.h index 6aaaadd6a413..685294817712 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -99,24 +99,9 @@ struct __dsa_skb_cb { #define DSA_SKB_CB(skb) ((struct dsa_skb_cb *)((skb)->cb)) -#define DSA_SKB_CB_COPY(nskb, skb) \ - { *__DSA_SKB_CB(nskb) = *__DSA_SKB_CB(skb); } - -#define DSA_SKB_CB_ZERO(skb) \ - { *__DSA_SKB_CB(skb) = (struct __dsa_skb_cb) {0}; } - #define DSA_SKB_CB_PRIV(skb) \ ((void *)(skb)->cb + offsetof(struct __dsa_skb_cb, priv)) -#define DSA_SKB_CB_CLONE(_clone, _skb) \ - { \ - struct sk_buff *clone = _clone; \ - struct sk_buff *skb = _skb; \ - \ - DSA_SKB_CB_COPY(clone, skb); \ - DSA_SKB_CB(skb)->clone = clone; \ - } - struct dsa_switch_tree { struct list_head list; diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index a3916b4dd57e..53b96f12300c 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -533,6 +533,37 @@ TRACE_EVENT(f2fs_truncate_partial_nodes, __entry->err) ); +TRACE_EVENT(f2fs_file_write_iter, + + TP_PROTO(struct inode *inode, unsigned long offset, + unsigned long length, int ret), + + TP_ARGS(inode, offset, length, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(unsigned long, offset) + __field(unsigned long, length) + __field(int, ret) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->offset = offset; + __entry->length = length; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), ino = %lu, " + "offset = %lu, length = %lu, written(err) = %d", + show_dev_ino(__entry), + __entry->offset, + __entry->length, + __entry->ret) +); + TRACE_EVENT(f2fs_map_blocks, TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int ret), @@ -1253,6 +1284,32 @@ DEFINE_EVENT(f2fs__page, f2fs_commit_inmem_page, TP_ARGS(page, type) ); +TRACE_EVENT(f2fs_filemap_fault, + + TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret), + + TP_ARGS(inode, index, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(pgoff_t, index) + __field(unsigned long, ret) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->index = index; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), ino = %lu, index = %lu, ret = %lx", + show_dev_ino(__entry), + (unsigned long)__entry->index, + __entry->ret) +); + TRACE_EVENT(f2fs_writepages, TP_PROTO(struct inode *inode, struct writeback_control *wbc, int type), diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 72336bac7573..63e0cf66f01a 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -629,7 +629,7 @@ union bpf_attr { * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ * **->swhash** and *skb*\ **->l4hash** to 0). * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -654,7 +654,7 @@ union bpf_attr { * flexibility and can handle sizes larger than 2 or 4 for the * checksum to update. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. 
Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -686,7 +686,7 @@ union bpf_attr { * flexibility and can handle sizes larger than 2 or 4 for the * checksum to update. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -741,7 +741,7 @@ union bpf_attr { * efficient, but it is handled through an action code where the * redirection happens only after the eBPF program has returned. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -806,7 +806,7 @@ union bpf_attr { * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to * be **ETH_P_8021Q**. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -818,7 +818,7 @@ union bpf_attr { * Description * Pop a VLAN header from the packet associated to *skb*. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1168,7 +1168,7 @@ union bpf_attr { * All values for *flags* are reserved for future usage, and must * be left at zero. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1281,7 +1281,7 @@ union bpf_attr { * implicitly linearizes, unclones and drops offloads from the * *skb*. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1317,7 +1317,7 @@ union bpf_attr { * **bpf_skb_pull_data()** to effectively unclone the *skb* from * the very beginning in case it is indeed cloned. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1369,7 +1369,7 @@ union bpf_attr { * All values for *flags* are reserved for future usage, and must * be left at zero. 
* - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1384,7 +1384,7 @@ union bpf_attr { * can be used to prepare the packet for pushing or popping * headers. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1518,20 +1518,20 @@ union bpf_attr { * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. * Adjusting mss in this way is not allowed for datagrams. * - * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 **: - * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 **: + * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**, + * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**: * Any new space is reserved to hold a tunnel header. * Configure skb offsets and other fields accordingly. * - * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE **: - * * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP **: + * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**, + * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**: * Use with ENCAP_L3 flags to further specify the tunnel type. * - * * **BPF_F_ADJ_ROOM_ENCAP_L2(len) **: + * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*): * Use with ENCAP_L3/L4 flags to further specify the tunnel - * type; **len** is the length of the inner MAC header. + * type; *len* is the length of the inner MAC header. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1610,7 +1610,7 @@ union bpf_attr { * more flexibility as the user is free to store whatever meta * data they need. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1852,7 +1852,7 @@ union bpf_attr { * copied if necessary (i.e. if data was not linear and if start * and end pointers do not point to the same chunk). * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -1886,7 +1886,7 @@ union bpf_attr { * only possible to shrink the packet as of this writing, * therefore *delta* must be a negative integer. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -2061,18 +2061,18 @@ union bpf_attr { * **BPF_LWT_ENCAP_IP** * IP encapsulation (GRE/GUE/IPIP/etc). 
The outer header * must be IPv4 or IPv6, followed by zero or more - * additional headers, up to LWT_BPF_MAX_HEADROOM total - * bytes in all prepended headers. Please note that - * if skb_is_gso(skb) is true, no more than two headers - * can be prepended, and the inner header, if present, - * should be either GRE or UDP/GUE. - * - * BPF_LWT_ENCAP_SEG6*** types can be called by bpf programs of - * type BPF_PROG_TYPE_LWT_IN; BPF_LWT_ENCAP_IP type can be called - * by bpf programs of types BPF_PROG_TYPE_LWT_IN and - * BPF_PROG_TYPE_LWT_XMIT. - * - * A call to this helper is susceptible to change the underlaying + * additional headers, up to **LWT_BPF_MAX_HEADROOM** + * total bytes in all prepended headers. Please note that + * if **skb_is_gso**\ (*skb*) is true, no more than two + * headers can be prepended, and the inner header, if + * present, should be either GRE or UDP/GUE. + * + * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs + * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can + * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and + * **BPF_PROG_TYPE_LWT_XMIT**. + * + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -2087,7 +2087,7 @@ union bpf_attr { * inside the outermost IPv6 Segment Routing Header can be * modified through this helper. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -2103,7 +2103,7 @@ union bpf_attr { * after the segments are accepted. *delta* can be as well * positive (growing) as negative (shrinking). * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -2126,13 +2126,13 @@ union bpf_attr { * Type of *param*: **int**. * **SEG6_LOCAL_ACTION_END_B6** * End.B6 action: Endpoint bound to an SRv6 policy. - * Type of param: **struct ipv6_sr_hdr**. + * Type of *param*: **struct ipv6_sr_hdr**. * **SEG6_LOCAL_ACTION_END_B6_ENCAP** * End.B6.Encap action: Endpoint bound to an SRv6 * encapsulation policy. - * Type of param: **struct ipv6_sr_hdr**. + * Type of *param*: **struct ipv6_sr_hdr**. * - * A call to this helper is susceptible to change the underlaying + * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be * performed again, if the helper is used in combination with @@ -2285,7 +2285,8 @@ union bpf_attr { * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** - * result is from **reuse->socks**\ [] using the hash of the tuple. + * result is from *reuse*\ **->socks**\ [] using the hash of the + * tuple. 
* * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description @@ -2321,7 +2322,8 @@ union bpf_attr { * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** - * result is from **reuse->socks**\ [] using the hash of the tuple. + * result is from *reuse*\ **->socks**\ [] using the hash of the + * tuple. * * int bpf_sk_release(struct bpf_sock *sock) * Description @@ -2490,31 +2492,34 @@ union bpf_attr { * network namespace *netns*. The return value must be checked, * and if non-**NULL**, released via **bpf_sk_release**\ (). * - * This function is identical to bpf_sk_lookup_tcp, except that it - * also returns timewait or request sockets. Use bpf_sk_fullsock - * or bpf_tcp_socket to access the full structure. + * This function is identical to **bpf_sk_lookup_tcp**\ (), except + * that it also returns timewait or request sockets. Use + * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the + * full structure. * * This helper is available only if the kernel was compiled with * **CONFIG_NET** configuration option. * Return * Pointer to **struct bpf_sock**, or **NULL** in case of failure. * For sockets with reuseport option, the **struct bpf_sock** - * result is from **reuse->socks**\ [] using the hash of the tuple. + * result is from *reuse*\ **->socks**\ [] using the hash of the + * tuple. * * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * Description - * Check whether iph and th contain a valid SYN cookie ACK for - * the listening socket in sk. + * Check whether *iph* and *th* contain a valid SYN cookie ACK for + * the listening socket in *sk*. * - * iph points to the start of the IPv4 or IPv6 header, while - * iph_len contains sizeof(struct iphdr) or sizeof(struct ip6hdr). + * *iph* points to the start of the IPv4 or IPv6 header, while + * *iph_len* contains **sizeof**\ (**struct iphdr**) or + * **sizeof**\ (**struct ip6hdr**). * - * th points to the start of the TCP header, while th_len contains - * sizeof(struct tcphdr). + * *th* points to the start of the TCP header, while *th_len* + * contains **sizeof**\ (**struct tcphdr**). * * Return - * 0 if iph and th are a valid SYN cookie ACK, or a negative error - * otherwise. + * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative + * error otherwise. * * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) * Description @@ -2592,17 +2597,17 @@ union bpf_attr { * and save the result in *res*. * * The string may begin with an arbitrary amount of white space - * (as determined by isspace(3)) followed by a single optional '-' - * sign. + * (as determined by **isspace**\ (3)) followed by a single + * optional '**-**' sign. * * Five least significant bits of *flags* encode base, other bits * are currently unused. * * Base must be either 8, 10, 16 or 0 to detect it automatically - * similar to user space strtol(3). + * similar to user space **strtol**\ (3). * Return * Number of characters consumed on success. Must be positive but - * no more than buf_len. + * no more than *buf_len*. * * **-EINVAL** if no valid digits were found or unsupported base * was provided. @@ -2616,16 +2621,16 @@ union bpf_attr { * given base and save the result in *res*. * * The string may begin with an arbitrary amount of white space - * (as determined by isspace(3)). + * (as determined by **isspace**\ (3)). 
* * Five least significant bits of *flags* encode base, other bits * are currently unused. * * Base must be either 8, 10, 16 or 0 to detect it automatically - * similar to user space strtoul(3). + * similar to user space **strtoul**\ (3). * Return * Number of characters consumed on success. Must be positive but - * no more than buf_len. + * no more than *buf_len*. * * **-EINVAL** if no valid digits were found or unsupported base * was provided. @@ -2634,26 +2639,26 @@ union bpf_attr { * * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags) * Description - * Get a bpf-local-storage from a sk. + * Get a bpf-local-storage from a *sk*. * * Logically, it could be thought of getting the value from * a *map* with *sk* as the **key**. From this * perspective, the usage is not much different from - * **bpf_map_lookup_elem(map, &sk)** except this - * helper enforces the key must be a **bpf_fullsock()** - * and the map must be a BPF_MAP_TYPE_SK_STORAGE also. + * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this + * helper enforces the key must be a full socket and the map must + * be a **BPF_MAP_TYPE_SK_STORAGE** also. * * Underneath, the value is stored locally at *sk* instead of - * the map. The *map* is used as the bpf-local-storage **type**. - * The bpf-local-storage **type** (i.e. the *map*) is searched - * against all bpf-local-storages residing at sk. + * the *map*. The *map* is used as the bpf-local-storage + * "type". The bpf-local-storage "type" (i.e. the *map*) is + * searched against all bpf-local-storages residing at *sk*. * - * An optional *flags* (BPF_SK_STORAGE_GET_F_CREATE) can be + * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be * used such that a new bpf-local-storage will be * created if one does not exist. *value* can be used - * together with BPF_SK_STORAGE_GET_F_CREATE to specify + * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify * the initial value of a bpf-local-storage. If *value* is - * NULL, the new bpf-local-storage will be zero initialized. + * **NULL**, the new bpf-local-storage will be zero initialized. * Return * A bpf-local-storage pointer is returned on success. * @@ -2662,7 +2667,7 @@ union bpf_attr { * * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) * Description - * Delete a bpf-local-storage from a sk. + * Delete a bpf-local-storage from a *sk*. * Return * 0 on success. 
* diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 2ac598614a8f..19fb55e3c73e 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -44,6 +44,7 @@ * - add lock_owner field to fuse_setattr_in, fuse_read_in and fuse_write_in * - add blksize field to fuse_attr * - add file flags field to fuse_read_in and fuse_write_in + * - Add ATIME_NOW and MTIME_NOW flags to fuse_setattr_in * * 7.10 * - add nonseekable open flag @@ -54,7 +55,7 @@ * - add POLL message and NOTIFY_POLL notification * * 7.12 - * - add umask flag to input argument of open, mknod and mkdir + * - add umask flag to input argument of create, mknod and mkdir * - add notification messages for invalidation of inodes and * directory entries * @@ -125,6 +126,10 @@ * * 7.29 * - add FUSE_NO_OPENDIR_SUPPORT flag + * + * 7.30 + * - add FUSE_EXPLICIT_INVAL_DATA + * - add FUSE_IOCTL_COMPAT_X32 */ #ifndef _LINUX_FUSE_H @@ -160,7 +165,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 29 +#define FUSE_KERNEL_MINOR_VERSION 30 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -229,11 +234,13 @@ struct fuse_file_lock { * FOPEN_KEEP_CACHE: don't invalidate the data cache on open * FOPEN_NONSEEKABLE: the file is not seekable * FOPEN_CACHE_DIR: allow caching this directory + * FOPEN_STREAM: the file is stream-like (no file position at all) */ #define FOPEN_DIRECT_IO (1 << 0) #define FOPEN_KEEP_CACHE (1 << 1) #define FOPEN_NONSEEKABLE (1 << 2) #define FOPEN_CACHE_DIR (1 << 3) +#define FOPEN_STREAM (1 << 4) /** * INIT request/reply flags @@ -263,6 +270,7 @@ struct fuse_file_lock { * FUSE_MAX_PAGES: init_out.max_pages contains the max number of req pages * FUSE_CACHE_SYMLINKS: cache READLINK responses * FUSE_NO_OPENDIR_SUPPORT: kernel supports zero-message opendir + * FUSE_EXPLICIT_INVAL_DATA: only invalidate cached pages on explicit request */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -289,6 +297,7 @@ struct fuse_file_lock { #define FUSE_MAX_PAGES (1 << 22) #define FUSE_CACHE_SYMLINKS (1 << 23) #define FUSE_NO_OPENDIR_SUPPORT (1 << 24) +#define FUSE_EXPLICIT_INVAL_DATA (1 << 25) /** * CUSE INIT request/reply flags @@ -335,6 +344,7 @@ struct fuse_file_lock { * FUSE_IOCTL_RETRY: retry with new iovecs * FUSE_IOCTL_32BIT: 32bit ioctl * FUSE_IOCTL_DIR: is a directory + * FUSE_IOCTL_COMPAT_X32: x32 compat ioctl on 64bit machine (64bit time_t) * * FUSE_IOCTL_MAX_IOV: maximum of in_iovecs + out_iovecs */ @@ -343,6 +353,7 @@ struct fuse_file_lock { #define FUSE_IOCTL_RETRY (1 << 2) #define FUSE_IOCTL_32BIT (1 << 3) #define FUSE_IOCTL_DIR (1 << 4) +#define FUSE_IOCTL_COMPAT_X32 (1 << 5) #define FUSE_IOCTL_MAX_IOV 256 @@ -353,6 +364,13 @@ struct fuse_file_lock { */ #define FUSE_POLL_SCHEDULE_NOTIFY (1 << 0) +/** + * Fsync flags + * + * FUSE_FSYNC_FDATASYNC: Sync data only, not metadata + */ +#define FUSE_FSYNC_FDATASYNC (1 << 0) + enum fuse_opcode { FUSE_LOOKUP = 1, FUSE_FORGET = 2, /* no reply */ diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 64cee116928e..85387c76c24f 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -606,6 +606,7 @@ #define KEY_SCREENSAVER 0x245 /* AL Screen Saver */ #define KEY_VOICECOMMAND 0x246 /* Listening Voice Command */ #define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */ +#define KEY_KBD_LAYOUT_NEXT 0x248 /* AC Next Keyboard Layout Select */ #define KEY_BRIGHTNESS_MIN 0x250 
/* Set Brightness to Minimum */ #define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */ diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index f0cf7b0f4f35..505393c6e959 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -966,7 +966,6 @@ enum nft_socket_keys { * @NFT_CT_DST_IP: conntrack layer 3 protocol destination (IPv4 address) * @NFT_CT_SRC_IP6: conntrack layer 3 protocol source (IPv6 address) * @NFT_CT_DST_IP6: conntrack layer 3 protocol destination (IPv6 address) - * @NFT_CT_TIMEOUT: connection tracking timeout policy assigned to conntrack * @NFT_CT_ID: conntrack id */ enum nft_ct_keys { @@ -993,7 +992,6 @@ enum nft_ct_keys { NFT_CT_DST_IP, NFT_CT_SRC_IP6, NFT_CT_DST_IP6, - NFT_CT_TIMEOUT, NFT_CT_ID, __NFT_CT_MAX }; @@ -1138,7 +1136,7 @@ enum nft_log_level { NFT_LOGLEVEL_AUDIT, __NFT_LOGLEVEL_MAX }; -#define NFT_LOGLEVEL_MAX (__NFT_LOGLEVEL_MAX + 1) +#define NFT_LOGLEVEL_MAX (__NFT_LOGLEVEL_MAX - 1) /** * enum nft_queue_attributes - nf_tables queue expression netlink attributes |
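The final nf_tables hunk above fixes an off-by-two in NFT_LOGLEVEL_MAX: when an enum is terminated by a __..._MAX sentinel, the highest valid value is the sentinel minus one, so defining the MAX macro as sentinel plus one overstated the range. A minimal sketch of that sentinel convention, using hypothetical DEMO_* names rather than anything taken from the patch:

enum demo_loglevel {
	DEMO_LOGLEVEL_EMERG,	/* 0 */
	DEMO_LOGLEVEL_INFO,	/* 1 */
	DEMO_LOGLEVEL_AUDIT,	/* 2, last valid value */
	__DEMO_LOGLEVEL_MAX	/* 3, sentinel: one past the last valid value */
};
#define DEMO_LOGLEVEL_MAX	(__DEMO_LOGLEVEL_MAX - 1)	/* == DEMO_LOGLEVEL_AUDIT */

With this pattern, a bounds check such as "level > DEMO_LOGLEVEL_MAX" rejects out-of-range input without naming the last enumerator explicitly, which is why the macro must point at the last valid value rather than past the sentinel.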