Diffstat (limited to 'include')
288 files changed, 5207 insertions, 2239 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 0dc1ea0b52f5..48f0fd499274 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -365,8 +365,6 @@ struct acpi_device { acpi_handle handle; /* no handle for fixed hardware */ struct fwnode_handle fwnode; struct acpi_device *parent; - struct list_head children; - struct list_head node; struct list_head wakeup_list; struct list_head del_list; struct acpi_device_status status; @@ -379,7 +377,6 @@ struct acpi_device { struct acpi_device_data data; struct acpi_scan_handler *handler; struct acpi_hotplug_context *hp; - struct acpi_driver *driver; const struct acpi_gpio_mapping *driver_gpios; void *driver_data; struct device dev; @@ -483,6 +480,9 @@ extern struct bus_type acpi_bus_type; int acpi_bus_for_each_dev(int (*fn)(struct device *, void *), void *data); int acpi_dev_for_each_child(struct acpi_device *adev, int (*fn)(struct acpi_device *, void *), void *data); +int acpi_dev_for_each_child_reverse(struct acpi_device *adev, + int (*fn)(struct acpi_device *, void *), + void *data); /* * Events @@ -521,6 +521,7 @@ const char *acpi_power_state_string(int state); int acpi_device_set_power(struct acpi_device *device, int state); int acpi_bus_init_power(struct acpi_device *device); int acpi_device_fix_up_power(struct acpi_device *device); +void acpi_device_fix_up_power_extended(struct acpi_device *adev); int acpi_bus_update_power(acpi_handle handle, int *state_p); int acpi_device_update_power(struct acpi_device *device, int *state_p); bool acpi_bus_power_manageable(acpi_handle handle); @@ -622,6 +623,8 @@ static inline int acpi_dma_configure(struct device *dev, } struct acpi_device *acpi_find_child_device(struct acpi_device *parent, u64 address, bool check_children); +struct acpi_device *acpi_find_child_by_adr(struct acpi_device *adev, + acpi_bus_address adr); int acpi_is_root_bridge(acpi_handle); struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index c6108581d97d..f73d357ecdf5 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -17,7 +17,7 @@ #include <acpi/pcc.h> #include <acpi/processor.h> -/* Support CPPCv2 and CPPCv3 */ +/* CPPCv2 and CPPCv3 support */ #define CPPC_V2_REV 2 #define CPPC_V3_REV 3 #define CPPC_V2_NUM_ENT 21 @@ -145,6 +145,7 @@ extern bool cppc_allow_fast_switch(void); extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data); extern unsigned int cppc_get_transition_latency(int cpu); extern bool cpc_ffh_supported(void); +extern bool cpc_supported_by_cpu(void); extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val); extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val); #else /* !CONFIG_ACPI_CPPC_LIB */ diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 194027371928..9fa49686957a 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -441,9 +441,12 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr) #endif /* CONFIG_ACPI_PROCESSOR_IDLE */ /* in processor_thermal.c */ -int acpi_processor_get_limit_info(struct acpi_processor *pr); +int acpi_processor_thermal_init(struct acpi_processor *pr, + struct acpi_device *device); +void acpi_processor_thermal_exit(struct acpi_processor *pr, + struct acpi_device *device); extern const struct thermal_cooling_device_ops processor_cooling_ops; -#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ) +#ifdef CONFIG_CPU_FREQ void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy); void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy); #else @@ -455,6 +458,6 @@ static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy) { return; } -#endif /* CONFIG_ACPI_CPU_FREQ_PSS */ +#endif /* CONFIG_CPU_FREQ */ #endif
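The acpi_bus.h hunk above removes the children/node list heads from struct acpi_device, leaving the acpi_dev_for_each_child() iterators as the supported way to walk a device's children. As a rough usage sketch (the callback and counter are illustrative, not part of this diff), a caller could count the children reported as present:

/* Hypothetical callback: count child devices that are present. */
static int count_present(struct acpi_device *child, void *data)
{
	int *count = data;

	if (child->status.present)
		(*count)++;

	return 0;	/* a non-zero return stops the walk */
}

static int count_present_children(struct acpi_device *adev)
{
	int count = 0;

	acpi_dev_for_each_child(adev, count_present, &count);
	return count;
}

The new acpi_dev_for_each_child_reverse() takes the same callback type and visits the children in the opposite order.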
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 302506bbc2a4..36db8b9eb68a 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -5,6 +5,7 @@ # asm headers from the host architecture.) mandatory-y += atomic.h +mandatory-y += archrandom.h mandatory-y += barrier.h mandatory-y += bitops.h mandatory-y += bug.h @@ -44,6 +45,7 @@ mandatory-y += msi.h mandatory-y += pci.h mandatory-y += percpu.h mandatory-y += pgalloc.h +mandatory-y += platform-feature.h mandatory-y += preempt.h mandatory-y += rwonce.h mandatory-y += sections.h diff --git a/include/asm-generic/archrandom.h b/include/asm-generic/archrandom.h new file mode 100644 index 000000000000..3cd7f980cfdc --- /dev/null +++ b/include/asm-generic/archrandom.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_ARCHRANDOM_H__ +#define __ASM_GENERIC_ARCHRANDOM_H__ + +static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs) +{ + return 0; +} + +static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs) +{ + return 0; +} + +#endif diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index fd7e8fbaeef1..961f4d88f9ef 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -38,6 +38,10 @@ #define wmb() do { kcsan_wmb(); __wmb(); } while (0) #endif +#ifdef __dma_mb +#define dma_mb() do { kcsan_mb(); __dma_mb(); } while (0) +#endif + #ifdef __dma_rmb #define dma_rmb() do { kcsan_rmb(); __dma_rmb(); } while (0) #endif @@ -65,6 +69,10 @@ #define wmb() mb() #endif +#ifndef dma_mb +#define dma_mb() mb() +#endif + #ifndef dma_rmb #define dma_rmb() rmb() #endif diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 7ce93aaf69f8..72974cb81343 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -964,7 +964,34 @@ static inline void iounmap(volatile void __iomem *addr) #elif defined(CONFIG_GENERIC_IOREMAP) #include <linux/pgtable.h> -void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot); +/* + * Arch code can implement the following two hooks when using GENERIC_IOREMAP + * ioremap_allowed() returns a bool, + * - true means continue to remap + * - false means skip remap and return directly + * iounmap_allowed() returns a bool, + * - true means continue to vunmap + * - false means skip vunmap and return directly + */ +#ifndef ioremap_allowed +#define ioremap_allowed ioremap_allowed +static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size, + unsigned long prot) +{ + return true; +} +#endif + +#ifndef iounmap_allowed +#define iounmap_allowed iounmap_allowed +static inline bool iounmap_allowed(void *addr) +{ + return true; +} +#endif + +void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, + unsigned long prot); void iounmap(volatile void __iomem *addr); static inline void __iomem *ioremap(phys_addr_t addr, size_t size) @@ -1125,9 +1152,7 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer, } #endif -#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED extern int devmem_is_allowed(unsigned long pfn); -#endif #endif /* __KERNEL__ */
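The asm-generic/io.h hunk above adds the ioremap_allowed()/iounmap_allowed() hooks for GENERIC_IOREMAP users; the always-true fallbacks are only compiled when the architecture has not supplied its own. A hypothetical arch override (the pfn_valid() policy is an assumption for illustration, not taken from this diff) would be defined before the generic fallback is pulled in:

/* Hypothetical arch hook: refuse to ioremap ordinary RAM. */
#define ioremap_allowed ioremap_allowed
static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
				   unsigned long prot)
{
	/* Pages with a valid pfn are normal memory; skip the remap. */
	return !pfn_valid(PHYS_PFN(phys_addr));
}

Because the generic definition sits under #ifndef ioremap_allowed, the arch version takes precedence, and per the comment above the generic ioremap_prot() skips the remap and returns directly when the hook says false.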
diff --git a/include/asm-generic/platform-feature.h b/include/asm-generic/platform-feature.h new file mode 100644 index 000000000000..4b0af3d51588 --- /dev/null +++ b/include/asm-generic/platform-feature.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_PLATFORM_FEATURE_H +#define _ASM_GENERIC_PLATFORM_FEATURE_H + +/* Number of arch specific feature flags. */ +#define PLATFORM_ARCH_FEAT_N 0 + +#endif /* _ASM_GENERIC_PLATFORM_FEATURE_H */ diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index ff3e82553a76..492dce43236e 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -158,9 +158,24 @@ * Useful if your architecture doesn't use IPIs for remote TLB invalidates * and therefore doesn't naturally serialize with software page-table walkers. * + * MMU_GATHER_NO_FLUSH_CACHE + * + * Indicates the architecture has flush_cache_range() but it does *NOT* need to be called + * before unmapping a VMA. + * + * NOTE: strictly speaking we shouldn't have this knob and instead rely on + * flush_cache_range() being a NOP, except Sparc64 seems to be + * different here. + * + * MMU_GATHER_MERGE_VMAS + * + * Indicates the architecture wants to merge ranges over VMAs; typically when + * multiple range invalidates are more expensive than a full invalidate. + * * MMU_GATHER_NO_RANGE * - * Use this if your architecture lacks an efficient flush_tlb_range(). + * Use this if your architecture lacks an efficient flush_tlb_range(). This + * option implies MMU_GATHER_MERGE_VMAS above. * * MMU_GATHER_NO_GATHER * @@ -288,6 +303,7 @@ struct mmu_gather { */ unsigned int vma_exec : 1; unsigned int vma_huge : 1; + unsigned int vma_pfn : 1; unsigned int batch_count; @@ -334,8 +350,8 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb) #ifdef CONFIG_MMU_GATHER_NO_RANGE -#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma) -#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma() +#if defined(tlb_flush) +#error MMU_GATHER_NO_RANGE relies on default tlb_flush() #endif /* @@ -352,20 +368,9 @@ static inline void tlb_flush(struct mmu_gather *tlb) flush_tlb_mm(tlb->mm); } -static inline void -tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } - -#define tlb_end_vma tlb_end_vma -static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { } - #else /* CONFIG_MMU_GATHER_NO_RANGE */ #ifndef tlb_flush - -#if defined(tlb_start_vma) || defined(tlb_end_vma) -#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma() -#endif - /* * When an architecture does not provide its own tlb_flush() implementation * but does have a reasonably efficient flush_tlb_range() implementation @@ -385,6 +390,9 @@ static inline void tlb_flush(struct mmu_gather *tlb) flush_tlb_range(&vma, tlb->start, tlb->end); } } +#endif + +#endif /* CONFIG_MMU_GATHER_NO_RANGE */ static inline void tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) @@ -402,17 +410,9 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) */ tlb->vma_huge = is_vm_hugetlb_page(vma); tlb->vma_exec = !!(vma->vm_flags & VM_EXEC); + tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)); } -#else - -static inline void -tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } - -#endif - -#endif /* CONFIG_MMU_GATHER_NO_RANGE */ - static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) { /* @@ -486,32 +486,36 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb) * case
where we're doing a full MM flush. When we're doing a munmap, * the vmas are adjusted to only cover the region to be torn down. */ -#ifndef tlb_start_vma static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { if (tlb->fullmm) return; tlb_update_vma_flags(tlb, vma); +#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE flush_cache_range(vma, vma->vm_start, vma->vm_end); -} #endif +} -#ifndef tlb_end_vma static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { if (tlb->fullmm) return; /* - * Do a TLB flush and reset the range at VMA boundaries; this avoids - * the ranges growing with the unused space between consecutive VMAs, - * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on - * this. + * VM_PFNMAP is more fragile because the core mm will not track the + * page mapcount -- there might not be page-frames for these PFNs after + * all. Force flush TLBs for such ranges to avoid munmap() vs + * unmap_mapping_range() races. */ - tlb_flush_mmu_tlbonly(tlb); + if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) { + /* + * Do a TLB flush and reset the range at VMA boundaries; this avoids + * the ranges growing with the unused space between consecutive VMAs. + */ + tlb_flush_mmu_tlbonly(tlb); + } } -#endif /* * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end, diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h index f6da8a132639..b0f80cfd2a26 100644 --- a/include/clocksource/timer-ti-dm.h +++ b/include/clocksource/timer-ti-dm.h @@ -247,148 +247,4 @@ int omap_dm_timers_active(void); #define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \ (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT)) -/* - * The below are inlined to optimize code size for system timers. Other code - * should not need these at all. - */ -#if defined(CONFIG_ARCH_OMAP1) || defined(CONFIG_ARCH_OMAP2PLUS) -static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg, - int posted) -{ - if (posted) - while (readl_relaxed(timer->pend) & (reg >> WPSHIFT)) - cpu_relax(); - - return readl_relaxed(timer->func_base + (reg & 0xff)); -} - -static inline void __omap_dm_timer_write(struct omap_dm_timer *timer, - u32 reg, u32 val, int posted) -{ - if (posted) - while (readl_relaxed(timer->pend) & (reg >> WPSHIFT)) - cpu_relax(); - - writel_relaxed(val, timer->func_base + (reg & 0xff)); -} - -static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer) -{ - u32 tidr; - - /* Assume v1 ip if bits [31:16] are zero */ - tidr = readl_relaxed(timer->io_base); - if (!(tidr >> 16)) { - timer->revision = 1; - timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET; - timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET; - timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET; - timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET; - timer->func_base = timer->io_base; - } else { - timer->revision = 2; - timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS; - timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET; - timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR; - timer->pend = timer->io_base + - _OMAP_TIMER_WRITE_PEND_OFFSET + - OMAP_TIMER_V2_FUNC_OFFSET; - timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET; - } -} - -/* - * __omap_dm_timer_enable_posted - enables write posted mode - * @timer: pointer to timer instance handle - * - * Enables the write posted mode for the timer. 
When posted mode is enabled - writes to certain timer registers are immediately acknowledged by the - internal bus and hence prevent stalling the CPU waiting for the write to - complete. Enabling this feature can improve performance for writing to the - timer registers. - */ -static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer) -{ - if (timer->posted) - return; - - if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) { - timer->posted = OMAP_TIMER_NONPOSTED; - __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0); - return; - } - - __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, - OMAP_TIMER_CTRL_POSTED, 0); - timer->context.tsicr = OMAP_TIMER_CTRL_POSTED; - timer->posted = OMAP_TIMER_POSTED; -} - -/** - * __omap_dm_timer_override_errata - override errata flags for a timer - * @timer: pointer to timer handle - * @errata: errata flags to be ignored - * - * For a given timer, override a timer errata by clearing the flags - * specified by the errata argument. A specific erratum should only be - * overridden for a timer if the timer is used in such a way the erratum - * has no impact. - */ -static inline void __omap_dm_timer_override_errata(struct omap_dm_timer *timer, - u32 errata) -{ - timer->errata &= ~errata; -} - -static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer, - int posted, unsigned long rate) -{ - u32 l; - - l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); - if (l & OMAP_TIMER_CTRL_ST) { - l &= ~0x1; - __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted); -#ifdef CONFIG_ARCH_OMAP2PLUS - /* Readback to make sure write has completed */ - __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); - /* - * Wait for functional clock period x 3.5 to make sure that - * timer is stopped - */ - udelay(3500000 / rate + 1); -#endif - } - - /* Ack possibly pending interrupt */ - writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat); -} - -static inline void __omap_dm_timer_load_start(struct omap_dm_timer *timer, - u32 ctrl, unsigned int load, - int posted) -{ - __omap_dm_timer_write(timer, OMAP_TIMER_COUNTER_REG, load, posted); - __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, ctrl, posted); -} - -static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer, - unsigned int value) -{ - writel_relaxed(value, timer->irq_ena); - __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0); -} - -static inline unsigned int -__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted) -{ - return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted); -} - -static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer, - unsigned int value) -{ - writel_relaxed(value, timer->irq_stat); -} -#endif /* CONFIG_ARCH_OMAP1 || CONFIG_ARCH_OMAP2PLUS */ #endif /* __CLOCKSOURCE_DMTIMER_H */ diff --git a/include/crypto/aria.h b/include/crypto/aria.h new file mode 100644 index 000000000000..4a86661788e8 --- /dev/null +++ b/include/crypto/aria.h @@ -0,0 +1,461 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Cryptographic API. + * + * ARIA Cipher Algorithm. + * + * Documentation of ARIA can be found in RFC 5794. + * Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com> + * + * Information for ARIA + * http://210.104.33.10/ARIA/index-e.html (English) + * http://seed.kisa.or.kr/ (Korean) + * + * Public domain version is distributed above.
+ */ + +#ifndef _CRYPTO_ARIA_H +#define _CRYPTO_ARIA_H + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/crypto.h> +#include <asm/byteorder.h> + +#define ARIA_MIN_KEY_SIZE 16 +#define ARIA_MAX_KEY_SIZE 32 +#define ARIA_BLOCK_SIZE 16 +#define ARIA_MAX_RD_KEYS 17 +#define ARIA_RD_KEY_WORDS (ARIA_BLOCK_SIZE / sizeof(u32)) + +struct aria_ctx { + int key_length; + int rounds; + u32 enc_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS]; + u32 dec_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS]; +}; + +static const u32 key_rc[5][4] = { + { 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0 }, + { 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0 }, + { 0xdb92371d, 0x2126e970, 0x03249775, 0x04e8c90e }, + { 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0 }, + { 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0 } +}; + +static const u32 s1[256] = { + 0x00636363, 0x007c7c7c, 0x00777777, 0x007b7b7b, + 0x00f2f2f2, 0x006b6b6b, 0x006f6f6f, 0x00c5c5c5, + 0x00303030, 0x00010101, 0x00676767, 0x002b2b2b, + 0x00fefefe, 0x00d7d7d7, 0x00ababab, 0x00767676, + 0x00cacaca, 0x00828282, 0x00c9c9c9, 0x007d7d7d, + 0x00fafafa, 0x00595959, 0x00474747, 0x00f0f0f0, + 0x00adadad, 0x00d4d4d4, 0x00a2a2a2, 0x00afafaf, + 0x009c9c9c, 0x00a4a4a4, 0x00727272, 0x00c0c0c0, + 0x00b7b7b7, 0x00fdfdfd, 0x00939393, 0x00262626, + 0x00363636, 0x003f3f3f, 0x00f7f7f7, 0x00cccccc, + 0x00343434, 0x00a5a5a5, 0x00e5e5e5, 0x00f1f1f1, + 0x00717171, 0x00d8d8d8, 0x00313131, 0x00151515, + 0x00040404, 0x00c7c7c7, 0x00232323, 0x00c3c3c3, + 0x00181818, 0x00969696, 0x00050505, 0x009a9a9a, + 0x00070707, 0x00121212, 0x00808080, 0x00e2e2e2, + 0x00ebebeb, 0x00272727, 0x00b2b2b2, 0x00757575, + 0x00090909, 0x00838383, 0x002c2c2c, 0x001a1a1a, + 0x001b1b1b, 0x006e6e6e, 0x005a5a5a, 0x00a0a0a0, + 0x00525252, 0x003b3b3b, 0x00d6d6d6, 0x00b3b3b3, + 0x00292929, 0x00e3e3e3, 0x002f2f2f, 0x00848484, + 0x00535353, 0x00d1d1d1, 0x00000000, 0x00ededed, + 0x00202020, 0x00fcfcfc, 0x00b1b1b1, 0x005b5b5b, + 0x006a6a6a, 0x00cbcbcb, 0x00bebebe, 0x00393939, + 0x004a4a4a, 0x004c4c4c, 0x00585858, 0x00cfcfcf, + 0x00d0d0d0, 0x00efefef, 0x00aaaaaa, 0x00fbfbfb, + 0x00434343, 0x004d4d4d, 0x00333333, 0x00858585, + 0x00454545, 0x00f9f9f9, 0x00020202, 0x007f7f7f, + 0x00505050, 0x003c3c3c, 0x009f9f9f, 0x00a8a8a8, + 0x00515151, 0x00a3a3a3, 0x00404040, 0x008f8f8f, + 0x00929292, 0x009d9d9d, 0x00383838, 0x00f5f5f5, + 0x00bcbcbc, 0x00b6b6b6, 0x00dadada, 0x00212121, + 0x00101010, 0x00ffffff, 0x00f3f3f3, 0x00d2d2d2, + 0x00cdcdcd, 0x000c0c0c, 0x00131313, 0x00ececec, + 0x005f5f5f, 0x00979797, 0x00444444, 0x00171717, + 0x00c4c4c4, 0x00a7a7a7, 0x007e7e7e, 0x003d3d3d, + 0x00646464, 0x005d5d5d, 0x00191919, 0x00737373, + 0x00606060, 0x00818181, 0x004f4f4f, 0x00dcdcdc, + 0x00222222, 0x002a2a2a, 0x00909090, 0x00888888, + 0x00464646, 0x00eeeeee, 0x00b8b8b8, 0x00141414, + 0x00dedede, 0x005e5e5e, 0x000b0b0b, 0x00dbdbdb, + 0x00e0e0e0, 0x00323232, 0x003a3a3a, 0x000a0a0a, + 0x00494949, 0x00060606, 0x00242424, 0x005c5c5c, + 0x00c2c2c2, 0x00d3d3d3, 0x00acacac, 0x00626262, + 0x00919191, 0x00959595, 0x00e4e4e4, 0x00797979, + 0x00e7e7e7, 0x00c8c8c8, 0x00373737, 0x006d6d6d, + 0x008d8d8d, 0x00d5d5d5, 0x004e4e4e, 0x00a9a9a9, + 0x006c6c6c, 0x00565656, 0x00f4f4f4, 0x00eaeaea, + 0x00656565, 0x007a7a7a, 0x00aeaeae, 0x00080808, + 0x00bababa, 0x00787878, 0x00252525, 0x002e2e2e, + 0x001c1c1c, 0x00a6a6a6, 0x00b4b4b4, 0x00c6c6c6, + 0x00e8e8e8, 0x00dddddd, 0x00747474, 0x001f1f1f, + 0x004b4b4b, 0x00bdbdbd, 0x008b8b8b, 0x008a8a8a, + 0x00707070, 0x003e3e3e, 0x00b5b5b5, 0x00666666, + 
0x00484848, 0x00030303, 0x00f6f6f6, 0x000e0e0e, + 0x00616161, 0x00353535, 0x00575757, 0x00b9b9b9, + 0x00868686, 0x00c1c1c1, 0x001d1d1d, 0x009e9e9e, + 0x00e1e1e1, 0x00f8f8f8, 0x00989898, 0x00111111, + 0x00696969, 0x00d9d9d9, 0x008e8e8e, 0x00949494, + 0x009b9b9b, 0x001e1e1e, 0x00878787, 0x00e9e9e9, + 0x00cecece, 0x00555555, 0x00282828, 0x00dfdfdf, + 0x008c8c8c, 0x00a1a1a1, 0x00898989, 0x000d0d0d, + 0x00bfbfbf, 0x00e6e6e6, 0x00424242, 0x00686868, + 0x00414141, 0x00999999, 0x002d2d2d, 0x000f0f0f, + 0x00b0b0b0, 0x00545454, 0x00bbbbbb, 0x00161616 +}; + +static const u32 s2[256] = { + 0xe200e2e2, 0x4e004e4e, 0x54005454, 0xfc00fcfc, + 0x94009494, 0xc200c2c2, 0x4a004a4a, 0xcc00cccc, + 0x62006262, 0x0d000d0d, 0x6a006a6a, 0x46004646, + 0x3c003c3c, 0x4d004d4d, 0x8b008b8b, 0xd100d1d1, + 0x5e005e5e, 0xfa00fafa, 0x64006464, 0xcb00cbcb, + 0xb400b4b4, 0x97009797, 0xbe00bebe, 0x2b002b2b, + 0xbc00bcbc, 0x77007777, 0x2e002e2e, 0x03000303, + 0xd300d3d3, 0x19001919, 0x59005959, 0xc100c1c1, + 0x1d001d1d, 0x06000606, 0x41004141, 0x6b006b6b, + 0x55005555, 0xf000f0f0, 0x99009999, 0x69006969, + 0xea00eaea, 0x9c009c9c, 0x18001818, 0xae00aeae, + 0x63006363, 0xdf00dfdf, 0xe700e7e7, 0xbb00bbbb, + 0x00000000, 0x73007373, 0x66006666, 0xfb00fbfb, + 0x96009696, 0x4c004c4c, 0x85008585, 0xe400e4e4, + 0x3a003a3a, 0x09000909, 0x45004545, 0xaa00aaaa, + 0x0f000f0f, 0xee00eeee, 0x10001010, 0xeb00ebeb, + 0x2d002d2d, 0x7f007f7f, 0xf400f4f4, 0x29002929, + 0xac00acac, 0xcf00cfcf, 0xad00adad, 0x91009191, + 0x8d008d8d, 0x78007878, 0xc800c8c8, 0x95009595, + 0xf900f9f9, 0x2f002f2f, 0xce00cece, 0xcd00cdcd, + 0x08000808, 0x7a007a7a, 0x88008888, 0x38003838, + 0x5c005c5c, 0x83008383, 0x2a002a2a, 0x28002828, + 0x47004747, 0xdb00dbdb, 0xb800b8b8, 0xc700c7c7, + 0x93009393, 0xa400a4a4, 0x12001212, 0x53005353, + 0xff00ffff, 0x87008787, 0x0e000e0e, 0x31003131, + 0x36003636, 0x21002121, 0x58005858, 0x48004848, + 0x01000101, 0x8e008e8e, 0x37003737, 0x74007474, + 0x32003232, 0xca00caca, 0xe900e9e9, 0xb100b1b1, + 0xb700b7b7, 0xab00abab, 0x0c000c0c, 0xd700d7d7, + 0xc400c4c4, 0x56005656, 0x42004242, 0x26002626, + 0x07000707, 0x98009898, 0x60006060, 0xd900d9d9, + 0xb600b6b6, 0xb900b9b9, 0x11001111, 0x40004040, + 0xec00ecec, 0x20002020, 0x8c008c8c, 0xbd00bdbd, + 0xa000a0a0, 0xc900c9c9, 0x84008484, 0x04000404, + 0x49004949, 0x23002323, 0xf100f1f1, 0x4f004f4f, + 0x50005050, 0x1f001f1f, 0x13001313, 0xdc00dcdc, + 0xd800d8d8, 0xc000c0c0, 0x9e009e9e, 0x57005757, + 0xe300e3e3, 0xc300c3c3, 0x7b007b7b, 0x65006565, + 0x3b003b3b, 0x02000202, 0x8f008f8f, 0x3e003e3e, + 0xe800e8e8, 0x25002525, 0x92009292, 0xe500e5e5, + 0x15001515, 0xdd00dddd, 0xfd00fdfd, 0x17001717, + 0xa900a9a9, 0xbf00bfbf, 0xd400d4d4, 0x9a009a9a, + 0x7e007e7e, 0xc500c5c5, 0x39003939, 0x67006767, + 0xfe00fefe, 0x76007676, 0x9d009d9d, 0x43004343, + 0xa700a7a7, 0xe100e1e1, 0xd000d0d0, 0xf500f5f5, + 0x68006868, 0xf200f2f2, 0x1b001b1b, 0x34003434, + 0x70007070, 0x05000505, 0xa300a3a3, 0x8a008a8a, + 0xd500d5d5, 0x79007979, 0x86008686, 0xa800a8a8, + 0x30003030, 0xc600c6c6, 0x51005151, 0x4b004b4b, + 0x1e001e1e, 0xa600a6a6, 0x27002727, 0xf600f6f6, + 0x35003535, 0xd200d2d2, 0x6e006e6e, 0x24002424, + 0x16001616, 0x82008282, 0x5f005f5f, 0xda00dada, + 0xe600e6e6, 0x75007575, 0xa200a2a2, 0xef00efef, + 0x2c002c2c, 0xb200b2b2, 0x1c001c1c, 0x9f009f9f, + 0x5d005d5d, 0x6f006f6f, 0x80008080, 0x0a000a0a, + 0x72007272, 0x44004444, 0x9b009b9b, 0x6c006c6c, + 0x90009090, 0x0b000b0b, 0x5b005b5b, 0x33003333, + 0x7d007d7d, 0x5a005a5a, 0x52005252, 0xf300f3f3, + 0x61006161, 0xa100a1a1, 0xf700f7f7, 0xb000b0b0, + 0xd600d6d6, 
0x3f003f3f, 0x7c007c7c, 0x6d006d6d, + 0xed00eded, 0x14001414, 0xe000e0e0, 0xa500a5a5, + 0x3d003d3d, 0x22002222, 0xb300b3b3, 0xf800f8f8, + 0x89008989, 0xde00dede, 0x71007171, 0x1a001a1a, + 0xaf00afaf, 0xba00baba, 0xb500b5b5, 0x81008181 +}; + +static const u32 x1[256] = { + 0x52520052, 0x09090009, 0x6a6a006a, 0xd5d500d5, + 0x30300030, 0x36360036, 0xa5a500a5, 0x38380038, + 0xbfbf00bf, 0x40400040, 0xa3a300a3, 0x9e9e009e, + 0x81810081, 0xf3f300f3, 0xd7d700d7, 0xfbfb00fb, + 0x7c7c007c, 0xe3e300e3, 0x39390039, 0x82820082, + 0x9b9b009b, 0x2f2f002f, 0xffff00ff, 0x87870087, + 0x34340034, 0x8e8e008e, 0x43430043, 0x44440044, + 0xc4c400c4, 0xdede00de, 0xe9e900e9, 0xcbcb00cb, + 0x54540054, 0x7b7b007b, 0x94940094, 0x32320032, + 0xa6a600a6, 0xc2c200c2, 0x23230023, 0x3d3d003d, + 0xeeee00ee, 0x4c4c004c, 0x95950095, 0x0b0b000b, + 0x42420042, 0xfafa00fa, 0xc3c300c3, 0x4e4e004e, + 0x08080008, 0x2e2e002e, 0xa1a100a1, 0x66660066, + 0x28280028, 0xd9d900d9, 0x24240024, 0xb2b200b2, + 0x76760076, 0x5b5b005b, 0xa2a200a2, 0x49490049, + 0x6d6d006d, 0x8b8b008b, 0xd1d100d1, 0x25250025, + 0x72720072, 0xf8f800f8, 0xf6f600f6, 0x64640064, + 0x86860086, 0x68680068, 0x98980098, 0x16160016, + 0xd4d400d4, 0xa4a400a4, 0x5c5c005c, 0xcccc00cc, + 0x5d5d005d, 0x65650065, 0xb6b600b6, 0x92920092, + 0x6c6c006c, 0x70700070, 0x48480048, 0x50500050, + 0xfdfd00fd, 0xeded00ed, 0xb9b900b9, 0xdada00da, + 0x5e5e005e, 0x15150015, 0x46460046, 0x57570057, + 0xa7a700a7, 0x8d8d008d, 0x9d9d009d, 0x84840084, + 0x90900090, 0xd8d800d8, 0xabab00ab, 0x00000000, + 0x8c8c008c, 0xbcbc00bc, 0xd3d300d3, 0x0a0a000a, + 0xf7f700f7, 0xe4e400e4, 0x58580058, 0x05050005, + 0xb8b800b8, 0xb3b300b3, 0x45450045, 0x06060006, + 0xd0d000d0, 0x2c2c002c, 0x1e1e001e, 0x8f8f008f, + 0xcaca00ca, 0x3f3f003f, 0x0f0f000f, 0x02020002, + 0xc1c100c1, 0xafaf00af, 0xbdbd00bd, 0x03030003, + 0x01010001, 0x13130013, 0x8a8a008a, 0x6b6b006b, + 0x3a3a003a, 0x91910091, 0x11110011, 0x41410041, + 0x4f4f004f, 0x67670067, 0xdcdc00dc, 0xeaea00ea, + 0x97970097, 0xf2f200f2, 0xcfcf00cf, 0xcece00ce, + 0xf0f000f0, 0xb4b400b4, 0xe6e600e6, 0x73730073, + 0x96960096, 0xacac00ac, 0x74740074, 0x22220022, + 0xe7e700e7, 0xadad00ad, 0x35350035, 0x85850085, + 0xe2e200e2, 0xf9f900f9, 0x37370037, 0xe8e800e8, + 0x1c1c001c, 0x75750075, 0xdfdf00df, 0x6e6e006e, + 0x47470047, 0xf1f100f1, 0x1a1a001a, 0x71710071, + 0x1d1d001d, 0x29290029, 0xc5c500c5, 0x89890089, + 0x6f6f006f, 0xb7b700b7, 0x62620062, 0x0e0e000e, + 0xaaaa00aa, 0x18180018, 0xbebe00be, 0x1b1b001b, + 0xfcfc00fc, 0x56560056, 0x3e3e003e, 0x4b4b004b, + 0xc6c600c6, 0xd2d200d2, 0x79790079, 0x20200020, + 0x9a9a009a, 0xdbdb00db, 0xc0c000c0, 0xfefe00fe, + 0x78780078, 0xcdcd00cd, 0x5a5a005a, 0xf4f400f4, + 0x1f1f001f, 0xdddd00dd, 0xa8a800a8, 0x33330033, + 0x88880088, 0x07070007, 0xc7c700c7, 0x31310031, + 0xb1b100b1, 0x12120012, 0x10100010, 0x59590059, + 0x27270027, 0x80800080, 0xecec00ec, 0x5f5f005f, + 0x60600060, 0x51510051, 0x7f7f007f, 0xa9a900a9, + 0x19190019, 0xb5b500b5, 0x4a4a004a, 0x0d0d000d, + 0x2d2d002d, 0xe5e500e5, 0x7a7a007a, 0x9f9f009f, + 0x93930093, 0xc9c900c9, 0x9c9c009c, 0xefef00ef, + 0xa0a000a0, 0xe0e000e0, 0x3b3b003b, 0x4d4d004d, + 0xaeae00ae, 0x2a2a002a, 0xf5f500f5, 0xb0b000b0, + 0xc8c800c8, 0xebeb00eb, 0xbbbb00bb, 0x3c3c003c, + 0x83830083, 0x53530053, 0x99990099, 0x61610061, + 0x17170017, 0x2b2b002b, 0x04040004, 0x7e7e007e, + 0xbaba00ba, 0x77770077, 0xd6d600d6, 0x26260026, + 0xe1e100e1, 0x69690069, 0x14140014, 0x63630063, + 0x55550055, 0x21210021, 0x0c0c000c, 0x7d7d007d +}; + +static const u32 x2[256] = { + 0x30303000, 0x68686800, 0x99999900, 
0x1b1b1b00, + 0x87878700, 0xb9b9b900, 0x21212100, 0x78787800, + 0x50505000, 0x39393900, 0xdbdbdb00, 0xe1e1e100, + 0x72727200, 0x09090900, 0x62626200, 0x3c3c3c00, + 0x3e3e3e00, 0x7e7e7e00, 0x5e5e5e00, 0x8e8e8e00, + 0xf1f1f100, 0xa0a0a000, 0xcccccc00, 0xa3a3a300, + 0x2a2a2a00, 0x1d1d1d00, 0xfbfbfb00, 0xb6b6b600, + 0xd6d6d600, 0x20202000, 0xc4c4c400, 0x8d8d8d00, + 0x81818100, 0x65656500, 0xf5f5f500, 0x89898900, + 0xcbcbcb00, 0x9d9d9d00, 0x77777700, 0xc6c6c600, + 0x57575700, 0x43434300, 0x56565600, 0x17171700, + 0xd4d4d400, 0x40404000, 0x1a1a1a00, 0x4d4d4d00, + 0xc0c0c000, 0x63636300, 0x6c6c6c00, 0xe3e3e300, + 0xb7b7b700, 0xc8c8c800, 0x64646400, 0x6a6a6a00, + 0x53535300, 0xaaaaaa00, 0x38383800, 0x98989800, + 0x0c0c0c00, 0xf4f4f400, 0x9b9b9b00, 0xededed00, + 0x7f7f7f00, 0x22222200, 0x76767600, 0xafafaf00, + 0xdddddd00, 0x3a3a3a00, 0x0b0b0b00, 0x58585800, + 0x67676700, 0x88888800, 0x06060600, 0xc3c3c300, + 0x35353500, 0x0d0d0d00, 0x01010100, 0x8b8b8b00, + 0x8c8c8c00, 0xc2c2c200, 0xe6e6e600, 0x5f5f5f00, + 0x02020200, 0x24242400, 0x75757500, 0x93939300, + 0x66666600, 0x1e1e1e00, 0xe5e5e500, 0xe2e2e200, + 0x54545400, 0xd8d8d800, 0x10101000, 0xcecece00, + 0x7a7a7a00, 0xe8e8e800, 0x08080800, 0x2c2c2c00, + 0x12121200, 0x97979700, 0x32323200, 0xababab00, + 0xb4b4b400, 0x27272700, 0x0a0a0a00, 0x23232300, + 0xdfdfdf00, 0xefefef00, 0xcacaca00, 0xd9d9d900, + 0xb8b8b800, 0xfafafa00, 0xdcdcdc00, 0x31313100, + 0x6b6b6b00, 0xd1d1d100, 0xadadad00, 0x19191900, + 0x49494900, 0xbdbdbd00, 0x51515100, 0x96969600, + 0xeeeeee00, 0xe4e4e400, 0xa8a8a800, 0x41414100, + 0xdadada00, 0xffffff00, 0xcdcdcd00, 0x55555500, + 0x86868600, 0x36363600, 0xbebebe00, 0x61616100, + 0x52525200, 0xf8f8f800, 0xbbbbbb00, 0x0e0e0e00, + 0x82828200, 0x48484800, 0x69696900, 0x9a9a9a00, + 0xe0e0e000, 0x47474700, 0x9e9e9e00, 0x5c5c5c00, + 0x04040400, 0x4b4b4b00, 0x34343400, 0x15151500, + 0x79797900, 0x26262600, 0xa7a7a700, 0xdedede00, + 0x29292900, 0xaeaeae00, 0x92929200, 0xd7d7d700, + 0x84848400, 0xe9e9e900, 0xd2d2d200, 0xbababa00, + 0x5d5d5d00, 0xf3f3f300, 0xc5c5c500, 0xb0b0b000, + 0xbfbfbf00, 0xa4a4a400, 0x3b3b3b00, 0x71717100, + 0x44444400, 0x46464600, 0x2b2b2b00, 0xfcfcfc00, + 0xebebeb00, 0x6f6f6f00, 0xd5d5d500, 0xf6f6f600, + 0x14141400, 0xfefefe00, 0x7c7c7c00, 0x70707000, + 0x5a5a5a00, 0x7d7d7d00, 0xfdfdfd00, 0x2f2f2f00, + 0x18181800, 0x83838300, 0x16161600, 0xa5a5a500, + 0x91919100, 0x1f1f1f00, 0x05050500, 0x95959500, + 0x74747400, 0xa9a9a900, 0xc1c1c100, 0x5b5b5b00, + 0x4a4a4a00, 0x85858500, 0x6d6d6d00, 0x13131300, + 0x07070700, 0x4f4f4f00, 0x4e4e4e00, 0x45454500, + 0xb2b2b200, 0x0f0f0f00, 0xc9c9c900, 0x1c1c1c00, + 0xa6a6a600, 0xbcbcbc00, 0xececec00, 0x73737300, + 0x90909000, 0x7b7b7b00, 0xcfcfcf00, 0x59595900, + 0x8f8f8f00, 0xa1a1a100, 0xf9f9f900, 0x2d2d2d00, + 0xf2f2f200, 0xb1b1b100, 0x00000000, 0x94949400, + 0x37373700, 0x9f9f9f00, 0xd0d0d000, 0x2e2e2e00, + 0x9c9c9c00, 0x6e6e6e00, 0x28282800, 0x3f3f3f00, + 0x80808000, 0xf0f0f000, 0x3d3d3d00, 0xd3d3d300, + 0x25252500, 0x8a8a8a00, 0xb5b5b500, 0xe7e7e700, + 0x42424200, 0xb3b3b300, 0xc7c7c700, 0xeaeaea00, + 0xf7f7f700, 0x4c4c4c00, 0x11111100, 0x33333300, + 0x03030300, 0xa2a2a200, 0xacacac00, 0x60606000 +}; + +static inline u32 rotl32(u32 v, u32 r) +{ + return ((v << r) | (v >> (32 - r))); +} + +static inline u32 rotr32(u32 v, u32 r) +{ + return ((v >> r) | (v << (32 - r))); +} + +static inline u32 bswap32(u32 v) +{ + return ((v << 24) ^ + (v >> 24) ^ + ((v & 0x0000ff00) << 8) ^ + ((v & 0x00ff0000) >> 8)); +} + +static inline u8 get_u8(u32 x, u32 y) +{ + return (x >> ((3 - y) * 8)); +} + 
+static inline u32 make_u32(u8 v0, u8 v1, u8 v2, u8 v3) +{ + return ((u32)v0 << 24) | ((u32)v1 << 16) | ((u32)v2 << 8) | ((u32)v3); +} + +static inline u32 aria_m(u32 t0) +{ + return rotr32(t0, 8) ^ rotr32(t0 ^ rotr32(t0, 8), 16); +} + +/* S-Box Layer 1 + M */ +static inline void aria_sbox_layer1_with_pre_diff(u32 *t0, u32 *t1, u32 *t2, + u32 *t3) +{ + *t0 = s1[get_u8(*t0, 0)] ^ + s2[get_u8(*t0, 1)] ^ + x1[get_u8(*t0, 2)] ^ + x2[get_u8(*t0, 3)]; + *t1 = s1[get_u8(*t1, 0)] ^ + s2[get_u8(*t1, 1)] ^ + x1[get_u8(*t1, 2)] ^ + x2[get_u8(*t1, 3)]; + *t2 = s1[get_u8(*t2, 0)] ^ + s2[get_u8(*t2, 1)] ^ + x1[get_u8(*t2, 2)] ^ + x2[get_u8(*t2, 3)]; + *t3 = s1[get_u8(*t3, 0)] ^ + s2[get_u8(*t3, 1)] ^ + x1[get_u8(*t3, 2)] ^ + x2[get_u8(*t3, 3)]; +} + +/* S-Box Layer 2 + M */ +static inline void aria_sbox_layer2_with_pre_diff(u32 *t0, u32 *t1, u32 *t2, + u32 *t3) +{ + *t0 = x1[get_u8(*t0, 0)] ^ + x2[get_u8(*t0, 1)] ^ + s1[get_u8(*t0, 2)] ^ + s2[get_u8(*t0, 3)]; + *t1 = x1[get_u8(*t1, 0)] ^ + x2[get_u8(*t1, 1)] ^ + s1[get_u8(*t1, 2)] ^ + s2[get_u8(*t1, 3)]; + *t2 = x1[get_u8(*t2, 0)] ^ + x2[get_u8(*t2, 1)] ^ + s1[get_u8(*t2, 2)] ^ + s2[get_u8(*t2, 3)]; + *t3 = x1[get_u8(*t3, 0)] ^ + x2[get_u8(*t3, 1)] ^ + s1[get_u8(*t3, 2)] ^ + s2[get_u8(*t3, 3)]; +} + +/* Word-level diffusion */ +static inline void aria_diff_word(u32 *t0, u32 *t1, u32 *t2, u32 *t3) +{ + *t1 ^= *t2; + *t2 ^= *t3; + *t0 ^= *t1; + + *t3 ^= *t1; + *t2 ^= *t0; + *t1 ^= *t2; +} + +/* Byte-level diffusion */ +static inline void aria_diff_byte(u32 *t1, u32 *t2, u32 *t3) +{ + *t1 = ((*t1 << 8) & 0xff00ff00) ^ ((*t1 >> 8) & 0x00ff00ff); + *t2 = rotr32(*t2, 16); + *t3 = bswap32(*t3); +} + +/* Key XOR Layer */ +static inline void aria_add_round_key(u32 *rk, u32 *t0, u32 *t1, u32 *t2, + u32 *t3) +{ + *t0 ^= rk[0]; + *t1 ^= rk[1]; + *t2 ^= rk[2]; + *t3 ^= rk[3]; +} +/* Odd round Substitution & Diffusion */ +static inline void aria_subst_diff_odd(u32 *t0, u32 *t1, u32 *t2, u32 *t3) +{ + aria_sbox_layer1_with_pre_diff(t0, t1, t2, t3); + aria_diff_word(t0, t1, t2, t3); + aria_diff_byte(t1, t2, t3); + aria_diff_word(t0, t1, t2, t3); +} + +/* Even round Substitution & Diffusion */ +static inline void aria_subst_diff_even(u32 *t0, u32 *t1, u32 *t2, u32 *t3) +{ + aria_sbox_layer2_with_pre_diff(t0, t1, t2, t3); + aria_diff_word(t0, t1, t2, t3); + aria_diff_byte(t3, t0, t1); + aria_diff_word(t0, t1, t2, t3); +} + +/* Q, R Macro expanded ARIA GSRK */ +static inline void aria_gsrk(u32 *rk, u32 *x, u32 *y, u32 n) +{ + int q = 4 - (n / 32); + int r = n % 32; + + rk[0] = (x[0]) ^ + ((y[q % 4]) >> r) ^ + ((y[(q + 3) % 4]) << (32 - r)); + rk[1] = (x[1]) ^ + ((y[(q + 1) % 4]) >> r) ^ + ((y[q % 4]) << (32 - r)); + rk[2] = (x[2]) ^ + ((y[(q + 2) % 4]) >> r) ^ + ((y[(q + 1) % 4]) << (32 - r)); + rk[3] = (x[3]) ^ + ((y[(q + 3) % 4]) >> r) ^ + ((y[(q + 2) % 4]) << (32 - r)); +} + +#endif
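Taken together, the aria.h helpers compose into complete rounds: each round is a key XOR followed by the odd or even substitution/diffusion step. A minimal sketch (the wrapper name is made up, not part of aria.h):

/*
 * Hypothetical wrapper: one odd ARIA round over a 16-byte state held
 * in four u32 words, with a four-word round key rk.
 */
static inline void aria_round_odd(u32 *rk, u32 *t0, u32 *t1, u32 *t2, u32 *t3)
{
	aria_add_round_key(rk, t0, t1, t2, t3);
	aria_subst_diff_odd(t0, t1, t2, t3);
}

Encryption and decryption then differ mainly in whether the enc_key or dec_key schedule from struct aria_ctx is fed to the rounds.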
diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h index 52363eee2b20..506d56530ca9 100644 --- a/include/crypto/internal/blake2s.h +++ b/include/crypto/internal/blake2s.h @@ -8,7 +8,6 @@ #define _CRYPTO_INTERNAL_BLAKE2S_H #include <crypto/blake2s.h> -#include <crypto/internal/hash.h> #include <linux/string.h> void blake2s_compress_generic(struct blake2s_state *state, const u8 *block, @@ -19,111 +18,4 @@ void blake2s_compress(struct blake2s_state *state, const u8 *block, bool blake2s_selftest(void); -static inline void blake2s_set_lastblock(struct blake2s_state *state) -{ - state->f[0] = -1; -} - -/* Helper functions for BLAKE2s shared by the library and shash APIs */ - -static __always_inline void -__blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen, - bool force_generic) -{ - const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; - - if (unlikely(!inlen)) - return; - if (inlen > fill) { - memcpy(state->buf + state->buflen, in, fill); - if (force_generic) - blake2s_compress_generic(state, state->buf, 1, - BLAKE2S_BLOCK_SIZE); - else - blake2s_compress(state, state->buf, 1, - BLAKE2S_BLOCK_SIZE); - state->buflen = 0; - in += fill; - inlen -= fill; - } - if (inlen > BLAKE2S_BLOCK_SIZE) { - const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); - /* Hash one less (full) block than strictly possible */ - if (force_generic) - blake2s_compress_generic(state, in, nblocks - 1, - BLAKE2S_BLOCK_SIZE); - else - blake2s_compress(state, in, nblocks - 1, - BLAKE2S_BLOCK_SIZE); - in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); - inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); - } - memcpy(state->buf + state->buflen, in, inlen); - state->buflen += inlen; -} - -static __always_inline void -__blake2s_final(struct blake2s_state *state, u8 *out, bool force_generic) -{ - blake2s_set_lastblock(state); - memset(state->buf + state->buflen, 0, - BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ - if (force_generic) - blake2s_compress_generic(state, state->buf, 1, state->buflen); - else - blake2s_compress(state, state->buf, 1, state->buflen); - cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); - memcpy(out, state->h, state->outlen); -} - -/* Helper functions for shash implementations of BLAKE2s */ - -struct blake2s_tfm_ctx { - u8 key[BLAKE2S_KEY_SIZE]; - unsigned int keylen; -}; - -static inline int crypto_blake2s_setkey(struct crypto_shash *tfm, - const u8 *key, unsigned int keylen) -{ - struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm); - - if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) - return -EINVAL; - - memcpy(tctx->key, key, keylen); - tctx->keylen = keylen; - - return 0; -} - -static inline int crypto_blake2s_init(struct shash_desc *desc) -{ - const struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - struct blake2s_state *state = shash_desc_ctx(desc); - unsigned int outlen = crypto_shash_digestsize(desc->tfm); - - __blake2s_init(state, outlen, tctx->key, tctx->keylen); - return 0; -} - -static inline int crypto_blake2s_update(struct shash_desc *desc, - const u8 *in, unsigned int inlen, - bool force_generic) -{ - struct blake2s_state *state = shash_desc_ctx(desc); - - __blake2s_update(state, in, inlen, force_generic); - return 0; -} - -static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out, - bool force_generic) -{ - struct blake2s_state *state = shash_desc_ctx(desc); - - __blake2s_final(state, out, force_generic); - return 0; -} - #endif /* _CRYPTO_INTERNAL_BLAKE2S_H */ diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h new file mode 100644 index 000000000000..1d630f371f77 --- /dev/null +++ b/include/crypto/polyval.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for the Polyval hash algorithm + * + * Copyright 2021 Google LLC + */ + +#ifndef _CRYPTO_POLYVAL_H +#define _CRYPTO_POLYVAL_H + +#include <linux/types.h> +#include <linux/crypto.h> + +#define POLYVAL_BLOCK_SIZE 16 +#define POLYVAL_DIGEST_SIZE 16 + +void polyval_mul_non4k(u8 *op1, const u8 *op2); + +void polyval_update_non4k(const u8 *key, const u8 *in, + size_t nblocks, u8 *accumulator); + +#endif diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 0777725085df..10b1990bc1f6 100644 ---
a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -1022,6 +1022,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); for ((__i) = 0; \ (__i) < (__state)->num_private_objs && \ ((obj) = (__state)->private_objs[__i].ptr, \ + (void)(obj) /* Only to avoid unused-but-set-variable warning */, \ (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ (__i)++) diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 0fca8f38bee4..addb135eeea6 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -28,7 +28,7 @@ #include <linux/dma-fence.h> #include <linux/completion.h> #include <linux/xarray.h> -#include <linux/irq_work.h> +#include <linux/workqueue.h> #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000) @@ -295,7 +295,7 @@ struct drm_sched_job { */ union { struct dma_fence_cb finish_cb; - struct irq_work work; + struct work_struct work; }; uint64_t id; diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 441653693970..ca89a48c2460 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -311,12 +311,12 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man) } void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk); -void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk, - struct ttm_resource *res); -void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk, - struct ttm_resource *res); void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk); +void ttm_resource_add_bulk_move(struct ttm_resource *res, + struct ttm_buffer_object *bo); +void ttm_resource_del_bulk_move(struct ttm_resource *res, + struct ttm_buffer_object *bo); void ttm_resource_move_to_lru_tail(struct ttm_resource *res); void ttm_resource_init(struct ttm_buffer_object *bo, diff --git a/include/dt-bindings/clock/exynos7885.h b/include/dt-bindings/clock/exynos7885.h index 1f8701691d62..8256e7430b63 100644 --- a/include/dt-bindings/clock/exynos7885.h +++ b/include/dt-bindings/clock/exynos7885.h @@ -54,17 +54,39 @@ #define CLK_GOUT_PERI_USI0 43 #define CLK_GOUT_PERI_USI1 44 #define CLK_GOUT_PERI_USI2 45 -#define TOP_NR_CLK 46 +#define CLK_MOUT_FSYS_BUS 46 +#define CLK_MOUT_FSYS_MMC_CARD 47 +#define CLK_MOUT_FSYS_MMC_EMBD 48 +#define CLK_MOUT_FSYS_MMC_SDIO 49 +#define CLK_MOUT_FSYS_USB30DRD 50 +#define CLK_DOUT_FSYS_BUS 51 +#define CLK_DOUT_FSYS_MMC_CARD 52 +#define CLK_DOUT_FSYS_MMC_EMBD 53 +#define CLK_DOUT_FSYS_MMC_SDIO 54 +#define CLK_DOUT_FSYS_USB30DRD 55 +#define CLK_GOUT_FSYS_BUS 56 +#define CLK_GOUT_FSYS_MMC_CARD 57 +#define CLK_GOUT_FSYS_MMC_EMBD 58 +#define CLK_GOUT_FSYS_MMC_SDIO 59 +#define CLK_GOUT_FSYS_USB30DRD 60 +#define TOP_NR_CLK 61 /* CMU_CORE */ -#define CLK_MOUT_CORE_BUS_USER 1 -#define CLK_MOUT_CORE_CCI_USER 2 -#define CLK_MOUT_CORE_G3D_USER 3 -#define CLK_MOUT_CORE_GIC 4 -#define CLK_DOUT_CORE_BUSP 5 -#define CLK_GOUT_CCI_ACLK 6 -#define CLK_GOUT_GIC400_CLK 7 -#define CORE_NR_CLK 8 +#define CLK_MOUT_CORE_BUS_USER 1 +#define CLK_MOUT_CORE_CCI_USER 2 +#define CLK_MOUT_CORE_G3D_USER 3 +#define CLK_MOUT_CORE_GIC 4 +#define CLK_DOUT_CORE_BUSP 5 +#define CLK_GOUT_CCI_ACLK 6 +#define CLK_GOUT_GIC400_CLK 7 +#define CLK_GOUT_TREX_D_CORE_ACLK 8 +#define CLK_GOUT_TREX_D_CORE_GCLK 9 +#define CLK_GOUT_TREX_D_CORE_PCLK 10 +#define CLK_GOUT_TREX_P_CORE_ACLK_P_CORE 11 +#define CLK_GOUT_TREX_P_CORE_CCLK_P_CORE 12 +#define CLK_GOUT_TREX_P_CORE_PCLK 13 +#define CLK_GOUT_TREX_P_CORE_PCLK_P_CORE 14 +#define CORE_NR_CLK 15 /* CMU_PERI */ #define CLK_MOUT_PERI_BUS_USER 1 @@ 
-112,4 +134,18 @@ #define CLK_GOUT_WDT1_PCLK 43 #define PERI_NR_CLK 44 +/* CMU_FSYS */ +#define CLK_MOUT_FSYS_BUS_USER 1 +#define CLK_MOUT_FSYS_MMC_CARD_USER 2 +#define CLK_MOUT_FSYS_MMC_EMBD_USER 3 +#define CLK_MOUT_FSYS_MMC_SDIO_USER 4 +#define CLK_MOUT_FSYS_USB30DRD_USER 4 +#define CLK_GOUT_MMC_CARD_ACLK 5 +#define CLK_GOUT_MMC_CARD_SDCLKIN 6 +#define CLK_GOUT_MMC_EMBD_ACLK 7 +#define CLK_GOUT_MMC_EMBD_SDCLKIN 8 +#define CLK_GOUT_MMC_SDIO_ACLK 9 +#define CLK_GOUT_MMC_SDIO_SDCLKIN 10 +#define FSYS_NR_CLK 11 + #endif /* _DT_BINDINGS_CLOCK_EXYNOS_7885_H */ diff --git a/include/dt-bindings/clock/nuvoton,npcm845-clk.h b/include/dt-bindings/clock/nuvoton,npcm845-clk.h new file mode 100644 index 000000000000..e5cce08b00e1 --- /dev/null +++ b/include/dt-bindings/clock/nuvoton,npcm845-clk.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) 2021 Nuvoton Technologies. + * Author: Tomer Maimon <tomer.maimon@nuvoton.com> + * + * Device Tree binding constants for NPCM8XX clock controller. + */ + +#ifndef __DT_BINDINGS_CLOCK_NPCM8XX_H +#define __DT_BINDINGS_CLOCK_NPCM8XX_H + +#define NPCM8XX_CLK_CPU 0 +#define NPCM8XX_CLK_GFX_PIXEL 1 +#define NPCM8XX_CLK_MC 2 +#define NPCM8XX_CLK_ADC 3 +#define NPCM8XX_CLK_AHB 4 +#define NPCM8XX_CLK_TIMER 5 +#define NPCM8XX_CLK_UART 6 +#define NPCM8XX_CLK_UART2 7 +#define NPCM8XX_CLK_MMC 8 +#define NPCM8XX_CLK_SPI3 9 +#define NPCM8XX_CLK_PCI 10 +#define NPCM8XX_CLK_AXI 11 +#define NPCM8XX_CLK_APB4 12 +#define NPCM8XX_CLK_APB3 13 +#define NPCM8XX_CLK_APB2 14 +#define NPCM8XX_CLK_APB1 15 +#define NPCM8XX_CLK_APB5 16 +#define NPCM8XX_CLK_CLKOUT 17 +#define NPCM8XX_CLK_GFX 18 +#define NPCM8XX_CLK_SU 19 +#define NPCM8XX_CLK_SU48 20 +#define NPCM8XX_CLK_SDHC 21 +#define NPCM8XX_CLK_SPI0 22 +#define NPCM8XX_CLK_SPI1 23 +#define NPCM8XX_CLK_SPIX 24 +#define NPCM8XX_CLK_RG 25 +#define NPCM8XX_CLK_RCP 26 +#define NPCM8XX_CLK_PRE_ADC 27 +#define NPCM8XX_CLK_ATB 28 +#define NPCM8XX_CLK_PRE_CLK 29 +#define NPCM8XX_CLK_TH 30 +#define NPCM8XX_CLK_REFCLK 31 +#define NPCM8XX_CLK_SYSBYPCK 32 +#define NPCM8XX_CLK_MCBYPCK 33 + +#define NPCM8XX_NUM_CLOCKS (NPCM8XX_CLK_MCBYPCK + 1) + +#endif diff --git a/include/dt-bindings/clock/qcom,dispcc-sm8350.h b/include/dt-bindings/clock/qcom,dispcc-sm8350.h new file mode 120000 index 000000000000..0312b4544acb --- /dev/null +++ b/include/dt-bindings/clock/qcom,dispcc-sm8350.h @@ -0,0 +1 @@ +qcom,dispcc-sm8250.h
\ No newline at end of file diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h index 8e2bec1c91bf..55f8322a1e50 100644 --- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h +++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h @@ -367,4 +367,7 @@ #define GCC_PCIE1_AXI_MASTER_STICKY_ARES 130 #define GCC_PCIE0_AXI_SLAVE_STICKY_ARES 131 +#define USB0_GDSC 0 +#define USB1_GDSC 1 + #endif diff --git a/include/dt-bindings/clock/qcom,gpucc-sm8350.h b/include/dt-bindings/clock/qcom,gpucc-sm8350.h new file mode 100644 index 000000000000..2ca857f5bfd2 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gpucc-sm8350.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2022, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8350_H +#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8350_H + +/* GPU_CC clocks */ +#define GPU_CC_AHB_CLK 0 +#define GPU_CC_CB_CLK 1 +#define GPU_CC_CRC_AHB_CLK 2 +#define GPU_CC_CX_APB_CLK 3 +#define GPU_CC_CX_GMU_CLK 4 +#define GPU_CC_CX_QDSS_AT_CLK 5 +#define GPU_CC_CX_QDSS_TRIG_CLK 6 +#define GPU_CC_CX_QDSS_TSCTR_CLK 7 +#define GPU_CC_CX_SNOC_DVM_CLK 8 +#define GPU_CC_CXO_AON_CLK 9 +#define GPU_CC_CXO_CLK 10 +#define GPU_CC_FREQ_MEASURE_CLK 11 +#define GPU_CC_GMU_CLK_SRC 12 +#define GPU_CC_GX_GMU_CLK 13 +#define GPU_CC_GX_QDSS_TSCTR_CLK 14 +#define GPU_CC_GX_VSENSE_CLK 15 +#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 16 +#define GPU_CC_HUB_AHB_DIV_CLK_SRC 17 +#define GPU_CC_HUB_AON_CLK 18 +#define GPU_CC_HUB_CLK_SRC 19 +#define GPU_CC_HUB_CX_INT_CLK 20 +#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 21 +#define GPU_CC_MND1X_0_GFX3D_CLK 22 +#define GPU_CC_MND1X_1_GFX3D_CLK 23 +#define GPU_CC_PLL0 24 +#define GPU_CC_PLL1 25 +#define GPU_CC_SLEEP_CLK 26 + +/* GPU_CC resets */ +#define GPUCC_GPU_CC_ACD_BCR 0 +#define GPUCC_GPU_CC_CB_BCR 1 +#define GPUCC_GPU_CC_CX_BCR 2 +#define GPUCC_GPU_CC_FAST_HUB_BCR 3 +#define GPUCC_GPU_CC_GFX3D_AON_BCR 4 +#define GPUCC_GPU_CC_GMU_BCR 5 +#define GPUCC_GPU_CC_GX_BCR 6 +#define GPUCC_GPU_CC_XO_BCR 7 + +/* GPU_CC GDSCRs */ +#define GPU_CX_GDSC 0 +#define GPU_GX_GDSC 1 + +#endif diff --git a/include/dt-bindings/clock/qcom,sm8450-camcc.h b/include/dt-bindings/clock/qcom,sm8450-camcc.h new file mode 100644 index 000000000000..7ff67acf301a --- /dev/null +++ b/include/dt-bindings/clock/qcom,sm8450-camcc.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8450_H +#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8450_H + +/* CAM_CC clocks */ +#define CAM_CC_BPS_AHB_CLK 0 +#define CAM_CC_BPS_CLK 1 +#define CAM_CC_BPS_CLK_SRC 2 +#define CAM_CC_BPS_FAST_AHB_CLK 3 +#define CAM_CC_CAMNOC_AXI_CLK 4 +#define CAM_CC_CAMNOC_AXI_CLK_SRC 5 +#define CAM_CC_CAMNOC_DCD_XO_CLK 6 +#define CAM_CC_CCI_0_CLK 7 +#define CAM_CC_CCI_0_CLK_SRC 8 +#define CAM_CC_CCI_1_CLK 9 +#define CAM_CC_CCI_1_CLK_SRC 10 +#define CAM_CC_CORE_AHB_CLK 11 +#define CAM_CC_CPAS_AHB_CLK 12 +#define CAM_CC_CPAS_BPS_CLK 13 +#define CAM_CC_CPAS_FAST_AHB_CLK 14 +#define CAM_CC_CPAS_IFE_0_CLK 15 +#define CAM_CC_CPAS_IFE_1_CLK 16 +#define CAM_CC_CPAS_IFE_2_CLK 17 +#define CAM_CC_CPAS_IFE_LITE_CLK 18 +#define CAM_CC_CPAS_IPE_NPS_CLK 19 +#define CAM_CC_CPAS_SBI_CLK 20 +#define CAM_CC_CPAS_SFE_0_CLK 21 +#define CAM_CC_CPAS_SFE_1_CLK 22 +#define CAM_CC_CPHY_RX_CLK_SRC 23 +#define CAM_CC_CSI0PHYTIMER_CLK 24 +#define CAM_CC_CSI0PHYTIMER_CLK_SRC 25 +#define CAM_CC_CSI1PHYTIMER_CLK 26 +#define CAM_CC_CSI1PHYTIMER_CLK_SRC 27 +#define CAM_CC_CSI2PHYTIMER_CLK 28 +#define CAM_CC_CSI2PHYTIMER_CLK_SRC 29 +#define CAM_CC_CSI3PHYTIMER_CLK 30 +#define CAM_CC_CSI3PHYTIMER_CLK_SRC 31 +#define CAM_CC_CSI4PHYTIMER_CLK 32 +#define CAM_CC_CSI4PHYTIMER_CLK_SRC 33 +#define CAM_CC_CSI5PHYTIMER_CLK 34 +#define CAM_CC_CSI5PHYTIMER_CLK_SRC 35 +#define CAM_CC_CSID_CLK 36 +#define CAM_CC_CSID_CLK_SRC 37 +#define CAM_CC_CSID_CSIPHY_RX_CLK 38 +#define CAM_CC_CSIPHY0_CLK 39 +#define CAM_CC_CSIPHY1_CLK 40 +#define CAM_CC_CSIPHY2_CLK 41 +#define CAM_CC_CSIPHY3_CLK 42 +#define CAM_CC_CSIPHY4_CLK 43 +#define CAM_CC_CSIPHY5_CLK 44 +#define CAM_CC_FAST_AHB_CLK_SRC 45 +#define CAM_CC_GDSC_CLK 46 +#define CAM_CC_ICP_AHB_CLK 47 +#define CAM_CC_ICP_CLK 48 +#define CAM_CC_ICP_CLK_SRC 49 +#define CAM_CC_IFE_0_CLK 50 +#define CAM_CC_IFE_0_CLK_SRC 51 +#define CAM_CC_IFE_0_DSP_CLK 52 +#define CAM_CC_IFE_0_FAST_AHB_CLK 53 +#define CAM_CC_IFE_1_CLK 54 +#define CAM_CC_IFE_1_CLK_SRC 55 +#define CAM_CC_IFE_1_DSP_CLK 56 +#define CAM_CC_IFE_1_FAST_AHB_CLK 57 +#define CAM_CC_IFE_2_CLK 58 +#define CAM_CC_IFE_2_CLK_SRC 59 +#define CAM_CC_IFE_2_DSP_CLK 60 +#define CAM_CC_IFE_2_FAST_AHB_CLK 61 +#define CAM_CC_IFE_LITE_AHB_CLK 62 +#define CAM_CC_IFE_LITE_CLK 63 +#define CAM_CC_IFE_LITE_CLK_SRC 64 +#define CAM_CC_IFE_LITE_CPHY_RX_CLK 65 +#define CAM_CC_IFE_LITE_CSID_CLK 66 +#define CAM_CC_IFE_LITE_CSID_CLK_SRC 67 +#define CAM_CC_IPE_NPS_AHB_CLK 68 +#define CAM_CC_IPE_NPS_CLK 69 +#define CAM_CC_IPE_NPS_CLK_SRC 70 +#define CAM_CC_IPE_NPS_FAST_AHB_CLK 71 +#define CAM_CC_IPE_PPS_CLK 72 +#define CAM_CC_IPE_PPS_FAST_AHB_CLK 73 +#define CAM_CC_JPEG_CLK 74 +#define CAM_CC_JPEG_CLK_SRC 75 +#define CAM_CC_MCLK0_CLK 76 +#define CAM_CC_MCLK0_CLK_SRC 77 +#define CAM_CC_MCLK1_CLK 78 +#define CAM_CC_MCLK1_CLK_SRC 79 +#define CAM_CC_MCLK2_CLK 80 +#define CAM_CC_MCLK2_CLK_SRC 81 +#define CAM_CC_MCLK3_CLK 82 +#define CAM_CC_MCLK3_CLK_SRC 83 +#define CAM_CC_MCLK4_CLK 84 +#define CAM_CC_MCLK4_CLK_SRC 85 +#define CAM_CC_MCLK5_CLK 86 +#define CAM_CC_MCLK5_CLK_SRC 87 +#define CAM_CC_MCLK6_CLK 88 +#define CAM_CC_MCLK6_CLK_SRC 89 +#define CAM_CC_MCLK7_CLK 90 +#define CAM_CC_MCLK7_CLK_SRC 91 +#define CAM_CC_PLL0 92 +#define CAM_CC_PLL0_OUT_EVEN 93 +#define CAM_CC_PLL0_OUT_ODD 94 +#define CAM_CC_PLL1 95 +#define CAM_CC_PLL1_OUT_EVEN 96 +#define CAM_CC_PLL2 97 +#define CAM_CC_PLL3 98 +#define CAM_CC_PLL3_OUT_EVEN 99 +#define CAM_CC_PLL4 100 +#define CAM_CC_PLL4_OUT_EVEN 101 +#define CAM_CC_PLL5 102 +#define CAM_CC_PLL5_OUT_EVEN 103 +#define 
CAM_CC_PLL6 104 +#define CAM_CC_PLL6_OUT_EVEN 105 +#define CAM_CC_PLL7 106 +#define CAM_CC_PLL7_OUT_EVEN 107 +#define CAM_CC_PLL8 108 +#define CAM_CC_PLL8_OUT_EVEN 109 +#define CAM_CC_QDSS_DEBUG_CLK 110 +#define CAM_CC_QDSS_DEBUG_CLK_SRC 111 +#define CAM_CC_QDSS_DEBUG_XO_CLK 112 +#define CAM_CC_SBI_AHB_CLK 113 +#define CAM_CC_SBI_CLK 114 +#define CAM_CC_SFE_0_CLK 115 +#define CAM_CC_SFE_0_CLK_SRC 116 +#define CAM_CC_SFE_0_FAST_AHB_CLK 117 +#define CAM_CC_SFE_1_CLK 118 +#define CAM_CC_SFE_1_CLK_SRC 119 +#define CAM_CC_SFE_1_FAST_AHB_CLK 120 +#define CAM_CC_SLEEP_CLK 121 +#define CAM_CC_SLEEP_CLK_SRC 122 +#define CAM_CC_SLOW_AHB_CLK_SRC 123 +#define CAM_CC_XO_CLK_SRC 124 + +/* CAM_CC resets */ +#define CAM_CC_BPS_BCR 0 +#define CAM_CC_ICP_BCR 1 +#define CAM_CC_IFE_0_BCR 2 +#define CAM_CC_IFE_1_BCR 3 +#define CAM_CC_IFE_2_BCR 4 +#define CAM_CC_IPE_0_BCR 5 +#define CAM_CC_QDSS_DEBUG_BCR 6 +#define CAM_CC_SBI_BCR 7 +#define CAM_CC_SFE_0_BCR 8 +#define CAM_CC_SFE_1_BCR 9 + +/* CAM_CC GDSCRs */ +#define BPS_GDSC 0 +#define IPE_0_GDSC 1 +#define SBI_GDSC 2 +#define IFE_0_GDSC 3 +#define IFE_1_GDSC 4 +#define IFE_2_GDSC 5 +#define SFE_0_GDSC 6 +#define SFE_1_GDSC 7 +#define TITAN_TOP_GDSC 8 + +#endif diff --git a/include/dt-bindings/clock/sunplus,sp7021-clkc.h b/include/dt-bindings/clock/sunplus,sp7021-clkc.h new file mode 100644 index 000000000000..cd84321eb2b5 --- /dev/null +++ b/include/dt-bindings/clock/sunplus,sp7021-clkc.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) Sunplus Technology Co., Ltd. + * All rights reserved. + */ +#ifndef _DT_BINDINGS_CLOCK_SUNPLUS_SP7021_H +#define _DT_BINDINGS_CLOCK_SUNPLUS_SP7021_H + +/* gates */ +#define CLK_RTC 0 +#define CLK_OTPRX 1 +#define CLK_NOC 2 +#define CLK_BR 3 +#define CLK_SPIFL 4 +#define CLK_PERI0 5 +#define CLK_PERI1 6 +#define CLK_STC0 7 +#define CLK_STC_AV0 8 +#define CLK_STC_AV1 9 +#define CLK_STC_AV2 10 +#define CLK_UA0 11 +#define CLK_UA1 12 +#define CLK_UA2 13 +#define CLK_UA3 14 +#define CLK_UA4 15 +#define CLK_HWUA 16 +#define CLK_DDC0 17 +#define CLK_UADMA 18 +#define CLK_CBDMA0 19 +#define CLK_CBDMA1 20 +#define CLK_SPI_COMBO_0 21 +#define CLK_SPI_COMBO_1 22 +#define CLK_SPI_COMBO_2 23 +#define CLK_SPI_COMBO_3 24 +#define CLK_AUD 25 +#define CLK_USBC0 26 +#define CLK_USBC1 27 +#define CLK_UPHY0 28 +#define CLK_UPHY1 29 +#define CLK_I2CM0 30 +#define CLK_I2CM1 31 +#define CLK_I2CM2 32 +#define CLK_I2CM3 33 +#define CLK_PMC 34 +#define CLK_CARD_CTL0 35 +#define CLK_CARD_CTL1 36 +#define CLK_CARD_CTL4 37 +#define CLK_BCH 38 +#define CLK_DDFCH 39 +#define CLK_CSIIW0 40 +#define CLK_CSIIW1 41 +#define CLK_MIPICSI0 42 +#define CLK_MIPICSI1 43 +#define CLK_HDMI_TX 44 +#define CLK_VPOST 45 +#define CLK_TGEN 46 +#define CLK_DMIX 47 +#define CLK_TCON 48 +#define CLK_GPIO 49 +#define CLK_MAILBOX 50 +#define CLK_SPIND 51 +#define CLK_I2C2CBUS 52 +#define CLK_SEC 53 +#define CLK_DVE 54 +#define CLK_GPOST0 55 +#define CLK_OSD0 56 +#define CLK_DISP_PWM 57 +#define CLK_UADBG 58 +#define CLK_FIO_CTL 59 +#define CLK_FPGA 60 +#define CLK_L2SW 61 +#define CLK_ICM 62 +#define CLK_AXI_GLOBAL 63 + +/* plls */ +#define PLL_A 64 +#define PLL_E 65 +#define PLL_E_2P5 66 +#define PLL_E_25 67 +#define PLL_E_112P5 68 +#define PLL_F 69 +#define PLL_TV 70 +#define PLL_TV_A 71 +#define PLL_SYS 72 + +#define CLK_MAX 73 + +#endif diff --git a/include/dt-bindings/clock/tegra234-clock.h b/include/dt-bindings/clock/tegra234-clock.h index bd4c3086a2da..173364a93381 100644 --- 
a/include/dt-bindings/clock/tegra234-clock.h +++ b/include/dt-bindings/clock/tegra234-clock.h @@ -38,6 +38,8 @@ * throughput and memory controller power. */ #define TEGRA234_CLK_EMC 31U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HOST1X */ +#define TEGRA234_CLK_HOST1X 46U /** @brief output of gate CLK_ENB_FUSE */ #define TEGRA234_CLK_FUSE 40U /** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C1 */ @@ -132,6 +134,8 @@ #define TEGRA234_CLK_UARTA 155U /** @brief output of gate CLK_ENB_PEX1_CORE_6 */ #define TEGRA234_CLK_PEX1_C6_CORE 161U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VIC */ +#define TEGRA234_CLK_VIC 167U /** @brief output of gate CLK_ENB_PEX2_CORE_7 */ #define TEGRA234_CLK_PEX2_C7_CORE 171U /** @brief output of gate CLK_ENB_PEX2_CORE_8 */ @@ -164,10 +168,111 @@ #define TEGRA234_CLK_PEX1_C5_CORE 225U /** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC4_BASE */ #define TEGRA234_CLK_PLLC4 237U +/** @brief RX clock recovered from MGBE0 lane input */ +#define TEGRA234_CLK_MGBE0_RX_INPUT 248U +/** @brief RX clock recovered from MGBE1 lane input */ +#define TEGRA234_CLK_MGBE1_RX_INPUT 249U +/** @brief RX clock recovered from MGBE2 lane input */ +#define TEGRA234_CLK_MGBE2_RX_INPUT 250U +/** @brief RX clock recovered from MGBE3 lane input */ +#define TEGRA234_CLK_MGBE3_RX_INPUT 251U /** @brief 32K input clock provided by PMIC */ #define TEGRA234_CLK_CLK_32K 289U +/** @brief Monitored branch of MGBE0 RX input clock */ +#define TEGRA234_CLK_MGBE0_RX_INPUT_M 357U +/** @brief Monitored branch of MGBE1 RX input clock */ +#define TEGRA234_CLK_MGBE1_RX_INPUT_M 358U +/** @brief Monitored branch of MGBE2 RX input clock */ +#define TEGRA234_CLK_MGBE2_RX_INPUT_M 359U +/** @brief Monitored branch of MGBE3 RX input clock */ +#define TEGRA234_CLK_MGBE3_RX_INPUT_M 360U +/** @brief Monitored branch of MGBE0 RX PCS mux output */ +#define TEGRA234_CLK_MGBE0_RX_PCS_M 361U +/** @brief Monitored branch of MGBE1 RX PCS mux output */ +#define TEGRA234_CLK_MGBE1_RX_PCS_M 362U +/** @brief Monitored branch of MGBE2 RX PCS mux output */ +#define TEGRA234_CLK_MGBE2_RX_PCS_M 363U +/** @brief Monitored branch of MGBE3 RX PCS mux output */ +#define TEGRA234_CLK_MGBE3_RX_PCS_M 364U +/** @brief RX PCS clock recovered from MGBE0 lane input */ +#define TEGRA234_CLK_MGBE0_RX_PCS_INPUT 369U +/** @brief RX PCS clock recovered from MGBE1 lane input */ +#define TEGRA234_CLK_MGBE1_RX_PCS_INPUT 370U +/** @brief RX PCS clock recovered from MGBE2 lane input */ +#define TEGRA234_CLK_MGBE2_RX_PCS_INPUT 371U +/** @brief RX PCS clock recovered from MGBE3 lane input */ +#define TEGRA234_CLK_MGBE3_RX_PCS_INPUT 372U +/** @brief output of mux controlled by GBE_UPHY_MGBE0_RX_PCS_CLK_SRC_SEL */ +#define TEGRA234_CLK_MGBE0_RX_PCS 373U +/** @brief GBE_UPHY_MGBE0_TX_CLK divider gated output */ +#define TEGRA234_CLK_MGBE0_TX 374U +/** @brief GBE_UPHY_MGBE0_TX_PCS_CLK divider gated output */ +#define TEGRA234_CLK_MGBE0_TX_PCS 375U +/** @brief GBE_UPHY_MGBE0_MAC_CLK divider output */ +#define TEGRA234_CLK_MGBE0_MAC_DIVIDER 376U +/** @brief GBE_UPHY_MGBE0_MAC_CLK gate output */ +#define TEGRA234_CLK_MGBE0_MAC 377U +/** @brief GBE_UPHY_MGBE0_MACSEC_CLK gate output */ +#define TEGRA234_CLK_MGBE0_MACSEC 378U +/** @brief GBE_UPHY_MGBE0_EEE_PCS_CLK gate output */ +#define TEGRA234_CLK_MGBE0_EEE_PCS 379U +/** @brief GBE_UPHY_MGBE0_APP_CLK gate output */ +#define TEGRA234_CLK_MGBE0_APP 380U +/** @brief GBE_UPHY_MGBE0_PTP_REF_CLK divider gated output */ +#define
TEGRA234_CLK_MGBE0_PTP_REF 381U +/** @brief output of mux controlled by GBE_UPHY_MGBE1_RX_PCS_CLK_SRC_SEL */ +#define TEGRA234_CLK_MGBE1_RX_PCS 382U +/** @brief GBE_UPHY_MGBE1_TX_CLK divider gated output */ +#define TEGRA234_CLK_MGBE1_TX 383U +/** @brief GBE_UPHY_MGBE1_TX_PCS_CLK divider gated output */ +#define TEGRA234_CLK_MGBE1_TX_PCS 384U +/** @brief GBE_UPHY_MGBE1_MAC_CLK divider output */ +#define TEGRA234_CLK_MGBE1_MAC_DIVIDER 385U +/** @brief GBE_UPHY_MGBE1_MAC_CLK gate output */ +#define TEGRA234_CLK_MGBE1_MAC 386U +/** @brief GBE_UPHY_MGBE1_EEE_PCS_CLK gate output */ +#define TEGRA234_CLK_MGBE1_EEE_PCS 388U +/** @brief GBE_UPHY_MGBE1_APP_CLK gate output */ +#define TEGRA234_CLK_MGBE1_APP 389U +/** @brief GBE_UPHY_MGBE1_PTP_REF_CLK divider gated output */ +#define TEGRA234_CLK_MGBE1_PTP_REF 390U +/** @brief output of mux controlled by GBE_UPHY_MGBE2_RX_PCS_CLK_SRC_SEL */ +#define TEGRA234_CLK_MGBE2_RX_PCS 391U +/** @brief GBE_UPHY_MGBE2_TX_CLK divider gated output */ +#define TEGRA234_CLK_MGBE2_TX 392U +/** @brief GBE_UPHY_MGBE2_TX_PCS_CLK divider gated output */ +#define TEGRA234_CLK_MGBE2_TX_PCS 393U +/** @brief GBE_UPHY_MGBE2_MAC_CLK divider output */ +#define TEGRA234_CLK_MGBE2_MAC_DIVIDER 394U +/** @brief GBE_UPHY_MGBE2_MAC_CLK gate output */ +#define TEGRA234_CLK_MGBE2_MAC 395U +/** @brief GBE_UPHY_MGBE2_EEE_PCS_CLK gate output */ +#define TEGRA234_CLK_MGBE2_EEE_PCS 397U +/** @brief GBE_UPHY_MGBE2_APP_CLK gate output */ +#define TEGRA234_CLK_MGBE2_APP 398U +/** @brief GBE_UPHY_MGBE2_PTP_REF_CLK divider gated output */ +#define TEGRA234_CLK_MGBE2_PTP_REF 399U +/** @brief output of mux controlled by GBE_UPHY_MGBE3_RX_PCS_CLK_SRC_SEL */ +#define TEGRA234_CLK_MGBE3_RX_PCS 400U +/** @brief GBE_UPHY_MGBE3_TX_CLK divider gated output */ +#define TEGRA234_CLK_MGBE3_TX 401U +/** @brief GBE_UPHY_MGBE3_TX_PCS_CLK divider gated output */ +#define TEGRA234_CLK_MGBE3_TX_PCS 402U +/** @brief GBE_UPHY_MGBE3_MAC_CLK divider output */ +#define TEGRA234_CLK_MGBE3_MAC_DIVIDER 403U +/** @brief GBE_UPHY_MGBE3_MAC_CLK gate output */ +#define TEGRA234_CLK_MGBE3_MAC 404U +/** @brief GBE_UPHY_MGBE3_MACSEC_CLK gate output */ +#define TEGRA234_CLK_MGBE3_MACSEC 405U +/** @brief GBE_UPHY_MGBE3_EEE_PCS_CLK gate output */ +#define TEGRA234_CLK_MGBE3_EEE_PCS 406U +/** @brief GBE_UPHY_MGBE3_APP_CLK gate output */ +#define TEGRA234_CLK_MGBE3_APP 407U +/** @brief GBE_UPHY_MGBE3_PTP_REF_CLK divider gated output */ +#define TEGRA234_CLK_MGBE3_PTP_REF 408U /** @brief CLK_RST_CONTROLLER_AZA2XBITCLK_OUT_SWITCH_DIVIDER switch divider output (aza_2xbitclk) */ #define TEGRA234_CLK_AZA_2XBIT 457U /** @brief aza_2xbitclk / 2 (aza_bitclk) */ #define TEGRA234_CLK_AZA_BIT 458U + #endif diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h index 9296d0bb5f34..fbfa3febc66d 100644 --- a/include/dt-bindings/mailbox/qcom-ipcc.h +++ b/include/dt-bindings/mailbox/qcom-ipcc.h @@ -30,6 +30,7 @@ #define IPCC_CLIENT_PCIE1 14 #define IPCC_CLIENT_PCIE2 15 #define IPCC_CLIENT_SPSS 16 +#define IPCC_CLIENT_NSP1 18 #define IPCC_CLIENT_TME 23 #define IPCC_CLIENT_WPSS 24 diff --git a/include/dt-bindings/memory/tegra234-mc.h b/include/dt-bindings/memory/tegra234-mc.h index e3b0e9da295d..62987b47ce81 100644 --- a/include/dt-bindings/memory/tegra234-mc.h +++ b/include/dt-bindings/memory/tegra234-mc.h @@ -11,11 +11,16 @@ /* NISO0 stream IDs */ #define TEGRA234_SID_APE 0x02 #define TEGRA234_SID_HDA 0x03 +#define TEGRA234_SID_GPCDMA 0x04 +#define TEGRA234_SID_MGBE 0x06 #define TEGRA234_SID_PCIE0 0x12 
#define TEGRA234_SID_PCIE4 0x13 #define TEGRA234_SID_PCIE5 0x14 #define TEGRA234_SID_PCIE6 0x15 #define TEGRA234_SID_PCIE9 0x1f +#define TEGRA234_SID_MGBE_VF1 0x49 +#define TEGRA234_SID_MGBE_VF2 0x4a +#define TEGRA234_SID_MGBE_VF3 0x4b /* NISO1 stream IDs */ #define TEGRA234_SID_SDMMC4 0x02 @@ -26,6 +31,8 @@ #define TEGRA234_SID_PCIE8 0x09 #define TEGRA234_SID_PCIE10 0x0b #define TEGRA234_SID_BPMP 0x10 +#define TEGRA234_SID_HOST1X 0x27 +#define TEGRA234_SID_VIC 0x34 /* * memory client IDs */ @@ -33,6 +40,7 @@ /* High-definition audio (HDA) read clients */ #define TEGRA234_MEMORY_CLIENT_HDAR 0x15 +#define TEGRA234_MEMORY_CLIENT_HOST1XDMAR 0x16 /* PCIE6 read clients */ #define TEGRA234_MEMORY_CLIENT_PCIE6AR 0x28 /* PCIE6 write clients */ @@ -61,10 +69,28 @@ #define TEGRA234_MEMORY_CLIENT_PCIE10AR1 0x48 /* PCIE7r1 read clients */ #define TEGRA234_MEMORY_CLIENT_PCIE7AR1 0x49 +/* MGBE0 read client */ +#define TEGRA234_MEMORY_CLIENT_MGBEARD 0x58 +/* MGBEB read client */ +#define TEGRA234_MEMORY_CLIENT_MGBEBRD 0x59 +/* MGBEC read client */ +#define TEGRA234_MEMORY_CLIENT_MGBECRD 0x5a +/* MGBED read client */ +#define TEGRA234_MEMORY_CLIENT_MGBEDRD 0x5b +/* MGBE0 write client */ +#define TEGRA234_MEMORY_CLIENT_MGBEAWR 0x5c +/* MGBEB write client */ +#define TEGRA234_MEMORY_CLIENT_MGBEBWR 0x5f +/* MGBEC write client */ +#define TEGRA234_MEMORY_CLIENT_MGBECWR 0x61 /* sdmmcd memory read client */ #define TEGRA234_MEMORY_CLIENT_SDMMCRAB 0x63 +/* MGBED write client */ +#define TEGRA234_MEMORY_CLIENT_MGBEDWR 0x65 /* sdmmcd memory write client */ #define TEGRA234_MEMORY_CLIENT_SDMMCWAB 0x67 +#define TEGRA234_MEMORY_CLIENT_VICSRD 0x6c +#define TEGRA234_MEMORY_CLIENT_VICSWR 0x6d /* BPMP read client */ #define TEGRA234_MEMORY_CLIENT_BPMPR 0x93 /* BPMP write client */ diff --git a/include/dt-bindings/net/pcs-rzn1-miic.h b/include/dt-bindings/net/pcs-rzn1-miic.h new file mode 100644 index 000000000000..784782eaec9e --- /dev/null +++ b/include/dt-bindings/net/pcs-rzn1-miic.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) 2022 Schneider-Electric + * + * Clément Léger <clement.leger@bootlin.com> + */ + +#ifndef _DT_BINDINGS_PCS_RZN1_MIIC +#define _DT_BINDINGS_PCS_RZN1_MIIC + +/* + * Refer to the datasheet [1], section 8.2.1, Internal Connection of Ethernet + * Ports, to check the available combinations + * + * [1] REN_r01uh0750ej0140-rzn1-introduction_MAT_20210228.pdf + */ + +#define MIIC_GMAC1_PORT 0 +#define MIIC_GMAC2_PORT 1 +#define MIIC_RTOS_PORT 2 +#define MIIC_SERCOS_PORTA 3 +#define MIIC_SERCOS_PORTB 4 +#define MIIC_ETHERCAT_PORTA 5 +#define MIIC_ETHERCAT_PORTB 6 +#define MIIC_ETHERCAT_PORTC 7 +#define MIIC_SWITCH_PORTA 8 +#define MIIC_SWITCH_PORTB 9 +#define MIIC_SWITCH_PORTC 10 +#define MIIC_SWITCH_PORTD 11 +#define MIIC_HSR_PORTA 12 +#define MIIC_HSR_PORTB 13 + +#endif diff --git a/include/dt-bindings/power/mt6795-power.h b/include/dt-bindings/power/mt6795-power.h new file mode 100644 index 000000000000..b0fc26cb1da4 --- /dev/null +++ b/include/dt-bindings/power/mt6795-power.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +#ifndef _DT_BINDINGS_POWER_MT6795_POWER_H +#define _DT_BINDINGS_POWER_MT6795_POWER_H + +#define MT6795_POWER_DOMAIN_MM 0 +#define MT6795_POWER_DOMAIN_VDEC 1 +#define MT6795_POWER_DOMAIN_VENC 2 +#define MT6795_POWER_DOMAIN_ISP 3 +#define MT6795_POWER_DOMAIN_MJC 4 +#define MT6795_POWER_DOMAIN_AUDIO 5 +#define MT6795_POWER_DOMAIN_MFG_ASYNC 6 +#define MT6795_POWER_DOMAIN_MFG_2D 7 +#define 
MT6795_POWER_DOMAIN_MFG 8 +#define MT6795_POWER_DOMAIN_MODEM 9 + +#endif /* _DT_BINDINGS_POWER_MT6795_POWER_H */ diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h index 6cce5b7aa940..d81de63ae31c 100644 --- a/include/dt-bindings/power/qcom-rpmpd.h +++ b/include/dt-bindings/power/qcom-rpmpd.h @@ -187,6 +187,13 @@ #define MSM8916_VDDMX 3 #define MSM8916_VDDMX_AO 4 +/* MSM8909 Power Domain Indexes */ +#define MSM8909_VDDCX MSM8916_VDDCX +#define MSM8909_VDDCX_AO MSM8916_VDDCX_AO +#define MSM8909_VDDCX_VFC MSM8916_VDDCX_VFC +#define MSM8909_VDDMX MSM8916_VDDMX +#define MSM8909_VDDMX_AO MSM8916_VDDMX_AO + /* MSM8953 Power Domain Indexes */ #define MSM8953_VDDMD 0 #define MSM8953_VDDMD_AO 1 diff --git a/include/dt-bindings/power/tegra234-powergate.h b/include/dt-bindings/power/tegra234-powergate.h index f610eee9bce8..ae9286cef85c 100644 --- a/include/dt-bindings/power/tegra234-powergate.h +++ b/include/dt-bindings/power/tegra234-powergate.h @@ -18,5 +18,7 @@ #define TEGRA234_POWER_DOMAIN_MGBEA 17U #define TEGRA234_POWER_DOMAIN_MGBEB 18U #define TEGRA234_POWER_DOMAIN_MGBEC 19U +#define TEGRA234_POWER_DOMAIN_MGBED 20U +#define TEGRA234_POWER_DOMAIN_VIC 29U #endif diff --git a/include/dt-bindings/reset/sunplus,sp7021-reset.h b/include/dt-bindings/reset/sunplus,sp7021-reset.h new file mode 100644 index 000000000000..ab486707387f --- /dev/null +++ b/include/dt-bindings/reset/sunplus,sp7021-reset.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) Sunplus Technology Co., Ltd. + * All rights reserved. + */ +#ifndef _DT_BINDINGS_RST_SUNPLUS_SP7021_H +#define _DT_BINDINGS_RST_SUNPLUS_SP7021_H + +#define RST_SYSTEM 0 +#define RST_RTC 1 +#define RST_IOCTL 2 +#define RST_IOP 3 +#define RST_OTPRX 4 +#define RST_NOC 5 +#define RST_BR 6 +#define RST_RBUS_L00 7 +#define RST_SPIFL 8 +#define RST_SDCTRL0 9 +#define RST_PERI0 10 +#define RST_A926 11 +#define RST_UMCTL2 12 +#define RST_PERI1 13 +#define RST_DDR_PHY0 14 +#define RST_ACHIP 15 +#define RST_STC0 16 +#define RST_STC_AV0 17 +#define RST_STC_AV1 18 +#define RST_STC_AV2 19 +#define RST_UA0 20 +#define RST_UA1 21 +#define RST_UA2 22 +#define RST_UA3 23 +#define RST_UA4 24 +#define RST_HWUA 25 +#define RST_DDC0 26 +#define RST_UADMA 27 +#define RST_CBDMA0 28 +#define RST_CBDMA1 29 +#define RST_SPI_COMBO_0 30 +#define RST_SPI_COMBO_1 31 +#define RST_SPI_COMBO_2 32 +#define RST_SPI_COMBO_3 33 +#define RST_AUD 34 +#define RST_USBC0 35 +#define RST_USBC1 36 +#define RST_UPHY0 37 +#define RST_UPHY1 38 +#define RST_I2CM0 39 +#define RST_I2CM1 40 +#define RST_I2CM2 41 +#define RST_I2CM3 42 +#define RST_PMC 43 +#define RST_CARD_CTL0 44 +#define RST_CARD_CTL1 45 +#define RST_CARD_CTL4 46 +#define RST_BCH 47 +#define RST_DDFCH 48 +#define RST_CSIIW0 49 +#define RST_CSIIW1 50 +#define RST_MIPICSI0 51 +#define RST_MIPICSI1 52 +#define RST_HDMI_TX 53 +#define RST_VPOST 54 +#define RST_TGEN 55 +#define RST_DMIX 56 +#define RST_TCON 57 +#define RST_INTERRUPT 58 +#define RST_RGST 59 +#define RST_GPIO 60 +#define RST_RBUS_TOP 61 +#define RST_MAILBOX 62 +#define RST_SPIND 63 +#define RST_I2C2CBUS 64 +#define RST_SEC 65 +#define RST_DVE 66 +#define RST_GPOST0 67 +#define RST_OSD0 68 +#define RST_DISP_PWM 69 +#define RST_UADBG 70 +#define RST_DUMMY_MASTER 71 +#define RST_FIO_CTL 72 +#define RST_FPGA 73 +#define RST_L2SW 74 +#define RST_ICM 75 +#define RST_AXI_GLOBAL 76 + +#endif diff --git a/include/dt-bindings/reset/tegra234-reset.h b/include/dt-bindings/reset/tegra234-reset.h index 
547ca3b60caa..d48d22b2bc7f 100644 --- a/include/dt-bindings/reset/tegra234-reset.h +++ b/include/dt-bindings/reset/tegra234-reset.h @@ -15,6 +15,7 @@ #define TEGRA234_RESET_PEX1_COMMON_APB 13U #define TEGRA234_RESET_PEX2_CORE_7 14U #define TEGRA234_RESET_PEX2_CORE_7_APB 15U +#define TEGRA234_RESET_GPCDMA 18U #define TEGRA234_RESET_HDA 20U #define TEGRA234_RESET_HDACODEC 21U #define TEGRA234_RESET_I2C1 24U @@ -29,6 +30,12 @@ #define TEGRA234_RESET_I2C7 33U #define TEGRA234_RESET_I2C8 34U #define TEGRA234_RESET_I2C9 35U +#define TEGRA234_RESET_MGBE0_PCS 45U +#define TEGRA234_RESET_MGBE0_MAC 46U +#define TEGRA234_RESET_MGBE1_PCS 49U +#define TEGRA234_RESET_MGBE1_MAC 50U +#define TEGRA234_RESET_MGBE2_PCS 53U +#define TEGRA234_RESET_MGBE2_MAC 54U #define TEGRA234_RESET_PEX2_CORE_10 56U #define TEGRA234_RESET_PEX2_CORE_10_APB 57U #define TEGRA234_RESET_PEX2_COMMON_APB 58U @@ -43,7 +50,10 @@ #define TEGRA234_RESET_QSPI0 76U #define TEGRA234_RESET_QSPI1 77U #define TEGRA234_RESET_SDMMC4 85U +#define TEGRA234_RESET_MGBE3_PCS 87U +#define TEGRA234_RESET_MGBE3_MAC 88U #define TEGRA234_RESET_UARTA 100U +#define TEGRA234_RESET_VIC 113U #define TEGRA234_RESET_PEX0_CORE_0 116U #define TEGRA234_RESET_PEX0_CORE_1 117U #define TEGRA234_RESET_PEX0_CORE_2 118U diff --git a/include/dt-bindings/soc/samsung,boot-mode.h b/include/dt-bindings/soc/samsung,boot-mode.h new file mode 100644 index 000000000000..47ef1cdd3916 --- /dev/null +++ b/include/dt-bindings/soc/samsung,boot-mode.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2022 Samsung Electronics Co., Ltd. + * Author: Chanho Park <chanho61.park@samsung.com> + * + * Device Tree bindings for Samsung Boot Mode. + */ + +#ifndef __DT_BINDINGS_SAMSUNG_BOOT_MODE_H +#define __DT_BINDINGS_SAMSUNG_BOOT_MODE_H + +/* Boot mode definitions for Exynos Auto v9 SoC */ + +#define EXYNOSAUTOV9_BOOT_FASTBOOT 0xfa +#define EXYNOSAUTOV9_BOOT_BOOTLOADER 0xfc +#define EXYNOSAUTOV9_BOOT_RECOVERY 0xff + +#endif /* __DT_BINDINGS_SAMSUNG_BOOT_MODE_H */ diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h index 6c5d4963e15b..69a13e1e5b2e 100644 --- a/include/keys/asymmetric-type.h +++ b/include/keys/asymmetric-type.h @@ -84,6 +84,9 @@ extern struct key *find_asymmetric_key(struct key *keyring, const struct asymmetric_key_id *id_2, bool partial); +int x509_load_certificate_list(const u8 cert_list[], const unsigned long list_size, + const struct key *keyring); + /* * The payload is at the discretion of the subtype. */ diff --git a/include/kunit/test.h b/include/kunit/test.h index 8ffcd7de9607..c958855681cc 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -237,9 +237,9 @@ size_t kunit_suite_num_test_cases(struct kunit_suite *suite); unsigned int kunit_test_case_num(struct kunit_suite *suite, struct kunit_case *test_case); -int __kunit_test_suites_init(struct kunit_suite * const * const suites); +int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites); -void __kunit_test_suites_exit(struct kunit_suite **suites); +void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites); #if IS_BUILTIN(CONFIG_KUNIT) int kunit_run_all_tests(void); @@ -250,43 +250,11 @@ static inline int kunit_run_all_tests(void) } #endif /* IS_BUILTIN(CONFIG_KUNIT) */ -#ifdef MODULE -/** - * kunit_test_suites_for_module() - used to register one or more - * &struct kunit_suite with KUnit. - * - * @__suites: a statically allocated list of &struct kunit_suite. 
- * - * Registers @__suites with the test framework. See &struct kunit_suite for - * more information. - * - * If a test suite is built-in, module_init() gets translated into - * an initcall which we don't want as the idea is that for builtins - * the executor will manage execution. So ensure we do not define - * module_{init|exit} functions for the builtin case when registering - * suites via kunit_test_suites() below. - */ -#define kunit_test_suites_for_module(__suites) \ - static int __init kunit_test_suites_init(void) \ - { \ - return __kunit_test_suites_init(__suites); \ - } \ - module_init(kunit_test_suites_init); \ - \ - static void __exit kunit_test_suites_exit(void) \ - { \ - return __kunit_test_suites_exit(__suites); \ - } \ - module_exit(kunit_test_suites_exit) -#else -#define kunit_test_suites_for_module(__suites) -#endif /* MODULE */ - -#define __kunit_test_suites(unique_array, unique_suites, ...) \ - static struct kunit_suite *unique_array[] = { __VA_ARGS__, NULL }; \ - kunit_test_suites_for_module(unique_array); \ - static struct kunit_suite **unique_suites \ - __used __section(".kunit_test_suites") = unique_array +#define __kunit_test_suites(unique_array, ...) \ + MODULE_INFO(test, "Y"); \ + static struct kunit_suite *unique_array[] \ + __aligned(sizeof(struct kunit_suite *)) \ + __used __section(".kunit_test_suites") = { __VA_ARGS__ } /** * kunit_test_suites() - used to register one or more &struct kunit_suite @@ -294,21 +262,16 @@ static inline int kunit_run_all_tests(void) * * @__suites: a statically allocated list of &struct kunit_suite. * - * Registers @suites with the test framework. See &struct kunit_suite for - * more information. - * - * When builtin, KUnit tests are all run via executor; this is done - * by placing the array of struct kunit_suite * in the .kunit_test_suites - * ELF section. + * Registers @suites with the test framework. + * This is done by placing the array of struct kunit_suite * in the + * .kunit_test_suites ELF section. * - * An alternative is to build the tests as a module. Because modules do not - * support multiple initcall()s, we need to initialize an array of suites for a - * module. + * When builtin, KUnit tests are all run via the executor at boot, and when + * built as a module, they run on module load. * */ #define kunit_test_suites(__suites...) \ __kunit_test_suites(__UNIQUE_ID(array), \ - __UNIQUE_ID(suites), \ ##__suites) #define kunit_test_suite(suite) kunit_test_suites(&suite) @@ -320,7 +283,7 @@ static inline int kunit_run_all_tests(void) * * @__suites: a statically allocated list of &struct kunit_suite. * - * This functions identically as &kunit_test_suites() except that it suppresses + * This functions identically as kunit_test_suites() except that it suppresses * modpost warnings for referencing functions marked __init or data marked * __initdata; this is OK because currently KUnit only runs tests upon boot * during the init phase or upon loading a module during the init phase. 
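With the kunit/test.h rework above, suite registration becomes uniform: the macro now only emits MODULE_INFO(test, "Y") and places the suite pointer array in the .kunit_test_suites ELF section, so the same source is run by the executor at boot when builtin and at load time when built as a module, with no module_init()/module_exit() boilerplate. A minimal sketch of a suite registered through this interface follows; the suite and test names are illustrative, not part of the patch:

#include <kunit/test.h>

/* A trivial test case: passes if 1 + 2 == 3. */
static void example_add_test(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, 1 + 2, 3);
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_add_test),
	{}	/* sentinel */
};

static struct kunit_suite example_test_suite = {
	.name = "example",
	.test_cases = example_test_cases,
};

/*
 * Expands to __kunit_test_suites(): the suite pointer lands in the
 * .kunit_test_suites section regardless of builtin vs. module build.
 */
kunit_test_suite(example_test_suite);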
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 4f82a5bc6d98..7e7a33b6c8d7 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -105,6 +105,7 @@ enum acpi_irq_model_id { ACPI_IRQ_MODEL_IOSAPIC, ACPI_IRQ_MODEL_PLATFORM, ACPI_IRQ_MODEL_GIC, + ACPI_IRQ_MODEL_LPIC, ACPI_IRQ_MODEL_COUNT }; @@ -356,7 +357,8 @@ int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); void acpi_set_irq_model(enum acpi_irq_model_id model, - struct fwnode_handle *fwnode); + struct fwnode_handle *(*)(u32)); +void acpi_set_gsi_to_irq_fallback(u32 (*)(u32)); struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, unsigned int size, @@ -584,7 +586,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); extern bool osc_sb_apei_support_acked; extern bool osc_pc_lpi_support_confirmed; extern bool osc_sb_native_usb4_support_confirmed; -extern bool osc_sb_cppc_not_supported; +extern bool osc_sb_cppc2_support_acked; extern bool osc_cpc_flexible_adr_space_confirmed; /* USB4 Capabilities */ diff --git a/include/linux/acpi_viot.h b/include/linux/acpi_viot.h index 1eb8ee5b0e5f..a5a122431563 100644 --- a/include/linux/acpi_viot.h +++ b/include/linux/acpi_viot.h @@ -6,9 +6,11 @@ #include <linux/acpi.h> #ifdef CONFIG_ACPI_VIOT +void __init acpi_viot_early_init(void); void __init acpi_viot_init(void); int viot_iommu_configure(struct device *dev); #else +static inline void acpi_viot_early_init(void) {} static inline void acpi_viot_init(void) {} static inline int viot_iommu_configure(struct device *dev) { diff --git a/include/linux/audit.h b/include/linux/audit.h index cece70231138..00f7a80f1a3e 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -119,8 +119,6 @@ enum audit_nfcfgop { AUDIT_NFT_OP_INVALID, }; -extern int is_audit_feature_set(int which); - extern int __init audit_register_class(int class, unsigned *list); extern int audit_classify_syscall(int abi, unsigned syscall); extern int audit_classify_arch(int arch); diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 2bd073fa6bb5..d452071db572 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -119,6 +119,8 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio); extern struct backing_dev_info noop_backing_dev_info; +int bdi_init(struct backing_dev_info *bdi); + /** * writeback_in_progress - determine whether there is writeback in progress * @wb: bdi_writeback of interest diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index edb7f6d41faa..5ca2d5699620 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -57,7 +57,6 @@ struct balloon_dev_info { struct list_head pages; /* Pages enqueued & handled to Host */ int (*migratepage)(struct balloon_dev_info *, struct page *newpage, struct page *page, enum migrate_mode mode); - struct inode *inode; }; extern struct page *balloon_page_alloc(void); @@ -75,11 +74,10 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) spin_lock_init(&balloon->pages_lock); INIT_LIST_HEAD(&balloon->pages); balloon->migratepage = NULL; - balloon->inode = NULL; } #ifdef CONFIG_BALLOON_COMPACTION -extern const struct address_space_operations balloon_aops; +extern const struct movable_operations balloon_mops; /* * balloon_page_insert - insert a page into the balloon's page list and make @@ -94,7 +92,7 @@ static inline void balloon_page_insert(struct balloon_dev_info 
*balloon, struct page *page) { __SetPageOffline(page); - __SetPageMovable(page, balloon->inode->i_mapping); + __SetPageMovable(page, &balloon_mops); set_page_private(page, (unsigned long)balloon); list_add(&page->lru, &balloon->pages); } diff --git a/include/linux/bio.h b/include/linux/bio.h index 1cf3738ef1ea..ca22b06700a9 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -403,10 +403,9 @@ enum { extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags); extern void bioset_exit(struct bio_set *); extern int biovec_init_pool(mempool_t *pool, int pool_entries); -extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src); struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, - unsigned int opf, gfp_t gfp_mask, + blk_opf_t opf, gfp_t gfp_mask, struct bio_set *bs); struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask); extern void bio_put(struct bio *); @@ -419,7 +418,7 @@ int bio_init_clone(struct block_device *bdev, struct bio *bio, extern struct bio_set fs_bio_set; static inline struct bio *bio_alloc(struct block_device *bdev, - unsigned short nr_vecs, unsigned int opf, gfp_t gfp_mask) + unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask) { return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set); } @@ -457,9 +456,9 @@ struct request_queue; extern int submit_bio_wait(struct bio *bio); void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, - unsigned short max_vecs, unsigned int opf); + unsigned short max_vecs, blk_opf_t opf); extern void bio_uninit(struct bio *); -void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf); +void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf); void bio_chain(struct bio *, struct bio *); int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off); @@ -790,6 +789,6 @@ static inline void bio_clear_polled(struct bio *bio) } struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, - unsigned int nr_pages, unsigned int opf, gfp_t gfp); + unsigned int nr_pages, blk_opf_t opf, gfp_t gfp); #endif /* __LINUX_BIO_H */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index e2d9daf7e8dd..effee1dc715a 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -57,6 +57,7 @@ typedef __u32 __bitwise req_flags_t; #define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21)) /* queue has elevator attached */ #define RQF_ELV ((__force req_flags_t)(1 << 22)) +#define RQF_RESV ((__force req_flags_t)(1 << 23)) /* flags that prevent us from merging requests: */ #define RQF_NOMERGE_FLAGS \ @@ -79,7 +80,7 @@ struct request { struct blk_mq_ctx *mq_ctx; struct blk_mq_hw_ctx *mq_hctx; - unsigned int cmd_flags; /* op and common flags */ + blk_opf_t cmd_flags; /* op and common flags */ req_flags_t rq_flags; int tag; @@ -197,8 +198,10 @@ struct request { void *end_io_data; }; -#define req_op(req) \ - ((req)->cmd_flags & REQ_OP_MASK) +static inline enum req_op req_op(const struct request *req) +{ + return req->cmd_flags & REQ_OP_MASK; +} static inline bool blk_rq_is_passthrough(struct request *rq) { @@ -519,7 +522,7 @@ struct blk_mq_queue_data { bool last; }; -typedef bool (busy_tag_iter_fn)(struct request *, void *, bool); +typedef bool (busy_tag_iter_fn)(struct request *, void *); /** * struct blk_mq_ops - Callback functions that implements block driver @@ -574,7 +577,7 @@ struct blk_mq_ops { /** * @timeout: Called on request timeout. 
*/ - enum blk_eh_timer_return (*timeout)(struct request *, bool); + enum blk_eh_timer_return (*timeout)(struct request *); /** * @poll: Called to poll for completion of a specific tag. @@ -686,10 +689,12 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata, \ __blk_mq_alloc_disk(set, queuedata, &__key); \ }) +struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q, + struct lock_class_key *lkclass); struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, struct request_queue *q); -void blk_mq_unregister_dev(struct device *, struct request_queue *); +void blk_mq_destroy_queue(struct request_queue *); int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set); int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set, @@ -710,10 +715,10 @@ enum { BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2), }; -struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, +struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf, blk_mq_req_flags_t flags); struct request *blk_mq_alloc_request_hctx(struct request_queue *q, - unsigned int op, blk_mq_req_flags_t flags, + blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx); /* @@ -823,6 +828,11 @@ static inline bool blk_mq_need_time_stamp(struct request *rq) return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV)); } +static inline bool blk_mq_is_reserved_rq(struct request *rq) +{ + return rq->rq_flags & RQF_RESV; +} + /* * Batched completions only work when there is no I/O error and no special * ->end_io handler. @@ -1121,12 +1131,12 @@ void blk_dump_rq_flags(struct request *, char *); #ifdef CONFIG_BLK_DEV_ZONED static inline unsigned int blk_rq_zone_no(struct request *rq) { - return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); + return disk_zone_no(rq->q->disk, blk_rq_pos(rq)); } static inline unsigned int blk_rq_zone_is_seq(struct request *rq) { - return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq)); + return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq)); } bool blk_req_needs_zone_write_lock(struct request *rq); @@ -1148,8 +1158,8 @@ static inline void blk_req_zone_write_unlock(struct request *rq) static inline bool blk_req_zone_is_write_locked(struct request *rq) { - return rq->q->seq_zones_wlock && - test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock); + return rq->q->disk->seq_zones_wlock && + test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock); } static inline bool blk_req_can_dispatch_to_zone(struct request *rq) diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index a24d4078fb21..1ef99790f6ed 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -240,6 +240,8 @@ static inline void bio_issue_init(struct bio_issue *issue, ((u64)size << BIO_ISSUE_SIZE_SHIFT)); } +typedef __u32 __bitwise blk_opf_t; + typedef unsigned int blk_qc_t; #define BLK_QC_T_NONE -1U @@ -250,7 +252,7 @@ typedef unsigned int blk_qc_t; struct bio { struct bio *bi_next; /* request queue link */ struct block_device *bi_bdev; - unsigned int bi_opf; /* bottom bits REQ_OP, top bits + blk_opf_t bi_opf; /* bottom bits REQ_OP, top bits * req_flags. */ unsigned short bi_flags; /* BIO_* below */ @@ -337,8 +339,12 @@ enum { typedef __u32 __bitwise blk_mq_req_flags_t; -/* - * Operations and flags common to the bio and request structures. 
+#define REQ_OP_BITS 8 +#define REQ_OP_MASK (__force blk_opf_t)((1 << REQ_OP_BITS) - 1) +#define REQ_FLAG_BITS 24 + +/** + * enum req_op - Operations common to the bio and request structures. * We use 8 bits for encoding the operation, and the remaining 24 for flags. * * The least significant bit of the operation number indicates the data @@ -350,41 +356,37 @@ typedef __u32 __bitwise blk_mq_req_flags_t; * If a operation does not transfer data the least significant bit has no * meaning. */ -#define REQ_OP_BITS 8 -#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1) -#define REQ_FLAG_BITS 24 - -enum req_opf { +enum req_op { /* read sectors from the device */ - REQ_OP_READ = 0, + REQ_OP_READ = (__force blk_opf_t)0, /* write sectors to the device */ - REQ_OP_WRITE = 1, + REQ_OP_WRITE = (__force blk_opf_t)1, /* flush the volatile write cache */ - REQ_OP_FLUSH = 2, + REQ_OP_FLUSH = (__force blk_opf_t)2, /* discard sectors */ - REQ_OP_DISCARD = 3, + REQ_OP_DISCARD = (__force blk_opf_t)3, /* securely erase sectors */ - REQ_OP_SECURE_ERASE = 5, + REQ_OP_SECURE_ERASE = (__force blk_opf_t)5, /* write the zero filled sector many times */ - REQ_OP_WRITE_ZEROES = 9, + REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9, /* Open a zone */ - REQ_OP_ZONE_OPEN = 10, + REQ_OP_ZONE_OPEN = (__force blk_opf_t)10, /* Close a zone */ - REQ_OP_ZONE_CLOSE = 11, + REQ_OP_ZONE_CLOSE = (__force blk_opf_t)11, /* Transition a zone to full */ - REQ_OP_ZONE_FINISH = 12, + REQ_OP_ZONE_FINISH = (__force blk_opf_t)12, /* write data at the current zone write pointer */ - REQ_OP_ZONE_APPEND = 13, + REQ_OP_ZONE_APPEND = (__force blk_opf_t)13, /* reset a zone write pointer */ - REQ_OP_ZONE_RESET = 15, + REQ_OP_ZONE_RESET = (__force blk_opf_t)15, /* reset all the zone present on the device */ - REQ_OP_ZONE_RESET_ALL = 17, + REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)17, /* Driver private requests */ - REQ_OP_DRV_IN = 34, - REQ_OP_DRV_OUT = 35, + REQ_OP_DRV_IN = (__force blk_opf_t)34, + REQ_OP_DRV_OUT = (__force blk_opf_t)35, - REQ_OP_LAST, + REQ_OP_LAST = (__force blk_opf_t)36, }; enum req_flag_bits { @@ -425,28 +427,31 @@ enum req_flag_bits { __REQ_NR_BITS, /* stops here */ }; -#define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV) -#define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT) -#define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER) -#define REQ_SYNC (1ULL << __REQ_SYNC) -#define REQ_META (1ULL << __REQ_META) -#define REQ_PRIO (1ULL << __REQ_PRIO) -#define REQ_NOMERGE (1ULL << __REQ_NOMERGE) -#define REQ_IDLE (1ULL << __REQ_IDLE) -#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) -#define REQ_FUA (1ULL << __REQ_FUA) -#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) -#define REQ_RAHEAD (1ULL << __REQ_RAHEAD) -#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) -#define REQ_NOWAIT (1ULL << __REQ_NOWAIT) -#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT) - -#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) -#define REQ_POLLED (1ULL << __REQ_POLLED) -#define REQ_ALLOC_CACHE (1ULL << __REQ_ALLOC_CACHE) - -#define REQ_DRV (1ULL << __REQ_DRV) -#define REQ_SWAP (1ULL << __REQ_SWAP) +#define REQ_FAILFAST_DEV \ + (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV) +#define REQ_FAILFAST_TRANSPORT \ + (__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT) +#define REQ_FAILFAST_DRIVER \ + (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER) +#define REQ_SYNC (__force blk_opf_t)(1ULL << __REQ_SYNC) +#define REQ_META (__force blk_opf_t)(1ULL << __REQ_META) +#define REQ_PRIO (__force blk_opf_t)(1ULL << __REQ_PRIO) +#define REQ_NOMERGE (__force 
blk_opf_t)(1ULL << __REQ_NOMERGE) +#define REQ_IDLE (__force blk_opf_t)(1ULL << __REQ_IDLE) +#define REQ_INTEGRITY (__force blk_opf_t)(1ULL << __REQ_INTEGRITY) +#define REQ_FUA (__force blk_opf_t)(1ULL << __REQ_FUA) +#define REQ_PREFLUSH (__force blk_opf_t)(1ULL << __REQ_PREFLUSH) +#define REQ_RAHEAD (__force blk_opf_t)(1ULL << __REQ_RAHEAD) +#define REQ_BACKGROUND (__force blk_opf_t)(1ULL << __REQ_BACKGROUND) +#define REQ_NOWAIT (__force blk_opf_t)(1ULL << __REQ_NOWAIT) +#define REQ_CGROUP_PUNT (__force blk_opf_t)(1ULL << __REQ_CGROUP_PUNT) + +#define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP) +#define REQ_POLLED (__force blk_opf_t)(1ULL << __REQ_POLLED) +#define REQ_ALLOC_CACHE (__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE) + +#define REQ_DRV (__force blk_opf_t)(1ULL << __REQ_DRV) +#define REQ_SWAP (__force blk_opf_t)(1ULL << __REQ_SWAP) #define REQ_FAILFAST_MASK \ (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) @@ -463,26 +468,28 @@ enum stat_group { NR_STAT_GROUPS }; -#define bio_op(bio) \ - ((bio)->bi_opf & REQ_OP_MASK) +static inline enum req_op bio_op(const struct bio *bio) +{ + return bio->bi_opf & REQ_OP_MASK; +} /* obsolete, don't use in new code */ -static inline void bio_set_op_attrs(struct bio *bio, unsigned op, - unsigned op_flags) +static inline void bio_set_op_attrs(struct bio *bio, enum req_op op, + blk_opf_t op_flags) { bio->bi_opf = op | op_flags; } -static inline bool op_is_write(unsigned int op) +static inline bool op_is_write(blk_opf_t op) { - return (op & 1); + return !!(op & (__force blk_opf_t)1); } /* * Check if the bio or request is one that needs special treatment in the * flush state machine. */ -static inline bool op_is_flush(unsigned int op) +static inline bool op_is_flush(blk_opf_t op) { return op & (REQ_FUA | REQ_PREFLUSH); } @@ -492,13 +499,13 @@ static inline bool op_is_flush(unsigned int op) * PREFLUSH flag. Other operations may be marked as synchronous using the * REQ_SYNC flag. */ -static inline bool op_is_sync(unsigned int op) +static inline bool op_is_sync(blk_opf_t op) { return (op & REQ_OP_MASK) == REQ_OP_READ || (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH)); } -static inline bool op_is_discard(unsigned int op) +static inline bool op_is_discard(blk_opf_t op) { return (op & REQ_OP_MASK) == REQ_OP_DISCARD; } @@ -509,7 +516,7 @@ static inline bool op_is_discard(unsigned int op) * due to its different handling in the block layer and device response in * case of command failure. */ -static inline bool op_is_zone_mgmt(enum req_opf op) +static inline bool op_is_zone_mgmt(enum req_op op) { switch (op & REQ_OP_MASK) { case REQ_OP_ZONE_RESET: @@ -522,7 +529,7 @@ static inline bool op_is_zone_mgmt(enum req_opf op) } } -static inline int op_stat_group(unsigned int op) +static inline int op_stat_group(enum req_op op) { if (op_is_discard(op)) return STAT_DISCARD; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 608d577734c2..d04bdf549efa 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -148,6 +148,7 @@ struct gendisk { #define GD_NATIVE_CAPACITY 3 #define GD_ADDED 4 #define GD_SUPPRESS_PART_SCAN 5 +#define GD_OWNS_QUEUE 6 struct mutex open_mutex; /* open/close mutex */ unsigned open_partitions; /* number of open partitions */ @@ -163,6 +164,29 @@ struct gendisk { #ifdef CONFIG_BLK_DEV_INTEGRITY struct kobject integrity_kobj; #endif /* CONFIG_BLK_DEV_INTEGRITY */ + +#ifdef CONFIG_BLK_DEV_ZONED + /* + * Zoned block device information for request dispatch control. 
+ * nr_zones is the total number of zones of the device. This is always + * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones + * bits which indicates if a zone is conventional (bit set) or + * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones + * bits which indicates if a zone is write locked, that is, if a write + * request targeting the zone was dispatched. + * + * Reads of this information must be protected with blk_queue_enter() / + * blk_queue_exit(). Modifying this information is only allowed while + * no requests are being processed. See also blk_mq_freeze_queue() and + * blk_mq_unfreeze_queue(). + */ + unsigned int nr_zones; + unsigned int max_open_zones; + unsigned int max_active_zones; + unsigned long *conv_zones_bitmap; + unsigned long *seq_zones_wlock; +#endif /* CONFIG_BLK_DEV_ZONED */ + #if IS_ENABLED(CONFIG_CDROM) struct cdrom_device_info *cdi; #endif @@ -170,6 +194,12 @@ struct gendisk { struct badblocks *bb; struct lockdep_map lockdep_map; u64 diskseq; + + /* + * Independent sector access ranges. This is always NULL for + * devices that do not have multiple independent access ranges. + */ + struct blk_independent_access_ranges *ia_ranges; }; static inline bool disk_live(struct gendisk *disk) @@ -220,7 +250,7 @@ static inline int blk_validate_block_size(unsigned long bsize) return 0; } -static inline bool blk_op_is_passthrough(unsigned int op) +static inline bool blk_op_is_passthrough(blk_opf_t op) { op &= REQ_OP_MASK; return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT; @@ -284,15 +314,15 @@ struct queue_limits { typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx, void *data); -void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model); +void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model); #ifdef CONFIG_BLK_DEV_ZONED #define BLK_ALL_ZONES ((unsigned int)-1) int blkdev_report_zones(struct block_device *bdev, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data); -unsigned int blkdev_nr_zones(struct gendisk *disk); -extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op, +unsigned int bdev_nr_zones(struct block_device *bdev); +extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op, sector_t sectors, sector_t nr_sectors, gfp_t gfp_mask); int blk_revalidate_disk_zones(struct gendisk *disk, @@ -305,7 +335,7 @@ extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode, #else /* CONFIG_BLK_DEV_ZONED */ -static inline unsigned int blkdev_nr_zones(struct gendisk *disk) +static inline unsigned int bdev_nr_zones(struct block_device *bdev) { return 0; } @@ -342,7 +372,6 @@ static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev, */ struct blk_independent_access_range { struct kobject kobj; - struct request_queue *queue; sector_t sector; sector_t nr_sectors; }; @@ -425,6 +454,11 @@ struct request_queue { unsigned long nr_requests; /* Max # of requests */ unsigned int dma_pad_mask; + /* + * Drivers that set dma_alignment to less than 511 must be prepared to + * handle individual bvec's that are not a multiple of a SECTOR_SIZE + * due to possible offsets. + */ unsigned int dma_alignment; #ifdef CONFIG_BLK_INLINE_ENCRYPTION @@ -456,33 +490,7 @@ struct request_queue { unsigned int required_elevator_features; -#ifdef CONFIG_BLK_DEV_ZONED - /* - * Zoned block device information for request dispatch control. - * nr_zones is the total number of zones of the device. This is always - * 0 for regular block devices. 
conv_zones_bitmap is a bitmap of nr_zones - * bits which indicates if a zone is conventional (bit set) or - * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones - * bits which indicates if a zone is write locked, that is, if a write - * request targeting the zone was dispatched. All three fields are - * initialized by the low level device driver (e.g. scsi/sd.c). - * Stacking drivers (device mappers) may or may not initialize - * these fields. - * - * Reads of this information must be protected with blk_queue_enter() / - * blk_queue_exit(). Modifying this information is only allowed while - * no requests are being processed. See also blk_mq_freeze_queue() and - * blk_mq_unfreeze_queue(). - */ - unsigned int nr_zones; - unsigned long *conv_zones_bitmap; - unsigned long *seq_zones_wlock; - unsigned int max_open_zones; - unsigned int max_active_zones; -#endif /* CONFIG_BLK_DEV_ZONED */ - int node; - struct mutex debugfs_mutex; #ifdef CONFIG_BLK_DEV_IO_TRACE struct blk_trace __rcu *blk_trace; #endif @@ -526,19 +534,14 @@ struct request_queue { struct bio_set bio_split; struct dentry *debugfs_dir; - -#ifdef CONFIG_BLK_DEBUG_FS struct dentry *sched_debugfs_dir; struct dentry *rqos_debugfs_dir; -#endif - - bool mq_sysfs_init_done; - /* - * Independent sector access ranges. This is always NULL for - * devices that do not have multiple independent access ranges. + * Serializes all debugfs metadata operations using the above dentries. */ - struct blk_independent_access_ranges *ia_ranges; + struct mutex debugfs_mutex; + + bool mq_sysfs_init_done; /** * @srcu: Sleepable RCU. Use as lock when type of the request queue @@ -560,7 +563,6 @@ struct request_queue { #define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */ #define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */ #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ -#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */ #define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */ #define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */ #define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */ @@ -575,6 +577,7 @@ struct request_queue { #define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */ #define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */ #define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */ +#define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */ #define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_SAME_COMP) | \ @@ -587,7 +590,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) #define blk_queue_has_srcu(q) test_bit(QUEUE_FLAG_HAS_SRCU, &(q)->queue_flags) -#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags) #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) #define blk_queue_noxmerges(q) \ @@ -616,6 +618,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); #define blk_queue_pm_only(q) atomic_read(&(q)->pm_only) #define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags) #define blk_queue_nowait(q) test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags) +#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags) extern void blk_set_pm_only(struct 
request_queue *q); extern void blk_clear_pm_only(struct request_queue *q); @@ -662,76 +665,69 @@ static inline bool blk_queue_is_zoned(struct request_queue *q) } } -static inline sector_t blk_queue_zone_sectors(struct request_queue *q) -{ - return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; -} - #ifdef CONFIG_BLK_DEV_ZONED -static inline unsigned int blk_queue_nr_zones(struct request_queue *q) +static inline unsigned int disk_nr_zones(struct gendisk *disk) { - return blk_queue_is_zoned(q) ? q->nr_zones : 0; + return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0; } -static inline unsigned int blk_queue_zone_no(struct request_queue *q, - sector_t sector) +static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector) { - if (!blk_queue_is_zoned(q)) + if (!blk_queue_is_zoned(disk->queue)) return 0; - return sector >> ilog2(q->limits.chunk_sectors); + return sector >> ilog2(disk->queue->limits.chunk_sectors); } -static inline bool blk_queue_zone_is_seq(struct request_queue *q, - sector_t sector) +static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector) { - if (!blk_queue_is_zoned(q)) + if (!blk_queue_is_zoned(disk->queue)) return false; - if (!q->conv_zones_bitmap) + if (!disk->conv_zones_bitmap) return true; - return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap); + return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap); } -static inline void blk_queue_max_open_zones(struct request_queue *q, +static inline void disk_set_max_open_zones(struct gendisk *disk, unsigned int max_open_zones) { - q->max_open_zones = max_open_zones; + disk->max_open_zones = max_open_zones; } -static inline unsigned int queue_max_open_zones(const struct request_queue *q) +static inline void disk_set_max_active_zones(struct gendisk *disk, + unsigned int max_active_zones) { - return q->max_open_zones; + disk->max_active_zones = max_active_zones; } -static inline void blk_queue_max_active_zones(struct request_queue *q, - unsigned int max_active_zones) +static inline unsigned int bdev_max_open_zones(struct block_device *bdev) { - q->max_active_zones = max_active_zones; + return bdev->bd_disk->max_open_zones; } -static inline unsigned int queue_max_active_zones(const struct request_queue *q) +static inline unsigned int bdev_max_active_zones(struct block_device *bdev) { - return q->max_active_zones; + return bdev->bd_disk->max_active_zones; } + #else /* CONFIG_BLK_DEV_ZONED */ -static inline unsigned int blk_queue_nr_zones(struct request_queue *q) +static inline unsigned int disk_nr_zones(struct gendisk *disk) { return 0; } -static inline bool blk_queue_zone_is_seq(struct request_queue *q, - sector_t sector) +static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector) { return false; } -static inline unsigned int blk_queue_zone_no(struct request_queue *q, - sector_t sector) +static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector) { return 0; } -static inline unsigned int queue_max_open_zones(const struct request_queue *q) +static inline unsigned int bdev_max_open_zones(struct block_device *bdev) { return 0; } -static inline unsigned int queue_max_active_zones(const struct request_queue *q) + +static inline unsigned int bdev_max_active_zones(struct block_device *bdev) { return 0; } @@ -811,8 +807,6 @@ static inline u64 sb_bdev_nr_blocks(struct super_block *sb) int bdev_disk_changed(struct gendisk *disk, bool invalidate); -struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, - struct 
lock_class_key *lkclass); void put_disk(struct gendisk *disk); struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass); @@ -831,7 +825,6 @@ struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass); \ __blk_alloc_disk(node_id, &__key); \ }) -void blk_cleanup_disk(struct gendisk *disk); int __register_blkdev(unsigned int major, const char *name, void (*probe)(dev_t devt)); @@ -879,7 +872,7 @@ extern void blk_queue_exit(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q); /* Helper to convert REQ_OP_XXX to its string format XXX */ -extern const char *blk_op_str(unsigned int op); +extern const char *blk_op_str(enum req_op op); int blk_status_to_errno(blk_status_t status); blk_status_t errno_to_blk_status(int errno); @@ -897,64 +890,33 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev) return bdev->bd_queue; /* this is never NULL */ } -#ifdef CONFIG_BLK_DEV_ZONED - /* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */ const char *blk_zone_cond_str(enum blk_zone_cond zone_cond); static inline unsigned int bio_zone_no(struct bio *bio) { - return blk_queue_zone_no(bdev_get_queue(bio->bi_bdev), - bio->bi_iter.bi_sector); + return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector); } static inline unsigned int bio_zone_is_seq(struct bio *bio) { - return blk_queue_zone_is_seq(bdev_get_queue(bio->bi_bdev), - bio->bi_iter.bi_sector); -} -#endif /* CONFIG_BLK_DEV_ZONED */ - -static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, - int op) -{ - if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)) - return min(q->limits.max_discard_sectors, - UINT_MAX >> SECTOR_SHIFT); - - if (unlikely(op == REQ_OP_WRITE_ZEROES)) - return q->limits.max_write_zeroes_sectors; - - return q->limits.max_sectors; + return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector); } /* - * Return maximum size of a request at given offset. Only valid for - * file system requests. + * Return how much of the chunk is left to be used for I/O at a given offset. 
*/ -static inline unsigned int blk_max_size_offset(struct request_queue *q, - sector_t offset, - unsigned int chunk_sectors) -{ - if (!chunk_sectors) { - if (q->limits.chunk_sectors) - chunk_sectors = q->limits.chunk_sectors; - else - return q->limits.max_sectors; - } - - if (likely(is_power_of_2(chunk_sectors))) - chunk_sectors -= offset & (chunk_sectors - 1); - else - chunk_sectors -= sector_div(offset, chunk_sectors); - - return min(q->limits.max_sectors, chunk_sectors); +static inline unsigned int blk_chunk_sectors_left(sector_t offset, + unsigned int chunk_sectors) +{ + if (unlikely(!is_power_of_2(chunk_sectors))) + return chunk_sectors - sector_div(offset, chunk_sectors); + return chunk_sectors - (offset & (chunk_sectors - 1)); } /* * Access functions for manipulating queue properties */ -extern void blk_cleanup_queue(struct request_queue *); void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit); extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); @@ -1006,8 +968,6 @@ void disk_set_independent_access_ranges(struct gendisk *disk, */ /* Supports zoned block devices sequential write constraint */ #define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0) -/* Supports scheduling on multiple hardware queues */ -#define ELEVATOR_F_MQ_AWARE (1U << 1) extern void blk_queue_required_elevator_features(struct request_queue *q, unsigned int features); @@ -1338,32 +1298,26 @@ static inline sector_t bdev_zone_sectors(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); - if (q) - return blk_queue_zone_sectors(q); - return 0; + if (!blk_queue_is_zoned(q)) + return 0; + return q->limits.chunk_sectors; } -static inline unsigned int bdev_max_open_zones(struct block_device *bdev) +static inline int queue_dma_alignment(const struct request_queue *q) { - struct request_queue *q = bdev_get_queue(bdev); - - if (q) - return queue_max_open_zones(q); - return 0; + return q ? q->dma_alignment : 511; } -static inline unsigned int bdev_max_active_zones(struct block_device *bdev) +static inline unsigned int bdev_dma_alignment(struct block_device *bdev) { - struct request_queue *q = bdev_get_queue(bdev); - - if (q) - return queue_max_active_zones(q); - return 0; + return queue_dma_alignment(bdev_get_queue(bdev)); } -static inline int queue_dma_alignment(const struct request_queue *q) +static inline bool bdev_iter_is_aligned(struct block_device *bdev, + struct iov_iter *iter) { - return q ? 
q->dma_alignment : 511; + return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev), + bdev_logical_block_size(bdev) - 1); } static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, @@ -1427,7 +1381,7 @@ struct block_device_operations { unsigned int flags); int (*open) (struct block_device *, fmode_t); void (*release) (struct gendisk *, fmode_t); - int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); + int (*rw_page)(struct block_device *, sector_t, struct page *, enum req_op); int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); unsigned int (*check_events) (struct gendisk *disk, @@ -1480,9 +1434,9 @@ static inline void blk_wake_io_task(struct task_struct *waiter) } unsigned long bdev_start_io_acct(struct block_device *bdev, - unsigned int sectors, unsigned int op, + unsigned int sectors, enum req_op op, unsigned long start_time); -void bdev_end_io_acct(struct block_device *bdev, unsigned int op, +void bdev_end_io_acct(struct block_device *bdev, enum req_op op, unsigned long start_time); void bio_start_io_acct_time(struct bio *bio, unsigned long start_time); @@ -1503,7 +1457,6 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) int bdev_read_only(struct block_device *bdev); int set_blocksize(struct block_device *bdev, int size); -const char *bdevname(struct block_device *bdev, char *buffer); int lookup_bdev(const char *pathname, dev_t *dev); void blkdev_show(struct seq_file *seqf, off_t offset); diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 623e22492afa..cfbda114348c 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -7,6 +7,7 @@ #include <linux/compat.h> #include <uapi/linux/blktrace_api.h> #include <linux/list.h> +#include <linux/blk_types.h> #if defined(CONFIG_BLK_DEV_IO_TRACE) @@ -77,10 +78,6 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, char __user *arg); extern int blk_trace_startstop(struct request_queue *q, int start); extern int blk_trace_remove(struct request_queue *q); -extern void blk_trace_remove_sysfs(struct device *dev); -extern int blk_trace_init_sysfs(struct device *dev); - -extern struct attribute_group blk_trace_attr_group; #else /* !CONFIG_BLK_DEV_IO_TRACE */ # define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) @@ -91,13 +88,7 @@ extern struct attribute_group blk_trace_attr_group; # define blk_trace_remove(q) (-ENOTTY) # define blk_add_trace_msg(q, fmt, ...) do { } while (0) # define blk_add_cgroup_trace_msg(q, cg, fmt, ...) 
do { } while (0) -# define blk_trace_remove_sysfs(dev) do { } while (0) # define blk_trace_note_message_enabled(q) (false) -static inline int blk_trace_init_sysfs(struct device *dev) -{ - return 0; -} - #endif /* CONFIG_BLK_DEV_IO_TRACE */ #ifdef CONFIG_COMPAT @@ -115,7 +106,7 @@ struct compat_blk_user_trace_setup { #endif -void blk_fill_rwbs(char *rwbs, unsigned int op); +void blk_fill_rwbs(char *rwbs, blk_opf_t opf); static inline sector_t blk_rq_trace_sector(struct request *rq) { diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index c9d1463bb20f..307445d1c69e 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -9,6 +9,7 @@ #define _LINUX_BUFFER_HEAD_H #include <linux/types.h> +#include <linux/blk_types.h> #include <linux/fs.h> #include <linux/linkage.h> #include <linux/pagemap.h> @@ -201,11 +202,11 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); void free_buffer_head(struct buffer_head * bh); void unlock_buffer(struct buffer_head *bh); void __lock_buffer(struct buffer_head *bh); -void ll_rw_block(int, int, int, struct buffer_head * bh[]); +void ll_rw_block(blk_opf_t, int, struct buffer_head * bh[]); int sync_dirty_buffer(struct buffer_head *bh); -int __sync_dirty_buffer(struct buffer_head *bh, int op_flags); -void write_dirty_buffer(struct buffer_head *bh, int op_flags); -int submit_bh(int, int, struct buffer_head *); +int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags); +void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags); +int submit_bh(blk_opf_t, struct buffer_head *); void write_boundary_block(struct block_device *bdev, sector_t bblock, unsigned blocksize); int bh_uptodate_or_lock(struct buffer_head *bh); @@ -258,14 +259,16 @@ static inline vm_fault_t block_page_mkwrite_return(int err) } sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); int block_truncate_page(struct address_space *, loff_t, get_block_t *); -int nobh_write_begin(struct address_space *, loff_t, unsigned len, - struct page **, void **, get_block_t*); -int nobh_write_end(struct file *, struct address_space *, - loff_t, unsigned, unsigned, - struct page *, void *); -int nobh_truncate_page(struct address_space *, loff_t, get_block_t *); -int nobh_writepage(struct page *page, get_block_t *get_block, - struct writeback_control *wbc); + +#ifdef CONFIG_MIGRATION +extern int buffer_migrate_folio(struct address_space *, + struct folio *dst, struct folio *src, enum migrate_mode); +extern int buffer_migrate_folio_norefs(struct address_space *, + struct folio *dst, struct folio *src, enum migrate_mode); +#else +#define buffer_migrate_folio NULL +#define buffer_migrate_folio_norefs NULL +#endif void buffer_init(void); diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 1bfcfb1af352..4bcf56b3491c 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -89,19 +89,32 @@ enum { CGRP_ROOT_NS_DELEGATE = (1 << 3), /* + * Reduce latencies on dynamic cgroup modifications such as task + * migrations and controller on/offs by disabling percpu operation on + * cgroup_threadgroup_rwsem. This makes hot path operations such as + * forks and exits into the slow path and more expensive. + * + * The static usage pattern of creating a cgroup, enabling controllers, + * and then seeding it with CLONE_INTO_CGROUP doesn't require write + * locking cgroup_threadgroup_rwsem and thus doesn't benefit from + * favordynmod. 
+ */ + CGRP_ROOT_FAVOR_DYNMODS = (1 << 4), + + /* * Enable cpuset controller in v1 cgroup to use v2 behavior. */ - CGRP_ROOT_CPUSET_V2_MODE = (1 << 4), + CGRP_ROOT_CPUSET_V2_MODE = (1 << 16), /* * Enable legacy local memory.events. */ - CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5), + CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 17), /* * Enable recursive subtree protection */ - CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 6), + CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18), }; /* cftype->flags */ @@ -264,7 +277,8 @@ struct css_set { * List of csets participating in the on-going migration either as * source or destination. Protected by cgroup_mutex. */ - struct list_head mg_preload_node; + struct list_head mg_src_preload_node; + struct list_head mg_dst_preload_node; struct list_head mg_node; /* @@ -287,6 +301,10 @@ struct css_set { struct cgroup_base_stat { struct task_cputime cputime; + +#ifdef CONFIG_SCHED_CORE + u64 forceidle_sum; +#endif }; /* @@ -475,7 +493,7 @@ struct cgroup { struct work_struct release_agent_work; /* used to track pressure stalls */ - struct psi_group psi; + struct psi_group *psi; /* used to store eBPF programs */ struct cgroup_bpf bpf; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 0d1ada8968d7..ed53bfe7c46c 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -674,7 +674,7 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) static inline struct psi_group *cgroup_psi(struct cgroup *cgrp) { - return &cgrp->psi; + return cgrp->psi; } bool cgroup_psi_enabled(void); diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index d08dfcb0ac68..4f2a819fd60a 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -24,6 +24,7 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } /* context/locking */ # define __must_hold(x) __attribute__((context(x,1,1))) # define __acquires(x) __attribute__((context(x,0,1))) +# define __cond_acquires(x) __attribute__((context(x,0,-1))) # define __releases(x) __attribute__((context(x,1,0))) # define __acquire(x) __context__(x,1) # define __release(x) __context__(x,-1) @@ -50,6 +51,7 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } /* context/locking */ # define __must_hold(x) # define __acquires(x) +# define __cond_acquires(x) # define __releases(x) # define __acquire(x) (void)0 # define __release(x) (void)0 diff --git a/include/linux/console.h b/include/linux/console.h index 143653090c48..8c1686e2c233 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -16,7 +16,6 @@ #include <linux/atomic.h> #include <linux/types.h> -#include <linux/mutex.h> struct vc_data; struct console_font_op; @@ -154,22 +153,6 @@ struct console { uint ospeed; u64 seq; unsigned long dropped; - struct task_struct *thread; - bool blocked; - - /* - * The per-console lock is used by printing kthreads to synchronize - * this console with callers of console_lock(). This is necessary in - * order to allow printing kthreads to run in parallel to each other, - * while each safely accessing the @blocked field and synchronizing - * against direct printing via console_lock/console_unlock. - * - * Note: For synchronizing against direct printing via - * console_trylock/console_unlock, see the static global - * variable @console_kthreads_active. 
- */ - struct mutex lock; - void *data; struct console *next; }; diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index 7a14807c9d1a..dcef4a9e4d63 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -10,71 +10,72 @@ #include <asm/ptrace.h> -#ifdef CONFIG_CONTEXT_TRACKING -extern void context_tracking_cpu_set(int cpu); +#ifdef CONFIG_CONTEXT_TRACKING_USER +extern void ct_cpu_track_user(int cpu); /* Called with interrupts disabled. */ -extern void __context_tracking_enter(enum ctx_state state); -extern void __context_tracking_exit(enum ctx_state state); +extern void __ct_user_enter(enum ctx_state state); +extern void __ct_user_exit(enum ctx_state state); -extern void context_tracking_enter(enum ctx_state state); -extern void context_tracking_exit(enum ctx_state state); -extern void context_tracking_user_enter(void); -extern void context_tracking_user_exit(void); +extern void ct_user_enter(enum ctx_state state); +extern void ct_user_exit(enum ctx_state state); + +extern void user_enter_callable(void); +extern void user_exit_callable(void); static inline void user_enter(void) { if (context_tracking_enabled()) - context_tracking_enter(CONTEXT_USER); + ct_user_enter(CONTEXT_USER); } static inline void user_exit(void) { if (context_tracking_enabled()) - context_tracking_exit(CONTEXT_USER); + ct_user_exit(CONTEXT_USER); } /* Called with interrupts disabled. */ static __always_inline void user_enter_irqoff(void) { if (context_tracking_enabled()) - __context_tracking_enter(CONTEXT_USER); + __ct_user_enter(CONTEXT_USER); } static __always_inline void user_exit_irqoff(void) { if (context_tracking_enabled()) - __context_tracking_exit(CONTEXT_USER); + __ct_user_exit(CONTEXT_USER); } static inline enum ctx_state exception_enter(void) { enum ctx_state prev_ctx; - if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) || + if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) || !context_tracking_enabled()) return 0; - prev_ctx = this_cpu_read(context_tracking.state); + prev_ctx = __ct_state(); if (prev_ctx != CONTEXT_KERNEL) - context_tracking_exit(prev_ctx); + ct_user_exit(prev_ctx); return prev_ctx; } static inline void exception_exit(enum ctx_state prev_ctx) { - if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) && + if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) && context_tracking_enabled()) { if (prev_ctx != CONTEXT_KERNEL) - context_tracking_enter(prev_ctx); + ct_user_enter(prev_ctx); } } static __always_inline bool context_tracking_guest_enter(void) { if (context_tracking_enabled()) - __context_tracking_enter(CONTEXT_GUEST); + __ct_user_enter(CONTEXT_GUEST); return context_tracking_enabled_this_cpu(); } @@ -82,40 +83,56 @@ static __always_inline bool context_tracking_guest_enter(void) static __always_inline void context_tracking_guest_exit(void) { if (context_tracking_enabled()) - __context_tracking_exit(CONTEXT_GUEST); + __ct_user_exit(CONTEXT_GUEST); } -/** - * ct_state() - return the current context tracking state if known - * - * Returns the current cpu's context tracking state if context tracking - * is enabled. If context tracking is disabled, returns - * CONTEXT_DISABLED. This should be used primarily for debugging. - */ -static __always_inline enum ctx_state ct_state(void) -{ - return context_tracking_enabled() ? 
- this_cpu_read(context_tracking.state) : CONTEXT_DISABLED; -} +#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond)) + #else static inline void user_enter(void) { } static inline void user_exit(void) { } static inline void user_enter_irqoff(void) { } static inline void user_exit_irqoff(void) { } -static inline enum ctx_state exception_enter(void) { return 0; } +static inline int exception_enter(void) { return 0; } static inline void exception_exit(enum ctx_state prev_ctx) { } -static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; } +static inline int ct_state(void) { return -1; } static __always_inline bool context_tracking_guest_enter(void) { return false; } static inline void context_tracking_guest_exit(void) { } +#define CT_WARN_ON(cond) do { } while (0) +#endif /* !CONFIG_CONTEXT_TRACKING_USER */ -#endif /* !CONFIG_CONTEXT_TRACKING */ - -#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond)) - -#ifdef CONFIG_CONTEXT_TRACKING_FORCE +#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE extern void context_tracking_init(void); #else static inline void context_tracking_init(void) { } -#endif /* CONFIG_CONTEXT_TRACKING_FORCE */ +#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */ + +#ifdef CONFIG_CONTEXT_TRACKING_IDLE +extern void ct_idle_enter(void); +extern void ct_idle_exit(void); + +/* + * Is the current CPU in an extended quiescent state? + * + * No ordering, as we are sampling CPU-local information. + */ +static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void) +{ + return !(arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX); +} + +/* + * Increment the current CPU's context_tracking structure's ->state field + * with ordering. Return the new value. + */ +static __always_inline unsigned long ct_state_inc(int incby) +{ + return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state)); +} + +#else +static inline void ct_idle_enter(void) { } +static inline void ct_idle_exit(void) { } +#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */ #endif diff --git a/include/linux/context_tracking_irq.h b/include/linux/context_tracking_irq.h new file mode 100644 index 000000000000..c50b5670c4a5 --- /dev/null +++ b/include/linux/context_tracking_irq.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CONTEXT_TRACKING_IRQ_H +#define _LINUX_CONTEXT_TRACKING_IRQ_H + +#ifdef CONFIG_CONTEXT_TRACKING_IDLE +void ct_irq_enter(void); +void ct_irq_exit(void); +void ct_irq_enter_irqson(void); +void ct_irq_exit_irqson(void); +void ct_nmi_enter(void); +void ct_nmi_exit(void); +#else +static inline void ct_irq_enter(void) { } +static inline void ct_irq_exit(void) { } +static inline void ct_irq_enter_irqson(void) { } +static inline void ct_irq_exit_irqson(void) { } +static inline void ct_nmi_enter(void) { } +static inline void ct_nmi_exit(void) { } +#endif + +#endif diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h index ae1e63e26947..4a4d56f77180 100644 --- a/include/linux/context_tracking_state.h +++ b/include/linux/context_tracking_state.h @@ -4,8 +4,28 @@ #include <linux/percpu.h> #include <linux/static_key.h> +#include <linux/context_tracking_irq.h> + +/* Offset to allow distinguishing irq vs. task-based idle entry/exit. 
*/ +#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1) + +enum ctx_state { + CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */ + CONTEXT_KERNEL = 0, + CONTEXT_IDLE = 1, + CONTEXT_USER = 2, + CONTEXT_GUEST = 3, + CONTEXT_MAX = 4, +}; + +/* Even value for idle, else odd. */ +#define RCU_DYNTICKS_IDX CONTEXT_MAX + +#define CT_STATE_MASK (CONTEXT_MAX - 1) +#define CT_DYNTICKS_MASK (~CT_STATE_MASK) struct context_tracking { +#ifdef CONFIG_CONTEXT_TRACKING_USER /* * When active is false, probes are unset in order * to minimize overhead: TIF flags are cleared @@ -14,18 +34,73 @@ struct context_tracking { */ bool active; int recursion; - enum ctx_state { - CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */ - CONTEXT_KERNEL = 0, - CONTEXT_USER, - CONTEXT_GUEST, - } state; +#endif +#ifdef CONFIG_CONTEXT_TRACKING + atomic_t state; +#endif +#ifdef CONFIG_CONTEXT_TRACKING_IDLE + long dynticks_nesting; /* Track process nesting level. */ + long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */ +#endif }; #ifdef CONFIG_CONTEXT_TRACKING -extern struct static_key_false context_tracking_key; DECLARE_PER_CPU(struct context_tracking, context_tracking); +static __always_inline int __ct_state(void) +{ + return arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK; +} +#endif + +#ifdef CONFIG_CONTEXT_TRACKING_IDLE +static __always_inline int ct_dynticks(void) +{ + return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_DYNTICKS_MASK; +} + +static __always_inline int ct_dynticks_cpu(int cpu) +{ + struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); + + return atomic_read(&ct->state) & CT_DYNTICKS_MASK; +} + +static __always_inline int ct_dynticks_cpu_acquire(int cpu) +{ + struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); + + return atomic_read_acquire(&ct->state) & CT_DYNTICKS_MASK; +} + +static __always_inline long ct_dynticks_nesting(void) +{ + return __this_cpu_read(context_tracking.dynticks_nesting); +} + +static __always_inline long ct_dynticks_nesting_cpu(int cpu) +{ + struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); + + return ct->dynticks_nesting; +} + +static __always_inline long ct_dynticks_nmi_nesting(void) +{ + return __this_cpu_read(context_tracking.dynticks_nmi_nesting); +} + +static __always_inline long ct_dynticks_nmi_nesting_cpu(int cpu) +{ + struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); + + return ct->dynticks_nmi_nesting; +} +#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */ + +#ifdef CONFIG_CONTEXT_TRACKING_USER +extern struct static_key_false context_tracking_key; + static __always_inline bool context_tracking_enabled(void) { return static_branch_unlikely(&context_tracking_key); @@ -41,15 +116,31 @@ static inline bool context_tracking_enabled_this_cpu(void) return context_tracking_enabled() && __this_cpu_read(context_tracking.active); } -static __always_inline bool context_tracking_in_user(void) +/** + * ct_state() - return the current context tracking state if known + * + * Returns the current cpu's context tracking state if context tracking + * is enabled. If context tracking is disabled, returns + * CONTEXT_DISABLED. This should be used primarily for debugging. 
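+ *
+ * A typical debug-only use, as a sketch, pairs it with the CT_WARN_ON()
+ * helper from <linux/context_tracking.h> above:
+ *
+ *	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);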
+ */ +static __always_inline int ct_state(void) { - return __this_cpu_read(context_tracking.state) == CONTEXT_USER; + int ret; + + if (!context_tracking_enabled()) + return CONTEXT_DISABLED; + + preempt_disable(); + ret = __ct_state(); + preempt_enable(); + + return ret; } + #else -static __always_inline bool context_tracking_in_user(void) { return false; } static __always_inline bool context_tracking_enabled(void) { return false; } static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; } static __always_inline bool context_tracking_enabled_this_cpu(void) { return false; } -#endif /* CONFIG_CONTEXT_TRACKING */ +#endif /* CONFIG_CONTEXT_TRACKING_USER */ #endif diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 54dc2f9a2d56..314802f98b9d 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -65,6 +65,11 @@ extern ssize_t cpu_show_tsx_async_abort(struct device *dev, extern ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_mmio_stale_data(struct device *dev, + struct device_attribute *attr, + char *buf); +extern ssize_t cpu_show_retbleed(struct device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 19f0dbfdd7fe..f61447913db9 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -130,7 +130,6 @@ enum cpuhp_state { CPUHP_ZCOMP_PREPARE, CPUHP_TIMERS_PREPARE, CPUHP_MIPS_SOC_PREPARE, - CPUHP_LOONGARCH_SOC_PREPARE, CPUHP_BP_PREPARE_DYN, CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20, CPUHP_BRINGUP_CPU, @@ -151,6 +150,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING, CPUHP_AP_IRQ_RISCV_STARTING, + CPUHP_AP_IRQ_LOONGARCH_STARTING, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_MICROCODE_LOADER, @@ -230,6 +230,7 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_HISI_PA_ONLINE, CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE, CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, + CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, CPUHP_AP_PERF_ARM_L2X0_ONLINE, CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h index a4367051e192..2f991a427ade 100644 --- a/include/linux/crc-itu-t.h +++ b/include/linux/crc-itu-t.h @@ -4,7 +4,7 @@ * * Implements the standard CRC ITU-T V.41: * Width 16 - * Poly 0x1021 (x^16 + x^12 + x^15 + 1) + * Poly 0x1021 (x^16 + x^12 + x^5 + 1) * Init 0 */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index f5bba51480b2..c73e5e327e76 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -349,7 +349,7 @@ static inline void dont_mount(struct dentry *dentry) spin_unlock(&dentry->d_lock); } -extern void __d_lookup_done(struct dentry *); +extern void __d_lookup_unhash_wake(struct dentry *dentry); static inline int d_in_lookup(const struct dentry *dentry) { @@ -358,11 +358,8 @@ static inline int d_in_lookup(const struct dentry *dentry) static inline void d_lookup_done(struct dentry *dentry) { - if (unlikely(d_in_lookup(dentry))) { - spin_lock(&dentry->d_lock); - __d_lookup_done(dentry); - spin_unlock(&dentry->d_lock); - } + if (unlikely(d_in_lookup(dentry))) + __d_lookup_unhash_wake(dentry); } extern void dput(struct dentry *); diff --git a/include/linux/devfreq.h 
b/include/linux/devfreq.h index dc10bee75a72..34aab4dd336c 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -148,6 +148,8 @@ struct devfreq_stats { * reevaluate operable frequencies. Devfreq users may use * devfreq.nb to register to the corresponding notifier call chain. * @work: delayed work for load monitoring. + * @freq_table: current frequency table used by the devfreq driver. + * @max_state: count of entries present in the frequency table. * @previous_freq: previously configured frequency value. * @last_status: devfreq user device info, performance statistics * @data: Private data of the governor. The devfreq framework does not @@ -185,6 +187,9 @@ struct devfreq { struct notifier_block nb; struct delayed_work work; + unsigned long *freq_table; + unsigned int max_state; + unsigned long previous_freq; struct devfreq_dev_status last_status; diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 47a01c7cffdf..04c6acf7faaa 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -373,6 +373,12 @@ struct dm_target { * after returning DM_MAPIO_SUBMITTED from its map function. */ bool accounts_remapped_io:1; + + /* + * Set if the target will submit the DM bio without first calling + * bio_set_dev(). NOTE: ideally a target should _not_ need this. + */ + bool needs_bio_set_dev:1; }; void *dm_per_bio_data(struct bio *bio, size_t data_size); @@ -561,7 +567,6 @@ void dm_sync_table(struct mapped_device *md); * Queries */ sector_t dm_table_get_size(struct dm_table *t); -unsigned int dm_table_get_num_targets(struct dm_table *t); fmode_t dm_table_get_mode(struct dm_table *t); struct mapped_device *dm_table_get_md(struct dm_table *t); const char *dm_table_device_name(struct dm_table *t); diff --git a/include/linux/dim.h b/include/linux/dim.h index b698266d0035..6c5733981563 100644 --- a/include/linux/dim.h +++ b/include/linux/dim.h @@ -21,7 +21,7 @@ * We consider 10% difference as significant. */ #define IS_SIGNIFICANT_DIFF(val, ref) \ - (((100UL * abs((val) - (ref))) / (ref)) > 10) + ((ref) && (((100UL * abs((val) - (ref))) / (ref)) > 10)) /* * Calculate the gap between two values.
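The added (ref) check above makes IS_SIGNIFICANT_DIFF() evaluate to false
instead of dividing by zero when the reference sample is 0. A few
illustrative evaluations (sketch):

	IS_SIGNIFICANT_DIFF(100, 0);	/* false: ref == 0, division skipped */
	IS_SIGNIFICANT_DIFF(111, 100);	/* true:  11% difference > 10% */
	IS_SIGNIFICANT_DIFF(105, 100);	/* false: 5% is not significant */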
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h index a52c6580cc9a..8e1c4ab5df04 100644 --- a/include/linux/dm-io.h +++ b/include/linux/dm-io.h @@ -13,6 +13,7 @@ #ifdef __KERNEL__ #include <linux/types.h> +#include <linux/blk_types.h> struct dm_io_region { struct block_device *bdev; @@ -57,8 +58,7 @@ struct dm_io_notify { */ struct dm_io_client; struct dm_io_request { - int bi_op; /* REQ_OP */ - int bi_op_flags; /* req_flag_bits */ + blk_opf_t bi_opf; /* Request type and flags */ struct dm_io_memory mem; /* Memory to use for io */ struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */ struct dm_io_client *client; /* Client memory handler */ diff --git a/include/linux/dm-verity-loadpin.h b/include/linux/dm-verity-loadpin.h new file mode 100644 index 000000000000..552b817ab102 --- /dev/null +++ b/include/linux/dm-verity-loadpin.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_DM_VERITY_LOADPIN_H +#define __LINUX_DM_VERITY_LOADPIN_H + +#include <linux/list.h> + +struct block_device; + +extern struct list_head dm_verity_loadpin_trusted_root_digests; + +struct dm_verity_loadpin_trusted_root_digest { + struct list_head node; + unsigned int len; + u8 data[]; +}; + +#if IS_ENABLED(CONFIG_SECURITY_LOADPIN_VERITY) +bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev); +#else +static inline bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev) +{ + return false; +} +#endif + +#endif /* __LINUX_DM_VERITY_LOADPIN_H */ diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 8419bffb4398..b9caa01dfac4 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -62,7 +62,7 @@ struct em_perf_domain { /* * em_perf_domain flags: * - * EM_PERF_DOMAIN_MILLIWATTS: The power values are in milli-Watts or some + * EM_PERF_DOMAIN_MICROWATTS: The power values are in micro-Watts or some * other scale. * * EM_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip inefficient states when estimating @@ -71,7 +71,7 @@ struct em_perf_domain { * EM_PERF_DOMAIN_ARTIFICIAL: The power values are artificial and might be * created by platform missing real power information */ -#define EM_PERF_DOMAIN_MILLIWATTS BIT(0) +#define EM_PERF_DOMAIN_MICROWATTS BIT(0) #define EM_PERF_DOMAIN_SKIP_INEFFICIENCIES BIT(1) #define EM_PERF_DOMAIN_ARTIFICIAL BIT(2) @@ -79,22 +79,44 @@ #define em_is_artificial(em) ((em)->flags & EM_PERF_DOMAIN_ARTIFICIAL) #ifdef CONFIG_ENERGY_MODEL -#define EM_MAX_POWER 0xFFFF +/* + * The max power value in micro-Watts. The limit of 64 Watts is set as + * a safety net to avoid overflowing multiplications on 32bit platforms. The + * 32bit value limit for total Perf Domain power caps the number of CPUs in + * such a domain at 64. + */ +#define EM_MAX_POWER (64000000) /* 64 Watts */ + +/* + * To avoid possible energy estimation overflow on 32bit machines, add + * limits to the number of CPUs in the Perf. Domain. + * We are safe on 64bit machines, thus the limit can be some big number. + */ +#ifdef CONFIG_64BIT +#define EM_MAX_NUM_CPUS 4096 +#else +#define EM_MAX_NUM_CPUS 16 +#endif /* - * Increase resolution of energy estimation calculations for 64-bit - * architectures. The extra resolution improves decision made by EAS for the - * task placement when two Performance Domains might provide similar energy - * estimation values (w/o better resolution the values could be equal). + * To avoid an overflow on 32bit machines while calculating the energy, + * use a different order of operations.
First divide by the 'cpu_scale', + which reduces the big value stored in the 'cost' field, then multiply by + the 'sum_util'. This allows handling existing platforms, which have + e.g. power of ~1.3 Watts at max freq, so the 'cost' value is > 1 million micro-Watts. + In such a scenario, where there are 4 CPUs in the Perf. Domain, the 'sum_util' + could be 4096, and the multiplication 'cost' * 'sum_util' would overflow. + This reordering of operations has some limitations: we lose a little + precision in the estimation (compared to a 64bit platform w/o reordering). * - * We increase resolution only if we have enough bits to allow this increased - * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit - * are pretty high and the returns do not justify the increased costs. + * We are safe on 64bit machines. */ #ifdef CONFIG_64BIT -#define em_scale_power(p) ((p) * 1000) +#define em_estimate_energy(cost, sum_util, scale_cpu) \ + (((cost) * (sum_util)) / (scale_cpu)) #else -#define em_scale_power(p) (p) +#define em_estimate_energy(cost, sum_util, scale_cpu) \ + (((cost) / (scale_cpu)) * (sum_util)) #endif struct em_data_callback { @@ -112,7 +134,7 @@ * and frequency. * * In case of CPUs, the power is the one of a single CPU in the domain, - * expressed in milli-Watts or an abstract scale. It is expected to + * expressed in micro-Watts or an abstract scale. It is expected to * fit in the [0, EM_MAX_POWER] range. * * Return 0 on success. @@ -148,7 +170,7 @@ struct em_perf_domain *em_cpu_get(int cpu); struct em_perf_domain *em_pd_get(struct device *dev); int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, struct em_data_callback *cb, cpumask_t *span, - bool milliwatts); + bool microwatts); void em_dev_unregister_perf_domain(struct device *dev); /** @@ -273,7 +295,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, * pd_nrg = ------------------------ (4) * scale_cpu */ - return ps->cost * sum_util / scale_cpu; + return em_estimate_energy(ps->cost, sum_util, scale_cpu); } /** @@ -297,7 +319,7 @@ struct em_data_callback {}; static inline int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, struct em_data_callback *cb, cpumask_t *span, - bool milliwatts) + bool microwatts) { return -EINVAL; } diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index c92ac75d6556..84a466b176cf 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -357,7 +357,7 @@ void irqentry_exit_to_user_mode(struct pt_regs *regs); /** * struct irqentry_state - Opaque object for exception state storage * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the - * exit path has to invoke rcu_irq_exit(). + * exit path has to invoke ct_irq_exit(). * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that * lockdep state is restored correctly on exit from nmi. * @@ -395,12 +395,12 @@ typedef struct irqentry_state { * * For kernel mode entries RCU handling is done conditionally. If RCU is * watching then the only RCU requirement is to check whether the tick has - * to be restarted. If RCU is not watching then rcu_irq_enter() has to be - * invoked on entry and rcu_irq_exit() on exit. + * to be restarted. If RCU is not watching then ct_irq_enter() has to be + * invoked on entry and ct_irq_exit() on exit.
* - * Avoiding the rcu_irq_enter/exit() calls is an optimization but also + * Avoiding the ct_irq_enter/exit() calls is an optimization but also * solves the problem of kernel mode pagefaults which can schedule, which - * is not possible after invoking rcu_irq_enter() without undoing it. + * is not possible after invoking ct_irq_enter() without undoing it. * * For user mode entries irqentry_enter_from_user_mode() is invoked to * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit diff --git a/include/linux/evm.h b/include/linux/evm.h index 4c374be70247..aa63e0b3c0a2 100644 --- a/include/linux/evm.h +++ b/include/linux/evm.h @@ -21,7 +21,8 @@ extern enum integrity_status evm_verifyxattr(struct dentry *dentry, void *xattr_value, size_t xattr_value_len, struct integrity_iint_cache *iint); -extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr); +extern int evm_inode_setattr(struct user_namespace *mnt_userns, + struct dentry *dentry, struct iattr *attr); extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid); extern int evm_inode_setxattr(struct user_namespace *mnt_userns, struct dentry *dentry, const char *name, @@ -68,7 +69,8 @@ static inline enum integrity_status evm_verifyxattr(struct dentry *dentry, } #endif -static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr) +static inline int evm_inode_setattr(struct user_namespace *mnt_userns, + struct dentry *dentry, struct iattr *attr) { return 0; } diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index edc28555814c..8ad743def6f3 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -59,15 +59,19 @@ #define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \ FAN_MARK_FILESYSTEM) +#define FANOTIFY_MARK_CMD_BITS (FAN_MARK_ADD | FAN_MARK_REMOVE | \ + FAN_MARK_FLUSH) + +#define FANOTIFY_MARK_IGNORE_BITS (FAN_MARK_IGNORED_MASK | \ + FAN_MARK_IGNORE) + #define FANOTIFY_MARK_FLAGS (FANOTIFY_MARK_TYPE_BITS | \ - FAN_MARK_ADD | \ - FAN_MARK_REMOVE | \ + FANOTIFY_MARK_CMD_BITS | \ + FANOTIFY_MARK_IGNORE_BITS | \ FAN_MARK_DONT_FOLLOW | \ FAN_MARK_ONLYDIR | \ - FAN_MARK_IGNORED_MASK | \ FAN_MARK_IGNORED_SURV_MODIFY | \ - FAN_MARK_EVICTABLE | \ - FAN_MARK_FLUSH) + FAN_MARK_EVICTABLE) /* * Events that can be reported with data type FSNOTIFY_EVENT_PATH. 
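With FAN_MARK_ADD, FAN_MARK_REMOVE and FAN_MARK_FLUSH grouped into
FANOTIFY_MARK_CMD_BITS above, a caller can require exactly one command per
fanotify_mark() call; a hypothetical validation helper (not part of this
patch):

	static inline bool fanotify_mark_cmd_valid(unsigned int flags)
	{
		/* exactly one of ADD/REMOVE/FLUSH may be set */
		return hweight32(flags & FANOTIFY_MARK_CMD_BITS) == 1;
	}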
@@ -111,6 +115,10 @@ FANOTIFY_PERM_EVENTS | \ FAN_Q_OVERFLOW | FAN_ONDIR) +/* Events and flags relevant only for directories */ +#define FANOTIFY_DIRONLY_EVENT_BITS (FANOTIFY_DIRENT_EVENTS | \ + FAN_EVENT_ON_CHILD | FAN_ONDIR) + #define ALL_FANOTIFY_EVENT_BITS (FANOTIFY_OUTGOING_EVENTS | \ FANOTIFY_EVENT_FLAGS) diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h index ff5596dd30f8..2382dec6d6ab 100644 --- a/include/linux/fbcon.h +++ b/include/linux/fbcon.h @@ -15,6 +15,8 @@ void fbcon_new_modelist(struct fb_info *info); void fbcon_get_requirement(struct fb_info *info, struct fb_blit_caps *caps); void fbcon_fb_blanked(struct fb_info *info, int blank); +int fbcon_modechange_possible(struct fb_info *info, + struct fb_var_screeninfo *var); void fbcon_update_vcs(struct fb_info *info, bool all); void fbcon_remap_all(struct fb_info *info); int fbcon_set_con2fb_map_ioctl(void __user *argp); @@ -33,6 +35,8 @@ static inline void fbcon_new_modelist(struct fb_info *info) {} static inline void fbcon_get_requirement(struct fb_info *info, struct fb_blit_caps *caps) {} static inline void fbcon_fb_blanked(struct fb_info *info, int blank) {} +static inline int fbcon_modechange_possible(struct fb_info *info, + struct fb_var_screeninfo *var) { return 0; } static inline void fbcon_update_vcs(struct fb_info *info, bool all) {} static inline void fbcon_remap_all(struct fb_info *info) {} static inline int fbcon_set_con2fb_map_ioctl(void __user *argp) { return 0; } diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 1ec73d5352c3..cbde3b1fa414 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -34,6 +34,7 @@ #define PM_API_VERSION_2 2 /* ATF only commands */ +#define TF_A_PM_REGISTER_SGI 0xa04 #define PM_GET_TRUSTZONE_VERSION 0xa03 #define PM_SET_SUSPEND_MODE 0xa02 #define GET_CALLBACK_DATA 0xa01 @@ -468,6 +469,7 @@ int zynqmp_pm_feature(const u32 api_id); int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id); int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value); int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload); +int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset); #else static inline int zynqmp_pm_get_api_version(u32 *version) { @@ -733,6 +735,11 @@ static inline int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, { return -ENODEV; } + +static inline int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset) +{ + return -ENODEV; +} #endif #endif /* __FIRMWARE_ZYNQMP_H__ */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 9ad5e3520fae..cc64873d76c5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -180,6 +180,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* File supports async buffered reads */ #define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000) +/* File supports async nowait buffered writes */ +#define FMODE_BUF_WASYNC ((__force fmode_t)0x80000000) + /* * Attribute flags. These should be or-ed together to figure out what * has been changed! @@ -221,8 +224,26 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, struct iattr { unsigned int ia_valid; umode_t ia_mode; - kuid_t ia_uid; - kgid_t ia_gid; + /* + * The two anonymous unions wrap structures with the same member. + * + * Filesystems raising FS_ALLOW_IDMAP need to use ia_vfs{g,u}id which + * are a dedicated type requiring the filesystem to use the dedicated + * helpers. 
Other filesystems can continue to use ia_{g,u}id until they + * have been ported. + * + * They always contain the same value. In other words, filesystems raising + * FS_ALLOW_IDMAP pass down the same value on idmapped mounts as they would + * on regular mounts. + */ + union { + kuid_t ia_uid; + vfsuid_t ia_vfsuid; + }; + union { + kgid_t ia_gid; + vfsgid_t ia_vfsgid; + }; loff_t ia_size; struct timespec64 ia_atime; struct timespec64 ia_mtime; @@ -362,13 +383,11 @@ struct address_space_operations { void (*free_folio)(struct folio *folio); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); /* - * migrate the contents of a page to the specified target. If + * migrate the contents of a folio to the specified target. If * migrate_mode is MIGRATE_ASYNC, it must not block. */ - int (*migratepage) (struct address_space *, - struct page *, struct page *, enum migrate_mode); - bool (*isolate_page)(struct page *, isolate_mode_t); - void (*putback_page)(struct page *); + int (*migrate_folio)(struct address_space *, struct folio *dst, + struct folio *src, enum migrate_mode); int (*launder_folio)(struct folio *); bool (*is_partially_uptodate) (struct folio *, size_t from, size_t count); @@ -924,9 +943,10 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) struct file { union { - struct llist_node fu_llist; - struct rcu_head fu_rcuhead; - } f_u; + struct llist_node f_llist; + struct rcu_head f_rcuhead; + unsigned int f_iocb_flags; + }; struct path f_path; struct inode *f_inode; /* cached value */ const struct file_operations *f_op; @@ -1600,13 +1620,68 @@ static inline void i_gid_write(struct inode *inode, gid_t gid) * @mnt_userns: user namespace of the mount the inode was found from * @inode: inode to map * + * Note, this will eventually be removed completely in favor of the type-safe + * i_uid_into_vfsuid(). + * * Return: the inode's i_uid mapped down according to @mnt_userns. * If the inode's i_uid has no mapping INVALID_UID is returned. */ static inline kuid_t i_uid_into_mnt(struct user_namespace *mnt_userns, const struct inode *inode) { - return mapped_kuid_fs(mnt_userns, i_user_ns(inode), inode->i_uid); + return AS_KUIDT(make_vfsuid(mnt_userns, i_user_ns(inode), inode->i_uid)); +} + +/** + * i_uid_into_vfsuid - map an inode's i_uid down into a mnt_userns + * @mnt_userns: user namespace of the mount the inode was found from + * @inode: inode to map + * + * Return: the inode's i_uid mapped down according to @mnt_userns. + * If the inode's i_uid has no mapping INVALID_VFSUID is returned. + */ +static inline vfsuid_t i_uid_into_vfsuid(struct user_namespace *mnt_userns, + const struct inode *inode) +{ + return make_vfsuid(mnt_userns, i_user_ns(inode), inode->i_uid); +} + +/** + * i_uid_needs_update - check whether inode's i_uid needs to be updated + * @mnt_userns: user namespace of the mount the inode was found from + * @attr: the new attributes of @inode + * @inode: the inode to update + * + * Check whether the @inode's i_uid field needs to be updated taking idmapped + * mounts into account if the filesystem supports it. + * + * Return: true if @inode's i_uid field needs to be updated, false if not.
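+ *
+ * Caller sketch (illustrative only), pairing the check with the
+ * i_uid_update() helper defined below:
+ *
+ *	if (i_uid_needs_update(mnt_userns, attr, inode))
+ *		i_uid_update(mnt_userns, attr, inode);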
+ */ +static inline bool i_uid_needs_update(struct user_namespace *mnt_userns, + const struct iattr *attr, + const struct inode *inode) +{ + return ((attr->ia_valid & ATTR_UID) && + !vfsuid_eq(attr->ia_vfsuid, + i_uid_into_vfsuid(mnt_userns, inode))); +} + +/** + * i_uid_update - update @inode's i_uid field + * @mnt_userns: user namespace of the mount the inode was found from + * @attr: the new attributes of @inode + * @inode: the inode to update + * + * Safely update @inode's i_uid field translating the vfsuid of any idmapped + * mount into the filesystem kuid. + */ +static inline void i_uid_update(struct user_namespace *mnt_userns, + const struct iattr *attr, + struct inode *inode) +{ + if (attr->ia_valid & ATTR_UID) + inode->i_uid = from_vfsuid(mnt_userns, i_user_ns(inode), + attr->ia_vfsuid); } /** @@ -1614,13 +1689,68 @@ static inline kuid_t i_uid_into_mnt(struct user_namespace *mnt_userns, * @mnt_userns: user namespace of the mount the inode was found from * @inode: inode to map * + * Note, this will eventually be removed completely in favor of the type-safe + * i_gid_into_vfsgid(). + * * Return: the inode's i_gid mapped down according to @mnt_userns. * If the inode's i_gid has no mapping INVALID_GID is returned. */ static inline kgid_t i_gid_into_mnt(struct user_namespace *mnt_userns, const struct inode *inode) { - return mapped_kgid_fs(mnt_userns, i_user_ns(inode), inode->i_gid); + return AS_KGIDT(make_vfsgid(mnt_userns, i_user_ns(inode), inode->i_gid)); +} + +/** + * i_gid_into_vfsgid - map an inode's i_gid down into a mnt_userns + * @mnt_userns: user namespace of the mount the inode was found from + * @inode: inode to map + * + * Return: the inode's i_gid mapped down according to @mnt_userns. + * If the inode's i_gid has no mapping INVALID_VFSGID is returned. + */ +static inline vfsgid_t i_gid_into_vfsgid(struct user_namespace *mnt_userns, + const struct inode *inode) +{ + return make_vfsgid(mnt_userns, i_user_ns(inode), inode->i_gid); +} + +/** + * i_gid_needs_update - check whether inode's i_gid needs to be updated + * @mnt_userns: user namespace of the mount the inode was found from + * @attr: the new attributes of @inode + * @inode: the inode to update + * + * Check whether the @inode's i_gid field needs to be updated taking idmapped + * mounts into account if the filesystem supports it. + * + * Return: true if @inode's i_gid field needs to be updated, false if not. + */ +static inline bool i_gid_needs_update(struct user_namespace *mnt_userns, + const struct iattr *attr, + const struct inode *inode) +{ + return ((attr->ia_valid & ATTR_GID) && + !vfsgid_eq(attr->ia_vfsgid, + i_gid_into_vfsgid(mnt_userns, inode))); } + +/** + * i_gid_update - update @inode's i_gid field + * @mnt_userns: user namespace of the mount the inode was found from + * @attr: the new attributes of @inode + * @inode: the inode to update + * + * Safely update @inode's i_gid field translating the vfsgid of any idmapped + * mount into the filesystem kgid.
+ */ +static inline void i_gid_update(struct user_namespace *mnt_userns, + const struct iattr *attr, + struct inode *inode) +{ + if (attr->ia_valid & ATTR_GID) + inode->i_gid = from_vfsgid(mnt_userns, i_user_ns(inode), + attr->ia_vfsgid); } /** @@ -2195,17 +2325,15 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags static inline bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns, struct inode *inode) { - return !uid_valid(i_uid_into_mnt(mnt_userns, inode)) || - !gid_valid(i_gid_into_mnt(mnt_userns, inode)); + return !vfsuid_valid(i_uid_into_vfsuid(mnt_userns, inode)) || + !vfsgid_valid(i_gid_into_vfsgid(mnt_userns, inode)); } -static inline int iocb_flags(struct file *file); - static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) { *kiocb = (struct kiocb) { .ki_filp = filp, - .ki_flags = iocb_flags(filp), + .ki_flags = filp->f_iocb_flags, .ki_ioprio = get_current_ioprio(), }; } @@ -2387,6 +2515,7 @@ static inline void file_accessed(struct file *file) } extern int file_modified(struct file *file); +int kiocb_modified(struct kiocb *iocb); int sync_inode_metadata(struct inode *inode, int wait); @@ -2720,6 +2849,12 @@ extern int vfs_fsync(struct file *file, int datasync); extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes, unsigned int flags); +static inline bool iocb_is_dsync(const struct kiocb *iocb) +{ + return (iocb->ki_flags & IOCB_DSYNC) || + IS_SYNC(iocb->ki_filp->f_mapping->host); +} + /* * Sync the bytes written if this was a synchronous write. Expect ki_pos * to already be updated for the write, and will return either the amount @@ -2727,7 +2862,7 @@ extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes, */ static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count) { - if (iocb->ki_flags & IOCB_DSYNC) { + if (iocb_is_dsync(iocb)) { int ret = vfs_fsync_range(iocb->ki_filp, iocb->ki_pos - count, iocb->ki_pos - 1, (iocb->ki_flags & IOCB_SYNC) ? 
0 : 1); @@ -3022,7 +3157,7 @@ extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, extern void file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); extern loff_t noop_llseek(struct file *file, loff_t offset, int whence); -extern loff_t no_llseek(struct file *file, loff_t offset, int whence); +#define no_llseek NULL extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize); extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence); extern loff_t generic_file_llseek_size(struct file *file, loff_t offset, @@ -3215,18 +3350,6 @@ extern int generic_check_addressable(unsigned, u64); extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry); -#ifdef CONFIG_MIGRATION -extern int buffer_migrate_page(struct address_space *, - struct page *, struct page *, - enum migrate_mode); -extern int buffer_migrate_page_norefs(struct address_space *, - struct page *, struct page *, - enum migrate_mode); -#else -#define buffer_migrate_page NULL -#define buffer_migrate_page_norefs NULL -#endif - int may_setattr(struct user_namespace *mnt_userns, struct inode *inode, unsigned int ia_valid); int setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *); @@ -3262,7 +3385,7 @@ static inline int iocb_flags(struct file *file) res |= IOCB_APPEND; if (file->f_flags & O_DIRECT) res |= IOCB_DIRECT; - if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host)) + if (file->f_flags & O_DSYNC) res |= IOCB_DSYNC; if (file->f_flags & __O_SYNC) res |= IOCB_SYNC; diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 72585c9729a2..720874e6ee94 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -130,6 +130,7 @@ struct fscache_cookie { #define FSCACHE_COOKIE_DO_PREP_TO_WRITE 12 /* T if cookie needs write preparation */ #define FSCACHE_COOKIE_HAVE_DATA 13 /* T if this cookie has data stored */ #define FSCACHE_COOKIE_IS_HASHED 14 /* T if this cookie is hashed */ +#define FSCACHE_COOKIE_DO_INVALIDATE 15 /* T if cookie needs invalidation */ enum fscache_cookie_state state; u8 advice; /* FSCACHE_ADV_* */ @@ -378,7 +379,7 @@ void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data, * * Request that the size of an object be changed. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. 
 */ static inline diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 9560734759fa..d7d96c806bff 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -518,8 +518,8 @@ struct fsnotify_mark { struct hlist_node obj_list; /* Head of list of marks for an object [mark ref] */ struct fsnotify_mark_connector *connector; - /* Events types to ignore [mark->lock, group->mark_mutex] */ - __u32 ignored_mask; + /* Event types and flags to ignore [mark->lock, group->mark_mutex] */ + __u32 ignore_mask; /* General fsnotify mark flags */ #define FSNOTIFY_MARK_FLAG_ALIVE 0x0001 #define FSNOTIFY_MARK_FLAG_ATTACHED 0x0002 @@ -529,6 +529,7 @@ struct fsnotify_mark { /* fanotify mark flags */ #define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x0100 #define FSNOTIFY_MARK_FLAG_NO_IREF 0x0200 +#define FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS 0x0400 unsigned int flags; /* flags [mark->lock] */ }; @@ -655,15 +656,91 @@ extern void fsnotify_remove_queued_event(struct fsnotify_group *group, /* functions used to manipulate the marks attached to inodes */ -/* Get mask for calculating object interest taking ignored mask into account */ +/* + * Canonical "ignore mask" including event flags. + * + * Note the subtle semantic difference from the legacy ->ignored_mask. + * ->ignored_mask traditionally only meant which events should be ignored, + * while ->ignore_mask also includes flags regarding the type of objects on + * which events should be ignored. + */ +static inline __u32 fsnotify_ignore_mask(struct fsnotify_mark *mark) +{ + __u32 ignore_mask = mark->ignore_mask; + + /* The event flags in ignore mask take effect */ + if (mark->flags & FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS) + return ignore_mask; + + /* + * Legacy behavior: + * - Always ignore events on dir + * - Ignore events on child if parent is watching children + */ + ignore_mask |= FS_ISDIR; + ignore_mask &= ~FS_EVENT_ON_CHILD; + ignore_mask |= mark->mask & FS_EVENT_ON_CHILD; + + return ignore_mask; +} + +/* Legacy ignored_mask - only event types to ignore */ +static inline __u32 fsnotify_ignored_events(struct fsnotify_mark *mark) +{ + return mark->ignore_mask & ALL_FSNOTIFY_EVENTS; +} + +/* + * Check if a mask (or ignore mask) should be applied, depending on whether + * the victim is a directory and whether it is reported to a watching parent. + */ +static inline bool fsnotify_mask_applicable(__u32 mask, bool is_dir, + int iter_type) +{ + /* Should mask be applied to a directory? */ + if (is_dir && !(mask & FS_ISDIR)) + return false; + + /* Should mask be applied to a child? */ + if (iter_type == FSNOTIFY_ITER_TYPE_PARENT && + !(mask & FS_EVENT_ON_CHILD)) + return false; + + return true; } + +/* + * Effective ignore mask, taking into account whether the event victim is a + * directory and whether it is reported to a watching parent.
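+ *
+ * Caller sketch (variable names illustrative), accumulating the events
+ * ignored by each mark while iterating an object's mark list:
+ *
+ *	ignored |= fsnotify_effective_ignore_mask(mark, is_dir, iter_type);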
+ */ +static inline __u32 fsnotify_effective_ignore_mask(struct fsnotify_mark *mark, + bool is_dir, int iter_type) +{ + __u32 ignore_mask = fsnotify_ignored_events(mark); + + if (!ignore_mask) + return 0; + + /* For non-dir and non-child, no need to consult the event flags */ + if (!is_dir && iter_type != FSNOTIFY_ITER_TYPE_PARENT) + return ignore_mask; + + ignore_mask = fsnotify_ignore_mask(mark); + if (!fsnotify_mask_applicable(ignore_mask, is_dir, iter_type)) + return 0; + + return ignore_mask & ALL_FSNOTIFY_EVENTS; +} + +/* Get mask for calculating object interest taking ignore mask into account */ static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark) { __u32 mask = mark->mask; - if (!mark->ignored_mask) + if (!fsnotify_ignored_events(mark)) return mask; - /* Interest in FS_MODIFY may be needed for clearing ignored mask */ + /* Interest in FS_MODIFY may be needed for clearing ignore mask */ if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) mask |= FS_MODIFY; @@ -671,7 +748,7 @@ static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark) * If mark is interested in ignoring events on children, the object must * show interest in those events for fsnotify_parent() to notice it. */ - return mask | (mark->ignored_mask & ALL_FSNOTIFY_EVENTS); + return mask | mark->ignore_mask; } /* Get mask of events for a list of marks */ diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 2d2ccae933c2..0ace7759acd2 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -348,7 +348,7 @@ struct vm_area_struct; #define GFP_DMA32 __GFP_DMA32 #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | \ - __GFP_SKIP_KASAN_POISON) + __GFP_SKIP_KASAN_POISON | __GFP_SKIP_KASAN_UNPOISON) #define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM) #define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM) diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index b1e0f1f8ee2e..6aeea1071b1b 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -12,6 +12,8 @@ #include <linux/property.h> #include <linux/types.h> +#include <asm/msi.h> + struct gpio_desc; struct of_phandle_args; struct device_node; @@ -23,6 +25,13 @@ enum gpio_lookup_flags; struct gpio_chip; +union gpio_irq_fwspec { + struct irq_fwspec fwspec; +#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN + msi_alloc_info_t msiinfo; +#endif +}; + #define GPIO_LINE_DIRECTION_IN 1 #define GPIO_LINE_DIRECTION_OUT 0 @@ -103,9 +112,10 @@ struct gpio_irq_chip { * variant named &gpiochip_populate_parent_fwspec_fourcell is also * available. */ - void *(*populate_parent_alloc_arg)(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type); + int (*populate_parent_alloc_arg)(struct gpio_chip *gc, + union gpio_irq_fwspec *fwspec, + unsigned int parent_hwirq, + unsigned int parent_type); /** * @child_offset_to_irq: @@ -167,21 +177,24 @@ struct gpio_irq_chip { */ irq_flow_handler_t parent_handler; - /** - * @parent_handler_data: - * - * If @per_parent_data is false, @parent_handler_data is a single - * pointer used as the data associated with every parent interrupt. - * - * @parent_handler_data_array: - * - * If @per_parent_data is true, @parent_handler_data_array is - * an array of @num_parents pointers, and is used to associate - * different data for each parent. This cannot be NULL if - * @per_parent_data is true. 
- */ union { + /** + * @parent_handler_data: + * + * If @per_parent_data is false, @parent_handler_data is a + * single pointer used as the data associated with every + * parent interrupt. + */ void *parent_handler_data; + + /** + * @parent_handler_data_array: + * + * If @per_parent_data is true, @parent_handler_data_array is + * an array of @num_parents pointers, and is used to associate + * different data for each parent. This cannot be NULL if + * @per_parent_data is true. + */ void **parent_handler_data_array; }; @@ -646,28 +659,14 @@ struct bgpio_pdata { #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, +int gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type); +int gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, + union gpio_irq_fwspec *gfwspec, unsigned int parent_hwirq, unsigned int parent_type); -void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type); - -#else - -static inline void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type) -{ - return NULL; -} - -static inline void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type) -{ - return NULL; -} #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 76878b357ffa..d57cab4d4c06 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -92,14 +92,6 @@ void irq_exit_rcu(void); #define arch_nmi_exit() do { } while (0) #endif -#ifdef CONFIG_TINY_RCU -static inline void rcu_nmi_enter(void) { } -static inline void rcu_nmi_exit(void) { } -#else -extern void rcu_nmi_enter(void); -extern void rcu_nmi_exit(void); -#endif - /* * NMI vs Tracing * -------------- @@ -124,7 +116,7 @@ extern void rcu_nmi_exit(void); do { \ __nmi_enter(); \ lockdep_hardirq_enter(); \ - rcu_nmi_enter(); \ + ct_nmi_enter(); \ instrumentation_begin(); \ ftrace_nmi_enter(); \ instrumentation_end(); \ @@ -143,7 +135,7 @@ extern void rcu_nmi_exit(void); instrumentation_begin(); \ ftrace_nmi_exit(); \ instrumentation_end(); \ - rcu_nmi_exit(); \ + ct_nmi_exit(); \ lockdep_hardirq_exit(); \ __nmi_exit(); \ } while (0) diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 3af34de54330..56d6a0196534 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -149,19 +149,19 @@ static inline void *kmap_local_folio(struct folio *folio, size_t offset); * It is used in atomic context when code wants to access the contents of a * page that might be allocated from high memory (see __GFP_HIGHMEM), for * example a page in the pagecache. The API has two functions, and they - * can be used in a manner similar to the following: + * can be used in a manner similar to the following:: * - * -- Find the page of interest. -- - * struct page *page = find_get_page(mapping, offset); + * // Find the page of interest. + * struct page *page = find_get_page(mapping, offset); * - * -- Gain access to the contents of that page. -- - * void *vaddr = kmap_atomic(page); + * // Gain access to the contents of that page. + * void *vaddr = kmap_atomic(page); * - * -- Do something to the contents of that page. -- - * memset(vaddr, 0, PAGE_SIZE); + * // Do something to the contents of that page. 
+ * memset(vaddr, 0, PAGE_SIZE); * - * -- Unmap that page. -- - * kunmap_atomic(vaddr); + * // Unmap that page. + * kunmap_atomic(vaddr); * * Note that the kunmap_atomic() call takes the result of the kmap_atomic() * call, not the argument. diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 6cabafffd0dd..116e8bd68c99 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -265,6 +265,12 @@ struct hisi_qm_list { void (*unregister_from_crypto)(struct hisi_qm *qm); }; +struct hisi_qm_poll_data { + struct hisi_qm *qm; + struct work_struct work; + u16 *qp_finish_id; +}; + struct hisi_qm { enum qm_hw_ver ver; enum qm_fun_type fun_type; @@ -302,6 +308,7 @@ struct hisi_qm { struct rw_semaphore qps_lock; struct idr qp_idr; struct hisi_qp *qp_array; + struct hisi_qm_poll_data *poll_data; struct mutex mailbox_lock; @@ -312,7 +319,6 @@ struct hisi_qm { u32 error_mask; struct workqueue_struct *wq; - struct work_struct work; struct work_struct rst_work; struct work_struct cmd_process; diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index de29821231c9..4ddaf6ad73ef 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -461,4 +461,16 @@ static inline int split_folio_to_list(struct folio *folio, return split_huge_page_to_list(&folio->page, list); } +/* + * archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to + * limitations in the implementation like arm64 MTE can override this to + * false + */ +#ifndef arch_thp_swp_supported +static inline bool arch_thp_swp_supported(void) +{ + return true; +} +#endif + #endif /* _LINUX_HUGE_MM_H */ diff --git a/include/linux/ima.h b/include/linux/ima.h index 426b1744215e..81708ca0ebc7 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -140,6 +140,11 @@ static inline int ima_measure_critical_data(const char *event_label, #endif /* CONFIG_IMA */ +#ifdef CONFIG_HAVE_IMA_KEXEC +int __init ima_free_kexec_buffer(void); +int __init ima_get_kexec_buffer(void **addr, size_t *size); +#endif + #ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT extern bool arch_ima_get_secureboot(void); extern const char * const *arch_get_ima_policy(void); diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 4f29139bbfc3..5fcf89faa31a 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -612,7 +612,6 @@ struct intel_iommu { struct device_domain_info { struct list_head link; /* link to domain siblings */ struct list_head global; /* link to global list */ - struct list_head table; /* link to pasid table */ u32 segment; /* PCI segment number */ u8 bus; /* PCI bus number */ u8 devfn; /* PCI devfn number */ @@ -729,8 +728,6 @@ extern int dmar_ir_support(void); void *alloc_pgtable_page(int node); void free_pgtable_page(void *vaddr); struct intel_iommu *domain_get_iommu(struct dmar_domain *domain); -int for_each_device_domain(int (*fn)(struct device_domain_info *info, - void *data), void *data); void iommu_flush_write_buffer(struct intel_iommu *iommu); int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev); struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn); diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h new file mode 100644 index 000000000000..f7fab3758cb9 --- /dev/null +++ b/include/linux/io_uring_types.h @@ -0,0 +1,574 @@ +#ifndef IO_URING_TYPES_H +#define IO_URING_TYPES_H + +#include <linux/blkdev.h> +#include <linux/task_work.h> +#include <linux/bitmap.h> +#include 
<linux/llist.h> +#include <uapi/linux/io_uring.h> + +struct io_wq_work_node { + struct io_wq_work_node *next; +}; + +struct io_wq_work_list { + struct io_wq_work_node *first; + struct io_wq_work_node *last; +}; + +struct io_wq_work { + struct io_wq_work_node list; + unsigned flags; + /* place it here instead of io_kiocb as it fills padding and saves 4B */ + int cancel_seq; +}; + +struct io_fixed_file { + /* file * with additional FFS_* flags */ + unsigned long file_ptr; +}; + +struct io_file_table { + struct io_fixed_file *files; + unsigned long *bitmap; + unsigned int alloc_hint; +}; + +struct io_notif; +struct io_notif_slot; + +struct io_hash_bucket { + spinlock_t lock; + struct hlist_head list; +} ____cacheline_aligned_in_smp; + +struct io_hash_table { + struct io_hash_bucket *hbs; + unsigned hash_bits; +}; + +/* + * Arbitrary limit, can be raised if need be + */ +#define IO_RINGFD_REG_MAX 16 + +struct io_uring_task { + /* submission side */ + int cached_refs; + const struct io_ring_ctx *last; + struct io_wq *io_wq; + struct file *registered_rings[IO_RINGFD_REG_MAX]; + + struct xarray xa; + struct wait_queue_head wait; + atomic_t in_idle; + atomic_t inflight_tracked; + struct percpu_counter inflight; + + struct { /* task_work */ + struct llist_head task_list; + struct callback_head task_work; + } ____cacheline_aligned_in_smp; +}; + +struct io_uring { + u32 head ____cacheline_aligned_in_smp; + u32 tail ____cacheline_aligned_in_smp; +}; + +/* + * This data is shared with the application through the mmap at offsets + * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING. + * + * The offsets to the member fields are published through struct + * io_sqring_offsets when calling io_uring_setup. + */ +struct io_rings { + /* + * Head and tail offsets into the ring; the offsets need to be + * masked to get valid indices. + * + * The kernel controls head of the sq ring and the tail of the cq ring, + * and the application controls tail of the sq ring and the head of the + * cq ring. + */ + struct io_uring sq, cq; + /* + * Bitmasks to apply to head and tail offsets (constant, equals + * ring_entries - 1) + */ + u32 sq_ring_mask, cq_ring_mask; + /* Ring sizes (constant, power of 2) */ + u32 sq_ring_entries, cq_ring_entries; + /* + * Number of invalid entries dropped by the kernel due to + * invalid index stored in array + * + * Written by the kernel, shouldn't be modified by the + * application (i.e. get number of "new events" by comparing to + * cached value). + * + * After a new SQ head value was read by the application this + * counter includes all submissions that were dropped reaching + * the new SQ head (and possibly more). + */ + u32 sq_dropped; + /* + * Runtime SQ flags + * + * Written by the kernel, shouldn't be modified by the + * application. + * + * The application needs a full memory barrier before checking + * for IORING_SQ_NEED_WAKEUP after updating the sq tail. + */ + atomic_t sq_flags; + /* + * Runtime CQ flags + * + * Written by the application, shouldn't be modified by the + * kernel. + */ + u32 cq_flags; + /* + * Number of completion events lost because the queue was full; + * this should be avoided by the application by making sure + * there are not more requests pending than there is space in + * the completion queue. + * + * Written by the kernel, shouldn't be modified by the + * application (i.e. get number of "new events" by comparing to + * cached value). + * + * As completion events come in out of order this counter is not + * ordered with any other data. 
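+ *
+ * I.e. the application can derive the number of newly overflowed
+ * completions against a privately cached copy (sketch):
+ *
+ *	new_drops = ring->cq_overflow - cached_cq_overflow;
+ *	cached_cq_overflow = ring->cq_overflow;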
+ */ + u32 cq_overflow; + /* + * Ring buffer of completion events. + * + * The kernel writes completion events fresh every time they are + * produced, so the application is allowed to modify pending + * entries. + */ + struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp; +}; + +struct io_restriction { + DECLARE_BITMAP(register_op, IORING_REGISTER_LAST); + DECLARE_BITMAP(sqe_op, IORING_OP_LAST); + u8 sqe_flags_allowed; + u8 sqe_flags_required; + bool registered; +}; + +struct io_submit_link { + struct io_kiocb *head; + struct io_kiocb *last; +}; + +struct io_submit_state { + /* inline/task_work completion list, under ->uring_lock */ + struct io_wq_work_node free_list; + /* batch completion logic */ + struct io_wq_work_list compl_reqs; + struct io_submit_link link; + + bool plug_started; + bool need_plug; + unsigned short submit_nr; + struct blk_plug plug; +}; + +struct io_ev_fd { + struct eventfd_ctx *cq_ev_fd; + unsigned int eventfd_async: 1; + struct rcu_head rcu; +}; + +struct io_alloc_cache { + struct hlist_head list; + unsigned int nr_cached; +}; + +struct io_ring_ctx { + /* const or read-mostly hot data */ + struct { + struct percpu_ref refs; + + struct io_rings *rings; + unsigned int flags; + enum task_work_notify_mode notify_method; + unsigned int compat: 1; + unsigned int drain_next: 1; + unsigned int restricted: 1; + unsigned int off_timeout_used: 1; + unsigned int drain_active: 1; + unsigned int drain_disabled: 1; + unsigned int has_evfd: 1; + unsigned int syscall_iopoll: 1; + } ____cacheline_aligned_in_smp; + + /* submission data */ + struct { + struct mutex uring_lock; + + /* + * Ring buffer of indices into array of io_uring_sqe, which is + * mmapped by the application using the IORING_OFF_SQES offset. + * + * This indirection could e.g. be used to assign fixed + * io_uring_sqe entries to operations and only submit them to + * the queue when needed. + * + * The kernel modifies neither the indices array nor the entries + * array. 
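+ *
+ * Submission-side sketch (userspace view, memory barriers elided,
+ * names illustrative):
+ *
+ *	sqes[idx] = ...;			/* fill the SQE */
+ *	sq_array[tail & sq_ring_mask] = idx;	/* publish its index */
+ *	/* then release-store the incremented sq.tail */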
+ */ + u32 *sq_array; + struct io_uring_sqe *sq_sqes; + unsigned cached_sq_head; + unsigned sq_entries; + + /* + * Fixed resources fast path, should be accessed only under + * uring_lock, and updated through io_uring_register(2) + */ + struct io_rsrc_node *rsrc_node; + int rsrc_cached_refs; + atomic_t cancel_seq; + struct io_file_table file_table; + unsigned nr_user_files; + unsigned nr_user_bufs; + struct io_mapped_ubuf **user_bufs; + struct io_notif_slot *notif_slots; + unsigned nr_notif_slots; + + struct io_submit_state submit_state; + + struct io_buffer_list *io_bl; + struct xarray io_bl_xa; + struct list_head io_buffers_cache; + + struct io_hash_table cancel_table_locked; + struct list_head cq_overflow_list; + struct io_alloc_cache apoll_cache; + struct io_alloc_cache netmsg_cache; + } ____cacheline_aligned_in_smp; + + /* IRQ completion list, under ->completion_lock */ + struct io_wq_work_list locked_free_list; + unsigned int locked_free_nr; + + const struct cred *sq_creds; /* cred used for __io_sq_thread() */ + struct io_sq_data *sq_data; /* if using sq thread polling */ + + struct wait_queue_head sqo_sq_wait; + struct list_head sqd_list; + + unsigned long check_cq; + + unsigned int file_alloc_start; + unsigned int file_alloc_end; + + struct xarray personalities; + u32 pers_next; + + struct { + /* + * We cache a range of free CQEs we can use, once exhausted it + * should go through a slower range setup, see __io_get_cqe() + */ + struct io_uring_cqe *cqe_cached; + struct io_uring_cqe *cqe_sentinel; + + unsigned cached_cq_tail; + unsigned cq_entries; + struct io_ev_fd __rcu *io_ev_fd; + struct wait_queue_head cq_wait; + unsigned cq_extra; + } ____cacheline_aligned_in_smp; + + struct { + spinlock_t completion_lock; + + /* + * ->iopoll_list is protected by the ctx->uring_lock for + * io_uring instances that don't use IORING_SETUP_SQPOLL. + * For SQPOLL, only the single threaded io_sq_thread() will + * manipulate the list, hence no extra locking is needed there. + */ + struct io_wq_work_list iopoll_list; + struct io_hash_table cancel_table; + bool poll_multi_queue; + + struct list_head io_buffers_comp; + } ____cacheline_aligned_in_smp; + + /* timeouts */ + struct { + spinlock_t timeout_lock; + atomic_t cq_timeouts; + struct list_head timeout_list; + struct list_head ltimeout_list; + unsigned cq_last_tm_flush; + } ____cacheline_aligned_in_smp; + + /* Keep this last, we don't need it for the fast path */ + + struct io_restriction restrictions; + struct task_struct *submitter_task; + + /* slow path rsrc auxiliary data, used by update/register */ + struct io_rsrc_node *rsrc_backup_node; + struct io_mapped_ubuf *dummy_ubuf; + struct io_rsrc_data *file_data; + struct io_rsrc_data *buf_data; + + struct delayed_work rsrc_put_work; + struct llist_head rsrc_put_llist; + struct list_head rsrc_ref_list; + spinlock_t rsrc_ref_lock; + + struct list_head io_buffers_pages; + + #if defined(CONFIG_UNIX) + struct socket *ring_sock; + #endif + /* hashed buffered write serialization */ + struct io_wq_hash *hash_map; + + /* Only used for accounting purposes */ + struct user_struct *user; + struct mm_struct *mm_account; + + /* ctx exit and cancelation */ + struct llist_head fallback_llist; + struct delayed_work fallback_work; + struct work_struct exit_work; + struct list_head tctx_list; + struct completion ref_comp; + + /* io-wq management, e.g.
thread count */ + u32 iowq_limits[2]; + bool iowq_limits_set; + + struct list_head defer_list; + unsigned sq_thread_idle; + /* protected by ->completion_lock */ + unsigned evfd_last_cq_tail; +}; + +enum { + REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT, + REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT, + REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT, + REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT, + REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT, + REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT, + REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT, + + /* first byte is taken by user flags, shift it to not overlap */ + REQ_F_FAIL_BIT = 8, + REQ_F_INFLIGHT_BIT, + REQ_F_CUR_POS_BIT, + REQ_F_NOWAIT_BIT, + REQ_F_LINK_TIMEOUT_BIT, + REQ_F_NEED_CLEANUP_BIT, + REQ_F_POLLED_BIT, + REQ_F_BUFFER_SELECTED_BIT, + REQ_F_BUFFER_RING_BIT, + REQ_F_REISSUE_BIT, + REQ_F_CREDS_BIT, + REQ_F_REFCOUNT_BIT, + REQ_F_ARM_LTIMEOUT_BIT, + REQ_F_ASYNC_DATA_BIT, + REQ_F_SKIP_LINK_CQES_BIT, + REQ_F_SINGLE_POLL_BIT, + REQ_F_DOUBLE_POLL_BIT, + REQ_F_PARTIAL_IO_BIT, + REQ_F_CQE32_INIT_BIT, + REQ_F_APOLL_MULTISHOT_BIT, + REQ_F_CLEAR_POLLIN_BIT, + REQ_F_HASH_LOCKED_BIT, + /* keep async read/write and isreg together and in order */ + REQ_F_SUPPORT_NOWAIT_BIT, + REQ_F_ISREG_BIT, + + /* not a real bit, just to check we're not overflowing the space */ + __REQ_F_LAST_BIT, +};
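The two-enum idiom here (bit positions first, masks derived via BIT(), low byte kept identical to the user-visible IOSQE_* flags so sqe->flags can seed req->flags directly) can be illustrated with a self-contained toy program; all DEMO_* names are invented for illustration:

#include <assert.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

enum { DEMO_FIXED_FILE_BIT, DEMO_IO_DRAIN_BIT, DEMO_FAIL_BIT = 8 };
enum {
	DEMO_FIXED_FILE = BIT(DEMO_FIXED_FILE_BIT),
	DEMO_IO_DRAIN   = BIT(DEMO_IO_DRAIN_BIT),
	DEMO_FAIL       = BIT(DEMO_FAIL_BIT),	/* first internal-only flag */
};

int main(void)
{
	unsigned user_flags = DEMO_FIXED_FILE | DEMO_IO_DRAIN; /* "sqe->flags" */
	unsigned req_flags = user_flags;	/* user bits copied verbatim */

	req_flags |= DEMO_FAIL;			/* internal state added later */
	assert((req_flags & 0xff) == user_flags); /* low byte stays user-owned */
	printf("flags=%#x\n", req_flags);
	return 0;
}

+ +enum { + /* ctx owns file */ + REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT), + /* drain existing IO first */ + REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT), + /* linked sqes */ + REQ_F_LINK = BIT(REQ_F_LINK_BIT), + /* doesn't sever on completion < 0 */ + REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT), + /* IOSQE_ASYNC */ + REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT), + /* IOSQE_BUFFER_SELECT */ + REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT), + /* IOSQE_CQE_SKIP_SUCCESS */ + REQ_F_CQE_SKIP = BIT(REQ_F_CQE_SKIP_BIT), + + /* fail rest of links */ + REQ_F_FAIL = BIT(REQ_F_FAIL_BIT), + /* on inflight list, should be cancelled and waited on exit reliably */ + REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT), + /* read/write uses file position */ + REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT), + /* must not punt to workers */ + REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT), + /* has or had linked timeout */ + REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT), + /* needs cleanup */ + REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT), + /* already went through poll handler */ + REQ_F_POLLED = BIT(REQ_F_POLLED_BIT), + /* buffer already selected */ + REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT), + /* buffer selected from ring, needs commit */ + REQ_F_BUFFER_RING = BIT(REQ_F_BUFFER_RING_BIT), + /* caller should reissue async */ + REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT), + /* supports async reads/writes */ + REQ_F_SUPPORT_NOWAIT = BIT(REQ_F_SUPPORT_NOWAIT_BIT), + /* regular file */ + REQ_F_ISREG = BIT(REQ_F_ISREG_BIT), + /* has creds assigned */ + REQ_F_CREDS = BIT(REQ_F_CREDS_BIT), + /* skip refcounting if not set */ + REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT), + /* there is a linked timeout that has to be armed */ + REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT), + /* ->async_data allocated */ + REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT), + /* don't post CQEs while failing linked requests */ + REQ_F_SKIP_LINK_CQES = BIT(REQ_F_SKIP_LINK_CQES_BIT), + /* single poll may be active */ + REQ_F_SINGLE_POLL = BIT(REQ_F_SINGLE_POLL_BIT), + /* double poll may be active */ + REQ_F_DOUBLE_POLL = BIT(REQ_F_DOUBLE_POLL_BIT), + /* request has already done partial IO */ + REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT), + /* fast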
poll multishot mode */ + REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT), + /* ->extra1 and ->extra2 are initialised */ + REQ_F_CQE32_INIT = BIT(REQ_F_CQE32_INIT_BIT), + /* recvmsg special flag, clear EPOLLIN */ + REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT), + /* hashed into ->cancel_hash_locked, protected by ->uring_lock */ + REQ_F_HASH_LOCKED = BIT(REQ_F_HASH_LOCKED_BIT), +}; + +typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked); + +struct io_task_work { + struct llist_node node; + io_req_tw_func_t func; +}; + +struct io_cqe { + __u64 user_data; + __s32 res; + /* fd initially, then cflags for completion */ + union { + __u32 flags; + int fd; + }; +}; + +/* + * Each request type overlays its private data structure on top of this one. + * They must not exceed this one in size. + */ +struct io_cmd_data { + struct file *file; + /* each command gets 56 bytes of data */ + __u8 data[56]; +}; + +#define io_kiocb_to_cmd(req) ((void *) &(req)->cmd) +#define cmd_to_io_kiocb(ptr) ((struct io_kiocb *) ptr)
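A hypothetical opcode-private struct shows how the overlay contract is meant to be used; io_demo and io_demo_prep are invented names, sketched in kernel style under the rules stated in the comment above:

/* Hypothetical op-private struct: file pointer first (so the io_kiocb
 * union works), total size no larger than struct io_cmd_data. */
struct io_demo {
	struct file *file;	/* must mirror io_kiocb->file */
	u64 user_len;
	u32 demo_flags;
};

static int io_demo_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_demo *demo = io_kiocb_to_cmd(req);

	/* a static check like this enforces the "must not exceed" rule */
	BUILD_BUG_ON(sizeof(struct io_demo) > sizeof(struct io_cmd_data));
	demo->user_len = READ_ONCE(sqe->len);
	demo->demo_flags = 0;
	return 0;
}

+ +struct io_kiocb { + union { + /* + * NOTE! Each of the io_kiocb union members has the file pointer + * as the first entry in their struct definition. So you can + * access the file pointer through any of the sub-structs, + * or directly as just 'file' in this struct. + */ + struct file *file; + struct io_cmd_data cmd; + }; + + u8 opcode; + /* polled IO has completed */ + u8 iopoll_completed; + /* + * Can be either a fixed buffer index, or used with provided buffers. + * For the latter, before issue it points to the buffer group ID, + * and after selection it points to the buffer ID itself. + */ + u16 buf_index; + unsigned int flags; + + struct io_cqe cqe; + + struct io_ring_ctx *ctx; + struct task_struct *task; + + struct io_rsrc_node *rsrc_node; + + union { + /* store used ubuf, so we can prevent reloading */ + struct io_mapped_ubuf *imu; + + /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */ + struct io_buffer *kbuf; + + /* + * stores buffer ID for ring provided buffers, valid IFF + * REQ_F_BUFFER_RING is set. + */ + struct io_buffer_list *buf_list; + }; + + union { + /* used by request caches, completion batching and iopoll */ + struct io_wq_work_node comp_list; + /* cache ->apoll->events */ + __poll_t apoll_events; + }; + atomic_t refs; + atomic_t poll_refs; + struct io_task_work io_task_work; + /* for polled requests, i.e.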
IORING_OP_POLL_ADD and async armed poll */ + union { + struct hlist_node hash_node; + struct { + u64 extra1; + u64 extra2; + }; + }; + /* internal polling, see IORING_FEAT_FAST_POLL */ + struct async_poll *apoll; + /* opcode allocated if it needs to store data for async defer */ + void *async_data; + /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */ + struct io_kiocb *link; + /* custom credentials, valid IFF REQ_F_CREDS is set */ + const struct cred *creds; + struct io_wq_work work; +}; + +struct io_overflow_cqe { + struct list_head list; + struct io_uring_cqe cqe; +}; + +#endif diff --git a/include/linux/iomap.h b/include/linux/iomap.h index e552097c67e0..25ac28175e4f 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -231,12 +231,6 @@ void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count); bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags); void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len); -#ifdef CONFIG_MIGRATION -int iomap_migrate_page(struct address_space *mapping, struct page *newpage, - struct page *page, enum migrate_mode mode); -#else -#define iomap_migrate_page NULL -#endif int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, const struct iomap_ops *ops); int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, @@ -353,6 +347,12 @@ struct iomap_dio_ops { */ #define IOMAP_DIO_PARTIAL (1 << 2) +/* + * The caller will sync the write if needed; do not sync it within + * iomap_dio_rw. Overrides IOMAP_DIO_FORCE_WAIT. + */ +#define IOMAP_DIO_NOSYNC (1 << 3) + ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops, const struct iomap_dio_ops *dops, unsigned int dio_flags, void *private, size_t done_before); diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index 3f53bc27a19b..7578d4f6a969 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -11,7 +11,7 @@ /* * Default IO priority. */ -#define IOPRIO_DEFAULT IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_BE_NORM) +#define IOPRIO_DEFAULT IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0) /* * Check that a priority value has a valid class. @@ -46,23 +46,19 @@ static inline int task_nice_ioclass(struct task_struct *task) return IOPRIO_CLASS_BE; } -/* - * If the calling process has set an I/O priority, use that. Otherwise, return - * the default I/O priority. 
- */ -static inline int get_current_ioprio(void) +#ifdef CONFIG_BLOCK +int __get_task_ioprio(struct task_struct *p); +#else +static inline int __get_task_ioprio(struct task_struct *p) { - struct io_context *ioc = current->io_context; - - if (ioc) - return ioc->ioprio; return IOPRIO_DEFAULT; } +#endif /* CONFIG_BLOCK */ -/* - * For inheritance, return the highest of the two given priorities - */ -extern int ioprio_best(unsigned short aprio, unsigned short bprio); +static inline int get_current_ioprio(void) +{ + return __get_task_ioprio(current); +} extern int set_task_ioprio(struct task_struct *task, int ioprio); diff --git a/include/linux/irq.h b/include/linux/irq.h index 505308253d23..c3eb89606c2b 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -151,7 +151,9 @@ struct irq_common_data { #endif void *handler_data; struct msi_desc *msi_desc; +#ifdef CONFIG_SMP cpumask_var_t affinity; +#endif #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK cpumask_var_t effective_affinity; #endif @@ -879,21 +881,34 @@ static inline int irq_data_get_node(struct irq_data *d) return irq_common_data_get_node(d->common); } -static inline struct cpumask *irq_get_affinity_mask(int irq) +static inline +const struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) { - struct irq_data *d = irq_get_irq_data(irq); +#ifdef CONFIG_SMP + return d->common->affinity; +#else + return cpumask_of(0); +#endif +} - return d ? d->common->affinity : NULL; +static inline void irq_data_update_affinity(struct irq_data *d, + const struct cpumask *m) +{ +#ifdef CONFIG_SMP + cpumask_copy(d->common->affinity, m); +#endif } -static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) +static inline const struct cpumask *irq_get_affinity_mask(int irq) { - return d->common->affinity; + struct irq_data *d = irq_get_irq_data(irq); + + return d ? 
irq_data_get_affinity_mask(d) : NULL; } #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK static inline -struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) +const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) { return d->common->effective_affinity; } @@ -908,13 +923,14 @@ static inline void irq_data_update_effective_affinity(struct irq_data *d, { } static inline -struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) +const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) { - return d->common->affinity; + return irq_data_get_affinity_mask(d); } #endif -static inline struct cpumask *irq_get_effective_affinity_mask(unsigned int irq) +static inline +const struct cpumask *irq_get_effective_affinity_mask(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); @@ -1121,6 +1137,7 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on); /* Setup functions for irq_chip_generic */ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw_irq); +void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq); struct irq_chip_generic * irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, void __iomem *reg_base, irq_flow_handler_t handler);
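With the accessors now returning const masks, an irqchip driver that previously wrote through irq_data_get_affinity_mask() goes through irq_data_update_affinity() instead; a hedged sketch of the pattern, where foo_set_affinity is an illustrative callback rather than an existing driver:

static int foo_set_affinity(struct irq_data *d, const struct cpumask *mask,
			    bool force)
{
	/* record the new affinity; compiles to a no-op on !CONFIG_SMP */
	irq_data_update_affinity(d, mask);
	/* ... program the interrupt controller hardware here ... */
	return IRQ_SET_MASK_OK;
}

diff --git a/include/linux/irqchip/mmp.h b/include/linux/irqchip/mmp.h index cb8455c87c8a..aa1813749a4f 100644 --- a/include/linux/irqchip/mmp.h +++ b/include/linux/irqchip/mmp.h @@ -4,4 +4,7 @@ extern struct irq_chip icu_irq_chip; +extern void icu_init_irq(void); +extern void mmp2_init_icu(void); + #endif /* __IRQCHIP_MMP_H */ diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index a77584593f7d..1cd4e36890fb 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -209,14 +209,15 @@ static inline void irq_set_handler_locked(struct irq_data *data, * Must be called with irq_desc locked and valid parameters.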
*/ static inline void -irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip, +irq_set_chip_handler_name_locked(struct irq_data *data, + const struct irq_chip *chip, irq_flow_handler_t handler, const char *name) { struct irq_desc *desc = irq_data_to_desc(data); desc->handle_irq = handler; desc->name = name; - data->chip = chip; + data->chip = (struct irq_chip *)chip; } bool irq_check_status_bit(unsigned int irq, unsigned int bitmask); diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index e79d6e0b14e8..dc1724131300 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1557,7 +1557,7 @@ extern int jbd2_journal_wipe (journal_t *, int); extern int jbd2_journal_skip_recovery (journal_t *); extern void jbd2_journal_update_sb_errno(journal_t *); extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t, - unsigned long, int); + unsigned long, blk_opf_t); extern void jbd2_journal_abort (journal_t *, int); extern int jbd2_journal_errno (journal_t *); extern void jbd2_journal_ack_err (journal_t *); diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index bf1eef337a07..570831ca9951 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -220,8 +220,6 @@ extern void jump_label_lock(void); extern void jump_label_unlock(void); extern void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type); -extern void arch_jump_label_transform_static(struct jump_entry *entry, - enum jump_label_type type); extern bool arch_jump_label_transform_queue(struct jump_entry *entry, enum jump_label_type type); extern void arch_jump_label_transform_apply(void); @@ -230,12 +228,12 @@ extern void static_key_slow_inc(struct static_key *key); extern void static_key_slow_dec(struct static_key *key); extern void static_key_slow_inc_cpuslocked(struct static_key *key); extern void static_key_slow_dec_cpuslocked(struct static_key *key); -extern void jump_label_apply_nops(struct module *mod); extern int static_key_count(struct static_key *key); extern void static_key_enable(struct static_key *key); extern void static_key_disable(struct static_key *key); extern void static_key_enable_cpuslocked(struct static_key *key); extern void static_key_disable_cpuslocked(struct static_key *key); +extern enum jump_label_type jump_label_init_type(struct jump_entry *entry); /* * We should be using ATOMIC_INIT() for initializing .enabled, but @@ -303,11 +301,6 @@ static inline int jump_label_text_reserved(void *start, void *end) static inline void jump_label_lock(void) {} static inline void jump_label_unlock(void) {} -static inline int jump_label_apply_nops(struct module *mod) -{ - return 0; -} - static inline void static_key_enable(struct static_key *key) { STATIC_KEY_CHECK_USE(key); diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 69ae6b278464..ddb5a358fd82 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -28,6 +28,9 @@ enum cpu_usage_stat { CPUTIME_STEAL, CPUTIME_GUEST, CPUTIME_GUEST_NICE, +#ifdef CONFIG_SCHED_CORE + CPUTIME_FORCEIDLE, +#endif NR_STATS, }; @@ -115,4 +118,8 @@ extern void account_process_tick(struct task_struct *, int user); extern void account_idle_ticks(unsigned long ticks); +#ifdef CONFIG_SCHED_CORE +extern void __account_forceidle_time(struct task_struct *tsk, u64 delta); +#endif + #endif /* _LINUX_KERNEL_STAT_H */ diff --git a/include/linux/kexec.h b/include/linux/kexec.h index ce6536f1d269..13e6c4b58f07 100644 --- a/include/linux/kexec.h +++ 
b/include/linux/kexec.h @@ -19,6 +19,7 @@ #include <asm/io.h> #include <uapi/linux/kexec.h> +#include <linux/verification.h> /* Location of a reserved region to hold the crash kernel. */ @@ -188,21 +189,54 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, void *buf, unsigned int size, bool get_value); void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); +void *kexec_image_load_default(struct kimage *image); + +#ifndef arch_kexec_kernel_image_probe +static inline int +arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len) +{ + return kexec_image_probe_default(image, buf, buf_len); +} +#endif + +#ifndef arch_kimage_file_post_load_cleanup +static inline int arch_kimage_file_post_load_cleanup(struct kimage *image) +{ + return kexec_image_post_load_cleanup_default(image); +} +#endif + +#ifndef arch_kexec_kernel_image_load +static inline void *arch_kexec_kernel_image_load(struct kimage *image) +{ + return kexec_image_load_default(image); +} +#endif -/* Architectures may override the below functions */ -int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, - unsigned long buf_len); -void *arch_kexec_kernel_image_load(struct kimage *image); -int arch_kimage_file_post_load_cleanup(struct kimage *image); #ifdef CONFIG_KEXEC_SIG -int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, - unsigned long buf_len); +#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION +int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len); +#endif #endif -int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf); extern int kexec_add_buffer(struct kexec_buf *kbuf); int kexec_locate_mem_hole(struct kexec_buf *kbuf); +#ifndef arch_kexec_locate_mem_hole +/** + * arch_kexec_locate_mem_hole - Find free memory to place the segments. + * @kbuf: Parameters for the memory search. + * + * On success, kbuf->mem will have the start address of the memory region found. + * + * Return: 0 on success, negative errno on error. + */ +static inline int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf) +{ + return kexec_locate_mem_hole(kbuf); +} +#endif +
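The #ifndef blocks above replace weak/extern arch hooks with static inline defaults; an architecture opts out by defining the symbol before this header is parsed. Roughly, following the convention the header itself establishes (arch/foo is hypothetical):

/* arch/foo/include/asm/kexec.h -- hypothetical override sketch */
#define arch_kexec_locate_mem_hole arch_kexec_locate_mem_hole
int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf); /* arch-specific
						 * definition lives in
						 * arch/foo code */

Because the macro is defined, the #ifndef in <linux/kexec.h> skips the generic inline and the arch version is used instead; with no define, every architecture silently inherits the default.

/* Alignment required for elf header segment */ #define ELF_CORE_HEADER_ALIGN 4096 @@ -358,7 +392,10 @@ extern void machine_kexec_cleanup(struct kimage *image); extern int kernel_kexec(void); extern struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order); -int machine_kexec_post_load(struct kimage *image); + +#ifndef machine_kexec_post_load +static inline int machine_kexec_post_load(struct kimage *image) { return 0; } +#endif extern void __crash_kexec(struct pt_regs *); extern void crash_kexec(struct pt_regs *); @@ -391,10 +428,21 @@ extern bool kexec_in_progress; int crash_shrink_memory(unsigned long new_size); size_t crash_get_memory_size(void); -void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); -void arch_kexec_protect_crashkres(void); -void arch_kexec_unprotect_crashkres(void); +#ifndef arch_kexec_protect_crashkres +/* + * Protection mechanism for crashkernel reserved memory after + * the kdump kernel is loaded.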
+ * + * Provide an empty default implementation here -- architecture + * code may override this. + */ +static inline void arch_kexec_protect_crashkres(void) { } +#endif + +#ifndef arch_kexec_unprotect_crashkres +static inline void arch_kexec_unprotect_crashkres(void) { } +#endif #ifndef page_to_boot_pfn static inline unsigned long page_to_boot_pfn(struct page *page) @@ -424,6 +472,16 @@ static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys) } #endif +#ifndef crash_free_reserved_phys_range +static inline void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) +{ + unsigned long addr; + + for (addr = begin; addr < end; addr += PAGE_SIZE) + free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT)); +} +#endif + static inline unsigned long virt_to_boot_phys(void *addr) { return phys_to_boot_phys(__pa((unsigned long)addr)); } @@ -452,6 +510,12 @@ static inline int kexec_crash_loaded(void) { return 0; } #define kexec_in_progress false #endif /* CONFIG_KEXEC_CORE */ +#ifdef CONFIG_KEXEC_SIG +void set_kexec_sig_enforced(void); +#else +static inline void set_kexec_sig_enforced(void) {} +#endif + #endif /* !defined(__ASSEMBLY__) */ #endif /* LINUX_KEXEC_H */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c20f2d55840c..90a45ef7203b 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1513,7 +1513,7 @@ static inline void kvm_arch_end_assignment(struct kvm *kvm) { } -static inline bool kvm_arch_has_assigned_device(struct kvm *kvm) +static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm) { return false; } @@ -1822,6 +1822,15 @@ struct _kvm_stats_desc { STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \ KVM_STATS_BASE_POW10, 0) +/* Instantaneous boolean value, read only */ +#define STATS_DESC_IBOOLEAN(SCOPE, name) \ + STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \ + KVM_STATS_BASE_POW10, 0) +/* Peak (sticky) boolean value, read/write */ +#define STATS_DESC_PBOOLEAN(SCOPE, name) \ + STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \ + KVM_STATS_BASE_POW10, 0) + /* Cumulative time in nanosecond */ #define STATS_DESC_TIME_NSEC(SCOPE, name) \ STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ @@ -1853,7 +1862,7 @@ struct _kvm_stats_desc { HALT_POLL_HIST_COUNT), \ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \ HALT_POLL_HIST_COUNT), \ - STATS_DESC_ICOUNTER(VCPU_GENERIC, blocking) + STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking) extern struct dentry *kvm_debugfs_dir; diff --git a/include/linux/libata.h b/include/linux/libata.h index 732de9014626..0f2a59c9c735 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -822,7 +822,6 @@ struct ata_port { struct ata_queued_cmd qcmd[ATA_MAX_QUEUE + 1]; u64 qc_active; int nr_active_links; /* #links with active qcs */ - unsigned int sas_last_tag; /* track next tag hw expects */ struct ata_link link; /* host default link */ struct ata_link *slave_link; /* see ata_slave_link_init() */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index b6829b970093..1f1099dac3f0 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -188,7 +188,7 @@ static inline void lockdep_init_map_waits(struct lockdep_map *lock, const char *name, struct lock_class_key *key, int subclass, u8 inner, u8 outer) { - lockdep_init_map_type(lock, name, key, subclass, inner, LD_WAIT_INV, LD_LOCK_NORMAL); + lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL); } static inline @@ -211,24 +211,28 @@ static inline
void lockdep_init_map(struct lockdep_map *lock, const char *name, * or they are too narrow (they suffer from a false class-split): */ #define lockdep_set_class(lock, key) \ - lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0, \ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer) + lockdep_init_map_type(&(lock)->dep_map, #key, key, 0, \ + (lock)->dep_map.wait_type_inner, \ + (lock)->dep_map.wait_type_outer, \ + (lock)->dep_map.lock_type) #define lockdep_set_class_and_name(lock, key, name) \ - lockdep_init_map_waits(&(lock)->dep_map, name, key, 0, \ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer) + lockdep_init_map_type(&(lock)->dep_map, name, key, 0, \ + (lock)->dep_map.wait_type_inner, \ + (lock)->dep_map.wait_type_outer, \ + (lock)->dep_map.lock_type) #define lockdep_set_class_and_subclass(lock, key, sub) \ - lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer) + lockdep_init_map_type(&(lock)->dep_map, #key, key, sub, \ + (lock)->dep_map.wait_type_inner, \ + (lock)->dep_map.wait_type_outer, \ + (lock)->dep_map.lock_type) #define lockdep_set_subclass(lock, sub) \ - lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer) + lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\ + (lock)->dep_map.wait_type_inner, \ + (lock)->dep_map.wait_type_outer, \ + (lock)->dep_map.lock_type) #define lockdep_set_novalidate_class(lock) \ lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock) diff --git a/include/linux/lockref.h b/include/linux/lockref.h index 99f17cc8e163..c3a1f78bc884 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h @@ -38,7 +38,6 @@ extern void lockref_get(struct lockref *); extern int lockref_put_return(struct lockref *); extern int lockref_get_not_zero(struct lockref *); extern int lockref_put_not_zero(struct lockref *); -extern int lockref_get_or_lock(struct lockref *); extern int lockref_put_or_lock(struct lockref *); extern void lockref_mark_dead(struct lockref *); diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index eafa1d2489fd..806448173033 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -201,6 +201,7 @@ LSM_HOOK(int, 0, task_fix_setuid, struct cred *new, const struct cred *old, int flags) LSM_HOOK(int, 0, task_fix_setgid, struct cred *new, const struct cred * old, int flags) +LSM_HOOK(int, 0, task_fix_setgroups, struct cred *new, const struct cred * old) LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid) LSM_HOOK(int, 0, task_getpgid, struct task_struct *p) LSM_HOOK(int, 0, task_getsid, struct task_struct *p) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 91c8146649f5..84a0d7e02176 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -702,6 +702,13 @@ * @old is the set of credentials that are being replaced. * @flags contains one of the LSM_SETID_* values. * Return 0 on success. + * @task_fix_setgroups: + * Update the module's state after setting the supplementary group + * identity attributes of the current process. + * @new is the set of credentials that will be installed. Modifications + * should be made to this rather than to @current->cred. + * @old is the set of credentials that are being replaced. + * Return 0 on success. 
* @task_setpgid: * Check permission before setting the process group identifier of the * process @p to @pgid. diff --git a/include/linux/memregion.h b/include/linux/memregion.h index e11595256cac..c04c4fd2e209 100644 --- a/include/linux/memregion.h +++ b/include/linux/memregion.h @@ -16,7 +16,7 @@ static inline int memregion_alloc(gfp_t gfp) { return -ENOMEM; } -void memregion_free(int id) +static inline void memregion_free(int id) { } #endif diff --git a/include/linux/mfd/bcm2835-pm.h b/include/linux/mfd/bcm2835-pm.h index ed37dc40e82a..f70a810c55f7 100644 --- a/include/linux/mfd/bcm2835-pm.h +++ b/include/linux/mfd/bcm2835-pm.h @@ -9,6 +9,7 @@ struct bcm2835_pm { struct device *dev; void __iomem *base; void __iomem *asb; + void __iomem *rpivid_asb; }; #endif /* BCM2835_MFD_PM_H */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 069a89e847f3..ae5bb67a9ba1 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -19,27 +19,59 @@ struct migration_target_control; */ #define MIGRATEPAGE_SUCCESS 0 +/** + * struct movable_operations - Driver page migration + * @isolate_page: + * The VM calls this function to prepare the page to be moved. The page + * is locked and the driver should not unlock it. The driver should + * return ``true`` if the page is movable and ``false`` if it is not + * currently movable. After this function returns, the VM uses the + * page->lru field, so the driver must preserve any information which + * is usually stored here. + * + * @migrate_page: + * After isolation, the VM calls this function with the isolated + * @src page. The driver should copy the contents of the + * @src page to the @dst page and set up the fields of @dst page. + * Both pages are locked. + * If page migration is successful, the driver should call + * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS. + * If the driver cannot migrate the page at the moment, it can return + * -EAGAIN. The VM interprets this as a temporary migration failure and + * will retry it later. Any other error value is a permanent migration + * failure and migration will not be retried. + * The driver shouldn't touch the @src->lru field while in the + * migrate_page() function. It may write to @dst->lru. + * + * @putback_page: + * If migration fails on the isolated page, the VM informs the driver + * that the page is no longer a candidate for migration by calling + * this function. The driver should put the isolated page back into + * its own data structure. 
+ */ +struct movable_operations { + bool (*isolate_page)(struct page *, isolate_mode_t); + int (*migrate_page)(struct page *dst, struct page *src, + enum migrate_mode); + void (*putback_page)(struct page *); +}; + /* Defined in mm/debug.c: */ extern const char *migrate_reason_names[MR_TYPES]; #ifdef CONFIG_MIGRATION extern void putback_movable_pages(struct list_head *l); -extern int migrate_page(struct address_space *mapping, - struct page *newpage, struct page *page, - enum migrate_mode mode); +int migrate_folio(struct address_space *mapping, struct folio *dst, + struct folio *src, enum migrate_mode mode); extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason, unsigned int *ret_succeeded); extern struct page *alloc_migration_target(struct page *page, unsigned long private); extern int isolate_movable_page(struct page *page, isolate_mode_t mode); -extern void migrate_page_states(struct page *newpage, struct page *page); -extern void migrate_page_copy(struct page *newpage, struct page *page); -extern int migrate_huge_page_move_mapping(struct address_space *mapping, - struct page *newpage, struct page *page); -extern int migrate_page_move_mapping(struct address_space *mapping, - struct page *newpage, struct page *page, int extra_count); +int migrate_huge_page_move_mapping(struct address_space *mapping, + struct folio *dst, struct folio *src); void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep, spinlock_t *ptl); void folio_migrate_flags(struct folio *newfolio, struct folio *folio); @@ -60,15 +92,8 @@ static inline struct page *alloc_migration_target(struct page *page, static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) { return -EBUSY; } -static inline void migrate_page_states(struct page *newpage, struct page *page) -{ -} - -static inline void migrate_page_copy(struct page *newpage, - struct page *page) {} - static inline int migrate_huge_page_move_mapping(struct address_space *mapping, - struct page *newpage, struct page *page) + struct folio *dst, struct folio *src) { return -ENOSYS; } @@ -91,13 +116,13 @@ static inline int next_demotion_node(int node) #endif #ifdef CONFIG_COMPACTION -extern int PageMovable(struct page *page); -extern void __SetPageMovable(struct page *page, struct address_space *mapping); -extern void __ClearPageMovable(struct page *page); +bool PageMovable(struct page *page); +void __SetPageMovable(struct page *page, const struct movable_operations *ops); +void __ClearPageMovable(struct page *page); #else -static inline int PageMovable(struct page *page) { return 0; } +static inline bool PageMovable(struct page *page) { return false; } static inline void __SetPageMovable(struct page *page, - struct address_space *mapping) + const struct movable_operations *ops) { } static inline void __ClearPageMovable(struct page *page) @@ -110,6 +135,15 @@ static inline bool folio_test_movable(struct folio *folio) return PageMovable(&folio->page); } +static inline +const struct movable_operations *page_movable_ops(struct page *page) +{ + VM_BUG_ON(!__PageMovable(page)); + + return (const struct movable_operations *) + ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE); +} + #ifdef CONFIG_NUMA_BALANCING extern int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node);
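Putting the documented protocol together, a driver exposing migratable pages might wire up the ops along these lines; every demo_* name is hypothetical, and locking, highmem mapping and error handling are elided:

static bool demo_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* page is locked by the VM; detach it from the driver's own list */
	return true;	/* movable right now */
}

static int demo_migrate_page(struct page *dst, struct page *src,
			     enum migrate_mode mode)
{
	memcpy(page_address(dst), page_address(src), PAGE_SIZE);
	/* ...transfer driver metadata from src to dst... */
	__ClearPageMovable(src);	/* per the kernel-doc above */
	return MIGRATEPAGE_SUCCESS;
}

static void demo_putback_page(struct page *page)
{
	/* migration failed: reinsert into the driver's data structure */
}

static const struct movable_operations demo_mops = {
	.isolate_page	= demo_isolate_page,
	.migrate_page	= demo_migrate_page,
	.putback_page	= demo_putback_page,
};

/* on allocating a page the driver wants to be migratable:
 *	__SetPageMovable(page, &demo_mops);
 */

diff --git a/include/linux/mm.h b/include/linux/mm.h index bc8f326be0ce..7898e29bcfb5 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1130,23 +1130,27 @@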
static inline bool is_zone_movable_page(const struct page *page) #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX) DECLARE_STATIC_KEY_FALSE(devmap_managed_key); -bool __put_devmap_managed_page(struct page *page); -static inline bool put_devmap_managed_page(struct page *page) +bool __put_devmap_managed_page_refs(struct page *page, int refs); +static inline bool put_devmap_managed_page_refs(struct page *page, int refs) { if (!static_branch_unlikely(&devmap_managed_key)) return false; if (!is_zone_device_page(page)) return false; - return __put_devmap_managed_page(page); + return __put_devmap_managed_page_refs(page, refs); } - #else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ -static inline bool put_devmap_managed_page(struct page *page) +static inline bool put_devmap_managed_page_refs(struct page *page, int refs) { return false; } #endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ +static inline bool put_devmap_managed_page(struct page *page) +{ + return put_devmap_managed_page_refs(page, 1); +} + /* 127: arbitrary random number, small enough to assemble well */ #define folio_ref_zero_or_close_to_overflow(folio) \ ((unsigned int) folio_ref_count(folio) + 127u <= 127u) @@ -1600,7 +1604,7 @@ static inline bool is_pinnable_page(struct page *page) if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE) return false; #endif - return !(is_zone_movable_page(page) || is_zero_pfn(page_to_pfn(page))); + return !is_zone_movable_page(page) || is_zero_pfn(page_to_pfn(page)); } #else static inline bool is_pinnable_page(struct page *page) @@ -3232,6 +3236,7 @@ enum mf_flags { MF_MUST_KILL = 1 << 2, MF_SOFT_OFFLINE = 1 << 3, MF_UNPOISON = 1 << 4, + MF_SW_SIMULATED = 1 << 5, }; extern int memory_failure(unsigned long pfn, int flags); extern void memory_failure_queue(unsigned long pfn, int flags); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index b34ff2cdbc4f..c29ab4c0cd5c 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -227,6 +227,7 @@ struct page { * struct folio - Represents a contiguous set of bytes. * @flags: Identical to the page flags. * @lru: Least Recently Used list; tracks how recently this folio was used. + * @mlock_count: Number of times this folio has been pinned by mlock(). * @mapping: The file this page belongs to, or refers to the anon_vma for * anonymous memory. * @index: Offset within the file, in units of pages. 
For anonymous memory, @@ -255,10 +256,14 @@ struct folio { unsigned long flags; union { struct list_head lru; + /* private: avoid cluttering the output */ struct { void *__filler; + /* public: */ unsigned int mlock_count; + /* private: */ }; + /* public: */ }; struct address_space *mapping; pgoff_t index; diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h index ee5a217de2a8..f6e5369d2928 100644 --- a/include/linux/mnt_idmapping.h +++ b/include/linux/mnt_idmapping.h @@ -13,6 +13,129 @@ struct user_namespace; */ extern struct user_namespace init_user_ns; +typedef struct { + uid_t val; +} vfsuid_t; + +typedef struct { + gid_t val; +} vfsgid_t; + +static_assert(sizeof(vfsuid_t) == sizeof(kuid_t)); +static_assert(sizeof(vfsgid_t) == sizeof(kgid_t)); +static_assert(offsetof(vfsuid_t, val) == offsetof(kuid_t, val)); +static_assert(offsetof(vfsgid_t, val) == offsetof(kgid_t, val)); + +#ifdef CONFIG_MULTIUSER +static inline uid_t __vfsuid_val(vfsuid_t uid) +{ + return uid.val; +} + +static inline gid_t __vfsgid_val(vfsgid_t gid) +{ + return gid.val; +} +#else +static inline uid_t __vfsuid_val(vfsuid_t uid) +{ + return 0; +} + +static inline gid_t __vfsgid_val(vfsgid_t gid) +{ + return 0; +} +#endif + +static inline bool vfsuid_valid(vfsuid_t uid) +{ + return __vfsuid_val(uid) != (uid_t)-1; +} + +static inline bool vfsgid_valid(vfsgid_t gid) +{ + return __vfsgid_val(gid) != (gid_t)-1; +} + +static inline bool vfsuid_eq(vfsuid_t left, vfsuid_t right) +{ + return vfsuid_valid(left) && __vfsuid_val(left) == __vfsuid_val(right); +} + +static inline bool vfsgid_eq(vfsgid_t left, vfsgid_t right) +{ + return vfsgid_valid(left) && __vfsgid_val(left) == __vfsgid_val(right); +} + +/** + * vfsuid_eq_kuid - check whether kuid and vfsuid have the same value + * @vfsuid: the vfsuid to compare + * @kuid: the kuid to compare + * + * Check whether @vfsuid and @kuid have the same values. + * + * Return: true if @vfsuid and @kuid have the same value, false if not. + * Comparison between two invalid uids returns false. + */ +static inline bool vfsuid_eq_kuid(vfsuid_t vfsuid, kuid_t kuid) +{ + return vfsuid_valid(vfsuid) && __vfsuid_val(vfsuid) == __kuid_val(kuid); +} + +/** + * vfsgid_eq_kgid - check whether kgid and vfsgid have the same value + * @vfsgid: the vfsgid to compare + * @kgid: the kgid to compare + * + * Check whether @vfsgid and @kgid have the same values. + * + * Return: true if @vfsgid and @kgid have the same value, false if not. + * Comparison between two invalid gids returns false. + */ +static inline bool vfsgid_eq_kgid(vfsgid_t vfsgid, kgid_t kgid) +{ + return vfsgid_valid(vfsgid) && __vfsgid_val(vfsgid) == __kgid_val(kgid); +} + +/* + * vfs{g,u}ids are created from k{g,u}ids. + * We don't allow them to be created from regular {u,g}id. + */ +#define VFSUIDT_INIT(val) (vfsuid_t){ __kuid_val(val) } +#define VFSGIDT_INIT(val) (vfsgid_t){ __kgid_val(val) } + +#define INVALID_VFSUID VFSUIDT_INIT(INVALID_UID) +#define INVALID_VFSGID VFSGIDT_INIT(INVALID_GID) + +/* + * Allow a vfs{g,u}id to be used as a k{g,u}id where we want to compare + * whether the mapped value is identical to value of a k{g,u}id. + */ +#define AS_KUIDT(val) (kuid_t){ __vfsuid_val(val) } +#define AS_KGIDT(val) (kgid_t){ __vfsgid_val(val) } + +#ifdef CONFIG_MULTIUSER +/** + * vfsgid_in_group_p() - check whether a vfsgid matches the caller's groups + * @vfsgid: the mnt gid to match + * + * This function can be used to determine whether @vfsgid matches any of the
+ * + * Return: 1 if vfsuid matches caller's groups, 0 if not. + */ +static inline int vfsgid_in_group_p(vfsgid_t vfsgid) +{ + return in_group_p(AS_KGIDT(vfsgid)); +} +#else +static inline int vfsgid_in_group_p(vfsgid_t vfsgid) +{ + return 1; +} +#endif + /** * initial_idmapping - check whether this is the initial mapping * @ns: idmapping to check @@ -48,7 +171,7 @@ static inline bool no_idmapping(const struct user_namespace *mnt_userns, } /** - * mapped_kuid_fs - map a filesystem kuid into a mnt_userns + * make_vfsuid - map a filesystem kuid into a mnt_userns * @mnt_userns: the mount's idmapping * @fs_userns: the filesystem's idmapping * @kuid : kuid to be mapped @@ -67,25 +190,33 @@ static inline bool no_idmapping(const struct user_namespace *mnt_userns, * If @kuid has no mapping in either @mnt_userns or @fs_userns INVALID_UID is * returned. */ -static inline kuid_t mapped_kuid_fs(struct user_namespace *mnt_userns, - struct user_namespace *fs_userns, - kuid_t kuid) + +static inline vfsuid_t make_vfsuid(struct user_namespace *mnt_userns, + struct user_namespace *fs_userns, + kuid_t kuid) { uid_t uid; if (no_idmapping(mnt_userns, fs_userns)) - return kuid; + return VFSUIDT_INIT(kuid); if (initial_idmapping(fs_userns)) uid = __kuid_val(kuid); else uid = from_kuid(fs_userns, kuid); if (uid == (uid_t)-1) - return INVALID_UID; - return make_kuid(mnt_userns, uid); + return INVALID_VFSUID; + return VFSUIDT_INIT(make_kuid(mnt_userns, uid)); +} + +static inline kuid_t mapped_kuid_fs(struct user_namespace *mnt_userns, + struct user_namespace *fs_userns, + kuid_t kuid) +{ + return AS_KUIDT(make_vfsuid(mnt_userns, fs_userns, kuid)); } /** - * mapped_kgid_fs - map a filesystem kgid into a mnt_userns + * make_vfsgid - map a filesystem kgid into a mnt_userns * @mnt_userns: the mount's idmapping * @fs_userns: the filesystem's idmapping * @kgid : kgid to be mapped @@ -104,21 +235,56 @@ static inline kuid_t mapped_kuid_fs(struct user_namespace *mnt_userns, * If @kgid has no mapping in either @mnt_userns or @fs_userns INVALID_GID is * returned. */ -static inline kgid_t mapped_kgid_fs(struct user_namespace *mnt_userns, - struct user_namespace *fs_userns, - kgid_t kgid) + +static inline vfsgid_t make_vfsgid(struct user_namespace *mnt_userns, + struct user_namespace *fs_userns, + kgid_t kgid) { gid_t gid; if (no_idmapping(mnt_userns, fs_userns)) - return kgid; + return VFSGIDT_INIT(kgid); if (initial_idmapping(fs_userns)) gid = __kgid_val(kgid); else gid = from_kgid(fs_userns, kgid); if (gid == (gid_t)-1) - return INVALID_GID; - return make_kgid(mnt_userns, gid); + return INVALID_VFSGID; + return VFSGIDT_INIT(make_kgid(mnt_userns, gid)); +} + +static inline kgid_t mapped_kgid_fs(struct user_namespace *mnt_userns, + struct user_namespace *fs_userns, + kgid_t kgid) +{ + return AS_KGIDT(make_vfsgid(mnt_userns, fs_userns, kgid)); +} + +/** + * from_vfsuid - map a vfsuid into the filesystem idmapping + * @mnt_userns: the mount's idmapping + * @fs_userns: the filesystem's idmapping + * @vfsuid : vfsuid to be mapped + * + * Map @vfsuid into the filesystem idmapping. This function has to be used in + * order to e.g. write @vfsuid to inode->i_uid. 
+ * + * Return: @vfsuid mapped into the filesystem idmapping + */ +static inline kuid_t from_vfsuid(struct user_namespace *mnt_userns, + struct user_namespace *fs_userns, + vfsuid_t vfsuid) +{ + uid_t uid; + + if (no_idmapping(mnt_userns, fs_userns)) + return AS_KUIDT(vfsuid); + uid = from_kuid(mnt_userns, AS_KUIDT(vfsuid)); + if (uid == (uid_t)-1) + return INVALID_UID; + if (initial_idmapping(fs_userns)) + return KUIDT_INIT(uid); + return make_kuid(fs_userns, uid); } /** @@ -145,16 +311,66 @@ static inline kuid_t mapped_kuid_user(struct user_namespace *mnt_userns, struct user_namespace *fs_userns, kuid_t kuid) { - uid_t uid; + return from_vfsuid(mnt_userns, fs_userns, VFSUIDT_INIT(kuid)); +} + +/** + * vfsuid_has_fsmapping - check whether a vfsuid maps into the filesystem + * @mnt_userns: the mount's idmapping + * @fs_userns: the filesystem's idmapping + * @vfsuid: vfsuid to be mapped + * + * Check whether @vfsuid has a mapping in the filesystem idmapping. Use this + * function to check whether the filesystem idmapping has a mapping for + * @vfsuid. + * + * Return: true if @vfsuid has a mapping in the filesystem, false if not. + */ +static inline bool vfsuid_has_fsmapping(struct user_namespace *mnt_userns, + struct user_namespace *fs_userns, + vfsuid_t vfsuid) +{ + return uid_valid(from_vfsuid(mnt_userns, fs_userns, vfsuid)); +} + +/** + * vfsuid_into_kuid - convert vfsuid into kuid + * @vfsuid: the vfsuid to convert + * + * This can be used when a vfsuid is committed as a kuid. + * + * Return: a kuid with the value of @vfsuid + */ +static inline kuid_t vfsuid_into_kuid(vfsuid_t vfsuid) +{ + return AS_KUIDT(vfsuid); +} + +/** + * from_vfsgid - map a vfsgid into the filesystem idmapping + * @mnt_userns: the mount's idmapping + * @fs_userns: the filesystem's idmapping + * @vfsgid : vfsgid to be mapped + * + * Map @vfsgid into the filesystem idmapping. This function has to be used in + * order to e.g. write @vfsgid to inode->i_gid. + * + * Return: @vfsgid mapped into the filesystem idmapping + */ +static inline kgid_t from_vfsgid(struct user_namespace *mnt_userns, + struct user_namespace *fs_userns, + vfsgid_t vfsgid) +{ + gid_t gid; if (no_idmapping(mnt_userns, fs_userns)) - return kuid; - uid = from_kuid(mnt_userns, kuid); - if (uid == (uid_t)-1) - return INVALID_UID; + return AS_KGIDT(vfsgid); + gid = from_kgid(mnt_userns, AS_KGIDT(vfsgid)); + if (gid == (gid_t)-1) + return INVALID_GID; if (initial_idmapping(fs_userns)) - return KUIDT_INIT(uid); - return make_kuid(fs_userns, uid); + return KGIDT_INIT(gid); + return make_kgid(fs_userns, gid); } /** @@ -181,16 +397,39 @@ static inline kgid_t mapped_kgid_user(struct user_namespace *mnt_userns, struct user_namespace *fs_userns, kgid_t kgid) { - gid_t gid; + return from_vfsgid(mnt_userns, fs_userns, VFSGIDT_INIT(kgid)); +} - if (no_idmapping(mnt_userns, fs_userns)) - return kgid; - gid = from_kgid(mnt_userns, kgid); - if (gid == (gid_t)-1) - return INVALID_GID; - if (initial_idmapping(fs_userns)) - return KGIDT_INIT(gid); - return make_kgid(fs_userns, gid); +/** + * vfsgid_has_fsmapping - check whether a vfsgid maps into the filesystem + * @mnt_userns: the mount's idmapping + * @fs_userns: the filesystem's idmapping + * @vfsgid: vfsgid to be mapped + * + * Check whether @vfsgid has a mapping in the filesystem idmapping. Use this + * function to check whether the filesystem idmapping has a mapping for + * @vfsgid. + * + * Return: true if @vfsgid has a mapping in the filesystem, false if not. 
+ */ +static inline bool vfsgid_has_fsmapping(struct user_namespace *mnt_userns, + struct user_namespace *fs_userns, + vfsgid_t vfsgid) +{ + return gid_valid(from_vfsgid(mnt_userns, fs_userns, vfsgid)); +} + +/** + * vfsgid_into_kgid - convert vfsgid into kgid + * @vfsgid: the vfsgid to convert + * + * This can be used when a vfsgid is committed as a kgid. + * + * Return: a kgid with the value of @vfsgid + */ +static inline kgid_t vfsgid_into_kgid(vfsgid_t vfsgid) +{ + return AS_KGIDT(vfsgid); } /** @@ -209,7 +448,8 @@ static inline kgid_t mapped_kgid_user(struct user_namespace *mnt_userns, static inline kuid_t mapped_fsuid(struct user_namespace *mnt_userns, struct user_namespace *fs_userns) { - return mapped_kuid_user(mnt_userns, fs_userns, current_fsuid()); + return from_vfsuid(mnt_userns, fs_userns, + VFSUIDT_INIT(current_fsuid())); } /** @@ -228,7 +468,8 @@ static inline kuid_t mapped_fsuid(struct user_namespace *mnt_userns, static inline kgid_t mapped_fsgid(struct user_namespace *mnt_userns, struct user_namespace *fs_userns) { - return mapped_kgid_user(mnt_userns, fs_userns, current_fsgid()); + return from_vfsgid(mnt_userns, fs_userns, + VFSGIDT_INIT(current_fsgid())); } #endif /* _LINUX_MNT_IDMAPPING_H */ diff --git a/include/linux/module.h b/include/linux/module.h index abd9fa916b7d..518296ea7f73 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -505,6 +505,11 @@ struct module { int num_static_call_sites; struct static_call_site *static_call_sites; #endif +#if IS_ENABLED(CONFIG_KUNIT) + int num_kunit_suites; + struct kunit_suite **kunit_suites; +#endif + #ifdef CONFIG_LIVEPATCH bool klp; /* Is this a livepatch module? */ diff --git a/include/linux/mpage.h b/include/linux/mpage.h index 43986f7ec4dd..1bdc39daac0a 100644 --- a/include/linux/mpage.h +++ b/include/linux/mpage.h @@ -19,7 +19,5 @@ void mpage_readahead(struct readahead_control *, get_block_t get_block); int mpage_read_folio(struct folio *folio, get_block_t get_block); int mpage_writepages(struct address_space *mapping, struct writeback_control *wbc, get_block_t get_block); -int mpage_writepage(struct page *page, get_block_t *get_block, - struct writeback_control *wbc); #endif diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f615a66c89e9..2563d30736e9 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1671,7 +1671,7 @@ enum netdev_priv_flags { IFF_FAILOVER_SLAVE = 1<<28, IFF_L3MDEV_RX_HANDLER = 1<<29, IFF_LIVE_RENAME_OK = 1<<30, - IFF_TX_SKB_NO_LINEAR = 1<<31, + IFF_TX_SKB_NO_LINEAR = BIT_ULL(31), IFF_CHANGE_PROTO_DOWN = BIT_ULL(32), }; diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 77fa6a61706a..f2402ddeafbf 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -119,9 +119,10 @@ typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error, bool was_async); /* - * Per-inode description. This must be directly after the inode struct. + * Per-inode context. This wraps the VFS inode. 
*/ -struct netfs_i_context { +struct netfs_inode { + struct inode inode; /* The VFS inode */ const struct netfs_request_ops *ops; #if IS_ENABLED(CONFIG_FSCACHE) struct fscache_cookie *cache; @@ -205,15 +206,16 @@ struct netfs_io_request { */ struct netfs_request_ops { int (*init_request)(struct netfs_io_request *rreq, struct file *file); + void (*free_request)(struct netfs_io_request *rreq); int (*begin_cache_operation)(struct netfs_io_request *rreq); + void (*expand_readahead)(struct netfs_io_request *rreq); bool (*clamp_length)(struct netfs_io_subrequest *subreq); void (*issue_read)(struct netfs_io_subrequest *subreq); bool (*is_still_valid)(struct netfs_io_request *rreq); int (*check_write_begin)(struct file *file, loff_t pos, unsigned len, - struct folio *folio, void **_fsdata); + struct folio **foliop, void **_fsdata); void (*done)(struct netfs_io_request *rreq); - void (*cleanup)(struct address_space *mapping, void *netfs_priv); }; /* @@ -256,7 +258,7 @@ struct netfs_cache_ops { * boundary as appropriate. */ enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq, - loff_t i_size); + loff_t i_size); /* Prepare a write operation, working out what part of the write we can * actually do. @@ -274,85 +276,70 @@ struct netfs_cache_ops { }; struct readahead_control; -extern void netfs_readahead(struct readahead_control *); +void netfs_readahead(struct readahead_control *); int netfs_read_folio(struct file *, struct folio *); -extern int netfs_write_begin(struct file *, struct address_space *, - loff_t, unsigned int, struct folio **, - void **); +int netfs_write_begin(struct netfs_inode *, struct file *, + struct address_space *, loff_t pos, unsigned int len, + struct folio **, void **fsdata); -extern void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool); -extern void netfs_get_subrequest(struct netfs_io_subrequest *subreq, - enum netfs_sreq_ref_trace what); -extern void netfs_put_subrequest(struct netfs_io_subrequest *subreq, - bool was_async, enum netfs_sreq_ref_trace what); -extern void netfs_stats_show(struct seq_file *); +void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool); +void netfs_get_subrequest(struct netfs_io_subrequest *subreq, + enum netfs_sreq_ref_trace what); +void netfs_put_subrequest(struct netfs_io_subrequest *subreq, + bool was_async, enum netfs_sreq_ref_trace what); +void netfs_stats_show(struct seq_file *); /** - * netfs_i_context - Get the netfs inode context from the inode + * netfs_inode - Get the netfs inode context from the inode * @inode: The inode to query * * Get the netfs lib inode context from the network filesystem's inode. The * context struct is expected to directly follow on from the VFS inode struct. */ -static inline struct netfs_i_context *netfs_i_context(struct inode *inode) -{ - return (void *)inode + sizeof(*inode); -} - -/** - * netfs_inode - Get the netfs inode from the inode context - * @ctx: The context to query - * - * Get the netfs inode from the netfs library's inode context. The VFS inode - * is expected to directly precede the context struct. 
- */ -static inline struct inode *netfs_inode(struct netfs_i_context *ctx) +static inline struct netfs_inode *netfs_inode(struct inode *inode) { - return (void *)ctx - sizeof(struct inode); + return container_of(inode, struct netfs_inode, inode); } /** - * netfs_i_context_init - Initialise a netfs lib context - * @inode: The inode with which the context is associated + * netfs_inode_init - Initialise a netfslib inode context + * @ctx: The netfs inode to initialise * @ops: The netfs's operations list * * Initialise the netfs library context struct. This is expected to follow on * directly from the VFS inode struct. */ -static inline void netfs_i_context_init(struct inode *inode, - const struct netfs_request_ops *ops) +static inline void netfs_inode_init(struct netfs_inode *ctx, + const struct netfs_request_ops *ops) { - struct netfs_i_context *ctx = netfs_i_context(inode); - - memset(ctx, 0, sizeof(*ctx)); ctx->ops = ops; - ctx->remote_i_size = i_size_read(inode); + ctx->remote_i_size = i_size_read(&ctx->inode); +#if IS_ENABLED(CONFIG_FSCACHE) + ctx->cache = NULL; +#endif } /** * netfs_resize_file - Note that a file got resized - * @inode: The inode being resized + * @ctx: The netfs inode being resized * @new_i_size: The new file size * * Inform the netfs lib that a file got resized so that it can adjust its state. */ -static inline void netfs_resize_file(struct inode *inode, loff_t new_i_size) +static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size) { - struct netfs_i_context *ctx = netfs_i_context(inode); - ctx->remote_i_size = new_i_size; } /** * netfs_i_cookie - Get the cache cookie from the inode - * @inode: The inode to query + * @ctx: The netfs inode to query * * Get the caching cookie (if enabled) from the network filesystem's inode. */ -static inline struct fscache_cookie *netfs_i_cookie(struct inode *inode) +static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx) { #if IS_ENABLED(CONFIG_FSCACHE) - struct netfs_i_context *ctx = netfs_i_context(inode); return ctx->cache; #else return NULL; #endif }
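Under the new scheme a network filesystem embeds struct netfs_inode at the start of its own inode, and container_of() replaces the old "context directly follows the inode" pointer arithmetic; a sketch with invented myfs names:

struct myfs_inode {
	struct netfs_inode netfs;	/* must be first */
	unsigned long my_flags;
};

static inline struct myfs_inode *MYFS_I(struct inode *inode)
{
	return container_of(netfs_inode(inode), struct myfs_inode, netfs);
}

/* in the filesystem's inode set-up path, something like:
 *	netfs_inode_init(&mi->netfs, &myfs_req_ops);
 */

diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 29ec3e3481ff..07cfc922f8e4 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -233,8 +233,8 @@ }; enum { - NVME_CAP_CRMS_CRIMS = 1ULL << 59, - NVME_CAP_CRMS_CRWMS = 1ULL << 60, + NVME_CAP_CRMS_CRWMS = 1ULL << 59, + NVME_CAP_CRMS_CRIMS = 1ULL << 60, }; struct nvme_id_power_state { @@ -906,12 +906,14 @@ struct nvme_common_command { __le32 cdw2[2]; __le64 metadata; union nvme_data_ptr dptr; + struct_group(cdws, __le32 cdw10; __le32 cdw11; __le32 cdw12; __le32 cdw13; __le32 cdw14; __le32 cdw15; + ); }; struct nvme_rw_command { diff --git a/include/linux/objtool.h b/include/linux/objtool.h index 6491fa8fba6d..62c54ffbeeaa 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -32,11 +32,16 @@ struct unwind_hint { * * UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function. * Useful for code which doesn't have an ELF function annotation. + * + * UNWIND_HINT_ENTRY: machine entry without stack, SYSCALL/SYSENTER etc.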
*/ #define UNWIND_HINT_TYPE_CALL 0 #define UNWIND_HINT_TYPE_REGS 1 #define UNWIND_HINT_TYPE_REGS_PARTIAL 2 #define UNWIND_HINT_TYPE_FUNC 3 +#define UNWIND_HINT_TYPE_ENTRY 4 +#define UNWIND_HINT_TYPE_SAVE 5 +#define UNWIND_HINT_TYPE_RESTORE 6 #ifdef CONFIG_OBJTOOL @@ -62,7 +67,7 @@ struct unwind_hint { * It should only be used in special cases where you're 100% sure it won't * affect the reliability of frame pointers and kernel stack traces. * - * For more information, see tools/objtool/Documentation/stack-validation.txt. + * For more information, see tools/objtool/Documentation/objtool.txt. */ #define STACK_FRAME_NON_STANDARD(func) \ static void __used __section(".discard.func_stack_frame_non_standard") \ @@ -124,7 +129,7 @@ struct unwind_hint { * the debuginfo as necessary. It will also warn if it sees any * inconsistencies. */ -.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0 +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0 .Lunwind_hint_ip_\@: .pushsection .discard.unwind_hints /* struct unwind_hint */ @@ -143,6 +148,12 @@ struct unwind_hint { .popsection .endm +.macro STACK_FRAME_NON_STANDARD_FP func:req +#ifdef CONFIG_FRAME_POINTER + STACK_FRAME_NON_STANDARD \func +#endif +.endm + .macro ANNOTATE_NOENDBR .Lhere_\@: .pushsection .discard.noendbr @@ -171,7 +182,7 @@ struct unwind_hint { #define ASM_REACHABLE #else #define ANNOTATE_INTRA_FUNCTION_CALL -.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0 +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0 .endm .macro STACK_FRAME_NON_STANDARD func:req .endm diff --git a/include/linux/of.h b/include/linux/of.h index f0a5d6b10c5a..20a4e7cb7afe 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -441,8 +441,6 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image, unsigned long initrd_load_addr, unsigned long initrd_len, const char *cmdline, size_t extra_fdt_size); -int ima_get_kexec_buffer(void **addr, size_t *size); -int ima_free_kexec_buffer(void); #else /* CONFIG_OF */ static inline void of_core_init(void) diff --git a/include/linux/once_lite.h b/include/linux/once_lite.h index 861e606b820f..b7bce4983638 100644 --- a/include/linux/once_lite.h +++ b/include/linux/once_lite.h @@ -9,15 +9,27 @@ */ #define DO_ONCE_LITE(func, ...) \ DO_ONCE_LITE_IF(true, func, ##__VA_ARGS__) -#define DO_ONCE_LITE_IF(condition, func, ...) \ + +#define __ONCE_LITE_IF(condition) \ ({ \ static bool __section(".data.once") __already_done; \ - bool __ret_do_once = !!(condition); \ + bool __ret_cond = !!(condition); \ + bool __ret_once = false; \ \ - if (unlikely(__ret_do_once && !__already_done)) { \ + if (unlikely(__ret_cond && !__already_done)) { \ __already_done = true; \ - func(__VA_ARGS__); \ + __ret_once = true; \ } \ + unlikely(__ret_once); \ + }) + +#define DO_ONCE_LITE_IF(condition, func, ...) \ + ({ \ + bool __ret_do_once = !!(condition); \ + \ + if (__ONCE_LITE_IF(__ret_do_once)) \ + func(__VA_ARGS__); \ + \ unlikely(__ret_do_once); \ }) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index e66f7aa3191d..3f5490f6f038 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -639,7 +639,7 @@ __PAGEFLAG(Reported, reported, PF_NO_COMPOUND) * structure which KSM associates with that merged page. See ksm.h. * * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable - * page and then page->mapping points a struct address_space. + * page and then page->mapping points to a struct movable_operations. 
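Stepping back to once_lite.h above: the rework splits the one-shot bookkeeping into __ONCE_LITE_IF() so that other once-style macros can reuse it, while DO_ONCE_LITE_IF() keeps its return value (whether the condition was true, not whether func ran). A small usage sketch with a hypothetical check_reading():

    /* Warn only the first time a negative reading is seen. */
    static void check_reading(struct device *dev, int val)
    {
        DO_ONCE_LITE_IF(val < 0, dev_warn, dev, "bad reading: %d\n", val);
    }
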
* * Please note that, confusingly, "page_mapping" refers to the inode * address_space which maps the page from disk; whereas "page_mapped" diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index ce96866fbec4..cc9adbaddb59 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -718,9 +718,8 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index) return head + (index & (thp_nr_pages(head) - 1)); } -unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, - pgoff_t end, unsigned int nr_pages, - struct page **pages); +unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start, + pgoff_t end, struct folio_batch *fbatch); unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct page **pages); unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, @@ -1079,6 +1078,12 @@ static inline int __must_check write_one_page(struct page *page) int __set_page_dirty_nobuffers(struct page *page); bool noop_dirty_folio(struct address_space *mapping, struct folio *folio); +#ifdef CONFIG_MIGRATION +int filemap_migrate_folio(struct address_space *mapping, struct folio *dst, + struct folio *src, enum migrate_mode mode); +#else +#define filemap_migrate_folio NULL +#endif void page_endio(struct page *page, bool is_write, int err); void folio_end_private_2(struct folio *folio); @@ -1098,8 +1103,6 @@ size_t fault_in_subpage_writeable(char __user *uaddr, size_t size); size_t fault_in_safe_writeable(const char __user *uaddr, size_t size); size_t fault_in_readable(const char __user *uaddr, size_t size); -int add_to_page_cache_locked(struct page *page, struct address_space *mapping, - pgoff_t index, gfp_t gfp); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp); int filemap_add_folio(struct address_space *mapping, struct folio *folio, @@ -1107,10 +1110,6 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio, void filemap_remove_folio(struct folio *folio); void delete_from_page_cache(struct page *page); void __filemap_remove_folio(struct folio *folio, void *shadow); -static inline void __delete_from_page_cache(struct page *page, void *shadow) -{ - __filemap_remove_folio(page_folio(page), shadow); -} void replace_page_cache_page(struct page *old, struct page *new); void delete_from_page_cache_batch(struct address_space *mapping, struct folio_batch *fbatch); @@ -1119,22 +1118,6 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp); loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end, int whence); -/* - * Like add_to_page_cache_locked, but used to add newly allocated pages: - * the page is new, so we can just run __SetPageLocked() against it. 
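In pagemap.h above, filemap_get_folios() supersedes find_get_pages_range(): callers receive a folio_batch back instead of filling a bare page array, and a large folio is returned once rather than page by page. A minimal iteration sketch (process_folio() is a hypothetical callback):

    static void myfs_scan_mapping(struct address_space *mapping)
    {
        struct folio_batch fbatch;
        pgoff_t start = 0;

        folio_batch_init(&fbatch);
        while (filemap_get_folios(mapping, &start, (pgoff_t)-1, &fbatch)) {
            unsigned int i;

            for (i = 0; i < folio_batch_count(&fbatch); i++)
                process_folio(fbatch.folios[i]);    /* hypothetical */
            folio_batch_release(&fbatch);
            cond_resched();
        }
    }
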
- */ -static inline int add_to_page_cache(struct page *page, - struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) -{ - int error; - - __SetPageLocked(page); - error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); - if (unlikely(error)) - __ClearPageLocked(page); - return error; -} - /* Must be non-static for BPF error injection */ int __filemap_add_folio(struct address_space *mapping, struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp); diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index 67b1246f136b..6649154a2115 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -27,16 +27,6 @@ struct pagevec { void __pagevec_release(struct pagevec *pvec); void __pagevec_lru_add(struct pagevec *pvec); -unsigned pagevec_lookup_range(struct pagevec *pvec, - struct address_space *mapping, - pgoff_t *start, pgoff_t end); -static inline unsigned pagevec_lookup(struct pagevec *pvec, - struct address_space *mapping, - pgoff_t *start) -{ - return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1); -} - unsigned pagevec_lookup_range_tag(struct pagevec *pvec, struct address_space *mapping, pgoff_t *index, pgoff_t end, xa_mark_t tag); diff --git a/include/linux/panic.h b/include/linux/panic.h index e71161da69c4..c7759b3f2045 100644 --- a/include/linux/panic.h +++ b/include/linux/panic.h @@ -68,7 +68,8 @@ static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout) #define TAINT_LIVEPATCH 15 #define TAINT_AUX 16 #define TAINT_RANDSTRUCT 17 -#define TAINT_FLAGS_COUNT 18 +#define TAINT_TEST 18 +#define TAINT_FLAGS_COUNT 19 #define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1) struct taint_flag { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 0178823ce8c2..7fa460ccf7fa 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -556,10 +556,13 @@ #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 #define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 +#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3 0x1727 #define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653 #define PCI_DEVICE_ID_AMD_19H_M10H_DF_F3 0x14b0 #define PCI_DEVICE_ID_AMD_19H_M40H_DF_F3 0x167c #define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d +#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3 0x14e3 +#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3 0x14f3 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h index 46f9b6fe306e..bf66fe011fa8 100644 --- a/include/linux/perf/riscv_pmu.h +++ b/include/linux/perf/riscv_pmu.h @@ -56,9 +56,13 @@ struct riscv_pmu { struct cpu_hw_events __percpu *hw_events; struct hlist_node node; + struct notifier_block riscv_pm_nb; }; #define to_riscv_pmu(p) (container_of(p, struct riscv_pmu, pmu)) + +void riscv_pmu_start(struct perf_event *event, int flags); +void riscv_pmu_stop(struct perf_event *event, int flags); unsigned long riscv_pmu_ctr_read_csr(unsigned long csr); int riscv_pmu_event_set_period(struct perf_event *event); uint64_t riscv_pmu_ctr_get_width_mask(struct perf_event *event); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index da759560eec5..ee8b9ecdc03b 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -759,6 +759,8 @@ struct perf_event { struct pid_namespace *ns; u64 id; + atomic64_t lost_samples; + u64 (*clock)(void); perf_overflow_handler_t overflow_handler; void 
*overflow_handler_context; diff --git a/include/linux/phy.h b/include/linux/phy.h index 508f1149665b..b09f7d36cff2 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -572,6 +572,10 @@ struct macsec_ops; * @mdix_ctrl: User setting of crossover * @pma_extable: Cached value of PMA/PMD Extended Abilities Register * @interrupts: Flag interrupts have been enabled + * @irq_suspended: Flag indicating PHY is suspended and therefore interrupt + * handling shall be postponed until PHY has resumed + * @irq_rerun: Flag indicating interrupts occurred while PHY was suspended, + * requiring a rerun of the interrupt handler after resume * @interface: enum phy_interface_t value * @skb: Netlink message for cable diagnostics * @nest: Netlink nest used for cable diagnostics @@ -626,6 +630,8 @@ struct phy_device { /* Interrupts are enabled */ unsigned interrupts:1; + unsigned irq_suspended:1; + unsigned irq_rerun:1; enum phy_state state; diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index cb0fd633a610..4ea496924106 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -229,6 +229,15 @@ static inline bool pipe_buf_try_steal(struct pipe_inode_info *pipe, return buf->ops->try_steal(pipe, buf); } +static inline void pipe_discard_from(struct pipe_inode_info *pipe, + unsigned int old_head) +{ + unsigned int mask = pipe->ring_size - 1; + + while (pipe->head > old_head) + pipe_buf_release(pipe, &pipe->bufs[--pipe->head & mask]); +} + /* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual memory allocation, whereas PIPE_BUF makes atomicity guarantees. */ #define PIPE_SIZE PAGE_SIZE diff --git a/include/linux/platform-feature.h b/include/linux/platform-feature.h new file mode 100644 index 000000000000..b2f48be999fa --- /dev/null +++ b/include/linux/platform-feature.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _PLATFORM_FEATURE_H +#define _PLATFORM_FEATURE_H + +#include <linux/bitops.h> +#include <asm/platform-feature.h> + +/* The platform features are starting with the architecture specific ones. */ + +/* Used to enable platform specific DMA handling for virtio devices. 
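The new platform-feature.h (whose definitions continue just below) is a tiny global feature bitmap: arch or platform setup code sets bits, consumers only test them. A sketch of both sides under that assumption; the function names here are hypothetical:

    /* Arch/platform boot code, e.g. an encrypted-guest setup path: */
    static void __init my_guest_init(void)
    {
        platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
    }

    /* Consumer side, e.g. virtio core asking whether memory access is restricted: */
    static bool my_mem_access_restricted(void)
    {
        return platform_has(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
    }
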
*/ +#define PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS (0 + PLATFORM_ARCH_FEAT_N) + +#define PLATFORM_FEAT_N (1 + PLATFORM_ARCH_FEAT_N) + +void platform_set(unsigned int feature); +void platform_clear(unsigned int feature); +bool platform_has(unsigned int feature); + +#endif /* _PLATFORM_FEATURE_H */ diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 9e4d056967c6..0a41b2dcccad 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -88,7 +88,7 @@ extern void pm_runtime_get_suppliers(struct device *dev); extern void pm_runtime_put_suppliers(struct device *dev); extern void pm_runtime_new_link(struct device *dev); extern void pm_runtime_drop_link(struct device_link *link); -extern void pm_runtime_release_supplier(struct device_link *link, bool check_idle); +extern void pm_runtime_release_supplier(struct device_link *link); extern int devm_pm_runtime_enable(struct device *dev); @@ -314,8 +314,7 @@ static inline void pm_runtime_get_suppliers(struct device *dev) {} static inline void pm_runtime_put_suppliers(struct device *dev) {} static inline void pm_runtime_new_link(struct device *dev) {} static inline void pm_runtime_drop_link(struct device_link *link) {} -static inline void pm_runtime_release_supplier(struct device_link *link, - bool check_idle) {} +static inline void pm_runtime_release_supplier(struct device_link *link) {} #endif /* !CONFIG_PM */ diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 196a157456aa..77f4849e3418 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -109,7 +109,6 @@ extern struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws); extern int device_wakeup_enable(struct device *dev); extern int device_wakeup_disable(struct device *dev); extern void device_set_wakeup_capable(struct device *dev, bool capable); -extern int device_init_wakeup(struct device *dev, bool val); extern int device_set_wakeup_enable(struct device *dev, bool enable); extern void __pm_stay_awake(struct wakeup_source *ws); extern void pm_stay_awake(struct device *dev); @@ -167,13 +166,6 @@ static inline int device_set_wakeup_enable(struct device *dev, bool enable) return 0; } -static inline int device_init_wakeup(struct device *dev, bool val) -{ - device_set_wakeup_capable(dev, val); - device_set_wakeup_enable(dev, val); - return 0; -} - static inline bool device_may_wakeup(struct device *dev) { return dev->power.can_wakeup && dev->power.should_wakeup; @@ -217,4 +209,27 @@ static inline void pm_wakeup_hard_event(struct device *dev) return pm_wakeup_dev_event(dev, 0, true); } +/** + * device_init_wakeup - Device wakeup initialization. + * @dev: Device to handle. + * @enable: Whether or not to enable @dev as a wakeup device. + * + * By default, most devices should leave wakeup disabled. The exceptions are + * devices that everyone expects to be wakeup sources: keyboards, power buttons, + * possibly network interfaces, etc. Also, devices that don't generate their + * own wakeup requests but merely forward requests from one bus to another + * (like PCI bridges) should have wakeup enabled by default. 
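device_init_wakeup() becomes the single static inline below for both PM_SLEEP and !PM_SLEEP configurations; its semantics are unchanged. Probe/remove usage in a hypothetical keyboard driver:

    static int kbd_probe(struct platform_device *pdev)
    {
        /* Keyboards are expected wakeup sources: mark capable, then enable. */
        return device_init_wakeup(&pdev->dev, true);
    }

    static int kbd_remove(struct platform_device *pdev)
    {
        /* Reverse order on teardown: disable, then clear the capability. */
        return device_init_wakeup(&pdev->dev, false);
    }
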
+ */ +static inline int device_init_wakeup(struct device *dev, bool enable) +{ + if (enable) { + device_set_wakeup_capable(dev, true); + return device_wakeup_enable(dev); + } else { + device_wakeup_disable(dev); + device_set_wakeup_capable(dev, false); + return 0; + } +} + #endif /* _LINUX_PM_WAKEUP_H */ diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index b65c877d92b8..7d1e604c1325 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -73,6 +73,7 @@ extern int set_posix_acl(struct user_namespace *, struct inode *, int, struct posix_acl *); struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type); +struct posix_acl *posix_acl_clone(const struct posix_acl *acl, gfp_t flags); #ifdef CONFIG_FS_POSIX_ACL int posix_acl_chmod(struct user_namespace *, struct inode *, umode_t); diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h index 1766e1de6956..b6bd3eac2bcc 100644 --- a/include/linux/posix_acl_xattr.h +++ b/include/linux/posix_acl_xattr.h @@ -33,21 +33,31 @@ posix_acl_xattr_count(size_t size) } #ifdef CONFIG_FS_POSIX_ACL -void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns, - struct inode *inode, - void *value, size_t size); -void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns, - struct inode *inode, - void *value, size_t size); +void posix_acl_fix_xattr_from_user(void *value, size_t size); +void posix_acl_fix_xattr_to_user(void *value, size_t size); +void posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns, + const struct inode *inode, + void *value, size_t size); +void posix_acl_setxattr_idmapped_mnt(struct user_namespace *mnt_userns, + const struct inode *inode, + void *value, size_t size); #else -static inline void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns, - struct inode *inode, - void *value, size_t size) +static inline void posix_acl_fix_xattr_from_user(void *value, size_t size) { } -static inline void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns, - struct inode *inode, - void *value, size_t size) +static inline void posix_acl_fix_xattr_to_user(void *value, size_t size) +{ +} +static inline void +posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns, + const struct inode *inode, void *value, + size_t size) +{ +} +static inline void +posix_acl_setxattr_idmapped_mnt(struct user_namespace *mnt_userns, + const struct inode *inode, void *value, + size_t size) { } #endif diff --git a/include/linux/printk.h b/include/linux/printk.h index 10ec29bc0135..cf7d666ab1f8 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -169,9 +169,6 @@ extern void __printk_safe_exit(void); #define printk_deferred_enter __printk_safe_enter #define printk_deferred_exit __printk_safe_exit -extern void printk_prefer_direct_enter(void); -extern void printk_prefer_direct_exit(void); - extern bool pr_flush(int timeout_ms, bool reset_on_progress); /* @@ -224,14 +221,6 @@ static inline void printk_deferred_exit(void) { } -static inline void printk_prefer_direct_enter(void) -{ -} - -static inline void printk_prefer_direct_exit(void) -{ -} - static inline bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 9771a0761a40..9429930c5566 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -6,9 +6,6 @@ #include <linux/mutex.h> #include <linux/of.h> -struct pwm_capture; -struct seq_file; - struct pwm_chip; /** @@ -252,6 +249,16 @@ 
pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle, } /** + * struct pwm_capture - PWM capture data + * @period: period of the PWM signal (in nanoseconds) + * @duty_cycle: duty cycle of the PWM signal (in nanoseconds) + */ +struct pwm_capture { + unsigned int period; + unsigned int duty_cycle; +}; + +/** * struct pwm_ops - PWM controller operations * @request: optional hook for requesting a PWM * @free: optional hook for freeing a PWM @@ -261,10 +268,6 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle, * called once per PWM device when the PWM chip is * registered. * @owner: helps prevent removal of modules exporting active PWMs - * @config: configure duty cycles and period length for this PWM - * @set_polarity: configure the polarity of this PWM - * @enable: enable PWM output toggling - * @disable: disable PWM output toggling */ struct pwm_ops { int (*request)(struct pwm_chip *chip, struct pwm_device *pwm); @@ -276,14 +279,6 @@ struct pwm_ops { void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state); struct module *owner; - - /* Only used by legacy drivers */ - int (*config)(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns); - int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm, - enum pwm_polarity polarity); - int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm); - void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm); }; /** @@ -312,16 +307,6 @@ struct pwm_chip { struct pwm_device *pwms; }; -/** - * struct pwm_capture - PWM capture data - * @period: period of the PWM signal (in nanoseconds) - * @duty_cycle: duty cycle of the PWM signal (in nanoseconds) - */ -struct pwm_capture { - unsigned int period; - unsigned int duty_cycle; -}; - #if IS_ENABLED(CONFIG_PWM) /* PWM user APIs */ struct pwm_device *pwm_request(int pwm_id, const char *label); diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index a0f6668924d3..0d8625d71733 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -20,11 +20,12 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb) } /* i_mutex must be held */ -static inline bool is_quota_modification(struct inode *inode, struct iattr *ia) +static inline bool is_quota_modification(struct user_namespace *mnt_userns, + struct inode *inode, struct iattr *ia) { - return (ia->ia_valid & ATTR_SIZE) || - (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) || - (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid)); + return ((ia->ia_valid & ATTR_SIZE) || + i_uid_needs_update(mnt_userns, ia, inode) || + i_gid_needs_update(mnt_userns, ia, inode)); } #if defined(CONFIG_QUOTA) @@ -115,7 +116,8 @@ int dquot_set_dqblk(struct super_block *sb, struct kqid id, struct qc_dqblk *di); int __dquot_transfer(struct inode *inode, struct dquot **transfer_to); -int dquot_transfer(struct inode *inode, struct iattr *iattr); +int dquot_transfer(struct user_namespace *mnt_userns, struct inode *inode, + struct iattr *iattr); static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type) { @@ -234,7 +236,8 @@ static inline void dquot_free_inode(struct inode *inode) { } -static inline int dquot_transfer(struct inode *inode, struct iattr *iattr) +static inline int dquot_transfer(struct user_namespace *mnt_userns, + struct inode *inode, struct iattr *iattr) { return 0; } diff --git a/include/linux/random.h b/include/linux/random.h index fae0c84027fd..3fec206487f6 ---
a/include/linux/random.h +++ b/include/linux/random.h @@ -13,7 +13,7 @@ struct notifier_block; void add_device_randomness(const void *buf, size_t len); -void add_bootloader_randomness(const void *buf, size_t len); +void __init add_bootloader_randomness(const void *buf, size_t len); void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) __latent_entropy; void add_interrupt_randomness(int irq) __latent_entropy; @@ -74,7 +74,6 @@ static inline unsigned long get_random_canary(void) int __init random_init(const char *command_line); bool rng_is_initialized(void); -bool rng_has_arch_random(void); int wait_for_random_bytes(void); /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes). @@ -107,32 +106,25 @@ declare_get_random_var_wait(long, unsigned long) */ #include <linux/prandom.h> -#ifdef CONFIG_ARCH_RANDOM -# include <asm/archrandom.h> -#else -static inline bool __must_check arch_get_random_long(unsigned long *v) { return false; } -static inline bool __must_check arch_get_random_int(unsigned int *v) { return false; } -static inline bool __must_check arch_get_random_seed_long(unsigned long *v) { return false; } -static inline bool __must_check arch_get_random_seed_int(unsigned int *v) { return false; } -#endif +#include <asm/archrandom.h> /* * Called from the boot CPU during startup; not valid to call once * secondary CPUs are up and preemption is possible. */ -#ifndef arch_get_random_seed_long_early -static inline bool __init arch_get_random_seed_long_early(unsigned long *v) +#ifndef arch_get_random_seed_longs_early +static inline size_t __init arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs) { WARN_ON(system_state != SYSTEM_BOOTING); - return arch_get_random_seed_long(v); + return arch_get_random_seed_longs(v, max_longs); } #endif -#ifndef arch_get_random_long_early -static inline bool __init arch_get_random_long_early(unsigned long *v) +#ifndef arch_get_random_longs_early +static inline size_t __init arch_get_random_longs_early(unsigned long *v, size_t max_longs) { WARN_ON(system_state != SYSTEM_BOOTING); - return arch_get_random_long(v); + return arch_get_random_longs(v, max_longs); } #endif diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h index c21c7f8103e2..002266693e50 100644 --- a/include/linux/ratelimit_types.h +++ b/include/linux/ratelimit_types.h @@ -23,12 +23,16 @@ struct ratelimit_state { unsigned long flags; }; -#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ - .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ - .interval = interval_init, \ - .burst = burst_init, \ +#define RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, flags_init) { \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ + .interval = interval_init, \ + .burst = burst_init, \ + .flags = flags_init, \ } +#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) \ + RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, 0) + #define RATELIMIT_STATE_INIT_DISABLED \ RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 1a32036c918c..f527f27e6438 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -29,6 +29,7 @@ #include <linux/lockdep.h> #include <asm/processor.h> #include <linux/cpumask.h> +#include <linux/context_tracking_irq.h> #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) @@ -41,6 +42,7 @@ void call_rcu(struct rcu_head
*head, rcu_callback_t func); void rcu_barrier_tasks(void); void rcu_barrier_tasks_rude(void); void synchronize_rcu(void); +unsigned long get_completed_synchronize_rcu(void); #ifdef CONFIG_PREEMPT_RCU @@ -103,13 +105,11 @@ static inline void rcu_sysrq_start(void) { } static inline void rcu_sysrq_end(void) { } #endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */ -#ifdef CONFIG_NO_HZ_FULL -void rcu_user_enter(void); -void rcu_user_exit(void); +#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) +void rcu_irq_work_resched(void); #else -static inline void rcu_user_enter(void) { } -static inline void rcu_user_exit(void) { } -#endif /* CONFIG_NO_HZ_FULL */ +static inline void rcu_irq_work_resched(void) { } +#endif #ifdef CONFIG_RCU_NOCB_CPU void rcu_init_nohz(void); @@ -128,7 +128,7 @@ static inline void rcu_nocb_flush_deferred_wakeup(void) { } * @a: Code that RCU needs to pay attention to. * * RCU read-side critical sections are forbidden in the inner idle loop, - * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU + * that is, between the ct_idle_enter() and the ct_idle_exit() -- RCU * will happily ignore any such read-side critical sections. However, * things like powertop need tracepoints in the inner idle loop. * @@ -143,9 +143,9 @@ static inline void rcu_nocb_flush_deferred_wakeup(void) { } */ #define RCU_NONIDLE(a) \ do { \ - rcu_irq_enter_irqson(); \ + ct_irq_enter_irqson(); \ do { a; } while (0); \ - rcu_irq_exit_irqson(); \ + ct_irq_exit_irqson(); \ } while (0) /* @@ -169,13 +169,24 @@ void synchronize_rcu_tasks(void); # endif # ifdef CONFIG_TASKS_TRACE_RCU -# define rcu_tasks_trace_qs(t) \ - do { \ - if (!likely(READ_ONCE((t)->trc_reader_checked)) && \ - !unlikely(READ_ONCE((t)->trc_reader_nesting))) { \ - smp_store_release(&(t)->trc_reader_checked, true); \ - smp_mb(); /* Readers partitioned by store. */ \ - } \ +// Bits for ->trc_reader_special.b.need_qs field. +#define TRC_NEED_QS 0x1 // Task needs a quiescent state. +#define TRC_NEED_QS_CHECKED 0x2 // Task has been checked for needing quiescent state. + +u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new); +void rcu_tasks_trace_qs_blkd(struct task_struct *t); + +# define rcu_tasks_trace_qs(t) \ + do { \ + int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \ + \ + if (likely(!READ_ONCE((t)->trc_reader_special.b.need_qs)) && \ + likely(!___rttq_nesting)) { \ + rcu_trc_cmpxchg_need_qs((t), 0, TRC_NEED_QS_CHECKED); \ + } else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \ + !READ_ONCE((t)->trc_reader_special.b.blocked)) { \ + rcu_tasks_trace_qs_blkd(t); \ + } \ } while (0) # else # define rcu_tasks_trace_qs(t) do { } while (0) @@ -184,7 +195,7 @@ void synchronize_rcu_tasks(void); #define rcu_tasks_qs(t, preempt) \ do { \ rcu_tasks_classic_qs((t), (preempt)); \ - rcu_tasks_trace_qs((t)); \ + rcu_tasks_trace_qs(t); \ } while (0) # ifdef CONFIG_TASKS_RUDE_RCU diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h index 6f9c35817398..9bc8cbb33340 100644 --- a/include/linux/rcupdate_trace.h +++ b/include/linux/rcupdate_trace.h @@ -75,7 +75,7 @@ static inline void rcu_read_unlock_trace(void) nesting = READ_ONCE(t->trc_reader_nesting) - 1; barrier(); // Critical section before disabling. // Disable IPI-based setting of .need_qs. 
- WRITE_ONCE(t->trc_reader_nesting, INT_MIN); + WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting); if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) { WRITE_ONCE(t->trc_reader_nesting, nesting); return; // We assume shallow reader nesting. diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 5fed476f977f..62815c0a2dce 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -23,6 +23,16 @@ static inline void cond_synchronize_rcu(unsigned long oldstate) might_sleep(); } +static inline unsigned long start_poll_synchronize_rcu_expedited(void) +{ + return start_poll_synchronize_rcu(); +} + +static inline void cond_synchronize_rcu_expedited(unsigned long oldstate) +{ + cond_synchronize_rcu(oldstate); +} + extern void rcu_barrier(void); static inline void synchronize_rcu_expedited(void) @@ -38,7 +48,7 @@ static inline void synchronize_rcu_expedited(void) */ extern void kvfree(const void *addr); -static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) +static inline void __kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) { if (head) { call_rcu(head, func); @@ -51,6 +61,15 @@ static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) kvfree((void *) func); } +#ifdef CONFIG_KASAN_GENERIC +void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func); +#else +static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) +{ + __kvfree_call_rcu(head, func); +} +#endif + void rcu_qs(void); static inline void rcu_softirq_qs(void) @@ -76,12 +95,6 @@ static inline int rcu_needs_cpu(void) static inline void rcu_virt_note_context_switch(int cpu) { } static inline void rcu_cpu_stall_reset(void) { } static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; } -static inline void rcu_idle_enter(void) { } -static inline void rcu_idle_exit(void) { } -static inline void rcu_irq_enter(void) { } -static inline void rcu_irq_exit_irqson(void) { } -static inline void rcu_irq_enter_irqson(void) { } -static inline void rcu_irq_exit(void) { } static inline void rcu_irq_exit_check_preempt(void) { } #define rcu_is_idle_cpu(cpu) \ (is_idle_task(current) && !in_nmi() && !in_hardirq() && !in_serving_softirq()) diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 9c6cfb742504..47eaa4cb0df7 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -40,17 +40,13 @@ bool rcu_eqs_special_set(int cpu); void rcu_momentary_dyntick_idle(void); void kfree_rcu_scheduler_running(void); bool rcu_gp_might_be_stalled(void); +unsigned long start_poll_synchronize_rcu_expedited(void); +void cond_synchronize_rcu_expedited(unsigned long oldstate); unsigned long get_state_synchronize_rcu(void); unsigned long start_poll_synchronize_rcu(void); bool poll_state_synchronize_rcu(unsigned long oldstate); void cond_synchronize_rcu(unsigned long oldstate); -void rcu_idle_enter(void); -void rcu_idle_exit(void); -void rcu_irq_enter(void); -void rcu_irq_exit(void); -void rcu_irq_enter_irqson(void); -void rcu_irq_exit_irqson(void); bool rcu_is_idle_cpu(int cpu); #ifdef CONFIG_PROVE_RCU @@ -59,6 +55,9 @@ void rcu_irq_exit_check_preempt(void); static inline void rcu_irq_exit_check_preempt(void) { } #endif +struct task_struct; +void rcu_preempt_deferred_qs(struct task_struct *t); + void exit_rcu(void); void rcu_scheduler_starting(void); diff --git a/include/linux/refcount.h b/include/linux/refcount.h index b8a6e387f8f9..a62fcca97486 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ 
-361,9 +361,9 @@ static inline void refcount_dec(refcount_t *r) extern __must_check bool refcount_dec_if_one(refcount_t *r); extern __must_check bool refcount_dec_not_one(refcount_t *r); -extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock); -extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock); +extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock); +extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock); extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock, - unsigned long *flags); + unsigned long *flags) __cond_acquires(lock); #endif /* _LINUX_REFCOUNT_H */ diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 8952fa3d0d59..7cf2157134ac 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1336,6 +1336,22 @@ static inline int regmap_field_update_bits(struct regmap_field *field, NULL, false, false); } +static inline int regmap_field_set_bits(struct regmap_field *field, + unsigned int bits) +{ + return regmap_field_update_bits_base(field, bits, bits, NULL, false, + false); +} + +static inline int regmap_field_clear_bits(struct regmap_field *field, + unsigned int bits) +{ + return regmap_field_update_bits_base(field, bits, 0, NULL, false, + false); +} + +int regmap_field_test_bits(struct regmap_field *field, unsigned int bits); + static inline int regmap_field_force_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val) @@ -1424,6 +1440,8 @@ struct regmap_irq_sub_irq_map { unsigned int *offset; }; +struct regmap_irq_chip_data; + /** * struct regmap_irq_chip - Description of a generic regmap irq_chip. * @@ -1451,32 +1469,50 @@ struct regmap_irq_sub_irq_map { * main_status set. * * @status_base: Base status register address. - * @mask_base: Base mask register address. - * @mask_writeonly: Base mask register is write only. - * @unmask_base: Base unmask register address. for chips who have - * separate mask and unmask registers + * @mask_base: Base mask register address. Mask bits are set to 1 when an + * interrupt is masked, 0 when unmasked. + * @unmask_base: Base unmask register address. Unmask bits are set to 1 when + * an interrupt is unmasked and 0 when masked. * @ack_base: Base ack address. If zero then the chip is clear on read. * Using zero value is possible with @use_ack bit. * @wake_base: Base address for wake enables. If zero unsupported. - * @type_base: Base address for irq type. If zero unsupported. - * @virt_reg_base: Base addresses for extra config regs. + * @type_base: Base address for irq type. If zero unsupported. Deprecated, + * use @config_base instead. + * @virt_reg_base: Base addresses for extra config regs. Deprecated, use + * @config_base instead. + * @config_base: Base address for IRQ type config regs. If null unsupported. * @irq_reg_stride: Stride to use for chips where registers are not contiguous. * @init_ack_masked: Ack all masked interrupts once during initialization. * @mask_invert: Inverted mask register: cleared bits are masked out. + * Deprecated; prefer describing an inverted mask register as + * an unmask register. + * @mask_unmask_non_inverted: Controls mask bit inversion for chips that set + * both @mask_base and @unmask_base. If false, mask and unmask bits are + * inverted (which is deprecated behavior); if true, bits will not be + * inverted and the registers keep their normal behavior.
Note that if + * you use only one of @mask_base or @unmask_base, this flag has no + * effect and is unnecessary. Any new drivers that set both @mask_base + * and @unmask_base should set this to true to avoid relying on the + * deprecated behavior. * @use_ack: Use @ack register even if it is zero. * @ack_invert: Inverted ack register: cleared bits for ack. * @clear_ack: Use this to set 1 and 0 or vice-versa to clear interrupts. * @wake_invert: Inverted wake register: cleared bits are wake enabled. - * @type_invert: Invert the type flags. - * @type_in_mask: Use the mask registers for controlling irq type. For - * interrupts defining type_rising/falling_mask use mask_base - * for edge configuration and never update bits in type_base. + * @type_invert: Invert the type flags. Deprecated, use config registers + * instead. + * @type_in_mask: Use the mask registers for controlling irq type. Use this if + * the hardware provides separate bits for rising/falling edge + * or low/high level interrupts and they should be combined into + * a single logical interrupt. Use &struct regmap_irq_type data + * to define the mask bit for each irq type. * @clear_on_unmask: For chips with interrupts cleared on read: read the status * registers before unmasking interrupts to clear any bits * set when they were masked. * @not_fixed_stride: Used when chip peripherals are not laid out with fixed - * stride. Must be used with sub_reg_offsets containing the - * offsets to each peripheral. + * stride. Must be used with sub_reg_offsets containing the + * offsets to each peripheral. Deprecated; the same thing + * can be accomplished with a @get_irq_reg callback, without + * the need for a @sub_reg_offsets table. * @status_invert: Inverted status register: cleared bits are active interrupts. * @runtime_pm: Hold a runtime PM lock on the device when accessing it. * @@ -1484,17 +1520,28 @@ struct regmap_irq_sub_irq_map { * @irqs: Descriptors for individual IRQs. Interrupt numbers are * assigned based on the index in the array of the interrupt. * @num_irqs: Number of descriptors. - * @num_type_reg: Number of type registers. + * @num_type_reg: Number of type registers. Deprecated, use config registers + * instead. * @num_virt_regs: Number of non-standard irq configuration registers. - * If zero unsupported. - * @type_reg_stride: Stride to use for chips where type registers are not - * contiguous. + * If zero unsupported. Deprecated, use config registers + * instead. + * @num_config_bases: Number of config base registers. + * @num_config_regs: Number of config registers for each config base register. * @handle_pre_irq: Driver specific callback to handle interrupt from device * before regmap_irq_handler process the interrupts. * @handle_post_irq: Driver specific callback to handle interrupt from device * after handling the interrupts in regmap_irq_handler(). * @set_type_virt: Driver specific callback to extend regmap_irq_set_type() - * and configure virt regs. + * and configure virt regs. Deprecated, use @set_type_config + * callback and config registers instead. + * @set_type_config: Callback used for configuring irq types. + * @get_irq_reg: Callback for mapping (base register, index) pairs to register + * addresses. The base register will be one of @status_base, + * @mask_base, etc., @main_status, or any of @config_base. + * The index will be in the range [0, num_main_regs[ for the + * main status base, [0, num_type_settings[ for any config + * register base, and [0, num_regs[ for any other base. 
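Most chips have linearly spaced register banks and can rely on the default mapping (named just below); a custom @get_irq_reg is only needed for irregular layouts. A sketch for a hypothetical chip whose banks sit 0x10 apart:

    static unsigned int my_get_irq_reg(struct regmap_irq_chip_data *data,
                                       unsigned int base, int index)
    {
        /* What regmap_irq_get_irq_reg_linear() would do with a 0x10 stride. */
        return base + index * 0x10;
    }
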
+ * If unspecified then regmap_irq_get_irq_reg_linear() is used. * @irq_drv_data: Driver specific IRQ data which is passed as parameter when * driver specific pre/post interrupt handler is called. * @@ -1517,20 +1564,21 @@ struct regmap_irq_chip { unsigned int wake_base; unsigned int type_base; unsigned int *virt_reg_base; + const unsigned int *config_base; unsigned int irq_reg_stride; - bool mask_writeonly:1; - bool init_ack_masked:1; - bool mask_invert:1; - bool use_ack:1; - bool ack_invert:1; - bool clear_ack:1; - bool wake_invert:1; - bool runtime_pm:1; - bool type_invert:1; - bool type_in_mask:1; - bool clear_on_unmask:1; - bool not_fixed_stride:1; - bool status_invert:1; + unsigned int init_ack_masked:1; + unsigned int mask_invert:1; + unsigned int mask_unmask_non_inverted:1; + unsigned int use_ack:1; + unsigned int ack_invert:1; + unsigned int clear_ack:1; + unsigned int wake_invert:1; + unsigned int runtime_pm:1; + unsigned int type_invert:1; + unsigned int type_in_mask:1; + unsigned int clear_on_unmask:1; + unsigned int not_fixed_stride:1; + unsigned int status_invert:1; int num_regs; @@ -1539,16 +1587,24 @@ struct regmap_irq_chip { int num_type_reg; int num_virt_regs; - unsigned int type_reg_stride; + int num_config_bases; + int num_config_regs; int (*handle_pre_irq)(void *irq_drv_data); int (*handle_post_irq)(void *irq_drv_data); int (*set_type_virt)(unsigned int **buf, unsigned int type, unsigned long hwirq, int reg); + int (*set_type_config)(unsigned int **buf, unsigned int type, + const struct regmap_irq *irq_data, int idx); + unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data, + unsigned int base, int index); void *irq_drv_data; }; -struct regmap_irq_chip_data; +unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data, + unsigned int base, int index); +int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type, + const struct regmap_irq *irq_data, int idx); int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, int irq_base, const struct regmap_irq_chip *chip, @@ -1769,6 +1825,27 @@ regmap_field_force_update_bits(struct regmap_field *field, return -EINVAL; } +static inline int regmap_field_set_bits(struct regmap_field *field, + unsigned int bits) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_field_clear_bits(struct regmap_field *field, + unsigned int bits) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_field_test_bits(struct regmap_field *field, + unsigned int bits) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + static inline int regmap_fields_write(struct regmap_field *field, unsigned int id, unsigned int val) { diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index bbf6590a6dec..bc6cda706d1f 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -171,10 +171,13 @@ struct regulator; /** * struct regulator_bulk_data - Data used for bulk regulator operations. * - * @supply: The name of the supply. Initialised by the user before - * using the bulk regulator APIs. - * @consumer: The regulator consumer for the supply. This will be managed - * by the bulk API. + * @supply: The name of the supply. Initialised by the user before + * using the bulk regulator APIs. + * @init_load_uA: After getting the regulator, regulator_set_load() will be + * called with this load. Initialised by the user before + * using the bulk regulator APIs. 
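With the new @init_load_uA field, consumers no longer need a regulator_set_load() loop after the bulk get; the core applies the load as each supply is acquired. A probe-time sketch, where the supply names and the 100 mA figure are hypothetical:

    static const struct regulator_bulk_data my_supplies[] = {
        { .supply = "vdd", .init_load_uA = 100000 },    /* 100 mA preset */
        { .supply = "vio" },
    };

    static int my_probe(struct device *dev)
    {
        struct regulator_bulk_data *consumers;
        int ret;

        /* The const initializer stays shareable; a live copy is returned. */
        ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(my_supplies),
                                            my_supplies, &consumers);
        if (ret)
            return ret;
        return regulator_bulk_enable(ARRAY_SIZE(my_supplies), consumers);
    }
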
+ * @consumer: The regulator consumer for the supply. This will be managed + * by the bulk API. * * The regulator APIs provide a series of regulator_bulk_() API calls as * a convenience to consumers which require multiple supplies. This @@ -182,6 +185,7 @@ struct regulator; */ struct regulator_bulk_data { const char *supply; + int init_load_uA; struct regulator *consumer; /* private: Internal use */ @@ -240,6 +244,10 @@ int __must_check regulator_bulk_get(struct device *dev, int num_consumers, struct regulator_bulk_data *consumers); int __must_check devm_regulator_bulk_get(struct device *dev, int num_consumers, struct regulator_bulk_data *consumers); +int __must_check devm_regulator_bulk_get_const( + struct device *dev, int num_consumers, + const struct regulator_bulk_data *in_consumers, + struct regulator_bulk_data **out_consumers); int __must_check regulator_bulk_enable(int num_consumers, struct regulator_bulk_data *consumers); int regulator_bulk_disable(int num_consumers, diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 0228caaa6741..f9a7461e72b8 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -348,6 +348,7 @@ enum regulator_type { * @ramp_delay_table: Table for mapping the regulator ramp-rate values. Values * should be given in units of V/S (uV/uS). See the * regulator_set_ramp_delay_regmap(). + * @n_ramp_values: number of elements at @ramp_delay_table. * * @enable_time: Time taken for initial enable of regulator (in uS). * @off_on_delay: guard time (in uS), before re-enabling a regulator diff --git a/include/linux/reset.h b/include/linux/reset.h index 8a21b5756c3e..514ddf003efc 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -731,7 +731,7 @@ static inline int __must_check devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, true); + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true); } /** diff --git a/include/linux/rtsx_usb.h b/include/linux/rtsx_usb.h index 159729cffd8e..3247ed8e9ff0 100644 --- a/include/linux/rtsx_usb.h +++ b/include/linux/rtsx_usb.h @@ -54,8 +54,6 @@ struct rtsx_ucr { struct usb_device *pusb_dev; struct usb_interface *pusb_intf; struct usb_sg_request current_sg; - unsigned char *iobuf; - dma_addr_t iobuf_dma; struct timer_list sg_timer; struct mutex dev_mutex; diff --git a/include/linux/sched.h b/include/linux/sched.h index c46f3a63b758..d6b0866c71ed 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -843,8 +843,9 @@ struct task_struct { int trc_reader_nesting; int trc_ipi_to_cpu; union rcu_special trc_reader_special; - bool trc_reader_checked; struct list_head trc_holdout_list; + struct list_head trc_blkd_node; + int trc_blkd_cpu; #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ struct sched_info sched_info; @@ -2223,6 +2224,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) extern bool sched_task_on_rq(struct task_struct *p); extern unsigned long get_wchan(struct task_struct *p); +extern struct task_struct *cpu_curr_snapshot(int cpu); /* * In order to reduce various lock holder preemption latencies provide an @@ -2257,7 +2259,7 @@ static inline bool owner_on_cpu(struct task_struct *owner) } /* Returns effective CPU energy utilization, as seen by the scheduler */ -unsigned long sched_cpu_util(int cpu, unsigned long max); +unsigned long sched_cpu_util(int cpu); 
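sched_cpu_util() loses its max argument: callers typically passed arch_scale_cpu_capacity(cpu), and that scaling now happens inside the scheduler. A caller-side sketch (a hypothetical thermal-governor style check):

    static bool my_cpu_is_busy(int cpu)
    {
        unsigned long util = sched_cpu_util(cpu);
        unsigned long cap = arch_scale_cpu_capacity(cpu);

        /* util is already clamped to the CPU's capacity by the scheduler. */
        return util > (cap * 80) / 100;
    }
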
#endif /* CONFIG_SMP */ #ifdef CONFIG_RSEQ diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index e5af028c08b4..994c25640e15 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -39,20 +39,12 @@ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p) } extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task); extern void rt_mutex_adjust_pi(struct task_struct *p); -static inline bool tsk_is_pi_blocked(struct task_struct *tsk) -{ - return tsk->pi_blocked_on != NULL; -} #else static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) { return NULL; } # define rt_mutex_adjust_pi(p) do { } while (0) -static inline bool tsk_is_pi_blocked(struct task_struct *tsk) -{ - return false; -} #endif extern void normalize_rt_tasks(void); diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 505aaf9fe477..81cab4b01edc 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -85,7 +85,7 @@ static inline void exit_thread(struct task_struct *tsk) extern __noreturn void do_group_exit(int); extern void exit_files(struct task_struct *); -extern void exit_itimers(struct signal_struct *); +extern void exit_itimers(struct task_struct *); extern pid_t kernel_clone(struct kernel_clone_args *kargs); struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node); diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 56cffe42abbc..816df6cc444e 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -81,6 +81,7 @@ struct sched_domain_shared { atomic_t ref; atomic_t nr_busy_cpus; int has_idle_cores; + int nr_idle_scan; }; struct sched_domain { diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 1c58646ba381..a193884ecf2b 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -13,8 +13,9 @@ #include <linux/notifier.h> #include <linux/types.h> -#define SCMI_MAX_STR_SIZE 64 -#define SCMI_MAX_NUM_RATES 16 +#define SCMI_MAX_STR_SIZE 64 +#define SCMI_SHORT_NAME_MAX_SIZE 16 +#define SCMI_MAX_NUM_RATES 16 /** * struct scmi_revision_info - version information structure @@ -36,8 +37,8 @@ struct scmi_revision_info { u8 num_protocols; u8 num_agents; u32 impl_ver; - char vendor_id[SCMI_MAX_STR_SIZE]; - char sub_vendor_id[SCMI_MAX_STR_SIZE]; + char vendor_id[SCMI_SHORT_NAME_MAX_SIZE]; + char sub_vendor_id[SCMI_SHORT_NAME_MAX_SIZE]; }; struct scmi_clock_info { @@ -59,6 +60,12 @@ struct scmi_clock_info { }; }; +enum scmi_power_scale { + SCMI_POWER_BOGOWATTS, + SCMI_POWER_MILLIWATTS, + SCMI_POWER_MICROWATTS +}; + struct scmi_handle; struct scmi_device; struct scmi_protocol_handle; @@ -134,7 +141,7 @@ struct scmi_perf_proto_ops { unsigned long *rate, unsigned long *power); bool (*fast_switch_possible)(const struct scmi_protocol_handle *ph, struct device *dev); - bool (*power_scale_mw_get)(const struct scmi_protocol_handle *ph); + enum scmi_power_scale (*power_scale_get)(const struct scmi_protocol_handle *ph); }; /** @@ -560,6 +567,116 @@ struct scmi_voltage_proto_ops { }; /** + * struct scmi_powercap_info - Describe one available Powercap domain + * + * @id: Domain ID as advertised by the platform. + * @notify_powercap_cap_change: CAP change notification support. + * @notify_powercap_measurement_change: MEASUREMENTS change notifications + * support. + * @async_powercap_cap_set: Asynchronous CAP set support. + * @powercap_cap_config: CAP configuration support. 
+ * @powercap_monitoring: Monitoring (measurements) support. + * @powercap_pai_config: PAI configuration support. + * @powercap_scale_mw: Domain reports power data in milliwatt units. + * @powercap_scale_uw: Domain reports power data in microwatt units. + * Note that, when both @powercap_scale_mw and + * @powercap_scale_uw are set to false, the domain + * reports power data on an abstract linear scale. + * @name: name assigned to the Powercap Domain by platform. + * @min_pai: Minimum configurable PAI. + * @max_pai: Maximum configurable PAI. + * @pai_step: Step size between two consecutive PAI values. + * @min_power_cap: Minimum configurable CAP. + * @max_power_cap: Maximum configurable CAP. + * @power_cap_step: Step size between two consecutive CAP values. + * @sustainable_power: Maximum sustainable power consumption for this domain + * under normal conditions. + * @accuracy: The accuracy with which the power is measured and reported in + * integral multiples of 0.001 percent. + * @parent_id: Identifier of the containing parent power capping domain, or the + * value 0xFFFFFFFF if this powercap domain is a root domain not + * contained in any other domain. + */ +struct scmi_powercap_info { + unsigned int id; + bool notify_powercap_cap_change; + bool notify_powercap_measurement_change; + bool async_powercap_cap_set; + bool powercap_cap_config; + bool powercap_monitoring; + bool powercap_pai_config; + bool powercap_scale_mw; + bool powercap_scale_uw; + bool fastchannels; + char name[SCMI_MAX_STR_SIZE]; + unsigned int min_pai; + unsigned int max_pai; + unsigned int pai_step; + unsigned int min_power_cap; + unsigned int max_power_cap; + unsigned int power_cap_step; + unsigned int sustainable_power; + unsigned int accuracy; +#define SCMI_POWERCAP_ROOT_ZONE_ID 0xFFFFFFFFUL + unsigned int parent_id; + struct scmi_fc_info *fc_info; +}; + +/** + * struct scmi_powercap_proto_ops - represents the various operations provided + * by SCMI Powercap Protocol + * + * @num_domains_get: get the count of powercap domains provided by SCMI. + * @info_get: get the information for the specified domain. + * @cap_get: get the current CAP value for the specified domain. + * @cap_set: set the CAP value for the specified domain to the provided value; + * if the domain supports setting the CAP with an asynchronous command + * this request will finally trigger an asynchronous transfer, but, if + * @ignore_dresp here is set to true, this call will anyway return + * immediately without waiting for the related delayed response. + * @pai_get: get the current PAI value for the specified domain. + * @pai_set: set the PAI value for the specified domain to the provided value. + * @measurements_get: retrieve the current average power measurements for the + * specified domain and the related PAI upon which is + * calculated. + * @measurements_threshold_set: set the desired low and high power thresholds + * to be used when registering for notification + * of type POWERCAP_MEASUREMENTS_NOTIFY with this + * powercap domain. + * Note that this must be called at least once + * before registering any callback with the usual + * @scmi_notify_ops; moreover, in case this method + * is called with measurement notifications already + * enabled it will also trigger, transparently, a + * proper update of the power thresholds configured + * in the SCMI backend server. + * @measurements_threshold_get: get the currently configured low and high power + * thresholds used when registering callbacks for + * notification POWERCAP_MEASUREMENTS_NOTIFY. 
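A consumer-side sketch of these ops (gathered in the struct just below), assuming powercap_ops was obtained through the handle's devm_protocol_get() with SCMI_PROTOCOL_POWERCAP; dom and new_cap are hypothetical:

    static int my_clamp_domain(const struct scmi_protocol_handle *ph, u32 dom,
                               u32 new_cap)
    {
        const struct scmi_powercap_info *pi;

        pi = powercap_ops->info_get(ph, dom);
        if (!pi || !pi->powercap_cap_config)
            return -EOPNOTSUPP;

        /* ignore_dresp == false: wait for any delayed response. */
        return powercap_ops->cap_set(ph, dom, new_cap, false);
    }
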
+ */ +struct scmi_powercap_proto_ops { + int (*num_domains_get)(const struct scmi_protocol_handle *ph); + const struct scmi_powercap_info __must_check *(*info_get) + (const struct scmi_protocol_handle *ph, u32 domain_id); + int (*cap_get)(const struct scmi_protocol_handle *ph, u32 domain_id, + u32 *power_cap); + int (*cap_set)(const struct scmi_protocol_handle *ph, u32 domain_id, + u32 power_cap, bool ignore_dresp); + int (*pai_get)(const struct scmi_protocol_handle *ph, u32 domain_id, + u32 *pai); + int (*pai_set)(const struct scmi_protocol_handle *ph, u32 domain_id, + u32 pai); + int (*measurements_get)(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *average_power, u32 *pai); + int (*measurements_threshold_set)(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 power_thresh_low, + u32 power_thresh_high); + int (*measurements_threshold_get)(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_thresh_low, + u32 *power_thresh_high); +}; + +/** * struct scmi_notify_ops - represents notifications' operations provided by * SCMI core * @devm_event_notifier_register: Managed registration of a notifier_block for @@ -623,6 +740,9 @@ struct scmi_notify_ops { * * @dev: pointer to the SCMI device * @version: pointer to the structure containing SCMI version information + * @devm_protocol_acquire: devres managed method to get hold of a protocol, + * causing its initialization and related resource + * accounting * @devm_protocol_get: devres managed method to acquire a protocol and get specific * operations and a dedicated protocol handler * @devm_protocol_put: devres managed method to release a protocol @@ -641,6 +761,8 @@ struct scmi_handle { struct device *dev; struct scmi_revision_info *version; + int __must_check (*devm_protocol_acquire)(struct scmi_device *sdev, + u8 proto); const void __must_check * (*devm_protocol_get)(struct scmi_device *sdev, u8 proto, struct scmi_protocol_handle **ph); @@ -660,6 +782,7 @@ enum scmi_std_protocol { SCMI_PROTOCOL_SENSOR = 0x15, SCMI_PROTOCOL_RESET = 0x16, SCMI_PROTOCOL_VOLTAGE = 0x17, + SCMI_PROTOCOL_POWERCAP = 0x18, }; enum scmi_system_events { @@ -761,6 +884,8 @@ enum scmi_notification_events { SCMI_EVENT_RESET_ISSUED = 0x0, SCMI_EVENT_BASE_ERROR_EVENT = 0x0, SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER = 0x0, + SCMI_EVENT_POWERCAP_CAP_CHANGED = 0x0, + SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED = 0x1, }; struct scmi_power_state_changed_report { @@ -780,8 +905,10 @@ struct scmi_clock_rate_notif_report { struct scmi_system_power_state_notifier_report { ktime_t timestamp; unsigned int agent_id; +#define SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(flags) ((flags) & BIT(0)) unsigned int flags; unsigned int system_state; + unsigned int timeout; }; struct scmi_perf_limits_report { @@ -829,4 +956,18 @@ struct scmi_base_error_report { unsigned long long reports[]; }; +struct scmi_powercap_cap_changed_report { + ktime_t timestamp; + unsigned int agent_id; + unsigned int domain_id; + unsigned int power_cap; + unsigned int pai; +}; + +struct scmi_powercap_meas_changed_report { + ktime_t timestamp; + unsigned int agent_id; + unsigned int domain_id; + unsigned int power; +}; #endif /* _LINUX_SCMI_PROTOCOL_H */ diff --git a/include/linux/security.h b/include/linux/security.h index 7fc4e9f49f54..1bc362cb413f 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -353,7 +353,8 @@ int security_inode_readlink(struct dentry *dentry); int security_inode_follow_link(struct dentry *dentry, struct inode *inode, bool rcu); int 
security_inode_permission(struct inode *inode, int mask); -int security_inode_setattr(struct dentry *dentry, struct iattr *attr); +int security_inode_setattr(struct user_namespace *mnt_userns, + struct dentry *dentry, struct iattr *attr); int security_inode_getattr(const struct path *path); int security_inode_setxattr(struct user_namespace *mnt_userns, struct dentry *dentry, const char *name, @@ -415,6 +416,7 @@ int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags); int security_task_fix_setgid(struct cred *new, const struct cred *old, int flags); +int security_task_fix_setgroups(struct cred *new, const struct cred *old); int security_task_setpgid(struct task_struct *p, pid_t pgid); int security_task_getpgid(struct task_struct *p); int security_task_getsid(struct task_struct *p); @@ -848,8 +850,9 @@ static inline int security_inode_permission(struct inode *inode, int mask) return 0; } -static inline int security_inode_setattr(struct dentry *dentry, - struct iattr *attr) +static inline int security_inode_setattr(struct user_namespace *mnt_userns, + struct dentry *dentry, + struct iattr *attr) { return 0; } @@ -1098,6 +1101,12 @@ static inline int security_task_fix_setgid(struct cred *new, return 0; } +static inline int security_task_fix_setgroups(struct cred *new, + const struct cred *old) +{ + return 0; +} + static inline int security_task_setpgid(struct task_struct *p, pid_t pgid) { return 0; diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index cbd5070bc87f..fde258b3decd 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -45,6 +45,7 @@ struct uart_ops { void (*unthrottle)(struct uart_port *); void (*send_xchar)(struct uart_port *, char ch); void (*stop_rx)(struct uart_port *); + void (*start_rx)(struct uart_port *); void (*enable_ms)(struct uart_port *); void (*break_ctl)(struct uart_port *, int ctl); int (*startup)(struct uart_port *); @@ -389,6 +390,11 @@ static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED; static inline int setup_earlycon(char *buf) { return 0; } #endif +static inline bool uart_console_enabled(struct uart_port *port) +{ + return uart_console(port) && (port->cons->flags & CON_ENABLED); +} + struct uart_port *uart_get_console(struct uart_port *ports, int nr, struct console *c); int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index d3d10556f0fa..1111adefd906 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -686,10 +686,18 @@ enum { * charged to the kernel memory. 
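A brief aside on the new uart_console_enabled() helper above: it is useful wherever a driver must avoid reprogramming a port that is already carrying console output. A hypothetical sketch; my_uart_hw_reset() is an assumed device-specific helper, not a real API:

static void my_uart_hw_reset(struct uart_port *port)
{
	/* Placeholder for device-specific reset programming. */
}

static void my_uart_port_setup_sketch(struct uart_port *port)
{
	/* Don't reset the hardware behind an already-enabled console. */
	if (!uart_console_enabled(port))
		my_uart_hw_reset(port);
}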
*/ SKBFL_PURE_ZEROCOPY = BIT(2), + + SKBFL_DONT_ORPHAN = BIT(3), + + /* page references are managed by the ubuf_info, so it's safe to + * use frags only up until ubuf_info is released + */ + SKBFL_MANAGED_FRAG_REFS = BIT(4), }; #define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG) -#define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY) +#define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY | \ + SKBFL_DONT_ORPHAN | SKBFL_MANAGED_FRAG_REFS) /* * The callback notifies userspace to release buffers when skb DMA is done in @@ -1773,13 +1781,14 @@ void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref); void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, bool success); -int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb, - struct iov_iter *from, size_t length); +int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb, struct iov_iter *from, + size_t length); static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len) { - return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len); + return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len); } int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, @@ -1806,6 +1815,11 @@ static inline bool skb_zcopy_pure(const struct sk_buff *skb) return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY; } +static inline bool skb_zcopy_managed(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->flags & SKBFL_MANAGED_FRAG_REFS; +} + static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1, const struct sk_buff *skb2) { @@ -1880,6 +1894,14 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success) } } +void __skb_zcopy_downgrade_managed(struct sk_buff *skb); + +static inline void skb_zcopy_downgrade_managed(struct sk_buff *skb) +{ + if (unlikely(skb_zcopy_managed(skb))) + __skb_zcopy_downgrade_managed(skb); +} + static inline void skb_mark_not_on_list(struct sk_buff *skb) { skb->next = NULL; @@ -2528,6 +2550,22 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb) return skb_headlen(skb) + __skb_pagelen(skb); } +static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo, + int i, struct page *page, + int off, int size) +{ + skb_frag_t *frag = &shinfo->frags[i]; + + /* + * Propagate page pfmemalloc to the skb if we can. The problem is + * that not all callers have unique ownership of the page but rely + * on page_is_pfmemalloc doing the right thing(tm). + */ + frag->bv_page = page; + frag->bv_offset = off; + skb_frag_size_set(frag, size); +} + /** * __skb_fill_page_desc - initialise a paged fragment in an skb * @skb: buffer containing fragment to be initialised @@ -2544,17 +2582,7 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb) static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - /* - * Propagate page pfmemalloc to the skb if we can. The problem is - * that not all callers have unique ownership of the page but rely - * on page_is_pfmemalloc doing the right thing(tm). 
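Putting the two new flags in context: with SKBFL_MANAGED_FRAG_REFS set, the frag pages borrow the ubuf_info's references, so any path that wants to hold frag pages beyond the ubuf_info lifetime has to downgrade first. A short sketch under that assumption (the function name is illustrative):

static void my_hold_frags_sketch(struct sk_buff *skb)
{
	int i;

	/* Convert borrowed frag references into real page references. */
	skb_zcopy_downgrade_managed(skb);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_frag_ref(skb, i);
}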
- */ - frag->bv_page = page; - frag->bv_offset = off; - skb_frag_size_set(frag, size); - + __skb_fill_page_desc_noacc(skb_shinfo(skb), i, page, off, size); page = compound_head(page); if (page_is_pfmemalloc(page)) skb->pfmemalloc = true; @@ -3182,8 +3210,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) { if (likely(!skb_zcopy(skb))) return 0; - if (!skb_zcopy_is_nouarg(skb) && - skb_uarg(skb)->callback == msg_zerocopy_callback) + if (skb_shinfo(skb)->flags & SKBFL_DONT_ORPHAN) return 0; return skb_copy_ubufs(skb, gfp_mask); } @@ -3496,7 +3523,10 @@ static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) */ static inline void skb_frag_unref(struct sk_buff *skb, int f) { - __skb_frag_unref(&skb_shinfo(skb)->frags[f], skb->pp_recycle); + struct skb_shared_info *shinfo = skb_shinfo(skb); + + if (!skb_zcopy_managed(skb)) + __skb_frag_unref(&shinfo->frags[f], skb->pp_recycle); } /** diff --git a/include/linux/soc/mediatek/mtk-mutex.h b/include/linux/soc/mediatek/mtk-mutex.h index 6fe4ffbde290..a0f4f51a3b45 100644 --- a/include/linux/soc/mediatek/mtk-mutex.h +++ b/include/linux/soc/mediatek/mtk-mutex.h @@ -10,11 +10,33 @@ struct regmap; struct device; struct mtk_mutex; +enum mtk_mutex_mod_index { + /* MDP table index */ + MUTEX_MOD_IDX_MDP_RDMA0, + MUTEX_MOD_IDX_MDP_RSZ0, + MUTEX_MOD_IDX_MDP_RSZ1, + MUTEX_MOD_IDX_MDP_TDSHP0, + MUTEX_MOD_IDX_MDP_WROT0, + MUTEX_MOD_IDX_MDP_WDMA, + MUTEX_MOD_IDX_MDP_AAL0, + MUTEX_MOD_IDX_MDP_CCORR0, + + MUTEX_MOD_IDX_MAX /* ALWAYS keep at the end */ +}; + +enum mtk_mutex_sof_index { + MUTEX_SOF_IDX_SINGLE_MODE, + + MUTEX_SOF_IDX_MAX /* ALWAYS keep at the end */ +}; + struct mtk_mutex *mtk_mutex_get(struct device *dev); int mtk_mutex_prepare(struct mtk_mutex *mutex); void mtk_mutex_add_comp(struct mtk_mutex *mutex, enum mtk_ddp_comp_id id); void mtk_mutex_enable(struct mtk_mutex *mutex); +int mtk_mutex_enable_by_cmdq(struct mtk_mutex *mutex, + void *pkt); void mtk_mutex_disable(struct mtk_mutex *mutex); void mtk_mutex_remove_comp(struct mtk_mutex *mutex, enum mtk_ddp_comp_id id); @@ -22,5 +44,10 @@ void mtk_mutex_unprepare(struct mtk_mutex *mutex); void mtk_mutex_put(struct mtk_mutex *mutex); void mtk_mutex_acquire(struct mtk_mutex *mutex); void mtk_mutex_release(struct mtk_mutex *mutex); +int mtk_mutex_write_mod(struct mtk_mutex *mutex, + enum mtk_mutex_mod_index idx, + bool clear); +int mtk_mutex_write_sof(struct mtk_mutex *mutex, + enum mtk_mutex_sof_index idx); #endif /* MTK_MUTEX_H */ diff --git a/include/linux/socket.h b/include/linux/socket.h index 17311ad9f9af..d4523974efbd 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -14,6 +14,8 @@ struct file; struct pid; struct cred; struct socket; +struct sock; +struct sk_buff; #define __sockaddr_check_size(size) \ BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage))) @@ -69,6 +71,9 @@ struct msghdr { unsigned int msg_flags; /* flags on received message */ __kernel_size_t msg_controllen; /* ancillary data buffer length */ struct kiocb *msg_iocb; /* ptr to iocb for async requests */ + struct ubuf_info *msg_ubuf; + int (*sg_from_iter)(struct sock *sk, struct sk_buff *skb, + struct iov_iter *from, size_t length); }; struct user_msghdr { @@ -416,10 +421,9 @@ extern int recvmsg_copy_msghdr(struct msghdr *msg, struct user_msghdr __user *umsg, unsigned flags, struct sockaddr __user **uaddr, struct iovec **iov); -extern int __copy_msghdr_from_user(struct msghdr *kmsg, - struct user_msghdr __user *umsg, - struct sockaddr __user **save_addr, - struct iovec 
__user **uiov, size_t *nsegs); +extern int __copy_msghdr(struct msghdr *kmsg, + struct user_msghdr *umsg, + struct sockaddr __user **save_addr); /* helpers which do the actual work for syscalls */ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size, diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index d361ba26203b..e6c73d5ff1a8 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -17,6 +17,7 @@ #include <uapi/linux/spi/spi.h> #include <linux/acpi.h> +#include <linux/u64_stats_sync.h> struct dma_chan; struct software_node; @@ -34,7 +35,8 @@ extern struct bus_type spi_bus_type; /** * struct spi_statistics - statistics for spi transfers - * @lock: lock protecting this structure + * @syncp: seqcount to protect members in this struct for per-cpu update + * on 32-bit systems * * @messages: number of spi-messages handled * @transfers: number of spi_transfers handled @@ -59,37 +61,48 @@ extern struct bus_type spi_bus_type; * maxsize limit */ struct spi_statistics { - spinlock_t lock; /* lock for the whole structure */ + struct u64_stats_sync syncp; - unsigned long messages; - unsigned long transfers; - unsigned long errors; - unsigned long timedout; + u64_stats_t messages; + u64_stats_t transfers; + u64_stats_t errors; + u64_stats_t timedout; - unsigned long spi_sync; - unsigned long spi_sync_immediate; - unsigned long spi_async; + u64_stats_t spi_sync; + u64_stats_t spi_sync_immediate; + u64_stats_t spi_async; - unsigned long long bytes; - unsigned long long bytes_rx; - unsigned long long bytes_tx; + u64_stats_t bytes; + u64_stats_t bytes_rx; + u64_stats_t bytes_tx; #define SPI_STATISTICS_HISTO_SIZE 17 - unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE]; + u64_stats_t transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE]; - unsigned long transfers_split_maxsize; + u64_stats_t transfers_split_maxsize; }; -#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \ - do { \ - unsigned long flags; \ - spin_lock_irqsave(&(stats)->lock, flags); \ - (stats)->field += count; \ - spin_unlock_irqrestore(&(stats)->lock, flags); \ +#define SPI_STATISTICS_ADD_TO_FIELD(pcpu_stats, field, count) \ + do { \ + struct spi_statistics *__lstats; \ + get_cpu(); \ + __lstats = this_cpu_ptr(pcpu_stats); \ + u64_stats_update_begin(&__lstats->syncp); \ + u64_stats_add(&__lstats->field, count); \ + u64_stats_update_end(&__lstats->syncp); \ + put_cpu(); \ } while (0) -#define SPI_STATISTICS_INCREMENT_FIELD(stats, field) \ - SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1) +#define SPI_STATISTICS_INCREMENT_FIELD(pcpu_stats, field) \ + do { \ + struct spi_statistics *__lstats; \ + get_cpu(); \ + __lstats = this_cpu_ptr(pcpu_stats); \ + u64_stats_update_begin(&__lstats->syncp); \ + u64_stats_inc(&__lstats->field); \ + u64_stats_update_end(&__lstats->syncp); \ + put_cpu(); \ } while (0) /** * struct spi_delay - SPI delay information @@ -149,7 +162,7 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer); * @cs_inactive: delay to be introduced by the controller after CS is * deasserted. If @cs_change_delay is used from @spi_transfer, then the * two delays will be added up. - * @statistics: statistics for the spi_device + * @pcpu_statistics: statistics for the spi_device * * A @spi_device is used to interchange data between an SPI slave * (usually a discrete chip) and CPU memory.
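Since the statistics are now per-CPU u64_stats, any reader (the sysfs show path, for instance) has to sum the per-CPU instances under @syncp. A minimal sketch of that fold for the @messages counter; the function name is illustrative, and the _irq fetch variants are one valid pairing for stats also updated from hard-irq context:

static u64 spi_stats_total_messages_sketch(struct spi_statistics __percpu *pcpu)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct spi_statistics *s = per_cpu_ptr(pcpu, cpu);
		unsigned int start;
		u64 v;

		do {
			/* Retry if a writer updated this CPU's stats meanwhile. */
			start = u64_stats_fetch_begin_irq(&s->syncp);
			v = u64_stats_read(&s->messages);
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		sum += v;
	}
	return sum;
}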
@@ -163,13 +176,13 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer); struct spi_device { struct device dev; struct spi_controller *controller; - struct spi_controller *master; /* compatibility layer */ + struct spi_controller *master; /* Compatibility layer */ u32 max_speed_hz; u8 chip_select; u8 bits_per_word; bool rt; -#define SPI_NO_TX BIT(31) /* no transmit wire */ -#define SPI_NO_RX BIT(30) /* no receive wire */ +#define SPI_NO_TX BIT(31) /* No transmit wire */ +#define SPI_NO_RX BIT(30) /* No receive wire */ /* * All bits defined above should be covered by SPI_MODE_KERNEL_MASK. * The SPI_MODE_KERNEL_MASK has the SPI_MODE_USER_MASK counterpart, @@ -186,15 +199,15 @@ struct spi_device { void *controller_data; char modalias[SPI_NAME_SIZE]; const char *driver_override; - struct gpio_desc *cs_gpiod; /* chip select gpio desc */ - struct spi_delay word_delay; /* inter-word delay */ + struct gpio_desc *cs_gpiod; /* Chip select gpio desc */ + struct spi_delay word_delay; /* Inter-word delay */ /* CS delays */ struct spi_delay cs_setup; struct spi_delay cs_hold; struct spi_delay cs_inactive; - /* the statistics */ - struct spi_statistics statistics; + /* The statistics */ + struct spi_statistics __percpu *pcpu_statistics; /* * likely need more hooks for more protocol options affecting how @@ -215,7 +228,7 @@ static inline struct spi_device *to_spi_device(struct device *dev) return dev ? container_of(dev, struct spi_device, dev) : NULL; } -/* most drivers won't need to care about device refcounting */ +/* Most drivers won't need to care about device refcounting */ static inline struct spi_device *spi_dev_get(struct spi_device *spi) { return (spi && get_device(&spi->dev)) ? spi : NULL; @@ -238,7 +251,7 @@ static inline void spi_set_ctldata(struct spi_device *spi, void *state) spi->controller_state = state; } -/* device driver data */ +/* Device driver data */ static inline void spi_set_drvdata(struct spi_device *spi, void *data) { @@ -305,7 +318,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 chip_select); -/* use a define to avoid include chaining to get THIS_MODULE */ +/* Use a define to avoid include chaining to get THIS_MODULE */ #define spi_register_driver(driver) \ __spi_register_driver(THIS_MODULE, driver) @@ -370,10 +383,14 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @pump_messages: work struct for scheduling work to the message pump * @queue_lock: spinlock to syncronise access to message queue * @queue: message queue - * @idling: the device is entering idle state * @cur_msg: the currently in-flight message - * @cur_msg_prepared: spi_prepare_message was called for the currently - * in-flight message + * @cur_msg_completion: a completion for the current in-flight message + * @cur_msg_incomplete: Flag used internally to opportunistically skip + * the @cur_msg_completion. This flag is used to check if the driver has + * already called spi_finalize_current_message(). + * @cur_msg_need_completion: Flag used internally to opportunistically skip + * the @cur_msg_completion. 
This flag is used to signal the context that + * is running spi_finalize_current_message() that it needs to complete() * @cur_msg_mapped: message has been mapped for DMA * @last_cs: the last chip_select that is recorded by set_cs, -1 on non chip * selected @@ -433,7 +450,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @max_native_cs: When cs_gpiods is used, and this field is filled in, * spi_register_controller() will validate all native CS (including the * unused native CS) against this value. - * @statistics: statistics for the spi_controller + * @pcpu_statistics: statistics for the spi_controller * @dma_tx: DMA transmit channel * @dma_rx: DMA receive channel * @dummy_rx: dummy receive buffer for full-duplex devices @@ -450,6 +467,8 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @irq_flags: Interrupt enable state during PTP system timestamping * @fallback: fallback to pio if dma transfer return failure with * SPI_TRANS_FAIL_NO_START. + * @queue_empty: signal green light for opportunistically skipping the queue + * for spi_sync transfers. * * Each SPI controller can communicate with one or more @spi_device * children. These make a small bus, sharing MOSI, MISO and SCK signals @@ -467,7 +486,7 @@ struct spi_controller { struct list_head list; - /* other than negative (== assign one dynamically), bus_num is fully + /* Other than negative (== assign one dynamically), bus_num is fully * board-specific. usually that simplifies to being SOC-specific. * example: one SOC has three SPI controllers, numbered 0..2, * and one board's schematics might show it using SPI-2. software @@ -480,7 +499,7 @@ struct spi_controller { */ u16 num_chipselect; - /* some SPI controllers pose alignment requirements on DMAable + /* Some SPI controllers pose alignment requirements on DMAable * buffers; let protocol drivers know about these requirements. 
*/ u16 dma_alignment; @@ -491,29 +510,29 @@ struct spi_controller { /* spi_device.mode flags override flags for this controller */ u32 buswidth_override_bits; - /* bitmask of supported bits_per_word for transfers */ + /* Bitmask of supported bits_per_word for transfers */ u32 bits_per_word_mask; #define SPI_BPW_MASK(bits) BIT((bits) - 1) #define SPI_BPW_RANGE_MASK(min, max) GENMASK((max) - 1, (min) - 1) - /* limits on transfer speed */ + /* Limits on transfer speed */ u32 min_speed_hz; u32 max_speed_hz; - /* other constraints relevant to this driver */ + /* Other constraints relevant to this driver */ u16 flags; -#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* can't do full duplex */ -#define SPI_CONTROLLER_NO_RX BIT(1) /* can't do buffer read */ -#define SPI_CONTROLLER_NO_TX BIT(2) /* can't do buffer write */ -#define SPI_CONTROLLER_MUST_RX BIT(3) /* requires rx */ -#define SPI_CONTROLLER_MUST_TX BIT(4) /* requires tx */ +#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* Can't do full duplex */ +#define SPI_CONTROLLER_NO_RX BIT(1) /* Can't do buffer read */ +#define SPI_CONTROLLER_NO_TX BIT(2) /* Can't do buffer write */ +#define SPI_CONTROLLER_MUST_RX BIT(3) /* Requires rx */ +#define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */ #define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */ - /* flag indicating if the allocation of this struct is devres-managed */ + /* Flag indicating if the allocation of this struct is devres-managed */ bool devm_allocated; - /* flag indicating this is an SPI slave controller */ + /* Flag indicating this is an SPI slave controller */ bool slave; /* @@ -529,11 +548,11 @@ struct spi_controller { /* Used to avoid adding the same CS twice */ struct mutex add_lock; - /* lock and mutex for SPI bus locking */ + /* Lock and mutex for SPI bus locking */ spinlock_t bus_lock_spinlock; struct mutex bus_lock_mutex; - /* flag indicating that the SPI bus is locked for exclusive use */ + /* Flag indicating that the SPI bus is locked for exclusive use */ bool bus_lock_flag; /* Setup mode and clock, etc (spi driver may call many times). @@ -554,7 +573,7 @@ struct spi_controller { */ int (*set_cs_timing)(struct spi_device *spi); - /* bidirectional bulk transfers + /* Bidirectional bulk transfers * * + The transfer() method may not sleep; its main role is * just to add the message to the queue. 
@@ -576,7 +595,7 @@ struct spi_controller { int (*transfer)(struct spi_device *spi, struct spi_message *mesg); - /* called on release() to free memory provided by spi_controller */ + /* Called on release() to free memory provided by spi_controller */ void (*cleanup)(struct spi_device *spi); /* @@ -603,12 +622,13 @@ struct spi_controller { spinlock_t queue_lock; struct list_head queue; struct spi_message *cur_msg; - bool idling; + struct completion cur_msg_completion; + bool cur_msg_incomplete; + bool cur_msg_need_completion; bool busy; bool running; bool rt; bool auto_runtime_pm; - bool cur_msg_prepared; bool cur_msg_mapped; char last_cs; bool last_cs_mode_high; @@ -646,14 +666,14 @@ struct spi_controller { s8 unused_native_cs; s8 max_native_cs; - /* statistics */ - struct spi_statistics statistics; + /* Statistics */ + struct spi_statistics __percpu *pcpu_statistics; /* DMA channels for use with core dmaengine helpers */ struct dma_chan *dma_tx; struct dma_chan *dma_rx; - /* dummy data for full duplex devices */ + /* Dummy data for full duplex devices */ void *dummy_rx; void *dummy_tx; @@ -667,6 +687,9 @@ struct spi_controller { /* Interrupt enable state during PTP system timestamping */ unsigned long irq_flags; + + /* Flag for enabling opportunistic skipping of the queue in spi_sync */ + bool queue_empty; }; static inline void *spi_controller_get_devdata(struct spi_controller *ctlr) @@ -715,7 +738,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr, struct spi_transfer *xfer, size_t progress, bool irqs_off); -/* the spi driver core manages memory for the spi_controller classdev */ +/* The spi driver core manages memory for the spi_controller classdev */ extern struct spi_controller *__spi_alloc_controller(struct device *host, unsigned int size, bool slave); @@ -785,7 +808,7 @@ typedef void (*spi_res_release_t)(struct spi_controller *ctlr, struct spi_res { struct list_head entry; spi_res_release_t release; - unsigned long long data[]; /* guarantee ull alignment */ + unsigned long long data[]; /* Guarantee ull alignment */ }; /*---------------------------------------------------------------------------*/ @@ -918,7 +941,7 @@ struct spi_res { * and its transfers, ignore them until its completion callback. */ struct spi_transfer { - /* it's ok if tx_buf == rx_buf (right?) + /* It's ok if tx_buf == rx_buf (right?) * for MicroWire, one buffer must be null * buffers must work with dma_*map_single() calls, unless * spi_message.is_dma_mapped reports a pre-existing mapping @@ -975,6 +998,7 @@ struct spi_transfer { * @queue: for use by whichever driver currently owns the message * @state: for use by whichever driver currently owns the message * @resources: for resource management when the spi message is processed + * @prepared: spi_prepare_message was called for this message * * A @spi_message is used to execute an atomic sequence of data transfers, * each represented by a struct spi_transfer. The sequence is "atomic" @@ -1008,22 +1032,25 @@ struct spi_message { * tell them about such special cases. */ - /* completion is reported through a callback */ + /* Completion is reported through a callback */ void (*complete)(void *context); void *context; unsigned frame_length; unsigned actual_length; int status; - /* for optional use by whatever driver currently owns the + /* For optional use by whatever driver currently owns the * spi_message ... between calls to spi_async and then later * complete(), that's the spi_controller controller driver.
*/ struct list_head queue; void *state; - /* list of spi_res reources when the spi message is processed */ + /* List of spi_res resources when the spi message is processed */ struct list_head resources; + + /* spi_prepare_message() was called for this message */ + bool prepared; }; static inline void spi_message_init_no_memset(struct spi_message *m) @@ -1127,7 +1154,7 @@ spi_max_transfer_size(struct spi_device *spi) if (ctlr->max_transfer_size) tr_max = ctlr->max_transfer_size(spi); - /* transfer size limit must not be greater than messsage size limit */ + /* Transfer size limit must not be greater than message size limit */ return min(tr_max, msg_max); } @@ -1278,7 +1305,7 @@ spi_read(struct spi_device *spi, void *buf, size_t len) return spi_sync_transfer(spi, &t, 1); } -/* this copies txbuf and rxbuf data; for small transfers only! */ +/* This copies txbuf and rxbuf data; for small transfers only! */ extern int spi_write_then_read(struct spi_device *spi, const void *txbuf, unsigned n_tx, void *rxbuf, unsigned n_rx); @@ -1301,7 +1328,7 @@ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) status = spi_write_then_read(spi, &cmd, 1, &result, 1); - /* return negative errno or unsigned value */ + /* Return negative errno or unsigned value */ return (status < 0) ? status : result; } @@ -1326,7 +1353,7 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) status = spi_write_then_read(spi, &cmd, 1, &result, 2); - /* return negative errno or unsigned value */ + /* Return negative errno or unsigned value */ return (status < 0) ? status : result; } @@ -1406,7 +1433,7 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) * are active in some dynamic board configuration models. */ struct spi_board_info { - /* the device name and module name are coupled, like platform_bus; + /* The device name and module name are coupled, like platform_bus; * "modalias" is normally the driver name.
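For completeness, a tiny usage sketch of the w8r8 helper above; MY_REG_CHIP_ID and the notion of a chip-ID register are made up for illustration:

#define MY_REG_CHIP_ID	0x0f	/* Hypothetical register address. */

static int my_read_chip_id_sketch(struct spi_device *spi)
{
	ssize_t ret = spi_w8r8(spi, MY_REG_CHIP_ID);

	/* spi_w8r8() returns the byte read or a negative errno. */
	return (ret < 0) ? ret : (u8)ret;
}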
* * platform_data goes to spi_device.dev.platform_data, @@ -1419,7 +1446,7 @@ struct spi_board_info { void *controller_data; int irq; - /* slower signaling on noisy or low voltage boards */ + /* Slower signaling on noisy or low voltage boards */ u32 max_speed_hz; @@ -1448,7 +1475,7 @@ struct spi_board_info { extern int spi_register_board_info(struct spi_board_info const *info, unsigned n); #else -/* board init code may ignore whether SPI is configured or not */ +/* Board init code may ignore whether SPI is configured or not */ static inline int spi_register_board_info(struct spi_board_info const *info, unsigned n) { return 0; } diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 29917850f079..8df475db88c0 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -260,6 +260,7 @@ struct plat_stmmacenet_data { bool has_crossts; int int_snapshot_num; int ext_snapshot_num; + bool int_snapshot_en; bool ext_snapshot_en; bool multi_msi_en; int msi_mac_vec; diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 4417f667c757..5860f32e3958 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -243,7 +243,7 @@ extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes); extern int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, size_t nbytes); -extern void xdr_commit_encode(struct xdr_stream *xdr); +extern void __xdr_commit_encode(struct xdr_stream *xdr); extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len); extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen); extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, @@ -307,6 +307,20 @@ xdr_reset_scratch_buffer(struct xdr_stream *xdr) } /** + * xdr_commit_encode - Ensure all data is written to xdr->buf + * @xdr: pointer to xdr_stream + * + * Handle encoding across page boundaries by giving the caller a + * temporary location to write to, then later copying the data into + * place. __xdr_commit_encode() does that copying. 
+ */ +static inline void xdr_commit_encode(struct xdr_stream *xdr) +{ + if (unlikely(xdr->scratch.iov_len)) + __xdr_commit_encode(xdr); +} + +/** * xdr_stream_remaining - Return the number of bytes remaining in the stream * @xdr: pointer to struct xdr_stream * diff --git a/include/linux/swap.h b/include/linux/swap.h index 0c0fed1b348f..8672a7123ccd 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -438,7 +438,8 @@ static inline bool node_reclaim_enabled(void) return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP); } -extern void check_move_unevictable_pages(struct pagevec *pvec); +void check_move_unevictable_folios(struct folio_batch *fbatch); +void check_move_unevictable_pages(struct pagevec *pvec); extern void kswapd_run(int nid); extern void kswapd_stop(int nid); diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h index b0dcfa26d07b..8ba8b5be5567 100644 --- a/include/linux/sysfb.h +++ b/include/linux/sysfb.h @@ -55,6 +55,18 @@ struct efifb_dmi_info { int flags; }; +#ifdef CONFIG_SYSFB + +void sysfb_disable(void); + +#else /* CONFIG_SYSFB */ + +static inline void sysfb_disable(void) +{ +} + +#endif /* CONFIG_SYSFB */ + #ifdef CONFIG_EFI extern struct efifb_dmi_info efifb_dmi_list[]; @@ -72,8 +84,8 @@ static inline void sysfb_apply_efi_quirks(struct platform_device *pd) bool sysfb_parse_mode(const struct screen_info *si, struct simplefb_platform_data *mode); -int sysfb_create_simplefb(const struct screen_info *si, - const struct simplefb_platform_data *mode); +struct platform_device *sysfb_create_simplefb(const struct screen_info *si, + const struct simplefb_platform_data *mode); #else /* CONFIG_SYSFB_SIMPLE */ @@ -83,10 +95,10 @@ static inline bool sysfb_parse_mode(const struct screen_info *si, return false; } -static inline int sysfb_create_simplefb(const struct screen_info *si, - const struct simplefb_platform_data *mode) +static inline struct platform_device *sysfb_create_simplefb(const struct screen_info *si, + const struct simplefb_platform_data *mode) { - return -EINVAL; + return ERR_PTR(-EINVAL); } #endif /* CONFIG_SYSFB_SIMPLE */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 365733b428d8..1386c713885d 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -40,8 +40,6 @@ enum thermal_trend { THERMAL_TREND_STABLE, /* temperature is stable */ THERMAL_TREND_RAISING, /* temperature is raising */ THERMAL_TREND_DROPPING, /* temperature is dropping */ - THERMAL_TREND_RAISE_FULL, /* apply highest cooling action */ - THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */ }; /* Thermal notification reason */ @@ -80,6 +78,18 @@ struct thermal_zone_device_ops { void (*critical)(struct thermal_zone_device *); }; +/** + * struct thermal_trip - representation of a point in temperature domain + * @temperature: temperature value in millicelsius + * @hysteresis: relative hysteresis in millicelsius + * @type: trip point type + */ +struct thermal_trip { + int temperature; + int hysteresis; + enum thermal_trip_type type; +}; + struct thermal_cooling_device_ops { int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); @@ -113,7 +123,8 @@ struct thermal_cooling_device { * @trip_attrs: attributes for trip points for sysfs: trip temperature * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis * @mode: current mode of this thermal zone * @devdata: private pointer for device private data - * @trips: number of trip points the thermal zone supports + * @trips: an array of struct thermal_trip +
* @num_trips: number of trip points the thermal zone supports * @trips_disabled; bitmap for disabled trips * @passive_delay_jiffies: number of jiffies to wait between polls when * performing passive cooling. @@ -153,7 +164,8 @@ struct thermal_zone_device { struct thermal_attr *trip_hyst_attrs; enum thermal_device_mode mode; void *devdata; - int trips; + struct thermal_trip *trips; + int num_trips; unsigned long trips_disabled; /* bitmap for disabled trips */ unsigned long passive_delay_jiffies; unsigned long polling_delay_jiffies; @@ -366,8 +378,14 @@ void devm_thermal_zone_of_sensor_unregister(struct device *dev, struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, void *, struct thermal_zone_device_ops *, struct thermal_zone_params *, int, int); + void thermal_zone_device_unregister(struct thermal_zone_device *); +struct thermal_zone_device * +thermal_zone_device_register_with_trips(const char *, struct thermal_trip *, int, int, + void *, struct thermal_zone_device_ops *, + struct thermal_zone_params *, int, int); + int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *, unsigned long, unsigned long, diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 28031b15f878..55717a2eda08 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -200,13 +200,13 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) */ \ if (rcuidle) { \ __idx = srcu_read_lock_notrace(&tracepoint_srcu);\ - rcu_irq_enter_irqson(); \ + ct_irq_enter_irqson(); \ } \ \ __DO_TRACE_CALL(name, TP_ARGS(args)); \ \ if (rcuidle) { \ - rcu_irq_exit_irqson(); \ + ct_irq_exit_irqson(); \ srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\ } \ \ diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 5a328cf02b75..47e5d374c7eb 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -148,7 +148,7 @@ _copy_to_user(void __user *, const void *, unsigned long); static __always_inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { - if (likely(check_copy_size(to, n, false))) + if (check_copy_size(to, n, false)) n = _copy_from_user(to, from, n); return n; } @@ -156,7 +156,7 @@ copy_from_user(void *to, const void __user *from, unsigned long n) static __always_inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) { - if (likely(check_copy_size(from, n, true))) + if (check_copy_size(from, n, true)) n = _copy_to_user(to, from, n); return n; } diff --git a/include/linux/uio.h b/include/linux/uio.h index 739285fe5a2f..9a2dc496d535 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -156,19 +156,17 @@ static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset, static __always_inline __must_check size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { - if (unlikely(!check_copy_size(addr, bytes, true))) - return 0; - else + if (check_copy_size(addr, bytes, true)) return _copy_to_iter(addr, bytes, i); + return 0; } static __always_inline __must_check size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { - if (unlikely(!check_copy_size(addr, bytes, false))) - return 0; - else + if (check_copy_size(addr, bytes, false)) return _copy_from_iter(addr, bytes, i); + return 0; } static __always_inline __must_check @@ -184,10 +182,9 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) static __always_inline __must_check 
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) { - if (unlikely(!check_copy_size(addr, bytes, false))) - return 0; - else + if (check_copy_size(addr, bytes, false)) return _copy_from_iter_nocache(addr, bytes, i); + return 0; } static __always_inline __must_check @@ -219,6 +216,8 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i); #endif size_t iov_iter_zero(size_t bytes, struct iov_iter *); +bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask, + unsigned len_mask); unsigned long iov_iter_alignment(const struct iov_iter *i); unsigned long iov_iter_gap_alignment(const struct iov_iter *i); void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov, diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 4700a88a28f6..7b4a13d3bd91 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -178,7 +178,8 @@ struct vdpa_map_file { * for the device * @vdev: vdpa device * Returns virtqueue algin requirement - * @get_vq_group: Get the group id for a specific virtqueue + * @get_vq_group: Get the group id for a specific + * virtqueue (optional) * @vdev: vdpa device * @idx: virtqueue index * Returns u32: group id for this virtqueue @@ -243,7 +244,7 @@ struct vdpa_map_file { * Returns the iova range supported by * the device. * @set_group_asid: Set address space identifier for a - * virtqueue group + * virtqueue group (optional) * @vdev: vdpa device * @group: virtqueue group * @asid: address space id for this group diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 9a36051ceb76..b47c2e7ed0ee 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -257,6 +257,7 @@ void virtio_device_ready(struct virtio_device *dev) WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK); +#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION /* * The virtio_synchronize_cbs() makes sure vring_interrupt() * will see the driver specific setup if it sees vq->broken @@ -264,6 +265,7 @@ void virtio_device_ready(struct virtio_device *dev) */ virtio_synchronize_cbs(dev); __virtio_unbreak_device(dev); +#endif /* * The transport should ensure the visibility of vq->broken * before setting DRIVER_OK. See the comments for the transport @@ -604,13 +606,4 @@ static inline void virtio_cwrite64(struct virtio_device *vdev, _r; \ }) -#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS -int arch_has_restricted_virtio_memory_access(void); -#else -static inline int arch_has_restricted_virtio_memory_access(void) -{ - return 0; -} -#endif /* CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS */ - #endif /* _LINUX_VIRTIO_CONFIG_H */ diff --git a/include/linux/visorbus.h b/include/linux/visorbus.h deleted file mode 100644 index 0d8bd6769b13..000000000000 --- a/include/linux/visorbus.h +++ /dev/null @@ -1,344 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (C) 2010 - 2013 UNISYS CORPORATION - * All rights reserved. - */ - -/* - * This header file is to be included by other kernel mode components that - * implement a particular kind of visor_device. Each of these other kernel - * mode components is called a visor device driver. Refer to visortemplate - * for a minimal sample visor device driver. - * - * There should be nothing in this file that is private to the visorbus - * bus implementation itself. 
- */ - -#ifndef __VISORBUS_H__ -#define __VISORBUS_H__ - -#include <linux/device.h> - -#define VISOR_CHANNEL_SIGNATURE ('L' << 24 | 'N' << 16 | 'C' << 8 | 'E') - -/* - * enum channel_serverstate - * @CHANNELSRV_UNINITIALIZED: Channel is in an undefined state. - * @CHANNELSRV_READY: Channel has been initialized by server. - */ -enum channel_serverstate { - CHANNELSRV_UNINITIALIZED = 0, - CHANNELSRV_READY = 1 -}; - -/* - * enum channel_clientstate - * @CHANNELCLI_DETACHED: - * @CHANNELCLI_DISABLED: Client can see channel but is NOT allowed to use it - * unless given TBD* explicit request - * (should actually be < DETACHED). - * @CHANNELCLI_ATTACHING: Legacy EFI client request for EFI server to attach. - * @CHANNELCLI_ATTACHED: Idle, but client may want to use channel any time. - * @CHANNELCLI_BUSY: Client either wants to use or is using channel. - * @CHANNELCLI_OWNED: "No worries" state - client can access channel - * anytime. - */ -enum channel_clientstate { - CHANNELCLI_DETACHED = 0, - CHANNELCLI_DISABLED = 1, - CHANNELCLI_ATTACHING = 2, - CHANNELCLI_ATTACHED = 3, - CHANNELCLI_BUSY = 4, - CHANNELCLI_OWNED = 5 -}; - -/* - * Values for VISOR_CHANNEL_PROTOCOL.Features: This define exists so that - * a guest can look at the FeatureFlags in the io channel, and configure the - * driver to use interrupts or not based on this setting. All feature bits for - * all channels should be defined here. The io channel feature bits are defined - * below. - */ -#define VISOR_DRIVER_ENABLES_INTS (0x1ULL << 1) -#define VISOR_CHANNEL_IS_POLLING (0x1ULL << 3) -#define VISOR_IOVM_OK_DRIVER_DISABLING_INTS (0x1ULL << 4) -#define VISOR_DRIVER_DISABLES_INTS (0x1ULL << 5) -#define VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6) - -/* - * struct channel_header - Common Channel Header - * @signature: Signature. - * @legacy_state: DEPRECATED - being replaced by. - * @header_size: sizeof(struct channel_header). - * @size: Total size of this channel in bytes. - * @features: Flags to modify behavior. - * @chtype: Channel type: data, bus, control, etc.. - * @partition_handle: ID of guest partition. - * @handle: Device number of this channel in client. - * @ch_space_offset: Offset in bytes to channel specific area. - * @version_id: Struct channel_header Version ID. - * @partition_index: Index of guest partition. - * @zone_uuid: Guid of Channel's zone. - * @cli_str_offset: Offset from channel header to null-terminated - * ClientString (0 if ClientString not present). - * @cli_state_boot: CHANNEL_CLIENTSTATE of pre-boot EFI client of this - * channel. - * @cmd_state_cli: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see - * ServerStateUp, ServerStateDown, etc). - * @cli_state_os: CHANNEL_CLIENTSTATE of Guest OS client of this channel. - * @ch_characteristic: CHANNEL_CHARACTERISTIC_<xxx>. - * @cmd_state_srv: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see - * ServerStateUp, ServerStateDown, etc). - * @srv_state: CHANNEL_SERVERSTATE. - * @cli_error_boot: Bits to indicate err states for boot clients, so err - * messages can be throttled. - * @cli_error_os: Bits to indicate err states for OS clients, so err - * messages can be throttled. - * @filler: Pad out to 128 byte cacheline. - * @recover_channel: Please add all new single-byte values below here. 
- */ -struct channel_header { - u64 signature; - u32 legacy_state; - /* SrvState, CliStateBoot, and CliStateOS below */ - u32 header_size; - u64 size; - u64 features; - guid_t chtype; - u64 partition_handle; - u64 handle; - u64 ch_space_offset; - u32 version_id; - u32 partition_index; - guid_t zone_guid; - u32 cli_str_offset; - u32 cli_state_boot; - u32 cmd_state_cli; - u32 cli_state_os; - u32 ch_characteristic; - u32 cmd_state_srv; - u32 srv_state; - u8 cli_error_boot; - u8 cli_error_os; - u8 filler[1]; - u8 recover_channel; -} __packed; - -#define VISOR_CHANNEL_ENABLE_INTS (0x1ULL << 0) - -/* - * struct signal_queue_header - Subheader for the Signal Type variation of the - * Common Channel. - * @version: SIGNAL_QUEUE_HEADER Version ID. - * @chtype: Queue type: storage, network. - * @size: Total size of this queue in bytes. - * @sig_base_offset: Offset to signal queue area. - * @features: Flags to modify behavior. - * @num_sent: Total # of signals placed in this queue. - * @num_overflows: Total # of inserts failed due to full queue. - * @signal_size: Total size of a signal for this queue. - * @max_slots: Max # of slots in queue, 1 slot is always empty. - * @max_signals: Max # of signals in queue (MaxSignalSlots-1). - * @head: Queue head signal #. - * @num_received: Total # of signals removed from this queue. - * @tail: Queue tail signal. - * @reserved1: Reserved field. - * @reserved2: Reserved field. - * @client_queue: - * @num_irq_received: Total # of Interrupts received. This is incremented by the - * ISR in the guest windows driver. - * @num_empty: Number of times that visor_signal_remove is called and - * returned Empty Status. - * @errorflags: Error bits set during SignalReinit to denote trouble with - * client's fields. - * @filler: Pad out to 64 byte cacheline. - */ -struct signal_queue_header { - /* 1st cache line */ - u32 version; - u32 chtype; - u64 size; - u64 sig_base_offset; - u64 features; - u64 num_sent; - u64 num_overflows; - u32 signal_size; - u32 max_slots; - u32 max_signals; - u32 head; - /* 2nd cache line */ - u64 num_received; - u32 tail; - u32 reserved1; - u64 reserved2; - u64 client_queue; - u64 num_irq_received; - u64 num_empty; - u32 errorflags; - u8 filler[12]; -} __packed; - -/* VISORCHANNEL Guids */ -/* {414815ed-c58c-11da-95a9-00e08161165f} */ -#define VISOR_VHBA_CHANNEL_GUID \ - GUID_INIT(0x414815ed, 0xc58c, 0x11da, \ - 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f) -#define VISOR_VHBA_CHANNEL_GUID_STR \ - "414815ed-c58c-11da-95a9-00e08161165f" -struct visorchipset_state { - u32 created:1; - u32 attached:1; - u32 configured:1; - u32 running:1; - /* Remaining bits in this 32-bit word are reserved. */ -}; - -/** - * struct visor_device - A device type for things "plugged" into the visorbus - * bus - * @visorchannel: Points to the channel that the device is - * associated with. - * @channel_type_guid: Identifies the channel type to the bus driver. - * @device: Device struct meant for use by the bus driver - * only. - * @list_all: Used by the bus driver to enumerate devices. - * @timer: Timer fired periodically to do interrupt-type - * activity. - * @being_removed: Indicates that the device is being removed from - * the bus. Private bus driver use only. - * @visordriver_callback_lock: Used by the bus driver to lock when adding and - * removing devices. - * @pausing: Indicates that a change towards a paused state. - * is in progress. Only modified by the bus driver. - * @resuming: Indicates that a change towards a running state - * is in progress. 
Only modified by the bus driver. - * @chipset_bus_no: Private field used by the bus driver. - * @chipset_dev_no: Private field used the bus driver. - * @state: Used to indicate the current state of the - * device. - * @inst: Unique GUID for this instance of the device. - * @name: Name of the device. - * @pending_msg_hdr: For private use by bus driver to respond to - * hypervisor requests. - * @vbus_hdr_info: A pointer to header info. Private use by bus - * driver. - * @partition_guid: Indicates client partion id. This should be the - * same across all visor_devices in the current - * guest. Private use by bus driver only. - */ -struct visor_device { - struct visorchannel *visorchannel; - guid_t channel_type_guid; - /* These fields are for private use by the bus driver only. */ - struct device device; - struct list_head list_all; - struct timer_list timer; - bool timer_active; - bool being_removed; - struct mutex visordriver_callback_lock; /* synchronize probe/remove */ - bool pausing; - bool resuming; - u32 chipset_bus_no; - u32 chipset_dev_no; - struct visorchipset_state state; - guid_t inst; - u8 *name; - struct controlvm_message_header *pending_msg_hdr; - void *vbus_hdr_info; - guid_t partition_guid; - struct dentry *debugfs_dir; - struct dentry *debugfs_bus_info; -}; - -#define to_visor_device(x) container_of(x, struct visor_device, device) - -typedef void (*visorbus_state_complete_func) (struct visor_device *dev, - int status); - -/* - * This struct describes a specific visor channel, by providing its GUID, name, - * and sizes. - */ -struct visor_channeltype_descriptor { - const guid_t guid; - const char *name; - u64 min_bytes; - u32 version; -}; - -/** - * struct visor_driver - Information provided by each visor driver when it - * registers with the visorbus driver - * @name: Name of the visor driver. - * @owner: The module owner. - * @channel_types: Types of channels handled by this driver, ending with - * a zero GUID. Our specialized BUS.match() method knows - * about this list, and uses it to determine whether this - * driver will in fact handle a new device that it has - * detected. - * @probe: Called when a new device comes online, by our probe() - * function specified by driver.probe() (triggered - * ultimately by some call to driver_register(), - * bus_add_driver(), or driver_attach()). - * @remove: Called when a new device is removed, by our remove() - * function specified by driver.remove() (triggered - * ultimately by some call to device_release_driver()). - * @channel_interrupt: Called periodically, whenever there is a possiblity - * that "something interesting" may have happened to the - * channel. - * @pause: Called to initiate a change of the device's state. If - * the return value is < 0, there was an error and the - * state transition will NOT occur. If the return value - * is >= 0, then the state transition was INITIATED - * successfully, and complete_func() will be called (or - * was just called) with the final status when either the - * state transition fails or completes successfully. - * @resume: Behaves similar to pause. - * @driver: Private reference to the device driver. For use by bus - * driver only.
- */ -struct visor_driver { - const char *name; - struct module *owner; - struct visor_channeltype_descriptor *channel_types; - int (*probe)(struct visor_device *dev); - void (*remove)(struct visor_device *dev); - void (*channel_interrupt)(struct visor_device *dev); - int (*pause)(struct visor_device *dev, - visorbus_state_complete_func complete_func); - int (*resume)(struct visor_device *dev, - visorbus_state_complete_func complete_func); - - /* These fields are for private use by the bus driver only. */ - struct device_driver driver; -}; - -#define to_visor_driver(x) (container_of(x, struct visor_driver, driver)) - -int visor_check_channel(struct channel_header *ch, struct device *dev, - const guid_t *expected_uuid, char *chname, - u64 expected_min_bytes, u32 expected_version, - u64 expected_signature); - -int visorbus_register_visor_driver(struct visor_driver *drv); -void visorbus_unregister_visor_driver(struct visor_driver *drv); -int visorbus_read_channel(struct visor_device *dev, - unsigned long offset, void *dest, - unsigned long nbytes); -int visorbus_write_channel(struct visor_device *dev, - unsigned long offset, void *src, - unsigned long nbytes); -int visorbus_enable_channel_interrupts(struct visor_device *dev); -void visorbus_disable_channel_interrupts(struct visor_device *dev); - -int visorchannel_signalremove(struct visorchannel *channel, u32 queue, - void *msg); -int visorchannel_signalinsert(struct visorchannel *channel, u32 queue, - void *msg); -bool visorchannel_signalempty(struct visorchannel *channel, u32 queue); -const guid_t *visorchannel_get_guid(struct visorchannel *channel); - -#define BUS_ROOT_DEVICE UINT_MAX -struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no, - struct visor_device *from); -#endif diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index b159c2789961..096d48aa3437 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -215,6 +215,7 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size, void free_vm_area(struct vm_struct *area); extern struct vm_struct *remove_vm_area(const void *addr); extern struct vm_struct *find_vm_area(const void *addr); +struct vmap_area *find_vmap_area(unsigned long addr); static inline bool is_vm_area_hugepages(const void *addr) { diff --git a/include/linux/wait.h b/include/linux/wait.h index 851e07da2583..58cfbf81447c 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -544,10 +544,11 @@ do { \ \ hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \ HRTIMER_MODE_REL); \ - if ((timeout) != KTIME_MAX) \ - hrtimer_start_range_ns(&__t.timer, timeout, \ - current->timer_slack_ns, \ - HRTIMER_MODE_REL); \ + if ((timeout) != KTIME_MAX) { \ + hrtimer_set_expires_range_ns(&__t.timer, timeout, \ + current->timer_slack_ns); \ + hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \ + } \ \ __ret = ___wait_event(wq_head, condition, state, 0, 0, \ if (!__t.task) { \ diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h index 3b9a40ae8bdb..fc6bba20273b 100644 --- a/include/linux/watch_queue.h +++ b/include/linux/watch_queue.h @@ -4,7 +4,7 @@ * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved. 
* Written by David Howells (dhowells@redhat.com) * - * See Documentation/watch_queue.rst + * See Documentation/core-api/watch_queue.rst */ #ifndef _LINUX_WATCH_QUEUE_H diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 7fee9b6cfede..62e75dd40d9a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -406,7 +406,7 @@ alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...); * alloc_ordered_workqueue - allocate an ordered workqueue * @fmt: printf format for the name of the workqueue * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) - * @args...: args for @fmt + * @args: args for @fmt * * Allocate an ordered workqueue. An ordered workqueue executes at * most one work item at any given time in the queued order. They are @@ -445,7 +445,7 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay); extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork); -extern void flush_workqueue(struct workqueue_struct *wq); +extern void __flush_workqueue(struct workqueue_struct *wq); extern void drain_workqueue(struct workqueue_struct *wq); extern int schedule_on_each_cpu(work_func_t func); @@ -563,15 +563,23 @@ static inline bool schedule_work(struct work_struct *work) return queue_work(system_wq, work); } +/* + * Detect attempt to flush system-wide workqueues at compile time when possible. + * + * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp + * for reasons and steps for converting system-wide workqueues into local workqueues. + */ +extern void __warn_flushing_systemwide_wq(void) + __compiletime_warning("Please avoid flushing system-wide workqueues."); + /** * flush_scheduled_work - ensure that any scheduled work has run to completion. * * Forces execution of the kernel-global workqueue and blocks until its * completion. * - * Think twice before calling this function! It's very easy to get into - * trouble if you don't take great care. Either of the following situations - * will lead to deadlock: + * It's very easy to get into trouble if you don't take great care. + * Either of the following situations will lead to deadlock: * * One of the work items currently on the workqueue needs to acquire * a lock held by your code or its caller. @@ -586,11 +594,51 @@ static inline bool schedule_work(struct work_struct *work) * need to know that a particular work item isn't queued and isn't running. * In such cases you should use cancel_delayed_work_sync() or * cancel_work_sync() instead. + * + * Please stop calling this function! A conversion to stop flushing system-wide + * workqueues is in progress. This function will be removed after all in-tree + * users stopped calling this function. */ -static inline void flush_scheduled_work(void) -{ - flush_workqueue(system_wq); -} +/* + * The background of commit 771c035372a036f8 ("deprecate the + * '__deprecated' attribute warnings entirely and for good") is that, + * since Linus builds all modules between every single pull he does, + * the standard kernel build needs to be _clean_ in order to be able to + * notice when new problems happen. Therefore, don't emit warning while + * there are in-tree users. 
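To make the requested conversion concrete, a hedged sketch of what replacing system-wide flushes looks like in a driver: allocate a private workqueue, queue work onto it, and flush/destroy that queue instead. All names here are illustrative:

static struct workqueue_struct *my_wq;

static int my_driver_init_sketch(void)
{
	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
	return my_wq ? 0 : -ENOMEM;
}

static void my_driver_exit_sketch(void)
{
	/*
	 * Flushing a driver-local workqueue is fine; only the system-wide
	 * queues trip __warn_flushing_systemwide_wq() below.
	 */
	flush_workqueue(my_wq);
	destroy_workqueue(my_wq);
}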
+ */ +#define flush_scheduled_work() \ +({ \ + if (0) \ + __warn_flushing_systemwide_wq(); \ + __flush_workqueue(system_wq); \ +}) + +/* + * Although there is no longer in-tree caller, for now just emit warning + * in order to give out-of-tree callers time to update. + */ +#define flush_workqueue(wq) \ +({ \ + struct workqueue_struct *_wq = (wq); \ + \ + if ((__builtin_constant_p(_wq == system_wq) && \ + _wq == system_wq) || \ + (__builtin_constant_p(_wq == system_highpri_wq) && \ + _wq == system_highpri_wq) || \ + (__builtin_constant_p(_wq == system_long_wq) && \ + _wq == system_long_wq) || \ + (__builtin_constant_p(_wq == system_unbound_wq) && \ + _wq == system_unbound_wq) || \ + (__builtin_constant_p(_wq == system_freezable_wq) && \ + _wq == system_freezable_wq) || \ + (__builtin_constant_p(_wq == system_power_efficient_wq) && \ + _wq == system_power_efficient_wq) || \ + (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \ + _wq == system_freezable_power_efficient_wq)) \ + __warn_flushing_systemwide_wq(); \ + __flush_workqueue(_wq); \ +}) /** * schedule_delayed_work_on - queue work in global workqueue on CPU after delay diff --git a/include/linux/writeback.h b/include/linux/writeback.h index da21d63f70e2..3f045f6d6c4f 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -101,9 +101,9 @@ struct writeback_control { #endif }; -static inline int wbc_to_write_flags(struct writeback_control *wbc) +static inline blk_opf_t wbc_to_write_flags(struct writeback_control *wbc) { - int flags = 0; + blk_opf_t flags = 0; if (wbc->punt_to_cgroup) flags = REQ_CGROUP_PUNT; @@ -364,7 +364,14 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh); void wb_update_bandwidth(struct bdi_writeback *wb); + +/* Invoke balance dirty pages in async mode. 
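And a sketch of the intended caller pattern for the flags variant declared just below: a nowait-aware write path asks for non-sleeping throttling and propagates the error. The surrounding function is hypothetical; the flag plumbing follows how an IOCB_NOWAIT path would use BDP_ASYNC:

static int my_write_throttle_sketch(struct kiocb *iocb,
				    struct address_space *mapping)
{
	unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0;

	/* With BDP_ASYNC set, returns -EAGAIN instead of sleeping to throttle. */
	return balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
}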
*/ +#define BDP_ASYNC 0x0001 + void balance_dirty_pages_ratelimited(struct address_space *mapping); +int balance_dirty_pages_ratelimited_flags(struct address_space *mapping, + unsigned int flags); + bool wb_over_bg_thresh(struct bdi_writeback *wb); typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 72feab5ea8d4..44dd6d6e01bc 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -16,6 +16,7 @@ #include <linux/kconfig.h> #include <linux/kernel.h> #include <linux/rcupdate.h> +#include <linux/sched/mm.h> #include <linux/spinlock.h> #include <linux/types.h> @@ -586,6 +587,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index, { void *curr; + might_alloc(gfp); xa_lock_bh(xa); curr = __xa_store(xa, index, entry, gfp); xa_unlock_bh(xa); @@ -612,6 +614,7 @@ static inline void *xa_store_irq(struct xarray *xa, unsigned long index, { void *curr; + might_alloc(gfp); xa_lock_irq(xa); curr = __xa_store(xa, index, entry, gfp); xa_unlock_irq(xa); @@ -687,6 +690,7 @@ static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index, { void *curr; + might_alloc(gfp); xa_lock(xa); curr = __xa_cmpxchg(xa, index, old, entry, gfp); xa_unlock(xa); @@ -714,6 +718,7 @@ static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index, { void *curr; + might_alloc(gfp); xa_lock_bh(xa); curr = __xa_cmpxchg(xa, index, old, entry, gfp); xa_unlock_bh(xa); @@ -741,6 +746,7 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, { void *curr; + might_alloc(gfp); xa_lock_irq(xa); curr = __xa_cmpxchg(xa, index, old, entry, gfp); xa_unlock_irq(xa); @@ -770,6 +776,7 @@ static inline int __must_check xa_insert(struct xarray *xa, { int err; + might_alloc(gfp); xa_lock(xa); err = __xa_insert(xa, index, entry, gfp); xa_unlock(xa); @@ -799,6 +806,7 @@ static inline int __must_check xa_insert_bh(struct xarray *xa, { int err; + might_alloc(gfp); xa_lock_bh(xa); err = __xa_insert(xa, index, entry, gfp); xa_unlock_bh(xa); @@ -828,6 +836,7 @@ static inline int __must_check xa_insert_irq(struct xarray *xa, { int err; + might_alloc(gfp); xa_lock_irq(xa); err = __xa_insert(xa, index, entry, gfp); xa_unlock_irq(xa); @@ -857,6 +866,7 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id, { int err; + might_alloc(gfp); xa_lock(xa); err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock(xa); @@ -886,6 +896,7 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id, { int err; + might_alloc(gfp); xa_lock_bh(xa); err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock_bh(xa); @@ -915,6 +926,7 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id, { int err; + might_alloc(gfp); xa_lock_irq(xa); err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock_irq(xa); @@ -948,6 +960,7 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, { int err; + might_alloc(gfp); xa_lock(xa); err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); xa_unlock(xa); @@ -981,6 +994,7 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry, { int err; + might_alloc(gfp); xa_lock_bh(xa); err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); xa_unlock_bh(xa); @@ -1014,6 +1028,7 @@ static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry, { int err; + might_alloc(gfp); xa_lock_irq(xa); err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); xa_unlock_irq(xa); @@ -1508,6 +1523,7 @@ void 
*xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t); void xas_init_marks(const struct xa_state *); bool xas_nomem(struct xa_state *, gfp_t); +void xas_destroy(struct xa_state *); void xas_pause(struct xa_state *); void xas_create_range(struct xa_state *); diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 4c379d23ec6e..979a9d3e5bfb 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -61,7 +61,7 @@ int __vfs_setxattr_locked(struct user_namespace *, struct dentry *, const char *, const void *, size_t, int, struct inode **); int vfs_setxattr(struct user_namespace *, struct dentry *, const char *, - const void *, size_t, int); + void *, size_t, int); int __vfs_removexattr(struct user_namespace *, struct dentry *, const char *); int __vfs_removexattr_locked(struct user_namespace *, struct dentry *, const char *, struct inode **); diff --git a/include/net/addrconf.h b/include/net/addrconf.h index f7506f08e505..c04f359655b8 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -405,6 +405,9 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev) { const struct inet6_dev *idev = __in6_dev_get(dev); + if (unlikely(!idev)) + return true; + return !!idev->cnf.ignore_routes_with_linkdown; } diff --git a/include/net/amt.h b/include/net/amt.h index 0e40c3d64fcf..08fc30cf2f34 100644 --- a/include/net/amt.h +++ b/include/net/amt.h @@ -78,6 +78,15 @@ enum amt_status { #define AMT_STATUS_MAX (__AMT_STATUS_MAX - 1) +/* Gateway events only */ +enum amt_event { + AMT_EVENT_NONE, + AMT_EVENT_RECEIVE, + AMT_EVENT_SEND_DISCOVERY, + AMT_EVENT_SEND_REQUEST, + __AMT_EVENT_MAX, +}; + struct amt_header { #if defined(__LITTLE_ENDIAN_BITFIELD) u8 type:4, @@ -292,6 +301,12 @@ struct amt_group_node { struct hlist_head sources[]; }; +#define AMT_MAX_EVENTS 16 +struct amt_events { + enum amt_event event; + struct sk_buff *skb; +}; + struct amt_dev { struct net_device *dev; struct net_device *stream_dev; @@ -308,6 +323,7 @@ struct amt_dev { struct delayed_work req_wq; /* Protected by RTNL */ struct delayed_work secret_wq; + struct work_struct event_wq; /* AMT status */ enum amt_status status; /* Generated key */ @@ -345,6 +361,10 @@ struct amt_dev { /* Used only in gateway mode */ u64 mac:48, reserved:16; + /* AMT gateway side message handler queue */ + struct amt_events events[AMT_MAX_EVENTS]; + u8 event_idx; + u8 nr_events; }; #define AMT_TOS 0xc0 diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 3c4f550e5a8b..2f766e3437ce 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -847,6 +847,7 @@ enum { }; void l2cap_chan_hold(struct l2cap_chan *c); +struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c); void l2cap_chan_put(struct l2cap_chan *c); static inline void l2cap_chan_lock(struct l2cap_chan *chan) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 6d02e12e4702..80f41446b1f0 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -8462,11 +8462,12 @@ int cfg80211_bss_color_notify(struct net_device *dev, gfp_t gfp, * cfg80211_obss_color_collision_notify - notify about bss color collision * @dev: network device * @color_bitmap: representations of the colors that the local BSS is aware of + * @gfp: allocation flags */ static inline int cfg80211_obss_color_collision_notify(struct net_device *dev, - u64 color_bitmap) + u64 color_bitmap, gfp_t gfp) { - return cfg80211_bss_color_notify(dev, GFP_KERNEL, + return cfg80211_bss_color_notify(dev, gfp, 
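l2cap_chan_hold_unless_zero() above brings the usual get-unless-zero idiom to L2CAP channels: a lookup that races with the final put must not resurrect a dying object. A generic sketch of the same pattern using kref; struct obj is illustrative, not the bluetooth code:

	#include <linux/kref.h>

	struct obj {
		struct kref ref;
	};

	/* lookup path: take a reference only if the object is still live */
	static struct obj *obj_get(struct obj *o)
	{
		if (o && !kref_get_unless_zero(&o->ref))
			return NULL;	/* refcount already hit zero; treat as gone */
		return o;
	}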
NL80211_CMD_OBSS_COLOR_COLLISION, 0, color_bitmap); } diff --git a/include/net/compat.h b/include/net/compat.h index 595fee069b82..84c163f40f38 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -46,9 +46,8 @@ struct compat_rtentry { unsigned short rt_irtt; /* Initial RTT */ }; -int __get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg, - struct sockaddr __user **save_addr, compat_uptr_t *ptr, - compat_size_t *len); +int __get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr *msg, + struct sockaddr __user **save_addr); int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, struct sockaddr __user **, struct iovec **); int put_cmsg_compat(struct msghdr*, int, int, int, void *); diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 021778a7e1af..7ac313858037 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -152,6 +152,7 @@ enum flow_action_id { FLOW_ACTION_PIPE, FLOW_ACTION_VLAN_PUSH_ETH, FLOW_ACTION_VLAN_POP_ETH, + FLOW_ACTION_CONTINUE, NUM_FLOW_ACTIONS, }; @@ -612,5 +613,6 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, enum tc_setup_type type, void *data, struct flow_block_offload *bo, void (*cleanup)(struct flow_block_cb *block_cb)); +bool flow_indr_dev_exists(void); #endif /* _NET_FLOW_OFFLOAD_H */ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 077cd730ce2f..ee88f0f1350f 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -25,7 +25,6 @@ #undef INET_CSK_CLEAR_TIMERS struct inet_bind_bucket; -struct inet_bind2_bucket; struct tcp_congestion_ops; /* @@ -58,7 +57,6 @@ struct inet_connection_sock_af_ops { * * @icsk_accept_queue: FIFO of established children * @icsk_bind_hash: Bind node - * @icsk_bind2_hash: Bind node in the bhash2 table * @icsk_timeout: Timeout * @icsk_retransmit_timer: Resend (no ack) * @icsk_rto: Retransmit timeout @@ -85,7 +83,6 @@ struct inet_connection_sock { struct inet_sock icsk_inet; struct request_sock_queue icsk_accept_queue; struct inet_bind_bucket *icsk_bind_hash; - struct inet_bind2_bucket *icsk_bind2_hash; unsigned long icsk_timeout; struct timer_list icsk_retransmit_timer; struct timer_list icsk_delack_timer; @@ -324,7 +321,7 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); -#define TCP_PINGPONG_THRESH 3 +#define TCP_PINGPONG_THRESH 1 static inline void inet_csk_enter_pingpong_mode(struct sock *sk) { @@ -341,14 +338,6 @@ static inline bool inet_csk_in_pingpong_mode(struct sock *sk) return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH; } -static inline void inet_csk_inc_pingpong_cnt(struct sock *sk) -{ - struct inet_connection_sock *icsk = inet_csk(sk); - - if (icsk->icsk_ack.pingpong < U8_MAX) - icsk->icsk_ack.pingpong++; -} - static inline bool inet_csk_has_ulp(struct sock *sk) { return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops; diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index a0887b70967b..fd6b510d114b 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -90,32 +90,11 @@ struct inet_bind_bucket { struct hlist_head owners; }; -struct inet_bind2_bucket { - possible_net_t ib_net; - int l3mdev; - unsigned short port; - union { -#if IS_ENABLED(CONFIG_IPV6) - struct in6_addr v6_rcv_saddr; -#endif - __be32 rcv_saddr; - }; - /* Node in the inet2_bind_hashbucket chain */ - struct hlist_node node; 
- /* List of sockets hashed to this bucket */ - struct hlist_head owners; -}; - static inline struct net *ib_net(struct inet_bind_bucket *ib) { return read_pnet(&ib->ib_net); } -static inline struct net *ib2_net(struct inet_bind2_bucket *ib) -{ - return read_pnet(&ib->ib_net); -} - #define inet_bind_bucket_for_each(tb, head) \ hlist_for_each_entry(tb, head, node) @@ -124,15 +103,6 @@ struct inet_bind_hashbucket { struct hlist_head chain; }; -/* This is synchronized using the inet_bind_hashbucket's spinlock. - * Instead of having separate spinlocks, the inet_bind2_hashbucket can share - * the inet_bind_hashbucket's given that in every case where the bhash2 table - * is useful, a lookup in the bhash table also occurs. - */ -struct inet_bind2_hashbucket { - struct hlist_head chain; -}; - /* Sockets can be hashed in established or listening table. * We must use different 'nulls' end-of-chain value for all hash buckets : * A socket might transition from ESTABLISH to LISTEN state without @@ -164,12 +134,6 @@ struct inet_hashinfo { */ struct kmem_cache *bind_bucket_cachep; struct inet_bind_hashbucket *bhash; - /* The 2nd binding table hashed by port and address. - * This is used primarily for expediting the resolution of bind - * conflicts. - */ - struct kmem_cache *bind2_bucket_cachep; - struct inet_bind2_hashbucket *bhash2; unsigned int bhash_size; /* The 2nd listener table hashed by local port and address */ @@ -215,7 +179,7 @@ static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if, int dif, int sdif) { #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) - return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept, + return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept), bound_dev_if, dif, sdif); #else return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); @@ -229,36 +193,6 @@ inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net, void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb); -static inline bool check_bind_bucket_match(struct inet_bind_bucket *tb, - struct net *net, - const unsigned short port, - int l3mdev) -{ - return net_eq(ib_net(tb), net) && tb->port == port && - tb->l3mdev == l3mdev; -} - -struct inet_bind2_bucket * -inet_bind2_bucket_create(struct kmem_cache *cachep, struct net *net, - struct inet_bind2_hashbucket *head, - const unsigned short port, int l3mdev, - const struct sock *sk); - -void inet_bind2_bucket_destroy(struct kmem_cache *cachep, - struct inet_bind2_bucket *tb); - -struct inet_bind2_bucket * -inet_bind2_bucket_find(struct inet_hashinfo *hinfo, struct net *net, - const unsigned short port, int l3mdev, - struct sock *sk, - struct inet_bind2_hashbucket **head); - -bool check_bind2_bucket_match_nulladdr(struct inet_bind2_bucket *tb, - struct net *net, - const unsigned short port, - int l3mdev, - const struct sock *sk); - static inline u32 inet_bhashfn(const struct net *net, const __u16 lport, const u32 bhash_size) { @@ -266,7 +200,7 @@ static inline u32 inet_bhashfn(const struct net *net, const __u16 lport, } void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, - struct inet_bind2_bucket *tb2, const unsigned short snum); + const unsigned short snum); /* Caller must disable local BH processing. 
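This READ_ONCE() conversion, and the many similar hunks below, follow the standard annotation for lockless sysctl reads: the fast path loads with READ_ONCE() and the sysctl handler is expected to store with WRITE_ONCE(), which marks the data race as intentional (and keeps KCSAN quiet) without adding locking. The bare pattern, with an illustrative variable:

	#include <linux/compiler.h>

	static int my_sysctl_knob;

	/* sysctl handler (writer) */
	static void knob_store(int v)
	{
		WRITE_ONCE(my_sysctl_knob, v);
	}

	/* packet fast path (lockless reader) */
	static int knob_load(void)
	{
		return READ_ONCE(my_sysctl_knob);
	}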
*/ int __inet_inherit_port(const struct sock *sk, struct sock *child); diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index c1b5dcd6597c..6395f6b9a5d2 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -107,7 +107,8 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) { - if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) + if (!sk->sk_mark && + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)) return skb->mark; return sk->sk_mark; @@ -120,7 +121,7 @@ static inline int inet_request_bound_dev_if(const struct sock *sk, #ifdef CONFIG_NET_L3_MASTER_DEV struct net *net = sock_net(sk); - if (!bound_dev_if && net->ipv4.sysctl_tcp_l3mdev_accept) + if (!bound_dev_if && READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept)) return l3mdev_master_ifindex_by_index(net, skb->skb_iif); #endif @@ -132,7 +133,7 @@ static inline int inet_sk_bound_l3mdev(const struct sock *sk) #ifdef CONFIG_NET_L3_MASTER_DEV struct net *net = sock_net(sk); - if (!net->ipv4.sysctl_tcp_l3mdev_accept) + if (!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept)) return l3mdev_master_ifindex_by_index(net, sk->sk_bound_dev_if); #endif @@ -253,6 +254,11 @@ struct inet_sock { #define IP_CMSG_CHECKSUM BIT(7) #define IP_CMSG_RECVFRAGSIZE BIT(8) +static inline bool sk_is_inet(struct sock *sk) +{ + return sk->sk_family == AF_INET || sk->sk_family == AF_INET6; +} + /** * sk_to_full_sk - Access to a full socket * @sk: pointer to a socket @@ -369,7 +375,7 @@ static inline bool inet_get_convert_csum(struct sock *sk) static inline bool inet_can_nonlocal_bind(struct net *net, struct inet_sock *inet) { - return net->ipv4.sysctl_ip_nonlocal_bind || + return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) || inet->freebind || inet->transparent; } diff --git a/include/net/ip.h b/include/net/ip.h index 26fffda78cca..1c979fd1904c 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -357,7 +357,7 @@ static inline bool sysctl_dev_name_is_allowed(const char *name) static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port) { - return port < net->ipv4.sysctl_ip_prot_sock; + return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock); } #else @@ -384,7 +384,7 @@ void ipfrag_init(void); void ip_static_sysctl_init(void); #define IP4_REPLY_MARK(net, mark) \ - ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0) + (READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? 
(mark) : 0) static inline bool ip_is_fragment(const struct iphdr *iph) { @@ -446,7 +446,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, struct net *net = dev_net(dst->dev); unsigned int mtu; - if (net->ipv4.sysctl_ip_fwd_use_pmtu || + if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) || ip_mtu_locked(dst) || !forwarding) { mtu = rt->rt_pmtu; diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 5b38bf1a586b..de9dcc5652c4 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -1063,7 +1063,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), - void *from, int length, int transhdrlen, + void *from, size_t length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags); @@ -1079,7 +1079,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue, struct sk_buff *ip6_make_skb(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), - void *from, int length, int transhdrlen, + void *from, size_t length, int transhdrlen, struct ipcm6_cookie *ipc6, struct rt6_info *rt, unsigned int flags, struct inet_cork_full *cork); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index ebadb2103968..47642b020706 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -6960,10 +6960,11 @@ ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw, * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @color_bitmap: a 64 bit bitmap representing the colors that the local BSS is * aware of. + * @gfp: allocation flags */ void ieeee80211_obss_color_collision_notify(struct ieee80211_vif *vif, - u64 color_bitmap); + u64 color_bitmap, gfp_t gfp); /** * ieee80211_is_tx_data - check if frame is a data frame diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 20af9d3557b9..64cf655c818c 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -657,18 +657,22 @@ static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl) tmpl->len = sizeof(struct nft_set_ext); } -static inline void nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id, - unsigned int len) +static inline int nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id, + unsigned int len) { tmpl->len = ALIGN(tmpl->len, nft_set_ext_types[id].align); - BUG_ON(tmpl->len > U8_MAX); + if (tmpl->len > U8_MAX) + return -EINVAL; + tmpl->offset[id] = tmpl->len; tmpl->len += nft_set_ext_types[id].len + len; + + return 0; } -static inline void nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id) +static inline int nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id) { - nft_set_ext_add_length(tmpl, id, 0); + return nft_set_ext_add_length(tmpl, id, 0); } static inline void nft_set_ext_init(struct nft_set_ext *ext, @@ -1090,7 +1094,6 @@ struct nft_stats { struct nft_hook { struct list_head list; - bool inactive; struct nf_hook_ops ops; struct rcu_head rcu; }; @@ -1339,24 +1342,28 @@ void nft_unregister_flowtable_type(struct nf_flowtable_type *type); /** * struct nft_traceinfo - nft tracing information and state * + * @trace: other struct members are initialised + * @nf_trace: copy of skb->nf_trace before rule evaluation + * @type: event type (enum nft_trace_types) + * @skbid: hash of skb to be used as trace id + * @packet_dumped: packet headers 
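With the BUG_ON() gone, nft_set_ext_add() and nft_set_ext_add_length() above report template overflow instead of crashing, so every caller must now check the return value. A caller-side sketch; the chosen extension id is illustrative:

	#include <net/netfilter/nf_tables.h>

	static int build_tmpl(struct nft_set_ext_tmpl *tmpl)
	{
		int err;

		nft_set_ext_prepare(tmpl);
		err = nft_set_ext_add(tmpl, NFT_SET_EXT_EXPIRATION);
		if (err < 0)
			return err;	/* would exceed U8_MAX: -EINVAL instead of BUG() */
		return 0;
	}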
sent in a previous traceinfo message * @pkt: pktinfo currently processed * @basechain: base chain currently processed * @chain: chain currently processed * @rule: rule that was evaluated * @verdict: verdict given by rule - * @type: event type (enum nft_trace_types) - * @packet_dumped: packet headers sent in a previous traceinfo message - * @trace: other struct members are initialised */ struct nft_traceinfo { + bool trace; + bool nf_trace; + bool packet_dumped; + enum nft_trace_types type:8; + u32 skbid; const struct nft_pktinfo *pkt; const struct nft_base_chain *basechain; const struct nft_chain *chain; const struct nft_rule_dp *rule; const struct nft_verdict *verdict; - enum nft_trace_types type; - bool packet_dumped; - bool trace; }; void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt, diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h index 797147843958..3568b6a2f5f0 100644 --- a/include/net/netfilter/nf_tables_offload.h +++ b/include/net/netfilter/nf_tables_offload.h @@ -92,7 +92,7 @@ int nft_flow_rule_offload_commit(struct net *net); NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \ memset(&(__reg)->mask, 0xff, (__reg)->len); -int nft_chain_offload_priority(struct nft_base_chain *basechain); +bool nft_chain_offload_support(const struct nft_base_chain *basechain); int nft_offload_init(void); void nft_offload_exit(void); diff --git a/include/net/protocol.h b/include/net/protocol.h index f51c06ae365f..6aef8cb11cc8 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h @@ -35,8 +35,6 @@ /* This is used to register protocols. */ struct net_protocol { - int (*early_demux)(struct sk_buff *skb); - int (*early_demux_handler)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb); /* This returns an error if we weren't able to handle the error. */ @@ -52,8 +50,6 @@ struct net_protocol { #if IS_ENABLED(CONFIG_IPV6) struct inet6_protocol { - void (*early_demux)(struct sk_buff *skb); - void (*early_demux_handler)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb); /* This returns an error if we weren't able to handle the error. 
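The early_demux members can leave struct net_protocol and struct inet6_protocol because the remaining handlers are invoked directly (note tcp_v6_early_demux() and udp_v6_early_demux() losing their INDIRECT_CALLABLE wrappers further down), which also drops an indirect call from the receive fast path. A sketch of what the caller side looks like after the change; this is illustrative only, the real dispatch lives in the IPv4/IPv6 input paths:

	#include <linux/skbuff.h>
	#include <net/tcp.h>
	#include <net/udp.h>

	static void early_demux6(struct sk_buff *skb, u8 nexthdr)
	{
		/* direct calls replace the old ops->early_demux indirection */
		if (nexthdr == IPPROTO_TCP)
			tcp_v6_early_demux(skb);
		else if (nexthdr == IPPROTO_UDP)
			udp_v6_early_demux(skb);
	}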
*/ diff --git a/include/net/raw.h b/include/net/raw.h index 8ad8df594853..c51a635671a7 100644 --- a/include/net/raw.h +++ b/include/net/raw.h @@ -75,7 +75,7 @@ static inline bool raw_sk_bound_dev_eq(struct net *net, int bound_dev_if, int dif, int sdif) { #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) - return inet_bound_dev_eq(!!net->ipv4.sysctl_raw_l3mdev_accept, + return inet_bound_dev_eq(READ_ONCE(net->ipv4.sysctl_raw_l3mdev_accept), bound_dev_if, dif, sdif); #else return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); diff --git a/include/net/route.h b/include/net/route.h index 991a3985712d..bbcf2aba149f 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -373,7 +373,7 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst) struct net *net = dev_net(dst->dev); if (hoplimit == 0) - hoplimit = net->ipv4.sysctl_ip_default_ttl; + hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl); return hoplimit; } diff --git a/include/net/sock.h b/include/net/sock.h index c585ef6565d9..7a48991cdb19 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -348,7 +348,6 @@ struct sk_filter; * @sk_txtime_report_errors: set report errors mode for SO_TXTIME * @sk_txtime_unused: unused txtime flags * @ns_tracker: tracker for netns reference - * @sk_bind2_node: bind node in the bhash2 table */ struct sock { /* @@ -538,7 +537,6 @@ struct sock { #endif struct rcu_head sk_rcu; netns_tracker ns_tracker; - struct hlist_node sk_bind2_node; }; enum sk_pacing { @@ -819,16 +817,6 @@ static inline void sk_add_bind_node(struct sock *sk, hlist_add_head(&sk->sk_bind_node, list); } -static inline void __sk_del_bind2_node(struct sock *sk) -{ - __hlist_del(&sk->sk_bind2_node); -} - -static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list) -{ - hlist_add_head(&sk->sk_bind2_node, list); -} - #define sk_for_each(__sk, list) \ hlist_for_each_entry(__sk, list, sk_node) #define sk_for_each_rcu(__sk, list) \ @@ -846,8 +834,6 @@ static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list) hlist_for_each_entry_safe(__sk, tmp, list, sk_node) #define sk_for_each_bound(__sk, list) \ hlist_for_each_entry(__sk, list, sk_bind_node) -#define sk_for_each_bound_bhash2(__sk, list) \ - hlist_for_each_entry(__sk, list, sk_bind2_node) /** * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset @@ -1543,7 +1529,7 @@ void __sk_mem_reclaim(struct sock *sk, int amount); /* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */ static inline long sk_prot_mem_limits(const struct sock *sk, int index) { - long val = sk->sk_prot->sysctl_mem[index]; + long val = READ_ONCE(sk->sk_prot->sysctl_mem[index]); #if PAGE_SIZE > SK_MEM_QUANTUM val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT; @@ -2857,18 +2843,18 @@ static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto) { /* Does this proto have per netns sysctl_wmem ? */ if (proto->sysctl_wmem_offset) - return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset); + return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset)); - return *proto->sysctl_wmem; + return READ_ONCE(*proto->sysctl_wmem); } static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto) { /* Does this proto have per netns sysctl_rmem ? 
*/ if (proto->sysctl_rmem_offset) - return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset); + return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset)); - return *proto->sysctl_rmem; + return READ_ONCE(*proto->sysctl_rmem); } /* Default TCP Small queue budget is ~1 ms of data (1sec >> 10) diff --git a/include/net/tcp.h b/include/net/tcp.h index 1e99f5c61f84..78a64e1b33a7 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -932,7 +932,7 @@ extern const struct inet_connection_sock_af_ops ipv6_specific; INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)); INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb)); -INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb)); +void tcp_v6_early_demux(struct sk_buff *skb); #endif @@ -1403,8 +1403,8 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); s32 delta; - if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out || - ca_ops->cong_control) + if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) || + tp->packets_out || ca_ops->cong_control) return; delta = tcp_jiffies32 - tp->lsndtime; if (delta > inet_csk(sk)->icsk_rto) @@ -1419,7 +1419,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, static inline int tcp_win_from_space(const struct sock *sk, int space) { - int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale; + int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale); return tcp_adv_win_scale <= 0 ? (space>>(-tcp_adv_win_scale)) : @@ -1493,21 +1493,24 @@ static inline int keepalive_intvl_when(const struct tcp_sock *tp) { struct net *net = sock_net((struct sock *)tp); - return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl; + return tp->keepalive_intvl ? : + READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl); } static inline int keepalive_time_when(const struct tcp_sock *tp) { struct net *net = sock_net((struct sock *)tp); - return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time; + return tp->keepalive_time ? : + READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time); } static inline int keepalive_probes(const struct tcp_sock *tp) { struct net *net = sock_net((struct sock *)tp); - return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes; + return tp->keepalive_probes ? : + READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes); } static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) @@ -1520,7 +1523,8 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) static inline int tcp_fin_time(const struct sock *sk) { - int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout; + int fin_timeout = tcp_sk(sk)->linger2 ? 
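The keepalive helpers above keep their "per-socket value, else netns sysctl" shape; only the sysctl fallback needs READ_ONCE(), since the per-socket fields are written under the socket lock. The per-socket override is set from userspace through the familiar setsockopt path (a sketch):

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>

	int enable_keepalive(int fd)
	{
		int on = 1, idle = 60;	/* seconds before the first probe */

		if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)))
			return -1;
		/* sets tp->keepalive_time, so the sysctl fallback is skipped */
		return setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
	}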
: + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout); const int rto = inet_csk(sk)->icsk_rto; if (fin_timeout < (rto << 2) - (rto >> 1)) @@ -2023,7 +2027,7 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) { struct net *net = sock_net((struct sock *)tp); - return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat; + return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat); } bool tcp_stream_memory_free(const struct sock *sk, int wake); diff --git a/include/net/tls.h b/include/net/tls.h index 8017f1703447..8bd938f98bdd 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -704,7 +704,7 @@ int tls_sw_fallback_init(struct sock *sk, struct tls_crypto_info *crypto_info); #ifdef CONFIG_TLS_DEVICE -void tls_device_init(void); +int tls_device_init(void); void tls_device_cleanup(void); void tls_device_sk_destruct(struct sock *sk); int tls_set_device_offload(struct sock *sk, struct tls_context *ctx); @@ -724,7 +724,7 @@ static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk) return tls_get_ctx(sk)->rx_conf == TLS_HW; } #else -static inline void tls_device_init(void) {} +static inline int tls_device_init(void) { return 0; } static inline void tls_device_cleanup(void) {} static inline int diff --git a/include/net/udp.h b/include/net/udp.h index b83a00330566..8dd4aa1485a6 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -167,7 +167,7 @@ static inline void udp_csum_pull_header(struct sk_buff *skb) typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport, __be16 dport); -INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *)); +void udp_v6_early_demux(struct sk_buff *skb); INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *)); struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, @@ -238,7 +238,7 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if, int dif, int sdif) { #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) - return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept, + return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept), bound_dev_if, dif, sdif); #else return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 1e80e70dfa92..bac55decf900 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -386,7 +386,7 @@ static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd) extern void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq); -struct request *scsi_alloc_request(struct request_queue *q, - unsigned int op, blk_mq_req_flags_t flags); +struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf, + blk_mq_req_flags_t flags); #endif /* _SCSI_SCSI_CMND_H */ diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 7cf5f3b7589f..2493bd65351a 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -457,7 +457,7 @@ extern void scsi_sanitize_inquiry_string(unsigned char *s, int len); extern int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, unsigned char *sense, struct scsi_sense_hdr *sshdr, - int timeout, int retries, u64 flags, + int timeout, int retries, blk_opf_t flags, req_flags_t rq_flags, int *resid); /* Make sure any sense buffer is the correct size. 
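scsi_alloc_request() and __scsi_execute() switching from plain int/u64 to blk_opf_t matters because blk_opf_t is a __bitwise type: sparse now flags callers that mix it with untyped integers. A caller-side sketch under that assumption; the function name is illustrative:

	#include <linux/blk_types.h>
	#include <scsi/scsi_cmnd.h>

	static struct request *alloc_out_req(struct request_queue *q)
	{
		/* built only from REQ_OP_* and REQ_* values, all typed */
		blk_opf_t opf = REQ_OP_DRV_OUT | REQ_SYNC;

		return scsi_alloc_request(q, opf, 0);
	}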
*/ #define scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense, \ diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 667d889b92b5..65082ecdd557 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -786,7 +786,7 @@ extern int scsi_host_block(struct Scsi_Host *shost); extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state); void scsi_host_busy_iter(struct Scsi_Host *, - bool (*fn)(struct scsi_cmnd *, void *, bool), void *priv); + bool (*fn)(struct scsi_cmnd *, void *), void *priv); struct class_container; diff --git a/include/sound/soc.h b/include/sound/soc.h index f20f5f890794..b276dcb5d4e8 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -408,8 +408,6 @@ struct snd_soc_jack_pin; struct snd_soc_jack_gpio; -typedef int (*hw_write_t)(void *,const char* ,int); - enum snd_soc_pcm_subclass { SND_SOC_PCM_CLASS_PCM = 0, SND_SOC_PCM_CLASS_BE = 1, diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h index 32088c603244..bad21222130e 100644 --- a/include/trace/events/dlm.h +++ b/include/trace/events/dlm.h @@ -49,38 +49,52 @@ /* note: we begin tracing dlm_lock_start() only if ls and lkb are found */ TRACE_EVENT(dlm_lock_start, - TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, int mode, - __u32 flags), + TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, void *name, + unsigned int namelen, int mode, __u32 flags), - TP_ARGS(ls, lkb, mode, flags), + TP_ARGS(ls, lkb, name, namelen, mode, flags), TP_STRUCT__entry( __field(__u32, ls_id) __field(__u32, lkb_id) __field(int, mode) __field(__u32, flags) + __dynamic_array(unsigned char, res_name, + lkb->lkb_resource ? lkb->lkb_resource->res_length : namelen) ), TP_fast_assign( + struct dlm_rsb *r; + __entry->ls_id = ls->ls_global_id; __entry->lkb_id = lkb->lkb_id; __entry->mode = mode; __entry->flags = flags; + + r = lkb->lkb_resource; + if (r) + memcpy(__get_dynamic_array(res_name), r->res_name, + __get_dynamic_array_len(res_name)); + else if (name) + memcpy(__get_dynamic_array(res_name), name, + __get_dynamic_array_len(res_name)); ), - TP_printk("ls_id=%u lkb_id=%x mode=%s flags=%s", + TP_printk("ls_id=%u lkb_id=%x mode=%s flags=%s res_name=%s", __entry->ls_id, __entry->lkb_id, show_lock_mode(__entry->mode), - show_lock_flags(__entry->flags)) + show_lock_flags(__entry->flags), + __print_hex_str(__get_dynamic_array(res_name), + __get_dynamic_array_len(res_name))) ); TRACE_EVENT(dlm_lock_end, - TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, int mode, __u32 flags, - int error), + TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, void *name, + unsigned int namelen, int mode, __u32 flags, int error), - TP_ARGS(ls, lkb, mode, flags, error), + TP_ARGS(ls, lkb, name, namelen, mode, flags, error), TP_STRUCT__entry( __field(__u32, ls_id) @@ -88,14 +102,26 @@ TRACE_EVENT(dlm_lock_end, __field(int, mode) __field(__u32, flags) __field(int, error) + __dynamic_array(unsigned char, res_name, + lkb->lkb_resource ? 
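All of the dlm events below rely on the same trick: __dynamic_array() sizes the ring-buffer record at reserve time from an expression over the TP_PROTO arguments, and TP_fast_assign() copies into it, so variable-length resource names cost only what they use. The idiom in isolation; this is a hypothetical event with the usual trace-header boilerplate omitted:

	TRACE_EVENT(my_blob,
		TP_PROTO(const void *buf, unsigned int len),
		TP_ARGS(buf, len),
		TP_STRUCT__entry(
			__dynamic_array(unsigned char, blob, len)
		),
		TP_fast_assign(
			memcpy(__get_dynamic_array(blob), buf,
			       __get_dynamic_array_len(blob));
		),
		TP_printk("blob=%s",
			  __print_hex_str(__get_dynamic_array(blob),
					  __get_dynamic_array_len(blob)))
	);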
lkb->lkb_resource->res_length : namelen) ), TP_fast_assign( + struct dlm_rsb *r; + __entry->ls_id = ls->ls_global_id; __entry->lkb_id = lkb->lkb_id; __entry->mode = mode; __entry->flags = flags; + r = lkb->lkb_resource; + if (r) + memcpy(__get_dynamic_array(res_name), r->res_name, + __get_dynamic_array_len(res_name)); + else if (name) + memcpy(__get_dynamic_array(res_name), name, + __get_dynamic_array_len(res_name)); + /* return value will be zeroed in those cases by dlm_lock() * we do it here again to not introduce more overhead if * trace isn't running and error reflects the return value. @@ -104,12 +130,15 @@ TRACE_EVENT(dlm_lock_end, __entry->error = 0; else __entry->error = error; + ), - TP_printk("ls_id=%u lkb_id=%x mode=%s flags=%s error=%d", + TP_printk("ls_id=%u lkb_id=%x mode=%s flags=%s error=%d res_name=%s", __entry->ls_id, __entry->lkb_id, show_lock_mode(__entry->mode), - show_lock_flags(__entry->flags), __entry->error) + show_lock_flags(__entry->flags), __entry->error, + __print_hex_str(__get_dynamic_array(res_name), + __get_dynamic_array_len(res_name))) ); @@ -123,42 +152,65 @@ TRACE_EVENT(dlm_bast, __field(__u32, ls_id) __field(__u32, lkb_id) __field(int, mode) + __dynamic_array(unsigned char, res_name, + lkb->lkb_resource ? lkb->lkb_resource->res_length : 0) ), TP_fast_assign( + struct dlm_rsb *r; + __entry->ls_id = ls->ls_global_id; __entry->lkb_id = lkb->lkb_id; __entry->mode = mode; + + r = lkb->lkb_resource; + if (r) + memcpy(__get_dynamic_array(res_name), r->res_name, + __get_dynamic_array_len(res_name)); ), - TP_printk("ls_id=%u lkb_id=%x mode=%s", __entry->ls_id, - __entry->lkb_id, show_lock_mode(__entry->mode)) + TP_printk("ls_id=%u lkb_id=%x mode=%s res_name=%s", + __entry->ls_id, __entry->lkb_id, + show_lock_mode(__entry->mode), + __print_hex_str(__get_dynamic_array(res_name), + __get_dynamic_array_len(res_name))) ); TRACE_EVENT(dlm_ast, - TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, struct dlm_lksb *lksb), + TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb), - TP_ARGS(ls, lkb, lksb), + TP_ARGS(ls, lkb), TP_STRUCT__entry( __field(__u32, ls_id) __field(__u32, lkb_id) __field(u8, sb_flags) __field(int, sb_status) + __dynamic_array(unsigned char, res_name, + lkb->lkb_resource ? lkb->lkb_resource->res_length : 0) ), TP_fast_assign( + struct dlm_rsb *r; + __entry->ls_id = ls->ls_global_id; __entry->lkb_id = lkb->lkb_id; - __entry->sb_flags = lksb->sb_flags; - __entry->sb_status = lksb->sb_status; + __entry->sb_flags = lkb->lkb_lksb->sb_flags; + __entry->sb_status = lkb->lkb_lksb->sb_status; + + r = lkb->lkb_resource; + if (r) + memcpy(__get_dynamic_array(res_name), r->res_name, + __get_dynamic_array_len(res_name)); ), - TP_printk("ls_id=%u lkb_id=%x sb_flags=%s sb_status=%d", + TP_printk("ls_id=%u lkb_id=%x sb_flags=%s sb_status=%d res_name=%s", __entry->ls_id, __entry->lkb_id, - show_dlm_sb_flags(__entry->sb_flags), __entry->sb_status) + show_dlm_sb_flags(__entry->sb_flags), __entry->sb_status, + __print_hex_str(__get_dynamic_array(res_name), + __get_dynamic_array_len(res_name))) ); @@ -173,17 +225,28 @@ TRACE_EVENT(dlm_unlock_start, __field(__u32, ls_id) __field(__u32, lkb_id) __field(__u32, flags) + __dynamic_array(unsigned char, res_name, + lkb->lkb_resource ? 
lkb->lkb_resource->res_length : 0) ), TP_fast_assign( + struct dlm_rsb *r; + __entry->ls_id = ls->ls_global_id; __entry->lkb_id = lkb->lkb_id; __entry->flags = flags; + + r = lkb->lkb_resource; + if (r) + memcpy(__get_dynamic_array(res_name), r->res_name, + __get_dynamic_array_len(res_name)); ), - TP_printk("ls_id=%u lkb_id=%x flags=%s", + TP_printk("ls_id=%u lkb_id=%x flags=%s res_name=%s", __entry->ls_id, __entry->lkb_id, - show_lock_flags(__entry->flags)) + show_lock_flags(__entry->flags), + __print_hex_str(__get_dynamic_array(res_name), + __get_dynamic_array_len(res_name))) ); @@ -199,18 +262,29 @@ TRACE_EVENT(dlm_unlock_end, __field(__u32, lkb_id) __field(__u32, flags) __field(int, error) + __dynamic_array(unsigned char, res_name, + lkb->lkb_resource ? lkb->lkb_resource->res_length : 0) ), TP_fast_assign( + struct dlm_rsb *r; + __entry->ls_id = ls->ls_global_id; __entry->lkb_id = lkb->lkb_id; __entry->flags = flags; __entry->error = error; + + r = lkb->lkb_resource; + if (r) + memcpy(__get_dynamic_array(res_name), r->res_name, + __get_dynamic_array_len(res_name)); ), - TP_printk("ls_id=%u lkb_id=%x flags=%s error=%d", + TP_printk("ls_id=%u lkb_id=%x flags=%s error=%d res_name=%s", __entry->ls_id, __entry->lkb_id, - show_lock_flags(__entry->flags), __entry->error) + show_lock_flags(__entry->flags), __entry->error, + __print_hex_str(__get_dynamic_array(res_name), + __get_dynamic_array_len(res_name))) ); diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 513e889ef8aa..f1e922237736 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -66,7 +66,7 @@ TRACE_DEFINE_ENUM(CP_RESIZE); #define F2FS_OP_FLAGS (REQ_RAHEAD | REQ_SYNC | REQ_META | REQ_PRIO | \ REQ_PREFLUSH | REQ_FUA) -#define F2FS_BIO_FLAG_MASK(t) (t & F2FS_OP_FLAGS) +#define F2FS_BIO_FLAG_MASK(t) (__force u32)((t) & F2FS_OP_FLAGS) #define show_bio_type(op,op_flags) show_bio_op(op), \ show_bio_op_flags(op_flags) @@ -75,12 +75,12 @@ TRACE_DEFINE_ENUM(CP_RESIZE); #define show_bio_op_flags(flags) \ __print_flags(F2FS_BIO_FLAG_MASK(flags), "|", \ - { REQ_RAHEAD, "R" }, \ - { REQ_SYNC, "S" }, \ - { REQ_META, "M" }, \ - { REQ_PRIO, "P" }, \ - { REQ_PREFLUSH, "PF" }, \ - { REQ_FUA, "FUA" }) + { (__force u32)REQ_RAHEAD, "R" }, \ + { (__force u32)REQ_SYNC, "S" }, \ + { (__force u32)REQ_META, "M" }, \ + { (__force u32)REQ_PRIO, "P" }, \ + { (__force u32)REQ_PREFLUSH, "PF" }, \ + { (__force u32)REQ_FUA, "FUA" }) #define show_data_type(type) \ __print_symbolic(type, \ @@ -1036,8 +1036,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio, __field(pgoff_t, index) __field(block_t, old_blkaddr) __field(block_t, new_blkaddr) - __field(int, op) - __field(int, op_flags) + __field(enum req_op, op) + __field(blk_opf_t, op_flags) __field(int, temp) __field(int, type) ), @@ -1092,8 +1092,8 @@ DECLARE_EVENT_CLASS(f2fs__bio, TP_STRUCT__entry( __field(dev_t, dev) __field(dev_t, target) - __field(int, op) - __field(int, op_flags) + __field(enum req_op, op) + __field(blk_opf_t, op_flags) __field(int, type) __field(sector_t, sector) __field(unsigned int, size) diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index 66fcc5a1a5b1..c5b21ff0ac85 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -7,6 +7,7 @@ #include <linux/tracepoint.h> #include <uapi/linux/io_uring.h> +#include <linux/io_uring_types.h> #include <linux/io_uring.h> struct io_wq_work; @@ -97,9 +98,7 @@ TRACE_EVENT(io_uring_register, /** * io_uring_file_get - called before getting references to 
an SQE file * - * @ctx: pointer to a ring context structure * @req: pointer to a submitted request - * @user_data: user data associated with the request * @fd: SQE file descriptor * * Allows to trace out how often an SQE file reference is obtained, which can @@ -108,9 +107,9 @@ TRACE_EVENT(io_uring_register, */ TRACE_EVENT(io_uring_file_get, - TP_PROTO(void *ctx, void *req, unsigned long long user_data, int fd), + TP_PROTO(struct io_kiocb *req, int fd), - TP_ARGS(ctx, req, user_data, fd), + TP_ARGS(req, fd), TP_STRUCT__entry ( __field( void *, ctx ) @@ -120,9 +119,9 @@ TRACE_EVENT(io_uring_file_get, ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; - __entry->user_data = user_data; + __entry->user_data = req->cqe.user_data; __entry->fd = fd; ), @@ -133,22 +132,16 @@ TRACE_EVENT(io_uring_file_get, /** * io_uring_queue_async_work - called before submitting a new async work * - * @ctx: pointer to a ring context structure * @req: pointer to a submitted request - * @user_data: user data associated with the request - * @opcode: opcode of request - * @flags request flags - * @work: pointer to a submitted io_wq_work * @rw: type of workqueue, hashed or normal * * Allows to trace asynchronous work submission. */ TRACE_EVENT(io_uring_queue_async_work, - TP_PROTO(void *ctx, void * req, unsigned long long user_data, u8 opcode, - unsigned int flags, struct io_wq_work *work, int rw), + TP_PROTO(struct io_kiocb *req, int rw), - TP_ARGS(ctx, req, user_data, opcode, flags, work, rw), + TP_ARGS(req, rw), TP_STRUCT__entry ( __field( void *, ctx ) @@ -158,65 +151,69 @@ TRACE_EVENT(io_uring_queue_async_work, __field( unsigned int, flags ) __field( struct io_wq_work *, work ) __field( int, rw ) + + __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; - __entry->user_data = user_data; - __entry->flags = flags; - __entry->opcode = opcode; - __entry->work = work; + __entry->user_data = req->cqe.user_data; + __entry->flags = req->flags; + __entry->opcode = req->opcode; + __entry->work = &req->work; __entry->rw = rw; + + __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%x, %s queue, work %p", __entry->ctx, __entry->req, __entry->user_data, - io_uring_get_opcode(__entry->opcode), + __get_str(op_str), __entry->flags, __entry->rw ? "hashed" : "normal", __entry->work) ); /** * io_uring_defer - called when an io_uring request is deferred * - * @ctx: pointer to a ring context structure * @req: pointer to a deferred request - * @user_data: user data associated with the request - * @opcode: opcode of request * * Allows to track deferred requests, to get an insight about what requests are * not started immediately. 
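The recurring __string(op_str, ...) / __assign_str() pairs in these io_uring events resolve the opcode to its name when the event fires and store the text in the record itself; TP_printk() then only reads __get_str(op_str), so userspace trace parsers no longer have to evaluate io_uring_get_opcode() at print time. Reduced to its core as a hypothetical event:

	TRACE_EVENT(my_op,
		TP_PROTO(u8 opcode),
		TP_ARGS(opcode),
		TP_STRUCT__entry(
			__string(op_str, io_uring_get_opcode(opcode))
		),
		TP_fast_assign(
			__assign_str(op_str, io_uring_get_opcode(opcode));
		),
		TP_printk("opcode %s", __get_str(op_str))
	);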
*/ TRACE_EVENT(io_uring_defer, - TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode), + TP_PROTO(struct io_kiocb *req), - TP_ARGS(ctx, req, user_data, opcode), + TP_ARGS(req), TP_STRUCT__entry ( __field( void *, ctx ) __field( void *, req ) __field( unsigned long long, data ) __field( u8, opcode ) + + __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; - __entry->data = user_data; - __entry->opcode = opcode; + __entry->data = req->cqe.user_data; + __entry->opcode = req->opcode; + + __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s", __entry->ctx, __entry->req, __entry->data, - io_uring_get_opcode(__entry->opcode)) + __get_str(op_str)) ); /** * io_uring_link - called before the io_uring request added into link_list of * another request * - * @ctx: pointer to a ring context structure * @req: pointer to a linked request * @target_req: pointer to a previous request, that would contain @req * @@ -225,9 +222,9 @@ TRACE_EVENT(io_uring_defer, */ TRACE_EVENT(io_uring_link, - TP_PROTO(void *ctx, void *req, void *target_req), + TP_PROTO(struct io_kiocb *req, struct io_kiocb *target_req), - TP_ARGS(ctx, req, target_req), + TP_ARGS(req, target_req), TP_STRUCT__entry ( __field( void *, ctx ) @@ -236,7 +233,7 @@ TRACE_EVENT(io_uring_link, ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; __entry->target_req = target_req; ), @@ -277,10 +274,7 @@ TRACE_EVENT(io_uring_cqring_wait, /** * io_uring_fail_link - called before failing a linked request * - * @ctx: pointer to a ring context structure * @req: request, which links were cancelled - * @user_data: user data associated with the request - * @opcode: opcode of request * @link: cancelled link * * Allows to track linked requests cancellation, to see not only that some work @@ -288,9 +282,9 @@ TRACE_EVENT(io_uring_cqring_wait, */ TRACE_EVENT(io_uring_fail_link, - TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, void *link), + TP_PROTO(struct io_kiocb *req, struct io_kiocb *link), - TP_ARGS(ctx, req, user_data, opcode, link), + TP_ARGS(req, link), TP_STRUCT__entry ( __field( void *, ctx ) @@ -298,19 +292,23 @@ TRACE_EVENT(io_uring_fail_link, __field( unsigned long long, user_data ) __field( u8, opcode ) __field( void *, link ) + + __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; - __entry->user_data = user_data; - __entry->opcode = opcode; + __entry->user_data = req->cqe.user_data; + __entry->opcode = req->opcode; __entry->link = link; + + __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p", __entry->ctx, __entry->req, __entry->user_data, - io_uring_get_opcode(__entry->opcode), __entry->link) + __get_str(op_str), __entry->link) ); /** @@ -364,23 +362,17 @@ TRACE_EVENT(io_uring_complete, /** * io_uring_submit_sqe - called before submitting one SQE * - * @ctx: pointer to a ring context structure * @req: pointer to a submitted request - * @user_data: user data associated with the request - * @opcode: opcode of request - * @flags request flags * @force_nonblock: whether a context blocking or not - * @sq_thread: true if sq_thread has submitted this SQE * * Allows to track SQE submitting, to understand what was the source of it, SQ * thread or io_uring_enter call. 
*/ TRACE_EVENT(io_uring_submit_sqe, - TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, u32 flags, - bool force_nonblock, bool sq_thread), + TP_PROTO(struct io_kiocb *req, bool force_nonblock), - TP_ARGS(ctx, req, user_data, opcode, flags, force_nonblock, sq_thread), + TP_ARGS(req, force_nonblock), TP_STRUCT__entry ( __field( void *, ctx ) @@ -390,31 +382,32 @@ TRACE_EVENT(io_uring_submit_sqe, __field( u32, flags ) __field( bool, force_nonblock ) __field( bool, sq_thread ) + + __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; - __entry->user_data = user_data; - __entry->opcode = opcode; - __entry->flags = flags; + __entry->user_data = req->cqe.user_data; + __entry->opcode = req->opcode; + __entry->flags = req->flags; __entry->force_nonblock = force_nonblock; - __entry->sq_thread = sq_thread; + __entry->sq_thread = req->ctx->flags & IORING_SETUP_SQPOLL; + + __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%x, " "non block %d, sq_thread %d", __entry->ctx, __entry->req, - __entry->user_data, io_uring_get_opcode(__entry->opcode), + __entry->user_data, __get_str(op_str), __entry->flags, __entry->force_nonblock, __entry->sq_thread) ); /* * io_uring_poll_arm - called after arming a poll wait if successful * - * @ctx: pointer to a ring context structure * @req: pointer to the armed request - * @user_data: user data associated with the request - * @opcode: opcode of request * @mask: request poll events mask * @events: registered events of interest * @@ -423,10 +416,9 @@ TRACE_EVENT(io_uring_submit_sqe, */ TRACE_EVENT(io_uring_poll_arm, - TP_PROTO(void *ctx, void *req, u64 user_data, u8 opcode, - int mask, int events), + TP_PROTO(struct io_kiocb *req, int mask, int events), - TP_ARGS(ctx, req, user_data, opcode, mask, events), + TP_ARGS(req, mask, events), TP_STRUCT__entry ( __field( void *, ctx ) @@ -435,38 +427,39 @@ TRACE_EVENT(io_uring_poll_arm, __field( u8, opcode ) __field( int, mask ) __field( int, events ) + + __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; - __entry->user_data = user_data; - __entry->opcode = opcode; + __entry->user_data = req->cqe.user_data; + __entry->opcode = req->opcode; __entry->mask = mask; __entry->events = events; + + __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x", __entry->ctx, __entry->req, __entry->user_data, - io_uring_get_opcode(__entry->opcode), + __get_str(op_str), __entry->mask, __entry->events) ); /* * io_uring_task_add - called after adding a task * - * @ctx: pointer to a ring context structure * @req: pointer to request - * @user_data: user data associated with the request - * @opcode: opcode of request * @mask: request poll events mask * */ TRACE_EVENT(io_uring_task_add, - TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, int mask), + TP_PROTO(struct io_kiocb *req, int mask), - TP_ARGS(ctx, req, user_data, opcode, mask), + TP_ARGS(req, mask), TP_STRUCT__entry ( __field( void *, ctx ) @@ -474,19 +467,23 @@ TRACE_EVENT(io_uring_task_add, __field( unsigned long long, user_data ) __field( u8, opcode ) __field( int, mask ) + + __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; - 
__entry->user_data = user_data; - __entry->opcode = opcode; + __entry->user_data = req->cqe.user_data; + __entry->opcode = req->opcode; __entry->mask = mask; + + __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x", __entry->ctx, __entry->req, __entry->user_data, - io_uring_get_opcode(__entry->opcode), + __get_str(op_str), __entry->mask) ); @@ -494,7 +491,6 @@ TRACE_EVENT(io_uring_task_add, * io_uring_req_failed - called when an sqe is errored during submission * * @sqe: pointer to the io_uring_sqe that failed - * @ctx: pointer to a ring context structure * @req: pointer to request * @error: error it failed with * @@ -502,9 +498,9 @@ TRACE_EVENT(io_uring_req_failed, - TP_PROTO(const struct io_uring_sqe *sqe, void *ctx, void *req, int error), + TP_PROTO(const struct io_uring_sqe *sqe, struct io_kiocb *req, int error), - TP_ARGS(sqe, ctx, req, error), + TP_ARGS(sqe, req, error), TP_STRUCT__entry ( __field( void *, ctx ) @@ -523,10 +519,12 @@ TRACE_EVENT(io_uring_req_failed, __field( u64, pad1 ) __field( u64, addr3 ) __field( int, error ) + + __string( op_str, io_uring_get_opcode(sqe->opcode) ) ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = req->ctx; __entry->req = req; __entry->user_data = sqe->user_data; __entry->opcode = sqe->opcode; @@ -542,6 +540,8 @@ TRACE_EVENT(io_uring_req_failed, __entry->pad1 = sqe->__pad2[0]; __entry->addr3 = sqe->addr3; __entry->error = error; + + __assign_str(op_str, io_uring_get_opcode(sqe->opcode)); ), TP_printk("ring %p, req %p, user_data 0x%llx, " @@ -550,7 +550,7 @@ TRACE_EVENT(io_uring_req_failed, "personality=%d, file_index=%d, pad=0x%llx, addr3=%llx, " "error=%d", __entry->ctx, __entry->req, __entry->user_data, - io_uring_get_opcode(__entry->opcode), + __get_str(op_str), __entry->flags, __entry->ioprio, (unsigned long long)__entry->off, (unsigned long long) __entry->addr, __entry->len, @@ -594,12 +594,67 @@ TRACE_EVENT(io_uring_cqe_overflow, __entry->ocqe = ocqe; ), - TP_printk("ring %p, user_data 0x%llx, res %d, flags %x, " + TP_printk("ring %p, user_data 0x%llx, res %d, cflags 0x%x, " "overflow_cqe %p", __entry->ctx, __entry->user_data, __entry->res, __entry->cflags, __entry->ocqe) ); +/* + * io_uring_task_work_run - ran task work + * + * @tctx: pointer to an io_uring_task + * @count: how many functions it ran + * @loops: how many loops it ran + * + */ +TRACE_EVENT(io_uring_task_work_run, + + TP_PROTO(void *tctx, unsigned int count, unsigned int loops), + + TP_ARGS(tctx, count, loops), + + TP_STRUCT__entry ( + __field( void *, tctx ) + __field( unsigned int, count ) + __field( unsigned int, loops ) + ), + + TP_fast_assign( + __entry->tctx = tctx; + __entry->count = count; + __entry->loops = loops; + ), + + TP_printk("tctx %p, count %u, loops %u", + __entry->tctx, __entry->count, __entry->loops) +); + +TRACE_EVENT(io_uring_short_write, + + TP_PROTO(void *ctx, u64 fpos, u64 wanted, u64 got), + + TP_ARGS(ctx, fpos, wanted, got), + + TP_STRUCT__entry( + __field(void *, ctx) + __field(u64, fpos) + __field(u64, wanted) + __field(u64, got) + ), + + TP_fast_assign( + __entry->ctx = ctx; + __entry->fpos = fpos; + __entry->wanted = wanted; + __entry->got = got; + ), + + TP_printk("ring %p, fpos %lld, wanted %lld, got %lld", + __entry->ctx, __entry->fpos, + __entry->wanted, __entry->got) +); + #endif /* _TRACE_IO_URING_H */ /* This part must be outside protection */ diff --git a/include/trace/events/iocost.h b/include/trace/events/iocost.h index 
e282ce02fa2d..6d1626e7a4ce 100644 --- a/include/trace/events/iocost.h +++ b/include/trace/events/iocost.h @@ -160,7 +160,7 @@ TRACE_EVENT(iocost_ioc_vrate_adj, TP_fast_assign( __assign_str(devname, ioc_name(ioc)); - __entry->old_vrate = atomic64_read(&ioc->vtime_rate);; + __entry->old_vrate = atomic64_read(&ioc->vtime_rate); __entry->new_vrate = new_vrate; __entry->busy_level = ioc->busy_level; __entry->read_missed_ppm = missed_ppm[READ]; diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index a4dfe005983d..99f783c384bb 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h @@ -355,22 +355,22 @@ TRACE_EVENT(jbd2_update_log_tail, TRACE_EVENT(jbd2_write_superblock, - TP_PROTO(journal_t *journal, int write_op), + TP_PROTO(journal_t *journal, blk_opf_t write_flags), - TP_ARGS(journal, write_op), + TP_ARGS(journal, write_flags), TP_STRUCT__entry( __field( dev_t, dev ) - __field( int, write_op ) + __field( blk_opf_t, write_flags ) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; - __entry->write_op = write_op; + __entry->write_flags = write_flags; ), - TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev), - MINOR(__entry->dev), __entry->write_op) + TP_printk("dev %d,%d write_flags %x", MAJOR(__entry->dev), + MINOR(__entry->dev), (__force u32)__entry->write_flags) ); TRACE_EVENT(jbd2_lock_buffer_stall, diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index f76668305ac5..4cb51ace600d 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -13,11 +13,12 @@ DECLARE_EVENT_CLASS(kmem_alloc, TP_PROTO(unsigned long call_site, const void *ptr, + struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), - TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags), + TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags), TP_STRUCT__entry( __field( unsigned long, call_site ) @@ -25,6 +26,7 @@ DECLARE_EVENT_CLASS(kmem_alloc, __field( size_t, bytes_req ) __field( size_t, bytes_alloc ) __field( unsigned long, gfp_flags ) + __field( bool, accounted ) ), TP_fast_assign( @@ -33,42 +35,47 @@ DECLARE_EVENT_CLASS(kmem_alloc, __entry->bytes_req = bytes_req; __entry->bytes_alloc = bytes_alloc; __entry->gfp_flags = (__force unsigned long)gfp_flags; + __entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ? + ((gfp_flags & __GFP_ACCOUNT) || + (s && s->flags & SLAB_ACCOUNT)) : false; ), - TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s", + TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s accounted=%s", (void *)__entry->call_site, __entry->ptr, __entry->bytes_req, __entry->bytes_alloc, - show_gfp_flags(__entry->gfp_flags)) + show_gfp_flags(__entry->gfp_flags), + __entry->accounted ? 
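The new accounted field makes memcg charging visible in the allocator events: it is true when either the call site passes __GFP_ACCOUNT or the cache was created with SLAB_ACCOUNT, and always false without CONFIG_MEMCG_KMEM. Both ways to hit accounted=true, sketched; struct foo and foo_cache are illustrative:

	#include <linux/slab.h>

	struct foo { int x; };

	static struct kmem_cache *foo_cache;

	static void accounted_examples(void)
	{
		struct foo *a;
		void *b;

		foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
					      SLAB_ACCOUNT, NULL);
		if (!foo_cache)
			return;
		a = kmem_cache_alloc(foo_cache, GFP_KERNEL);	/* accounted: cache flag */
		b = kmalloc(64, GFP_KERNEL | __GFP_ACCOUNT);	/* accounted: gfp flag */

		kfree(b);
		kmem_cache_free(foo_cache, a);
		kmem_cache_destroy(foo_cache);
	}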
"true" : "false") ); DEFINE_EVENT(kmem_alloc, kmalloc, - TP_PROTO(unsigned long call_site, const void *ptr, + TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), - TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags) + TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags) ); DEFINE_EVENT(kmem_alloc, kmem_cache_alloc, - TP_PROTO(unsigned long call_site, const void *ptr, + TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), - TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags) + TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags) ); DECLARE_EVENT_CLASS(kmem_alloc_node, TP_PROTO(unsigned long call_site, const void *ptr, + struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node), - TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node), + TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node), TP_STRUCT__entry( __field( unsigned long, call_site ) @@ -77,6 +84,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node, __field( size_t, bytes_alloc ) __field( unsigned long, gfp_flags ) __field( int, node ) + __field( bool, accounted ) ), TP_fast_assign( @@ -86,33 +94,37 @@ DECLARE_EVENT_CLASS(kmem_alloc_node, __entry->bytes_alloc = bytes_alloc; __entry->gfp_flags = (__force unsigned long)gfp_flags; __entry->node = node; + __entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ? + ((gfp_flags & __GFP_ACCOUNT) || + (s && s->flags & SLAB_ACCOUNT)) : false; ), - TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d", + TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s", (void *)__entry->call_site, __entry->ptr, __entry->bytes_req, __entry->bytes_alloc, show_gfp_flags(__entry->gfp_flags), - __entry->node) + __entry->node, + __entry->accounted ? 
"true" : "false") ); DEFINE_EVENT(kmem_alloc_node, kmalloc_node, TP_PROTO(unsigned long call_site, const void *ptr, - size_t bytes_req, size_t bytes_alloc, + struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node), - TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node) + TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node) ); DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node, TP_PROTO(unsigned long call_site, const void *ptr, - size_t bytes_req, size_t bytes_alloc, + struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node), - TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node) + TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node) ); TRACE_EVENT(kfree, diff --git a/include/trace/events/libata.h b/include/trace/events/libata.h index d4e631aa976f..6025dd8ba4aa 100644 --- a/include/trace/events/libata.h +++ b/include/trace/events/libata.h @@ -288,6 +288,7 @@ DECLARE_EVENT_CLASS(ata_qc_complete_template, __entry->hob_feature = qc->result_tf.hob_feature; __entry->nsect = qc->result_tf.nsect; __entry->hob_nsect = qc->result_tf.hob_nsect; + __entry->flags = qc->flags; ), TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s status=%s " \ diff --git a/include/trace/events/nilfs2.h b/include/trace/events/nilfs2.h index 84ee31fc04cc..8efc6236f57c 100644 --- a/include/trace/events/nilfs2.h +++ b/include/trace/events/nilfs2.h @@ -192,7 +192,7 @@ TRACE_EVENT(nilfs2_mdt_submit_block, TP_PROTO(struct inode *inode, unsigned long ino, unsigned long blkoff, - int mode), + enum req_op mode), TP_ARGS(inode, ino, blkoff, mode), @@ -200,7 +200,7 @@ TRACE_EVENT(nilfs2_mdt_submit_block, __field(struct inode *, inode) __field(unsigned long, ino) __field(unsigned long, blkoff) - __field(int, mode) + __field(enum req_op, mode) ), TP_fast_assign( diff --git a/include/trace/events/power.h b/include/trace/events/power.h index af5018aa9517..c708521e4ed5 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -500,6 +500,35 @@ DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_remove_request, TP_ARGS(name, type, new_value) ); + +TRACE_EVENT(guest_halt_poll_ns, + + TP_PROTO(bool grow, unsigned int new, unsigned int old), + + TP_ARGS(grow, new, old), + + TP_STRUCT__entry( + __field(bool, grow) + __field(unsigned int, new) + __field(unsigned int, old) + ), + + TP_fast_assign( + __entry->grow = grow; + __entry->new = new; + __entry->old = old; + ), + + TP_printk("halt_poll_ns %u (%s %u)", + __entry->new, + __entry->grow ? 
"grow" : "shrink", + __entry->old) +); + +#define trace_guest_halt_poll_ns_grow(new, old) \ + trace_guest_halt_poll_ns(true, new, old) +#define trace_guest_halt_poll_ns_shrink(new, old) \ + trace_guest_halt_poll_ns(false, new, old) #endif /* _TRACE_POWER_H */ /* This part must be outside protection */ diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h index cee4b2b64ae4..65016a767b7a 100644 --- a/include/trace/events/scmi.h +++ b/include/trace/events/scmi.h @@ -7,6 +7,31 @@ #include <linux/tracepoint.h> +TRACE_EVENT(scmi_fc_call, + TP_PROTO(u8 protocol_id, u8 msg_id, u32 res_id, u32 val1, u32 val2), + TP_ARGS(protocol_id, msg_id, res_id, val1, val2), + + TP_STRUCT__entry( + __field(u8, protocol_id) + __field(u8, msg_id) + __field(u32, res_id) + __field(u32, val1) + __field(u32, val2) + ), + + TP_fast_assign( + __entry->protocol_id = protocol_id; + __entry->msg_id = msg_id; + __entry->res_id = res_id; + __entry->val1 = val1; + __entry->val2 = val2; + ), + + TP_printk("[0x%02X]:[0x%02X]:[%08X]:%u:%u", + __entry->protocol_id, __entry->msg_id, + __entry->res_id, __entry->val1, __entry->val2) +); + TRACE_EVENT(scmi_xfer_begin, TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq, bool poll), @@ -112,6 +137,37 @@ TRACE_EVENT(scmi_rx_done, __entry->transfer_id, __entry->msg_id, __entry->protocol_id, __entry->seq, __entry->msg_type) ); + +TRACE_EVENT(scmi_msg_dump, + TP_PROTO(u8 protocol_id, u8 msg_id, unsigned char *tag, u16 seq, + int status, void *buf, size_t len), + TP_ARGS(protocol_id, msg_id, tag, seq, status, buf, len), + + TP_STRUCT__entry( + __field(u8, protocol_id) + __field(u8, msg_id) + __array(char, tag, 5) + __field(u16, seq) + __field(int, status) + __field(size_t, len) + __dynamic_array(unsigned char, cmd, len) + ), + + TP_fast_assign( + __entry->protocol_id = protocol_id; + __entry->msg_id = msg_id; + strscpy(__entry->tag, tag, 5); + __entry->seq = seq; + __entry->status = status; + __entry->len = len; + memcpy(__get_dynamic_array(cmd), buf, __entry->len); + ), + + TP_printk("pt=%02X t=%s msg_id=%02X seq=%04X s=%d pyld=%s", + __entry->protocol_id, __entry->tag, __entry->msg_id, + __entry->seq, __entry->status, + __print_hex_str(__get_dynamic_array(cmd), __entry->len)) +); #endif /* _TRACE_SCMI_H */ /* This part must be outside protection */ diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h index 12c315782766..777ee6cbe933 100644 --- a/include/trace/events/sock.h +++ b/include/trace/events/sock.h @@ -98,7 +98,7 @@ TRACE_EVENT(sock_exceed_buf_limit, TP_STRUCT__entry( __array(char, name, 32) - __field(long *, sysctl_mem) + __array(long, sysctl_mem, 3) __field(long, allocated) __field(int, sysctl_rmem) __field(int, rmem_alloc) @@ -110,7 +110,9 @@ TRACE_EVENT(sock_exceed_buf_limit, TP_fast_assign( strncpy(__entry->name, prot->name, 32); - __entry->sysctl_mem = prot->sysctl_mem; + __entry->sysctl_mem[0] = READ_ONCE(prot->sysctl_mem[0]); + __entry->sysctl_mem[1] = READ_ONCE(prot->sysctl_mem[1]); + __entry->sysctl_mem[2] = READ_ONCE(prot->sysctl_mem[2]); __entry->allocated = allocated; __entry->sysctl_rmem = sk_get_rmem0(sk, prot); __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h index 8a5f04888abd..e58bf3072f32 100644 --- a/include/trace/events/thermal.h +++ b/include/trace/events/thermal.h @@ -92,34 +92,22 @@ TRACE_EVENT(thermal_zone_trip, ); #ifdef CONFIG_CPU_THERMAL -TRACE_EVENT(thermal_power_cpu_get_power, - TP_PROTO(const struct cpumask *cpus, unsigned 
long freq, u32 *load, - size_t load_len, u32 dynamic_power), +TRACE_EVENT(thermal_power_cpu_get_power_simple, + TP_PROTO(int cpu, u32 power), - TP_ARGS(cpus, freq, load, load_len, dynamic_power), + TP_ARGS(cpu, power), TP_STRUCT__entry( - __bitmask(cpumask, num_possible_cpus()) - __field(unsigned long, freq ) - __dynamic_array(u32, load, load_len) - __field(size_t, load_len ) - __field(u32, dynamic_power ) + __field(int, cpu) + __field(u32, power) ), TP_fast_assign( - __assign_bitmask(cpumask, cpumask_bits(cpus), - num_possible_cpus()); - __entry->freq = freq; - memcpy(__get_dynamic_array(load), load, - load_len * sizeof(*load)); - __entry->load_len = load_len; - __entry->dynamic_power = dynamic_power; + __entry->cpu = cpu; + __entry->power = power; ), - TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d", - __get_bitmask(cpumask), __entry->freq, - __print_array(__get_dynamic_array(load), __entry->load_len, 4), - __entry->dynamic_power) + TP_printk("cpu=%d power=%u", __entry->cpu, __entry->power) ); TRACE_EVENT(thermal_power_cpu_limit, diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h index 6154a2e72bce..262d52021c23 100644 --- a/include/trace/events/workqueue.h +++ b/include/trace/events/workqueue.h @@ -22,7 +22,7 @@ struct pool_workqueue; */ TRACE_EVENT(workqueue_queue_work, - TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq, + TP_PROTO(int req_cpu, struct pool_workqueue *pwq, struct work_struct *work), TP_ARGS(req_cpu, pwq, work), @@ -31,8 +31,8 @@ TRACE_EVENT(workqueue_queue_work, __field( void *, work ) __field( void *, function) __string( workqueue, pwq->wq->name) - __field( unsigned int, req_cpu ) - __field( unsigned int, cpu ) + __field( int, req_cpu ) + __field( int, cpu ) ), TP_fast_assign( @@ -43,7 +43,7 @@ TRACE_EVENT(workqueue_queue_work, __entry->cpu = pwq->pool->cpu; ), - TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%u cpu=%u", + TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%d cpu=%d", __entry->work, __entry->function, __get_str(workqueue), __entry->req_cpu, __entry->cpu) ); diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h index f13d37b60775..1ecdb911add8 100644 --- a/include/uapi/asm-generic/fcntl.h +++ b/include/uapi/asm-generic/fcntl.h @@ -192,6 +192,7 @@ struct f_owner_ex { #define F_LINUX_SPECIFIC_BASE 1024 +#ifndef HAVE_ARCH_STRUCT_FLOCK struct flock { short l_type; short l_whence; @@ -216,5 +217,6 @@ struct flock64 { __ARCH_FLOCK64_PAD #endif }; +#endif /* HAVE_ARCH_STRUCT_FLOCK */ #endif /* _ASM_GENERIC_FCNTL_H */ diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index f1972154a594..0980678d502d 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -1444,11 +1444,11 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) #define AMD_FMT_MOD_PIPE_MASK 0x7 #define AMD_FMT_MOD_SET(field, value) \ - ((uint64_t)(value) << AMD_FMT_MOD_##field##_SHIFT) + ((__u64)(value) << AMD_FMT_MOD_##field##_SHIFT) #define AMD_FMT_MOD_GET(field, value) \ (((value) >> AMD_FMT_MOD_##field##_SHIFT) & AMD_FMT_MOD_##field##_MASK) #define AMD_FMT_MOD_CLEAR(field) \ - (~((uint64_t)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT)) + (~((__u64)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT)) #if defined(__cplusplus) } diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index a2def7b27009..b28ff5d88145 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -2123,7 +2123,7 @@ 
struct i915_context_engines_load_balance { __u64 mbz64; /* reserved for future use; must be zero */ - struct i915_engine_class_instance engines[0]; + struct i915_engine_class_instance engines[]; } __attribute__((packed)); #define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \ @@ -2161,7 +2161,7 @@ struct i915_context_engines_bond { __u64 flags; /* all undefined flags must be zero */ __u64 mbz64[4]; /* reserved for future use; must be zero */ - struct i915_engine_class_instance engines[0]; + struct i915_engine_class_instance engines[]; } __attribute__((packed)); #define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \ @@ -2288,7 +2288,7 @@ struct i915_context_engines_parallel_submit { * length = width (i) * num_siblings (j) * index = j + i * num_siblings */ - struct i915_engine_class_instance engines[0]; + struct i915_engine_class_instance engines[]; } __packed; diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h index 656a326821a2..b80fcc9ea525 100644 --- a/include/uapi/linux/blkzoned.h +++ b/include/uapi/linux/blkzoned.h @@ -130,7 +130,7 @@ struct blk_zone_report { __u64 sector; __u32 nr_zones; __u32 flags; - struct blk_zone zones[0]; + struct blk_zone zones[]; }; /** diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index f4009dbdf62d..ff225aa3f16a 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -79,7 +79,7 @@ struct bpf_insn { /* Key of a BPF_MAP_TYPE_LPM_TRIE entry */ struct bpf_lpm_trie_key { __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ - __u8 data[0]; /* Arbitrary size */ + __u8 data[]; /* Arbitrary size */ }; struct bpf_cgroup_storage_key { @@ -5222,22 +5222,25 @@ union bpf_attr { * Return * Nothing. Always succeeds. * - * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset) + * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset, u64 flags) * Description * Read *len* bytes from *src* into *dst*, starting from *offset* * into *src*. + * *flags* is currently unused. * Return * 0 on success, -E2BIG if *offset* + *len* exceeds the length - * of *src*'s data, -EINVAL if *src* is an invalid dynptr. + * of *src*'s data, -EINVAL if *src* is an invalid dynptr or if + * *flags* is not 0. * - * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len) + * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags) * Description * Write *len* bytes from *src* into *dst*, starting from *offset* * into *dst*. + * *flags* is currently unused. * Return * 0 on success, -E2BIG if *offset* + *len* exceeds the length * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst* - * is a read-only dynptr. + * is a read-only dynptr or if *flags* is not 0. 
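
The read/write contract documented above is easy to misread in diff form. A minimal userspace model of the documented error cases follows; this is a sketch of the contract only, not the kernel implementation, and struct dynptr_model with its fields is invented for illustration:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Toy stand-in for a dynptr: a (data, size, read-only) triple. */
struct dynptr_model {
	uint8_t  *data;
	uint32_t  size;
	bool      rdonly;
};

/* Mirrors the documented returns of bpf_dynptr_read()/bpf_dynptr_write(). */
static long dynptr_rw_model(struct dynptr_model *p, uint32_t offset,
			    void *buf, uint32_t len, uint64_t flags,
			    bool is_write)
{
	if (!p || !p->data || flags)	/* invalid dynptr, or flags != 0 */
		return -EINVAL;
	if (is_write && p->rdonly)	/* writes to read-only dynptrs fail */
		return -EINVAL;
	if ((uint64_t)offset + len > p->size)
		return -E2BIG;		/* offset + len exceeds the data */
	if (is_write)
		memcpy(p->data + offset, buf, len);
	else
		memcpy(buf, p->data + offset, len);
	return 0;
}
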
* * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len) * Description diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index d956b2993970..3d0edbe3b991 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -93,7 +93,7 @@ struct btrfs_qgroup_inherit { __u64 num_ref_copies; __u64 num_excl_copies; struct btrfs_qgroup_limit lim; - __u64 qgroups[0]; + __u64 qgroups[]; }; struct btrfs_ioctl_qgroup_limit_args { @@ -561,7 +561,7 @@ struct btrfs_ioctl_search_args_v2 { __u64 buf_size; /* in - size of buffer * out - on EOVERFLOW: needed size * to store item */ - __u64 buf[0]; /* out - found items */ + __u64 buf[]; /* out - found items */ }; struct btrfs_ioctl_clone_range_args { @@ -632,7 +632,7 @@ struct btrfs_ioctl_same_args { __u16 dest_count; /* in - total elements in info array */ __u16 reserved1; __u32 reserved2; - struct btrfs_ioctl_same_extent_info info[0]; + struct btrfs_ioctl_same_extent_info info[]; }; struct btrfs_ioctl_space_info { @@ -644,7 +644,7 @@ struct btrfs_ioctl_space_info { struct btrfs_ioctl_space_args { __u64 space_slots; __u64 total_spaces; - struct btrfs_ioctl_space_info spaces[0]; + struct btrfs_ioctl_space_info spaces[]; }; struct btrfs_data_container { @@ -652,7 +652,7 @@ struct btrfs_data_container { __u32 bytes_missing; /* out -- additional bytes needed for result */ __u32 elem_cnt; /* out */ __u32 elem_missed; /* out */ - __u64 val[0]; /* out */ + __u64 val[]; /* out */ }; struct btrfs_ioctl_ino_path_args { diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index d4117152d907..5f32a2a495dc 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -575,7 +575,7 @@ struct btrfs_inode_extref { __le64 parent_objectid; __le64 index; __le16 name_len; - __u8 name[0]; + __u8 name[]; /* name goes here */ } __attribute__ ((__packed__)); diff --git a/include/uapi/linux/can/bcm.h b/include/uapi/linux/can/bcm.h index dd2b925b09ac..f1e45f533a72 100644 --- a/include/uapi/linux/can/bcm.h +++ b/include/uapi/linux/can/bcm.h @@ -71,7 +71,7 @@ struct bcm_msg_head { struct bcm_timeval ival1, ival2; canid_t can_id; __u32 nframes; - struct can_frame frames[0]; + struct can_frame frames[]; }; enum { diff --git a/include/uapi/linux/connector.h b/include/uapi/linux/connector.h index 3738936149a2..5ae131c3f145 100644 --- a/include/uapi/linux/connector.h +++ b/include/uapi/linux/connector.h @@ -75,7 +75,7 @@ struct cn_msg { __u16 len; /* Length of the following data */ __u16 flags; - __u8 data[0]; + __u8 data[]; }; #endif /* _UAPI__CONNECTOR_H */ diff --git a/include/uapi/linux/cycx_cfm.h b/include/uapi/linux/cycx_cfm.h index 51f541942ff9..91778c8024b1 100644 --- a/include/uapi/linux/cycx_cfm.h +++ b/include/uapi/linux/cycx_cfm.h @@ -91,7 +91,7 @@ struct cycx_firmware { unsigned short reserved[6]; char descr[CFM_DESCR_LEN]; struct cycx_fw_info info; - unsigned char image[0]; + unsigned char image[]; }; struct cycx_fw_header { diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index 2e9550fef90f..7edf335778ba 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -182,7 +182,7 @@ struct dm_target_spec { struct dm_target_deps { __u32 count; /* Array size */ __u32 padding; /* unused */ - __u64 dev[0]; /* out */ + __u64 dev[]; /* out */ }; /* @@ -192,7 +192,7 @@ struct dm_name_list { __u64 dev; __u32 next; /* offset to the next record from the _start_ of this */ - char name[0]; + char name[]; /* * The following members can be accessed by 
taking a pointer that @@ -216,7 +216,7 @@ struct dm_target_versions { __u32 next; __u32 version[3]; - char name[0]; + char name[]; }; /* @@ -225,7 +225,7 @@ struct dm_target_versions { struct dm_target_msg { __u64 sector; /* Device sector */ - char message[0]; + char message[]; }; /* @@ -286,9 +286,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 46 +#define DM_VERSION_MINOR 47 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2022-02-22)" +#define DM_VERSION_EXTRA "-ioctl (2022-07-28)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ diff --git a/include/uapi/linux/dm-log-userspace.h b/include/uapi/linux/dm-log-userspace.h index 5c47a8603376..23dad9565e46 100644 --- a/include/uapi/linux/dm-log-userspace.h +++ b/include/uapi/linux/dm-log-userspace.h @@ -426,7 +426,7 @@ struct dm_ulog_request { __u32 request_type; /* DM_ULOG_* defined above */ __u32 data_size; /* How much data (not including this struct) */ - char data[0]; + char data[]; }; #endif /* __DM_LOG_USERSPACE_H__ */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index e0f0ee9bc89e..2d5741fd44bb 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -257,7 +257,7 @@ struct ethtool_tunable { __u32 id; __u32 type_id; __u32 len; - void *data[0]; + void *data[]; }; #define DOWNSHIFT_DEV_DEFAULT_COUNT 0xff @@ -322,7 +322,7 @@ struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; - __u8 data[0]; + __u8 data[]; }; /** @@ -348,7 +348,7 @@ struct ethtool_eeprom { __u32 magic; __u32 offset; __u32 len; - __u8 data[0]; + __u8 data[]; }; /** @@ -752,7 +752,7 @@ struct ethtool_gstrings { __u32 cmd; __u32 string_set; __u32 len; - __u8 data[0]; + __u8 data[]; }; /** @@ -777,7 +777,7 @@ struct ethtool_sset_info { __u32 cmd; __u32 reserved; __u64 sset_mask; - __u32 data[0]; + __u32 data[]; }; /** @@ -817,7 +817,7 @@ struct ethtool_test { __u32 flags; __u32 reserved; __u32 len; - __u64 data[0]; + __u64 data[]; }; /** @@ -834,7 +834,7 @@ struct ethtool_test { struct ethtool_stats { __u32 cmd; __u32 n_stats; - __u64 data[0]; + __u64 data[]; }; /** @@ -851,7 +851,7 @@ struct ethtool_stats { struct ethtool_perm_addr { __u32 cmd; __u32 size; - __u8 data[0]; + __u8 data[]; }; /* boolean flags controlling per-interface behavior characteristics. 
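
The ethtool conversions above (and those continuing below) replace one-element placeholder arrays with C99 flexible array members; the user-visible layout is unchanged, and callers still size the trailing storage explicitly. A hedged sketch of the usual allocation pattern for struct ethtool_gstrings, showing only the buffer sizing:

#include <stdlib.h>
#include <linux/ethtool.h>

/* Allocate an ethtool_gstrings request with room for 'count' strings.
 * The flexible data[] member contributes nothing to sizeof(*gs), so
 * the trailing storage must be added to the allocation explicitly. */
static struct ethtool_gstrings *alloc_gstrings(__u32 string_set, __u32 count)
{
	struct ethtool_gstrings *gs;

	gs = calloc(1, sizeof(*gs) + (size_t)count * ETH_GSTRING_LEN);
	if (!gs)
		return NULL;
	gs->cmd = ETHTOOL_GSTRINGS;
	gs->string_set = string_set;
	gs->len = count;
	return gs;
}
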
@@ -1160,7 +1160,7 @@ struct ethtool_rxnfc { struct ethtool_rxfh_indir { __u32 cmd; __u32 size; - __u32 ring_index[0]; + __u32 ring_index[]; }; /** @@ -1201,7 +1201,7 @@ struct ethtool_rxfh { __u8 hfunc; __u8 rsvd8[3]; __u32 rsvd32; - __u32 rss_config[0]; + __u32 rss_config[]; }; #define ETH_RXFH_CONTEXT_ALLOC 0xffffffff #define ETH_RXFH_INDIR_NO_CHANGE 0xffffffff @@ -1286,7 +1286,7 @@ struct ethtool_dump { __u32 version; __u32 flag; __u32 len; - __u8 data[0]; + __u8 data[]; }; #define ETH_FW_DUMP_DISABLE 0 @@ -1318,7 +1318,7 @@ struct ethtool_get_features_block { struct ethtool_gfeatures { __u32 cmd; __u32 size; - struct ethtool_get_features_block features[0]; + struct ethtool_get_features_block features[]; }; /** @@ -1340,7 +1340,7 @@ struct ethtool_set_features_block { struct ethtool_sfeatures { __u32 cmd; __u32 size; - struct ethtool_set_features_block features[0]; + struct ethtool_set_features_block features[]; }; /** @@ -2087,7 +2087,7 @@ struct ethtool_link_settings { __u8 master_slave_state; __u8 reserved1[1]; __u32 reserved[7]; - __u32 link_mode_masks[0]; + __u32 link_mode_masks[]; /* layout of link_mode_masks fields: * __u32 map_supported[link_mode_masks_nwords]; * __u32 map_advertising[link_mode_masks_nwords]; diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h index f1f89132d60e..436258214bb0 100644 --- a/include/uapi/linux/fanotify.h +++ b/include/uapi/linux/fanotify.h @@ -83,12 +83,20 @@ #define FAN_MARK_FLUSH 0x00000080 /* FAN_MARK_FILESYSTEM is 0x00000100 */ #define FAN_MARK_EVICTABLE 0x00000200 +/* This bit is mutually exclusive with FAN_MARK_IGNORED_MASK bit */ +#define FAN_MARK_IGNORE 0x00000400 /* These are NOT bitwise flags. Both bits can be used together. */ #define FAN_MARK_INODE 0x00000000 #define FAN_MARK_MOUNT 0x00000010 #define FAN_MARK_FILESYSTEM 0x00000100 +/* + * Convenience macro - FAN_MARK_IGNORE requires FAN_MARK_IGNORED_SURV_MODIFY + * for non-inode mark types. + */ +#define FAN_MARK_IGNORE_SURV (FAN_MARK_IGNORE | FAN_MARK_IGNORED_SURV_MODIFY) + /* Deprecated - do not use this in programs and do not add new flags here! */ #define FAN_ALL_MARK_FLAGS (FAN_MARK_ADD |\ FAN_MARK_REMOVE |\ @@ -162,7 +170,7 @@ struct fanotify_event_info_fid { * Following is an opaque struct file_handle that can be passed as * an argument to open_by_handle_at(2). 
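
The new FAN_MARK_IGNORE_SURV convenience flag above combines FAN_MARK_IGNORE with FAN_MARK_IGNORED_SURV_MODIFY. A hedged sketch of how a caller might apply it; this assumes glibc's sys/fanotify.h and userspace headers new enough to carry the flag:

#include <fcntl.h>
#include <sys/fanotify.h>

/* Stop reporting modify events for one path on an existing fanotify
 * group; the ignore mark survives modification of the marked inode. */
static int ignore_modify(int fan_fd, const char *path)
{
	return fanotify_mark(fan_fd, FAN_MARK_ADD | FAN_MARK_IGNORE_SURV,
			     FAN_MODIFY, AT_FDCWD, path);
}
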
*/ - unsigned char handle[0]; + unsigned char handle[]; }; /* diff --git a/include/uapi/linux/fiemap.h b/include/uapi/linux/fiemap.h index 07c1cdcb715e..24ca0c00cae3 100644 --- a/include/uapi/linux/fiemap.h +++ b/include/uapi/linux/fiemap.h @@ -34,7 +34,7 @@ struct fiemap { __u32 fm_mapped_extents;/* number of extents that were mapped (out) */ __u32 fm_extent_count; /* size of fm_extents array (in) */ __u32 fm_reserved; - struct fiemap_extent fm_extents[0]; /* array of mapped extents (out) */ + struct fiemap_extent fm_extents[]; /* array of mapped extents (out) */ }; #define FIEMAP_MAX_OFFSET (~0ULL) diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h index 5effa9832802..92be3ea3c6e0 100644 --- a/include/uapi/linux/firewire-cdev.h +++ b/include/uapi/linux/firewire-cdev.h @@ -118,7 +118,7 @@ struct fw_cdev_event_response { __u32 type; __u32 rcode; __u32 length; - __u32 data[0]; + __u32 data[]; }; /** @@ -142,7 +142,7 @@ struct fw_cdev_event_request { __u64 offset; __u32 handle; __u32 length; - __u32 data[0]; + __u32 data[]; }; /** @@ -205,7 +205,7 @@ struct fw_cdev_event_request2 { __u32 generation; __u32 handle; __u32 length; - __u32 data[0]; + __u32 data[]; }; /** @@ -265,7 +265,7 @@ struct fw_cdev_event_iso_interrupt { __u32 type; __u32 cycle; __u32 header_length; - __u32 header[0]; + __u32 header[]; }; /** @@ -355,7 +355,7 @@ struct fw_cdev_event_phy_packet { __u32 type; __u32 rcode; __u32 length; - __u32 data[0]; + __u32 data[]; }; /** @@ -803,7 +803,7 @@ struct fw_cdev_set_iso_channels { */ struct fw_cdev_iso_packet { __u32 control; - __u32 header[0]; + __u32 header[]; }; /** diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index bdf7b404b3e7..b7b56871029c 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -90,7 +90,7 @@ struct file_dedupe_range { __u16 dest_count; /* in - total elements in info array */ __u16 reserved1; /* must be zero */ __u32 reserved2; /* must be zero */ - struct file_dedupe_range_info info[0]; + struct file_dedupe_range_info info[]; }; /* And dynamically-tunable limits and defaults: */ diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index 9f4428be3e36..a756b29afcc2 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -27,7 +27,8 @@ #define FSCRYPT_MODE_AES_128_CBC 5 #define FSCRYPT_MODE_AES_128_CTS 6 #define FSCRYPT_MODE_ADIANTUM 9 -/* If adding a mode number > 9, update FSCRYPT_MODE_MAX in fscrypt_private.h */ +#define FSCRYPT_MODE_AES_256_HCTR2 10 +/* If adding a mode number > 10, update FSCRYPT_MODE_MAX in fscrypt_private.h */ /* * Legacy policy version; ad-hoc KDF and no key verification. 
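
FSCRYPT_MODE_AES_256_HCTR2 above is only a new filenames mode number; a v2 policy would typically pair it with AES-256-XTS contents encryption. A hedged sketch of filling such a policy, using the existing fscrypt_policy_v2 UAPI fields; whether a given kernel and filesystem accept the combination depends on configuration:

#include <string.h>
#include <linux/fscrypt.h>

/* Fill a v2 fscrypt policy that uses the new AES-256-HCTR2 mode for
 * filenames and the usual AES-256-XTS mode for file contents. The
 * key_id argument must point at FSCRYPT_KEY_IDENTIFIER_SIZE bytes. */
static void fill_hctr2_policy(struct fscrypt_policy_v2 *p, const __u8 *key_id)
{
	memset(p, 0, sizeof(*p));
	p->version = FSCRYPT_POLICY_V2;
	p->contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS;
	p->filenames_encryption_mode = FSCRYPT_MODE_AES_256_HCTR2;
	memcpy(p->master_key_identifier, key_id, FSCRYPT_KEY_IDENTIFIER_SIZE);
}
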
diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h index dc52a11ba6d1..578b18aab821 100644 --- a/include/uapi/linux/if_alg.h +++ b/include/uapi/linux/if_alg.h @@ -42,7 +42,7 @@ struct sockaddr_alg_new { struct af_alg_iv { __u32 ivlen; - __u8 iv[0]; + __u8 iv[]; }; /* Socket options */ diff --git a/include/uapi/linux/if_arcnet.h b/include/uapi/linux/if_arcnet.h index 683878036d76..b122cfac7128 100644 --- a/include/uapi/linux/if_arcnet.h +++ b/include/uapi/linux/if_arcnet.h @@ -60,7 +60,7 @@ struct arc_rfc1201 { __u8 proto; /* protocol ID field - varies */ __u8 split_flag; /* for use with split packets */ __be16 sequence; /* sequence number */ - __u8 payload[0]; /* space remaining in packet (504 bytes)*/ + __u8 payload[]; /* space remaining in packet (504 bytes)*/ }; #define RFC1201_HDR_SIZE 4 @@ -69,7 +69,7 @@ struct arc_rfc1201 { */ struct arc_rfc1051 { __u8 proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */ - __u8 payload[0]; /* 507 bytes */ + __u8 payload[]; /* 507 bytes */ }; #define RFC1051_HDR_SIZE 1 @@ -80,7 +80,7 @@ struct arc_rfc1051 { struct arc_eth_encap { __u8 proto; /* Always ARC_P_ETHER */ struct ethhdr eth; /* standard ethernet header (yuck!) */ - __u8 payload[0]; /* 493 bytes */ + __u8 payload[]; /* 493 bytes */ }; #define ETH_ENCAP_HDR_SIZE 14 diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h index e7a693c28f16..9abd80dcc46f 100644 --- a/include/uapi/linux/if_pppox.h +++ b/include/uapi/linux/if_pppox.h @@ -122,7 +122,7 @@ struct sockaddr_pppol2tpv3in6 { struct pppoe_tag { __be16 tag_type; __be16 tag_len; - char tag_data[0]; + char tag_data[]; } __attribute__ ((packed)); /* Tag identifiers */ @@ -150,7 +150,7 @@ struct pppoe_hdr { __u8 code; __be16 sid; __be16 length; - struct pppoe_tag tag[0]; + struct pppoe_tag tag[]; } __packed; /* Length of entire PPPoE + PPP header */ diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h index 454ae31b93c7..2ec07de1d73b 100644 --- a/include/uapi/linux/if_tun.h +++ b/include/uapi/linux/if_tun.h @@ -108,7 +108,7 @@ struct tun_pi { struct tun_filter { __u16 flags; /* TUN_FLT_ flags see above */ __u16 count; /* Number of addresses */ - __u8 addr[0][ETH_ALEN]; + __u8 addr[][ETH_ALEN]; }; #endif /* _UAPI__IF_TUN_H */ diff --git a/include/uapi/linux/igmp.h b/include/uapi/linux/igmp.h index 90c28bc466c6..5930f2437cd1 100644 --- a/include/uapi/linux/igmp.h +++ b/include/uapi/linux/igmp.h @@ -48,7 +48,7 @@ struct igmpv3_grec { __u8 grec_auxwords; __be16 grec_nsrcs; __be32 grec_mca; - __be32 grec_src[0]; + __be32 grec_src[]; }; struct igmpv3_report { @@ -57,7 +57,7 @@ struct igmpv3_report { __sum16 csum; __be16 resv2; __be16 ngrec; - struct igmpv3_grec grec[0]; + struct igmpv3_grec grec[]; }; struct igmpv3_query { @@ -78,7 +78,7 @@ struct igmpv3_query { #endif __u8 qqic; __be16 nsrcs; - __be32 srcs[0]; + __be32 srcs[]; }; #define IGMP_HOST_MEMBERSHIP_QUERY 0x11 /* From RFC1112 */ diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index 20ee93f0f876..50655de04c9b 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -104,7 +104,7 @@ struct inet_diag_hostcond { __u8 family; __u8 prefix_len; int port; - __be32 addr[0]; + __be32 addr[]; }; struct inet_diag_markcond { diff --git a/include/uapi/linux/inotify.h b/include/uapi/linux/inotify.h index 884b4846b630..b3e165853d5b 100644 --- a/include/uapi/linux/inotify.h +++ b/include/uapi/linux/inotify.h @@ -23,7 +23,7 @@ struct inotify_event { __u32 mask; /* watch mask */ __u32 cookie; /* cookie to 
synchronize two events */ __u32 len; /* length (including nulls) of name */ - char name[0]; /* stub for possible name */ + char name[]; /* stub for possible name */ }; /* the following are legal, implemented events that user-space can watch for */ diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index ef4257ab3026..2557eb7b0561 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h @@ -78,10 +78,13 @@ struct input_id { * Note that input core does not clamp reported values to the * [minimum, maximum] limits, such task is left to userspace. * - * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z) - * is reported in units per millimeter (units/mm), resolution - * for rotational axes (ABS_RX, ABS_RY, ABS_RZ) is reported - * in units per radian. + * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z, + * ABS_MT_POSITION_X, ABS_MT_POSITION_Y) is reported in units + * per millimeter (units/mm), resolution for rotational axes + * (ABS_RX, ABS_RY, ABS_RZ) is reported in units per radian. + * The resolution for the size axes (ABS_MT_TOUCH_MAJOR, + * ABS_MT_TOUCH_MINOR, ABS_MT_WIDTH_MAJOR, ABS_MT_WIDTH_MINOR) + * is reported in units per millimeter (units/mm). * When INPUT_PROP_ACCELEROMETER is set the resolution changes. * The main axes (ABS_X, ABS_Y, ABS_Z) are then reported in * units per g (units/g) and in units per degree per second diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 776e0278f9dd..1463cfecb56b 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -10,6 +10,7 @@ #include <linux/fs.h> #include <linux/types.h> +#include <linux/time_types.h> /* * IO submission data structure (Submission Queue Entry) @@ -22,7 +23,10 @@ struct io_uring_sqe { union { __u64 off; /* offset into file */ __u64 addr2; - __u32 cmd_op; + struct { + __u32 cmd_op; + __u32 __pad1; + }; }; union { __u64 addr; /* pointer to buffer or iovecs */ @@ -47,7 +51,7 @@ struct io_uring_sqe { __u32 unlink_flags; __u32 hardlink_flags; __u32 xattr_flags; - __u32 close_flags; + __u32 msg_ring_flags; }; __u64 user_data; /* data to be passed back at completion time */ /* pack this to avoid bogus arm OABI complaints */ @@ -62,6 +66,10 @@ struct io_uring_sqe { union { __s32 splice_fd_in; __u32 file_index; + struct { + __u16 notification_idx; + __u16 addr_len; + }; }; union { struct { @@ -138,9 +146,12 @@ enum { * IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN. 
*/ #define IORING_SETUP_TASKRUN_FLAG (1U << 9) - #define IORING_SETUP_SQE128 (1U << 10) /* SQEs are 128 byte */ #define IORING_SETUP_CQE32 (1U << 11) /* CQEs are 32 byte */ +/* + * Only one task is allowed to submit requests + */ +#define IORING_SETUP_SINGLE_ISSUER (1U << 12) enum io_uring_op { IORING_OP_NOP, @@ -163,7 +174,8 @@ enum io_uring_op { IORING_OP_FALLOCATE, IORING_OP_OPENAT, IORING_OP_CLOSE, - IORING_OP_FILES_UPDATE, + IORING_OP_RSRC_UPDATE, + IORING_OP_FILES_UPDATE = IORING_OP_RSRC_UPDATE, IORING_OP_STATX, IORING_OP_READ, IORING_OP_WRITE, @@ -190,6 +202,7 @@ enum io_uring_op { IORING_OP_GETXATTR, IORING_OP_SOCKET, IORING_OP_URING_CMD, + IORING_OP_SENDZC_NOTIF, /* this goes last, obviously */ IORING_OP_LAST, @@ -211,6 +224,7 @@ enum io_uring_op { #define IORING_TIMEOUT_ETIME_SUCCESS (1U << 5) #define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME) #define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE) + /* * sqe->splice_flags * extends splice(2) flags @@ -227,10 +241,13 @@ enum io_uring_op { * * IORING_POLL_UPDATE Update existing poll request, matching * sqe->addr as the old user_data field. + * + * IORING_POLL_ADD_LEVEL Level triggered poll. */ #define IORING_POLL_ADD_MULTI (1U << 0) #define IORING_POLL_UPDATE_EVENTS (1U << 1) #define IORING_POLL_UPDATE_USER_DATA (1U << 2) +#define IORING_POLL_ADD_LEVEL (1U << 3) /* * ASYNC_CANCEL flags. @@ -239,30 +256,65 @@ enum io_uring_op { * IORING_ASYNC_CANCEL_FD Key off 'fd' for cancelation rather than the * request 'user_data' * IORING_ASYNC_CANCEL_ANY Match any request + * IORING_ASYNC_CANCEL_FD_FIXED 'fd' passed in is a fixed descriptor */ #define IORING_ASYNC_CANCEL_ALL (1U << 0) #define IORING_ASYNC_CANCEL_FD (1U << 1) #define IORING_ASYNC_CANCEL_ANY (1U << 2) +#define IORING_ASYNC_CANCEL_FD_FIXED (1U << 3) /* - * send/sendmsg and recv/recvmsg flags (sqe->addr2) + * send/sendmsg and recv/recvmsg flags (sqe->ioprio) * * IORING_RECVSEND_POLL_FIRST If set, instead of first attempting to send * or receive and arm poll if that yields an * -EAGAIN result, arm poll upfront and skip * the initial transfer attempt. + * + * IORING_RECV_MULTISHOT Multishot recv. Sets IORING_CQE_F_MORE if + * the handler will continue to report + * CQEs on behalf of the same SQE. + * + * IORING_RECVSEND_FIXED_BUF Use registered buffers, the index is stored in + * the buf_index field. + * + * IORING_RECVSEND_NOTIF_FLUSH Flush a notification after a successful + * send. Only for zerocopy sends. */ #define IORING_RECVSEND_POLL_FIRST (1U << 0) +#define IORING_RECV_MULTISHOT (1U << 1) +#define IORING_RECVSEND_FIXED_BUF (1U << 2) +#define IORING_RECVSEND_NOTIF_FLUSH (1U << 3) /* * accept flags stored in sqe->ioprio */ #define IORING_ACCEPT_MULTISHOT (1U << 0) + +/* + * IORING_OP_RSRC_UPDATE flags + */ +enum { + IORING_RSRC_UPDATE_FILES, + IORING_RSRC_UPDATE_NOTIF, +}; + +/* + * IORING_OP_MSG_RING command types, stored in sqe->addr + */ +enum { + IORING_MSG_DATA, /* pass sqe->len as 'res' and off as user_data */ + IORING_MSG_SEND_FD, /* send a registered fd to another ring */ +}; + /* - * close flags, store in sqe->close_flags + * IORING_OP_MSG_RING flags (sqe->msg_ring_flags) + * + * IORING_MSG_RING_CQE_SKIP Don't post a CQE to the target ring. Not + * applicable for IORING_MSG_DATA, obviously. 
*/ -#define IORING_CLOSE_FD_AND_FILE_SLOT (1U << 0) +#define IORING_MSG_RING_CQE_SKIP (1U << 0) /* * IO completion data structure (Completion Queue Entry) @@ -423,6 +475,16 @@ enum { IORING_REGISTER_PBUF_RING = 22, IORING_UNREGISTER_PBUF_RING = 23, + /* sync cancelation API */ + IORING_REGISTER_SYNC_CANCEL = 24, + + /* register a range of fixed file slots for automatic slot allocation */ + IORING_REGISTER_FILE_ALLOC_RANGE = 25, + + /* zerocopy notification API */ + IORING_REGISTER_NOTIFIERS = 26, + IORING_UNREGISTER_NOTIFIERS = 27, + /* this goes last */ IORING_REGISTER_LAST }; @@ -469,6 +531,19 @@ struct io_uring_rsrc_update2 { __u32 resv2; }; +struct io_uring_notification_slot { + __u64 tag; + __u64 resv[3]; +}; + +struct io_uring_notification_register { + __u32 nr_slots; + __u32 resv; + __u64 resv2; + __u64 data; + __u64 resv3; +}; + /* Skip updating fd indexes set to this value in the fd table */ #define IORING_REGISTER_FILES_SKIP (-2) @@ -486,7 +561,7 @@ struct io_uring_probe { __u8 ops_len; /* length of ops[] array below */ __u16 resv; __u32 resv2[3]; - struct io_uring_probe_op ops[0]; + struct io_uring_probe_op ops[]; }; struct io_uring_restriction { @@ -558,4 +633,32 @@ struct io_uring_getevents_arg { __u64 ts; }; +/* + * Argument for IORING_REGISTER_SYNC_CANCEL + */ +struct io_uring_sync_cancel_reg { + __u64 addr; + __s32 fd; + __u32 flags; + struct __kernel_timespec timeout; + __u64 pad[4]; +}; + +/* + * Argument for IORING_REGISTER_FILE_ALLOC_RANGE + * The range is specified as [off, off + len) + */ +struct io_uring_file_index_range { + __u32 off; + __u32 len; + __u64 resv; +}; + +struct io_uring_recvmsg_out { + __u32 namelen; + __u32 controllen; + __u32 payloadlen; + __u32 flags; +}; + #endif diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h index e00bbb9c47bb..961ec16a26b8 100644 --- a/include/uapi/linux/ip.h +++ b/include/uapi/linux/ip.h @@ -112,13 +112,13 @@ struct ip_auth_hdr { __be16 reserved; __be32 spi; __be32 seq_no; /* Sequence number */ - __u8 auth_data[0]; /* Variable len but >=4. Mind the 64 bit alignment! */ + __u8 auth_data[]; /* Variable len but >=4. Mind the 64 bit alignment! */ }; struct ip_esp_hdr { __be32 spi; __be32 seq_no; /* Sequence number */ - __u8 enc_data[0]; /* Variable len but >=8. Mind the 64 bit alignment! */ + __u8 enc_data[]; /* Variable len but >=8. Mind the 64 bit alignment! 
*/ }; struct ip_comp_hdr { diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h index 4102ddcb4e14..1ed234e7f251 100644 --- a/include/uapi/linux/ip_vs.h +++ b/include/uapi/linux/ip_vs.h @@ -254,7 +254,7 @@ struct ip_vs_get_dests { unsigned int num_dests; /* the real servers */ - struct ip_vs_dest_entry entrytable[0]; + struct ip_vs_dest_entry entrytable[]; }; @@ -264,7 +264,7 @@ struct ip_vs_get_services { unsigned int num_services; /* service table */ - struct ip_vs_service_entry entrytable[0]; + struct ip_vs_service_entry entrytable[]; }; diff --git a/include/uapi/linux/iso_fs.h b/include/uapi/linux/iso_fs.h index a2555176f6d1..758178f5b52d 100644 --- a/include/uapi/linux/iso_fs.h +++ b/include/uapi/linux/iso_fs.h @@ -137,7 +137,7 @@ struct iso_path_table{ __u8 name_len[2]; /* 721 */ __u8 extent[4]; /* 731 */ __u8 parent[2]; /* 721 */ - char name[0]; + char name[]; } __attribute__((packed)); /* high sierra is identical to iso, except that the date is only 6 bytes, and @@ -154,7 +154,7 @@ struct iso_directory_record { __u8 interleave [ISODCL (28, 28)]; /* 711 */ __u8 volume_sequence_number [ISODCL (29, 32)]; /* 723 */ __u8 name_len [ISODCL (33, 33)]; /* 711 */ - char name [0]; + char name []; } __attribute__((packed)); #define ISOFS_BLOCK_BITS 11 diff --git a/include/uapi/linux/jffs2.h b/include/uapi/linux/jffs2.h index 784ba0b9690a..637ee4a793cf 100644 --- a/include/uapi/linux/jffs2.h +++ b/include/uapi/linux/jffs2.h @@ -123,7 +123,7 @@ struct jffs2_raw_dirent __u8 unused[2]; jint32_t node_crc; jint32_t name_crc; - __u8 name[0]; + __u8 name[]; }; /* The JFFS2 raw inode structure: Used for storage on physical media. */ @@ -155,7 +155,7 @@ struct jffs2_raw_inode jint16_t flags; /* See JFFS2_INO_FLAG_* */ jint32_t data_crc; /* CRC for the (compressed) data. 
*/ jint32_t node_crc; /* CRC for the raw inode (excluding data) */ - __u8 data[0]; + __u8 data[]; }; struct jffs2_raw_xattr { @@ -170,7 +170,7 @@ struct jffs2_raw_xattr { jint16_t value_len; jint32_t data_crc; jint32_t node_crc; - __u8 data[0]; + __u8 data[]; } __attribute__((packed)); struct jffs2_raw_xref @@ -196,7 +196,7 @@ struct jffs2_raw_summary jint32_t padded; /* sum of the size of padding nodes */ jint32_t sum_crc; /* summary information crc */ jint32_t node_crc; /* node crc */ - jint32_t sum[0]; /* inode summary info */ + jint32_t sum[]; /* inode summary info */ }; union jffs2_node_union diff --git a/include/uapi/linux/kcov.h b/include/uapi/linux/kcov.h index 1d0350e44ae3..ed95dba9fa37 100644 --- a/include/uapi/linux/kcov.h +++ b/include/uapi/linux/kcov.h @@ -13,7 +13,7 @@ struct kcov_remote_arg { __u32 area_size; /* Length of coverage buffer in words */ __u32 num_handles; /* Size of handles array */ __aligned_u64 common_handle; - __aligned_u64 handles[0]; + __aligned_u64 handles[]; }; #define KCOV_REMOTE_MAX_HANDLES 0x100 diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 5088bd9f1922..cb6e3846d27b 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -542,7 +542,7 @@ struct kvm_coalesced_mmio { struct kvm_coalesced_mmio_ring { __u32 first, last; - struct kvm_coalesced_mmio coalesced_mmio[0]; + struct kvm_coalesced_mmio coalesced_mmio[]; }; #define KVM_COALESCED_MMIO_MAX \ @@ -621,7 +621,7 @@ struct kvm_clear_dirty_log { /* for KVM_SET_SIGNAL_MASK */ struct kvm_signal_mask { __u32 len; - __u8 sigset[0]; + __u8 sigset[]; }; /* for KVM_TPR_ACCESS_REPORTING */ @@ -1221,7 +1221,7 @@ struct kvm_irq_routing_entry { struct kvm_irq_routing { __u32 nr; __u32 flags; - struct kvm_irq_routing_entry entries[0]; + struct kvm_irq_routing_entry entries[]; }; #endif @@ -1341,7 +1341,7 @@ struct kvm_dirty_tlb { struct kvm_reg_list { __u64 n; /* number of regs */ - __u64 reg[0]; + __u64 reg[]; }; struct kvm_one_reg { @@ -2083,7 +2083,8 @@ struct kvm_stats_header { #define KVM_STATS_UNIT_BYTES (0x1 << KVM_STATS_UNIT_SHIFT) #define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT) #define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT) -#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_CYCLES +#define KVM_STATS_UNIT_BOOLEAN (0x4 << KVM_STATS_UNIT_SHIFT) +#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_BOOLEAN #define KVM_STATS_BASE_SHIFT 8 #define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT) diff --git a/include/uapi/linux/loadpin.h b/include/uapi/linux/loadpin.h new file mode 100644 index 000000000000..daa6dbb8bb02 --- /dev/null +++ b/include/uapi/linux/loadpin.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (c) 2022, Google LLC + */ + +#ifndef _UAPI_LINUX_LOOP_LOADPIN_H +#define _UAPI_LINUX_LOOP_LOADPIN_H + +#define LOADPIN_IOC_MAGIC 'L' + +/** + * LOADPIN_IOC_SET_TRUSTED_VERITY_DIGESTS - Set up the root digests of verity devices + * that loadpin should trust. + * + * Takes a file descriptor from which to read the root digests of trusted verity devices. The file + * is expected to contain a list of digests in ASCII format, with one line per digest. The ioctl + * must be issued on the securityfs attribute 'loadpin/dm-verity' (which can be typically found + * under /sys/kernel/security/loadpin/dm-verity). 
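
Based only on the comment above, userspace consumption of this ioctl (defined just below) could look like the following hedged sketch; the digests file path is hypothetical, and error handling is trimmed:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loadpin.h>

/* Hand LoadPin a file of trusted dm-verity root digests, one ASCII
 * digest per line, via the securityfs attribute. */
static int loadpin_trust_digests(const char *digests_path)
{
	int attr_fd, digests_fd, ret = -1;

	digests_fd = open(digests_path, O_RDONLY);
	if (digests_fd < 0)
		return -1;
	attr_fd = open("/sys/kernel/security/loadpin/dm-verity", O_WRONLY);
	if (attr_fd >= 0) {
		ret = ioctl(attr_fd, LOADPIN_IOC_SET_TRUSTED_VERITY_DIGESTS,
			    digests_fd);
		close(attr_fd);
	}
	close(digests_fd);
	return ret;
}
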
+ */ +#define LOADPIN_IOC_SET_TRUSTED_VERITY_DIGESTS _IOW(LOADPIN_IOC_MAGIC, 0x00, unsigned int) + +#endif /* _UAPI_LINUX_LOOP_LOADPIN_H */ diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index f724129c0425..6325d1d0e90f 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -98,12 +98,8 @@ /* Since UDF 2.01 is ISO 13346 based... */ #define UDF_SUPER_MAGIC 0x15013346 -#define BALLOON_KVM_MAGIC 0x13661366 -#define ZSMALLOC_MAGIC 0x58295829 #define DMA_BUF_MAGIC 0x444d4142 /* "DMAB" */ #define DEVMEM_MAGIC 0x454d444d /* "DMEM" */ -#define Z3FOLD_MAGIC 0x33 -#define PPC_CMM_MAGIC 0xc7571590 #define SECRETMEM_MAGIC 0x5345434d /* "SECM" */ #endif /* __LINUX_MAGIC_H__ */ diff --git a/include/uapi/linux/minix_fs.h b/include/uapi/linux/minix_fs.h index 95dbcb17eacd..8d9ca8b2c357 100644 --- a/include/uapi/linux/minix_fs.h +++ b/include/uapi/linux/minix_fs.h @@ -97,11 +97,11 @@ struct minix3_super_block { struct minix_dir_entry { __u16 inode; - char name[0]; + char name[]; }; struct minix3_dir_entry { __u32 inode; - char name[0]; + char name[]; }; #endif diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h index 27a39847d55c..e7401ade6822 100644 --- a/include/uapi/linux/mmc/ioctl.h +++ b/include/uapi/linux/mmc/ioctl.h @@ -58,7 +58,7 @@ struct mmc_ioc_cmd { */ struct mmc_ioc_multi_cmd { __u64 num_of_cmds; - struct mmc_ioc_cmd cmds[0]; + struct mmc_ioc_cmd cmds[]; }; #define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd) diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index 921963589904..dfe19bf13f4c 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -2,16 +2,17 @@ #ifndef _UAPI_MPTCP_H #define _UAPI_MPTCP_H +#ifndef __KERNEL__ +#include <netinet/in.h> /* for sockaddr_in and sockaddr_in6 */ +#include <sys/socket.h> /* for struct sockaddr */ +#endif + #include <linux/const.h> #include <linux/types.h> #include <linux/in.h> /* for sockaddr_in */ #include <linux/in6.h> /* for sockaddr_in6 */ #include <linux/socket.h> /* for sockaddr_storage and sa_family */ -#ifndef __KERNEL__ -#include <sys/socket.h> /* for struct sockaddr */ -#endif - #define MPTCP_SUBFLOW_FLAG_MCAP_REM _BITUL(0) #define MPTCP_SUBFLOW_FLAG_MCAP_LOC _BITUL(1) #define MPTCP_SUBFLOW_FLAG_JOIN_REM _BITUL(2) diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h index 17e02b64ea2e..73516e263627 100644 --- a/include/uapi/linux/ndctl.h +++ b/include/uapi/linux/ndctl.h @@ -30,25 +30,25 @@ struct nd_cmd_get_config_data_hdr { __u32 in_offset; __u32 in_length; __u32 status; - __u8 out_buf[0]; + __u8 out_buf[]; } __packed; struct nd_cmd_set_config_hdr { __u32 in_offset; __u32 in_length; - __u8 in_buf[0]; + __u8 in_buf[]; } __packed; struct nd_cmd_vendor_hdr { __u32 opcode; __u32 in_length; - __u8 in_buf[0]; + __u8 in_buf[]; } __packed; struct nd_cmd_vendor_tail { __u32 status; __u32 out_length; - __u8 out_buf[0]; + __u8 out_buf[]; } __packed; struct nd_cmd_ars_cap { @@ -86,7 +86,7 @@ struct nd_cmd_ars_status { __u32 reserved; __u64 err_address; __u64 length; - } __packed records[0]; + } __packed records[]; } __packed; struct nd_cmd_clear_error { diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h index 1bbea8f0681e..84f622a66a7a 100644 --- a/include/uapi/linux/net_dropmon.h +++ b/include/uapi/linux/net_dropmon.h @@ -29,12 +29,12 @@ struct net_dm_config_entry { struct net_dm_config_msg { __u32 entries; - struct net_dm_config_entry options[0]; + struct net_dm_config_entry options[]; }; 
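
Both drop-monitor messages here carry a count plus a flexible trailing array. As a hedged sketch, assuming the alert message defined just below has already been received over netlink and length-validated, walking its points looks like:

#include <stdio.h>
#include <string.h>
#include <linux/net_dropmon.h>

/* Print each drop point of an alert; 'msg' must already be validated
 * to hold msg->entries trailing records. pc is a raw 8-byte value. */
static void print_drop_points(const struct net_dm_alert_msg *msg)
{
	for (__u32 i = 0; i < msg->entries; i++) {
		__u64 pc = 0;

		memcpy(&pc, msg->points[i].pc, sizeof(pc));
		printf("pc=0x%llx count=%u\n",
		       (unsigned long long)pc, msg->points[i].count);
	}
}
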
struct net_dm_alert_msg { __u32 entries; - struct net_dm_drop_point points[0]; + struct net_dm_drop_point points[]; }; struct net_dm_user_msg { diff --git a/include/uapi/linux/netfilter/x_tables.h b/include/uapi/linux/netfilter/x_tables.h index b8c6bb233ac1..796af83a963a 100644 --- a/include/uapi/linux/netfilter/x_tables.h +++ b/include/uapi/linux/netfilter/x_tables.h @@ -28,7 +28,7 @@ struct xt_entry_match { __u16 match_size; } u; - unsigned char data[0]; + unsigned char data[]; }; struct xt_entry_target { @@ -119,7 +119,7 @@ struct xt_counters_info { unsigned int num_counters; /* The counters (actually `number' of these). */ - struct xt_counters counters[0]; + struct xt_counters counters[]; }; #define XT_INV_PROTO 0x40 /* Invert the sense of PROTO. */ diff --git a/include/uapi/linux/netfilter_arp/arp_tables.h b/include/uapi/linux/netfilter_arp/arp_tables.h index bbf5af2b67a8..a6ac2463f787 100644 --- a/include/uapi/linux/netfilter_arp/arp_tables.h +++ b/include/uapi/linux/netfilter_arp/arp_tables.h @@ -109,7 +109,7 @@ struct arpt_entry struct xt_counters counters; /* The matches (if any), then the target. */ - unsigned char elems[0]; + unsigned char elems[]; }; /* @@ -181,7 +181,7 @@ struct arpt_replace { struct xt_counters __user *counters; /* The entries (hang off end: not really an array). */ - struct arpt_entry entries[0]; + struct arpt_entry entries[]; }; /* The argument to ARPT_SO_GET_ENTRIES. */ @@ -193,7 +193,7 @@ struct arpt_get_entries { unsigned int size; /* The entries. */ - struct arpt_entry entrytable[0]; + struct arpt_entry entrytable[]; }; /* Helper functions */ diff --git a/include/uapi/linux/netfilter_bridge/ebt_among.h b/include/uapi/linux/netfilter_bridge/ebt_among.h index 9acf757bc1f7..73b26a280c4f 100644 --- a/include/uapi/linux/netfilter_bridge/ebt_among.h +++ b/include/uapi/linux/netfilter_bridge/ebt_among.h @@ -40,7 +40,7 @@ struct ebt_mac_wormhash_tuple { struct ebt_mac_wormhash { int table[257]; int poolsize; - struct ebt_mac_wormhash_tuple pool[0]; + struct ebt_mac_wormhash_tuple pool[]; }; #define ebt_mac_wormhash_size(x) ((x) ? sizeof(struct ebt_mac_wormhash) \ diff --git a/include/uapi/linux/netfilter_ipv4/ip_tables.h b/include/uapi/linux/netfilter_ipv4/ip_tables.h index 50c7fee625ae..1485df28b239 100644 --- a/include/uapi/linux/netfilter_ipv4/ip_tables.h +++ b/include/uapi/linux/netfilter_ipv4/ip_tables.h @@ -121,7 +121,7 @@ struct ipt_entry { struct xt_counters counters; /* The matches (if any), then the target. */ - unsigned char elems[0]; + unsigned char elems[]; }; /* @@ -203,7 +203,7 @@ struct ipt_replace { struct xt_counters __user *counters; /* The entries (hang off end: not really an array). */ - struct ipt_entry entries[0]; + struct ipt_entry entries[]; }; /* The argument to IPT_SO_GET_ENTRIES. */ @@ -215,7 +215,7 @@ struct ipt_get_entries { unsigned int size; /* The entries. */ - struct ipt_entry entrytable[0]; + struct ipt_entry entrytable[]; }; /* Helper functions */ diff --git a/include/uapi/linux/netfilter_ipv6/ip6_tables.h b/include/uapi/linux/netfilter_ipv6/ip6_tables.h index d9e364f96a5c..766e8e0bcc68 100644 --- a/include/uapi/linux/netfilter_ipv6/ip6_tables.h +++ b/include/uapi/linux/netfilter_ipv6/ip6_tables.h @@ -243,7 +243,7 @@ struct ip6t_replace { struct xt_counters __user *counters; /* The entries (hang off end: not really an array). */ - struct ip6t_entry entries[0]; + struct ip6t_entry entries[]; }; /* The argument to IP6T_SO_GET_ENTRIES. */ @@ -255,7 +255,7 @@ struct ip6t_get_entries { unsigned int size; /* The entries. 
*/ - struct ip6t_entry entrytable[0]; + struct ip6t_entry entrytable[]; }; /* Helper functions */ diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index d37629dbad72..03b370062741 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -301,6 +301,7 @@ enum { * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING * { u64 id; } && PERF_FORMAT_ID + * { u64 lost; } && PERF_FORMAT_LOST * } && !PERF_FORMAT_GROUP * * { u64 nr; @@ -308,6 +309,7 @@ enum { * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING * { u64 value; * { u64 id; } && PERF_FORMAT_ID + * { u64 lost; } && PERF_FORMAT_LOST * } cntr[nr]; * } && PERF_FORMAT_GROUP * }; @@ -317,8 +319,9 @@ enum perf_event_read_format { PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, PERF_FORMAT_ID = 1U << 2, PERF_FORMAT_GROUP = 1U << 3, + PERF_FORMAT_LOST = 1U << 4, - PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ + PERF_FORMAT_MAX = 1U << 5, /* non-ABI */ }; #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ @@ -491,7 +494,7 @@ struct perf_event_query_bpf { /* * User provided buffer to store program ids */ - __u32 ids[0]; + __u32 ids[]; }; /* diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 9a2ee1e39fad..ffbe230ef90b 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -256,7 +256,7 @@ struct tc_u32_sel { short hoff; __be32 hmask; - struct tc_u32_key keys[0]; + struct tc_u32_key keys[]; }; struct tc_u32_mark { @@ -268,7 +268,7 @@ struct tc_u32_mark { struct tc_u32_pcnt { __u64 rcnt; __u64 rhit; - __u64 kcnts[0]; + __u64 kcnts[]; }; /* Flags */ diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index e5a98a16f9b0..6c0aa577730f 100644 --- a/include/uapi/linux/raid/md_p.h +++ b/include/uapi/linux/raid/md_p.h @@ -303,7 +303,7 @@ struct mdp_superblock_1 { * into the 'roles' value. If a device is spare or faulty, then it doesn't * have a meaningful role. 
*/ - __le16 dev_roles[0]; /* role in array, or 0xffff for a spare, or 0xfffe for faulty */ + __le16 dev_roles[]; /* role in array, or 0xffff for a spare, or 0xfffe for faulty */ }; /* feature_map bits */ diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h index dcc1b3e6106f..e744c23582eb 100644 --- a/include/uapi/linux/random.h +++ b/include/uapi/linux/random.h @@ -41,7 +41,7 @@ struct rand_pool_info { int entropy_count; int buf_size; - __u32 buf[0]; + __u32 buf[]; }; /* diff --git a/include/uapi/linux/romfs_fs.h b/include/uapi/linux/romfs_fs.h index a7f1585accef..6aa05e792454 100644 --- a/include/uapi/linux/romfs_fs.h +++ b/include/uapi/linux/romfs_fs.h @@ -27,7 +27,7 @@ struct romfs_super_block { __be32 word1; __be32 size; __be32 checksum; - char name[0]; /* volume name */ + char name[]; /* volume name */ }; /* On disk inode */ @@ -37,7 +37,7 @@ struct romfs_inode { __be32 spec; __be32 size; __be32 checksum; - char name[0]; + char name[]; }; #define ROMFH_TYPE 7 diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 83849a37db5b..eb2747d58a81 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -440,7 +440,7 @@ struct rtnexthop { /* RTA_VIA */ struct rtvia { __kernel_sa_family_t rtvia_family; - __u8 rtvia_addr[0]; + __u8 rtvia_addr[]; }; /* RTM_CACHEINFO */ diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index c4ff1ebd8bcc..ed7d4ecbf53d 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -365,7 +365,7 @@ struct sctp_assoc_change { __u16 sac_outbound_streams; __u16 sac_inbound_streams; sctp_assoc_t sac_assoc_id; - __u8 sac_info[0]; + __u8 sac_info[]; }; /* @@ -436,7 +436,7 @@ struct sctp_remote_error { __u32 sre_length; __be16 sre_error; sctp_assoc_t sre_assoc_id; - __u8 sre_data[0]; + __u8 sre_data[]; }; @@ -453,7 +453,7 @@ struct sctp_send_failed { __u32 ssf_error; struct sctp_sndrcvinfo ssf_info; sctp_assoc_t ssf_assoc_id; - __u8 ssf_data[0]; + __u8 ssf_data[]; }; struct sctp_send_failed_event { @@ -463,7 +463,7 @@ struct sctp_send_failed_event { __u32 ssf_error; struct sctp_sndinfo ssfe_info; sctp_assoc_t ssf_assoc_id; - __u8 ssf_data[0]; + __u8 ssf_data[]; }; /* @@ -1029,7 +1029,7 @@ struct sctp_getaddrs_old { struct sctp_getaddrs { sctp_assoc_t assoc_id; /*input*/ __u32 addr_num; /*output*/ - __u8 addrs[0]; /*output, variable size*/ + __u8 addrs[]; /*output, variable size*/ }; /* A socket user request obtained via SCTP_GET_ASSOC_STATS that retrieves diff --git a/include/uapi/linux/seg6.h b/include/uapi/linux/seg6.h index 286e8d6a8e98..13bcbc8bba32 100644 --- a/include/uapi/linux/seg6.h +++ b/include/uapi/linux/seg6.h @@ -30,7 +30,7 @@ struct ipv6_sr_hdr { __u8 flags; __u16 tag; - struct in6_addr segments[0]; + struct in6_addr segments[]; }; #define SR6_FLAG1_PROTECTED (1 << 6) diff --git a/include/uapi/linux/seg6_iptunnel.h b/include/uapi/linux/seg6_iptunnel.h index eb815e0d0ac3..a74294211290 100644 --- a/include/uapi/linux/seg6_iptunnel.h +++ b/include/uapi/linux/seg6_iptunnel.h @@ -26,7 +26,7 @@ enum { struct seg6_iptunnel_encap { int mode; - struct ipv6_sr_hdr srh[0]; + struct ipv6_sr_hdr srh[]; }; #define SEG6_IPTUN_ENCAP_SIZE(x) ((sizeof(*x)) + (((x)->srh->hdrlen + 1) << 3)) diff --git a/include/uapi/linux/stm.h b/include/uapi/linux/stm.h index 7bac318b4440..de3579c2cff0 100644 --- a/include/uapi/linux/stm.h +++ b/include/uapi/linux/stm.h @@ -36,7 +36,7 @@ struct stp_policy_id { /* padding */ __u16 __reserved_0; __u32 __reserved_1; - char id[0]; + char id[]; }; 
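
The sctp_getaddrs change above is another count-plus-payload structure; userspace passes a buffer large enough for the packed peer addresses. A hedged sketch of the socket-level usage, assuming the lksctp-style SCTP_GET_PEER_ADDRS option from linux/sctp.h and with error handling trimmed:

#include <stdlib.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/sctp.h>

/* Fetch the peer addresses of one association; addrs[] is a
 * variable-size output area packed with sockaddr structures. */
static struct sctp_getaddrs *get_peer_addrs(int sk, sctp_assoc_t id,
					    size_t bufsz)
{
	struct sctp_getaddrs *ga;
	socklen_t len = sizeof(*ga) + bufsz;

	ga = calloc(1, len);
	if (!ga)
		return NULL;
	ga->assoc_id = id;
	if (getsockopt(sk, IPPROTO_SCTP, SCTP_GET_PEER_ADDRS, ga, &len) < 0) {
		free(ga);
		return NULL;
	}
	return ga;	/* ga->addr_num entries follow in ga->addrs[] */
}
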
#define STP_POLICY_ID_SET _IOWR('%', 0, struct stp_policy_id) diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index 27ace512babd..fbd8ca67e107 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h @@ -152,7 +152,7 @@ struct tcmu_tmr_entry { __u32 cmd_cnt; __u64 __pad3; __u64 __pad4; - __u16 cmd_ids[0]; + __u16 cmd_ids[]; } __packed; #define TCMU_OP_ALIGN_SIZE sizeof(__u64) diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h index ac39328eabe7..bb8f80812b0b 100644 --- a/include/uapi/linux/tls.h +++ b/include/uapi/linux/tls.h @@ -39,7 +39,7 @@ /* TLS socket options */ #define TLS_TX 1 /* Set transmit parameters */ #define TLS_RX 2 /* Set receive parameters */ -#define TLS_TX_ZEROCOPY_SENDFILE 3 /* transmit zerocopy sendfile */ +#define TLS_TX_ZEROCOPY_RO 3 /* TX zerocopy (only sendfile now) */ /* Supported versions */ #define TLS_VERSION_MINOR(ver) ((ver) & 0xFF) @@ -161,7 +161,7 @@ enum { TLS_INFO_CIPHER, TLS_INFO_TXCONF, TLS_INFO_RXCONF, - TLS_INFO_ZC_SENDFILE, + TLS_INFO_ZC_RO_TX, __TLS_INFO_MAX, }; #define TLS_INFO_MAX (__TLS_INFO_MAX - 1) diff --git a/include/uapi/linux/tty.h b/include/uapi/linux/tty.h index 9d0f06bfbac3..68aeae2addec 100644 --- a/include/uapi/linux/tty.h +++ b/include/uapi/linux/tty.h @@ -38,8 +38,9 @@ #define N_NULL 27 /* Null ldisc used for error handling */ #define N_MCTP 28 /* MCTP-over-serial */ #define N_DEVELOPMENT 29 /* Manual out-of-tree testing */ +#define N_CAN327 30 /* ELM327 based OBD-II interfaces */ /* Always the newest line discipline + 1 */ -#define NR_LDISCS 30 +#define NR_LDISCS 31 #endif /* _UAPI_LINUX_TTY_H */ diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h new file mode 100644 index 000000000000..ca33092354ab --- /dev/null +++ b/include/uapi/linux/ublk_cmd.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef USER_BLK_DRV_CMD_INC_H +#define USER_BLK_DRV_CMD_INC_H + +#include <linux/types.h> + +/* ublk server command definition */ + +/* + * Admin commands, issued by ublk server, and handled by ublk driver. + */ +#define UBLK_CMD_GET_QUEUE_AFFINITY 0x01 +#define UBLK_CMD_GET_DEV_INFO 0x02 +#define UBLK_CMD_ADD_DEV 0x04 +#define UBLK_CMD_DEL_DEV 0x05 +#define UBLK_CMD_START_DEV 0x06 +#define UBLK_CMD_STOP_DEV 0x07 + +/* + * IO commands, issued by ublk server, and handled by ublk driver. + * + * FETCH_REQ: issued via sqe(URING_CMD) beforehand for fetching an IO request + * from the ublk driver, should be issued only when starting the device. After + * the associated cqe is returned, the request's tag can be retrieved via + * cqe->userdata. + * + * COMMIT_AND_FETCH_REQ: issued via sqe(URING_CMD) after the ublk server has + * handled this IO request; the request's handling result is committed to the + * ublk driver, meanwhile a FETCH_REQ is piggybacked, and that FETCH_REQ has to + * be handled before completing the io request. 
+ */ +#define UBLK_IO_FETCH_REQ 0x20 +#define UBLK_IO_COMMIT_AND_FETCH_REQ 0x21 + +/* only ABORT means no re-fetch */ +#define UBLK_IO_RES_OK 0 +#define UBLK_IO_RES_ABORT (-ENODEV) + +#define UBLKSRV_CMD_BUF_OFFSET 0 +#define UBLKSRV_IO_BUF_OFFSET 0x80000000 + +/* the tag field is 12 bits, so at most 4096 IOs for each queue */ +#define UBLK_MAX_QUEUE_DEPTH 4096 + +/* + * zero copy requires a 4k block size, and can remap the ublk driver's io + * request into ublksrv's vm space + */ +#define UBLK_F_SUPPORT_ZERO_COPY (1ULL << 0) + +/* + * Force to complete io cmd via io_uring_cmd_complete_in_task so that + * performance can easily be compared with task_work_add + */ +#define UBLK_F_URING_CMD_COMP_IN_TASK (1ULL << 1) + +/* device state */ +#define UBLK_S_DEV_DEAD 0 +#define UBLK_S_DEV_LIVE 1 + +/* shipped via sqe->cmd of io_uring command */ +struct ublksrv_ctrl_cmd { + /* sent to which device, must be valid */ + __u32 dev_id; + + /* sent to which queue, must be -1 if the cmd isn't for a queue */ + __u16 queue_id; + /* + * cmd specific buffer, can be IN or OUT. + */ + __u16 len; + __u64 addr; + + /* inline data */ + __u64 data[2]; +}; + +struct ublksrv_ctrl_dev_info { + __u16 nr_hw_queues; + __u16 queue_depth; + __u16 block_size; + __u16 state; + + __u32 rq_max_blocks; + __u32 dev_id; + + __u64 dev_blocks; + + __s32 ublksrv_pid; + __s32 reserved0; + __u64 flags; + __u64 flags_reserved; + + /* For ublksrv internal use, invisible to ublk driver */ + __u64 ublksrv_flags; + __u64 reserved1[9]; +}; + +#define UBLK_IO_OP_READ 0 +#define UBLK_IO_OP_WRITE 1 +#define UBLK_IO_OP_FLUSH 2 +#define UBLK_IO_OP_DISCARD 3 +#define UBLK_IO_OP_WRITE_SAME 4 +#define UBLK_IO_OP_WRITE_ZEROES 5 + +#define UBLK_IO_F_FAILFAST_DEV (1U << 8) +#define UBLK_IO_F_FAILFAST_TRANSPORT (1U << 9) +#define UBLK_IO_F_FAILFAST_DRIVER (1U << 10) +#define UBLK_IO_F_META (1U << 11) +#define UBLK_IO_F_FUA (1U << 13) +#define UBLK_IO_F_NOUNMAP (1U << 15) +#define UBLK_IO_F_SWAP (1U << 16) + +/* + * io cmd is described by this structure, and stored in shared memory, indexed + * by request tag. + * + * The data is stored by the ublk driver, and read by ublksrv after one fetch + * command returns. 
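
Once the descriptor layout and accessors defined just below are in hand, a ublk server's per-IO dispatch reduces to a switch on the op bits. A hedged sketch follows; handle_read() and handle_write() are hypothetical server callbacks, not part of this header, and a real server would perform actual I/O against its backing store:

#include <errno.h>
#include <linux/types.h>
#include <linux/ublk_cmd.h>

/* Stub handlers standing in for the server's backing-store I/O. */
static int handle_read(__u64 buf, __u64 sector, __u32 nr_sectors)  { return 0; }
static int handle_write(__u64 buf, __u64 sector, __u32 nr_sectors) { return 0; }

/* Dispatch one fetched descriptor by decoding op_flags (op: bits 0-7). */
static int dispatch_io(const struct ublksrv_io_desc *iod)
{
	switch (ublksrv_get_op(iod)) {
	case UBLK_IO_OP_READ:
		return handle_read(iod->addr, iod->start_sector, iod->nr_sectors);
	case UBLK_IO_OP_WRITE:
		return handle_write(iod->addr, iod->start_sector, iod->nr_sectors);
	case UBLK_IO_OP_FLUSH:
		return 0;	/* nothing cached in this toy example */
	default:
		return -EOPNOTSUPP;
	}
}
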
+ */ +struct ublksrv_io_desc { + /* op: bit 0-7, flags: bit 8-31 */ + __u32 op_flags; + + __u32 nr_sectors; + + /* start sector for this io */ + __u64 start_sector; + + /* buffer address in ublksrv daemon vm space, from ublk driver */ + __u64 addr; +}; + +static inline __u8 ublksrv_get_op(const struct ublksrv_io_desc *iod) +{ + return iod->op_flags & 0xff; +} + +static inline __u32 ublksrv_get_flags(const struct ublksrv_io_desc *iod) +{ + return iod->op_flags >> 8; +} + +/* issued to ublk driver via /dev/ublkcN */ +struct ublksrv_io_cmd { + __u16 q_id; + + /* for fetch/commit which result */ + __u16 tag; + + /* io result, it is valid for COMMIT* command only */ + __s32 result; + + /* + * userspace buffer address in ublksrv daemon process, valid for + * FETCH* command only + */ + __u64 addr; +}; + +#endif diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h index 76b7c3f6cd0d..c917c53070d5 100644 --- a/include/uapi/linux/usb/audio.h +++ b/include/uapi/linux/usb/audio.h @@ -341,7 +341,7 @@ struct uac_feature_unit_descriptor { __u8 bUnitID; __u8 bSourceID; __u8 bControlSize; - __u8 bmaControls[0]; /* variable length */ + __u8 bmaControls[]; /* variable length */ } __attribute__((packed)); static inline __u8 uac_feature_unit_iFeature(struct uac_feature_unit_descriptor *desc) diff --git a/include/uapi/linux/usb/cdc.h b/include/uapi/linux/usb/cdc.h index 6d61550959ef..acf3852bb676 100644 --- a/include/uapi/linux/usb/cdc.h +++ b/include/uapi/linux/usb/cdc.h @@ -171,7 +171,7 @@ struct usb_cdc_mdlm_detail_desc { /* type is associated with mdlm_desc.bGUID */ __u8 bGuidDescriptorType; - __u8 bDetailData[0]; + __u8 bDetailData[]; } __attribute__ ((packed)); /* "OBEX Control Model Functional Descriptor" */ @@ -379,7 +379,7 @@ struct usb_cdc_ncm_ndp16 { __le32 dwSignature; __le16 wLength; __le16 wNextNdpIndex; - struct usb_cdc_ncm_dpe16 dpe16[0]; + struct usb_cdc_ncm_dpe16 dpe16[]; } __attribute__ ((packed)); /* 32-bit NCM Datagram Pointer Entry */ @@ -395,7 +395,7 @@ struct usb_cdc_ncm_ndp32 { __le16 wReserved6; __le32 dwNextNdpIndex; __le32 dwReserved12; - struct usb_cdc_ncm_dpe32 dpe32[0]; + struct usb_cdc_ncm_dpe32 dpe32[]; } __attribute__ ((packed)); /* CDC NCM subclass 3.2.1 and 3.2.2 */ diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 17ce56198c9a..31fcfa084e63 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -818,7 +818,7 @@ struct usb_key_descriptor { __u8 tTKID[3]; __u8 bReserved; - __u8 bKeyData[0]; + __u8 bKeyData[]; } __attribute__((packed)); /*-------------------------------------------------------------------------*/ diff --git a/include/uapi/linux/usb/raw_gadget.h b/include/uapi/linux/usb/raw_gadget.h index 0be685272eb1..c7d2199134d7 100644 --- a/include/uapi/linux/usb/raw_gadget.h +++ b/include/uapi/linux/usb/raw_gadget.h @@ -60,7 +60,7 @@ enum usb_raw_event_type { struct usb_raw_event { __u32 type; __u32 length; - __u8 data[0]; + __u8 data[]; }; #define USB_RAW_IO_FLAGS_ZERO 0x0001 @@ -90,7 +90,7 @@ struct usb_raw_ep_io { __u16 ep; __u16 flags; __u32 length; - __u8 data[0]; + __u8 data[]; }; /* Maximum number of non-control endpoints in struct usb_raw_eps_info. */ diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h index cf525cddeb94..74a84e02422a 100644 --- a/include/uapi/linux/usbdevice_fs.h +++ b/include/uapi/linux/usbdevice_fs.h @@ -131,7 +131,7 @@ struct usbdevfs_urb { unsigned int signr; /* signal to be sent on completion, or 0 if none should be sent. 
diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h index cf525cddeb94..74a84e02422a 100644 --- a/include/uapi/linux/usbdevice_fs.h +++ b/include/uapi/linux/usbdevice_fs.h @@ -131,7 +131,7 @@ struct usbdevfs_urb { unsigned int signr; /* signal to be sent on completion, or 0 if none should be sent. */ void __user *usercontext; - struct usbdevfs_iso_packet_desc iso_frame_desc[0]; + struct usbdevfs_iso_packet_desc iso_frame_desc[]; }; /* ioctls for talking directly to drivers */ @@ -176,7 +176,7 @@ struct usbdevfs_disconnect_claim { struct usbdevfs_streams { unsigned int num_streams; /* Not used by USBDEVFS_FREE_STREAMS */ unsigned int num_eps; - unsigned char eps[0]; + unsigned char eps[]; }; /* diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h index 634cee485abb..391331a10879 100644 --- a/include/uapi/linux/vhost_types.h +++ b/include/uapi/linux/vhost_types.h @@ -107,7 +107,7 @@ struct vhost_memory_region { struct vhost_memory { __u32 nregions; __u32 padding; - struct vhost_memory_region regions[0]; + struct vhost_memory_region regions[]; }; /* VHOST_SCSI specific definitions */ @@ -135,7 +135,7 @@ struct vhost_scsi_target { struct vhost_vdpa_config { __u32 off; __u32 len; - __u8 buf[0]; + __u8 buf[]; }; /* vhost vdpa IOVA range diff --git a/include/uapi/linux/virtio_9p.h b/include/uapi/linux/virtio_9p.h index 441047432258..374b68f8ac6e 100644 --- a/include/uapi/linux/virtio_9p.h +++ b/include/uapi/linux/virtio_9p.h @@ -38,7 +38,7 @@ struct virtio_9p_config { /* length of the tag name */ __virtio16 tag_len; /* non-NULL terminated tag name */ - __u8 tag[0]; + __u8 tag[]; } __attribute__((packed)); #endif /* _LINUX_VIRTIO_9P_H */ diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index 65e13a099b1a..e8191e0c3b56 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -33,7 +33,7 @@ struct xfrm_sec_ctx { __u8 ctx_alg; __u16 ctx_len; __u32 ctx_sid; - char ctx_str[0]; + char ctx_str[]; }; /* Security Context Domains of Interpretation */ @@ -96,27 +96,27 @@ struct xfrm_replay_state_esn { __u32 oseq_hi; __u32 seq_hi; __u32 replay_window; - __u32 bmp[0]; + __u32 bmp[]; }; struct xfrm_algo { char alg_name[64]; unsigned int alg_key_len; /* in bits */ - char alg_key[0]; + char alg_key[]; }; struct xfrm_algo_auth { char alg_name[64]; unsigned int alg_key_len; /* in bits */ unsigned int alg_trunc_len; /* in bits */ - char alg_key[0]; + char alg_key[]; }; struct xfrm_algo_aead { char alg_name[64]; unsigned int alg_key_len; /* in bits */ unsigned int alg_icv_len; /* in bits */ - char alg_key[0]; + char alg_key[]; }; struct xfrm_stats { diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h index d95ef9a2b032..1106a7c90b29 100644 --- a/include/uapi/rdma/hfi/hfi1_user.h +++ b/include/uapi/rdma/hfi/hfi1_user.h @@ -180,7 +180,7 @@ struct hfi1_sdma_comp_entry { struct hfi1_status { __aligned_u64 dev; /* device/hw status bits */ __aligned_u64 port; /* port state and status bits */ - char freezemsg[0]; + char freezemsg[]; }; enum sdma_req_opcode { diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 7dd903d932e5..43672cb1fd57 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -158,18 +158,18 @@ struct ib_uverbs_ex_cmd_hdr { struct ib_uverbs_get_context { __aligned_u64 response; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_get_context_resp { __u32 async_fd; __u32 num_comp_vectors; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_query_device { __aligned_u64 response; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_query_device_resp { @@ -278,7 +278,7 @@ struct ib_uverbs_query_port { __aligned_u64 response; __u8 port_num; __u8
reserved[7]; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_query_port_resp { @@ -308,12 +308,12 @@ struct ib_uverbs_query_port_resp { struct ib_uverbs_alloc_pd { __aligned_u64 response; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_alloc_pd_resp { __u32 pd_handle; - __u32 driver_data[0]; + __u32 driver_data[]; }; struct ib_uverbs_dealloc_pd { @@ -324,12 +324,12 @@ struct ib_uverbs_open_xrcd { __aligned_u64 response; __u32 fd; __u32 oflags; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_open_xrcd_resp { __u32 xrcd_handle; - __u32 driver_data[0]; + __u32 driver_data[]; }; struct ib_uverbs_close_xrcd { @@ -343,14 +343,14 @@ struct ib_uverbs_reg_mr { __aligned_u64 hca_va; __u32 pd_handle; __u32 access_flags; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_reg_mr_resp { __u32 mr_handle; __u32 lkey; __u32 rkey; - __u32 driver_data[0]; + __u32 driver_data[]; }; struct ib_uverbs_rereg_mr { @@ -362,13 +362,13 @@ struct ib_uverbs_rereg_mr { __aligned_u64 hca_va; __u32 pd_handle; __u32 access_flags; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_rereg_mr_resp { __u32 lkey; __u32 rkey; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_dereg_mr { @@ -380,13 +380,13 @@ struct ib_uverbs_alloc_mw { __u32 pd_handle; __u8 mw_type; __u8 reserved[3]; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_alloc_mw_resp { __u32 mw_handle; __u32 rkey; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_dealloc_mw { @@ -408,7 +408,7 @@ struct ib_uverbs_create_cq { __u32 comp_vector; __s32 comp_channel; __u32 reserved; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; enum ib_uverbs_ex_create_cq_flags { @@ -442,13 +442,13 @@ struct ib_uverbs_resize_cq { __aligned_u64 response; __u32 cq_handle; __u32 cqe; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_resize_cq_resp { __u32 cqe; __u32 reserved; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_poll_cq { @@ -492,7 +492,7 @@ struct ib_uverbs_wc { struct ib_uverbs_poll_cq_resp { __u32 count; __u32 reserved; - struct ib_uverbs_wc wc[0]; + struct ib_uverbs_wc wc[]; }; struct ib_uverbs_req_notify_cq { @@ -585,7 +585,7 @@ struct ib_uverbs_create_qp { __u8 qp_type; __u8 is_srq; __u8 reserved; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; enum ib_uverbs_create_qp_mask { @@ -624,7 +624,7 @@ struct ib_uverbs_open_qp { __u32 qpn; __u8 qp_type; __u8 reserved[7]; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; /* also used for open response */ @@ -669,7 +669,7 @@ struct ib_uverbs_query_qp { __aligned_u64 response; __u32 qp_handle; __u32 attr_mask; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_query_qp_resp { @@ -703,7 +703,7 @@ struct ib_uverbs_query_qp_resp { __u8 alt_timeout; __u8 sq_sig_all; __u8 reserved[5]; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_modify_qp { @@ -824,7 +824,7 @@ struct ib_uverbs_post_send { __u32 wr_count; __u32 sge_count; __u32 wqe_size; - struct ib_uverbs_send_wr send_wr[0]; + struct ib_uverbs_send_wr send_wr[]; }; struct ib_uverbs_post_send_resp { @@ -843,7 +843,7 @@ struct ib_uverbs_post_recv { __u32 wr_count; __u32 sge_count; __u32 wqe_size; - struct ib_uverbs_recv_wr recv_wr[0]; + 
struct ib_uverbs_recv_wr recv_wr[]; }; struct ib_uverbs_post_recv_resp { @@ -856,7 +856,7 @@ struct ib_uverbs_post_srq_recv { __u32 wr_count; __u32 sge_count; __u32 wqe_size; - struct ib_uverbs_recv_wr recv[0]; + struct ib_uverbs_recv_wr recv[]; }; struct ib_uverbs_post_srq_recv_resp { @@ -869,12 +869,12 @@ struct ib_uverbs_create_ah { __u32 pd_handle; __u32 reserved; struct ib_uverbs_ah_attr attr; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_create_ah_resp { __u32 ah_handle; - __u32 driver_data[0]; + __u32 driver_data[]; }; struct ib_uverbs_destroy_ah { @@ -886,7 +886,7 @@ struct ib_uverbs_attach_mcast { __u32 qp_handle; __u16 mlid; __u16 reserved; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_detach_mcast { @@ -894,7 +894,7 @@ struct ib_uverbs_detach_mcast { __u32 qp_handle; __u16 mlid; __u16 reserved; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_flow_spec_hdr { @@ -1135,7 +1135,7 @@ struct ib_uverbs_flow_attr { * struct ib_flow_spec_xxx * struct ib_flow_spec_yyy */ - struct ib_uverbs_flow_spec_hdr flow_specs[0]; + struct ib_uverbs_flow_spec_hdr flow_specs[]; }; struct ib_uverbs_create_flow { @@ -1161,7 +1161,7 @@ struct ib_uverbs_create_srq { __u32 max_wr; __u32 max_sge; __u32 srq_limit; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_create_xsrq { @@ -1175,7 +1175,7 @@ struct ib_uverbs_create_xsrq { __u32 max_num_tags; __u32 xrcd_handle; __u32 cq_handle; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_create_srq_resp { @@ -1183,7 +1183,7 @@ struct ib_uverbs_create_srq_resp { __u32 max_wr; __u32 max_sge; __u32 srqn; - __u32 driver_data[0]; + __u32 driver_data[]; }; struct ib_uverbs_modify_srq { @@ -1191,14 +1191,14 @@ struct ib_uverbs_modify_srq { __u32 attr_mask; __u32 max_wr; __u32 srq_limit; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_query_srq { __aligned_u64 response; __u32 srq_handle; __u32 reserved; - __aligned_u64 driver_data[0]; + __aligned_u64 driver_data[]; }; struct ib_uverbs_query_srq_resp { @@ -1269,7 +1269,7 @@ struct ib_uverbs_ex_create_rwq_ind_table { * wq_handle1 * wq_handle2 */ - __u32 wq_handles[0]; + __u32 wq_handles[]; }; struct ib_uverbs_ex_create_rwq_ind_table_resp { diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h index ed5a514305c1..7cea03581f79 100644 --- a/include/uapi/rdma/rdma_user_cm.h +++ b/include/uapi/rdma/rdma_user_cm.h @@ -184,7 +184,7 @@ struct rdma_ucm_query_addr_resp { struct rdma_ucm_query_path_resp { __u32 num_paths; __u32 reserved; - struct ib_path_rec_data path_data[0]; + struct ib_path_rec_data path_data[]; }; struct rdma_ucm_conn_param { diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h index 38ab7accb7be..ab1aef17feb1 100644 --- a/include/uapi/rdma/rdma_user_ioctl_cmds.h +++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h @@ -81,7 +81,7 @@ struct ib_uverbs_ioctl_hdr { __aligned_u64 reserved1; __u32 driver_id; __u32 reserved2; - struct ib_uverbs_attr attrs[0]; + struct ib_uverbs_attr attrs[]; }; #endif diff --git a/include/uapi/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h index c9812c5c2fc4..16782c360de3 100644 --- a/include/uapi/scsi/fc/fc_els.h +++ b/include/uapi/scsi/fc/fc_els.h @@ -264,7 +264,7 @@ struct fc_tlv_desc { * Size of descriptor excluding * desc_tag and desc_len fields. 
 */ - __u8 desc_value[0]; /* Descriptor Value */ + __u8 desc_value[]; /* Descriptor Value */ }; /* Descriptor tag and len fields are considered the mandatory header @@ -1027,7 +1027,7 @@ struct fc_fn_li_desc { * threshold to cause the LI event */ __be32 pname_count; /* number of portname_list elements */ - __be64 pname_list[0]; /* list of N_Port_Names accessible + __be64 pname_list[]; /* list of N_Port_Names accessible * through the attached port */ }; @@ -1069,7 +1069,7 @@ struct fc_fn_peer_congn_desc { * congestion event */ __be32 pname_count; /* number of portname_list elements */ - __be64 pname_list[0]; /* list of N_Port_Names accessible + __be64 pname_list[]; /* list of N_Port_Names accessible * through the attached port */ }; @@ -1104,7 +1104,7 @@ struct fc_els_fpin { * Size of ELS excluding fpin_cmd, * fpin_zero and desc_len fields. */ - struct fc_tlv_desc fpin_desc[0]; /* Descriptor list */ + struct fc_tlv_desc fpin_desc[]; /* Descriptor list */ }; /* Diagnostic Function Descriptor - FPIN Registration */ @@ -1115,7 +1115,7 @@ struct fc_df_desc_fpin_reg { * desc_tag and desc_len fields. */ __be32 count; /* Number of desc_tags elements */ - __be32 desc_tags[0]; /* Array of Descriptor Tags. + __be32 desc_tags[]; /* Array of Descriptor Tags. * Each tag indicates a function * supported by the N_Port (request) * or by the N_Port and Fabric @@ -1135,7 +1135,7 @@ struct fc_els_rdf { * Size of ELS excluding fpin_cmd, * fpin_zero and desc_len fields. */ - struct fc_tlv_desc desc[0]; /* Descriptor list */ + struct fc_tlv_desc desc[]; /* Descriptor list */ }; /* @@ -1148,7 +1148,7 @@ struct fc_els_rdf_resp { * and desc_list_len fields. */ struct fc_els_lsri_desc lsri; - struct fc_tlv_desc desc[0]; /* Supported Descriptor list */ + struct fc_tlv_desc desc[]; /* Supported Descriptor list */ }; @@ -1231,7 +1231,7 @@ struct fc_els_edc { * Size of ELS excluding edc_cmd, * edc_zero and desc_len fields. */ - struct fc_tlv_desc desc[0]; + struct fc_tlv_desc desc[]; /* Diagnostic Descriptor list */ };
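The FPIN and EDC payloads above are self-describing TLV lists: each struct fc_tlv_desc begins with big-endian desc_tag and desc_len words — desc_len, per the comments, excluding the tag and len fields themselves — followed by the flexible desc_value[]. A hedged sketch of how a consumer might walk such a list (the helper is hypothetical and assumes the caller has already bounded list_len):

/* Hypothetical sketch: iterate a TLV descriptor list such as fpin_desc[]. */
#include <linux/types.h>
#include <linux/printk.h>
#include <asm/byteorder.h>
#include <scsi/fc/fc_els.h>

static void walk_tlv_list(const struct fc_tlv_desc *tlv, u32 list_len)
{
	/* sizeof(*tlv) is just the desc_tag + desc_len header */
	while (list_len >= sizeof(*tlv)) {
		u32 payload = be32_to_cpu(tlv->desc_len);
		u32 len = sizeof(*tlv) + payload;

		if (len > list_len)
			break;		/* truncated or malformed descriptor */
		pr_info("TLV tag 0x%x, payload %u bytes\n",
			be32_to_cpu(tlv->desc_tag), payload);
		tlv = (const struct fc_tlv_desc *)((const u8 *)tlv + len);
		list_len -= len;
	}
}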
@@ -1245,7 +1245,7 @@ struct fc_els_edc_resp { * and desc_list_len fields. */ struct fc_els_lsri_desc lsri; - struct fc_tlv_desc desc[0]; + struct fc_tlv_desc desc[]; /* Supported Diagnostic Descriptor list */ }; diff --git a/include/uapi/scsi/scsi_bsg_fc.h b/include/uapi/scsi/scsi_bsg_fc.h index 3ae65e93235c..7f5930801f72 100644 --- a/include/uapi/scsi/scsi_bsg_fc.h +++ b/include/uapi/scsi/scsi_bsg_fc.h @@ -209,7 +209,7 @@ struct fc_bsg_host_vendor { __u64 vendor_id; /* start of vendor command area */ - __u32 vendor_cmd[0]; + __u32 vendor_cmd[]; }; /* Response: diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h index 2d3e5df39a59..3974a2a911cc 100644 --- a/include/uapi/sound/asound.h +++ b/include/uapi/sound/asound.h @@ -1106,7 +1106,7 @@ struct snd_ctl_elem_value { struct snd_ctl_tlv { unsigned int numid; /* control element numeric identification */ unsigned int length; /* in bytes aligned to 4 */ - unsigned int tlv[0]; /* first TLV */ + unsigned int tlv[]; /* first TLV */ }; #define SNDRV_CTL_IOCTL_PVERSION _IOR('U', 0x00, int) diff --git a/include/uapi/sound/firewire.h b/include/uapi/sound/firewire.h index 39cf6eb75940..3532ac7046d7 100644 --- a/include/uapi/sound/firewire.h +++ b/include/uapi/sound/firewire.h @@ -38,11 +38,11 @@ struct snd_efw_transaction { __be32 category; __be32 command; __be32 status; - __be32 params[0]; + __be32 params[]; }; struct snd_firewire_event_efw_response { unsigned int type; - __be32 response[0]; /* some responses */ + __be32 response[]; /* some responses */ }; struct snd_firewire_event_digi00x_message { @@ -63,7 +63,7 @@ struct snd_firewire_tascam_change { struct snd_firewire_event_tascam_control { unsigned int type; - struct snd_firewire_tascam_change changes[0]; + struct snd_firewire_tascam_change changes[]; }; struct snd_firewire_event_motu_register_dsp_change { diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h index a93c0decfdd5..f29899b179a6 100644 --- a/include/uapi/sound/skl-tplg-interface.h +++ b/include/uapi/sound/skl-tplg-interface.h @@ -151,7 +151,7 @@ struct skl_dfw_algo_data { __u32 rsvd:30; __u32 param_id; __u32 max; - char params[0]; + char params[]; } __packed; enum skl_tkn_dir { diff --git a/include/uapi/sound/sof/header.h b/include/uapi/sound/sof/header.h index 5f4518e7a972..dbf137516522 100644 --- a/include/uapi/sound/sof/header.h +++ b/include/uapi/sound/sof/header.h @@ -23,7 +23,7 @@ struct sof_abi_hdr { __u32 size; /**< size in bytes of data excl.
this struct */ __u32 abi; /**< SOF ABI version */ __u32 reserved[4]; /**< reserved for future use */ - __u32 data[0]; /**< Component data - opaque to core */ + __u32 data[]; /**< Component data - opaque to core */ } __packed; #endif diff --git a/include/uapi/sound/usb_stream.h b/include/uapi/sound/usb_stream.h index 95419d8bbc16..ffdd3ea1e31d 100644 --- a/include/uapi/sound/usb_stream.h +++ b/include/uapi/sound/usb_stream.h @@ -61,7 +61,7 @@ struct usb_stream { unsigned inpacket_split_at; unsigned next_inpacket_split; unsigned next_inpacket_split_at; - struct usb_stream_packet inpacket[0]; + struct usb_stream_packet inpacket[]; }; enum usb_stream_state { diff --git a/include/video/of_display_timing.h b/include/video/of_display_timing.h index e1126a74882a..eff166fdd81b 100644 --- a/include/video/of_display_timing.h +++ b/include/video/of_display_timing.h @@ -8,6 +8,8 @@ #ifndef __LINUX_OF_DISPLAY_TIMING_H #define __LINUX_OF_DISPLAY_TIMING_H +#include <linux/errno.h> + struct device_node; struct display_timing; struct display_timings; diff --git a/include/xen/arm/xen-ops.h b/include/xen/arm/xen-ops.h new file mode 100644 index 000000000000..b0766a660338 --- /dev/null +++ b/include/xen/arm/xen-ops.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARM_XEN_OPS_H +#define _ASM_ARM_XEN_OPS_H + +#include <xen/swiotlb-xen.h> +#include <xen/xen-ops.h> + +static inline void xen_setup_dma_ops(struct device *dev) +{ +#ifdef CONFIG_XEN + if (xen_is_grant_dma_device(dev)) + xen_grant_setup_dma_ops(dev); + else if (xen_swiotlb_detect()) + dev->dma_ops = &xen_swiotlb_dma_ops; +#endif +} + +#endif /* _ASM_ARM_XEN_OPS_H */ diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 527c9907f99c..e279be353e3f 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -127,10 +127,14 @@ int gnttab_try_end_foreign_access(grant_ref_t ref); */ int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); +int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first); + void gnttab_free_grant_reference(grant_ref_t ref); void gnttab_free_grant_references(grant_ref_t head); +void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count); + int gnttab_empty_grant_references(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index c7c1b46ff4cd..80546960f8b7 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -214,4 +214,17 @@ static inline void xen_preemptible_hcall_end(void) { } #endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */ +#ifdef CONFIG_XEN_GRANT_DMA_OPS +void xen_grant_setup_dma_ops(struct device *dev); +bool xen_is_grant_dma_device(struct device *dev); +#else +static inline void xen_grant_setup_dma_ops(struct device *dev) +{ +} +static inline bool xen_is_grant_dma_device(struct device *dev) +{ + return false; +} +#endif /* CONFIG_XEN_GRANT_DMA_OPS */ + #endif /* INCLUDE_XEN_OPS_H */ diff --git a/include/xen/xen.h b/include/xen/xen.h index a99bab817523..0780a81e140d 100644 --- a/include/xen/xen.h +++ b/include/xen/xen.h @@ -52,6 +52,14 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, extern u64 xen_saved_max_mem_size; #endif +#include <linux/platform-feature.h> + +static inline void xen_set_restricted_virtio_memory_access(void) +{ + if (IS_ENABLED(CONFIG_XEN_VIRTIO) && xen_domain()) + platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS); +} + #ifdef CONFIG_XEN_UNPOPULATED_ALLOC int 
xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages); void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
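Tying the Xen additions together: gnttab_alloc_grant_reference_seq() and gnttab_free_grant_reference_seq() hand out a consecutive run of grant references, which the new grant-DMA ops can use to refer to a multi-page buffer as a single base-plus-offset range. A hedged sketch of how a driver might use the pair — the helper is hypothetical, the 0-on-success return convention is an assumption, and gnttab_grant_foreign_access_ref() and xen_page_to_gfn() are existing Xen helpers:

/* Hypothetical sketch: grant a run of pages with consecutive refs. */
#include <xen/grant_table.h>
#include <xen/page.h>

static int grant_pages_seq(domid_t otherend, struct page **pages,
			   unsigned int nr, grant_ref_t *first)
{
	unsigned int i;
	int err;

	err = gnttab_alloc_grant_reference_seq(nr, first); /* assumed 0 on success */
	if (err < 0)
		return err;

	/* refs are consecutive: *first, *first + 1, ..., *first + nr - 1 */
	for (i = 0; i < nr; i++)
		gnttab_grant_foreign_access_ref(*first + i, otherend,
						xen_page_to_gfn(pages[i]),
						0 /* read-write */);
	return 0;
}

Teardown ends foreign access on each reference and then calls gnttab_free_grant_reference_seq(*first, nr).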