Diffstat (limited to 'include')
127 files changed, 3546 insertions, 1331 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 9ed8b987185b..3f38eb03649c 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -223,6 +223,7 @@ static inline void atomic_dec(atomic_t *v) #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) +#ifndef __atomic_add_unless static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; @@ -231,5 +232,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) c = old; return c; } +#endif #endif /* __ASM_GENERIC_ATOMIC_H */ diff --git a/include/asm-generic/kprobes.h b/include/asm-generic/kprobes.h new file mode 100644 index 000000000000..57af9f21d148 --- /dev/null +++ b/include/asm-generic/kprobes.h @@ -0,0 +1,25 @@ +#ifndef _ASM_GENERIC_KPROBES_H +#define _ASM_GENERIC_KPROBES_H + +#if defined(__KERNEL__) && !defined(__ASSEMBLY__) +#ifdef CONFIG_KPROBES +/* + * Blacklist ganerating macro. Specify functions which is not probed + * by using this macro. + */ +# define __NOKPROBE_SYMBOL(fname) \ +static unsigned long __used \ + __attribute__((__section__("_kprobe_blacklist"))) \ + _kbl_addr_##fname = (unsigned long)fname; +# define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname) +/* Use this to forbid a kprobes attach on very low level functions */ +# define __kprobes __attribute__((__section__(".kprobes.text"))) +# define nokprobe_inline __always_inline +#else +# define NOKPROBE_SYMBOL(fname) +# define __kprobes +# define nokprobe_inline inline +#endif +#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ + +#endif /* _ASM_GENERIC_KPROBES_H */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 18af2bcefe6a..f4ca23b158b3 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -36,6 +36,9 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, extern int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty); +extern int pudp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, + pud_t entry, int dirty); #else static inline int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, @@ -44,6 +47,13 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma, BUILD_BUG(); return 0; } +static inline int pudp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, + pud_t entry, int dirty) +{ + BUILD_BUG(); + return 0; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif @@ -121,8 +131,8 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, } #endif -#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR #ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) @@ -131,20 +141,40 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, pmd_clear(pmdp); return pmd; } +#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */ +#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR +static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, + unsigned long address, + pud_t *pudp) +{ + pud_t pud = *pudp; + + pud_clear(pudp); + return pud; +} +#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -#endif -#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL #ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifndef 
__HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, unsigned long address, pmd_t *pmdp, int full) { return pmdp_huge_get_and_clear(mm, address, pmdp); } -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif +#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL +static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm, + unsigned long address, pud_t *pudp, + int full) +{ + return pudp_huge_get_and_clear(mm, address, pudp); +} +#endif +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long address, pte_t *ptep, @@ -181,6 +211,9 @@ extern pte_t ptep_clear_flush(struct vm_area_struct *vma, extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); +extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pud_t *pudp); #endif #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT @@ -192,6 +225,30 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres } #endif +#ifndef pte_savedwrite +#define pte_savedwrite pte_write +#endif + +#ifndef pte_mk_savedwrite +#define pte_mk_savedwrite pte_mkwrite +#endif + +#ifndef pte_clear_savedwrite +#define pte_clear_savedwrite pte_wrprotect +#endif + +#ifndef pmd_savedwrite +#define pmd_savedwrite pmd_write +#endif + +#ifndef pmd_mk_savedwrite +#define pmd_mk_savedwrite pmd_mkwrite +#endif + +#ifndef pmd_clear_savedwrite +#define pmd_clear_savedwrite pmd_wrprotect +#endif + #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void pmdp_set_wrprotect(struct mm_struct *mm, @@ -208,6 +265,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif +#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline void pudp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pud_t *pudp) +{ + pud_t old_pud = *pudp; + + set_pud_at(mm, address, pudp, pud_wrprotect(old_pud)); +} +#else +static inline void pudp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pud_t *pudp) +{ + BUILD_BUG(); +} +#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ +#endif #ifndef pmdp_collapse_flush #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -273,12 +347,23 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) { return pmd_val(pmd_a) == pmd_val(pmd_b); } + +static inline int pud_same(pud_t pud_a, pud_t pud_b) +{ + return pud_val(pud_a) == pud_val(pud_b); +} #else /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) { BUILD_BUG(); return 0; } + +static inline int pud_same(pud_t pud_a, pud_t pud_b) +{ + BUILD_BUG(); + return 0; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif @@ -640,6 +725,15 @@ static inline int pmd_write(pmd_t pmd) #endif /* __HAVE_ARCH_PMD_WRITE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \ + (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) +static inline int pud_trans_huge(pud_t pud) +{ + return 0; +} +#endif + #ifndef pmd_read_atomic static inline pmd_t pmd_read_atomic(pmd_t *pmdp) { @@ -785,8 +879,10 @@ static inline int pmd_clear_huge(pmd_t *pmd) * e.g. 
see arch/arc: flush_pmd_tlb_range */ #define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) +#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) #else #define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG() +#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG() #endif #endif diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 7eed8cf3130a..4329bc6ef04b 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -232,6 +232,20 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ } while (0) +/** + * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb + * invalidation. This is a nop so far, because only x86 needs it. + */ +#ifndef __tlb_remove_pud_tlb_entry +#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0) +#endif + +#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \ + do { \ + __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \ + __tlb_remove_pud_tlb_entry(tlb, pudp, address); \ + } while (0) + /* * For things like page tables caches (ie caching addresses "inside" the * page tables, like x86 does), for legacy reasons, flushing an diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h index 360e00cefd35..a0c812b0fa39 100644 --- a/include/dt-bindings/clock/bcm2835.h +++ b/include/dt-bindings/clock/bcm2835.h @@ -64,3 +64,5 @@ #define BCM2835_CLOCK_CAM1 46 #define BCM2835_CLOCK_DSI0E 47 #define BCM2835_CLOCK_DSI1E 48 +#define BCM2835_CLOCK_DSI0P 49 +#define BCM2835_CLOCK_DSI1P 50 diff --git a/include/dt-bindings/clock/exynos4415.h b/include/dt-bindings/clock/exynos4415.h deleted file mode 100644 index 7eed55100721..000000000000 --- a/include/dt-bindings/clock/exynos4415.h +++ /dev/null @@ -1,360 +0,0 @@ -/* - * Copyright (c) 2014 Samsung Electronics Co., Ltd. - * Author: Chanwoo Choi <cw00.choi@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Device Tree binding constants for Samsung Exynos4415 clock controllers. - */ - -#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H -#define _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H - -/* - * Let each exported clock get a unique index, which is used on DT-enabled - * platforms to lookup the clock from a clock specifier. These indices are - * therefore considered an ABI and so must not be changed. This implies - * that new clocks should be added either in free spaces between clock groups - * or at the end. 
- */ - -/* - * Main CMU - */ - -#define CLK_OSCSEL 1 -#define CLK_FIN_PLL 2 -#define CLK_FOUT_APLL 3 -#define CLK_FOUT_MPLL 4 -#define CLK_FOUT_EPLL 5 -#define CLK_FOUT_G3D_PLL 6 -#define CLK_FOUT_ISP_PLL 7 -#define CLK_FOUT_DISP_PLL 8 - -/* Muxes */ -#define CLK_MOUT_MPLL_USER_L 16 -#define CLK_MOUT_GDL 17 -#define CLK_MOUT_MPLL_USER_R 18 -#define CLK_MOUT_GDR 19 -#define CLK_MOUT_EBI 20 -#define CLK_MOUT_ACLK_200 21 -#define CLK_MOUT_ACLK_160 22 -#define CLK_MOUT_ACLK_100 23 -#define CLK_MOUT_ACLK_266 24 -#define CLK_MOUT_G3D_PLL 25 -#define CLK_MOUT_EPLL 26 -#define CLK_MOUT_EBI_1 27 -#define CLK_MOUT_ISP_PLL 28 -#define CLK_MOUT_DISP_PLL 29 -#define CLK_MOUT_MPLL_USER_T 30 -#define CLK_MOUT_ACLK_400_MCUISP 31 -#define CLK_MOUT_G3D_PLLSRC 32 -#define CLK_MOUT_CSIS1 33 -#define CLK_MOUT_CSIS0 34 -#define CLK_MOUT_CAM1 35 -#define CLK_MOUT_FIMC3_LCLK 36 -#define CLK_MOUT_FIMC2_LCLK 37 -#define CLK_MOUT_FIMC1_LCLK 38 -#define CLK_MOUT_FIMC0_LCLK 39 -#define CLK_MOUT_MFC 40 -#define CLK_MOUT_MFC_1 41 -#define CLK_MOUT_MFC_0 42 -#define CLK_MOUT_G3D 43 -#define CLK_MOUT_G3D_1 44 -#define CLK_MOUT_G3D_0 45 -#define CLK_MOUT_MIPI0 46 -#define CLK_MOUT_FIMD0 47 -#define CLK_MOUT_TSADC_ISP 48 -#define CLK_MOUT_UART_ISP 49 -#define CLK_MOUT_SPI1_ISP 50 -#define CLK_MOUT_SPI0_ISP 51 -#define CLK_MOUT_PWM_ISP 52 -#define CLK_MOUT_AUDIO0 53 -#define CLK_MOUT_TSADC 54 -#define CLK_MOUT_MMC2 55 -#define CLK_MOUT_MMC1 56 -#define CLK_MOUT_MMC0 57 -#define CLK_MOUT_UART3 58 -#define CLK_MOUT_UART2 59 -#define CLK_MOUT_UART1 60 -#define CLK_MOUT_UART0 61 -#define CLK_MOUT_SPI2 62 -#define CLK_MOUT_SPI1 63 -#define CLK_MOUT_SPI0 64 -#define CLK_MOUT_SPDIF 65 -#define CLK_MOUT_AUDIO2 66 -#define CLK_MOUT_AUDIO1 67 -#define CLK_MOUT_MPLL_USER_C 68 -#define CLK_MOUT_HPM 69 -#define CLK_MOUT_CORE 70 -#define CLK_MOUT_APLL 71 -#define CLK_MOUT_PXLASYNC_CSIS1_FIMC 72 -#define CLK_MOUT_PXLASYNC_CSIS0_FIMC 73 -#define CLK_MOUT_JPEG 74 -#define CLK_MOUT_JPEG1 75 -#define CLK_MOUT_JPEG0 76 -#define CLK_MOUT_ACLK_ISP0_300 77 -#define CLK_MOUT_ACLK_ISP0_400 78 -#define CLK_MOUT_ACLK_ISP0_300_USER 79 -#define CLK_MOUT_ACLK_ISP1_300 80 -#define CLK_MOUT_ACLK_ISP1_300_USER 81 -#define CLK_MOUT_HDMI 82 - -/* Dividers */ -#define CLK_DIV_GPL 90 -#define CLK_DIV_GDL 91 -#define CLK_DIV_GPR 92 -#define CLK_DIV_GDR 93 -#define CLK_DIV_ACLK_400_MCUISP 94 -#define CLK_DIV_EBI 95 -#define CLK_DIV_ACLK_200 96 -#define CLK_DIV_ACLK_160 97 -#define CLK_DIV_ACLK_100 98 -#define CLK_DIV_ACLK_266 99 -#define CLK_DIV_CSIS1 100 -#define CLK_DIV_CSIS0 101 -#define CLK_DIV_CAM1 102 -#define CLK_DIV_FIMC3_LCLK 103 -#define CLK_DIV_FIMC2_LCLK 104 -#define CLK_DIV_FIMC1_LCLK 105 -#define CLK_DIV_FIMC0_LCLK 106 -#define CLK_DIV_TV_BLK 107 -#define CLK_DIV_MFC 108 -#define CLK_DIV_G3D 109 -#define CLK_DIV_MIPI0_PRE 110 -#define CLK_DIV_MIPI0 111 -#define CLK_DIV_FIMD0 112 -#define CLK_DIV_UART_ISP 113 -#define CLK_DIV_SPI1_ISP_PRE 114 -#define CLK_DIV_SPI1_ISP 115 -#define CLK_DIV_SPI0_ISP_PRE 116 -#define CLK_DIV_SPI0_ISP 117 -#define CLK_DIV_PWM_ISP 118 -#define CLK_DIV_PCM0 119 -#define CLK_DIV_AUDIO0 120 -#define CLK_DIV_TSADC_PRE 121 -#define CLK_DIV_TSADC 122 -#define CLK_DIV_MMC1_PRE 123 -#define CLK_DIV_MMC1 124 -#define CLK_DIV_MMC0_PRE 125 -#define CLK_DIV_MMC0 126 -#define CLK_DIV_MMC2_PRE 127 -#define CLK_DIV_MMC2 128 -#define CLK_DIV_UART3 129 -#define CLK_DIV_UART2 130 -#define CLK_DIV_UART1 131 -#define CLK_DIV_UART0 132 -#define CLK_DIV_SPI1_PRE 133 -#define CLK_DIV_SPI1 134 -#define CLK_DIV_SPI0_PRE 135 -#define CLK_DIV_SPI0 
136 -#define CLK_DIV_SPI2_PRE 137 -#define CLK_DIV_SPI2 138 -#define CLK_DIV_PCM2 139 -#define CLK_DIV_AUDIO2 140 -#define CLK_DIV_PCM1 141 -#define CLK_DIV_AUDIO1 142 -#define CLK_DIV_I2S1 143 -#define CLK_DIV_PXLASYNC_CSIS1_FIMC 144 -#define CLK_DIV_PXLASYNC_CSIS0_FIMC 145 -#define CLK_DIV_JPEG 146 -#define CLK_DIV_CORE2 147 -#define CLK_DIV_APLL 148 -#define CLK_DIV_PCLK_DBG 149 -#define CLK_DIV_ATB 150 -#define CLK_DIV_PERIPH 151 -#define CLK_DIV_COREM1 152 -#define CLK_DIV_COREM0 153 -#define CLK_DIV_CORE 154 -#define CLK_DIV_HPM 155 -#define CLK_DIV_COPY 156 - -/* Gates */ -#define CLK_ASYNC_G3D 180 -#define CLK_ASYNC_MFCL 181 -#define CLK_ASYNC_TVX 182 -#define CLK_PPMULEFT 183 -#define CLK_GPIO_LEFT 184 -#define CLK_PPMUIMAGE 185 -#define CLK_QEMDMA2 186 -#define CLK_QEROTATOR 187 -#define CLK_SMMUMDMA2 188 -#define CLK_SMMUROTATOR 189 -#define CLK_MDMA2 190 -#define CLK_ROTATOR 191 -#define CLK_ASYNC_ISPMX 192 -#define CLK_ASYNC_MAUDIOX 193 -#define CLK_ASYNC_MFCR 194 -#define CLK_ASYNC_FSYSD 195 -#define CLK_ASYNC_LCD0X 196 -#define CLK_ASYNC_CAMX 197 -#define CLK_PPMURIGHT 198 -#define CLK_GPIO_RIGHT 199 -#define CLK_ANTIRBK_APBIF 200 -#define CLK_EFUSE_WRITER_APBIF 201 -#define CLK_MONOCNT 202 -#define CLK_TZPC6 203 -#define CLK_PROVISIONKEY1 204 -#define CLK_PROVISIONKEY0 205 -#define CLK_CMU_ISPPART 206 -#define CLK_TMU_APBIF 207 -#define CLK_KEYIF 208 -#define CLK_RTC 209 -#define CLK_WDT 210 -#define CLK_MCT 211 -#define CLK_SECKEY 212 -#define CLK_HDMI_CEC 213 -#define CLK_TZPC5 214 -#define CLK_TZPC4 215 -#define CLK_TZPC3 216 -#define CLK_TZPC2 217 -#define CLK_TZPC1 218 -#define CLK_TZPC0 219 -#define CLK_CMU_COREPART 220 -#define CLK_CMU_TOPPART 221 -#define CLK_PMU_APBIF 222 -#define CLK_SYSREG 223 -#define CLK_CHIP_ID 224 -#define CLK_SMMUFIMC_LITE2 225 -#define CLK_FIMC_LITE2 226 -#define CLK_PIXELASYNCM1 227 -#define CLK_PIXELASYNCM0 228 -#define CLK_PPMUCAMIF 229 -#define CLK_SMMUJPEG 230 -#define CLK_SMMUFIMC3 231 -#define CLK_SMMUFIMC2 232 -#define CLK_SMMUFIMC1 233 -#define CLK_SMMUFIMC0 234 -#define CLK_JPEG 235 -#define CLK_CSIS1 236 -#define CLK_CSIS0 237 -#define CLK_FIMC3 238 -#define CLK_FIMC2 239 -#define CLK_FIMC1 240 -#define CLK_FIMC0 241 -#define CLK_PPMUTV 242 -#define CLK_SMMUTV 243 -#define CLK_HDMI 244 -#define CLK_MIXER 245 -#define CLK_VP 246 -#define CLK_PPMUMFC_R 247 -#define CLK_PPMUMFC_L 248 -#define CLK_SMMUMFC_R 249 -#define CLK_SMMUMFC_L 250 -#define CLK_MFC 251 -#define CLK_PPMUG3D 252 -#define CLK_G3D 253 -#define CLK_PPMULCD0 254 -#define CLK_SMMUFIMD0 255 -#define CLK_DSIM0 256 -#define CLK_SMIES 257 -#define CLK_MIE0 258 -#define CLK_FIMD0 259 -#define CLK_TSADC 260 -#define CLK_PPMUFILE 261 -#define CLK_NFCON 262 -#define CLK_USBDEVICE 263 -#define CLK_USBHOST 264 -#define CLK_SROMC 265 -#define CLK_SDMMC2 266 -#define CLK_SDMMC1 267 -#define CLK_SDMMC0 268 -#define CLK_PDMA1 269 -#define CLK_PDMA0 270 -#define CLK_SPDIF 271 -#define CLK_PWM 272 -#define CLK_PCM2 273 -#define CLK_PCM1 274 -#define CLK_I2S1 275 -#define CLK_SPI2 276 -#define CLK_SPI1 277 -#define CLK_SPI0 278 -#define CLK_I2CHDMI 279 -#define CLK_I2C7 280 -#define CLK_I2C6 281 -#define CLK_I2C5 282 -#define CLK_I2C4 283 -#define CLK_I2C3 284 -#define CLK_I2C2 285 -#define CLK_I2C1 286 -#define CLK_I2C0 287 -#define CLK_UART3 288 -#define CLK_UART2 289 -#define CLK_UART1 290 -#define CLK_UART0 291 - -/* Special clocks */ -#define CLK_SCLK_PXLAYSNC_CSIS1_FIMC 330 -#define CLK_SCLK_PXLAYSNC_CSIS0_FIMC 331 -#define CLK_SCLK_JPEG 332 -#define CLK_SCLK_CSIS1 333 -#define 
CLK_SCLK_CSIS0 334 -#define CLK_SCLK_CAM1 335 -#define CLK_SCLK_FIMC3_LCLK 336 -#define CLK_SCLK_FIMC2_LCLK 337 -#define CLK_SCLK_FIMC1_LCLK 338 -#define CLK_SCLK_FIMC0_LCLK 339 -#define CLK_SCLK_PIXEL 340 -#define CLK_SCLK_HDMI 341 -#define CLK_SCLK_MIXER 342 -#define CLK_SCLK_MFC 343 -#define CLK_SCLK_G3D 344 -#define CLK_SCLK_MIPIDPHY4L 345 -#define CLK_SCLK_MIPI0 346 -#define CLK_SCLK_MDNIE0 347 -#define CLK_SCLK_FIMD0 348 -#define CLK_SCLK_PCM0 349 -#define CLK_SCLK_AUDIO0 350 -#define CLK_SCLK_TSADC 351 -#define CLK_SCLK_EBI 352 -#define CLK_SCLK_MMC2 353 -#define CLK_SCLK_MMC1 354 -#define CLK_SCLK_MMC0 355 -#define CLK_SCLK_I2S 356 -#define CLK_SCLK_PCM2 357 -#define CLK_SCLK_PCM1 358 -#define CLK_SCLK_AUDIO2 359 -#define CLK_SCLK_AUDIO1 360 -#define CLK_SCLK_SPDIF 361 -#define CLK_SCLK_SPI2 362 -#define CLK_SCLK_SPI1 363 -#define CLK_SCLK_SPI0 364 -#define CLK_SCLK_UART3 365 -#define CLK_SCLK_UART2 366 -#define CLK_SCLK_UART1 367 -#define CLK_SCLK_UART0 368 -#define CLK_SCLK_HDMIPHY 369 - -/* - * Total number of clocks of main CMU. - * NOTE: Must be equal to last clock ID increased by one. - */ -#define CLK_NR_CLKS 370 - -/* - * CMU DMC - */ -#define CLK_DMC_FOUT_MPLL 1 -#define CLK_DMC_FOUT_BPLL 2 - -#define CLK_DMC_MOUT_MPLL 3 -#define CLK_DMC_MOUT_BPLL 4 -#define CLK_DMC_MOUT_DPHY 5 -#define CLK_DMC_MOUT_DMC_BUS 6 - -#define CLK_DMC_DIV_DMC 7 -#define CLK_DMC_DIV_DPHY 8 -#define CLK_DMC_DIV_DMC_PRE 9 -#define CLK_DMC_DIV_DMCP 10 -#define CLK_DMC_DIV_DMCD 11 -#define CLK_DMC_DIV_MPLL_PRE 12 - -/* - * Total number of clocks of CMU_DMC. - * NOTE: Must be equal to highest clock ID increased by one. - */ -#define NR_CLKS_DMC 13 - -#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H */ diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h index 4fa6bb2136e3..be39d23e6a32 100644 --- a/include/dt-bindings/clock/exynos5433.h +++ b/include/dt-bindings/clock/exynos5433.h @@ -771,7 +771,10 @@ #define CLK_PCLK_DECON 113 -#define DISP_NR_CLK 114 +#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY 114 +#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY 115 + +#define DISP_NR_CLK 116 /* CMU_AUD */ #define CLK_MOUT_AUD_PLL_USER 1 diff --git a/include/dt-bindings/clock/hi3660-clock.h b/include/dt-bindings/clock/hi3660-clock.h new file mode 100644 index 000000000000..1c00b7fe296f --- /dev/null +++ b/include/dt-bindings/clock/hi3660-clock.h @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2016-2017 Linaro Ltd. + * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __DTS_HI3660_CLOCK_H +#define __DTS_HI3660_CLOCK_H + +/* fixed rate clocks */ +#define HI3660_CLKIN_SYS 0 +#define HI3660_CLKIN_REF 1 +#define HI3660_CLK_FLL_SRC 2 +#define HI3660_CLK_PPLL0 3 +#define HI3660_CLK_PPLL1 4 +#define HI3660_CLK_PPLL2 5 +#define HI3660_CLK_PPLL3 6 +#define HI3660_CLK_SCPLL 7 +#define HI3660_PCLK 8 +#define HI3660_CLK_UART0_DBG 9 +#define HI3660_CLK_UART6 10 +#define HI3660_OSC32K 11 +#define HI3660_OSC19M 12 +#define HI3660_CLK_480M 13 +#define HI3660_CLK_INV 14 + +/* clk in crgctrl */ +#define HI3660_FACTOR_UART3 15 +#define HI3660_CLK_FACTOR_MMC 16 +#define HI3660_CLK_GATE_I2C0 17 +#define HI3660_CLK_GATE_I2C1 18 +#define HI3660_CLK_GATE_I2C2 19 +#define HI3660_CLK_GATE_I2C6 20 +#define HI3660_CLK_DIV_SYSBUS 21 +#define HI3660_CLK_DIV_320M 22 +#define HI3660_CLK_DIV_A53 23 +#define HI3660_CLK_GATE_SPI0 24 +#define HI3660_CLK_GATE_SPI2 25 +#define HI3660_PCIEPHY_REF 26 +#define HI3660_CLK_ABB_USB 27 +#define HI3660_HCLK_GATE_SDIO0 28 +#define HI3660_HCLK_GATE_SD 29 +#define HI3660_CLK_GATE_AOMM 30 +#define HI3660_PCLK_GPIO0 31 +#define HI3660_PCLK_GPIO1 32 +#define HI3660_PCLK_GPIO2 33 +#define HI3660_PCLK_GPIO3 34 +#define HI3660_PCLK_GPIO4 35 +#define HI3660_PCLK_GPIO5 36 +#define HI3660_PCLK_GPIO6 37 +#define HI3660_PCLK_GPIO7 38 +#define HI3660_PCLK_GPIO8 39 +#define HI3660_PCLK_GPIO9 40 +#define HI3660_PCLK_GPIO10 41 +#define HI3660_PCLK_GPIO11 42 +#define HI3660_PCLK_GPIO12 43 +#define HI3660_PCLK_GPIO13 44 +#define HI3660_PCLK_GPIO14 45 +#define HI3660_PCLK_GPIO15 46 +#define HI3660_PCLK_GPIO16 47 +#define HI3660_PCLK_GPIO17 48 +#define HI3660_PCLK_GPIO18 49 +#define HI3660_PCLK_GPIO19 50 +#define HI3660_PCLK_GPIO20 51 +#define HI3660_PCLK_GPIO21 52 +#define HI3660_CLK_GATE_SPI3 53 +#define HI3660_CLK_GATE_I2C7 54 +#define HI3660_CLK_GATE_I2C3 55 +#define HI3660_CLK_GATE_SPI1 56 +#define HI3660_CLK_GATE_UART1 57 +#define HI3660_CLK_GATE_UART2 58 +#define HI3660_CLK_GATE_UART4 59 +#define HI3660_CLK_GATE_UART5 60 +#define HI3660_CLK_GATE_I2C4 61 +#define HI3660_CLK_GATE_DMAC 62 +#define HI3660_PCLK_GATE_DSS 63 +#define HI3660_ACLK_GATE_DSS 64 +#define HI3660_CLK_GATE_LDI1 65 +#define HI3660_CLK_GATE_LDI0 66 +#define HI3660_CLK_GATE_VIVOBUS 67 +#define HI3660_CLK_GATE_EDC0 68 +#define HI3660_CLK_GATE_TXDPHY0_CFG 69 +#define HI3660_CLK_GATE_TXDPHY0_REF 70 +#define HI3660_CLK_GATE_TXDPHY1_CFG 71 +#define HI3660_CLK_GATE_TXDPHY1_REF 72 +#define HI3660_ACLK_GATE_USB3OTG 73 +#define HI3660_CLK_GATE_SPI4 74 +#define HI3660_CLK_GATE_SD 75 +#define HI3660_CLK_GATE_SDIO0 76 +#define HI3660_CLK_GATE_UFS_SUBSYS 77 +#define HI3660_PCLK_GATE_DSI0 78 +#define HI3660_PCLK_GATE_DSI1 79 +#define HI3660_ACLK_GATE_PCIE 80 +#define HI3660_PCLK_GATE_PCIE_SYS 81 +#define HI3660_CLK_GATE_PCIEAUX 82 +#define HI3660_PCLK_GATE_PCIE_PHY 83 +#define HI3660_CLK_ANDGT_LDI0 84 +#define HI3660_CLK_ANDGT_LDI1 85 +#define HI3660_CLK_ANDGT_EDC0 86 +#define HI3660_CLK_GATE_UFSPHY_GT 87 +#define HI3660_CLK_ANDGT_MMC 88 +#define HI3660_CLK_ANDGT_SD 89 +#define HI3660_CLK_A53HPM_ANDGT 90 +#define HI3660_CLK_ANDGT_SDIO 91 +#define HI3660_CLK_ANDGT_UART0 92 +#define HI3660_CLK_ANDGT_UART1 93 +#define HI3660_CLK_ANDGT_UARTH 94 +#define HI3660_CLK_ANDGT_SPI 95 +#define HI3660_CLK_VIVOBUS_ANDGT 96 +#define HI3660_CLK_AOMM_ANDGT 97 +#define HI3660_CLK_320M_PLL_GT 98 +#define HI3660_AUTODIV_EMMC0BUS 99 +#define HI3660_AUTODIV_SYSBUS 100 +#define HI3660_CLK_GATE_UFSPHY_CFG 101 +#define HI3660_CLK_GATE_UFSIO_REF 102 +#define HI3660_CLK_MUX_SYSBUS 103 +#define HI3660_CLK_MUX_UART0 104 
+#define HI3660_CLK_MUX_UART1 105 +#define HI3660_CLK_MUX_UARTH 106 +#define HI3660_CLK_MUX_SPI 107 +#define HI3660_CLK_MUX_I2C 108 +#define HI3660_CLK_MUX_MMC_PLL 109 +#define HI3660_CLK_MUX_LDI1 110 +#define HI3660_CLK_MUX_LDI0 111 +#define HI3660_CLK_MUX_SD_PLL 112 +#define HI3660_CLK_MUX_SD_SYS 113 +#define HI3660_CLK_MUX_EDC0 114 +#define HI3660_CLK_MUX_SDIO_SYS 115 +#define HI3660_CLK_MUX_SDIO_PLL 116 +#define HI3660_CLK_MUX_VIVOBUS 117 +#define HI3660_CLK_MUX_A53HPM 118 +#define HI3660_CLK_MUX_320M 119 +#define HI3660_CLK_MUX_IOPERI 120 +#define HI3660_CLK_DIV_UART0 121 +#define HI3660_CLK_DIV_UART1 122 +#define HI3660_CLK_DIV_UARTH 123 +#define HI3660_CLK_DIV_MMC 124 +#define HI3660_CLK_DIV_SD 125 +#define HI3660_CLK_DIV_EDC0 126 +#define HI3660_CLK_DIV_LDI0 127 +#define HI3660_CLK_DIV_SDIO 128 +#define HI3660_CLK_DIV_LDI1 129 +#define HI3660_CLK_DIV_SPI 130 +#define HI3660_CLK_DIV_VIVOBUS 131 +#define HI3660_CLK_DIV_I2C 132 +#define HI3660_CLK_DIV_UFSPHY 133 +#define HI3660_CLK_DIV_CFGBUS 134 +#define HI3660_CLK_DIV_MMC0BUS 135 +#define HI3660_CLK_DIV_MMC1BUS 136 +#define HI3660_CLK_DIV_UFSPERI 137 +#define HI3660_CLK_DIV_AOMM 138 +#define HI3660_CLK_DIV_IOPERI 139 + +/* clk in pmuctrl */ +#define HI3660_GATE_ABB_192 0 + +/* clk in pctrl */ +#define HI3660_GATE_UFS_TCXO_EN 0 +#define HI3660_GATE_USB_TCXO_EN 1 + +/* clk in sctrl */ +#define HI3660_PCLK_AO_GPIO0 0 +#define HI3660_PCLK_AO_GPIO1 1 +#define HI3660_PCLK_AO_GPIO2 2 +#define HI3660_PCLK_AO_GPIO3 3 +#define HI3660_PCLK_AO_GPIO4 4 +#define HI3660_PCLK_AO_GPIO5 5 +#define HI3660_PCLK_AO_GPIO6 6 +#define HI3660_PCLK_GATE_MMBUF 7 +#define HI3660_CLK_GATE_DSS_AXI_MM 8 +#define HI3660_PCLK_MMBUF_ANDGT 9 +#define HI3660_CLK_MMBUF_PLL_ANDGT 10 +#define HI3660_CLK_FLL_MMBUF_ANDGT 11 +#define HI3660_CLK_SYS_MMBUF_ANDGT 12 +#define HI3660_CLK_GATE_PCIEPHY_GT 13 +#define HI3660_ACLK_MUX_MMBUF 14 +#define HI3660_CLK_SW_MMBUF 15 +#define HI3660_CLK_DIV_AOBUS 16 +#define HI3660_PCLK_DIV_MMBUF 17 +#define HI3660_ACLK_DIV_MMBUF 18 +#define HI3660_CLK_DIV_PCIEPHY 19 + +/* clk in iomcu */ +#define HI3660_CLK_I2C0_IOMCU 0 +#define HI3660_CLK_I2C1_IOMCU 1 +#define HI3660_CLK_I2C2_IOMCU 2 +#define HI3660_CLK_I2C6_IOMCU 3 +#define HI3660_CLK_IOMCU_PERI0 4 + +#endif /* __DTS_HI3660_CLOCK_H */ diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h index 1183347c383f..a7a1a50f33ef 100644 --- a/include/dt-bindings/clock/imx7d-clock.h +++ b/include/dt-bindings/clock/imx7d-clock.h @@ -449,5 +449,6 @@ #define IMX7D_ADC_ROOT_CLK 436 #define IMX7D_CLK_ARM 437 #define IMX7D_CKIL 438 -#define IMX7D_CLK_END 439 +#define IMX7D_OCOTP_CLK 439 +#define IMX7D_CLK_END 440 #endif /* __DT_BINDINGS_CLOCK_IMX7D_H */ diff --git a/include/dt-bindings/clock/qcom,gcc-ipq4019.h b/include/dt-bindings/clock/qcom,gcc-ipq4019.h index 6240e5b0e900..7e8a7be6dcda 100644 --- a/include/dt-bindings/clock/qcom,gcc-ipq4019.h +++ b/include/dt-bindings/clock/qcom,gcc-ipq4019.h @@ -81,6 +81,17 @@ #define GCC_WCSS5G_CLK 62 #define GCC_WCSS5G_REF_CLK 63 #define GCC_WCSS5G_RTC_CLK 64 +#define GCC_APSS_DDRPLL_VCO 65 +#define GCC_SDCC_PLLDIV_CLK 66 +#define GCC_FEPLL_VCO 67 +#define GCC_FEPLL125_CLK 68 +#define GCC_FEPLL125DLY_CLK 69 +#define GCC_FEPLL200_CLK 70 +#define GCC_FEPLL500_CLK 71 +#define GCC_FEPLL_WCSS2G_CLK 72 +#define GCC_FEPLL_WCSS5G_CLK 73 +#define GCC_APSS_CPU_PLLDIV_CLK 74 +#define GCC_PCNOC_AHB_CLK_SRC 75 #define WIFI0_CPU_INIT_RESET 0 #define WIFI0_RADIO_SRIF_RESET 1 diff --git a/include/dt-bindings/clock/qcom,gcc-mdm9615.h 
b/include/dt-bindings/clock/qcom,gcc-mdm9615.h index 9ab2c4087120..787e448958bd 100644 --- a/include/dt-bindings/clock/qcom,gcc-mdm9615.h +++ b/include/dt-bindings/clock/qcom,gcc-mdm9615.h @@ -323,5 +323,7 @@ #define CE3_H_CLK 305 #define USB_HS1_SYSTEM_CLK_SRC 306 #define USB_HS1_SYSTEM_CLK 307 +#define EBI2_CLK 308 +#define EBI2_AON_CLK 309 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8994.h b/include/dt-bindings/clock/qcom,gcc-msm8994.h index 8fa535be2ebc..df47da0860f7 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8994.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8994.h @@ -133,5 +133,6 @@ #define GCC_USB30_MOCK_UTMI_CLK 115 #define GCC_USB3_PHY_AUX_CLK 116 #define GCC_USB_HS_SYSTEM_CLK 117 +#define GCC_SDCC1_AHB_CLK 118 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8996.h b/include/dt-bindings/clock/qcom,gcc-msm8996.h index 1828723eb621..1f5c42254798 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8996.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8996.h @@ -339,6 +339,7 @@ #define GCC_PCIE_PHY_COM_NOCSR_BCR 102 #define GCC_USB3_PHY_BCR 103 #define GCC_USB3PHY_PHY_BCR 104 +#define GCC_MSS_RESTART 105 /* Indexes for GDSCs */ diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h index 5924cdb71336..96b63c00249e 100644 --- a/include/dt-bindings/clock/qcom,rpmcc.h +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -14,7 +14,7 @@ #ifndef _DT_BINDINGS_CLK_MSM_RPMCC_H #define _DT_BINDINGS_CLK_MSM_RPMCC_H -/* apq8064 */ +/* RPM clocks */ #define RPM_PXO_CLK 0 #define RPM_PXO_A_CLK 1 #define RPM_CXO_CLK 2 @@ -38,7 +38,7 @@ #define RPM_SFPB_CLK 20 #define RPM_SFPB_A_CLK 21 -/* msm8916 */ +/* SMD RPM clocks */ #define RPM_SMD_XO_CLK_SRC 0 #define RPM_SMD_XO_A_CLK_SRC 1 #define RPM_SMD_PCNOC_CLK 2 @@ -65,5 +65,41 @@ #define RPM_SMD_RF_CLK1_A_PIN 23 #define RPM_SMD_RF_CLK2_PIN 24 #define RPM_SMD_RF_CLK2_A_PIN 25 +#define RPM_SMD_PNOC_CLK 26 +#define RPM_SMD_PNOC_A_CLK 27 +#define RPM_SMD_CNOC_CLK 28 +#define RPM_SMD_CNOC_A_CLK 29 +#define RPM_SMD_MMSSNOC_AHB_CLK 30 +#define RPM_SMD_MMSSNOC_AHB_A_CLK 31 +#define RPM_SMD_GFX3D_CLK_SRC 32 +#define RPM_SMD_GFX3D_A_CLK_SRC 33 +#define RPM_SMD_OCMEMGX_CLK 34 +#define RPM_SMD_OCMEMGX_A_CLK 35 +#define RPM_SMD_CXO_D0 36 +#define RPM_SMD_CXO_D0_A 37 +#define RPM_SMD_CXO_D1 38 +#define RPM_SMD_CXO_D1_A 39 +#define RPM_SMD_CXO_A0 40 +#define RPM_SMD_CXO_A0_A 41 +#define RPM_SMD_CXO_A1 42 +#define RPM_SMD_CXO_A1_A 43 +#define RPM_SMD_CXO_A2 44 +#define RPM_SMD_CXO_A2_A 45 +#define RPM_SMD_DIV_CLK1 46 +#define RPM_SMD_DIV_A_CLK1 47 +#define RPM_SMD_DIV_CLK2 48 +#define RPM_SMD_DIV_A_CLK2 49 +#define RPM_SMD_DIFF_CLK 50 +#define RPM_SMD_DIFF_A_CLK 51 +#define RPM_SMD_CXO_D0_PIN 52 +#define RPM_SMD_CXO_D0_A_PIN 53 +#define RPM_SMD_CXO_D1_PIN 54 +#define RPM_SMD_CXO_D1_A_PIN 55 +#define RPM_SMD_CXO_A0_PIN 56 +#define RPM_SMD_CXO_A0_A_PIN 57 +#define RPM_SMD_CXO_A1_PIN 58 +#define RPM_SMD_CXO_A1_A_PIN 59 +#define RPM_SMD_CXO_A2_PIN 60 +#define RPM_SMD_CXO_A2_A_PIN 61 #endif diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h index d141c1f0c778..eff4319d008b 100644 --- a/include/dt-bindings/clock/rk3188-cru-common.h +++ b/include/dt-bindings/clock/rk3188-cru-common.h @@ -108,6 +108,8 @@ #define PCLK_TSADC 349 #define PCLK_CPU 350 #define PCLK_PERI 351 +#define PCLK_DDRUPCTL 352 +#define PCLK_PUBL 353 /* hclk gates */ #define HCLK_SDMMC 448 diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h index 
9a586e2d9c91..d7b6c83ea63f 100644 --- a/include/dt-bindings/clock/rk3288-cru.h +++ b/include/dt-bindings/clock/rk3288-cru.h @@ -88,6 +88,7 @@ #define SCLK_PVTM_GPU 124 #define SCLK_CRYPTO 125 #define SCLK_MIPIDSI_24M 126 +#define SCLK_VIP_OUT 127 #define SCLK_MAC 151 #define SCLK_MACREF_OUT 152 @@ -168,6 +169,7 @@ #define PCLK_WDT 368 #define PCLK_EFUSE256 369 #define PCLK_EFUSE1024 370 +#define PCLK_ISP_IN 371 /* hclk gates */ #define HCLK_GPS 448 diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h new file mode 100644 index 000000000000..ee702c8e4c09 --- /dev/null +++ b/include/dt-bindings/clock/rk3328-cru.h @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2016 Rockchip Electronics Co. Ltd. + * Author: Elaine <zhangqing@rock-chips.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3328_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3328_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_GPLL 4 +#define PLL_NPLL 5 +#define ARMCLK 6 + +/* sclk gates (special clocks) */ +#define SCLK_RTC32K 30 +#define SCLK_SDMMC_EXT 31 +#define SCLK_SPI 32 +#define SCLK_SDMMC 33 +#define SCLK_SDIO 34 +#define SCLK_EMMC 35 +#define SCLK_TSADC 36 +#define SCLK_SARADC 37 +#define SCLK_UART0 38 +#define SCLK_UART1 39 +#define SCLK_UART2 40 +#define SCLK_I2S0 41 +#define SCLK_I2S1 42 +#define SCLK_I2S2 43 +#define SCLK_I2S1_OUT 44 +#define SCLK_I2S2_OUT 45 +#define SCLK_SPDIF 46 +#define SCLK_TIMER0 47 +#define SCLK_TIMER1 48 +#define SCLK_TIMER2 49 +#define SCLK_TIMER3 50 +#define SCLK_TIMER4 51 +#define SCLK_TIMER5 52 +#define SCLK_WIFI 53 +#define SCLK_CIF_OUT 54 +#define SCLK_I2C0 55 +#define SCLK_I2C1 56 +#define SCLK_I2C2 57 +#define SCLK_I2C3 58 +#define SCLK_CRYPTO 59 +#define SCLK_PWM 60 +#define SCLK_PDM 61 +#define SCLK_EFUSE 62 +#define SCLK_OTP 63 +#define SCLK_DDRCLK 64 +#define SCLK_VDEC_CABAC 65 +#define SCLK_VDEC_CORE 66 +#define SCLK_VENC_DSP 67 +#define SCLK_VENC_CORE 68 +#define SCLK_RGA 69 +#define SCLK_HDMI_SFC 70 +#define SCLK_HDMI_CEC 71 +#define SCLK_USB3_REF 72 +#define SCLK_USB3_SUSPEND 73 +#define SCLK_SDMMC_DRV 74 +#define SCLK_SDIO_DRV 75 +#define SCLK_EMMC_DRV 76 +#define SCLK_SDMMC_EXT_DRV 77 +#define SCLK_SDMMC_SAMPLE 78 +#define SCLK_SDIO_SAMPLE 79 +#define SCLK_EMMC_SAMPLE 80 +#define SCLK_SDMMC_EXT_SAMPLE 81 +#define SCLK_VOP 82 +#define SCLK_MAC2PHY_RXTX 83 +#define SCLK_MAC2PHY_SRC 84 +#define SCLK_MAC2PHY_REF 85 +#define SCLK_MAC2PHY_OUT 86 +#define SCLK_MAC2IO_RX 87 +#define SCLK_MAC2IO_TX 88 +#define SCLK_MAC2IO_REFOUT 89 +#define SCLK_MAC2IO_REF 90 +#define SCLK_MAC2IO_OUT 91 +#define SCLK_TSP 92 +#define SCLK_HSADC_TSP 93 +#define SCLK_USB3PHY_REF 94 +#define SCLK_REF_USB3OTG 95 +#define SCLK_USB3OTG_REF 96 +#define SCLK_USB3OTG_SUSPEND 97 +#define SCLK_REF_USB3OTG_SRC 98 +#define SCLK_MAC2IO_SRC 99 +#define SCLK_MAC2IO 100 +#define SCLK_MAC2PHY 101 + +/* dclk gates */ +#define DCLK_LCDC 120 +#define DCLK_HDMIPHY 121 +#define HDMIPHY 122 +#define USB480M 123 +#define DCLK_LCDC_SRC 124 + +/* aclk 
gates */ +#define ACLK_AXISRAM 130 +#define ACLK_VOP_PRE 131 +#define ACLK_USB3OTG 132 +#define ACLK_RGA_PRE 133 +#define ACLK_DMAC 134 +#define ACLK_GPU 135 +#define ACLK_BUS_PRE 136 +#define ACLK_PERI_PRE 137 +#define ACLK_RKVDEC_PRE 138 +#define ACLK_RKVDEC 139 +#define ACLK_RKVENC 140 +#define ACLK_VPU_PRE 141 +#define ACLK_VIO_PRE 142 +#define ACLK_VPU 143 +#define ACLK_VIO 144 +#define ACLK_VOP 145 +#define ACLK_GMAC 146 +#define ACLK_H265 147 +#define ACLK_H264 148 +#define ACLK_MAC2PHY 149 +#define ACLK_MAC2IO 150 +#define ACLK_DCF 151 +#define ACLK_TSP 152 +#define ACLK_PERI 153 +#define ACLK_RGA 154 +#define ACLK_IEP 155 +#define ACLK_CIF 156 +#define ACLK_HDCP 157 + +/* pclk gates */ +#define PCLK_GPIO0 200 +#define PCLK_GPIO1 201 +#define PCLK_GPIO2 202 +#define PCLK_GPIO3 203 +#define PCLK_GRF 204 +#define PCLK_I2C0 205 +#define PCLK_I2C1 206 +#define PCLK_I2C2 207 +#define PCLK_I2C3 208 +#define PCLK_SPI 209 +#define PCLK_UART0 210 +#define PCLK_UART1 211 +#define PCLK_UART2 212 +#define PCLK_TSADC 213 +#define PCLK_PWM 214 +#define PCLK_TIMER 215 +#define PCLK_BUS_PRE 216 +#define PCLK_PERI_PRE 217 +#define PCLK_HDMI_CTRL 218 +#define PCLK_HDMI_PHY 219 +#define PCLK_GMAC 220 +#define PCLK_H265 221 +#define PCLK_MAC2PHY 222 +#define PCLK_MAC2IO 223 +#define PCLK_USB3PHY_OTG 224 +#define PCLK_USB3PHY_PIPE 225 +#define PCLK_USB3_GRF 226 +#define PCLK_USB2_GRF 227 +#define PCLK_HDMIPHY 228 +#define PCLK_DDR 229 +#define PCLK_PERI 230 +#define PCLK_HDMI 231 +#define PCLK_HDCP 232 +#define PCLK_DCF 233 +#define PCLK_SARADC 234 + +/* hclk gates */ +#define HCLK_PERI 308 +#define HCLK_TSP 309 +#define HCLK_GMAC 310 +#define HCLK_I2S0_8CH 311 +#define HCLK_I2S1_8CH 313 +#define HCLK_I2S2_2CH 313 +#define HCLK_SPDIF_8CH 314 +#define HCLK_VOP 315 +#define HCLK_NANDC 316 +#define HCLK_SDMMC 317 +#define HCLK_SDIO 318 +#define HCLK_EMMC 319 +#define HCLK_SDMMC_EXT 320 +#define HCLK_RKVDEC_PRE 321 +#define HCLK_RKVDEC 322 +#define HCLK_RKVENC 323 +#define HCLK_VPU_PRE 324 +#define HCLK_VIO_PRE 325 +#define HCLK_VPU 326 +#define HCLK_VIO 327 +#define HCLK_BUS_PRE 328 +#define HCLK_PERI_PRE 329 +#define HCLK_H264 330 +#define HCLK_CIF 331 +#define HCLK_OTG_PMU 332 +#define HCLK_OTG 333 +#define HCLK_HOST0 334 +#define HCLK_HOST0_ARB 335 +#define HCLK_CRYPTO_MST 336 +#define HCLK_CRYPTO_SLV 337 +#define HCLK_PDM 338 +#define HCLK_IEP 339 +#define HCLK_RGA 340 +#define HCLK_HDCP 341 + +#define CLK_NR_CLKS (HCLK_HDCP + 1) + +/* soft-reset indices */ +#define SRST_CORE0_PO 0 +#define SRST_CORE1_PO 1 +#define SRST_CORE2_PO 2 +#define SRST_CORE3_PO 3 +#define SRST_CORE0 4 +#define SRST_CORE1 5 +#define SRST_CORE2 6 +#define SRST_CORE3 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 +#define SRST_TOPDBG 12 +#define SRST_CORE_NIU 13 +#define SRST_STRC_A 14 +#define SRST_L2C 15 + +#define SRST_A53_GIC 18 +#define SRST_DAP 19 +#define SRST_PMU_P 21 +#define SRST_EFUSE 22 +#define SRST_BUSSYS_H 23 +#define SRST_BUSSYS_P 24 +#define SRST_SPDIF 25 +#define SRST_INTMEM 26 +#define SRST_ROM 27 +#define SRST_GPIO0 28 +#define SRST_GPIO1 29 +#define SRST_GPIO2 30 +#define SRST_GPIO3 31 + +#define SRST_I2S0 32 +#define SRST_I2S1 33 +#define SRST_I2S2 34 +#define SRST_I2S0_H 35 +#define SRST_I2S1_H 36 +#define SRST_I2S2_H 37 +#define SRST_UART0 38 +#define SRST_UART1 39 +#define SRST_UART2 40 +#define SRST_UART0_P 41 +#define SRST_UART1_P 42 +#define SRST_UART2_P 43 +#define SRST_I2C0 44 +#define SRST_I2C1 45 +#define SRST_I2C2 46 +#define SRST_I2C3 
47 + +#define SRST_I2C0_P 48 +#define SRST_I2C1_P 49 +#define SRST_I2C2_P 50 +#define SRST_I2C3_P 51 +#define SRST_EFUSE_SE_P 52 +#define SRST_EFUSE_NS_P 53 +#define SRST_PWM0 54 +#define SRST_PWM0_P 55 +#define SRST_DMA 56 +#define SRST_TSP_A 57 +#define SRST_TSP_H 58 +#define SRST_TSP 59 +#define SRST_TSP_HSADC 60 +#define SRST_DCF_A 61 +#define SRST_DCF_P 62 + +#define SRST_SCR 64 +#define SRST_SPI 65 +#define SRST_TSADC 66 +#define SRST_TSADC_P 67 +#define SRST_CRYPTO 68 +#define SRST_SGRF 69 +#define SRST_GRF 70 +#define SRST_USB_GRF 71 +#define SRST_TIMER_6CH_P 72 +#define SRST_TIMER0 73 +#define SRST_TIMER1 74 +#define SRST_TIMER2 75 +#define SRST_TIMER3 76 +#define SRST_TIMER4 77 +#define SRST_TIMER5 78 +#define SRST_USB3GRF 79 + +#define SRST_PHYNIU 80 +#define SRST_HDMIPHY 81 +#define SRST_VDAC 82 +#define SRST_ACODEC_p 83 +#define SRST_SARADC 85 +#define SRST_SARADC_P 86 +#define SRST_GRF_DDR 87 +#define SRST_DFIMON 88 +#define SRST_MSCH 89 +#define SRST_DDRMSCH 91 +#define SRST_DDRCTRL 92 +#define SRST_DDRCTRL_P 93 +#define SRST_DDRPHY 94 +#define SRST_DDRPHY_P 95 + +#define SRST_GMAC_NIU_A 96 +#define SRST_GMAC_NIU_P 97 +#define SRST_GMAC2PHY_A 98 +#define SRST_GMAC2IO_A 99 +#define SRST_MACPHY 100 +#define SRST_OTP_PHY 101 +#define SRST_GPU_A 102 +#define SRST_GPU_NIU_A 103 +#define SRST_SDMMCEXT 104 +#define SRST_PERIPH_NIU_A 105 +#define SRST_PERIHP_NIU_H 106 +#define SRST_PERIHP_P 107 +#define SRST_PERIPHSYS_H 108 +#define SRST_MMC0 109 +#define SRST_SDIO 110 +#define SRST_EMMC 111 + +#define SRST_USB2OTG_H 112 +#define SRST_USB2OTG 113 +#define SRST_USB2OTG_ADP 114 +#define SRST_USB2HOST_H 115 +#define SRST_USB2HOST_ARB 116 +#define SRST_USB2HOST_AUX 117 +#define SRST_USB2HOST_EHCIPHY 118 +#define SRST_USB2HOST_UTMI 119 +#define SRST_USB3OTG 120 +#define SRST_USBPOR 121 +#define SRST_USB2OTG_UTMI 122 +#define SRST_USB2HOST_PHY_UTMI 123 +#define SRST_USB3OTG_UTMI 124 +#define SRST_USB3PHY_U2 125 +#define SRST_USB3PHY_U3 126 +#define SRST_USB3PHY_PIPE 127 + +#define SRST_VIO_A 128 +#define SRST_VIO_BUS_H 129 +#define SRST_VIO_H2P_H 130 +#define SRST_VIO_ARBI_H 131 +#define SRST_VOP_NIU_A 132 +#define SRST_VOP_A 133 +#define SRST_VOP_H 134 +#define SRST_VOP_D 135 +#define SRST_RGA 136 +#define SRST_RGA_NIU_A 137 +#define SRST_RGA_A 138 +#define SRST_RGA_H 139 +#define SRST_IEP_A 140 +#define SRST_IEP_H 141 +#define SRST_HDMI 142 +#define SRST_HDMI_P 143 + +#define SRST_HDCP_A 144 +#define SRST_HDCP 145 +#define SRST_HDCP_H 146 +#define SRST_CIF_A 147 +#define SRST_CIF_H 148 +#define SRST_CIF_P 149 +#define SRST_OTP_P 150 +#define SRST_OTP_SBPI 151 +#define SRST_OTP_USER 152 +#define SRST_DDRCTRL_A 153 +#define SRST_DDRSTDY_P 154 +#define SRST_DDRSTDY 155 +#define SRST_PDM_H 156 +#define SRST_PDM 157 +#define SRST_USB3PHY_OTG_P 158 +#define SRST_USB3PHY_PIPE_P 159 + +#define SRST_VCODEC_A 160 +#define SRST_VCODEC_NIU_A 161 +#define SRST_VCODEC_H 162 +#define SRST_VCODEC_NIU_H 163 +#define SRST_VDEC_A 164 +#define SRST_VDEC_NIU_A 165 +#define SRST_VDEC_H 166 +#define SRST_VDEC_NIU_H 167 +#define SRST_VDEC_CORE 168 +#define SRST_VDEC_CABAC 169 +#define SRST_DDRPHYDIV 175 + +#define SRST_RKVENC_NIU_A 176 +#define SRST_RKVENC_NIU_H 177 +#define SRST_RKVENC_H265_A 178 +#define SRST_RKVENC_H265_P 179 +#define SRST_RKVENC_H265_CORE 180 +#define SRST_RKVENC_H265_DSP 181 +#define SRST_RKVENC_H264_A 182 +#define SRST_RKVENC_H264_H 183 +#define SRST_RKVENC_INTMEM 184 + +#endif diff --git a/include/dt-bindings/clock/ste-ab8500.h b/include/dt-bindings/clock/ste-ab8500.h new file mode 
100644 index 000000000000..6731f1f00a84 --- /dev/null +++ b/include/dt-bindings/clock/ste-ab8500.h @@ -0,0 +1,11 @@ +#ifndef __STE_CLK_AB8500_H__ +#define __STE_CLK_AB8500_H__ + +#define AB8500_SYSCLK_BUF2 0 +#define AB8500_SYSCLK_BUF3 1 +#define AB8500_SYSCLK_BUF4 2 +#define AB8500_SYSCLK_ULP 3 +#define AB8500_SYSCLK_INT 4 +#define AB8500_SYSCLK_AUDIO 5 + +#endif diff --git a/include/dt-bindings/clock/stm32fx-clock.h b/include/dt-bindings/clock/stm32fx-clock.h index 08bcab61b714..49bb3c203e5c 100644 --- a/include/dt-bindings/clock/stm32fx-clock.h +++ b/include/dt-bindings/clock/stm32fx-clock.h @@ -36,4 +36,24 @@ #define END_PRIMARY_CLK 14 +#define CLK_HSI 14 +#define CLK_SYSCLK 15 +#define CLK_HDMI_CEC 16 +#define CLK_SPDIF 17 +#define CLK_USART1 18 +#define CLK_USART2 19 +#define CLK_USART3 20 +#define CLK_UART4 21 +#define CLK_UART5 22 +#define CLK_USART6 23 +#define CLK_UART7 24 +#define CLK_UART8 25 +#define CLK_I2C1 26 +#define CLK_I2C2 27 +#define CLK_I2C3 28 +#define CLK_I2C4 29 +#define CLK_LPTIMER 30 + +#define END_PRIMARY_CLK_F7 31 + #endif diff --git a/include/dt-bindings/clock/sun5i-ccu.h b/include/dt-bindings/clock/sun5i-ccu.h new file mode 100644 index 000000000000..aeb2e2f781fb --- /dev/null +++ b/include/dt-bindings/clock/sun5i-ccu.h @@ -0,0 +1,103 @@ +/* + * Copyright 2016 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_SUN5I_H_ +#define _DT_BINDINGS_CLK_SUN5I_H_ + +#define CLK_HOSC 1 + +#define CLK_CPU 17 + +#define CLK_AHB_OTG 23 +#define CLK_AHB_EHCI 24 +#define CLK_AHB_OHCI 25 +#define CLK_AHB_SS 26 +#define CLK_AHB_DMA 27 +#define CLK_AHB_BIST 28 +#define CLK_AHB_MMC0 29 +#define CLK_AHB_MMC1 30 +#define CLK_AHB_MMC2 31 +#define CLK_AHB_NAND 32 +#define CLK_AHB_SDRAM 33 +#define CLK_AHB_EMAC 34 +#define CLK_AHB_TS 35 +#define CLK_AHB_SPI0 36 +#define CLK_AHB_SPI1 37 +#define CLK_AHB_SPI2 38 +#define CLK_AHB_GPS 39 +#define CLK_AHB_HSTIMER 40 +#define CLK_AHB_VE 41 +#define CLK_AHB_TVE 42 +#define CLK_AHB_LCD 43 +#define CLK_AHB_CSI 44 +#define CLK_AHB_HDMI 45 +#define CLK_AHB_DE_BE 46 +#define CLK_AHB_DE_FE 47 +#define CLK_AHB_IEP 48 +#define CLK_AHB_GPU 49 +#define CLK_APB0_CODEC 50 +#define CLK_APB0_SPDIF 51 +#define CLK_APB0_I2S 52 +#define CLK_APB0_PIO 53 +#define CLK_APB0_IR 54 +#define CLK_APB0_KEYPAD 55 +#define CLK_APB1_I2C0 56 +#define CLK_APB1_I2C1 57 +#define CLK_APB1_I2C2 58 +#define CLK_APB1_UART0 59 +#define CLK_APB1_UART1 60 +#define CLK_APB1_UART2 61 +#define CLK_APB1_UART3 62 +#define CLK_NAND 63 +#define CLK_MMC0 64 +#define CLK_MMC1 65 +#define CLK_MMC2 66 +#define CLK_TS 67 +#define CLK_SS 68 +#define CLK_SPI0 69 +#define CLK_SPI1 70 +#define CLK_SPI2 71 +#define CLK_IR 72 +#define CLK_I2S 73 +#define CLK_SPDIF 74 +#define CLK_KEYPAD 75 +#define CLK_USB_OHCI 76 +#define CLK_USB_PHY0 77 +#define CLK_USB_PHY1 78 +#define CLK_GPS 79 +#define CLK_DRAM_VE 80 +#define CLK_DRAM_CSI 81 +#define CLK_DRAM_TS 82 +#define CLK_DRAM_TVE 83 +#define CLK_DRAM_DE_FE 84 +#define CLK_DRAM_DE_BE 85 +#define CLK_DRAM_ACE 86 +#define CLK_DRAM_IEP 87 +#define CLK_DE_BE 88 +#define CLK_DE_FE 89 +#define CLK_TCON_CH0 90 + +#define CLK_TCON_CH1 92 +#define CLK_CSI 93 +#define CLK_VE 94 +#define CLK_CODEC 95 +#define CLK_AVS 96 +#define CLK_HDMI 97 +#define CLK_GPU 98 + +#define CLK_IEP 100 + +#endif /* _DT_BINDINGS_CLK_SUN5I_H_ */ diff --git a/include/dt-bindings/clock/sun8i-v3s-ccu.h b/include/dt-bindings/clock/sun8i-v3s-ccu.h new file mode 100644 index 000000000000..c0d5d5599c87 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-v3s-ccu.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2016 Icenowy Zheng <icenowy@aosc.xyz> + * + * Based on sun8i-h3-ccu.h, which is: + * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLK_SUN8I_V3S_H_ +#define _DT_BINDINGS_CLK_SUN8I_V3S_H_ + +#define CLK_CPU 14 + +#define CLK_BUS_CE 20 +#define CLK_BUS_DMA 21 +#define CLK_BUS_MMC0 22 +#define CLK_BUS_MMC1 23 +#define CLK_BUS_MMC2 24 +#define CLK_BUS_DRAM 25 +#define CLK_BUS_EMAC 26 +#define CLK_BUS_HSTIMER 27 +#define CLK_BUS_SPI0 28 +#define CLK_BUS_OTG 29 +#define CLK_BUS_EHCI0 30 +#define CLK_BUS_OHCI0 31 +#define CLK_BUS_VE 32 +#define CLK_BUS_TCON0 33 +#define CLK_BUS_CSI 34 +#define CLK_BUS_DE 35 +#define CLK_BUS_CODEC 36 +#define CLK_BUS_PIO 37 +#define CLK_BUS_I2C0 38 +#define CLK_BUS_I2C1 39 +#define CLK_BUS_UART0 40 +#define CLK_BUS_UART1 41 +#define CLK_BUS_UART2 42 +#define CLK_BUS_EPHY 43 +#define CLK_BUS_DBG 44 + +#define CLK_MMC0 45 +#define CLK_MMC0_SAMPLE 46 +#define CLK_MMC0_OUTPUT 47 +#define CLK_MMC1 48 +#define CLK_MMC1_SAMPLE 49 +#define CLK_MMC1_OUTPUT 50 +#define CLK_MMC2 51 +#define CLK_MMC2_SAMPLE 52 +#define CLK_MMC2_OUTPUT 53 +#define CLK_CE 54 +#define CLK_SPI0 55 +#define CLK_USB_PHY0 56 +#define CLK_USB_OHCI0 57 + +#define CLK_DRAM_VE 59 +#define CLK_DRAM_CSI 60 +#define CLK_DRAM_EHCI 61 +#define CLK_DRAM_OHCI 62 +#define CLK_DE 63 +#define CLK_TCON0 64 +#define CLK_CSI_MISC 65 +#define CLK_CSI0_MCLK 66 +#define CLK_CSI1_SCLK 67 +#define CLK_CSI1_MCLK 68 +#define CLK_VE 69 +#define CLK_AC_DIG 70 +#define CLK_AVS 71 + +#define CLK_MIPI_CSI 73 + +#endif /* _DT_BINDINGS_CLK_SUN8I_V3S_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-ccu.h b/include/dt-bindings/clock/sun9i-a80-ccu.h new file mode 100644 index 000000000000..6ea1492a73a6 --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-ccu.h @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ + +#define CLK_PLL_AUDIO 2 +#define CLK_PLL_PERIPH0 3 + +#define CLK_C0CPUX 12 +#define CLK_C1CPUX 13 + +#define CLK_OUT_A 27 +#define CLK_OUT_B 28 + +#define CLK_NAND0_0 29 +#define CLK_NAND0_1 30 +#define CLK_NAND1_0 31 +#define CLK_NAND1_1 32 +#define CLK_MMC0 33 +#define CLK_MMC0_SAMPLE 34 +#define CLK_MMC0_OUTPUT 35 +#define CLK_MMC1 36 +#define CLK_MMC1_SAMPLE 37 +#define CLK_MMC1_OUTPUT 38 +#define CLK_MMC2 39 +#define CLK_MMC2_SAMPLE 40 +#define CLK_MMC2_OUTPUT 41 +#define CLK_MMC3 42 +#define CLK_MMC3_SAMPLE 43 +#define CLK_MMC3_OUTPUT 44 +#define CLK_TS 45 +#define CLK_SS 46 +#define CLK_SPI0 47 +#define CLK_SPI1 48 +#define CLK_SPI2 49 +#define CLK_SPI3 50 +#define CLK_I2S0 51 +#define CLK_I2S1 52 +#define CLK_SPDIF 53 +#define CLK_SDRAM 54 +#define CLK_DE 55 +#define CLK_EDP 56 +#define CLK_MP 57 +#define CLK_LCD0 58 +#define CLK_LCD1 59 +#define CLK_MIPI_DSI0 60 +#define CLK_MIPI_DSI1 61 +#define CLK_HDMI 62 +#define CLK_HDMI_SLOW 63 +#define CLK_MIPI_CSI 64 +#define CLK_CSI_ISP 65 +#define CLK_CSI_MISC 66 +#define CLK_CSI0_MCLK 67 +#define CLK_CSI1_MCLK 68 +#define CLK_FD 69 +#define CLK_VE 70 +#define CLK_AVS 71 +#define CLK_GPU_CORE 72 +#define CLK_GPU_MEMORY 73 +#define CLK_GPU_AXI 74 +#define CLK_SATA 75 +#define CLK_AC97 76 +#define CLK_MIPI_HSI 77 +#define CLK_GPADC 78 +#define CLK_CIR_TX 79 + +#define CLK_BUS_FD 80 +#define CLK_BUS_VE 81 +#define CLK_BUS_GPU_CTRL 82 +#define CLK_BUS_SS 83 +#define CLK_BUS_MMC 84 +#define CLK_BUS_NAND0 85 +#define CLK_BUS_NAND1 86 +#define CLK_BUS_SDRAM 87 +#define CLK_BUS_MIPI_HSI 88 +#define CLK_BUS_SATA 89 +#define CLK_BUS_TS 90 +#define CLK_BUS_SPI0 91 +#define CLK_BUS_SPI1 92 +#define CLK_BUS_SPI2 93 +#define CLK_BUS_SPI3 94 + +#define CLK_BUS_OTG 95 +#define CLK_BUS_USB 96 +#define CLK_BUS_GMAC 97 +#define CLK_BUS_MSGBOX 98 +#define CLK_BUS_SPINLOCK 99 +#define CLK_BUS_HSTIMER 100 +#define CLK_BUS_DMA 101 + +#define CLK_BUS_LCD0 102 +#define CLK_BUS_LCD1 103 +#define CLK_BUS_EDP 104 +#define CLK_BUS_CSI 105 +#define CLK_BUS_HDMI 106 +#define CLK_BUS_DE 107 +#define CLK_BUS_MP 108 +#define CLK_BUS_MIPI_DSI 109 + +#define CLK_BUS_SPDIF 110 +#define CLK_BUS_PIO 111 +#define CLK_BUS_AC97 112 +#define CLK_BUS_I2S0 113 +#define CLK_BUS_I2S1 114 +#define CLK_BUS_LRADC 115 
+#define CLK_BUS_GPADC 116 +#define CLK_BUS_TWD 117 +#define CLK_BUS_CIR_TX 118 + +#define CLK_BUS_I2C0 119 +#define CLK_BUS_I2C1 120 +#define CLK_BUS_I2C2 121 +#define CLK_BUS_I2C3 122 +#define CLK_BUS_I2C4 123 +#define CLK_BUS_UART0 124 +#define CLK_BUS_UART1 125 +#define CLK_BUS_UART2 126 +#define CLK_BUS_UART3 127 +#define CLK_BUS_UART4 128 +#define CLK_BUS_UART5 129 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-de.h b/include/dt-bindings/clock/sun9i-a80-de.h new file mode 100644 index 000000000000..3dad6c3cd131 --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-de.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ + +#define CLK_FE0 0 +#define CLK_FE1 1 +#define CLK_FE2 2 +#define CLK_IEP_DEU0 3 +#define CLK_IEP_DEU1 4 +#define CLK_BE0 5 +#define CLK_BE1 6 +#define CLK_BE2 7 +#define CLK_IEP_DRC0 8 +#define CLK_IEP_DRC1 9 +#define CLK_MERGE 10 + +#define CLK_DRAM_FE0 11 +#define CLK_DRAM_FE1 12 +#define CLK_DRAM_FE2 13 +#define CLK_DRAM_DEU0 14 +#define CLK_DRAM_DEU1 15 +#define CLK_DRAM_BE0 16 +#define CLK_DRAM_BE1 17 +#define CLK_DRAM_BE2 18 +#define CLK_DRAM_DRC0 19 +#define CLK_DRAM_DRC1 20 + +#define CLK_BUS_FE0 21 +#define CLK_BUS_FE1 22 +#define CLK_BUS_FE2 23 +#define CLK_BUS_DEU0 24 +#define CLK_BUS_DEU1 25 +#define CLK_BUS_BE0 26 +#define CLK_BUS_BE1 27 +#define CLK_BUS_BE2 28 +#define CLK_BUS_DRC0 29 +#define CLK_BUS_DRC1 30 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-usb.h b/include/dt-bindings/clock/sun9i-a80-usb.h new file mode 100644 index 000000000000..783a60d2ccea --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-usb.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ + +#define CLK_BUS_HCI0 0 +#define CLK_USB_OHCI0 1 +#define CLK_BUS_HCI1 2 +#define CLK_BUS_HCI2 3 +#define CLK_USB_OHCI2 4 + +#define CLK_USB0_PHY 5 +#define CLK_USB1_HSIC 6 +#define CLK_USB1_PHY 7 +#define CLK_USB2_HSIC 8 +#define CLK_USB2_PHY 9 +#define CLK_USB_HSIC 10 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ */ diff --git a/include/dt-bindings/reset/sun5i-ccu.h b/include/dt-bindings/reset/sun5i-ccu.h new file mode 100644 index 000000000000..c2b9726b5026 --- /dev/null +++ b/include/dt-bindings/reset/sun5i-ccu.h @@ -0,0 +1,32 @@ +/* + * Copyright 2016 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _RST_SUN5I_H_ +#define _RST_SUN5I_H_ + +#define RST_USB_PHY0 0 +#define RST_USB_PHY1 1 +#define RST_GPS 2 +#define RST_DE_BE 3 +#define RST_DE_FE 4 +#define RST_TVE 5 +#define RST_LCD 6 +#define RST_CSI 7 +#define RST_VE 8 +#define RST_GPU 9 +#define RST_IEP 10 + +#endif /* _RST_SUN5I_H_ */ diff --git a/include/dt-bindings/reset/sun8i-v3s-ccu.h b/include/dt-bindings/reset/sun8i-v3s-ccu.h new file mode 100644 index 000000000000..b58ef21a2e18 --- /dev/null +++ b/include/dt-bindings/reset/sun8i-v3s-ccu.h @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2016 Icenowy Zheng <icenowy@aosc.xyz> + * + * Based on sun8i-v3s-ccu.h, which is + * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RST_SUN8I_V3S_H_ +#define _DT_BINDINGS_RST_SUN8I_V3S_H_ + +#define RST_USB_PHY0 0 + +#define RST_MBUS 1 + +#define RST_BUS_CE 5 +#define RST_BUS_DMA 6 +#define RST_BUS_MMC0 7 +#define RST_BUS_MMC1 8 +#define RST_BUS_MMC2 9 +#define RST_BUS_DRAM 11 +#define RST_BUS_EMAC 12 +#define RST_BUS_HSTIMER 14 +#define RST_BUS_SPI0 15 +#define RST_BUS_OTG 17 +#define RST_BUS_EHCI0 18 +#define RST_BUS_OHCI0 22 +#define RST_BUS_VE 26 +#define RST_BUS_TCON0 27 +#define RST_BUS_CSI 30 +#define RST_BUS_DE 34 +#define RST_BUS_DBG 38 +#define RST_BUS_EPHY 39 +#define RST_BUS_CODEC 40 +#define RST_BUS_I2C0 46 +#define RST_BUS_I2C1 47 +#define RST_BUS_UART0 49 +#define RST_BUS_UART1 50 +#define RST_BUS_UART2 51 + +#endif /* _DT_BINDINGS_RST_SUN8I_H3_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-ccu.h b/include/dt-bindings/reset/sun9i-a80-ccu.h new file mode 100644 index 000000000000..4b8df4b36788 --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-ccu.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ + +#define RST_BUS_FD 0 +#define RST_BUS_VE 1 +#define RST_BUS_GPU_CTRL 2 +#define RST_BUS_SS 3 +#define RST_BUS_MMC 4 +#define RST_BUS_NAND0 5 +#define RST_BUS_NAND1 6 +#define RST_BUS_SDRAM 7 +#define RST_BUS_SATA 8 +#define RST_BUS_TS 9 +#define RST_BUS_SPI0 10 +#define RST_BUS_SPI1 11 +#define RST_BUS_SPI2 12 +#define RST_BUS_SPI3 13 + +#define RST_BUS_OTG 14 +#define RST_BUS_OTG_PHY 15 +#define RST_BUS_MIPI_HSI 16 +#define RST_BUS_GMAC 17 +#define RST_BUS_MSGBOX 18 +#define RST_BUS_SPINLOCK 19 +#define RST_BUS_HSTIMER 20 +#define RST_BUS_DMA 21 + +#define RST_BUS_LCD0 22 +#define RST_BUS_LCD1 23 +#define RST_BUS_EDP 24 +#define RST_BUS_LVDS 25 +#define RST_BUS_CSI 26 +#define RST_BUS_HDMI0 27 +#define RST_BUS_HDMI1 28 +#define RST_BUS_DE 29 +#define RST_BUS_MP 30 +#define RST_BUS_GPU 31 +#define RST_BUS_MIPI_DSI 32 + +#define RST_BUS_SPDIF 33 +#define RST_BUS_AC97 34 +#define RST_BUS_I2S0 35 +#define RST_BUS_I2S1 36 +#define RST_BUS_LRADC 37 +#define RST_BUS_GPADC 38 +#define RST_BUS_CIR_TX 39 + +#define RST_BUS_I2C0 40 +#define RST_BUS_I2C1 41 +#define RST_BUS_I2C2 42 +#define RST_BUS_I2C3 43 +#define RST_BUS_I2C4 44 +#define RST_BUS_UART0 45 +#define RST_BUS_UART1 46 +#define RST_BUS_UART2 47 +#define RST_BUS_UART3 48 +#define RST_BUS_UART4 49 +#define RST_BUS_UART5 50 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-de.h b/include/dt-bindings/reset/sun9i-a80-de.h new file mode 100644 index 000000000000..205072770171 --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-de.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ + +#define RST_FE0 0 +#define RST_FE1 1 +#define RST_FE2 2 +#define RST_DEU0 3 +#define RST_DEU1 4 +#define RST_BE0 5 +#define RST_BE1 6 +#define RST_BE2 7 +#define RST_DRC0 8 +#define RST_DRC1 9 +#define RST_MERGE 10 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-usb.h b/include/dt-bindings/reset/sun9i-a80-usb.h new file mode 100644 index 000000000000..ee492864c2aa --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-usb.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ + +#define RST_USB0_HCI 0 +#define RST_USB1_HCI 1 +#define RST_USB2_HCI 2 + +#define RST_USB0_PHY 3 +#define RST_USB1_HSIC 4 +#define RST_USB1_PHY 5 +#define RST_USB2_HSIC 6 +#define RST_USB2_PHY 7 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ */ diff --git a/include/linux/average.h b/include/linux/average.h index d04aa58280de..7ddaf340d2ac 100644 --- a/include/linux/average.h +++ b/include/linux/average.h @@ -1,45 +1,66 @@ #ifndef _LINUX_AVERAGE_H #define _LINUX_AVERAGE_H -/* Exponentially weighted moving average (EWMA) */ +/* + * Exponentially weighted moving average (EWMA) + * + * This implements a fixed-precision EWMA algorithm, with both the + * precision and fall-off coefficient determined at compile-time + * and built into the generated helper funtions. + * + * The first argument to the macro is the name that will be used + * for the struct and helper functions. + * + * The second argument, the precision, expresses how many bits are + * used for the fractional part of the fixed-precision values. + * + * The third argument, the weight reciprocal, determines how the + * new values will be weighed vs. the old state, new values will + * get weight 1/weight_rcp and old values 1-1/weight_rcp. Note + * that this parameter must be a power of two for efficiency. + */ -#define DECLARE_EWMA(name, _factor, _weight) \ +#define DECLARE_EWMA(name, _precision, _weight_rcp) \ struct ewma_##name { \ unsigned long internal; \ }; \ static inline void ewma_##name##_init(struct ewma_##name *e) \ { \ - BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ - BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ + BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ + /* \ + * Even if you want to feed it just 0/1 you should have \ + * some bits for the non-fractional part... \ + */ \ + BUILD_BUG_ON((_precision) > 30); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ e->internal = 0; \ } \ static inline unsigned long \ ewma_##name##_read(struct ewma_##name *e) \ { \ - BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ - BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ - return e->internal >> ilog2(_factor); \ + BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ + BUILD_BUG_ON((_precision) > 30); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ + return e->internal >> (_precision); \ } \ static inline void ewma_##name##_add(struct ewma_##name *e, \ unsigned long val) \ { \ unsigned long internal = ACCESS_ONCE(e->internal); \ - unsigned long weight = ilog2(_weight); \ - unsigned long factor = ilog2(_factor); \ + unsigned long weight_rcp = ilog2(_weight_rcp); \ + unsigned long precision = _precision; \ \ - BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ - BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ - BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ + BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ + BUILD_BUG_ON((_precision) > 30); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ \ ACCESS_ONCE(e->internal) = internal ? 
\ - (((internal << weight) - internal) + \ - (val << factor)) >> weight : \ - (val << factor); \ + (((internal << weight_rcp) - internal) + \ + (val << precision)) >> weight_rcp : \ + (val << precision); \ } #endif /* _LINUX_AVERAGE_H */ diff --git a/include/linux/bio.h b/include/linux/bio.h index 7cf8a6c70a3f..8e521194f6fc 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -183,7 +183,7 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) -static inline unsigned bio_segments(struct bio *bio) +static inline unsigned __bio_segments(struct bio *bio, struct bvec_iter *bvec) { unsigned segs = 0; struct bio_vec bv; @@ -205,12 +205,17 @@ static inline unsigned bio_segments(struct bio *bio) break; } - bio_for_each_segment(bv, bio, iter) + __bio_for_each_segment(bv, bio, iter, *bvec) segs++; return segs; } +static inline unsigned bio_segments(struct bio *bio) +{ + return __bio_segments(bio, &bio->bi_iter); +} + /* * get a reference to a bio, so it won't disappear. the intended use is * something like: @@ -384,6 +389,8 @@ extern void bio_put(struct bio *); extern void __bio_clone_fast(struct bio *, struct bio *); extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs); +extern struct bio *bio_clone_bioset_partial(struct bio *, gfp_t, + struct bio_set *, int, int); extern struct bio_set *fs_bio_set; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 8e4df3d6c8cd..001d30d727c5 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -33,6 +33,7 @@ struct blk_mq_hw_ctx { struct blk_mq_ctx **ctxs; unsigned int nr_ctx; + wait_queue_t dispatch_wait; atomic_t wait_index; struct blk_mq_tags *tags; @@ -160,6 +161,7 @@ enum { BLK_MQ_S_STOPPED = 0, BLK_MQ_S_TAG_ACTIVE = 1, BLK_MQ_S_SCHED_RESTART = 2, + BLK_MQ_S_TAG_WAITING = 3, BLK_MQ_MAX_DEPTH = 10240, diff --git a/include/linux/bug.h b/include/linux/bug.h index baff2e8fc8a8..5828489309bb 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -124,18 +124,20 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr, /* * Since detected data corruption should stop operation on the affected - * structures, this returns false if the corruption condition is found. + * structures. Return value must be checked and sanely acted on by caller. */ +static inline __must_check bool check_data_corruption(bool v) { return v; } #define CHECK_DATA_CORRUPTION(condition, fmt, ...) 
\ - do { \ - if (unlikely(condition)) { \ + check_data_corruption(({ \ + bool corruption = unlikely(condition); \ + if (corruption) { \ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \ pr_err(fmt, ##__VA_ARGS__); \ BUG(); \ } else \ WARN(1, fmt, ##__VA_ARGS__); \ - return false; \ } \ - } while (0) + corruption; \ + })) #endif /* _LINUX_BUG_H */ diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 03a6653d329a..2ea0c282f3dc 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -22,7 +22,6 @@ struct ceph_osd_client; * completion callback for async writepages */ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *); -typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool); #define CEPH_HOMELESS_OSD -1 @@ -170,15 +169,12 @@ struct ceph_osd_request { unsigned int r_num_ops; int r_result; - bool r_got_reply; struct ceph_osd_client *r_osdc; struct kref r_kref; bool r_mempool; - struct completion r_completion; - struct completion r_done_completion; /* fsync waiter */ + struct completion r_completion; /* private to osd_client.c */ ceph_osdc_callback_t r_callback; - ceph_osdc_unsafe_callback_t r_unsafe_callback; struct list_head r_unsafe_item; struct inode *r_inode; /* for use by callbacks */ diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index 9a9041784dcf..938656f70807 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -57,7 +57,7 @@ static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool) case CEPH_POOL_TYPE_EC: return false; default: - BUG_ON(1); + BUG(); } } @@ -82,13 +82,6 @@ void ceph_oloc_copy(struct ceph_object_locator *dest, void ceph_oloc_destroy(struct ceph_object_locator *oloc); /* - * Maximum supported by kernel client object name length - * - * (probably outdated: must be >= RBD_MAX_MD_NAME_LEN -- currently 100) - */ -#define CEPH_MAX_OID_NAME_LEN 100 - -/* * 51-char inline_name is long enough for all cephfs and all but one * rbd requests: <imgname> in "<imgname>.rbd"/"rbd_id.<imgname>" can be * arbitrarily long (~PAGE_SIZE). It's done once during rbd map; all @@ -173,8 +166,8 @@ struct ceph_osdmap { * the list of osds that store+replicate them. */ struct crush_map *crush; - struct mutex crush_scratch_mutex; - int crush_scratch_ary[CEPH_PG_MAX_SIZE * 3]; + struct mutex crush_workspace_mutex; + void *crush_workspace; }; static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd) diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h index 5c0da61cb763..5d0018782d50 100644 --- a/include/linux/ceph/rados.h +++ b/include/linux/ceph/rados.h @@ -50,7 +50,7 @@ struct ceph_timespec { #define CEPH_PG_LAYOUT_LINEAR 2 #define CEPH_PG_LAYOUT_HYBRID 3 -#define CEPH_PG_MAX_SIZE 16 /* max # osds in a single pg */ +#define CEPH_PG_MAX_SIZE 32 /* max # osds in a single pg */ /* * placement group. diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 861b4677fc5b..3c02404cfce9 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -148,14 +148,18 @@ struct cgroup_subsys_state { * set for a task. */ struct css_set { - /* Reference count */ - atomic_t refcount; - /* - * List running through all cgroup groups in the same hash - * slot. Protected by css_set_lock + * Set of subsystem states, one for each subsystem. This array is + * immutable after creation apart from the init_css_set during + * subsystem registration (at boot time). 
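After the bug.h rework above, CHECK_DATA_CORRUPTION() no longer returns from the enclosing function itself; it now evaluates to a must-check bool, and the caller decides how to bail out. A minimal caller sketch under that contract (the function name, list fields and message are illustrative, not taken from this patch; assumes <linux/bug.h> and <linux/list.h>):

static bool demo_node_valid(struct list_head *prev, struct list_head *next)
{
	if (CHECK_DATA_CORRUPTION(next->prev != prev,
				  "demo: next->prev should be %p, was %p\n",
				  prev, next->prev))
		return false;	/* the caller, not the macro, bails out now */
	return true;
}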
*/ - struct hlist_node hlist; + struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; + + /* reference count */ + atomic_t refcount; + + /* the default cgroup associated with this css_set */ + struct cgroup *dfl_cgrp; /* * Lists running through all tasks using this cgroup group. @@ -167,21 +171,29 @@ struct css_set { struct list_head tasks; struct list_head mg_tasks; + /* all css_task_iters currently walking this cset */ + struct list_head task_iters; + /* - * List of cgrp_cset_links pointing at cgroups referenced from this - * css_set. Protected by css_set_lock. + * On the default hierarhcy, ->subsys[ssid] may point to a css + * attached to an ancestor instead of the cgroup this css_set is + * associated with. The following node is anchored at + * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to + * iterate through all css's attached to a given cgroup. */ - struct list_head cgrp_links; + struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; - /* the default cgroup associated with this css_set */ - struct cgroup *dfl_cgrp; + /* + * List running through all cgroup groups in the same hash + * slot. Protected by css_set_lock + */ + struct hlist_node hlist; /* - * Set of subsystem states, one for each subsystem. This array is - * immutable after creation apart from the init_css_set during - * subsystem registration (at boot time). + * List of cgrp_cset_links pointing at cgroups referenced from this + * css_set. Protected by css_set_lock. */ - struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; + struct list_head cgrp_links; /* * List of csets participating in the on-going migration either as @@ -201,18 +213,6 @@ struct css_set { struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; - /* - * On the default hierarhcy, ->subsys[ssid] may point to a css - * attached to an ancestor instead of the cgroup this css_set is - * associated with. The following node is anchored at - * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to - * iterate through all css's attached to a given cgroup. - */ - struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; - - /* all css_task_iters currently walking this cset */ - struct list_head task_iters; - /* dead and being drained, ignore for migration */ bool dead; @@ -388,6 +388,9 @@ struct cftype { struct list_head node; /* anchored at ss->cfts */ struct kernfs_ops *kf_ops; + int (*open)(struct kernfs_open_file *of); + void (*release)(struct kernfs_open_file *of); + /* * read_u64() is a shortcut for the common case of returning a * single integer. Use it in place of read() diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index c83c23f0577b..f6b43fbb141c 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -266,7 +266,7 @@ void css_task_iter_end(struct css_task_iter *it); * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset * @leader: the loop cursor * @dst_css: the destination css - * @tset: takset to iterate + * @tset: taskset to iterate * * Iterate threadgroup leaders of @tset. For single-task migrations, @tset * may not contain any. diff --git a/include/linux/cgroup_rdma.h b/include/linux/cgroup_rdma.h new file mode 100644 index 000000000000..e94290b29e99 --- /dev/null +++ b/include/linux/cgroup_rdma.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2016 Parav Pandit <pandit.parav@gmail.com> + * + * This file is subject to the terms and conditions of version 2 of the GNU + * General Public License. See the file COPYING in the main directory of the + * Linux distribution for more details. 
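The cftype additions above give each cgroup control file optional open/release hooks alongside the existing kernfs-backed operations. A hedged sketch of how a controller might wire them up (all names here are illustrative, not from this patch):

static int demo_open(struct kernfs_open_file *of)
{
	/* e.g. look up or pin state needed while the file is open */
	return 0;
}

static void demo_release(struct kernfs_open_file *of)
{
	/* drop whatever demo_open() acquired */
}

static int demo_show(struct seq_file *sf, void *v)
{
	seq_puts(sf, "0\n");
	return 0;
}

static struct cftype demo_files[] = {
	{
		.name		= "demo.stat",
		.open		= demo_open,
		.release	= demo_release,
		.seq_show	= demo_show,
	},
	{ }	/* terminator */
};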
+ */ + +#ifndef _CGROUP_RDMA_H +#define _CGROUP_RDMA_H + +#include <linux/cgroup.h> + +enum rdmacg_resource_type { + RDMACG_RESOURCE_HCA_HANDLE, + RDMACG_RESOURCE_HCA_OBJECT, + RDMACG_RESOURCE_MAX, +}; + +#ifdef CONFIG_CGROUP_RDMA + +struct rdma_cgroup { + struct cgroup_subsys_state css; + + /* + * head to keep track of all resource pools + * that belongs to this cgroup. + */ + struct list_head rpools; +}; + +struct rdmacg_device { + struct list_head dev_node; + struct list_head rpools; + char *name; +}; + +/* + * APIs for RDMA/IB stack to publish when a device wants to + * participate in resource accounting + */ +int rdmacg_register_device(struct rdmacg_device *device); +void rdmacg_unregister_device(struct rdmacg_device *device); + +/* APIs for RDMA/IB stack to charge/uncharge pool specific resources */ +int rdmacg_try_charge(struct rdma_cgroup **rdmacg, + struct rdmacg_device *device, + enum rdmacg_resource_type index); +void rdmacg_uncharge(struct rdma_cgroup *cg, + struct rdmacg_device *device, + enum rdmacg_resource_type index); +#endif /* CONFIG_CGROUP_RDMA */ +#endif /* _CGROUP_RDMA_H */ diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 0df0336acee9..d0e597c44585 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -56,6 +56,10 @@ SUBSYS(hugetlb) SUBSYS(pids) #endif +#if IS_ENABLED(CONFIG_CGROUP_RDMA) +SUBSYS(rdma) +#endif + /* * The following subsystems are not supported on the default hierarchy. */ diff --git a/include/linux/cma.h b/include/linux/cma.h index 6f0a91b37f68..03f32d0bd1d8 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -29,6 +29,7 @@ extern int __init cma_declare_contiguous(phys_addr_t base, extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, unsigned int order_per_bit, struct cma **res_cma); -extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align); +extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, + gfp_t gfp_mask); extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); #endif diff --git a/include/linux/compat.h b/include/linux/compat.h index 9e40be522793..aef47be2a5c1 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -711,8 +711,10 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long); compat_stack_t __user *__uss = uss; \ struct task_struct *t = current; \ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \ - put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \ + put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ put_user_ex(t->sas_ss_size, &__uss->ss_size); \ + if (t->sas_ss_flags & SS_AUTODISARM) \ + sas_ss_reset(t); \ } while (0); asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index fddd1a5eb322..76e28c229805 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -122,6 +122,7 @@ #define __attribute_const__ __attribute__((__const__)) #define __maybe_unused __attribute__((unused)) #define __always_unused __attribute__((unused)) +#define __mode(x) __attribute__((mode(x))) /* gcc version specific checks */ @@ -196,6 +197,17 @@ #endif #endif +#ifdef CONFIG_STACK_VALIDATION +#define annotate_unreachable() ({ \ + asm("%c0:\t\n" \ + ".pushsection __unreachable, \"a\"\t\n" \ + ".long %c0b\t\n" \ + ".popsection\t\n" : : "i" (__LINE__)); \ +}) +#else +#define annotate_unreachable() +#endif + /* * Mark a position in code as 
unreachable. This can be used to * suppress control flow warnings after asm blocks that transfer @@ -205,7 +217,8 @@ * this in the preprocessor, but we can live with this because they're * unreleased. Really, we need to have autoconf for the kernel. */ -#define unreachable() __builtin_unreachable() +#define unreachable() \ + do { annotate_unreachable(); __builtin_unreachable(); } while (0) /* Mark a function definition as prohibited from being cloned. */ #define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 91c30cba984e..f8110051188f 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -105,29 +105,36 @@ struct ftrace_branch_data { }; }; +struct ftrace_likely_data { + struct ftrace_branch_data data; + unsigned long constant; +}; + /* * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code * to disable branch tracing on a per file basis. */ #if defined(CONFIG_TRACE_BRANCH_PROFILING) \ && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) -void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); +void ftrace_likely_update(struct ftrace_likely_data *f, int val, + int expect, int is_constant); #define likely_notrace(x) __builtin_expect(!!(x), 1) #define unlikely_notrace(x) __builtin_expect(!!(x), 0) -#define __branch_check__(x, expect) ({ \ +#define __branch_check__(x, expect, is_constant) ({ \ int ______r; \ - static struct ftrace_branch_data \ + static struct ftrace_likely_data \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_annotated_branch"))) \ ______f = { \ - .func = __func__, \ - .file = __FILE__, \ - .line = __LINE__, \ + .data.func = __func__, \ + .data.file = __FILE__, \ + .data.line = __LINE__, \ }; \ - ______r = likely_notrace(x); \ - ftrace_likely_update(&______f, ______r, expect); \ + ______r = __builtin_expect(!!(x), expect); \ + ftrace_likely_update(&______f, ______r, \ + expect, is_constant); \ ______r; \ }) @@ -137,10 +144,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); * written by Daniel Walker. */ # ifndef likely -# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1)) +# define likely(x) (__branch_check__(x, 1, __builtin_constant_p(x))) # endif # ifndef unlikely -# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0)) +# define unlikely(x) (__branch_check__(x, 0, __builtin_constant_p(x))) # endif #ifdef CONFIG_PROFILE_ALL_BRANCHES @@ -570,12 +577,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s (_________p1); \ }) -/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ -#ifdef CONFIG_KPROBES -# define __kprobes __attribute__((__section__(".kprobes.text"))) -# define nokprobe_inline __always_inline -#else -# define __kprobes -# define nokprobe_inline inline -#endif #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h index be8f12b8f195..fbecbd089d75 100644 --- a/include/linux/crush/crush.h +++ b/include/linux/crush/crush.h @@ -135,13 +135,6 @@ struct crush_bucket { __u32 size; /* num items */ __s32 *items; - /* - * cached random permutation: used for uniform bucket and for - * the linear search fallback for the other bucket types. 
- */ - __u32 perm_x; /* @x for which *perm is defined */ - __u32 perm_n; /* num elements of *perm that are permuted/defined */ - __u32 *perm; }; struct crush_bucket_uniform { @@ -211,6 +204,21 @@ struct crush_map { * device fails. */ __u8 chooseleaf_stable; + /* + * This value is calculated after decode or construction by + * the builder. It is exposed here (rather than having a + * 'build CRUSH working space' function) so that callers can + * reserve a static buffer, allocate space on the stack, or + * otherwise avoid calling into the heap allocator if they + * want to. The size of the working space depends on the map, + * while the size of the scratch vector passed to the mapper + * depends on the size of the desired result set. + * + * Nothing stops the caller from allocating both in one swell + * foop and passing in two points, though. + */ + size_t working_size; + #ifndef __KERNEL__ /* * version 0 (original) of straw_calc has various flaws. version 1 @@ -248,4 +256,23 @@ static inline int crush_calc_tree_node(int i) return ((i+1) << 1)-1; } +/* + * These data structures are private to the CRUSH implementation. They + * are exposed in this header file because builder needs their + * definitions to calculate the total working size. + * + * Moving this out of the crush map allow us to treat the CRUSH map as + * immutable within the mapper and removes the requirement for a CRUSH + * map lock. + */ +struct crush_work_bucket { + __u32 perm_x; /* @x for which *perm is defined */ + __u32 perm_n; /* num elements of *perm that are permuted/defined */ + __u32 *perm; /* Permutation of the bucket's items */ +}; + +struct crush_work { + struct crush_work_bucket **work; /* Per-bucket working store */ +}; + #endif diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h index 5dfd5b1125d2..c95e19e1ff11 100644 --- a/include/linux/crush/mapper.h +++ b/include/linux/crush/mapper.h @@ -15,6 +15,20 @@ extern int crush_do_rule(const struct crush_map *map, int ruleno, int x, int *result, int result_max, const __u32 *weights, int weight_max, - int *scratch); + void *cwin); + +/* + * Returns the exact amount of workspace that will need to be used + * for a given combination of crush_map and result_max. The caller can + * then allocate this much on its own, either on the stack, in a + * per-thread long-lived buffer, or however it likes. 
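The comment above describes the new caller-provided CRUSH working space: it is sized with the crush_work_size() helper defined just below, primed with crush_init_workspace(), and then passed to crush_do_rule() in place of the old scratch array. A rough sketch of that pattern (the allocation flag, error handling and local variables such as map, ruleno, x, result and weights are assumptions, not part of this patch):

	size_t wsize = crush_work_size(map, result_max);
	void *work = kmalloc(wsize, GFP_NOIO);

	if (work) {
		crush_init_workspace(map, work);
		len = crush_do_rule(map, ruleno, x, result, result_max,
				    weights, weight_max, work);
		kfree(work);
	}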
+ */ +static inline size_t crush_work_size(const struct crush_map *map, + int result_max) +{ + return map->working_size + result_max * 3 * sizeof(__u32); +} + +void crush_init_workspace(const struct crush_map *map, void *v); #endif diff --git a/include/linux/dax.h b/include/linux/dax.h index 1e77ff5818f1..d8a3dc042e1c 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -38,8 +38,8 @@ static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags) ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops); -int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, - const struct iomap_ops *ops); +int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, + const struct iomap_ops *ops); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_invalidate_mapping_entry_sync(struct address_space *mapping, @@ -71,19 +71,13 @@ static inline unsigned int dax_radix_order(void *entry) return PMD_SHIFT - PAGE_SHIFT; return 0; } -int dax_iomap_pmd_fault(struct vm_fault *vmf, const struct iomap_ops *ops); #else static inline unsigned int dax_radix_order(void *entry) { return 0; } -static inline int dax_iomap_pmd_fault(struct vm_fault *vmf, - const struct iomap_ops *ops) -{ - return VM_FAULT_FALLBACK; -} #endif -int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); +int dax_pfn_mkwrite(struct vm_fault *vmf); static inline bool vma_is_dax(struct vm_area_struct *vma) { diff --git a/include/linux/dcache.h b/include/linux/dcache.h index c965e4469499..591b6c16f9c1 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -562,7 +562,7 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper) * @inode: inode to select the dentry from multiple layers (can be NULL) * @flags: open flags to control copy-up behavior * - * If dentry is on an union/overlay, then return the underlying, real dentry. + * If dentry is on a union/overlay, then return the underlying, real dentry. * Otherwise return the dentry itself. * * See also: Documentation/filesystems/vfs.txt @@ -581,7 +581,7 @@ static inline struct dentry *d_real(struct dentry *dentry, * d_real_inode - Return the real inode * @dentry: The dentry to query * - * If dentry is on an union/overlay, then return the underlying, real inode. + * If dentry is on a union/overlay, then return the underlying, real inode. * Otherwise return d_inode(). 
*/ static inline struct inode *d_real_inode(const struct dentry *dentry) diff --git a/include/linux/device.h b/include/linux/device.h index bd684fc8ec1d..30c4570e928d 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -925,6 +925,7 @@ struct device { #ifdef CONFIG_NUMA int numa_node; /* NUMA node this device is close to */ #endif + const struct dma_map_ops *dma_ops; u64 *dma_mask; /* dma mask (if dma'able device) */ u64 coherent_dma_mask;/* Like dma_mask, but for alloc_coherent mappings as @@ -1139,6 +1140,7 @@ static inline bool device_supports_offline(struct device *dev) extern void lock_device_hotplug(void); extern void unlock_device_hotplug(void); extern int lock_device_hotplug_sysfs(void); +void assert_held_device_hotplug(void); extern int device_offline(struct device *dev); extern int device_online(struct device *dev); extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index fec734df1524..b67bf6ac907d 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h @@ -112,7 +112,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, } struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, - unsigned int order); + unsigned int order, gfp_t gfp_mask); bool dma_release_from_contiguous(struct device *dev, struct page *pages, int count); @@ -145,7 +145,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size, static inline struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, - unsigned int order) + unsigned int order, gfp_t gfp_mask) { return NULL; } diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index c24721a33b4c..0977317c6835 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -134,7 +134,8 @@ struct dma_map_ops { int is_phys; }; -extern struct dma_map_ops dma_noop_ops; +extern const struct dma_map_ops dma_noop_ops; +extern const struct dma_map_ops dma_virt_ops; #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) @@ -171,14 +172,26 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, #ifdef CONFIG_HAS_DMA #include <asm/dma-mapping.h> +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) +{ + if (dev && dev->dma_ops) + return dev->dma_ops; + return get_arch_dma_ops(dev ? dev->bus : NULL); +} + +static inline void set_dma_ops(struct device *dev, + const struct dma_map_ops *dma_ops) +{ + dev->dma_ops = dma_ops; +} #else /* * Define the dma api to allow compilation but not linking of * dma dependent code. 
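With the new per-device dma_ops pointer in struct device and the get_dma_ops()/set_dma_ops() helpers above, a bus or driver can install a specific ops table for one device, and get_dma_ops() falls back to the architecture default when none is set. A minimal sketch using the dma_virt_ops table declared above (the probe function is hypothetical):

static int demo_probe(struct device *dev)
{
	/* route this device's DMA mapping calls through dma_virt_ops */
	set_dma_ops(dev, &dma_virt_ops);
	return 0;
}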
Code that depends on the dma-mapping * API needs to set 'depends on HAS_DMA' in its Kconfig */ -extern struct dma_map_ops bad_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +extern const struct dma_map_ops bad_dma_ops; +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { return &bad_dma_ops; } @@ -189,7 +202,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; kmemcheck_mark_initialized(ptr, size); @@ -208,7 +221,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_page) @@ -224,7 +237,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); int i, ents; struct scatterlist *s; @@ -242,7 +255,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg int nents, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); debug_dma_unmap_sg(dev, sg, nents, dir); @@ -256,7 +269,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; kmemcheck_mark_initialized(page_address(page) + offset, size); @@ -272,7 +285,7 @@ static inline void dma_unmap_page_attrs(struct device *dev, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_page) @@ -286,7 +299,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; BUG_ON(!valid_dma_direction(dir)); @@ -307,7 +320,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_resource) @@ -319,7 +332,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_single_for_cpu) @@ -331,7 +344,7 @@ static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_single_for_device) @@ -371,7 +384,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) 
{ - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_sg_for_cpu) @@ -383,7 +396,7 @@ static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_sg_for_device) @@ -428,7 +441,7 @@ static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); if (ops->mmap) return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); @@ -446,7 +459,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); if (ops->get_sgtable) return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, @@ -464,7 +477,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); void *cpu_addr; BUG_ON(!ops); @@ -486,7 +499,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); WARN_ON(irqs_disabled()); @@ -544,7 +557,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) #ifndef HAVE_ARCH_DMA_SUPPORTED static inline int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (!ops) return 0; @@ -557,7 +570,7 @@ static inline int dma_supported(struct device *dev, u64 mask) #ifndef HAVE_ARCH_DMA_SET_MASK static inline int dma_set_mask(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (ops->set_dma_mask) return ops->set_dma_mask(dev, mask); diff --git a/include/linux/fs.h b/include/linux/fs.h index c930cbc19342..c64f2cb7d364 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -655,6 +655,11 @@ struct inode { void *i_private; /* fs or device private pointer */ }; +static inline unsigned int i_blocksize(const struct inode *node) +{ + return (1 << node->i_blkbits); +} + static inline int inode_unhashed(struct inode *inode) { return hlist_unhashed(&inode->i_hash); diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h index a1e8277120c7..c46eab5bc893 100644 --- a/include/linux/fsl-diu-fb.h +++ b/include/linux/fsl-diu-fb.h @@ -73,7 +73,7 @@ struct diu_ad { /* Word 0(32-bit) in DDR memory */ /* __u16 comp; */ /* __u16 pixel_s:2; */ -/* __u16 pallete:1; */ +/* __u16 palette:1; */ /* __u16 red_c:2; */ /* __u16 green_c:2; */ /* __u16 blue_c:2; */ @@ -142,7 +142,7 @@ struct diu_ad { struct diu { __be32 desc[3]; __be32 gamma; - __be32 pallete; + __be32 palette; __be32 cursor; __be32 curs_pos; __be32 diu_mode; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0fe0b6295ab5..db373b9d3223 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -541,7 
+541,7 @@ static inline bool pm_suspended_storage(void) #if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA) /* The below functions must be run on a range from a single zone. */ extern int alloc_contig_range(unsigned long start, unsigned long end, - unsigned migratetype); + unsigned migratetype, gfp_t gfp_mask); extern void free_contig_range(unsigned long pfn, unsigned nr_pages); #endif diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index f0029e786205..a3762d49ba39 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -6,6 +6,18 @@ extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *vma); extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd); +extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pud_t *dst_pud, pud_t *src_pud, unsigned long addr, + struct vm_area_struct *vma); + +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud); +#else +static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) +{ +} +#endif + extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd); extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, @@ -17,6 +29,9 @@ extern bool madvise_free_huge_pmd(struct mmu_gather *tlb, extern int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr); +extern int zap_huge_pud(struct mmu_gather *tlb, + struct vm_area_struct *vma, + pud_t *pud, unsigned long addr); extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec); @@ -26,8 +41,10 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, int prot_numa); -int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *, - pfn_t pfn, bool write); +int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd, pfn_t pfn, bool write); +int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, + pud_t *pud, pfn_t pfn, bool write); enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, @@ -58,13 +75,14 @@ extern struct kobj_attribute shmem_enabled_attr; #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) #ifdef CONFIG_TRANSPARENT_HUGEPAGE -struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd, int flags); - #define HPAGE_PMD_SHIFT PMD_SHIFT #define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT) #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1)) +#define HPAGE_PUD_SHIFT PUD_SHIFT +#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT) +#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1)) + extern bool is_vma_temporary_stack(struct vm_area_struct *vma); #define transparent_hugepage_enabled(__vma) \ @@ -118,6 +136,17 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, bool freeze, struct page *page); +void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, + unsigned long address); + +#define split_huge_pud(__vma, __pud, __address) \ + do { \ + pud_t *____pud = (__pud); \ + if (pud_trans_huge(*____pud) \ + || pud_devmap(*____pud)) \ + __split_huge_pud(__vma, 
__pud, __address); \ + } while (0) + extern int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice); extern void vma_adjust_trans_huge(struct vm_area_struct *vma, @@ -126,6 +155,8 @@ extern void vma_adjust_trans_huge(struct vm_area_struct *vma, long adjust_next); extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma); +extern spinlock_t *__pud_trans_huge_lock(pud_t *pud, + struct vm_area_struct *vma); /* mmap_sem must be held on entry */ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) @@ -136,6 +167,15 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, else return NULL; } +static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, + struct vm_area_struct *vma) +{ + VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); + if (pud_trans_huge(*pud) || pud_devmap(*pud)) + return __pud_trans_huge_lock(pud, vma); + else + return NULL; +} static inline int hpage_nr_pages(struct page *page) { if (unlikely(PageTransHuge(page))) @@ -143,6 +183,11 @@ static inline int hpage_nr_pages(struct page *page) return 1; } +struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd, int flags); +struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, + pud_t *pud, int flags); + extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); extern struct page *huge_zero_page; @@ -157,6 +202,11 @@ static inline bool is_huge_zero_pmd(pmd_t pmd) return is_huge_zero_page(pmd_page(pmd)); } +static inline bool is_huge_zero_pud(pud_t pud) +{ + return false; +} + struct page *mm_get_huge_zero_page(struct mm_struct *mm); void mm_put_huge_zero_page(struct mm_struct *mm); @@ -167,6 +217,10 @@ void mm_put_huge_zero_page(struct mm_struct *mm); #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) #define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; }) +#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; }) +#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) +#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) + #define hpage_nr_pages(x) 1 #define transparent_hugepage_enabled(__vma) 0 @@ -195,6 +249,9 @@ static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, static inline void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, bool freeze, struct page *page) {} +#define split_huge_pud(__vma, __pmd, __address) \ + do { } while (0) + static inline int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) { @@ -212,6 +269,11 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, { return NULL; } +static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, + struct vm_area_struct *vma) +{ + return NULL; +} static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd) { @@ -223,6 +285,11 @@ static inline bool is_huge_zero_page(struct page *page) return false; } +static inline bool is_huge_zero_pud(pud_t pud) +{ + return false; +} + static inline void mm_put_huge_zero_page(struct mm_struct *mm) { return; @@ -233,6 +300,12 @@ static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, { return NULL; } + +static inline struct page *follow_devmap_pud(struct vm_area_struct *vma, + unsigned long addr, pud_t *pud, int flags) +{ + return NULL; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* _LINUX_HUGE_MM_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 7b23a3316dcb..bed8fbb45f31 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -283,6 +283,7 @@ enum i2c_slave_event { 
extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb); extern int i2c_slave_unregister(struct i2c_client *client); +extern bool i2c_detect_slave_mode(struct device *dev); static inline int i2c_slave_event(struct i2c_client *client, enum i2c_slave_event event, u8 *val) diff --git a/include/linux/idr.h b/include/linux/idr.h index 3c01b89aed67..bf70b3ef0a07 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -12,47 +12,29 @@ #ifndef __IDR_H__ #define __IDR_H__ -#include <linux/types.h> -#include <linux/bitops.h> -#include <linux/init.h> -#include <linux/rcupdate.h> +#include <linux/radix-tree.h> +#include <linux/gfp.h> +#include <linux/percpu.h> + +struct idr { + struct radix_tree_root idr_rt; + unsigned int idr_next; +}; /* - * Using 6 bits at each layer allows us to allocate 7 layers out of each page. - * 8 bits only gave us 3 layers out of every pair of pages, which is less - * efficient except for trees with a largest element between 192-255 inclusive. + * The IDR API does not expose the tagging functionality of the radix tree + * to users. Use tag 0 to track whether a node has free space below it. */ -#define IDR_BITS 6 -#define IDR_SIZE (1 << IDR_BITS) -#define IDR_MASK ((1 << IDR_BITS)-1) - -struct idr_layer { - int prefix; /* the ID prefix of this idr_layer */ - int layer; /* distance from leaf */ - struct idr_layer __rcu *ary[1<<IDR_BITS]; - int count; /* When zero, we can release it */ - union { - /* A zero bit means "space here" */ - DECLARE_BITMAP(bitmap, IDR_SIZE); - struct rcu_head rcu_head; - }; -}; +#define IDR_FREE 0 -struct idr { - struct idr_layer __rcu *hint; /* the last layer allocated from */ - struct idr_layer __rcu *top; - int layers; /* only valid w/o concurrent changes */ - int cur; /* current pos for cyclic allocation */ - spinlock_t lock; - int id_free_cnt; - struct idr_layer *id_free; -}; +/* Set the IDR flag and the IDR_FREE tag */ +#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT)) -#define IDR_INIT(name) \ +#define IDR_INIT \ { \ - .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ + .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \ } -#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) +#define DEFINE_IDR(name) struct idr name = IDR_INIT /** * idr_get_cursor - Return the current position of the cyclic allocator @@ -62,9 +44,9 @@ struct idr { * idr_alloc_cyclic() if it is free (otherwise the search will start from * this position). */ -static inline unsigned int idr_get_cursor(struct idr *idr) +static inline unsigned int idr_get_cursor(const struct idr *idr) { - return READ_ONCE(idr->cur); + return READ_ONCE(idr->idr_next); } /** @@ -77,7 +59,7 @@ static inline unsigned int idr_get_cursor(struct idr *idr) */ static inline void idr_set_cursor(struct idr *idr, unsigned int val) { - WRITE_ONCE(idr->cur, val); + WRITE_ONCE(idr->idr_next, val); } /** @@ -97,22 +79,31 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val) * period). */ -/* - * This is what we export. 
- */ - -void *idr_find_slowpath(struct idr *idp, int id); void idr_preload(gfp_t gfp_mask); -int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask); -int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask); -int idr_for_each(struct idr *idp, +int idr_alloc(struct idr *, void *entry, int start, int end, gfp_t); +int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t); +int idr_for_each(const struct idr *, int (*fn)(int id, void *p, void *data), void *data); -void *idr_get_next(struct idr *idp, int *nextid); -void *idr_replace(struct idr *idp, void *ptr, int id); -void idr_remove(struct idr *idp, int id); -void idr_destroy(struct idr *idp); -void idr_init(struct idr *idp); -bool idr_is_empty(struct idr *idp); +void *idr_get_next(struct idr *, int *nextid); +void *idr_replace(struct idr *, void *, int id); +void idr_destroy(struct idr *); + +static inline void *idr_remove(struct idr *idr, int id) +{ + return radix_tree_delete_item(&idr->idr_rt, id, NULL); +} + +static inline void idr_init(struct idr *idr) +{ + INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER); + idr->idr_next = 0; +} + +static inline bool idr_is_empty(const struct idr *idr) +{ + return radix_tree_empty(&idr->idr_rt) && + radix_tree_tagged(&idr->idr_rt, IDR_FREE); +} /** * idr_preload_end - end preload section started with idr_preload() @@ -137,19 +128,14 @@ static inline void idr_preload_end(void) * This function can be called under rcu_read_lock(), given that the leaf * pointers lifetimes are correctly managed. */ -static inline void *idr_find(struct idr *idr, int id) +static inline void *idr_find(const struct idr *idr, int id) { - struct idr_layer *hint = rcu_dereference_raw(idr->hint); - - if (hint && (id & ~IDR_MASK) == hint->prefix) - return rcu_dereference_raw(hint->ary[id & IDR_MASK]); - - return idr_find_slowpath(idr, id); + return radix_tree_lookup(&idr->idr_rt, id); } /** * idr_for_each_entry - iterate over an idr's elements of a given type - * @idp: idr handle + * @idr: idr handle * @entry: the type * to use as cursor * @id: id entry's key * @@ -157,57 +143,60 @@ static inline void *idr_find(struct idr *idr, int id) * after normal terminatinon @entry is left with the value NULL. This * is convenient for a "not found" value. */ -#define idr_for_each_entry(idp, entry, id) \ - for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id) +#define idr_for_each_entry(idr, entry, id) \ + for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id) /** - * idr_for_each_entry - continue iteration over an idr's elements of a given type - * @idp: idr handle + * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type + * @idr: idr handle * @entry: the type * to use as cursor * @id: id entry's key * * Continue to iterate over list of given type, continuing after * the current position. */ -#define idr_for_each_entry_continue(idp, entry, id) \ - for ((entry) = idr_get_next((idp), &(id)); \ +#define idr_for_each_entry_continue(idr, entry, id) \ + for ((entry) = idr_get_next((idr), &(id)); \ entry; \ - ++id, (entry) = idr_get_next((idp), &(id))) + ++id, (entry) = idr_get_next((idr), &(id))) /* * IDA - IDR based id allocator, use when translation from id to * pointer isn't necessary. - * - * IDA_BITMAP_LONGS is calculated to be one less to accommodate - * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes. 
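Illustrative sketch, not part of the patch: a minimal example of how the radix-tree backed IDR above is typically used. The names example_idr, example_insert/lookup/delete, the object pointer and the GFP flags are assumptions made for the illustration; the locking normally taken around the allocation is omitted.

static DEFINE_IDR(example_idr);

static int example_insert(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);		/* preallocate radix-tree nodes */
	/* a spinlock would normally be taken here */
	id = idr_alloc(&example_idr, obj, 1, 0, GFP_NOWAIT);	/* any id >= 1 */
	idr_preload_end();

	return id;				/* new id, or -errno on failure */
}

static void *example_lookup(int id)
{
	return idr_find(&example_idr, id);	/* may be called under rcu_read_lock() */
}

static void example_delete(int id)
{
	idr_remove(&example_idr, id);
}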
*/ #define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ -#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1) +#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long)) #define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) struct ida_bitmap { - long nr_busy; unsigned long bitmap[IDA_BITMAP_LONGS]; }; +DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap); + struct ida { - struct idr idr; - struct ida_bitmap *free_bitmap; + struct radix_tree_root ida_rt; }; -#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, } -#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) +#define IDA_INIT { \ + .ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT), \ +} +#define DEFINE_IDA(name) struct ida name = IDA_INIT int ida_pre_get(struct ida *ida, gfp_t gfp_mask); int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); void ida_remove(struct ida *ida, int id); void ida_destroy(struct ida *ida); -void ida_init(struct ida *ida); int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, gfp_t gfp_mask); void ida_simple_remove(struct ida *ida, unsigned int id); +static inline void ida_init(struct ida *ida) +{ + INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT); +} + /** * ida_get_new - allocate new ID * @ida: idr handle @@ -220,11 +209,8 @@ static inline int ida_get_new(struct ida *ida, int *p_id) return ida_get_new_above(ida, 0, p_id); } -static inline bool ida_is_empty(struct ida *ida) +static inline bool ida_is_empty(const struct ida *ida) { - return idr_is_empty(&ida->idr); + return radix_tree_empty(&ida->ida_rt); } - -void __init idr_init_cache(void); - #endif /* __IDR_H__ */ diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 891459caa278..7291810067eb 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -79,8 +79,7 @@ int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, const struct iomap_ops *ops); int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, const struct iomap_ops *ops); -int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, - const struct iomap_ops *ops); +int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops); int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, loff_t start, loff_t len, const struct iomap_ops *ops); diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index 1c30014ed176..d29e1e21bf3f 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h @@ -17,7 +17,7 @@ #include <linux/kernel.h> #include <linux/types.h> -#include <linux/hrtimer.h> +#include <linux/ktime.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/io.h> diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 78c5d5ae3857..f1045b2c6a00 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -100,7 +100,7 @@ struct ipmi_user_hndl { /* Create a new user of the IPMI layer on the given interface number. 
*/ int ipmi_create_user(unsigned int if_num, - struct ipmi_user_hndl *handler, + const struct ipmi_user_hndl *handler, void *handler_data, ipmi_user_t *user); diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index b63d6b7b0db0..8e06d758ee48 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -89,11 +89,17 @@ extern bool static_key_initialized; struct static_key { atomic_t enabled; -/* Set lsb bit to 1 if branch is default true, 0 ot */ - struct jump_entry *entries; -#ifdef CONFIG_MODULES - struct static_key_mod *next; -#endif +/* + * bit 0 => 1 if key is initially true + * 0 if initially false + * bit 1 => 1 if points to struct static_key_mod + * 0 if points to struct jump_entry + */ + union { + unsigned long type; + struct jump_entry *entries; + struct static_key_mod *next; + }; }; #else @@ -118,9 +124,10 @@ struct module; #ifdef HAVE_JUMP_LABEL -#define JUMP_TYPE_FALSE 0UL -#define JUMP_TYPE_TRUE 1UL -#define JUMP_TYPE_MASK 1UL +#define JUMP_TYPE_FALSE 0UL +#define JUMP_TYPE_TRUE 1UL +#define JUMP_TYPE_LINKED 2UL +#define JUMP_TYPE_MASK 3UL static __always_inline bool static_key_false(struct static_key *key) { diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 820c0ad54a01..c908b25bf5a5 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -52,7 +52,7 @@ void kasan_free_pages(struct page *page, unsigned int order); void kasan_cache_create(struct kmem_cache *cache, size_t *size, unsigned long *flags); void kasan_cache_shrink(struct kmem_cache *cache); -void kasan_cache_destroy(struct kmem_cache *cache); +void kasan_cache_shutdown(struct kmem_cache *cache); void kasan_poison_slab(struct page *page); void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); @@ -98,7 +98,7 @@ static inline void kasan_cache_create(struct kmem_cache *cache, size_t *size, unsigned long *flags) {} static inline void kasan_cache_shrink(struct kmem_cache *cache) {} -static inline void kasan_cache_destroy(struct kmem_cache *cache) {} +static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} static inline void kasan_poison_slab(struct page *page) {} static inline void kasan_unpoison_object_data(struct kmem_cache *cache, diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index 8f2e059e4d45..4d748603e818 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h @@ -8,7 +8,7 @@ /* * The use of "&&" / "||" is limited in certain expressions. - * The followings enable to calculate "and" / "or" with macro expansion only. + * The following enable to calculate "and" / "or" with macro expansion only. */ #define __and(x, y) ___and(x, y) #define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y) diff --git a/include/linux/kernel.h b/include/linux/kernel.h index cb09238f6d32..4c26dc3a8295 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -100,16 +100,18 @@ ) /* - * Divide positive or negative dividend by positive divisor and round - * to closest integer. Result is undefined for negative divisors and - * for negative dividends if the divisor variable type is unsigned. + * Divide positive or negative dividend by positive or negative divisor + * and round to closest integer. Result is undefined for negative + * divisors if he dividend variable type is unsigned and for negative + * dividends if the divisor variable type is unsigned. */ #define DIV_ROUND_CLOSEST(x, divisor)( \ { \ typeof(x) __x = x; \ typeof(divisor) __d = divisor; \ (((typeof(x))-1) > 0 || \ - ((typeof(divisor))-1) > 0 || (__x) > 0) ? 
\ + ((typeof(divisor))-1) > 0 || \ + (((__x) > 0) == ((__d) > 0))) ? \ (((__x) + ((__d) / 2)) / (__d)) : \ (((__x) - ((__d) / 2)) / (__d)); \ } \ diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 7056238fd9f5..a9b11b8d06f2 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -46,6 +46,7 @@ enum kernfs_node_flag { KERNFS_SUICIDAL = 0x0400, KERNFS_SUICIDED = 0x0800, KERNFS_EMPTY_DIR = 0x1000, + KERNFS_HAS_RELEASE = 0x2000, }; /* @flags for kernfs_create_root() */ @@ -175,6 +176,7 @@ struct kernfs_open_file { /* published fields */ struct kernfs_node *kn; struct file *file; + struct seq_file *seq_file; void *priv; /* private fields, do not use outside kernfs proper */ @@ -185,12 +187,20 @@ struct kernfs_open_file { char *prealloc_buf; size_t atomic_write_len; - bool mmapped; + bool mmapped:1; + bool released:1; const struct vm_operations_struct *vm_ops; }; struct kernfs_ops { /* + * Optional open/release methods. Both are called with + * @of->seq_file populated. + */ + int (*open)(struct kernfs_open_file *of); + void (*release)(struct kernfs_open_file *of); + + /* * Read is handled by either seq_file or raw_read(). * * If seq_show() is present, seq_file path is active. Other seq diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 16ddfb8b304a..c328e4f7dcad 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -29,7 +29,7 @@ * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi * <prasanna@in.ibm.com> added function-return probes. */ -#include <linux/compiler.h> /* for __kprobes */ +#include <linux/compiler.h> #include <linux/linkage.h> #include <linux/list.h> #include <linux/notifier.h> @@ -40,9 +40,9 @@ #include <linux/rcupdate.h> #include <linux/mutex.h> #include <linux/ftrace.h> +#include <asm/kprobes.h> #ifdef CONFIG_KPROBES -#include <asm/kprobes.h> /* kprobe_status settings */ #define KPROBE_HIT_ACTIVE 0x00000001 @@ -51,6 +51,7 @@ #define KPROBE_HIT_SSDONE 0x00000008 #else /* CONFIG_KPROBES */ +#include <asm-generic/kprobes.h> typedef int kprobe_opcode_t; struct arch_specific_insn { int dummy; @@ -509,18 +510,4 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr) } #endif -#ifdef CONFIG_KPROBES -/* - * Blacklist ganerating macro. Specify functions which is not probed - * by using this macro. - */ -#define __NOKPROBE_SYMBOL(fname) \ -static unsigned long __used \ - __attribute__((section("_kprobe_blacklist"))) \ - _kbl_addr_##fname = (unsigned long)fname; -#define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname) -#else -#define NOKPROBE_SYMBOL(fname) -#endif - #endif /* _LINUX_KPROBES_H */ diff --git a/include/linux/lz4.h b/include/linux/lz4.h index 6b784c59f321..394e3d9213b8 100644 --- a/include/linux/lz4.h +++ b/include/linux/lz4.h @@ -1,87 +1,648 @@ -#ifndef __LZ4_H__ -#define __LZ4_H__ -/* - * LZ4 Kernel Interface +/* LZ4 Kernel Interface * * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> + * Copyright (C) 2016, Sven Schmidt <4sschmid@informatik.uni-hamburg.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. + * + * This file is based on the original header file + * for LZ4 - Fast LZ compression algorithm. + * + * LZ4 - Fast LZ compression algorithm + * Copyright (C) 2011-2016, Yann Collet. 
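Illustrative aside for the DIV_ROUND_CLOSEST() change in include/linux/kernel.h above, not part of the patch: a few expected results of the reworked macro, assuming signed int arguments.

/* Signed operands: rounding now lands on the nearest integer for both signs. */
DIV_ROUND_CLOSEST(7, 2);	/* == 4  (7 / 2  =  3.5) */
DIV_ROUND_CLOSEST(-7, 2);	/* == -4 (-7 / 2 = -3.5) */
DIV_ROUND_CLOSEST(7, -2);	/* == -4; was undefined for negative divisors before */
DIV_ROUND_CLOSEST(-7, -2);	/* == 4 */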
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * You can contact the author at : + * - LZ4 homepage : http://www.lz4.org + * - LZ4 source repository : https://github.com/lz4/lz4 */ -#define LZ4_MEM_COMPRESS (16384) -#define LZ4HC_MEM_COMPRESS (262144 + (2 * sizeof(unsigned char *))) +#ifndef __LZ4_H__ +#define __LZ4_H__ + +#include <linux/types.h> +#include <linux/string.h> /* memset, memcpy */ + +/*-************************************************************************ + * CONSTANTS + **************************************************************************/ /* - * lz4_compressbound() - * Provides the maximum size that LZ4 may output in a "worst case" scenario - * (input data not compressible) + * LZ4_MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes + * (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ -static inline size_t lz4_compressbound(size_t isize) -{ - return isize + (isize / 255) + 16; -} +#define LZ4_MEMORY_USAGE 14 + +#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ +#define LZ4_COMPRESSBOUND(isize) (\ + (unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE \ + ? 
0 \ + : (isize) + ((isize)/255) + 16) + +#define LZ4_ACCELERATION_DEFAULT 1 +#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2) +#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) +#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) + +#define LZ4HC_MIN_CLEVEL 3 +#define LZ4HC_DEFAULT_CLEVEL 9 +#define LZ4HC_MAX_CLEVEL 16 + +#define LZ4HC_DICTIONARY_LOGSIZE 16 +#define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE) +#define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1) +#define LZ4HC_HASH_LOG (LZ4HC_DICTIONARY_LOGSIZE - 1) +#define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG) +#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1) + +/*-************************************************************************ + * STREAMING CONSTANTS AND STRUCTURES + **************************************************************************/ +#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE - 3)) + 4) +#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long)) + +#define LZ4_STREAMHCSIZE 262192 +#define LZ4_STREAMHCSIZE_SIZET (262192 / sizeof(size_t)) + +#define LZ4_STREAMDECODESIZE_U64 4 +#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * \ + sizeof(unsigned long long)) /* - * lz4_compress() - * src : source address of the original data - * src_len : size of the original data - * dst : output buffer address of the compressed data - * This requires 'dst' of size LZ4_COMPRESSBOUND. - * dst_len : is the output size, which is returned after compress done - * workmem : address of the working memory. - * This requires 'workmem' of size LZ4_MEM_COMPRESS. - * return : Success if return 0 - * Error if return (< 0) - * note : Destination buffer and workmem must be already allocated with - * the defined size. - */ -int lz4_compress(const unsigned char *src, size_t src_len, - unsigned char *dst, size_t *dst_len, void *wrkmem); - - /* - * lz4hc_compress() - * src : source address of the original data - * src_len : size of the original data - * dst : output buffer address of the compressed data - * This requires 'dst' of size LZ4_COMPRESSBOUND. - * dst_len : is the output size, which is returned after compress done - * workmem : address of the working memory. - * This requires 'workmem' of size LZ4HC_MEM_COMPRESS. - * return : Success if return 0 - * Error if return (< 0) - * note : Destination buffer and workmem must be already allocated with - * the defined size. - */ -int lz4hc_compress(const unsigned char *src, size_t src_len, - unsigned char *dst, size_t *dst_len, void *wrkmem); + * LZ4_stream_t - information structure to track an LZ4 stream. + */ +typedef struct { + uint32_t hashTable[LZ4_HASH_SIZE_U32]; + uint32_t currentOffset; + uint32_t initCheck; + const uint8_t *dictionary; + uint8_t *bufferStart; + uint32_t dictSize; +} LZ4_stream_t_internal; +typedef union { + unsigned long long table[LZ4_STREAMSIZE_U64]; + LZ4_stream_t_internal internal_donotuse; +} LZ4_stream_t; /* - * lz4_decompress() - * src : source address of the compressed data - * src_len : is the input size, whcih is returned after decompress done - * dest : output buffer address of the decompressed data - * actual_dest_len: is the size of uncompressed data, supposing it's known - * return : Success if return 0 - * Error if return (< 0) - * note : Destination buffer must be already allocated. - * slightly faster than lz4_decompress_unknownoutputsize() - */ -int lz4_decompress(const unsigned char *src, size_t *src_len, - unsigned char *dest, size_t actual_dest_len); + * LZ4_streamHC_t - information structure to track an LZ4HC stream. 
+ */ +typedef struct { + unsigned int hashTable[LZ4HC_HASHTABLESIZE]; + unsigned short chainTable[LZ4HC_MAXD]; + /* next block to continue on current prefix */ + const unsigned char *end; + /* All index relative to this position */ + const unsigned char *base; + /* alternate base for extDict */ + const unsigned char *dictBase; + /* below that point, need extDict */ + unsigned int dictLimit; + /* below that point, no more dict */ + unsigned int lowLimit; + /* index from which to continue dict update */ + unsigned int nextToUpdate; + unsigned int compressionLevel; +} LZ4HC_CCtx_internal; +typedef union { + size_t table[LZ4_STREAMHCSIZE_SIZET]; + LZ4HC_CCtx_internal internal_donotuse; +} LZ4_streamHC_t; /* - * lz4_decompress_unknownoutputsize() - * src : source address of the compressed data - * src_len : is the input size, therefore the compressed size - * dest : output buffer address of the decompressed data - * dest_len: is the max size of the destination buffer, which is - * returned with actual size of decompressed data after - * decompress done - * return : Success if return 0 - * Error if return (< 0) - * note : Destination buffer must be already allocated. - */ -int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len, - unsigned char *dest, size_t *dest_len); + * LZ4_streamDecode_t - information structure to track an + * LZ4 stream during decompression. + * + * init this structure using LZ4_setStreamDecode (or memset()) before first use + */ +typedef struct { + const uint8_t *externalDict; + size_t extDictSize; + const uint8_t *prefixEnd; + size_t prefixSize; +} LZ4_streamDecode_t_internal; +typedef union { + unsigned long long table[LZ4_STREAMDECODESIZE_U64]; + LZ4_streamDecode_t_internal internal_donotuse; +} LZ4_streamDecode_t; + +/*-************************************************************************ + * SIZE OF STATE + **************************************************************************/ +#define LZ4_MEM_COMPRESS LZ4_STREAMSIZE +#define LZ4HC_MEM_COMPRESS LZ4_STREAMHCSIZE + +/*-************************************************************************ + * Compression Functions + **************************************************************************/ + +/** + * LZ4_compressBound() - Max. output size in worst case szenarios + * @isize: Size of the input data + * + * Return: Max. size LZ4 may output in a "worst case" szenario + * (data not compressible) + */ +static inline int LZ4_compressBound(size_t isize) +{ + return LZ4_COMPRESSBOUND(isize); +} + +/** + * LZ4_compress_default() - Compress data from source to dest + * @source: source address of the original data + * @dest: output buffer address of the compressed data + * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE + * @maxOutputSize: full or partial size of buffer 'dest' + * which must be already allocated + * @wrkmem: address of the working memory. + * This requires 'workmem' of LZ4_MEM_COMPRESS. + * + * Compresses 'sourceSize' bytes from buffer 'source' + * into already allocated 'dest' buffer of size 'maxOutputSize'. + * Compression is guaranteed to succeed if + * 'maxOutputSize' >= LZ4_compressBound(inputSize). + * It also runs faster, so it's a recommended setting. + * If the function cannot compress 'source' into a more limited 'dest' budget, + * compression stops *immediately*, and the function result is zero. + * As a consequence, 'dest' content is not valid. 
+ * + * Return: Number of bytes written into buffer 'dest' + * (necessarily <= maxOutputSize) or 0 if compression fails + */ +int LZ4_compress_default(const char *source, char *dest, int inputSize, + int maxOutputSize, void *wrkmem); + +/** + * LZ4_compress_fast() - As LZ4_compress_default providing an acceleration param + * @source: source address of the original data + * @dest: output buffer address of the compressed data + * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE + * @maxOutputSize: full or partial size of buffer 'dest' + * which must be already allocated + * @acceleration: acceleration factor + * @wrkmem: address of the working memory. + * This requires 'workmem' of LZ4_MEM_COMPRESS. + * + * Same as LZ4_compress_default(), but allows to select an "acceleration" + * factor. The larger the acceleration value, the faster the algorithm, + * but also the lesser the compression. It's a trade-off. It can be fine tuned, + * with each successive value providing roughly +~3% to speed. + * An acceleration value of "1" is the same as regular LZ4_compress_default() + * Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT, which is 1. + * + * Return: Number of bytes written into buffer 'dest' + * (necessarily <= maxOutputSize) or 0 if compression fails + */ +int LZ4_compress_fast(const char *source, char *dest, int inputSize, + int maxOutputSize, int acceleration, void *wrkmem); + +/** + * LZ4_compress_destSize() - Compress as much data as possible + * from source to dest + * @source: source address of the original data + * @dest: output buffer address of the compressed data + * @sourceSizePtr: will be modified to indicate how many bytes where read + * from 'source' to fill 'dest'. New value is necessarily <= old value. + * @targetDestSize: Size of buffer 'dest' which must be already allocated + * @wrkmem: address of the working memory. + * This requires 'workmem' of LZ4_MEM_COMPRESS. + * + * Reverse the logic, by compressing as much data as possible + * from 'source' buffer into already allocated buffer 'dest' + * of size 'targetDestSize'. + * This function either compresses the entire 'source' content into 'dest' + * if it's large enough, or fill 'dest' buffer completely with as much data as + * possible from 'source'. + * + * Return: Number of bytes written into 'dest' (necessarily <= targetDestSize) + * or 0 if compression fails + */ +int LZ4_compress_destSize(const char *source, char *dest, int *sourceSizePtr, + int targetDestSize, void *wrkmem); + +/*-************************************************************************ + * Decompression Functions + **************************************************************************/ + +/** + * LZ4_decompress_fast() - Decompresses data from 'source' into 'dest' + * @source: source address of the compressed data + * @dest: output buffer address of the uncompressed data + * which must be already allocated with 'originalSize' bytes + * @originalSize: is the original and therefore uncompressed size + * + * Decompresses data from 'source' into 'dest'. + * This function fully respect memory boundaries for properly formed + * compressed data. + * It is a bit faster than LZ4_decompress_safe(). + * However, it does not provide any protection against intentionally + * modified data stream (malicious input). + * Use this function in trusted environment only + * (data to decode comes from a trusted source). + * + * Return: number of bytes read from the source buffer + * or a negative result if decompression fails. 
+ */ +int LZ4_decompress_fast(const char *source, char *dest, int originalSize); + +/** + * LZ4_decompress_safe() - Decompression protected against buffer overflow + * @source: source address of the compressed data + * @dest: output buffer address of the uncompressed data + * which must be already allocated + * @compressedSize: is the precise full size of the compressed block + * @maxDecompressedSize: is the size of 'dest' buffer + * + * Decompresses data fom 'source' into 'dest'. + * If the source stream is detected malformed, the function will + * stop decoding and return a negative result. + * This function is protected against buffer overflow exploits, + * including malicious data packets. It never writes outside output buffer, + * nor reads outside input buffer. + * + * Return: number of bytes decompressed into destination buffer + * (necessarily <= maxDecompressedSize) + * or a negative result in case of error + */ +int LZ4_decompress_safe(const char *source, char *dest, int compressedSize, + int maxDecompressedSize); + +/** + * LZ4_decompress_safe_partial() - Decompress a block of size 'compressedSize' + * at position 'source' into buffer 'dest' + * @source: source address of the compressed data + * @dest: output buffer address of the decompressed data which must be + * already allocated + * @compressedSize: is the precise full size of the compressed block. + * @targetOutputSize: the decompression operation will try + * to stop as soon as 'targetOutputSize' has been reached + * @maxDecompressedSize: is the size of destination buffer + * + * This function decompresses a compressed block of size 'compressedSize' + * at position 'source' into destination buffer 'dest' + * of size 'maxDecompressedSize'. + * The function tries to stop decompressing operation as soon as + * 'targetOutputSize' has been reached, reducing decompression time. + * This function never writes outside of output buffer, + * and never reads outside of input buffer. + * It is therefore protected against malicious data packets. + * + * Return: the number of bytes decoded in the destination buffer + * (necessarily <= maxDecompressedSize) + * or a negative result in case of error + * + */ +int LZ4_decompress_safe_partial(const char *source, char *dest, + int compressedSize, int targetOutputSize, int maxDecompressedSize); + +/*-************************************************************************ + * LZ4 HC Compression + **************************************************************************/ + +/** + * LZ4_compress_HC() - Compress data from `src` into `dst`, using HC algorithm + * @src: source address of the original data + * @dst: output buffer address of the compressed data + * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE + * @dstCapacity: full or partial size of buffer 'dst', + * which must be already allocated + * @compressionLevel: Recommended values are between 4 and 9, although any + * value between 1 and LZ4HC_MAX_CLEVEL will work. + * Values >LZ4HC_MAX_CLEVEL behave the same as 16. + * @wrkmem: address of the working memory. + * This requires 'wrkmem' of size LZ4HC_MEM_COMPRESS. + * + * Compress data from 'src' into 'dst', using the more powerful + * but slower "HC" algorithm. Compression is guaranteed to succeed if + * `dstCapacity >= LZ4_compressBound(srcSize) + * + * Return : the number of bytes written into 'dst' or 0 if compression fails. 
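Illustrative sketch, not part of the patch: one-shot use of the interface declared above, compressing into an LZ4_compressBound()-sized buffer and decompressing with the bounds-checked variant. The buffers src and out, their lengths and the err label are assumptions; allocation-failure checks and kfree() are omitted for brevity.

	void *wrkmem = kmalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
	int bound = LZ4_compressBound(src_len);	/* 0 if src_len is too large */
	char *cbuf = kmalloc(bound, GFP_KERNEL);
	int clen, dlen;

	clen = LZ4_compress_default(src, cbuf, src_len, bound, wrkmem);
	if (clen == 0)
		goto err;		/* did not fit into 'bound' bytes */

	dlen = LZ4_decompress_safe(cbuf, out, clen, out_size);
	if (dlen < 0)
		goto err;		/* malformed or truncated input */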
+ */ +int LZ4_compress_HC(const char *src, char *dst, int srcSize, int dstCapacity, + int compressionLevel, void *wrkmem); + +/** + * LZ4_resetStreamHC() - Init an allocated 'LZ4_streamHC_t' structure + * @streamHCPtr: pointer to the 'LZ4_streamHC_t' structure + * @compressionLevel: Recommended values are between 4 and 9, although any + * value between 1 and LZ4HC_MAX_CLEVEL will work. + * Values >LZ4HC_MAX_CLEVEL behave the same as 16. + * + * An LZ4_streamHC_t structure can be allocated once + * and re-used multiple times. + * Use this function to init an allocated `LZ4_streamHC_t` structure + * and start a new compression. + */ +void LZ4_resetStreamHC(LZ4_streamHC_t *streamHCPtr, int compressionLevel); + +/** + * LZ4_loadDictHC() - Load a static dictionary into LZ4_streamHC + * @streamHCPtr: pointer to the LZ4HC_stream_t + * @dictionary: dictionary to load + * @dictSize: size of dictionary + * + * Use this function to load a static dictionary into LZ4HC_stream. + * Any previous data will be forgotten, only 'dictionary' + * will remain in memory. + * Loading a size of 0 is allowed. + * + * Return : dictionary size, in bytes (necessarily <= 64 KB) + */ +int LZ4_loadDictHC(LZ4_streamHC_t *streamHCPtr, const char *dictionary, + int dictSize); + +/** + * LZ4_compress_HC_continue() - Compress 'src' using data from previously + * compressed blocks as a dictionary using the HC algorithm + * @streamHCPtr: Pointer to the previous 'LZ4_streamHC_t' structure + * @src: source address of the original data + * @dst: output buffer address of the compressed data, + * which must be already allocated + * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE + * @maxDstSize: full or partial size of buffer 'dest' + * which must be already allocated + * + * These functions compress data in successive blocks of any size, using + * previous blocks as dictionary. One key assumption is that previous + * blocks (up to 64 KB) remain read-accessible while + * compressing next blocks. There is an exception for ring buffers, + * which can be smaller than 64 KB. + * Ring buffers scenario is automatically detected and handled by + * LZ4_compress_HC_continue(). + * Before starting compression, state must be properly initialized, + * using LZ4_resetStreamHC(). + * A first "fictional block" can then be designated as + * initial dictionary, using LZ4_loadDictHC() (Optional). + * Then, use LZ4_compress_HC_continue() + * to compress each successive block. Previous memory blocks + * (including initial dictionary when present) must remain accessible + * and unmodified during compression. + * 'dst' buffer should be sized to handle worst case scenarios, using + * LZ4_compressBound(), to ensure operation success. + * If, for any reason, previous data blocks can't be preserved unmodified + * in memory during next compression block, + * you must save it to a safer memory space, using LZ4_saveDictHC(). + * Return value of LZ4_saveDictHC() is the size of dictionary + * effectively saved into 'safeBuffer'. 
+ * + * Return: Number of bytes written into buffer 'dst' or 0 if compression fails + */ +int LZ4_compress_HC_continue(LZ4_streamHC_t *streamHCPtr, const char *src, + char *dst, int srcSize, int maxDstSize); + +/** + * LZ4_saveDictHC() - Save static dictionary from LZ4HC_stream + * @streamHCPtr: pointer to the 'LZ4HC_stream_t' structure + * @safeBuffer: buffer to save dictionary to, must be already allocated + * @maxDictSize: size of 'safeBuffer' + * + * If previously compressed data block is not guaranteed + * to remain available at its memory location, + * save it into a safer place (char *safeBuffer). + * Note : you don't need to call LZ4_loadDictHC() afterwards, + * dictionary is immediately usable, you can therefore call + * LZ4_compress_HC_continue(). + * + * Return : saved dictionary size in bytes (necessarily <= maxDictSize), + * or 0 if error. + */ +int LZ4_saveDictHC(LZ4_streamHC_t *streamHCPtr, char *safeBuffer, + int maxDictSize); + +/*-********************************************* + * Streaming Compression Functions + ***********************************************/ + +/** + * LZ4_resetStream() - Init an allocated 'LZ4_stream_t' structure + * @LZ4_stream: pointer to the 'LZ4_stream_t' structure + * + * An LZ4_stream_t structure can be allocated once + * and re-used multiple times. + * Use this function to init an allocated `LZ4_stream_t` structure + * and start a new compression. + */ +void LZ4_resetStream(LZ4_stream_t *LZ4_stream); + +/** + * LZ4_loadDict() - Load a static dictionary into LZ4_stream + * @streamPtr: pointer to the LZ4_stream_t + * @dictionary: dictionary to load + * @dictSize: size of dictionary + * + * Use this function to load a static dictionary into LZ4_stream. + * Any previous data will be forgotten, only 'dictionary' + * will remain in memory. + * Loading a size of 0 is allowed. + * + * Return : dictionary size, in bytes (necessarily <= 64 KB) + */ +int LZ4_loadDict(LZ4_stream_t *streamPtr, const char *dictionary, + int dictSize); + +/** + * LZ4_saveDict() - Save static dictionary from LZ4_stream + * @streamPtr: pointer to the 'LZ4_stream_t' structure + * @safeBuffer: buffer to save dictionary to, must be already allocated + * @dictSize: size of 'safeBuffer' + * + * If previously compressed data block is not guaranteed + * to remain available at its memory location, + * save it into a safer place (char *safeBuffer). + * Note : you don't need to call LZ4_loadDict() afterwards, + * dictionary is immediately usable, you can therefore call + * LZ4_compress_fast_continue(). + * + * Return : saved dictionary size in bytes (necessarily <= dictSize), + * or 0 if error. + */ +int LZ4_saveDict(LZ4_stream_t *streamPtr, char *safeBuffer, int dictSize); + +/** + * LZ4_compress_fast_continue() - Compress 'src' using data from previously + * compressed blocks as a dictionary + * @streamPtr: Pointer to the previous 'LZ4_stream_t' structure + * @src: source address of the original data + * @dst: output buffer address of the compressed data, + * which must be already allocated + * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE + * @maxDstSize: full or partial size of buffer 'dest' + * which must be already allocated + * @acceleration: acceleration factor + * + * Compress buffer content 'src', using data from previously compressed blocks + * as dictionary to improve compression ratio. + * Important : Previous data blocks are assumed to still + * be present and unmodified ! 
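Illustrative sketch, not part of the patch: a minimal block-by-block compression loop using LZ4_compress_fast_continue() as described here. next_block(), emit(), the input buffer and BLOCK_BOUND are placeholders assumed for the illustration; previous input blocks must remain mapped and unmodified, as the comment above requires.

	LZ4_stream_t *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	char *out = kmalloc(BLOCK_BOUND, GFP_KERNEL);	/* >= LZ4_compressBound(block size) */
	int in_len, n;

	LZ4_resetStream(stream);
	while ((in_len = next_block(in)) > 0) {
		n = LZ4_compress_fast_continue(stream, in, out, in_len,
					       BLOCK_BOUND,
					       LZ4_ACCELERATION_DEFAULT);
		if (n == 0)
			break;			/* output budget exceeded */
		emit(out, n);
	}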
+ * If maxDstSize >= LZ4_compressBound(srcSize), + * compression is guaranteed to succeed, and runs faster. + * + * Return: Number of bytes written into buffer 'dst' or 0 if compression fails + */ +int LZ4_compress_fast_continue(LZ4_stream_t *streamPtr, const char *src, + char *dst, int srcSize, int maxDstSize, int acceleration); + +/** + * LZ4_setStreamDecode() - Instruct where to find dictionary + * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure + * @dictionary: dictionary to use + * @dictSize: size of dictionary + * + * Use this function to instruct where to find the dictionary. + * Setting a size of 0 is allowed (same effect as reset). + * + * Return: 1 if OK, 0 if error + */ +int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, + const char *dictionary, int dictSize); + +/** + * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode + * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure + * @source: source address of the compressed data + * @dest: output buffer address of the uncompressed data + * which must be already allocated + * @compressedSize: is the precise full size of the compressed block + * @maxDecompressedSize: is the size of 'dest' buffer + * + * These decoding function allows decompression of multiple blocks + * in "streaming" mode. + * Previously decoded blocks *must* remain available at the memory position + * where they were decoded (up to 64 KB) + * In the case of a ring buffers, decoding buffer must be either : + * - Exactly same size as encoding buffer, with same update rule + * (block boundaries at same positions) In which case, + * the decoding & encoding ring buffer can have any size, + * including very small ones ( < 64 KB). + * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes. + * maxBlockSize is implementation dependent. + * It's the maximum size you intend to compress into a single block. + * In which case, encoding and decoding buffers do not need + * to be synchronized, and encoding ring buffer can have any size, + * including small ones ( < 64 KB). + * - _At least_ 64 KB + 8 bytes + maxBlockSize. + * In which case, encoding and decoding buffers do not need to be + * synchronized, and encoding ring buffer can have any size, + * including larger than decoding buffer. W + * Whenever these conditions are not possible, save the last 64KB of decoded + * data into a safe buffer, and indicate where it is saved + * using LZ4_setStreamDecode() + * + * Return: number of bytes decompressed into destination buffer + * (necessarily <= maxDecompressedSize) + * or a negative result in case of error + */ +int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, + const char *source, char *dest, int compressedSize, + int maxDecompressedSize); + +/** + * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode + * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure + * @source: source address of the compressed data + * @dest: output buffer address of the uncompressed data + * which must be already allocated with 'originalSize' bytes + * @originalSize: is the original and therefore uncompressed size + * + * These decoding function allows decompression of multiple blocks + * in "streaming" mode. 
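Illustrative sketch, not part of the patch: the matching block-by-block decompression loop. recv_block(), consume() and the buffer handling are placeholders assumed for the illustration; decoded data must stay in place (up to 64 KB) as the rules described here require.

	LZ4_streamDecode_t ctx;
	int clen, n;

	LZ4_setStreamDecode(&ctx, NULL, 0);	/* start with no dictionary */
	while (recv_block(cbuf, &clen)) {
		n = LZ4_decompress_safe_continue(&ctx, cbuf, dbuf,
						 clen, dbuf_cap);
		if (n < 0)
			break;			/* corrupt stream */
		consume(dbuf, n);		/* 'dbuf' must remain valid for later blocks */
	}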
+ * Previously decoded blocks *must* remain available at the memory position + * where they were decoded (up to 64 KB) + * In the case of a ring buffers, decoding buffer must be either : + * - Exactly same size as encoding buffer, with same update rule + * (block boundaries at same positions) In which case, + * the decoding & encoding ring buffer can have any size, + * including very small ones ( < 64 KB). + * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes. + * maxBlockSize is implementation dependent. + * It's the maximum size you intend to compress into a single block. + * In which case, encoding and decoding buffers do not need + * to be synchronized, and encoding ring buffer can have any size, + * including small ones ( < 64 KB). + * - _At least_ 64 KB + 8 bytes + maxBlockSize. + * In which case, encoding and decoding buffers do not need to be + * synchronized, and encoding ring buffer can have any size, + * including larger than decoding buffer. W + * Whenever these conditions are not possible, save the last 64KB of decoded + * data into a safe buffer, and indicate where it is saved + * using LZ4_setStreamDecode() + * + * Return: number of bytes decompressed into destination buffer + * (necessarily <= maxDecompressedSize) + * or a negative result in case of error + */ +int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode, + const char *source, char *dest, int originalSize); + +/** + * LZ4_decompress_safe_usingDict() - Same as LZ4_setStreamDecode() + * followed by LZ4_decompress_safe_continue() + * @source: source address of the compressed data + * @dest: output buffer address of the uncompressed data + * which must be already allocated + * @compressedSize: is the precise full size of the compressed block + * @maxDecompressedSize: is the size of 'dest' buffer + * @dictStart: pointer to the start of the dictionary in memory + * @dictSize: size of dictionary + * + * These decoding function works the same as + * a combination of LZ4_setStreamDecode() followed by + * LZ4_decompress_safe_continue() + * It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure. + * + * Return: number of bytes decompressed into destination buffer + * (necessarily <= maxDecompressedSize) + * or a negative result in case of error + */ +int LZ4_decompress_safe_usingDict(const char *source, char *dest, + int compressedSize, int maxDecompressedSize, const char *dictStart, + int dictSize); + +/** + * LZ4_decompress_fast_usingDict() - Same as LZ4_setStreamDecode() + * followed by LZ4_decompress_fast_continue() + * @source: source address of the compressed data + * @dest: output buffer address of the uncompressed data + * which must be already allocated with 'originalSize' bytes + * @originalSize: is the original and therefore uncompressed size + * @dictStart: pointer to the start of the dictionary in memory + * @dictSize: size of dictionary + * + * These decoding function works the same as + * a combination of LZ4_setStreamDecode() followed by + * LZ4_decompress_safe_continue() + * It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure. 
+ * + * Return: number of bytes decompressed into destination buffer + * (necessarily <= maxDecompressedSize) + * or a negative result in case of error + */ +int LZ4_decompress_fast_usingDict(const char *source, char *dest, + int originalSize, const char *dictStart, int dictSize); + #endif diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 38bcf00cbed3..bdfc65af4152 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -42,6 +42,7 @@ struct memblock_type { unsigned long max; /* size of the allocated array */ phys_addr_t total_size; /* size of all regions */ struct memblock_region *regions; + char *name; }; struct memblock { diff --git a/include/linux/memory.h b/include/linux/memory.h index 093607f90b91..b723a686fc10 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -109,9 +109,6 @@ extern void unregister_memory_notifier(struct notifier_block *nb); extern int register_memory_isolate_notifier(struct notifier_block *nb); extern void unregister_memory_isolate_notifier(struct notifier_block *nb); extern int register_new_memory(int, struct mem_section *); -extern int memory_block_change_state(struct memory_block *mem, - unsigned long to_state, - unsigned long from_state_req); #ifdef CONFIG_MEMORY_HOTREMOVE extern int unregister_memory_section(struct mem_section *); #endif diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h index 6483a6fdce59..ffb21e79204d 100644 --- a/include/linux/mfd/tps65910.h +++ b/include/linux/mfd/tps65910.h @@ -134,6 +134,7 @@ /* RTC_CTRL_REG bitfields */ #define TPS65910_RTC_CTRL_STOP_RTC 0x01 /*0=stop, 1=run */ +#define TPS65910_RTC_CTRL_AUTO_COMP 0x04 #define TPS65910_RTC_CTRL_GET_TIME 0x40 /* RTC_STATUS_REG bitfields */ diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h index 27d7c95fd0da..504d54c71bdb 100644 --- a/include/linux/mic_bus.h +++ b/include/linux/mic_bus.h @@ -90,7 +90,7 @@ struct mbus_hw_ops { }; struct mbus_device * -mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, +mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, struct mbus_hw_ops *hw_ops, int index, void __iomem *mmio_va); void mbus_unregister_device(struct mbus_device *mbdev); diff --git a/include/linux/migrate.h b/include/linux/migrate.h index ae8d475a9385..fa76b516fa47 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -37,7 +37,7 @@ extern int migrate_page(struct address_space *, struct page *, struct page *, enum migrate_mode); extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason); -extern bool isolate_movable_page(struct page *page, isolate_mode_t mode); +extern int isolate_movable_page(struct page *page, isolate_mode_t mode); extern void putback_movable_page(struct page *page); extern int migrate_prep(void); @@ -56,6 +56,8 @@ static inline int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason) { return -ENOSYS; } +static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) + { return -EBUSY; } static inline int migrate_prep(void) { return -ENOSYS; } static inline int migrate_prep_local(void) { return -ENOSYS; } diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index e965e5090d96..a858bcb6220b 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h @@ -109,7 +109,7 @@ static inline void mlx4_u64_to_mac(u8 
*addr, u64 mac) int i; for (i = ETH_ALEN; i > 0; i--) { - addr[i - 1] = mac && 0xFF; + addr[i - 1] = mac & 0xFF; mac >>= 8; } } diff --git a/include/linux/mm.h b/include/linux/mm.h index 574bc157a27c..0d65dd72c0f4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -314,6 +314,9 @@ struct vm_fault { unsigned long address; /* Faulting virtual address */ pmd_t *pmd; /* Pointer to pmd entry matching * the 'address' */ + pud_t *pud; /* Pointer to pud entry matching + * the 'address' + */ pte_t orig_pte; /* Value of PTE at the time of fault */ struct page *cow_page; /* Page handler may use for COW fault */ @@ -341,6 +344,13 @@ struct vm_fault { */ }; +/* page entry size for vm->huge_fault() */ +enum page_entry_size { + PE_SIZE_PTE = 0, + PE_SIZE_PMD, + PE_SIZE_PUD, +}; + /* * These are the virtual MM functions - opening of an area, closing and * unmapping it (needed to keep files on disk up-to-date etc), pointer @@ -350,17 +360,17 @@ struct vm_operations_struct { void (*open)(struct vm_area_struct * area); void (*close)(struct vm_area_struct * area); int (*mremap)(struct vm_area_struct * area); - int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); - int (*pmd_fault)(struct vm_fault *vmf); + int (*fault)(struct vm_fault *vmf); + int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size); void (*map_pages)(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); /* notification that a previously read-only page is about to become * writable, if an error is returned it will cause a SIGBUS */ - int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); + int (*page_mkwrite)(struct vm_fault *vmf); /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */ - int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); + int (*pfn_mkwrite)(struct vm_fault *vmf); /* called by access_process_vm when get_user_pages() fails, typically * for use by special VMAs that can switch between memory and hardware @@ -416,6 +426,10 @@ static inline int pmd_devmap(pmd_t pmd) { return 0; } +static inline int pud_devmap(pud_t pud) +{ + return 0; +} #endif /* @@ -1154,16 +1168,6 @@ extern void pagefault_out_of_memory(void); extern void show_free_areas(unsigned int flags, nodemask_t *nodemask); -int shmem_zero_setup(struct vm_area_struct *); -#ifdef CONFIG_SHMEM -bool shmem_mapping(struct address_space *mapping); -#else -static inline bool shmem_mapping(struct address_space *mapping) -{ - return false; -} -#endif - extern bool can_do_mlock(void); extern int user_shm_lock(size_t, struct user_struct *); extern void user_shm_unlock(size_t, struct user_struct *); @@ -1191,6 +1195,10 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, /** * mm_walk - callbacks for walk_page_range + * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry + * this handler should only handle pud_trans_huge() puds. + * the pmd_entry or pte_entry callbacks will be used for + * regular PUDs. * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry * this handler is required to be able to handle * pmd_trans_huge() pmds. 
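Illustrative sketch, not part of the patch: a walk_page_range() user that also visits huge PUDs via the new @pud_entry callback documented here. The function name, the counting scheme and the mm/start/end variables are assumptions; pud_trans_huge() is only meaningful on architectures that enable PUD-sized THP.

static int example_pud_entry(pud_t *pud, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	if (pud_trans_huge(*pud))
		(*(unsigned long *)walk->private)++;	/* count huge PUD mappings */
	return 0;
}

	unsigned long huge_puds = 0;
	struct mm_walk walk = {
		.pud_entry	= example_pud_entry,
		.mm		= mm,
		.private	= &huge_puds,
	};

	walk_page_range(start, end, &walk);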
They may simply choose to @@ -1210,6 +1218,8 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, * (see the comment on walk_page_range() for more details) */ struct mm_walk { + int (*pud_entry)(pud_t *pud, unsigned long addr, + unsigned long next, struct mm_walk *walk); int (*pmd_entry)(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk); int (*pte_entry)(pte_t *pte, unsigned long addr, @@ -1793,8 +1803,26 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) return ptl; } -extern void __init pagecache_init(void); +/* + * No scalability reason to split PUD locks yet, but follow the same pattern + * as the PMD locks to make it easier if we decide to. The VM should not be + * considered ready to switch to split PUD locks yet; there may be places + * which need to be converted from page_table_lock. + */ +static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud) +{ + return &mm->page_table_lock; +} +static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) +{ + spinlock_t *ptl = pud_lockptr(mm, pud); + + spin_lock(ptl); + return ptl; +} + +extern void __init pagecache_init(void); extern void free_area_init(unsigned long * zones_size); extern void free_area_init_node(int nid, unsigned long * zones_size, unsigned long zone_start_pfn, unsigned long *zholes_size); @@ -2003,8 +2031,10 @@ extern struct vm_area_struct *vma_merge(struct mm_struct *, unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, struct mempolicy *, struct vm_userfaultfd_ctx); extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); -extern int split_vma(struct mm_struct *, - struct vm_area_struct *, unsigned long addr, int new_below); +extern int __split_vma(struct mm_struct *, struct vm_area_struct *, + unsigned long addr, int new_below); +extern int split_vma(struct mm_struct *, struct vm_area_struct *, + unsigned long addr, int new_below); extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, struct rb_node **, struct rb_node *); @@ -2052,18 +2082,22 @@ extern int install_special_mapping(struct mm_struct *mm, extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); extern unsigned long mmap_region(struct file *file, unsigned long addr, - unsigned long len, vm_flags_t vm_flags, unsigned long pgoff); + unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, + struct list_head *uf); extern unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, - vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate); -extern int do_munmap(struct mm_struct *, unsigned long, size_t); + vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, + struct list_head *uf); +extern int do_munmap(struct mm_struct *, unsigned long, size_t, + struct list_head *uf); static inline unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, - unsigned long pgoff, unsigned long *populate) + unsigned long pgoff, unsigned long *populate, + struct list_head *uf) { - return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate); + return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf); } #ifdef CONFIG_MMU @@ -2124,10 +2158,10 @@ extern void truncate_inode_pages_range(struct address_space *, extern void 
truncate_inode_pages_final(struct address_space *); /* generic vm_area_ops exported for stackable file systems */ -extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); +extern int filemap_fault(struct vm_fault *vmf); extern void filemap_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); -extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); +extern int filemap_page_mkwrite(struct vm_fault *vmf); /* mm/page-writeback.c */ int write_one_page(struct page *page, int wait); diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 41d376e7116d..e030a68ead7e 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -50,6 +50,13 @@ static __always_inline void add_page_to_lru_list(struct page *page, list_add(&page->lru, &lruvec->lists[lru]); } +static __always_inline void add_page_to_lru_list_tail(struct page *page, + struct lruvec *lruvec, enum lru_list lru) +{ + update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); + list_add_tail(&page->lru, &lruvec->lists[lru]); +} + static __always_inline void del_page_from_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 808751d7b737..4f6d440ad785 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -407,8 +407,27 @@ struct mm_struct { unsigned long task_size; /* size of task vm space */ unsigned long highest_vm_end; /* highest vma end address */ pgd_t * pgd; - atomic_t mm_users; /* How many users with user space? */ - atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ + + /** + * @mm_users: The number of users including userspace. + * + * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops + * to 0 (i.e. when the task exits and there are no other temporary + * reference holders), we also release a reference on @mm_count + * (which may then free the &struct mm_struct if @mm_count also + * drops to 0). + */ + atomic_t mm_users; + + /** + * @mm_count: The number of references to &struct mm_struct + * (@mm_users count as 1). + * + * Use mmgrab()/mmdrop() to modify. When this drops to 0, the + * &struct mm_struct is freed. 
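Illustrative sketch, not part of the patch: a short usage example for the two counters documented above; 'mm' is assumed to be a valid struct mm_struct pointer obtained elsewhere.

	/* Use the address space itself: pin mm_users. */
	if (mmget_not_zero(mm)) {
		/* ... walk mm->mmap, touch page tables, etc. ... */
		mmput(mm);
	}

	/* Keep only the struct mm_struct alive (e.g. lazy-TLB style): pin mm_count. */
	mmgrab(mm);
	/* ... mm_users may drop to zero here, but the structure remains valid ... */
	mmdrop(mm);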
+ */ + atomic_t mm_count; + atomic_long_t nr_ptes; /* PTE page table pages */ #if CONFIG_PGTABLE_LEVELS > 2 atomic_long_t nr_pmds; /* PMD page table pages */ diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index a1a210d59961..51891fb0d3ce 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -381,6 +381,19 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) ___pmd; \ }) +#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \ +({ \ + unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \ + struct mm_struct *___mm = (__vma)->vm_mm; \ + pud_t ___pud; \ + \ + ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \ + mmu_notifier_invalidate_range(___mm, ___haddr, \ + ___haddr + HPAGE_PUD_SIZE); \ + \ + ___pud; \ +}) + #define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \ ({ \ unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ @@ -475,6 +488,7 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) #define pmdp_clear_young_notify pmdp_test_and_clear_young #define ptep_clear_flush_notify ptep_clear_flush #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush +#define pudp_huge_clear_flush_notify pudp_huge_clear_flush #define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear #define set_pte_at_notify set_pte_at diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 82fc632fd11d..8e02b3750fe0 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -236,8 +236,6 @@ struct lruvec { #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) -/* Isolate clean file */ -#define ISOLATE_CLEAN ((__force isolate_mode_t)0x1) /* Isolate unmapped file */ #define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2) /* Isolate for asynchronous migration */ diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h index 7b3d487d8b3f..b532ce524dae 100644 --- a/include/linux/mtd/qinfo.h +++ b/include/linux/mtd/qinfo.h @@ -14,7 +14,7 @@ * @DevId - Chip Device ID * @qinfo - pointer to qinfo records describing the chip * @numchips - number of chips including virual RWW partitions - * @chipshift - Chip/partiton size 2^chipshift + * @chipshift - Chip/partition size 2^chipshift * @chips - per-chip data structure */ struct lpddr_private { diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f40f0ab3847a..97456b2539e4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -330,6 +330,7 @@ struct napi_struct { enum { NAPI_STATE_SCHED, /* Poll is scheduled */ + NAPI_STATE_MISSED, /* reschedule a napi */ NAPI_STATE_DISABLE, /* Disable pending */ NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ @@ -338,12 +339,13 @@ enum { }; enum { - NAPIF_STATE_SCHED = (1UL << NAPI_STATE_SCHED), - NAPIF_STATE_DISABLE = (1UL << NAPI_STATE_DISABLE), - NAPIF_STATE_NPSVC = (1UL << NAPI_STATE_NPSVC), - NAPIF_STATE_HASHED = (1UL << NAPI_STATE_HASHED), - NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL), - NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL), + NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED), + NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), + NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), + NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), + NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED), + NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), + NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), }; enum gro_result { @@ -414,20 
+416,7 @@ static inline bool napi_disable_pending(struct napi_struct *n) return test_bit(NAPI_STATE_DISABLE, &n->state); } -/** - * napi_schedule_prep - check if NAPI can be scheduled - * @n: NAPI context - * - * Test if NAPI routine is already running, and if not mark - * it as running. This is used as a condition variable to - * insure only one NAPI poll instance runs. We also make - * sure there is no pending NAPI disable. - */ -static inline bool napi_schedule_prep(struct napi_struct *n) -{ - return !napi_disable_pending(n) && - !test_and_set_bit(NAPI_STATE_SCHED, &n->state); -} +bool napi_schedule_prep(struct napi_struct *n); /** * napi_schedule - schedule NAPI poll diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h index bf240a3cbf99..a72fd04aa5e1 100644 --- a/include/linux/nvme-rdma.h +++ b/include/linux/nvme-rdma.h @@ -29,6 +29,30 @@ enum nvme_rdma_cm_status { NVME_RDMA_CM_INVALID_ORD = 0x08, }; +static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status) +{ + switch (status) { + case NVME_RDMA_CM_INVALID_LEN: + return "invalid length"; + case NVME_RDMA_CM_INVALID_RECFMT: + return "invalid record format"; + case NVME_RDMA_CM_INVALID_QID: + return "invalid queue ID"; + case NVME_RDMA_CM_INVALID_HSQSIZE: + return "invalid host SQ size"; + case NVME_RDMA_CM_INVALID_HRQSIZE: + return "invalid host RQ size"; + case NVME_RDMA_CM_NO_RSC: + return "resource not found"; + case NVME_RDMA_CM_INVALID_IRD: + return "invalid IRD"; + case NVME_RDMA_CM_INVALID_ORD: + return "Invalid ORD"; + default: + return "unrecognized reason"; + } +} + /** * struct nvme_rdma_cm_req - rdma connect request * diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 0b676a02cf3e..c43d435d4225 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -579,6 +579,12 @@ struct nvme_write_zeroes_cmd { __le16 appmask; }; +/* Features */ + +struct nvme_feat_auto_pst { + __le64 entries[32]; +}; + /* Admin commands */ enum nvme_admin_opcode { @@ -644,7 +650,9 @@ struct nvme_identify { __le32 nsid; __u64 rsvd2[2]; union nvme_data_ptr dptr; - __le32 cns; + __u8 cns; + __u8 rsvd3; + __le16 ctrlid; __u32 rsvd11[5]; }; diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h index 033fc7bbcefa..a49b3259cad7 100644 --- a/include/linux/pfn_t.h +++ b/include/linux/pfn_t.h @@ -90,6 +90,13 @@ static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot) { return pfn_pmd(pfn_t_to_pfn(pfn), pgprot); } + +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot) +{ + return pfn_pud(pfn_t_to_pfn(pfn), pgprot); +} +#endif #endif #ifdef __HAVE_ARCH_PTE_DEVMAP @@ -106,5 +113,10 @@ static inline bool pfn_t_devmap(pfn_t pfn) } pte_t pte_mkdevmap(pte_t pte); pmd_t pmd_mkdevmap(pmd_t pmd); +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) +pud_t pud_mkdevmap(pud_t pud); #endif +#endif /* __HAVE_ARCH_PTE_DEVMAP */ + #endif /* _LINUX_PFN_T_H_ */ diff --git a/include/linux/pid.h b/include/linux/pid.h index 23705a53abba..298ead5512e5 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -191,10 +191,10 @@ pid_t pid_vnr(struct pid *pid); #define do_each_pid_thread(pid, type, task) \ do_each_pid_task(pid, type, task) { \ struct task_struct *tg___ = task; \ - do { + for_each_thread(tg___, task) { #define while_each_pid_thread(pid, type, task) \ - } while_each_thread(tg___, task); \ + } \ task = tg___; \ } while_each_pid_task(pid, type, task) #endif /* _LINUX_PID_H */ diff --git 
a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index 34cce96741bc..c2a989dee876 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -21,6 +21,12 @@ struct pidmap { struct fs_pin; +enum { /* definitions for pid_namespace's hide_pid field */ + HIDEPID_OFF = 0, + HIDEPID_NO_ACCESS = 1, + HIDEPID_INVISIBLE = 2, +}; + struct pid_namespace { struct kref kref; struct pidmap pidmap[PIDMAP_ENTRIES]; diff --git a/include/linux/platform_data/rtc-m48t86.h b/include/linux/platform_data/rtc-m48t86.h deleted file mode 100644 index 915d6b4f0f89..000000000000 --- a/include/linux/platform_data/rtc-m48t86.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * ST M48T86 / Dallas DS12887 RTC driver - * Copyright (c) 2006 Tower Technologies - * - * Author: Alessandro Zummo <a.zummo@towertech.it> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -struct m48t86_ops -{ - void (*writebyte)(unsigned char value, unsigned long addr); - unsigned char (*readbyte)(unsigned long addr); -}; diff --git a/include/linux/platform_data/video-imxfb.h b/include/linux/platform_data/video-imxfb.h index 18e908324549..a5c0a71ec914 100644 --- a/include/linux/platform_data/video-imxfb.h +++ b/include/linux/platform_data/video-imxfb.h @@ -47,10 +47,6 @@ #define LSCR1_GRAY2(x) (((x) & 0xf) << 4) #define LSCR1_GRAY1(x) (((x) & 0xf)) -#define DMACR_BURST (1 << 31) -#define DMACR_HM(x) (((x) & 0xf) << 16) -#define DMACR_TM(x) ((x) & 0xf) - struct imx_fb_videomode { struct fb_videomode mode; u32 pcr; diff --git a/include/linux/platform_data/x86/clk-pmc-atom.h b/include/linux/platform_data/x86/clk-pmc-atom.h new file mode 100644 index 000000000000..3ab892208343 --- /dev/null +++ b/include/linux/platform_data/x86/clk-pmc-atom.h @@ -0,0 +1,44 @@ +/* + * Intel Atom platform clocks for BayTrail and CherryTrail SoC. + * + * Copyright (C) 2016, Intel Corporation + * Author: Irina Tirdea <irina.tirdea@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __PLATFORM_DATA_X86_CLK_PMC_ATOM_H +#define __PLATFORM_DATA_X86_CLK_PMC_ATOM_H + +/** + * struct pmc_clk - PMC platform clock configuration + * + * @name: identified, typically pmc_plt_clk_<x>, x=[0..5] + * @freq: in Hz, 19.2MHz and 25MHz (Baytrail only) supported + * @parent_name: one of 'xtal' or 'osc' + */ +struct pmc_clk { + const char *name; + unsigned long freq; + const char *parent_name; +}; + +/** + * struct pmc_clk_data - common PMC clock configuration + * + * @base: PMC clock register base offset + * @clks: pointer to set of registered clocks, typically 0..5 + */ +struct pmc_clk_data { + void __iomem *base; + const struct pmc_clk *clks; +}; + +#endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */ diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h new file mode 100644 index 000000000000..e4905fe69c38 --- /dev/null +++ b/include/linux/platform_data/x86/pmc_atom.h @@ -0,0 +1,158 @@ +/* + * Intel Atom SOC Power Management Controller Header File + * Copyright (c) 2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef PMC_ATOM_H +#define PMC_ATOM_H + +/* ValleyView Power Control Unit PCI Device ID */ +#define PCI_DEVICE_ID_VLV_PMC 0x0F1C +/* CherryTrail Power Control Unit PCI Device ID */ +#define PCI_DEVICE_ID_CHT_PMC 0x229C + +/* PMC Memory mapped IO registers */ +#define PMC_BASE_ADDR_OFFSET 0x44 +#define PMC_BASE_ADDR_MASK 0xFFFFFE00 +#define PMC_MMIO_REG_LEN 0x100 +#define PMC_REG_BIT_WIDTH 32 + +/* BIOS uses FUNC_DIS to disable specific function */ +#define PMC_FUNC_DIS 0x34 +#define PMC_FUNC_DIS_2 0x38 + +/* CHT specific bits in FUNC_DIS2 register */ +#define BIT_FD_GMM BIT(3) +#define BIT_FD_ISH BIT(4) + +/* S0ix wake event control */ +#define PMC_S0IX_WAKE_EN 0x3C + +#define BIT_LPC_CLOCK_RUN BIT(4) +#define BIT_SHARED_IRQ_GPSC BIT(5) +#define BIT_ORED_DEDICATED_IRQ_GPSS BIT(18) +#define BIT_ORED_DEDICATED_IRQ_GPSC BIT(19) +#define BIT_SHARED_IRQ_GPSS BIT(20) + +#define PMC_WAKE_EN_SETTING ~(BIT_LPC_CLOCK_RUN | \ + BIT_SHARED_IRQ_GPSC | \ + BIT_ORED_DEDICATED_IRQ_GPSS | \ + BIT_ORED_DEDICATED_IRQ_GPSC | \ + BIT_SHARED_IRQ_GPSS) + +/* The timers accumulate time spent in sleep state */ +#define PMC_S0IR_TMR 0x80 +#define PMC_S0I1_TMR 0x84 +#define PMC_S0I2_TMR 0x88 +#define PMC_S0I3_TMR 0x8C +#define PMC_S0_TMR 0x90 +/* Sleep state counter is in units of of 32us */ +#define PMC_TMR_SHIFT 5 + +/* Power status of power islands */ +#define PMC_PSS 0x98 + +#define PMC_PSS_BIT_GBE BIT(0) +#define PMC_PSS_BIT_SATA BIT(1) +#define PMC_PSS_BIT_HDA BIT(2) +#define PMC_PSS_BIT_SEC BIT(3) +#define PMC_PSS_BIT_PCIE BIT(4) +#define PMC_PSS_BIT_LPSS BIT(5) +#define PMC_PSS_BIT_LPE BIT(6) +#define PMC_PSS_BIT_DFX BIT(7) +#define PMC_PSS_BIT_USH_CTRL BIT(8) +#define PMC_PSS_BIT_USH_SUS BIT(9) +#define PMC_PSS_BIT_USH_VCCS BIT(10) +#define PMC_PSS_BIT_USH_VCCA BIT(11) +#define PMC_PSS_BIT_OTG_CTRL BIT(12) +#define PMC_PSS_BIT_OTG_VCCS BIT(13) +#define PMC_PSS_BIT_OTG_VCCA_CLK BIT(14) +#define PMC_PSS_BIT_OTG_VCCA BIT(15) +#define PMC_PSS_BIT_USB BIT(16) +#define PMC_PSS_BIT_USB_SUS BIT(17) + +/* 
CHT specific bits in PSS register */ +#define PMC_PSS_BIT_CHT_UFS BIT(7) +#define PMC_PSS_BIT_CHT_UXD BIT(11) +#define PMC_PSS_BIT_CHT_UXD_FD BIT(12) +#define PMC_PSS_BIT_CHT_UX_ENG BIT(15) +#define PMC_PSS_BIT_CHT_USB_SUS BIT(16) +#define PMC_PSS_BIT_CHT_GMM BIT(17) +#define PMC_PSS_BIT_CHT_ISH BIT(18) +#define PMC_PSS_BIT_CHT_DFX_MASTER BIT(26) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER1 BIT(27) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER2 BIT(28) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER3 BIT(29) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER4 BIT(30) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER5 BIT(31) + +/* These registers reflect D3 status of functions */ +#define PMC_D3_STS_0 0xA0 + +#define BIT_LPSS1_F0_DMA BIT(0) +#define BIT_LPSS1_F1_PWM1 BIT(1) +#define BIT_LPSS1_F2_PWM2 BIT(2) +#define BIT_LPSS1_F3_HSUART1 BIT(3) +#define BIT_LPSS1_F4_HSUART2 BIT(4) +#define BIT_LPSS1_F5_SPI BIT(5) +#define BIT_LPSS1_F6_XXX BIT(6) +#define BIT_LPSS1_F7_XXX BIT(7) +#define BIT_SCC_EMMC BIT(8) +#define BIT_SCC_SDIO BIT(9) +#define BIT_SCC_SDCARD BIT(10) +#define BIT_SCC_MIPI BIT(11) +#define BIT_HDA BIT(12) +#define BIT_LPE BIT(13) +#define BIT_OTG BIT(14) +#define BIT_USH BIT(15) +#define BIT_GBE BIT(16) +#define BIT_SATA BIT(17) +#define BIT_USB_EHCI BIT(18) +#define BIT_SEC BIT(19) +#define BIT_PCIE_PORT0 BIT(20) +#define BIT_PCIE_PORT1 BIT(21) +#define BIT_PCIE_PORT2 BIT(22) +#define BIT_PCIE_PORT3 BIT(23) +#define BIT_LPSS2_F0_DMA BIT(24) +#define BIT_LPSS2_F1_I2C1 BIT(25) +#define BIT_LPSS2_F2_I2C2 BIT(26) +#define BIT_LPSS2_F3_I2C3 BIT(27) +#define BIT_LPSS2_F4_I2C4 BIT(28) +#define BIT_LPSS2_F5_I2C5 BIT(29) +#define BIT_LPSS2_F6_I2C6 BIT(30) +#define BIT_LPSS2_F7_I2C7 BIT(31) + +#define PMC_D3_STS_1 0xA4 +#define BIT_SMB BIT(0) +#define BIT_OTG_SS_PHY BIT(1) +#define BIT_USH_SS_PHY BIT(2) +#define BIT_DFX BIT(3) + +/* CHT specific bits in PMC_D3_STS_1 register */ +#define BIT_STS_GMM BIT(1) +#define BIT_STS_ISH BIT(2) + +/* PMC I/O Registers */ +#define ACPI_BASE_ADDR_OFFSET 0x40 +#define ACPI_BASE_ADDR_MASK 0xFFFFFE00 +#define ACPI_MMIO_REG_LEN 0x100 + +#define PM1_CNT 0x4 +#define SLEEP_TYPE_MASK 0xFFFFECFF +#define SLEEP_TYPE_S5 0x1C00 +#define SLEEP_ENABLE 0x2000 + +extern int pmc_atom_read(int offset, u32 *value); +extern int pmc_atom_write(int offset, u32 value); + +#endif /* PMC_ATOM_H */ diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 52bda854593b..3e5735064b71 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -22,11 +22,13 @@ #define _LINUX_RADIX_TREE_H #include <linux/bitops.h> -#include <linux/preempt.h> -#include <linux/types.h> #include <linux/bug.h> #include <linux/kernel.h> +#include <linux/list.h> +#include <linux/preempt.h> #include <linux/rcupdate.h> +#include <linux/spinlock.h> +#include <linux/types.h> /* * The bottom two bits of the slot determine how the remaining bits in the @@ -94,7 +96,7 @@ struct radix_tree_node { unsigned char count; /* Total entry count */ unsigned char exceptional; /* Exceptional entry count */ struct radix_tree_node *parent; /* Used when ascending tree */ - void *private_data; /* For tree user */ + struct radix_tree_root *root; /* The tree we belong to */ union { struct list_head private_list; /* For tree user */ struct rcu_head rcu_head; /* Used when freeing node */ @@ -103,7 +105,10 @@ struct radix_tree_node { unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; }; -/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */ +/* The top bits of gfp_mask are used to store the root tags and the IDR flag */ 
+#define ROOT_IS_IDR ((__force gfp_t)(1 << __GFP_BITS_SHIFT)) +#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT + 1) + struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node __rcu *rnode; @@ -123,7 +128,7 @@ do { \ (root)->rnode = NULL; \ } while (0) -static inline bool radix_tree_empty(struct radix_tree_root *root) +static inline bool radix_tree_empty(const struct radix_tree_root *root) { return root->rnode == NULL; } @@ -216,10 +221,8 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter) */ /** - * radix_tree_deref_slot - dereference a slot - * @pslot: pointer to slot, returned by radix_tree_lookup_slot - * Returns: item that was stored in that slot with any direct pointer flag - * removed. + * radix_tree_deref_slot - dereference a slot + * @slot: slot pointer, returned by radix_tree_lookup_slot * * For use with radix_tree_lookup_slot(). Caller must hold tree at least read * locked across slot lookup and dereference. Not required if write lock is @@ -227,26 +230,27 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter) * * radix_tree_deref_retry must be used to confirm validity of the pointer if * only the read lock is held. + * + * Return: entry stored in that slot. */ -static inline void *radix_tree_deref_slot(void **pslot) +static inline void *radix_tree_deref_slot(void __rcu **slot) { - return rcu_dereference(*pslot); + return rcu_dereference(*slot); } /** - * radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held - * @pslot: pointer to slot, returned by radix_tree_lookup_slot - * Returns: item that was stored in that slot with any direct pointer flag - * removed. - * - * Similar to radix_tree_deref_slot but only used during migration when a pages - * mapping is being moved. The caller does not hold the RCU read lock but it - * must hold the tree lock to prevent parallel updates. + * radix_tree_deref_slot_protected - dereference a slot with tree lock held + * @slot: slot pointer, returned by radix_tree_lookup_slot + * + * Similar to radix_tree_deref_slot. The caller does not hold the RCU read + * lock but it must hold the tree lock to prevent parallel updates. + * + * Return: entry stored in that slot. 
*/ -static inline void *radix_tree_deref_slot_protected(void **pslot, +static inline void *radix_tree_deref_slot_protected(void __rcu **slot, spinlock_t *treelock) { - return rcu_dereference_protected(*pslot, lockdep_is_held(treelock)); + return rcu_dereference_protected(*slot, lockdep_is_held(treelock)); } /** @@ -282,9 +286,9 @@ static inline int radix_tree_exception(void *arg) return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); } -int __radix_tree_create(struct radix_tree_root *root, unsigned long index, +int __radix_tree_create(struct radix_tree_root *, unsigned long index, unsigned order, struct radix_tree_node **nodep, - void ***slotp); + void __rcu ***slotp); int __radix_tree_insert(struct radix_tree_root *, unsigned long index, unsigned order, void *); static inline int radix_tree_insert(struct radix_tree_root *root, @@ -292,55 +296,56 @@ static inline int radix_tree_insert(struct radix_tree_root *root, { return __radix_tree_insert(root, index, 0, entry); } -void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, - struct radix_tree_node **nodep, void ***slotp); -void *radix_tree_lookup(struct radix_tree_root *, unsigned long); -void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); +void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index, + struct radix_tree_node **nodep, void __rcu ***slotp); +void *radix_tree_lookup(const struct radix_tree_root *, unsigned long); +void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *, + unsigned long index); typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *); -void __radix_tree_replace(struct radix_tree_root *root, - struct radix_tree_node *node, - void **slot, void *item, +void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *, + void __rcu **slot, void *entry, radix_tree_update_node_t update_node, void *private); void radix_tree_iter_replace(struct radix_tree_root *, - const struct radix_tree_iter *, void **slot, void *item); -void radix_tree_replace_slot(struct radix_tree_root *root, - void **slot, void *item); -void __radix_tree_delete_node(struct radix_tree_root *root, - struct radix_tree_node *node, + const struct radix_tree_iter *, void __rcu **slot, void *entry); +void radix_tree_replace_slot(struct radix_tree_root *, + void __rcu **slot, void *entry); +void __radix_tree_delete_node(struct radix_tree_root *, + struct radix_tree_node *, radix_tree_update_node_t update_node, void *private); +void radix_tree_iter_delete(struct radix_tree_root *, + struct radix_tree_iter *iter, void __rcu **slot); void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); void *radix_tree_delete(struct radix_tree_root *, unsigned long); -void radix_tree_clear_tags(struct radix_tree_root *root, - struct radix_tree_node *node, - void **slot); -unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, +void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *, + void __rcu **slot); +unsigned int radix_tree_gang_lookup(const struct radix_tree_root *, void **results, unsigned long first_index, unsigned int max_items); -unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, - void ***results, unsigned long *indices, +unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *, + void __rcu ***results, unsigned long *indices, unsigned long first_index, unsigned int max_items); int radix_tree_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask); int 
radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); void radix_tree_init(void); -void *radix_tree_tag_set(struct radix_tree_root *root, +void *radix_tree_tag_set(struct radix_tree_root *, unsigned long index, unsigned int tag); -void *radix_tree_tag_clear(struct radix_tree_root *root, +void *radix_tree_tag_clear(struct radix_tree_root *, unsigned long index, unsigned int tag); -int radix_tree_tag_get(struct radix_tree_root *root, +int radix_tree_tag_get(const struct radix_tree_root *, unsigned long index, unsigned int tag); -void radix_tree_iter_tag_set(struct radix_tree_root *root, +void radix_tree_iter_tag_set(struct radix_tree_root *, + const struct radix_tree_iter *iter, unsigned int tag); +void radix_tree_iter_tag_clear(struct radix_tree_root *, const struct radix_tree_iter *iter, unsigned int tag); -unsigned int -radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, - unsigned long first_index, unsigned int max_items, - unsigned int tag); -unsigned int -radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, - unsigned long first_index, unsigned int max_items, - unsigned int tag); -int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); +unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *, + void **results, unsigned long first_index, + unsigned int max_items, unsigned int tag); +unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *, + void __rcu ***results, unsigned long first_index, + unsigned int max_items, unsigned int tag); +int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag); static inline void radix_tree_preload_end(void) { @@ -352,10 +357,14 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index, unsigned new_order); int radix_tree_join(struct radix_tree_root *, unsigned long index, unsigned new_order, void *); +void __rcu **idr_get_free(struct radix_tree_root *, struct radix_tree_iter *, + gfp_t, int end); -#define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ -#define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ -#define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */ +enum { + RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */ + RADIX_TREE_ITER_TAGGED = 0x10, /* lookup tagged slots */ + RADIX_TREE_ITER_CONTIG = 0x20, /* stop at first hole */ +}; /** * radix_tree_iter_init - initialize radix tree iterator @@ -364,7 +373,7 @@ int radix_tree_join(struct radix_tree_root *, unsigned long index, * @start: iteration starting index * Returns: NULL */ -static __always_inline void ** +static __always_inline void __rcu ** radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) { /* @@ -393,10 +402,46 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) * Also it fills @iter with data about chunk: position in the tree (index), * its end (next_index), and constructs a bit mask for tagged iterating (tags). */ -void **radix_tree_next_chunk(struct radix_tree_root *root, +void __rcu **radix_tree_next_chunk(const struct radix_tree_root *, struct radix_tree_iter *iter, unsigned flags); /** + * radix_tree_iter_lookup - look up an index in the radix tree + * @root: radix tree root + * @iter: iterator state + * @index: key to look up + * + * If @index is present in the radix tree, this function returns the slot + * containing it and updates @iter to describe the entry. If @index is not + * present, it returns NULL. 
+ */ +static inline void __rcu ** +radix_tree_iter_lookup(const struct radix_tree_root *root, + struct radix_tree_iter *iter, unsigned long index) +{ + radix_tree_iter_init(iter, index); + return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG); +} + +/** + * radix_tree_iter_find - find a present entry + * @root: radix tree root + * @iter: iterator state + * @index: start location + * + * This function returns the slot containing the entry with the lowest index + * which is at least @index. If @index is larger than any present entry, this + * function returns NULL. The @iter is updated to describe the entry found. + */ +static inline void __rcu ** +radix_tree_iter_find(const struct radix_tree_root *root, + struct radix_tree_iter *iter, unsigned long index) +{ + radix_tree_iter_init(iter, index); + return radix_tree_next_chunk(root, iter, 0); +} + +/** * radix_tree_iter_retry - retry this chunk of the iteration * @iter: iterator state * @@ -406,7 +451,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, * and continue the iteration. */ static inline __must_check -void **radix_tree_iter_retry(struct radix_tree_iter *iter) +void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter) { iter->next_index = iter->index; iter->tags = 0; @@ -429,7 +474,7 @@ __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) * have been invalidated by an insertion or deletion. Call this function * before releasing the lock to continue the iteration from the next index. */ -void **__must_check radix_tree_iter_resume(void **slot, +void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot, struct radix_tree_iter *iter); /** @@ -445,11 +490,11 @@ radix_tree_chunk_size(struct radix_tree_iter *iter) } #ifdef CONFIG_RADIX_TREE_MULTIORDER -void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, - unsigned flags); +void __rcu **__radix_tree_next_slot(void __rcu **slot, + struct radix_tree_iter *iter, unsigned flags); #else /* Can't happen without sibling entries, but the compiler can't tell that */ -static inline void ** __radix_tree_next_slot(void **slot, +static inline void __rcu **__radix_tree_next_slot(void __rcu **slot, struct radix_tree_iter *iter, unsigned flags) { return slot; @@ -475,8 +520,8 @@ static inline void ** __radix_tree_next_slot(void **slot, * b) we are doing non-tagged iteration, and iter->index and iter->next_index * have been set up so that radix_tree_chunk_size() returns 1 or 0. 
*/ -static __always_inline void ** -radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) +static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot, + struct radix_tree_iter *iter, unsigned flags) { if (flags & RADIX_TREE_ITER_TAGGED) { iter->tags >>= 1; @@ -514,7 +559,7 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) return NULL; found: - if (unlikely(radix_tree_is_internal_node(*slot))) + if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot)))) return __radix_tree_next_slot(slot, iter, flags); return slot; } diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index d076183e49be..9702b6e183bc 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ old->rbaugmented = rbcompute(old); \ } \ rbstatic const struct rb_augment_callbacks rbname = { \ - rbname ## _propagate, rbname ## _copy, rbname ## _rotate \ + .propagate = rbname ## _propagate, \ + .copy = rbname ## _copy, \ + .rotate = rbname ## _rotate \ }; diff --git a/include/linux/refcount.h b/include/linux/refcount.h index 600aadf9cca4..0023fee4bbbc 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -1,54 +1,10 @@ #ifndef _LINUX_REFCOUNT_H #define _LINUX_REFCOUNT_H -/* - * Variant of atomic_t specialized for reference counts. - * - * The interface matches the atomic_t interface (to aid in porting) but only - * provides the few functions one should use for reference counting. - * - * It differs in that the counter saturates at UINT_MAX and will not move once - * there. This avoids wrapping the counter and causing 'spurious' - * use-after-free issues. - * - * Memory ordering rules are slightly relaxed wrt regular atomic_t functions - * and provide only what is strictly required for refcounts. - * - * The increments are fully relaxed; these will not provide ordering. The - * rationale is that whatever is used to obtain the object we're increasing the - * reference count on will provide the ordering. For locked data structures, - * its the lock acquire, for RCU/lockless data structures its the dependent - * load. - * - * Do note that inc_not_zero() provides a control dependency which will order - * future stores against the inc, this ensures we'll never modify the object - * if we did not in fact acquire a reference. - * - * The decrements will provide release order, such that all the prior loads and - * stores will be issued before, it also provides a control dependency, which - * will order us against the subsequent free(). - * - * The control dependency is against the load of the cmpxchg (ll/sc) that - * succeeded. This means the stores aren't fully ordered, but this is fine - * because the 1->0 transition indicates no concurrency. - * - * Note that the allocator is responsible for ordering things between free() - * and alloc(). 
- * - */ - #include <linux/atomic.h> -#include <linux/bug.h> #include <linux/mutex.h> #include <linux/spinlock.h> - -#ifdef CONFIG_DEBUG_REFCOUNT -#define REFCOUNT_WARN(cond, str) WARN_ON(cond) -#define __refcount_check __must_check -#else -#define REFCOUNT_WARN(cond, str) (void)(cond) -#define __refcount_check -#endif +#include <linux/kernel.h> typedef struct refcount_struct { atomic_t refs; @@ -66,229 +22,21 @@ static inline unsigned int refcount_read(const refcount_t *r) return atomic_read(&r->refs); } -static inline __refcount_check -bool refcount_add_not_zero(unsigned int i, refcount_t *r) -{ - unsigned int old, new, val = atomic_read(&r->refs); - - for (;;) { - if (!val) - return false; - - if (unlikely(val == UINT_MAX)) - return true; - - new = val + i; - if (new < val) - new = UINT_MAX; - old = atomic_cmpxchg_relaxed(&r->refs, val, new); - if (old == val) - break; - - val = old; - } - - REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); - - return true; -} - -static inline void refcount_add(unsigned int i, refcount_t *r) -{ - REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n"); -} - -/* - * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN. - * - * Provides no memory ordering, it is assumed the caller has guaranteed the - * object memory to be stable (RCU, etc.). It does provide a control dependency - * and thereby orders future stores. See the comment on top. - */ -static inline __refcount_check -bool refcount_inc_not_zero(refcount_t *r) -{ - unsigned int old, new, val = atomic_read(&r->refs); - - for (;;) { - new = val + 1; - - if (!val) - return false; - - if (unlikely(!new)) - return true; - - old = atomic_cmpxchg_relaxed(&r->refs, val, new); - if (old == val) - break; - - val = old; - } - - REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); - - return true; -} - -/* - * Similar to atomic_inc(), will saturate at UINT_MAX and WARN. - * - * Provides no memory ordering, it is assumed the caller already has a - * reference on the object, will WARN when this is not so. - */ -static inline void refcount_inc(refcount_t *r) -{ - REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n"); -} - -/* - * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to - * decrement when saturated at UINT_MAX. - * - * Provides release memory ordering, such that prior loads and stores are done - * before, and provides a control dependency such that free() must come after. - * See the comment on top. - */ -static inline __refcount_check -bool refcount_sub_and_test(unsigned int i, refcount_t *r) -{ - unsigned int old, new, val = atomic_read(&r->refs); - - for (;;) { - if (unlikely(val == UINT_MAX)) - return false; - - new = val - i; - if (new > val) { - REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n"); - return false; - } - - old = atomic_cmpxchg_release(&r->refs, val, new); - if (old == val) - break; - - val = old; - } - - return !new; -} - -static inline __refcount_check -bool refcount_dec_and_test(refcount_t *r) -{ - return refcount_sub_and_test(1, r); -} +extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r); +extern void refcount_add(unsigned int i, refcount_t *r); -/* - * Similar to atomic_dec(), it will WARN on underflow and fail to decrement - * when saturated at UINT_MAX. - * - * Provides release memory ordering, such that prior loads and stores are done - * before. 
- */ -static inline -void refcount_dec(refcount_t *r) -{ - REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); -} - -/* - * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the - * success thereof. - * - * Like all decrement operations, it provides release memory order and provides - * a control dependency. - * - * It can be used like a try-delete operator; this explicit case is provided - * and not cmpxchg in generic, because that would allow implementing unsafe - * operations. - */ -static inline __refcount_check -bool refcount_dec_if_one(refcount_t *r) -{ - return atomic_cmpxchg_release(&r->refs, 1, 0) == 1; -} - -/* - * No atomic_t counterpart, it decrements unless the value is 1, in which case - * it will return false. - * - * Was often done like: atomic_add_unless(&var, -1, 1) - */ -static inline __refcount_check -bool refcount_dec_not_one(refcount_t *r) -{ - unsigned int old, new, val = atomic_read(&r->refs); +extern __must_check bool refcount_inc_not_zero(refcount_t *r); +extern void refcount_inc(refcount_t *r); - for (;;) { - if (unlikely(val == UINT_MAX)) - return true; +extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r); +extern void refcount_sub(unsigned int i, refcount_t *r); - if (val == 1) - return false; +extern __must_check bool refcount_dec_and_test(refcount_t *r); +extern void refcount_dec(refcount_t *r); - new = val - 1; - if (new > val) { - REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n"); - return true; - } - - old = atomic_cmpxchg_release(&r->refs, val, new); - if (old == val) - break; - - val = old; - } - - return true; -} - -/* - * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail - * to decrement when saturated at UINT_MAX. - * - * Provides release memory ordering, such that prior loads and stores are done - * before, and provides a control dependency such that free() must come after. - * See the comment on top. - */ -static inline __refcount_check -bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) -{ - if (refcount_dec_not_one(r)) - return false; - - mutex_lock(lock); - if (!refcount_dec_and_test(r)) { - mutex_unlock(lock); - return false; - } - - return true; -} - -/* - * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to - * decrement when saturated at UINT_MAX. - * - * Provides release memory ordering, such that prior loads and stores are done - * before, and provides a control dependency such that free() must come after. - * See the comment on top. 
- */ -static inline __refcount_check -bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) -{ - if (refcount_dec_not_one(r)) - return false; - - spin_lock(lock); - if (!refcount_dec_and_test(r)) { - spin_unlock(lock); - return false; - } - - return true; -} +extern __must_check bool refcount_dec_if_one(refcount_t *r); +extern __must_check bool refcount_dec_not_one(refcount_t *r); +extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock); +extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock); #endif /* _LINUX_REFCOUNT_H */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 15321fb1df6b..8c89e902df3e 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -9,6 +9,7 @@ #include <linux/mm.h> #include <linux/rwsem.h> #include <linux/memcontrol.h> +#include <linux/highmem.h> /* * The anon_vma heads a list of private "related" vmas, to scan if * @@ -196,41 +197,30 @@ int page_referenced(struct page *, int is_locked, int try_to_unmap(struct page *, enum ttu_flags flags); -/* - * Used by uprobes to replace a userspace page safely - */ -pte_t *__page_check_address(struct page *, struct mm_struct *, - unsigned long, spinlock_t **, int); - -static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm, - unsigned long address, - spinlock_t **ptlp, int sync) -{ - pte_t *ptep; +/* Avoid racy checks */ +#define PVMW_SYNC (1 << 0) +/* Look for migration entries rather than present PTEs */ +#define PVMW_MIGRATION (1 << 1) - __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address, - ptlp, sync)); - return ptep; -} +struct page_vma_mapped_walk { + struct page *page; + struct vm_area_struct *vma; + unsigned long address; + pmd_t *pmd; + pte_t *pte; + spinlock_t *ptl; + unsigned int flags; +}; -/* - * Used by idle page tracking to check if a page was referenced via page - * tables. - */ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -bool page_check_address_transhuge(struct page *page, struct mm_struct *mm, - unsigned long address, pmd_t **pmdp, - pte_t **ptep, spinlock_t **ptlp); -#else -static inline bool page_check_address_transhuge(struct page *page, - struct mm_struct *mm, unsigned long address, - pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp) +static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw) { - *ptep = page_check_address(page, mm, address, ptlp, 0); - *pmdp = NULL; - return !!*ptep; + if (pvmw->pte) + pte_unmap(pvmw->pte); + if (pvmw->ptl) + spin_unlock(pvmw->ptl); } -#endif + +bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw); /* * Used by swapoff to help locate where page is expected in vma. diff --git a/include/linux/rodata_test.h b/include/linux/rodata_test.h new file mode 100644 index 000000000000..ea05f6c51413 --- /dev/null +++ b/include/linux/rodata_test.h @@ -0,0 +1,23 @@ +/* + * rodata_test.h: functional test for mark_rodata_ro function + * + * (C) Copyright 2008 Intel Corporation + * Author: Arjan van de Ven <arjan@linux.intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License.
+ */ + +#ifndef _RODATA_TEST_H +#define _RODATA_TEST_H + +#ifdef CONFIG_DEBUG_RODATA_TEST +extern const int rodata_test_data; +void rodata_test(void); +#else +static inline void rodata_test(void) {} +#endif + +#endif /* _RODATA_TEST_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 451e241f32c5..4a28deb5f210 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2904,6 +2904,28 @@ static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) */ extern struct mm_struct * mm_alloc(void); +/** + * mmgrab() - Pin a &struct mm_struct. + * @mm: The &struct mm_struct to pin. + * + * Make sure that @mm will not get freed even after the owning task + * exits. This doesn't guarantee that the associated address space + * will still exist later on and mmget_not_zero() has to be used before + * accessing it. + * + * This is a preferred way to pin @mm for a longer/unbounded amount + * of time. + * + * Use mmdrop() to release the reference acquired by mmgrab(). + * + * See also <Documentation/vm/active_mm.txt> for an in-depth explanation + * of &mm_struct.mm_count vs &mm_struct.mm_users. + */ +static inline void mmgrab(struct mm_struct *mm) +{ + atomic_inc(&mm->mm_count); +} + /* mmdrop drops the mm and the page tables */ extern void __mmdrop(struct mm_struct *); static inline void mmdrop(struct mm_struct *mm) @@ -2926,6 +2948,27 @@ static inline void mmdrop_async(struct mm_struct *mm) } } +/** + * mmget() - Pin the address space associated with a &struct mm_struct. + * @mm: The address space to pin. + * + * Make sure that the address space of the given &struct mm_struct doesn't + * go away. This does not protect against parts of the address space being + * modified or freed, however. + * + * Never use this function to pin this address space for an + * unbounded/indefinite amount of time. + * + * Use mmput() to release the reference acquired by mmget(). + * + * See also <Documentation/vm/active_mm.txt> for an in-depth explanation + * of &mm_struct.mm_count vs &mm_struct.mm_users. + */ +static inline void mmget(struct mm_struct *mm) +{ + atomic_inc(&mm->mm_users); +} + static inline bool mmget_not_zero(struct mm_struct *mm) { return atomic_inc_not_zero(&mm->mm_users); diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h index deee23d012e7..04b124fca51e 100644 --- a/include/linux/sed-opal.h +++ b/include/linux/sed-opal.h @@ -27,6 +27,7 @@ typedef int (sec_send_recv)(void *data, u16 spsp, u8 secp, void *buffer, size_t len, bool send); #ifdef CONFIG_BLK_SED_OPAL +void free_opal_dev(struct opal_dev *dev); bool opal_unlock_from_suspend(struct opal_dev *dev); struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv); int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *ioctl_ptr); @@ -51,6 +52,10 @@ static inline bool is_sed_ioctl(unsigned int cmd) return false; } #else +static inline void free_opal_dev(struct opal_dev *dev) +{ +} + static inline bool is_sed_ioctl(unsigned int cmd) { return false; diff --git a/include/linux/sem.h b/include/linux/sem.h index d0efd6e6c20a..4fc222f8755d 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h @@ -21,7 +21,7 @@ struct sem_array { struct list_head list_id; /* undo requests on this array */ int sem_nsems; /* no.
of semaphores in array */ int complex_count; /* pending complex operations */ - bool complex_mode; /* no parallel simple ops */ + unsigned int use_global_lock;/* >0: global lock required */ }; #ifdef CONFIG_SYSVIPC diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index fdaac9d4d46d..a7d6bd2a918f 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -57,7 +57,14 @@ extern int shmem_zero_setup(struct vm_area_struct *); extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); extern int shmem_lock(struct file *file, int lock, struct user_struct *user); +#ifdef CONFIG_SHMEM extern bool shmem_mapping(struct address_space *mapping); +#else +static inline bool shmem_mapping(struct address_space *mapping) +{ + return false; +} +#endif /* CONFIG_SHMEM */ extern void shmem_unlock_mapping(struct address_space *mapping); extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); diff --git a/include/linux/spi/flash.h b/include/linux/spi/flash.h index 3f22932e67a4..f4199e758f97 100644 --- a/include/linux/spi/flash.h +++ b/include/linux/spi/flash.h @@ -7,7 +7,7 @@ struct mtd_partition; * struct flash_platform_data: board-specific flash data * @name: optional flash device name (eg, as used with mtdparts=) * @parts: optional array of mtd_partitions for static partitioning - * @nr_parts: number of mtd_partitions for static partitoning + * @nr_parts: number of mtd_partitions for static partitioning * @type: optional flash device type (e.g. m25p80 vs m25p64), for use * with chips that can't be queried for JEDEC or other IDs * diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 8a511c0985aa..20d157a518a7 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -204,8 +204,11 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd) kref_put(&h->ref, cd->cache_put); } -static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h) +static inline bool cache_is_expired(struct cache_detail *detail, struct cache_head *h) { + if (!test_bit(CACHE_VALID, &h->flags)) + return false; + return (h->expiry_time < seconds_since_boot()) || (detail->flush_time >= h->last_refresh); } @@ -227,6 +230,7 @@ extern void sunrpc_destroy_cache_detail(struct cache_detail *cd); extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *, umode_t, struct cache_detail *); extern void sunrpc_cache_unregister_pipefs(struct cache_detail *); +extern void sunrpc_cache_unhash(struct cache_detail *, struct cache_head *); /* Must store cache_detail in seq_file->private if using next three functions */ extern void *cache_seq_start(struct seq_file *file, loff_t *pos); diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index cfda6adcf33c..245fc59b7324 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h @@ -110,6 +110,15 @@ struct rpcrdma_msg { }; /* + * XDR sizes, in quads + */ +enum { + rpcrdma_fixed_maxsz = 4, + rpcrdma_segment_maxsz = 4, + rpcrdma_readchunk_maxsz = 2 + rpcrdma_segment_maxsz, +}; + +/* * Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks */ #define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7) diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 7321ae933867..e770abeed32d 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -400,10 +400,14 @@ 
struct svc_version { struct svc_procedure * vs_proc; /* per-procedure info */ u32 vs_xdrsize; /* xdrsize needed for this version */ - unsigned int vs_hidden : 1, /* Don't register with portmapper. - * Only used for nfsacl so far. */ - vs_rpcb_optnl:1;/* Don't care the result of register. - * Only used for nfsv4. */ + /* Don't register with rpcbind */ + bool vs_hidden; + + /* Don't care if the rpcbind registration fails */ + bool vs_rpcb_optnl; + + /* Need xprt with congestion control */ + bool vs_need_cong_ctrl; /* Override dispatch function (e.g. when caching replies). * A return value of 0 means drop the request. diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 757fb963696c..b105f73e3ca2 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -70,7 +70,7 @@ extern atomic_t rdma_stat_sq_prod; * completes. */ struct svc_rdma_op_ctxt { - struct list_head free; + struct list_head list; struct svc_rdma_op_ctxt *read_hdr; struct svc_rdma_fastreg_mr *frmr; int hdr_count; @@ -78,7 +78,6 @@ struct svc_rdma_op_ctxt { struct ib_cqe cqe; struct ib_cqe reg_cqe; struct ib_cqe inv_cqe; - struct list_head dto_q; u32 byte_len; u32 position; struct svcxprt_rdma *xprt; @@ -141,7 +140,8 @@ struct svcxprt_rdma { atomic_t sc_sq_avail; /* SQEs ready to be consumed */ unsigned int sc_sq_depth; /* Depth of SQ */ unsigned int sc_rq_depth; /* Depth of RQ */ - u32 sc_max_requests; /* Forward credits */ + __be32 sc_fc_credits; /* Forward credits */ + u32 sc_max_requests; /* Max requests */ u32 sc_max_bc_requests;/* Backward credits */ int sc_max_req_size; /* Size of each RQ WR buf */ @@ -171,7 +171,6 @@ struct svcxprt_rdma { wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */ unsigned long sc_flags; - struct list_head sc_dto_q; /* DTO tasklet I/O pending Q */ struct list_head sc_read_complete_q; struct work_struct sc_work; }; @@ -214,11 +213,7 @@ extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int); extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int); extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int, __be32, __be64, u32); -extern void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *, - struct rpcrdma_msg *, - struct rpcrdma_msg *, - enum rpcrdma_proc); -extern int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *); +extern unsigned int svc_rdma_xdr_get_reply_hdr_len(__be32 *rdma_resp); /* svc_rdma_recvfrom.c */ extern int svc_rdma_recvfrom(struct svc_rqst *); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 7440290f64ac..ddb7f94a9d06 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -67,6 +67,7 @@ struct svc_xprt { #define XPT_CACHE_AUTH 11 /* cache auth info */ #define XPT_LOCAL 12 /* connection from loopback interface */ #define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */ +#define XPT_CONG_CTRL 14 /* has congestion control */ struct svc_serv *xpt_server; /* service for transport */ atomic_t xpt_reserved; /* space on outq that is rsvd */ diff --git a/include/linux/timer.h b/include/linux/timer.h index 5a209b84fd9e..c7bdf895179c 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -61,6 +61,8 @@ struct timer_list { #define TIMER_ARRAYSHIFT 22 #define TIMER_ARRAYMASK 0xFFC00000 +#define TIMER_TRACE_FLAGMASK (TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE) + #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \ .entry = { .next = 
TIMER_ENTRY_STATIC }, \ .function = (_function), \ diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index f431861f22f1..0468548acebf 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -61,10 +61,18 @@ extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *, unsigned long from, unsigned long to, unsigned long len); -extern void madvise_userfault_dontneed(struct vm_area_struct *vma, - struct vm_area_struct **prev, - unsigned long start, - unsigned long end); +extern void userfaultfd_remove(struct vm_area_struct *vma, + struct vm_area_struct **prev, + unsigned long start, + unsigned long end); + +extern int userfaultfd_unmap_prep(struct vm_area_struct *vma, + unsigned long start, unsigned long end, + struct list_head *uf); +extern void userfaultfd_unmap_complete(struct mm_struct *mm, + struct list_head *uf); + +extern void userfaultfd_exit(struct mm_struct *mm); #else /* CONFIG_USERFAULTFD */ @@ -112,12 +120,29 @@ static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx, { } -static inline void madvise_userfault_dontneed(struct vm_area_struct *vma, - struct vm_area_struct **prev, - unsigned long start, - unsigned long end) +static inline void userfaultfd_remove(struct vm_area_struct *vma, + struct vm_area_struct **prev, + unsigned long start, + unsigned long end) { } + +static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma, + unsigned long start, unsigned long end, + struct list_head *uf) +{ + return 0; +} + +static inline void userfaultfd_unmap_complete(struct mm_struct *mm, + struct list_head *uf) +{ +} + +static inline void userfaultfd_exit(struct mm_struct *mm) +{ +} + #endif /* CONFIG_USERFAULTFD */ #endif /* _LINUX_USERFAULTFD_K_H */ diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 35a4d8185b51..a786e5e8973b 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -117,6 +117,7 @@ struct watchdog_device { #define WDOG_NO_WAY_OUT 1 /* Is 'nowayout' feature set ? 
*/ #define WDOG_STOP_ON_REBOOT 2 /* Should be stopped on reboot */ #define WDOG_HW_RUNNING 3 /* True if HW watchdog running */ +#define WDOG_STOP_ON_UNREGISTER 4 /* Should be stopped on unregister */ struct list_head deferred; }; @@ -151,6 +152,12 @@ static inline void watchdog_stop_on_reboot(struct watchdog_device *wdd) set_bit(WDOG_STOP_ON_REBOOT, &wdd->status); } +/* Use the following function to stop the watchdog when unregistering it */ +static inline void watchdog_stop_on_unregister(struct watchdog_device *wdd) +{ + set_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status); +} + /* Use the following function to check if a timeout value is invalid */ static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t) { diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index a26cc437293c..bde063cefd04 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -106,9 +106,9 @@ struct work_struct { #endif }; -#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL) +#define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) #define WORK_DATA_STATIC_INIT() \ - ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC) + ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)) struct delayed_work { struct work_struct work; diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 5527d910ba3d..a3c0cbd7c888 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -46,7 +46,7 @@ enum writeback_sync_modes { */ enum wb_reason { WB_REASON_BACKGROUND, - WB_REASON_TRY_TO_FREE_PAGES, + WB_REASON_VMSCAN, WB_REASON_SYNC, WB_REASON_PERIODIC, WB_REASON_LAPTOP_TIMER, diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index e1006b391cdc..bee1404391dd 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -174,10 +174,10 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv); * not freed when the control is deleted. Should this be needed * then a new internal bitfield can be added to tell the framework * to free this pointer. - * @p_cur: The control's current value represented via an union with + * @p_cur: The control's current value represented via a union with * provides a standard way of accessing control types * through a pointer. - * @p_new: The control's new value represented via an union with provides + * @p_new: The control's new value represented via a union with provides * a standard way of accessing control types * through a pointer. */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index c92dc03c8528..ead1aa6d003e 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1948,7 +1948,7 @@ struct cfg80211_deauth_request { * struct cfg80211_disassoc_request - Disassociation request data * * This structure provides information needed to complete IEEE 802.11 - * disassocation. + * disassociation. * * @bss: the BSS to disassociate from * @ie: Extra IEs to add to Disassociation frame or %NULL diff --git a/include/net/mac80211.h b/include/net/mac80211.h index b9a08cd1d97d..a3bab3c5ecfb 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -3392,7 +3392,7 @@ enum ieee80211_reconfig_type { * since there won't be any time to beacon before the switch anyway. * @pre_channel_switch: This is an optional callback that is called * before a channel switch procedure is started (ie. 
when a STA - * gets a CSA or an userspace initiated channel-switch), allowing + * gets a CSA or a userspace initiated channel-switch), allowing * the driver to prepare for the channel switch. * @post_channel_switch: This is an optional callback that is called * after a channel switch procedure is completed, allowing the diff --git a/include/net/sock.h b/include/net/sock.h index 9ccefa5c5487..5e5997654db6 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1526,6 +1526,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, void sk_free(struct sock *sk); void sk_destruct(struct sock *sk); struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority); +void sk_free_unlock_clone(struct sock *sk); struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, gfp_t priority); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 89f5bd4e1d52..0f1813c13687 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -60,6 +60,7 @@ #include <linux/atomic.h> #include <linux/mmu_notifier.h> #include <linux/uaccess.h> +#include <linux/cgroup_rdma.h> extern struct workqueue_struct *ib_wq; extern struct workqueue_struct *ib_comp_wq; @@ -1356,6 +1357,12 @@ struct ib_fmr_attr { struct ib_umem; +struct ib_rdmacg_object { +#ifdef CONFIG_CGROUP_RDMA + struct rdma_cgroup *cg; /* owner rdma cgroup */ +#endif +}; + struct ib_ucontext { struct ib_device *device; struct list_head pd_list; @@ -1388,6 +1395,8 @@ struct ib_ucontext { struct list_head no_private_counters; int odp_mrs_count; #endif + + struct ib_rdmacg_object cg_obj; }; struct ib_uobject { @@ -1395,6 +1404,7 @@ struct ib_uobject { struct ib_ucontext *context; /* associated user context */ void *object; /* containing object */ struct list_head list; /* link to context's list */ + struct ib_rdmacg_object cg_obj; /* rdmacg object */ int id; /* index into kernel idr */ struct kref ref; struct rw_semaphore mutex; /* protects .live */ @@ -1843,53 +1853,6 @@ struct ib_cache { struct ib_port_cache *ports; }; -struct ib_dma_mapping_ops { - int (*mapping_error)(struct ib_device *dev, - u64 dma_addr); - u64 (*map_single)(struct ib_device *dev, - void *ptr, size_t size, - enum dma_data_direction direction); - void (*unmap_single)(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction); - u64 (*map_page)(struct ib_device *dev, - struct page *page, unsigned long offset, - size_t size, - enum dma_data_direction direction); - void (*unmap_page)(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction); - int (*map_sg)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction); - void (*unmap_sg)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction); - int (*map_sg_attrs)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction, - unsigned long attrs); - void (*unmap_sg_attrs)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction, - unsigned long attrs); - void (*sync_single_for_cpu)(struct ib_device *dev, - u64 dma_handle, - size_t size, - enum dma_data_direction dir); - void (*sync_single_for_device)(struct ib_device *dev, - u64 dma_handle, - size_t size, - enum dma_data_direction dir); - void *(*alloc_coherent)(struct ib_device *dev, - size_t size, - u64 *dma_handle, - gfp_t flag); - void (*free_coherent)(struct ib_device *dev, - size_t size, void *cpu_addr, - u64 dma_handle); 
-}; - struct iw_cm_verbs; struct ib_port_immutable { @@ -1900,8 +1863,6 @@ struct ib_port_immutable { }; struct ib_device { - struct device *dma_device; - char name[IB_DEVICE_NAME_MAX]; struct list_head event_handler_list; @@ -2151,7 +2112,6 @@ struct ib_device { struct ib_rwq_ind_table_init_attr *init_attr, struct ib_udata *udata); int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); - struct ib_dma_mapping_ops *dma_ops; struct module *owner; struct device dev; @@ -2178,6 +2138,10 @@ struct ib_device { struct attribute_group *hw_stats_ag; struct rdma_hw_stats *hw_stats; +#ifdef CONFIG_CGROUP_RDMA + struct rdmacg_device cg_device; +#endif + /** * The following mandatory functions are used only at device * registration. Keep functions such as these at the end of this @@ -3043,9 +3007,7 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) */ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) { - if (dev->dma_ops) - return dev->dma_ops->mapping_error(dev, dma_addr); - return dma_mapping_error(dev->dma_device, dma_addr); + return dma_mapping_error(&dev->dev, dma_addr); } /** @@ -3059,9 +3021,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - return dev->dma_ops->map_single(dev, cpu_addr, size, direction); - return dma_map_single(dev->dma_device, cpu_addr, size, direction); + return dma_map_single(&dev->dev, cpu_addr, size, direction); } /** @@ -3075,28 +3035,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - dev->dma_ops->unmap_single(dev, addr, size, direction); - else - dma_unmap_single(dev->dma_device, addr, size, direction); -} - -static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, - void *cpu_addr, size_t size, - enum dma_data_direction direction, - unsigned long dma_attrs) -{ - return dma_map_single_attrs(dev->dma_device, cpu_addr, size, - direction, dma_attrs); -} - -static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction, - unsigned long dma_attrs) -{ - return dma_unmap_single_attrs(dev->dma_device, addr, size, - direction, dma_attrs); + dma_unmap_single(&dev->dev, addr, size, direction); } /** @@ -3113,9 +3052,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - return dev->dma_ops->map_page(dev, page, offset, size, direction); - return dma_map_page(dev->dma_device, page, offset, size, direction); + return dma_map_page(&dev->dev, page, offset, size, direction); } /** @@ -3129,10 +3066,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - dev->dma_ops->unmap_page(dev, addr, size, direction); - else - dma_unmap_page(dev->dma_device, addr, size, direction); + dma_unmap_page(&dev->dev, addr, size, direction); } /** @@ -3146,9 +3080,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { - if (dev->dma_ops) - return dev->dma_ops->map_sg(dev, sg, nents, direction); - return dma_map_sg(dev->dma_device, sg, nents, direction); + return dma_map_sg(&dev->dev, sg, nents, direction); } /** @@ -3162,10 +3094,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum 
dma_data_direction direction) { - if (dev->dma_ops) - dev->dma_ops->unmap_sg(dev, sg, nents, direction); - else - dma_unmap_sg(dev->dma_device, sg, nents, direction); + dma_unmap_sg(&dev->dev, sg, nents, direction); } static inline int ib_dma_map_sg_attrs(struct ib_device *dev, @@ -3173,12 +3102,7 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev, enum dma_data_direction direction, unsigned long dma_attrs) { - if (dev->dma_ops) - return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction, - dma_attrs); - else - return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, - dma_attrs); + return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); } static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, @@ -3186,12 +3110,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, enum dma_data_direction direction, unsigned long dma_attrs) { - if (dev->dma_ops) - return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction, - dma_attrs); - else - dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, - dma_attrs); + dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); } /** * ib_sg_dma_address - Return the DMA address from a scatter/gather entry @@ -3233,10 +3152,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, size_t size, enum dma_data_direction dir) { - if (dev->dma_ops) - dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); - else - dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); + dma_sync_single_for_cpu(&dev->dev, addr, size, dir); } /** @@ -3251,10 +3167,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev, size_t size, enum dma_data_direction dir) { - if (dev->dma_ops) - dev->dma_ops->sync_single_for_device(dev, addr, size, dir); - else - dma_sync_single_for_device(dev->dma_device, addr, size, dir); + dma_sync_single_for_device(&dev->dev, addr, size, dir); } /** @@ -3266,19 +3179,10 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev, */ static inline void *ib_dma_alloc_coherent(struct ib_device *dev, size_t size, - u64 *dma_handle, + dma_addr_t *dma_handle, gfp_t flag) { - if (dev->dma_ops) - return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); - else { - dma_addr_t handle; - void *ret; - - ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); - *dma_handle = handle; - return ret; - } + return dma_alloc_coherent(&dev->dev, size, dma_handle, flag); } /** @@ -3290,12 +3194,9 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev, */ static inline void ib_dma_free_coherent(struct ib_device *dev, size_t size, void *cpu_addr, - u64 dma_handle) + dma_addr_t dma_handle) { - if (dev->dma_ops) - dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); - else - dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); + dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle); } /** diff --git a/include/sound/rt5665.h b/include/sound/rt5665.h index 963229e71dc7..963229e71dc7 100755..100644 --- a/include/sound/rt5665.h +++ b/include/sound/rt5665.h diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 88d18a8ceb59..a3c3cab643a9 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -184,7 +184,7 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict, TRACE_EVENT_CONDITION(btrfs_get_extent, - TP_PROTO(struct btrfs_root *root, struct inode *inode, + TP_PROTO(struct btrfs_root *root, struct btrfs_inode *inode, struct extent_map *map), TP_ARGS(root, inode, map), diff 
--git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 593f586545eb..39123c06a566 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -119,6 +119,7 @@ enum rxrpc_recvmsg_trace { rxrpc_recvmsg_full, rxrpc_recvmsg_hole, rxrpc_recvmsg_next, + rxrpc_recvmsg_requeue, rxrpc_recvmsg_return, rxrpc_recvmsg_terminal, rxrpc_recvmsg_to_be_accepted, @@ -277,6 +278,7 @@ enum rxrpc_congest_change { EM(rxrpc_recvmsg_full, "FULL") \ EM(rxrpc_recvmsg_hole, "HOLE") \ EM(rxrpc_recvmsg_next, "NEXT") \ + EM(rxrpc_recvmsg_requeue, "REQU") \ EM(rxrpc_recvmsg_return, "RETN") \ EM(rxrpc_recvmsg_terminal, "TERM") \ EM(rxrpc_recvmsg_to_be_accepted, "TBAC") \ diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 1bca99dbb98f..80787eafba99 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -36,6 +36,13 @@ DEFINE_EVENT(timer_class, timer_init, TP_ARGS(timer) ); +#define decode_timer_flags(flags) \ + __print_flags(flags, "|", \ + { TIMER_MIGRATING, "M" }, \ + { TIMER_DEFERRABLE, "D" }, \ + { TIMER_PINNED, "P" }, \ + { TIMER_IRQSAFE, "I" }) + /** * timer_start - called when the timer is started * @timer: pointer to struct timer_list @@ -65,9 +72,12 @@ TRACE_EVENT(timer_start, __entry->flags = flags; ), - TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] flags=0x%08x", + TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s", __entry->timer, __entry->function, __entry->expires, - (long)__entry->expires - __entry->now, __entry->flags) + (long)__entry->expires - __entry->now, + __entry->flags & TIMER_CPUMASK, + __entry->flags >> TIMER_ARRAYSHIFT, + decode_timer_flags(__entry->flags & TIMER_TRACE_FLAGMASK)) ); /** diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 2ccd9ccbf9ef..7bd8783a590f 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -31,7 +31,7 @@ #define WB_WORK_REASON \ EM( WB_REASON_BACKGROUND, "background") \ - EM( WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages") \ + EM( WB_REASON_VMSCAN, "vmscan") \ EM( WB_REASON_SYNC, "sync") \ EM( WB_REASON_PERIODIC, "periodic") \ EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \ diff --git a/include/uapi/linux/auto_dev-ioctl.h b/include/uapi/linux/auto_dev-ioctl.h index 021ed331dd71..744b3d060968 100644 --- a/include/uapi/linux/auto_dev-ioctl.h +++ b/include/uapi/linux/auto_dev-ioctl.h @@ -113,17 +113,13 @@ struct autofs_dev_ioctl { static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in) { - memset(in, 0, sizeof(struct autofs_dev_ioctl)); + memset(in, 0, AUTOFS_DEV_IOCTL_SIZE); in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR; in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR; - in->size = sizeof(struct autofs_dev_ioctl); + in->size = AUTOFS_DEV_IOCTL_SIZE; in->ioctlfd = -1; } -/* - * If you change this make sure you make the corresponding change - * to autofs-dev-ioctl.c:lookup_ioctl() - */ enum { /* Get various version info */ AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71, @@ -160,8 +156,6 @@ enum { AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, }; -#define AUTOFS_IOCTL 0x93 - #define AUTOFS_DEV_IOCTL_VERSION \ _IOWR(AUTOFS_IOCTL, \ AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl) diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h index 1bfc3ed8b284..aa63451ef20a 100644 --- a/include/uapi/linux/auto_fs.h +++ b/include/uapi/linux/auto_fs.h @@ -61,12 +61,23 @@ struct autofs_packet_expire { char name[NAME_MAX+1]; }; -#define AUTOFS_IOC_READY _IO(0x93, 0x60) 
-#define AUTOFS_IOC_FAIL _IO(0x93, 0x61) -#define AUTOFS_IOC_CATATONIC _IO(0x93, 0x62) -#define AUTOFS_IOC_PROTOVER _IOR(0x93, 0x63, int) -#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93, 0x64, compat_ulong_t) -#define AUTOFS_IOC_SETTIMEOUT _IOWR(0x93, 0x64, unsigned long) -#define AUTOFS_IOC_EXPIRE _IOR(0x93, 0x65, struct autofs_packet_expire) +#define AUTOFS_IOCTL 0x93 + +enum { + AUTOFS_IOC_READY_CMD = 0x60, + AUTOFS_IOC_FAIL_CMD, + AUTOFS_IOC_CATATONIC_CMD, + AUTOFS_IOC_PROTOVER_CMD, + AUTOFS_IOC_SETTIMEOUT_CMD, + AUTOFS_IOC_EXPIRE_CMD, +}; + +#define AUTOFS_IOC_READY _IO(AUTOFS_IOCTL, AUTOFS_IOC_READY_CMD) +#define AUTOFS_IOC_FAIL _IO(AUTOFS_IOCTL, AUTOFS_IOC_FAIL_CMD) +#define AUTOFS_IOC_CATATONIC _IO(AUTOFS_IOCTL, AUTOFS_IOC_CATATONIC_CMD) +#define AUTOFS_IOC_PROTOVER _IOR(AUTOFS_IOCTL, AUTOFS_IOC_PROTOVER_CMD, int) +#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(AUTOFS_IOCTL, AUTOFS_IOC_SETTIMEOUT_CMD, compat_ulong_t) +#define AUTOFS_IOC_SETTIMEOUT _IOWR(AUTOFS_IOCTL, AUTOFS_IOC_SETTIMEOUT_CMD, unsigned long) +#define AUTOFS_IOC_EXPIRE _IOR(AUTOFS_IOCTL, AUTOFS_IOC_EXPIRE_CMD, struct autofs_packet_expire) #endif /* _UAPI_LINUX_AUTO_FS_H */ diff --git a/include/uapi/linux/auto_fs4.h b/include/uapi/linux/auto_fs4.h index 8f8f1bdcca8c..7c6da423d54e 100644 --- a/include/uapi/linux/auto_fs4.h +++ b/include/uapi/linux/auto_fs4.h @@ -148,10 +148,16 @@ union autofs_v5_packet_union { autofs_packet_expire_direct_t expire_direct; }; -#define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93, 0x66, int) -#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI -#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI -#define AUTOFS_IOC_PROTOSUBVER _IOR(0x93, 0x67, int) -#define AUTOFS_IOC_ASKUMOUNT _IOR(0x93, 0x70, int) +enum { + AUTOFS_IOC_EXPIRE_MULTI_CMD = 0x66, /* AUTOFS_IOC_EXPIRE_CMD + 1 */ + AUTOFS_IOC_PROTOSUBVER_CMD, + AUTOFS_IOC_ASKUMOUNT_CMD = 0x70, /* AUTOFS_DEV_IOCTL_VERSION_CMD - 1 */ +}; + +#define AUTOFS_IOC_EXPIRE_MULTI _IOW(AUTOFS_IOCTL, AUTOFS_IOC_EXPIRE_MULTI_CMD, int) +#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI +#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI +#define AUTOFS_IOC_PROTOSUBVER _IOR(AUTOFS_IOCTL, AUTOFS_IOC_PROTOSUBVER_CMD, int) +#define AUTOFS_IOC_ASKUMOUNT _IOR(AUTOFS_IOCTL, AUTOFS_IOC_ASKUMOUNT_CMD, int) #endif /* _LINUX_AUTO_FS4_H */ diff --git a/include/uapi/linux/mqueue.h b/include/uapi/linux/mqueue.h index d0a2b8e89813..bbd5116ea739 100644 --- a/include/uapi/linux/mqueue.h +++ b/include/uapi/linux/mqueue.h @@ -18,6 +18,8 @@ #ifndef _LINUX_MQUEUE_H #define _LINUX_MQUEUE_H +#include <linux/types.h> + #define MQ_PRIO_MAX 32768 /* per-uid limit of kernel memory used by mqueue, in bytes */ #define MQ_BYTES_MAX 819200 diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h index 0df7bd5d2fb1..c3be256107c6 100644 --- a/include/uapi/linux/nfsd/export.h +++ b/include/uapi/linux/nfsd/export.h @@ -32,7 +32,8 @@ #define NFSEXP_ASYNC 0x0010 #define NFSEXP_GATHERED_WRITES 0x0020 #define NFSEXP_NOREADDIRPLUS 0x0040 -/* 80 100 currently unused */ +#define NFSEXP_SECURITY_LABEL 0x0080 +/* 0x100 currently unused */ #define NFSEXP_NOHIDE 0x0200 #define NFSEXP_NOSUBTREECHECK 0x0400 #define NFSEXP_NOAUTHNLM 0x0800 /* Don't authenticate NLM requests - just trust */ @@ -53,7 +54,7 @@ #define NFSEXP_PNFS 0x20000 /* All flags that we claim to support. (Note we don't support NOACL.) 
*/ -#define NFSEXP_ALLFLAGS 0x3FE7F +#define NFSEXP_ALLFLAGS 0x3FEFF /* The flags that may vary depending on security flavor: */ #define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \ diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 9ac4b68c54d1..c055947c5c98 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -18,9 +18,11 @@ * means the userland is reading). */ #define UFFD_API ((__u64)0xAA) -#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK | \ +#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_EXIT | \ + UFFD_FEATURE_EVENT_FORK | \ UFFD_FEATURE_EVENT_REMAP | \ - UFFD_FEATURE_EVENT_MADVDONTNEED | \ + UFFD_FEATURE_EVENT_REMOVE | \ + UFFD_FEATURE_EVENT_UNMAP | \ UFFD_FEATURE_MISSING_HUGETLBFS | \ UFFD_FEATURE_MISSING_SHMEM) #define UFFD_API_IOCTLS \ @@ -92,7 +94,7 @@ struct uffd_msg { struct { __u64 start; __u64 end; - } madv_dn; + } remove; struct { /* unused reserved fields */ @@ -109,7 +111,9 @@ struct uffd_msg { #define UFFD_EVENT_PAGEFAULT 0x12 #define UFFD_EVENT_FORK 0x13 #define UFFD_EVENT_REMAP 0x14 -#define UFFD_EVENT_MADVDONTNEED 0x15 +#define UFFD_EVENT_REMOVE 0x15 +#define UFFD_EVENT_UNMAP 0x16 +#define UFFD_EVENT_EXIT 0x17 /* flags for UFFD_EVENT_PAGEFAULT */ #define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */ @@ -155,9 +159,11 @@ struct uffdio_api { #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) #define UFFD_FEATURE_EVENT_REMAP (1<<2) -#define UFFD_FEATURE_EVENT_MADVDONTNEED (1<<3) +#define UFFD_FEATURE_EVENT_REMOVE (1<<3) #define UFFD_FEATURE_MISSING_HUGETLBFS (1<<4) #define UFFD_FEATURE_MISSING_SHMEM (1<<5) +#define UFFD_FEATURE_EVENT_UNMAP (1<<6) +#define UFFD_FEATURE_EVENT_EXIT (1<<7) __u64 features; __u64 ioctls; diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h index 95251512e2c4..44b587b49904 100644 --- a/include/xen/arm/hypervisor.h +++ b/include/xen/arm/hypervisor.h @@ -18,7 +18,7 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) return PARAVIRT_LAZY_NONE; } -extern struct dma_map_ops *xen_dma_ops; +extern const struct dma_map_ops *xen_dma_ops; #ifdef CONFIG_XEN void __init xen_early_init(void); diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h index 56806bc90c2f..7fb7112d667c 100644 --- a/include/xen/interface/grant_table.h +++ b/include/xen/interface/grant_table.h @@ -181,7 +181,7 @@ struct grant_entry_header { }; /* - * Version 2 of the grant entry structure, here is an union because three + * Version 2 of the grant entry structure, here is a union because three * different types are suppotted: full_page, sub_page and transitive. */ union grant_entry_v2 {
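The ib_verbs.h hunks earlier in this diff remove the per-device struct ib_dma_mapping_ops indirection: the ib_dma_*() helpers now forward directly to the generic DMA API on &dev->dev, so existing callers keep working unchanged. A minimal sketch of such a caller, assuming a hypothetical ULP helper my_ulp_map_buf() (the function name, buffer argument and error handling are illustrative only, not taken from this diff):

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical consumer of the unchanged wrapper API. After this patch,
 * ib_dma_map_single() below is equivalent to
 * dma_map_single(&dev->dev, buf, len, DMA_TO_DEVICE).
 */
static int my_ulp_map_buf(struct ib_device *dev, void *buf, size_t len,
                          u64 *dma_addr)
{
        *dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(dev, *dma_addr))
                return -ENOMEM;
        return 0;
}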