author | Mike Pagano <mpagano@gentoo.org> | 2021-12-22 09:07:37 -0500
committer | Mike Pagano <mpagano@gentoo.org> | 2021-12-22 09:07:37 -0500
commit | aba288c2b172fea07e242cc010c8f566c96008d2 (patch)
tree | ca6db3c2726ef160fc7eb9d902c9c759212df6cc
parent | Linux patch 4.14.258 (diff)
download | linux-patches-aba288c2b172fea07e242cc010c8f566c96008d2.tar.gz linux-patches-aba288c2b172fea07e242cc010c8f566c96008d2.tar.bz2 linux-patches-aba288c2b172fea07e242cc010c8f566c96008d2.zip
Linux patch 4.14.259 (4.14-269)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4
-rw-r--r-- | 1258_linux-4.14.259.patch | 2159 |
2 files changed, 2163 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README index dda40838..db2d1b6c 100644 --- a/0000_README +++ b/0000_README @@ -1079,6 +1079,10 @@ Patch: 1257_linux-4.14.258.patch From: https://www.kernel.org Desc: Linux 4.14.258 +Patch: 1258_linux-4.14.259.patch +From: https://www.kernel.org +Desc: Linux 4.14.259 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1258_linux-4.14.259.patch b/1258_linux-4.14.259.patch new file mode 100644 index 00000000..ef93e75d --- /dev/null +++ b/1258_linux-4.14.259.patch @@ -0,0 +1,2159 @@ +diff --git a/Makefile b/Makefile +index f77e1d686f872..1d22e50da86e8 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 14 +-SUBLEVEL = 258 ++SUBLEVEL = 259 + EXTRAVERSION = + NAME = Petit Gorille + +diff --git a/arch/Kconfig b/arch/Kconfig +index 95567f6832752..c2e73c01d7adc 100644 +--- a/arch/Kconfig ++++ b/arch/Kconfig +@@ -980,4 +980,7 @@ config HAVE_ARCH_COMPILER_H + linux/compiler-*.h in order to override macro definitions that those + headers generally provide. + ++config ARCH_USE_MEMREMAP_PROT ++ bool ++ + source "kernel/gcov/Kconfig" +diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug +index b14f154919a5d..d6cf18a0cb0a9 100644 +--- a/arch/arm/Kconfig.debug ++++ b/arch/arm/Kconfig.debug +@@ -16,30 +16,42 @@ config ARM_PTDUMP + kernel. + If in doubt, say "N" + +-# RMK wants arm kernels compiled with frame pointers or stack unwinding. +-# If you know what you are doing and are willing to live without stack +-# traces, you can get a slightly smaller kernel by setting this option to +-# n, but then RMK will have to kill you ;). +-config FRAME_POINTER +- bool +- depends on !THUMB2_KERNEL +- default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER ++choice ++ prompt "Choose kernel unwinder" ++ default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER ++ default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER ++ help ++ This determines which method will be used for unwinding kernel stack ++ traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack, ++ livepatch, lockdep, and more. ++ ++config UNWINDER_FRAME_POINTER ++ bool "Frame pointer unwinder" ++ depends on !THUMB2_KERNEL && !CC_IS_CLANG ++ select ARCH_WANT_FRAME_POINTERS ++ select FRAME_POINTER + help +- If you say N here, the resulting kernel will be slightly smaller and +- faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled, +- when a problem occurs with the kernel, the information that is +- reported is severely limited. ++ This option enables the frame pointer unwinder for unwinding ++ kernel stack traces. + +-config ARM_UNWIND +- bool "Enable stack unwinding support (EXPERIMENTAL)" ++config UNWINDER_ARM ++ bool "ARM EABI stack unwinder" + depends on AEABI +- default y ++ select ARM_UNWIND + help + This option enables stack unwinding support in the kernel + using the information automatically generated by the + compiler. The resulting kernel image is slightly bigger but + the performance is not affected. Currently, this feature +- only works with EABI compilers. If unsure say Y. ++ only works with EABI compilers. 
++ ++endchoice ++ ++config ARM_UNWIND ++ bool ++ ++config FRAME_POINTER ++ bool + + config OLD_MCOUNT + bool +diff --git a/arch/arm/boot/dts/imx6ull-pinfunc.h b/arch/arm/boot/dts/imx6ull-pinfunc.h +index 1182023366912..c4dcb3c6c5086 100644 +--- a/arch/arm/boot/dts/imx6ull-pinfunc.h ++++ b/arch/arm/boot/dts/imx6ull-pinfunc.h +@@ -51,6 +51,6 @@ + #define MX6ULL_PAD_CSI_DATA04__ESAI_TX_FS 0x01F4 0x0480 0x0000 0x9 0x0 + #define MX6ULL_PAD_CSI_DATA05__ESAI_TX_CLK 0x01F8 0x0484 0x0000 0x9 0x0 + #define MX6ULL_PAD_CSI_DATA06__ESAI_TX5_RX0 0x01FC 0x0488 0x0000 0x9 0x0 +-#define MX6ULL_PAD_CSI_DATA07__ESAI_T0 0x0200 0x048C 0x0000 0x9 0x0 ++#define MX6ULL_PAD_CSI_DATA07__ESAI_TX0 0x0200 0x048C 0x0000 0x9 0x0 + + #endif /* __DTS_IMX6ULL_PINFUNC_H */ +diff --git a/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts b/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts +index beb2fc6b9eb63..adfdc43ac052f 100644 +--- a/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts ++++ b/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts +@@ -23,7 +23,7 @@ + flash0: n25q00@0 { + #address-cells = <1>; + #size-cells = <1>; +- compatible = "n25q00aa"; ++ compatible = "micron,mt25qu02g", "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <100000000>; + +diff --git a/arch/arm/boot/dts/socfpga_arria5_socdk.dts b/arch/arm/boot/dts/socfpga_arria5_socdk.dts +index aac4feea86f38..09ffa79240c84 100644 +--- a/arch/arm/boot/dts/socfpga_arria5_socdk.dts ++++ b/arch/arm/boot/dts/socfpga_arria5_socdk.dts +@@ -131,7 +131,7 @@ + flash: flash@0 { + #address-cells = <1>; + #size-cells = <1>; +- compatible = "n25q256a"; ++ compatible = "micron,n25q256a", "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <100000000>; + +diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts +index 155829f9eba16..907d8aa6d9fc8 100644 +--- a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts ++++ b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts +@@ -136,7 +136,7 @@ + flash0: n25q00@0 { + #address-cells = <1>; + #size-cells = <1>; +- compatible = "n25q00"; ++ compatible = "micron,mt25qu02g", "jedec,spi-nor"; + reg = <0>; /* chip select */ + spi-max-frequency = <100000000>; + +diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts +index a4a555c19d943..fe5fe4559969d 100644 +--- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts ++++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts +@@ -181,7 +181,7 @@ + flash: flash@0 { + #address-cells = <1>; + #size-cells = <1>; +- compatible = "n25q00"; ++ compatible = "micron,mt25qu02g", "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <100000000>; + +diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts +index 53bf99eef66de..0992cae3e60ef 100644 +--- a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts ++++ b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts +@@ -87,7 +87,7 @@ + flash: flash@0 { + #address-cells = <1>; + #size-cells = <1>; +- compatible = "n25q256a"; ++ compatible = "micron,n25q256a", "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <100000000>; + m25p,fast-read; +diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts b/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts +index 8860dd2e242c4..22bfef024913a 100644 +--- a/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts ++++ b/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts +@@ -128,7 +128,7 @@ + flash0: n25q512a@0 { + #address-cells = <1>; + #size-cells = <1>; +- compatible = "n25q512a"; ++ compatible = "micron,n25q512a", 
"jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <100000000>; + +diff --git a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts +index 655fe87e272d9..349719a9c1360 100644 +--- a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts ++++ b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts +@@ -249,7 +249,7 @@ + n25q128@0 { + #address-cells = <1>; + #size-cells = <1>; +- compatible = "n25q128"; ++ compatible = "micron,n25q128", "jedec,spi-nor"; + reg = <0>; /* chip select */ + spi-max-frequency = <100000000>; + m25p,fast-read; +@@ -266,7 +266,7 @@ + n25q00@1 { + #address-cells = <1>; + #size-cells = <1>; +- compatible = "n25q00"; ++ compatible = "micron,mt25qu02g", "jedec,spi-nor"; + reg = <1>; /* chip select */ + spi-max-frequency = <100000000>; + m25p,fast-read; +diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c +index d130a5ece5d55..bf24690ec83af 100644 +--- a/arch/arm/mm/copypage-fa.c ++++ b/arch/arm/mm/copypage-fa.c +@@ -17,26 +17,25 @@ + /* + * Faraday optimised copy_user_page + */ +-static void __naked +-fa_copy_user_page(void *kto, const void *kfrom) ++static void fa_copy_user_page(void *kto, const void *kfrom) + { +- asm("\ +- stmfd sp!, {r4, lr} @ 2\n\ +- mov r2, %0 @ 1\n\ +-1: ldmia r1!, {r3, r4, ip, lr} @ 4\n\ +- stmia r0, {r3, r4, ip, lr} @ 4\n\ +- mcr p15, 0, r0, c7, c14, 1 @ 1 clean and invalidate D line\n\ +- add r0, r0, #16 @ 1\n\ +- ldmia r1!, {r3, r4, ip, lr} @ 4\n\ +- stmia r0, {r3, r4, ip, lr} @ 4\n\ +- mcr p15, 0, r0, c7, c14, 1 @ 1 clean and invalidate D line\n\ +- add r0, r0, #16 @ 1\n\ +- subs r2, r2, #1 @ 1\n\ ++ int tmp; ++ ++ asm volatile ("\ ++1: ldmia %1!, {r3, r4, ip, lr} @ 4\n\ ++ stmia %0, {r3, r4, ip, lr} @ 4\n\ ++ mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\ ++ add %0, %0, #16 @ 1\n\ ++ ldmia %1!, {r3, r4, ip, lr} @ 4\n\ ++ stmia %0, {r3, r4, ip, lr} @ 4\n\ ++ mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\ ++ add %0, %0, #16 @ 1\n\ ++ subs %2, %2, #1 @ 1\n\ + bne 1b @ 1\n\ +- mcr p15, 0, r2, c7, c10, 4 @ 1 drain WB\n\ +- ldmfd sp!, {r4, pc} @ 3" +- : +- : "I" (PAGE_SIZE / 32)); ++ mcr p15, 0, %2, c7, c10, 4 @ 1 drain WB" ++ : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) ++ : "2" (PAGE_SIZE / 32) ++ : "r3", "r4", "ip", "lr"); + } + + void fa_copy_user_highpage(struct page *to, struct page *from, +diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c +index 49ee0c1a72097..cc819732d9b82 100644 +--- a/arch/arm/mm/copypage-feroceon.c ++++ b/arch/arm/mm/copypage-feroceon.c +@@ -13,58 +13,56 @@ + #include <linux/init.h> + #include <linux/highmem.h> + +-static void __naked +-feroceon_copy_user_page(void *kto, const void *kfrom) ++static void feroceon_copy_user_page(void *kto, const void *kfrom) + { +- asm("\ +- stmfd sp!, {r4-r9, lr} \n\ +- mov ip, %2 \n\ +-1: mov lr, r1 \n\ +- ldmia r1!, {r2 - r9} \n\ +- pld [lr, #32] \n\ +- pld [lr, #64] \n\ +- pld [lr, #96] \n\ +- pld [lr, #128] \n\ +- pld [lr, #160] \n\ +- pld [lr, #192] \n\ +- pld [lr, #224] \n\ +- stmia r0, {r2 - r9} \n\ +- ldmia r1!, {r2 - r9} \n\ +- mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ +- add r0, r0, #32 \n\ +- stmia r0, {r2 - r9} \n\ +- ldmia r1!, {r2 - r9} \n\ +- mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ +- add r0, r0, #32 \n\ +- stmia r0, {r2 - r9} \n\ +- ldmia r1!, {r2 - r9} \n\ +- mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ +- add r0, r0, #32 \n\ +- stmia r0, {r2 - r9} \n\ +- ldmia r1!, {r2 - r9} \n\ +- mcr p15, 0, r0, c7, 
c14, 1 @ clean and invalidate D line\n\ +- add r0, r0, #32 \n\ +- stmia r0, {r2 - r9} \n\ +- ldmia r1!, {r2 - r9} \n\ +- mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ +- add r0, r0, #32 \n\ +- stmia r0, {r2 - r9} \n\ +- ldmia r1!, {r2 - r9} \n\ +- mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ +- add r0, r0, #32 \n\ +- stmia r0, {r2 - r9} \n\ +- ldmia r1!, {r2 - r9} \n\ +- mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ +- add r0, r0, #32 \n\ +- stmia r0, {r2 - r9} \n\ +- subs ip, ip, #(32 * 8) \n\ +- mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ +- add r0, r0, #32 \n\ ++ int tmp; ++ ++ asm volatile ("\ ++1: ldmia %1!, {r2 - r7, ip, lr} \n\ ++ pld [%1, #0] \n\ ++ pld [%1, #32] \n\ ++ pld [%1, #64] \n\ ++ pld [%1, #96] \n\ ++ pld [%1, #128] \n\ ++ pld [%1, #160] \n\ ++ pld [%1, #192] \n\ ++ stmia %0, {r2 - r7, ip, lr} \n\ ++ ldmia %1!, {r2 - r7, ip, lr} \n\ ++ mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ ++ add %0, %0, #32 \n\ ++ stmia %0, {r2 - r7, ip, lr} \n\ ++ ldmia %1!, {r2 - r7, ip, lr} \n\ ++ mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ ++ add %0, %0, #32 \n\ ++ stmia %0, {r2 - r7, ip, lr} \n\ ++ ldmia %1!, {r2 - r7, ip, lr} \n\ ++ mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ ++ add %0, %0, #32 \n\ ++ stmia %0, {r2 - r7, ip, lr} \n\ ++ ldmia %1!, {r2 - r7, ip, lr} \n\ ++ mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ ++ add %0, %0, #32 \n\ ++ stmia %0, {r2 - r7, ip, lr} \n\ ++ ldmia %1!, {r2 - r7, ip, lr} \n\ ++ mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ ++ add %0, %0, #32 \n\ ++ stmia %0, {r2 - r7, ip, lr} \n\ ++ ldmia %1!, {r2 - r7, ip, lr} \n\ ++ mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ ++ add %0, %0, #32 \n\ ++ stmia %0, {r2 - r7, ip, lr} \n\ ++ ldmia %1!, {r2 - r7, ip, lr} \n\ ++ mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ ++ add %0, %0, #32 \n\ ++ stmia %0, {r2 - r7, ip, lr} \n\ ++ subs %2, %2, #(32 * 8) \n\ ++ mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ ++ add %0, %0, #32 \n\ + bne 1b \n\ +- mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\ +- ldmfd sp!, {r4-r9, pc}" +- : +- : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE)); ++ mcr p15, 0, %2, c7, c10, 4 @ drain WB" ++ : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) ++ : "2" (PAGE_SIZE) ++ : "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr"); + } + + void feroceon_copy_user_highpage(struct page *to, struct page *from, +diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c +index 1267e64133b92..db624170854a0 100644 +--- a/arch/arm/mm/copypage-v4mc.c ++++ b/arch/arm/mm/copypage-v4mc.c +@@ -40,12 +40,11 @@ static DEFINE_RAW_SPINLOCK(minicache_lock); + * instruction. If your processor does not supply this, you have to write your + * own copy_user_highpage that does the right thing. 
+ */ +-static void __naked +-mc_copy_user_page(void *from, void *to) ++static void mc_copy_user_page(void *from, void *to) + { +- asm volatile( +- "stmfd sp!, {r4, lr} @ 2\n\ +- mov r4, %2 @ 1\n\ ++ int tmp; ++ ++ asm volatile ("\ + ldmia %0!, {r2, r3, ip, lr} @ 4\n\ + 1: mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\ + stmia %1!, {r2, r3, ip, lr} @ 4\n\ +@@ -55,13 +54,13 @@ mc_copy_user_page(void *from, void *to) + mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\ + stmia %1!, {r2, r3, ip, lr} @ 4\n\ + ldmia %0!, {r2, r3, ip, lr} @ 4\n\ +- subs r4, r4, #1 @ 1\n\ ++ subs %2, %2, #1 @ 1\n\ + stmia %1!, {r2, r3, ip, lr} @ 4\n\ + ldmneia %0!, {r2, r3, ip, lr} @ 4\n\ +- bne 1b @ 1\n\ +- ldmfd sp!, {r4, pc} @ 3" +- : +- : "r" (from), "r" (to), "I" (PAGE_SIZE / 64)); ++ bne 1b @ " ++ : "+&r" (from), "+&r" (to), "=&r" (tmp) ++ : "2" (PAGE_SIZE / 64) ++ : "r2", "r3", "ip", "lr"); + } + + void v4_mc_copy_user_highpage(struct page *to, struct page *from, +diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c +index 067d0fdd630c1..cd3e165afeede 100644 +--- a/arch/arm/mm/copypage-v4wb.c ++++ b/arch/arm/mm/copypage-v4wb.c +@@ -22,29 +22,28 @@ + * instruction. If your processor does not supply this, you have to write your + * own copy_user_highpage that does the right thing. + */ +-static void __naked +-v4wb_copy_user_page(void *kto, const void *kfrom) ++static void v4wb_copy_user_page(void *kto, const void *kfrom) + { +- asm("\ +- stmfd sp!, {r4, lr} @ 2\n\ +- mov r2, %2 @ 1\n\ +- ldmia r1!, {r3, r4, ip, lr} @ 4\n\ +-1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ +- stmia r0!, {r3, r4, ip, lr} @ 4\n\ +- ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ +- stmia r0!, {r3, r4, ip, lr} @ 4\n\ +- ldmia r1!, {r3, r4, ip, lr} @ 4\n\ +- mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ +- stmia r0!, {r3, r4, ip, lr} @ 4\n\ +- ldmia r1!, {r3, r4, ip, lr} @ 4\n\ +- subs r2, r2, #1 @ 1\n\ +- stmia r0!, {r3, r4, ip, lr} @ 4\n\ +- ldmneia r1!, {r3, r4, ip, lr} @ 4\n\ ++ int tmp; ++ ++ asm volatile ("\ ++ ldmia %1!, {r3, r4, ip, lr} @ 4\n\ ++1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ ++ stmia %0!, {r3, r4, ip, lr} @ 4\n\ ++ ldmia %1!, {r3, r4, ip, lr} @ 4+1\n\ ++ stmia %0!, {r3, r4, ip, lr} @ 4\n\ ++ ldmia %1!, {r3, r4, ip, lr} @ 4\n\ ++ mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ ++ stmia %0!, {r3, r4, ip, lr} @ 4\n\ ++ ldmia %1!, {r3, r4, ip, lr} @ 4\n\ ++ subs %2, %2, #1 @ 1\n\ ++ stmia %0!, {r3, r4, ip, lr} @ 4\n\ ++ ldmneia %1!, {r3, r4, ip, lr} @ 4\n\ + bne 1b @ 1\n\ +- mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\ +- ldmfd sp!, {r4, pc} @ 3" +- : +- : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64)); ++ mcr p15, 0, %1, c7, c10, 4 @ 1 drain WB" ++ : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) ++ : "2" (PAGE_SIZE / 64) ++ : "r3", "r4", "ip", "lr"); + } + + void v4wb_copy_user_highpage(struct page *to, struct page *from, +diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c +index b85c5da2e510e..8614572e1296b 100644 +--- a/arch/arm/mm/copypage-v4wt.c ++++ b/arch/arm/mm/copypage-v4wt.c +@@ -20,27 +20,26 @@ + * dirty data in the cache. However, we do have to ensure that + * subsequent reads are up to date. 
+ */ +-static void __naked +-v4wt_copy_user_page(void *kto, const void *kfrom) ++static void v4wt_copy_user_page(void *kto, const void *kfrom) + { +- asm("\ +- stmfd sp!, {r4, lr} @ 2\n\ +- mov r2, %2 @ 1\n\ +- ldmia r1!, {r3, r4, ip, lr} @ 4\n\ +-1: stmia r0!, {r3, r4, ip, lr} @ 4\n\ +- ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ +- stmia r0!, {r3, r4, ip, lr} @ 4\n\ +- ldmia r1!, {r3, r4, ip, lr} @ 4\n\ +- stmia r0!, {r3, r4, ip, lr} @ 4\n\ +- ldmia r1!, {r3, r4, ip, lr} @ 4\n\ +- subs r2, r2, #1 @ 1\n\ +- stmia r0!, {r3, r4, ip, lr} @ 4\n\ +- ldmneia r1!, {r3, r4, ip, lr} @ 4\n\ ++ int tmp; ++ ++ asm volatile ("\ ++ ldmia %1!, {r3, r4, ip, lr} @ 4\n\ ++1: stmia %0!, {r3, r4, ip, lr} @ 4\n\ ++ ldmia %1!, {r3, r4, ip, lr} @ 4+1\n\ ++ stmia %0!, {r3, r4, ip, lr} @ 4\n\ ++ ldmia %1!, {r3, r4, ip, lr} @ 4\n\ ++ stmia %0!, {r3, r4, ip, lr} @ 4\n\ ++ ldmia %1!, {r3, r4, ip, lr} @ 4\n\ ++ subs %2, %2, #1 @ 1\n\ ++ stmia %0!, {r3, r4, ip, lr} @ 4\n\ ++ ldmneia %1!, {r3, r4, ip, lr} @ 4\n\ + bne 1b @ 1\n\ +- mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\ +- ldmfd sp!, {r4, pc} @ 3" +- : +- : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64)); ++ mcr p15, 0, %2, c7, c7, 0 @ flush ID cache" ++ : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) ++ : "2" (PAGE_SIZE / 64) ++ : "r3", "r4", "ip", "lr"); + } + + void v4wt_copy_user_highpage(struct page *to, struct page *from, +diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c +index 03a2042aced5f..55cbc3a89d858 100644 +--- a/arch/arm/mm/copypage-xsc3.c ++++ b/arch/arm/mm/copypage-xsc3.c +@@ -21,53 +21,46 @@ + + /* + * XSC3 optimised copy_user_highpage +- * r0 = destination +- * r1 = source + * + * The source page may have some clean entries in the cache already, but we + * can safely ignore them - break_cow() will flush them out of the cache + * if we eventually end up using our copied page. 
+ * + */ +-static void __naked +-xsc3_mc_copy_user_page(void *kto, const void *kfrom) ++static void xsc3_mc_copy_user_page(void *kto, const void *kfrom) + { +- asm("\ +- stmfd sp!, {r4, r5, lr} \n\ +- mov lr, %2 \n\ +- \n\ +- pld [r1, #0] \n\ +- pld [r1, #32] \n\ +-1: pld [r1, #64] \n\ +- pld [r1, #96] \n\ ++ int tmp; ++ ++ asm volatile ("\ ++ pld [%1, #0] \n\ ++ pld [%1, #32] \n\ ++1: pld [%1, #64] \n\ ++ pld [%1, #96] \n\ + \n\ +-2: ldrd r2, [r1], #8 \n\ +- mov ip, r0 \n\ +- ldrd r4, [r1], #8 \n\ +- mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\ +- strd r2, [r0], #8 \n\ +- ldrd r2, [r1], #8 \n\ +- strd r4, [r0], #8 \n\ +- ldrd r4, [r1], #8 \n\ +- strd r2, [r0], #8 \n\ +- strd r4, [r0], #8 \n\ +- ldrd r2, [r1], #8 \n\ +- mov ip, r0 \n\ +- ldrd r4, [r1], #8 \n\ +- mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\ +- strd r2, [r0], #8 \n\ +- ldrd r2, [r1], #8 \n\ +- subs lr, lr, #1 \n\ +- strd r4, [r0], #8 \n\ +- ldrd r4, [r1], #8 \n\ +- strd r2, [r0], #8 \n\ +- strd r4, [r0], #8 \n\ ++2: ldrd r2, [%1], #8 \n\ ++ ldrd r4, [%1], #8 \n\ ++ mcr p15, 0, %0, c7, c6, 1 @ invalidate\n\ ++ strd r2, [%0], #8 \n\ ++ ldrd r2, [%1], #8 \n\ ++ strd r4, [%0], #8 \n\ ++ ldrd r4, [%1], #8 \n\ ++ strd r2, [%0], #8 \n\ ++ strd r4, [%0], #8 \n\ ++ ldrd r2, [%1], #8 \n\ ++ ldrd r4, [%1], #8 \n\ ++ mcr p15, 0, %0, c7, c6, 1 @ invalidate\n\ ++ strd r2, [%0], #8 \n\ ++ ldrd r2, [%1], #8 \n\ ++ subs %2, %2, #1 \n\ ++ strd r4, [%0], #8 \n\ ++ ldrd r4, [%1], #8 \n\ ++ strd r2, [%0], #8 \n\ ++ strd r4, [%0], #8 \n\ + bgt 1b \n\ +- beq 2b \n\ +- \n\ +- ldmfd sp!, {r4, r5, pc}" +- : +- : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1)); ++ beq 2b " ++ : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) ++ : "2" (PAGE_SIZE / 64 - 1) ++ : "r2", "r3", "r4", "r5"); + } + + void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, +@@ -85,8 +78,6 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, + + /* + * XScale optimised clear_user_page +- * r0 = destination +- * r1 = virtual user address of ultimate destination page + */ + void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) + { +diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c +index 0fb85025344d9..c775d4b7adb08 100644 +--- a/arch/arm/mm/copypage-xscale.c ++++ b/arch/arm/mm/copypage-xscale.c +@@ -36,52 +36,51 @@ static DEFINE_RAW_SPINLOCK(minicache_lock); + * Dcache aliasing issue. The writes will be forwarded to the write buffer, + * and merged as appropriate. + */ +-static void __naked +-mc_copy_user_page(void *from, void *to) ++static void mc_copy_user_page(void *from, void *to) + { ++ int tmp; ++ + /* + * Strangely enough, best performance is achieved + * when prefetching destination as well. 
(NP) + */ +- asm volatile( +- "stmfd sp!, {r4, r5, lr} \n\ +- mov lr, %2 \n\ +- pld [r0, #0] \n\ +- pld [r0, #32] \n\ +- pld [r1, #0] \n\ +- pld [r1, #32] \n\ +-1: pld [r0, #64] \n\ +- pld [r0, #96] \n\ +- pld [r1, #64] \n\ +- pld [r1, #96] \n\ +-2: ldrd r2, [r0], #8 \n\ +- ldrd r4, [r0], #8 \n\ +- mov ip, r1 \n\ +- strd r2, [r1], #8 \n\ +- ldrd r2, [r0], #8 \n\ +- strd r4, [r1], #8 \n\ +- ldrd r4, [r0], #8 \n\ +- strd r2, [r1], #8 \n\ +- strd r4, [r1], #8 \n\ ++ asm volatile ("\ ++ pld [%0, #0] \n\ ++ pld [%0, #32] \n\ ++ pld [%1, #0] \n\ ++ pld [%1, #32] \n\ ++1: pld [%0, #64] \n\ ++ pld [%0, #96] \n\ ++ pld [%1, #64] \n\ ++ pld [%1, #96] \n\ ++2: ldrd r2, [%0], #8 \n\ ++ ldrd r4, [%0], #8 \n\ ++ mov ip, %1 \n\ ++ strd r2, [%1], #8 \n\ ++ ldrd r2, [%0], #8 \n\ ++ strd r4, [%1], #8 \n\ ++ ldrd r4, [%0], #8 \n\ ++ strd r2, [%1], #8 \n\ ++ strd r4, [%1], #8 \n\ + mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ +- ldrd r2, [r0], #8 \n\ ++ ldrd r2, [%0], #8 \n\ + mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ +- ldrd r4, [r0], #8 \n\ +- mov ip, r1 \n\ +- strd r2, [r1], #8 \n\ +- ldrd r2, [r0], #8 \n\ +- strd r4, [r1], #8 \n\ +- ldrd r4, [r0], #8 \n\ +- strd r2, [r1], #8 \n\ +- strd r4, [r1], #8 \n\ ++ ldrd r4, [%0], #8 \n\ ++ mov ip, %1 \n\ ++ strd r2, [%1], #8 \n\ ++ ldrd r2, [%0], #8 \n\ ++ strd r4, [%1], #8 \n\ ++ ldrd r4, [%0], #8 \n\ ++ strd r2, [%1], #8 \n\ ++ strd r4, [%1], #8 \n\ + mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ +- subs lr, lr, #1 \n\ ++ subs %2, %2, #1 \n\ + mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ + bgt 1b \n\ +- beq 2b \n\ +- ldmfd sp!, {r4, r5, pc} " +- : +- : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1)); ++ beq 2b " ++ : "+&r" (from), "+&r" (to), "=&r" (tmp) ++ : "2" (PAGE_SIZE / 64 - 1) ++ : "r2", "r3", "r4", "r5", "ip"); + } + + void xscale_mc_copy_user_highpage(struct page *to, struct page *from, +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index 290254849f97a..6301a8d2b87ee 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -1449,6 +1449,7 @@ config ARCH_HAS_MEM_ENCRYPT + config AMD_MEM_ENCRYPT + bool "AMD Secure Memory Encryption (SME) support" + depends on X86_64 && CPU_SUP_AMD ++ select ARCH_USE_MEMREMAP_PROT + ---help--- + Say yes to enable support for the encryption of system memory. + This requires an AMD processor that supports Secure Memory +@@ -1467,10 +1468,6 @@ config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT + If set to N, then the encryption of system memory can be + activated with the mem_encrypt=on command line option. + +-config ARCH_USE_MEMREMAP_PROT +- def_bool y +- depends on AMD_MEM_ENCRYPT +- + # Common NUMA Features + config NUMA + bool "Numa Memory Allocation and Scheduler Support" +@@ -1903,6 +1900,7 @@ config EFI + depends on ACPI + select UCS2_STRING + select EFI_RUNTIME_WRAPPERS ++ select ARCH_USE_MEMREMAP_PROT + ---help--- + This enables the kernel to use EFI runtime services that are + available (such as the EFI variable services). 
+diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c +index 7bebdd0273d34..3faf9667cc409 100644 +--- a/arch/x86/mm/ioremap.c ++++ b/arch/x86/mm/ioremap.c +@@ -626,7 +626,7 @@ bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size) + return arch_memremap_can_ram_remap(phys_addr, size, 0); + } + +-#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT ++#ifdef CONFIG_AMD_MEM_ENCRYPT + /* Remap memory with encryption */ + void __init *early_memremap_encrypted(resource_size_t phys_addr, + unsigned long size) +@@ -668,7 +668,7 @@ void __init *early_memremap_decrypted_wp(resource_size_t phys_addr, + + return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP); + } +-#endif /* CONFIG_ARCH_USE_MEMREMAP_PROT */ ++#endif /* CONFIG_AMD_MEM_ENCRYPT */ + + static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss; + +diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c +index cadd7fd290fa8..2a4ad0606fa30 100644 +--- a/arch/x86/platform/efi/quirks.c ++++ b/arch/x86/platform/efi/quirks.c +@@ -276,7 +276,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) + return; + } + +- new = early_memremap(new_phys, new_size); ++ new = early_memremap_prot(new_phys, new_size, ++ pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL))); + if (!new) { + pr_err("Failed to map new boot services memmap\n"); + return; +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 4b299efbd8047..fdf7b5edd520e 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -3182,8 +3182,19 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) + goto invalid_fld; + } + +- if (ata_is_ncq(tf->protocol) && (cdb[2 + cdb_offset] & 0x3) == 0) +- tf->protocol = ATA_PROT_NCQ_NODATA; ++ if ((cdb[2 + cdb_offset] & 0x3) == 0) { ++ /* ++ * When T_LENGTH is zero (No data is transferred), dir should ++ * be DMA_NONE. ++ */ ++ if (scmd->sc_data_direction != DMA_NONE) { ++ fp = 2 + cdb_offset; ++ goto invalid_fld; ++ } ++ ++ if (ata_is_ncq(tf->protocol)) ++ tf->protocol = ATA_PROT_NCQ_NODATA; ++ } + + /* enable LBA */ + tf->flags |= ATA_TFLAG_LBA; +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c +index 878eb9ba06b27..4487530217246 100644 +--- a/drivers/block/xen-blkfront.c ++++ b/drivers/block/xen-blkfront.c +@@ -1566,9 +1566,12 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) + unsigned long flags; + struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id; + struct blkfront_info *info = rinfo->dev_info; ++ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; + +- if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) ++ if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { ++ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); + return IRQ_HANDLED; ++ } + + spin_lock_irqsave(&rinfo->ring_lock, flags); + again: +@@ -1584,6 +1587,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) + unsigned long id; + unsigned int op; + ++ eoiflag = 0; ++ + RING_COPY_RESPONSE(&rinfo->ring, i, &bret); + id = bret.id; + +@@ -1699,6 +1704,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) + + spin_unlock_irqrestore(&rinfo->ring_lock, flags); + ++ xen_irq_lateeoi(irq, eoiflag); ++ + return IRQ_HANDLED; + + err: +@@ -1706,6 +1713,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) + + spin_unlock_irqrestore(&rinfo->ring_lock, flags); + ++ /* No EOI in order to avoid further interrupts. 
*/ ++ + pr_alert("%s disabled for further use\n", info->gd->disk_name); + return IRQ_HANDLED; + } +@@ -1745,8 +1754,8 @@ static int setup_blkring(struct xenbus_device *dev, + if (err) + goto fail; + +- err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0, +- "blkif", rinfo); ++ err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt, ++ 0, "blkif", rinfo); + if (err <= 0) { + xenbus_dev_fatal(dev, err, + "bind_evtchn_to_irqhandler failed"); +diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c +index 15f2e7025b78e..1d5510cb6db4e 100644 +--- a/drivers/char/agp/parisc-agp.c ++++ b/drivers/char/agp/parisc-agp.c +@@ -285,7 +285,7 @@ agp_ioc_init(void __iomem *ioc_regs) + return 0; + } + +-static int ++static int __init + lba_find_capability(int cap) + { + struct _parisc_agp_info *info = &parisc_agp_info; +@@ -370,7 +370,7 @@ fail: + return error; + } + +-static int ++static int __init + find_quicksilver(struct device *dev, void *data) + { + struct parisc_device **lba = data; +@@ -382,7 +382,7 @@ find_quicksilver(struct device *dev, void *data) + return 0; + } + +-static int ++static int __init + parisc_agp_init(void) + { + extern struct sba_device *sba_list; +diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c +index bfb79bd0c6de5..087d22ba8a2f6 100644 +--- a/drivers/dma/st_fdma.c ++++ b/drivers/dma/st_fdma.c +@@ -886,4 +886,4 @@ MODULE_LICENSE("GPL v2"); + MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver"); + MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>"); + MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>"); +-MODULE_ALIAS("platform: " DRIVER_NAME); ++MODULE_ALIAS("platform:" DRIVER_NAME); +diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c +index f395dec271131..a6e62a793fbe6 100644 +--- a/drivers/firmware/scpi_pm_domain.c ++++ b/drivers/firmware/scpi_pm_domain.c +@@ -27,7 +27,6 @@ struct scpi_pm_domain { + struct generic_pm_domain genpd; + struct scpi_ops *ops; + u32 domain; +- char name[30]; + }; + + /* +@@ -121,8 +120,13 @@ static int scpi_pm_domain_probe(struct platform_device *pdev) + + scpi_pd->domain = i; + scpi_pd->ops = scpi_ops; +- sprintf(scpi_pd->name, "%s.%d", np->name, i); +- scpi_pd->genpd.name = scpi_pd->name; ++ scpi_pd->genpd.name = devm_kasprintf(dev, GFP_KERNEL, ++ "%s.%d", np->name, i); ++ if (!scpi_pd->genpd.name) { ++ dev_err(dev, "Failed to allocate genpd name:%s.%d\n", ++ np->name, i); ++ continue; ++ } + scpi_pd->genpd.power_off = scpi_pd_power_off; + scpi_pd->genpd.power_on = scpi_pd_power_on; + +diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c +index ef4e81d774464..d49f177481195 100644 +--- a/drivers/gpu/drm/msm/dsi/dsi_host.c ++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c +@@ -1563,6 +1563,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, + if (!prop) { + dev_dbg(dev, + "failed to find data lane mapping, using default\n"); ++ /* Set the number of date lanes to 4 by default. 
*/ ++ msm_host->num_data_lanes = 4; + return 0; + } + +diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c +index c7c9e95e58a83..123b870728fbc 100644 +--- a/drivers/hwmon/dell-smm-hwmon.c ++++ b/drivers/hwmon/dell-smm-hwmon.c +@@ -578,15 +578,18 @@ static const struct file_operations i8k_fops = { + .unlocked_ioctl = i8k_ioctl, + }; + ++static struct proc_dir_entry *entry; ++ + static void __init i8k_init_procfs(void) + { + /* Register the proc entry */ +- proc_create("i8k", 0, NULL, &i8k_fops); ++ entry = proc_create("i8k", 0, NULL, &i8k_fops); + } + + static void __exit i8k_exit_procfs(void) + { +- remove_proc_entry("i8k", NULL); ++ if (entry) ++ remove_proc_entry("i8k", NULL); + } + + #else +diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c +index fe234578380ac..548089aa9aba5 100644 +--- a/drivers/i2c/busses/i2c-rk3x.c ++++ b/drivers/i2c/busses/i2c-rk3x.c +@@ -424,8 +424,8 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd) + if (!(ipd & REG_INT_MBRF)) + return; + +- /* ack interrupt */ +- i2c_writel(i2c, REG_INT_MBRF, REG_IPD); ++ /* ack interrupt (read also produces a spurious START flag, clear it too) */ ++ i2c_writel(i2c, REG_INT_MBRF | REG_INT_START, REG_IPD); + + /* Can only handle a maximum of 32 bytes at a time */ + if (len > 32) +diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c +index 8d7f9c8f2771c..db499ef6ccff4 100644 +--- a/drivers/input/touchscreen/of_touchscreen.c ++++ b/drivers/input/touchscreen/of_touchscreen.c +@@ -79,8 +79,8 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch, + data_present = touchscreen_get_prop_u32(dev, "touchscreen-size-x", + input_abs_get_max(input, + axis) + 1, +- &maximum) | +- touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x", ++ &maximum); ++ data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x", + input_abs_get_fuzz(input, axis), + &fuzz); + if (data_present) +@@ -90,8 +90,8 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch, + data_present = touchscreen_get_prop_u32(dev, "touchscreen-size-y", + input_abs_get_max(input, + axis) + 1, +- &maximum) | +- touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y", ++ &maximum); ++ data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y", + input_abs_get_fuzz(input, axis), + &fuzz); + if (data_present) +@@ -101,11 +101,11 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch, + data_present = touchscreen_get_prop_u32(dev, + "touchscreen-max-pressure", + input_abs_get_max(input, axis), +- &maximum) | +- touchscreen_get_prop_u32(dev, +- "touchscreen-fuzz-pressure", +- input_abs_get_fuzz(input, axis), +- &fuzz); ++ &maximum); ++ data_present |= touchscreen_get_prop_u32(dev, ++ "touchscreen-fuzz-pressure", ++ input_abs_get_fuzz(input, axis), ++ &fuzz); + if (data_present) + touchscreen_set_params(input, axis, maximum, fuzz); + +diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c +index 9e4d1212f4c16..63f2baed3c8a6 100644 +--- a/drivers/md/persistent-data/dm-btree-remove.c ++++ b/drivers/md/persistent-data/dm-btree-remove.c +@@ -423,9 +423,9 @@ static int rebalance_children(struct shadow_spine *s, + + memcpy(n, dm_block_data(child), + dm_bm_block_size(dm_tm_get_bm(info->tm))); +- dm_tm_unlock(info->tm, child); + + dm_tm_dec(info->tm, dm_block_location(child)); ++ dm_tm_unlock(info->tm, child); + return 0; + } + +diff --git 
a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c +index 0083e2a52a30f..576381ee757dd 100644 +--- a/drivers/net/ethernet/broadcom/bcmsysport.c ++++ b/drivers/net/ethernet/broadcom/bcmsysport.c +@@ -120,9 +120,13 @@ static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv, + struct dma_desc *desc, + unsigned int port) + { ++ unsigned long desc_flags; ++ + /* Ports are latched, so write upper address first */ ++ spin_lock_irqsave(&priv->desc_lock, desc_flags); + tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port)); + tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port)); ++ spin_unlock_irqrestore(&priv->desc_lock, desc_flags); + } + + /* Ethtool operations */ +@@ -1880,6 +1884,7 @@ static int bcm_sysport_open(struct net_device *dev) + } + + /* Initialize both hardware and software ring */ ++ spin_lock_init(&priv->desc_lock); + for (i = 0; i < dev->num_tx_queues; i++) { + ret = bcm_sysport_init_tx_ring(priv, i); + if (ret) { +diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h +index 3df4a48b8eac8..de2c7a6b3cd26 100644 +--- a/drivers/net/ethernet/broadcom/bcmsysport.h ++++ b/drivers/net/ethernet/broadcom/bcmsysport.h +@@ -733,6 +733,7 @@ struct bcm_sysport_priv { + int wol_irq; + + /* Transmit rings */ ++ spinlock_t desc_lock; + struct bcm_sysport_tx_ring *tx_rings; + + /* Receive queue */ +diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c +index 6f5888bd91944..98fd214f2c42b 100644 +--- a/drivers/net/ethernet/intel/igbvf/netdev.c ++++ b/drivers/net/ethernet/intel/igbvf/netdev.c +@@ -2911,6 +2911,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + return 0; + + err_hw_init: ++ netif_napi_del(&adapter->rx_ring->napi); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + err_sw_init: +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +index a37c951b07530..10fa0e095ec37 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +@@ -3397,6 +3397,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + ++ /* set MDIO speed before talking to the PHY in case it's the 1st time */ ++ ixgbe_set_mdio_speed(hw); ++ + /* PHY ops must be identified and initialized prior to reset */ + + /* Identify PHY and related function pointers */ +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +index 64c4b88de8449..565e1ac241aab 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +@@ -649,7 +649,7 @@ void __init mlx4_en_init_ptys2ethtool_map(void) + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000, +- ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); ++ ETHTOOL_LINK_MODE_1000baseX_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000, +@@ -661,9 +661,9 @@ void __init mlx4_en_init_ptys2ethtool_map(void) + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000, +- 
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); ++ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000, +- ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); ++ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000, + ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT); +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c +index 838b6fe9dfaaf..e286188b6ea1b 100644 +--- a/drivers/net/usb/lan78xx.c ++++ b/drivers/net/usb/lan78xx.c +@@ -920,11 +920,9 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset, + ret = lan78xx_read_raw_otp(dev, 0, 1, &sig); + + if (ret == 0) { +- if (sig == OTP_INDICATOR_1) +- offset = offset; +- else if (sig == OTP_INDICATOR_2) ++ if (sig == OTP_INDICATOR_2) + offset += 0x100; +- else ++ else if (sig != OTP_INDICATOR_1) + ret = -EINVAL; + if (!ret) + ret = lan78xx_read_raw_otp(dev, offset, length, data); +diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c +index 0edc5d621304b..a9cee3dac97b0 100644 +--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c ++++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c +@@ -323,9 +323,9 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter) + + adapter->seq_num++; + sleep_cfm_buf->seq_num = +- cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO ++ cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO + (adapter->seq_num, priv->bss_num, +- priv->bss_type))); ++ priv->bss_type)); + + mwifiex_dbg(adapter, CMD, + "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", +diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h +index 1d86d29b64ccc..c802b73a15c5c 100644 +--- a/drivers/net/wireless/marvell/mwifiex/fw.h ++++ b/drivers/net/wireless/marvell/mwifiex/fw.h +@@ -498,10 +498,10 @@ enum mwifiex_channel_flags { + + #define RF_ANTENNA_AUTO 0xFFFF + +-#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) { \ +- (((seq) & 0x00ff) | \ +- (((num) & 0x000f) << 8)) | \ +- (((type) & 0x000f) << 12); } ++#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) \ ++ ((((seq) & 0x00ff) | \ ++ (((num) & 0x000f) << 8)) | \ ++ (((type) & 0x000f) << 12)) + + #define HostCmd_GET_SEQ_NO(seq) \ + ((seq) & HostCmd_SEQ_NUM_MASK) +diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h +index 347c796afd4ed..bfa3c6aaebe6b 100644 +--- a/drivers/net/xen-netback/common.h ++++ b/drivers/net/xen-netback/common.h +@@ -203,6 +203,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */ + unsigned int rx_queue_max; + unsigned int rx_queue_len; + unsigned long last_rx_time; ++ unsigned int rx_slots_needed; + bool stalled; + + struct xenvif_copy_state rx_copy; +diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c +index ddfb1cfa2dd94..29c7645f57805 100644 +--- a/drivers/net/xen-netback/rx.c ++++ b/drivers/net/xen-netback/rx.c +@@ -33,28 +33,36 @@ + #include <xen/xen.h> + #include <xen/events.h> + +-static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) ++/* ++ * Update the needed ring page slots for the first SKB queued. ++ * Note that any call sequence outside the RX thread calling this function ++ * needs to wake up the RX thread via a call of xenvif_kick_thread() ++ * afterwards in order to avoid a race with putting the thread to sleep. 
++ */ ++static void xenvif_update_needed_slots(struct xenvif_queue *queue, ++ const struct sk_buff *skb) + { +- RING_IDX prod, cons; +- struct sk_buff *skb; +- int needed; +- unsigned long flags; +- +- spin_lock_irqsave(&queue->rx_queue.lock, flags); ++ unsigned int needed = 0; + +- skb = skb_peek(&queue->rx_queue); +- if (!skb) { +- spin_unlock_irqrestore(&queue->rx_queue.lock, flags); +- return false; ++ if (skb) { ++ needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); ++ if (skb_is_gso(skb)) ++ needed++; ++ if (skb->sw_hash) ++ needed++; + } + +- needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); +- if (skb_is_gso(skb)) +- needed++; +- if (skb->sw_hash) +- needed++; ++ WRITE_ONCE(queue->rx_slots_needed, needed); ++} + +- spin_unlock_irqrestore(&queue->rx_queue.lock, flags); ++static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) ++{ ++ RING_IDX prod, cons; ++ unsigned int needed; ++ ++ needed = READ_ONCE(queue->rx_slots_needed); ++ if (!needed) ++ return false; + + do { + prod = queue->rx.sring->req_prod; +@@ -80,13 +88,19 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) + + spin_lock_irqsave(&queue->rx_queue.lock, flags); + +- __skb_queue_tail(&queue->rx_queue, skb); +- +- queue->rx_queue_len += skb->len; +- if (queue->rx_queue_len > queue->rx_queue_max) { ++ if (queue->rx_queue_len >= queue->rx_queue_max) { + struct net_device *dev = queue->vif->dev; + + netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); ++ kfree_skb(skb); ++ queue->vif->dev->stats.rx_dropped++; ++ } else { ++ if (skb_queue_empty(&queue->rx_queue)) ++ xenvif_update_needed_slots(queue, skb); ++ ++ __skb_queue_tail(&queue->rx_queue, skb); ++ ++ queue->rx_queue_len += skb->len; + } + + spin_unlock_irqrestore(&queue->rx_queue.lock, flags); +@@ -100,6 +114,8 @@ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) + + skb = __skb_dequeue(&queue->rx_queue); + if (skb) { ++ xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue)); ++ + queue->rx_queue_len -= skb->len; + if (queue->rx_queue_len < queue->rx_queue_max) { + struct netdev_queue *txq; +@@ -134,6 +150,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue) + break; + xenvif_rx_dequeue(queue); + kfree_skb(skb); ++ queue->vif->dev->stats.rx_dropped++; + } + } + +@@ -474,27 +491,31 @@ void xenvif_rx_action(struct xenvif_queue *queue) + xenvif_rx_copy_flush(queue); + } + +-static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue) ++static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue) + { + RING_IDX prod, cons; + + prod = queue->rx.sring->req_prod; + cons = queue->rx.req_cons; + ++ return prod - cons; ++} ++ ++static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue) ++{ ++ unsigned int needed = READ_ONCE(queue->rx_slots_needed); ++ + return !queue->stalled && +- prod - cons < 1 && ++ xenvif_rx_queue_slots(queue) < needed && + time_after(jiffies, + queue->last_rx_time + queue->vif->stall_timeout); + } + + static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) + { +- RING_IDX prod, cons; +- +- prod = queue->rx.sring->req_prod; +- cons = queue->rx.req_cons; ++ unsigned int needed = READ_ONCE(queue->rx_slots_needed); + +- return queue->stalled && prod - cons >= 1; ++ return queue->stalled && xenvif_rx_queue_slots(queue) >= needed; + } + + bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread) +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c +index ef7dd70a40a12..56c7f2ebf5738 100644 +--- 
a/drivers/net/xen-netfront.c ++++ b/drivers/net/xen-netfront.c +@@ -142,6 +142,9 @@ struct netfront_queue { + struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; + grant_ref_t gref_rx_head; + grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; ++ ++ unsigned int rx_rsp_unconsumed; ++ spinlock_t rx_cons_lock; + }; + + struct netfront_info { +@@ -366,12 +369,13 @@ static int xennet_open(struct net_device *dev) + return 0; + } + +-static void xennet_tx_buf_gc(struct netfront_queue *queue) ++static bool xennet_tx_buf_gc(struct netfront_queue *queue) + { + RING_IDX cons, prod; + unsigned short id; + struct sk_buff *skb; + bool more_to_do; ++ bool work_done = false; + const struct device *dev = &queue->info->netdev->dev; + + BUG_ON(!netif_carrier_ok(queue->info->netdev)); +@@ -388,6 +392,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue) + for (cons = queue->tx.rsp_cons; cons != prod; cons++) { + struct xen_netif_tx_response txrsp; + ++ work_done = true; ++ + RING_COPY_RESPONSE(&queue->tx, cons, &txrsp); + if (txrsp.status == XEN_NETIF_RSP_NULL) + continue; +@@ -431,11 +437,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue) + + xennet_maybe_wake_tx(queue); + +- return; ++ return work_done; + + err: + queue->info->broken = true; + dev_alert(dev, "Disabled for further use\n"); ++ ++ return work_done; + } + + struct xennet_gnttab_make_txreq { +@@ -755,6 +763,16 @@ static int xennet_close(struct net_device *dev) + return 0; + } + ++static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&queue->rx_cons_lock, flags); ++ queue->rx.rsp_cons = val; ++ queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx); ++ spin_unlock_irqrestore(&queue->rx_cons_lock, flags); ++} ++ + static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, + grant_ref_t ref) + { +@@ -806,7 +824,7 @@ static int xennet_get_extras(struct netfront_queue *queue, + xennet_move_rx_slot(queue, skb, ref); + } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); + +- queue->rx.rsp_cons = cons; ++ xennet_set_rx_rsp_cons(queue, cons); + return err; + } + +@@ -886,7 +904,7 @@ next: + } + + if (unlikely(err)) +- queue->rx.rsp_cons = cons + slots; ++ xennet_set_rx_rsp_cons(queue, cons + slots); + + return err; + } +@@ -940,7 +958,8 @@ static int xennet_fill_frags(struct netfront_queue *queue, + __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); + } + if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { +- queue->rx.rsp_cons = ++cons + skb_queue_len(list); ++ xennet_set_rx_rsp_cons(queue, ++ ++cons + skb_queue_len(list)); + kfree_skb(nskb); + return -ENOENT; + } +@@ -953,7 +972,7 @@ static int xennet_fill_frags(struct netfront_queue *queue, + kfree_skb(nskb); + } + +- queue->rx.rsp_cons = cons; ++ xennet_set_rx_rsp_cons(queue, cons); + + return 0; + } +@@ -1074,7 +1093,9 @@ err: + + if (unlikely(xennet_set_skb_gso(skb, gso))) { + __skb_queue_head(&tmpq, skb); +- queue->rx.rsp_cons += skb_queue_len(&tmpq); ++ xennet_set_rx_rsp_cons(queue, ++ queue->rx.rsp_cons + ++ skb_queue_len(&tmpq)); + goto err; + } + } +@@ -1098,7 +1119,8 @@ err: + + __skb_queue_tail(&rxq, skb); + +- i = ++queue->rx.rsp_cons; ++ i = queue->rx.rsp_cons + 1; ++ xennet_set_rx_rsp_cons(queue, i); + work_done++; + } + +@@ -1260,40 +1282,79 @@ static int xennet_set_features(struct net_device *dev, + return 0; + } + +-static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) ++static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi) + 
{ +- struct netfront_queue *queue = dev_id; + unsigned long flags; + +- if (queue->info->broken) +- return IRQ_HANDLED; ++ if (unlikely(queue->info->broken)) ++ return false; + + spin_lock_irqsave(&queue->tx_lock, flags); +- xennet_tx_buf_gc(queue); ++ if (xennet_tx_buf_gc(queue)) ++ *eoi = 0; + spin_unlock_irqrestore(&queue->tx_lock, flags); + ++ return true; ++} ++ ++static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) ++{ ++ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; ++ ++ if (likely(xennet_handle_tx(dev_id, &eoiflag))) ++ xen_irq_lateeoi(irq, eoiflag); ++ + return IRQ_HANDLED; + } + +-static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) ++static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi) + { +- struct netfront_queue *queue = dev_id; +- struct net_device *dev = queue->info->netdev; ++ unsigned int work_queued; ++ unsigned long flags; + +- if (queue->info->broken) +- return IRQ_HANDLED; ++ if (unlikely(queue->info->broken)) ++ return false; ++ ++ spin_lock_irqsave(&queue->rx_cons_lock, flags); ++ work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx); ++ if (work_queued > queue->rx_rsp_unconsumed) { ++ queue->rx_rsp_unconsumed = work_queued; ++ *eoi = 0; ++ } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) { ++ const struct device *dev = &queue->info->netdev->dev; ++ ++ spin_unlock_irqrestore(&queue->rx_cons_lock, flags); ++ dev_alert(dev, "RX producer index going backwards\n"); ++ dev_alert(dev, "Disabled for further use\n"); ++ queue->info->broken = true; ++ return false; ++ } ++ spin_unlock_irqrestore(&queue->rx_cons_lock, flags); + +- if (likely(netif_carrier_ok(dev) && +- RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))) ++ if (likely(netif_carrier_ok(queue->info->netdev) && work_queued)) + napi_schedule(&queue->napi); + ++ return true; ++} ++ ++static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) ++{ ++ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; ++ ++ if (likely(xennet_handle_rx(dev_id, &eoiflag))) ++ xen_irq_lateeoi(irq, eoiflag); ++ + return IRQ_HANDLED; + } + + static irqreturn_t xennet_interrupt(int irq, void *dev_id) + { +- xennet_tx_interrupt(irq, dev_id); +- xennet_rx_interrupt(irq, dev_id); ++ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; ++ ++ if (xennet_handle_tx(dev_id, &eoiflag) && ++ xennet_handle_rx(dev_id, &eoiflag)) ++ xen_irq_lateeoi(irq, eoiflag); ++ + return IRQ_HANDLED; + } + +@@ -1527,9 +1588,10 @@ static int setup_netfront_single(struct netfront_queue *queue) + if (err < 0) + goto fail; + +- err = bind_evtchn_to_irqhandler(queue->tx_evtchn, +- xennet_interrupt, +- 0, queue->info->netdev->name, queue); ++ err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, ++ xennet_interrupt, 0, ++ queue->info->netdev->name, ++ queue); + if (err < 0) + goto bind_fail; + queue->rx_evtchn = queue->tx_evtchn; +@@ -1557,18 +1619,18 @@ static int setup_netfront_split(struct netfront_queue *queue) + + snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), + "%s-tx", queue->name); +- err = bind_evtchn_to_irqhandler(queue->tx_evtchn, +- xennet_tx_interrupt, +- 0, queue->tx_irq_name, queue); ++ err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, ++ xennet_tx_interrupt, 0, ++ queue->tx_irq_name, queue); + if (err < 0) + goto bind_tx_fail; + queue->tx_irq = err; + + snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), + "%s-rx", queue->name); +- err = bind_evtchn_to_irqhandler(queue->rx_evtchn, +- xennet_rx_interrupt, +- 0, queue->rx_irq_name, queue); ++ err = 
bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn, ++ xennet_rx_interrupt, 0, ++ queue->rx_irq_name, queue); + if (err < 0) + goto bind_rx_fail; + queue->rx_irq = err; +@@ -1670,6 +1732,7 @@ static int xennet_init_queue(struct netfront_queue *queue) + + spin_lock_init(&queue->tx_lock); + spin_lock_init(&queue->rx_lock); ++ spin_lock_init(&queue->rx_cons_lock); + + setup_timer(&queue->rx_refill_timer, rx_refill_timeout, + (unsigned long)queue); +diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c +index 2c000b9b0a42e..bbddf492da9fc 100644 +--- a/drivers/pci/msi.c ++++ b/drivers/pci/msi.c +@@ -796,9 +796,6 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, + goto out_disable; + } + +- /* Ensure that all table entries are masked. */ +- msix_mask_all(base, tsize); +- + ret = msix_setup_entries(dev, base, entries, nvec, affd); + if (ret) + goto out_disable; +@@ -821,6 +818,16 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, + /* Set MSI-X enabled bits and unmask the function */ + pci_intx_for_msi(dev, 0); + dev->msix_enabled = 1; ++ ++ /* ++ * Ensure that all table entries are masked to prevent ++ * stale entries from firing in a crash kernel. ++ * ++ * Done late to deal with a broken Marvell NVME device ++ * which takes the MSI-X mask bits into account even ++ * when MSI-X is disabled, which prevents MSI delivery. ++ */ ++ msix_mask_all(base, tsize); + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); + + pcibios_free_irq(dev); +@@ -847,7 +854,7 @@ out_free: + free_msi_irqs(dev); + + out_disable: +- pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); ++ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0); + + return ret; + } +diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c +index aba1a3396890b..b553ae4a88635 100644 +--- a/drivers/scsi/scsi_debug.c ++++ b/drivers/scsi/scsi_debug.c +@@ -2181,11 +2181,11 @@ static int resp_mode_select(struct scsi_cmnd *scp, + __func__, param_len, res); + md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2); + bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6); +- if (md_len > 2) { ++ off = bd_len + (mselect6 ? 4 : 8); ++ if (md_len > 2 || off >= res) { + mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); + return check_condition_result; + } +- off = bd_len + (mselect6 ? 
4 : 8); + mpage = arr[off] & 0x3f; + ps = !!(arr[off] & 0x80); + if (ps) { +diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c +index 37bde5c8268d1..a623e498a97bc 100644 +--- a/drivers/soc/tegra/fuse/fuse-tegra.c ++++ b/drivers/soc/tegra/fuse/fuse-tegra.c +@@ -178,7 +178,7 @@ static struct platform_driver tegra_fuse_driver = { + }; + builtin_platform_driver(tegra_fuse_driver); + +-bool __init tegra_fuse_read_spare(unsigned int spare) ++u32 __init tegra_fuse_read_spare(unsigned int spare) + { + unsigned int offset = fuse->soc->info->spare + spare * 4; + +diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h +index 10c2076d5089a..f368bd5373088 100644 +--- a/drivers/soc/tegra/fuse/fuse.h ++++ b/drivers/soc/tegra/fuse/fuse.h +@@ -62,7 +62,7 @@ struct tegra_fuse { + void tegra_init_revision(void); + void tegra_init_apbmisc(void); + +-bool __init tegra_fuse_read_spare(unsigned int spare); ++u32 __init tegra_fuse_read_spare(unsigned int spare); + u32 __init tegra_fuse_read_early(unsigned int offset); + + #ifdef CONFIG_ARCH_TEGRA_2x_SOC +diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c +index 858c7b4b197cb..2af089b2a343d 100644 +--- a/drivers/tty/hvc/hvc_xen.c ++++ b/drivers/tty/hvc/hvc_xen.c +@@ -50,6 +50,8 @@ struct xencons_info { + struct xenbus_device *xbdev; + struct xencons_interface *intf; + unsigned int evtchn; ++ XENCONS_RING_IDX out_cons; ++ unsigned int out_cons_same; + struct hvc_struct *hvc; + int irq; + int vtermno; +@@ -151,6 +153,8 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len) + XENCONS_RING_IDX cons, prod; + int recv = 0; + struct xencons_info *xencons = vtermno_to_xencons(vtermno); ++ unsigned int eoiflag = 0; ++ + if (xencons == NULL) + return -EINVAL; + intf = xencons->intf; +@@ -170,7 +174,27 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len) + mb(); /* read ring before consuming */ + intf->in_cons = cons; + +- notify_daemon(xencons); ++ /* ++ * When to mark interrupt having been spurious: ++ * - there was no new data to be read, and ++ * - the backend did not consume some output bytes, and ++ * - the previous round with no read data didn't see consumed bytes ++ * (we might have a race with an interrupt being in flight while ++ * updating xencons->out_cons, so account for that by allowing one ++ * round without any visible reason) ++ */ ++ if (intf->out_cons != xencons->out_cons) { ++ xencons->out_cons = intf->out_cons; ++ xencons->out_cons_same = 0; ++ } ++ if (recv) { ++ notify_daemon(xencons); ++ } else if (xencons->out_cons_same++ > 1) { ++ eoiflag = XEN_EOI_FLAG_SPURIOUS; ++ } ++ ++ xen_irq_lateeoi(xencons->irq, eoiflag); ++ + return recv; + } + +@@ -399,7 +423,7 @@ static int xencons_connect_backend(struct xenbus_device *dev, + if (ret) + return ret; + info->evtchn = evtchn; +- irq = bind_evtchn_to_irq(evtchn); ++ irq = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn); + if (irq < 0) + return irq; + info->irq = irq; +@@ -563,7 +587,7 @@ static int __init xen_hvc_init(void) + return r; + + info = vtermno_to_xencons(HVC_COOKIE); +- info->irq = bind_evtchn_to_irq(info->evtchn); ++ info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn); + } + if (info->irq < 0) + info->irq = 0; /* NO_IRQ */ +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c +index bcebec17bdd51..b407f907d6555 100644 +--- a/drivers/usb/gadget/composite.c ++++ b/drivers/usb/gadget/composite.c +@@ -1636,14 +1636,14 @@ composite_setup(struct usb_gadget 
*gadget, const struct usb_ctrlrequest *ctrl) + u8 endp; + + if (w_length > USB_COMP_EP0_BUFSIZ) { +- if (ctrl->bRequestType == USB_DIR_OUT) { +- goto done; +- } else { ++ if (ctrl->bRequestType & USB_DIR_IN) { + /* Cast away the const, we are going to overwrite on purpose. */ + __le16 *temp = (__le16 *)&ctrl->wLength; + + *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ); + w_length = USB_COMP_EP0_BUFSIZ; ++ } else { ++ goto done; + } + } + +diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c +index f1c5a22704b28..e8818ad973e4b 100644 +--- a/drivers/usb/gadget/legacy/dbgp.c ++++ b/drivers/usb/gadget/legacy/dbgp.c +@@ -345,14 +345,14 @@ static int dbgp_setup(struct usb_gadget *gadget, + u16 len = 0; + + if (length > DBGP_REQ_LEN) { +- if (ctrl->bRequestType == USB_DIR_OUT) { +- return err; +- } else { ++ if (ctrl->bRequestType & USB_DIR_IN) { + /* Cast away the const, we are going to overwrite on purpose. */ + __le16 *temp = (__le16 *)&ctrl->wLength; + + *temp = cpu_to_le16(DBGP_REQ_LEN); + length = DBGP_REQ_LEN; ++ } else { ++ return err; + } + } + +diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c +index ee4c206150a83..c67d53beed85e 100644 +--- a/drivers/usb/gadget/legacy/inode.c ++++ b/drivers/usb/gadget/legacy/inode.c +@@ -1339,14 +1339,14 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) + u16 w_length = le16_to_cpu(ctrl->wLength); + + if (w_length > RBUF_SIZE) { +- if (ctrl->bRequestType == USB_DIR_OUT) { +- return value; +- } else { ++ if (ctrl->bRequestType & USB_DIR_IN) { + /* Cast away the const, we are going to overwrite on purpose. */ + __le16 *temp = (__le16 *)&ctrl->wLength; + + *temp = cpu_to_le16(RBUF_SIZE); + w_length = RBUF_SIZE; ++ } else { ++ return value; + } + } + +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 4e2a9852147b3..b101a505c74bc 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -1222,6 +1222,14 @@ static const struct usb_device_id option_ids[] = { + .driver_info = NCTRL(2) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */ + .driver_info = NCTRL(0) | RSVD(1) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990 (rmnet) */ ++ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990 (MBIM) */ ++ .driver_info = NCTRL(0) | RSVD(1) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990 (RNDIS) */ ++ .driver_info = NCTRL(2) | RSVD(3) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */ ++ .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), + .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c +index 4d95a416fc36b..b8d13b69583cc 100644 +--- a/fs/fuse/dir.c ++++ b/fs/fuse/dir.c +@@ -969,7 +969,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid, + if (!parent) + return -ENOENT; + +- inode_lock(parent); ++ inode_lock_nested(parent, I_MUTEX_PARENT); + if (!S_ISDIR(parent->i_mode)) + goto unlock; + +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index d5d1c70bb927b..1c7a695ac265b 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -955,6 +955,11 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp) + return 0; 
+ } + ++static bool delegation_hashed(struct nfs4_delegation *dp) ++{ ++ return !(list_empty(&dp->dl_perfile)); ++} ++ + static bool + unhash_delegation_locked(struct nfs4_delegation *dp) + { +@@ -962,7 +967,7 @@ unhash_delegation_locked(struct nfs4_delegation *dp) + + lockdep_assert_held(&state_lock); + +- if (list_empty(&dp->dl_perfile)) ++ if (!delegation_hashed(dp)) + return false; + + dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID; +@@ -3881,7 +3886,7 @@ static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb) + * queued for a lease break. Don't queue it again. + */ + spin_lock(&state_lock); +- if (dp->dl_time == 0) { ++ if (delegation_hashed(dp) && dp->dl_time == 0) { + dp->dl_time = get_seconds(); + list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); + } +diff --git a/kernel/audit.c b/kernel/audit.c +index 6faaa908544af..2b82316b844b7 100644 +--- a/kernel/audit.c ++++ b/kernel/audit.c +@@ -686,7 +686,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid, + { + int rc = 0; + struct sk_buff *skb; +- static unsigned int failed = 0; ++ unsigned int failed = 0; + + /* NOTE: kauditd_thread takes care of all our locking, we just use + * the netlink info passed to us (e.g. sk and portid) */ +@@ -703,32 +703,30 @@ static int kauditd_send_queue(struct sock *sk, u32 portid, + continue; + } + ++retry: + /* grab an extra skb reference in case of error */ + skb_get(skb); + rc = netlink_unicast(sk, skb, portid, 0); + if (rc < 0) { +- /* fatal failure for our queue flush attempt? */ ++ /* send failed - try a few times unless fatal error */ + if (++failed >= retry_limit || + rc == -ECONNREFUSED || rc == -EPERM) { +- /* yes - error processing for the queue */ + sk = NULL; + if (err_hook) + (*err_hook)(skb); +- if (!skb_hook) +- goto out; +- /* keep processing with the skb_hook */ ++ if (rc == -EAGAIN) ++ rc = 0; ++ /* continue to drain the queue */ + continue; + } else +- /* no - requeue to preserve ordering */ +- skb_queue_head(queue, skb); ++ goto retry; + } else { +- /* it worked - drop the extra reference and continue */ ++ /* skb sent - drop the extra reference and continue */ + consume_skb(skb); + failed = 0; + } + } + +-out: + return (rc >= 0 ? 
0 : rc); + } + +@@ -1518,7 +1516,8 @@ static int __net_init audit_net_init(struct net *net) + audit_panic("cannot initialize netlink socket in namespace"); + return -ENOMEM; + } +- aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; ++ /* limit the timeout in case auditd is blocked/stopped */ ++ aunet->sk->sk_sndtimeo = HZ / 10; + + return 0; + } +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c +index 5b6f815a74ee3..602b476627dd5 100644 +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -1235,8 +1235,7 @@ int do_settimeofday64(const struct timespec64 *ts) + timekeeping_forward_now(tk); + + xt = tk_xtime(tk); +- ts_delta.tv_sec = ts->tv_sec - xt.tv_sec; +- ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec; ++ ts_delta = timespec64_sub(*ts, xt); + + if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) { + ret = -EINVAL; +diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c +index 379db35838b64..572c0854d631c 100644 +--- a/kernel/trace/tracing_map.c ++++ b/kernel/trace/tracing_map.c +@@ -24,6 +24,7 @@ + #include <linux/jhash.h> + #include <linux/slab.h> + #include <linux/sort.h> ++#include <linux/kmemleak.h> + + #include "tracing_map.h" + #include "trace.h" +@@ -227,6 +228,7 @@ void tracing_map_array_free(struct tracing_map_array *a) + for (i = 0; i < a->n_pages; i++) { + if (!a->pages[i]) + break; ++ kmemleak_free(a->pages[i]); + free_page((unsigned long)a->pages[i]); + } + +@@ -262,6 +264,7 @@ struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts, + a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL); + if (!a->pages[i]) + goto free; ++ kmemleak_alloc(a->pages[i], PAGE_SIZE, 1, GFP_KERNEL); + } + out: + return a; +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug +index 428eaf16a1d25..f63a4faf244e8 100644 +--- a/lib/Kconfig.debug ++++ b/lib/Kconfig.debug +@@ -1131,7 +1131,7 @@ config LOCKDEP + bool + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT + select STACKTRACE +- select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE && !X86 ++ select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !SCORE && !X86 + select KALLSYMS + select KALLSYMS_ALL + +@@ -1566,7 +1566,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER + depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT + depends on !X86_64 + select STACKTRACE +- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE && !X86 ++ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !SCORE && !X86 + help + Provide stacktrace filter for fault-injection capabilities + +@@ -1575,7 +1575,7 @@ config LATENCYTOP + depends on DEBUG_KERNEL + depends on STACKTRACE_SUPPORT + depends on PROC_FS +- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86 ++ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86 + select KALLSYMS + select KALLSYMS_ALL + select STACKTRACE +diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c +index 6be41a44d688a..4f3c08583d8c5 100644 +--- a/net/bpf/test_run.c ++++ b/net/bpf/test_run.c +@@ -96,6 +96,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, + u32 size = kattr->test.data_size_in; + u32 repeat = kattr->test.repeat; + u32 retval, duration; ++ int hh_len = ETH_HLEN; + struct sk_buff *skb; + void *data; + int ret; +@@ -131,12 +132,22 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, 
const union bpf_attr *kattr, + skb_reset_network_header(skb); + + if (is_l2) +- __skb_push(skb, ETH_HLEN); ++ __skb_push(skb, hh_len); + if (is_direct_pkt_access) + bpf_compute_data_end(skb); + retval = bpf_test_run(prog, skb, repeat, &duration); +- if (!is_l2) +- __skb_push(skb, ETH_HLEN); ++ if (!is_l2) { ++ if (skb_headroom(skb) < hh_len) { ++ int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb)); ++ ++ if (pskb_expand_head(skb, nhead, 0, GFP_USER)) { ++ kfree_skb(skb); ++ return -ENOMEM; ++ } ++ } ++ memset(__skb_push(skb, hh_len), 0, hh_len); ++ } ++ + size = skb->len; + /* bpf program can never convert linear skb to non-linear */ + if (WARN_ON_ONCE(skb_is_nonlinear(skb))) +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c +index 0c71137c5d41b..43fd9cfa7b115 100644 +--- a/net/ipv6/sit.c ++++ b/net/ipv6/sit.c +@@ -1858,7 +1858,6 @@ static int __net_init sit_init_net(struct net *net) + return 0; + + err_reg_dev: +- ipip6_dev_free(sitn->fb_tunnel_dev); + free_netdev(sitn->fb_tunnel_dev); + err_alloc_dev: + return err; +diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c +index ed57db9b60861..060d3ed5c191a 100644 +--- a/net/mac80211/agg-tx.c ++++ b/net/mac80211/agg-tx.c +@@ -109,7 +109,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, + mgmt->u.action.u.addba_req.start_seq_num = + cpu_to_le16(start_seq_num << 4); + +- ieee80211_tx_skb(sdata, skb); ++ ieee80211_tx_skb_tid(sdata, skb, tid); + } + + void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn) +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index d1fd9f7c867ef..0563b4d34eaec 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -1822,6 +1822,11 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) + if (msg->msg_flags&MSG_OOB) + return -EOPNOTSUPP; + ++ if (len == 0) { ++ pr_warn_once("Zero length message leads to an empty skb\n"); ++ return -ENODATA; ++ } ++ + err = scm_send(sock, msg, &scm, true); + if (err < 0) + return err; +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c +index d7a2580f04206..ff5995624146a 100644 +--- a/net/nfc/netlink.c ++++ b/net/nfc/netlink.c +@@ -666,8 +666,10 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb) + { + struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; + +- nfc_device_iter_exit(iter); +- kfree(iter); ++ if (iter) { ++ nfc_device_iter_exit(iter); ++ kfree(iter); ++ } + + return 0; + } +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index 50ca70b3c1759..3177b9320c62d 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -4477,9 +4477,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, + } + + out_free_pg_vec: +- bitmap_free(rx_owner_map); +- if (pg_vec) ++ if (pg_vec) { ++ bitmap_free(rx_owner_map); + free_pg_vec(pg_vec, order, req->tp_block_nr); ++ } + out: + return err; + } +diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl +index cdf63eccad11e..ebc78cb3bc7dc 100755 +--- a/scripts/recordmcount.pl ++++ b/scripts/recordmcount.pl +@@ -252,7 +252,7 @@ if ($arch eq "x86_64") { + + } elsif ($arch eq "s390" && $bits == 64) { + if ($cc =~ /-DCC_USING_HOTPATCH/) { +- $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*brcl\\s*0,[0-9a-f]+ <([^\+]*)>\$"; ++ $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(bcrl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$"; + $mcount_adjust = 0; + } else { + $mcount_regex = 
"^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$"; +diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c +index d4f611546fc0b..0846345fe1e5c 100644 +--- a/tools/testing/selftests/bpf/test_verifier.c ++++ b/tools/testing/selftests/bpf/test_verifier.c +@@ -4334,6 +4334,24 @@ static struct bpf_test tests[] = { + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_LWT_XMIT, + }, ++ { ++ "make headroom for LWT_XMIT", ++ .insns = { ++ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), ++ BPF_MOV64_IMM(BPF_REG_2, 34), ++ BPF_MOV64_IMM(BPF_REG_3, 0), ++ BPF_EMIT_CALL(BPF_FUNC_skb_change_head), ++ /* split for s390 to succeed */ ++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), ++ BPF_MOV64_IMM(BPF_REG_2, 42), ++ BPF_MOV64_IMM(BPF_REG_3, 0), ++ BPF_EMIT_CALL(BPF_FUNC_skb_change_head), ++ BPF_MOV64_IMM(BPF_REG_0, 0), ++ BPF_EXIT_INSN(), ++ }, ++ .result = ACCEPT, ++ .prog_type = BPF_PROG_TYPE_LWT_XMIT, ++ }, + { + "invalid access of tc_classid for LWT_IN", + .insns = { |