author | Mike Pagano <mpagano@gentoo.org> | 2018-05-30 07:42:50 -0400
---|---|---
committer | Mike Pagano <mpagano@gentoo.org> | 2018-05-30 07:42:50 -0400
commit | ab22674da00d00aff48540c57c2979d195b2d87f (patch) |
tree | 1b0b5d750af5f628235adf508485c752e686c266 |
parent | Linux patch 4.14.44 (diff) |
download | linux-patches-4.14-50.tar.gz, linux-patches-4.14-50.tar.bz2, linux-patches-4.14-50.zip |
Linux patches 4.14.45 and 4.14.46 (tag: 4.14-50)
-rw-r--r-- | 0000_README | 8
-rw-r--r-- | 1044_linux-4.14.45.patch | 16573
-rw-r--r-- | 1045_linux-4.14.46.patch | 850
3 files changed, 17431 insertions, 0 deletions
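
These genpatches apply incrementally, each one taking the tree from the previous sublevel to the next. A minimal sketch of manual application with patch(1), assuming a source tree already at 4.14.44 in a hypothetical `linux-4.14/` directory:

    # Apply the incremental Gentoo patches in order.
    cd linux-4.14                              # tree currently at 4.14.44
    patch -p1 < ../1044_linux-4.14.45.patch    # 4.14.44 -> 4.14.45
    patch -p1 < ../1045_linux-4.14.46.patch    # 4.14.45 -> 4.14.46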
diff --git a/0000_README b/0000_README index f2b1b86f..63dde0ea 100644 --- a/0000_README +++ b/0000_README @@ -219,6 +219,14 @@ Patch: 1043_linux-4.14.44.patch From: http://www.kernel.org Desc: Linux 4.14.44 +Patch: 1044_linux-4.14.45.patch +From: http://www.kernel.org +Desc: Linux 4.14.45 + +Patch: 1045_linux-4.14.46.patch +From: http://www.kernel.org +Desc: Linux 4.14.46 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1044_linux-4.14.45.patch b/1044_linux-4.14.45.patch new file mode 100644 index 00000000..878e4730 --- /dev/null +++ b/1044_linux-4.14.45.patch @@ -0,0 +1,16573 @@ +diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt +index 7eda08eb8a1e..a2b6a8a565a7 100644 +--- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt ++++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt +@@ -20,6 +20,7 @@ Required properties : + - "allwinner,sun50i-a64-ccu" + - "allwinner,sun50i-a64-r-ccu" + - "allwinner,sun50i-h5-ccu" ++ - "allwinner,sun50i-h6-ccu" + - "nextthing,gr8-ccu" + + - reg: Must contain the registers base address and length +@@ -31,6 +32,9 @@ Required properties : + - #clock-cells : must contain 1 + - #reset-cells : must contain 1 + ++For the main CCU on H6, one more clock is needed: ++- "iosc": the SoC's internal frequency oscillator ++ + For the PRCM CCUs on A83T/H3/A64, two more clocks are needed: + - "pll-periph": the SoC's peripheral PLL from the main CCU + - "iosc": the SoC's internal frequency oscillator +diff --git a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt +index 217a90eaabe7..9c38bbe7e6d7 100644 +--- a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt ++++ b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt +@@ -11,7 +11,11 @@ Required properties: + interrupts. + + Optional properties: +-- clocks: Optional reference to the clock used by the XOR engine. ++- clocks: Optional reference to the clocks used by the XOR engine. 
++- clock-names: mandatory if there is a second clock, in this case the ++ name must be "core" for the first clock and "reg" for the second ++ one ++ + + Example: + +diff --git a/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt +index 47284f85ec80..c3f9826692bc 100644 +--- a/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt ++++ b/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt +@@ -20,7 +20,8 @@ Required subnode-properties: + gpio: cpuclkoutgrp0, udlclkoutgrp0, i2c1grp0, i2c2grp0, + i2c3grp0, i2s0grp0, i2s1grp0, i2srefclkgrp0, spi0grp0, + spi1grp0, pciedebuggrp0, uart0grp0, uart0grp1, uart1grp0, +- uart2grp0, uart2grp1, uart3grp0, uart4grp0, uart5grp0 ++ uart2grp0, uart2grp1, uart3grp0, uart4grp0, uart5grp0, ++ uart5nocts + cpuclkout: cpuclkoutgrp0 + udlclkout: udlclkoutgrp0 + i2c1: i2c1grp0 +@@ -37,7 +38,7 @@ Required subnode-properties: + uart2: uart2grp0, uart2grp1 + uart3: uart3grp0 + uart4: uart4grp0 +- uart5: uart5grp0 ++ uart5: uart5grp0, uart5nocts + nand: nandgrp0 + sdio0: sdio0grp0 + sdio1: sdio1grp0 +diff --git a/Makefile b/Makefile +index 787cf6605209..f3ea74e7a516 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 14 +-SUBLEVEL = 44 ++SUBLEVEL = 45 + EXTRAVERSION = + NAME = Petit Gorille + +diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h +index 68dfb3cb7145..02a7c2fa6106 100644 +--- a/arch/alpha/include/asm/xchg.h ++++ b/arch/alpha/include/asm/xchg.h +@@ -12,6 +12,10 @@ + * Atomic exchange. + * Since it can be used to implement critical sections + * it must clobber "memory" (also for interrupts in UP). ++ * ++ * The leading and the trailing memory barriers guarantee that these ++ * operations are fully ordered. ++ * + */ + + static inline unsigned long +@@ -19,6 +23,7 @@ ____xchg(_u8, volatile char *m, unsigned long val) + { + unsigned long ret, tmp, addr64; + ++ smp_mb(); + __asm__ __volatile__( + " andnot %4,7,%3\n" + " insbl %1,%4,%1\n" +@@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val) + { + unsigned long ret, tmp, addr64; + ++ smp_mb(); + __asm__ __volatile__( + " andnot %4,7,%3\n" + " inswl %1,%4,%1\n" +@@ -67,6 +73,7 @@ ____xchg(_u32, volatile int *m, unsigned long val) + { + unsigned long dummy; + ++ smp_mb(); + __asm__ __volatile__( + "1: ldl_l %0,%4\n" + " bis $31,%3,%1\n" +@@ -87,6 +94,7 @@ ____xchg(_u64, volatile long *m, unsigned long val) + { + unsigned long dummy; + ++ smp_mb(); + __asm__ __volatile__( + "1: ldq_l %0,%4\n" + " bis $31,%3,%1\n" +@@ -128,10 +136,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size) + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + * +- * The memory barrier should be placed in SMP only when we actually +- * make the change. If we don't change anything (so if the returned +- * prev is equal to old) then we aren't acquiring anything new and +- * we don't need any memory barrier as far I can tell. ++ * The leading and the trailing memory barriers guarantee that these ++ * operations are fully ordered. ++ * ++ * The trailing memory barrier is placed in SMP unconditionally, in ++ * order to guarantee that dependency ordering is preserved when a ++ * dependency is headed by an unsuccessful operation. 
+ */ + + static inline unsigned long +@@ -139,6 +149,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) + { + unsigned long prev, tmp, cmp, addr64; + ++ smp_mb(); + __asm__ __volatile__( + " andnot %5,7,%4\n" + " insbl %1,%5,%1\n" +@@ -150,8 +161,8 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) + " or %1,%2,%2\n" + " stq_c %2,0(%4)\n" + " beq %2,3f\n" +- __ASM__MB + "2:\n" ++ __ASM__MB + ".subsection 2\n" + "3: br 1b\n" + ".previous" +@@ -166,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) + { + unsigned long prev, tmp, cmp, addr64; + ++ smp_mb(); + __asm__ __volatile__( + " andnot %5,7,%4\n" + " inswl %1,%5,%1\n" +@@ -177,8 +189,8 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) + " or %1,%2,%2\n" + " stq_c %2,0(%4)\n" + " beq %2,3f\n" +- __ASM__MB + "2:\n" ++ __ASM__MB + ".subsection 2\n" + "3: br 1b\n" + ".previous" +@@ -193,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new) + { + unsigned long prev, cmp; + ++ smp_mb(); + __asm__ __volatile__( + "1: ldl_l %0,%5\n" + " cmpeq %0,%3,%1\n" +@@ -200,8 +213,8 @@ ____cmpxchg(_u32, volatile int *m, int old, int new) + " mov %4,%1\n" + " stl_c %1,%2\n" + " beq %1,3f\n" +- __ASM__MB + "2:\n" ++ __ASM__MB + ".subsection 2\n" + "3: br 1b\n" + ".previous" +@@ -216,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) + { + unsigned long prev, cmp; + ++ smp_mb(); + __asm__ __volatile__( + "1: ldq_l %0,%5\n" + " cmpeq %0,%3,%1\n" +@@ -223,8 +237,8 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) + " mov %4,%1\n" + " stq_c %1,%2\n" + " beq %1,3f\n" +- __ASM__MB + "2:\n" ++ __ASM__MB + ".subsection 2\n" + "3: br 1b\n" + ".previous" +diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig +index c84e67fdea09..4383313b064a 100644 +--- a/arch/arc/Kconfig ++++ b/arch/arc/Kconfig +@@ -487,7 +487,6 @@ config ARC_CURR_IN_REG + + config ARC_EMUL_UNALIGNED + bool "Emulate unaligned memory access (userspace only)" +- default N + select SYSCTL_ARCH_UNALIGN_NO_WARN + select SYSCTL_ARCH_UNALIGN_ALLOW + depends on ISA_ARCOMPACT +diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h +index ea022d47896c..21ec82466d62 100644 +--- a/arch/arc/include/asm/bug.h ++++ b/arch/arc/include/asm/bug.h +@@ -23,7 +23,8 @@ void die(const char *str, struct pt_regs *regs, unsigned long address); + + #define BUG() do { \ + pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ +- dump_stack(); \ ++ barrier_before_unreachable(); \ ++ __builtin_trap(); \ + } while (0) + + #define HAVE_ARCH_BUG +diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c +index f61a52b01625..5fe84e481654 100644 +--- a/arch/arc/kernel/mcip.c ++++ b/arch/arc/kernel/mcip.c +@@ -22,10 +22,79 @@ static DEFINE_RAW_SPINLOCK(mcip_lock); + + static char smp_cpuinfo_buf[128]; + ++/* ++ * Set mask to halt GFRC if any online core in SMP cluster is halted. ++ * Only works for ARC HS v3.0+, on earlier versions has no effect. ++ */ ++static void mcip_update_gfrc_halt_mask(int cpu) ++{ ++ struct bcr_generic gfrc; ++ unsigned long flags; ++ u32 gfrc_halt_mask; ++ ++ READ_BCR(ARC_REG_GFRC_BUILD, gfrc); ++ ++ /* ++ * CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in ++ * GFRC 0x3 version. 
++ */ ++ if (gfrc.ver < 0x3) ++ return; ++ ++ raw_spin_lock_irqsave(&mcip_lock, flags); ++ ++ __mcip_cmd(CMD_GFRC_READ_CORE, 0); ++ gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK); ++ gfrc_halt_mask |= BIT(cpu); ++ __mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask); ++ ++ raw_spin_unlock_irqrestore(&mcip_lock, flags); ++} ++ ++static void mcip_update_debug_halt_mask(int cpu) ++{ ++ u32 mcip_mask = 0; ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&mcip_lock, flags); ++ ++ /* ++ * mcip_mask is same for CMD_DEBUG_SET_SELECT and CMD_DEBUG_SET_MASK ++ * commands. So read it once instead of reading both CMD_DEBUG_READ_MASK ++ * and CMD_DEBUG_READ_SELECT. ++ */ ++ __mcip_cmd(CMD_DEBUG_READ_SELECT, 0); ++ mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK); ++ ++ mcip_mask |= BIT(cpu); ++ ++ __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask); ++ /* ++ * Parameter specified halt cause: ++ * STATUS32[H]/actionpoint/breakpoint/self-halt ++ * We choose all of them (0xF). ++ */ ++ __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask); ++ ++ raw_spin_unlock_irqrestore(&mcip_lock, flags); ++} ++ + static void mcip_setup_per_cpu(int cpu) + { ++ struct mcip_bcr mp; ++ ++ READ_BCR(ARC_REG_MCIP_BCR, mp); ++ + smp_ipi_irq_setup(cpu, IPI_IRQ); + smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ); ++ ++ /* Update GFRC halt mask as new CPU came online */ ++ if (mp.gfrc) ++ mcip_update_gfrc_halt_mask(cpu); ++ ++ /* Update MCIP debug mask as new CPU came online */ ++ if (mp.dbg) ++ mcip_update_debug_halt_mask(cpu); + } + + static void mcip_ipi_send(int cpu) +@@ -101,11 +170,6 @@ static void mcip_probe_n_setup(void) + IS_AVAIL1(mp.gfrc, "GFRC")); + + cpuinfo_arc700[0].extn.gfrc = mp.gfrc; +- +- if (mp.dbg) { +- __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf); +- __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf); +- } + } + + struct plat_smp_ops plat_smp_ops = { +diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c +index 6df9d94a9537..115eecc0d9a4 100644 +--- a/arch/arc/kernel/smp.c ++++ b/arch/arc/kernel/smp.c +@@ -24,6 +24,7 @@ + #include <linux/reboot.h> + #include <linux/irqdomain.h> + #include <linux/export.h> ++#include <linux/of_fdt.h> + + #include <asm/processor.h> + #include <asm/setup.h> +@@ -47,6 +48,42 @@ void __init smp_prepare_boot_cpu(void) + { + } + ++static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask) ++{ ++ unsigned long dt_root = of_get_flat_dt_root(); ++ const char *buf; ++ ++ buf = of_get_flat_dt_prop(dt_root, name, NULL); ++ if (!buf) ++ return -EINVAL; ++ ++ if (cpulist_parse(buf, cpumask)) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++/* ++ * Read from DeviceTree and setup cpu possible mask. If there is no ++ * "possible-cpus" property in DeviceTree pretend all [0..NR_CPUS-1] exist. 
++ */ ++static void __init arc_init_cpu_possible(void) ++{ ++ struct cpumask cpumask; ++ ++ if (arc_get_cpu_map("possible-cpus", &cpumask)) { ++ pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n", ++ NR_CPUS); ++ ++ cpumask_setall(&cpumask); ++ } ++ ++ if (!cpumask_test_cpu(0, &cpumask)) ++ panic("Master cpu (cpu[0]) is missed in cpu possible mask!"); ++ ++ init_cpu_possible(&cpumask); ++} ++ + /* + * Called from setup_arch() before calling setup_processor() + * +@@ -58,10 +95,7 @@ void __init smp_prepare_boot_cpu(void) + */ + void __init smp_init_cpus(void) + { +- unsigned int i; +- +- for (i = 0; i < NR_CPUS; i++) +- set_cpu_possible(i, true); ++ arc_init_cpu_possible(); + + if (plat_smp_ops.init_early_smp) + plat_smp_ops.init_early_smp(); +@@ -70,16 +104,12 @@ void __init smp_init_cpus(void) + /* called from init ( ) => process 1 */ + void __init smp_prepare_cpus(unsigned int max_cpus) + { +- int i; +- + /* + * if platform didn't set the present map already, do it now + * boot cpu is set to present already by init/main.c + */ +- if (num_present_cpus() <= 1) { +- for (i = 0; i < max_cpus; i++) +- set_cpu_present(i, true); +- } ++ if (num_present_cpus() <= 1) ++ init_cpu_present(cpu_possible_mask); + } + + void __init smp_cpus_done(unsigned int max_cpus) +diff --git a/arch/arm/boot/dts/at91-tse850-3.dts b/arch/arm/boot/dts/at91-tse850-3.dts +index 5f29010cdbd8..4ef80a703eda 100644 +--- a/arch/arm/boot/dts/at91-tse850-3.dts ++++ b/arch/arm/boot/dts/at91-tse850-3.dts +@@ -245,7 +245,7 @@ + }; + + eeprom@50 { +- compatible = "nxp,24c02", "atmel,24c02"; ++ compatible = "nxp,se97b", "atmel,24c02"; + reg = <0x50>; + pagesize = <16>; + }; +diff --git a/arch/arm/boot/dts/bcm2836.dtsi b/arch/arm/boot/dts/bcm2836.dtsi +index 61e158003509..168c002f0ca0 100644 +--- a/arch/arm/boot/dts/bcm2836.dtsi ++++ b/arch/arm/boot/dts/bcm2836.dtsi +@@ -9,7 +9,7 @@ + <0x40000000 0x40000000 0x00001000>; + dma-ranges = <0xc0000000 0x00000000 0x3f000000>; + +- local_intc: local_intc { ++ local_intc: local_intc@40000000 { + compatible = "brcm,bcm2836-l1-intc"; + reg = <0x40000000 0x100>; + interrupt-controller; +diff --git a/arch/arm/boot/dts/bcm2837.dtsi b/arch/arm/boot/dts/bcm2837.dtsi +index bc1cca5cf43c..d5d058a568c3 100644 +--- a/arch/arm/boot/dts/bcm2837.dtsi ++++ b/arch/arm/boot/dts/bcm2837.dtsi +@@ -8,7 +8,7 @@ + <0x40000000 0x40000000 0x00001000>; + dma-ranges = <0xc0000000 0x00000000 0x3f000000>; + +- local_intc: local_intc { ++ local_intc: local_intc@40000000 { + compatible = "brcm,bcm2836-l1-intc"; + reg = <0x40000000 0x100>; + interrupt-controller; +diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi +index 013431e3d7c3..4745e3c7806b 100644 +--- a/arch/arm/boot/dts/bcm283x.dtsi ++++ b/arch/arm/boot/dts/bcm283x.dtsi +@@ -251,7 +251,7 @@ + + jtag_gpio4: jtag_gpio4 { + brcm,pins = <4 5 6 12 13>; +- brcm,function = <BCM2835_FSEL_ALT4>; ++ brcm,function = <BCM2835_FSEL_ALT5>; + }; + jtag_gpio22: jtag_gpio22 { + brcm,pins = <22 23 24 25 26 27>; +@@ -396,8 +396,8 @@ + + i2s: i2s@7e203000 { + compatible = "brcm,bcm2835-i2s"; +- reg = <0x7e203000 0x20>, +- <0x7e101098 0x02>; ++ reg = <0x7e203000 0x24>; ++ clocks = <&clocks BCM2835_CLOCK_PCM>; + + dmas = <&dma 2>, + <&dma 3>; +diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts +index 6a44b8021702..f0e2008f7490 100644 +--- a/arch/arm/boot/dts/bcm958625hr.dts ++++ b/arch/arm/boot/dts/bcm958625hr.dts +@@ -49,7 +49,7 @@ + + memory { + device_type = "memory"; +- reg = <0x60000000 
0x80000000>; ++ reg = <0x60000000 0x20000000>; + }; + + gpio-restart { +diff --git a/arch/arm/boot/dts/dra71-evm.dts b/arch/arm/boot/dts/dra71-evm.dts +index 41c9132eb550..64363f75c01a 100644 +--- a/arch/arm/boot/dts/dra71-evm.dts ++++ b/arch/arm/boot/dts/dra71-evm.dts +@@ -24,13 +24,13 @@ + + regulator-name = "vddshv8"; + regulator-min-microvolt = <1800000>; +- regulator-max-microvolt = <3000000>; ++ regulator-max-microvolt = <3300000>; + regulator-boot-on; + vin-supply = <&evm_5v0>; + + gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>; + states = <1800000 0x0 +- 3000000 0x1>; ++ 3300000 0x1>; + }; + + evm_1v8_sw: fixedregulator-evm_1v8 { +diff --git a/arch/arm/boot/dts/imx6dl-icore-rqs.dts b/arch/arm/boot/dts/imx6dl-icore-rqs.dts +index cf42c2f5cdc7..1281bc39b7ab 100644 +--- a/arch/arm/boot/dts/imx6dl-icore-rqs.dts ++++ b/arch/arm/boot/dts/imx6dl-icore-rqs.dts +@@ -42,7 +42,7 @@ + + /dts-v1/; + +-#include "imx6q.dtsi" ++#include "imx6dl.dtsi" + #include "imx6qdl-icore-rqs.dtsi" + + / { +diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts +index ae45af1ad062..3cc1fb9ce441 100644 +--- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts ++++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts +@@ -213,37 +213,37 @@ + &iomuxc { + pinctrl_enet1: enet1grp { + fsl,pins = < +- MX7D_PAD_SD2_CD_B__ENET1_MDIO 0x3 +- MX7D_PAD_SD2_WP__ENET1_MDC 0x3 +- MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x1 +- MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x1 +- MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x1 +- MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x1 +- MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x1 +- MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x1 +- MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x1 +- MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x1 +- MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x1 +- MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x1 +- MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x1 +- MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x1 ++ MX7D_PAD_SD2_CD_B__ENET1_MDIO 0x30 ++ MX7D_PAD_SD2_WP__ENET1_MDC 0x30 ++ MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x11 ++ MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x11 ++ MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x11 ++ MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x11 ++ MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x11 ++ MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x11 ++ MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x11 ++ MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x11 ++ MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x11 ++ MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x11 ++ MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x11 ++ MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x11 + >; + }; + + pinctrl_enet2: enet2grp { + fsl,pins = < +- MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x1 +- MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x1 +- MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x1 +- MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x1 +- MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x1 +- MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x1 +- MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x1 +- MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x1 +- MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x1 +- MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x1 +- MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x1 +- MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x1 ++ MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x11 ++ MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x11 ++ MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x11 ++ MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x11 ++ MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x11 ++ MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x11 ++ MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x11 
++ MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x11 ++ MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x11 ++ MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x11 ++ MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x11 ++ MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x11 + >; + }; + +diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts +index 95da5cb9d37a..b6ebe79261c6 100644 +--- a/arch/arm/boot/dts/r8a7791-porter.dts ++++ b/arch/arm/boot/dts/r8a7791-porter.dts +@@ -427,7 +427,7 @@ + "dclkin.0", "dclkin.1"; + + ports { +- port@1 { ++ port@0 { + endpoint { + remote-endpoint = <&adv7511_in>; + }; +diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi +index 4916c65e0ace..5c0a76493d22 100644 +--- a/arch/arm/boot/dts/rk3036.dtsi ++++ b/arch/arm/boot/dts/rk3036.dtsi +@@ -261,7 +261,7 @@ + max-frequency = <37500000>; + clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, + <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; +- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; ++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; + fifo-depth = <0x100>; + interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; + resets = <&cru SRST_SDIO>; +@@ -279,7 +279,7 @@ + max-frequency = <37500000>; + clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, + <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; +- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; ++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; + default-sample-phase = <158>; + disable-wp; + dmas = <&pdma 12>; +diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi +index 06814421eed2..f59f7cc62be6 100644 +--- a/arch/arm/boot/dts/rk322x.dtsi ++++ b/arch/arm/boot/dts/rk322x.dtsi +@@ -600,7 +600,7 @@ + interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, + <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; +- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; ++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; + fifo-depth = <0x100>; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>; +@@ -613,7 +613,7 @@ + interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, + <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; +- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; ++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; + fifo-depth = <0x100>; + pinctrl-names = "default"; + pinctrl-0 = <&sdio_clk &sdio_cmd &sdio_bus4>; +@@ -628,7 +628,7 @@ + max-frequency = <37500000>; + clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, + <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; +- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; ++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; + bus-width = <8>; + default-sample-phase = <158>; + fifo-depth = <0x100>; +diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi +index 356ed1e62452..f7a951afd281 100644 +--- a/arch/arm/boot/dts/rk3288.dtsi ++++ b/arch/arm/boot/dts/rk3288.dtsi +@@ -927,6 +927,7 @@ + i2s: i2s@ff890000 { + compatible = "rockchip,rk3288-i2s", "rockchip,rk3066-i2s"; + reg = <0x0 0xff890000 0x0 0x10000>; ++ #sound-dai-cells = <0>; + interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; +@@ -1122,6 +1123,7 @@ + compatible = "rockchip,rk3288-dw-hdmi"; + reg = <0x0 0xff980000 0x0 0x20000>; + reg-io-width = <4>; ++ #sound-dai-cells = <0>; + rockchip,grf = <&grf>; + interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>; +diff --git a/arch/arm/boot/dts/socfpga.dtsi 
b/arch/arm/boot/dts/socfpga.dtsi +index 7e24dc8e82d4..8d9f42a422cb 100644 +--- a/arch/arm/boot/dts/socfpga.dtsi ++++ b/arch/arm/boot/dts/socfpga.dtsi +@@ -827,7 +827,7 @@ + timer@fffec600 { + compatible = "arm,cortex-a9-twd-timer"; + reg = <0xfffec600 0x100>; +- interrupts = <1 13 0xf04>; ++ interrupts = <1 13 0xf01>; + clocks = <&mpu_periph_clk>; + }; + +diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h +index 9c99e817535e..5b85889f82ee 100644 +--- a/arch/arm/include/asm/vdso.h ++++ b/arch/arm/include/asm/vdso.h +@@ -12,8 +12,6 @@ struct mm_struct; + + void arm_install_vdso(struct mm_struct *mm, unsigned long addr); + +-extern char vdso_start, vdso_end; +- + extern unsigned int vdso_total_pages; + + #else /* CONFIG_VDSO */ +diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c +index a4d6dc0f2427..f4dd7f9663c1 100644 +--- a/arch/arm/kernel/vdso.c ++++ b/arch/arm/kernel/vdso.c +@@ -39,6 +39,8 @@ + + static struct page **vdso_text_pagelist; + ++extern char vdso_start[], vdso_end[]; ++ + /* Total number of pages needed for the data and text portions of the VDSO. */ + unsigned int vdso_total_pages __ro_after_init; + +@@ -197,13 +199,13 @@ static int __init vdso_init(void) + unsigned int text_pages; + int i; + +- if (memcmp(&vdso_start, "\177ELF", 4)) { ++ if (memcmp(vdso_start, "\177ELF", 4)) { + pr_err("VDSO is not a valid ELF object!\n"); + return -ENOEXEC; + } + +- text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; +- pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start); ++ text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; ++ pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start); + + /* Allocate the VDSO text pagelist */ + vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *), +@@ -218,7 +220,7 @@ static int __init vdso_init(void) + for (i = 0; i < text_pages; i++) { + struct page *page; + +- page = virt_to_page(&vdso_start + i * PAGE_SIZE); ++ page = virt_to_page(vdso_start + i * PAGE_SIZE); + vdso_text_pagelist[i] = page; + } + +@@ -229,7 +231,7 @@ static int __init vdso_init(void) + + cntvct_ok = cntvct_functional(); + +- patch_vdso(&vdso_start); ++ patch_vdso(vdso_start); + + return 0; + } +diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c +index a3e78074be70..62eb7d668890 100644 +--- a/arch/arm/mach-davinci/board-omapl138-hawk.c ++++ b/arch/arm/mach-davinci/board-omapl138-hawk.c +@@ -127,8 +127,8 @@ static struct gpiod_lookup_table mmc_gpios_table = { + .dev_id = "da830-mmc.0", + .table = { + /* CD: gpio3_12: gpio60: chip 1 contains gpio range 32-63*/ +- GPIO_LOOKUP("davinci_gpio.1", 28, "cd", GPIO_ACTIVE_LOW), +- GPIO_LOOKUP("davinci_gpio.1", 29, "wp", GPIO_ACTIVE_LOW), ++ GPIO_LOOKUP("davinci_gpio.0", 28, "cd", GPIO_ACTIVE_LOW), ++ GPIO_LOOKUP("davinci_gpio.0", 29, "wp", GPIO_ACTIVE_LOW), + }, + }; + +diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c +index 43e3e188f521..fa512413a471 100644 +--- a/arch/arm/mach-omap1/clock.c ++++ b/arch/arm/mach-omap1/clock.c +@@ -1011,17 +1011,17 @@ static int clk_debugfs_register_one(struct clk *c) + return -ENOMEM; + c->dent = d; + +- d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount); ++ d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount); + if (!d) { + err = -ENOMEM; + goto err_out; + } +- d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate); ++ d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate); + if (!d) { + err = -ENOMEM; + goto err_out; + 
} +- d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags); ++ d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags); + if (!d) { + err = -ENOMEM; + goto err_out; +diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c +index 4bb6751864a5..fc5fb776a710 100644 +--- a/arch/arm/mach-omap2/omap-wakeupgen.c ++++ b/arch/arm/mach-omap2/omap-wakeupgen.c +@@ -299,8 +299,6 @@ static void irq_save_context(void) + if (soc_is_dra7xx()) + return; + +- if (!sar_base) +- sar_base = omap4_get_sar_ram_base(); + if (wakeupgen_ops && wakeupgen_ops->save_context) + wakeupgen_ops->save_context(); + } +@@ -598,6 +596,8 @@ static int __init wakeupgen_init(struct device_node *node, + irq_hotplug_init(); + irq_pm_init(); + ++ sar_base = omap4_get_sar_ram_base(); ++ + return 0; + } + IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init); +diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c +index 366158a54fcd..6f68576e5695 100644 +--- a/arch/arm/mach-omap2/pm.c ++++ b/arch/arm/mach-omap2/pm.c +@@ -186,7 +186,7 @@ static void omap_pm_end(void) + cpu_idle_poll_ctrl(false); + } + +-static void omap_pm_finish(void) ++static void omap_pm_wake(void) + { + if (soc_is_omap34xx()) + omap_prcm_irq_complete(); +@@ -196,7 +196,7 @@ static const struct platform_suspend_ops omap_pm_ops = { + .begin = omap_pm_begin, + .end = omap_pm_end, + .enter = omap_pm_enter, +- .finish = omap_pm_finish, ++ .wake = omap_pm_wake, + .valid = suspend_valid_only_mem, + }; + +diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c +index ece09c9461f7..d61fbd7a2840 100644 +--- a/arch/arm/mach-omap2/timer.c ++++ b/arch/arm/mach-omap2/timer.c +@@ -156,12 +156,6 @@ static struct clock_event_device clockevent_gpt = { + .tick_resume = omap2_gp_timer_shutdown, + }; + +-static struct property device_disabled = { +- .name = "status", +- .length = sizeof("disabled"), +- .value = "disabled", +-}; +- + static const struct of_device_id omap_timer_match[] __initconst = { + { .compatible = "ti,omap2420-timer", }, + { .compatible = "ti,omap3430-timer", }, +@@ -203,8 +197,17 @@ static struct device_node * __init omap_get_timer_dt(const struct of_device_id * + of_get_property(np, "ti,timer-secure", NULL))) + continue; + +- if (!of_device_is_compatible(np, "ti,omap-counter32k")) +- of_add_property(np, &device_disabled); ++ if (!of_device_is_compatible(np, "ti,omap-counter32k")) { ++ struct property *prop; ++ ++ prop = kzalloc(sizeof(*prop), GFP_KERNEL); ++ if (!prop) ++ return NULL; ++ prop->name = "status"; ++ prop->value = "disabled"; ++ prop->length = strlen(prop->value); ++ of_add_property(np, prop); ++ } + return np; + } + +diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig +index 2a7bb6ccdcb7..a810f4dd34b1 100644 +--- a/arch/arm/mach-orion5x/Kconfig ++++ b/arch/arm/mach-orion5x/Kconfig +@@ -58,7 +58,6 @@ config MACH_KUROBOX_PRO + + config MACH_DNS323 + bool "D-Link DNS-323" +- select GENERIC_NET_UTILS + select I2C_BOARDINFO if I2C + help + Say 'Y' here if you want your kernel to support the +@@ -66,7 +65,6 @@ config MACH_DNS323 + + config MACH_TS209 + bool "QNAP TS-109/TS-209" +- select GENERIC_NET_UTILS + help + Say 'Y' here if you want your kernel to support the + QNAP TS-109/TS-209 platform. +@@ -101,7 +99,6 @@ config MACH_LINKSTATION_LS_HGL + + config MACH_TS409 + bool "QNAP TS-409" +- select GENERIC_NET_UTILS + help + Say 'Y' here if you want your kernel to support the + QNAP TS-409 platform. 
+diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c +index cd483bfb5ca8..d13344b2ddcd 100644 +--- a/arch/arm/mach-orion5x/dns323-setup.c ++++ b/arch/arm/mach-orion5x/dns323-setup.c +@@ -173,10 +173,42 @@ static struct mv643xx_eth_platform_data dns323_eth_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(8), + }; + ++/* dns323_parse_hex_*() taken from tsx09-common.c; should a common copy of these ++ * functions be kept somewhere? ++ */ ++static int __init dns323_parse_hex_nibble(char n) ++{ ++ if (n >= '0' && n <= '9') ++ return n - '0'; ++ ++ if (n >= 'A' && n <= 'F') ++ return n - 'A' + 10; ++ ++ if (n >= 'a' && n <= 'f') ++ return n - 'a' + 10; ++ ++ return -1; ++} ++ ++static int __init dns323_parse_hex_byte(const char *b) ++{ ++ int hi; ++ int lo; ++ ++ hi = dns323_parse_hex_nibble(b[0]); ++ lo = dns323_parse_hex_nibble(b[1]); ++ ++ if (hi < 0 || lo < 0) ++ return -1; ++ ++ return (hi << 4) | lo; ++} ++ + static int __init dns323_read_mac_addr(void) + { + u_int8_t addr[6]; +- void __iomem *mac_page; ++ int i; ++ char *mac_page; + + /* MAC address is stored as a regular ol' string in /dev/mtdblock4 + * (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80). +@@ -185,8 +217,23 @@ static int __init dns323_read_mac_addr(void) + if (!mac_page) + return -ENOMEM; + +- if (!mac_pton((__force const char *) mac_page, addr)) +- goto error_fail; ++ /* Sanity check the string we're looking at */ ++ for (i = 0; i < 5; i++) { ++ if (*(mac_page + (i * 3) + 2) != ':') { ++ goto error_fail; ++ } ++ } ++ ++ for (i = 0; i < 6; i++) { ++ int byte; ++ ++ byte = dns323_parse_hex_byte(mac_page + (i * 3)); ++ if (byte < 0) { ++ goto error_fail; ++ } ++ ++ addr[i] = byte; ++ } + + iounmap(mac_page); + printk("DNS-323: Found ethernet MAC address: %pM\n", addr); +diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c +index 89774985d380..905d4f2dd0b8 100644 +--- a/arch/arm/mach-orion5x/tsx09-common.c ++++ b/arch/arm/mach-orion5x/tsx09-common.c +@@ -53,12 +53,53 @@ struct mv643xx_eth_platform_data qnap_tsx09_eth_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(8), + }; + ++static int __init qnap_tsx09_parse_hex_nibble(char n) ++{ ++ if (n >= '0' && n <= '9') ++ return n - '0'; ++ ++ if (n >= 'A' && n <= 'F') ++ return n - 'A' + 10; ++ ++ if (n >= 'a' && n <= 'f') ++ return n - 'a' + 10; ++ ++ return -1; ++} ++ ++static int __init qnap_tsx09_parse_hex_byte(const char *b) ++{ ++ int hi; ++ int lo; ++ ++ hi = qnap_tsx09_parse_hex_nibble(b[0]); ++ lo = qnap_tsx09_parse_hex_nibble(b[1]); ++ ++ if (hi < 0 || lo < 0) ++ return -1; ++ ++ return (hi << 4) | lo; ++} ++ + static int __init qnap_tsx09_check_mac_addr(const char *addr_str) + { + u_int8_t addr[6]; ++ int i; + +- if (!mac_pton(addr_str, addr)) +- return -1; ++ for (i = 0; i < 6; i++) { ++ int byte; ++ ++ /* ++ * Enforce "xx:xx:xx:xx:xx:xx\n" format. ++ */ ++ if (addr_str[(i * 3) + 2] != ((i < 5) ? 
':' : '\n')) ++ return -1; ++ ++ byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3)); ++ if (byte < 0) ++ return -1; ++ addr[i] = byte; ++ } + + printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr); + +@@ -77,12 +118,12 @@ void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size) + unsigned long addr; + + for (addr = mem_base; addr < (mem_base + size); addr += 1024) { +- void __iomem *nor_page; ++ char *nor_page; + int ret = 0; + + nor_page = ioremap(addr, 1024); + if (nor_page != NULL) { +- ret = qnap_tsx09_check_mac_addr((__force const char *)nor_page); ++ ret = qnap_tsx09_check_mac_addr(nor_page); + iounmap(nor_page); + } + +diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c +index 7a327bd32521..ebef8aacea83 100644 +--- a/arch/arm/plat-omap/dmtimer.c ++++ b/arch/arm/plat-omap/dmtimer.c +@@ -890,11 +890,8 @@ static int omap_dm_timer_probe(struct platform_device *pdev) + timer->irq = irq->start; + timer->pdev = pdev; + +- /* Skip pm_runtime_enable for OMAP1 */ +- if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) { +- pm_runtime_enable(dev); +- pm_runtime_irq_safe(dev); +- } ++ pm_runtime_enable(dev); ++ pm_runtime_irq_safe(dev); + + if (!timer->reserved) { + ret = pm_runtime_get_sync(dev); +diff --git a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi +index 4220fbdcb24a..ff5c4c47b22b 100644 +--- a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi ++++ b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi +@@ -98,7 +98,7 @@ + clock-output-names = "clk125mhz"; + }; + +- pci { ++ pcie@30000000 { + compatible = "pci-host-ecam-generic"; + device_type = "pci"; + #interrupt-cells = <1>; +@@ -118,6 +118,7 @@ + ranges = + <0x02000000 0 0x40000000 0 0x40000000 0 0x20000000 + 0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>; ++ bus-range = <0 0xff>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = + /* addr pin ic icaddr icintr */ +diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi +index 887b61c872dd..ab00be277c6f 100644 +--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi +@@ -484,8 +484,8 @@ + blsp2_spi5: spi@075ba000{ + compatible = "qcom,spi-qup-v2.2.1"; + reg = <0x075ba000 0x600>; +- interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>; +- clocks = <&gcc GCC_BLSP2_QUP5_SPI_APPS_CLK>, ++ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>, + <&gcc GCC_BLSP2_AHB_CLK>; + clock-names = "core", "iface"; + pinctrl-names = "default", "sleep"; +diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +index d4f80786e7c2..28257724a56e 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts ++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +@@ -136,11 +136,12 @@ + phy-mode = "rgmii"; + pinctrl-names = "default"; + pinctrl-0 = <&rgmiim1_pins>; ++ snps,force_thresh_dma_mode; + snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>; + snps,reset-active-low; + snps,reset-delays-us = <0 10000 50000>; +- tx_delay = <0x26>; +- rx_delay = <0x11>; ++ tx_delay = <0x24>; ++ rx_delay = <0x18>; + status = "okay"; + }; + +diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi +index 41d61840fb99..d70e409e2b0c 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi ++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi +@@ -683,7 +683,7 @@ + interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cru HCLK_SDMMC>, <&cru 
SCLK_SDMMC>, + <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; +- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; ++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; + fifo-depth = <0x100>; + status = "disabled"; + }; +@@ -694,7 +694,7 @@ + interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, + <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; +- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; ++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; + fifo-depth = <0x100>; + status = "disabled"; + }; +@@ -705,7 +705,7 @@ + interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, + <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; +- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; ++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; + fifo-depth = <0x100>; + status = "disabled"; + }; +diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi +index 1070c8264c13..2313aea0e69e 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi ++++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi +@@ -257,7 +257,7 @@ + max-frequency = <150000000>; + clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>, + <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>; +- clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; ++ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; + fifo-depth = <0x100>; + interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; + resets = <&cru SRST_SDIO0>; +diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi +index 199a5118b20d..264a6bb60c53 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi ++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi +@@ -406,8 +406,9 @@ + wlan_pd_n: wlan-pd-n { + compatible = "regulator-fixed"; + regulator-name = "wlan_pd_n"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&wlan_module_reset_l>; + +- /* Note the wlan_module_reset_l pinctrl */ + enable-active-high; + gpio = <&gpio1 11 GPIO_ACTIVE_HIGH>; + +@@ -940,12 +941,6 @@ ap_i2c_audio: &i2c8 { + pinctrl-0 = < + &ap_pwroff /* AP will auto-assert this when in S3 */ + &clk_32k /* This pin is always 32k on gru boards */ +- +- /* +- * We want this driven low ASAP; firmware should help us, but +- * we can help ourselves too. +- */ +- &wlan_module_reset_l + >; + + pcfg_output_low: pcfg-output-low { +@@ -1125,12 +1120,7 @@ ap_i2c_audio: &i2c8 { + }; + + wlan_module_reset_l: wlan-module-reset-l { +- /* +- * We want this driven low ASAP (As {Soon,Strongly} As +- * Possible), to avoid leakage through the powered-down +- * WiFi. 
+- */ +- rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_output_low>; ++ rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + bt_host_wake_l: bt-host-wake-l { +diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi +index 0f873c897d0d..ce592a4c0c4c 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi ++++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi +@@ -457,7 +457,7 @@ + assigned-clocks = <&cru SCLK_PCIEPHY_REF>; + assigned-clock-parents = <&cru SCLK_PCIEPHY_REF100M>; + assigned-clock-rates = <100000000>; +- ep-gpios = <&gpio3 RK_PB5 GPIO_ACTIVE_HIGH>; ++ ep-gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_HIGH>; + num-lanes = <4>; + pinctrl-names = "default"; + pinctrl-0 = <&pcie_clkreqn_cpm>; +diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h +index 9ef0797380cb..f9b0b09153e0 100644 +--- a/arch/arm64/include/asm/atomic_lse.h ++++ b/arch/arm64/include/asm/atomic_lse.h +@@ -117,7 +117,7 @@ static inline void atomic_and(int i, atomic_t *v) + /* LSE atomics */ + " mvn %w[i], %w[i]\n" + " stclr %w[i], %[v]") +- : [i] "+r" (w0), [v] "+Q" (v->counter) ++ : [i] "+&r" (w0), [v] "+Q" (v->counter) + : "r" (x1) + : __LL_SC_CLOBBERS); + } +@@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \ + /* LSE atomics */ \ + " mvn %w[i], %w[i]\n" \ + " ldclr" #mb " %w[i], %w[i], %[v]") \ +- : [i] "+r" (w0), [v] "+Q" (v->counter) \ ++ : [i] "+&r" (w0), [v] "+Q" (v->counter) \ + : "r" (x1) \ + : __LL_SC_CLOBBERS, ##cl); \ + \ +@@ -161,7 +161,7 @@ static inline void atomic_sub(int i, atomic_t *v) + /* LSE atomics */ + " neg %w[i], %w[i]\n" + " stadd %w[i], %[v]") +- : [i] "+r" (w0), [v] "+Q" (v->counter) ++ : [i] "+&r" (w0), [v] "+Q" (v->counter) + : "r" (x1) + : __LL_SC_CLOBBERS); + } +@@ -180,7 +180,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \ + " neg %w[i], %w[i]\n" \ + " ldadd" #mb " %w[i], w30, %[v]\n" \ + " add %w[i], %w[i], w30") \ +- : [i] "+r" (w0), [v] "+Q" (v->counter) \ ++ : [i] "+&r" (w0), [v] "+Q" (v->counter) \ + : "r" (x1) \ + : __LL_SC_CLOBBERS , ##cl); \ + \ +@@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \ + /* LSE atomics */ \ + " neg %w[i], %w[i]\n" \ + " ldadd" #mb " %w[i], %w[i], %[v]") \ +- : [i] "+r" (w0), [v] "+Q" (v->counter) \ ++ : [i] "+&r" (w0), [v] "+Q" (v->counter) \ + : "r" (x1) \ + : __LL_SC_CLOBBERS, ##cl); \ + \ +@@ -314,7 +314,7 @@ static inline void atomic64_and(long i, atomic64_t *v) + /* LSE atomics */ + " mvn %[i], %[i]\n" + " stclr %[i], %[v]") +- : [i] "+r" (x0), [v] "+Q" (v->counter) ++ : [i] "+&r" (x0), [v] "+Q" (v->counter) + : "r" (x1) + : __LL_SC_CLOBBERS); + } +@@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \ + /* LSE atomics */ \ + " mvn %[i], %[i]\n" \ + " ldclr" #mb " %[i], %[i], %[v]") \ +- : [i] "+r" (x0), [v] "+Q" (v->counter) \ ++ : [i] "+&r" (x0), [v] "+Q" (v->counter) \ + : "r" (x1) \ + : __LL_SC_CLOBBERS, ##cl); \ + \ +@@ -358,7 +358,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) + /* LSE atomics */ + " neg %[i], %[i]\n" + " stadd %[i], %[v]") +- : [i] "+r" (x0), [v] "+Q" (v->counter) ++ : [i] "+&r" (x0), [v] "+Q" (v->counter) + : "r" (x1) + : __LL_SC_CLOBBERS); + } +@@ -377,7 +377,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \ + " neg %[i], %[i]\n" \ + " ldadd" #mb " %[i], x30, %[v]\n" \ + " add %[i], %[i], x30") \ +- : [i] "+r" (x0), [v] "+Q" (v->counter) \ ++ : [i] "+&r" (x0), [v] 
"+Q" (v->counter) \ + : "r" (x1) \ + : __LL_SC_CLOBBERS, ##cl); \ + \ +@@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \ + /* LSE atomics */ \ + " neg %[i], %[i]\n" \ + " ldadd" #mb " %[i], %[i], %[v]") \ +- : [i] "+r" (x0), [v] "+Q" (v->counter) \ ++ : [i] "+&r" (x0), [v] "+Q" (v->counter) \ + : "r" (x1) \ + : __LL_SC_CLOBBERS, ##cl); \ + \ +@@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) + " sub x30, x30, %[ret]\n" + " cbnz x30, 1b\n" + "2:") +- : [ret] "+r" (x0), [v] "+Q" (v->counter) ++ : [ret] "+&r" (x0), [v] "+Q" (v->counter) + : + : __LL_SC_CLOBBERS, "cc", "memory"); + +@@ -516,7 +516,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \ + " eor %[old1], %[old1], %[oldval1]\n" \ + " eor %[old2], %[old2], %[oldval2]\n" \ + " orr %[old1], %[old1], %[old2]") \ +- : [old1] "+r" (x0), [old2] "+r" (x1), \ ++ : [old1] "+&r" (x0), [old2] "+&r" (x1), \ + [v] "+Q" (*(unsigned long *)ptr) \ + : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \ + [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \ +diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h +index 6ad30776e984..99390755c0c4 100644 +--- a/arch/arm64/include/asm/stacktrace.h ++++ b/arch/arm64/include/asm/stacktrace.h +@@ -27,7 +27,7 @@ struct stackframe { + unsigned long fp; + unsigned long pc; + #ifdef CONFIG_FUNCTION_GRAPH_TRACER +- unsigned int graph; ++ int graph; + #endif + }; + +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c +index 52f15cd896e1..b5a28336c077 100644 +--- a/arch/arm64/kernel/cpu_errata.c ++++ b/arch/arm64/kernel/cpu_errata.c +@@ -178,7 +178,7 @@ static int enable_smccc_arch_workaround_1(void *data) + case PSCI_CONDUIT_HVC: + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); +- if (res.a0) ++ if ((int)res.a0 < 0) + return 0; + cb = call_hvc_arch_workaround_1; + smccc_start = __smccc_workaround_1_hvc_start; +@@ -188,7 +188,7 @@ static int enable_smccc_arch_workaround_1(void *data) + case PSCI_CONDUIT_SMC: + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); +- if (res.a0) ++ if ((int)res.a0 < 0) + return 0; + cb = call_smc_arch_workaround_1; + smccc_start = __smccc_workaround_1_smc_start; +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c +index 9eaef51f83ff..1984e739f155 100644 +--- a/arch/arm64/kernel/perf_event.c ++++ b/arch/arm64/kernel/perf_event.c +@@ -914,9 +914,9 @@ static void __armv8pmu_probe_pmu(void *info) + int pmuver; + + dfr0 = read_sysreg(id_aa64dfr0_el1); +- pmuver = cpuid_feature_extract_signed_field(dfr0, ++ pmuver = cpuid_feature_extract_unsigned_field(dfr0, + ID_AA64DFR0_PMUVER_SHIFT); +- if (pmuver < 1) ++ if (pmuver == 0xf || pmuver == 0) + return; + + probe->present = true; +diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c +index 76809ccd309c..d5718a060672 100644 +--- a/arch/arm64/kernel/stacktrace.c ++++ b/arch/arm64/kernel/stacktrace.c +@@ -59,6 +59,11 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) + #ifdef CONFIG_FUNCTION_GRAPH_TRACER + if (tsk->ret_stack && + (frame->pc == (unsigned long)return_to_handler)) { ++ if (WARN_ON_ONCE(frame->graph == -1)) ++ return -EINVAL; ++ if (frame->graph < -1) ++ frame->graph += FTRACE_NOTRACE_DEPTH; ++ + /* + * This is a case where function graph tracer has + * modified a return address (LR) in a stack frame +diff --git a/arch/arm64/kernel/time.c 
b/arch/arm64/kernel/time.c +index a4391280fba9..f258636273c9 100644 +--- a/arch/arm64/kernel/time.c ++++ b/arch/arm64/kernel/time.c +@@ -52,7 +52,7 @@ unsigned long profile_pc(struct pt_regs *regs) + frame.fp = regs->regs[29]; + frame.pc = regs->pc; + #ifdef CONFIG_FUNCTION_GRAPH_TRACER +- frame.graph = -1; /* no task info */ ++ frame.graph = current->curr_ret_stack; + #endif + do { + int ret = unwind_frame(NULL, &frame); +diff --git a/arch/cris/include/arch-v10/arch/bug.h b/arch/cris/include/arch-v10/arch/bug.h +index 905afeacfedf..06da9d49152a 100644 +--- a/arch/cris/include/arch-v10/arch/bug.h ++++ b/arch/cris/include/arch-v10/arch/bug.h +@@ -44,18 +44,25 @@ struct bug_frame { + * not be used like this with newer versions of gcc. + */ + #define BUG() \ ++do { \ + __asm__ __volatile__ ("clear.d [" __stringify(BUG_MAGIC) "]\n\t"\ + "movu.w " __stringify(__LINE__) ",$r0\n\t"\ + "jump 0f\n\t" \ + ".section .rodata\n" \ + "0:\t.string \"" __FILE__ "\"\n\t" \ +- ".previous") ++ ".previous"); \ ++ unreachable(); \ ++} while (0) + #endif + + #else + + /* This just causes an oops. */ +-#define BUG() (*(int *)0 = 0) ++#define BUG() \ ++do { \ ++ barrier_before_unreachable(); \ ++ __builtin_trap(); \ ++} while (0) + + #endif + +diff --git a/arch/ia64/include/asm/bug.h b/arch/ia64/include/asm/bug.h +index bd3eeb8d1cfa..66b37a532765 100644 +--- a/arch/ia64/include/asm/bug.h ++++ b/arch/ia64/include/asm/bug.h +@@ -4,7 +4,11 @@ + + #ifdef CONFIG_BUG + #define ia64_abort() __builtin_trap() +-#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0) ++#define BUG() do { \ ++ printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ ++ barrier_before_unreachable(); \ ++ ia64_abort(); \ ++} while (0) + + /* should this BUG be made generic? 
*/ + #define HAVE_ARCH_BUG +diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c +index 85bba43e7d5d..658a8e06a69b 100644 +--- a/arch/ia64/kernel/err_inject.c ++++ b/arch/ia64/kernel/err_inject.c +@@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr, + u64 virt_addr=simple_strtoull(buf, NULL, 16); + int ret; + +- ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL); ++ ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL); + if (ret<=0) { + #ifdef ERR_INJ_DEBUG + printk("Virtual address %lx is not existing.\n",virt_addr); +diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c +index 84938fdbbada..908d58347790 100644 +--- a/arch/m68k/coldfire/device.c ++++ b/arch/m68k/coldfire/device.c +@@ -135,7 +135,11 @@ static struct platform_device mcf_fec0 = { + .id = 0, + .num_resources = ARRAY_SIZE(mcf_fec0_resources), + .resource = mcf_fec0_resources, +- .dev.platform_data = FEC_PDATA, ++ .dev = { ++ .dma_mask = &mcf_fec0.dev.coherent_dma_mask, ++ .coherent_dma_mask = DMA_BIT_MASK(32), ++ .platform_data = FEC_PDATA, ++ } + }; + + #ifdef MCFFEC_BASE1 +@@ -167,7 +171,11 @@ static struct platform_device mcf_fec1 = { + .id = 1, + .num_resources = ARRAY_SIZE(mcf_fec1_resources), + .resource = mcf_fec1_resources, +- .dev.platform_data = FEC_PDATA, ++ .dev = { ++ .dma_mask = &mcf_fec1.dev.coherent_dma_mask, ++ .coherent_dma_mask = DMA_BIT_MASK(32), ++ .platform_data = FEC_PDATA, ++ } + }; + #endif /* MCFFEC_BASE1 */ + #endif /* CONFIG_FEC */ +diff --git a/arch/m68k/include/asm/bug.h b/arch/m68k/include/asm/bug.h +index b7e2bf1ba4a6..275dca1435bf 100644 +--- a/arch/m68k/include/asm/bug.h ++++ b/arch/m68k/include/asm/bug.h +@@ -8,16 +8,19 @@ + #ifndef CONFIG_SUN3 + #define BUG() do { \ + pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ ++ barrier_before_unreachable(); \ + __builtin_trap(); \ + } while (0) + #else + #define BUG() do { \ + pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ ++ barrier_before_unreachable(); \ + panic("BUG!"); \ + } while (0) + #endif + #else + #define BUG() do { \ ++ barrier_before_unreachable(); \ + __builtin_trap(); \ + } while (0) + #endif +diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c +index d99f5242169e..b3aec101a65d 100644 +--- a/arch/mips/cavium-octeon/octeon-irq.c ++++ b/arch/mips/cavium-octeon/octeon-irq.c +@@ -2271,7 +2271,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, + + parent_irq = irq_of_parse_and_map(ciu_node, 0); + if (!parent_irq) { +- pr_err("ERROR: Couldn't acquire parent_irq for %s\n.", ++ pr_err("ERROR: Couldn't acquire parent_irq for %s\n", + ciu_node->name); + return -EINVAL; + } +@@ -2283,7 +2283,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, + + addr = of_get_address(ciu_node, 0, NULL, NULL); + if (!addr) { +- pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name); ++ pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name); + return -EINVAL; + } + host_data->raw_reg = (u64)phys_to_virt( +@@ -2291,7 +2291,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, + + addr = of_get_address(ciu_node, 1, NULL, NULL); + if (!addr) { +- pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name); ++ pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name); + return -EINVAL; + } + host_data->en_reg = (u64)phys_to_virt( +@@ -2299,7 +2299,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, + + r = 
of_property_read_u32(ciu_node, "cavium,max-bits", &val); + if (r) { +- pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.", ++ pr_err("ERROR: Couldn't read cavium,max-bits from %s\n", + ciu_node->name); + return r; + } +@@ -2309,7 +2309,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, + &octeon_irq_domain_cib_ops, + host_data); + if (!cib_domain) { +- pr_err("ERROR: Couldn't irq_domain_add_linear()\n."); ++ pr_err("ERROR: Couldn't irq_domain_add_linear()\n"); + return -ENOMEM; + } + +diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h +index aa3800c82332..d99ca862dae3 100644 +--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h ++++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h +@@ -167,7 +167,7 @@ + #define AR71XX_AHB_DIV_MASK 0x7 + + #define AR724X_PLL_REG_CPU_CONFIG 0x00 +-#define AR724X_PLL_REG_PCIE_CONFIG 0x18 ++#define AR724X_PLL_REG_PCIE_CONFIG 0x10 + + #define AR724X_PLL_FB_SHIFT 0 + #define AR724X_PLL_FB_MASK 0x3ff +diff --git a/arch/mips/include/asm/machine.h b/arch/mips/include/asm/machine.h +index e0d9b373d415..f83879dadd1e 100644 +--- a/arch/mips/include/asm/machine.h ++++ b/arch/mips/include/asm/machine.h +@@ -52,7 +52,7 @@ mips_machine_is_compatible(const struct mips_machine *mach, const void *fdt) + if (!mach->matches) + return NULL; + +- for (match = mach->matches; match->compatible; match++) { ++ for (match = mach->matches; match->compatible[0]; match++) { + if (fdt_node_check_compatible(fdt, 0, match->compatible) == 0) + return match; + } +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c +index c552c20237d4..006105fb12fe 100644 +--- a/arch/mips/kernel/ptrace.c ++++ b/arch/mips/kernel/ptrace.c +@@ -454,7 +454,7 @@ static int fpr_get_msa(struct task_struct *target, + /* + * Copy the floating-point context to the supplied NT_PRFPREG buffer. + * Choose the appropriate helper for general registers, and then copy +- * the FCSR register separately. ++ * the FCSR and FIR registers separately. + */ + static int fpr_get(struct task_struct *target, + const struct user_regset *regset, +@@ -462,6 +462,7 @@ static int fpr_get(struct task_struct *target, + void *kbuf, void __user *ubuf) + { + const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); ++ const int fir_pos = fcr31_pos + sizeof(u32); + int err; + + if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) +@@ -474,6 +475,12 @@ static int fpr_get(struct task_struct *target, + err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.fcr31, + fcr31_pos, fcr31_pos + sizeof(u32)); ++ if (err) ++ return err; ++ ++ err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, ++ &boot_cpu_data.fpu_id, ++ fir_pos, fir_pos + sizeof(u32)); + + return err; + } +@@ -522,7 +529,8 @@ static int fpr_set_msa(struct task_struct *target, + /* + * Copy the supplied NT_PRFPREG buffer to the floating-point context. + * Choose the appropriate helper for general registers, and then copy +- * the FCSR register separately. ++ * the FCSR register separately. Ignore the incoming FIR register ++ * contents though, as the register is read-only. 
+ * + * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0', + * which is supposed to have been guaranteed by the kernel before +@@ -536,6 +544,7 @@ static int fpr_set(struct task_struct *target, + const void *kbuf, const void __user *ubuf) + { + const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); ++ const int fir_pos = fcr31_pos + sizeof(u32); + u32 fcr31; + int err; + +@@ -563,6 +572,11 @@ static int fpr_set(struct task_struct *target, + ptrace_setfcr31(target, fcr31); + } + ++ if (count > 0) ++ err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, ++ fir_pos, ++ fir_pos + sizeof(u32)); ++ + return err; + } + +@@ -784,7 +798,7 @@ long arch_ptrace(struct task_struct *child, long request, + fregs = get_fpu_regs(child); + + #ifdef CONFIG_32BIT +- if (test_thread_flag(TIF_32BIT_FPREGS)) { ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { + /* + * The odd registers are actually the high + * order bits of the values stored in the even +@@ -873,7 +887,7 @@ long arch_ptrace(struct task_struct *child, long request, + + init_fp_ctx(child); + #ifdef CONFIG_32BIT +- if (test_thread_flag(TIF_32BIT_FPREGS)) { ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { + /* + * The odd registers are actually the high + * order bits of the values stored in the even +diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c +index 40e212d6b26b..4a157d3249ac 100644 +--- a/arch/mips/kernel/ptrace32.c ++++ b/arch/mips/kernel/ptrace32.c +@@ -98,7 +98,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + break; + } + fregs = get_fpu_regs(child); +- if (test_thread_flag(TIF_32BIT_FPREGS)) { ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { + /* + * The odd registers are actually the high + * order bits of the values stored in the even +@@ -205,7 +205,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + sizeof(child->thread.fpu)); + child->thread.fpu.fcr31 = 0; + } +- if (test_thread_flag(TIF_32BIT_FPREGS)) { ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { + /* + * The odd registers are actually the high + * order bits of the values stored in the even +diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c +index 75fdeaa8c62f..9730ba734afe 100644 +--- a/arch/mips/kvm/mips.c ++++ b/arch/mips/kvm/mips.c +@@ -45,7 +45,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { + { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU }, + { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU }, + { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU }, +- { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU }, ++ { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU }, + { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU }, + { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU }, + { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU }, +diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c +index 6f534b209971..e12dfa48b478 100644 +--- a/arch/mips/mm/c-r4k.c ++++ b/arch/mips/mm/c-r4k.c +@@ -851,9 +851,12 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) + /* + * Either no secondary cache or the available caches don't have the + * subset property so we have to flush the primary caches +- * explicitly ++ * explicitly. ++ * If we would need IPI to perform an INDEX-type operation, then ++ * we have to use the HIT-type alternative as IPI cannot be used ++ * here due to interrupts possibly being disabled. 
+ */ +- if (size >= dcache_size) { ++ if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) { + r4k_blast_dcache(); + } else { + R4600_HIT_CACHEOP_WAR_IMPL; +@@ -890,7 +893,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) + return; + } + +- if (size >= dcache_size) { ++ if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) { + r4k_blast_dcache(); + } else { + R4600_HIT_CACHEOP_WAR_IMPL; +diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile +index 651974192c4d..b479926f0167 100644 +--- a/arch/powerpc/boot/Makefile ++++ b/arch/powerpc/boot/Makefile +@@ -101,7 +101,8 @@ $(addprefix $(obj)/,$(zlib-y)): \ + libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c + libfdtheader := fdt.h libfdt.h libfdt_internal.h + +-$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \ ++$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \ ++ treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \ + $(addprefix $(obj)/,$(libfdtheader)) + + src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \ +diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h +index ccf10c2f8899..c3bdd2d8ec90 100644 +--- a/arch/powerpc/include/asm/exception-64s.h ++++ b/arch/powerpc/include/asm/exception-64s.h +@@ -69,6 +69,27 @@ + */ + #define EX_R3 EX_DAR + ++#define STF_ENTRY_BARRIER_SLOT \ ++ STF_ENTRY_BARRIER_FIXUP_SECTION; \ ++ nop; \ ++ nop; \ ++ nop ++ ++#define STF_EXIT_BARRIER_SLOT \ ++ STF_EXIT_BARRIER_FIXUP_SECTION; \ ++ nop; \ ++ nop; \ ++ nop; \ ++ nop; \ ++ nop; \ ++ nop ++ ++/* ++ * r10 must be free to use, r13 must be paca ++ */ ++#define INTERRUPT_TO_KERNEL \ ++ STF_ENTRY_BARRIER_SLOT ++ + /* + * Macros for annotating the expected destination of (h)rfid + * +@@ -85,16 +106,19 @@ + rfid + + #define RFI_TO_USER \ ++ STF_EXIT_BARRIER_SLOT; \ + RFI_FLUSH_SLOT; \ + rfid; \ + b rfi_flush_fallback + + #define RFI_TO_USER_OR_KERNEL \ ++ STF_EXIT_BARRIER_SLOT; \ + RFI_FLUSH_SLOT; \ + rfid; \ + b rfi_flush_fallback + + #define RFI_TO_GUEST \ ++ STF_EXIT_BARRIER_SLOT; \ + RFI_FLUSH_SLOT; \ + rfid; \ + b rfi_flush_fallback +@@ -103,21 +127,25 @@ + hrfid + + #define HRFI_TO_USER \ ++ STF_EXIT_BARRIER_SLOT; \ + RFI_FLUSH_SLOT; \ + hrfid; \ + b hrfi_flush_fallback + + #define HRFI_TO_USER_OR_KERNEL \ ++ STF_EXIT_BARRIER_SLOT; \ + RFI_FLUSH_SLOT; \ + hrfid; \ + b hrfi_flush_fallback + + #define HRFI_TO_GUEST \ ++ STF_EXIT_BARRIER_SLOT; \ + RFI_FLUSH_SLOT; \ + hrfid; \ + b hrfi_flush_fallback + + #define HRFI_TO_UNKNOWN \ ++ STF_EXIT_BARRIER_SLOT; \ + RFI_FLUSH_SLOT; \ + hrfid; \ + b hrfi_flush_fallback +@@ -249,6 +277,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) + #define __EXCEPTION_PROLOG_1(area, extra, vec) \ + OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \ + OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \ ++ INTERRUPT_TO_KERNEL; \ + SAVE_CTR(r10, area); \ + mfcr r9; \ + extra(vec); \ +diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h +index 1e82eb3caabd..a9b64df34e2a 100644 +--- a/arch/powerpc/include/asm/feature-fixups.h ++++ b/arch/powerpc/include/asm/feature-fixups.h +@@ -187,6 +187,22 @@ label##3: \ + FTR_ENTRY_OFFSET label##1b-label##3b; \ + .popsection; + ++#define STF_ENTRY_BARRIER_FIXUP_SECTION \ ++953: \ ++ .pushsection __stf_entry_barrier_fixup,"a"; \ ++ .align 2; \ ++954: \ ++ FTR_ENTRY_OFFSET 953b-954b; \ ++ .popsection; ++ ++#define STF_EXIT_BARRIER_FIXUP_SECTION \ ++955: \ ++ .pushsection 
__stf_exit_barrier_fixup,"a"; \ ++ .align 2; \ ++956: \ ++ FTR_ENTRY_OFFSET 955b-956b; \ ++ .popsection; ++ + #define RFI_FLUSH_FIXUP_SECTION \ + 951: \ + .pushsection __rfi_flush_fixup,"a"; \ +@@ -199,6 +215,9 @@ label##3: \ + #ifndef __ASSEMBLY__ + #include <linux/types.h> + ++extern long stf_barrier_fallback; ++extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup; ++extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup; + extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; + + void apply_feature_fixups(void); +diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h +index eca3f9c68907..5a740feb7bd7 100644 +--- a/arch/powerpc/include/asm/hvcall.h ++++ b/arch/powerpc/include/asm/hvcall.h +@@ -337,6 +337,9 @@ + #define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2 + #define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3 + #define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4 ++#define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5 ++#define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6 ++#define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7 + + #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0 + #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1 +diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h +index c6d3078bd8c3..b8b0be8f1a07 100644 +--- a/arch/powerpc/include/asm/irq_work.h ++++ b/arch/powerpc/include/asm/irq_work.h +@@ -6,5 +6,6 @@ static inline bool arch_irq_work_has_interrupt(void) + { + return true; + } ++extern void arch_irq_work_raise(void); + + #endif /* _ASM_POWERPC_IRQ_WORK_H */ +diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h +index b8366df50d19..e6bd59353e40 100644 +--- a/arch/powerpc/include/asm/paca.h ++++ b/arch/powerpc/include/asm/paca.h +@@ -238,8 +238,7 @@ struct paca_struct { + */ + u64 exrfi[EX_SIZE] __aligned(0x80); + void *rfi_flush_fallback_area; +- u64 l1d_flush_congruence; +- u64 l1d_flush_sets; ++ u64 l1d_flush_size; + #endif + }; + +diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h +new file mode 100644 +index 000000000000..44989b22383c +--- /dev/null ++++ b/arch/powerpc/include/asm/security_features.h +@@ -0,0 +1,85 @@ ++/* SPDX-License-Identifier: GPL-2.0+ */ ++/* ++ * Security related feature bit definitions. ++ * ++ * Copyright 2018, Michael Ellerman, IBM Corporation. 
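/* [Editorial sketch] The helpers defined in this new header are
 * combined into policy decisions elsewhere in the patch; the shape
 * used later by setup_stf_barrier() and pnv_setup_rfi_flush() is:
 */
bool enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
              (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
               security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));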
++ */ ++ ++#ifndef _ASM_POWERPC_SECURITY_FEATURES_H ++#define _ASM_POWERPC_SECURITY_FEATURES_H ++ ++ ++extern unsigned long powerpc_security_features; ++extern bool rfi_flush; ++ ++/* These are bit flags */ ++enum stf_barrier_type { ++ STF_BARRIER_NONE = 0x1, ++ STF_BARRIER_FALLBACK = 0x2, ++ STF_BARRIER_EIEIO = 0x4, ++ STF_BARRIER_SYNC_ORI = 0x8, ++}; ++ ++void setup_stf_barrier(void); ++void do_stf_barrier_fixups(enum stf_barrier_type types); ++ ++static inline void security_ftr_set(unsigned long feature) ++{ ++ powerpc_security_features |= feature; ++} ++ ++static inline void security_ftr_clear(unsigned long feature) ++{ ++ powerpc_security_features &= ~feature; ++} ++ ++static inline bool security_ftr_enabled(unsigned long feature) ++{ ++ return !!(powerpc_security_features & feature); ++} ++ ++ ++// Features indicating support for Spectre/Meltdown mitigations ++ ++// The L1-D cache can be flushed with ori r30,r30,0 ++#define SEC_FTR_L1D_FLUSH_ORI30 0x0000000000000001ull ++ ++// The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2) ++#define SEC_FTR_L1D_FLUSH_TRIG2 0x0000000000000002ull ++ ++// ori r31,r31,0 acts as a speculation barrier ++#define SEC_FTR_SPEC_BAR_ORI31 0x0000000000000004ull ++ ++// Speculation past bctr is disabled ++#define SEC_FTR_BCCTRL_SERIALISED 0x0000000000000008ull ++ ++// Entries in L1-D are private to a SMT thread ++#define SEC_FTR_L1D_THREAD_PRIV 0x0000000000000010ull ++ ++// Indirect branch prediction cache disabled ++#define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull ++ ++ ++// Features indicating need for Spectre/Meltdown mitigations ++ ++// The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest) ++#define SEC_FTR_L1D_FLUSH_HV 0x0000000000000040ull ++ ++// The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace) ++#define SEC_FTR_L1D_FLUSH_PR 0x0000000000000080ull ++ ++// A speculation barrier should be used for bounds checks (Spectre variant 1) ++#define SEC_FTR_BNDS_CHK_SPEC_BAR 0x0000000000000100ull ++ ++// Firmware configuration indicates user favours security over performance ++#define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull ++ ++ ++// Features enabled by default ++#define SEC_FTR_DEFAULT \ ++ (SEC_FTR_L1D_FLUSH_HV | \ ++ SEC_FTR_L1D_FLUSH_PR | \ ++ SEC_FTR_BNDS_CHK_SPEC_BAR | \ ++ SEC_FTR_FAVOUR_SECURITY) ++ ++#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */ +diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h +index 469b7fdc9be4..bbcdf929be54 100644 +--- a/arch/powerpc/include/asm/setup.h ++++ b/arch/powerpc/include/asm/setup.h +@@ -49,7 +49,7 @@ enum l1d_flush_type { + L1D_FLUSH_MTTRIG = 0x8, + }; + +-void __init setup_rfi_flush(enum l1d_flush_type, bool enable); ++void setup_rfi_flush(enum l1d_flush_type, bool enable); + void do_rfi_flush_fixups(enum l1d_flush_type types); + + #endif /* !__ASSEMBLY__ */ +diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile +index 6c6cce937dd8..1479c61e29c5 100644 +--- a/arch/powerpc/kernel/Makefile ++++ b/arch/powerpc/kernel/Makefile +@@ -42,7 +42,7 @@ obj-$(CONFIG_VDSO32) += vdso32/ + obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o + obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o + obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o +-obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o ++obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o + obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o + obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o + obj-$(CONFIG_PPC64) 
+= vdso64/ +diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c +index 748cdc4bb89a..2e5ea300258a 100644 +--- a/arch/powerpc/kernel/asm-offsets.c ++++ b/arch/powerpc/kernel/asm-offsets.c +@@ -239,8 +239,7 @@ int main(void) + OFFSET(PACA_IN_NMI, paca_struct, in_nmi); + OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area); + OFFSET(PACA_EXRFI, paca_struct, exrfi); +- OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence); +- OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets); ++ OFFSET(PACA_L1D_FLUSH_SIZE, paca_struct, l1d_flush_size); + + #endif + OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id); +diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S +index 679bbe714e85..9daede99c131 100644 +--- a/arch/powerpc/kernel/cpu_setup_power.S ++++ b/arch/powerpc/kernel/cpu_setup_power.S +@@ -28,6 +28,7 @@ _GLOBAL(__setup_cpu_power7) + beqlr + li r0,0 + mtspr SPRN_LPID,r0 ++ mtspr SPRN_PCR,r0 + mfspr r3,SPRN_LPCR + li r4,(LPCR_LPES1 >> LPCR_LPES_SH) + bl __init_LPCR_ISA206 +@@ -42,6 +43,7 @@ _GLOBAL(__restore_cpu_power7) + beqlr + li r0,0 + mtspr SPRN_LPID,r0 ++ mtspr SPRN_PCR,r0 + mfspr r3,SPRN_LPCR + li r4,(LPCR_LPES1 >> LPCR_LPES_SH) + bl __init_LPCR_ISA206 +@@ -59,6 +61,7 @@ _GLOBAL(__setup_cpu_power8) + beqlr + li r0,0 + mtspr SPRN_LPID,r0 ++ mtspr SPRN_PCR,r0 + mfspr r3,SPRN_LPCR + ori r3, r3, LPCR_PECEDH + li r4,0 /* LPES = 0 */ +@@ -81,6 +84,7 @@ _GLOBAL(__restore_cpu_power8) + beqlr + li r0,0 + mtspr SPRN_LPID,r0 ++ mtspr SPRN_PCR,r0 + mfspr r3,SPRN_LPCR + ori r3, r3, LPCR_PECEDH + li r4,0 /* LPES = 0 */ +@@ -103,6 +107,7 @@ _GLOBAL(__setup_cpu_power9) + mtspr SPRN_PSSCR,r0 + mtspr SPRN_LPID,r0 + mtspr SPRN_PID,r0 ++ mtspr SPRN_PCR,r0 + mfspr r3,SPRN_LPCR + LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) + or r3, r3, r4 +@@ -128,6 +133,7 @@ _GLOBAL(__restore_cpu_power9) + mtspr SPRN_PSSCR,r0 + mtspr SPRN_LPID,r0 + mtspr SPRN_PID,r0 ++ mtspr SPRN_PCR,r0 + mfspr r3,SPRN_LPCR + LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) + or r3, r3, r4 +diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c +index f047ae1b6271..2dba206b065a 100644 +--- a/arch/powerpc/kernel/dt_cpu_ftrs.c ++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c +@@ -137,6 +137,7 @@ static void __restore_cpu_cpufeatures(void) + if (hv_mode) { + mtspr(SPRN_LPID, 0); + mtspr(SPRN_HFSCR, system_registers.hfscr); ++ mtspr(SPRN_PCR, 0); + } + mtspr(SPRN_FSCR, system_registers.fscr); + +diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S +index f9ca4bb3d48e..c09f0a6f8495 100644 +--- a/arch/powerpc/kernel/exceptions-64s.S ++++ b/arch/powerpc/kernel/exceptions-64s.S +@@ -825,7 +825,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) + #endif + + +-EXC_REAL_MASKABLE(decrementer, 0x900, 0x80) ++EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80) + EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900) + TRAMP_KVM(PACA_EXGEN, 0x900) + EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt) +@@ -901,6 +901,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception) + mtctr r13; \ + GET_PACA(r13); \ + std r10,PACA_EXGEN+EX_R10(r13); \ ++ INTERRUPT_TO_KERNEL; \ + KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \ + HMT_MEDIUM; \ + mfctr r9; +@@ -909,7 +910,8 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception) + #define SYSCALL_KVMTEST \ + HMT_MEDIUM; \ + mr r9,r13; \ +- GET_PACA(r13); ++ GET_PACA(r13); \ ++ 
INTERRUPT_TO_KERNEL; + #endif + + #define LOAD_SYSCALL_HANDLER(reg) \ +@@ -1434,45 +1436,56 @@ masked_##_H##interrupt: \ + b .; \ + MASKED_DEC_HANDLER(_H) + ++TRAMP_REAL_BEGIN(stf_barrier_fallback) ++ std r9,PACA_EXRFI+EX_R9(r13) ++ std r10,PACA_EXRFI+EX_R10(r13) ++ sync ++ ld r9,PACA_EXRFI+EX_R9(r13) ++ ld r10,PACA_EXRFI+EX_R10(r13) ++ ori 31,31,0 ++ .rept 14 ++ b 1f ++1: ++ .endr ++ blr ++ + TRAMP_REAL_BEGIN(rfi_flush_fallback) + SET_SCRATCH0(r13); + GET_PACA(r13); + std r9,PACA_EXRFI+EX_R9(r13) + std r10,PACA_EXRFI+EX_R10(r13) + std r11,PACA_EXRFI+EX_R11(r13) +- std r12,PACA_EXRFI+EX_R12(r13) +- std r8,PACA_EXRFI+EX_R13(r13) + mfctr r9 + ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) +- ld r11,PACA_L1D_FLUSH_SETS(r13) +- ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) +- /* +- * The load adresses are at staggered offsets within cachelines, +- * which suits some pipelines better (on others it should not +- * hurt). +- */ +- addi r12,r12,8 ++ ld r11,PACA_L1D_FLUSH_SIZE(r13) ++ srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ + mtctr r11 + DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ + + /* order ld/st prior to dcbt stop all streams with flushing */ + sync +-1: li r8,0 +- .rept 8 /* 8-way set associative */ +- ldx r11,r10,r8 +- add r8,r8,r12 +- xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not +- add r8,r8,r11 // Add 0, this creates a dependency on the ldx +- .endr +- addi r10,r10,128 /* 128 byte cache line */ ++ ++ /* ++ * The load adresses are at staggered offsets within cachelines, ++ * which suits some pipelines better (on others it should not ++ * hurt). ++ */ ++1: ++ ld r11,(0x80 + 8)*0(r10) ++ ld r11,(0x80 + 8)*1(r10) ++ ld r11,(0x80 + 8)*2(r10) ++ ld r11,(0x80 + 8)*3(r10) ++ ld r11,(0x80 + 8)*4(r10) ++ ld r11,(0x80 + 8)*5(r10) ++ ld r11,(0x80 + 8)*6(r10) ++ ld r11,(0x80 + 8)*7(r10) ++ addi r10,r10,0x80*8 + bdnz 1b + + mtctr r9 + ld r9,PACA_EXRFI+EX_R9(r13) + ld r10,PACA_EXRFI+EX_R10(r13) + ld r11,PACA_EXRFI+EX_R11(r13) +- ld r12,PACA_EXRFI+EX_R12(r13) +- ld r8,PACA_EXRFI+EX_R13(r13) + GET_SCRATCH0(r13); + rfid + +@@ -1482,39 +1495,37 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback) + std r9,PACA_EXRFI+EX_R9(r13) + std r10,PACA_EXRFI+EX_R10(r13) + std r11,PACA_EXRFI+EX_R11(r13) +- std r12,PACA_EXRFI+EX_R12(r13) +- std r8,PACA_EXRFI+EX_R13(r13) + mfctr r9 + ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) +- ld r11,PACA_L1D_FLUSH_SETS(r13) +- ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) +- /* +- * The load adresses are at staggered offsets within cachelines, +- * which suits some pipelines better (on others it should not +- * hurt). +- */ +- addi r12,r12,8 ++ ld r11,PACA_L1D_FLUSH_SIZE(r13) ++ srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ + mtctr r11 + DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ + + /* order ld/st prior to dcbt stop all streams with flushing */ + sync +-1: li r8,0 +- .rept 8 /* 8-way set associative */ +- ldx r11,r10,r8 +- add r8,r8,r12 +- xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not +- add r8,r8,r11 // Add 0, this creates a dependency on the ldx +- .endr +- addi r10,r10,128 /* 128 byte cache line */ ++ ++ /* ++ * The load adresses are at staggered offsets within cachelines, ++ * which suits some pipelines better (on others it should not ++ * hurt). 
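/* [Editorial sketch] Geometry of the rewritten displacement flush:
 * 128-byte (0x80) cache lines, eight loads per iteration at staggered
 * (0x80 + 8)*j offsets, so the loop count is l1d_flush_size divided
 * by 128 * 8 -- exactly the "srdi r11,r11,(7 + 3)" above:
 */
unsigned long iters = l1d_flush_size >> (7 + 3); /* /128 per line, /8 unroll */
/* iteration i, load j touches byte offset i * 0x400 + j * 0x88 */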
++ */ ++1: ++ ld r11,(0x80 + 8)*0(r10) ++ ld r11,(0x80 + 8)*1(r10) ++ ld r11,(0x80 + 8)*2(r10) ++ ld r11,(0x80 + 8)*3(r10) ++ ld r11,(0x80 + 8)*4(r10) ++ ld r11,(0x80 + 8)*5(r10) ++ ld r11,(0x80 + 8)*6(r10) ++ ld r11,(0x80 + 8)*7(r10) ++ addi r10,r10,0x80*8 + bdnz 1b + + mtctr r9 + ld r9,PACA_EXRFI+EX_R9(r13) + ld r10,PACA_EXRFI+EX_R10(r13) + ld r11,PACA_EXRFI+EX_R11(r13) +- ld r12,PACA_EXRFI+EX_R12(r13) +- ld r8,PACA_EXRFI+EX_R13(r13) + GET_SCRATCH0(r13); + hrfid + +diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S +index 1125c9be9e06..e35cebd45c35 100644 +--- a/arch/powerpc/kernel/idle_book3s.S ++++ b/arch/powerpc/kernel/idle_book3s.S +@@ -838,6 +838,8 @@ BEGIN_FTR_SECTION + mtspr SPRN_PTCR,r4 + ld r4,_RPR(r1) + mtspr SPRN_RPR,r4 ++ ld r4,_AMOR(r1) ++ mtspr SPRN_AMOR,r4 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) + + ld r4,_TSCR(r1) +diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c +new file mode 100644 +index 000000000000..b98a722da915 +--- /dev/null ++++ b/arch/powerpc/kernel/security.c +@@ -0,0 +1,237 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++// ++// Security related flags and so on. ++// ++// Copyright 2018, Michael Ellerman, IBM Corporation. ++ ++#include <linux/kernel.h> ++#include <linux/device.h> ++#include <linux/seq_buf.h> ++ ++#include <asm/debugfs.h> ++#include <asm/security_features.h> ++ ++ ++unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; ++ ++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ bool thread_priv; ++ ++ thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV); ++ ++ if (rfi_flush || thread_priv) { ++ struct seq_buf s; ++ seq_buf_init(&s, buf, PAGE_SIZE - 1); ++ ++ seq_buf_printf(&s, "Mitigation: "); ++ ++ if (rfi_flush) ++ seq_buf_printf(&s, "RFI Flush"); ++ ++ if (rfi_flush && thread_priv) ++ seq_buf_printf(&s, ", "); ++ ++ if (thread_priv) ++ seq_buf_printf(&s, "L1D private per thread"); ++ ++ seq_buf_printf(&s, "\n"); ++ ++ return s.len; ++ } ++ ++ if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && ++ !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) ++ return sprintf(buf, "Not affected\n"); ++ ++ return sprintf(buf, "Vulnerable\n"); ++} ++ ++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) ++ return sprintf(buf, "Not affected\n"); ++ ++ return sprintf(buf, "Vulnerable\n"); ++} ++ ++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ bool bcs, ccd, ori; ++ struct seq_buf s; ++ ++ seq_buf_init(&s, buf, PAGE_SIZE - 1); ++ ++ bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); ++ ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED); ++ ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31); ++ ++ if (bcs || ccd) { ++ seq_buf_printf(&s, "Mitigation: "); ++ ++ if (bcs) ++ seq_buf_printf(&s, "Indirect branch serialisation (kernel only)"); ++ ++ if (bcs && ccd) ++ seq_buf_printf(&s, ", "); ++ ++ if (ccd) ++ seq_buf_printf(&s, "Indirect branch cache disabled"); ++ } else ++ seq_buf_printf(&s, "Vulnerable"); ++ ++ if (ori) ++ seq_buf_printf(&s, ", ori31 speculation barrier enabled"); ++ ++ seq_buf_printf(&s, "\n"); ++ ++ return s.len; ++} ++ ++/* ++ * Store-forwarding barrier support. 
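/* [Editorial sketch] All the cpu_show_* handlers above share one
 * bounded-printf pattern; minimal form (buf is the sysfs page, the
 * "mitigation_name" string is assumed; PAGE_SIZE - 1 leaves room for
 * the terminating NUL):
 */
struct seq_buf s;

seq_buf_init(&s, buf, PAGE_SIZE - 1);
seq_buf_printf(&s, "Mitigation: %s\n", mitigation_name);
return s.len;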
++ */ ++ ++static enum stf_barrier_type stf_enabled_flush_types; ++static bool no_stf_barrier; ++bool stf_barrier; ++ ++static int __init handle_no_stf_barrier(char *p) ++{ ++ pr_info("stf-barrier: disabled on command line."); ++ no_stf_barrier = true; ++ return 0; ++} ++ ++early_param("no_stf_barrier", handle_no_stf_barrier); ++ ++/* This is the generic flag used by other architectures */ ++static int __init handle_ssbd(char *p) ++{ ++ if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) { ++ /* Until firmware tells us, we have the barrier with auto */ ++ return 0; ++ } else if (strncmp(p, "off", 3) == 0) { ++ handle_no_stf_barrier(NULL); ++ return 0; ++ } else ++ return 1; ++ ++ return 0; ++} ++early_param("spec_store_bypass_disable", handle_ssbd); ++ ++/* This is the generic flag used by other architectures */ ++static int __init handle_no_ssbd(char *p) ++{ ++ handle_no_stf_barrier(NULL); ++ return 0; ++} ++early_param("nospec_store_bypass_disable", handle_no_ssbd); ++ ++static void stf_barrier_enable(bool enable) ++{ ++ if (enable) ++ do_stf_barrier_fixups(stf_enabled_flush_types); ++ else ++ do_stf_barrier_fixups(STF_BARRIER_NONE); ++ ++ stf_barrier = enable; ++} ++ ++void setup_stf_barrier(void) ++{ ++ enum stf_barrier_type type; ++ bool enable, hv; ++ ++ hv = cpu_has_feature(CPU_FTR_HVMODE); ++ ++ /* Default to fallback in case fw-features are not available */ ++ if (cpu_has_feature(CPU_FTR_ARCH_300)) ++ type = STF_BARRIER_EIEIO; ++ else if (cpu_has_feature(CPU_FTR_ARCH_207S)) ++ type = STF_BARRIER_SYNC_ORI; ++ else if (cpu_has_feature(CPU_FTR_ARCH_206)) ++ type = STF_BARRIER_FALLBACK; ++ else ++ type = STF_BARRIER_NONE; ++ ++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && ++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || ++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv)); ++ ++ if (type == STF_BARRIER_FALLBACK) { ++ pr_info("stf-barrier: fallback barrier available\n"); ++ } else if (type == STF_BARRIER_SYNC_ORI) { ++ pr_info("stf-barrier: hwsync barrier available\n"); ++ } else if (type == STF_BARRIER_EIEIO) { ++ pr_info("stf-barrier: eieio barrier available\n"); ++ } ++ ++ stf_enabled_flush_types = type; ++ ++ if (!no_stf_barrier) ++ stf_barrier_enable(enable); ++} ++ ++ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) { ++ const char *type; ++ switch (stf_enabled_flush_types) { ++ case STF_BARRIER_EIEIO: ++ type = "eieio"; ++ break; ++ case STF_BARRIER_SYNC_ORI: ++ type = "hwsync"; ++ break; ++ case STF_BARRIER_FALLBACK: ++ type = "fallback"; ++ break; ++ default: ++ type = "unknown"; ++ } ++ return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type); ++ } ++ ++ if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && ++ !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) ++ return sprintf(buf, "Not affected\n"); ++ ++ return sprintf(buf, "Vulnerable\n"); ++} ++ ++#ifdef CONFIG_DEBUG_FS ++static int stf_barrier_set(void *data, u64 val) ++{ ++ bool enable; ++ ++ if (val == 1) ++ enable = true; ++ else if (val == 0) ++ enable = false; ++ else ++ return -EINVAL; ++ ++ /* Only do anything if we're changing state */ ++ if (enable != stf_barrier) ++ stf_barrier_enable(enable); ++ ++ return 0; ++} ++ ++static int stf_barrier_get(void *data, u64 *val) ++{ ++ *val = stf_barrier ? 
1 : 0; ++ return 0; ++} ++ ++DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n"); ++ ++static __init int stf_barrier_debugfs_init(void) ++{ ++ debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier); ++ return 0; ++} ++device_initcall(stf_barrier_debugfs_init); ++#endif /* CONFIG_DEBUG_FS */ +diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c +index 9527a4c6cbc2..0618aa61b26a 100644 +--- a/arch/powerpc/kernel/setup_64.c ++++ b/arch/powerpc/kernel/setup_64.c +@@ -822,9 +822,6 @@ static void do_nothing(void *unused) + + void rfi_flush_enable(bool enable) + { +- if (rfi_flush == enable) +- return; +- + if (enable) { + do_rfi_flush_fixups(enabled_flush_types); + on_each_cpu(do_nothing, NULL, 1); +@@ -834,11 +831,15 @@ void rfi_flush_enable(bool enable) + rfi_flush = enable; + } + +-static void init_fallback_flush(void) ++static void __ref init_fallback_flush(void) + { + u64 l1d_size, limit; + int cpu; + ++ /* Only allocate the fallback flush area once (at boot time). */ ++ if (l1d_flush_fallback_area) ++ return; ++ + l1d_size = ppc64_caches.l1d.size; + limit = min(safe_stack_limit(), ppc64_rma_size); + +@@ -851,34 +852,23 @@ static void init_fallback_flush(void) + memset(l1d_flush_fallback_area, 0, l1d_size * 2); + + for_each_possible_cpu(cpu) { +- /* +- * The fallback flush is currently coded for 8-way +- * associativity. Different associativity is possible, but it +- * will be treated as 8-way and may not evict the lines as +- * effectively. +- * +- * 128 byte lines are mandatory. +- */ +- u64 c = l1d_size / 8; +- + paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area; +- paca[cpu].l1d_flush_congruence = c; +- paca[cpu].l1d_flush_sets = c / 128; ++ paca[cpu].l1d_flush_size = l1d_size; + } + } + +-void __init setup_rfi_flush(enum l1d_flush_type types, bool enable) ++void setup_rfi_flush(enum l1d_flush_type types, bool enable) + { + if (types & L1D_FLUSH_FALLBACK) { +- pr_info("rfi-flush: Using fallback displacement flush\n"); ++ pr_info("rfi-flush: fallback displacement flush available\n"); + init_fallback_flush(); + } + + if (types & L1D_FLUSH_ORI) +- pr_info("rfi-flush: Using ori type flush\n"); ++ pr_info("rfi-flush: ori type flush available\n"); + + if (types & L1D_FLUSH_MTTRIG) +- pr_info("rfi-flush: Using mttrig type flush\n"); ++ pr_info("rfi-flush: mttrig type flush available\n"); + + enabled_flush_types = types; + +@@ -889,13 +879,19 @@ void __init setup_rfi_flush(enum l1d_flush_type types, bool enable) + #ifdef CONFIG_DEBUG_FS + static int rfi_flush_set(void *data, u64 val) + { ++ bool enable; ++ + if (val == 1) +- rfi_flush_enable(true); ++ enable = true; + else if (val == 0) +- rfi_flush_enable(false); ++ enable = false; + else + return -EINVAL; + ++ /* Only do anything if we're changing state */ ++ if (enable != rfi_flush) ++ rfi_flush_enable(enable); ++ + return 0; + } + +@@ -914,12 +910,4 @@ static __init int rfi_flush_debugfs_init(void) + } + device_initcall(rfi_flush_debugfs_init); + #endif +- +-ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) +-{ +- if (rfi_flush) +- return sprintf(buf, "Mitigation: RFI Flush\n"); +- +- return sprintf(buf, "Vulnerable\n"); +-} + #endif /* CONFIG_PPC_BOOK3S_64 */ +diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c +index d17007451f62..ac2e5e56a9f0 100644 +--- a/arch/powerpc/kernel/traps.c ++++ b/arch/powerpc/kernel/traps.c +@@ -182,6 +182,12 @@ static void oops_end(unsigned long flags, 
struct pt_regs *regs, + } + raw_local_irq_restore(flags); + ++ /* ++ * system_reset_excption handles debugger, crash dump, panic, for 0x100 ++ */ ++ if (TRAP(regs) == 0x100) ++ return; ++ + crash_fadump(regs, "die oops"); + + if (kexec_should_crash(current)) +@@ -246,8 +252,13 @@ void die(const char *str, struct pt_regs *regs, long err) + { + unsigned long flags; + +- if (debugger(regs)) +- return; ++ /* ++ * system_reset_excption handles debugger, crash dump, panic, for 0x100 ++ */ ++ if (TRAP(regs) != 0x100) { ++ if (debugger(regs)) ++ return; ++ } + + flags = oops_begin(regs); + if (__die(str, regs, err)) +@@ -1379,6 +1390,22 @@ void facility_unavailable_exception(struct pt_regs *regs) + value = mfspr(SPRN_FSCR); + + status = value >> 56; ++ if ((hv || status >= 2) && ++ (status < ARRAY_SIZE(facility_strings)) && ++ facility_strings[status]) ++ facility = facility_strings[status]; ++ ++ /* We should not have taken this interrupt in kernel */ ++ if (!user_mode(regs)) { ++ pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n", ++ facility, status, regs->nip); ++ die("Unexpected facility unavailable exception", regs, SIGABRT); ++ } ++ ++ /* We restore the interrupt state now */ ++ if (!arch_irq_disabled_regs(regs)) ++ local_irq_enable(); ++ + if (status == FSCR_DSCR_LG) { + /* + * User is accessing the DSCR register using the problem +@@ -1445,25 +1472,11 @@ void facility_unavailable_exception(struct pt_regs *regs) + return; + } + +- if ((hv || status >= 2) && +- (status < ARRAY_SIZE(facility_strings)) && +- facility_strings[status]) +- facility = facility_strings[status]; +- +- /* We restore the interrupt state now */ +- if (!arch_irq_disabled_regs(regs)) +- local_irq_enable(); +- + pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n", + hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr); + + out: +- if (user_mode(regs)) { +- _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); +- return; +- } +- +- die("Unexpected facility unavailable exception", regs, SIGABRT); ++ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); + } + #endif + +diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S +index 307843d23682..c89ffb88fa3b 100644 +--- a/arch/powerpc/kernel/vmlinux.lds.S ++++ b/arch/powerpc/kernel/vmlinux.lds.S +@@ -133,6 +133,20 @@ SECTIONS + RO_DATA(PAGE_SIZE) + + #ifdef CONFIG_PPC64 ++ . = ALIGN(8); ++ __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) { ++ __start___stf_entry_barrier_fixup = .; ++ *(__stf_entry_barrier_fixup) ++ __stop___stf_entry_barrier_fixup = .; ++ } ++ ++ . = ALIGN(8); ++ __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) { ++ __start___stf_exit_barrier_fixup = .; ++ *(__stf_exit_barrier_fixup) ++ __stop___stf_exit_barrier_fixup = .; ++ } ++ + . 
= ALIGN(8); + __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) { + __start___rfi_flush_fixup = .; +diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c +index d0c0b8443dcf..762a899e85a4 100644 +--- a/arch/powerpc/lib/feature-fixups.c ++++ b/arch/powerpc/lib/feature-fixups.c +@@ -23,6 +23,7 @@ + #include <asm/page.h> + #include <asm/sections.h> + #include <asm/setup.h> ++#include <asm/security_features.h> + #include <asm/firmware.h> + + struct fixup_entry { +@@ -117,6 +118,120 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) + } + + #ifdef CONFIG_PPC_BOOK3S_64 ++void do_stf_entry_barrier_fixups(enum stf_barrier_type types) ++{ ++ unsigned int instrs[3], *dest; ++ long *start, *end; ++ int i; ++ ++ start = PTRRELOC(&__start___stf_entry_barrier_fixup), ++ end = PTRRELOC(&__stop___stf_entry_barrier_fixup); ++ ++ instrs[0] = 0x60000000; /* nop */ ++ instrs[1] = 0x60000000; /* nop */ ++ instrs[2] = 0x60000000; /* nop */ ++ ++ i = 0; ++ if (types & STF_BARRIER_FALLBACK) { ++ instrs[i++] = 0x7d4802a6; /* mflr r10 */ ++ instrs[i++] = 0x60000000; /* branch patched below */ ++ instrs[i++] = 0x7d4803a6; /* mtlr r10 */ ++ } else if (types & STF_BARRIER_EIEIO) { ++ instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */ ++ } else if (types & STF_BARRIER_SYNC_ORI) { ++ instrs[i++] = 0x7c0004ac; /* hwsync */ ++ instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */ ++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ ++ } ++ ++ for (i = 0; start < end; start++, i++) { ++ dest = (void *)start + *start; ++ ++ pr_devel("patching dest %lx\n", (unsigned long)dest); ++ ++ patch_instruction(dest, instrs[0]); ++ ++ if (types & STF_BARRIER_FALLBACK) ++ patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback, ++ BRANCH_SET_LINK); ++ else ++ patch_instruction(dest + 1, instrs[1]); ++ ++ patch_instruction(dest + 2, instrs[2]); ++ } ++ ++ printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i, ++ (types == STF_BARRIER_NONE) ? "no" : ++ (types == STF_BARRIER_FALLBACK) ? "fallback" : ++ (types == STF_BARRIER_EIEIO) ? "eieio" : ++ (types == (STF_BARRIER_SYNC_ORI)) ? 
"hwsync" ++ : "unknown"); ++} ++ ++void do_stf_exit_barrier_fixups(enum stf_barrier_type types) ++{ ++ unsigned int instrs[6], *dest; ++ long *start, *end; ++ int i; ++ ++ start = PTRRELOC(&__start___stf_exit_barrier_fixup), ++ end = PTRRELOC(&__stop___stf_exit_barrier_fixup); ++ ++ instrs[0] = 0x60000000; /* nop */ ++ instrs[1] = 0x60000000; /* nop */ ++ instrs[2] = 0x60000000; /* nop */ ++ instrs[3] = 0x60000000; /* nop */ ++ instrs[4] = 0x60000000; /* nop */ ++ instrs[5] = 0x60000000; /* nop */ ++ ++ i = 0; ++ if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) { ++ if (cpu_has_feature(CPU_FTR_HVMODE)) { ++ instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */ ++ instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */ ++ } else { ++ instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */ ++ instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */ ++ } ++ instrs[i++] = 0x7c0004ac; /* hwsync */ ++ instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */ ++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ ++ if (cpu_has_feature(CPU_FTR_HVMODE)) { ++ instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */ ++ } else { ++ instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */ ++ } ++ } else if (types & STF_BARRIER_EIEIO) { ++ instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */ ++ } ++ ++ for (i = 0; start < end; start++, i++) { ++ dest = (void *)start + *start; ++ ++ pr_devel("patching dest %lx\n", (unsigned long)dest); ++ ++ patch_instruction(dest, instrs[0]); ++ patch_instruction(dest + 1, instrs[1]); ++ patch_instruction(dest + 2, instrs[2]); ++ patch_instruction(dest + 3, instrs[3]); ++ patch_instruction(dest + 4, instrs[4]); ++ patch_instruction(dest + 5, instrs[5]); ++ } ++ printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i, ++ (types == STF_BARRIER_NONE) ? "no" : ++ (types == STF_BARRIER_FALLBACK) ? "fallback" : ++ (types == STF_BARRIER_EIEIO) ? "eieio" : ++ (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync" ++ : "unknown"); ++} ++ ++ ++void do_stf_barrier_fixups(enum stf_barrier_type types) ++{ ++ do_stf_entry_barrier_fixups(types); ++ do_stf_exit_barrier_fixups(types); ++} ++ + void do_rfi_flush_fixups(enum l1d_flush_type types) + { + unsigned int instrs[3], *dest; +@@ -153,7 +268,14 @@ void do_rfi_flush_fixups(enum l1d_flush_type types) + patch_instruction(dest + 2, instrs[2]); + } + +- printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i); ++ printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i, ++ (types == L1D_FLUSH_NONE) ? "no" : ++ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" : ++ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) ++ ? "ori+mttrig type" ++ : "ori type" : ++ (types & L1D_FLUSH_MTTRIG) ? 
"mttrig type" ++ : "unknown"); + } + #endif /* CONFIG_PPC_BOOK3S_64 */ + +diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c +index f9941b3b5770..f760494ecd66 100644 +--- a/arch/powerpc/net/bpf_jit_comp.c ++++ b/arch/powerpc/net/bpf_jit_comp.c +@@ -329,6 +329,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); + PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); + break; ++ case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */ ++ PPC_LWZ_OFFS(r_A, r_skb, K); ++ break; + case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */ + PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len)); + break; +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c +index fce545774d50..b7a6044161e8 100644 +--- a/arch/powerpc/perf/core-book3s.c ++++ b/arch/powerpc/perf/core-book3s.c +@@ -457,6 +457,16 @@ static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) + /* invalid entry */ + continue; + ++ /* ++ * BHRB rolling buffer could very much contain the kernel ++ * addresses at this point. Check the privileges before ++ * exporting it to userspace (avoid exposure of regions ++ * where we could have speculative execution) ++ */ ++ if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) && ++ is_kernel_addr(addr)) ++ continue; ++ + /* Branches are read most recent first (ie. mfbhrb 0 is + * the most recent branch). + * There are two types of valid entries: +@@ -1226,6 +1236,7 @@ static void power_pmu_disable(struct pmu *pmu) + */ + write_mmcr0(cpuhw, val); + mb(); ++ isync(); + + /* + * Disable instruction sampling if it was enabled +@@ -1234,12 +1245,26 @@ static void power_pmu_disable(struct pmu *pmu) + mtspr(SPRN_MMCRA, + cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); + mb(); ++ isync(); + } + + cpuhw->disabled = 1; + cpuhw->n_added = 0; + + ebb_switch_out(mmcr0); ++ ++#ifdef CONFIG_PPC64 ++ /* ++ * These are readable by userspace, may contain kernel ++ * addresses and are not switched by context switch, so clear ++ * them now to avoid leaking anything to userspace in general ++ * including to another process. ++ */ ++ if (ppmu->flags & PPMU_ARCH_207S) { ++ mtspr(SPRN_SDAR, 0); ++ mtspr(SPRN_SIAR, 0); ++ } ++#endif + } + + local_irq_restore(flags); +diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c +index 4043109f4051..63f007f2de7e 100644 +--- a/arch/powerpc/platforms/powernv/npu-dma.c ++++ b/arch/powerpc/platforms/powernv/npu-dma.c +@@ -413,6 +413,11 @@ struct npu_context { + void *priv; + }; + ++struct mmio_atsd_reg { ++ struct npu *npu; ++ int reg; ++}; ++ + /* + * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC + * if none are available. 
+@@ -422,7 +427,7 @@ static int get_mmio_atsd_reg(struct npu *npu) + int i; + + for (i = 0; i < npu->mmio_atsd_count; i++) { +- if (!test_and_set_bit(i, &npu->mmio_atsd_usage)) ++ if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage)) + return i; + } + +@@ -431,86 +436,90 @@ static int get_mmio_atsd_reg(struct npu *npu) + + static void put_mmio_atsd_reg(struct npu *npu, int reg) + { +- clear_bit(reg, &npu->mmio_atsd_usage); ++ clear_bit_unlock(reg, &npu->mmio_atsd_usage); + } + + /* MMIO ATSD register offsets */ + #define XTS_ATSD_AVA 1 + #define XTS_ATSD_STAT 2 + +-static int mmio_launch_invalidate(struct npu *npu, unsigned long launch, +- unsigned long va) ++static void mmio_launch_invalidate(struct mmio_atsd_reg *mmio_atsd_reg, ++ unsigned long launch, unsigned long va) + { +- int mmio_atsd_reg; +- +- do { +- mmio_atsd_reg = get_mmio_atsd_reg(npu); +- cpu_relax(); +- } while (mmio_atsd_reg < 0); ++ struct npu *npu = mmio_atsd_reg->npu; ++ int reg = mmio_atsd_reg->reg; + + __raw_writeq(cpu_to_be64(va), +- npu->mmio_atsd_regs[mmio_atsd_reg] + XTS_ATSD_AVA); ++ npu->mmio_atsd_regs[reg] + XTS_ATSD_AVA); + eieio(); +- __raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[mmio_atsd_reg]); +- +- return mmio_atsd_reg; ++ __raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[reg]); + } + +-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush) ++static void mmio_invalidate_pid(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], ++ unsigned long pid, bool flush) + { ++ int i; + unsigned long launch; + +- /* IS set to invalidate matching PID */ +- launch = PPC_BIT(12); ++ for (i = 0; i <= max_npu2_index; i++) { ++ if (mmio_atsd_reg[i].reg < 0) ++ continue; ++ ++ /* IS set to invalidate matching PID */ ++ launch = PPC_BIT(12); + +- /* PRS set to process-scoped */ +- launch |= PPC_BIT(13); ++ /* PRS set to process-scoped */ ++ launch |= PPC_BIT(13); + +- /* AP */ +- launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); ++ /* AP */ ++ launch |= (u64) ++ mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); + +- /* PID */ +- launch |= pid << PPC_BITLSHIFT(38); ++ /* PID */ ++ launch |= pid << PPC_BITLSHIFT(38); + +- /* No flush */ +- launch |= !flush << PPC_BITLSHIFT(39); ++ /* No flush */ ++ launch |= !flush << PPC_BITLSHIFT(39); + +- /* Invalidating the entire process doesn't use a va */ +- return mmio_launch_invalidate(npu, launch, 0); ++ /* Invalidating the entire process doesn't use a va */ ++ mmio_launch_invalidate(&mmio_atsd_reg[i], launch, 0); ++ } + } + +-static int mmio_invalidate_va(struct npu *npu, unsigned long va, +- unsigned long pid, bool flush) ++static void mmio_invalidate_va(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], ++ unsigned long va, unsigned long pid, bool flush) + { ++ int i; + unsigned long launch; + +- /* IS set to invalidate target VA */ +- launch = 0; ++ for (i = 0; i <= max_npu2_index; i++) { ++ if (mmio_atsd_reg[i].reg < 0) ++ continue; ++ ++ /* IS set to invalidate target VA */ ++ launch = 0; + +- /* PRS set to process scoped */ +- launch |= PPC_BIT(13); ++ /* PRS set to process scoped */ ++ launch |= PPC_BIT(13); + +- /* AP */ +- launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); ++ /* AP */ ++ launch |= (u64) ++ mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); + +- /* PID */ +- launch |= pid << PPC_BITLSHIFT(38); ++ /* PID */ ++ launch |= pid << PPC_BITLSHIFT(38); + +- /* No flush */ +- launch |= !flush << PPC_BITLSHIFT(39); ++ /* No flush */ ++ launch |= !flush << PPC_BITLSHIFT(39); + +- return 
mmio_launch_invalidate(npu, launch, va); ++ mmio_launch_invalidate(&mmio_atsd_reg[i], launch, va); ++ } + } + + #define mn_to_npu_context(x) container_of(x, struct npu_context, mn) + +-struct mmio_atsd_reg { +- struct npu *npu; +- int reg; +-}; +- + static void mmio_invalidate_wait( +- struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush) ++ struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]) + { + struct npu *npu; + int i, reg; +@@ -525,16 +534,67 @@ static void mmio_invalidate_wait( + reg = mmio_atsd_reg[i].reg; + while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT)) + cpu_relax(); ++ } ++} ++ ++/* ++ * Acquires all the address translation shootdown (ATSD) registers required to ++ * launch an ATSD on all links this npu_context is active on. ++ */ ++static void acquire_atsd_reg(struct npu_context *npu_context, ++ struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]) ++{ ++ int i, j; ++ struct npu *npu; ++ struct pci_dev *npdev; ++ struct pnv_phb *nphb; + +- put_mmio_atsd_reg(npu, reg); ++ for (i = 0; i <= max_npu2_index; i++) { ++ mmio_atsd_reg[i].reg = -1; ++ for (j = 0; j < NV_MAX_LINKS; j++) { ++ /* ++ * There are no ordering requirements with respect to ++ * the setup of struct npu_context, but to ensure ++ * consistent behaviour we need to ensure npdev[][] is ++ * only read once. ++ */ ++ npdev = READ_ONCE(npu_context->npdev[i][j]); ++ if (!npdev) ++ continue; + ++ nphb = pci_bus_to_host(npdev->bus)->private_data; ++ npu = &nphb->npu; ++ mmio_atsd_reg[i].npu = npu; ++ mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu); ++ while (mmio_atsd_reg[i].reg < 0) { ++ mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu); ++ cpu_relax(); ++ } ++ break; ++ } ++ } ++} ++ ++/* ++ * Release previously acquired ATSD registers. To avoid deadlocks the registers ++ * must be released in the same order they were acquired above in ++ * acquire_atsd_reg. ++ */ ++static void release_atsd_reg(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]) ++{ ++ int i; ++ ++ for (i = 0; i <= max_npu2_index; i++) { + /* +- * The GPU requires two flush ATSDs to ensure all entries have +- * been flushed. We use PID 0 as it will never be used for a +- * process on the GPU. ++ * We can't rely on npu_context->npdev[][] being the same here ++ * as when acquire_atsd_reg() was called, hence we use the ++ * values stored in mmio_atsd_reg during the acquire phase ++ * rather than re-reading npdev[][]. + */ +- if (flush) +- mmio_invalidate_pid(npu, 0, true); ++ if (mmio_atsd_reg[i].reg < 0) ++ continue; ++ ++ put_mmio_atsd_reg(mmio_atsd_reg[i].npu, mmio_atsd_reg[i].reg); + } + } + +@@ -545,10 +605,6 @@ static void mmio_invalidate_wait( + static void mmio_invalidate(struct npu_context *npu_context, int va, + unsigned long address, bool flush) + { +- int i, j; +- struct npu *npu; +- struct pnv_phb *nphb; +- struct pci_dev *npdev; + struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]; + unsigned long pid = npu_context->mm->context.id; + +@@ -562,37 +618,25 @@ static void mmio_invalidate(struct npu_context *npu_context, int va, + * Loop over all the NPUs this process is active on and launch + * an invalidate. 
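/* [Editorial sketch] After this refactor the invalidate path has a
 * clear four-step shape (names as in this file; the extra PID-0
 * flush rounds taken when "flush" is set follow in a later hunk):
 */
acquire_atsd_reg(npu_context, mmio_atsd_reg);   /* one reg per NPU      */
if (va)
        mmio_invalidate_va(mmio_atsd_reg, address, pid, flush);
else
        mmio_invalidate_pid(mmio_atsd_reg, pid, flush);
mmio_invalidate_wait(mmio_atsd_reg);            /* poll XTS_ATSD_STAT   */
release_atsd_reg(mmio_atsd_reg);                /* clear_bit_unlock all */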
+ */ +- for (i = 0; i <= max_npu2_index; i++) { +- mmio_atsd_reg[i].reg = -1; +- for (j = 0; j < NV_MAX_LINKS; j++) { +- npdev = npu_context->npdev[i][j]; +- if (!npdev) +- continue; +- +- nphb = pci_bus_to_host(npdev->bus)->private_data; +- npu = &nphb->npu; +- mmio_atsd_reg[i].npu = npu; +- +- if (va) +- mmio_atsd_reg[i].reg = +- mmio_invalidate_va(npu, address, pid, +- flush); +- else +- mmio_atsd_reg[i].reg = +- mmio_invalidate_pid(npu, pid, flush); +- +- /* +- * The NPU hardware forwards the shootdown to all GPUs +- * so we only have to launch one shootdown per NPU. +- */ +- break; +- } ++ acquire_atsd_reg(npu_context, mmio_atsd_reg); ++ if (va) ++ mmio_invalidate_va(mmio_atsd_reg, address, pid, flush); ++ else ++ mmio_invalidate_pid(mmio_atsd_reg, pid, flush); ++ ++ mmio_invalidate_wait(mmio_atsd_reg); ++ if (flush) { ++ /* ++ * The GPU requires two flush ATSDs to ensure all entries have ++ * been flushed. We use PID 0 as it will never be used for a ++ * process on the GPU. ++ */ ++ mmio_invalidate_pid(mmio_atsd_reg, 0, true); ++ mmio_invalidate_wait(mmio_atsd_reg); ++ mmio_invalidate_pid(mmio_atsd_reg, 0, true); ++ mmio_invalidate_wait(mmio_atsd_reg); + } +- +- mmio_invalidate_wait(mmio_atsd_reg, flush); +- if (flush) +- /* Wait for the flush to complete */ +- mmio_invalidate_wait(mmio_atsd_reg, false); ++ release_atsd_reg(mmio_atsd_reg); + } + + static void pnv_npu2_mn_release(struct mmu_notifier *mn, +@@ -735,7 +779,16 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, + if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index", + &nvlink_index))) + return ERR_PTR(-ENODEV); +- npu_context->npdev[npu->index][nvlink_index] = npdev; ++ ++ /* ++ * npdev is a pci_dev pointer setup by the PCI code. We assign it to ++ * npdev[][] to indicate to the mmu notifiers that an invalidation ++ * should also be sent over this nvlink. The notifiers don't use any ++ * other fields in npu_context, so we just need to ensure that when they ++ * deference npu_context->npdev[][] it is either a valid pointer or ++ * NULL. 
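/* [Editorial sketch] The WRITE_ONCE() below pairs with the
 * READ_ONCE() added in acquire_atsd_reg(): one publish, one consume
 * per access, so notifier threads observe either NULL or a fully
 * valid pointer, never a re-read or torn value
 * (launch_invalidate() below is a hypothetical consumer):
 */
WRITE_ONCE(npu_context->npdev[i][j], npdev);              /* publish */
struct pci_dev *p = READ_ONCE(npu_context->npdev[i][j]);  /* consume */
if (p)
        launch_invalidate(p);  /* p stays stable for this invalidation */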
++ */ ++ WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev); + + return npu_context; + } +@@ -774,7 +827,7 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context, + if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index", + &nvlink_index))) + return; +- npu_context->npdev[npu->index][nvlink_index] = NULL; ++ WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL); + opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id, + PCI_DEVID(gpdev->bus->number, gpdev->devfn)); + kref_put(&npu_context->kref, pnv_npu2_release_context); +diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c +index 7966a314d93a..fd143c934768 100644 +--- a/arch/powerpc/platforms/powernv/setup.c ++++ b/arch/powerpc/platforms/powernv/setup.c +@@ -37,53 +37,92 @@ + #include <asm/kexec.h> + #include <asm/smp.h> + #include <asm/setup.h> ++#include <asm/security_features.h> + + #include "powernv.h" + ++ ++static bool fw_feature_is(const char *state, const char *name, ++ struct device_node *fw_features) ++{ ++ struct device_node *np; ++ bool rc = false; ++ ++ np = of_get_child_by_name(fw_features, name); ++ if (np) { ++ rc = of_property_read_bool(np, state); ++ of_node_put(np); ++ } ++ ++ return rc; ++} ++ ++static void init_fw_feat_flags(struct device_node *np) ++{ ++ if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np)) ++ security_ftr_set(SEC_FTR_SPEC_BAR_ORI31); ++ ++ if (fw_feature_is("enabled", "fw-bcctrl-serialized", np)) ++ security_ftr_set(SEC_FTR_BCCTRL_SERIALISED); ++ ++ if (fw_feature_is("enabled", "inst-l1d-flush-ori30,30,0", np)) ++ security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30); ++ ++ if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np)) ++ security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2); ++ ++ if (fw_feature_is("enabled", "fw-l1d-thread-split", np)) ++ security_ftr_set(SEC_FTR_L1D_THREAD_PRIV); ++ ++ if (fw_feature_is("enabled", "fw-count-cache-disabled", np)) ++ security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED); ++ ++ /* ++ * The features below are enabled by default, so we instead look to see ++ * if firmware has *disabled* them, and clear them if so. 
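/* [Editorial sketch] Both firmware parsers added by this patch
 * (init_fw_feat_flags() here, init_cpu_char_feature_flags() on
 * pseries) use the same two-phase pattern; fw_reports() and the
 * SEC_FTR_X/SEC_FTR_Y bits below are placeholders:
 */
powerpc_security_features = SEC_FTR_DEFAULT;
if (fw_reports("enabled", feature_x))
        security_ftr_set(SEC_FTR_X);    /* default-off bit: opt in  */
if (fw_reports("disabled", feature_y))
        security_ftr_clear(SEC_FTR_Y);  /* default-on bit: opt out  */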
++ */ ++ if (fw_feature_is("disabled", "speculation-policy-favor-security", np)) ++ security_ftr_clear(SEC_FTR_FAVOUR_SECURITY); ++ ++ if (fw_feature_is("disabled", "needs-l1d-flush-msr-pr-0-to-1", np)) ++ security_ftr_clear(SEC_FTR_L1D_FLUSH_PR); ++ ++ if (fw_feature_is("disabled", "needs-l1d-flush-msr-hv-1-to-0", np)) ++ security_ftr_clear(SEC_FTR_L1D_FLUSH_HV); ++ ++ if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np)) ++ security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR); ++} ++ + static void pnv_setup_rfi_flush(void) + { + struct device_node *np, *fw_features; + enum l1d_flush_type type; +- int enable; ++ bool enable; + + /* Default to fallback in case fw-features are not available */ + type = L1D_FLUSH_FALLBACK; +- enable = 1; + + np = of_find_node_by_name(NULL, "ibm,opal"); + fw_features = of_get_child_by_name(np, "fw-features"); + of_node_put(np); + + if (fw_features) { +- np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2"); +- if (np && of_property_read_bool(np, "enabled")) +- type = L1D_FLUSH_MTTRIG; ++ init_fw_feat_flags(fw_features); ++ of_node_put(fw_features); + +- of_node_put(np); ++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2)) ++ type = L1D_FLUSH_MTTRIG; + +- np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0"); +- if (np && of_property_read_bool(np, "enabled")) ++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30)) + type = L1D_FLUSH_ORI; +- +- of_node_put(np); +- +- /* Enable unless firmware says NOT to */ +- enable = 2; +- np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0"); +- if (np && of_property_read_bool(np, "disabled")) +- enable--; +- +- of_node_put(np); +- +- np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1"); +- if (np && of_property_read_bool(np, "disabled")) +- enable--; +- +- of_node_put(np); +- of_node_put(fw_features); + } + +- setup_rfi_flush(type, enable > 0); ++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \ ++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \ ++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV)); ++ ++ setup_rfi_flush(type, enable); + } + + static void __init pnv_setup_arch(void) +@@ -91,6 +130,7 @@ static void __init pnv_setup_arch(void) + set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); + + pnv_setup_rfi_flush(); ++ setup_stf_barrier(); + + /* Initialize SMP */ + pnv_smp_init(); +diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c +index f7042ad492ba..fbea7db043fa 100644 +--- a/arch/powerpc/platforms/pseries/mobility.c ++++ b/arch/powerpc/platforms/pseries/mobility.c +@@ -348,6 +348,9 @@ void post_mobility_fixup(void) + printk(KERN_ERR "Post-mobility device tree update " + "failed: %d\n", rc); + ++ /* Possibly switch to a new RFI flush type */ ++ pseries_setup_rfi_flush(); ++ + return; + } + +diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h +index 1ae1d9f4dbe9..27cdcb69fd18 100644 +--- a/arch/powerpc/platforms/pseries/pseries.h ++++ b/arch/powerpc/platforms/pseries/pseries.h +@@ -100,4 +100,6 @@ static inline unsigned long cmo_get_page_size(void) + + int dlpar_workqueue_init(void); + ++void pseries_setup_rfi_flush(void); ++ + #endif /* _PSERIES_PSERIES_H */ +diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c +index ae4f596273b5..45f814041448 100644 +--- a/arch/powerpc/platforms/pseries/setup.c ++++ b/arch/powerpc/platforms/pseries/setup.c +@@ -68,6 +68,7 @@ + #include <asm/plpar_wrappers.h> + #include 
<asm/kexec.h> + #include <asm/isa-bridge.h> ++#include <asm/security_features.h> + + #include "pseries.h" + +@@ -459,35 +460,78 @@ static void __init find_and_init_phbs(void) + of_pci_check_probe_only(); + } + +-static void pseries_setup_rfi_flush(void) ++static void init_cpu_char_feature_flags(struct h_cpu_char_result *result) ++{ ++ /* ++ * The features below are disabled by default, so we instead look to see ++ * if firmware has *enabled* them, and set them if so. ++ */ ++ if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31) ++ security_ftr_set(SEC_FTR_SPEC_BAR_ORI31); ++ ++ if (result->character & H_CPU_CHAR_BCCTRL_SERIALISED) ++ security_ftr_set(SEC_FTR_BCCTRL_SERIALISED); ++ ++ if (result->character & H_CPU_CHAR_L1D_FLUSH_ORI30) ++ security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30); ++ ++ if (result->character & H_CPU_CHAR_L1D_FLUSH_TRIG2) ++ security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2); ++ ++ if (result->character & H_CPU_CHAR_L1D_THREAD_PRIV) ++ security_ftr_set(SEC_FTR_L1D_THREAD_PRIV); ++ ++ if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED) ++ security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED); ++ ++ /* ++ * The features below are enabled by default, so we instead look to see ++ * if firmware has *disabled* them, and clear them if so. ++ */ ++ if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) ++ security_ftr_clear(SEC_FTR_FAVOUR_SECURITY); ++ ++ if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) ++ security_ftr_clear(SEC_FTR_L1D_FLUSH_PR); ++ ++ if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR)) ++ security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR); ++} ++ ++void pseries_setup_rfi_flush(void) + { + struct h_cpu_char_result result; + enum l1d_flush_type types; + bool enable; + long rc; + +- /* Enable by default */ +- enable = true; ++ /* ++ * Set features to the defaults assumed by init_cpu_char_feature_flags() ++ * so it can set/clear again any features that might have changed after ++ * migration, and in case the hypercall fails and it is not even called. ++ */ ++ powerpc_security_features = SEC_FTR_DEFAULT; + + rc = plpar_get_cpu_characteristics(&result); +- if (rc == H_SUCCESS) { +- types = L1D_FLUSH_NONE; ++ if (rc == H_SUCCESS) ++ init_cpu_char_feature_flags(&result); + +- if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2) +- types |= L1D_FLUSH_MTTRIG; +- if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30) +- types |= L1D_FLUSH_ORI; ++ /* ++ * We're the guest so this doesn't apply to us, clear it to simplify ++ * handling of it elsewhere. 
++ */ ++ security_ftr_clear(SEC_FTR_L1D_FLUSH_HV); + +- /* Use fallback if nothing set in hcall */ +- if (types == L1D_FLUSH_NONE) +- types = L1D_FLUSH_FALLBACK; ++ types = L1D_FLUSH_FALLBACK; + +- if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) +- enable = false; +- } else { +- /* Default to fallback if case hcall is not available */ +- types = L1D_FLUSH_FALLBACK; +- } ++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2)) ++ types |= L1D_FLUSH_MTTRIG; ++ ++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30)) ++ types |= L1D_FLUSH_ORI; ++ ++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \ ++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR); + + setup_rfi_flush(types, enable); + } +@@ -510,6 +554,7 @@ static void __init pSeries_setup_arch(void) + fwnmi_init(); + + pseries_setup_rfi_flush(); ++ setup_stf_barrier(); + + /* By default, only probe PCI (can be overridden by rtas_pci) */ + pci_add_flags(PCI_PROBE_ONLY); +diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c +index ead3e2549ebf..205dec18d6b5 100644 +--- a/arch/powerpc/sysdev/mpic.c ++++ b/arch/powerpc/sysdev/mpic.c +@@ -626,7 +626,7 @@ static inline u32 mpic_physmask(u32 cpumask) + int i; + u32 mask = 0; + +- for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1) ++ for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1) + mask |= (cpumask & 1) << get_hard_smp_processor_id(i); + return mask; + } +diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c +index 2c8b325591cc..a5938fadd031 100644 +--- a/arch/powerpc/xmon/xmon.c ++++ b/arch/powerpc/xmon/xmon.c +@@ -2348,6 +2348,8 @@ static void dump_one_paca(int cpu) + DUMP(p, slb_cache_ptr, "x"); + for (i = 0; i < SLB_CACHE_ENTRIES; i++) + printf(" slb_cache[%d]: = 0x%016lx\n", i, p->slb_cache[i]); ++ ++ DUMP(p, rfi_flush_fallback_area, "px"); + #endif + DUMP(p, dscr_default, "llx"); + #ifdef CONFIG_PPC_BOOK3E +diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c +index eb7b530d1783..4f1f5fc8139d 100644 +--- a/arch/s390/kvm/vsie.c ++++ b/arch/s390/kvm/vsie.c +@@ -590,7 +590,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) + + gpa = READ_ONCE(scb_o->itdba) & ~0xffUL; + if (gpa && (scb_s->ecb & ECB_TE)) { +- if (!(gpa & ~0x1fffU)) { ++ if (!(gpa & ~0x1fffUL)) { + rc = set_validity_icpt(scb_s, 0x0080U); + goto unpin; + } +diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S +index c001f782c5f1..28cc61216b64 100644 +--- a/arch/sh/kernel/entry-common.S ++++ b/arch/sh/kernel/entry-common.S +@@ -255,7 +255,7 @@ debug_trap: + mov.l @r8, r8 + jsr @r8 + nop +- bra __restore_all ++ bra ret_from_exception + nop + CFI_ENDPROC + +diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h +index abad97edf736..28db058d471b 100644 +--- a/arch/sparc/include/asm/atomic_64.h ++++ b/arch/sparc/include/asm/atomic_64.h +@@ -83,7 +83,11 @@ ATOMIC_OPS(xor) + #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0) + + #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) +-#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) ++ ++static inline int atomic_xchg(atomic_t *v, int new) ++{ ++ return xchg(&v->counter, new); ++} + + static inline int __atomic_add_unless(atomic_t *v, int a, int u) + { +diff --git a/arch/sparc/include/asm/bug.h b/arch/sparc/include/asm/bug.h +index 6f17528356b2..ea53e418f6c0 100644 +--- a/arch/sparc/include/asm/bug.h ++++ b/arch/sparc/include/asm/bug.h +@@ -9,10 +9,14 @@ + void do_BUG(const char *file, int line); + #define 
BUG() do { \ + do_BUG(__FILE__, __LINE__); \ ++ barrier_before_unreachable(); \ + __builtin_trap(); \ + } while (0) + #else +-#define BUG() __builtin_trap() ++#define BUG() do { \ ++ barrier_before_unreachable(); \ ++ __builtin_trap(); \ ++} while (0) + #endif + + #define HAVE_ARCH_BUG +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c +index 011a47b4587c..717c9219d00e 100644 +--- a/arch/x86/events/core.c ++++ b/arch/x86/events/core.c +@@ -1162,16 +1162,13 @@ int x86_perf_event_set_period(struct perf_event *event) + + per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; + +- if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) || +- local64_read(&hwc->prev_count) != (u64)-left) { +- /* +- * The hw event starts counting from this event offset, +- * mark it to be able to extra future deltas: +- */ +- local64_set(&hwc->prev_count, (u64)-left); ++ /* ++ * The hw event starts counting from this event offset, ++ * mark it to be able to extra future deltas: ++ */ ++ local64_set(&hwc->prev_count, (u64)-left); + +- wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); +- } ++ wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); + + /* + * Due to erratum on certan cpu we need +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c +index 9b18a227fff7..6965ee8c4b8a 100644 +--- a/arch/x86/events/intel/core.c ++++ b/arch/x86/events/intel/core.c +@@ -2201,9 +2201,15 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) + int bit, loops; + u64 status; + int handled; ++ int pmu_enabled; + + cpuc = this_cpu_ptr(&cpu_hw_events); + ++ /* ++ * Save the PMU state. ++ * It needs to be restored when leaving the handler. ++ */ ++ pmu_enabled = cpuc->enabled; + /* + * No known reason to not always do late ACK, + * but just in case do it opt-in. +@@ -2211,6 +2217,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) + if (!x86_pmu.late_ack) + apic_write(APIC_LVTPC, APIC_DM_NMI); + intel_bts_disable_local(); ++ cpuc->enabled = 0; + __intel_pmu_disable_all(); + handled = intel_pmu_drain_bts_buffer(); + handled += intel_bts_interrupt(); +@@ -2320,7 +2327,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) + + done: + /* Only restore PMU state when it's active. See x86_pmu_disable(). */ +- if (cpuc->enabled) ++ cpuc->enabled = pmu_enabled; ++ if (pmu_enabled) + __intel_pmu_enable_all(0, true); + intel_bts_enable_local(); + +@@ -3188,7 +3196,7 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, + * Therefore the effective (average) period matches the requested period, + * despite coarser hardware granularity. 
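Note the prototype change at the top of the next hunk: x86_pmu.limit_period now takes and returns u64. Sample periods are 64-bit (hwc->sample_period), so funnelling one through an unsigned-int callback silently drops the high bits. A small illustration with hypothetical stand-in helpers, not the kernel code:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the old and new callback types. */
    static unsigned limit_period_old(unsigned left) { return left; }
    static uint64_t limit_period_new(uint64_t left) { return left; }

    int main(void)
    {
    	uint64_t period = 1ULL << 33;	/* a perfectly valid sample period */

    	/* The old prototype truncates it to 0; the new one keeps it. */
    	printf("old: %llu\n", (unsigned long long)limit_period_old(period));
    	printf("new: %llu\n", (unsigned long long)limit_period_new(period));
    	return 0;
    }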
+ */ +-static unsigned bdw_limit_period(struct perf_event *event, unsigned left) ++static u64 bdw_limit_period(struct perf_event *event, u64 left) + { + if ((event->hw.config & INTEL_ARCH_EVENT_MASK) == + X86_CONFIG(.event=0xc0, .umask=0x01)) { +diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c +index 8156e47da7ba..10b39d44981c 100644 +--- a/arch/x86/events/intel/ds.c ++++ b/arch/x86/events/intel/ds.c +@@ -1150,6 +1150,7 @@ static void setup_pebs_sample_data(struct perf_event *event, + if (pebs == NULL) + return; + ++ regs->flags &= ~PERF_EFLAGS_EXACT; + sample_type = event->attr.sample_type; + dsrc = sample_type & PERF_SAMPLE_DATA_SRC; + +@@ -1194,7 +1195,6 @@ static void setup_pebs_sample_data(struct perf_event *event, + */ + *regs = *iregs; + regs->flags = pebs->flags; +- set_linear_ip(regs, pebs->ip); + + if (sample_type & PERF_SAMPLE_REGS_INTR) { + regs->ax = pebs->ax; +@@ -1230,13 +1230,22 @@ static void setup_pebs_sample_data(struct perf_event *event, + #endif + } + +- if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) { +- regs->ip = pebs->real_ip; +- regs->flags |= PERF_EFLAGS_EXACT; +- } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs)) +- regs->flags |= PERF_EFLAGS_EXACT; +- else +- regs->flags &= ~PERF_EFLAGS_EXACT; ++ if (event->attr.precise_ip > 1) { ++ /* Haswell and later have the eventing IP, so use it: */ ++ if (x86_pmu.intel_cap.pebs_format >= 2) { ++ set_linear_ip(regs, pebs->real_ip); ++ regs->flags |= PERF_EFLAGS_EXACT; ++ } else { ++ /* Otherwise use PEBS off-by-1 IP: */ ++ set_linear_ip(regs, pebs->ip); ++ ++ /* ... and try to fix it up using the LBR entries: */ ++ if (intel_pmu_pebs_fixup_ip(regs)) ++ regs->flags |= PERF_EFLAGS_EXACT; ++ } ++ } else ++ set_linear_ip(regs, pebs->ip); ++ + + if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) && + x86_pmu.intel_cap.pebs_format >= 1) +@@ -1303,17 +1312,84 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit) + return NULL; + } + ++/* ++ * Special variant of intel_pmu_save_and_restart() for auto-reload. ++ */ ++static int ++intel_pmu_save_and_restart_reload(struct perf_event *event, int count) ++{ ++ struct hw_perf_event *hwc = &event->hw; ++ int shift = 64 - x86_pmu.cntval_bits; ++ u64 period = hwc->sample_period; ++ u64 prev_raw_count, new_raw_count; ++ s64 new, old; ++ ++ WARN_ON(!period); ++ ++ /* ++ * drain_pebs() only happens when the PMU is disabled. ++ */ ++ WARN_ON(this_cpu_read(cpu_hw_events.enabled)); ++ ++ prev_raw_count = local64_read(&hwc->prev_count); ++ rdpmcl(hwc->event_base_rdpmc, new_raw_count); ++ local64_set(&hwc->prev_count, new_raw_count); ++ ++ /* ++ * Since the counter increments a negative counter value and ++ * overflows on the sign switch, giving the interval: ++ * ++ * [-period, 0] ++ * ++ * the difference between two consequtive reads is: ++ * ++ * A) value2 - value1; ++ * when no overflows have happened in between, ++ * ++ * B) (0 - value1) + (value2 - (-period)); ++ * when one overflow happened in between, ++ * ++ * C) (0 - value1) + (n - 1) * (period) + (value2 - (-period)); ++ * when @n overflows happened in between. ++ * ++ * Here A) is the obvious difference, B) is the extension to the ++ * discrete interval, where the first term is to the top of the ++ * interval and the second term is from the bottom of the next ++ * interval and C) the extension to multiple intervals, where the ++ * middle term is the whole intervals covered. 
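As a worked instance of the three cases: take period = 100 (so the counter runs over [-100, 0]), value1 = -30, value2 = -80, and n = 2 overflows in between. Case C gives (0 - (-30)) + (2 - 1) * 100 + ((-80) - (-100)) = 30 + 100 + 20 = 150 events, and the reduction stated next agrees: value2 - value1 + n * period = -80 - (-30) + 2 * 100 = 150.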
++ * ++ * An equivalent of C, by reduction, is: ++ * ++ * value2 - value1 + n * period ++ */ ++ new = ((s64)(new_raw_count << shift) >> shift); ++ old = ((s64)(prev_raw_count << shift) >> shift); ++ local64_add(new - old + count * period, &event->count); ++ ++ perf_event_update_userpage(event); ++ ++ return 0; ++} ++ + static void __intel_pmu_pebs_event(struct perf_event *event, + struct pt_regs *iregs, + void *base, void *top, + int bit, int count) + { ++ struct hw_perf_event *hwc = &event->hw; + struct perf_sample_data data; + struct pt_regs regs; + void *at = get_next_pebs_record_by_bit(base, top, bit); + +- if (!intel_pmu_save_and_restart(event) && +- !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)) ++ if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { ++ /* ++ * Now, auto-reload is only enabled in fixed period mode. ++ * The reload value is always hwc->sample_period. ++ * May need to change it, if auto-reload is enabled in ++ * freq mode later. ++ */ ++ intel_pmu_save_and_restart_reload(event, count); ++ } else if (!intel_pmu_save_and_restart(event)) + return; + + while (count > 1) { +@@ -1365,8 +1441,11 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) + return; + + n = top - at; +- if (n <= 0) ++ if (n <= 0) { ++ if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) ++ intel_pmu_save_and_restart_reload(event, 0); + return; ++ } + + __intel_pmu_pebs_event(event, iregs, at, top, 0, n); + } +@@ -1389,8 +1468,22 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) + + ds->pebs_index = ds->pebs_buffer_base; + +- if (unlikely(base >= top)) ++ if (unlikely(base >= top)) { ++ /* ++ * The drain_pebs() could be called twice in a short period ++ * for auto-reload event in pmu::read(). There are no ++ * overflows have happened in between. ++ * It needs to call intel_pmu_save_and_restart_reload() to ++ * update the event->count for this case. ++ */ ++ for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, ++ x86_pmu.max_pebs_events) { ++ event = cpuc->events[bit]; ++ if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) ++ intel_pmu_save_and_restart_reload(event, 0); ++ } + return; ++ } + + for (at = base; at < top; at += x86_pmu.pebs_record_size) { + struct pebs_record_nhm *p = at; +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h +index 8e4ea143ed96..dc4728eccfd8 100644 +--- a/arch/x86/events/perf_event.h ++++ b/arch/x86/events/perf_event.h +@@ -556,7 +556,7 @@ struct x86_pmu { + struct x86_pmu_quirk *quirks; + int perfctr_second_write; + bool late_ack; +- unsigned (*limit_period)(struct perf_event *event, unsigned l); ++ u64 (*limit_period)(struct perf_event *event, u64 l); + + /* + * sysfs attrs +diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h +index cf5961ca8677..4cd6a3b71824 100644 +--- a/arch/x86/include/asm/alternative.h ++++ b/arch/x86/include/asm/alternative.h +@@ -218,13 +218,11 @@ static inline int alternatives_text_reserved(void *start, void *end) + */ + #define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \ + output, input...) 
\ +-{ \ + asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\ + "call %P[new2]", feature2) \ + : output, ASM_CALL_CONSTRAINT \ + : [old] "i" (oldfunc), [new1] "i" (newfunc1), \ +- [new2] "i" (newfunc2), ## input); \ +-} ++ [new2] "i" (newfunc2), ## input) + + /* + * use this macro(s) if you need more than one output parameter +diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h +index 704f31315dde..875ca99b82ee 100644 +--- a/arch/x86/include/asm/tlbflush.h ++++ b/arch/x86/include/asm/tlbflush.h +@@ -131,7 +131,12 @@ static inline unsigned long build_cr3(pgd_t *pgd, u16 asid) + static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid) + { + VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); +- VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID)); ++ /* ++ * Use boot_cpu_has() instead of this_cpu_has() as this function ++ * might be called during early boot. This should work even after ++ * boot because all CPU's the have same capabilities: ++ */ ++ VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID)); + return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH; + } + +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index 5942aa5f569b..ebdcc368a2d3 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -1481,7 +1481,7 @@ void setup_local_APIC(void) + * TODO: set up through-local-APIC from through-I/O-APIC? --macro + */ + value = apic_read(APIC_LVT0) & APIC_LVT_MASKED; +- if (!cpu && (pic_mode || !value)) { ++ if (!cpu && (pic_mode || !value || skip_ioapic_setup)) { + value = APIC_DM_EXTINT; + apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu); + } else { +diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +index 7be35b600299..2dae1b3c42fc 100644 +--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c ++++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +@@ -1657,6 +1657,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, + if (ret < 0) + goto out_common_fail; + closid = ret; ++ ret = 0; + + rdtgrp->closid = closid; + list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); +diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c +index 76e07698e6d1..7fa0855e4b9a 100644 +--- a/arch/x86/kernel/devicetree.c ++++ b/arch/x86/kernel/devicetree.c +@@ -12,6 +12,7 @@ + #include <linux/of_address.h> + #include <linux/of_platform.h> + #include <linux/of_irq.h> ++#include <linux/libfdt.h> + #include <linux/slab.h> + #include <linux/pci.h> + #include <linux/of_pci.h> +@@ -200,19 +201,22 @@ static struct of_ioapic_type of_ioapic_type[] = + static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) + { +- struct of_phandle_args *irq_data = (void *)arg; ++ struct irq_fwspec *fwspec = (struct irq_fwspec *)arg; + struct of_ioapic_type *it; + struct irq_alloc_info tmp; ++ int type_index; + +- if (WARN_ON(irq_data->args_count < 2)) ++ if (WARN_ON(fwspec->param_count < 2)) + return -EINVAL; +- if (irq_data->args[1] >= ARRAY_SIZE(of_ioapic_type)) ++ ++ type_index = fwspec->param[1]; ++ if (type_index >= ARRAY_SIZE(of_ioapic_type)) + return -EINVAL; + +- it = &of_ioapic_type[irq_data->args[1]]; ++ it = &of_ioapic_type[type_index]; + ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity); + tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain)); +- tmp.ioapic_pin = irq_data->args[0]; ++ tmp.ioapic_pin = fwspec->param[0]; + + return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp); + } 
+@@ -276,14 +280,15 @@ static void __init x86_flattree_get_config(void) + + map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)128); + +- initial_boot_params = dt = early_memremap(initial_dtb, map_len); +- size = of_get_flat_dt_size(); ++ dt = early_memremap(initial_dtb, map_len); ++ size = fdt_totalsize(dt); + if (map_len < size) { + early_memunmap(dt, map_len); +- initial_boot_params = dt = early_memremap(initial_dtb, size); ++ dt = early_memremap(initial_dtb, size); + map_len = size; + } + ++ early_init_dt_verify(dt); + unflatten_and_copy_device_tree(); + early_memunmap(dt, map_len); + } +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c +index 4a96aa004390..344d3c160f8d 100644 +--- a/arch/x86/kernel/smpboot.c ++++ b/arch/x86/kernel/smpboot.c +@@ -1521,6 +1521,7 @@ static void remove_siblinginfo(int cpu) + cpumask_clear(topology_core_cpumask(cpu)); + c->phys_proc_id = 0; + c->cpu_core_id = 0; ++ c->booted_cores = 0; + cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); + recompute_smt_state(); + } +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c +index d67e3b31f3db..d1f5c744142b 100644 +--- a/arch/x86/kvm/cpuid.c ++++ b/arch/x86/kvm/cpuid.c +@@ -394,8 +394,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, + + /* cpuid 7.0.edx*/ + const u32 kvm_cpuid_7_0_edx_x86_features = +- F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) | +- F(ARCH_CAPABILITIES); ++ F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | ++ F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES); + + /* all calls to cpuid_count() should be made on the same cpu */ + get_cpu(); +@@ -481,6 +481,11 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, + entry->ecx &= ~F(PKU); + entry->edx &= kvm_cpuid_7_0_edx_x86_features; + cpuid_mask(&entry->edx, CPUID_7_EDX); ++ /* ++ * We emulate ARCH_CAPABILITIES in software even ++ * if the host doesn't support it. ++ */ ++ entry->edx |= F(ARCH_CAPABILITIES); + } else { + entry->ebx = 0; + entry->ecx = 0; +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index ab8993fe58cc..6d0fbff71d7a 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -321,8 +321,16 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu) + if (!lapic_in_kernel(vcpu)) + return; + ++ /* ++ * KVM emulates 82093AA datasheet (with in-kernel IOAPIC implementation) ++ * which doesn't have EOI register; Some buggy OSes (e.g. Windows with ++ * Hyper-V role) disable EOI broadcast in lapic not checking for IOAPIC ++ * version first and level-triggered interrupts never get EOIed in ++ * IOAPIC. ++ */ + feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0); +- if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31)))) ++ if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) && ++ !ioapic_in_kernel(vcpu->kvm)) + v |= APIC_LVR_DIRECTED_EOI; + kvm_lapic_set_reg(apic, APIC_LVR, v); + } +@@ -1467,11 +1475,23 @@ static bool set_target_expiration(struct kvm_lapic *apic) + + static void advance_periodic_target_expiration(struct kvm_lapic *apic) + { +- apic->lapic_timer.tscdeadline += +- nsec_to_cycles(apic->vcpu, apic->lapic_timer.period); ++ ktime_t now = ktime_get(); ++ u64 tscl = rdtsc(); ++ ktime_t delta; ++ ++ /* ++ * Synchronize both deadlines to the same time source or ++ * differences in the periods (caused by differences in the ++ * underlying clocks or numerical approximation errors) will ++ * cause the two to drift apart over time as the errors ++ * accumulate. 
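The comment explains why the fix recomputes tscdeadline from a single time source each period instead of advancing it independently: a per-conversion rounding error compounds under incremental updates, but stays bounded when the deadline is re-derived from target_expiration - now every time. A toy model of the two strategies (illustrative numbers only, not the KVM code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	const int64_t period = 1000000;	/* ns */
    	const int64_t err = 1;		/* per-conversion rounding error */
    	int64_t target = 0, incremental = 0, recomputed = 0;

    	for (int i = 0; i < 1000000; i++) {
    		target += period;		/* the ktime reference */
    		incremental += period + err;	/* old scheme: error compounds */
    		recomputed = target + err;	/* new scheme: error bounded */
    	}
    	printf("incremental drift: %lld ns\n",
    	       (long long)(incremental - target));	/* 1000000 */
    	printf("recomputed drift:  %lld ns\n",
    	       (long long)(recomputed - target));	/* 1 */
    	return 0;
    }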
++ */ + apic->lapic_timer.target_expiration = + ktime_add_ns(apic->lapic_timer.target_expiration, + apic->lapic_timer.period); ++ delta = ktime_sub(apic->lapic_timer.target_expiration, now); ++ apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) + ++ nsec_to_cycles(apic->vcpu, delta); + } + + static void start_sw_period(struct kvm_lapic *apic) +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 2e63edf8312c..4c88572d2b81 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -2583,6 +2583,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu) + return; + } + ++ WARN_ON_ONCE(vmx->emulation_required); ++ + if (kvm_exception_is_soft(nr)) { + vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, + vmx->vcpu.arch.event_exit_inst_len); +@@ -6829,12 +6831,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) + goto out; + } + +- if (err != EMULATE_DONE) { +- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; +- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; +- vcpu->run->internal.ndata = 0; +- return 0; +- } ++ if (err != EMULATE_DONE) ++ goto emulation_error; ++ ++ if (vmx->emulation_required && !vmx->rmode.vm86_active && ++ vcpu->arch.exception.pending) ++ goto emulation_error; + + if (vcpu->arch.halt_request) { + vcpu->arch.halt_request = 0; +@@ -6850,6 +6852,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) + + out: + return ret; ++ ++emulation_error: ++ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; ++ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; ++ vcpu->run->internal.ndata = 0; ++ return 0; + } + + static int __grow_ple_window(int val) +@@ -11174,7 +11182,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) + if (ret) + return ret; + +- if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) ++ /* ++ * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken ++ * by event injection, halt vcpu. ++ */ ++ if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) && ++ !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) + return kvm_vcpu_halt(vcpu); + + vmx->nested.nested_run_pending = 1; +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 649f476039de..adac01d0181a 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -7505,6 +7505,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, + { + struct msr_data apic_base_msr; + int mmu_reset_needed = 0; ++ int cpuid_update_needed = 0; + int pending_vec, max_bits, idx; + struct desc_ptr dt; + +@@ -7542,8 +7543,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, + vcpu->arch.cr0 = sregs->cr0; + + mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; ++ cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) & ++ (X86_CR4_OSXSAVE | X86_CR4_PKE)); + kvm_x86_ops->set_cr4(vcpu, sregs->cr4); +- if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE)) ++ if (cpuid_update_needed) + kvm_update_cpuid(vcpu); + + idx = srcu_read_lock(&vcpu->kvm->srcu); +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c +index 3ed9a08885c5..4085897fef64 100644 +--- a/arch/x86/mm/pageattr.c ++++ b/arch/x86/mm/pageattr.c +@@ -298,9 +298,11 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, + + /* + * The .rodata section needs to be read-only. Using the pfn +- * catches all aliases. ++ * catches all aliases. This also includes __ro_after_init, ++ * so do not enforce until kernel_set_to_readonly is true. 
+ */ +- if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT, ++ if (kernel_set_to_readonly && ++ within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT, + __pa_symbol(__end_rodata) >> PAGE_SHIFT)) + pgprot_val(forbidden) |= _PAGE_RW; + +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c +index 34cda7e0551b..c03c85e4fb6a 100644 +--- a/arch/x86/mm/pgtable.c ++++ b/arch/x86/mm/pgtable.c +@@ -1,6 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0 + #include <linux/mm.h> + #include <linux/gfp.h> ++#include <linux/hugetlb.h> + #include <asm/pgalloc.h> + #include <asm/pgtable.h> + #include <asm/tlb.h> +@@ -636,6 +637,10 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) + (mtrr != MTRR_TYPE_WRBACK)) + return 0; + ++ /* Bail out if we are we on a populated non-leaf entry: */ ++ if (pud_present(*pud) && !pud_huge(*pud)) ++ return 0; ++ + prot = pgprot_4k_2_large(prot); + + set_pte((pte_t *)pud, pfn_pte( +@@ -664,6 +669,10 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) + return 0; + } + ++ /* Bail out if we are we on a populated non-leaf entry: */ ++ if (pmd_present(*pmd) && !pmd_huge(*pmd)) ++ return 0; ++ + prot = pgprot_4k_2_large(prot); + + set_pte((pte_t *)pmd, pfn_pte( +diff --git a/block/partition-generic.c b/block/partition-generic.c +index 91622db9aedf..08dabcd8b6ae 100644 +--- a/block/partition-generic.c ++++ b/block/partition-generic.c +@@ -51,6 +51,12 @@ const char *bdevname(struct block_device *bdev, char *buf) + + EXPORT_SYMBOL(bdevname); + ++const char *bio_devname(struct bio *bio, char *buf) ++{ ++ return disk_name(bio->bi_disk, bio->bi_partno, buf); ++} ++EXPORT_SYMBOL(bio_devname); ++ + /* + * There's very little reason to use this, you should really + * have a struct block_device just about everywhere and use +diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c +index f6a009d88a33..52e5ea3b8e40 100644 +--- a/crypto/asymmetric_keys/pkcs7_trust.c ++++ b/crypto/asymmetric_keys/pkcs7_trust.c +@@ -106,6 +106,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7, + pr_devel("sinfo %u: Direct signer is key %x\n", + sinfo->index, key_serial(key)); + x509 = NULL; ++ sig = sinfo->sig; + goto matched; + } + if (PTR_ERR(key) != -ENOKEY) +diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c +index 754431031282..552c1f725b6c 100644 +--- a/drivers/acpi/acpi_pad.c ++++ b/drivers/acpi/acpi_pad.c +@@ -110,6 +110,7 @@ static void round_robin_cpu(unsigned int tsk_index) + cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus); + if (cpumask_empty(tmp)) { + mutex_unlock(&round_robin_lock); ++ free_cpumask_var(tmp); + return; + } + for_each_cpu(cpu, tmp) { +@@ -127,6 +128,8 @@ static void round_robin_cpu(unsigned int tsk_index) + mutex_unlock(&round_robin_lock); + + set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu)); ++ ++ free_cpumask_var(tmp); + } + + static void exit_round_robin(unsigned int tsk_index) +diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c +index d3b6b314fa50..37b0b4c04220 100644 +--- a/drivers/acpi/acpica/evevent.c ++++ b/drivers/acpi/acpica/evevent.c +@@ -204,6 +204,7 @@ u32 acpi_ev_fixed_event_detect(void) + u32 fixed_status; + u32 fixed_enable; + u32 i; ++ acpi_status status; + + ACPI_FUNCTION_NAME(ev_fixed_event_detect); + +@@ -211,8 +212,12 @@ u32 acpi_ev_fixed_event_detect(void) + * Read the fixed feature status and enable registers, as all the cases + * depend on their values. Ignore errors here. 
+ */ +- (void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status); +- (void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable); ++ status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status); ++ status |= ++ acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable); ++ if (ACPI_FAILURE(status)) { ++ return (int_status); ++ } + + ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, + "Fixed Event Block: Enable %08X Status %08X\n", +diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c +index d22167cbd0ca..f13d3cfa74e1 100644 +--- a/drivers/acpi/acpica/nseval.c ++++ b/drivers/acpi/acpica/nseval.c +@@ -308,6 +308,14 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info) + /* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */ + + status = AE_OK; ++ } else if (ACPI_FAILURE(status)) { ++ ++ /* If return_object exists, delete it */ ++ ++ if (info->return_object) { ++ acpi_ut_remove_reference(info->return_object); ++ info->return_object = NULL; ++ } + } + + ACPI_DEBUG_PRINT((ACPI_DB_NAMES, +diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c +index eb9dfaca555f..11ce4e5d10e2 100644 +--- a/drivers/acpi/acpica/psargs.c ++++ b/drivers/acpi/acpica/psargs.c +@@ -890,6 +890,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, + ACPI_POSSIBLE_METHOD_CALL); + + if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) { ++ ++ /* Free method call op and corresponding namestring sub-ob */ ++ ++ acpi_ps_free_op(arg->common.value.arg); + acpi_ps_free_op(arg); + arg = NULL; + walk_state->arg_count = 1; +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index 473f150d6b22..71008dbabe98 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -4483,6 +4483,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { + /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ + { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, + ++ /* Some Sandisk SSDs lock up hard with NCQ enabled. 
Reported on ++ SD7SN6S256G and SD8SN8U256G */ ++ { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, }, ++ + /* devices which puke on READ_NATIVE_MAX */ + { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, + { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, +@@ -4543,6 +4547,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { + { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, }, + + /* devices that don't properly handle queued TRIM commands */ ++ { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | ++ ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 4ff69f508e95..6b0440a12c51 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -4287,7 +4287,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap, + #ifdef ATA_DEBUG + struct scsi_device *scsidev = cmd->device; + +- DPRINTK("CDB (%u:%d,%d,%d) %9ph\n", ++ DPRINTK("CDB (%u:%d,%d,%lld) %9ph\n", + ap->print_id, + scsidev->channel, scsidev->id, scsidev->lun, + cmd->cmnd); +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c +index efdadd153abe..8fd08023c0f5 100644 +--- a/drivers/base/regmap/regmap.c ++++ b/drivers/base/regmap/regmap.c +@@ -98,7 +98,7 @@ bool regmap_cached(struct regmap *map, unsigned int reg) + int ret; + unsigned int val; + +- if (map->cache == REGCACHE_NONE) ++ if (map->cache_type == REGCACHE_NONE) + return false; + + if (!map->cache_ops) +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c +index 5f2a4240a204..86258b00a1d4 100644 +--- a/drivers/block/nbd.c ++++ b/drivers/block/nbd.c +@@ -1591,7 +1591,7 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) + if (new_index < 0) { + mutex_unlock(&nbd_index_mutex); + printk(KERN_ERR "nbd: failed to add new device\n"); +- return ret; ++ return new_index; + } + nbd = idr_find(&nbd_index_idr, new_index); + } +diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c +index 69dfa1d3f453..f01d4a8a783a 100644 +--- a/drivers/block/null_blk.c ++++ b/drivers/block/null_blk.c +@@ -68,6 +68,7 @@ enum nullb_device_flags { + NULLB_DEV_FL_CACHE = 3, + }; + ++#define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2) + /* + * nullb_page is a page in memory for nullb devices. 
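MAP_SZ sizes the per-page bitmap as one bit per 512-byte sector plus the two flag bits (NULLB_PAGE_LOCK and NULLB_PAGE_FREE, redefined below). With 4 KiB pages that is 8 + 2 = 10 bits, which happened to fit in the old unsigned long field, but with 64 KiB pages it is 128 + 2 = 130 bits; hence DECLARE_BITMAP here and the removal of the sizeof(unsigned long) sanity check in null_init() further down. A quick sizing check, assuming 512-byte sectors and a 64-bit unsigned long:

    #include <stdio.h>

    #define SECTOR_SHIFT 9	/* 512-byte sectors */

    int main(void)
    {
    	unsigned long word_bits = sizeof(unsigned long) * 8;

    	for (unsigned long page = 4096; page <= 65536; page *= 4) {
    		unsigned long map_sz = (page >> SECTOR_SHIFT) + 2;

    		printf("PAGE_SIZE %6lu -> MAP_SZ %3lu bits: %s\n",
    		       page, map_sz,
    		       map_sz <= word_bits ? "fits in one long"
    					  : "needs DECLARE_BITMAP");
    	}
    	return 0;
    }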
+ * +@@ -82,10 +83,10 @@ enum nullb_device_flags { + */ + struct nullb_page { + struct page *page; +- unsigned long bitmap; ++ DECLARE_BITMAP(bitmap, MAP_SZ); + }; +-#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1) +-#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2) ++#define NULLB_PAGE_LOCK (MAP_SZ - 1) ++#define NULLB_PAGE_FREE (MAP_SZ - 2) + + struct nullb_device { + struct nullb *nullb; +@@ -725,7 +726,7 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags) + if (!t_page->page) + goto out_freepage; + +- t_page->bitmap = 0; ++ memset(t_page->bitmap, 0, sizeof(t_page->bitmap)); + return t_page; + out_freepage: + kfree(t_page); +@@ -735,13 +736,20 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags) + + static void null_free_page(struct nullb_page *t_page) + { +- __set_bit(NULLB_PAGE_FREE, &t_page->bitmap); +- if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap)) ++ __set_bit(NULLB_PAGE_FREE, t_page->bitmap); ++ if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap)) + return; + __free_page(t_page->page); + kfree(t_page); + } + ++static bool null_page_empty(struct nullb_page *page) ++{ ++ int size = MAP_SZ - 2; ++ ++ return find_first_bit(page->bitmap, size) == size; ++} ++ + static void null_free_sector(struct nullb *nullb, sector_t sector, + bool is_cache) + { +@@ -756,9 +764,9 @@ static void null_free_sector(struct nullb *nullb, sector_t sector, + + t_page = radix_tree_lookup(root, idx); + if (t_page) { +- __clear_bit(sector_bit, &t_page->bitmap); ++ __clear_bit(sector_bit, t_page->bitmap); + +- if (!t_page->bitmap) { ++ if (null_page_empty(t_page)) { + ret = radix_tree_delete_item(root, idx, t_page); + WARN_ON(ret != t_page); + null_free_page(ret); +@@ -829,7 +837,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb, + t_page = radix_tree_lookup(root, idx); + WARN_ON(t_page && t_page->page->index != idx); + +- if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap))) ++ if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap))) + return t_page; + + return NULL; +@@ -892,10 +900,10 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page) + + t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true); + +- __clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap); +- if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) { ++ __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap); ++ if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) { + null_free_page(c_page); +- if (t_page && t_page->bitmap == 0) { ++ if (t_page && null_page_empty(t_page)) { + ret = radix_tree_delete_item(&nullb->dev->data, + idx, t_page); + null_free_page(t_page); +@@ -911,11 +919,11 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page) + + for (i = 0; i < PAGE_SECTORS; + i += (nullb->dev->blocksize >> SECTOR_SHIFT)) { +- if (test_bit(i, &c_page->bitmap)) { ++ if (test_bit(i, c_page->bitmap)) { + offset = (i << SECTOR_SHIFT); + memcpy(dst + offset, src + offset, + nullb->dev->blocksize); +- __set_bit(i, &t_page->bitmap); ++ __set_bit(i, t_page->bitmap); + } + } + +@@ -952,10 +960,10 @@ static int null_make_cache_space(struct nullb *nullb, unsigned long n) + * We found the page which is being flushed to disk by other + * threads + */ +- if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap)) ++ if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap)) + c_pages[i] = NULL; + else +- __set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap); ++ __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap); + } + + one_round = 0; +@@ -1008,7 +1016,7 @@ static int 
copy_to_nullb(struct nullb *nullb, struct page *source, + kunmap_atomic(dst); + kunmap_atomic(src); + +- __set_bit(sector & SECTOR_MASK, &t_page->bitmap); ++ __set_bit(sector & SECTOR_MASK, t_page->bitmap); + + if (is_fua) + null_free_sector(nullb, sector, true); +@@ -1922,10 +1930,6 @@ static int __init null_init(void) + struct nullb *nullb; + struct nullb_device *dev; + +- /* check for nullb_page.bitmap */ +- if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT)) +- return -EINVAL; +- + if (g_bs > PAGE_SIZE) { + pr_warn("null_blk: invalid block size\n"); + pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE); +diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c +index 7b8c6368beb7..a026211afb51 100644 +--- a/drivers/block/paride/pcd.c ++++ b/drivers/block/paride/pcd.c +@@ -230,6 +230,8 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode) + struct pcd_unit *cd = bdev->bd_disk->private_data; + int ret; + ++ check_disk_change(bdev); ++ + mutex_lock(&pcd_mutex); + ret = cdrom_open(&cd->info, bdev, mode); + mutex_unlock(&pcd_mutex); +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c +index 891265acb10e..7d23225f79ed 100644 +--- a/drivers/block/xen-blkfront.c ++++ b/drivers/block/xen-blkfront.c +@@ -262,6 +262,7 @@ static DEFINE_SPINLOCK(minor_lock); + + static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo); + static void blkfront_gather_backend_features(struct blkfront_info *info); ++static int negotiate_mq(struct blkfront_info *info); + + static int get_id_from_freelist(struct blkfront_ring_info *rinfo) + { +@@ -1774,11 +1775,18 @@ static int talk_to_blkback(struct xenbus_device *dev, + unsigned int i, max_page_order; + unsigned int ring_page_order; + ++ if (!info) ++ return -ENODEV; ++ + max_page_order = xenbus_read_unsigned(info->xbdev->otherend, + "max-ring-page-order", 0); + ring_page_order = min(xen_blkif_max_ring_order, max_page_order); + info->nr_ring_pages = 1 << ring_page_order; + ++ err = negotiate_mq(info); ++ if (err) ++ goto destroy_blkring; ++ + for (i = 0; i < info->nr_rings; i++) { + struct blkfront_ring_info *rinfo = &info->rinfo[i]; + +@@ -1978,11 +1986,6 @@ static int blkfront_probe(struct xenbus_device *dev, + } + + info->xbdev = dev; +- err = negotiate_mq(info); +- if (err) { +- kfree(info); +- return err; +- } + + mutex_init(&info->mutex); + info->vdevice = vdevice; +@@ -2099,10 +2102,6 @@ static int blkfront_resume(struct xenbus_device *dev) + + blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); + +- err = negotiate_mq(info); +- if (err) +- return err; +- + err = talk_to_blkback(dev, info); + if (!err) + blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c +index 5f7d86509f2f..bfc566d3f31a 100644 +--- a/drivers/cdrom/cdrom.c ++++ b/drivers/cdrom/cdrom.c +@@ -1152,9 +1152,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, + + cd_dbg(CD_OPEN, "entering cdrom_open\n"); + +- /* open is event synchronization point, check events first */ +- check_disk_change(bdev); +- + /* if this was a O_NONBLOCK open and we should honor the flags, + * do a quick open without drive/disc integrity checks. 
*/ + cdi->use_count++; +diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c +index 6495b03f576c..ae3a7537cf0f 100644 +--- a/drivers/cdrom/gdrom.c ++++ b/drivers/cdrom/gdrom.c +@@ -497,6 +497,9 @@ static const struct cdrom_device_ops gdrom_ops = { + static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode) + { + int ret; ++ ++ check_disk_change(bdev); ++ + mutex_lock(&gdrom_mutex); + ret = cdrom_open(gd.cd_info, bdev, mode); + mutex_unlock(&gdrom_mutex); +diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c +index 63d84e6f1891..83c695938a2d 100644 +--- a/drivers/char/hw_random/stm32-rng.c ++++ b/drivers/char/hw_random/stm32-rng.c +@@ -21,6 +21,7 @@ + #include <linux/of_address.h> + #include <linux/of_platform.h> + #include <linux/pm_runtime.h> ++#include <linux/reset.h> + #include <linux/slab.h> + + #define RNG_CR 0x00 +@@ -46,6 +47,7 @@ struct stm32_rng_private { + struct hwrng rng; + void __iomem *base; + struct clk *clk; ++ struct reset_control *rst; + }; + + static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) +@@ -140,6 +142,13 @@ static int stm32_rng_probe(struct platform_device *ofdev) + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + ++ priv->rst = devm_reset_control_get(&ofdev->dev, NULL); ++ if (!IS_ERR(priv->rst)) { ++ reset_control_assert(priv->rst); ++ udelay(2); ++ reset_control_deassert(priv->rst); ++ } ++ + dev_set_drvdata(dev, priv); + + priv->rng.name = dev_driver_string(dev), +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c +index 0aea3bcb6158..6f2eaba1cd6a 100644 +--- a/drivers/char/ipmi/ipmi_ssif.c ++++ b/drivers/char/ipmi/ipmi_ssif.c +@@ -763,7 +763,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, + ssif_info->ssif_state = SSIF_NORMAL; + ipmi_ssif_unlock_cond(ssif_info, flags); + pr_warn(PFX "Error getting flags: %d %d, %x\n", +- result, len, data[2]); ++ result, len, (len >= 3) ? data[2] : 0); + } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 + || data[1] != IPMI_GET_MSG_FLAGS_CMD) { + /* +@@ -785,7 +785,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, + if ((result < 0) || (len < 3) || (data[2] != 0)) { + /* Error clearing flags */ + pr_warn(PFX "Error clearing flags: %d %d, %x\n", +- result, len, data[2]); ++ result, len, (len >= 3) ? data[2] : 0); + } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 + || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) { + pr_warn(PFX "Invalid response clearing flags: %x %x\n", +diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c +index 3ee7e6fea621..846d18daf893 100644 +--- a/drivers/clocksource/fsl_ftm_timer.c ++++ b/drivers/clocksource/fsl_ftm_timer.c +@@ -281,7 +281,7 @@ static int __init __ftm_clk_init(struct device_node *np, char *cnt_name, + + static unsigned long __init ftm_clk_init(struct device_node *np) + { +- unsigned long freq; ++ long freq; + + freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt"); + if (freq <= 0) +diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c +index ae3167c28b12..a07f51231e33 100644 +--- a/drivers/clocksource/mips-gic-timer.c ++++ b/drivers/clocksource/mips-gic-timer.c +@@ -164,7 +164,7 @@ static int __init __gic_clocksource_init(void) + + /* Set clocksource mask. 
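The mips-gic-timer hunk that follows swaps __fls() for __ffs() when extracting the COUNTBITS field: to shift a masked field down to bit 0 you shift by the position of the mask's lowest set bit (__ffs), not its highest (__fls). A generic sketch with an illustrative 0x0f000000 mask (the real GIC_CONFIG_COUNTBITS layout may differ):

    #include <stdio.h>

    /* Portable stand-ins for the kernel's __ffs()/__fls()
     * (assumes a 64-bit unsigned long). */
    static int my_ffs(unsigned long x) { return __builtin_ctzl(x); }
    static int my_fls(unsigned long x) { return 63 - __builtin_clzl(x); }

    int main(void)
    {
    	unsigned long mask = 0x0f000000UL;	/* illustrative field mask */
    	unsigned long reg  = 0x05000000UL;	/* field holds the value 5 */

    	printf("shift by __ffs(mask): %lu\n",
    	       (reg & mask) >> my_ffs(mask));	/* 5: correct extraction */
    	printf("shift by __fls(mask): %lu\n",
    	       (reg & mask) >> my_fls(mask));	/* 0: field mangled */
    	return 0;
    }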
*/ + count_width = read_gic_config() & GIC_CONFIG_COUNTBITS; +- count_width >>= __fls(GIC_CONFIG_COUNTBITS); ++ count_width >>= __ffs(GIC_CONFIG_COUNTBITS); + count_width *= 4; + count_width += 32; + gic_clocksource.mask = CLOCKSOURCE_MASK(count_width); +diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c +index dcb1cb9a4572..8b432d6e846d 100644 +--- a/drivers/cpufreq/cppc_cpufreq.c ++++ b/drivers/cpufreq/cppc_cpufreq.c +@@ -167,9 +167,19 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) + NSEC_PER_USEC; + policy->shared_type = cpu->shared_type; + +- if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) ++ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { ++ int i; ++ + cpumask_copy(policy->cpus, cpu->shared_cpu_map); +- else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) { ++ ++ for_each_cpu(i, policy->cpus) { ++ if (unlikely(i == policy->cpu)) ++ continue; ++ ++ memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps, ++ sizeof(cpu->perf_caps)); ++ } ++ } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) { + /* Support only SW_ANY for now. */ + pr_debug("Unsupported CPU co-ord type\n"); + return -EFAULT; +@@ -233,8 +243,13 @@ static int __init cppc_cpufreq_init(void) + return ret; + + out: +- for_each_possible_cpu(i) +- kfree(all_cpu_data[i]); ++ for_each_possible_cpu(i) { ++ cpu = all_cpu_data[i]; ++ if (!cpu) ++ break; ++ free_cpumask_var(cpu->shared_cpu_map); ++ kfree(cpu); ++ } + + kfree(all_cpu_data); + return -ENODEV; +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index ea43b147a7fe..789fc3a8289f 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -1315,14 +1315,14 @@ static int cpufreq_online(unsigned int cpu) + return 0; + + out_exit_policy: ++ for_each_cpu(j, policy->real_cpus) ++ remove_cpu_dev_symlink(policy, get_cpu_device(j)); ++ + up_write(&policy->rwsem); + + if (cpufreq_driver->exit) + cpufreq_driver->exit(policy); + +- for_each_cpu(j, policy->real_cpus) +- remove_cpu_dev_symlink(policy, get_cpu_device(j)); +- + out_free_policy: + cpufreq_policy_free(policy); + return ret; +diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c +index f652a0e0f5a2..3548caa9e933 100644 +--- a/drivers/dma/mv_xor_v2.c ++++ b/drivers/dma/mv_xor_v2.c +@@ -163,6 +163,7 @@ struct mv_xor_v2_device { + void __iomem *dma_base; + void __iomem *glob_base; + struct clk *clk; ++ struct clk *reg_clk; + struct tasklet_struct irq_tasklet; + struct list_head free_sw_desc; + struct dma_device dmadev; +@@ -749,13 +750,26 @@ static int mv_xor_v2_probe(struct platform_device *pdev) + if (ret) + return ret; + ++ xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg"); ++ if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) { ++ if (!IS_ERR(xor_dev->reg_clk)) { ++ ret = clk_prepare_enable(xor_dev->reg_clk); ++ if (ret) ++ return ret; ++ } else { ++ return PTR_ERR(xor_dev->reg_clk); ++ } ++ } ++ + xor_dev->clk = devm_clk_get(&pdev->dev, NULL); +- if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) +- return -EPROBE_DEFER; ++ if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) { ++ ret = EPROBE_DEFER; ++ goto disable_reg_clk; ++ } + if (!IS_ERR(xor_dev->clk)) { + ret = clk_prepare_enable(xor_dev->clk); + if (ret) +- return ret; ++ goto disable_reg_clk; + } + + ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1, +@@ -866,8 +880,9 @@ static int mv_xor_v2_probe(struct platform_device *pdev) + free_msi_irqs: + platform_msi_domain_free_irqs(&pdev->dev); + disable_clk: +- if (!IS_ERR(xor_dev->clk)) 
+- clk_disable_unprepare(xor_dev->clk); ++ clk_disable_unprepare(xor_dev->clk); ++disable_reg_clk: ++ clk_disable_unprepare(xor_dev->reg_clk); + return ret; + } + +diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c +index f122c2a7b9f0..7432c8894e32 100644 +--- a/drivers/dma/pl330.c ++++ b/drivers/dma/pl330.c +@@ -1510,7 +1510,7 @@ static void pl330_dotask(unsigned long data) + /* Returns 1 if state was updated, 0 otherwise */ + static int pl330_update(struct pl330_dmac *pl330) + { +- struct dma_pl330_desc *descdone, *tmp; ++ struct dma_pl330_desc *descdone; + unsigned long flags; + void __iomem *regs; + u32 val; +@@ -1588,7 +1588,9 @@ static int pl330_update(struct pl330_dmac *pl330) + } + + /* Now that we are in no hurry, do the callbacks */ +- list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) { ++ while (!list_empty(&pl330->req_done)) { ++ descdone = list_first_entry(&pl330->req_done, ++ struct dma_pl330_desc, rqd); + list_del(&descdone->rqd); + spin_unlock_irqrestore(&pl330->lock, flags); + dma_pl330_rqcb(descdone, PL330_ERR_NONE); +diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c +index 6d89fb6a6a92..8fbf175fdcc7 100644 +--- a/drivers/dma/qcom/bam_dma.c ++++ b/drivers/dma/qcom/bam_dma.c +@@ -388,6 +388,7 @@ struct bam_device { + struct device_dma_parameters dma_parms; + struct bam_chan *channels; + u32 num_channels; ++ u32 num_ees; + + /* execution environment ID, from DT */ + u32 ee; +@@ -1080,15 +1081,19 @@ static int bam_init(struct bam_device *bdev) + u32 val; + + /* read revision and configuration information */ +- val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT; +- val &= NUM_EES_MASK; ++ if (!bdev->num_ees) { ++ val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)); ++ bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK; ++ } + + /* check that configured EE is within range */ +- if (bdev->ee >= val) ++ if (bdev->ee >= bdev->num_ees) + return -EINVAL; + +- val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); +- bdev->num_channels = val & BAM_NUM_PIPES_MASK; ++ if (!bdev->num_channels) { ++ val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); ++ bdev->num_channels = val & BAM_NUM_PIPES_MASK; ++ } + + if (bdev->controlled_remotely) + return 0; +@@ -1183,6 +1188,18 @@ static int bam_dma_probe(struct platform_device *pdev) + bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node, + "qcom,controlled-remotely"); + ++ if (bdev->controlled_remotely) { ++ ret = of_property_read_u32(pdev->dev.of_node, "num-channels", ++ &bdev->num_channels); ++ if (ret) ++ dev_err(bdev->dev, "num-channels unspecified in dt\n"); ++ ++ ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees", ++ &bdev->num_ees); ++ if (ret) ++ dev_err(bdev->dev, "num-ees unspecified in dt\n"); ++ } ++ + bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk"); + if (IS_ERR(bdev->bamclk)) + return PTR_ERR(bdev->bamclk); +diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c +index 2b2c7db3e480..9d6ce5051d8f 100644 +--- a/drivers/dma/sh/rcar-dmac.c ++++ b/drivers/dma/sh/rcar-dmac.c +@@ -880,7 +880,7 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, + + rcar_dmac_chan_configure_desc(chan, desc); + +- max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift; ++ max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift; + + /* + * Allocate and fill the transfer chunk descriptors. 
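The max_chunk_size change just above removes an off-by-one: if RCAR_DMATCR_MASK is the 24-bit transfer-count field (0x00ffffff in mainline, assumed here), then mask + 1 = 0x01000000 does not fit in the TCR register and would be truncated to a count of 0, so the largest safe per-chunk count is the mask value itself. A worked check:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	const uint32_t tcr_mask = 0x00ffffff;	/* assumed 24-bit count field */
    	uint32_t old_max = tcr_mask + 1;	/* pre-fix chunk count limit */
    	uint32_t new_max = tcr_mask;		/* post-fix chunk count limit */

    	/* What actually lands in the 24-bit register field: */
    	printf("old: 0x%06x\n", old_max & tcr_mask);	/* 0x000000 -- wraps */
    	printf("new: 0x%06x\n", new_max & tcr_mask);	/* 0xffffff */
    	return 0;
    }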
We own the only +@@ -1264,8 +1264,17 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, + * If the cookie doesn't correspond to the currently running transfer + * then the descriptor hasn't been processed yet, and the residue is + * equal to the full descriptor size. ++ * Also, a client driver is possible to call this function before ++ * rcar_dmac_isr_channel_thread() runs. In this case, the "desc.running" ++ * will be the next descriptor, and the done list will appear. So, if ++ * the argument cookie matches the done list's cookie, we can assume ++ * the residue is zero. + */ + if (cookie != desc->async_tx.cookie) { ++ list_for_each_entry(desc, &chan->desc.done, node) { ++ if (cookie == desc->async_tx.cookie) ++ return 0; ++ } + list_for_each_entry(desc, &chan->desc.pending, node) { + if (cookie == desc->async_tx.cookie) + return desc->size; +diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c +index e8db9659a36b..fe0d30340e96 100644 +--- a/drivers/firmware/dmi_scan.c ++++ b/drivers/firmware/dmi_scan.c +@@ -191,7 +191,7 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, + char *s; + int is_ff = 1, is_00 = 1, i; + +- if (dmi_ident[slot] || dm->length <= index + 16) ++ if (dmi_ident[slot] || dm->length < index + 16) + return; + + d = (u8 *) dm + index; +diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c +index 1cc41c3d6315..86a1ad17a32e 100644 +--- a/drivers/firmware/efi/arm-runtime.c ++++ b/drivers/firmware/efi/arm-runtime.c +@@ -54,6 +54,9 @@ static struct ptdump_info efi_ptdump_info = { + + static int __init ptdump_init(void) + { ++ if (!efi_enabled(EFI_RUNTIME_SERVICES)) ++ return 0; ++ + return ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables"); + } + device_initcall(ptdump_init); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +index 8d689ab7e429..1ef486b5d54b 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +@@ -26,6 +26,7 @@ + #define AMDGPU_AMDKFD_H_INCLUDED + + #include <linux/types.h> ++#include <linux/mm.h> + #include <linux/mmu_context.h> + #include <kgd_kfd_interface.h> + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +index 659997bfff30..cd84bd0b1eaf 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +@@ -322,14 +322,45 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) + { + unsigned i; + int r, ret = 0; ++ long tmo_gfx, tmo_mm; ++ ++ tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT; ++ if (amdgpu_sriov_vf(adev)) { ++ /* for MM engines in hypervisor side they are not scheduled together ++ * with CP and SDMA engines, so even in exclusive mode MM engine could ++ * still running on other VF thus the IB TEST TIMEOUT for MM engines ++ * under SR-IOV should be set to a long time. 8 sec should be enough ++ * for the MM comes back to this VF. 
++ */ ++ tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT; ++ } ++ ++ if (amdgpu_sriov_runtime(adev)) { ++ /* for CP & SDMA engines since they are scheduled together so ++ * need to make the timeout width enough to cover the time ++ * cost waiting for it coming back under RUNTIME only ++ */ ++ tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT; ++ } + + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + struct amdgpu_ring *ring = adev->rings[i]; ++ long tmo; + + if (!ring || !ring->ready) + continue; + +- r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT); ++ /* MM engine need more time */ ++ if (ring->funcs->type == AMDGPU_RING_TYPE_UVD || ++ ring->funcs->type == AMDGPU_RING_TYPE_VCE || ++ ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC || ++ ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC || ++ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) ++ tmo = tmo_mm; ++ else ++ tmo = tmo_gfx; ++ ++ r = amdgpu_ring_test_ib(ring, tmo); + if (r) { + ring->ready = false; + +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +index 69182eeca264..1a30c54a0889 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +@@ -2889,7 +2889,13 @@ static int gfx_v9_0_hw_fini(void *handle) + amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); + amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); + if (amdgpu_sriov_vf(adev)) { +- pr_debug("For SRIOV client, shouldn't do anything.\n"); ++ gfx_v9_0_cp_gfx_enable(adev, false); ++ /* must disable polling for SRIOV when hw finished, otherwise ++ * CPC engine may still keep fetching WB address which is already ++ * invalid after sw finished and trigger DMAR reading error in ++ * hypervisor side. ++ */ ++ WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); + return 0; + } + gfx_v9_0_cp_enable(adev, false); +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +index 6dc0f6e346e7..a1d71429fb72 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +@@ -456,7 +456,10 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) + adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); + if (!adev->mc.vram_width) { + /* hbm memory channel size */ +- chansize = 128; ++ if (adev->flags & AMD_IS_APU) ++ chansize = 64; ++ else ++ chansize = 128; + + tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0); + tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +index 1d312603de9f..308571b09c6b 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +@@ -166,8 +166,7 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer, + packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base; + packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit; + +- /* TODO: scratch support */ +- packet->sh_hidden_private_base_vmid = 0; ++ packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base; + + packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); + packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); +diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h +index 5f4c2e833a65..d665dd5af5dd 100644 +--- a/drivers/gpu/drm/ast/ast_tables.h ++++ b/drivers/gpu/drm/ast/ast_tables.h +@@ -97,7 +97,7 @@ static const struct ast_vbios_dclk_info dclk_table[] = { + {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ + {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ + {0x4d, 
0x4c, 0x80}, /* 10: VCLK154 */ +- {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ ++ {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ +@@ -127,7 +127,7 @@ static const struct ast_vbios_dclk_info dclk_table_ast2500[] = { + {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ + {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ + {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ +- {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ ++ {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ +diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c +index b1ab4ab09532..60373d7eb220 100644 +--- a/drivers/gpu/drm/bridge/sii902x.c ++++ b/drivers/gpu/drm/bridge/sii902x.c +@@ -137,7 +137,9 @@ static int sii902x_get_modes(struct drm_connector *connector) + struct sii902x *sii902x = connector_to_sii902x(connector); + struct regmap *regmap = sii902x->regmap; + u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; ++ struct device *dev = &sii902x->i2c->dev; + unsigned long timeout; ++ unsigned int retries; + unsigned int status; + struct edid *edid; + int num = 0; +@@ -159,7 +161,7 @@ static int sii902x_get_modes(struct drm_connector *connector) + time_before(jiffies, timeout)); + + if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) { +- dev_err(&sii902x->i2c->dev, "failed to acquire the i2c bus\n"); ++ dev_err(dev, "failed to acquire the i2c bus\n"); + return -ETIMEDOUT; + } + +@@ -179,9 +181,19 @@ static int sii902x_get_modes(struct drm_connector *connector) + if (ret) + return ret; + +- ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status); ++ /* ++ * Sometimes the I2C bus can stall after failure to use the ++ * EDID channel. Retry a few times to see if things clear ++ * up, else continue anyway. 
++ */ ++ retries = 5; ++ do { ++ ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, ++ &status); ++ retries--; ++ } while (ret && retries); + if (ret) +- return ret; ++ dev_err(dev, "failed to read status (%d)\n", ret); + + ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA, + SII902X_SYS_CTRL_DDC_BUS_REQ | +@@ -201,7 +213,7 @@ static int sii902x_get_modes(struct drm_connector *connector) + + if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ | + SII902X_SYS_CTRL_DDC_BUS_GRTD)) { +- dev_err(&sii902x->i2c->dev, "failed to release the i2c bus\n"); ++ dev_err(dev, "failed to release the i2c bus\n"); + return -ETIMEDOUT; + } + +diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c +index 2b8bf2dd6387..9effe40f5fa5 100644 +--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c ++++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c +@@ -926,7 +926,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no) + struct drm_device *drm_dev = g2d->subdrv.drm_dev; + struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node; + struct drm_exynos_pending_g2d_event *e; +- struct timeval now; ++ struct timespec64 now; + + if (list_empty(&runqueue_node->event_list)) + return; +@@ -934,9 +934,9 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no) + e = list_first_entry(&runqueue_node->event_list, + struct drm_exynos_pending_g2d_event, base.link); + +- do_gettimeofday(&now); ++ ktime_get_ts64(&now); + e->event.tv_sec = now.tv_sec; +- e->event.tv_usec = now.tv_usec; ++ e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC; + e->event.cmdlist_no = cmdlist_no; + + drm_send_event(drm_dev, &e->base); +diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h +index 30496134a3d0..d7cbe53c4c01 100644 +--- a/drivers/gpu/drm/exynos/regs-fimc.h ++++ b/drivers/gpu/drm/exynos/regs-fimc.h +@@ -569,7 +569,7 @@ + #define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26) + #define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26) + #define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26) +-#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff < 13) | (0xff < 0)) ++#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0)) + + /* Real input DMA size register */ + #define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31) +diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c +index 53e0b24beda6..d976391dfa31 100644 +--- a/drivers/gpu/drm/imx/ipuv3-crtc.c ++++ b/drivers/gpu/drm/imx/ipuv3-crtc.c +@@ -225,7 +225,11 @@ static void ipu_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) + { + drm_crtc_vblank_on(crtc); ++} + ++static void ipu_crtc_atomic_flush(struct drm_crtc *crtc, ++ struct drm_crtc_state *old_crtc_state) ++{ + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + WARN_ON(drm_crtc_vblank_get(crtc)); +@@ -293,6 +297,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = { + .mode_set_nofb = ipu_crtc_mode_set_nofb, + .atomic_check = ipu_crtc_atomic_check, + .atomic_begin = ipu_crtc_atomic_begin, ++ .atomic_flush = ipu_crtc_atomic_flush, + .atomic_disable = ipu_crtc_atomic_disable, + .atomic_enable = ipu_crtc_atomic_enable, + }; +diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c +index 5155f0179b61..05520202c967 100644 +--- a/drivers/gpu/drm/meson/meson_crtc.c ++++ b/drivers/gpu/drm/meson/meson_crtc.c +@@ -36,6 +36,7 @@ + #include "meson_venc.h" + #include "meson_vpp.h" + #include "meson_viu.h" ++#include "meson_canvas.h" + #include "meson_registers.h" + + /* CRTC 
definition */ +@@ -192,6 +193,11 @@ void meson_crtc_irq(struct meson_drm *priv) + } else + meson_vpp_disable_interlace_vscaler_osd1(priv); + ++ meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, ++ priv->viu.osd1_addr, priv->viu.osd1_stride, ++ priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE, ++ MESON_CANVAS_BLKMODE_LINEAR); ++ + /* Enable OSD1 */ + writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND, + priv->io_base + _REG(VPP_MISC)); +diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c +index 7742c7d81ed8..4ad8223c60ea 100644 +--- a/drivers/gpu/drm/meson/meson_drv.c ++++ b/drivers/gpu/drm/meson/meson_drv.c +@@ -180,40 +180,51 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu"); + regs = devm_ioremap_resource(dev, res); +- if (IS_ERR(regs)) +- return PTR_ERR(regs); ++ if (IS_ERR(regs)) { ++ ret = PTR_ERR(regs); ++ goto free_drm; ++ } + + priv->io_base = regs; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi"); + /* Simply ioremap since it may be a shared register zone */ + regs = devm_ioremap(dev, res->start, resource_size(res)); +- if (!regs) +- return -EADDRNOTAVAIL; ++ if (!regs) { ++ ret = -EADDRNOTAVAIL; ++ goto free_drm; ++ } + + priv->hhi = devm_regmap_init_mmio(dev, regs, + &meson_regmap_config); + if (IS_ERR(priv->hhi)) { + dev_err(&pdev->dev, "Couldn't create the HHI regmap\n"); +- return PTR_ERR(priv->hhi); ++ ret = PTR_ERR(priv->hhi); ++ goto free_drm; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc"); + /* Simply ioremap since it may be a shared register zone */ + regs = devm_ioremap(dev, res->start, resource_size(res)); +- if (!regs) +- return -EADDRNOTAVAIL; ++ if (!regs) { ++ ret = -EADDRNOTAVAIL; ++ goto free_drm; ++ } + + priv->dmc = devm_regmap_init_mmio(dev, regs, + &meson_regmap_config); + if (IS_ERR(priv->dmc)) { + dev_err(&pdev->dev, "Couldn't create the DMC regmap\n"); +- return PTR_ERR(priv->dmc); ++ ret = PTR_ERR(priv->dmc); ++ goto free_drm; + } + + priv->vsync_irq = platform_get_irq(pdev, 0); + +- drm_vblank_init(drm, 1); ++ ret = drm_vblank_init(drm, 1); ++ if (ret) ++ goto free_drm; ++ + drm_mode_config_init(drm); + drm->mode_config.max_width = 3840; + drm->mode_config.max_height = 2160; +diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h +index 5e8b392b9d1f..8450d6ac8c9b 100644 +--- a/drivers/gpu/drm/meson/meson_drv.h ++++ b/drivers/gpu/drm/meson/meson_drv.h +@@ -43,6 +43,9 @@ struct meson_drm { + bool osd1_commit; + uint32_t osd1_ctrl_stat; + uint32_t osd1_blk0_cfg[5]; ++ uint32_t osd1_addr; ++ uint32_t osd1_stride; ++ uint32_t osd1_height; + } viu; + + struct { +diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c +index 17e96fa47868..0b6011b8d632 100644 +--- a/drivers/gpu/drm/meson/meson_plane.c ++++ b/drivers/gpu/drm/meson/meson_plane.c +@@ -164,10 +164,9 @@ static void meson_plane_atomic_update(struct drm_plane *plane, + /* Update Canvas with buffer address */ + gem = drm_fb_cma_get_gem_obj(fb, 0); + +- meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, +- gem->paddr, fb->pitches[0], +- fb->height, MESON_CANVAS_WRAP_NONE, +- MESON_CANVAS_BLKMODE_LINEAR); ++ priv->viu.osd1_addr = gem->paddr; ++ priv->viu.osd1_stride = fb->pitches[0]; ++ priv->viu.osd1_height = fb->height; + + spin_unlock_irqrestore(&priv->drm->event_lock, flags); + } +diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c 
b/drivers/gpu/drm/nouveau/nouveau_backlight.c +index f56f60f695e1..debbbf0fd4bd 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c ++++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c +@@ -134,7 +134,7 @@ nv50_get_intensity(struct backlight_device *bd) + struct nouveau_encoder *nv_encoder = bl_get_data(bd); + struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); + struct nvif_object *device = &drm->client.device.object; +- int or = nv_encoder->or; ++ int or = ffs(nv_encoder->dcb->or) - 1; + u32 div = 1025; + u32 val; + +@@ -149,7 +149,7 @@ nv50_set_intensity(struct backlight_device *bd) + struct nouveau_encoder *nv_encoder = bl_get_data(bd); + struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); + struct nvif_object *device = &drm->client.device.object; +- int or = nv_encoder->or; ++ int or = ffs(nv_encoder->dcb->or) - 1; + u32 div = 1025; + u32 val = (bd->props.brightness * div) / 100; + +@@ -170,7 +170,7 @@ nva3_get_intensity(struct backlight_device *bd) + struct nouveau_encoder *nv_encoder = bl_get_data(bd); + struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); + struct nvif_object *device = &drm->client.device.object; +- int or = nv_encoder->or; ++ int or = ffs(nv_encoder->dcb->or) - 1; + u32 div, val; + + div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); +@@ -188,7 +188,7 @@ nva3_set_intensity(struct backlight_device *bd) + struct nouveau_encoder *nv_encoder = bl_get_data(bd); + struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); + struct nvif_object *device = &drm->client.device.object; +- int or = nv_encoder->or; ++ int or = ffs(nv_encoder->dcb->or) - 1; + u32 div, val; + + div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); +@@ -228,7 +228,7 @@ nv50_backlight_init(struct drm_connector *connector) + return -ENODEV; + } + +- if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) ++ if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1))) + return 0; + + if (drm->client.device.info.chipset <= 0xa0 || +diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c +index d1755f12236b..41ebb37aaa79 100644 +--- a/drivers/gpu/drm/omapdrm/dss/dss.c ++++ b/drivers/gpu/drm/omapdrm/dss/dss.c +@@ -1299,88 +1299,18 @@ static const struct soc_device_attribute dss_soc_devices[] = { + + static int dss_bind(struct device *dev) + { +- struct platform_device *pdev = to_platform_device(dev); +- struct resource *dss_mem; +- u32 rev; + int r; + +- dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0); +- dss.base = devm_ioremap_resource(&pdev->dev, dss_mem); +- if (IS_ERR(dss.base)) +- return PTR_ERR(dss.base); +- +- r = dss_get_clocks(); ++ r = component_bind_all(dev, NULL); + if (r) + return r; + +- r = dss_setup_default_clock(); +- if (r) +- goto err_setup_clocks; +- +- r = dss_video_pll_probe(pdev); +- if (r) +- goto err_pll_init; +- +- r = dss_init_ports(pdev); +- if (r) +- goto err_init_ports; +- +- pm_runtime_enable(&pdev->dev); +- +- r = dss_runtime_get(); +- if (r) +- goto err_runtime_get; +- +- dss.dss_clk_rate = clk_get_rate(dss.dss_clk); +- +- /* Select DPLL */ +- REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); +- +- dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); +- +-#ifdef CONFIG_OMAP2_DSS_VENC +- REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ +- REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ +- REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ +-#endif +- dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK; +- dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK; +- 
dss.dispc_clk_source = DSS_CLK_SRC_FCK; +- dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK; +- dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK; +- +- rev = dss_read_reg(DSS_REVISION); +- pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); +- +- dss_runtime_put(); +- +- r = component_bind_all(&pdev->dev, NULL); +- if (r) +- goto err_component; +- +- dss_debugfs_create_file("dss", dss_dump_regs); +- + pm_set_vt_switch(0); + + omapdss_gather_components(dev); + omapdss_set_is_initialized(true); + + return 0; +- +-err_component: +-err_runtime_get: +- pm_runtime_disable(&pdev->dev); +- dss_uninit_ports(pdev); +-err_init_ports: +- if (dss.video1_pll) +- dss_video_pll_uninit(dss.video1_pll); +- +- if (dss.video2_pll) +- dss_video_pll_uninit(dss.video2_pll); +-err_pll_init: +-err_setup_clocks: +- dss_put_clocks(); +- return r; + } + + static void dss_unbind(struct device *dev) +@@ -1390,18 +1320,6 @@ static void dss_unbind(struct device *dev) + omapdss_set_is_initialized(false); + + component_unbind_all(&pdev->dev, NULL); +- +- if (dss.video1_pll) +- dss_video_pll_uninit(dss.video1_pll); +- +- if (dss.video2_pll) +- dss_video_pll_uninit(dss.video2_pll); +- +- dss_uninit_ports(pdev); +- +- pm_runtime_disable(&pdev->dev); +- +- dss_put_clocks(); + } + + static const struct component_master_ops dss_component_ops = { +@@ -1433,10 +1351,46 @@ static int dss_add_child_component(struct device *dev, void *data) + return 0; + } + ++static int dss_probe_hardware(void) ++{ ++ u32 rev; ++ int r; ++ ++ r = dss_runtime_get(); ++ if (r) ++ return r; ++ ++ dss.dss_clk_rate = clk_get_rate(dss.dss_clk); ++ ++ /* Select DPLL */ ++ REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); ++ ++ dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); ++ ++#ifdef CONFIG_OMAP2_DSS_VENC ++ REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ ++ REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ ++ REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ ++#endif ++ dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK; ++ dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK; ++ dss.dispc_clk_source = DSS_CLK_SRC_FCK; ++ dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK; ++ dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK; ++ ++ rev = dss_read_reg(DSS_REVISION); ++ pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); ++ ++ dss_runtime_put(); ++ ++ return 0; ++} ++ + static int dss_probe(struct platform_device *pdev) + { + const struct soc_device_attribute *soc; + struct component_match *match = NULL; ++ struct resource *dss_mem; + int r; + + dss.pdev = pdev; +@@ -1451,20 +1405,69 @@ static int dss_probe(struct platform_device *pdev) + else + dss.feat = of_match_device(dss_of_match, &pdev->dev)->data; + +- r = dss_initialize_debugfs(); ++ /* Map I/O registers, get and setup clocks. */ ++ dss_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ dss.base = devm_ioremap_resource(&pdev->dev, dss_mem); ++ if (IS_ERR(dss.base)) ++ return PTR_ERR(dss.base); ++ ++ r = dss_get_clocks(); + if (r) + return r; + +- /* add all the child devices as components */ ++ r = dss_setup_default_clock(); ++ if (r) ++ goto err_put_clocks; ++ ++ /* Setup the video PLLs and the DPI and SDI ports. */ ++ r = dss_video_pll_probe(pdev); ++ if (r) ++ goto err_put_clocks; ++ ++ r = dss_init_ports(pdev); ++ if (r) ++ goto err_uninit_plls; ++ ++ /* Enable runtime PM and probe the hardware. */ ++ pm_runtime_enable(&pdev->dev); ++ ++ r = dss_probe_hardware(); ++ if (r) ++ goto err_pm_runtime_disable; ++ ++ /* Initialize debugfs. 
*/ ++ r = dss_initialize_debugfs(); ++ if (r) ++ goto err_pm_runtime_disable; ++ ++ dss_debugfs_create_file("dss", dss_dump_regs); ++ ++ /* Add all the child devices as components. */ + device_for_each_child(&pdev->dev, &match, dss_add_child_component); + + r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match); +- if (r) { +- dss_uninitialize_debugfs(); +- return r; +- } ++ if (r) ++ goto err_uninit_debugfs; + + return 0; ++ ++err_uninit_debugfs: ++ dss_uninitialize_debugfs(); ++ ++err_pm_runtime_disable: ++ pm_runtime_disable(&pdev->dev); ++ dss_uninit_ports(pdev); ++ ++err_uninit_plls: ++ if (dss.video1_pll) ++ dss_video_pll_uninit(dss.video1_pll); ++ if (dss.video2_pll) ++ dss_video_pll_uninit(dss.video2_pll); ++ ++err_put_clocks: ++ dss_put_clocks(); ++ ++ return r; + } + + static int dss_remove(struct platform_device *pdev) +@@ -1473,6 +1476,18 @@ static int dss_remove(struct platform_device *pdev) + + dss_uninitialize_debugfs(); + ++ pm_runtime_disable(&pdev->dev); ++ ++ dss_uninit_ports(pdev); ++ ++ if (dss.video1_pll) ++ dss_video_pll_uninit(dss.video1_pll); ++ ++ if (dss.video2_pll) ++ dss_video_pll_uninit(dss.video2_pll); ++ ++ dss_put_clocks(); ++ + return 0; + } + +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c +index 234af81fb3d0..fc56d033febe 100644 +--- a/drivers/gpu/drm/panel/panel-simple.c ++++ b/drivers/gpu/drm/panel/panel-simple.c +@@ -1561,7 +1561,7 @@ static const struct panel_desc ontat_yx700wv03 = { + .width = 154, + .height = 83, + }, +- .bus_format = MEDIA_BUS_FMT_RGB888_1X24, ++ .bus_format = MEDIA_BUS_FMT_RGB666_1X18, + }; + + static const struct drm_display_mode ortustech_com43h4m85ulc_mode = { +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c +index 12d22f3db1af..6a4b8c98a719 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c +@@ -59,11 +59,8 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds, + + rcar_lvds_write(lvds, LVDPLLCR, pllcr); + +- /* +- * Select the input, hardcode mode 0, enable LVDS operation and turn +- * bias circuitry on. +- */ +- lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_BEN | LVDCR0_LVEN; ++ /* Select the input and set the LVDS mode. */ ++ lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT; + if (rcrtc->index == 2) + lvdcr0 |= LVDCR0_DUSEL; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); +@@ -74,6 +71,10 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds, + LVDCR1_CHSTBY_GEN2(1) | LVDCR1_CHSTBY_GEN2(0) | + LVDCR1_CLKSTBY_GEN2); + ++ /* Enable LVDS operation and turn bias circuitry on. */ ++ lvdcr0 |= LVDCR0_BEN | LVDCR0_LVEN; ++ rcar_lvds_write(lvds, LVDCR0, lvdcr0); ++ + /* + * Turn the PLL on, wait for the startup delay, and turn the output + * on. +@@ -95,7 +96,7 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds, + u32 lvdcr0; + u32 pllcr; + +- /* PLL clock configuration */ ++ /* Set the PLL clock configuration and LVDS mode. */ + if (freq < 42000) + pllcr = LVDPLLCR_PLLDIVCNT_42M; + else if (freq < 85000) +@@ -107,6 +108,9 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds, + + rcar_lvds_write(lvds, LVDPLLCR, pllcr); + ++ lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT; ++ rcar_lvds_write(lvds, LVDCR0, lvdcr0); ++ + /* Turn all the channels on. 
*/ + rcar_lvds_write(lvds, LVDCR1, + LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) | +@@ -117,7 +121,7 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds, + * Turn the PLL on, set it to LVDS normal mode, wait for the startup + * delay and turn the output on. + */ +- lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_PLLON; ++ lvdcr0 |= LVDCR0_PLLON; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + + lvdcr0 |= LVDCR0_PWD; +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +index 1869c8bb76c8..bde65186a3c3 100644 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +@@ -262,7 +262,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, + * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). + */ + vma->vm_flags &= ~VM_PFNMAP; +- vma->vm_pgoff = 0; + + if (rk_obj->pages) + ret = rockchip_drm_gem_object_mmap_iommu(obj, vma); +@@ -297,6 +296,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) + if (ret) + return ret; + ++ /* ++ * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the ++ * whole buffer from the start. ++ */ ++ vma->vm_pgoff = 0; ++ + obj = vma->vm_private_data; + + return rockchip_drm_gem_object_mmap(obj, vma); +diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c +index d401156490f3..4460ca46a350 100644 +--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c ++++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c +@@ -129,10 +129,13 @@ static int sun4i_dclk_get_phase(struct clk_hw *hw) + static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees) + { + struct sun4i_dclk *dclk = hw_to_dclk(hw); ++ u32 val = degrees / 120; ++ ++ val <<= 28; + + regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG, + GENMASK(29, 28), +- degrees / 120); ++ val); + + return 0; + } +diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c +index 597d563d636a..0598b4c18c25 100644 +--- a/drivers/gpu/drm/tegra/drm.c ++++ b/drivers/gpu/drm/tegra/drm.c +@@ -250,6 +250,7 @@ static void tegra_drm_unload(struct drm_device *drm) + + drm_kms_helper_poll_fini(drm); + tegra_drm_fb_exit(drm); ++ drm_atomic_helper_shutdown(drm); + drm_mode_config_cleanup(drm); + + err = host1x_device_exit(device); +diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c +index b94bd5440e57..ed9c443bb8a1 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c ++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c +@@ -196,6 +196,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data, + case VIRTGPU_PARAM_3D_FEATURES: + value = vgdev->has_virgl_3d == true ? 
1 : 0; + break; ++ case VIRTGPU_PARAM_CAPSET_QUERY_FIX: ++ value = 1; ++ break; + default: + return -EINVAL; + } +@@ -471,7 +474,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, + { + struct virtio_gpu_device *vgdev = dev->dev_private; + struct drm_virtgpu_get_caps *args = data; +- int size; ++ unsigned size, host_caps_size; + int i; + int found_valid = -1; + int ret; +@@ -480,6 +483,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, + if (vgdev->num_capsets == 0) + return -ENOSYS; + ++ /* don't allow userspace to pass 0 */ ++ if (args->size == 0) ++ return -EINVAL; ++ + spin_lock(&vgdev->display_info_lock); + for (i = 0; i < vgdev->num_capsets; i++) { + if (vgdev->capsets[i].id == args->cap_set_id) { +@@ -495,11 +502,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, + return -EINVAL; + } + +- size = vgdev->capsets[found_valid].max_size; +- if (args->size > size) { +- spin_unlock(&vgdev->display_info_lock); +- return -EINVAL; +- } ++ host_caps_size = vgdev->capsets[found_valid].max_size; ++ /* only copy to user the minimum of the host caps size or the guest caps size */ ++ size = min(args->size, host_caps_size); + + list_for_each_entry(cache_ent, &vgdev->cap_cache, head) { + if (cache_ent->id == args->cap_set_id && +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h +index 557a033fb610..8545488aa0cf 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h +@@ -135,17 +135,24 @@ + + #else + +-/* In the 32-bit version of this macro, we use "m" because there is no +- * more register left for bp ++/* ++ * In the 32-bit version of this macro, we store bp in a memory location ++ * because we've ran out of registers. ++ * Now we can't reference that memory location while we've modified ++ * %esp or %ebp, so we first push it on the stack, just before we push ++ * %ebp, and then when we need it we read it from the stack where we ++ * just pushed it. + */ + #define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \ + port_num, magic, bp, \ + eax, ebx, ecx, edx, si, di) \ + ({ \ +- asm volatile ("push %%ebp;" \ +- "mov %12, %%ebp;" \ ++ asm volatile ("push %12;" \ ++ "push %%ebp;" \ ++ "mov 0x04(%%esp), %%ebp;" \ + "rep outsb;" \ +- "pop %%ebp;" : \ ++ "pop %%ebp;" \ ++ "add $0x04, %%esp;" : \ + "=a"(eax), \ + "=b"(ebx), \ + "=c"(ecx), \ +@@ -167,10 +174,12 @@ + port_num, magic, bp, \ + eax, ebx, ecx, edx, si, di) \ + ({ \ +- asm volatile ("push %%ebp;" \ +- "mov %12, %%ebp;" \ ++ asm volatile ("push %12;" \ ++ "push %%ebp;" \ ++ "mov 0x04(%%esp), %%ebp;" \ + "rep insb;" \ +- "pop %%ebp" : \ ++ "pop %%ebp;" \ ++ "add $0x04, %%esp;" : \ + "=a"(eax), \ + "=b"(ebx), \ + "=c"(ecx), \ +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +index aacce4753a62..205a5f4b58f3 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +@@ -453,7 +453,11 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) + { + struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); ++ struct drm_crtc *crtc = plane->state->crtc ? 
++ plane->state->crtc : old_state->crtc; + ++ if (vps->dmabuf) ++ vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false); + vmw_dmabuf_unreference(&vps->dmabuf); + vps->dmabuf_size = 0; + +@@ -491,10 +495,17 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, + } + + size = new_state->crtc_w * new_state->crtc_h * 4; ++ dev_priv = vmw_priv(crtc->dev); + + if (vps->dmabuf) { +- if (vps->dmabuf_size == size) +- return 0; ++ if (vps->dmabuf_size == size) { ++ /* ++ * Note that this might temporarily up the pin-count ++ * to 2, until cleanup_fb() is called. ++ */ ++ return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, ++ true); ++ } + + vmw_dmabuf_unreference(&vps->dmabuf); + vps->dmabuf_size = 0; +@@ -504,7 +515,6 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, + if (!vps->dmabuf) + return -ENOMEM; + +- dev_priv = vmw_priv(crtc->dev); + vmw_svga_enable(dev_priv); + + /* After we have alloced the backing store might not be able to +@@ -515,13 +525,18 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, + &vmw_vram_ne_placement, + false, &vmw_dmabuf_bo_free); + vmw_overlay_resume_all(dev_priv); +- +- if (ret != 0) ++ if (ret) { + vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */ +- else +- vps->dmabuf_size = size; ++ return ret; ++ } + +- return ret; ++ vps->dmabuf_size = size; ++ ++ /* ++ * TTM already thinks the buffer is pinned, but make sure the ++ * pin_count is upped. ++ */ ++ return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true); + } + + +diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c +index c860a7997cb5..1d1612e28854 100644 +--- a/drivers/gpu/ipu-v3/ipu-pre.c ++++ b/drivers/gpu/ipu-v3/ipu-pre.c +@@ -125,11 +125,14 @@ ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index) + if (pre_node == pre->dev->of_node) { + mutex_unlock(&ipu_pre_list_mutex); + device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE); ++ of_node_put(pre_node); + return pre; + } + } + mutex_unlock(&ipu_pre_list_mutex); + ++ of_node_put(pre_node); ++ + return NULL; + } + +diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c +index 0013ca9f72c8..1c36fa3a90e2 100644 +--- a/drivers/gpu/ipu-v3/ipu-prg.c ++++ b/drivers/gpu/ipu-v3/ipu-prg.c +@@ -101,11 +101,14 @@ ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id) + mutex_unlock(&ipu_prg_list_mutex); + device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE); + prg->id = ipu_id; ++ of_node_put(prg_node); + return prg; + } + } + mutex_unlock(&ipu_prg_list_mutex); + ++ of_node_put(prg_node); ++ + return NULL; + } + +@@ -249,10 +252,14 @@ void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan) + { + int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num); + struct ipu_prg *prg = ipu_chan->ipu->prg_priv; +- struct ipu_prg_channel *chan = &prg->chan[prg_chan]; ++ struct ipu_prg_channel *chan; + u32 val; + +- if (!chan->enabled || prg_chan < 0) ++ if (prg_chan < 0) ++ return; ++ ++ chan = &prg->chan[prg_chan]; ++ if (!chan->enabled) + return; + + clk_prepare_enable(prg->clk_ipg); +@@ -279,13 +286,15 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan, + { + int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num); + struct ipu_prg *prg = ipu_chan->ipu->prg_priv; +- struct ipu_prg_channel *chan = &prg->chan[prg_chan]; ++ struct ipu_prg_channel *chan; + u32 val; + int ret; + + if (prg_chan < 0) + return prg_chan; + ++ chan = &prg->chan[prg_chan]; ++ + if (chan->enabled) { + ipu_pre_update(prg->pres[chan->used_pre], *eba); + return 0; +diff --git 
a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c +index c219e43b8f02..f5f3f8cf57ea 100644 +--- a/drivers/hwmon/nct6775.c ++++ b/drivers/hwmon/nct6775.c +@@ -1469,7 +1469,7 @@ static void nct6775_update_pwm(struct device *dev) + duty_is_dc = data->REG_PWM_MODE[i] && + (nct6775_read_value(data, data->REG_PWM_MODE[i]) + & data->PWM_MODE_MASK[i]); +- data->pwm_mode[i] = duty_is_dc; ++ data->pwm_mode[i] = !duty_is_dc; + + fanmodecfg = nct6775_read_value(data, data->REG_FAN_MODE[i]); + for (j = 0; j < ARRAY_SIZE(data->REG_PWM); j++) { +@@ -2350,7 +2350,7 @@ show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf) + struct nct6775_data *data = nct6775_update_device(dev); + struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr); + +- return sprintf(buf, "%d\n", !data->pwm_mode[sattr->index]); ++ return sprintf(buf, "%d\n", data->pwm_mode[sattr->index]); + } + + static ssize_t +@@ -2371,9 +2371,9 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr, + if (val > 1) + return -EINVAL; + +- /* Setting DC mode is not supported for all chips/channels */ ++ /* Setting DC mode (0) is not supported for all chips/channels */ + if (data->REG_PWM_MODE[nr] == 0) { +- if (val) ++ if (!val) + return -EINVAL; + return count; + } +@@ -2382,7 +2382,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr, + data->pwm_mode[nr] = val; + reg = nct6775_read_value(data, data->REG_PWM_MODE[nr]); + reg &= ~data->PWM_MODE_MASK[nr]; +- if (val) ++ if (!val) + reg |= data->PWM_MODE_MASK[nr]; + nct6775_write_value(data, data->REG_PWM_MODE[nr], reg); + mutex_unlock(&data->update_lock); +diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c +index 00d6995af4c2..8a44e94d5679 100644 +--- a/drivers/hwmon/pmbus/adm1275.c ++++ b/drivers/hwmon/pmbus/adm1275.c +@@ -154,7 +154,7 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg) + const struct adm1275_data *data = to_adm1275_data(info); + int ret = 0; + +- if (page) ++ if (page > 0) + return -ENXIO; + + switch (reg) { +@@ -240,7 +240,7 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg, + const struct adm1275_data *data = to_adm1275_data(info); + int ret; + +- if (page) ++ if (page > 0) + return -ENXIO; + + switch (reg) { +diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c +index dd4883a19045..e951f9b87abb 100644 +--- a/drivers/hwmon/pmbus/max8688.c ++++ b/drivers/hwmon/pmbus/max8688.c +@@ -45,7 +45,7 @@ static int max8688_read_word_data(struct i2c_client *client, int page, int reg) + { + int ret; + +- if (page) ++ if (page > 0) + return -ENXIO; + + switch (reg) { +diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c +index 6ea62c62ff27..9cdb3fbc8c1f 100644 +--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c ++++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c +@@ -315,7 +315,7 @@ static void debug_dump_regs(struct debug_drvdata *drvdata) + } + + pc = debug_adjust_pc(drvdata); +- dev_emerg(dev, " EDPCSR: [<%p>] %pS\n", (void *)pc, (void *)pc); ++ dev_emerg(dev, " EDPCSR: [<%px>] %pS\n", (void *)pc, (void *)pc); + + if (drvdata->edcidsr_present) + dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr); +diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c +index 1a023e30488c..c1793313bb08 100644 +--- a/drivers/hwtracing/intel_th/core.c ++++ b/drivers/hwtracing/intel_th/core.c +@@ -935,7 +935,7 @@ 
EXPORT_SYMBOL_GPL(intel_th_trace_disable); + int intel_th_set_output(struct intel_th_device *thdev, + unsigned int master) + { +- struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent); ++ struct intel_th_device *hub = to_intel_th_hub(thdev); + struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver); + + if (!hubdrv->set_output) +diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c +index a832c45276a4..b0fb97823d6a 100644 +--- a/drivers/i2c/busses/i2c-mv64xxx.c ++++ b/drivers/i2c/busses/i2c-mv64xxx.c +@@ -844,12 +844,16 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, + */ + if (of_device_is_compatible(np, "marvell,mv78230-i2c")) { + drv_data->offload_enabled = true; +- drv_data->errata_delay = true; ++ /* The delay is only needed in standard mode (100kHz) */ ++ if (bus_freq <= 100000) ++ drv_data->errata_delay = true; + } + + if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) { + drv_data->offload_enabled = false; +- drv_data->errata_delay = true; ++ /* The delay is only needed in standard mode (100kHz) */ ++ if (bus_freq <= 100000) ++ drv_data->errata_delay = true; + } + + if (of_device_is_compatible(np, "allwinner,sun6i-a31-i2c")) +diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c +index 6ff0be8cbdc9..4de45db76756 100644 +--- a/drivers/ide/ide-cd.c ++++ b/drivers/ide/ide-cd.c +@@ -1614,6 +1614,8 @@ static int idecd_open(struct block_device *bdev, fmode_t mode) + struct cdrom_info *info; + int rc = -ENXIO; + ++ check_disk_change(bdev); ++ + mutex_lock(&ide_cd_mutex); + info = ide_cd_get(bdev->bd_disk); + if (!info) +diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c +index 45f2f095f793..4eb72ff539fc 100644 +--- a/drivers/infiniband/core/multicast.c ++++ b/drivers/infiniband/core/multicast.c +@@ -724,21 +724,19 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num, + { + int ret; + u16 gid_index; +- u8 p; +- +- if (rdma_protocol_roce(device, port_num)) { +- ret = ib_find_cached_gid_by_port(device, &rec->port_gid, +- gid_type, port_num, +- ndev, +- &gid_index); +- } else if (rdma_protocol_ib(device, port_num)) { +- ret = ib_find_cached_gid(device, &rec->port_gid, +- IB_GID_TYPE_IB, NULL, &p, +- &gid_index); +- } else { +- ret = -EINVAL; +- } + ++ /* GID table is not based on the netdevice for IB link layer, ++ * so ignore ndev during search. 
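++	 * (RoCE GID entries, by contrast, are bound to a netdevice, so the
++	 * ndev argument is only meaningful for the RoCE lookup below.)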
++ */ ++ if (rdma_protocol_ib(device, port_num)) ++ ndev = NULL; ++ else if (!rdma_protocol_roce(device, port_num)) ++ return -EINVAL; ++ ++ ret = ib_find_cached_gid_by_port(device, &rec->port_gid, ++ gid_type, port_num, ++ ndev, ++ &gid_index); + if (ret) + return ret; + +diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c +index 9cb801d1fe54..1984d6cee3e0 100644 +--- a/drivers/infiniband/core/rdma_core.c ++++ b/drivers/infiniband/core/rdma_core.c +@@ -486,12 +486,13 @@ int rdma_explicit_destroy(struct ib_uobject *uobject) + ret = uobject->type->type_class->remove_commit(uobject, + RDMA_REMOVE_DESTROY); + if (ret) +- return ret; ++ goto out; + + uobject->type = &null_obj_type; + ++out: + up_read(&ucontext->cleanup_rwsem); +- return 0; ++ return ret; + } + + static void alloc_commit_idr_uobject(struct ib_uobject *uobj) +diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c +index ab5e1024fea9..b81d2597f563 100644 +--- a/drivers/infiniband/core/sa_query.c ++++ b/drivers/infiniband/core/sa_query.c +@@ -1291,10 +1291,9 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num, + + resolved_dev = dev_get_by_index(dev_addr.net, + dev_addr.bound_dev_if); +- if (resolved_dev->flags & IFF_LOOPBACK) { +- dev_put(resolved_dev); +- resolved_dev = idev; +- dev_hold(resolved_dev); ++ if (!resolved_dev) { ++ dev_put(idev); ++ return -ENODEV; + } + ndev = ib_get_ndev_from_path(rec); + rcu_read_lock(); +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c +index c65f0e8ecbd6..e47baf0950e3 100644 +--- a/drivers/infiniband/core/ucma.c ++++ b/drivers/infiniband/core/ucma.c +@@ -1315,7 +1315,7 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + +- if (unlikely(cmd.optval > KMALLOC_MAX_SIZE)) ++ if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) + return -EINVAL; + + optval = memdup_user((void __user *) (unsigned long) cmd.optval, +diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c +index 9a4e899d94b3..2b6c9b516070 100644 +--- a/drivers/infiniband/core/umem.c ++++ b/drivers/infiniband/core/umem.c +@@ -119,7 +119,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, + umem->length = size; + umem->address = addr; + umem->page_shift = PAGE_SHIFT; +- umem->pid = get_task_pid(current, PIDTYPE_PID); + /* + * We ask for writable memory if any of the following + * access flags are set. 
"Local write" and "remote write" +@@ -132,7 +131,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, + IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); + + if (access & IB_ACCESS_ON_DEMAND) { +- put_pid(umem->pid); + ret = ib_umem_odp_get(context, umem, access); + if (ret) { + kfree(umem); +@@ -148,7 +146,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, + + page_list = (struct page **) __get_free_page(GFP_KERNEL); + if (!page_list) { +- put_pid(umem->pid); + kfree(umem); + return ERR_PTR(-ENOMEM); + } +@@ -231,7 +228,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, + if (ret < 0) { + if (need_release) + __ib_umem_release(context->device, umem, 0); +- put_pid(umem->pid); + kfree(umem); + } else + current->mm->pinned_vm = locked; +@@ -274,8 +270,7 @@ void ib_umem_release(struct ib_umem *umem) + + __ib_umem_release(umem->context->device, umem, 1); + +- task = get_pid_task(umem->pid, PIDTYPE_PID); +- put_pid(umem->pid); ++ task = get_pid_task(umem->context->tgid, PIDTYPE_PID); + if (!task) + goto out; + mm = get_task_mm(task); +diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c +index 8f2dc79ad4ec..5e9f72ea4579 100644 +--- a/drivers/infiniband/core/uverbs_ioctl.c ++++ b/drivers/infiniband/core/uverbs_ioctl.c +@@ -59,6 +59,9 @@ static int uverbs_process_attr(struct ib_device *ibdev, + return 0; + } + ++ if (test_bit(attr_id, attr_bundle_h->valid_bitmap)) ++ return -EINVAL; ++ + spec = &attr_spec_bucket->attrs[attr_id]; + e = &elements[attr_id]; + e->uattr = uattr_ptr; +diff --git a/drivers/infiniband/core/uverbs_ioctl_merge.c b/drivers/infiniband/core/uverbs_ioctl_merge.c +index 76ddb6564578..48a99dce976c 100644 +--- a/drivers/infiniband/core/uverbs_ioctl_merge.c ++++ b/drivers/infiniband/core/uverbs_ioctl_merge.c +@@ -114,6 +114,7 @@ static size_t get_elements_above_id(const void **iters, + short min = SHRT_MAX; + const void *elem; + int i, j, last_stored = -1; ++ unsigned int equal_min = 0; + + for_each_element(elem, i, j, elements, num_elements, num_offset, + data_offset) { +@@ -136,6 +137,10 @@ static size_t get_elements_above_id(const void **iters, + */ + iters[last_stored == i ? num_iters - 1 : num_iters++] = elem; + last_stored = i; ++ if (min == GET_ID(id)) ++ equal_min++; ++ else ++ equal_min = 1; + min = GET_ID(id); + } + +@@ -146,15 +151,10 @@ static size_t get_elements_above_id(const void **iters, + * Therefore, we need to clean the beginning of the array to make sure + * all ids of final elements are equal to min. + */ +- for (i = num_iters - 1; i >= 0 && +- GET_ID(*(u16 *)(iters[i] + id_offset)) == min; i--) +- ; +- +- num_iters -= i + 1; +- memmove(iters, iters + i + 1, sizeof(*iters) * num_iters); ++ memmove(iters, iters + num_iters - equal_min, sizeof(*iters) * equal_min); + + *min_id = min; +- return num_iters; ++ return equal_min; + } + + #define find_max_element_entry_id(num_elements, elements, num_objects_fld, \ +@@ -322,7 +322,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me + hash = kzalloc(sizeof(*hash) + + ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1), + sizeof(long)) + +- BITS_TO_LONGS(attr_max_bucket) * sizeof(long), ++ BITS_TO_LONGS(attr_max_bucket + 1) * sizeof(long), + GFP_KERNEL); + if (!hash) { + res = -ENOMEM; +@@ -509,7 +509,7 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_ + * first handler which != NULL. 
This also defines the + * set of flags used for this handler. + */ +- for (i = num_object_defs - 1; ++ for (i = num_method_defs - 1; + i >= 0 && !method_defs[i]->handler; i--) + ; + hash->methods[min_id++] = method; +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c +index b210495ff33c..ef9135aa392c 100644 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c +@@ -1180,7 +1180,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, + rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to create HW QP"); +- goto fail; ++ goto free_umem; + } + } + +@@ -1208,6 +1208,13 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, + return &qp->ib_qp; + qp_destroy: + bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); ++free_umem: ++ if (udata) { ++ if (qp->rumem) ++ ib_umem_release(qp->rumem); ++ if (qp->sumem) ++ ib_umem_release(qp->sumem); ++ } + fail: + kfree(qp); + return ERR_PTR(rc); +@@ -1956,10 +1963,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr, + wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; + wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; + ++ /* Need unconditional fence for local invalidate ++ * opcode to work as expected. ++ */ ++ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; ++ + if (wr->send_flags & IB_SEND_SIGNALED) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; +- if (wr->send_flags & IB_SEND_FENCE) +- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + if (wr->send_flags & IB_SEND_SOLICITED) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; + +@@ -1980,8 +1990,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr, + wqe->frmr.levels = qplib_frpl->hwq.level + 1; + wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; + +- if (wr->wr.send_flags & IB_SEND_FENCE) +- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; ++ /* Need unconditional fence for reg_mr ++ * opcode to function as expected. 
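++	 * A fenced WQE makes the HW wait for all prior WQEs on the QP
++	 * to complete before executing it.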
++ */ ++ ++ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; ++ + if (wr->wr.send_flags & IB_SEND_SIGNALED) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; + +diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c +index e7450ea92aa9..bf811b23bc95 100644 +--- a/drivers/infiniband/hw/bnxt_re/main.c ++++ b/drivers/infiniband/hw/bnxt_re/main.c +@@ -1240,9 +1240,12 @@ static void bnxt_re_task(struct work_struct *work) + switch (re_work->event) { + case NETDEV_REGISTER: + rc = bnxt_re_ib_reg(rdev); +- if (rc) ++ if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to register with IB: %#x", rc); ++ bnxt_re_remove_one(rdev); ++ bnxt_re_dev_unreg(rdev); ++ } + break; + case NETDEV_UP: + bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, +@@ -1398,6 +1401,11 @@ static void __exit bnxt_re_mod_exit(void) + + list_for_each_entry(rdev, &to_be_deleted, list) { + dev_info(rdev_to_dev(rdev), "Unregistering Device"); ++ /* ++ * Flush out any scheduled tasks before destroying the ++ * resources ++ */ ++ flush_workqueue(bnxt_re_wq); + bnxt_re_dev_stop(rdev); + bnxt_re_ib_unreg(rdev, true); + bnxt_re_remove_one(rdev); +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +index 2bdb1562bd21..8d91733009a4 100644 +--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c ++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +@@ -457,7 +457,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, + int rc; + + RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); +- ++ /* Supply (log-base-2-of-host-page-size - base-page-shift) ++ * to bono to adjust the doorbell page sizes. ++ */ ++ req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT - ++ RCFW_DBR_BASE_PAGE_SHIFT); + /* + * VFs need not setup the HW context area, PF + * shall setup this area for VF. Skipping the +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +index 85b16da287f9..7c85e3c4445b 100644 +--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h ++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +@@ -49,6 +49,7 @@ + #define RCFW_COMM_SIZE 0x104 + + #define RCFW_DBR_PCI_BAR_REGION 2 ++#define RCFW_DBR_BASE_PAGE_SHIFT 12 + + #define RCFW_CMD_PREP(req, CMD, cmd_flags) \ + do { \ +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c +index e277e54a05eb..9536de8c5fb8 100644 +--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c ++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c +@@ -130,7 +130,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, + attr->max_pkey = le32_to_cpu(sb->max_pkeys); + + attr->max_inline_data = le32_to_cpu(sb->max_inline_data); +- attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE; ++ attr->l2_db_size = (sb->l2_db_space_size + 1) * ++ (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); + attr->max_sgid = le32_to_cpu(sb->max_gid); + + strlcpy(attr->fw_ver, "20.6.28.0", sizeof(attr->fw_ver)); +diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h +index eeb55b2db57e..480f592e5b4b 100644 +--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h ++++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h +@@ -1734,7 +1734,30 @@ struct cmdq_initialize_fw { + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4) +- __le16 reserved16; ++ /* This value is (log-base-2-of-DBR-page-size - 12). ++ * 0 for 4KB. HW supported values are enumerated below. 
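++	 * For example, a 64KB host page size gives 16 - 12 = 4 (PG_64K).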
++ */ ++ __le16 log2_dbr_pg_size; ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0 ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL ++ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \ ++ CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M + __le64 qpc_page_dir; + __le64 mrw_page_dir; + __le64 srq_page_dir; +diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c +index 82114ba86041..259562282668 100644 +--- a/drivers/infiniband/hw/hfi1/chip.c ++++ b/drivers/infiniband/hw/hfi1/chip.c +@@ -5945,6 +5945,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd, + u64 status; + u32 sw_index; + int i = 0; ++ unsigned long irq_flags; + + sw_index = dd->hw_to_sw[hw_context]; + if (sw_index >= dd->num_send_contexts) { +@@ -5954,10 +5955,12 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd, + return; + } + sci = &dd->send_contexts[sw_index]; ++ spin_lock_irqsave(&dd->sc_lock, irq_flags); + sc = sci->sc; + if (!sc) { + dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__, + sw_index, hw_context); ++ spin_unlock_irqrestore(&dd->sc_lock, irq_flags); + return; + } + +@@ -5979,6 +5982,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd, + */ + if (sc->type != SC_USER) + queue_work(dd->pport->hfi1_wq, &sc->halt_work); ++ spin_unlock_irqrestore(&dd->sc_lock, irq_flags); + + /* + * Update the counters for the corresponding status bits. +diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c +index cab796341697..d92f639c287f 100644 +--- a/drivers/infiniband/hw/mlx4/cq.c ++++ b/drivers/infiniband/hw/mlx4/cq.c +@@ -597,6 +597,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct + wc->dlid_path_bits = 0; + + if (is_eth) { ++ wc->slid = 0; + wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid); + memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4); + memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2); +@@ -845,7 +846,6 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, + } + } + +- wc->slid = be16_to_cpu(cqe->rlid); + g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); + wc->src_qp = g_mlpath_rqpn & 0xffffff; + wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; +@@ -854,6 +854,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, + wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status, + cqe->checksum) ? 
IB_WC_IP_CSUM_OK : 0; + if (is_eth) { ++ wc->slid = 0; + wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; + if (be32_to_cpu(cqe->vlan_my_qpn) & + MLX4_CQE_CVLAN_PRESENT_MASK) { +@@ -865,6 +866,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, + memcpy(wc->smac, cqe->smac, ETH_ALEN); + wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); + } else { ++ wc->slid = be16_to_cpu(cqe->rlid); + wc->sl = be16_to_cpu(cqe->sl_vid) >> 12; + wc->vlan_id = 0xffff; + } +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c +index 8c681a36e6c7..e2beb182d54c 100644 +--- a/drivers/infiniband/hw/mlx4/main.c ++++ b/drivers/infiniband/hw/mlx4/main.c +@@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids, + gid_tbl[i].version = 2; + if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid)) + gid_tbl[i].type = 1; +- else +- memset(&gid_tbl[i].gid, 0, 12); + } + } + +@@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device, + if (!gids) { + ret = -ENOMEM; + } else { +- for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) +- memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); ++ for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) { ++ memcpy(&gids[i].gid, ++ &port_gid_table->gids[i].gid, ++ sizeof(union ib_gid)); ++ gids[i].gid_type = ++ port_gid_table->gids[i].gid_type; ++ } + } + } + spin_unlock_bh(&iboe->lock); +diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c +index faedc080a5e6..d804880d637a 100644 +--- a/drivers/infiniband/hw/mlx5/cq.c ++++ b/drivers/infiniband/hw/mlx5/cq.c +@@ -224,7 +224,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, + wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); + break; + } +- wc->slid = be16_to_cpu(cqe->slid); + wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; + wc->dlid_path_bits = cqe->ml_path; + g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; +@@ -239,10 +238,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, + } + + if (ll != IB_LINK_LAYER_ETHERNET) { ++ wc->slid = be16_to_cpu(cqe->slid); + wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; + return; + } + ++ wc->slid = 0; + vlan_present = cqe->l4_l3_hdr_type & 0x1; + roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3; + if (vlan_present) { +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c +index fb5302ee57c7..ab70194a73db 100644 +--- a/drivers/infiniband/hw/mlx5/main.c ++++ b/drivers/infiniband/hw/mlx5/main.c +@@ -270,6 +270,9 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, + if (err) + return err; + ++ props->active_width = IB_WIDTH_4X; ++ props->active_speed = IB_SPEED_QDR; ++ + translate_eth_proto_oper(eth_prot_oper, &props->active_speed, + &props->active_width); + +diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c +index 749fe906a5b6..ef9ee6c328a1 100644 +--- a/drivers/infiniband/hw/mlx5/qp.c ++++ b/drivers/infiniband/hw/mlx5/qp.c +@@ -2881,8 +2881,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, + goto out; + + if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || +- !optab[mlx5_cur][mlx5_new]) ++ !optab[mlx5_cur][mlx5_new]) { ++ err = -EINVAL; + goto out; ++ } + + op = optab[mlx5_cur][mlx5_new]; + optpar = ib_mask_to_mlx5_opt(attr_mask); +diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c +index 97d033f51dc9..ddb05b42e5e6 100644 +--- a/drivers/infiniband/hw/qedr/main.c ++++ 
b/drivers/infiniband/hw/qedr/main.c +@@ -782,7 +782,8 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, + + dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev); + if (!dev->num_cnq) { +- DP_ERR(dev, "not enough CNQ resources.\n"); ++ DP_ERR(dev, "Failed. At least one CNQ is required.\n"); ++ rc = -ENOMEM; + goto init_err; + } + +diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c +index 769ac07c3c8e..7f4cc9336442 100644 +--- a/drivers/infiniband/hw/qedr/verbs.c ++++ b/drivers/infiniband/hw/qedr/verbs.c +@@ -1663,14 +1663,15 @@ static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph) + + static int qedr_update_qp_state(struct qedr_dev *dev, + struct qedr_qp *qp, ++ enum qed_roce_qp_state cur_state, + enum qed_roce_qp_state new_state) + { + int status = 0; + +- if (new_state == qp->state) ++ if (new_state == cur_state) + return 0; + +- switch (qp->state) { ++ switch (cur_state) { + case QED_ROCE_QP_STATE_RESET: + switch (new_state) { + case QED_ROCE_QP_STATE_INIT: +@@ -1774,6 +1775,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev); + const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); + enum ib_qp_state old_qp_state, new_qp_state; ++ enum qed_roce_qp_state cur_state; + int rc = 0; + + DP_DEBUG(dev, QEDR_MSG_QP, +@@ -1903,18 +1905,23 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + SET_FIELD(qp_params.modify_flags, + QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1); + +- qp_params.ack_timeout = attr->timeout; +- if (attr->timeout) { +- u32 temp; +- +- temp = 4096 * (1UL << attr->timeout) / 1000 / 1000; +- /* FW requires [msec] */ +- qp_params.ack_timeout = temp; +- } else { +- /* Infinite */ ++ /* The received timeout value is an exponent used like this: ++ * "12.7.34 LOCAL ACK TIMEOUT ++ * Value representing the transport (ACK) timeout for use by ++ * the remote, expressed as: 4.096 * 2^timeout [usec]" ++ * The FW expects timeout in msec so we need to divide the usec ++ * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2, ++ * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8). ++ * The value of zero means infinite so we use a 'max_t' to make ++ * sure that sub 1 msec values will be configured as 1 msec. ++ */ ++ if (attr->timeout) ++ qp_params.ack_timeout = ++ 1 << max_t(int, attr->timeout - 8, 0); ++ else + qp_params.ack_timeout = 0; +- } + } ++ + if (attr_mask & IB_QP_RETRY_CNT) { + SET_FIELD(qp_params.modify_flags, + QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1); +@@ -1987,13 +1994,25 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + qp->dest_qp_num = attr->dest_qp_num; + } + ++ cur_state = qp->state; ++ ++ /* Update the QP state before the actual ramrod to prevent a race with ++ * fast path. Modifying the QP state to error will cause the device to ++ * flush the CQEs and while polling the flushed CQEs will considered as ++ * a potential issue if the QP isn't in error state. 
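++	 * (A "ramrod" is the firmware command qed issues to apply the
++	 * change in hardware.)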
++ */ ++ if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI && ++ !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR) ++ qp->state = QED_ROCE_QP_STATE_ERR; ++ + if (qp->qp_type != IB_QPT_GSI) + rc = dev->ops->rdma_modify_qp(dev->rdma_ctx, + qp->qed_qp, &qp_params); + + if (attr_mask & IB_QP_STATE) { + if ((qp->qp_type != IB_QPT_GSI) && (!udata)) +- rc = qedr_update_qp_state(dev, qp, qp_params.new_state); ++ rc = qedr_update_qp_state(dev, qp, cur_state, ++ qp_params.new_state); + qp->state = qp_params.new_state; + } + +@@ -2832,6 +2851,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + + switch (wr->opcode) { + case IB_WR_SEND_WITH_IMM: ++ if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { ++ rc = -EINVAL; ++ *bad_wr = wr; ++ break; ++ } + wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM; + swqe = (struct rdma_sq_send_wqe_1st *)wqe; + swqe->wqe_size = 2; +@@ -2873,6 +2897,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + break; + + case IB_WR_RDMA_WRITE_WITH_IMM: ++ if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { ++ rc = -EINVAL; ++ *bad_wr = wr; ++ break; ++ } + wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM; + rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; + +@@ -3518,7 +3547,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) + { + struct qedr_dev *dev = get_qedr_dev(ibcq->device); + struct qedr_cq *cq = get_qedr_cq(ibcq); +- union rdma_cqe *cqe = cq->latest_cqe; ++ union rdma_cqe *cqe; + u32 old_cons, new_cons; + unsigned long flags; + int update = 0; +@@ -3535,6 +3564,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) + return qedr_gsi_poll_cq(ibcq, num_entries, wc); + + spin_lock_irqsave(&cq->cq_lock, flags); ++ cqe = cq->latest_cqe; + old_cons = qed_chain_get_cons_idx_u32(&cq->pbl); + while (num_entries && is_valid_cqe(cq, cqe)) { + struct qedr_qp *qp; +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c +index 906bacf365d4..1cbf4e407afa 100644 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.c ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c +@@ -1206,7 +1206,7 @@ int rxe_register_device(struct rxe_dev *rxe) + rxe->ndev->dev_addr); + dev->dev.dma_ops = &dma_virt_ops; + dma_coerce_mask_and_coherent(&dev->dev, +- dma_get_required_mask(dev->dev.parent)); ++ dma_get_required_mask(&dev->dev)); + + dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION; + dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c +index 99a2a57b6cfd..10190e361a13 100644 +--- a/drivers/iommu/amd_iommu.c ++++ b/drivers/iommu/amd_iommu.c +@@ -311,6 +311,8 @@ static struct iommu_dev_data *find_dev_data(u16 devid) + + if (dev_data == NULL) { + dev_data = alloc_dev_data(devid); ++ if (!dev_data) ++ return NULL; + + if (translation_pre_enabled(iommu)) + dev_data->defer_attach = true; +diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c +index 16d33ac19db0..c30f62700431 100644 +--- a/drivers/iommu/mtk_iommu.c ++++ b/drivers/iommu/mtk_iommu.c +@@ -60,7 +60,7 @@ + (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data)) + + #define REG_MMU_IVRP_PADDR 0x114 +-#define F_MMU_IVRP_PA_SET(pa, ext) (((pa) >> 1) | ((!!(ext)) << 31)) ++ + #define REG_MMU_VLD_PA_RNG 0x118 + #define F_MMU_VLD_PA_RNG(EA, SA) (((EA) << 8) | (SA)) + +@@ -532,8 +532,13 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) + F_INT_PRETETCH_TRANSATION_FIFO_FAULT; + writel_relaxed(regval, data->base + 
REG_MMU_INT_MAIN_CONTROL); + +- writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB), +- data->base + REG_MMU_IVRP_PADDR); ++ if (data->m4u_plat == M4U_MT8173) ++ regval = (data->protect_base >> 1) | (data->enable_4GB << 31); ++ else ++ regval = lower_32_bits(data->protect_base) | ++ upper_32_bits(data->protect_base); ++ writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR); ++ + if (data->enable_4GB && data->m4u_plat != M4U_MT8173) { + /* + * If 4GB mode is enabled, the validate PA range is from +@@ -688,6 +693,7 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev) + reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG); + reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0); + reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL); ++ reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR); + clk_disable_unprepare(data->bclk); + return 0; + } +@@ -710,8 +716,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev) + writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG); + writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0); + writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL); +- writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB), +- base + REG_MMU_IVRP_PADDR); ++ writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR); + if (data->m4u_dom) + writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0], + base + REG_MMU_PT_BASE_ADDR); +diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h +index b4451a1c7c2f..778498b8633f 100644 +--- a/drivers/iommu/mtk_iommu.h ++++ b/drivers/iommu/mtk_iommu.h +@@ -32,6 +32,7 @@ struct mtk_iommu_suspend_reg { + u32 ctrl_reg; + u32 int_control0; + u32 int_main_control; ++ u32 ivrp_paddr; + }; + + enum mtk_iommu_plat { +diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c +index 910b5b6f96b1..eb65b6e78d57 100644 +--- a/drivers/macintosh/rack-meter.c ++++ b/drivers/macintosh/rack-meter.c +@@ -154,8 +154,8 @@ static void rackmeter_do_pause(struct rackmeter *rm, int pause) + DBDMA_DO_STOP(rm->dma_regs); + return; + } +- memset(rdma->buf1, 0, ARRAY_SIZE(rdma->buf1)); +- memset(rdma->buf2, 0, ARRAY_SIZE(rdma->buf2)); ++ memset(rdma->buf1, 0, sizeof(rdma->buf1)); ++ memset(rdma->buf2, 0, sizeof(rdma->buf2)); + + rm->dma_buf_v->mark = 0; + +diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c +index f34ad8720756..5b63afff46d5 100644 +--- a/drivers/md/bcache/request.c ++++ b/drivers/md/bcache/request.c +@@ -651,11 +651,11 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio) + static void search_free(struct closure *cl) + { + struct search *s = container_of(cl, struct search, cl); +- bio_complete(s); + + if (s->iop.bio) + bio_put(s->iop.bio); + ++ bio_complete(s); + closure_debug_destroy(cl); + mempool_free(s, s->d->c->search); + } +diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c +index f046dedc59ab..930b00f6a3a2 100644 +--- a/drivers/md/bcache/writeback.c ++++ b/drivers/md/bcache/writeback.c +@@ -421,9 +421,15 @@ static int bch_writeback_thread(void *arg) + while (!kthread_should_stop()) { + down_write(&dc->writeback_lock); + set_current_state(TASK_INTERRUPTIBLE); +- if (!atomic_read(&dc->has_dirty) || +- (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && +- !dc->writeback_running)) { ++ /* ++ * If the bache device is detaching, skip here and continue ++ * to perform writeback. 
Otherwise, if no dirty data on cache, ++ * or there is dirty data on cache but writeback is disabled, ++ * the writeback thread should sleep here and wait for others ++ * to wake up it. ++ */ ++ if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && ++ (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) { + up_write(&dc->writeback_lock); + + if (kthread_should_stop()) { +@@ -444,6 +450,14 @@ static int bch_writeback_thread(void *arg) + cached_dev_put(dc); + SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); + bch_write_bdev_super(dc, NULL); ++ /* ++ * If bcache device is detaching via sysfs interface, ++ * writeback thread should stop after there is no dirty ++ * data on cache. BCACHE_DEV_DETACHING flag is set in ++ * bch_cached_dev_detach(). ++ */ ++ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) ++ break; + } + + up_write(&dc->writeback_lock); +diff --git a/drivers/md/md.c b/drivers/md/md.c +index e058c209bbcf..24e64b04424a 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -779,6 +779,9 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, + struct bio *bio; + int ff = 0; + ++ if (!page) ++ return; ++ + if (test_bit(Faulty, &rdev->flags)) + return; + +@@ -5434,6 +5437,7 @@ int md_run(struct mddev *mddev) + * the only valid external interface is through the md + * device. + */ ++ mddev->has_superblocks = false; + rdev_for_each(rdev, mddev) { + if (test_bit(Faulty, &rdev->flags)) + continue; +@@ -5447,6 +5451,9 @@ int md_run(struct mddev *mddev) + set_disk_ro(mddev->gendisk, 1); + } + ++ if (rdev->sb_page) ++ mddev->has_superblocks = true; ++ + /* perform some consistency tests on the device. + * We don't want the data to overlap the metadata, + * Internal Bitmap issues have been handled elsewhere. +@@ -5479,8 +5486,10 @@ int md_run(struct mddev *mddev) + } + if (mddev->sync_set == NULL) { + mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); +- if (!mddev->sync_set) +- return -ENOMEM; ++ if (!mddev->sync_set) { ++ err = -ENOMEM; ++ goto abort; ++ } + } + + spin_lock(&pers_lock); +@@ -5493,7 +5502,8 @@ int md_run(struct mddev *mddev) + else + pr_warn("md: personality for level %s is not loaded!\n", + mddev->clevel); +- return -EINVAL; ++ err = -EINVAL; ++ goto abort; + } + spin_unlock(&pers_lock); + if (mddev->level != pers->level) { +@@ -5506,7 +5516,8 @@ int md_run(struct mddev *mddev) + pers->start_reshape == NULL) { + /* This personality cannot handle reshaping... 
*/ + module_put(pers->owner); +- return -EINVAL; ++ err = -EINVAL; ++ goto abort; + } + + if (pers->sync_request) { +@@ -5580,7 +5591,7 @@ int md_run(struct mddev *mddev) + mddev->private = NULL; + module_put(pers->owner); + bitmap_destroy(mddev); +- return err; ++ goto abort; + } + if (mddev->queue) { + bool nonrot = true; +@@ -5642,6 +5653,18 @@ int md_run(struct mddev *mddev) + sysfs_notify_dirent_safe(mddev->sysfs_action); + sysfs_notify(&mddev->kobj, NULL, "degraded"); + return 0; ++ ++abort: ++ if (mddev->bio_set) { ++ bioset_free(mddev->bio_set); ++ mddev->bio_set = NULL; ++ } ++ if (mddev->sync_set) { ++ bioset_free(mddev->sync_set); ++ mddev->sync_set = NULL; ++ } ++ ++ return err; + } + EXPORT_SYMBOL_GPL(md_run); + +@@ -8006,6 +8029,7 @@ EXPORT_SYMBOL(md_done_sync); + bool md_write_start(struct mddev *mddev, struct bio *bi) + { + int did_change = 0; ++ + if (bio_data_dir(bi) != WRITE) + return true; + +@@ -8038,6 +8062,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi) + rcu_read_unlock(); + if (did_change) + sysfs_notify_dirent_safe(mddev->sysfs_state); ++ if (!mddev->has_superblocks) ++ return true; + wait_event(mddev->sb_wait, + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || + mddev->suspended); +@@ -8496,6 +8522,19 @@ void md_do_sync(struct md_thread *thread) + set_mask_bits(&mddev->sb_flags, 0, + BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); + ++ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && ++ !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && ++ mddev->delta_disks > 0 && ++ mddev->pers->finish_reshape && ++ mddev->pers->size && ++ mddev->queue) { ++ mddev_lock_nointr(mddev); ++ md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); ++ mddev_unlock(mddev); ++ set_capacity(mddev->gendisk, mddev->array_sectors); ++ revalidate_disk(mddev->gendisk); ++ } ++ + spin_lock(&mddev->lock); + if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { + /* We completed so min/max setting can be forgotten if used. */ +diff --git a/drivers/md/md.h b/drivers/md/md.h +index d8287d3cd1bf..9b0a896890ef 100644 +--- a/drivers/md/md.h ++++ b/drivers/md/md.h +@@ -462,6 +462,8 @@ struct mddev { + void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); + struct md_cluster_info *cluster_info; + unsigned int good_device_nr; /* good device num within cluster raid */ ++ ++ bool has_superblocks:1; + }; + + enum recovery_flags { +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index 788fc0800465..e4e01d3bab81 100644 +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -1813,6 +1813,17 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) + struct md_rdev *repl = + conf->mirrors[conf->raid_disks + number].rdev; + freeze_array(conf, 0); ++ if (atomic_read(&repl->nr_pending)) { ++ /* It means that some queued IO of retry_list ++ * hold repl. Thus, we cannot set replacement ++ * as NULL, avoiding rdev NULL pointer ++ * dereference in sync_request_write and ++ * handle_write_finished. 
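++			 * We return -EBUSY so the caller can retry the
++			 * removal once the pending IO has drained.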
++ */ ++ err = -EBUSY; ++ unfreeze_array(conf); ++ goto abort; ++ } + clear_bit(Replacement, &repl->flags); + p->rdev = repl; + conf->mirrors[conf->raid_disks + number].rdev = NULL; +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c +index 0d18d3b95201..5fb31ef52945 100644 +--- a/drivers/md/raid10.c ++++ b/drivers/md/raid10.c +@@ -2625,7 +2625,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) + for (m = 0; m < conf->copies; m++) { + int dev = r10_bio->devs[m].devnum; + rdev = conf->mirrors[dev].rdev; +- if (r10_bio->devs[m].bio == NULL) ++ if (r10_bio->devs[m].bio == NULL || ++ r10_bio->devs[m].bio->bi_end_io == NULL) + continue; + if (!r10_bio->devs[m].bio->bi_status) { + rdev_clear_badblocks( +@@ -2640,7 +2641,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) + md_error(conf->mddev, rdev); + } + rdev = conf->mirrors[dev].replacement; +- if (r10_bio->devs[m].repl_bio == NULL) ++ if (r10_bio->devs[m].repl_bio == NULL || ++ r10_bio->devs[m].repl_bio->bi_end_io == NULL) + continue; + + if (!r10_bio->devs[m].repl_bio->bi_status) { +@@ -4691,17 +4693,11 @@ static void raid10_finish_reshape(struct mddev *mddev) + return; + + if (mddev->delta_disks > 0) { +- sector_t size = raid10_size(mddev, 0, 0); +- md_set_array_sectors(mddev, size); + if (mddev->recovery_cp > mddev->resync_max_sectors) { + mddev->recovery_cp = mddev->resync_max_sectors; + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + } +- mddev->resync_max_sectors = size; +- if (mddev->queue) { +- set_capacity(mddev->gendisk, mddev->array_sectors); +- revalidate_disk(mddev->gendisk); +- } ++ mddev->resync_max_sectors = mddev->array_sectors; + } else { + int d; + rcu_read_lock(); +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 7ec822ced80b..de1ef6264ee7 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -2197,15 +2197,16 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) + static int grow_stripes(struct r5conf *conf, int num) + { + struct kmem_cache *sc; ++ size_t namelen = sizeof(conf->cache_name[0]); + int devs = max(conf->raid_disks, conf->previous_raid_disks); + + if (conf->mddev->gendisk) +- sprintf(conf->cache_name[0], ++ snprintf(conf->cache_name[0], namelen, + "raid%d-%s", conf->level, mdname(conf->mddev)); + else +- sprintf(conf->cache_name[0], ++ snprintf(conf->cache_name[0], namelen, + "raid%d-%p", conf->level, conf->mddev); +- sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); ++ snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]); + + conf->active_name = 0; + sc = kmem_cache_create(conf->cache_name[conf->active_name], +@@ -8000,13 +8001,7 @@ static void raid5_finish_reshape(struct mddev *mddev) + + if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { + +- if (mddev->delta_disks > 0) { +- md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); +- if (mddev->queue) { +- set_capacity(mddev->gendisk, mddev->array_sectors); +- revalidate_disk(mddev->gendisk); +- } +- } else { ++ if (mddev->delta_disks <= 0) { + int d; + spin_lock_irq(&conf->device_lock); + mddev->degraded = raid5_calc_degraded(conf); +diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h +index b1afeccbb97f..c96dcda1111f 100644 +--- a/drivers/misc/cxl/cxl.h ++++ b/drivers/misc/cxl/cxl.h +@@ -365,6 +365,9 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; + #define CXL_PSL_TFC_An_AE (1ull << (63-30)) /* Restart PSL with address error */ + #define CXL_PSL_TFC_An_R (1ull << (63-31)) /* Restart PSL transaction */ + 
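/* Illustration only: the PSL register bits defined in this cxl.h hunk,
 * including the CXL_PSL_DEBUG_CDC bit added just below, use IBM MSB-0
 * numbering, so "bit 27" of a 64-bit register is (1ull << (63 - 27)).
 * A minimal sketch of that convention; PSL_BIT and the sample register
 * value are hypothetical stand-ins, not part of the driver.
 */
#include <stdint.h>
#include <stdio.h>

#define PSL_BIT(n) (1ull << (63 - (n))) /* MSB-0: bit 0 is the top bit */

int main(void)
{
	uint64_t psl_debug = PSL_BIT(27); /* pretend the CDC bit is set */

	/* Test the bit the same way the driver tests CXL_PSL_DEBUG_CDC */
	if (psl_debug & PSL_BIT(27))
		printf("bit 27 (MSB-0) set: mask 0x%016llx\n",
		       (unsigned long long)PSL_BIT(27));
	return 0;
}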
++/****** CXL_PSL_DEBUG *****************************************************/ ++#define CXL_PSL_DEBUG_CDC (1ull << (63-27)) /* Coherent Data cache support */ ++ + /****** CXL_XSL9_IERAT_ERAT - CAIA 2 **********************************/ + #define CXL_XSL9_IERAT_MLPID (1ull << (63-0)) /* Match LPID */ + #define CXL_XSL9_IERAT_MPID (1ull << (63-1)) /* Match PID */ +@@ -659,6 +662,7 @@ struct cxl_native { + irq_hw_number_t err_hwirq; + unsigned int err_virq; + u64 ps_off; ++ bool no_data_cache; /* set if no data cache on the card */ + const struct cxl_service_layer_ops *sl_ops; + }; + +diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c +index 4a82c313cf71..9c042b0b8c55 100644 +--- a/drivers/misc/cxl/native.c ++++ b/drivers/misc/cxl/native.c +@@ -352,8 +352,17 @@ int cxl_data_cache_flush(struct cxl *adapter) + u64 reg; + unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); + +- pr_devel("Flushing data cache\n"); ++ /* ++ * Do a datacache flush only if datacache is available. ++ * In case of PSL9D datacache absent hence flush operation. ++ * would timeout. ++ */ ++ if (adapter->native->no_data_cache) { ++ pr_devel("No PSL data cache. Ignoring cache flush req.\n"); ++ return 0; ++ } + ++ pr_devel("Flushing data cache\n"); + reg = cxl_p1_read(adapter, CXL_PSL_Control); + reg |= CXL_PSL_Control_Fr; + cxl_p1_write(adapter, CXL_PSL_Control, reg); +diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c +index 81093f8157a9..2b3fd0a51701 100644 +--- a/drivers/misc/cxl/pci.c ++++ b/drivers/misc/cxl/pci.c +@@ -457,6 +457,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, + u64 chipid; + u32 phb_index; + u64 capp_unit_id; ++ u64 psl_debug; + int rc; + + rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); +@@ -507,6 +508,16 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, + if (cxl_is_power9_dd1()) + cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0400000000000001ULL); + ++ /* ++ * Check if PSL has data-cache. We need to flush adapter datacache ++ * when as its about to be removed. ++ */ ++ psl_debug = cxl_p1_read(adapter, CXL_PSL9_DEBUG); ++ if (psl_debug & CXL_PSL_DEBUG_CDC) { ++ dev_dbg(&dev->dev, "No data-cache present\n"); ++ adapter->native->no_data_cache = true; ++ } ++ + return 0; + } + +@@ -1450,10 +1461,8 @@ int cxl_pci_reset(struct cxl *adapter) + + /* + * The adapter is about to be reset, so ignore errors. +- * Not supported on P9 DD1 + */ +- if ((cxl_is_power8()) || (!(cxl_is_power9_dd1()))) +- cxl_data_cache_flush(adapter); ++ cxl_data_cache_flush(adapter); + + /* pcie_warm_reset requests a fundamental pci reset which includes a + * PERST assert/deassert. PERST triggers a loading of the image +@@ -1898,10 +1907,8 @@ static void cxl_pci_remove_adapter(struct cxl *adapter) + + /* + * Flush adapter datacache as its about to be removed. +- * Not supported on P9 DD1. 
+ */ +- if ((cxl_is_power8()) || (!(cxl_is_power9_dd1()))) +- cxl_data_cache_flush(adapter); ++ cxl_data_cache_flush(adapter); + + cxl_deconfigure_adapter(adapter); + +diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c +index 61666d269771..0cfbdb3ab68a 100644 +--- a/drivers/mmc/host/sdhci-iproc.c ++++ b/drivers/mmc/host/sdhci-iproc.c +@@ -33,6 +33,8 @@ struct sdhci_iproc_host { + const struct sdhci_iproc_data *data; + u32 shadow_cmd; + u32 shadow_blk; ++ bool is_cmd_shadowed; ++ bool is_blk_shadowed; + }; + + #define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18) +@@ -48,8 +50,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg) + + static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg) + { +- u32 val = sdhci_iproc_readl(host, (reg & ~3)); +- u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff; ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host); ++ u32 val; ++ u16 word; ++ ++ if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) { ++ /* Get the saved transfer mode */ ++ val = iproc_host->shadow_cmd; ++ } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) && ++ iproc_host->is_blk_shadowed) { ++ /* Get the saved block info */ ++ val = iproc_host->shadow_blk; ++ } else { ++ val = sdhci_iproc_readl(host, (reg & ~3)); ++ } ++ word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff; + return word; + } + +@@ -105,13 +121,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg) + + if (reg == SDHCI_COMMAND) { + /* Write the block now as we are issuing a command */ +- if (iproc_host->shadow_blk != 0) { ++ if (iproc_host->is_blk_shadowed) { + sdhci_iproc_writel(host, iproc_host->shadow_blk, + SDHCI_BLOCK_SIZE); +- iproc_host->shadow_blk = 0; ++ iproc_host->is_blk_shadowed = false; + } + oldval = iproc_host->shadow_cmd; +- } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { ++ iproc_host->is_cmd_shadowed = false; ++ } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) && ++ iproc_host->is_blk_shadowed) { + /* Block size and count are stored in shadow reg */ + oldval = iproc_host->shadow_blk; + } else { +@@ -123,9 +141,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg) + if (reg == SDHCI_TRANSFER_MODE) { + /* Save the transfer mode until the command is issued */ + iproc_host->shadow_cmd = newval; ++ iproc_host->is_cmd_shadowed = true; + } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { + /* Save the block info until the command is issued */ + iproc_host->shadow_blk = newval; ++ iproc_host->is_blk_shadowed = true; + } else { + /* Command or other regular 32-bit write */ + sdhci_iproc_writel(host, newval, reg & ~3); +@@ -166,7 +186,7 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = { + + static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = { + .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, +- .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, ++ .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON, + .ops = &sdhci_iproc_32only_ops, + }; + +@@ -206,7 +226,6 @@ static const struct sdhci_iproc_data iproc_data = { + .caps1 = SDHCI_DRIVER_TYPE_C | + SDHCI_DRIVER_TYPE_D | + SDHCI_SUPPORT_DDR50, +- .mmc_caps = MMC_CAP_1_8V_DDR, + }; + + static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = { +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index f0aa57222f17..00245b73c224 100644 +--- 
a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -1528,7 +1528,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) + if (res) { + netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n", + slave_dev->name); +- goto err_close; ++ goto err_hwaddr_unsync; + } + + prev_slave = bond_last_slave(bond); +@@ -1769,6 +1769,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) + synchronize_rcu(); + slave_disable_netpoll(new_slave); + ++err_hwaddr_unsync: ++ if (!bond_uses_primary(bond)) ++ bond_hw_addr_flush(bond_dev, slave_dev); ++ + err_close: + slave_dev->priv_flags &= ~IFF_BONDING; + dev_close(slave_dev); +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c +index f4947a74b65f..5d4e61741476 100644 +--- a/drivers/net/can/m_can/m_can.c ++++ b/drivers/net/can/m_can/m_can.c +@@ -25,6 +25,7 @@ + #include <linux/platform_device.h> + #include <linux/iopoll.h> + #include <linux/can/dev.h> ++#include <linux/pinctrl/consumer.h> + + /* napi related */ + #define M_CAN_NAPI_WEIGHT 64 +@@ -246,7 +247,7 @@ enum m_can_mram_cfg { + + /* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */ + #define RXFC_FWM_SHIFT 24 +-#define RXFC_FWM_MASK (0x7f < RXFC_FWM_SHIFT) ++#define RXFC_FWM_MASK (0x7f << RXFC_FWM_SHIFT) + #define RXFC_FS_SHIFT 16 + #define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT) + +@@ -1682,6 +1683,8 @@ static __maybe_unused int m_can_suspend(struct device *dev) + m_can_clk_stop(priv); + } + ++ pinctrl_pm_select_sleep_state(dev); ++ + priv->can.state = CAN_STATE_SLEEPING; + + return 0; +@@ -1692,6 +1695,8 @@ static __maybe_unused int m_can_resume(struct device *dev) + struct net_device *ndev = dev_get_drvdata(dev); + struct m_can_priv *priv = netdev_priv(ndev); + ++ pinctrl_pm_select_default_state(dev); ++ + m_can_init_ram(priv); + + priv->can.state = CAN_STATE_ERROR_ACTIVE; +diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile +index d040aeb45172..15c2a831edf1 100644 +--- a/drivers/net/dsa/Makefile ++++ b/drivers/net/dsa/Makefile +@@ -1,7 +1,10 @@ + # SPDX-License-Identifier: GPL-2.0 + obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o + bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o +-obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o ++obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o ++ifdef CONFIG_NET_DSA_LOOP ++obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o ++endif + obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o + obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o + obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c +index c142b97add2c..3b073e152237 100644 +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -1122,6 +1122,7 @@ static const struct of_device_id mt7530_of_match[] = { + { .compatible = "mediatek,mt7530" }, + { /* sentinel */ }, + }; ++MODULE_DEVICE_TABLE(of, mt7530_of_match); + + static struct mdio_driver mt7530_mdio_driver = { + .probe = mt7530_probe, +diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c +index 48d672b204a4..a4080f18135c 100644 +--- a/drivers/net/ethernet/broadcom/bgmac.c ++++ b/drivers/net/ethernet/broadcom/bgmac.c +@@ -532,7 +532,8 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac, + int i; + + for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) { +- int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN; ++ u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1); ++ unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN; + + slot = &ring->slots[i]; + dev_kfree_skb(slot->skb); +diff --git 
a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h +index 4040d846da8e..40d02fec2747 100644 +--- a/drivers/net/ethernet/broadcom/bgmac.h ++++ b/drivers/net/ethernet/broadcom/bgmac.h +@@ -479,9 +479,9 @@ struct bgmac_rx_header { + struct bgmac { + union { + struct { +- void *base; +- void *idm_base; +- void *nicpm_base; ++ void __iomem *base; ++ void __iomem *idm_base; ++ void __iomem *nicpm_base; + } plat; + struct { + struct bcma_device *core; +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index 807cf75f0a98..bfd2d0382f4c 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -3808,6 +3808,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + struct hwrm_vnic_tpa_cfg_input req = {0}; + ++ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) ++ return 0; ++ + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); + + if (tpa_flags) { +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +index 92d9d795d874..44a0d04dd8a0 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +@@ -815,8 +815,6 @@ static int setup_fw_sge_queues(struct adapter *adap) + + err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], + adap->msi_idx, NULL, fwevtq_handler, NULL, -1); +- if (err) +- t4_free_sge_resources(adap); + return err; + } + +@@ -4679,7 +4677,6 @@ static void dummy_setup(struct net_device *dev) + /* Initialize the device structure. */ + dev->netdev_ops = &cxgb4_mgmt_netdev_ops; + dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; +- dev->needs_free_netdev = true; + } + + static int config_mgmt_dev(struct pci_dev *pdev) +@@ -5117,6 +5114,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) + if (err) + goto out_free_dev; + ++ err = setup_fw_sge_queues(adapter); ++ if (err) { ++ dev_err(adapter->pdev_dev, ++ "FW sge queue allocation failed, err %d", err); ++ goto out_free_dev; ++ } ++ + /* + * The card is now ready to go. 
If any errors occur during device + * registration we do not fail the whole card but rather proceed only +@@ -5165,7 +5169,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) + cxgb4_ptp_init(adapter); + + print_adapter_info(adapter); +- setup_fw_sge_queues(adapter); + return 0; + + sriov: +@@ -5221,6 +5224,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) + #endif + + out_free_dev: ++ t4_free_sge_resources(adapter); + free_some_resources(adapter); + if (adapter->flags & USING_MSIX) + free_msix_info(adapter); +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +index 71a315bc1409..99a9d5278369 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +@@ -342,6 +342,7 @@ static void free_queues_uld(struct adapter *adap, unsigned int uld_type) + { + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + ++ adap->sge.uld_rxq_info[uld_type] = NULL; + kfree(rxq_info->rspq_id); + kfree(rxq_info->uldrxq); + kfree(rxq_info); +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c +index d24ee1ad3be1..aef40f02c77f 100644 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c +@@ -1897,6 +1897,8 @@ static int enic_open(struct net_device *netdev) + } + + for (i = 0; i < enic->rq_count; i++) { ++ /* enable rq before updating rq desc */ ++ vnic_rq_enable(&enic->rq[i]); + vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); + /* Need at least one buffer on ring to get going */ + if (vnic_rq_desc_used(&enic->rq[i]) == 0) { +@@ -1908,8 +1910,6 @@ static int enic_open(struct net_device *netdev) + + for (i = 0; i < enic->wq_count; i++) + vnic_wq_enable(&enic->wq[i]); +- for (i = 0; i < enic->rq_count; i++) +- vnic_rq_enable(&enic->rq[i]); + + if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) + enic_dev_add_station_addr(enic); +@@ -1935,8 +1935,12 @@ static int enic_open(struct net_device *netdev) + return 0; + + err_out_free_rq: +- for (i = 0; i < enic->rq_count; i++) ++ for (i = 0; i < enic->rq_count; i++) { ++ err = vnic_rq_disable(&enic->rq[i]); ++ if (err) ++ return err; + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); ++ } + enic_dev_notify_unset(enic); + err_out_free_intr: + enic_unset_affinity_hint(enic); +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +index 4f6e9d3470d5..5b4f05805006 100644 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +@@ -1930,8 +1930,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, + goto csum_failed; + } + ++ /* SGT[0] is used by the linear part */ + sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom); +- qm_sg_entry_set_len(&sgt[0], skb_headlen(skb)); ++ frag_len = skb_headlen(skb); ++ qm_sg_entry_set_len(&sgt[0], frag_len); + sgt[0].bpid = FSL_DPAA_BPID_INV; + sgt[0].offset = 0; + addr = dma_map_single(dev, skb->data, +@@ -1944,9 +1946,9 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, + qm_sg_entry_set64(&sgt[0], addr); + + /* populate the rest of SGT entries */ +- frag = &skb_shinfo(skb)->frags[0]; +- frag_len = frag->size; +- for (i = 1; i <= nr_frags; i++, frag++) { ++ for (i = 0; i < nr_frags; i++) { ++ frag = &skb_shinfo(skb)->frags[i]; ++ frag_len = frag->size; + WARN_ON(!skb_frag_page(frag)); + addr = skb_frag_dma_map(dev, frag, 0, + frag_len, dma_dir); 
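/* A minimal userspace sketch of the scatter-gather table layout the
 * skb_to_sg_fd() hunks above establish: entry 0 carries the linear
 * part, entries 1..nr_frags carry the page fragments, and the Final
 * bit is set on the last used entry, sgt[nr_frags]. struct sg_entry,
 * fill_sgt() and the sample lengths are simplified stand-ins, not the
 * driver's qm_sg_entry API.
 */
#include <stdbool.h>
#include <stdio.h>

struct sg_entry {
	unsigned int len;
	bool final;
};

static void fill_sgt(struct sg_entry *sgt, unsigned int headlen,
		     const unsigned int *frag_len, int nr_frags)
{
	int i;

	sgt[0].len = headlen;                 /* SGT[0] is the linear part */
	for (i = 0; i < nr_frags; i++)
		sgt[i + 1].len = frag_len[i]; /* frag i lands in SGT[i + 1] */
	sgt[nr_frags].final = true;           /* Final bit on last used entry */
}

int main(void)
{
	unsigned int frags[2] = { 1024, 512 };
	struct sg_entry sgt[3] = { 0 };
	int i;

	fill_sgt(sgt, 128, frags, 2);
	for (i = 0; i < 3; i++)
		printf("sgt[%d]: len=%u final=%d\n", i, sgt[i].len,
		       (int)sgt[i].final);
	return 0;
}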
+@@ -1956,15 +1958,16 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, + goto sg_map_failed; + } + +- qm_sg_entry_set_len(&sgt[i], frag_len); +- sgt[i].bpid = FSL_DPAA_BPID_INV; +- sgt[i].offset = 0; ++ qm_sg_entry_set_len(&sgt[i + 1], frag_len); ++ sgt[i + 1].bpid = FSL_DPAA_BPID_INV; ++ sgt[i + 1].offset = 0; + + /* keep the offset in the address */ +- qm_sg_entry_set64(&sgt[i], addr); +- frag_len = frag->size; ++ qm_sg_entry_set64(&sgt[i + 1], addr); + } +- qm_sg_entry_set_f(&sgt[i - 1], frag_len); ++ ++ /* Set the final bit in the last used entry of the SGT */ ++ qm_sg_entry_set_f(&sgt[nr_frags], frag_len); + + qm_fd_set_sg(fd, priv->tx_headroom, skb->len); + +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +index faea674094b9..85306d1b2acf 100644 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +@@ -211,7 +211,7 @@ static int dpaa_set_pauseparam(struct net_device *net_dev, + if (epause->rx_pause) + newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; + if (epause->tx_pause) +- newadv |= ADVERTISED_Asym_Pause; ++ newadv ^= ADVERTISED_Asym_Pause; + + oldadv = phydev->advertising & + (ADVERTISED_Pause | ADVERTISED_Asym_Pause); +diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c +index ea43b4974149..7af31ddd093f 100644 +--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c ++++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c +@@ -1100,7 +1100,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) + set_bucket(dtsec->regs, bucket, true); + + /* Create element to be added to the driver hash table */ +- hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL); ++ hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC); + if (!hash_entry) + return -ENOMEM; + hash_entry->addr = addr; +diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c +index 3bdeb295514b..63daae120b2d 100644 +--- a/drivers/net/ethernet/freescale/gianfar.c ++++ b/drivers/net/ethernet/freescale/gianfar.c +@@ -3072,9 +3072,6 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) + if (ndev->features & NETIF_F_RXCSUM) + gfar_rx_checksum(skb, fcb); + +- /* Tell the skb what kind of packet this is */ +- skb->protocol = eth_type_trans(skb, ndev); +- + /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. + * Even if vlan rx accel is disabled, on some chips + * RXFCB_VLN is pseudo randomly set. 
+@@ -3145,13 +3142,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) + continue; + } + ++ gfar_process_frame(ndev, skb); ++ + /* Increment the number of packets */ + total_pkts++; + total_bytes += skb->len; + + skb_record_rx_queue(skb, rx_queue->qindex); + +- gfar_process_frame(ndev, skb); ++ skb->protocol = eth_type_trans(skb, ndev); + + /* Send the packet up the stack */ + napi_gro_receive(&rx_queue->grp->napi_rx, skb); +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index 3ae02b0620bc..98493be7b4af 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -933,6 +933,35 @@ static int ibmvnic_open(struct net_device *netdev) + return rc; + } + ++static void clean_rx_pools(struct ibmvnic_adapter *adapter) ++{ ++ struct ibmvnic_rx_pool *rx_pool; ++ u64 rx_entries; ++ int rx_scrqs; ++ int i, j; ++ ++ if (!adapter->rx_pool) ++ return; ++ ++ rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); ++ rx_entries = adapter->req_rx_add_entries_per_subcrq; ++ ++ /* Free any remaining skbs in the rx buffer pools */ ++ for (i = 0; i < rx_scrqs; i++) { ++ rx_pool = &adapter->rx_pool[i]; ++ if (!rx_pool) ++ continue; ++ ++ netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); ++ for (j = 0; j < rx_entries; j++) { ++ if (rx_pool->rx_buff[j].skb) { ++ dev_kfree_skb_any(rx_pool->rx_buff[j].skb); ++ rx_pool->rx_buff[j].skb = NULL; ++ } ++ } ++ } ++} ++ + static void clean_tx_pools(struct ibmvnic_adapter *adapter) + { + struct ibmvnic_tx_pool *tx_pool; +@@ -1010,7 +1039,7 @@ static int __ibmvnic_close(struct net_device *netdev) + } + } + } +- ++ clean_rx_pools(adapter); + clean_tx_pools(adapter); + adapter->state = VNIC_CLOSED; + return rc; +@@ -1460,8 +1489,6 @@ static int do_reset(struct ibmvnic_adapter *adapter, + return 0; + } + +- netif_carrier_on(netdev); +- + /* kick napi */ + for (i = 0; i < adapter->req_rx_queues; i++) + napi_schedule(&adapter->napi[i]); +@@ -1469,6 +1496,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, + if (adapter->reset_reason != VNIC_RESET_FAILOVER) + netdev_notify_peers(netdev); + ++ netif_carrier_on(netdev); ++ + return 0; + } + +@@ -1636,6 +1665,12 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget) + be16_to_cpu(next->rx_comp.rc)); + /* free the entry */ + next->rx_comp.first = 0; ++ dev_kfree_skb_any(rx_buff->skb); ++ remove_buff_from_pool(adapter, rx_buff); ++ continue; ++ } else if (!rx_buff->skb) { ++ /* free the entry */ ++ next->rx_comp.first = 0; + remove_buff_from_pool(adapter, rx_buff); + continue; + } +@@ -1927,6 +1962,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, + } + + memset(scrq->msgs, 0, 4 * PAGE_SIZE); ++ atomic_set(&scrq->used, 0); + scrq->cur = 0; + + rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, +diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c +index 31277d3bb7dc..ff308b05d68c 100644 +--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c ++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c +@@ -1602,7 +1602,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) +- return -E1000_ERR_CONFIG; ++ return 1; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. 
So we only need to +diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c +index f457c5703d0c..db735644b312 100644 +--- a/drivers/net/ethernet/intel/e1000e/mac.c ++++ b/drivers/net/ethernet/intel/e1000e/mac.c +@@ -450,7 +450,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) +- return -E1000_ERR_CONFIG; ++ return 1; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c +index 991c2a0dd67e..7a226537877b 100644 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c +@@ -2329,8 +2329,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, + { + struct pci_dev *pdev = adapter->pdev; + +- ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, +- GFP_KERNEL); ++ ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma, ++ GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c +index d36b799116e4..04dbf64fb1cb 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c +@@ -7196,6 +7196,17 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) + } + i40e_get_oem_version(&pf->hw); + ++ if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && ++ ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) || ++ hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) { ++ /* The following delay is necessary for 4.33 firmware and older ++ * to recover after EMP reset. 200 ms should suffice but we ++ * put here 300 ms to be sure that FW is ready to operate ++ * after reset. ++ */ ++ mdelay(300); ++ } ++ + /* re-verify the eeprom if we just had an EMP reset */ + if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) + i40e_verify_eeprom(pf); +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +index 9e30cfeac04b..20a8018d41ef 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +@@ -7658,7 +7658,8 @@ static void ixgbe_service_task(struct work_struct *work) + + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { + ixgbe_ptp_overflow_check(adapter); +- ixgbe_ptp_rx_hang(adapter); ++ if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER) ++ ixgbe_ptp_rx_hang(adapter); + ixgbe_ptp_tx_hang(adapter); + } + +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c +index a539263cd79c..d28f873169a9 100644 +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -1112,6 +1112,7 @@ static void mvneta_port_up(struct mvneta_port *pp) + } + mvreg_write(pp, MVNETA_TXQ_CMD, q_map); + ++ q_map = 0; + /* Enable all initialized RXQs. 
*/ + for (queue = 0; queue < rxq_number; queue++) { + struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +index fdaef00465d7..576b61c119bb 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +@@ -46,7 +46,7 @@ config MLX5_MPFS + + config MLX5_ESWITCH + bool "Mellanox Technologies MLX5 SRIOV E-Switch support" +- depends on MLX5_CORE_EN ++ depends on MLX5_CORE_EN && NET_SWITCHDEV + default y + ---help--- + Mellanox Technologies Ethernet SRIOV E-Switch support in ConnectX NIC. +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +index e9a1fbcc4adf..3efe45bc2471 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +@@ -1802,7 +1802,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) + + cmd->checksum_disabled = 1; + cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; +- cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; ++ cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1; + + cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; + if (cmd->cmdif_rev > CMD_IF_REV) { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 225b2ad3e15f..337ce9423794 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -4022,7 +4022,7 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) + } + } + +-#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH) ++#if IS_ENABLED(CONFIG_MLX5_ESWITCH) + static const struct switchdev_ops mlx5e_switchdev_ops = { + .switchdev_port_attr_get = mlx5e_attr_get, + }; +@@ -4126,7 +4126,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) + + mlx5e_set_netdev_dev_addr(netdev); + +-#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH) ++#if IS_ENABLED(CONFIG_MLX5_ESWITCH) + if (MLX5_VPORT_MANAGER(mdev)) + netdev->switchdev_ops = &mlx5e_switchdev_ops; + #endif +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index 5ffd1db4e797..4727e7390834 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -825,9 +825,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) + + netdev->ethtool_ops = &mlx5e_rep_ethtool_ops; + +-#ifdef CONFIG_NET_SWITCHDEV + netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; +-#endif + + netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL; + netdev->hw_features |= NETIF_F_HW_TC; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index ede66e6af786..e28f9dab9ceb 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -2018,7 +2018,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, + if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { + attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { +- if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q)) ++ if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || ++ tcf_vlan_push_prio(a)) + return -EOPNOTSUPP; + + attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; +diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +index f6963b0b4a55..122506daa586 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h ++++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +@@ -107,20 +107,20 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { + MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), + MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3), + MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9), +- MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8), +- MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2), +- MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6), +- MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), +- MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), +- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), +- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8), +- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8), +- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8), + MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16), + MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16), ++ MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), ++ MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), ++ MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), ++ MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32), ++ MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32), ++ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8), ++ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8), ++ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8), ++ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8), + }; + +-#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38 ++#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40 + + struct mlxsw_afk_element_inst { /* element instance in actual block */ + const struct mlxsw_afk_element_info *info; +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +index 99bd6e88ebc7..8b48338b4a70 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +@@ -1417,6 +1417,7 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) + } + + mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; ++ mlxsw_sp_port_vlan->ref_count = 1; + mlxsw_sp_port_vlan->vid = vid; + list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); + +@@ -1444,8 +1445,10 @@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); +- if (mlxsw_sp_port_vlan) ++ if (mlxsw_sp_port_vlan) { ++ mlxsw_sp_port_vlan->ref_count++; + return mlxsw_sp_port_vlan; ++ } + + return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid); + } +@@ -1454,6 +1457,9 @@ void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) + { + struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; + ++ if (--mlxsw_sp_port_vlan->ref_count != 0) ++ return; ++ + if (mlxsw_sp_port_vlan->bridge_port) + mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); + else if (fid) +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +index 88892d47acae..8c4ce0a0cc82 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +@@ -194,6 +194,7 @@ struct mlxsw_sp_port_vlan { + struct list_head list; + struct mlxsw_sp_port *mlxsw_sp_port; + struct mlxsw_sp_fid *fid; ++ unsigned int ref_count; + u16 vid; + struct 
mlxsw_sp_bridge_port *bridge_port; + struct list_head bridge_vlan_node; +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +index bbd238e50f05..54262af4e98f 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +@@ -112,11 +112,11 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { + [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1, + [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1, + [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1, ++ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, + }; + + static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { + [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1, +- [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, + }; + + static const int *mlxsw_sp_packet_type_sfgc_types[] = { +diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c +index af106be8cc08..629bfa0cd3f0 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c +@@ -2471,7 +2471,10 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto) + if (rc) + return rc; + +- /* Free Task CXT */ ++ /* Free Task CXT ( Intentionally RoCE as task-id is shared between ++ * RoCE and iWARP ) ++ */ ++ proto = PROTOCOLID_ROCE; + rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0, + qed_cxt_get_proto_tid_count(p_hwfn, proto)); + if (rc) +diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c +index 6fb99518a61f..1b6554866138 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c +@@ -360,6 +360,7 @@ static void qed_rdma_free(struct qed_hwfn *p_hwfn) + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); + + qed_rdma_resc_free(p_hwfn); ++ qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto); + } + + static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid) +diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c +index 6fc854b120b0..d50cc2635477 100644 +--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c ++++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c +@@ -320,13 +320,11 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq) + barrier(); + writel(txq->tx_db.raw, txq->doorbell_addr); + +- /* mmiowb is needed to synchronize doorbell writes from more than one +- * processor. It guarantees that the write arrives to the device before +- * the queue lock is released and another start_xmit is called (possibly +- * on another CPU). Without this barrier, the next doorbell can bypass +- * this doorbell. This is applicable to IA64/Altix systems. ++ /* Fence required to flush the write combined buffer, since another ++ * CPU may write to the same doorbell address and data may be lost ++ * due to relaxed order nature of write combined bar. 
+ */ +- mmiowb(); ++ wmb(); + } + + static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp, +@@ -1247,16 +1245,10 @@ static int qede_rx_process_cqe(struct qede_dev *edev, + + csum_flag = qede_check_csum(parse_flag); + if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { +- if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) { ++ if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) + rxq->rx_ip_frags++; +- } else { +- DP_NOTICE(edev, +- "CQE has error, flags = %x, dropping incoming packet\n", +- parse_flag); ++ else + rxq->rx_hw_errors++; +- qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); +- return 0; +- } + } + + /* Basic validation passed; Need to prepare an SKB. This would also +diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c +index 3ed9033e56db..44f797ab5d15 100644 +--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c ++++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c +@@ -1204,9 +1204,9 @@ void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q) + while (tx_q->tpd.consume_idx != hw_consume_idx) { + tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx); + if (tpbuf->dma_addr) { +- dma_unmap_single(adpt->netdev->dev.parent, +- tpbuf->dma_addr, tpbuf->length, +- DMA_TO_DEVICE); ++ dma_unmap_page(adpt->netdev->dev.parent, ++ tpbuf->dma_addr, tpbuf->length, ++ DMA_TO_DEVICE); + tpbuf->dma_addr = 0; + } + +@@ -1363,9 +1363,11 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt, + + tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); + tpbuf->length = mapped_len; +- tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, +- skb->data, tpbuf->length, +- DMA_TO_DEVICE); ++ tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent, ++ virt_to_page(skb->data), ++ offset_in_page(skb->data), ++ tpbuf->length, ++ DMA_TO_DEVICE); + ret = dma_mapping_error(adpt->netdev->dev.parent, + tpbuf->dma_addr); + if (ret) +@@ -1381,9 +1383,12 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt, + if (mapped_len < len) { + tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); + tpbuf->length = len - mapped_len; +- tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, +- skb->data + mapped_len, +- tpbuf->length, DMA_TO_DEVICE); ++ tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent, ++ virt_to_page(skb->data + ++ mapped_len), ++ offset_in_page(skb->data + ++ mapped_len), ++ tpbuf->length, DMA_TO_DEVICE); + ret = dma_mapping_error(adpt->netdev->dev.parent, + tpbuf->dma_addr); + if (ret) +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c +index db31963c5d9d..38080e95a82d 100644 +--- a/drivers/net/ethernet/renesas/sh_eth.c ++++ b/drivers/net/ethernet/renesas/sh_eth.c +@@ -753,6 +753,7 @@ static struct sh_eth_cpu_data sh7757_data = { + .rpadir = 1, + .rpadir_value = 2 << 16, + .rtrate = 1, ++ .dual_port = 1, + }; + + #define SH_GIGA_ETH_BASE 0xfee00000UL +@@ -831,6 +832,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = { + .no_trimd = 1, + .no_ade = 1, + .tsu = 1, ++ .dual_port = 1, + }; + + /* SH7734 */ +@@ -901,6 +903,7 @@ static struct sh_eth_cpu_data sh7763_data = { + .tsu = 1, + .irq_flags = IRQF_SHARED, + .magic = 1, ++ .dual_port = 1, + }; + + static struct sh_eth_cpu_data sh7619_data = { +@@ -933,6 +936,7 @@ static struct sh_eth_cpu_data sh771x_data = { + EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + .tsu = 1, ++ .dual_port = 1, + }; + + static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) +@@ -2911,7 +2915,7 
@@ static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, + /* SuperH's TSU register init function */ + static void sh_eth_tsu_init(struct sh_eth_private *mdp) + { +- if (sh_eth_is_rz_fast_ether(mdp)) { ++ if (!mdp->cd->dual_port) { + sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ + sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, + TSU_FWSLC); /* Enable POST registers */ +diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h +index a6753ccba711..6ab3d46d4f28 100644 +--- a/drivers/net/ethernet/renesas/sh_eth.h ++++ b/drivers/net/ethernet/renesas/sh_eth.h +@@ -509,6 +509,7 @@ struct sh_eth_cpu_data { + unsigned rmiimode:1; /* EtherC has RMIIMODE register */ + unsigned rtrate:1; /* EtherC has RTRATE register */ + unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */ ++ unsigned dual_port:1; /* Dual EtherC/E-DMAC */ + }; + + struct sh_eth_private { +diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c +index 012fb66eed8d..f0afb88d7bc2 100644 +--- a/drivers/net/ethernet/smsc/smsc911x.c ++++ b/drivers/net/ethernet/smsc/smsc911x.c +@@ -2335,14 +2335,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev) + pdata = netdev_priv(dev); + BUG_ON(!pdata); + BUG_ON(!pdata->ioaddr); +- WARN_ON(dev->phydev); + + SMSC_TRACE(pdata, ifdown, "Stopping driver"); + ++ unregister_netdev(dev); ++ + mdiobus_unregister(pdata->mii_bus); + mdiobus_free(pdata->mii_bus); + +- unregister_netdev(dev); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "smsc911x-memory"); + if (!res) +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index d0cc73795056..9866d2e34cdd 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -1829,6 +1829,11 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) + if (unlikely(status & tx_dma_own)) + break; + ++ /* Make sure descriptor fields are read after reading ++ * the own bit. ++ */ ++ dma_rmb(); ++ + /* Just consider the last segment and ...*/ + if (likely(!(status & tx_not_ls))) { + /* ... verify the status error condition */ +@@ -2368,7 +2373,7 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) + continue; + + packet = priv->plat->rx_queues_cfg[queue].pkt_route; +- priv->hw->mac->rx_queue_prio(priv->hw, packet, queue); ++ priv->hw->mac->rx_queue_routing(priv->hw, packet, queue); + } + } + +@@ -2918,8 +2923,15 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) + tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); + + /* If context desc is used to change MSS */ +- if (mss_desc) ++ if (mss_desc) { ++ /* Make sure that first descriptor has been completely ++ * written, including its own bit. This is because MSS is ++ * actually before first descriptor, so we need to make ++ * sure that MSS's own bit is the last thing written. 
++ */ ++ dma_wmb(); + priv->hw->desc->set_tx_owner(mss_desc); ++ } + + /* The own bit must be the latest setting done when prepare the + * descriptor and then barrier is needed to make sure that +diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c +index 0b95105f7060..65347d2f139b 100644 +--- a/drivers/net/ethernet/sun/sunvnet.c ++++ b/drivers/net/ethernet/sun/sunvnet.c +@@ -311,7 +311,7 @@ static struct vnet *vnet_new(const u64 *local_mac, + dev->ethtool_ops = &vnet_ethtool_ops; + dev->watchdog_timeo = VNET_TX_TIMEOUT; + +- dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | ++ dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO | + NETIF_F_HW_CSUM | NETIF_F_SG; + dev->features = dev->hw_features; + +diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c +index 99be63eacaeb..4647ecbe6f36 100644 +--- a/drivers/net/hyperv/netvsc.c ++++ b/drivers/net/hyperv/netvsc.c +@@ -1261,7 +1261,7 @@ void netvsc_channel_cb(void *context) + /* disable interupts from host */ + hv_begin_read(rbi); + +- __napi_schedule(&nvchan->napi); ++ __napi_schedule_irqoff(&nvchan->napi); + } + } + +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c +index 11b46c8d2d67..3a7241c8713c 100644 +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -66,12 +66,43 @@ static int debug = -1; + module_param(debug, int, S_IRUGO); + MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +-static void netvsc_set_multicast_list(struct net_device *net) ++static void netvsc_change_rx_flags(struct net_device *net, int change) + { +- struct net_device_context *net_device_ctx = netdev_priv(net); +- struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); ++ struct net_device_context *ndev_ctx = netdev_priv(net); ++ struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); ++ int inc; + +- rndis_filter_update(nvdev); ++ if (!vf_netdev) ++ return; ++ ++ if (change & IFF_PROMISC) { ++ inc = (net->flags & IFF_PROMISC) ? 1 : -1; ++ dev_set_promiscuity(vf_netdev, inc); ++ } ++ ++ if (change & IFF_ALLMULTI) { ++ inc = (net->flags & IFF_ALLMULTI) ? 
1 : -1; ++ dev_set_allmulti(vf_netdev, inc); ++ } ++} ++ ++static void netvsc_set_rx_mode(struct net_device *net) ++{ ++ struct net_device_context *ndev_ctx = netdev_priv(net); ++ struct net_device *vf_netdev; ++ struct netvsc_device *nvdev; ++ ++ rcu_read_lock(); ++ vf_netdev = rcu_dereference(ndev_ctx->vf_netdev); ++ if (vf_netdev) { ++ dev_uc_sync(vf_netdev, net); ++ dev_mc_sync(vf_netdev, net); ++ } ++ ++ nvdev = rcu_dereference(ndev_ctx->nvdev); ++ if (nvdev) ++ rndis_filter_update(nvdev); ++ rcu_read_unlock(); + } + + static int netvsc_open(struct net_device *net) +@@ -1582,7 +1613,8 @@ static const struct net_device_ops device_ops = { + .ndo_open = netvsc_open, + .ndo_stop = netvsc_close, + .ndo_start_xmit = netvsc_start_xmit, +- .ndo_set_rx_mode = netvsc_set_multicast_list, ++ .ndo_change_rx_flags = netvsc_change_rx_flags, ++ .ndo_set_rx_mode = netvsc_set_rx_mode, + .ndo_change_mtu = netvsc_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = netvsc_set_mac_addr, +@@ -1814,6 +1846,15 @@ static void __netvsc_vf_setup(struct net_device *ndev, + netdev_warn(vf_netdev, + "unable to change mtu to %u\n", ndev->mtu); + ++ /* set multicast etc flags on VF */ ++ dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE); ++ ++ /* sync address list from ndev to VF */ ++ netif_addr_lock_bh(ndev); ++ dev_uc_sync(vf_netdev, ndev); ++ dev_mc_sync(vf_netdev, ndev); ++ netif_addr_unlock_bh(ndev); ++ + if (netif_running(ndev)) { + ret = dev_open(vf_netdev); + if (ret) +diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c +index 6dde92c1c113..d1ae184008b4 100644 +--- a/drivers/net/hyperv/rndis_filter.c ++++ b/drivers/net/hyperv/rndis_filter.c +@@ -850,15 +850,19 @@ static void rndis_set_multicast(struct work_struct *w) + { + struct rndis_device *rdev + = container_of(w, struct rndis_device, mcast_work); ++ u32 filter = NDIS_PACKET_TYPE_DIRECTED; ++ unsigned int flags = rdev->ndev->flags; + +- if (rdev->ndev->flags & IFF_PROMISC) +- rndis_filter_set_packet_filter(rdev, +- NDIS_PACKET_TYPE_PROMISCUOUS); +- else +- rndis_filter_set_packet_filter(rdev, +- NDIS_PACKET_TYPE_BROADCAST | +- NDIS_PACKET_TYPE_ALL_MULTICAST | +- NDIS_PACKET_TYPE_DIRECTED); ++ if (flags & IFF_PROMISC) { ++ filter = NDIS_PACKET_TYPE_PROMISCUOUS; ++ } else { ++ if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI)) ++ filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; ++ if (flags & IFF_BROADCAST) ++ filter |= NDIS_PACKET_TYPE_BROADCAST; ++ } ++ ++ rndis_filter_set_packet_filter(rdev, filter); + } + + void rndis_filter_update(struct netvsc_device *nvdev) +diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c +index 24a1eabbbc9d..22e466ea919a 100644 +--- a/drivers/net/ieee802154/ca8210.c ++++ b/drivers/net/ieee802154/ca8210.c +@@ -2493,13 +2493,14 @@ static ssize_t ca8210_test_int_user_write( + struct ca8210_priv *priv = filp->private_data; + u8 command[CA8210_SPI_BUF_SIZE]; + +- if (len > CA8210_SPI_BUF_SIZE) { ++ memset(command, SPI_IDLE, 6); ++ if (len > CA8210_SPI_BUF_SIZE || len < 2) { + dev_warn( + &priv->spi->dev, +- "userspace requested erroneously long write (%zu)\n", ++ "userspace requested erroneous write length (%zu)\n", + len + ); +- return -EMSGSIZE; ++ return -EBADE; + } + + ret = copy_from_user(command, in_buf, len); +@@ -2511,6 +2512,13 @@ static ssize_t ca8210_test_int_user_write( + ); + return -EIO; + } ++ if (len != command[1] + 2) { ++ dev_err( ++ &priv->spi->dev, ++ "write len does not match packet length field\n" ++ ); ++ return -EBADE; 
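/* Sketch of the validation pattern this ca8210 hunk adds: reject a
 * user write whose length disagrees with the length field embedded in
 * the packet (the byte at offset 1 counts payload bytes, +2 covers the
 * command id and the length byte itself). Standalone illustration
 * under assumed semantics; cmd_write(), BUF_SIZE and the EBADMSG
 * return code are hypothetical, not the ca8210 API.
 */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define BUF_SIZE 256

static int cmd_write(const unsigned char *buf, size_t len)
{
	if (len > BUF_SIZE || len < 2)
		return -EBADMSG;            /* implausible length */
	if (len != (size_t)buf[1] + 2)
		return -EBADMSG;            /* length field mismatch */
	return 0;                           /* safe to parse further */
}

int main(void)
{
	unsigned char ok[4]  = { 0x10, 0x02, 0xaa, 0xbb }; /* len field = 2 */
	unsigned char bad[4] = { 0x10, 0x07, 0xaa, 0xbb }; /* claims 7 bytes */

	printf("ok:  %d\n", cmd_write(ok, sizeof(ok)));
	printf("bad: %d\n", cmd_write(bad, sizeof(bad)));
	return 0;
}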
++ } + + ret = ca8210_test_check_upstream(command, priv->spi); + if (ret == 0) { +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c +index 0f35597553f4..963a02c988e9 100644 +--- a/drivers/net/macvlan.c ++++ b/drivers/net/macvlan.c +@@ -1448,7 +1448,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, + /* the macvlan port may be freed by macvlan_uninit when fail to register. + * so we destroy the macvlan port only when it's valid. + */ +- if (create && macvlan_port_get_rtnl(dev)) ++ if (create && macvlan_port_get_rtnl(lowerdev)) + macvlan_port_destroy(port->dev); + return err; + } +diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c +index cbd629822f04..26fbbd3ffe33 100644 +--- a/drivers/net/phy/dp83640.c ++++ b/drivers/net/phy/dp83640.c +@@ -1207,6 +1207,23 @@ static void dp83640_remove(struct phy_device *phydev) + kfree(dp83640); + } + ++static int dp83640_soft_reset(struct phy_device *phydev) ++{ ++ int ret; ++ ++ ret = genphy_soft_reset(phydev); ++ if (ret < 0) ++ return ret; ++ ++ /* From DP83640 datasheet: "Software driver code must wait 3 us ++ * following a software reset before allowing further serial MII ++ * operations with the DP83640." ++ */ ++ udelay(10); /* Taking udelay inaccuracy into account */ ++ ++ return 0; ++} ++ + static int dp83640_config_init(struct phy_device *phydev) + { + struct dp83640_private *dp83640 = phydev->priv; +@@ -1501,6 +1518,7 @@ static struct phy_driver dp83640_driver = { + .flags = PHY_HAS_INTERRUPT, + .probe = dp83640_probe, + .remove = dp83640_remove, ++ .soft_reset = dp83640_soft_reset, + .config_init = dp83640_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c +index 1fb464837b3e..9881edc568ba 100644 +--- a/drivers/net/usb/lan78xx.c ++++ b/drivers/net/usb/lan78xx.c +@@ -2083,10 +2083,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) + + dev->fc_autoneg = phydev->autoneg; + +- phy_start(phydev); +- +- netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); +- + return 0; + + error: +@@ -2352,6 +2348,7 @@ static int lan78xx_reset(struct lan78xx_net *dev) + u32 buf; + int ret = 0; + unsigned long timeout; ++ u8 sig; + + ret = lan78xx_read_reg(dev, HW_CFG, &buf); + buf |= HW_CFG_LRST_; +@@ -2451,6 +2448,15 @@ static int lan78xx_reset(struct lan78xx_net *dev) + /* LAN7801 only has RGMII mode */ + if (dev->chipid == ID_REV_CHIP_ID_7801_) + buf &= ~MAC_CR_GMII_EN_; ++ ++ if (dev->chipid == ID_REV_CHIP_ID_7800_) { ++ ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig); ++ if (!ret && sig != EEPROM_INDICATOR) { ++ /* Implies there is no external eeprom. Set mac speed */ ++ netdev_info(dev->net, "No External EEPROM. 
Setting MAC Speed\n"); ++ buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_; ++ } ++ } + ret = lan78xx_write_reg(dev, MAC_CR, buf); + + ret = lan78xx_read_reg(dev, MAC_TX, &buf); +@@ -2513,9 +2519,9 @@ static int lan78xx_open(struct net_device *net) + if (ret < 0) + goto done; + +- ret = lan78xx_phy_init(dev); +- if (ret < 0) +- goto done; ++ phy_start(net->phydev); ++ ++ netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); + + /* for Link Check */ + if (dev->urb_intr) { +@@ -2576,13 +2582,8 @@ static int lan78xx_stop(struct net_device *net) + if (timer_pending(&dev->stat_monitor)) + del_timer_sync(&dev->stat_monitor); + +- phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); +- phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); +- +- phy_stop(net->phydev); +- phy_disconnect(net->phydev); +- +- net->phydev = NULL; ++ if (net->phydev) ++ phy_stop(net->phydev); + + clear_bit(EVENT_DEV_OPEN, &dev->flags); + netif_stop_queue(net); +@@ -3497,8 +3498,13 @@ static void lan78xx_disconnect(struct usb_interface *intf) + return; + + udev = interface_to_usbdev(intf); +- + net = dev->net; ++ ++ phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); ++ phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); ++ ++ phy_disconnect(net->phydev); ++ + unregister_netdev(net); + + cancel_delayed_work_sync(&dev->wq); +@@ -3658,8 +3664,14 @@ static int lan78xx_probe(struct usb_interface *intf, + pm_runtime_set_autosuspend_delay(&udev->dev, + DEFAULT_AUTOSUSPEND_DELAY); + ++ ret = lan78xx_phy_init(dev); ++ if (ret < 0) ++ goto out4; ++ + return 0; + ++out4: ++ unregister_netdev(netdev); + out3: + lan78xx_unbind(dev, intf); + out2: +@@ -4007,7 +4019,7 @@ static int lan78xx_reset_resume(struct usb_interface *intf) + + lan78xx_reset(dev); + +- lan78xx_phy_init(dev); ++ phy_start(dev->net->phydev); + + return lan78xx_resume(intf); + } +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index e522085ecbf7..8e06f308ce44 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -1184,6 +1184,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ + {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ ++ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ + {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c +index d51d9abf7986..aa88b640cb6c 100644 +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -1793,7 +1793,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) + + tx_data += len; + agg->skb_len += len; +- agg->skb_num++; ++ agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1; + + dev_kfree_skb_any(skb); + +diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c +index d0a113743195..7a6a1fe79309 100644 +--- a/drivers/net/usb/smsc75xx.c ++++ b/drivers/net/usb/smsc75xx.c +@@ -954,10 +954,11 @@ static int smsc75xx_set_features(struct net_device *netdev, + /* it's racing here! 
*/ + + ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); +- if (ret < 0) ++ if (ret < 0) { + netdev_warn(dev->net, "Error writing RFE_CTL\n"); +- +- return ret; ++ return ret; ++ } ++ return 0; + } + + static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index bb15b3012aa5..948611317c97 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -513,7 +513,7 @@ static struct sk_buff *receive_small(struct net_device *dev, + void *orig_data; + u32 act; + +- if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) ++ if (unlikely(hdr->hdr.gso_type)) + goto err_xdp; + + if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { +@@ -2655,8 +2655,8 @@ static int virtnet_probe(struct virtio_device *vdev) + + /* Assume link up if device can't report link status, + otherwise get link status from config. */ ++ netif_carrier_off(dev); + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { +- netif_carrier_off(dev); + schedule_work(&vi->config_work); + } else { + vi->status = VIRTIO_NET_S_LINK_UP; +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c +index c1772215702a..df11bb449988 100644 +--- a/drivers/net/wireless/ath/ath10k/mac.c ++++ b/drivers/net/wireless/ath/ath10k/mac.c +@@ -7059,10 +7059,20 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw, + { + struct ath10k *ar = hw->priv; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; ++ struct ath10k_vif *arvif = (void *)vif->drv_priv; ++ struct ath10k_peer *peer; + u32 bw, smps; + + spin_lock_bh(&ar->data_lock); + ++ peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); ++ if (!peer) { ++ spin_unlock_bh(&ar->data_lock); ++ ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n", ++ sta->addr, arvif->vdev_id); ++ return; ++ } ++ + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", + sta->addr, changed, sta->bandwidth, sta->rx_nss, +@@ -7810,6 +7820,7 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = { + .max_interfaces = 8, + .num_different_channels = 1, + .beacon_int_infra_match = true, ++ .beacon_int_min_gcd = 1, + #ifdef CONFIG_ATH10K_DFS_CERTIFIED + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | +@@ -7933,6 +7944,7 @@ static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = { + .max_interfaces = 16, + .num_different_channels = 1, + .beacon_int_infra_match = true, ++ .beacon_int_min_gcd = 1, + #ifdef CONFIG_ATH10K_DFS_CERTIFIED + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | +diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c +index 5e77fe1f5b0d..a41bcbda1d9e 100644 +--- a/drivers/net/wireless/ath/ath9k/common-spectral.c ++++ b/drivers/net/wireless/ath/ath9k/common-spectral.c +@@ -479,14 +479,16 @@ ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv) + { + int i = 0; + int ret = 0; ++ struct rchan_buf *buf; + struct rchan *rc = spec_priv->rfs_chan_spec_scan; + +- for_each_online_cpu(i) +- ret += relay_buf_full(*per_cpu_ptr(rc->buf, i)); +- +- i = num_online_cpus(); ++ for_each_possible_cpu(i) { ++ if ((buf = *per_cpu_ptr(rc->buf, i))) { ++ ret += relay_buf_full(buf); ++ } ++ } + +- if (ret == i) ++ if (ret) + return 1; + else + return 0; +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +index 4157c90ad973..083e5ce7eac7 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +@@ -6916,7 +6916,7 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy, + return; + + /* ignore non-ISO3166 country codes */ +- for (i = 0; i < sizeof(req->alpha2); i++) ++ for (i = 0; i < 2; i++) + if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') { + brcmf_err("not an ISO3166 code (0x%02x 0x%02x)\n", + req->alpha2[0], req->alpha2[1]); +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h +index 3721a3ed358b..f824bebceb06 100644 +--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h ++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h +@@ -211,7 +211,7 @@ enum { + * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end + * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. + * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. +- * @T2_V2_START_IMMEDIATELY: start time event immediately ++ * @TE_V2_START_IMMEDIATELY: start time event immediately + * @TE_V2_DEP_OTHER: depends on another time event + * @TE_V2_DEP_TSF: depends on a specific time + * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC +@@ -230,7 +230,7 @@ enum iwl_time_event_policy { + TE_V2_NOTIF_HOST_FRAG_END = BIT(5), + TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), + TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), +- T2_V2_START_IMMEDIATELY = BIT(11), ++ TE_V2_START_IMMEDIATELY = BIT(11), + + /* placement characteristics */ + TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +index f5dd7d83cd0a..2fa7ec466275 100644 +--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c ++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +@@ -8,6 +8,7 @@ + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH ++ * Copyright(c) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -33,6 +34,7 @@ + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH ++ * Copyright(c) 2018 Intel Corporation + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without +@@ -928,7 +930,6 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) + + out: + iwl_fw_free_dump_desc(fwrt); +- fwrt->dump.trig = NULL; + clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); + } + IWL_EXPORT_SYMBOL(iwl_fw_error_dump); +@@ -1084,6 +1085,14 @@ void iwl_fw_error_dump_wk(struct work_struct *work) + fwrt->ops->dump_start(fwrt->ops_ctx)) + return; + ++ if (fwrt->ops && fwrt->ops->fw_running && ++ !fwrt->ops->fw_running(fwrt->ops_ctx)) { ++ IWL_ERR(fwrt, "Firmware not running - cannot dump error\n"); ++ iwl_fw_free_dump_desc(fwrt); ++ clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); ++ goto out; ++ } ++ + if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { + /* stop recording */ + iwl_fw_dbg_stop_recording(fwrt); +@@ -1117,7 +1126,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work) + iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl); + } + } +- ++out: + if (fwrt->ops && fwrt->ops->dump_end) + fwrt->ops->dump_end(fwrt->ops_ctx); + } +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +index 223fb77a3aa9..72259bff9922 100644 +--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h ++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +@@ -8,6 +8,7 @@ + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH ++ * Copyright(c) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -33,6 +34,7 @@ + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH ++ * Copyright(c) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without +@@ -91,6 +93,7 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt) + if (fwrt->dump.desc != &iwl_dump_desc_assert) + kfree(fwrt->dump.desc); + fwrt->dump.desc = NULL; ++ fwrt->dump.trig = NULL; + } + + void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +index 50cfb6d795a5..fb1ad3c5c93c 100644 +--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h ++++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +@@ -6,6 +6,7 @@ + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH ++ * Copyright(c) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -26,6 +27,7 @@ + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH ++ * Copyright(c) 2018 Intel Corporation + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without +@@ -68,6 +70,7 @@ + struct iwl_fw_runtime_ops { + int (*dump_start)(void *ctx); + void (*dump_end)(void *ctx); ++ bool (*fw_running)(void *ctx); + }; + + #define MAX_NUM_LMAC 2 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +index e97904c2c4d4..714996187236 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +@@ -8,6 +8,7 @@ + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH ++ * Copyright(c) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -35,6 +36,7 @@ + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH ++ * Copyright(c) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without +@@ -1209,9 +1211,6 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, + { + int ret; + +- if (!iwl_mvm_firmware_running(mvm)) +- return -EIO; +- + ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE); + if (ret) + return ret; +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +index 2f22e14e00fe..8ba16fc24e3a 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +@@ -438,7 +438,8 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) + } + + /* Allocate the CAB queue for softAP and GO interfaces */ +- if (vif->type == NL80211_IFTYPE_AP) { ++ if (vif->type == NL80211_IFTYPE_AP || ++ vif->type == NL80211_IFTYPE_ADHOC) { + /* + * For TVQM this will be overwritten later with the FW assigned + * queue value (when queue is enabled). +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +index a9ac872226fd..db1fab9aa1c6 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +@@ -8,6 +8,7 @@ + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH ++ * Copyright(c) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -2127,15 +2128,40 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, + if (ret) + goto out_remove; + +- ret = iwl_mvm_add_mcast_sta(mvm, vif); +- if (ret) +- goto out_unbind; +- +- /* Send the bcast station. At this stage the TBTT and DTIM time events +- * are added and applied to the scheduler */ +- ret = iwl_mvm_send_add_bcast_sta(mvm, vif); +- if (ret) +- goto out_rm_mcast; ++ /* ++ * This is not very nice, but the simplest: ++ * For older FWs adding the mcast sta before the bcast station may ++ * cause assert 0x2b00. 
++ * This is fixed in later FW so make the order of removal depend on ++ * the TLV ++ */ ++ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { ++ ret = iwl_mvm_add_mcast_sta(mvm, vif); ++ if (ret) ++ goto out_unbind; ++ /* ++ * Send the bcast station. At this stage the TBTT and DTIM time ++ * events are added and applied to the scheduler ++ */ ++ ret = iwl_mvm_send_add_bcast_sta(mvm, vif); ++ if (ret) { ++ iwl_mvm_rm_mcast_sta(mvm, vif); ++ goto out_unbind; ++ } ++ } else { ++ /* ++ * Send the bcast station. At this stage the TBTT and DTIM time ++ * events are added and applied to the scheduler ++ */ ++ ret = iwl_mvm_send_add_bcast_sta(mvm, vif); ++ if (ret) ++ goto out_unbind; ++ ret = iwl_mvm_add_mcast_sta(mvm, vif); ++ if (ret) { ++ iwl_mvm_send_rm_bcast_sta(mvm, vif); ++ goto out_unbind; ++ } ++ } + + /* must be set before quota calculations */ + mvmvif->ap_ibss_active = true; +@@ -2165,7 +2191,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, + iwl_mvm_power_update_mac(mvm); + mvmvif->ap_ibss_active = false; + iwl_mvm_send_rm_bcast_sta(mvm, vif); +-out_rm_mcast: + iwl_mvm_rm_mcast_sta(mvm, vif); + out_unbind: + iwl_mvm_binding_remove_vif(mvm, vif); +@@ -2703,6 +2728,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, + + /* enable beacon filtering */ + WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); ++ ++ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, ++ false); ++ + ret = 0; + } else if (old_state == IEEE80211_STA_AUTHORIZED && + new_state == IEEE80211_STA_ASSOC) { +@@ -3468,6 +3497,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, + ret = 0; + goto out; + case NL80211_IFTYPE_STATION: ++ mvmvif->csa_bcn_pending = false; + break; + case NL80211_IFTYPE_MONITOR: + /* always disable PS when a monitor interface is active */ +@@ -3511,7 +3541,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, + } + + if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { +- u32 duration = 2 * vif->bss_conf.beacon_int; ++ u32 duration = 3 * vif->bss_conf.beacon_int; + + /* iwl_mvm_protect_session() reads directly from the + * device (the system time), so make sure it is +@@ -3524,6 +3554,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, + /* Protect the session to make sure we hear the first + * beacon on the new channel. 
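
On the chanctx hunk just above: the session is now protected for three beacon intervals instead of two, so the station has a realistic chance to hear a beacon on the new channel before the time event lapses. A standalone check of the numbers, assuming the common 100 TU beacon interval (1 TU = 1024 us); the values are illustrative, only the 3x factor comes from the patch:

#include <stdio.h>

int main(void)
{
	unsigned int beacon_int = 100;			/* TU, a common AP default */
	unsigned int duration = 3 * beacon_int;		/* as in the hunk above */
	unsigned int margin = beacon_int / 2;		/* unchanged by the patch */

	printf("protect %u TU (%u us), margin %u TU\n",
	       duration, duration * 1024, margin);
	return 0;
}
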
+ */ ++ mvmvif->csa_bcn_pending = true; + iwl_mvm_protect_session(mvm, vif, duration, duration, + vif->bss_conf.beacon_int / 2, + true); +@@ -3967,6 +3998,7 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, + if (vif->type == NL80211_IFTYPE_STATION) { + struct iwl_mvm_sta *mvmsta; + ++ mvmvif->csa_bcn_pending = false; + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, + mvmvif->ap_sta_id); + +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +index 2ec27ceb8af9..736c176f1fd6 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +@@ -434,6 +434,9 @@ struct iwl_mvm_vif { + bool csa_failed; + u16 csa_target_freq; + ++ /* Indicates that we are waiting for a beacon on a new channel */ ++ bool csa_bcn_pending; ++ + /* TCP Checksum Offload */ + netdev_features_t features; + +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +index 9fb40955d5f4..54f411b83bea 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +@@ -8,6 +8,7 @@ + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH ++ * Copyright(c) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -35,6 +36,7 @@ + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH ++ * Copyright(c) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without +@@ -553,9 +555,15 @@ static void iwl_mvm_fwrt_dump_end(void *ctx) + iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); + } + ++static bool iwl_mvm_fwrt_fw_running(void *ctx) ++{ ++ return iwl_mvm_firmware_running(ctx); ++} ++ + static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { + .dump_start = iwl_mvm_fwrt_dump_start, + .dump_end = iwl_mvm_fwrt_dump_end, ++ .fw_running = iwl_mvm_fwrt_fw_running, + }; + + static struct iwl_op_mode * +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +index d22cef7381ba..386fdee23eb0 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +@@ -2690,7 +2690,8 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, + enum nl80211_band band, +- struct rs_rate *rate) ++ struct rs_rate *rate, ++ bool init) + { + int i, nentries; + unsigned long active_rate; +@@ -2744,14 +2745,25 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, + */ + if (sta->vht_cap.vht_supported && + best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { +- switch (sta->bandwidth) { +- case IEEE80211_STA_RX_BW_160: +- case IEEE80211_STA_RX_BW_80: +- case IEEE80211_STA_RX_BW_40: ++ /* ++ * In AP mode, when a new station associates, rs is initialized ++ * immediately upon association completion, before the phy ++ * context is updated with the association parameters, so the ++ * sta bandwidth might be wider than the phy context allows. 
++ * To avoid this issue, always initialize rs with 20mhz ++ * bandwidth rate, and after authorization, when the phy context ++ * is already up-to-date, re-init rs with the correct bw. ++ */ ++ u32 bw = init ? RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta); ++ ++ switch (bw) { ++ case RATE_MCS_CHAN_WIDTH_40: ++ case RATE_MCS_CHAN_WIDTH_80: ++ case RATE_MCS_CHAN_WIDTH_160: + initial_rates = rs_optimal_rates_vht; + nentries = ARRAY_SIZE(rs_optimal_rates_vht); + break; +- case IEEE80211_STA_RX_BW_20: ++ case RATE_MCS_CHAN_WIDTH_20: + initial_rates = rs_optimal_rates_vht_20mhz; + nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz); + break; +@@ -2762,7 +2774,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, + + active_rate = lq_sta->active_siso_rate; + rate->type = LQ_VHT_SISO; +- rate->bw = rs_bw_from_sta_bw(sta); ++ rate->bw = bw; + } else if (sta->ht_cap.ht_supported && + best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { + initial_rates = rs_optimal_rates_ht; +@@ -2844,7 +2856,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, + tbl = &(lq_sta->lq_info[active_tbl]); + rate = &tbl->rate; + +- rs_get_initial_rate(mvm, sta, lq_sta, band, rate); ++ rs_get_initial_rate(mvm, sta, lq_sta, band, rate, init); + rs_init_optimal_rate(mvm, sta, lq_sta); + + WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B, +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +index 819e6f66a5b5..e2196dc35dc6 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +@@ -71,6 +71,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb); + struct iwl_mvm_key_pn *ptk_pn; ++ int res; + u8 tid, keyidx; + u8 pn[IEEE80211_CCMP_PN_LEN]; + u8 *extiv; +@@ -127,12 +128,13 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, + pn[4] = extiv[1]; + pn[5] = extiv[0]; + +- if (memcmp(pn, ptk_pn->q[queue].pn[tid], +- IEEE80211_CCMP_PN_LEN) <= 0) ++ res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN); ++ if (res < 0) ++ return -1; ++ if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN)) + return -1; + +- if (!(stats->flag & RX_FLAG_AMSDU_MORE)) +- memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); ++ memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); + stats->flag |= RX_FLAG_PN_VALIDATED; + + return 0; +@@ -310,28 +312,21 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, + } + + /* +- * returns true if a packet outside BA session is a duplicate and +- * should be dropped ++ * returns true if a packet is a duplicate and should be dropped. 
++ * Updates AMSDU PN tracking info + */ +-static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue, +- struct ieee80211_rx_status *rx_status, +- struct ieee80211_hdr *hdr, +- struct iwl_rx_mpdu_desc *desc) ++static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, ++ struct ieee80211_rx_status *rx_status, ++ struct ieee80211_hdr *hdr, ++ struct iwl_rx_mpdu_desc *desc) + { + struct iwl_mvm_sta *mvm_sta; + struct iwl_mvm_rxq_dup_data *dup_data; +- u8 baid, tid, sub_frame_idx; ++ u8 tid, sub_frame_idx; + + if (WARN_ON(IS_ERR_OR_NULL(sta))) + return false; + +- baid = (le32_to_cpu(desc->reorder_data) & +- IWL_RX_MPDU_REORDER_BAID_MASK) >> +- IWL_RX_MPDU_REORDER_BAID_SHIFT; +- +- if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) +- return false; +- + mvm_sta = iwl_mvm_sta_from_mac80211(sta); + dup_data = &mvm_sta->dup_data[queue]; + +@@ -361,6 +356,12 @@ static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue, + dup_data->last_sub_frame[tid] >= sub_frame_idx)) + return true; + ++ /* Allow same PN as the first subframe for following sub frames */ ++ if (dup_data->last_seq[tid] == hdr->seq_ctrl && ++ sub_frame_idx > dup_data->last_sub_frame[tid] && ++ desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) ++ rx_status->flag |= RX_FLAG_ALLOW_SAME_PN; ++ + dup_data->last_seq[tid] = hdr->seq_ctrl; + dup_data->last_sub_frame[tid] = sub_frame_idx; + +@@ -929,7 +930,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, + if (ieee80211_is_data(hdr->frame_control)) + iwl_mvm_rx_csum(sta, skb, desc); + +- if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) { ++ if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) { + kfree_skb(skb); + goto out; + } +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +index 0d7929799942..d31d84eebc5d 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +@@ -1679,7 +1679,8 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, + u32 qmask, enum nl80211_iftype iftype, + enum iwl_sta_type type) + { +- if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { ++ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || ++ sta->sta_id == IWL_MVM_INVALID_STA) { + sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); + if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) + return -ENOSPC; +@@ -2023,7 +2024,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = IWL_MVM_TX_FIFO_MCAST, + .sta_id = msta->sta_id, +- .tid = IWL_MAX_TID_COUNT, ++ .tid = 0, + .aggregate = false, + .frame_limit = IWL_FRAME_LIMIT, + }; +@@ -2036,6 +2037,17 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) + vif->type != NL80211_IFTYPE_ADHOC)) + return -ENOTSUPP; + ++ /* ++ * In IBSS, ieee80211_check_queues() sets the cab_queue to be ++ * invalid, so make sure we use the queue we want. ++ * Note that this is done here as we want to avoid making DQA ++ * changes in mac80211 layer. ++ */ ++ if (vif->type == NL80211_IFTYPE_ADHOC) { ++ vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; ++ mvmvif->cab_queue = vif->cab_queue; ++ } ++ + /* + * While in previous FWs we had to exclude cab queue from TFD queue + * mask, now it is needed as any other queue. 
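
Two related rxmq.c changes above are worth reading together: iwl_mvm_check_pn() splits the old memcmp(...) <= 0 rejection in two so that an equal PN can be accepted, and iwl_mvm_is_dup() sets RX_FLAG_ALLOW_SAME_PN only for later subframes of the same A-MSDU, which legitimately repeat the first subframe's PN. A standalone sketch of the resulting comparison; the function and parameter names here are illustrative:

#include <stdbool.h>
#include <string.h>

#define CCMP_PN_LEN 6

/* sketch of the reworked replay check: strictly older PNs are always
 * rejected; an equal PN passes only within the same A-MSDU */
static bool pn_acceptable(const unsigned char pn[CCMP_PN_LEN],
			  const unsigned char last[CCMP_PN_LEN],
			  bool allow_same_pn)
{
	int res = memcmp(pn, last, CCMP_PN_LEN);

	if (res < 0)		/* replay: older than the last seen PN */
		return false;
	if (res == 0 && !allow_same_pn)
		return false;	/* equal is valid only inside an A-MSDU */
	return true;		/* newer, or an allowed A-MSDU repeat */
}
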
+@@ -2063,24 +2075,13 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) + if (iwl_mvm_has_new_tx_api(mvm)) { + int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue, + msta->sta_id, +- IWL_MAX_TID_COUNT, ++ 0, + timeout); + mvmvif->cab_queue = queue; + } else if (!fw_has_api(&mvm->fw->ucode_capa, +- IWL_UCODE_TLV_API_STA_TYPE)) { +- /* +- * In IBSS, ieee80211_check_queues() sets the cab_queue to be +- * invalid, so make sure we use the queue we want. +- * Note that this is done here as we want to avoid making DQA +- * changes in mac80211 layer. +- */ +- if (vif->type == NL80211_IFTYPE_ADHOC) { +- vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; +- mvmvif->cab_queue = vif->cab_queue; +- } ++ IWL_UCODE_TLV_API_STA_TYPE)) + iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, + &cfg, timeout); +- } + + return 0; + } +@@ -2099,7 +2100,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) + iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0); + + iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, +- IWL_MAX_TID_COUNT, 0); ++ 0, 0); + + ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); + if (ret) +@@ -2435,28 +2436,12 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + + /* + * Note the possible cases: +- * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed +- * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free +- * one and mark it as reserved +- * 3. In DQA mode, but no traffic yet on this TID: same treatment as in +- * non-DQA mode, since the TXQ hasn't yet been allocated +- * Don't support case 3 for new TX path as it is not expected to happen +- * and aggregation will be offloaded soon anyway ++ * 1. An enabled TXQ - TXQ needs to become agg'ed ++ * 2. 
The TXQ hasn't yet been enabled, so find a free one and mark ++ * it as reserved + */ + txq_id = mvmsta->tid_data[tid].txq_id; +- if (iwl_mvm_has_new_tx_api(mvm)) { +- if (txq_id == IWL_MVM_INVALID_QUEUE) { +- ret = -ENXIO; +- goto release_locks; +- } +- } else if (unlikely(mvm->queue_info[txq_id].status == +- IWL_MVM_QUEUE_SHARED)) { +- ret = -ENXIO; +- IWL_DEBUG_TX_QUEUES(mvm, +- "Can't start tid %d agg on shared queue!\n", +- tid); +- goto release_locks; +- } else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) { ++ if (txq_id == IWL_MVM_INVALID_QUEUE) { + txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, + IWL_MVM_DQA_MIN_DATA_QUEUE, + IWL_MVM_DQA_MAX_DATA_QUEUE); +@@ -2465,16 +2450,16 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + IWL_ERR(mvm, "Failed to allocate agg queue\n"); + goto release_locks; + } +- /* +- * TXQ shouldn't be in inactive mode for non-DQA, so getting +- * an inactive queue from iwl_mvm_find_free_queue() is +- * certainly a bug +- */ +- WARN_ON(mvm->queue_info[txq_id].status == +- IWL_MVM_QUEUE_INACTIVE); + + /* TXQ hasn't yet been enabled, so mark it only as reserved */ + mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; ++ } else if (unlikely(mvm->queue_info[txq_id].status == ++ IWL_MVM_QUEUE_SHARED)) { ++ ret = -ENXIO; ++ IWL_DEBUG_TX_QUEUES(mvm, ++ "Can't start tid %d agg on shared queue!\n", ++ tid); ++ goto release_locks; + } + + spin_unlock(&mvm->queue_info_lock); +@@ -2645,8 +2630,10 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + + static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, +- u16 txq_id) ++ struct iwl_mvm_tid_data *tid_data) + { ++ u16 txq_id = tid_data->txq_id; ++ + if (iwl_mvm_has_new_tx_api(mvm)) + return; + +@@ -2658,8 +2645,10 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, + * allocated through iwl_mvm_enable_txq, so we can just mark it back as + * free. 
+ */ +- if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) ++ if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) { + mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; ++ tid_data->txq_id = IWL_MVM_INVALID_QUEUE; ++ } + + spin_unlock_bh(&mvm->queue_info_lock); + } +@@ -2690,7 +2679,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + + mvmsta->agg_tids &= ~BIT(tid); + +- iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id); ++ iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); + + switch (tid_data->state) { + case IWL_AGG_ON: +@@ -2757,7 +2746,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + mvmsta->agg_tids &= ~BIT(tid); + spin_unlock_bh(&mvmsta->lock); + +- iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id); ++ iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); + + if (old_state >= IWL_AGG_ON) { + iwl_mvm_drain_sta(mvm, mvmsta, true); +@@ -3119,8 +3108,9 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, + int ret, size; + u32 status; + ++ /* This is a valid situation for GTK removal */ + if (sta_id == IWL_MVM_INVALID_STA) +- return -EINVAL; ++ return 0; + + key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & + STA_KEY_FLG_KEYID_MSK); +@@ -3181,17 +3171,9 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, + } + sta_id = mvm_sta->sta_id; + +- if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || +- keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || +- keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { +- ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, +- false); +- goto end; +- } +- + /* + * It is possible that the 'sta' parameter is NULL, and thus +- * there is a need to retrieve the sta from the local station ++ * there is a need to retrieve the sta from the local station + * table. + */ + if (!sta) { +@@ -3206,6 +3188,17 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, + + if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) + return -EINVAL; ++ } else { ++ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); ++ ++ sta_id = mvmvif->mcast_sta.sta_id; ++ } ++ ++ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || ++ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || ++ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { ++ ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false); ++ goto end; + } + + /* If the key_offset is not pre-assigned, we need to find a +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +index e25cda9fbf6c..342ca1778efd 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +@@ -8,6 +8,7 @@ + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH ++ * Copyright(c) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -18,11 +19,6 @@ + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * +- * You should have received a copy of the GNU General Public License +- * along with this program; if not, write to the Free Software +- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, +- * USA +- * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * +@@ -35,6 +31,7 @@ + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH ++ * Copyright(c) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without +@@ -203,9 +200,13 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + const char *errmsg) + { ++ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); ++ + if (vif->type != NL80211_IFTYPE_STATION) + return false; +- if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) ++ ++ if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc && ++ vif->bss_conf.dtim_period) + return false; + if (errmsg) + IWL_ERR(mvm, "%s\n", errmsg); +@@ -349,7 +350,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, + * and know the dtim period. + */ + iwl_mvm_te_check_disconnect(mvm, te_data->vif, +- "No association and the time event is over already..."); ++ "No beacon heard and the time event is over already..."); + break; + default: + break; +@@ -621,7 +622,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, + time_cmd.repeat = 1; + time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | + TE_V2_NOTIF_HOST_EVENT_END | +- T2_V2_START_IMMEDIATELY); ++ TE_V2_START_IMMEDIATELY); + + if (!wait_for_notif) { + iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); +@@ -814,7 +815,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + time_cmd.repeat = 1; + time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | + TE_V2_NOTIF_HOST_EVENT_END | +- T2_V2_START_IMMEDIATELY); ++ TE_V2_START_IMMEDIATELY); + + return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); + } +@@ -924,6 +925,8 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, + time_cmd.interval = cpu_to_le32(1); + time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | + TE_V2_ABSENCE); ++ if (!apply_time) ++ time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY); + + return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); + } +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +index 887a504ce64a..6c014c273922 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +@@ -419,11 +419,11 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, + { + struct ieee80211_key_conf *keyconf = info->control.hw_key; + u8 *crypto_hdr = skb_frag->data + hdrlen; ++ enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM; + u64 pn; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: +- case WLAN_CIPHER_SUITE_CCMP_256: + iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); + iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); + break; +@@ -447,13 +447,16 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, + break; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: ++ type = TX_CMD_SEC_GCMP; ++ /* Fall through */ ++ case WLAN_CIPHER_SUITE_CCMP_256: + /* TODO: Taking the key from the table might introduce a race + * when PTK rekeying is done, having an old packets with a PN + * based 
on the old key but the message encrypted with a new + * one. + * Need to handle this. + */ +- tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE; ++ tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE; + tx_cmd->key[0] = keyconf->hw_key_idx; + iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); + break; +@@ -645,7 +648,11 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) + if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || + info.control.vif->type == NL80211_IFTYPE_AP || + info.control.vif->type == NL80211_IFTYPE_ADHOC) { +- sta_id = mvmvif->bcast_sta.sta_id; ++ if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE) ++ sta_id = mvmvif->bcast_sta.sta_id; ++ else ++ sta_id = mvmvif->mcast_sta.sta_id; ++ + queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, + hdr->frame_control); + if (queue < 0) +@@ -1872,14 +1879,12 @@ int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags) + struct iwl_mvm_int_sta *int_sta = sta; + struct iwl_mvm_sta *mvm_sta = sta; + +- if (iwl_mvm_has_new_tx_api(mvm)) { +- if (internal) +- return iwl_mvm_flush_sta_tids(mvm, int_sta->sta_id, +- BIT(IWL_MGMT_TID), flags); ++ BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) != ++ offsetof(struct iwl_mvm_sta, sta_id)); + ++ if (iwl_mvm_has_new_tx_api(mvm)) + return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, +- 0xFF, flags); +- } ++ 0xff | BIT(IWL_MGMT_TID), flags); + + if (internal) + return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk, +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +index 43ab172d31cb..d2cada0ab426 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +@@ -810,12 +810,19 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, + .scd_queue = queue, + .action = SCD_CFG_DISABLE_QUEUE, + }; +- bool remove_mac_queue = true; ++ bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE; + int ret; + ++ if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES)) ++ return -EINVAL; ++ + if (iwl_mvm_has_new_tx_api(mvm)) { + spin_lock_bh(&mvm->queue_info_lock); +- mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue); ++ ++ if (remove_mac_queue) ++ mvm->hw_queue_to_mac80211[queue] &= ++ ~BIT(mac80211_queue); ++ + spin_unlock_bh(&mvm->queue_info_lock); + + iwl_trans_txq_free(mvm->trans, queue); +diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c +index 8d3a4839b6ef..370161ca2a1c 100644 +--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c ++++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c +@@ -636,11 +636,14 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, + u32 *read_buf, u16 size) + { + u32 addr_on_bus, *data; +- u32 align[2] = {}; + u16 ms_addr; + int status; + +- data = PTR_ALIGN(&align[0], 8); ++ data = kzalloc(RSI_MASTER_REG_BUF_SIZE, GFP_KERNEL); ++ if (!data) ++ return -ENOMEM; ++ ++ data = PTR_ALIGN(data, 8); + + ms_addr = (addr >> 16); + status = rsi_sdio_master_access_msword(adapter, ms_addr); +@@ -648,7 +651,7 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, + rsi_dbg(ERR_ZONE, + "%s: Unable to set ms word to common reg\n", + __func__); +- return status; ++ goto err; + } + addr &= 0xFFFF; + +@@ -666,7 +669,7 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, + (u8 *)data, 4); + if (status < 0) { + rsi_dbg(ERR_ZONE, "%s: AHB register read failed\n", __func__); +- return 
status; ++ goto err; + } + if (size == 2) { + if ((addr & 0x3) == 0) +@@ -688,17 +691,23 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, + *read_buf = *data; + } + +- return 0; ++err: ++ kfree(data); ++ return status; + } + + static int rsi_sdio_master_reg_write(struct rsi_hw *adapter, + unsigned long addr, + unsigned long data, u16 size) + { +- unsigned long data1[2], *data_aligned; ++ unsigned long *data_aligned; + int status; + +- data_aligned = PTR_ALIGN(&data1[0], 8); ++ data_aligned = kzalloc(RSI_MASTER_REG_BUF_SIZE, GFP_KERNEL); ++ if (!data_aligned) ++ return -ENOMEM; ++ ++ data_aligned = PTR_ALIGN(data_aligned, 8); + + if (size == 2) { + *data_aligned = ((data << 16) | (data & 0xFFFF)); +@@ -717,6 +726,7 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter, + rsi_dbg(ERR_ZONE, + "%s: Unable to set ms word to common reg\n", + __func__); ++ kfree(data_aligned); + return -EIO; + } + addr = addr & 0xFFFF; +@@ -726,12 +736,12 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter, + (adapter, + (addr | RSI_SD_REQUEST_MASTER), + (u8 *)data_aligned, size); +- if (status < 0) { ++ if (status < 0) + rsi_dbg(ERR_ZONE, + "%s: Unable to do AHB reg write\n", __func__); +- return status; +- } +- return 0; ++ ++ kfree(data_aligned); ++ return status; + } + + /** +diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h +index 95e4bed57baf..903392039200 100644 +--- a/drivers/net/wireless/rsi/rsi_sdio.h ++++ b/drivers/net/wireless/rsi/rsi_sdio.h +@@ -46,6 +46,8 @@ enum sdio_interrupt_type { + #define PKT_BUFF_AVAILABLE 1 + #define FW_ASSERT_IND 2 + ++#define RSI_MASTER_REG_BUF_SIZE 12 ++ + #define RSI_DEVICE_BUFFER_STATUS_REGISTER 0xf3 + #define RSI_FN1_INT_REGISTER 0xf9 + #define RSI_SD_REQUEST_MASTER 0x10000 +diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c +index 8cd42544c90e..740aae51e1c6 100644 +--- a/drivers/nvme/host/fabrics.c ++++ b/drivers/nvme/host/fabrics.c +@@ -606,8 +606,10 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, + opts->discovery_nqn = + !(strcmp(opts->subsysnqn, + NVME_DISC_SUBSYS_NAME)); +- if (opts->discovery_nqn) ++ if (opts->discovery_nqn) { ++ opts->kato = 0; + opts->nr_io_queues = 0; ++ } + break; + case NVMF_OPT_TRADDR: + p = match_strdup(args); +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index eab17405e815..3d4724e38aa9 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -1013,12 +1013,6 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) + if (!(csts & NVME_CSTS_CFS) && !nssro) + return false; + +- /* If PCI error recovery process is happening, we cannot reset or +- * the recovery mechanism will surely fail. +- */ +- if (pci_channel_offline(to_pci_dev(dev->dev))) +- return false; +- + return true; + } + +@@ -1049,6 +1043,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) + struct nvme_command cmd; + u32 csts = readl(dev->bar + NVME_REG_CSTS); + ++ /* If PCI error recovery process is happening, we cannot reset or ++ * the recovery mechanism will surely fail. 
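
On the rsi_91x_sdio hunks further up: the small on-stack buffers are replaced with kzalloc'd ones of RSI_MASTER_REG_BUF_SIZE (12) bytes before the PTR_ALIGN(ptr, 8), since the buffer is handed to the SDIO host and a stack slot is not safe for that, nor guaranteed to leave room once the pointer is rounded up. A standalone sketch of the over-allocate-then-align arithmetic, in userspace terms:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned char *raw = calloc(1, 12);	/* RSI_MASTER_REG_BUF_SIZE */
	uintptr_t aligned;

	if (!raw)
		return 1;
	aligned = ((uintptr_t)raw + 7) & ~(uintptr_t)7;	/* PTR_ALIGN(raw, 8) */
	printf("raw=%p aligned=%p slack=%u\n", (void *)raw,
	       (void *)aligned, (unsigned int)(aligned - (uintptr_t)raw));
	free(raw);	/* free the original pointer, not the aligned one */
	return 0;
}
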
++ */ ++ mb(); ++ if (pci_channel_offline(to_pci_dev(dev->dev))) ++ return BLK_EH_RESET_TIMER; ++ + /* + * Reset immediately if the controller is failed + */ +@@ -1322,7 +1323,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) + nvmeq->cq_vector = qid - 1; + result = adapter_alloc_cq(dev, qid, nvmeq); + if (result < 0) +- return result; ++ goto release_vector; + + result = adapter_alloc_sq(dev, qid, nvmeq); + if (result < 0) +@@ -1336,9 +1337,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) + return result; + + release_sq: ++ dev->online_queues--; + adapter_delete_sq(dev, qid); + release_cq: + adapter_delete_cq(dev, qid); ++ release_vector: ++ nvmeq->cq_vector = -1; + return result; + } + +@@ -1766,7 +1770,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) + int result, nr_io_queues; + unsigned long size; + +- nr_io_queues = num_present_cpus(); ++ nr_io_queues = num_possible_cpus(); + result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); + if (result < 0) + return result; +@@ -2310,10 +2314,13 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) + } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { + /* + * Samsung SSD 960 EVO drops off the PCIe bus after system +- * suspend on a Ryzen board, ASUS PRIME B350M-A. ++ * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as ++ * within few minutes after bootup on a Coffee Lake board - ++ * ASUS PRIME Z370-A + */ + if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && +- dmi_match(DMI_BOARD_NAME, "PRIME B350M-A")) ++ (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || ++ dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) + return NVME_QUIRK_NO_APST; + } + +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c +index 645ba7eee35d..240b0d628222 100644 +--- a/drivers/nvme/target/core.c ++++ b/drivers/nvme/target/core.c +@@ -505,9 +505,12 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, + goto fail; + } + +- /* either variant of SGLs is fine, as we don't support metadata */ +- if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF && +- (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) { ++ /* ++ * For fabrics, PSDT field shall describe metadata pointer (MPTR) that ++ * contains an address of a single contiguous physical buffer that is ++ * byte aligned. ++ */ ++ if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) { + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + goto fail; + } +diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c +index 41b740aed3a3..69bd98421eb1 100644 +--- a/drivers/parisc/lba_pci.c ++++ b/drivers/parisc/lba_pci.c +@@ -1403,9 +1403,27 @@ lba_hw_init(struct lba_device *d) + WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG); + } + +- /* Set HF mode as the default (vs. -1 mode). */ ++ ++ /* ++ * Hard Fail vs. Soft Fail on PCI "Master Abort". ++ * ++ * "Master Abort" means the MMIO transaction timed out - usually due to ++ * the device not responding to an MMIO read. We would like HF to be ++ * enabled to find driver problems, though it means the system will ++ * crash with a HPMC. ++ * ++ * In SoftFail mode "~0L" is returned as a result of a timeout on the ++ * pci bus. This is like how PCI busses on x86 and most other ++ * architectures behave. In order to increase compatibility with ++ * existing (x86) PCI hardware and existing Linux drivers we enable ++ * Soft Faul mode on PA-RISC now too. 
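
Back on the nvme/pci.c change at the top of this stretch: the pci_channel_offline() test moves out of nvme_should_reset() and into nvme_timeout(), where it now re-arms the timer instead of silently declining the reset, so PCI error recovery and the controller reset no longer race. A condensed sketch of that early-out; the wrapper function is hypothetical and the rest of the real handler is elided:

/* condensed from the nvme_timeout() hunk above, not a full handler */
static enum blk_eh_timer_return example_timeout_early_out(struct nvme_dev *dev)
{
	mb();	/* order against updates to the channel state */
	if (pci_channel_offline(to_pci_dev(dev->dev)))
		return BLK_EH_RESET_TIMER;	/* let recovery finish first */

	return BLK_EH_HANDLED;	/* placeholder for the real timeout path */
}
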
++ */ + stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL); ++#if defined(ENABLE_HARDFAIL) + WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL); ++#else ++ WRITE_REG32(stat & ~HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL); ++#endif + + /* + ** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c +index bb0927de79dd..ea69b4dbab66 100644 +--- a/drivers/pci/pci-driver.c ++++ b/drivers/pci/pci-driver.c +@@ -1164,11 +1164,14 @@ static int pci_pm_runtime_suspend(struct device *dev) + int error; + + /* +- * If pci_dev->driver is not set (unbound), the device should +- * always remain in D0 regardless of the runtime PM status ++ * If pci_dev->driver is not set (unbound), we leave the device in D0, ++ * but it may go to D3cold when the bridge above it runtime suspends. ++ * Save its config space in case that happens. + */ +- if (!pci_dev->driver) ++ if (!pci_dev->driver) { ++ pci_save_state(pci_dev); + return 0; ++ } + + if (!pm || !pm->runtime_suspend) + return -ENOSYS; +@@ -1216,16 +1219,18 @@ static int pci_pm_runtime_resume(struct device *dev) + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + /* +- * If pci_dev->driver is not set (unbound), the device should +- * always remain in D0 regardless of the runtime PM status ++ * Restoring config space is necessary even if the device is not bound ++ * to a driver because although we left it in D0, it may have gone to ++ * D3cold when the bridge above it runtime suspended. + */ ++ pci_restore_standard_config(pci_dev); ++ + if (!pci_dev->driver) + return 0; + + if (!pm || !pm->runtime_resume) + return -ENOSYS; + +- pci_restore_standard_config(pci_dev); + pci_fixup_device(pci_fixup_resume_early, pci_dev); + pci_enable_wake(pci_dev, PCI_D0, false); + pci_fixup_device(pci_fixup_resume, pci_dev); +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 116127a0accb..929d68f744af 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -3896,6 +3896,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182, + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */ + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0, + quirk_dma_func1_alias); ++/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */ ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220, ++ quirk_dma_func1_alias); + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */ + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230, + quirk_dma_func1_alias); +diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c +index c3b615c94b4b..8c8caec3a72c 100644 +--- a/drivers/pcmcia/cs.c ++++ b/drivers/pcmcia/cs.c +@@ -452,17 +452,20 @@ static int socket_insert(struct pcmcia_socket *skt) + + static int socket_suspend(struct pcmcia_socket *skt) + { +- if (skt->state & SOCKET_SUSPEND) ++ if ((skt->state & SOCKET_SUSPEND) && !(skt->state & SOCKET_IN_RESUME)) + return -EBUSY; + + mutex_lock(&skt->ops_mutex); +- skt->suspended_state = skt->state; ++ /* store state on first suspend, but not after spurious wakeups */ ++ if (!(skt->state & SOCKET_IN_RESUME)) ++ skt->suspended_state = skt->state; + + skt->socket = dead_socket; + skt->ops->set_socket(skt, &skt->socket); + if (skt->ops->suspend) + skt->ops->suspend(skt); + skt->state |= SOCKET_SUSPEND; ++ skt->state &= ~SOCKET_IN_RESUME; + mutex_unlock(&skt->ops_mutex); + return 0; + } +@@ -475,6 +478,7 @@ static int socket_early_resume(struct pcmcia_socket *skt) + skt->ops->set_socket(skt, 
&skt->socket); + if (skt->state & SOCKET_PRESENT) + skt->resume_status = socket_setup(skt, resume_delay); ++ skt->state |= SOCKET_IN_RESUME; + mutex_unlock(&skt->ops_mutex); + return 0; + } +@@ -484,7 +488,7 @@ static int socket_late_resume(struct pcmcia_socket *skt) + int ret = 0; + + mutex_lock(&skt->ops_mutex); +- skt->state &= ~SOCKET_SUSPEND; ++ skt->state &= ~(SOCKET_SUSPEND | SOCKET_IN_RESUME); + mutex_unlock(&skt->ops_mutex); + + if (!(skt->state & SOCKET_PRESENT)) { +diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h +index e86cd6b31773..384629ce48f5 100644 +--- a/drivers/pcmcia/cs_internal.h ++++ b/drivers/pcmcia/cs_internal.h +@@ -70,6 +70,7 @@ struct pccard_resource_ops { + /* Flags in socket state */ + #define SOCKET_PRESENT 0x0008 + #define SOCKET_INUSE 0x0010 ++#define SOCKET_IN_RESUME 0x0040 + #define SOCKET_SUSPEND 0x0080 + #define SOCKET_WIN_REQ(i) (0x0100<<(i)) + #define SOCKET_CARDBUS 0x8000 +diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c +index e17f0351ccc2..2526971f9929 100644 +--- a/drivers/phy/qualcomm/phy-qcom-qmp.c ++++ b/drivers/phy/qualcomm/phy-qcom-qmp.c +@@ -751,8 +751,6 @@ static int qcom_qmp_phy_poweroff(struct phy *phy) + struct qmp_phy *qphy = phy_get_drvdata(phy); + struct qcom_qmp *qmp = qphy->qmp; + +- clk_disable_unprepare(qphy->pipe_clk); +- + regulator_bulk_disable(qmp->cfg->num_vregs, qmp->vregs); + + return 0; +@@ -936,6 +934,8 @@ static int qcom_qmp_phy_exit(struct phy *phy) + const struct qmp_phy_cfg *cfg = qmp->cfg; + int i = cfg->num_clks; + ++ clk_disable_unprepare(qphy->pipe_clk); ++ + /* PHY reset */ + qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); + +diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c +index f1b24f18e9b2..b0d10934413f 100644 +--- a/drivers/phy/rockchip/phy-rockchip-emmc.c ++++ b/drivers/phy/rockchip/phy-rockchip-emmc.c +@@ -76,6 +76,10 @@ + #define PHYCTRL_OTAPDLYSEL_MASK 0xf + #define PHYCTRL_OTAPDLYSEL_SHIFT 0x7 + ++#define PHYCTRL_IS_CALDONE(x) \ ++ ((((x) >> PHYCTRL_CALDONE_SHIFT) & \ ++ PHYCTRL_CALDONE_MASK) == PHYCTRL_CALDONE_DONE) ++ + struct rockchip_emmc_phy { + unsigned int reg_offset; + struct regmap *reg_base; +@@ -90,6 +94,7 @@ static int rockchip_emmc_phy_power(struct phy *phy, bool on_off) + unsigned int freqsel = PHYCTRL_FREQSEL_200M; + unsigned long rate; + unsigned long timeout; ++ int ret; + + /* + * Keep phyctrl_pdb and phyctrl_endll low to allow +@@ -160,17 +165,19 @@ static int rockchip_emmc_phy_power(struct phy *phy, bool on_off) + PHYCTRL_PDB_SHIFT)); + + /* +- * According to the user manual, it asks driver to +- * wait 5us for calpad busy trimming ++ * According to the user manual, it asks driver to wait 5us for ++ * calpad busy trimming. However it is documented that this value is ++ * PVT(A.K.A process,voltage and temperature) relevant, so some ++ * failure cases are found which indicates we should be more tolerant ++ * to calpad busy trimming. 
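
On the pcmcia/cs.c hunks further up: the new SOCKET_IN_RESUME flag lets socket_suspend() tell a spurious wakeup (suspend re-entered after early resume) apart from a genuine double suspend, so suspended_state is captured once and is not overwritten with a half-resumed state. The flag's lifetime, compressed into one view; state bits are the patch's, locking is elided:

/* compressed from the three pcmcia hunks above */
static int sketch_suspend(struct pcmcia_socket *skt)
{
	if ((skt->state & SOCKET_SUSPEND) && !(skt->state & SOCKET_IN_RESUME))
		return -EBUSY;				/* real double suspend */
	if (!(skt->state & SOCKET_IN_RESUME))
		skt->suspended_state = skt->state;	/* first pass only */
	skt->state |= SOCKET_SUSPEND;
	skt->state &= ~SOCKET_IN_RESUME;
	return 0;
}

/* early resume sets SOCKET_IN_RESUME; late resume clears both:
 * skt->state &= ~(SOCKET_SUSPEND | SOCKET_IN_RESUME); */
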
+ */ +- udelay(5); +- regmap_read(rk_phy->reg_base, +- rk_phy->reg_offset + GRF_EMMCPHY_STATUS, +- &caldone); +- caldone = (caldone >> PHYCTRL_CALDONE_SHIFT) & PHYCTRL_CALDONE_MASK; +- if (caldone != PHYCTRL_CALDONE_DONE) { +- pr_err("rockchip_emmc_phy_power: caldone timeout.\n"); +- return -ETIMEDOUT; ++ ret = regmap_read_poll_timeout(rk_phy->reg_base, ++ rk_phy->reg_offset + GRF_EMMCPHY_STATUS, ++ caldone, PHYCTRL_IS_CALDONE(caldone), ++ 0, 50); ++ if (ret) { ++ pr_err("%s: caldone failed, ret=%d\n", __func__, ret); ++ return ret; + } + + /* Set the frequency of the DLL operation */ +diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c +index 1ff6c3573493..b601039d6c69 100644 +--- a/drivers/pinctrl/devicetree.c ++++ b/drivers/pinctrl/devicetree.c +@@ -122,8 +122,10 @@ static int dt_to_map_one_config(struct pinctrl *p, + /* OK let's just assume this will appear later then */ + return -EPROBE_DEFER; + } +- if (!pctldev) +- pctldev = get_pinctrl_dev_from_of_node(np_pctldev); ++ /* If we're creating a hog we can use the passed pctldev */ ++ if (pctldev && (np_pctldev == p->dev->of_node)) ++ break; ++ pctldev = get_pinctrl_dev_from_of_node(np_pctldev); + if (pctldev) + break; + /* Do not defer probing of hogs (circular loop) */ +diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c +index 447763aad815..db9cca4a83ff 100644 +--- a/drivers/pinctrl/pinctrl-mcp23s08.c ++++ b/drivers/pinctrl/pinctrl-mcp23s08.c +@@ -779,6 +779,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, + { + int status, ret; + bool mirror = false; ++ struct regmap_config *one_regmap_config = NULL; + + mutex_init(&mcp->lock); + +@@ -799,22 +800,36 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, + switch (type) { + #ifdef CONFIG_SPI_MASTER + case MCP_TYPE_S08: +- mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, +- &mcp23x08_regmap); +- mcp->reg_shift = 0; +- mcp->chip.ngpio = 8; +- mcp->chip.label = "mcp23s08"; +- break; +- + case MCP_TYPE_S17: ++ switch (type) { ++ case MCP_TYPE_S08: ++ one_regmap_config = ++ devm_kmemdup(dev, &mcp23x08_regmap, ++ sizeof(struct regmap_config), GFP_KERNEL); ++ mcp->reg_shift = 0; ++ mcp->chip.ngpio = 8; ++ mcp->chip.label = "mcp23s08"; ++ break; ++ case MCP_TYPE_S17: ++ one_regmap_config = ++ devm_kmemdup(dev, &mcp23x17_regmap, ++ sizeof(struct regmap_config), GFP_KERNEL); ++ mcp->reg_shift = 1; ++ mcp->chip.ngpio = 16; ++ mcp->chip.label = "mcp23s17"; ++ break; ++ } ++ if (!one_regmap_config) ++ return -ENOMEM; ++ ++ one_regmap_config->name = devm_kasprintf(dev, GFP_KERNEL, "%d", (addr & ~0x40) >> 1); + mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, +- &mcp23x17_regmap); +- mcp->reg_shift = 1; +- mcp->chip.ngpio = 16; +- mcp->chip.label = "mcp23s17"; ++ one_regmap_config); + break; + + case MCP_TYPE_S18: ++ if (!one_regmap_config) ++ return -ENOMEM; + mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, + &mcp23x17_regmap); + mcp->reg_shift = 1; +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c +index ff491da64dab..19cd357bb464 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c +@@ -818,7 +818,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) + return -EINVAL; + + chip = &pctrl->chip; +- chip->base = 0; ++ chip->base = -1; + chip->ngpio = ngpio; + chip->label = dev_name(pctrl->dev); + chip->parent = pctrl->dev; +diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c 
b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c +index 200e1f4f6db9..711333fb2c6e 100644 +--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c ++++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c +@@ -1,7 +1,7 @@ + /* + * R8A7796 processor support - PFC hardware block. + * +- * Copyright (C) 2016 Renesas Electronics Corp. ++ * Copyright (C) 2016-2017 Renesas Electronics Corp. + * + * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7795.c + * +@@ -477,7 +477,7 @@ FM(IP16_31_28) IP16_31_28 FM(IP17_31_28) IP17_31_28 + #define MOD_SEL1_26 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1) + #define MOD_SEL1_25_24 FM(SEL_SSP1_1_0) FM(SEL_SSP1_1_1) FM(SEL_SSP1_1_2) FM(SEL_SSP1_1_3) + #define MOD_SEL1_23_22_21 FM(SEL_SSP1_0_0) FM(SEL_SSP1_0_1) FM(SEL_SSP1_0_2) FM(SEL_SSP1_0_3) FM(SEL_SSP1_0_4) F_(0, 0) F_(0, 0) F_(0, 0) +-#define MOD_SEL1_20 FM(SEL_SSI_0) FM(SEL_SSI_1) ++#define MOD_SEL1_20 FM(SEL_SSI1_0) FM(SEL_SSI1_1) + #define MOD_SEL1_19 FM(SEL_SPEED_PULSE_0) FM(SEL_SPEED_PULSE_1) + #define MOD_SEL1_18_17 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1) FM(SEL_SIMCARD_2) FM(SEL_SIMCARD_3) + #define MOD_SEL1_16 FM(SEL_SDHI2_0) FM(SEL_SDHI2_1) +@@ -1224,7 +1224,7 @@ static const u16 pinmux_data[] = { + PINMUX_IPSR_GPSR(IP13_11_8, HSCK0), + PINMUX_IPSR_MSEL(IP13_11_8, MSIOF1_SCK_D, SEL_MSIOF1_3), + PINMUX_IPSR_MSEL(IP13_11_8, AUDIO_CLKB_A, SEL_ADG_B_0), +- PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA1_B, SEL_SSI_1), ++ PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA1_B, SEL_SSI1_1), + PINMUX_IPSR_MSEL(IP13_11_8, TS_SCK0_D, SEL_TSIF0_3), + PINMUX_IPSR_MSEL(IP13_11_8, STP_ISCLK_0_D, SEL_SSP1_0_3), + PINMUX_IPSR_MSEL(IP13_11_8, RIF0_CLK_C, SEL_DRIF0_2), +@@ -1232,14 +1232,14 @@ static const u16 pinmux_data[] = { + + PINMUX_IPSR_GPSR(IP13_15_12, HRX0), + PINMUX_IPSR_MSEL(IP13_15_12, MSIOF1_RXD_D, SEL_MSIOF1_3), +- PINMUX_IPSR_MSEL(IP13_15_12, SSI_SDATA2_B, SEL_SSI_1), ++ PINMUX_IPSR_MSEL(IP13_15_12, SSI_SDATA2_B, SEL_SSI2_1), + PINMUX_IPSR_MSEL(IP13_15_12, TS_SDEN0_D, SEL_TSIF0_3), + PINMUX_IPSR_MSEL(IP13_15_12, STP_ISEN_0_D, SEL_SSP1_0_3), + PINMUX_IPSR_MSEL(IP13_15_12, RIF0_D0_C, SEL_DRIF0_2), + + PINMUX_IPSR_GPSR(IP13_19_16, HTX0), + PINMUX_IPSR_MSEL(IP13_19_16, MSIOF1_TXD_D, SEL_MSIOF1_3), +- PINMUX_IPSR_MSEL(IP13_19_16, SSI_SDATA9_B, SEL_SSI_1), ++ PINMUX_IPSR_MSEL(IP13_19_16, SSI_SDATA9_B, SEL_SSI9_1), + PINMUX_IPSR_MSEL(IP13_19_16, TS_SDAT0_D, SEL_TSIF0_3), + PINMUX_IPSR_MSEL(IP13_19_16, STP_ISD_0_D, SEL_SSP1_0_3), + PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_C, SEL_DRIF0_2), +@@ -1247,7 +1247,7 @@ static const u16 pinmux_data[] = { + PINMUX_IPSR_GPSR(IP13_23_20, HCTS0_N), + PINMUX_IPSR_MSEL(IP13_23_20, RX2_B, SEL_SCIF2_1), + PINMUX_IPSR_MSEL(IP13_23_20, MSIOF1_SYNC_D, SEL_MSIOF1_3), +- PINMUX_IPSR_MSEL(IP13_23_20, SSI_SCK9_A, SEL_SSI_0), ++ PINMUX_IPSR_MSEL(IP13_23_20, SSI_SCK9_A, SEL_SSI9_0), + PINMUX_IPSR_MSEL(IP13_23_20, TS_SPSYNC0_D, SEL_TSIF0_3), + PINMUX_IPSR_MSEL(IP13_23_20, STP_ISSYNC_0_D, SEL_SSP1_0_3), + PINMUX_IPSR_MSEL(IP13_23_20, RIF0_SYNC_C, SEL_DRIF0_2), +@@ -1256,7 +1256,7 @@ static const u16 pinmux_data[] = { + PINMUX_IPSR_GPSR(IP13_27_24, HRTS0_N), + PINMUX_IPSR_MSEL(IP13_27_24, TX2_B, SEL_SCIF2_1), + PINMUX_IPSR_MSEL(IP13_27_24, MSIOF1_SS1_D, SEL_MSIOF1_3), +- PINMUX_IPSR_MSEL(IP13_27_24, SSI_WS9_A, SEL_SSI_0), ++ PINMUX_IPSR_MSEL(IP13_27_24, SSI_WS9_A, SEL_SSI9_0), + PINMUX_IPSR_MSEL(IP13_27_24, STP_IVCXO27_0_D, SEL_SSP1_0_3), + PINMUX_IPSR_MSEL(IP13_27_24, BPFCLK_A, SEL_FM_0), + PINMUX_IPSR_GPSR(IP13_27_24, AUDIO_CLKOUT2_A), +@@ -1271,7 +1271,7 @@ static const u16 pinmux_data[] = { + PINMUX_IPSR_MSEL(IP14_3_0, RX5_A, SEL_SCIF5_0), + 
PINMUX_IPSR_MSEL(IP14_3_0, NFWP_N_A, SEL_NDF_0), + PINMUX_IPSR_MSEL(IP14_3_0, AUDIO_CLKA_C, SEL_ADG_A_2), +- PINMUX_IPSR_MSEL(IP14_3_0, SSI_SCK2_A, SEL_SSI_0), ++ PINMUX_IPSR_MSEL(IP14_3_0, SSI_SCK2_A, SEL_SSI2_0), + PINMUX_IPSR_MSEL(IP14_3_0, STP_IVCXO27_0_C, SEL_SSP1_0_2), + PINMUX_IPSR_GPSR(IP14_3_0, AUDIO_CLKOUT3_A), + PINMUX_IPSR_MSEL(IP14_3_0, TCLK1_B, SEL_TIMER_TMU_1), +@@ -1280,7 +1280,7 @@ static const u16 pinmux_data[] = { + PINMUX_IPSR_MSEL(IP14_7_4, TX5_A, SEL_SCIF5_0), + PINMUX_IPSR_MSEL(IP14_7_4, MSIOF1_SS2_D, SEL_MSIOF1_3), + PINMUX_IPSR_MSEL(IP14_7_4, AUDIO_CLKC_A, SEL_ADG_C_0), +- PINMUX_IPSR_MSEL(IP14_7_4, SSI_WS2_A, SEL_SSI_0), ++ PINMUX_IPSR_MSEL(IP14_7_4, SSI_WS2_A, SEL_SSI2_0), + PINMUX_IPSR_MSEL(IP14_7_4, STP_OPWM_0_D, SEL_SSP1_0_3), + PINMUX_IPSR_GPSR(IP14_7_4, AUDIO_CLKOUT_D), + PINMUX_IPSR_MSEL(IP14_7_4, SPEEDIN_B, SEL_SPEED_PULSE_1), +@@ -1308,10 +1308,10 @@ static const u16 pinmux_data[] = { + PINMUX_IPSR_MSEL(IP14_31_28, MSIOF1_SS2_F, SEL_MSIOF1_5), + + /* IPSR15 */ +- PINMUX_IPSR_MSEL(IP15_3_0, SSI_SDATA1_A, SEL_SSI_0), ++ PINMUX_IPSR_MSEL(IP15_3_0, SSI_SDATA1_A, SEL_SSI1_0), + +- PINMUX_IPSR_MSEL(IP15_7_4, SSI_SDATA2_A, SEL_SSI_0), +- PINMUX_IPSR_MSEL(IP15_7_4, SSI_SCK1_B, SEL_SSI_1), ++ PINMUX_IPSR_MSEL(IP15_7_4, SSI_SDATA2_A, SEL_SSI2_0), ++ PINMUX_IPSR_MSEL(IP15_7_4, SSI_SCK1_B, SEL_SSI1_1), + + PINMUX_IPSR_GPSR(IP15_11_8, SSI_SCK349), + PINMUX_IPSR_MSEL(IP15_11_8, MSIOF1_SS1_A, SEL_MSIOF1_0), +@@ -1397,11 +1397,11 @@ static const u16 pinmux_data[] = { + PINMUX_IPSR_MSEL(IP16_27_24, RIF1_D1_A, SEL_DRIF1_0), + PINMUX_IPSR_MSEL(IP16_27_24, RIF3_D1_A, SEL_DRIF3_0), + +- PINMUX_IPSR_MSEL(IP16_31_28, SSI_SDATA9_A, SEL_SSI_0), ++ PINMUX_IPSR_MSEL(IP16_31_28, SSI_SDATA9_A, SEL_SSI9_0), + PINMUX_IPSR_MSEL(IP16_31_28, HSCK2_B, SEL_HSCIF2_1), + PINMUX_IPSR_MSEL(IP16_31_28, MSIOF1_SS1_C, SEL_MSIOF1_2), + PINMUX_IPSR_MSEL(IP16_31_28, HSCK1_A, SEL_HSCIF1_0), +- PINMUX_IPSR_MSEL(IP16_31_28, SSI_WS1_B, SEL_SSI_1), ++ PINMUX_IPSR_MSEL(IP16_31_28, SSI_WS1_B, SEL_SSI1_1), + PINMUX_IPSR_GPSR(IP16_31_28, SCK1), + PINMUX_IPSR_MSEL(IP16_31_28, STP_IVCXO27_1_A, SEL_SSP1_1_0), + PINMUX_IPSR_MSEL(IP16_31_28, SCK5_A, SEL_SCIF5_0), +@@ -1433,7 +1433,7 @@ static const u16 pinmux_data[] = { + + PINMUX_IPSR_GPSR(IP17_19_16, USB1_PWEN), + PINMUX_IPSR_MSEL(IP17_19_16, SIM0_CLK_C, SEL_SIMCARD_2), +- PINMUX_IPSR_MSEL(IP17_19_16, SSI_SCK1_A, SEL_SSI_0), ++ PINMUX_IPSR_MSEL(IP17_19_16, SSI_SCK1_A, SEL_SSI1_0), + PINMUX_IPSR_MSEL(IP17_19_16, TS_SCK0_E, SEL_TSIF0_4), + PINMUX_IPSR_MSEL(IP17_19_16, STP_ISCLK_0_E, SEL_SSP1_0_4), + PINMUX_IPSR_MSEL(IP17_19_16, FMCLK_B, SEL_FM_1), +@@ -1443,7 +1443,7 @@ static const u16 pinmux_data[] = { + + PINMUX_IPSR_GPSR(IP17_23_20, USB1_OVC), + PINMUX_IPSR_MSEL(IP17_23_20, MSIOF1_SS2_C, SEL_MSIOF1_2), +- PINMUX_IPSR_MSEL(IP17_23_20, SSI_WS1_A, SEL_SSI_0), ++ PINMUX_IPSR_MSEL(IP17_23_20, SSI_WS1_A, SEL_SSI1_0), + PINMUX_IPSR_MSEL(IP17_23_20, TS_SDAT0_E, SEL_TSIF0_4), + PINMUX_IPSR_MSEL(IP17_23_20, STP_ISD_0_E, SEL_SSP1_0_4), + PINMUX_IPSR_MSEL(IP17_23_20, FMIN_B, SEL_FM_1), +@@ -1453,7 +1453,7 @@ static const u16 pinmux_data[] = { + + PINMUX_IPSR_GPSR(IP17_27_24, USB30_PWEN), + PINMUX_IPSR_GPSR(IP17_27_24, AUDIO_CLKOUT_B), +- PINMUX_IPSR_MSEL(IP17_27_24, SSI_SCK2_B, SEL_SSI_1), ++ PINMUX_IPSR_MSEL(IP17_27_24, SSI_SCK2_B, SEL_SSI2_1), + PINMUX_IPSR_MSEL(IP17_27_24, TS_SDEN1_D, SEL_TSIF1_3), + PINMUX_IPSR_MSEL(IP17_27_24, STP_ISEN_1_D, SEL_SSP1_1_3), + PINMUX_IPSR_MSEL(IP17_27_24, STP_OPWM_0_E, SEL_SSP1_0_4), +@@ -1465,7 +1465,7 @@ static const u16 pinmux_data[] 
= { + + PINMUX_IPSR_GPSR(IP17_31_28, USB30_OVC), + PINMUX_IPSR_GPSR(IP17_31_28, AUDIO_CLKOUT1_B), +- PINMUX_IPSR_MSEL(IP17_31_28, SSI_WS2_B, SEL_SSI_1), ++ PINMUX_IPSR_MSEL(IP17_31_28, SSI_WS2_B, SEL_SSI2_1), + PINMUX_IPSR_MSEL(IP17_31_28, TS_SPSYNC1_D, SEL_TSIF1_3), + PINMUX_IPSR_MSEL(IP17_31_28, STP_ISSYNC_1_D, SEL_SSP1_1_3), + PINMUX_IPSR_MSEL(IP17_31_28, STP_IVCXO27_0_E, SEL_SSP1_0_4), +@@ -1476,7 +1476,7 @@ static const u16 pinmux_data[] = { + /* IPSR18 */ + PINMUX_IPSR_GPSR(IP18_3_0, GP6_30), + PINMUX_IPSR_GPSR(IP18_3_0, AUDIO_CLKOUT2_B), +- PINMUX_IPSR_MSEL(IP18_3_0, SSI_SCK9_B, SEL_SSI_1), ++ PINMUX_IPSR_MSEL(IP18_3_0, SSI_SCK9_B, SEL_SSI9_1), + PINMUX_IPSR_MSEL(IP18_3_0, TS_SDEN0_E, SEL_TSIF0_4), + PINMUX_IPSR_MSEL(IP18_3_0, STP_ISEN_0_E, SEL_SSP1_0_4), + PINMUX_IPSR_MSEL(IP18_3_0, RIF2_D0_B, SEL_DRIF2_1), +@@ -1486,7 +1486,7 @@ static const u16 pinmux_data[] = { + + PINMUX_IPSR_GPSR(IP18_7_4, GP6_31), + PINMUX_IPSR_GPSR(IP18_7_4, AUDIO_CLKOUT3_B), +- PINMUX_IPSR_MSEL(IP18_7_4, SSI_WS9_B, SEL_SSI_1), ++ PINMUX_IPSR_MSEL(IP18_7_4, SSI_WS9_B, SEL_SSI9_1), + PINMUX_IPSR_MSEL(IP18_7_4, TS_SPSYNC0_E, SEL_TSIF0_4), + PINMUX_IPSR_MSEL(IP18_7_4, STP_ISSYNC_0_E, SEL_SSP1_0_4), + PINMUX_IPSR_MSEL(IP18_7_4, RIF2_D1_B, SEL_DRIF2_1), +diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c +index 08e4fd9ee607..9621d6dd88c6 100644 +--- a/drivers/power/supply/ltc2941-battery-gauge.c ++++ b/drivers/power/supply/ltc2941-battery-gauge.c +@@ -316,15 +316,15 @@ static int ltc294x_get_temperature(const struct ltc294x_info *info, int *val) + + if (info->id == LTC2942_ID) { + reg = LTC2942_REG_TEMPERATURE_MSB; +- value = 60000; /* Full-scale is 600 Kelvin */ ++ value = 6000; /* Full-scale is 600 Kelvin */ + } else { + reg = LTC2943_REG_TEMPERATURE_MSB; +- value = 51000; /* Full-scale is 510 Kelvin */ ++ value = 5100; /* Full-scale is 510 Kelvin */ + } + ret = ltc294x_read_regs(info->client, reg, &datar[0], 2); + value *= (datar[0] << 8) | datar[1]; +- /* Convert to centidegrees */ +- *val = value / 0xFFFF - 27215; ++ /* Convert to tenths of degree Celsius */ ++ *val = value / 0xFFFF - 2722; + return ret; + } + +diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c +index 5b556a13f517..9c7eaaeda343 100644 +--- a/drivers/power/supply/max17042_battery.c ++++ b/drivers/power/supply/max17042_battery.c +@@ -1021,6 +1021,7 @@ static int max17042_probe(struct i2c_client *client, + + i2c_set_clientdata(client, chip); + psy_cfg.drv_data = chip; ++ psy_cfg.of_node = dev->of_node; + + /* When current is not measured, + * CURRENT_NOW and CURRENT_AVG properties should be invisible. 
*/ +diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c +index 0fce06acfaec..a2eb50719c7b 100644 +--- a/drivers/regulator/gpio-regulator.c ++++ b/drivers/regulator/gpio-regulator.c +@@ -271,8 +271,7 @@ static int gpio_regulator_probe(struct platform_device *pdev) + drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL); + if (drvdata->desc.name == NULL) { + dev_err(&pdev->dev, "Failed to allocate supply name\n"); +- ret = -ENOMEM; +- goto err; ++ return -ENOMEM; + } + + if (config->nr_gpios != 0) { +@@ -292,7 +291,7 @@ static int gpio_regulator_probe(struct platform_device *pdev) + dev_err(&pdev->dev, + "Could not obtain regulator setting GPIOs: %d\n", + ret); +- goto err_memstate; ++ goto err_memgpio; + } + } + +@@ -303,7 +302,7 @@ static int gpio_regulator_probe(struct platform_device *pdev) + if (drvdata->states == NULL) { + dev_err(&pdev->dev, "Failed to allocate state data\n"); + ret = -ENOMEM; +- goto err_memgpio; ++ goto err_stategpio; + } + drvdata->nr_states = config->nr_states; + +@@ -324,7 +323,7 @@ static int gpio_regulator_probe(struct platform_device *pdev) + default: + dev_err(&pdev->dev, "No regulator type set\n"); + ret = -EINVAL; +- goto err_memgpio; ++ goto err_memstate; + } + + /* build initial state from gpio init data. */ +@@ -361,22 +360,21 @@ static int gpio_regulator_probe(struct platform_device *pdev) + if (IS_ERR(drvdata->dev)) { + ret = PTR_ERR(drvdata->dev); + dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret); +- goto err_stategpio; ++ goto err_memstate; + } + + platform_set_drvdata(pdev, drvdata); + + return 0; + +-err_stategpio: +- gpio_free_array(drvdata->gpios, drvdata->nr_gpios); + err_memstate: + kfree(drvdata->states); ++err_stategpio: ++ gpio_free_array(drvdata->gpios, drvdata->nr_gpios); + err_memgpio: + kfree(drvdata->gpios); + err_name: + kfree(drvdata->desc.name); +-err: + return ret; + } + +diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c +index 14637a01ba2d..c9875355905d 100644 +--- a/drivers/regulator/of_regulator.c ++++ b/drivers/regulator/of_regulator.c +@@ -305,6 +305,7 @@ int of_regulator_match(struct device *dev, struct device_node *node, + dev_err(dev, + "failed to parse DT for regulator %s\n", + child->name); ++ of_node_put(child); + return -EINVAL; + } + match->of_node = of_node_get(child); +diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c +index 633268e9d550..05bcbce2013a 100644 +--- a/drivers/remoteproc/imx_rproc.c ++++ b/drivers/remoteproc/imx_rproc.c +@@ -339,8 +339,10 @@ static int imx_rproc_probe(struct platform_device *pdev) + } + + dcfg = of_device_get_match_data(dev); +- if (!dcfg) +- return -EINVAL; ++ if (!dcfg) { ++ ret = -EINVAL; ++ goto err_put_rproc; ++ } + + priv = rproc->priv; + priv->rproc = rproc; +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c +index 29f35e29d480..e67c1d8a193d 100644 +--- a/drivers/s390/block/dasd.c ++++ b/drivers/s390/block/dasd.c +@@ -2596,8 +2596,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr) + case DASD_CQR_QUEUED: + /* request was not started - just set to cleared */ + cqr->status = DASD_CQR_CLEARED; +- if (cqr->callback_data == DASD_SLEEPON_START_TAG) +- cqr->callback_data = DASD_SLEEPON_END_TAG; + break; + case DASD_CQR_IN_IO: + /* request in IO - terminate IO and release again */ +@@ -3917,9 +3915,12 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) + wait_event(dasd_flush_wq, + (cqr->status != DASD_CQR_CLEAR_PENDING)); + +- /* mark 
sleepon requests as ended */ +- if (cqr->callback_data == DASD_SLEEPON_START_TAG) +- cqr->callback_data = DASD_SLEEPON_END_TAG; ++ /* ++ * requeue requests to blocklayer will only work ++ * for block device requests ++ */ ++ if (_dasd_requeue_request(cqr)) ++ continue; + + /* remove requests from device and block queue */ + list_del_init(&cqr->devlist); +@@ -3932,13 +3933,6 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) + cqr = refers; + } + +- /* +- * requeue requests to blocklayer will only work +- * for block device requests +- */ +- if (_dasd_requeue_request(cqr)) +- continue; +- + if (cqr->block) + list_del_init(&cqr->blocklist); + cqr->block->base->discipline->free_cp( +@@ -3955,8 +3949,7 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) + list_splice_tail(&requeue_queue, &device->ccw_queue); + spin_unlock_irq(get_ccwdev_lock(device->cdev)); + } +- /* wake up generic waitqueue for eventually ended sleepon requests */ +- wake_up(&generic_waitq); ++ dasd_schedule_device_bh(device); + return rc; + } + +diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c +index f98ea674c3d8..28837ad75712 100644 +--- a/drivers/s390/cio/device_fsm.c ++++ b/drivers/s390/cio/device_fsm.c +@@ -796,6 +796,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) + + ccw_device_set_timeout(cdev, 0); + cdev->private->iretry = 255; ++ cdev->private->async_kill_io_rc = -ETIMEDOUT; + ret = ccw_device_cancel_halt_clear(cdev); + if (ret == -EBUSY) { + ccw_device_set_timeout(cdev, 3*HZ); +@@ -872,7 +873,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) + /* OK, i/o is dead now. Call interrupt handler. */ + if (cdev->handler) + cdev->handler(cdev, cdev->private->intparm, +- ERR_PTR(-EIO)); ++ ERR_PTR(cdev->private->async_kill_io_rc)); + } + + static void +@@ -889,14 +890,16 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event) + ccw_device_online_verify(cdev, 0); + if (cdev->handler) + cdev->handler(cdev, cdev->private->intparm, +- ERR_PTR(-EIO)); ++ ERR_PTR(cdev->private->async_kill_io_rc)); + } + + void ccw_device_kill_io(struct ccw_device *cdev) + { + int ret; + ++ ccw_device_set_timeout(cdev, 0); + cdev->private->iretry = 255; ++ cdev->private->async_kill_io_rc = -EIO; + ret = ccw_device_cancel_halt_clear(cdev); + if (ret == -EBUSY) { + ccw_device_set_timeout(cdev, 3*HZ); +diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c +index cf8c4ac6323a..b22922ec32d1 100644 +--- a/drivers/s390/cio/device_ops.c ++++ b/drivers/s390/cio/device_ops.c +@@ -160,7 +160,7 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) + } + + /** +- * ccw_device_start_key() - start a s390 channel program with key ++ * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key + * @cdev: target ccw device + * @cpa: logical start address of channel program + * @intparm: user specific interruption parameter; will be presented back to +@@ -171,10 +171,15 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) + * @key: storage key to be used for the I/O + * @flags: additional flags; defines the action to be performed for I/O + * processing. ++ * @expires: timeout value in jiffies + * + * Start a S/390 channel program. When the interrupt arrives, the + * IRQ handler is called, either immediately, delayed (dev-end missing, + * or sense required) or never (no IRQ handler registered). 
++ * This function notifies the device driver if the channel program has not ++ * completed during the time specified by @expires. If a timeout occurs, the ++ * channel program is terminated via xsch, hsch or csch, and the device's ++ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). + * Returns: + * %0, if the operation was successful; + * -%EBUSY, if the device is busy, or status pending; +@@ -183,9 +188,9 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) + * Context: + * Interrupts disabled, ccw device lock held + */ +-int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, +- unsigned long intparm, __u8 lpm, __u8 key, +- unsigned long flags) ++int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, ++ unsigned long intparm, __u8 lpm, __u8 key, ++ unsigned long flags, int expires) + { + struct subchannel *sch; + int ret; +@@ -225,6 +230,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, + switch (ret) { + case 0: + cdev->private->intparm = intparm; ++ if (expires) ++ ccw_device_set_timeout(cdev, expires); + break; + case -EACCES: + case -ENODEV: +@@ -235,7 +242,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, + } + + /** +- * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key ++ * ccw_device_start_key() - start a s390 channel program with key + * @cdev: target ccw device + * @cpa: logical start address of channel program + * @intparm: user specific interruption parameter; will be presented back to +@@ -246,15 +253,10 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, + * @key: storage key to be used for the I/O + * @flags: additional flags; defines the action to be performed for I/O + * processing. +- * @expires: timeout value in jiffies + * + * Start a S/390 channel program. When the interrupt arrives, the + * IRQ handler is called, either immediately, delayed (dev-end missing, + * or sense required) or never (no IRQ handler registered). +- * This function notifies the device driver if the channel program has not +- * completed during the time specified by @expires. If a timeout occurs, the +- * channel program is terminated via xsch, hsch or csch, and the device's +- * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). 
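++ * For example, a minimal sketch of a timed start (assuming the ccw device
++ * lock is held with interrupts disabled, @cpa and @intparm prepared by the
++ * caller, and illustrative lpm/flags values of 0) could give the channel
++ * program ten seconds to complete:
++ *
++ *	ret = ccw_device_start_timeout_key(cdev, cpa, intparm, 0,
++ *					   PAGE_DEFAULT_KEY, 0, 10 * HZ);
++ *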
+ * Returns: + * %0, if the operation was successful; + * -%EBUSY, if the device is busy, or status pending; +@@ -263,19 +265,12 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, + * Context: + * Interrupts disabled, ccw device lock held + */ +-int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, +- unsigned long intparm, __u8 lpm, __u8 key, +- unsigned long flags, int expires) ++int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, ++ unsigned long intparm, __u8 lpm, __u8 key, ++ unsigned long flags) + { +- int ret; +- +- if (!cdev) +- return -ENODEV; +- ccw_device_set_timeout(cdev, expires); +- ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags); +- if (ret != 0) +- ccw_device_set_timeout(cdev, 0); +- return ret; ++ return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key, ++ flags, 0); + } + + /** +@@ -490,18 +485,20 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id) + EXPORT_SYMBOL(ccw_device_get_id); + + /** +- * ccw_device_tm_start_key() - perform start function ++ * ccw_device_tm_start_timeout_key() - perform start function + * @cdev: ccw device on which to perform the start function + * @tcw: transport-command word to be started + * @intparm: user defined parameter to be passed to the interrupt handler + * @lpm: mask of paths to use + * @key: storage key to use for storage access ++ * @expires: time span in jiffies after which to abort request + * + * Start the tcw on the given ccw device. Return zero on success, non-zero + * otherwise. + */ +-int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, +- unsigned long intparm, u8 lpm, u8 key) ++int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, ++ unsigned long intparm, u8 lpm, u8 key, ++ int expires) + { + struct subchannel *sch; + int rc; +@@ -528,37 +525,32 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, + return -EACCES; + } + rc = cio_tm_start_key(sch, tcw, lpm, key); +- if (rc == 0) ++ if (rc == 0) { + cdev->private->intparm = intparm; ++ if (expires) ++ ccw_device_set_timeout(cdev, expires); ++ } + return rc; + } +-EXPORT_SYMBOL(ccw_device_tm_start_key); ++EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); + + /** +- * ccw_device_tm_start_timeout_key() - perform start function ++ * ccw_device_tm_start_key() - perform start function + * @cdev: ccw device on which to perform the start function + * @tcw: transport-command word to be started + * @intparm: user defined parameter to be passed to the interrupt handler + * @lpm: mask of paths to use + * @key: storage key to use for storage access +- * @expires: time span in jiffies after which to abort request + * + * Start the tcw on the given ccw device. Return zero on success, non-zero + * otherwise. 
+ */ +-int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, +- unsigned long intparm, u8 lpm, u8 key, +- int expires) ++int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, ++ unsigned long intparm, u8 lpm, u8 key) + { +- int ret; +- +- ccw_device_set_timeout(cdev, expires); +- ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key); +- if (ret != 0) +- ccw_device_set_timeout(cdev, 0); +- return ret; ++ return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0); + } +-EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); ++EXPORT_SYMBOL(ccw_device_tm_start_key); + + /** + * ccw_device_tm_start() - perform start function +diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h +index af571d8d6925..90e4e3a7841b 100644 +--- a/drivers/s390/cio/io_sch.h ++++ b/drivers/s390/cio/io_sch.h +@@ -157,6 +157,7 @@ struct ccw_device_private { + unsigned long intparm; /* user interruption parameter */ + struct qdio_irq *qdio_data; + struct irb irb; /* device status */ ++ int async_kill_io_rc; + struct senseid senseid; /* SenseID info */ + struct pgid pgid[8]; /* path group IDs per chpid*/ + struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ +diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c +index e96b85579f21..3c800642134e 100644 +--- a/drivers/s390/cio/vfio_ccw_fsm.c ++++ b/drivers/s390/cio/vfio_ccw_fsm.c +@@ -129,6 +129,11 @@ static void fsm_io_request(struct vfio_ccw_private *private, + if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) { + orb = (union orb *)io_region->orb_area; + ++ /* Don't try to build a cp if transport mode is specified. */ ++ if (orb->tm.b) { ++ io_region->ret_code = -EOPNOTSUPP; ++ goto err_out; ++ } + io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev), + orb); + if (io_region->ret_code) +diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c +index 9be34d37c356..3f3cb72e0c0c 100644 +--- a/drivers/scsi/sr.c ++++ b/drivers/scsi/sr.c +@@ -525,6 +525,8 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode) + struct scsi_cd *cd; + int ret = -ENXIO; + ++ check_disk_change(bdev); ++ + mutex_lock(&sr_mutex); + cd = scsi_cd_get(bdev->bd_disk); + if (cd) { +@@ -585,18 +587,28 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, + static unsigned int sr_block_check_events(struct gendisk *disk, + unsigned int clearing) + { +- struct scsi_cd *cd = scsi_cd(disk); ++ unsigned int ret = 0; ++ struct scsi_cd *cd; + +- if (atomic_read(&cd->device->disk_events_disable_depth)) ++ cd = scsi_cd_get(disk); ++ if (!cd) + return 0; + +- return cdrom_check_events(&cd->cdi, clearing); ++ if (!atomic_read(&cd->device->disk_events_disable_depth)) ++ ret = cdrom_check_events(&cd->cdi, clearing); ++ ++ scsi_cd_put(cd); ++ return ret; + } + + static int sr_block_revalidate_disk(struct gendisk *disk) + { +- struct scsi_cd *cd = scsi_cd(disk); + struct scsi_sense_hdr sshdr; ++ struct scsi_cd *cd; ++ ++ cd = scsi_cd_get(disk); ++ if (!cd) ++ return -ENXIO; + + /* if the unit is not ready, nothing more to do */ + if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr)) +@@ -605,6 +617,7 @@ static int sr_block_revalidate_disk(struct gendisk *disk) + sr_cd_check(&cd->cdi); + get_sectorsize(cd); + out: ++ scsi_cd_put(cd); + return 0; + } + +diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c +index 2a21f2d48592..35fab1e18adc 100644 +--- a/drivers/scsi/sr_ioctl.c ++++ b/drivers/scsi/sr_ioctl.c +@@ -188,9 +188,13 @@ int sr_do_ioctl(Scsi_CD *cd, 
struct packet_command *cgc) + struct scsi_device *SDev; + struct scsi_sense_hdr sshdr; + int result, err = 0, retries = 0; ++ unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL; + + SDev = cd->device; + ++ if (cgc->sense) ++ senseptr = sense_buffer; ++ + retry: + if (!scsi_block_when_processing_errors(SDev)) { + err = -ENODEV; +@@ -198,10 +202,12 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) + } + + result = scsi_execute(SDev, cgc->cmd, cgc->data_direction, +- cgc->buffer, cgc->buflen, +- (unsigned char *)cgc->sense, &sshdr, ++ cgc->buffer, cgc->buflen, senseptr, &sshdr, + cgc->timeout, IOCTL_RETRIES, 0, 0, NULL); + ++ if (cgc->sense) ++ memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense)); ++ + /* Minimal error checking. Ignore cases we know about, and report the rest. */ + if (driver_byte(result) != 0) { + switch (sshdr.sense_key) { +diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c +index 47e7aa963dbb..1613ccf0c059 100644 +--- a/drivers/soc/imx/gpc.c ++++ b/drivers/soc/imx/gpc.c +@@ -456,13 +456,21 @@ static int imx_gpc_probe(struct platform_device *pdev) + + static int imx_gpc_remove(struct platform_device *pdev) + { ++ struct device_node *pgc_node; + int ret; + ++ pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc"); ++ ++ /* bail out if DT too old and doesn't provide the necessary info */ ++ if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") && ++ !pgc_node) ++ return 0; ++ + /* + * If the old DT binding is used the toplevel driver needs to + * de-register the power domains + */ +- if (!of_get_child_by_name(pdev->dev.of_node, "pgc")) { ++ if (!pgc_node) { + of_genpd_del_provider(pdev->dev.of_node); + + ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base); +diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c +index d008e5b82db4..df3ccb30bc2d 100644 +--- a/drivers/soc/qcom/wcnss_ctrl.c ++++ b/drivers/soc/qcom/wcnss_ctrl.c +@@ -249,7 +249,7 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc) + /* Increment for next fragment */ + req->seq++; + +- data += req->hdr.len; ++ data += NV_FRAGMENT_SIZE; + left -= NV_FRAGMENT_SIZE; + } while (left > 0); + +diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c +index ff01f865a173..6573152ce893 100644 +--- a/drivers/spi/spi-bcm-qspi.c ++++ b/drivers/spi/spi-bcm-qspi.c +@@ -1255,7 +1255,7 @@ int bcm_qspi_probe(struct platform_device *pdev, + qspi->base[MSPI] = devm_ioremap_resource(dev, res); + if (IS_ERR(qspi->base[MSPI])) { + ret = PTR_ERR(qspi->base[MSPI]); +- goto qspi_probe_err; ++ goto qspi_resource_err; + } + } else { + goto qspi_resource_err; +@@ -1266,7 +1266,7 @@ int bcm_qspi_probe(struct platform_device *pdev, + qspi->base[BSPI] = devm_ioremap_resource(dev, res); + if (IS_ERR(qspi->base[BSPI])) { + ret = PTR_ERR(qspi->base[BSPI]); +- goto qspi_probe_err; ++ goto qspi_resource_err; + } + qspi->bspi_mode = true; + } else { +diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c +index 6dda3623a276..e1faee1f8602 100644 +--- a/drivers/usb/host/pci-quirks.c ++++ b/drivers/usb/host/pci-quirks.c +@@ -65,6 +65,23 @@ + #define AX_INDXC 0x30 + #define AX_DATAC 0x34 + ++#define PT_ADDR_INDX 0xE8 ++#define PT_READ_INDX 0xE4 ++#define PT_SIG_1_ADDR 0xA520 ++#define PT_SIG_2_ADDR 0xA521 ++#define PT_SIG_3_ADDR 0xA522 ++#define PT_SIG_4_ADDR 0xA523 ++#define PT_SIG_1_DATA 0x78 ++#define PT_SIG_2_DATA 0x56 ++#define PT_SIG_3_DATA 0x34 ++#define PT_SIG_4_DATA 0x12 ++#define PT4_P1_REG 0xB521 
++#define PT4_P2_REG 0xB522 ++#define PT2_P1_REG 0xD520 ++#define PT2_P2_REG 0xD521 ++#define PT1_P1_REG 0xD522 ++#define PT1_P2_REG 0xD523 ++ + #define NB_PCIE_INDX_ADDR 0xe0 + #define NB_PCIE_INDX_DATA 0xe4 + #define PCIE_P_CNTL 0x10040 +@@ -511,6 +528,98 @@ void usb_amd_dev_put(void) + } + EXPORT_SYMBOL_GPL(usb_amd_dev_put); + ++/* ++ * Check if port is disabled in BIOS on AMD Promontory host. ++ * BIOS Disabled ports may wake on connect/disconnect and need ++ * driver workaround to keep them disabled. ++ * Returns true if port is marked disabled. ++ */ ++bool usb_amd_pt_check_port(struct device *device, int port) ++{ ++ unsigned char value, port_shift; ++ struct pci_dev *pdev; ++ u16 reg; ++ ++ pdev = to_pci_dev(device); ++ pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR); ++ ++ pci_read_config_byte(pdev, PT_READ_INDX, &value); ++ if (value != PT_SIG_1_DATA) ++ return false; ++ ++ pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR); ++ ++ pci_read_config_byte(pdev, PT_READ_INDX, &value); ++ if (value != PT_SIG_2_DATA) ++ return false; ++ ++ pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR); ++ ++ pci_read_config_byte(pdev, PT_READ_INDX, &value); ++ if (value != PT_SIG_3_DATA) ++ return false; ++ ++ pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR); ++ ++ pci_read_config_byte(pdev, PT_READ_INDX, &value); ++ if (value != PT_SIG_4_DATA) ++ return false; ++ ++ /* Check disabled port setting, if bit is set port is enabled */ ++ switch (pdev->device) { ++ case 0x43b9: ++ case 0x43ba: ++ /* ++ * device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba) ++ * PT4_P1_REG bits[7..1] represents USB2.0 ports 6 to 0 ++ * PT4_P2_REG bits[6..0] represents ports 13 to 7 ++ */ ++ if (port > 6) { ++ reg = PT4_P2_REG; ++ port_shift = port - 7; ++ } else { ++ reg = PT4_P1_REG; ++ port_shift = port + 1; ++ } ++ break; ++ case 0x43bb: ++ /* ++ * device is AMD_PROMONTORYA_2(0x43bb) ++ * PT2_P1_REG bits[7..5] represents USB2.0 ports 2 to 0 ++ * PT2_P2_REG bits[5..0] represents ports 9 to 3 ++ */ ++ if (port > 2) { ++ reg = PT2_P2_REG; ++ port_shift = port - 3; ++ } else { ++ reg = PT2_P1_REG; ++ port_shift = port + 5; ++ } ++ break; ++ case 0x43bc: ++ /* ++ * device is AMD_PROMONTORYA_1(0x43bc) ++ * PT1_P1_REG[7..4] represents USB2.0 ports 3 to 0 ++ * PT1_P2_REG[5..0] represents ports 9 to 4 ++ */ ++ if (port > 3) { ++ reg = PT1_P2_REG; ++ port_shift = port - 4; ++ } else { ++ reg = PT1_P1_REG; ++ port_shift = port + 4; ++ } ++ break; ++ default: ++ return false; ++ } ++ pci_write_config_word(pdev, PT_ADDR_INDX, reg); ++ pci_read_config_byte(pdev, PT_READ_INDX, &value); ++ ++ return !(value & BIT(port_shift)); ++} ++EXPORT_SYMBOL_GPL(usb_amd_pt_check_port); ++ + /* + * Make sure the controller is completely inactive, unable to + * generate interrupts or do DMA. 
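The quirk added above talks to the Promontory part through an indexed window in PCI configuration space: writing a register address to PT_ADDR_INDX (0xE8) selects an internal byte, which is then read back through PT_READ_INDX (0xE4), and the four-byte 0x78/0x56/0x34/0x12 signature must match before the port-disable registers are trusted. A minimal sketch of that access pattern (assuming a struct pci_dev already matched to one of the Promontory device IDs; promontory_read_byte and promontory_signature_ok are hypothetical helpers for illustration only, built on the PT_* constants defined above):

/* Relies on <linux/pci.h> plus the PT_* constants from the hunk above. */
static u8 promontory_read_byte(struct pci_dev *pdev, u16 addr)
{
	u8 value;

	/* Select the internal register, then read the latched byte back. */
	pci_write_config_word(pdev, PT_ADDR_INDX, addr);
	pci_read_config_byte(pdev, PT_READ_INDX, &value);
	return value;
}

static bool promontory_signature_ok(struct pci_dev *pdev)
{
	/* All four signature bytes must match before the port bits mean anything. */
	return promontory_read_byte(pdev, PT_SIG_1_ADDR) == PT_SIG_1_DATA &&
	       promontory_read_byte(pdev, PT_SIG_2_ADDR) == PT_SIG_2_DATA &&
	       promontory_read_byte(pdev, PT_SIG_3_ADDR) == PT_SIG_3_DATA &&
	       promontory_read_byte(pdev, PT_SIG_4_ADDR) == PT_SIG_4_DATA;
}

usb_amd_pt_check_port() then maps a hub port number onto a bit in one of the PT*_P*_REG bytes (the port_shift arithmetic above) and reports the port as BIOS-disabled when that bit is clear.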
+diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h +index b68dcb5dd0fd..4ca0d9b7e463 100644 +--- a/drivers/usb/host/pci-quirks.h ++++ b/drivers/usb/host/pci-quirks.h +@@ -17,6 +17,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); + void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); + void sb800_prefetch(struct device *dev, int on); + bool usb_xhci_needs_pci_reset(struct pci_dev *pdev); ++bool usb_amd_pt_check_port(struct device *device, int port); + #else + struct pci_dev; + static inline void usb_amd_quirk_pll_disable(void) {} +@@ -25,6 +26,10 @@ static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {} + static inline void usb_amd_dev_put(void) {} + static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} + static inline void sb800_prefetch(struct device *dev, int on) {} ++static inline bool usb_amd_pt_check_port(struct device *device, int port) ++{ ++ return false; ++} + #endif /* CONFIG_USB_PCI */ + + #endif /* __LINUX_USB_PCI_QUIRKS_H */ +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c +index 9762333d8d7f..00b8d4cdcac3 100644 +--- a/drivers/usb/host/xhci-hub.c ++++ b/drivers/usb/host/xhci-hub.c +@@ -1531,6 +1531,13 @@ int xhci_bus_suspend(struct usb_hcd *hcd) + t2 |= PORT_WKOC_E | PORT_WKCONN_E; + t2 &= ~PORT_WKDISC_E; + } ++ ++ if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) && ++ (hcd->speed < HCD_USB3)) { ++ if (usb_amd_pt_check_port(hcd->self.controller, ++ port_index)) ++ t2 &= ~PORT_WAKE_BITS; ++ } + } else + t2 &= ~PORT_WAKE_BITS; + +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index d79ab0d85924..838d37e79fa2 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -54,6 +54,10 @@ + #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 + #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 + ++#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 ++#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba ++#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb ++#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc + #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 + + static const char hcd_name[] = "xhci_hcd"; +@@ -143,6 +147,13 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + if (pdev->vendor == PCI_VENDOR_ID_AMD) + xhci->quirks |= XHCI_TRUST_TX_LENGTH; + ++ if ((pdev->vendor == PCI_VENDOR_ID_AMD) && ++ ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) || ++ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) || ++ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) || ++ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1))) ++ xhci->quirks |= XHCI_U2_DISABLE_WAKE; ++ + if (pdev->vendor == PCI_VENDOR_ID_INTEL) { + xhci->quirks |= XHCI_LPM_SUPPORT; + xhci->quirks |= XHCI_INTEL_HOST; +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index f5fb1f4a092c..2a72060dda1b 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1829,7 +1829,7 @@ struct xhci_hcd { + /* For controller with a broken Port Disable implementation */ + #define XHCI_BROKEN_PORT_PED (1 << 25) + #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) +-/* Reserved. 
It was XHCI_U2_DISABLE_WAKE */ ++#define XHCI_U2_DISABLE_WAKE (1 << 27) + #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) + #define XHCI_SUSPEND_DELAY (1 << 30) + +diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c +index af6fc97f4ba4..a436d44f1b7f 100644 +--- a/drivers/video/fbdev/sbuslib.c ++++ b/drivers/video/fbdev/sbuslib.c +@@ -122,7 +122,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, + unsigned char __user *ured; + unsigned char __user *ugreen; + unsigned char __user *ublue; +- int index, count, i; ++ unsigned int index, count, i; + + if (get_user(index, &c->index) || + __get_user(count, &c->count) || +@@ -161,7 +161,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, + unsigned char __user *ugreen; + unsigned char __user *ublue; + struct fb_cmap *cmap = &info->cmap; +- int index, count, i; ++ unsigned int index, count, i; + u8 red, green, blue; + + if (get_user(index, &c->index) || +diff --git a/drivers/watchdog/asm9260_wdt.c b/drivers/watchdog/asm9260_wdt.c +index 7dd0da644a7f..2cf56b459d84 100644 +--- a/drivers/watchdog/asm9260_wdt.c ++++ b/drivers/watchdog/asm9260_wdt.c +@@ -292,14 +292,14 @@ static int asm9260_wdt_probe(struct platform_device *pdev) + if (IS_ERR(priv->iobase)) + return PTR_ERR(priv->iobase); + +- ret = asm9260_wdt_get_dt_clks(priv); +- if (ret) +- return ret; +- + priv->rst = devm_reset_control_get_exclusive(&pdev->dev, "wdt_rst"); + if (IS_ERR(priv->rst)) + return PTR_ERR(priv->rst); + ++ ret = asm9260_wdt_get_dt_clks(priv); ++ if (ret) ++ return ret; ++ + wdd = &priv->wdd; + wdd->info = &asm9260_wdt_ident; + wdd->ops = &asm9260_wdt_ops; +diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c +index 79cc766cd30f..fd91007b4e41 100644 +--- a/drivers/watchdog/aspeed_wdt.c ++++ b/drivers/watchdog/aspeed_wdt.c +@@ -46,6 +46,7 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table); + #define WDT_RELOAD_VALUE 0x04 + #define WDT_RESTART 0x08 + #define WDT_CTRL 0x0C ++#define WDT_CTRL_BOOT_SECONDARY BIT(7) + #define WDT_CTRL_RESET_MODE_SOC (0x00 << 5) + #define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5) + #define WDT_CTRL_RESET_MODE_ARM_CPU (0x10 << 5) +@@ -158,6 +159,7 @@ static int aspeed_wdt_restart(struct watchdog_device *wdd, + { + struct aspeed_wdt *wdt = to_aspeed_wdt(wdd); + ++ wdt->ctrl &= ~WDT_CTRL_BOOT_SECONDARY; + aspeed_wdt_enable(wdt, 128 * WDT_RATE_1MHZ / 1000); + + mdelay(1000); +@@ -232,16 +234,21 @@ static int aspeed_wdt_probe(struct platform_device *pdev) + wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC | WDT_CTRL_RESET_SYSTEM; + } else { + if (!strcmp(reset_type, "cpu")) +- wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU; ++ wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU | ++ WDT_CTRL_RESET_SYSTEM; + else if (!strcmp(reset_type, "soc")) +- wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC; ++ wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC | ++ WDT_CTRL_RESET_SYSTEM; + else if (!strcmp(reset_type, "system")) +- wdt->ctrl |= WDT_CTRL_RESET_SYSTEM; ++ wdt->ctrl |= WDT_CTRL_RESET_MODE_FULL_CHIP | ++ WDT_CTRL_RESET_SYSTEM; + else if (strcmp(reset_type, "none")) + return -EINVAL; + } + if (of_property_read_bool(np, "aspeed,external-signal")) + wdt->ctrl |= WDT_CTRL_WDT_EXT; ++ if (of_property_read_bool(np, "aspeed,alt-boot")) ++ wdt->ctrl |= WDT_CTRL_BOOT_SECONDARY; + + writel(wdt->ctrl, wdt->base + WDT_CTRL); + +diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c +index 2f46487af86d..6d9a5d8c3c8d 100644 +--- a/drivers/watchdog/davinci_wdt.c ++++ b/drivers/watchdog/davinci_wdt.c +@@ -198,15 +198,22 @@ 
static int davinci_wdt_probe(struct platform_device *pdev) + + wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + davinci_wdt->base = devm_ioremap_resource(dev, wdt_mem); +- if (IS_ERR(davinci_wdt->base)) +- return PTR_ERR(davinci_wdt->base); ++ if (IS_ERR(davinci_wdt->base)) { ++ ret = PTR_ERR(davinci_wdt->base); ++ goto err_clk_disable; ++ } + + ret = watchdog_register_device(wdd); +- if (ret < 0) { +- clk_disable_unprepare(davinci_wdt->clk); ++ if (ret) { + dev_err(dev, "cannot register watchdog device\n"); ++ goto err_clk_disable; + } + ++ return 0; ++ ++err_clk_disable: ++ clk_disable_unprepare(davinci_wdt->clk); ++ + return ret; + } + +diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c +index c2f4ff516230..918357bccf5e 100644 +--- a/drivers/watchdog/dw_wdt.c ++++ b/drivers/watchdog/dw_wdt.c +@@ -34,6 +34,7 @@ + + #define WDOG_CONTROL_REG_OFFSET 0x00 + #define WDOG_CONTROL_REG_WDT_EN_MASK 0x01 ++#define WDOG_CONTROL_REG_RESP_MODE_MASK 0x02 + #define WDOG_TIMEOUT_RANGE_REG_OFFSET 0x04 + #define WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT 4 + #define WDOG_CURRENT_COUNT_REG_OFFSET 0x08 +@@ -121,14 +122,23 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s) + return 0; + } + ++static void dw_wdt_arm_system_reset(struct dw_wdt *dw_wdt) ++{ ++ u32 val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); ++ ++ /* Disable interrupt mode; always perform system reset. */ ++ val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK; ++ /* Enable watchdog. */ ++ val |= WDOG_CONTROL_REG_WDT_EN_MASK; ++ writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); ++} ++ + static int dw_wdt_start(struct watchdog_device *wdd) + { + struct dw_wdt *dw_wdt = to_dw_wdt(wdd); + + dw_wdt_set_timeout(wdd, wdd->timeout); +- +- writel(WDOG_CONTROL_REG_WDT_EN_MASK, +- dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); ++ dw_wdt_arm_system_reset(dw_wdt); + + return 0; + } +@@ -152,16 +162,13 @@ static int dw_wdt_restart(struct watchdog_device *wdd, + unsigned long action, void *data) + { + struct dw_wdt *dw_wdt = to_dw_wdt(wdd); +- u32 val; + + writel(0, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET); +- val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); +- if (val & WDOG_CONTROL_REG_WDT_EN_MASK) ++ if (dw_wdt_is_enabled(dw_wdt)) + writel(WDOG_COUNTER_RESTART_KICK_VALUE, + dw_wdt->regs + WDOG_COUNTER_RESTART_REG_OFFSET); + else +- writel(WDOG_CONTROL_REG_WDT_EN_MASK, +- dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); ++ dw_wdt_arm_system_reset(dw_wdt); + + /* wait for reset to assert... 
*/ + mdelay(500); +diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c +index e682bf046e50..88cd2a52d8d3 100644 +--- a/drivers/watchdog/f71808e_wdt.c ++++ b/drivers/watchdog/f71808e_wdt.c +@@ -566,7 +566,8 @@ static ssize_t watchdog_write(struct file *file, const char __user *buf, + char c; + if (get_user(c, buf + i)) + return -EFAULT; +- expect_close = (c == 'V'); ++ if (c == 'V') ++ expect_close = true; + } + + /* Properly order writes across fork()ed processes */ +diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c +index 316c2eb122d2..e8bd9887c566 100644 +--- a/drivers/watchdog/sbsa_gwdt.c ++++ b/drivers/watchdog/sbsa_gwdt.c +@@ -50,6 +50,7 @@ + */ + + #include <linux/io.h> ++#include <linux/io-64-nonatomic-lo-hi.h> + #include <linux/interrupt.h> + #include <linux/module.h> + #include <linux/moduleparam.h> +@@ -159,7 +160,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd) + !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0)) + timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR); + +- timeleft += readq(gwdt->control_base + SBSA_GWDT_WCV) - ++ timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) - + arch_counter_get_cntvct(); + + do_div(timeleft, gwdt->clk); +diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c +index 1ab4bd11f5f3..762378f1811c 100644 +--- a/drivers/xen/events/events_base.c ++++ b/drivers/xen/events/events_base.c +@@ -755,8 +755,8 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, + mutex_unlock(&irq_mapping_update_lock); + return irq; + error_irq: +- for (; i >= 0; i--) +- __unbind_from_irq(irq + i); ++ while (nvec--) ++ __unbind_from_irq(irq + nvec); + mutex_unlock(&irq_mapping_update_lock); + return ret; + } +diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c +index b209cd44bb8d..169293c25a91 100644 +--- a/drivers/xen/pvcalls-back.c ++++ b/drivers/xen/pvcalls-back.c +@@ -424,7 +424,7 @@ static int pvcalls_back_connect(struct xenbus_device *dev, + sock); + if (!map) { + ret = -EFAULT; +- sock_release(map->sock); ++ sock_release(sock); + } + + out: +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c +index 82fc54f8eb77..f98b8c135db9 100644 +--- a/drivers/xen/swiotlb-xen.c ++++ b/drivers/xen/swiotlb-xen.c +@@ -365,7 +365,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, + * physical address */ + phys = xen_bus_to_phys(dev_addr); + +- if (((dev_addr + size - 1 > dma_mask)) || ++ if (((dev_addr + size - 1 <= dma_mask)) || + range_straddles_page_boundary(phys, size)) + xen_destroy_contiguous_region(phys, order); + +diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c +index 23e391d3ec01..22863f5f2474 100644 +--- a/drivers/xen/xen-acpi-processor.c ++++ b/drivers/xen/xen-acpi-processor.c +@@ -362,9 +362,9 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv) + } + /* There are more ACPI Processor objects than in x2APIC or MADT. + * This can happen with incorrect ACPI SSDT declerations. 
*/ +- if (acpi_id > nr_acpi_bits) { +- pr_debug("We only have %u, trying to set %u\n", +- nr_acpi_bits, acpi_id); ++ if (acpi_id >= nr_acpi_bits) { ++ pr_debug("max acpi id %u, trying to set %u\n", ++ nr_acpi_bits - 1, acpi_id); + return AE_OK; + } + /* OK, There is a ACPI Processor object */ +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c +index 74888cacd0b0..ec9eb4fba59c 100644 +--- a/drivers/xen/xenbus/xenbus_probe.c ++++ b/drivers/xen/xenbus/xenbus_probe.c +@@ -466,8 +466,11 @@ int xenbus_probe_node(struct xen_bus_type *bus, + + /* Register with generic device framework. */ + err = device_register(&xendev->dev); +- if (err) ++ if (err) { ++ put_device(&xendev->dev); ++ xendev = NULL; + goto fail; ++ } + + return 0; + fail: +diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c +index cc1b1ac57d61..47728477297e 100644 +--- a/drivers/zorro/zorro.c ++++ b/drivers/zorro/zorro.c +@@ -16,6 +16,7 @@ + #include <linux/bitops.h> + #include <linux/string.h> + #include <linux/platform_device.h> ++#include <linux/dma-mapping.h> + #include <linux/slab.h> + + #include <asm/byteorder.h> +@@ -185,6 +186,17 @@ static int __init amiga_zorro_probe(struct platform_device *pdev) + z->dev.parent = &bus->dev; + z->dev.bus = &zorro_bus_type; + z->dev.id = i; ++ switch (z->rom.er_Type & ERT_TYPEMASK) { ++ case ERT_ZORROIII: ++ z->dev.coherent_dma_mask = DMA_BIT_MASK(32); ++ break; ++ ++ case ERT_ZORROII: ++ default: ++ z->dev.coherent_dma_mask = DMA_BIT_MASK(24); ++ break; ++ } ++ z->dev.dma_mask = &z->dev.coherent_dma_mask; + } + + /* ... then register them */ +diff --git a/fs/affs/namei.c b/fs/affs/namei.c +index d8aa0ae3d037..1ed0fa4c4d48 100644 +--- a/fs/affs/namei.c ++++ b/fs/affs/namei.c +@@ -206,9 +206,10 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) + + affs_lock_dir(dir); + bh = affs_find_entry(dir, dentry); +- affs_unlock_dir(dir); +- if (IS_ERR(bh)) ++ if (IS_ERR(bh)) { ++ affs_unlock_dir(dir); + return ERR_CAST(bh); ++ } + if (bh) { + u32 ino = bh->b_blocknr; + +@@ -222,10 +223,13 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) + } + affs_brelse(bh); + inode = affs_iget(sb, ino); +- if (IS_ERR(inode)) ++ if (IS_ERR(inode)) { ++ affs_unlock_dir(dir); + return ERR_CAST(inode); ++ } + } + d_add(dentry, inode); ++ affs_unlock_dir(dir); + return NULL; + } + +diff --git a/fs/aio.c b/fs/aio.c +index c3ace7833a03..4e23958c2509 100644 +--- a/fs/aio.c ++++ b/fs/aio.c +@@ -1087,8 +1087,8 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id) + + ctx = rcu_dereference(table->table[id]); + if (ctx && ctx->user_id == ctx_id) { +- percpu_ref_get(&ctx->users); +- ret = ctx; ++ if (percpu_ref_tryget_live(&ctx->users)) ++ ret = ctx; + } + out: + rcu_read_unlock(); +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index c44703e21396..588760c49fe2 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -2969,7 +2969,7 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info) + kfree(fs_info->super_copy); + kfree(fs_info->super_for_commit); + security_free_mnt_opts(&fs_info->security_opts); +- kfree(fs_info); ++ kvfree(fs_info); + } + + /* tree mod log functions from ctree.c */ +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 4a630aeabb10..27d59cf36341 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -1276,7 +1276,7 @@ static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void) + if (!writers) + return ERR_PTR(-ENOMEM); + +- ret = 
percpu_counter_init(&writers->counter, 0, GFP_KERNEL); ++ ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS); + if (ret < 0) { + kfree(writers); + return ERR_PTR(ret); +@@ -3896,7 +3896,8 @@ void close_ctree(struct btrfs_fs_info *fs_info) + btrfs_err(fs_info, "commit super ret %d", ret); + } + +- if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) ++ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) || ++ test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) + btrfs_error_commit_super(fs_info); + + kthread_stop(fs_info->transaction_kthread); +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index 1bc62294fe6b..53487102081d 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -4675,6 +4675,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, + if (wait_for_alloc) { + mutex_unlock(&fs_info->chunk_mutex); + wait_for_alloc = 0; ++ cond_resched(); + goto again; + } + +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index b0fa3a032143..8ecbac3b862e 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -6664,8 +6664,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, + goto out_unlock_inode; + } else { + btrfs_update_inode(trans, root, inode); +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + } + + out_unlock: +@@ -6742,8 +6741,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, + goto out_unlock_inode; + + BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + + out_unlock: + btrfs_end_transaction(trans); +@@ -6890,12 +6888,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) + if (err) + goto out_fail_inode; + +- d_instantiate(dentry, inode); +- /* +- * mkdir is special. We're unlocking after we call d_instantiate +- * to avoid a race with nfsd calling d_instantiate. +- */ +- unlock_new_inode(inode); ++ d_instantiate_new(dentry, inode); + drop_on_err = 0; + + out_fail: +@@ -10573,8 +10566,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, + goto out_unlock_inode; + } + +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + + out_unlock: + btrfs_end_transaction(trans); +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c +index 2c35717a3470..baf5a4cd7ffc 100644 +--- a/fs/btrfs/send.c ++++ b/fs/btrfs/send.c +@@ -5008,6 +5008,9 @@ static int send_hole(struct send_ctx *sctx, u64 end) + u64 len; + int ret = 0; + ++ if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) ++ return send_update_extent(sctx, offset, end - offset); ++ + p = fs_path_alloc(); + if (!p) + return -ENOMEM; +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c +index e8f5e24325f3..8e3ce81d3f44 100644 +--- a/fs/btrfs/super.c ++++ b/fs/btrfs/super.c +@@ -1581,7 +1581,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, + * it for searching for existing supers, so this lets us do that and + * then open_ctree will properly initialize everything later. 
+ */ +- fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL); ++ fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL); + if (!fs_info) { + error = -ENOMEM; + goto error_sec_opts; +diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c +index 0f4ce970d195..578fd045e859 100644 +--- a/fs/btrfs/tests/qgroup-tests.c ++++ b/fs/btrfs/tests/qgroup-tests.c +@@ -63,7 +63,7 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr, + btrfs_set_extent_generation(leaf, item, 1); + btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_TREE_BLOCK); + block_info = (struct btrfs_tree_block_info *)(item + 1); +- btrfs_set_tree_block_level(leaf, block_info, 1); ++ btrfs_set_tree_block_level(leaf, block_info, 0); + iref = (struct btrfs_extent_inline_ref *)(block_info + 1); + if (parent > 0) { + btrfs_set_extent_inline_ref_type(leaf, iref, +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c +index f615d59b0489..27638b96079d 100644 +--- a/fs/btrfs/transaction.c ++++ b/fs/btrfs/transaction.c +@@ -319,7 +319,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans, + if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) && + root->last_trans < trans->transid) || force) { + WARN_ON(root == fs_info->extent_root); +- WARN_ON(root->commit_root != root->node); ++ WARN_ON(!force && root->commit_root != root->node); + + /* + * see below for IN_TRANS_SETUP usage rules +@@ -1365,6 +1365,14 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans, + if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) + return 0; + ++ /* ++ * Ensure dirty @src will be committed. Otherwise, after the coming ++ * commit_fs_roots() and switch_commit_roots(), any dirty but not ++ * recorded root will never be updated again, causing an outdated root ++ * item. ++ */ ++ record_root_in_trans(trans, src, 1); ++ + /* + * We are going to commit transaction, see btrfs_commit_transaction() + * comment for reason locking tree_log_mutex +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index 2794f3550db6..fc4c14a72366 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -2272,8 +2272,10 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, + nritems = btrfs_header_nritems(path->nodes[0]); + if (path->slots[0] >= nritems) { + ret = btrfs_next_leaf(root, path); +- if (ret) ++ if (ret == 1) + break; ++ else if (ret < 0) ++ goto out; + } + btrfs_item_key_to_cpu(path->nodes[0], &found_key, + path->slots[0]); +@@ -2377,13 +2379,41 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, + if (ret) + break; + +- /* for regular files, make sure corresponding +- * orphan item exist. extents past the new EOF +- * will be truncated later by orphan cleanup. ++ /* ++ * Before replaying extents, truncate the inode to its ++ * size. We need to do it now and not after log replay ++ * because before an fsync we can have prealloc extents ++ * added beyond the inode's i_size. If we did it after, ++ * through orphan cleanup for example, we would drop ++ * those prealloc extents just after replaying them.
+ */ + if (S_ISREG(mode)) { +- ret = insert_orphan_item(wc->trans, root, +- key.objectid); ++ struct inode *inode; ++ u64 from; ++ ++ inode = read_one_inode(root, key.objectid); ++ if (!inode) { ++ ret = -EIO; ++ break; ++ } ++ from = ALIGN(i_size_read(inode), ++ root->fs_info->sectorsize); ++ ret = btrfs_drop_extents(wc->trans, root, inode, ++ from, (u64)-1, 1); ++ /* ++ * If the nlink count is zero here, the iput ++ * will free the inode. We bump it to make ++ * sure it doesn't get freed until the link ++ * count fixup is done. ++ */ ++ if (!ret) { ++ if (inode->i_nlink == 0) ++ inc_nlink(inode); ++ /* Update link count and nbytes. */ ++ ret = btrfs_update_inode(wc->trans, ++ root, inode); ++ } ++ iput(inode); + if (ret) + break; + } +@@ -3432,8 +3462,11 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, + * from this directory and from this transaction + */ + ret = btrfs_next_leaf(root, path); +- if (ret == 1) { +- last_offset = (u64)-1; ++ if (ret) { ++ if (ret == 1) ++ last_offset = (u64)-1; ++ else ++ err = ret; + goto done; + } + btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); +@@ -3885,6 +3918,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, + ASSERT(ret == 0); + src = src_path->nodes[0]; + i = 0; ++ need_find_last_extent = true; + } + + btrfs_item_key_to_cpu(src, &key, i); +@@ -4234,6 +4268,31 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, + num++; + } + ++ /* ++ * Add all prealloc extents beyond the inode's i_size to make sure we ++ * don't lose them after doing a fast fsync and replaying the log. ++ */ ++ if (inode->flags & BTRFS_INODE_PREALLOC) { ++ struct rb_node *node; ++ ++ for (node = rb_last(&tree->map); node; node = rb_prev(node)) { ++ em = rb_entry(node, struct extent_map, rb_node); ++ if (em->start < i_size_read(&inode->vfs_inode)) ++ break; ++ if (!list_empty(&em->list)) ++ continue; ++ /* Same as above loop. 
*/ ++ if (++num > 32768) { ++ list_del_init(&tree->modified_extents); ++ ret = -EFBIG; ++ goto process; ++ } ++ refcount_inc(&em->refs); ++ set_bit(EXTENT_FLAG_LOGGING, &em->flags); ++ list_add_tail(&em->list, &extents); ++ } ++ } ++ + list_sort(NULL, &extents, extent_cmp); + btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end); + /* +@@ -5888,7 +5947,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans, + * this will force the logging code to walk the dentry chain + * up for the file + */ +- if (S_ISREG(inode->vfs_inode.i_mode)) ++ if (!S_ISDIR(inode->vfs_inode.i_mode)) + inode->last_unlink_trans = trans->transid; + + /* +diff --git a/fs/ceph/super.c b/fs/ceph/super.c +index e4082afedcb1..48ffe720bf09 100644 +--- a/fs/ceph/super.c ++++ b/fs/ceph/super.c +@@ -224,6 +224,7 @@ static int parse_fsopt_token(char *c, void *private) + return -ENOMEM; + break; + case Opt_mds_namespace: ++ kfree(fsopt->mds_namespace); + fsopt->mds_namespace = kstrndup(argstr[0].from, + argstr[0].to-argstr[0].from, + GFP_KERNEL); +@@ -231,6 +232,7 @@ static int parse_fsopt_token(char *c, void *private) + return -ENOMEM; + break; + case Opt_fscache_uniq: ++ kfree(fsopt->fscache_uniq); + fsopt->fscache_uniq = kstrndup(argstr[0].from, + argstr[0].to-argstr[0].from, + GFP_KERNEL); +@@ -710,14 +712,17 @@ static int __init init_caches(void) + goto bad_dentry; + + ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD); +- + if (!ceph_file_cachep) + goto bad_file; + +- if ((error = ceph_fscache_register())) +- goto bad_file; ++ error = ceph_fscache_register(); ++ if (error) ++ goto bad_fscache; + + return 0; ++ ++bad_fscache: ++ kmem_cache_destroy(ceph_file_cachep); + bad_file: + kmem_cache_destroy(ceph_dentry_cachep); + bad_dentry: +@@ -835,7 +840,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) + int err; + unsigned long started = jiffies; /* note the start time */ + struct dentry *root; +- int first = 0; /* first vfsmount for this super_block */ + + dout("mount start %p\n", fsc); + mutex_lock(&fsc->client->mount_mutex); +@@ -860,17 +864,17 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) + path = fsc->mount_options->server_path + 1; + dout("mount opening path %s\n", path); + } ++ ++ err = ceph_fs_debugfs_init(fsc); ++ if (err < 0) ++ goto out; ++ + root = open_root_dentry(fsc, path, started); + if (IS_ERR(root)) { + err = PTR_ERR(root); + goto out; + } + fsc->sb->s_root = dget(root); +- first = 1; +- +- err = ceph_fs_debugfs_init(fsc); +- if (err < 0) +- goto fail; + } else { + root = dget(fsc->sb->s_root); + } +@@ -880,11 +884,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) + mutex_unlock(&fsc->client->mount_mutex); + return root; + +-fail: +- if (first) { +- dput(fsc->sb->s_root); +- fsc->sb->s_root = NULL; +- } + out: + mutex_unlock(&fsc->client->mount_mutex); + return ERR_PTR(err); +diff --git a/fs/dcache.c b/fs/dcache.c +index c28b9c91b5cb..5f31a93150d1 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -1867,6 +1867,28 @@ void d_instantiate(struct dentry *entry, struct inode * inode) + } + EXPORT_SYMBOL(d_instantiate); + ++/* ++ * This should be equivalent to d_instantiate() + unlock_new_inode(), ++ * with lockdep-related part of unlock_new_inode() done before ++ * anything else. Use that instead of open-coding d_instantiate()/ ++ * unlock_new_inode() combinations. 
++ */ ++void d_instantiate_new(struct dentry *entry, struct inode *inode) ++{ ++ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); ++ BUG_ON(!inode); ++ lockdep_annotate_inode_mutex_key(inode); ++ security_d_instantiate(entry, inode); ++ spin_lock(&inode->i_lock); ++ __d_instantiate(entry, inode); ++ WARN_ON(!(inode->i_state & I_NEW)); ++ inode->i_state &= ~I_NEW; ++ smp_mb(); ++ wake_up_bit(&inode->i_state, __I_NEW); ++ spin_unlock(&inode->i_lock); ++} ++EXPORT_SYMBOL(d_instantiate_new); ++ + /** + * d_instantiate_no_diralias - instantiate a non-aliased dentry + * @entry: dentry to complete +@@ -2460,7 +2482,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, + + retry: + rcu_read_lock(); +- seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1; ++ seq = smp_load_acquire(&parent->d_inode->i_dir_seq); + r_seq = read_seqbegin(&rename_lock); + dentry = __d_lookup_rcu(parent, name, &d_seq); + if (unlikely(dentry)) { +@@ -2481,8 +2503,14 @@ struct dentry *d_alloc_parallel(struct dentry *parent, + rcu_read_unlock(); + goto retry; + } ++ ++ if (unlikely(seq & 1)) { ++ rcu_read_unlock(); ++ goto retry; ++ } ++ + hlist_bl_lock(b); +- if (unlikely(parent->d_inode->i_dir_seq != seq)) { ++ if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) { + hlist_bl_unlock(b); + rcu_read_unlock(); + goto retry; +diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c +index efc2db42d175..bda65a730790 100644 +--- a/fs/ecryptfs/inode.c ++++ b/fs/ecryptfs/inode.c +@@ -284,8 +284,7 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, + iget_failed(ecryptfs_inode); + goto out; + } +- unlock_new_inode(ecryptfs_inode); +- d_instantiate(ecryptfs_dentry, ecryptfs_inode); ++ d_instantiate_new(ecryptfs_dentry, ecryptfs_inode); + out: + return rc; + } +diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c +index e078075dc66f..aa6ec191cac0 100644 +--- a/fs/ext2/namei.c ++++ b/fs/ext2/namei.c +@@ -41,8 +41,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) + { + int err = ext2_add_link(dentry, inode); + if (!err) { +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + return 0; + } + inode_dec_link_count(inode); +@@ -269,8 +268,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) + if (err) + goto out_fail; + +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + out: + return err; + +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c +index fccf295fcb03..6747861f9b70 100644 +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -2420,8 +2420,7 @@ static int ext4_add_nondir(handle_t *handle, + int err = ext4_add_entry(handle, dentry, inode); + if (!err) { + ext4_mark_inode_dirty(handle, inode); +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + return 0; + } + drop_nlink(inode); +@@ -2660,8 +2659,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) + err = ext4_mark_inode_dirty(handle, dir); + if (err) + goto out_clear_inode; +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + if (IS_DIRSYNC(dir)) + ext4_handle_sync(handle); + +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 9102ae7709d3..ec74d06fa24a 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -3663,6 +3663,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + ext4_msg(sb, KERN_INFO, "mounting ext2 file system " + "using the 
ext4 subsystem"); + else { ++ /* ++ * If we're probing be silent, if this looks like ++ * it's actually an ext[34] filesystem. ++ */ ++ if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb))) ++ goto failed_mount; + ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due " + "to feature incompatibilities"); + goto failed_mount; +@@ -3674,6 +3680,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + ext4_msg(sb, KERN_INFO, "mounting ext3 file system " + "using the ext4 subsystem"); + else { ++ /* ++ * If we're probing be silent, if this looks like ++ * it's actually an ext4 filesystem. ++ */ ++ if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb))) ++ goto failed_mount; + ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due " + "to feature incompatibilities"); + goto failed_mount; +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c +index 04fe1df052b2..c282e21f5b5e 100644 +--- a/fs/f2fs/checkpoint.c ++++ b/fs/f2fs/checkpoint.c +@@ -1108,6 +1108,8 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) + + if (cpc->reason & CP_TRIMMED) + __set_ckpt_flags(ckpt, CP_TRIMMED_FLAG); ++ else ++ __clear_ckpt_flags(ckpt, CP_TRIMMED_FLAG); + + if (cpc->reason & CP_UMOUNT) + __set_ckpt_flags(ckpt, CP_UMOUNT_FLAG); +diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c +index ff2352a0ed15..aff6c2ed1c02 100644 +--- a/fs/f2fs/extent_cache.c ++++ b/fs/f2fs/extent_cache.c +@@ -706,6 +706,9 @@ void f2fs_drop_extent_tree(struct inode *inode) + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct extent_tree *et = F2FS_I(inode)->extent_tree; + ++ if (!f2fs_may_extent_tree(inode)) ++ return; ++ + set_inode_flag(inode, FI_NO_EXTENT); + + write_lock(&et->lock); +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c +index b8372095ba0a..29c5f799890c 100644 +--- a/fs/f2fs/file.c ++++ b/fs/f2fs/file.c +@@ -1321,8 +1321,12 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, + } + + out: +- if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) +- f2fs_i_size_write(inode, new_size); ++ if (new_size > i_size_read(inode)) { ++ if (mode & FALLOC_FL_KEEP_SIZE) ++ file_set_keep_isize(inode); ++ else ++ f2fs_i_size_write(inode, new_size); ++ } + out_sem: + up_write(&F2FS_I(inode)->i_mmap_sem); + +diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c +index a4dab98c4b7b..b80e7db3b55b 100644 +--- a/fs/f2fs/namei.c ++++ b/fs/f2fs/namei.c +@@ -201,8 +201,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode, + + alloc_nid_done(sbi, ino); + +- d_instantiate(dentry, inode); +- unlock_new_inode(inode); ++ d_instantiate_new(dentry, inode); + + if (IS_DIRSYNC(dir)) + f2fs_sync_fs(sbi->sb, 1); +@@ -529,8 +528,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry, + err = page_symlink(inode, disk_link.name, disk_link.len); + + err_out: +- d_instantiate(dentry, inode); +- unlock_new_inode(inode); ++ d_instantiate_new(dentry, inode); + + /* + * Let's flush symlink data in order to avoid broken symlink as much as +@@ -588,8 +586,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) + + alloc_nid_done(sbi, inode->i_ino); + +- d_instantiate(dentry, inode); +- unlock_new_inode(inode); ++ d_instantiate_new(dentry, inode); + + if (IS_DIRSYNC(dir)) + f2fs_sync_fs(sbi->sb, 1); +@@ -637,8 +634,7 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry, + + alloc_nid_done(sbi, inode->i_ino); + +- d_instantiate(dentry, inode); +- unlock_new_inode(inode); ++ d_instantiate_new(dentry, 
inode); + + if (IS_DIRSYNC(dir)) + f2fs_sync_fs(sbi->sb, 1); +diff --git a/fs/fscache/page.c b/fs/fscache/page.c +index 0ad3fd3ad0b4..ae9470f3643c 100644 +--- a/fs/fscache/page.c ++++ b/fs/fscache/page.c +@@ -776,6 +776,7 @@ static void fscache_write_op(struct fscache_operation *_op) + + _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage)); + ++again: + spin_lock(&object->lock); + cookie = object->cookie; + +@@ -816,10 +817,6 @@ static void fscache_write_op(struct fscache_operation *_op) + goto superseded; + page = results[0]; + _debug("gang %d [%lx]", n, page->index); +- if (page->index >= op->store_limit) { +- fscache_stat(&fscache_n_store_pages_over_limit); +- goto superseded; +- } + + radix_tree_tag_set(&cookie->stores, page->index, + FSCACHE_COOKIE_STORING_TAG); +@@ -829,6 +826,9 @@ static void fscache_write_op(struct fscache_operation *_op) + spin_unlock(&cookie->stores_lock); + spin_unlock(&object->lock); + ++ if (page->index >= op->store_limit) ++ goto discard_page; ++ + fscache_stat(&fscache_n_store_pages); + fscache_stat(&fscache_n_cop_write_page); + ret = object->cache->ops->write_page(op, page); +@@ -844,6 +844,11 @@ static void fscache_write_op(struct fscache_operation *_op) + _leave(""); + return; + ++discard_page: ++ fscache_stat(&fscache_n_store_pages_over_limit); ++ fscache_end_page_write(object, page); ++ goto again; ++ + superseded: + /* this writer is going away and there aren't any more things to + * write */ +diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c +index 2a29cf3371f6..10f0fac031f4 100644 +--- a/fs/gfs2/file.c ++++ b/fs/gfs2/file.c +@@ -803,7 +803,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t + struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_alloc_parms ap = { .aflags = 0, }; + unsigned int data_blocks = 0, ind_blocks = 0, rblocks; +- loff_t bytes, max_bytes, max_blks = UINT_MAX; ++ loff_t bytes, max_bytes, max_blks; + int error; + const loff_t pos = offset; + const loff_t count = len; +@@ -855,7 +855,8 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t + return error; + /* ap.allowed tells us how many blocks quota will allow + * us to write. 
Check if this reduces max_blks */ +- if (ap.allowed && ap.allowed < max_blks) ++ max_blks = UINT_MAX; ++ if (ap.allowed) + max_blks = ap.allowed; + + error = gfs2_inplace_reserve(ip, &ap); +diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h +index 5e47c935a515..836f29480be6 100644 +--- a/fs/gfs2/quota.h ++++ b/fs/gfs2/quota.h +@@ -45,6 +45,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip, + { + struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); + int ret; ++ ++ ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */ + if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) + return 0; + ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); +diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c +index 0a754f38462e..e5a6deb38e1e 100644 +--- a/fs/jffs2/dir.c ++++ b/fs/jffs2/dir.c +@@ -209,8 +209,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, + __func__, inode->i_ino, inode->i_mode, inode->i_nlink, + f->inocache->pino_nlink, inode->i_mapping->nrpages); + +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + return 0; + + fail: +@@ -430,8 +429,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char + mutex_unlock(&dir_f->sem); + jffs2_complete_reservation(c); + +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + return 0; + + fail: +@@ -575,8 +573,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode + mutex_unlock(&dir_f->sem); + jffs2_complete_reservation(c); + +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + return 0; + + fail: +@@ -747,8 +744,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode + mutex_unlock(&dir_f->sem); + jffs2_complete_reservation(c); + +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + return 0; + + fail: +diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c +index b41596d71858..56c3fcbfe80e 100644 +--- a/fs/jfs/namei.c ++++ b/fs/jfs/namei.c +@@ -178,8 +178,7 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode, + unlock_new_inode(ip); + iput(ip); + } else { +- unlock_new_inode(ip); +- d_instantiate(dentry, ip); ++ d_instantiate_new(dentry, ip); + } + + out2: +@@ -313,8 +312,7 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode) + unlock_new_inode(ip); + iput(ip); + } else { +- unlock_new_inode(ip); +- d_instantiate(dentry, ip); ++ d_instantiate_new(dentry, ip); + } + + out2: +@@ -1059,8 +1057,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, + unlock_new_inode(ip); + iput(ip); + } else { +- unlock_new_inode(ip); +- d_instantiate(dentry, ip); ++ d_instantiate_new(dentry, ip); + } + + out2: +@@ -1447,8 +1444,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry, + unlock_new_inode(ip); + iput(ip); + } else { +- unlock_new_inode(ip); +- d_instantiate(dentry, ip); ++ d_instantiate_new(dentry, ip); + } + + out1: +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c +index e9bea90dc017..fb85d04fdc4c 100644 +--- a/fs/nfs/nfs4client.c ++++ b/fs/nfs/nfs4client.c +@@ -858,8 +858,10 @@ static int nfs4_set_client(struct nfs_server *server, + if (IS_ERR(clp)) + return PTR_ERR(clp); + +- if (server->nfs_client == clp) ++ if (server->nfs_client == clp) { ++ nfs_put_client(clp); + return -ELOOP; ++ } + + /* + * Query for the lease time on clientid setup or renewal +@@ -1217,11 
+1219,11 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname, + clp->cl_proto, clnt->cl_timeout, + clp->cl_minorversion, net); + clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status); +- nfs_put_client(clp); + if (error != 0) { + nfs_server_insert_lists(server); + return error; + } ++ nfs_put_client(clp); + + if (server->nfs_client->cl_hostname == NULL) + server->nfs_client->cl_hostname = kstrdup(hostname, GFP_KERNEL); +diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c +index 515d13c196da..1ba4719de70d 100644 +--- a/fs/nilfs2/namei.c ++++ b/fs/nilfs2/namei.c +@@ -46,8 +46,7 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode) + int err = nilfs_add_link(dentry, inode); + + if (!err) { +- d_instantiate(dentry, inode); +- unlock_new_inode(inode); ++ d_instantiate_new(dentry, inode); + return 0; + } + inode_dec_link_count(inode); +@@ -243,8 +242,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) + goto out_fail; + + nilfs_mark_inode_dirty(inode); +- d_instantiate(dentry, inode); +- unlock_new_inode(inode); ++ d_instantiate_new(dentry, inode); + out: + if (!err) + err = nilfs_transaction_commit(dir->i_sb); +diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c +index a2b19fbdcf46..6099a8034b17 100644 +--- a/fs/ocfs2/dlm/dlmdomain.c ++++ b/fs/ocfs2/dlm/dlmdomain.c +@@ -676,20 +676,6 @@ static void dlm_leave_domain(struct dlm_ctxt *dlm) + spin_unlock(&dlm->spinlock); + } + +-int dlm_shutting_down(struct dlm_ctxt *dlm) +-{ +- int ret = 0; +- +- spin_lock(&dlm_domain_lock); +- +- if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN) +- ret = 1; +- +- spin_unlock(&dlm_domain_lock); +- +- return ret; +-} +- + void dlm_unregister_domain(struct dlm_ctxt *dlm) + { + int leave = 0; +diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h +index fd6122a38dbd..8a9281411c18 100644 +--- a/fs/ocfs2/dlm/dlmdomain.h ++++ b/fs/ocfs2/dlm/dlmdomain.h +@@ -28,7 +28,30 @@ + extern spinlock_t dlm_domain_lock; + extern struct list_head dlm_domains; + +-int dlm_shutting_down(struct dlm_ctxt *dlm); ++static inline int dlm_joined(struct dlm_ctxt *dlm) ++{ ++ int ret = 0; ++ ++ spin_lock(&dlm_domain_lock); ++ if (dlm->dlm_state == DLM_CTXT_JOINED) ++ ret = 1; ++ spin_unlock(&dlm_domain_lock); ++ ++ return ret; ++} ++ ++static inline int dlm_shutting_down(struct dlm_ctxt *dlm) ++{ ++ int ret = 0; ++ ++ spin_lock(&dlm_domain_lock); ++ if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN) ++ ret = 1; ++ spin_unlock(&dlm_domain_lock); ++ ++ return ret; ++} ++ + void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm, + int node_num); + +diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c +index ec8f75813beb..505ab4281f36 100644 +--- a/fs/ocfs2/dlm/dlmrecovery.c ++++ b/fs/ocfs2/dlm/dlmrecovery.c +@@ -1378,6 +1378,15 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data, + if (!dlm_grab(dlm)) + return -EINVAL; + ++ if (!dlm_joined(dlm)) { ++ mlog(ML_ERROR, "Domain %s not joined! 
" ++ "lockres %.*s, master %u\n", ++ dlm->name, mres->lockname_len, ++ mres->lockname, mres->master); ++ dlm_put(dlm); ++ return -EINVAL; ++ } ++ + BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); + + real_master = mres->master; +diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c +index 7e9e5d0ea3bc..f8f3c73d2664 100644 +--- a/fs/orangefs/namei.c ++++ b/fs/orangefs/namei.c +@@ -71,8 +71,7 @@ static int orangefs_create(struct inode *dir, + get_khandle_from_ino(inode), + dentry); + +- d_instantiate(dentry, inode); +- unlock_new_inode(inode); ++ d_instantiate_new(dentry, inode); + orangefs_set_timeout(dentry); + ORANGEFS_I(inode)->getattr_time = jiffies - 1; + ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS; +@@ -320,8 +319,7 @@ static int orangefs_symlink(struct inode *dir, + "Assigned symlink inode new number of %pU\n", + get_khandle_from_ino(inode)); + +- d_instantiate(dentry, inode); +- unlock_new_inode(inode); ++ d_instantiate_new(dentry, inode); + orangefs_set_timeout(dentry); + ORANGEFS_I(inode)->getattr_time = jiffies - 1; + ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS; +@@ -385,8 +383,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode + "Assigned dir inode new number of %pU\n", + get_khandle_from_ino(inode)); + +- d_instantiate(dentry, inode); +- unlock_new_inode(inode); ++ d_instantiate_new(dentry, inode); + orangefs_set_timeout(dentry); + ORANGEFS_I(inode)->getattr_time = jiffies - 1; + ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS; +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c +index c5cbbdff3c3d..82ac5f682b73 100644 +--- a/fs/proc/proc_sysctl.c ++++ b/fs/proc/proc_sysctl.c +@@ -707,7 +707,10 @@ static bool proc_sys_link_fill_cache(struct file *file, + struct ctl_table *table) + { + bool ret = true; ++ + head = sysctl_head_grab(head); ++ if (IS_ERR(head)) ++ return false; + + if (S_ISLNK(table->mode)) { + /* It is not an error if we can not follow the link ignore it */ +diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c +index bd39a998843d..5089dac02660 100644 +--- a/fs/reiserfs/namei.c ++++ b/fs/reiserfs/namei.c +@@ -687,8 +687,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod + reiserfs_update_inode_transaction(inode); + reiserfs_update_inode_transaction(dir); + +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + retval = journal_end(&th); + + out_failed: +@@ -771,8 +770,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode + goto out_failed; + } + +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + retval = journal_end(&th); + + out_failed: +@@ -871,8 +869,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode + /* the above add_entry did not update dir's stat data */ + reiserfs_update_sd(&th, dir); + +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + retval = journal_end(&th); + out_failed: + reiserfs_write_unlock(dir->i_sb); +@@ -1187,8 +1184,7 @@ static int reiserfs_symlink(struct inode *parent_dir, + goto out_failed; + } + +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + retval = journal_end(&th); + out_failed: + reiserfs_write_unlock(parent_dir->i_sb); +diff --git a/fs/super.c b/fs/super.c +index 79d7fc5e0ddd..219f7ca7c5d2 100644 +--- a/fs/super.c ++++ b/fs/super.c +@@ -120,13 +120,23 @@ static 
unsigned long super_cache_count(struct shrinker *shrink, + sb = container_of(shrink, struct super_block, s_shrink); + + /* +- * Don't call trylock_super as it is a potential +- * scalability bottleneck. The counts could get updated +- * between super_cache_count and super_cache_scan anyway. +- * Call to super_cache_count with shrinker_rwsem held +- * ensures the safety of call to list_lru_shrink_count() and +- * s_op->nr_cached_objects(). ++ * We don't call trylock_super() here as it is a scalability bottleneck, ++ * so we're exposed to partial setup state. The shrinker rwsem does not ++ * protect filesystem operations backing list_lru_shrink_count() or ++ * s_op->nr_cached_objects(). Counts can change between ++ * super_cache_count and super_cache_scan, so we really don't need locks ++ * here. ++ * ++ * However, if we are currently mounting the superblock, the underlying ++ * filesystem might be in a state of partial construction and hence it ++ * is dangerous to access it. trylock_super() uses a SB_BORN check to ++ * avoid this situation, so do the same here. The memory barrier is ++ * matched with the one in mount_fs() as we don't hold locks here. + */ ++ if (!(sb->s_flags & SB_BORN)) ++ return 0; ++ smp_rmb(); ++ + if (sb->s_op && sb->s_op->nr_cached_objects) + total_objects = sb->s_op->nr_cached_objects(sb, sc); + +@@ -1232,6 +1242,14 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data) + sb = root->d_sb; + BUG_ON(!sb); + WARN_ON(!sb->s_bdi); ++ ++ /* ++ * Write barrier is for super_cache_count(). We place it before setting ++ * SB_BORN as the data dependency between the two functions is the ++ * superblock structure contents that we just set up, not the SB_BORN ++ * flag. ++ */ ++ smp_wmb(); + sb->s_flags |= SB_BORN; + + error = security_sb_kern_mount(sb, flags, secdata); +diff --git a/fs/udf/namei.c b/fs/udf/namei.c +index 885198dfd9f8..041bf34f781f 100644 +--- a/fs/udf/namei.c ++++ b/fs/udf/namei.c +@@ -621,8 +621,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode) + if (fibh.sbh != fibh.ebh) + brelse(fibh.ebh); + brelse(fibh.sbh); +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + + return 0; + } +@@ -732,8 +731,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) + inc_nlink(dir); + dir->i_ctime = dir->i_mtime = current_time(dir); + mark_inode_dirty(dir); +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + if (fibh.sbh != fibh.ebh) + brelse(fibh.ebh); + brelse(fibh.sbh); +diff --git a/fs/udf/super.c b/fs/udf/super.c +index 08bf097507f6..9b0d6562d0a1 100644 +--- a/fs/udf/super.c ++++ b/fs/udf/super.c +@@ -2091,8 +2091,9 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) + bool lvid_open = false; + + uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT); +- uopt.uid = INVALID_UID; +- uopt.gid = INVALID_GID; ++ /* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */ ++ uopt.uid = make_kuid(current_user_ns(), overflowuid); ++ uopt.gid = make_kgid(current_user_ns(), overflowgid); + uopt.umask = 0; + uopt.fmode = UDF_INVALID_MODE; + uopt.dmode = UDF_INVALID_MODE; +diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c +index 32545cd00ceb..d5f43ba76c59 100644 +--- a/fs/ufs/namei.c ++++ b/fs/ufs/namei.c +@@ -39,8 +39,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) + { + int err = ufs_add_link(dentry, inode); + if (!err) { +- 
unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + return 0; + } + inode_dec_link_count(inode); +@@ -193,8 +192,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) + if (err) + goto out_fail; + +- unlock_new_inode(inode); +- d_instantiate(dentry, inode); ++ d_instantiate_new(dentry, inode); + return 0; + + out_fail: +diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c +index b2cde5426182..7b68e6c9a474 100644 +--- a/fs/xfs/xfs_discard.c ++++ b/fs/xfs/xfs_discard.c +@@ -50,19 +50,19 @@ xfs_trim_extents( + + pag = xfs_perag_get(mp, agno); + +- error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); +- if (error || !agbp) +- goto out_put_perag; +- +- cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT); +- + /* + * Force out the log. This means any transactions that might have freed +- * space before we took the AGF buffer lock are now on disk, and the ++ * space before we take the AGF buffer lock are now on disk, and the + * volatile disk cache is flushed. + */ + xfs_log_force(mp, XFS_LOG_SYNC); + ++ error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); ++ if (error || !agbp) ++ goto out_put_perag; ++ ++ cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT); ++ + /* + * Look up the longest btree in the AGF and start with it. + */ +diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h +index af2cc94a61bf..ae1a33aa8955 100644 +--- a/include/asm-generic/bug.h ++++ b/include/asm-generic/bug.h +@@ -50,6 +50,7 @@ struct bug_entry { + #ifndef HAVE_ARCH_BUG + #define BUG() do { \ + printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ ++ barrier_before_unreachable(); \ + panic("BUG!"); \ + } while (0) + #endif +diff --git a/include/linux/bio.h b/include/linux/bio.h +index 45f00dd6323c..5aa40f4712ff 100644 +--- a/include/linux/bio.h ++++ b/include/linux/bio.h +@@ -501,6 +501,7 @@ void zero_fill_bio(struct bio *bio); + extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *); + extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); + extern unsigned int bvec_nr_vecs(unsigned short idx); ++extern const char *bio_devname(struct bio *bio, char *buffer); + + #define bio_set_dev(bio, bdev) \ + do { \ +@@ -519,9 +520,6 @@ do { \ + #define bio_dev(bio) \ + disk_devt((bio)->bi_disk) + +-#define bio_devname(bio, buf) \ +- __bdevname(bio_dev(bio), (buf)) +- + #ifdef CONFIG_BLK_CGROUP + int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); + int bio_associate_current(struct bio *bio); +diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h +index b78b31af36f8..f43113b8890b 100644 +--- a/include/linux/compiler-gcc.h ++++ b/include/linux/compiler-gcc.h +@@ -211,6 +211,15 @@ + #endif + #endif + ++/* ++ * calling noreturn functions, __builtin_unreachable() and __builtin_trap() ++ * confuse the stack allocation in gcc, leading to overly large stack ++ * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365 ++ * ++ * Adding an empty inline assembly before it works around the problem ++ */ ++#define barrier_before_unreachable() asm volatile("") ++ + /* + * Mark a position in code as unreachable. This can be used to + * suppress control flow warnings after asm blocks that transfer +@@ -221,7 +230,11 @@ + * unreleased. Really, we need to have autoconf for the kernel. 
+ */ + #define unreachable() \ +- do { annotate_unreachable(); __builtin_unreachable(); } while (0) ++ do { \ ++ annotate_unreachable(); \ ++ barrier_before_unreachable(); \ ++ __builtin_unreachable(); \ ++ } while (0) + + /* Mark a function definition as prohibited from being cloned. */ + #define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) +diff --git a/include/linux/compiler.h b/include/linux/compiler.h +index e8c9cd18bb05..853929f98962 100644 +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -86,6 +86,11 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, + # define barrier_data(ptr) barrier() + #endif + ++/* workaround for GCC PR82365 if needed */ ++#ifndef barrier_before_unreachable ++# define barrier_before_unreachable() do { } while (0) ++#endif ++ + /* Unreachable code */ + #ifdef CONFIG_STACK_VALIDATION + #define annotate_reachable() ({ \ +diff --git a/include/linux/dcache.h b/include/linux/dcache.h +index f05a659cdf34..006f4ccda5f5 100644 +--- a/include/linux/dcache.h ++++ b/include/linux/dcache.h +@@ -226,6 +226,7 @@ extern seqlock_t rename_lock; + * These are the low-level FS interfaces to the dcache.. + */ + extern void d_instantiate(struct dentry *, struct inode *); ++extern void d_instantiate_new(struct dentry *, struct inode *); + extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); + extern int d_instantiate_no_diralias(struct dentry *, struct inode *); + extern void __d_drop(struct dentry *dentry); +diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h +index ab927383c99d..87b8c20d5b27 100644 +--- a/include/linux/if_vlan.h ++++ b/include/linux/if_vlan.h +@@ -300,32 +300,47 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features, + } + + /** +- * __vlan_insert_tag - regular VLAN tag inserting ++ * __vlan_insert_inner_tag - inner VLAN tag inserting + * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol + * @vlan_tci: VLAN TCI to insert ++ * @mac_len: MAC header length including outer vlan headers + * +- * Inserts the VLAN tag into @skb as part of the payload ++ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len + * Returns error if skb_cow_head failes. + * + * Does not change skb->protocol so this function can be used during receive. + */ +-static inline int __vlan_insert_tag(struct sk_buff *skb, +- __be16 vlan_proto, u16 vlan_tci) ++static inline int __vlan_insert_inner_tag(struct sk_buff *skb, ++ __be16 vlan_proto, u16 vlan_tci, ++ unsigned int mac_len) + { + struct vlan_ethhdr *veth; + + if (skb_cow_head(skb, VLAN_HLEN) < 0) + return -ENOMEM; + +- veth = skb_push(skb, VLAN_HLEN); ++ skb_push(skb, VLAN_HLEN); + +- /* Move the mac addresses to the beginning of the new header. */ +- memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); ++ /* Move the mac header sans proto to the beginning of the new header. 
*/ ++ if (likely(mac_len > ETH_TLEN)) ++ memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN); + skb->mac_header -= VLAN_HLEN; + ++ veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN); ++ + /* first, the ethernet type */ +- veth->h_vlan_proto = vlan_proto; ++ if (likely(mac_len >= ETH_TLEN)) { ++ /* h_vlan_encapsulated_proto should already be populated, and ++ * skb->data has space for h_vlan_proto ++ */ ++ veth->h_vlan_proto = vlan_proto; ++ } else { ++ /* h_vlan_encapsulated_proto should not be populated, and ++ * skb->data has no space for h_vlan_proto ++ */ ++ veth->h_vlan_encapsulated_proto = skb->protocol; ++ } + + /* now, the TCI */ + veth->h_vlan_TCI = htons(vlan_tci); +@@ -334,12 +349,30 @@ static inline int __vlan_insert_tag(struct sk_buff *skb, + } + + /** +- * vlan_insert_tag - regular VLAN tag inserting ++ * __vlan_insert_tag - regular VLAN tag inserting + * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol + * @vlan_tci: VLAN TCI to insert + * + * Inserts the VLAN tag into @skb as part of the payload ++ * Returns error if skb_cow_head failes. ++ * ++ * Does not change skb->protocol so this function can be used during receive. ++ */ ++static inline int __vlan_insert_tag(struct sk_buff *skb, ++ __be16 vlan_proto, u16 vlan_tci) ++{ ++ return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN); ++} ++ ++/** ++ * vlan_insert_inner_tag - inner VLAN tag inserting ++ * @skb: skbuff to tag ++ * @vlan_proto: VLAN encapsulation protocol ++ * @vlan_tci: VLAN TCI to insert ++ * @mac_len: MAC header length including outer vlan headers ++ * ++ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len + * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. + * + * Following the skb_unshare() example, in case of error, the calling function +@@ -347,12 +380,14 @@ static inline int __vlan_insert_tag(struct sk_buff *skb, + * + * Does not change skb->protocol so this function can be used during receive. + */ +-static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, +- __be16 vlan_proto, u16 vlan_tci) ++static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb, ++ __be16 vlan_proto, ++ u16 vlan_tci, ++ unsigned int mac_len) + { + int err; + +- err = __vlan_insert_tag(skb, vlan_proto, vlan_tci); ++ err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len); + if (err) { + dev_kfree_skb_any(skb); + return NULL; +@@ -360,6 +395,26 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, + return skb; + } + ++/** ++ * vlan_insert_tag - regular VLAN tag inserting ++ * @skb: skbuff to tag ++ * @vlan_proto: VLAN encapsulation protocol ++ * @vlan_tci: VLAN TCI to insert ++ * ++ * Inserts the VLAN tag into @skb as part of the payload ++ * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. ++ * ++ * Following the skb_unshare() example, in case of error, the calling function ++ * doesn't have to worry about freeing the original skb. ++ * ++ * Does not change skb->protocol so this function can be used during receive. 
++ */ ++static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, ++ __be16 vlan_proto, u16 vlan_tci) ++{ ++ return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN); ++} ++ + /** + * vlan_insert_tag_set_proto - regular VLAN tag inserting + * @skb: skbuff to tag +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h +index 5a8019befafd..39f0489eb137 100644 +--- a/include/linux/kvm_host.h ++++ b/include/linux/kvm_host.h +@@ -1104,7 +1104,6 @@ static inline void kvm_irq_routing_update(struct kvm *kvm) + { + } + #endif +-void kvm_arch_irq_routing_update(struct kvm *kvm); + + static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) + { +@@ -1113,6 +1112,8 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) + + #endif /* CONFIG_HAVE_KVM_EVENTFD */ + ++void kvm_arch_irq_routing_update(struct kvm *kvm); ++ + static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) + { + /* +diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h +index 35d125569e68..e8b12b79a0de 100644 +--- a/include/linux/ptr_ring.h ++++ b/include/linux/ptr_ring.h +@@ -450,7 +450,7 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, + */ + static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) + { +- if (size * sizeof(void *) > KMALLOC_MAX_SIZE) ++ if (size > KMALLOC_MAX_SIZE / sizeof(void *)) + return NULL; + return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO); + } +diff --git a/include/net/ip.h b/include/net/ip.h +index af8addbaa3c1..81da1123fc8e 100644 +--- a/include/net/ip.h ++++ b/include/net/ip.h +@@ -326,6 +326,13 @@ int ip_decrease_ttl(struct iphdr *iph) + return --iph->ttl; + } + ++static inline int ip_mtu_locked(const struct dst_entry *dst) ++{ ++ const struct rtable *rt = (const struct rtable *)dst; ++ ++ return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU); ++} ++ + static inline + int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst) + { +@@ -333,7 +340,7 @@ int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst) + + return pmtudisc == IP_PMTUDISC_DO || + (pmtudisc == IP_PMTUDISC_WANT && +- !(dst_metric_locked(dst, RTAX_MTU))); ++ !ip_mtu_locked(dst)); + } + + static inline bool ip_sk_accept_pmtu(const struct sock *sk) +@@ -359,7 +366,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, + struct net *net = dev_net(dst->dev); + + if (net->ipv4.sysctl_ip_fwd_use_pmtu || +- dst_metric_locked(dst, RTAX_MTU) || ++ ip_mtu_locked(dst) || + !forwarding) + return dst_mtu(dst); + +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h +index 1a7f7e424320..5c5d344c0629 100644 +--- a/include/net/ip_fib.h ++++ b/include/net/ip_fib.h +@@ -59,6 +59,7 @@ struct fib_nh_exception { + int fnhe_genid; + __be32 fnhe_daddr; + u32 fnhe_pmtu; ++ bool fnhe_mtu_locked; + __be32 fnhe_gw; + unsigned long fnhe_expires; + struct rtable __rcu *fnhe_rth_input; +diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h +index ea985aa7a6c5..df528a623548 100644 +--- a/include/net/llc_conn.h ++++ b/include/net/llc_conn.h +@@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk); + + /* Access to a connection */ + int llc_conn_state_process(struct sock *sk, struct sk_buff *skb); +-void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); ++int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); + void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb); + void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 
first_p_bit); + void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit); +diff --git a/include/net/mac80211.h b/include/net/mac80211.h +index 4f1d2dec43ce..87b62bae20af 100644 +--- a/include/net/mac80211.h ++++ b/include/net/mac80211.h +@@ -4141,7 +4141,7 @@ void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid); + * The TX headroom reserved by mac80211 for its own tx_status functions. + * This is enough for the radiotap header. + */ +-#define IEEE80211_TX_STATUS_HEADROOM 14 ++#define IEEE80211_TX_STATUS_HEADROOM ALIGN(14, 4) + + /** + * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames +diff --git a/include/net/regulatory.h b/include/net/regulatory.h +index ebc5a2ed8631..f83cacce3308 100644 +--- a/include/net/regulatory.h ++++ b/include/net/regulatory.h +@@ -78,7 +78,7 @@ struct regulatory_request { + int wiphy_idx; + enum nl80211_reg_initiator initiator; + enum nl80211_user_reg_hint_type user_reg_hint_type; +- char alpha2[2]; ++ char alpha2[3]; + enum nl80211_dfs_regions dfs_region; + bool intersect; + bool processed; +diff --git a/include/net/route.h b/include/net/route.h +index d538e6db1afe..6077a0fb3044 100644 +--- a/include/net/route.h ++++ b/include/net/route.h +@@ -63,7 +63,8 @@ struct rtable { + __be32 rt_gateway; + + /* Miscellaneous cached information */ +- u32 rt_pmtu; ++ u32 rt_mtu_locked:1, ++ rt_pmtu:31; + + u32 rt_table_id; + +diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h +index 23159dd5be18..a1fd63871d17 100644 +--- a/include/rdma/ib_umem.h ++++ b/include/rdma/ib_umem.h +@@ -48,7 +48,6 @@ struct ib_umem { + int writable; + int hugetlb; + struct work_struct work; +- struct pid *pid; + struct mm_struct *mm; + unsigned long diff; + struct ib_umem_odp *odp_data; +diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h +index c2d1b15da136..a91f25151a5b 100644 +--- a/include/soc/arc/mcip.h ++++ b/include/soc/arc/mcip.h +@@ -15,6 +15,7 @@ + + #define ARC_REG_MCIP_BCR 0x0d0 + #define ARC_REG_MCIP_IDU_BCR 0x0D5 ++#define ARC_REG_GFRC_BUILD 0x0D6 + #define ARC_REG_MCIP_CMD 0x600 + #define ARC_REG_MCIP_WDATA 0x601 + #define ARC_REG_MCIP_READBACK 0x602 +@@ -36,10 +37,14 @@ struct mcip_cmd { + #define CMD_SEMA_RELEASE 0x12 + + #define CMD_DEBUG_SET_MASK 0x34 ++#define CMD_DEBUG_READ_MASK 0x35 + #define CMD_DEBUG_SET_SELECT 0x36 ++#define CMD_DEBUG_READ_SELECT 0x37 + + #define CMD_GFRC_READ_LO 0x42 + #define CMD_GFRC_READ_HI 0x43 ++#define CMD_GFRC_SET_CORE 0x47 ++#define CMD_GFRC_READ_CORE 0x48 + + #define CMD_IDU_ENABLE 0x71 + #define CMD_IDU_DISABLE 0x72 +diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h +index 91a31ffed828..9a781f0611df 100644 +--- a/include/uapi/drm/virtgpu_drm.h ++++ b/include/uapi/drm/virtgpu_drm.h +@@ -63,6 +63,7 @@ struct drm_virtgpu_execbuffer { + }; + + #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ ++#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */ + + struct drm_virtgpu_getparam { + __u64 param; +diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h +index 3ee3bf7c8526..244e3213ecb0 100644 +--- a/include/uapi/linux/if_ether.h ++++ b/include/uapi/linux/if_ether.h +@@ -30,6 +30,7 @@ + */ + + #define ETH_ALEN 6 /* Octets in one ethernet addr */ ++#define ETH_TLEN 2 /* Octets in ethernet type field */ + #define ETH_HLEN 14 /* Total octets in header. */ + #define ETH_ZLEN 60 /* Min. octets in frame sans FCS */ + #define ETH_DATA_LEN 1500 /* Max. 
octets in payload */ +diff --git a/ipc/shm.c b/ipc/shm.c +index a9cce632ed48..44cca2529a95 100644 +--- a/ipc/shm.c ++++ b/ipc/shm.c +@@ -1309,14 +1309,17 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, + + if (addr) { + if (addr & (shmlba - 1)) { +- /* +- * Round down to the nearest multiple of shmlba. +- * For sane do_mmap_pgoff() parameters, avoid +- * round downs that trigger nil-page and MAP_FIXED. +- */ +- if ((shmflg & SHM_RND) && addr >= shmlba) +- addr &= ~(shmlba - 1); +- else ++ if (shmflg & SHM_RND) { ++ addr &= ~(shmlba - 1); /* round down */ ++ ++ /* ++ * Ensure that the round-down is non-nil ++ * when remapping. This can happen for ++ * cases when addr < shmlba. ++ */ ++ if (!addr && (shmflg & SHM_REMAP)) ++ goto out; ++ } else + #ifndef __ARCH_FORCE_SHMLBA + if (addr & ~PAGE_MASK) + #endif +diff --git a/kernel/audit.c b/kernel/audit.c +index 5b34d3114af4..d301276bca58 100644 +--- a/kernel/audit.c ++++ b/kernel/audit.c +@@ -1058,6 +1058,8 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature + return; + + ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE); ++ if (!ab) ++ return; + audit_log_task_info(ab, current); + audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d", + audit_feature_names[which], !!old_feature, !!new_feature, +diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c +index c8146d53ca67..07aefa8dbee8 100644 +--- a/kernel/debug/kdb/kdb_main.c ++++ b/kernel/debug/kdb/kdb_main.c +@@ -1566,6 +1566,7 @@ static int kdb_md(int argc, const char **argv) + int symbolic = 0; + int valid = 0; + int phys = 0; ++ int raw = 0; + + kdbgetintenv("MDCOUNT", &mdcount); + kdbgetintenv("RADIX", &radix); +@@ -1575,9 +1576,10 @@ static int kdb_md(int argc, const char **argv) + repeat = mdcount * 16 / bytesperword; + + if (strcmp(argv[0], "mdr") == 0) { +- if (argc != 2) ++ if (argc == 2 || (argc == 0 && last_addr != 0)) ++ valid = raw = 1; ++ else + return KDB_ARGCOUNT; +- valid = 1; + } else if (isdigit(argv[0][2])) { + bytesperword = (int)(argv[0][2] - '0'); + if (bytesperword == 0) { +@@ -1613,7 +1615,10 @@ static int kdb_md(int argc, const char **argv) + radix = last_radix; + bytesperword = last_bytesperword; + repeat = last_repeat; +- mdcount = ((repeat * bytesperword) + 15) / 16; ++ if (raw) ++ mdcount = repeat; ++ else ++ mdcount = ((repeat * bytesperword) + 15) / 16; + } + + if (argc) { +@@ -1630,7 +1635,10 @@ static int kdb_md(int argc, const char **argv) + diag = kdbgetularg(argv[nextarg], &val); + if (!diag) { + mdcount = (int) val; +- repeat = mdcount * 16 / bytesperword; ++ if (raw) ++ repeat = mdcount; ++ else ++ repeat = mdcount * 16 / bytesperword; + } + } + if (argc >= nextarg+1) { +@@ -1640,8 +1648,15 @@ static int kdb_md(int argc, const char **argv) + } + } + +- if (strcmp(argv[0], "mdr") == 0) +- return kdb_mdr(addr, mdcount); ++ if (strcmp(argv[0], "mdr") == 0) { ++ int ret; ++ last_addr = addr; ++ ret = kdb_mdr(addr, mdcount); ++ last_addr += mdcount; ++ last_repeat = mdcount; ++ last_bytesperword = bytesperword; // to make REPEAT happy ++ return ret; ++ } + + switch (radix) { + case 10: +diff --git a/kernel/events/core.c b/kernel/events/core.c +index cb8274d7824f..7c394ddf1ce6 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -642,9 +642,15 @@ static inline void __update_cgrp_time(struct perf_cgroup *cgrp) + + static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) + { +- struct perf_cgroup *cgrp_out = cpuctx->cgrp; +- if 
(cgrp_out) +- __update_cgrp_time(cgrp_out); ++ struct perf_cgroup *cgrp = cpuctx->cgrp; ++ struct cgroup_subsys_state *css; ++ ++ if (cgrp) { ++ for (css = &cgrp->css; css; css = css->parent) { ++ cgrp = container_of(css, struct perf_cgroup, css); ++ __update_cgrp_time(cgrp); ++ } ++ } + } + + static inline void update_cgrp_time_from_event(struct perf_event *event) +@@ -672,6 +678,7 @@ perf_cgroup_set_timestamp(struct task_struct *task, + { + struct perf_cgroup *cgrp; + struct perf_cgroup_info *info; ++ struct cgroup_subsys_state *css; + + /* + * ctx->lock held by caller +@@ -682,8 +689,12 @@ perf_cgroup_set_timestamp(struct task_struct *task, + return; + + cgrp = perf_cgroup_from_task(task, ctx); +- info = this_cpu_ptr(cgrp->info); +- info->timestamp = ctx->timestamp; ++ ++ for (css = &cgrp->css; css; css = css->parent) { ++ cgrp = container_of(css, struct perf_cgroup, css); ++ info = this_cpu_ptr(cgrp->info); ++ info->timestamp = ctx->timestamp; ++ } + } + + static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list); +@@ -889,27 +900,39 @@ list_update_cgroup_event(struct perf_event *event, + if (!is_cgroup_event(event)) + return; + +- if (add && ctx->nr_cgroups++) +- return; +- else if (!add && --ctx->nr_cgroups) +- return; + /* + * Because cgroup events are always per-cpu events, + * this will always be called from the right CPU. + */ + cpuctx = __get_cpu_context(ctx); +- cpuctx_entry = &cpuctx->cgrp_cpuctx_entry; +- /* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/ +- if (add) { ++ ++ /* ++ * Since setting cpuctx->cgrp is conditional on the current @cgrp ++ * matching the event's cgroup, we must do this for every new event, ++ * because if the first would mismatch, the second would not try again ++ * and we would leave cpuctx->cgrp unset. ++ */ ++ if (add && !cpuctx->cgrp) { + struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx); + +- list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list)); + if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) + cpuctx->cgrp = cgrp; +- } else { +- list_del(cpuctx_entry); +- cpuctx->cgrp = NULL; + } ++ ++ if (add && ctx->nr_cgroups++) ++ return; ++ else if (!add && --ctx->nr_cgroups) ++ return; ++ ++ /* no cgroup running */ ++ if (!add) ++ cpuctx->cgrp = NULL; ++ ++ cpuctx_entry = &cpuctx->cgrp_cpuctx_entry; ++ if (add) ++ list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list)); ++ else ++ list_del(cpuctx_entry); + } + + #else /* !CONFIG_CGROUP_PERF */ +@@ -2393,6 +2416,18 @@ static int __perf_install_in_context(void *info) + raw_spin_lock(&task_ctx->lock); + } + ++#ifdef CONFIG_CGROUP_PERF ++ if (is_cgroup_event(event)) { ++ /* ++ * If the current cgroup doesn't match the event's ++ * cgroup, we should not try to schedule it. 
++ */ ++ struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx); ++ reprogram = cgroup_is_descendant(cgrp->css.cgroup, ++ event->cgrp->css.cgroup); ++ } ++#endif ++ + if (reprogram) { + ctx_sched_out(ctx, cpuctx, EVENT_TIME); + add_event_to_ctx(event, ctx); +@@ -5802,7 +5837,8 @@ static void perf_output_read_group(struct perf_output_handle *handle, + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + values[n++] = running; + +- if (leader != event) ++ if ((leader != event) && ++ (leader->state == PERF_EVENT_STATE_ACTIVE)) + leader->pmu->read(leader); + + values[n++] = perf_event_count(leader); +diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h +index fed95fa941e6..8b3102d22823 100644 +--- a/kernel/rcu/tree_plugin.h ++++ b/kernel/rcu/tree_plugin.h +@@ -559,8 +559,14 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) + } + t = list_entry(rnp->gp_tasks->prev, + struct task_struct, rcu_node_entry); +- list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) ++ list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { ++ /* ++ * We could be printing a lot while holding a spinlock. ++ * Avoid triggering hard lockup. ++ */ ++ touch_nmi_watchdog(); + sched_show_task(t); ++ } + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + } + +@@ -1677,6 +1683,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) + char *ticks_title; + unsigned long ticks_value; + ++ /* ++ * We could be printing a lot while holding a spinlock. Avoid ++ * triggering hard lockup. ++ */ ++ touch_nmi_watchdog(); ++ + if (rsp->gpnum == rdp->gpnum) { + ticks_title = "ticks this GP"; + ticks_value = rdp->ticks_this_gp; +diff --git a/kernel/relay.c b/kernel/relay.c +index 55da824f4adc..1537158c67b3 100644 +--- a/kernel/relay.c ++++ b/kernel/relay.c +@@ -163,7 +163,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan) + { + struct rchan_buf *buf; + +- if (chan->n_subbufs > UINT_MAX / sizeof(size_t *)) ++ if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *)) + return NULL; + + buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL); +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c +index 470a0c9e93de..113eaeb6c0f8 100644 +--- a/kernel/sched/rt.c ++++ b/kernel/sched/rt.c +@@ -843,6 +843,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) + continue; + + raw_spin_lock(&rq->lock); ++ update_rq_clock(rq); ++ + if (rt_rq->rt_time) { + u64 runtime; + +diff --git a/kernel/sys.c b/kernel/sys.c +index b5c1bc9e3769..de4ed027dfd7 100644 +--- a/kernel/sys.c ++++ b/kernel/sys.c +@@ -1395,6 +1395,7 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, + if (resource >= RLIM_NLIMITS) + return -EINVAL; + ++ resource = array_index_nospec(resource, RLIM_NLIMITS); + task_lock(current->group_leader); + x = current->signal->rlim[resource]; + task_unlock(current->group_leader); +@@ -1414,6 +1415,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, + if (resource >= RLIM_NLIMITS) + return -EINVAL; + ++ resource = array_index_nospec(resource, RLIM_NLIMITS); + task_lock(current->group_leader); + r = current->signal->rlim[resource]; + task_unlock(current->group_leader); +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index d0c6b50792c8..d8a7f8939c81 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -5350,7 +5350,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) + + ret = device_register(&wq_dev->dev); + if (ret) { +- kfree(wq_dev); ++ put_device(&wq_dev->dev); + wq->wq_dev = NULL; + return 
ret; + } +diff --git a/lib/radix-tree.c b/lib/radix-tree.c +index 70d677820740..d172f0341b80 100644 +--- a/lib/radix-tree.c ++++ b/lib/radix-tree.c +@@ -2037,10 +2037,12 @@ void *radix_tree_delete_item(struct radix_tree_root *root, + unsigned long index, void *item) + { + struct radix_tree_node *node = NULL; +- void __rcu **slot; ++ void __rcu **slot = NULL; + void *entry; + + entry = __radix_tree_lookup(root, index, &node, &slot); ++ if (!slot) ++ return NULL; + if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE, + get_slot_offset(node, slot)))) + return NULL; +diff --git a/lib/test_kmod.c b/lib/test_kmod.c +index fba78d25e825..96c304fd656a 100644 +--- a/lib/test_kmod.c ++++ b/lib/test_kmod.c +@@ -1149,7 +1149,7 @@ static struct kmod_test_device *register_test_dev_kmod(void) + mutex_lock(®_dev_mutex); + + /* int should suffice for number of devices, test for wrap */ +- if (unlikely(num_test_devs + 1) < 0) { ++ if (num_test_devs + 1 == INT_MAX) { + pr_err("reached limit of number of test devices\n"); + goto out; + } +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index a403d29da6fd..e774898c91d5 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -555,7 +555,8 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page, + + VM_BUG_ON_PAGE(!PageCompound(page), page); + +- if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) { ++ if (mem_cgroup_try_charge(page, vma->vm_mm, gfp | __GFP_NORETRY, &memcg, ++ true)) { + put_page(page); + count_vm_event(THP_FAULT_FALLBACK); + return VM_FAULT_FALLBACK; +@@ -1304,7 +1305,7 @@ int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) + } + + if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm, +- huge_gfp, &memcg, true))) { ++ huge_gfp | __GFP_NORETRY, &memcg, true))) { + put_page(new_page); + split_huge_pmd(vma, vmf->pmd, vmf->address); + if (page) +diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c +index 6f319fb81718..d90f29a166d8 100644 +--- a/mm/kasan/kasan.c ++++ b/mm/kasan/kasan.c +@@ -737,6 +737,40 @@ void __asan_unpoison_stack_memory(const void *addr, size_t size) + EXPORT_SYMBOL(__asan_unpoison_stack_memory); + + #ifdef CONFIG_MEMORY_HOTPLUG ++static bool shadow_mapped(unsigned long addr) ++{ ++ pgd_t *pgd = pgd_offset_k(addr); ++ p4d_t *p4d; ++ pud_t *pud; ++ pmd_t *pmd; ++ pte_t *pte; ++ ++ if (pgd_none(*pgd)) ++ return false; ++ p4d = p4d_offset(pgd, addr); ++ if (p4d_none(*p4d)) ++ return false; ++ pud = pud_offset(p4d, addr); ++ if (pud_none(*pud)) ++ return false; ++ ++ /* ++ * We can't use pud_large() or pud_huge(), the first one is ++ * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse ++ * pud_bad(), if pud is bad then it's bad because it's huge. ++ */ ++ if (pud_bad(*pud)) ++ return true; ++ pmd = pmd_offset(pud, addr); ++ if (pmd_none(*pmd)) ++ return false; ++ ++ if (pmd_bad(*pmd)) ++ return true; ++ pte = pte_offset_kernel(pmd, addr); ++ return !pte_none(*pte); ++} ++ + static int __meminit kasan_mem_notifier(struct notifier_block *nb, + unsigned long action, void *data) + { +@@ -758,6 +792,14 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb, + case MEM_GOING_ONLINE: { + void *ret; + ++ /* ++ * If shadow is mapped already than it must have been mapped ++ * during the boot. This could happen if we onlining previously ++ * offlined memory. 
++ */ ++ if (shadow_mapped(shadow_start)) ++ return NOTIFY_OK; ++ + ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start, + shadow_end, GFP_KERNEL, + PAGE_KERNEL, VM_NO_GUARD, +@@ -769,8 +811,26 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb, + kmemleak_ignore(ret); + return NOTIFY_OK; + } +- case MEM_OFFLINE: +- vfree((void *)shadow_start); ++ case MEM_CANCEL_ONLINE: ++ case MEM_OFFLINE: { ++ struct vm_struct *vm; ++ ++ /* ++ * shadow_start was either mapped during boot by kasan_init() ++ * or during memory online by __vmalloc_node_range(). ++ * In the latter case we can use vfree() to free shadow. ++ * Non-NULL result of the find_vm_area() will tell us if ++ * that was the second case. ++ * ++ * Currently it's not possible to free shadow mapped ++ * during boot by kasan_init(). It's because the code ++ * to do that hasn't been written yet. So we'll just ++ * leak the memory. ++ */ ++ vm = find_vm_area((void *)shadow_start); ++ if (vm) ++ vfree((void *)shadow_start); ++ } + } + + return NOTIFY_OK; +@@ -783,5 +843,5 @@ static int __init kasan_memhotplug_init(void) + return 0; + } + +-module_init(kasan_memhotplug_init); ++core_initcall(kasan_memhotplug_init); + #endif +diff --git a/mm/khugepaged.c b/mm/khugepaged.c +index 29221602d802..0a5bb3e8a8a3 100644 +--- a/mm/khugepaged.c ++++ b/mm/khugepaged.c +@@ -965,7 +965,9 @@ static void collapse_huge_page(struct mm_struct *mm, + goto out_nolock; + } + +- if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) { ++ /* Do not oom kill for khugepaged charges */ ++ if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY, ++ &memcg, true))) { + result = SCAN_CGROUP_CHARGE_FAIL; + goto out_nolock; + } +@@ -1324,7 +1326,9 @@ static void collapse_shmem(struct mm_struct *mm, + goto out; + } + +- if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) { ++ /* Do not oom kill for khugepaged charges */ ++ if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY, ++ &memcg, true))) { + result = SCAN_CGROUP_CHARGE_FAIL; + goto out; + } +diff --git a/mm/kmemleak.c b/mm/kmemleak.c +index bd1374f402cd..d9e0be2a8189 100644 +--- a/mm/kmemleak.c ++++ b/mm/kmemleak.c +@@ -1658,8 +1658,7 @@ static void start_scan_thread(void) + } + + /* +- * Stop the automatic memory scanning thread. This function must be called +- * with the scan_mutex held. ++ * Stop the automatic memory scanning thread. + */ + static void stop_scan_thread(void) + { +@@ -1922,12 +1921,15 @@ static void kmemleak_do_cleanup(struct work_struct *work) + { + stop_scan_thread(); + ++ mutex_lock(&scan_mutex); + /* +- * Once the scan thread has stopped, it is safe to no longer track +- * object freeing. Ordering of the scan thread stopping and the memory +- * accesses below is guaranteed by the kthread_stop() function. ++ * Once it is made sure that kmemleak_scan has stopped, it is safe to no ++ * longer track object freeing. Ordering of the scan thread stopping and ++ * the memory accesses below is guaranteed by the kthread_stop() ++ * function. 
+ */ + kmemleak_free_enabled = 0; ++ mutex_unlock(&scan_mutex); + + if (!kmemleak_found_leaks) + __kmemleak_do_cleanup(); +diff --git a/mm/ksm.c b/mm/ksm.c +index 5b6be9eeb095..fdc8746ebcb4 100644 +--- a/mm/ksm.c ++++ b/mm/ksm.c +@@ -2085,8 +2085,22 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) + tree_rmap_item = + unstable_tree_search_insert(rmap_item, page, &tree_page); + if (tree_rmap_item) { ++ bool split; ++ + kpage = try_to_merge_two_pages(rmap_item, page, + tree_rmap_item, tree_page); ++ /* ++ * If both pages we tried to merge belong to the same compound ++ * page, then we actually ended up increasing the reference ++ * count of the same compound page twice, and split_huge_page ++ * failed. ++ * Here we set a flag if that happened, and we use it later to ++ * try split_huge_page again. Since we call put_page right ++ * afterwards, the reference count will be correct and ++ * split_huge_page should succeed. ++ */ ++ split = PageTransCompound(page) ++ && compound_head(page) == compound_head(tree_page); + put_page(tree_page); + if (kpage) { + /* +@@ -2113,6 +2127,20 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) + break_cow(tree_rmap_item); + break_cow(rmap_item); + } ++ } else if (split) { ++ /* ++ * We are here if we tried to merge two pages and ++ * failed because they both belonged to the same ++ * compound page. We will split the page now, but no ++ * merging will take place. ++ * We do not want to add the cost of a full lock; if ++ * the page is locked, it is better to skip it and ++ * perhaps try again later. ++ */ ++ if (!trylock_page(page)) ++ return; ++ split_huge_page(page); ++ unlock_page(page); + } + } + } +diff --git a/mm/mempolicy.c b/mm/mempolicy.c +index 2d3077ce50cd..ecbda7f5d494 100644 +--- a/mm/mempolicy.c ++++ b/mm/mempolicy.c +@@ -2128,6 +2128,9 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) + case MPOL_INTERLEAVE: + return !!nodes_equal(a->v.nodes, b->v.nodes); + case MPOL_PREFERRED: ++ /* a's ->flags is the same as b's */ ++ if (a->flags & MPOL_F_LOCAL) ++ return true; + return a->v.preferred_node == b->v.preferred_node; + default: + BUG(); +diff --git a/mm/page_idle.c b/mm/page_idle.c +index 0a49374e6931..e412a63b2b74 100644 +--- a/mm/page_idle.c ++++ b/mm/page_idle.c +@@ -65,11 +65,15 @@ static bool page_idle_clear_pte_refs_one(struct page *page, + while (page_vma_mapped_walk(&pvmw)) { + addr = pvmw.address; + if (pvmw.pte) { +- referenced = ptep_clear_young_notify(vma, addr, +- pvmw.pte); ++ /* ++ * For PTE-mapped THP, one sub page is referenced, ++ * the whole THP is referenced. ++ */ ++ if (ptep_clear_young_notify(vma, addr, pvmw.pte)) ++ referenced = true; + } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { +- referenced = pmdp_clear_young_notify(vma, addr, +- pvmw.pmd); ++ if (pmdp_clear_young_notify(vma, addr, pvmw.pmd)) ++ referenced = true; + } else { + /* unexpected pmd-mapped page? 
*/ + WARN_ON_ONCE(1); +diff --git a/mm/page_owner.c b/mm/page_owner.c +index 4f44b95b9d1e..a71fe4c623ef 100644 +--- a/mm/page_owner.c ++++ b/mm/page_owner.c +@@ -123,13 +123,13 @@ void __reset_page_owner(struct page *page, unsigned int order) + static inline bool check_recursive_alloc(struct stack_trace *trace, + unsigned long ip) + { +- int i, count; ++ int i; + + if (!trace->nr_entries) + return false; + +- for (i = 0, count = 0; i < trace->nr_entries; i++) { +- if (trace->entries[i] == ip && ++count == 2) ++ for (i = 0; i < trace->nr_entries; i++) { ++ if (trace->entries[i] == ip) + return true; + } + +diff --git a/mm/slab.c b/mm/slab.c +index 1bfc3d847a0a..198c1e2c5358 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -1283,6 +1283,7 @@ void __init kmem_cache_init(void) + nr_node_ids * sizeof(struct kmem_cache_node *), + SLAB_HWCACHE_ALIGN); + list_add(&kmem_cache->list, &slab_caches); ++ memcg_link_cache(kmem_cache); + slab_state = PARTIAL; + + /* +diff --git a/mm/swapfile.c b/mm/swapfile.c +index e47a21e64764..03d2ce288d83 100644 +--- a/mm/swapfile.c ++++ b/mm/swapfile.c +@@ -2954,6 +2954,10 @@ static unsigned long read_swap_header(struct swap_info_struct *p, + maxpages = swp_offset(pte_to_swp_entry( + swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; + last_page = swap_header->info.last_page; ++ if (!last_page) { ++ pr_warn("Empty swap-file\n"); ++ return 0; ++ } + if (last_page > maxpages) { + pr_warn("Truncating oversized swap area, only using %luk out of %luk\n", + maxpages << (PAGE_SHIFT - 10), +diff --git a/mm/vmscan.c b/mm/vmscan.c +index b3f5e337b64a..1a581468a9cf 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -3961,7 +3961,13 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) + */ + int page_evictable(struct page *page) + { +- return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page); ++ int ret; ++ ++ /* Prevent address_space of inode and swap cache from being freed */ ++ rcu_read_lock(); ++ ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page); ++ rcu_read_unlock(); ++ return ret; + } + + #ifdef CONFIG_SHMEM +diff --git a/mm/vmstat.c b/mm/vmstat.c +index 4bb13e72ac97..e085b13c572e 100644 +--- a/mm/vmstat.c ++++ b/mm/vmstat.c +@@ -1770,9 +1770,11 @@ static void vmstat_update(struct work_struct *w) + * to occur in the future. Keep on running the + * update worker thread. 
+ */ ++ preempt_disable(); + queue_delayed_work_on(smp_processor_id(), mm_percpu_wq, + this_cpu_ptr(&vmstat_work), + round_jiffies_relative(sysctl_stat_interval)); ++ preempt_enable(); + } + } + +diff --git a/mm/z3fold.c b/mm/z3fold.c +index ddfb20cfd9af..f33403d718ac 100644 +--- a/mm/z3fold.c ++++ b/mm/z3fold.c +@@ -469,6 +469,8 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp, + spin_lock_init(&pool->lock); + spin_lock_init(&pool->stale_lock); + pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2); ++ if (!pool->unbuddied) ++ goto out_pool; + for_each_possible_cpu(cpu) { + struct list_head *unbuddied = + per_cpu_ptr(pool->unbuddied, cpu); +@@ -481,7 +483,7 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp, + pool->name = name; + pool->compact_wq = create_singlethread_workqueue(pool->name); + if (!pool->compact_wq) +- goto out; ++ goto out_unbuddied; + pool->release_wq = create_singlethread_workqueue(pool->name); + if (!pool->release_wq) + goto out_wq; +@@ -491,8 +493,11 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp, + + out_wq: + destroy_workqueue(pool->compact_wq); +-out: ++out_unbuddied: ++ free_percpu(pool->unbuddied); ++out_pool: + kfree(pool); ++out: + return NULL; + } + +diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c +index 64aa9f755e1d..45c9bf5ff3a0 100644 +--- a/net/8021q/vlan_core.c ++++ b/net/8021q/vlan_core.c +@@ -48,8 +48,8 @@ bool vlan_do_receive(struct sk_buff **skbp) + * original position later + */ + skb_push(skb, offset); +- skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto, +- skb->vlan_tci); ++ skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto, ++ skb->vlan_tci, skb->mac_len); + if (!skb) + return false; + skb_pull(skb, offset + VLAN_HLEN); +diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c +index 83ba5483455a..71d8809fbe94 100644 +--- a/net/batman-adv/bat_iv_ogm.c ++++ b/net/batman-adv/bat_iv_ogm.c +@@ -2719,7 +2719,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_neigh_ifinfo *router_ifinfo = NULL; + struct batadv_neigh_node *router; + struct batadv_gw_node *curr_gw; +- int ret = -EINVAL; ++ int ret = 0; + void *hdr; + + router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); +diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c +index 4e2724c5b33d..a8f4c3902cf5 100644 +--- a/net/batman-adv/bat_v.c ++++ b/net/batman-adv/bat_v.c +@@ -930,7 +930,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, + struct batadv_neigh_ifinfo *router_ifinfo = NULL; + struct batadv_neigh_node *router; + struct batadv_gw_node *curr_gw; +- int ret = -EINVAL; ++ int ret = 0; + void *hdr; + + router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); +diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c +index cdd8e8e4df0b..422ee16b7854 100644 +--- a/net/batman-adv/bridge_loop_avoidance.c ++++ b/net/batman-adv/bridge_loop_avoidance.c +@@ -2161,22 +2161,25 @@ batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, + { + struct batadv_bla_claim *claim; + int idx = 0; ++ int ret = 0; + + rcu_read_lock(); + hlist_for_each_entry_rcu(claim, head, hash_entry) { + if (idx++ < *idx_skip) + continue; +- if (batadv_bla_claim_dump_entry(msg, portid, seq, +- primary_if, claim)) { ++ ++ ret = batadv_bla_claim_dump_entry(msg, portid, seq, ++ primary_if, claim); ++ if (ret) { + *idx_skip = idx - 1; 
+ goto unlock; + } + } + +- *idx_skip = idx; ++ *idx_skip = 0; + unlock: + rcu_read_unlock(); +- return 0; ++ return ret; + } + + /** +@@ -2391,22 +2394,25 @@ batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, + { + struct batadv_bla_backbone_gw *backbone_gw; + int idx = 0; ++ int ret = 0; + + rcu_read_lock(); + hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { + if (idx++ < *idx_skip) + continue; +- if (batadv_bla_backbone_dump_entry(msg, portid, seq, +- primary_if, backbone_gw)) { ++ ++ ret = batadv_bla_backbone_dump_entry(msg, portid, seq, ++ primary_if, backbone_gw); ++ if (ret) { + *idx_skip = idx - 1; + goto unlock; + } + } + +- *idx_skip = idx; ++ *idx_skip = 0; + unlock: + rcu_read_unlock(); +- return 0; ++ return ret; + } + + /** +diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c +index b6cfa78e9381..4f0111bc6621 100644 +--- a/net/batman-adv/distributed-arp-table.c ++++ b/net/batman-adv/distributed-arp-table.c +@@ -391,7 +391,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb, + batadv_arp_hw_src(skb, hdr_size), &ip_src, + batadv_arp_hw_dst(skb, hdr_size), &ip_dst); + +- if (hdr_size == 0) ++ if (hdr_size < sizeof(struct batadv_unicast_packet)) + return; + + unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; +diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c +index a98cf1104a30..b6abd19ab23e 100644 +--- a/net/batman-adv/fragmentation.c ++++ b/net/batman-adv/fragmentation.c +@@ -287,7 +287,8 @@ batadv_frag_merge_packets(struct hlist_head *chain) + /* Move the existing MAC header to just before the payload. (Override + * the fragment header.) + */ +- skb_pull_rcsum(skb_out, hdr_size); ++ skb_pull(skb_out, hdr_size); ++ skb_out->ip_summed = CHECKSUM_NONE; + memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN); + skb_set_mac_header(skb_out, -ETH_HLEN); + skb_reset_network_header(skb_out); +diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c +index de9955d5224d..06276ae9f752 100644 +--- a/net/batman-adv/gateway_client.c ++++ b/net/batman-adv/gateway_client.c +@@ -705,7 +705,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, + { + struct batadv_neigh_node *neigh_curr = NULL; + struct batadv_neigh_node *neigh_old = NULL; +- struct batadv_orig_node *orig_dst_node; ++ struct batadv_orig_node *orig_dst_node = NULL; + struct batadv_gw_node *gw_node = NULL; + struct batadv_gw_node *curr_gw = NULL; + struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo; +@@ -716,6 +716,9 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, + + vid = batadv_get_vid(skb, 0); + ++ if (is_multicast_ether_addr(ethhdr->h_dest)) ++ goto out; ++ + orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, + ethhdr->h_dest, vid); + if (!orig_dst_node) +diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c +index d327670641ac..fa02fb73367c 100644 +--- a/net/batman-adv/multicast.c ++++ b/net/batman-adv/multicast.c +@@ -540,8 +540,8 @@ static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv) + bat_priv->mcast.enabled = true; + } + +- return !(mcast_data.flags & +- (BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6)); ++ return !(mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV4 && ++ mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV6); + } + + /** +@@ -809,8 +809,8 @@ static struct batadv_orig_node * + batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv, + struct 
ethhdr *ethhdr) + { +- return batadv_transtable_search(bat_priv, ethhdr->h_source, +- ethhdr->h_dest, BATADV_NO_FLAGS); ++ return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest, ++ BATADV_NO_FLAGS); + } + + /** +diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c +index f10e3ff26f9d..cd82cff716c7 100644 +--- a/net/batman-adv/routing.c ++++ b/net/batman-adv/routing.c +@@ -743,6 +743,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb, + /** + * batadv_reroute_unicast_packet - update the unicast header for re-routing + * @bat_priv: the bat priv with all the soft interface information ++ * @skb: unicast packet to process + * @unicast_packet: the unicast header to be updated + * @dst_addr: the payload destination + * @vid: VLAN identifier +@@ -754,7 +755,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb, + * Return: true if the packet header has been updated, false otherwise + */ + static bool +-batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, ++batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, + struct batadv_unicast_packet *unicast_packet, + u8 *dst_addr, unsigned short vid) + { +@@ -783,8 +784,10 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, + } + + /* update the packet header */ ++ skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet)); + ether_addr_copy(unicast_packet->dest, orig_addr); + unicast_packet->ttvn = orig_ttvn; ++ skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet)); + + ret = true; + out: +@@ -825,7 +828,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, + * the packet to + */ + if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) { +- if (batadv_reroute_unicast_packet(bat_priv, unicast_packet, ++ if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet, + ethhdr->h_dest, vid)) + batadv_dbg_ratelimited(BATADV_DBG_TT, + bat_priv, +@@ -871,7 +874,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, + * destination can possibly be updated and forwarded towards the new + * target host + */ +- if (batadv_reroute_unicast_packet(bat_priv, unicast_packet, ++ if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet, + ethhdr->h_dest, vid)) { + batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv, + "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n", +@@ -894,12 +897,14 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, + if (!primary_if) + return false; + ++ /* update the packet header */ ++ skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet)); + ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr); ++ unicast_packet->ttvn = curr_ttvn; ++ skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet)); + + batadv_hardif_put(primary_if); + +- unicast_packet->ttvn = curr_ttvn; +- + return true; + } + +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c +index 10f7edfb176e..aa2c49fa31ce 100644 +--- a/net/batman-adv/soft-interface.c ++++ b/net/batman-adv/soft-interface.c +@@ -451,13 +451,7 @@ void batadv_interface_rx(struct net_device *soft_iface, + + /* skb->dev & skb->pkt_type are set here */ + skb->protocol = eth_type_trans(skb, soft_iface); +- +- /* should not be necessary anymore as we use skb_pull_rcsum() +- * TODO: please verify this and remove this TODO +- * -- Dec 21st 2009, Simon Wunderlich +- */ +- +- /* skb->ip_summed = CHECKSUM_UNNECESSARY; */ ++ skb_postpull_rcsum(skb, 
eth_hdr(skb), ETH_HLEN); + + batadv_inc_counter(bat_priv, BATADV_CNT_RX); + batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c +index 2800c4c4978c..5b8cd359c4c0 100644 +--- a/net/bridge/netfilter/ebtables.c ++++ b/net/bridge/netfilter/ebtables.c +@@ -1641,7 +1641,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr, + int off = ebt_compat_match_offset(match, m->match_size); + compat_uint_t msize = m->match_size - off; + +- BUG_ON(off >= m->match_size); ++ if (WARN_ON(off >= m->match_size)) ++ return -EINVAL; + + if (copy_to_user(cm->u.name, match->name, + strlen(match->name) + 1) || put_user(msize, &cm->match_size)) +@@ -1671,7 +1672,8 @@ static int compat_target_to_user(struct ebt_entry_target *t, + int off = xt_compat_target_offset(target); + compat_uint_t tsize = t->target_size - off; + +- BUG_ON(off >= t->target_size); ++ if (WARN_ON(off >= t->target_size)) ++ return -EINVAL; + + if (copy_to_user(cm->u.name, target->name, + strlen(target->name) + 1) || put_user(tsize, &cm->match_size)) +@@ -1907,7 +1909,8 @@ static int ebt_buf_add(struct ebt_entries_buf_state *state, + if (state->buf_kern_start == NULL) + goto count_only; + +- BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len); ++ if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len)) ++ return -EINVAL; + + memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz); + +@@ -1920,7 +1923,8 @@ static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz) + { + char *b = state->buf_kern_start; + +- BUG_ON(b && state->buf_kern_offset > state->buf_kern_len); ++ if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len)) ++ return -EINVAL; + + if (b != NULL && sz > 0) + memset(b + state->buf_kern_offset, 0, sz); +@@ -1997,8 +2001,10 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, + pad = XT_ALIGN(size_kern) - size_kern; + + if (pad > 0 && dst) { +- BUG_ON(state->buf_kern_len <= pad); +- BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad); ++ if (WARN_ON(state->buf_kern_len <= pad)) ++ return -EINVAL; ++ if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad)) ++ return -EINVAL; + memset(dst + size_kern, 0, pad); + } + return off + match_size; +@@ -2048,7 +2054,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, + if (ret < 0) + return ret; + +- BUG_ON(ret < match32->match_size); ++ if (WARN_ON(ret < match32->match_size)) ++ return -EINVAL; + growth += ret - match32->match_size; + growth += ebt_compat_entry_padsize(); + +@@ -2117,8 +2124,12 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, + * offsets are relative to beginning of struct ebt_entry (i.e., 0). 
+ */ + for (i = 0; i < 4 ; ++i) { +- if (offsets[i] >= *total) ++ if (offsets[i] > *total) ++ return -EINVAL; ++ ++ if (i < 3 && offsets[i] == *total) + return -EINVAL; ++ + if (i == 0) + continue; + if (offsets[i-1] > offsets[i]) +@@ -2157,7 +2168,8 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, + + startoff = state->buf_user_offset - startoff; + +- BUG_ON(*total < startoff); ++ if (WARN_ON(*total < startoff)) ++ return -EINVAL; + *total -= startoff; + return 0; + } +@@ -2286,7 +2298,8 @@ static int compat_do_replace(struct net *net, void __user *user, + state.buf_kern_len = size64; + + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); +- BUG_ON(ret < 0); /* parses same data again */ ++ if (WARN_ON(ret < 0)) ++ goto out_unlock; + + vfree(entries_tmp); + tmp.entries_size = size64; +diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c +index 5c036d2f401e..cdb5b693a135 100644 +--- a/net/ceph/ceph_common.c ++++ b/net/ceph/ceph_common.c +@@ -418,11 +418,15 @@ ceph_parse_options(char *options, const char *dev_name, + opt->flags |= CEPH_OPT_FSID; + break; + case Opt_name: ++ kfree(opt->name); + opt->name = kstrndup(argstr[0].from, + argstr[0].to-argstr[0].from, + GFP_KERNEL); + break; + case Opt_secret: ++ ceph_crypto_key_destroy(opt->key); ++ kfree(opt->key); ++ + opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); + if (!opt->key) { + err = -ENOMEM; +@@ -433,6 +437,9 @@ ceph_parse_options(char *options, const char *dev_name, + goto out; + break; + case Opt_key: ++ ceph_crypto_key_destroy(opt->key); ++ kfree(opt->key); ++ + opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); + if (!opt->key) { + err = -ENOMEM; +diff --git a/net/core/ethtool.c b/net/core/ethtool.c +index d374a904f1b1..490eab16b04b 100644 +--- a/net/core/ethtool.c ++++ b/net/core/ethtool.c +@@ -2505,11 +2505,14 @@ static int set_phy_tunable(struct net_device *dev, void __user *useraddr) + static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr) + { + struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM }; ++ int rc; + + if (!dev->ethtool_ops->get_fecparam) + return -EOPNOTSUPP; + +- dev->ethtool_ops->get_fecparam(dev, &fecparam); ++ rc = dev->ethtool_ops->get_fecparam(dev, &fecparam); ++ if (rc) ++ return rc; + + if (copy_to_user(useraddr, &fecparam, sizeof(fecparam))) + return -EFAULT; +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index ef734ad1d852..c132eca9e383 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -4939,13 +4939,18 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mtu); + + static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) + { ++ int mac_len; ++ + if (skb_cow(skb, skb_headroom(skb)) < 0) { + kfree_skb(skb); + return NULL; + } + +- memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN, +- 2 * ETH_ALEN); ++ mac_len = skb->data - skb_mac_header(skb); ++ if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { ++ memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), ++ mac_len - VLAN_HLEN - ETH_TLEN); ++ } + skb->mac_header += VLAN_HLEN; + return skb; + } +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c +index 23e6d5532b5c..2459e9cc22a6 100644 +--- a/net/ipv4/ip_gre.c ++++ b/net/ipv4/ip_gre.c +@@ -951,9 +951,6 @@ static void __gre_tunnel_init(struct net_device *dev) + + t_hlen = tunnel->hlen + sizeof(struct iphdr); + +- dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; +- dev->mtu = ETH_DATA_LEN - t_hlen - 4; +- + dev->features |= GRE_FEATURES; + dev->hw_features |= GRE_FEATURES; + +@@ -1253,8 +1250,6 
@@ static int erspan_tunnel_init(struct net_device *dev) + sizeof(struct erspanhdr); + t_hlen = tunnel->hlen + sizeof(struct iphdr); + +- dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; +- dev->mtu = ETH_DATA_LEN - t_hlen - 4; + dev->features |= GRE_FEATURES; + dev->hw_features |= GRE_FEATURES; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c +index a2fcc20774a6..4784f3f36b7e 100644 +--- a/net/ipv4/ip_tunnel.c ++++ b/net/ipv4/ip_tunnel.c +@@ -1103,8 +1103,14 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], + eth_hw_addr_random(dev); + + mtu = ip_tunnel_bind_dev(dev); +- if (!tb[IFLA_MTU]) ++ if (tb[IFLA_MTU]) { ++ unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen; ++ ++ dev->mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, ++ (unsigned int)(max - sizeof(struct iphdr))); ++ } else { + dev->mtu = mtu; ++ } + + ip_tunnel_add(itn, nt); + out: +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c +index c9cd891f69c2..5c5699c08575 100644 +--- a/net/ipv4/ip_vti.c ++++ b/net/ipv4/ip_vti.c +@@ -396,8 +396,6 @@ static int vti_tunnel_init(struct net_device *dev) + memcpy(dev->dev_addr, &iph->saddr, 4); + memcpy(dev->broadcast, &iph->daddr, 4); + +- dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); +- dev->mtu = ETH_DATA_LEN; + dev->flags = IFF_NOARP; + dev->addr_len = 4; + dev->features |= NETIF_F_LLTX; +diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c +index c07e9db95ccc..cc7c9d67ac19 100644 +--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c ++++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c +@@ -228,7 +228,6 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i, + c->hash_mode = i->hash_mode; + c->hash_initval = i->hash_initval; + refcount_set(&c->refcount, 1); +- refcount_set(&c->entries, 1); + + spin_lock_bh(&cn->lock); + if (__clusterip_config_find(net, ip)) { +@@ -259,8 +258,10 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i, + + c->notifier.notifier_call = clusterip_netdev_event; + err = register_netdevice_notifier(&c->notifier); +- if (!err) ++ if (!err) { ++ refcount_set(&c->entries, 1); + return c; ++ } + + #ifdef CONFIG_PROC_FS + proc_remove(c->pde); +@@ -269,7 +270,7 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i, + spin_lock_bh(&cn->lock); + list_del_rcu(&c->list); + spin_unlock_bh(&cn->lock); +- kfree(c); ++ clusterip_config_put(c); + + return ERR_PTR(err); + } +@@ -492,12 +493,15 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par) + return PTR_ERR(config); + } + } +- cipinfo->config = config; + + ret = nf_ct_netns_get(par->net, par->family); +- if (ret < 0) ++ if (ret < 0) { + pr_info("cannot load conntrack support for proto=%u\n", + par->family); ++ clusterip_config_entry_put(par->net, config); ++ clusterip_config_put(config); ++ return ret; ++ } + + if (!par->net->xt.clusterip_deprecated_warning) { + pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, " +@@ -505,6 +509,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par) + par->net->xt.clusterip_deprecated_warning = true; + } + ++ cipinfo->config = config; + return ret; + } + +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 28bc3a98adc7..7afa8d2463d8 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -639,6 +639,7 @@ static inline u32 fnhe_hashfun(__be32 daddr) + static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) + { + rt->rt_pmtu = 
fnhe->fnhe_pmtu; ++ rt->rt_mtu_locked = fnhe->fnhe_mtu_locked; + rt->dst.expires = fnhe->fnhe_expires; + + if (fnhe->fnhe_gw) { +@@ -649,7 +650,7 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh + } + + static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, +- u32 pmtu, unsigned long expires) ++ u32 pmtu, bool lock, unsigned long expires) + { + struct fnhe_hash_bucket *hash; + struct fib_nh_exception *fnhe; +@@ -686,8 +687,10 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, + fnhe->fnhe_genid = genid; + if (gw) + fnhe->fnhe_gw = gw; +- if (pmtu) ++ if (pmtu) { + fnhe->fnhe_pmtu = pmtu; ++ fnhe->fnhe_mtu_locked = lock; ++ } + fnhe->fnhe_expires = max(1UL, expires); + /* Update all cached dsts too */ + rt = rcu_dereference(fnhe->fnhe_rth_input); +@@ -711,6 +714,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, + fnhe->fnhe_daddr = daddr; + fnhe->fnhe_gw = gw; + fnhe->fnhe_pmtu = pmtu; ++ fnhe->fnhe_mtu_locked = lock; + fnhe->fnhe_expires = max(1UL, expires); + + /* Exception created; mark the cached routes for the nexthop +@@ -792,7 +796,8 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow + struct fib_nh *nh = &FIB_RES_NH(res); + + update_or_create_fnhe(nh, fl4->daddr, new_gw, +- 0, jiffies + ip_rt_gc_timeout); ++ 0, false, ++ jiffies + ip_rt_gc_timeout); + } + if (kill_route) + rt->dst.obsolete = DST_OBSOLETE_KILL; +@@ -1005,15 +1010,18 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) + { + struct dst_entry *dst = &rt->dst; + struct fib_result res; ++ bool lock = false; + +- if (dst_metric_locked(dst, RTAX_MTU)) ++ if (ip_mtu_locked(dst)) + return; + + if (ipv4_mtu(dst) < mtu) + return; + +- if (mtu < ip_rt_min_pmtu) ++ if (mtu < ip_rt_min_pmtu) { ++ lock = true; + mtu = ip_rt_min_pmtu; ++ } + + if (rt->rt_pmtu == mtu && + time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2)) +@@ -1023,7 +1031,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) + if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) { + struct fib_nh *nh = &FIB_RES_NH(res); + +- update_or_create_fnhe(nh, fl4->daddr, 0, mtu, ++ update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock, + jiffies + ip_rt_mtu_expires); + } + rcu_read_unlock(); +@@ -1276,7 +1284,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) + + mtu = READ_ONCE(dst->dev->mtu); + +- if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { ++ if (unlikely(ip_mtu_locked(dst))) { + if (rt->rt_uses_gateway && mtu > 576) + mtu = 576; + } +@@ -1548,6 +1556,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev, + rt->rt_is_input = 0; + rt->rt_iif = 0; + rt->rt_pmtu = 0; ++ rt->rt_mtu_locked = 0; + rt->rt_gateway = 0; + rt->rt_uses_gateway = 0; + rt->rt_table_id = 0; +@@ -2526,6 +2535,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or + rt->rt_is_input = ort->rt_is_input; + rt->rt_iif = ort->rt_iif; + rt->rt_pmtu = ort->rt_pmtu; ++ rt->rt_mtu_locked = ort->rt_mtu_locked; + + rt->rt_genid = rt_genid_ipv4(net); + rt->rt_flags = ort->rt_flags; +@@ -2628,6 +2638,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id, + memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); + if (rt->rt_pmtu && expires) + metrics[RTAX_MTU - 1] = rt->rt_pmtu; ++ if (rt->rt_mtu_locked && expires) ++ metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU); + if (rtnetlink_put_metrics(skb, metrics) < 0) + goto 
nla_put_failure; + +diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c +index 7c843578f233..faddf4f9a707 100644 +--- a/net/ipv4/tcp_illinois.c ++++ b/net/ipv4/tcp_illinois.c +@@ -6,7 +6,7 @@ + * The algorithm is described in: + * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm + * for High-Speed Networks" +- * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf ++ * http://tamerbasar.csl.illinois.edu/LiuBasarSrikantPerfEvalArtJun2008.pdf + * + * Implemented from description in paper and ns-2 simulation. + * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org> +diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c +index 05017e2c849c..4b586e7d5637 100644 +--- a/net/ipv4/xfrm4_policy.c ++++ b/net/ipv4/xfrm4_policy.c +@@ -100,6 +100,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, + xdst->u.rt.rt_gateway = rt->rt_gateway; + xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway; + xdst->u.rt.rt_pmtu = rt->rt_pmtu; ++ xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked; + xdst->u.rt.rt_table_id = rt->rt_table_id; + INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); + +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c +index d61a82fd4b60..565a0388587a 100644 +--- a/net/ipv6/ip6_tunnel.c ++++ b/net/ipv6/ip6_tunnel.c +@@ -1990,14 +1990,14 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, + { + struct net *net = dev_net(dev); + struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); +- struct ip6_tnl *nt, *t; + struct ip_tunnel_encap ipencap; ++ struct ip6_tnl *nt, *t; ++ int err; + + nt = netdev_priv(dev); + + if (ip6_tnl_netlink_encap_parms(data, &ipencap)) { +- int err = ip6_tnl_encap_setup(nt, &ipencap); +- ++ err = ip6_tnl_encap_setup(nt, &ipencap); + if (err < 0) + return err; + } +@@ -2013,7 +2013,11 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, + return -EEXIST; + } + +- return ip6_tnl_create2(dev); ++ err = ip6_tnl_create2(dev); ++ if (!err && tb[IFLA_MTU]) ++ ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU])); ++ ++ return err; + } + + static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c +index 2493a40bc4b1..0e0ab90a4334 100644 +--- a/net/ipv6/ip6_vti.c ++++ b/net/ipv6/ip6_vti.c +@@ -852,7 +852,7 @@ static void vti6_dev_setup(struct net_device *dev) + dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr); + dev->mtu = ETH_DATA_LEN; + dev->min_mtu = IPV6_MIN_MTU; +- dev->max_mtu = IP_MAX_MTU; ++ dev->max_mtu = IP_MAX_MTU - sizeof(struct ipv6hdr); + dev->flags |= IFF_NOARP; + dev->addr_len = sizeof(struct in6_addr); + netif_keep_dst(dev); +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c +index b35d8905794c..ad1e7e6ce009 100644 +--- a/net/ipv6/sit.c ++++ b/net/ipv6/sit.c +@@ -1569,6 +1569,13 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev, + if (err < 0) + return err; + ++ if (tb[IFLA_MTU]) { ++ u32 mtu = nla_get_u32(tb[IFLA_MTU]); ++ ++ if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len) ++ dev->mtu = mtu; ++ } ++ + #ifdef CONFIG_IPV6_SIT_6RD + if (ipip6_netlink_6rd_parms(data, &ip6rd)) + err = ipip6_tunnel_update_6rd(nt, &ip6rd); +diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c +index f8d4ab8ca1a5..4b60f68cb492 100644 +--- a/net/llc/llc_c_ac.c ++++ b/net/llc/llc_c_ac.c +@@ -389,7 +389,7 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb) + llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); + rc = 
llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); + if (likely(!rc)) { +- llc_conn_send_pdu(sk, skb); ++ rc = llc_conn_send_pdu(sk, skb); + llc_conn_ac_inc_vs_by_1(sk, skb); + } + return rc; +@@ -916,7 +916,7 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk, + llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR); + rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); + if (likely(!rc)) { +- llc_conn_send_pdu(sk, skb); ++ rc = llc_conn_send_pdu(sk, skb); + llc_conn_ac_inc_vs_by_1(sk, skb); + } + return rc; +@@ -935,14 +935,17 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk, + int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb) + { + struct llc_sock *llc = llc_sk(sk); ++ int ret; + + if (llc->ack_must_be_send) { +- llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb); ++ ret = llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb); + llc->ack_must_be_send = 0 ; + llc->ack_pf = 0; +- } else +- llc_conn_ac_send_i_cmd_p_set_0(sk, skb); +- return 0; ++ } else { ++ ret = llc_conn_ac_send_i_cmd_p_set_0(sk, skb); ++ } ++ ++ return ret; + } + + /** +diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c +index 9a42448eb182..b084fd19ad32 100644 +--- a/net/llc/llc_conn.c ++++ b/net/llc/llc_conn.c +@@ -30,7 +30,7 @@ + #endif + + static int llc_find_offset(int state, int ev_type); +-static void llc_conn_send_pdus(struct sock *sk); ++static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb); + static int llc_conn_service(struct sock *sk, struct sk_buff *skb); + static int llc_exec_conn_trans_actions(struct sock *sk, + struct llc_conn_state_trans *trans, +@@ -193,11 +193,11 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) + return rc; + } + +-void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb) ++int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb) + { + /* queue PDU to send to MAC layer */ + skb_queue_tail(&sk->sk_write_queue, skb); +- llc_conn_send_pdus(sk); ++ return llc_conn_send_pdus(sk, skb); + } + + /** +@@ -255,7 +255,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit) + if (howmany_resend > 0) + llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO; + /* any PDUs to re-send are queued up; start sending to MAC */ +- llc_conn_send_pdus(sk); ++ llc_conn_send_pdus(sk, NULL); + out:; + } + +@@ -296,7 +296,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit) + if (howmany_resend > 0) + llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO; + /* any PDUs to re-send are queued up; start sending to MAC */ +- llc_conn_send_pdus(sk); ++ llc_conn_send_pdus(sk, NULL); + out:; + } + +@@ -340,12 +340,16 @@ int llc_conn_remove_acked_pdus(struct sock *sk, u8 nr, u16 *how_many_unacked) + /** + * llc_conn_send_pdus - Sends queued PDUs + * @sk: active connection ++ * @hold_skb: the skb held by caller, or NULL if does not care + * +- * Sends queued pdus to MAC layer for transmission. ++ * Sends queued pdus to MAC layer for transmission. When @hold_skb is ++ * NULL, always return 0. Otherwise, return 0 if @hold_skb is sent ++ * successfully, or 1 for failure. 
+ */ +-static void llc_conn_send_pdus(struct sock *sk) ++static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb) + { + struct sk_buff *skb; ++ int ret = 0; + + while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) { + struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); +@@ -357,10 +361,20 @@ static void llc_conn_send_pdus(struct sock *sk) + skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb); + if (!skb2) + break; +- skb = skb2; ++ dev_queue_xmit(skb2); ++ } else { ++ bool is_target = skb == hold_skb; ++ int rc; ++ ++ if (is_target) ++ skb_get(skb); ++ rc = dev_queue_xmit(skb); ++ if (is_target) ++ ret = rc; + } +- dev_queue_xmit(skb); + } ++ ++ return ret; + } + + /** +diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c +index 2849a1fc41c5..3a7cfe01ee6d 100644 +--- a/net/mac80211/agg-rx.c ++++ b/net/mac80211/agg-rx.c +@@ -8,6 +8,7 @@ + * Copyright 2007, Michael Wu <flamingice@sourmilk.net> + * Copyright 2007-2010, Intel Corporation + * Copyright(c) 2015-2017 Intel Deutschland GmbH ++ * Copyright (C) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as +@@ -322,9 +323,6 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta, + * driver so reject the timeout update. + */ + status = WLAN_STATUS_REQUEST_DECLINED; +- ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, +- tid, dialog_token, status, +- 1, buf_size, timeout); + goto end; + } + +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h +index 9675814f64db..894937bcd479 100644 +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -1466,7 +1466,7 @@ struct ieee802_11_elems { + const struct ieee80211_timeout_interval_ie *timeout_int; + const u8 *opmode_notif; + const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; +- const struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie; ++ struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie; + const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie; + + /* length of them, respectively */ +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c +index a550c707cd8a..96e57d7c2872 100644 +--- a/net/mac80211/mesh.c ++++ b/net/mac80211/mesh.c +@@ -1253,13 +1253,12 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata, + } + + static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata, +- struct ieee80211_mgmt *mgmt, size_t len) ++ struct ieee80211_mgmt *mgmt, size_t len, ++ struct ieee802_11_elems *elems) + { + struct ieee80211_mgmt *mgmt_fwd; + struct sk_buff *skb; + struct ieee80211_local *local = sdata->local; +- u8 *pos = mgmt->u.action.u.chan_switch.variable; +- size_t offset_ttl; + + skb = dev_alloc_skb(local->tx_headroom + len); + if (!skb) +@@ -1267,13 +1266,9 @@ static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata, + skb_reserve(skb, local->tx_headroom); + mgmt_fwd = skb_put(skb, len); + +- /* offset_ttl is based on whether the secondary channel +- * offset is available or not. Subtract 1 from the mesh TTL +- * and disable the initiator flag before forwarding. +- */ +- offset_ttl = (len < 42) ? 
7 : 10; +- *(pos + offset_ttl) -= 1; +- *(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; ++ elems->mesh_chansw_params_ie->mesh_ttl--; ++ elems->mesh_chansw_params_ie->mesh_flags &= ++ ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; + + memcpy(mgmt_fwd, mgmt, len); + eth_broadcast_addr(mgmt_fwd->da); +@@ -1321,7 +1316,7 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata, + + /* forward or re-broadcast the CSA frame */ + if (fwd_csa) { +- if (mesh_fwd_csa_frame(sdata, mgmt, len) < 0) ++ if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0) + mcsa_dbg(sdata, "Failed to forward the CSA frame"); + } + } +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index 4daafb07602f..dddd498e1338 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -3928,7 +3928,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, + if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | + IEEE80211_FCTL_TODS)) != + fast_rx->expected_ds_bits) +- goto drop; ++ return false; + + /* assign the key to drop unencrypted frames (later) + * and strip the IV/MIC if necessary +diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c +index ee0181778a42..029334835747 100644 +--- a/net/mac80211/spectmgmt.c ++++ b/net/mac80211/spectmgmt.c +@@ -8,6 +8,7 @@ + * Copyright 2007, Michael Wu <flamingice@sourmilk.net> + * Copyright 2007-2008, Intel Corporation + * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> ++ * Copyright (C) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as +@@ -27,7 +28,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, + u32 sta_flags, u8 *bssid, + struct ieee80211_csa_ie *csa_ie) + { +- enum nl80211_band new_band; ++ enum nl80211_band new_band = current_band; + int new_freq; + u8 new_chan_no; + struct ieee80211_channel *new_chan; +@@ -55,15 +56,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, + elems->ext_chansw_ie->new_operating_class, + &new_band)) { + sdata_info(sdata, +- "cannot understand ECSA IE operating class %d, disconnecting\n", ++ "cannot understand ECSA IE operating class, %d, ignoring\n", + elems->ext_chansw_ie->new_operating_class); +- return -EINVAL; + } + new_chan_no = elems->ext_chansw_ie->new_ch_num; + csa_ie->count = elems->ext_chansw_ie->count; + csa_ie->mode = elems->ext_chansw_ie->mode; + } else if (elems->ch_switch_ie) { +- new_band = current_band; + new_chan_no = elems->ch_switch_ie->new_ch_num; + csa_ie->count = elems->ch_switch_ie->count; + csa_ie->mode = elems->ch_switch_ie->mode; +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c +index 69615016d5bf..f1b496222bda 100644 +--- a/net/mac80211/sta_info.c ++++ b/net/mac80211/sta_info.c +@@ -314,7 +314,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, + + if (ieee80211_hw_check(hw, USES_RSS)) { + sta->pcpu_rx_stats = +- alloc_percpu(struct ieee80211_sta_rx_stats); ++ alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp); + if (!sta->pcpu_rx_stats) + goto free; + } +@@ -439,6 +439,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, + if (sta->sta.txq[0]) + kfree(to_txq_info(sta->sta.txq[0])); + free: ++ free_percpu(sta->pcpu_rx_stats); + #ifdef CONFIG_MAC80211_MESH + kfree(sta->mesh); + #endif +diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c +index 3e17d32b629d..58d5d05aec24 100644 +--- 
a/net/netfilter/ipvs/ip_vs_ftp.c ++++ b/net/netfilter/ipvs/ip_vs_ftp.c +@@ -260,7 +260,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, + buf_len = strlen(buf); + + ct = nf_ct_get(skb, &ctinfo); +- if (ct && (ct->status & IPS_NAT_MASK)) { ++ if (ct) { + bool mangled; + + /* If mangling fails this function will return 0 +diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c +index 22dc1b9d6362..c070dfc0190a 100644 +--- a/net/netlabel/netlabel_unlabeled.c ++++ b/net/netlabel/netlabel_unlabeled.c +@@ -1472,6 +1472,16 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb, + iface = rcu_dereference(netlbl_unlhsh_def); + if (iface == NULL || !iface->valid) + goto unlabel_getattr_nolabel; ++ ++#if IS_ENABLED(CONFIG_IPV6) ++ /* When resolving a fallback label, check the sk_buff version as ++ * it is possible (e.g. SCTP) to have family = PF_INET6 while ++ * receiving ip_hdr(skb)->version = 4. ++ */ ++ if (family == PF_INET6 && ip_hdr(skb)->version == 4) ++ family = PF_INET; ++#endif /* IPv6 */ ++ + switch (family) { + case PF_INET: { + struct iphdr *hdr4; +diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c +index 367d8c027101..2ceefa183cee 100644 +--- a/net/nfc/llcp_commands.c ++++ b/net/nfc/llcp_commands.c +@@ -149,6 +149,10 @@ struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri, + + pr_debug("uri: %s, len: %zu\n", uri, uri_len); + ++ /* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */ ++ if (WARN_ON_ONCE(uri_len > U8_MAX - 4)) ++ return NULL; ++ + sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL); + if (sdreq == NULL) + return NULL; +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c +index b251fb936a27..08ed6abe4aae 100644 +--- a/net/nfc/netlink.c ++++ b/net/nfc/netlink.c +@@ -61,7 +61,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = { + }; + + static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = { +- [NFC_SDP_ATTR_URI] = { .type = NLA_STRING }, ++ [NFC_SDP_ATTR_URI] = { .type = NLA_STRING, ++ .len = U8_MAX - 4 }, + [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 }, + }; + +diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c +index 50615d5efac1..9cf089b9754e 100644 +--- a/net/qrtr/smd.c ++++ b/net/qrtr/smd.c +@@ -114,5 +114,6 @@ static struct rpmsg_driver qcom_smd_qrtr_driver = { + + module_rpmsg_driver(qcom_smd_qrtr_driver); + ++MODULE_ALIAS("rpmsg:IPCRTR"); + MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver"); + MODULE_LICENSE("GPL v2"); +diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c +index c061d6eb465d..22571189f21e 100644 +--- a/net/rds/tcp_listen.c ++++ b/net/rds/tcp_listen.c +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2006 Oracle. All rights reserved. ++ * Copyright (c) 2006, 2018 Oracle. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU +@@ -142,12 +142,20 @@ int rds_tcp_accept_one(struct socket *sock) + if (ret) + goto out; + +- new_sock->type = sock->type; +- new_sock->ops = sock->ops; + ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true); + if (ret < 0) + goto out; + ++ /* sock_create_lite() does not get a hold on the owner module so we ++ * need to do it here. Note that sock_release() uses sock->ops to ++ * determine if it needs to decrement the reference count. So set ++ * sock->ops after calling accept() in case that fails. 
And there's ++ * no need to do try_module_get() as the listener should have a hold ++ * already. ++ */ ++ new_sock->ops = sock->ops; ++ __module_get(new_sock->ops->owner); ++ + ret = rds_tcp_keepalive(new_sock); + if (ret < 0) + goto out; +diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c +index e56e23ed2229..5edb636dbc4d 100644 +--- a/net/rxrpc/input.c ++++ b/net/rxrpc/input.c +@@ -1175,16 +1175,19 @@ void rxrpc_data_ready(struct sock *udp_sk) + goto discard_unlock; + + if (sp->hdr.callNumber == chan->last_call) { +- /* For the previous service call, if completed successfully, we +- * discard all further packets. ++ if (chan->call || ++ sp->hdr.type == RXRPC_PACKET_TYPE_ABORT) ++ goto discard_unlock; ++ ++ /* For the previous service call, if completed ++ * successfully, we discard all further packets. + */ + if (rxrpc_conn_is_service(conn) && +- (chan->last_type == RXRPC_PACKET_TYPE_ACK || +- sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)) ++ chan->last_type == RXRPC_PACKET_TYPE_ACK) + goto discard_unlock; + +- /* But otherwise we need to retransmit the final packet from +- * data cached in the connection record. ++ /* But otherwise we need to retransmit the final packet ++ * from data cached in the connection record. + */ + rxrpc_post_packet_to_conn(conn, skb); + goto out_unlock; +diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c +index bdece21f313d..abcf48026d99 100644 +--- a/net/rxrpc/recvmsg.c ++++ b/net/rxrpc/recvmsg.c +@@ -513,9 +513,10 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, + sizeof(unsigned int), &id32); + } else { ++ unsigned long idl = call->user_call_ID; ++ + ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, +- sizeof(unsigned long), +- &call->user_call_ID); ++ sizeof(unsigned long), &idl); + } + if (ret < 0) + goto error_unlock_call; +diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c +index d2f51d6a253c..016e293681b8 100644 +--- a/net/rxrpc/sendmsg.c ++++ b/net/rxrpc/sendmsg.c +@@ -92,7 +92,9 @@ static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix) + spin_lock_bh(&call->lock); + + if (call->state < RXRPC_CALL_COMPLETE) { +- call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS; ++ call->rxtx_annotations[ix] = ++ (call->rxtx_annotations[ix] & RXRPC_TX_ANNO_LAST) | ++ RXRPC_TX_ANNO_RETRANS; + if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events)) + rxrpc_queue_call(call); + } +diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c +index 2b087623fb1d..364a878e51cb 100644 +--- a/net/sched/act_bpf.c ++++ b/net/sched/act_bpf.c +@@ -356,7 +356,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, + return res; + out: + if (res == ACT_P_CREATED) +- tcf_idr_cleanup(*act, est); ++ tcf_idr_release(*act, bind); + + return ret; + } +diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c +index d9e399a7e3d5..18b2fd2ba7d7 100644 +--- a/net/sched/act_ipt.c ++++ b/net/sched/act_ipt.c +@@ -80,9 +80,12 @@ static void ipt_destroy_target(struct xt_entry_target *t) + static void tcf_ipt_release(struct tc_action *a, int bind) + { + struct tcf_ipt *ipt = to_ipt(a); +- ipt_destroy_target(ipt->tcfi_t); ++ ++ if (ipt->tcfi_t) { ++ ipt_destroy_target(ipt->tcfi_t); ++ kfree(ipt->tcfi_t); ++ } + kfree(ipt->tcfi_tname); +- kfree(ipt->tcfi_t); + } + + static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = { +@@ -187,7 +190,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, + kfree(tname); + err1: + if (ret == ACT_P_CREATED) +- 
tcf_idr_cleanup(*a, est); ++ tcf_idr_release(*a, bind); + return err; + } + +diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c +index 491fe5deb09e..51ab463d9e16 100644 +--- a/net/sched/act_pedit.c ++++ b/net/sched/act_pedit.c +@@ -176,7 +176,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, + p = to_pedit(*a); + keys = kmalloc(ksize, GFP_KERNEL); + if (keys == NULL) { +- tcf_idr_cleanup(*a, est); ++ tcf_idr_release(*a, bind); + kfree(keys_ex); + return -ENOMEM; + } +diff --git a/net/sched/act_police.c b/net/sched/act_police.c +index 3bb2ebf9e9ae..c16127109f21 100644 +--- a/net/sched/act_police.c ++++ b/net/sched/act_police.c +@@ -194,7 +194,7 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla, + qdisc_put_rtab(P_tab); + qdisc_put_rtab(R_tab); + if (ret == ACT_P_CREATED) +- tcf_idr_cleanup(*a, est); ++ tcf_idr_release(*a, bind); + return err; + } + +diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c +index 8b5abcd2f32f..53752b9327d0 100644 +--- a/net/sched/act_sample.c ++++ b/net/sched/act_sample.c +@@ -103,7 +103,8 @@ static void tcf_sample_cleanup_rcu(struct rcu_head *rcu) + + psample_group = rcu_dereference_protected(s->psample_group, 1); + RCU_INIT_POINTER(s->psample_group, NULL); +- psample_group_put(psample_group); ++ if (psample_group) ++ psample_group_put(psample_group); + } + + static void tcf_sample_cleanup(struct tc_action *a, int bind) +diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c +index e7b57e5071a3..b5f80e675783 100644 +--- a/net/sched/act_simple.c ++++ b/net/sched/act_simple.c +@@ -121,7 +121,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, + d = to_defact(*a); + ret = alloc_defdata(d, defdata); + if (ret < 0) { +- tcf_idr_cleanup(*a, est); ++ tcf_idr_release(*a, bind); + return ret; + } + d->tcf_action = parm->action; +diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c +index 821823b2518a..d227599f7e73 100644 +--- a/net/sched/act_skbmod.c ++++ b/net/sched/act_skbmod.c +@@ -155,7 +155,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, + ASSERT_RTNL(); + p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL); + if (unlikely(!p)) { +- if (ovr) ++ if (ret == ACT_P_CREATED) + tcf_idr_release(*a, bind); + return -ENOMEM; + } +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c +index 7166e7ecbe86..f04a037dc967 100644 +--- a/net/smc/smc_core.c ++++ b/net/smc/smc_core.c +@@ -174,6 +174,7 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr, + + lnk = &lgr->lnk[SMC_SINGLE_LINK]; + /* initialize link */ ++ lnk->link_id = SMC_SINGLE_LINK; + lnk->smcibdev = smcibdev; + lnk->ibport = ibport; + lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu; +diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c +index 9033b8a36fe1..4410d0071515 100644 +--- a/net/smc/smc_ib.c ++++ b/net/smc/smc_ib.c +@@ -23,6 +23,8 @@ + #include "smc_wr.h" + #include "smc.h" + ++#define SMC_MAX_CQE 32766 /* max. 
# of completion queue elements */ ++ + #define SMC_QP_MIN_RNR_TIMER 5 + #define SMC_QP_TIMEOUT 15 /* 4096 * 2 ** timeout usec */ + #define SMC_QP_RETRY_CNT 7 /* 7: infinite */ +@@ -435,9 +437,15 @@ int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport) + long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev) + { + struct ib_cq_init_attr cqattr = { +- .cqe = SMC_WR_MAX_CQE, .comp_vector = 0 }; ++ .cqe = SMC_MAX_CQE, .comp_vector = 0 }; ++ int cqe_size_order, smc_order; + long rc; + ++ /* the calculated number of cq entries fits to mlx5 cq allocation */ ++ cqe_size_order = cache_line_size() == 128 ? 7 : 6; ++ smc_order = MAX_ORDER - cqe_size_order - 1; ++ if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE) ++ cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2; + smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev, + smc_wr_tx_cq_handler, NULL, + smcibdev, &cqattr); +diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c +index 92fe4cc8c82c..b4aa4fcedb96 100644 +--- a/net/smc/smc_llc.c ++++ b/net/smc/smc_llc.c +@@ -92,7 +92,7 @@ int smc_llc_send_confirm_link(struct smc_link *link, u8 mac[], + memcpy(confllc->sender_mac, mac, ETH_ALEN); + memcpy(confllc->sender_gid, gid, SMC_GID_SIZE); + hton24(confllc->sender_qp_num, link->roce_qp->qp_num); +- /* confllc->link_num = SMC_SINGLE_LINK; already done by memset above */ ++ confllc->link_num = link->link_id; + memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE); + confllc->max_links = SMC_LINKS_PER_LGR_MAX; + /* send llc message */ +diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h +index 2acf12b06063..c307402e67d6 100644 +--- a/net/smc/smc_wr.h ++++ b/net/smc/smc_wr.h +@@ -19,7 +19,6 @@ + #include "smc.h" + #include "smc_core.h" + +-#define SMC_WR_MAX_CQE 32768 /* max. # of completion queue elements */ + #define SMC_WR_BUF_CNT 16 /* # of ctrl buffers per link */ + + #define SMC_WR_TX_WAIT_FREE_SLOT_TIME (10 * HZ) +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c +index dfef930d1e50..ffb1a3a69bdd 100644 +--- a/net/tls/tls_main.c ++++ b/net/tls/tls_main.c +@@ -299,7 +299,8 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, + goto out; + } + lock_sock(sk); +- memcpy(crypto_info_aes_gcm_128->iv, ctx->iv, ++ memcpy(crypto_info_aes_gcm_128->iv, ++ ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, + TLS_CIPHER_AES_GCM_128_IV_SIZE); + release_sock(sk); + if (copy_to_user(optval, +diff --git a/net/wireless/sme.c b/net/wireless/sme.c +index 3dd05a08c60a..d014aea07160 100644 +--- a/net/wireless/sme.c ++++ b/net/wireless/sme.c +@@ -989,6 +989,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, + wdev->current_bss = NULL; + wdev->ssid_len = 0; + wdev->conn_owner_nlportid = 0; ++ kzfree(wdev->connect_keys); ++ wdev->connect_keys = NULL; + + nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); + +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c +index 5b2409746ae0..9f492dc417d5 100644 +--- a/net/xfrm/xfrm_input.c ++++ b/net/xfrm/xfrm_input.c +@@ -26,6 +26,12 @@ struct xfrm_trans_tasklet { + }; + + struct xfrm_trans_cb { ++ union { ++ struct inet_skb_parm h4; ++#if IS_ENABLED(CONFIG_IPV6) ++ struct inet6_skb_parm h6; ++#endif ++ } header; + int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb); + }; + +diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c +index 73ad8c8ef344..35610cc881a9 100644 +--- a/net/xfrm/xfrm_output.c ++++ b/net/xfrm/xfrm_output.c +@@ -285,8 +285,9 @@ void xfrm_local_error(struct sk_buff *skb, int mtu) + return; + + afinfo 
= xfrm_state_get_afinfo(proto); +- if (afinfo) ++ if (afinfo) { + afinfo->local_error(skb, mtu); +- rcu_read_unlock(); ++ rcu_read_unlock(); ++ } + } + EXPORT_SYMBOL_GPL(xfrm_local_error); +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 7d17c207fc8a..9c57d6a5816c 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -1459,10 +1459,13 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl, + static int xfrm_get_tos(const struct flowi *fl, int family) + { + const struct xfrm_policy_afinfo *afinfo; +- int tos = 0; ++ int tos; + + afinfo = xfrm_policy_get_afinfo(family); +- tos = afinfo ? afinfo->get_tos(fl) : 0; ++ if (!afinfo) ++ return 0; ++ ++ tos = afinfo->get_tos(fl); + + rcu_read_unlock(); + +diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c +index 02501817227b..bdb9b5121ba8 100644 +--- a/net/xfrm/xfrm_replay.c ++++ b/net/xfrm/xfrm_replay.c +@@ -658,7 +658,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff + } else { + XFRM_SKB_CB(skb)->seq.output.low = oseq + 1; + XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi; +- xo->seq.low = oseq = oseq + 1; ++ xo->seq.low = oseq + 1; + xo->seq.hi = oseq_hi; + oseq += skb_shinfo(skb)->gso_segs; + } +diff --git a/scripts/adjust_autoksyms.sh b/scripts/adjust_autoksyms.sh +index 513da1a4a2da..d67830e6e360 100755 +--- a/scripts/adjust_autoksyms.sh ++++ b/scripts/adjust_autoksyms.sh +@@ -84,6 +84,13 @@ while read sympath; do + depfile="include/config/ksym/${sympath}.h" + mkdir -p "$(dirname "$depfile")" + touch "$depfile" ++ # Filesystems with coarse time precision may create timestamps ++ # equal to the one from a file that was very recently built and that ++ # needs to be rebuild. Let's guard against that by making sure our ++ # dep files are always newer than the first file we created here. ++ while [ ! "$depfile" -nt "$new_ksyms_file" ]; do ++ touch "$depfile" ++ done + echo $((count += 1)) + done | tail -1 ) + changed=${changed:-0} +diff --git a/scripts/package/builddeb b/scripts/package/builddeb +index 0bc87473f68f..e15159d0a884 100755 +--- a/scripts/package/builddeb ++++ b/scripts/package/builddeb +@@ -313,7 +313,7 @@ fi + + # Build kernel header package + (cd $srctree; find . 
-name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles" +-(cd $srctree; find arch/*/include include scripts -type f) >> "$objtree/debian/hdrsrcfiles" ++(cd $srctree; find arch/*/include include scripts -type f -o -type l) >> "$objtree/debian/hdrsrcfiles" + (cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles" + (cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles" + if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then +diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c +index 06554c448dce..9676c8887da9 100644 +--- a/security/integrity/digsig.c ++++ b/security/integrity/digsig.c +@@ -18,6 +18,7 @@ + #include <linux/cred.h> + #include <linux/key-type.h> + #include <linux/digsig.h> ++#include <linux/vmalloc.h> + #include <crypto/public_key.h> + #include <keys/system_keyring.h> + +diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig +index 35ef69312811..6a8f67714c83 100644 +--- a/security/integrity/ima/Kconfig ++++ b/security/integrity/ima/Kconfig +@@ -10,6 +10,7 @@ config IMA + select CRYPTO_HASH_INFO + select TCG_TPM if HAS_IOMEM && !UML + select TCG_TIS if TCG_TPM && X86 ++ select TCG_CRB if TCG_TPM && ACPI + select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES + help + The Trusted Computing Group(TCG) runtime Integrity +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c +index 802d5d20f36f..90453aa1c813 100644 +--- a/security/integrity/ima/ima_crypto.c ++++ b/security/integrity/ima/ima_crypto.c +@@ -78,6 +78,8 @@ int __init ima_init_crypto(void) + hash_algo_name[ima_hash_algo], rc); + return rc; + } ++ pr_info("Allocated hash algorithm: %s\n", ++ hash_algo_name[ima_hash_algo]); + return 0; + } + +diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c +index ab70a395f490..7e334fd31c15 100644 +--- a/security/integrity/ima/ima_main.c ++++ b/security/integrity/ima/ima_main.c +@@ -16,6 +16,9 @@ + * implements the IMA hooks: ima_bprm_check, ima_file_mmap, + * and ima_file_check. + */ ++ ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ + #include <linux/module.h> + #include <linux/file.h> + #include <linux/binfmts.h> +@@ -427,6 +430,16 @@ static int __init init_ima(void) + ima_init_template_list(); + hash_setup(CONFIG_IMA_DEFAULT_HASH); + error = ima_init(); ++ ++ if (error && strcmp(hash_algo_name[ima_hash_algo], ++ CONFIG_IMA_DEFAULT_HASH) != 0) { ++ pr_info("Allocating %s failed, going to use default hash algorithm %s\n", ++ hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH); ++ hash_setup_done = 0; ++ hash_setup(CONFIG_IMA_DEFAULT_HASH); ++ error = ima_init(); ++ } ++ + if (!error) { + ima_initialized = 1; + ima_update_policy_flag(); +diff --git a/sound/core/timer.c b/sound/core/timer.c +index 15e82a656d96..4fdc9e11e832 100644 +--- a/sound/core/timer.c ++++ b/sound/core/timer.c +@@ -592,7 +592,7 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop) + else + timeri->flags |= SNDRV_TIMER_IFLG_PAUSED; + snd_timer_notify1(timeri, stop ? 
SNDRV_TIMER_EVENT_STOP :
+- SNDRV_TIMER_EVENT_CONTINUE);
++ SNDRV_TIMER_EVENT_PAUSE);
+ unlock:
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return result;
+@@ -614,7 +614,7 @@ static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
+ list_del_init(&timeri->ack_list);
+ list_del_init(&timeri->active_list);
+ snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
+- SNDRV_TIMER_EVENT_CONTINUE);
++ SNDRV_TIMER_EVENT_PAUSE);
+ spin_unlock(&timeri->timer->lock);
+ }
+ spin_unlock_irqrestore(&slave_active_lock, flags);
+diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
+index 8632301489fa..b67de2bb06a2 100644
+--- a/sound/core/vmaster.c
++++ b/sound/core/vmaster.c
+@@ -68,10 +68,13 @@ static int slave_update(struct link_slave *slave)
+ return -ENOMEM;
+ uctl->id = slave->slave.id;
+ err = slave->slave.get(&slave->slave, uctl);
++ if (err < 0)
++ goto error;
+ for (ch = 0; ch < slave->info.count; ch++)
+ slave->vals[ch] = uctl->value.integer.value[ch];
++ error:
+ kfree(uctl);
+- return 0;
++ return err < 0 ? err : 0;
+ }
+
+ /* get the slave ctl info and save the initial values */
+diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
+index 457a1521f32f..785f4e95148c 100644
+--- a/tools/hv/hv_fcopy_daemon.c
++++ b/tools/hv/hv_fcopy_daemon.c
+@@ -23,13 +23,14 @@
+ #include <unistd.h>
+ #include <errno.h>
+ #include <linux/hyperv.h>
++#include <linux/limits.h>
+ #include <syslog.h>
+ #include <sys/stat.h>
+ #include <fcntl.h>
+ #include <getopt.h>
+
+ static int target_fd;
+-static char target_fname[W_MAX_PATH];
++static char target_fname[PATH_MAX];
+ static unsigned long long filesize;
+
+ static int hv_start_fcopy(struct hv_start_fcopy *smsg)
+diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
+index b2b4ebffab8c..34031a297f02 100644
+--- a/tools/hv/hv_vss_daemon.c
++++ b/tools/hv/hv_vss_daemon.c
+@@ -22,6 +22,7 @@
+ #include <sys/poll.h>
+ #include <sys/ioctl.h>
+ #include <sys/stat.h>
++#include <sys/sysmacros.h>
+ #include <fcntl.h>
+ #include <stdio.h>
+ #include <mntent.h>
+diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
+index 91ef44bfaf3e..2a858ea56a81 100644
+--- a/tools/perf/Makefile.perf
++++ b/tools/perf/Makefile.perf
+@@ -368,7 +368,8 @@ LIBS = -Wl,--whole-archive $(PERFLIBS) $(EXTRA_PERFLIBS) -Wl,--no-whole-archive
+
+ ifeq ($(USE_CLANG), 1)
+ CLANGLIBS_LIST = AST Basic CodeGen Driver Frontend Lex Tooling Edit Sema Analysis Parse Serialization
+- LIBCLANG = $(foreach l,$(CLANGLIBS_LIST),$(wildcard $(shell $(LLVM_CONFIG) --libdir)/libclang$(l).a))
++ CLANGLIBS_NOEXT_LIST = $(foreach l,$(CLANGLIBS_LIST),$(shell $(LLVM_CONFIG) --libdir)/libclang$(l))
++ LIBCLANG = $(foreach l,$(CLANGLIBS_NOEXT_LIST),$(wildcard $(l).a $(l).so))
+ LIBS += -Wl,--start-group $(LIBCLANG) -Wl,--end-group
+ endif
+
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index b205c1340456..3b570e808b31 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -926,6 +926,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
+ }
+ }
+
++ /*
++ * If we have just single event and are sending data
++ * through pipe, we need to force the ids allocation,
++ * because we synthesize event name through the pipe
++ * and need the id for that.
++ */
++ if (data->is_pipe && rec->evlist->nr_entries == 1)
++ rec->opts.sample_id = true;
++
+ if (record__open(rec) != 0) {
+ err = -1;
+ goto out_child;
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 9df0af17e9c2..52486c90ab93 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -2185,11 +2185,16 @@ static int add_default_attributes(void)
+ return 0;
+
+ if (transaction_run) {
++ struct parse_events_error errinfo;
++
+ if (pmu_have_event("cpu", "cycles-ct") &&
+ pmu_have_event("cpu", "el-start"))
+- err = parse_events(evsel_list, transaction_attrs, NULL);
++ err = parse_events(evsel_list, transaction_attrs,
++ &errinfo);
+ else
+- err = parse_events(evsel_list, transaction_limited_attrs, NULL);
++ err = parse_events(evsel_list,
++ transaction_limited_attrs,
++ &errinfo);
+ if (err) {
+ fprintf(stderr, "Cannot set up transaction events\n");
+ return -1;
+diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
+index dd57978b2096..3103a33c13a8 100644
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -1080,8 +1080,10 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset)
+
+ static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
+ {
+- if (!strcmp(var, "top.call-graph"))
+- var = "call-graph.record-mode"; /* fall-through */
++ if (!strcmp(var, "top.call-graph")) {
++ var = "call-graph.record-mode";
++ return perf_default_config(var, value, cb);
++ }
+ if (!strcmp(var, "top.children")) {
+ symbol_conf.cumulate_callchain = perf_config_bool(var, value);
+ return 0;
+diff --git a/tools/perf/perf.h b/tools/perf/perf.h
+index 55086389fc06..de1debcd3ee7 100644
+--- a/tools/perf/perf.h
++++ b/tools/perf/perf.h
+@@ -61,6 +61,7 @@ struct record_opts {
+ bool tail_synthesize;
+ bool overwrite;
+ bool ignore_missing_thread;
++ bool sample_id;
+ unsigned int freq;
+ unsigned int mmap_pages;
+ unsigned int auxtrace_mmap_pages;
+diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
+index 260418969120..2f008067d989 100644
+--- a/tools/perf/tests/dwarf-unwind.c
++++ b/tools/perf/tests/dwarf-unwind.c
+@@ -37,6 +37,19 @@ static int init_live_machine(struct machine *machine)
+ mmap_handler, machine, true, 500);
+ }
+
++/*
++ * We need to keep these functions global, despite the
++ * fact that they are used only locally in this object,
++ * in order to keep them around even if the binary is
++ * stripped. If they are gone, the unwind check for
++ * symbol fails.
++ */
++int test_dwarf_unwind__thread(struct thread *thread);
++int test_dwarf_unwind__compare(void *p1, void *p2);
++int test_dwarf_unwind__krava_3(struct thread *thread);
++int test_dwarf_unwind__krava_2(struct thread *thread);
++int test_dwarf_unwind__krava_1(struct thread *thread);
++
+ #define MAX_STACK 8
+
+ static int unwind_entry(struct unwind_entry *entry, void *arg)
+@@ -45,12 +58,12 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
+ char *symbol = entry->sym ? entry->sym->name : NULL;
+ static const char *funcs[MAX_STACK] = {
+ "test__arch_unwind_sample",
+- "unwind_thread",
+- "compare",
++ "test_dwarf_unwind__thread",
++ "test_dwarf_unwind__compare",
+ "bsearch",
+- "krava_3",
+- "krava_2",
+- "krava_1",
++ "test_dwarf_unwind__krava_3",
++ "test_dwarf_unwind__krava_2",
++ "test_dwarf_unwind__krava_1",
+ "test__dwarf_unwind"
+ };
+ /*
+@@ -77,7 +90,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
+ return strcmp((const char *) symbol, funcs[idx]);
+ }
+
+-static noinline int unwind_thread(struct thread *thread)
++noinline int test_dwarf_unwind__thread(struct thread *thread)
+ {
+ struct perf_sample sample;
+ unsigned long cnt = 0;
+@@ -108,7 +121,7 @@ static noinline int unwind_thread(struct thread *thread)
+
+ static int global_unwind_retval = -INT_MAX;
+
+-static noinline int compare(void *p1, void *p2)
++noinline int test_dwarf_unwind__compare(void *p1, void *p2)
+ {
+ /* Any possible value should be 'thread' */
+ struct thread *thread = *(struct thread **)p1;
+@@ -117,17 +130,17 @@ static noinline int compare(void *p1, void *p2)
+ /* Call unwinder twice for both callchain orders. */
+ callchain_param.order = ORDER_CALLER;
+
+- global_unwind_retval = unwind_thread(thread);
++ global_unwind_retval = test_dwarf_unwind__thread(thread);
+ if (!global_unwind_retval) {
+ callchain_param.order = ORDER_CALLEE;
+- global_unwind_retval = unwind_thread(thread);
++ global_unwind_retval = test_dwarf_unwind__thread(thread);
+ }
+ }
+
+ return p1 - p2;
+ }
+
+-static noinline int krava_3(struct thread *thread)
++noinline int test_dwarf_unwind__krava_3(struct thread *thread)
+ {
+ struct thread *array[2] = {thread, thread};
+ void *fp = &bsearch;
+@@ -141,18 +154,19 @@ static noinline int krava_3(struct thread *thread)
+ size_t, int (*)(void *, void *));
+
+ _bsearch = fp;
+- _bsearch(array, &thread, 2, sizeof(struct thread **), compare);
++ _bsearch(array, &thread, 2, sizeof(struct thread **),
++ test_dwarf_unwind__compare);
+ return global_unwind_retval;
+ }
+
+-static noinline int krava_2(struct thread *thread)
++noinline int test_dwarf_unwind__krava_2(struct thread *thread)
+ {
+- return krava_3(thread);
++ return test_dwarf_unwind__krava_3(thread);
+ }
+
+-static noinline int krava_1(struct thread *thread)
++noinline int test_dwarf_unwind__krava_1(struct thread *thread)
+ {
+- return krava_2(thread);
++ return test_dwarf_unwind__krava_2(thread);
+ }
+
+ int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unused)
+@@ -189,7 +203,7 @@ int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unu
+ goto out;
+ }
+
+- err = krava_1(thread);
++ err = test_dwarf_unwind__krava_1(thread);
+ thread__put(thread);
+
+ out:
+diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
+index a2f757da49d9..73bea00f590f 100755
+--- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
++++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
+@@ -21,12 +21,12 @@ trace_libc_inet_pton_backtrace() {
+ expected[3]=".*packets transmitted.*"
+ expected[4]="rtt min.*"
+ expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)"
+- expected[6]=".*inet_pton[[:space:]]\($libc\)$"
++ expected[6]=".*inet_pton[[:space:]]\($libc|inlined\)$"
+ case "$(uname -m)" in
+ s390x)
+ eventattr='call-graph=dwarf'
+- expected[7]="gaih_inet[[:space:]]\(inlined\)$"
+- expected[8]="__GI_getaddrinfo[[:space:]]\(inlined\)$"
++ expected[7]="gaih_inet.*[[:space:]]\($libc|inlined\)$"
++ expected[8]="__GI_getaddrinfo[[:space:]]\($libc|inlined\)$"
+ expected[9]="main[[:space:]]\(.*/bin/ping.*\)$"
+ expected[10]="__libc_start_main[[:space:]]\($libc\)$"
+ expected[11]="_start[[:space:]]\(.*/bin/ping.*\)$"
+diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
+index f6789fb029d6..884cad122acf 100644
+--- a/tools/perf/tests/vmlinux-kallsyms.c
++++ b/tools/perf/tests/vmlinux-kallsyms.c
+@@ -125,7 +125,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
+
+ if (pair && UM(pair->start) == mem_start) {
+ next_pair:
+- if (strcmp(sym->name, pair->name) == 0) {
++ if (arch__compare_symbol_names(sym->name, pair->name) == 0) {
+ /*
+ * kallsyms don't have the symbol end, so we
+ * set that by using the next symbol start - 1,
+diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
+index 8f7f59d1a2b5..0c486d2683c4 100644
+--- a/tools/perf/ui/browsers/annotate.c
++++ b/tools/perf/ui/browsers/annotate.c
+@@ -312,6 +312,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
+ struct map_symbol *ms = ab->b.priv;
+ struct symbol *sym = ms->sym;
+ u8 pcnt_width = annotate_browser__pcnt_width(ab);
++ int width = 0;
+
+ /* PLT symbols contain external offsets */
+ if (strstr(sym->name, "@plt"))
+@@ -335,13 +336,17 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
+ to = (u64)btarget->idx;
+ }
+
++ if (ab->have_cycles)
++ width = IPC_WIDTH + CYCLES_WIDTH;
++
+ ui_browser__set_color(browser, HE_COLORSET_JUMP_ARROWS);
+- __ui_browser__line_arrow(browser, pcnt_width + 2 + ab->addr_width,
++ __ui_browser__line_arrow(browser,
++ pcnt_width + 2 + ab->addr_width + width,
+ from, to);
+
+ if (is_fused(ab, cursor)) {
+ ui_browser__mark_fused(browser,
+- pcnt_width + 3 + ab->addr_width,
++ pcnt_width + 3 + ab->addr_width + width,
+ from - 1,
+ to > from ? true : false);
+ }
+diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp
+index 1bfc946e37dc..bf31ceab33bd 100644
+--- a/tools/perf/util/c++/clang.cpp
++++ b/tools/perf/util/c++/clang.cpp
+@@ -9,6 +9,7 @@
+ * Copyright (C) 2016 Huawei Inc.
+ */
+
++#include "clang/Basic/Version.h"
+ #include "clang/CodeGen/CodeGenAction.h"
+ #include "clang/Frontend/CompilerInvocation.h"
+ #include "clang/Frontend/CompilerInstance.h"
+@@ -58,7 +59,8 @@ createCompilerInvocation(llvm::opt::ArgStringList CFlags, StringRef& Path,
+
+ FrontendOptions& Opts = CI->getFrontendOpts();
+ Opts.Inputs.clear();
+- Opts.Inputs.emplace_back(Path, IK_C);
++ Opts.Inputs.emplace_back(Path,
++ FrontendOptions::getInputKindForExtension("c"));
+ return CI;
+ }
+
+@@ -71,10 +73,17 @@ getModuleFromSource(llvm::opt::ArgStringList CFlags,
+
+ Clang.setVirtualFileSystem(&*VFS);
+
++#if CLANG_VERSION_MAJOR < 4
+ IntrusiveRefCntPtr<CompilerInvocation> CI =
+ createCompilerInvocation(std::move(CFlags), Path,
+ Clang.getDiagnostics());
+ Clang.setInvocation(&*CI);
++#else
++ std::shared_ptr<CompilerInvocation> CI(
++ createCompilerInvocation(std::move(CFlags), Path,
++ Clang.getDiagnostics()));
++ Clang.setInvocation(CI);
++#endif
+
+ std::unique_ptr<CodeGenAction> Act(new EmitLLVMOnlyAction(&*LLVMCtx));
+ if (!Clang.ExecuteAction(*Act))
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index 097473600d94..5d420209505e 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -878,7 +878,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
+ * cumulated only one time to prevent entries more than 100%
+ * overhead.
+ */
+- he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
++ he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
+ if (he_cache == NULL)
+ return -ENOMEM;
+
+@@ -1043,8 +1043,6 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
+ if (err)
+ return err;
+
+- iter->max_stack = max_stack_depth;
+-
+ err = iter->ops->prepare_entry(iter, al);
+ if (err)
+ goto out;
+diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
+index f6630cb95eff..b99d68943f25 100644
+--- a/tools/perf/util/hist.h
++++ b/tools/perf/util/hist.h
+@@ -107,7 +107,6 @@ struct hist_entry_iter {
+ int curr;
+
+ bool hide_unresolved;
+- int max_stack;
+
+ struct perf_evsel *evsel;
+ struct perf_sample *sample;
+diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
+index 1e97937b03a9..6f09e4962dad 100644
+--- a/tools/perf/util/record.c
++++ b/tools/perf/util/record.c
+@@ -137,6 +137,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
+ struct perf_evsel *evsel;
+ bool use_sample_identifier = false;
+ bool use_comm_exec;
++ bool sample_id = opts->sample_id;
+
+ /*
+ * Set the evsel leader links before we configure attributes,
+@@ -163,8 +164,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
+ * match the id.
+ */
+ use_sample_identifier = perf_can_sample_identifier();
+- evlist__for_each_entry(evlist, evsel)
+- perf_evsel__set_sample_id(evsel, use_sample_identifier);
++ sample_id = true;
+ } else if (evlist->nr_entries > 1) {
+ struct perf_evsel *first = perf_evlist__first(evlist);
+
+@@ -174,6 +174,10 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
+ use_sample_identifier = perf_can_sample_identifier();
+ break;
+ }
++ sample_id = true;
++ }
++
++ if (sample_id) {
+ evlist__for_each_entry(evlist, evsel)
+ perf_evsel__set_sample_id(evsel, use_sample_identifier);
+ }
+diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
+index 30cd0b296f1a..8e61aad0ca3f 100644
+--- a/tools/testing/radix-tree/idr-test.c
++++ b/tools/testing/radix-tree/idr-test.c
+@@ -202,6 +202,13 @@ void idr_checks(void)
+ idr_remove(&idr, 3);
+ idr_remove(&idr, 0);
+
++ assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0);
++ idr_remove(&idr, 1);
++ for (i = 1; i < RADIX_TREE_MAP_SIZE; i++)
++ assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
++ idr_remove(&idr, 1 << 30);
++ idr_destroy(&idr);
++
+ for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
+ struct item *item = item_create(i, 0);
+ assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index 3c9c0bbe7dbb..ea300e7818a7 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -122,6 +122,7 @@ ifdef INSTALL_PATH
+ BUILD_TARGET=$$BUILD/$$TARGET; \
+ echo "echo ; echo Running tests in $$TARGET" >> $(ALL_SCRIPT); \
+ echo "echo ========================================" >> $(ALL_SCRIPT); \
++ echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
+ echo "cd $$TARGET" >> $(ALL_SCRIPT); \
+ make -s --no-print-directory OUTPUT=$$BUILD_TARGET -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
+ echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
+diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
+index 8b9470b5af6d..96c6238a4a1f 100644
+--- a/tools/testing/selftests/bpf/test_maps.c
++++ b/tools/testing/selftests/bpf/test_maps.c
+@@ -126,6 +126,8 @@ static void test_hashmap_sizes(int task, void *data)
+ fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j,
+ 2, map_flags);
+ if (fd < 0) {
++ if (errno == ENOMEM)
++ return;
+ printf("Failed to create hashmap key=%d value=%d '%s'\n",
+ i, j, strerror(errno));
+ exit(1);
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
+new file mode 100644
+index 000000000000..5ba73035e1d9
+--- /dev/null
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
+@@ -0,0 +1,46 @@
++#!/bin/sh
++# SPDX-License-Identifier: GPL-2.0
++# description: Kprobe event string type argument
++
++[ -f kprobe_events ] || exit_unsupported # this is configurable
++
++echo 0 > events/enable
++echo > kprobe_events
++
++case `uname -m` in
++x86_64)
++ ARG2=%si
++ OFFS=8
++;;
++i[3456]86)
++ ARG2=%cx
++ OFFS=4
++;;
++aarch64)
++ ARG2=%x1
++ OFFS=8
++;;
++arm*)
++ ARG2=%r1
++ OFFS=4
++;;
++*)
++ echo "Please implement other architecture here"
++ exit_untested
++esac
++
++: "Test get argument (1)"
++echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string" > kprobe_events
++echo 1 > events/kprobes/testprobe/enable
++! echo test >> kprobe_events
++tail -n 1 trace | grep -qe "testprobe.* arg1=\"test\""
++
++echo 0 > events/kprobes/testprobe/enable
++: "Test get argument (2)"
++echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string arg2=+0(+${OFFS}(${ARG2})):string" > kprobe_events
++echo 1 > events/kprobes/testprobe/enable
++! echo test1 test2 >> kprobe_events
++tail -n 1 trace | grep -qe "testprobe.* arg1=\"test1\" arg2=\"test2\""
++
++echo 0 > events/enable
++echo > kprobe_events
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
+new file mode 100644
+index 000000000000..231bcd2c4eb5
+--- /dev/null
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
+@@ -0,0 +1,97 @@
++#!/bin/sh
++# SPDX-License-Identifier: GPL-2.0
++# description: Kprobe event argument syntax
++
++[ -f kprobe_events ] || exit_unsupported # this is configurable
++
++grep "x8/16/32/64" README > /dev/null || exit_unsupported # version issue
++
++echo 0 > events/enable
++echo > kprobe_events
++
++PROBEFUNC="vfs_read"
++GOODREG=
++BADREG=
++GOODSYM="_sdata"
++if ! grep -qw ${GOODSYM} /proc/kallsyms ; then
++ GOODSYM=$PROBEFUNC
++fi
++BADSYM="deaqswdefr"
++SYMADDR=0x`grep -w ${GOODSYM} /proc/kallsyms | cut -f 1 -d " "`
++GOODTYPE="x16"
++BADTYPE="y16"
++
++case `uname -m` in
++x86_64|i[3456]86)
++ GOODREG=%ax
++ BADREG=%ex
++;;
++aarch64)
++ GOODREG=%x0
++ BADREG=%ax
++;;
++arm*)
++ GOODREG=%r0
++ BADREG=%ax
++;;
++esac
++
++test_goodarg() # Good-args
++{
++ while [ "$1" ]; do
++ echo "p ${PROBEFUNC} $1" > kprobe_events
++ shift 1
++ done;
++}
++
++test_badarg() # Bad-args
++{
++ while [ "$1" ]; do
++ ! echo "p ${PROBEFUNC} $1" > kprobe_events
++ shift 1
++ done;
++}
++
++echo > kprobe_events
++
++: "Register access"
++test_goodarg ${GOODREG}
++test_badarg ${BADREG}
++
++: "Symbol access"
++test_goodarg "@${GOODSYM}" "@${SYMADDR}" "@${GOODSYM}+10" "@${GOODSYM}-10"
++test_badarg "@" "@${BADSYM}" "@${GOODSYM}*10" "@${GOODSYM}/10" \
++ "@${GOODSYM}%10" "@${GOODSYM}&10" "@${GOODSYM}|10"
++
++: "Stack access"
++test_goodarg "\$stack" "\$stack0" "\$stack1"
++test_badarg "\$stackp" "\$stack0+10" "\$stack1-10"
++
++: "Retval access"
++echo "r ${PROBEFUNC} \$retval" > kprobe_events
++! echo "p ${PROBEFUNC} \$retval" > kprobe_events
++
++: "Comm access"
++test_goodarg "\$comm"
++
++: "Indirect memory access"
++test_goodarg "+0(${GOODREG})" "-0(${GOODREG})" "+10(\$stack)" \
++ "+0(\$stack1)" "+10(@${GOODSYM}-10)" "+0(+10(+20(\$stack)))"
++test_badarg "+(${GOODREG})" "(${GOODREG}+10)" "-(${GOODREG})" "(${GOODREG})" \
++ "+10(\$comm)" "+0(${GOODREG})+10"
++
++: "Name assignment"
++test_goodarg "varname=${GOODREG}"
++test_badarg "varname=varname2=${GOODREG}"
++
++: "Type syntax"
++test_goodarg "${GOODREG}:${GOODTYPE}"
++test_badarg "${GOODREG}::${GOODTYPE}" "${GOODREG}:${BADTYPE}" \
++ "${GOODTYPE}:${GOODREG}"
++
++: "Combination check"
++
++test_goodarg "\$comm:string" "+0(\$stack):string"
++test_badarg "\$comm:x64" "\$stack:string" "${GOODREG}:string"
++
++echo > kprobe_events
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
+new file mode 100644
+index 000000000000..4fda01a08da4
+--- /dev/null
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
+@@ -0,0 +1,43 @@
++#!/bin/sh
++# SPDX-License-Identifier: GPL-2.0
++# description: Kprobe events - probe points
++
++[ -f kprobe_events ] || exit_unsupported # this is configurable
++
++TARGET_FUNC=create_trace_kprobe
++
++dec_addr() { # hexaddr
++ printf "%d" "0x"`echo $1 | tail -c 8`
++}
++
++set_offs() { # prev target next
++ A1=`dec_addr $1`
++ A2=`dec_addr $2`
++ A3=`dec_addr $3`
++ TARGET="0x$2" # an address
++ PREV=`expr $A1 - $A2` # offset to previous symbol
++ NEXT=+`expr $A3 - $A2` # offset to next symbol
++ OVERFLOW=+`printf "0x%x" ${PREV}` # overflow offset to previous symbol
++}
++
++# We have to decode symbol addresses to get correct offsets.
++# If the offset is not an instruction boundary, it cause -EILSEQ.
++set_offs `grep -A1 -B1 ${TARGET_FUNC} /proc/kallsyms | cut -f 1 -d " " | xargs`
++
++UINT_TEST=no
++# printf "%x" -1 returns (unsigned long)-1.
++if [ `printf "%x" -1 | wc -c` != 9 ]; then
++ UINT_TEST=yes
++fi
++
++echo 0 > events/enable
++echo > kprobe_events
++echo "p:testprobe ${TARGET_FUNC}" > kprobe_events
++echo "p:testprobe ${TARGET}" > kprobe_events
++echo "p:testprobe ${TARGET_FUNC}${NEXT}" > kprobe_events
++! echo "p:testprobe ${TARGET_FUNC}${PREV}" > kprobe_events
++if [ "${UINT_TEST}" = yes ]; then
++! echo "p:testprobe ${TARGET_FUNC}${OVERFLOW}" > kprobe_events
++fi
++echo > kprobe_events
++clear_trace
+diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile
+index cea4adcd42b8..a63e8453984d 100644
+--- a/tools/testing/selftests/futex/Makefile
++++ b/tools/testing/selftests/futex/Makefile
+@@ -12,9 +12,9 @@ all:
+ BUILD_TARGET=$(OUTPUT)/$$DIR; \
+ mkdir $$BUILD_TARGET -p; \
+ make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
+- if [ -e $$DIR/$(TEST_PROGS) ]; then
+- rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/;
+- fi
++ if [ -e $$DIR/$(TEST_PROGS) ]; then \
++ rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; \
++ fi \
+ done
+
+ override define RUN_TESTS
+diff --git a/tools/testing/selftests/memfd/Makefile b/tools/testing/selftests/memfd/Makefile
+index 3926a0409dda..36409cb7288c 100644
+--- a/tools/testing/selftests/memfd/Makefile
++++ b/tools/testing/selftests/memfd/Makefile
+@@ -5,6 +5,7 @@ CFLAGS += -I../../../../include/
+ CFLAGS += -I../../../../usr/include/
+
+ TEST_PROGS := run_tests.sh
++TEST_FILES := run_fuse_test.sh
+ TEST_GEN_FILES := memfd_test fuse_mnt fuse_test
+
+ fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags)
+diff --git a/tools/testing/selftests/memfd/config b/tools/testing/selftests/memfd/config
+new file mode 100644
+index 000000000000..835c7f4dadcd
+--- /dev/null
++++ b/tools/testing/selftests/memfd/config
+@@ -0,0 +1 @@
++CONFIG_FUSE_FS=m
+diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
+index 989f917068d1..d4346b16b2c1 100644
+--- a/tools/testing/selftests/net/psock_fanout.c
++++ b/tools/testing/selftests/net/psock_fanout.c
+@@ -128,6 +128,8 @@ static void sock_fanout_getopts(int fd, uint16_t *typeflags, uint16_t *group_id)
+
+ static void sock_fanout_set_ebpf(int fd)
+ {
++ static char log_buf[65536];
++
+ const int len_off = __builtin_offsetof(struct __sk_buff, len);
+ struct bpf_insn prog[] = {
+ { BPF_ALU64 | BPF_MOV | BPF_X, 6, 1, 0, 0 },
+@@ -140,7 +142,6 @@ static void sock_fanout_set_ebpf(int fd)
+ { BPF_ALU | BPF_MOV | BPF_K, 0, 0, 0, 0 },
+ { BPF_JMP | BPF_EXIT, 0, 0, 0, 0 }
+ };
+- char log_buf[512];
+ union bpf_attr attr;
+ int pfd;
+
+diff --git a/tools/testing/selftests/powerpc/mm/subpage_prot.c b/tools/testing/selftests/powerpc/mm/subpage_prot.c
+index 35ade7406dcd..3ae77ba93208 100644
+--- a/tools/testing/selftests/powerpc/mm/subpage_prot.c
++++ b/tools/testing/selftests/powerpc/mm/subpage_prot.c
+@@ -135,6 +135,16 @@ static int run_test(void *addr, unsigned long size)
+ return 0;
+ }
+
++static int syscall_available(void)
++{
++ int rc;
++
++ errno = 0;
++ rc = syscall(__NR_subpage_prot, 0, 0, 0);
++
++ return rc == 0 || (errno != ENOENT && errno != ENOSYS);
++}
++
+ int test_anon(void)
+ {
+ unsigned long align;
+@@ -145,6 +155,8 @@ int test_anon(void)
+ void *mallocblock;
+ unsigned long mallocsize;
+
++ SKIP_IF(!syscall_available());
++
+ if (getpagesize() != 0x10000) {
+ fprintf(stderr, "Kernel page size must be 64K!\n");
+ return 1;
+@@ -180,6 +192,8 @@ int test_file(void)
+ off_t filesize;
+ int fd;
+
++ SKIP_IF(!syscall_available());
++
+ fd = open(file_name, O_RDWR);
+ if (fd == -1) {
+ perror("failed to open file");
+diff --git a/tools/testing/selftests/pstore/config b/tools/testing/selftests/pstore/config
+index 6a8e5a9bfc10..d148f9f89fb6 100644
+--- a/tools/testing/selftests/pstore/config
++++ b/tools/testing/selftests/pstore/config
+@@ -2,3 +2,4 @@ CONFIG_MISC_FILESYSTEMS=y
+ CONFIG_PSTORE=y
+ CONFIG_PSTORE_PMSG=y
+ CONFIG_PSTORE_CONSOLE=y
++CONFIG_PSTORE_RAM=m
+diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
+index 194759ec9e70..e350cf3d4f90 100644
+--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
+@@ -145,6 +145,15 @@ struct seccomp_data {
+ #define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
+ #endif
+
++#ifndef PTRACE_SECCOMP_GET_METADATA
++#define PTRACE_SECCOMP_GET_METADATA 0x420d
++
++struct seccomp_metadata {
++ __u64 filter_off; /* Input: which filter */
++ __u64 flags; /* Output: filter's flags */
++};
++#endif
++
+ #ifndef seccomp
+ int seccomp(unsigned int op, unsigned int flags, void *args)
+ {
+@@ -2861,6 +2870,58 @@ TEST(get_action_avail)
+ EXPECT_EQ(errno, EOPNOTSUPP);
+ }
+
++TEST(get_metadata)
++{
++ pid_t pid;
++ int pipefd[2];
++ char buf;
++ struct seccomp_metadata md;
++
++ ASSERT_EQ(0, pipe(pipefd));
++
++ pid = fork();
++ ASSERT_GE(pid, 0);
++ if (pid == 0) {
++ struct sock_filter filter[] = {
++ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
++ };
++ struct sock_fprog prog = {
++ .len = (unsigned short)ARRAY_SIZE(filter),
++ .filter = filter,
++ };
++
++ /* one with log, one without */
++ ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER,
++ SECCOMP_FILTER_FLAG_LOG, &prog));
++ ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog));
++
++ ASSERT_EQ(0, close(pipefd[0]));
++ ASSERT_EQ(1, write(pipefd[1], "1", 1));
++ ASSERT_EQ(0, close(pipefd[1]));
++
++ while (1)
++ sleep(100);
++ }
++
++ ASSERT_EQ(0, close(pipefd[1]));
++ ASSERT_EQ(1, read(pipefd[0], &buf, 1));
++
++ ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid));
++ ASSERT_EQ(pid, waitpid(pid, NULL, 0));
++
++ md.filter_off = 0;
++ ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md));
++ EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG);
++ EXPECT_EQ(md.filter_off, 0);
++
++ md.filter_off = 1;
++ ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md));
++ EXPECT_EQ(md.flags, 0);
++ EXPECT_EQ(md.filter_off, 1);
++
++ ASSERT_EQ(0, kill(pid, SIGKILL));
++}
++
+ /*
+ * TODO:
+ * - add microbenchmarks
+diff --git a/tools/testing/selftests/sync/Makefile b/tools/testing/selftests/sync/Makefile
+index b3c8ba3cb668..d0121a8a3523 100644
+--- a/tools/testing/selftests/sync/Makefile
++++ b/tools/testing/selftests/sync/Makefile
+@@ -30,7 +30,7 @@ $(TEST_CUSTOM_PROGS): $(TESTS) $(OBJS)
+ $(CC) -o $(TEST_CUSTOM_PROGS) $(OBJS) $(TESTS) $(CFLAGS) $(LDFLAGS)
+
+ $(OBJS): $(OUTPUT)/%.o: %.c
+- $(CC) -c $^ -o $@
++ $(CC) -c $^ -o $@ $(CFLAGS)
+
+ $(TESTS): $(OUTPUT)/%.o: %.c
+ $(CC) -c $^ -o $@
+diff --git a/tools/testing/selftests/vDSO/Makefile b/tools/testing/selftests/vDSO/Makefile
+index 3d5a62ff7d31..f5d7a7851e21 100644
+--- a/tools/testing/selftests/vDSO/Makefile
++++ b/tools/testing/selftests/vDSO/Makefile
+@@ -1,4 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
++include ../lib.mk
++
+ ifndef CROSS_COMPILE
+ CFLAGS := -std=gnu99
+ CFLAGS_vdso_standalone_test_x86 := -nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector
+@@ -6,16 +8,14 @@ ifeq ($(CONFIG_X86_32),y)
+ LDLIBS += -lgcc_s
+ endif
+
+-TEST_PROGS := vdso_test vdso_standalone_test_x86
++TEST_PROGS := $(OUTPUT)/vdso_test $(OUTPUT)/vdso_standalone_test_x86
+
+ all: $(TEST_PROGS)
+-vdso_test: parse_vdso.c vdso_test.c
+-vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c
++$(OUTPUT)/vdso_test: parse_vdso.c vdso_test.c
++$(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c
+ $(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \
+ vdso_standalone_test_x86.c parse_vdso.c \
+- -o vdso_standalone_test_x86
++ -o $@
+
+-include ../lib.mk
+-clean:
+- rm -fr $(TEST_PROGS)
++EXTRA_CLEAN := $(TEST_PROGS)
+ endif
+diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
+index cc826326de87..45708aa3ce47 100755
+--- a/tools/testing/selftests/vm/run_vmtests
++++ b/tools/testing/selftests/vm/run_vmtests
+@@ -2,25 +2,33 @@
+ # SPDX-License-Identifier: GPL-2.0
+ #please run as root
+
+-#we need 256M, below is the size in kB
+-needmem=262144
+ mnt=./huge
+ exitcode=0
+
+-#get pagesize and freepages from /proc/meminfo
++#get huge pagesize and freepages from /proc/meminfo
+ while read name size unit; do
+ if [ "$name" = "HugePages_Free:" ]; then
+ freepgs=$size
+ fi
+ if [ "$name" = "Hugepagesize:" ]; then
+- pgsize=$size
++ hpgsize_KB=$size
+ fi
+ done < /proc/meminfo
+
++# Simple hugetlbfs tests have a hardcoded minimum requirement of
++# huge pages totaling 256MB (262144KB) in size. The userfaultfd
++# hugetlb test requires a minimum of 2 * nr_cpus huge pages. Take
++# both of these requirements into account and attempt to increase
++# number of huge pages available.
++nr_cpus=$(nproc)
++hpgsize_MB=$((hpgsize_KB / 1024))
++half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128))
++needmem_KB=$((half_ufd_size_MB * 2 * 1024))
++
+ #set proper nr_hugepages
+-if [ -n "$freepgs" ] && [ -n "$pgsize" ]; then
++if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
+ nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
+- needpgs=`expr $needmem / $pgsize`
++ needpgs=$((needmem_KB / hpgsize_KB))
+ tries=2
+ while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do
+ lackpgs=$(( $needpgs - $freepgs ))
+@@ -107,8 +115,9 @@ fi
+ echo "---------------------------"
+ echo "running userfaultfd_hugetlb"
+ echo "---------------------------"
+-# 256MB total huge pages == 128MB src and 128MB dst
+-./userfaultfd hugetlb 128 32 $mnt/ufd_test_file
++# Test requires source and destination huge pages. Size of source
++# (half_ufd_size_MB) is passed as argument to test.
++./userfaultfd hugetlb $half_ufd_size_MB 32 $mnt/ufd_test_file
+ if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c
+index 1c12536f2081..18f523557983 100644
+--- a/tools/thermal/tmon/sysfs.c
++++ b/tools/thermal/tmon/sysfs.c
+@@ -486,6 +486,7 @@ int zone_instance_to_index(int zone_inst)
+ int update_thermal_data()
+ {
+ int i;
++ int next_thermal_record = cur_thermal_record + 1;
+ char tz_name[256];
+ static unsigned long samples;
+
+@@ -495,9 +496,9 @@ int update_thermal_data()
+ }
+
+ /* circular buffer for keeping historic data */
+- if (cur_thermal_record >= NR_THERMAL_RECORDS)
+- cur_thermal_record = 0;
+- gettimeofday(&trec[cur_thermal_record].tv, NULL);
++ if (next_thermal_record >= NR_THERMAL_RECORDS)
++ next_thermal_record = 0;
++ gettimeofday(&trec[next_thermal_record].tv, NULL);
+ if (tmon_log) {
+ fprintf(tmon_log, "%lu ", ++samples);
+ fprintf(tmon_log, "%3.1f ", p_param.t_target);
+@@ -507,11 +508,12 @@ int update_thermal_data()
+ snprintf(tz_name, 256, "%s/%s%d", THERMAL_SYSFS, TZONE,
+ ptdata.tzi[i].instance);
+ sysfs_get_ulong(tz_name, "temp",
+- &trec[cur_thermal_record].temp[i]);
++ &trec[next_thermal_record].temp[i]);
+ if (tmon_log)
+ fprintf(tmon_log, "%lu ",
+- trec[cur_thermal_record].temp[i]/1000);
++ trec[next_thermal_record].temp[i] / 1000);
+ }
++ cur_thermal_record = next_thermal_record;
+ for (i = 0; i < ptdata.nr_cooling_dev; i++) {
+ char cdev_name[256];
+ unsigned long val;
+diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c
+index 9aa19652e8e8..b43138f8b862 100644
+--- a/tools/thermal/tmon/tmon.c
++++ b/tools/thermal/tmon/tmon.c
+@@ -336,7 +336,6 @@ int main(int argc, char **argv)
+ show_data_w();
+ show_cooling_device();
+ }
+- cur_thermal_record++;
+ time_elapsed += ticktime;
+ controller_handler(trec[0].temp[target_tz_index] / 1000,
+ &yk);
+diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
+index c1e4bdd66131..b4c5baf4af45 100644
+--- a/virt/kvm/arm/vgic/vgic-mmio.c
++++ b/virt/kvm/arm/vgic/vgic-mmio.c
+@@ -110,9 +110,12 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
+ /* Loop over all IRQs affected by this read */
+ for (i = 0; i < len * 8; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
++ unsigned long flags;
+
++ spin_lock_irqsave(&irq->irq_lock, flags);
+ if (irq_is_pending(irq))
+ value |= (1U << i);
++ spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+ vgic_put_irq(vcpu->kvm, irq);
+ }
+diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
+index f7450dc41ab3..21a2240164f3 100644
+--- a/virt/kvm/arm/vgic/vgic.h
++++ b/virt/kvm/arm/vgic/vgic.h
+@@ -96,6 +96,7 @@
+ /* we only support 64 kB translation table page size */
+ #define KVM_ITS_L1E_ADDR_MASK GENMASK_ULL(51, 16)
+
++/* Requires the irq_lock to be held by the caller. */
+ static inline bool irq_is_pending(struct vgic_irq *irq)
+ {
+ if (irq->config == VGIC_CONFIG_EDGE)
diff --git a/1045_linux-4.14.46.patch b/1045_linux-4.14.46.patch
new file mode 100644
index 00000000..1414cad3
--- /dev/null
+++ b/1045_linux-4.14.46.patch
@@ -0,0 +1,850 @@
+diff --git a/Makefile b/Makefile
+index f3ea74e7a516..3b1845f2b8f8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 45
++SUBLEVEL = 46
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
+index 1f57bbe82b6f..df24fc8da1bc 100644
+--- a/tools/arch/arm/include/uapi/asm/kvm.h
++++ b/tools/arch/arm/include/uapi/asm/kvm.h
+@@ -180,6 +180,12 @@ struct kvm_arch_memory_slot {
+ #define KVM_REG_ARM_VFP_FPINST 0x1009
+ #define KVM_REG_ARM_VFP_FPINST2 0x100A
+
++/* KVM-as-firmware specific pseudo-registers */
++#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
++#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
++ KVM_REG_ARM_FW | ((r) & 0xffff))
++#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
++
+ /* Device Control API: ARM VGIC */
+ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0
+ #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
+diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
+index 51149ec75fe4..9f74ce5899f0 100644
+--- a/tools/arch/arm64/include/uapi/asm/kvm.h
++++ b/tools/arch/arm64/include/uapi/asm/kvm.h
+@@ -200,6 +200,12 @@ struct kvm_arch_memory_slot {
+ #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
+ #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
+
++/* KVM-as-firmware specific pseudo-registers */
++#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
++#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
++ KVM_REG_ARM_FW | ((r) & 0xffff))
++#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
++
+ /* Device Control API: ARM VGIC */
+ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0
+ #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
+diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
+index 61d6049f4c1e..8aaec831053a 100644
+--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
++++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
+@@ -607,6 +607,8 @@ struct kvm_ppc_rmmu_info {
+ #define KVM_REG_PPC_TIDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc)
+ #define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
+
++#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
++
+ /* Transactional Memory checkpointed state:
+ * This is all GPRs, all VSX regs and a subset of SPRs
+ */
+diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
+index 9ad172dcd912..a3938db010f7 100644
+--- a/tools/arch/s390/include/uapi/asm/kvm.h
++++ b/tools/arch/s390/include/uapi/asm/kvm.h
+@@ -228,6 +228,7 @@ struct kvm_guest_debug_arch {
+ #define KVM_SYNC_RICCB (1UL << 7)
+ #define KVM_SYNC_FPRS (1UL << 8)
+ #define KVM_SYNC_GSCB (1UL << 9)
++#define KVM_SYNC_BPBC (1UL << 10)
+ /* length and alignment of the sdnx as a power of two */
+ #define SDNXC 8
+ #define SDNXL (1UL << SDNXC)
+@@ -251,7 +252,9 @@ struct kvm_sync_regs {
+ };
+ __u8 reserved[512]; /* for future vector expansion */
+ __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
+- __u8 padding1[52]; /* riccb needs to be 64byte aligned */
++ __u8 bpbc : 1; /* bp mode */
++ __u8 reserved2 : 7;
++ __u8 padding1[51]; /* riccb needs to be 64byte aligned */
+ __u8 riccb[64]; /* runtime instrumentation controls block */
+ __u8 padding2[192]; /* sdnx needs to be 256byte aligned */
+ union {
+diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
+index 793690fbda36..403e97d5e243 100644
+--- a/tools/arch/x86/include/asm/cpufeatures.h
++++ b/tools/arch/x86/include/asm/cpufeatures.h
+@@ -13,173 +13,176 @@
+ /*
+ * Defines x86 CPU feature bits
+ */
+-#define NCAPINTS 18 /* N 32-bit words worth of info */
+-#define NBUGINTS 1 /* N 32-bit bug flags */
++#define NCAPINTS 19 /* N 32-bit words worth of info */
++#define NBUGINTS 1 /* N 32-bit bug flags */
+
+ /*
+ * Note: If the comment begins with a quoted string, that string is used
+ * in /proc/cpuinfo instead of the macro name. If the string is "",
+ * this feature bit is not displayed in /proc/cpuinfo at all.
++ *
++ * When adding new features here that depend on other features,
++ * please update the table in kernel/cpu/cpuid-deps.c as well.
+ */
+
+-/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
+-#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
+-#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
+-#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
+-#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
+-#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
+-#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
+-#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
+-#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
+-#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
+-#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
+-#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
+-#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
+-#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
+-#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
+-#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
+- /* (plus FCMOVcc, FCOMI with FPU) */
+-#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
+-#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
+-#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
+-#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
+-#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
+-#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
+-#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
+-#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
+-#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
+-#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
+-#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
+-#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
+-#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
+-#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
+-#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
++/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */
++#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
++#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
++#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
++#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
++#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
++#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
++#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
++#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
++#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
++#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
++#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
++#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
++#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
++#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
++#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
++#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
++#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
++#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
++#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
++#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
++#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
++#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
++#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
++#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
++#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
++#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
++#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
++#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
++#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
++#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
+
+ /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
+ /* Don't duplicate feature flags which are redundant with Intel! */
+-#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
+-#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
+-#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
+-#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
+-#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
+-#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
+-#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
+-#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
+-#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
+-#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
++#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
++#define X86_FEATURE_MP ( 1*32+19) /* MP Capable */
++#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
++#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
++#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
++#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
++#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
++#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */
++#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */
++#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */
+
+ /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
+-#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
+-#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
+-#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
++#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
++#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
++#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
+
+ /* Other features, Linux-defined mapping, word 3 */
+ /* This range is used for feature bits which conflict or are synthesized */
+-#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
+-#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
+-#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
+-#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
+-/* cpu types for specific tunings: */
+-#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
+-#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
+-#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
+-#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
+-#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
+-#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
+-#define X86_FEATURE_ART ( 3*32+10) /* Platform has always running timer (ART) */
+-#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
+-#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
+-#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
+-#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
+-#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
+-#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
+-#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
+-#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
+-#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
+-#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
+-#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
+-#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
+-#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
+-#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
+-#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
+-#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
+-#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
+-#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
+-#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+-#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
++#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
++#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
++#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
++#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
++
++/* CPU types for specific tunings: */
++#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
++#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
++#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
++#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
++#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
++#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */
++#define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */
++#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
++#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
++#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
++#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
++#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
++#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
++#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" MFENCE synchronizes RDTSC */
++#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
++#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
++#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
++#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
++#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */
++#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
++#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
++#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
++#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
++#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
++#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
++#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
++#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
+
+-/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+-#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
+-#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
+-#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
+-#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
+-#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
+-#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
+-#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
+-#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
+-#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
+-#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
+-#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
+-#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
+-#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
+-#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
+-#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
+-#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
+-#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
+-#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
+-#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
+-#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
+-#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
+-#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
+-#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
+-#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
+-#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
+-#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
+-#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
+-#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
+-#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
+-#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
+-#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
++/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */
++#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
++#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
++#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
++#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */
++#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
++#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
++#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */
++#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
++#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
++#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
++#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
++#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
++#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
++#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */
++#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
++#define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */
++#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
++#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
++#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
++#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
++#define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */
++#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
++#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
++#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */
++#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
++#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
++#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */
++#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
++#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */
++#define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */
++#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
+
+ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
+-#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
+-#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
+-#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
+-#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
+-#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
+-#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
+-#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
+-#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
+-#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
+-#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
++#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
++#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
++#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
++#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
++#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
++#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
++#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
++#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
++#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
++#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
+
+-/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
+-#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
+-#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
+-#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
+-#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
+-#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
+-#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
+-#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
+-#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
+-#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
+-#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
+-#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
+-#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
+-#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
+-#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
+-#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
+-#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
+-#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
+-#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
+-#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
+-#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
+-#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
+-#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
+-#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
+-#define X86_FEATURE_PTSC ( 6*32+27) /* performance time-stamp counter */
+-#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
+-#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
++/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
++#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
++#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
++#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */
++#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
++#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
++#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
++#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
++#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
++#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
++#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
++#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
++#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
++#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
++#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
++#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
++#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
++#define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */
++#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
++#define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */
++#define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */
++#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */
++#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
++#define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */
++#define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */
++#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
++#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */
+
+ /*
+ * Auxiliary flags: Linux defined - For features scattered in various
+@@ -187,146 +190,185 @@
+ *
+ * Reuse free bits when adding new feature flags!
+ */
+-#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */
+-#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
+-#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
+-#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+-#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
+-#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
+-#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
+-
+-#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
+-#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+-#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
++#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */
++#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
++#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
++#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
++#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
++#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
++#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
++#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
++#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
++#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
++#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
++#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
++#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
++#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
++#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
+
+-#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
+-#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
+-#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
+-#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
++#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
++#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
++#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
++#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+
+-#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
++#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
++#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
++#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
++#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
++#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
++#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
++#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+
+ /* Virtualization flags: Linux defined, word 8 */
+-#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+-#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
+-#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
+-#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
+-#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
++#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
++#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
++#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
++#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
++#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
+
+-#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
+-#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
++#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
++#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
+
+
+-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+-#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+-#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
+-#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
+-#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
+-#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
+-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
+-#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
+-#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
+-#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
+-#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
+-#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
+-#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
+-#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
+-#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
+-#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
+-#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
+-#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
+-#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
+-#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
+-#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
+-#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
+-#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
+-#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
+-#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
+-#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
+-#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
+-#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
++/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
++#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
++#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */
++#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
++#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
++#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
++#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
++#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
++#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
++#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
++#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
++#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
++#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
++#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
++#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
++#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
++#define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */
++#define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */
++#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
++#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
++#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
++#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
++#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */
++#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
++#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
++#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
++#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
++#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
++#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
+
+-/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
+-#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
+-#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
+-#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
+-#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
++/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */
++#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */
++#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */
++#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
++#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
+
+-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
+-#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
++/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
++#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
+
+-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
+-#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
+-#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
+-#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring
*/ ++/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */ ++#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */ ++#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ ++#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ + +-/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ +-#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ +-#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */ ++/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ ++#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ ++#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ ++#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ ++#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ ++#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ ++#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ ++#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ + +-/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ +-#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ +-#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ +-#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ +-#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ +-#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ +-#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ +-#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ +-#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ +-#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */ +-#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ ++/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ ++#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ ++#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ ++#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ ++#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ ++#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ ++#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ ++#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ ++#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ ++#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. 
Preference */ ++#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ + +-/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */ +-#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ +-#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ +-#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ +-#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ +-#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ +-#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ +-#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ +-#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ +-#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ +-#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ +-#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ +-#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ +-#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ ++/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */ ++#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ ++#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ ++#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ ++#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ ++#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ ++#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ ++#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ ++#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ ++#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ ++#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ ++#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ ++#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ ++#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ + +-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ +-#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ +-#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ +-#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ +-#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ +-#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ +-#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ ++/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */ ++#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ ++#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */ ++#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ ++#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ ++#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */ ++#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */ ++#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */ ++#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */ ++#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */ ++#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support 
for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */ ++#define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */ ++#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ ++#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ ++#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ + +-/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ +-#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ +-#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ +-#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ ++/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ ++#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ ++#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */ ++#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */ ++ ++/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ ++#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ ++#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ ++#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ ++#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ ++#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ ++#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ ++#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */ + + /* + * BUG word(s) + */ +-#define X86_BUG(x) (NCAPINTS*32 + (x)) ++#define X86_BUG(x) (NCAPINTS*32 + (x)) + +-#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ +-#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ +-#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ +-#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ +-#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ +-#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ +-#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ +-#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ +-#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ ++#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ ++#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ ++#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ ++#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ ++#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ ++#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ ++#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ ++#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ ++#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ + #ifdef CONFIG_X86_32 + /* + * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional + * to avoid confusion. 
+ */ +-#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ ++#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ + #endif +-#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ +-#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ +-#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ +-#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ ++#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ ++#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ ++#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ ++#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ ++#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ ++#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ ++#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ ++#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ ++ + #endif /* _ASM_X86_CPUFEATURES_H */ +diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h +index c10c9128f54e..c6a3af198294 100644 +--- a/tools/arch/x86/include/asm/disabled-features.h ++++ b/tools/arch/x86/include/asm/disabled-features.h +@@ -44,6 +44,12 @@ + # define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31)) + #endif + ++#ifdef CONFIG_PAGE_TABLE_ISOLATION ++# define DISABLE_PTI 0 ++#else ++# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31)) ++#endif ++ + /* + * Make sure to add features to the correct mask + */ +@@ -54,7 +60,7 @@ + #define DISABLED_MASK4 (DISABLE_PCID) + #define DISABLED_MASK5 0 + #define DISABLED_MASK6 0 +-#define DISABLED_MASK7 0 ++#define DISABLED_MASK7 (DISABLE_PTI) + #define DISABLED_MASK8 0 + #define DISABLED_MASK9 (DISABLE_MPX) + #define DISABLED_MASK10 0 +@@ -65,6 +71,7 @@ + #define DISABLED_MASK15 0 + #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57) + #define DISABLED_MASK17 0 +-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) ++#define DISABLED_MASK18 0 ++#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) + + #endif /* _ASM_X86_DISABLED_FEATURES_H */ +diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h +index d91ba04dd007..fb3a6de7440b 100644 +--- a/tools/arch/x86/include/asm/required-features.h ++++ b/tools/arch/x86/include/asm/required-features.h +@@ -106,6 +106,7 @@ + #define REQUIRED_MASK15 0 + #define REQUIRED_MASK16 (NEED_LA57) + #define REQUIRED_MASK17 0 +-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) ++#define REQUIRED_MASK18 0 ++#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) + + #endif /* _ASM_X86_REQUIRED_FEATURES_H */ +diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h +index 7e99999d6236..857bad91c454 100644 +--- a/tools/include/uapi/linux/kvm.h ++++ b/tools/include/uapi/linux/kvm.h +@@ -931,6 +931,7 @@ struct kvm_ppc_resize_hpt { + #define KVM_CAP_PPC_SMT_POSSIBLE 147 + #define KVM_CAP_HYPERV_SYNIC2 148 + #define KVM_CAP_HYPERV_VP_INDEX 149 ++#define KVM_CAP_S390_BPB 152 + + #ifdef KVM_CAP_IRQ_ROUTING + +diff --git a/tools/perf/.gitignore 
b/tools/perf/.gitignore +index 643cc4ba6872..3e5135dded16 100644 +--- a/tools/perf/.gitignore ++++ b/tools/perf/.gitignore +@@ -31,5 +31,6 @@ config.mak.autogen + .config-detected + util/intel-pt-decoder/inat-tables.c + arch/*/include/generated/ ++trace/beauty/generated/ + pmu-events/pmu-events.c + pmu-events/jevents +diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c +index 3b570e808b31..b205c1340456 100644 +--- a/tools/perf/builtin-record.c ++++ b/tools/perf/builtin-record.c +@@ -926,15 +926,6 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) + } + } + +- /* +- * If we have just single event and are sending data +- * through pipe, we need to force the ids allocation, +- * because we synthesize event name through the pipe +- * and need the id for that. +- */ +- if (data->is_pipe && rec->evlist->nr_entries == 1) +- rec->opts.sample_id = true; +- + if (record__open(rec) != 0) { + err = -1; + goto out_child; +diff --git a/tools/perf/perf.h b/tools/perf/perf.h +index de1debcd3ee7..55086389fc06 100644 +--- a/tools/perf/perf.h ++++ b/tools/perf/perf.h +@@ -61,7 +61,6 @@ struct record_opts { + bool tail_synthesize; + bool overwrite; + bool ignore_missing_thread; +- bool sample_id; + unsigned int freq; + unsigned int mmap_pages; + unsigned int auxtrace_mmap_pages; +diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c +index 6f09e4962dad..1e97937b03a9 100644 +--- a/tools/perf/util/record.c ++++ b/tools/perf/util/record.c +@@ -137,7 +137,6 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts, + struct perf_evsel *evsel; + bool use_sample_identifier = false; + bool use_comm_exec; +- bool sample_id = opts->sample_id; + + /* + * Set the evsel leader links before we configure attributes, +@@ -164,7 +163,8 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts, + * match the id. + */ + use_sample_identifier = perf_can_sample_identifier(); +- sample_id = true; ++ evlist__for_each_entry(evlist, evsel) ++ perf_evsel__set_sample_id(evsel, use_sample_identifier); + } else if (evlist->nr_entries > 1) { + struct perf_evsel *first = perf_evlist__first(evlist); + +@@ -174,10 +174,6 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts, + use_sample_identifier = perf_can_sample_identifier(); + break; + } +- sample_id = true; +- } +- +- if (sample_id) { + evlist__for_each_entry(evlist, evsel) + perf_evsel__set_sample_id(evsel, use_sample_identifier); + } |
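
Most of the churn in the cpufeatures hunks above is bookkeeping for the speculation-mitigation backports: every X86_FEATURE_* constant encodes a (word, bit) pair as word*32 + bit, the capability table grows a 19th word for the CPUID 0x00000007:0 (EDX) leaf (hence the BUILD_BUG_ON_ZERO(NCAPINTS != 19) checks in disabled-features.h and required-features.h), and the new DISABLE_PTI bit in DISABLED_MASK7 keeps X86_FEATURE_PTI reading as absent when CONFIG_PAGE_TABLE_ISOLATION is off. The standalone C sketch below illustrates that word/bit scheme and the disabled-mask filtering in outline only; the names caps, disabled and my_cpu_has are hypothetical stand-ins, not the kernel's implementation.

/*
 * Illustrative sketch only -- not kernel code. caps[], disabled[] and
 * my_cpu_has() are invented for this example; NCAPINTS == 19 mirrors
 * the BUILD_BUG_ON_ZERO(NCAPINTS != 19) checks in the hunks above.
 */
#include <stdint.h>
#include <stdio.h>

#define NCAPINTS 19                      /* number of 32-bit capability words */

/* Feature constants encode (word * 32 + bit); PTI is word 7, bit 11. */
#define X86_FEATURE_PTI  (7 * 32 + 11)
#define X86_FEATURE_SSBD (7 * 32 + 17)

static uint32_t caps[NCAPINTS];          /* bits as discovered by CPUID probing */
static uint32_t disabled[NCAPINTS] = { 0 };
/* analogue of DISABLED_MASKn: a kernel built without
 * CONFIG_PAGE_TABLE_ISOLATION would in effect set
 * disabled[7] = 1u << (X86_FEATURE_PTI & 31); */

static int my_cpu_has(unsigned int feature)
{
	unsigned int word = feature / 32;    /* which capability word */
	unsigned int bit = feature & 31;     /* which bit inside that word */

	if (disabled[word] & (1u << bit))
		return 0;                    /* compiled-out features always read absent */
	return (caps[word] >> bit) & 1;
}

int main(void)
{
	caps[7] = (1u << (X86_FEATURE_PTI & 31)) | (1u << (X86_FEATURE_SSBD & 31));
	printf("PTI:  %d\n", my_cpu_has(X86_FEATURE_PTI));
	printf("SSBD: %d\n", my_cpu_has(X86_FEATURE_SSBD));
	return 0;
}

Compiled standalone, this prints 1 for each bit that is both set in caps[] and not masked off at build time; setting disabled[7] as in the comment makes the PTI query report 0 even though the probed bit is set, which is the same effect DISABLED_MASK7 gaining (DISABLE_PTI) has for kernels built without page-table isolation.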