 0000_README               |    4 ++++
 1215_linux-5.10.216.patch | 4009 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4013 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index fe9c9853..0f28b50e 100644
--- a/0000_README
+++ b/0000_README
@@ -903,6 +903,10 @@ Patch: 1214_linux-5.10.215.patch
From: https://www.kernel.org
Desc: Linux 5.10.215
+Patch: 1215_linux-5.10.216.patch
+From: https://www.kernel.org
+Desc: Linux 5.10.216
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1215_linux-5.10.216.patch b/1215_linux-5.10.216.patch
new file mode 100644
index 00000000..6dc594cd
--- /dev/null
+++ b/1215_linux-5.10.216.patch
@@ -0,0 +1,4009 @@
+diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq
+index b8ebff4b1c4ca..4514cf9fc7a15 100644
+--- a/Documentation/ABI/testing/sysfs-class-devfreq
++++ b/Documentation/ABI/testing/sysfs-class-devfreq
+@@ -66,6 +66,9 @@ Description:
+
+ echo 0 > /sys/class/devfreq/.../trans_stat
+
++ If the transition table is bigger than PAGE_SIZE, reading
++ this will return an -EFBIG error.
++
+ What: /sys/class/devfreq/.../userspace/set_freq
+ Date: September 2011
+ Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
+diff --git a/Makefile b/Makefile
+index 2af799d3ce78b..6fe6554ecfb8c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 215
++SUBLEVEL = 216
+ EXTRAVERSION =
+ NAME = Dare mighty things
+
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 240277d5626c8..72e4cef062aca 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -9,6 +9,14 @@
+ #
+ source "arch/$(SRCARCH)/Kconfig"
+
++config ARCH_CONFIGURES_CPU_MITIGATIONS
++ bool
++
++if !ARCH_CONFIGURES_CPU_MITIGATIONS
++config CPU_MITIGATIONS
++ def_bool y
++endif
++
+ menu "General architecture-dependent options"
+
+ config CRASH_CORE
+diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
+index dcaa44e408ace..27f4194b376bb 100644
+--- a/arch/arc/boot/dts/hsdk.dts
++++ b/arch/arc/boot/dts/hsdk.dts
+@@ -205,7 +205,6 @@ dmac_cfg_clk: dmac-gpu-cfg-clk {
+ };
+
+ gmac: ethernet@8000 {
+- #interrupt-cells = <1>;
+ compatible = "snps,dwmac";
+ reg = <0x8000 0x2000>;
+ interrupts = <10>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+index 9d20cabf4f699..99515c13da3cf 100644
+--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+@@ -127,7 +127,7 @@ ethernet_phy0: ethernet-phy@5 {
+ };
+
+ &pio {
+- eth_default: eth_default {
++ eth_default: eth-default-pins {
+ tx_pins {
+ pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GBE_TXD3>,
+ <MT2712_PIN_72_GBE_TXD2__FUNC_GBE_TXD2>,
+@@ -154,7 +154,7 @@ mdio_pins {
+ };
+ };
+
+- eth_sleep: eth_sleep {
++ eth_sleep: eth-sleep-pins {
+ tx_pins {
+ pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GPIO71>,
+ <MT2712_PIN_72_GBE_TXD2__FUNC_GPIO72>,
+@@ -180,14 +180,14 @@ mdio_pins {
+ };
+ };
+
+- usb0_id_pins_float: usb0_iddig {
++ usb0_id_pins_float: usb0-iddig-pins {
+ pins_iddig {
+ pinmux = <MT2712_PIN_12_IDDIG_P0__FUNC_IDDIG_A>;
+ bias-pull-up;
+ };
+ };
+
+- usb1_id_pins_float: usb1_iddig {
++ usb1_id_pins_float: usb1-iddig-pins {
+ pins_iddig {
+ pinmux = <MT2712_PIN_14_IDDIG_P1__FUNC_IDDIG_B>;
+ bias-pull-up;
+diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+index cc3d1c99517d1..f7ce2eba10f7a 100644
+--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+@@ -249,10 +249,11 @@ topckgen: syscon@10000000 {
+ #clock-cells = <1>;
+ };
+
+- infracfg: syscon@10001000 {
++ infracfg: clock-controller@10001000 {
+ compatible = "mediatek,mt2712-infracfg", "syscon";
+ reg = <0 0x10001000 0 0x1000>;
+ #clock-cells = <1>;
++ #reset-cells = <1>;
+ };
+
+ pericfg: syscon@10003000 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+index 884930a5849a2..4454115ad8a0d 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+@@ -244,7 +244,7 @@ scpsys: power-controller@10006000 {
+ clock-names = "hif_sel";
+ };
+
+- cir: cir@10009000 {
++ cir: ir-receiver@10009000 {
+ compatible = "mediatek,mt7622-cir";
+ reg = <0 0x10009000 0 0x1000>;
+ interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_LOW>;
+@@ -275,16 +275,14 @@ thermal_calibration: calib@198 {
+ };
+ };
+
+- apmixedsys: apmixedsys@10209000 {
+- compatible = "mediatek,mt7622-apmixedsys",
+- "syscon";
++ apmixedsys: clock-controller@10209000 {
++ compatible = "mediatek,mt7622-apmixedsys";
+ reg = <0 0x10209000 0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+- topckgen: topckgen@10210000 {
+- compatible = "mediatek,mt7622-topckgen",
+- "syscon";
++ topckgen: clock-controller@10210000 {
++ compatible = "mediatek,mt7622-topckgen";
+ reg = <0 0x10210000 0 0x1000>;
+ #clock-cells = <1>;
+ };
+@@ -357,7 +355,7 @@ cci_control1: slave-if@4000 {
+ };
+
+ cci_control2: slave-if@5000 {
+- compatible = "arm,cci-400-ctrl-if";
++ compatible = "arm,cci-400-ctrl-if", "syscon";
+ interface-type = "ace";
+ reg = <0x5000 0x1000>;
+ };
+@@ -507,7 +505,6 @@ thermal: thermal@1100b000 {
+ <&pericfg CLK_PERI_AUXADC_PD>;
+ clock-names = "therm", "auxadc";
+ resets = <&pericfg MT7622_PERI_THERM_SW_RST>;
+- reset-names = "therm";
+ mediatek,auxadc = <&auxadc>;
+ mediatek,apmixedsys = <&apmixedsys>;
+ nvmem-cells = <&thermal_calibration>;
+@@ -715,9 +712,8 @@ wmac: wmac@18000000 {
+ power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
+ };
+
+- ssusbsys: ssusbsys@1a000000 {
+- compatible = "mediatek,mt7622-ssusbsys",
+- "syscon";
++ ssusbsys: clock-controller@1a000000 {
++ compatible = "mediatek,mt7622-ssusbsys";
+ reg = <0 0x1a000000 0 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+@@ -774,9 +770,8 @@ u2port1: usb-phy@1a0c5000 {
+ };
+ };
+
+- pciesys: pciesys@1a100800 {
+- compatible = "mediatek,mt7622-pciesys",
+- "syscon";
++ pciesys: clock-controller@1a100800 {
++ compatible = "mediatek,mt7622-pciesys";
+ reg = <0 0x1a100800 0 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+@@ -893,7 +888,13 @@ sata_port: sata-phy@1a243000 {
+ };
+ };
+
+- ethsys: syscon@1b000000 {
++ hifsys: clock-controller@1af00000 {
++ compatible = "mediatek,mt7622-hifsys";
++ reg = <0 0x1af00000 0 0x70>;
++ #clock-cells = <1>;
++ };
++
++ ethsys: clock-controller@1b000000 {
+ compatible = "mediatek,mt7622-ethsys",
+ "syscon";
+ reg = <0 0x1b000000 0 0x1000>;
+@@ -911,10 +912,28 @@ hsdma: dma-controller@1b007000 {
+ #dma-cells = <1>;
+ };
+
+- eth: ethernet@1b100000 {
+- compatible = "mediatek,mt7622-eth",
+- "mediatek,mt2701-eth",
++ pcie_mirror: pcie-mirror@10000400 {
++ compatible = "mediatek,mt7622-pcie-mirror",
+ "syscon";
++ reg = <0 0x10000400 0 0x10>;
++ };
++
++ wed0: wed@1020a000 {
++ compatible = "mediatek,mt7622-wed",
++ "syscon";
++ reg = <0 0x1020a000 0 0x1000>;
++ interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>;
++ };
++
++ wed1: wed@1020b000 {
++ compatible = "mediatek,mt7622-wed",
++ "syscon";
++ reg = <0 0x1020b000 0 0x1000>;
++ interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_LOW>;
++ };
++
++ eth: ethernet@1b100000 {
++ compatible = "mediatek,mt7622-eth";
+ reg = <0 0x1b100000 0 0x20000>;
+ interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 224 IRQ_TYPE_LEVEL_LOW>,
+@@ -937,6 +956,11 @@ eth: ethernet@1b100000 {
+ power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>;
+ mediatek,ethsys = <&ethsys>;
+ mediatek,sgmiisys = <&sgmiisys>;
++ mediatek,cci-control = <&cci_control2>;
++ mediatek,wed = <&wed0>, <&wed1>;
++ mediatek,pcie-mirror = <&pcie_mirror>;
++ mediatek,hifsys = <&hifsys>;
++ dma-coherent;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+index 4297c1db5a413..913ba25ea72f6 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+@@ -784,7 +784,6 @@ &pcie_phy {
+ };
+
+ &pcie0 {
+- bus-scan-delay-ms = <1000>;
+ ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
+ num-lanes = <4>;
+ pinctrl-names = "default";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+index 95bc7a5f61dd5..0cf656824e230 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+@@ -430,16 +430,22 @@ &io_domains {
+ gpio1830-supply = <&vcc_1v8>;
+ };
+
+-&pmu_io_domains {
+- status = "okay";
+- pmu1830-supply = <&vcc_1v8>;
+-};
+-
+-&pwm2 {
+- status = "okay";
++&pcie_clkreqn_cpm {
++ rockchip,pins =
++ <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ &pinctrl {
++ pinctrl-names = "default";
++ pinctrl-0 = <&q7_thermal_pin>;
++
++ gpios {
++ q7_thermal_pin: q7-thermal-pin {
++ rockchip,pins =
++ <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
++ };
++ };
++
+ i2c8 {
+ i2c8_xfer_a: i2c8-xfer {
+ rockchip,pins =
+@@ -470,6 +476,15 @@ vcc5v0_host_en: vcc5v0-host-en {
+ };
+ };
+
++&pmu_io_domains {
++ status = "okay";
++ pmu1830-supply = <&vcc_1v8>;
++};
++
++&pwm2 {
++ status = "okay";
++};
++
+ &sdhci {
+ /*
+ * Signal integrity isn't great at 200MHz but 100MHz has proven stable
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index b28fabfc91bf7..70271db833831 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -445,6 +445,14 @@ config EFI
+ allow the kernel to be booted as an EFI application. This
+ is only useful on systems that have UEFI firmware.
+
++config CC_HAVE_STACKPROTECTOR_TLS
++ def_bool $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=tp -mstack-protector-guard-offset=0)
++
++config STACKPROTECTOR_PER_TASK
++ def_bool y
++ depends on !GCC_PLUGIN_RANDSTRUCT
++ depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS
++
+ endmenu
+
+ config BUILTIN_DTB
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
+index daa679440000a..8572d23fba700 100644
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -88,6 +88,16 @@ KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
+ # architectures. It's faster to have GCC emit only aligned accesses.
+ KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
+
++ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
++prepare: stack_protector_prepare
++stack_protector_prepare: prepare0
++ $(eval KBUILD_CFLAGS += -mstack-protector-guard=tls \
++ -mstack-protector-guard-reg=tp \
++ -mstack-protector-guard-offset=$(shell \
++ awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}' \
++ include/generated/asm-offsets.h))
++endif
++
+ # arch specific predefines for sparse
+ CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS)
+
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index 5ab13570daa53..982745572945e 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -456,8 +456,8 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+ #define PAGE_SHARED __pgprot(0)
+ #define PAGE_KERNEL __pgprot(0)
+ #define swapper_pg_dir NULL
+-#define TASK_SIZE 0xffffffffUL
+-#define VMALLOC_START 0
++#define TASK_SIZE _AC(-1, UL)
++#define VMALLOC_START _AC(0, UL)
+ #define VMALLOC_END TASK_SIZE
+
+ static inline void __kernel_map_pages(struct page *page, int numpages, int enable) {}
+diff --git a/arch/riscv/include/asm/stackprotector.h b/arch/riscv/include/asm/stackprotector.h
+index 5962f8891f06f..09093af46565e 100644
+--- a/arch/riscv/include/asm/stackprotector.h
++++ b/arch/riscv/include/asm/stackprotector.h
+@@ -24,6 +24,7 @@ static __always_inline void boot_init_stack_canary(void)
+ canary &= CANARY_MASK;
+
+ current->stack_canary = canary;
+- __stack_chk_guard = current->stack_canary;
++ if (!IS_ENABLED(CONFIG_STACKPROTECTOR_PER_TASK))
++ __stack_chk_guard = current->stack_canary;
+ }
+ #endif /* _ASM_RISCV_STACKPROTECTOR_H */
+diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
+index db203442c08f9..877ff65b4e136 100644
+--- a/arch/riscv/kernel/asm-offsets.c
++++ b/arch/riscv/kernel/asm-offsets.c
+@@ -66,6 +66,9 @@ void asm_offsets(void)
+ OFFSET(TASK_THREAD_F30, task_struct, thread.fstate.f[30]);
+ OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]);
+ OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
++#ifdef CONFIG_STACKPROTECTOR
++ OFFSET(TSK_STACK_CANARY, task_struct, stack_canary);
++#endif
+
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ OFFSET(PT_EPC, pt_regs, epc);
+diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
+index 7868050ff426d..9dac6bec316e4 100644
+--- a/arch/riscv/kernel/process.c
++++ b/arch/riscv/kernel/process.c
+@@ -22,9 +22,7 @@
+ #include <asm/switch_to.h>
+ #include <asm/thread_info.h>
+
+-register unsigned long gp_in_global __asm__("gp");
+-
+-#ifdef CONFIG_STACKPROTECTOR
++#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
+ #include <linux/stackprotector.h>
+ unsigned long __stack_chk_guard __read_mostly;
+ EXPORT_SYMBOL(__stack_chk_guard);
+@@ -117,7 +115,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ /* Kernel thread */
+ memset(childregs, 0, sizeof(struct pt_regs));
+- childregs->gp = gp_in_global;
+ /* Supervisor/Machine, irqs on: */
+ childregs->status = SR_PP | SR_PIE;
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 9b3fa05e46226..0c802ade80406 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -57,6 +57,7 @@ config X86
+ select ACPI_LEGACY_TABLES_LOOKUP if ACPI
+ select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
+ select ARCH_32BIT_OFF_T if X86_32
++ select ARCH_CONFIGURES_CPU_MITIGATIONS
+ select ARCH_CLOCKSOURCE_INIT
+ select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
+ select ARCH_HAS_CPU_FINALIZE_INIT
+@@ -2408,17 +2409,17 @@ config CC_HAS_SLS
+ config CC_HAS_RETURN_THUNK
+ def_bool $(cc-option,-mfunction-return=thunk-extern)
+
+-menuconfig SPECULATION_MITIGATIONS
+- bool "Mitigations for speculative execution vulnerabilities"
++menuconfig CPU_MITIGATIONS
++ bool "Mitigations for CPU vulnerabilities"
+ default y
+ help
+- Say Y here to enable options which enable mitigations for
+- speculative execution hardware vulnerabilities.
++ Say Y here to enable options which enable mitigations for hardware
++ vulnerabilities (usually related to speculative execution).
+
+ If you say N, all mitigations will be disabled. You really
+ should know what you are doing to say so.
+
+-if SPECULATION_MITIGATIONS
++if CPU_MITIGATIONS
+
+ config PAGE_TABLE_ISOLATION
+ bool "Remove the kernel mapping in user mode"
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 3b4412c83eec0..01064ba1a657e 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -12,6 +12,7 @@
+ #include <asm/mpspec.h>
+ #include <asm/msr.h>
+ #include <asm/hardirq.h>
++#include <asm/io.h>
+
+ #define ARCH_APICTIMER_STOPS_ON_C3 1
+
+@@ -111,7 +112,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
+
+ static inline u32 native_apic_mem_read(u32 reg)
+ {
+- return *((volatile u32 *)(APIC_BASE + reg));
++ return readl((void __iomem *)(APIC_BASE + reg));
+ }
+
+ extern void native_apic_wait_icr_idle(void);
+diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
+index d502241995a39..24fca3d56c7f3 100644
+--- a/arch/x86/kernel/cpu/cpuid-deps.c
++++ b/arch/x86/kernel/cpu/cpuid-deps.c
+@@ -44,7 +44,10 @@ static const struct cpuid_dep cpuid_deps[] = {
+ { X86_FEATURE_F16C, X86_FEATURE_XMM2, },
+ { X86_FEATURE_AES, X86_FEATURE_XMM2 },
+ { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 },
++ { X86_FEATURE_GFNI, X86_FEATURE_XMM2 },
+ { X86_FEATURE_FMA, X86_FEATURE_AVX },
++ { X86_FEATURE_VAES, X86_FEATURE_AVX },
++ { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX },
+ { X86_FEATURE_AVX2, X86_FEATURE_AVX, },
+ { X86_FEATURE_AVX512F, X86_FEATURE_AVX, },
+ { X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F },
+@@ -56,9 +59,6 @@ static const struct cpuid_dep cpuid_deps[] = {
+ { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL },
+- { X86_FEATURE_GFNI, X86_FEATURE_AVX512VL },
+- { X86_FEATURE_VAES, X86_FEATURE_AVX512VL },
+- { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL },
+ { X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL },
+ { X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL },
+ { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index bb03bed14f740..5d422e725b267 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -258,7 +258,6 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
+ }
+
+ if (!strcmp(q->cra_driver_name, alg->cra_name) ||
+- !strcmp(q->cra_driver_name, alg->cra_driver_name) ||
+ !strcmp(q->cra_name, alg->cra_driver_name))
+ goto err;
+ }
+diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
+index 63c5444f0f1ae..7598778cf37fb 100644
+--- a/drivers/accessibility/speakup/main.c
++++ b/drivers/accessibility/speakup/main.c
+@@ -576,7 +576,7 @@ static u_long get_word(struct vc_data *vc)
+ }
+ attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
+ buf[cnt++] = attr_ch;
+- while (tmpx < vc->vc_cols - 1) {
++ while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
+ tmp_pos += 2;
+ tmpx++;
+ ch = get_char(vc, (u_short *)tmp_pos, &temp);
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 708c91215ec06..bcbaa4d6a0ff5 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2042,8 +2042,10 @@ static size_t binder_get_object(struct binder_proc *proc,
+ size_t object_size = 0;
+
+ read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
+- if (offset > buffer->data_size || read_size < sizeof(*hdr))
++ if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
++ !IS_ALIGNED(offset, sizeof(u32)))
+ return 0;
++
+ if (u) {
+ if (copy_from_user(object, u + offset, read_size))
+ return 0;
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index a1a8b282b99b1..b2836e8efefd2 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -418,6 +418,8 @@ static const struct usb_device_id blacklist_table[] = {
+ /* Realtek 8852BE Bluetooth devices */
+ { USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
++ { USB_DEVICE(0x0bda, 0x4853), .driver_info = BTUSB_REALTEK |
++ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index aa2f1f8aa2994..a0927c7f83d60 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -37,7 +37,11 @@ static HLIST_HEAD(clk_root_list);
+ static HLIST_HEAD(clk_orphan_list);
+ static LIST_HEAD(clk_notifier_list);
+
+-static struct hlist_head *all_lists[] = {
++/* List of registered clks that use runtime PM */
++static HLIST_HEAD(clk_rpm_list);
++static DEFINE_MUTEX(clk_rpm_list_lock);
++
++static const struct hlist_head *all_lists[] = {
+ &clk_root_list,
+ &clk_orphan_list,
+ NULL,
+@@ -59,6 +63,7 @@ struct clk_core {
+ struct clk_hw *hw;
+ struct module *owner;
+ struct device *dev;
++ struct hlist_node rpm_node;
+ struct device_node *of_node;
+ struct clk_core *parent;
+ struct clk_parent_map *parents;
+@@ -129,6 +134,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
+ pm_runtime_put_sync(core->dev);
+ }
+
++/**
++ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
++ *
++ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
++ * that disabling unused clks avoids a deadlock where a device is runtime PM
++ * resuming/suspending and the runtime PM callback is trying to grab the
++ * prepare_lock for something like clk_prepare_enable() while
++ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
++ * PM resume/suspend the device as well.
++ *
++ * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
++ * success. Otherwise the lock is released on failure.
++ *
++ * Return: 0 on success, negative errno otherwise.
++ */
++static int clk_pm_runtime_get_all(void)
++{
++ int ret;
++ struct clk_core *core, *failed;
++
++ /*
++ * Grab the list lock to prevent any new clks from being registered
++ * or unregistered until clk_pm_runtime_put_all().
++ */
++ mutex_lock(&clk_rpm_list_lock);
++
++ /*
++ * Runtime PM "get" all the devices that are needed for the clks
++ * currently registered. Do this without holding the prepare_lock, to
++ * avoid the deadlock.
++ */
++ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
++ ret = clk_pm_runtime_get(core);
++ if (ret) {
++ failed = core;
++ pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
++ dev_name(failed->dev), failed->name);
++ goto err;
++ }
++ }
++
++ return 0;
++
++err:
++ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
++ if (core == failed)
++ break;
++
++ clk_pm_runtime_put(core);
++ }
++ mutex_unlock(&clk_rpm_list_lock);
++
++ return ret;
++}
++
++/**
++ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
++ *
++ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
++ * the 'clk_rpm_list_lock'.
++ */
++static void clk_pm_runtime_put_all(void)
++{
++ struct clk_core *core;
++
++ hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
++ clk_pm_runtime_put(core);
++ mutex_unlock(&clk_rpm_list_lock);
++}
++
++static void clk_pm_runtime_init(struct clk_core *core)
++{
++ struct device *dev = core->dev;
++
++ if (dev && pm_runtime_enabled(dev)) {
++ core->rpm_enabled = true;
++
++ mutex_lock(&clk_rpm_list_lock);
++ hlist_add_head(&core->rpm_node, &clk_rpm_list);
++ mutex_unlock(&clk_rpm_list_lock);
++ }
++}
++
+ /*** locking ***/
+ static void clk_prepare_lock(void)
+ {
+@@ -1231,9 +1319,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
+ if (core->flags & CLK_IGNORE_UNUSED)
+ return;
+
+- if (clk_pm_runtime_get(core))
+- return;
+-
+ if (clk_core_is_prepared(core)) {
+ trace_clk_unprepare(core);
+ if (core->ops->unprepare_unused)
+@@ -1242,8 +1327,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
+ core->ops->unprepare(core->hw);
+ trace_clk_unprepare_complete(core);
+ }
+-
+- clk_pm_runtime_put(core);
+ }
+
+ static void __init clk_disable_unused_subtree(struct clk_core *core)
+@@ -1259,9 +1342,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
+ if (core->flags & CLK_OPS_PARENT_ENABLE)
+ clk_core_prepare_enable(core->parent);
+
+- if (clk_pm_runtime_get(core))
+- goto unprepare_out;
+-
+ flags = clk_enable_lock();
+
+ if (core->enable_count)
+@@ -1286,8 +1366,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
+
+ unlock_out:
+ clk_enable_unlock(flags);
+- clk_pm_runtime_put(core);
+-unprepare_out:
+ if (core->flags & CLK_OPS_PARENT_ENABLE)
+ clk_core_disable_unprepare(core->parent);
+ }
+@@ -1303,12 +1381,22 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
+ static int __init clk_disable_unused(void)
+ {
+ struct clk_core *core;
++ int ret;
+
+ if (clk_ignore_unused) {
+ pr_warn("clk: Not disabling unused clocks\n");
+ return 0;
+ }
+
++ pr_info("clk: Disabling unused clocks\n");
++
++ ret = clk_pm_runtime_get_all();
++ if (ret)
++ return ret;
++ /*
++ * Grab the prepare lock to keep the clk topology stable while iterating
++ * over clks.
++ */
+ clk_prepare_lock();
+
+ hlist_for_each_entry(core, &clk_root_list, child_node)
+@@ -1325,6 +1413,8 @@ static int __init clk_disable_unused(void)
+
+ clk_prepare_unlock();
+
++ clk_pm_runtime_put_all();
++
+ return 0;
+ }
+ late_initcall_sync(clk_disable_unused);
+@@ -3630,9 +3720,6 @@ static int __clk_core_init(struct clk_core *core)
+ }
+
+ clk_core_reparent_orphans_nolock();
+-
+-
+- kref_init(&core->ref);
+ out:
+ clk_pm_runtime_put(core);
+ unlock:
+@@ -3842,6 +3929,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
+ kfree(core->parents);
+ }
+
++/* Free memory allocated for a struct clk_core */
++static void __clk_release(struct kref *ref)
++{
++ struct clk_core *core = container_of(ref, struct clk_core, ref);
++
++ if (core->rpm_enabled) {
++ mutex_lock(&clk_rpm_list_lock);
++ hlist_del(&core->rpm_node);
++ mutex_unlock(&clk_rpm_list_lock);
++ }
++
++ clk_core_free_parent_map(core);
++ kfree_const(core->name);
++ kfree(core);
++}
++
+ static struct clk *
+ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
+ {
+@@ -3862,6 +3965,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
+ goto fail_out;
+ }
+
++ kref_init(&core->ref);
++
+ core->name = kstrdup_const(init->name, GFP_KERNEL);
+ if (!core->name) {
+ ret = -ENOMEM;
+@@ -3874,9 +3979,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
+ }
+ core->ops = init->ops;
+
+- if (dev && pm_runtime_enabled(dev))
+- core->rpm_enabled = true;
+ core->dev = dev;
++ clk_pm_runtime_init(core);
+ core->of_node = np;
+ if (dev && dev->driver)
+ core->owner = dev->driver->owner;
+@@ -3916,12 +4020,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
+ hw->clk = NULL;
+
+ fail_create_clk:
+- clk_core_free_parent_map(core);
+ fail_parents:
+ fail_ops:
+- kfree_const(core->name);
+ fail_name:
+- kfree(core);
++ kref_put(&core->ref, __clk_release);
+ fail_out:
+ return ERR_PTR(ret);
+ }
+@@ -4001,18 +4103,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
+ }
+ EXPORT_SYMBOL_GPL(of_clk_hw_register);
+
+-/* Free memory allocated for a clock. */
+-static void __clk_release(struct kref *ref)
+-{
+- struct clk_core *core = container_of(ref, struct clk_core, ref);
+-
+- lockdep_assert_held(&prepare_lock);
+-
+- clk_core_free_parent_map(core);
+- kfree_const(core->name);
+- kfree(core);
+-}
+-
+ /*
+ * Empty clk_ops for unregistered clocks. These are used temporarily
+ * after clk_unregister() was called on a clock and until last clock
+@@ -4065,7 +4155,7 @@ static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
+ /* Remove this clk from all parent caches */
+ static void clk_core_evict_parent_cache(struct clk_core *core)
+ {
+- struct hlist_head **lists;
++ const struct hlist_head **lists;
+ struct clk_core *root;
+
+ lockdep_assert_held(&prepare_lock);
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 216594b861191..93df6cef4f5a9 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -1639,7 +1639,7 @@ static ssize_t trans_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+ struct devfreq *df = to_devfreq(dev);
+- ssize_t len;
++ ssize_t len = 0;
+ int i, j;
+ unsigned int max_state;
+
+@@ -1648,7 +1648,7 @@ static ssize_t trans_stat_show(struct device *dev,
+ max_state = df->profile->max_state;
+
+ if (max_state == 0)
+- return sprintf(buf, "Not Supported.\n");
++ return scnprintf(buf, PAGE_SIZE, "Not Supported.\n");
+
+ mutex_lock(&df->lock);
+ if (!df->stop_polling &&
+@@ -1658,33 +1658,54 @@ static ssize_t trans_stat_show(struct device *dev,
+ }
+ mutex_unlock(&df->lock);
+
+- len = sprintf(buf, " From : To\n");
+- len += sprintf(buf + len, " :");
+- for (i = 0; i < max_state; i++)
+- len += sprintf(buf + len, "%10lu",
+- df->profile->freq_table[i]);
++ len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n");
++ len += scnprintf(buf + len, PAGE_SIZE - len, " :");
++ for (i = 0; i < max_state; i++) {
++ if (len >= PAGE_SIZE - 1)
++ break;
++ len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu",
++ df->profile->freq_table[i]);
++ }
++ if (len >= PAGE_SIZE - 1)
++ return PAGE_SIZE - 1;
+
+- len += sprintf(buf + len, " time(ms)\n");
++ len += scnprintf(buf + len, PAGE_SIZE - len, " time(ms)\n");
+
+ for (i = 0; i < max_state; i++) {
++ if (len >= PAGE_SIZE - 1)
++ break;
+ if (df->profile->freq_table[i]
+ == df->previous_freq) {
+- len += sprintf(buf + len, "*");
++ len += scnprintf(buf + len, PAGE_SIZE - len, "*");
+ } else {
+- len += sprintf(buf + len, " ");
++ len += scnprintf(buf + len, PAGE_SIZE - len, " ");
++ }
++ if (len >= PAGE_SIZE - 1)
++ break;
++
++ len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu:",
++ df->profile->freq_table[i]);
++ for (j = 0; j < max_state; j++) {
++ if (len >= PAGE_SIZE - 1)
++ break;
++ len += scnprintf(buf + len, PAGE_SIZE - len, "%10u",
++ df->stats.trans_table[(i * max_state) + j]);
+ }
+- len += sprintf(buf + len, "%10lu:",
+- df->profile->freq_table[i]);
+- for (j = 0; j < max_state; j++)
+- len += sprintf(buf + len, "%10u",
+- df->stats.trans_table[(i * max_state) + j]);
++ if (len >= PAGE_SIZE - 1)
++ break;
++ len += scnprintf(buf + len, PAGE_SIZE - len, "%10llu\n", (u64)
++ jiffies64_to_msecs(df->stats.time_in_state[i]));
++ }
++
++ if (len < PAGE_SIZE - 1)
++ len += scnprintf(buf + len, PAGE_SIZE - len, "Total transition : %u\n",
++ df->stats.total_trans);
+
+- len += sprintf(buf + len, "%10llu\n", (u64)
+- jiffies64_to_msecs(df->stats.time_in_state[i]));
++ if (len >= PAGE_SIZE - 1) {
++ pr_warn_once("devfreq transition table exceeds PAGE_SIZE. Disabling\n");
++ return -EFBIG;
+ }
+
+- len += sprintf(buf + len, "Total transition : %u\n",
+- df->stats.total_trans);
+ return len;
+ }
+
+diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
+index f5a84c8463945..db506e1f7ef4e 100644
+--- a/drivers/dma/idma64.c
++++ b/drivers/dma/idma64.c
+@@ -167,6 +167,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
+ u32 status_err;
+ unsigned short i;
+
++ /* Since IRQ may be shared, check if DMA controller is powered on */
++ if (status == GENMASK(31, 0))
++ return IRQ_NONE;
++
+ dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
+
+ /* Check if we have any interrupt from the DMA controller */
+diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
+index 04202d75f4eed..695feb3443d80 100644
+--- a/drivers/dma/owl-dma.c
++++ b/drivers/dma/owl-dma.c
+@@ -249,7 +249,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
+ else
+ regval &= ~val;
+
+- writel(val, pchan->base + reg);
++ writel(regval, pchan->base + reg);
+ }
+
+ static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
+@@ -273,7 +273,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
+ else
+ regval &= ~val;
+
+- writel(val, od->base + reg);
++ writel(regval, od->base + reg);
+ }
+
+ static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
+diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
+index 6c709803203ad..058c3a6ed6bbf 100644
+--- a/drivers/dma/xilinx/xilinx_dpdma.c
++++ b/drivers/dma/xilinx/xilinx_dpdma.c
+@@ -213,7 +213,8 @@ struct xilinx_dpdma_tx_desc {
+ * @running: true if the channel is running
+ * @first_frame: flag for the first frame of stream
+ * @video_group: flag if multi-channel operation is needed for video channels
+- * @lock: lock to access struct xilinx_dpdma_chan
++ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
++ * @vchan.lock, if both are to be held.
+ * @desc_pool: descriptor allocation pool
+ * @err_task: error IRQ bottom half handler
+ * @desc: References to descriptors being processed
+@@ -1101,12 +1102,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
+ * Complete the active descriptor, if any, promote the pending
+ * descriptor to active, and queue the next transfer, if any.
+ */
++ spin_lock(&chan->vchan.lock);
+ if (chan->desc.active)
+ vchan_cookie_complete(&chan->desc.active->vdesc);
+ chan->desc.active = pending;
+ chan->desc.pending = NULL;
+
+ xilinx_dpdma_chan_queue_transfer(chan);
++ spin_unlock(&chan->vchan.lock);
+
+ out:
+ spin_unlock_irqrestore(&chan->lock, flags);
+@@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
+
+- spin_lock_irqsave(&chan->vchan.lock, flags);
++ spin_lock_irqsave(&chan->lock, flags);
++ spin_lock(&chan->vchan.lock);
+ if (vchan_issue_pending(&chan->vchan))
+ xilinx_dpdma_chan_queue_transfer(chan);
+- spin_unlock_irqrestore(&chan->vchan.lock, flags);
++ spin_unlock(&chan->vchan.lock);
++ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+
+ static int xilinx_dpdma_config(struct dma_chan *dchan,
+@@ -1491,7 +1496,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
+ XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
+
+ spin_lock_irqsave(&chan->lock, flags);
++ spin_lock(&chan->vchan.lock);
+ xilinx_dpdma_chan_queue_transfer(chan);
++ spin_unlock(&chan->vchan.lock);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 1b4c7ced8b92c..4a95a624fca7b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1259,6 +1259,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ err_bo_create:
+ unreserve_mem_limit(adev, size, alloc_domain, !!sg);
+ err_reserve_limit:
++ amdgpu_sync_free(&(*mem)->sync);
+ mutex_destroy(&(*mem)->lock);
+ kfree(*mem);
+ err:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 8445bb7ae06ab..ad3de971a0878 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2201,6 +2201,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
+ trace_amdgpu_vm_bo_map(bo_va, mapping);
+ }
+
++/* Validate operation parameters to prevent potential abuse */
++static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
++ struct amdgpu_bo *bo,
++ uint64_t saddr,
++ uint64_t offset,
++ uint64_t size)
++{
++ uint64_t tmp, lpfn;
++
++ if (saddr & AMDGPU_GPU_PAGE_MASK
++ || offset & AMDGPU_GPU_PAGE_MASK
++ || size & AMDGPU_GPU_PAGE_MASK)
++ return -EINVAL;
++
++ if (check_add_overflow(saddr, size, &tmp)
++ || check_add_overflow(offset, size, &tmp)
++ || size == 0 /* which also leads to end < begin */)
++ return -EINVAL;
++
++ /* make sure object fit at this offset */
++ if (bo && offset + size > amdgpu_bo_size(bo))
++ return -EINVAL;
++
++ /* Ensure last pfn not exceed max_pfn */
++ lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
++ if (lpfn >= adev->vm_manager.max_pfn)
++ return -EINVAL;
++
++ return 0;
++}
++
+ /**
+ * amdgpu_vm_bo_map - map bo inside a vm
+ *
+@@ -2227,21 +2258,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo = bo_va->base.bo;
+ struct amdgpu_vm *vm = bo_va->base.vm;
+ uint64_t eaddr;
++ int r;
+
+- /* validate the parameters */
+- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
+- return -EINVAL;
+- if (saddr + size <= saddr || offset + size <= offset)
+- return -EINVAL;
+-
+- /* make sure object fit at this offset */
+- eaddr = saddr + size - 1;
+- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
+- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
+- return -EINVAL;
++ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
++ if (r)
++ return r;
+
+ saddr /= AMDGPU_GPU_PAGE_SIZE;
+- eaddr /= AMDGPU_GPU_PAGE_SIZE;
++ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
+
+ tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
+ if (tmp) {
+@@ -2294,17 +2318,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
+ uint64_t eaddr;
+ int r;
+
+- /* validate the parameters */
+- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
+- return -EINVAL;
+- if (saddr + size <= saddr || offset + size <= offset)
+- return -EINVAL;
+-
+- /* make sure object fit at this offset */
+- eaddr = saddr + size - 1;
+- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
+- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
+- return -EINVAL;
++ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
++ if (r)
++ return r;
+
+ /* Allocate all the needed memory */
+ mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+@@ -2318,7 +2334,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
+ }
+
+ saddr /= AMDGPU_GPU_PAGE_SIZE;
+- eaddr /= AMDGPU_GPU_PAGE_SIZE;
++ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
+
+ mapping->start = saddr;
+ mapping->last = eaddr;
+@@ -2405,10 +2421,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
+ LIST_HEAD(removed);
+ uint64_t eaddr;
++ int r;
++
++ r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
++ if (r)
++ return r;
+
+- eaddr = saddr + size - 1;
+ saddr /= AMDGPU_GPU_PAGE_SIZE;
+- eaddr /= AMDGPU_GPU_PAGE_SIZE;
++ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
+
+ /* Allocate all the needed memory */
+ before = kzalloc(sizeof(*before), GFP_KERNEL);
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+index 1bd330d431479..ad36f337c8a88 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+@@ -390,17 +390,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+ u32 ref_and_mask = 0;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+-
+- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
+- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
+- amdgpu_ring_write(ring, ref_and_mask); /* reference */
+- amdgpu_ring_write(ring, ref_and_mask); /* mask */
+- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
++ if (ring->me > 1) {
++ amdgpu_asic_flush_hdp(adev, ring);
++ } else {
++ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
++
++ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
++ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
++ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
++ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
++ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
++ amdgpu_ring_write(ring, ref_and_mask); /* reference */
++ amdgpu_ring_write(ring, ref_and_mask); /* mask */
++ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
++ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
++ }
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
+index d5fd418236246..7872a04e9a721 100644
+--- a/drivers/gpu/drm/drm_client_modeset.c
++++ b/drivers/gpu/drm/drm_client_modeset.c
+@@ -774,6 +774,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
+ unsigned int total_modes_count = 0;
+ struct drm_client_offset *offsets;
+ unsigned int connector_count = 0;
++ /* points to modes protected by mode_config.mutex */
+ struct drm_display_mode **modes;
+ struct drm_crtc **crtcs;
+ int i, ret = 0;
+@@ -842,7 +843,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
+ drm_client_pick_crtcs(client, connectors, connector_count,
+ crtcs, modes, 0, width, height);
+ }
+- mutex_unlock(&dev->mode_config.mutex);
+
+ drm_client_modeset_release(client);
+
+@@ -872,6 +872,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
+ modeset->y = offset->y;
+ }
+ }
++ mutex_unlock(&dev->mode_config.mutex);
+
+ mutex_unlock(&client->modeset_mutex);
+ out:
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
+index d204ea8a5618e..5cdf0d8d4bc18 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
+@@ -23,6 +23,7 @@
+ */
+
+ #include "nouveau_drv.h"
++#include "nouveau_bios.h"
+ #include "nouveau_reg.h"
+ #include "dispnv04/hw.h"
+ #include "nouveau_encoder.h"
+@@ -1672,7 +1673,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
+ */
+ if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
+ if (*conn == 0xf2005014 && *conf == 0xffffffff) {
+- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
++ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
+ return false;
+ }
+ }
+@@ -1758,26 +1759,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
+ #ifdef __powerpc__
+ /* Apple iMac G4 NV17 */
+ if (of_machine_is_compatible("PowerMac4,5")) {
+- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
+- fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
++ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
++ fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
+ return;
+ }
+ #endif
+
+ /* Make up some sane defaults */
+ fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
+- bios->legacy.i2c_indices.crt, 1, 1);
++ bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
+
+ if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
+ bios->legacy.i2c_indices.tv,
+- all_heads, 0);
++ all_heads, DCB_OUTPUT_A);
+
+ else if (bios->tmds.output0_script_ptr ||
+ bios->tmds.output1_script_ptr)
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
+ bios->legacy.i2c_indices.panel,
+- all_heads, 1);
++ all_heads, DCB_OUTPUT_B);
+ }
+
+ static int
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+index 4bf486b571013..cb05f7f48a98b 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+@@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
+ return ERR_PTR(-EINVAL);
+ }
+
++static void of_fini(void *p)
++{
++ kfree(p);
++}
++
+ const struct nvbios_source
+ nvbios_of = {
+ .name = "OpenFirmware",
+ .init = of_init,
+- .fini = (void(*)(void *))kfree,
++ .fini = of_fini,
+ .read = of_read,
+ .size = of_size,
+ .rw = false,
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+index 02c4eb28cef44..f3db07325dbd8 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+@@ -221,8 +221,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
+ void __iomem *map = NULL;
+
+ /* Already mapped? */
+- if (refcount_inc_not_zero(&iobj->maps))
++ if (refcount_inc_not_zero(&iobj->maps)) {
++ /* read barrier match the wmb on refcount set */
++ smp_rmb();
+ return iobj->map;
++ }
+
+ /* Take the lock, and re-check that another thread hasn't
+ * already mapped the object in the meantime.
+@@ -249,6 +252,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
+ iobj->base.memory.ptrs = &nv50_instobj_fast;
+ else
+ iobj->base.memory.ptrs = &nv50_instobj_slow;
++ /* barrier to ensure the ptrs are written before refcount is set */
++ smp_wmb();
+ refcount_set(&iobj->maps, 1);
+ }
+
+diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+index eb43503ec97b3..6134432e4918d 100644
+--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
++++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+@@ -261,8 +261,6 @@ static int visionox_rm69299_remove(struct mipi_dsi_device *dsi)
+ struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(ctx->dsi);
+- mipi_dsi_device_unregister(ctx->dsi);
+-
+ drm_panel_remove(&ctx->panel);
+ return 0;
+ }
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index 14811d42a5a91..4f8e8f392daba 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -56,7 +56,6 @@
+ /* flags */
+ #define I2C_HID_STARTED 0
+ #define I2C_HID_RESET_PENDING 1
+-#define I2C_HID_READ_PENDING 2
+
+ #define I2C_HID_PWR_ON 0x00
+ #define I2C_HID_PWR_SLEEP 0x01
+@@ -256,7 +255,6 @@ static int __i2c_hid_command(struct i2c_client *client,
+ msg[1].len = data_len;
+ msg[1].buf = buf_recv;
+ msg_num = 2;
+- set_bit(I2C_HID_READ_PENDING, &ihid->flags);
+ }
+
+ if (wait)
+@@ -264,9 +262,6 @@ static int __i2c_hid_command(struct i2c_client *client,
+
+ ret = i2c_transfer(client->adapter, msg, msg_num);
+
+- if (data_len > 0)
+- clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
+-
+ if (ret != msg_num)
+ return ret < 0 ? ret : -EIO;
+
+@@ -538,9 +533,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
+ {
+ struct i2c_hid *ihid = dev_id;
+
+- if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
+- return IRQ_HANDLED;
+-
+ i2c_hid_get_input(ihid);
+
+ return IRQ_HANDLED;
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index 34fecf97a355b..e8a89e18c640e 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -2013,13 +2013,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ * Returns negative errno, else the number of messages executed.
+ *
+ * Adapter lock must be held when calling this function. No debug logging
+- * takes place. adap->algo->master_xfer existence isn't checked.
++ * takes place.
+ */
+ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ {
+ unsigned long orig_jiffies;
+ int ret, try;
+
++ if (!adap->algo->master_xfer) {
++ dev_dbg(&adap->dev, "I2C level transfers not supported\n");
++ return -EOPNOTSUPP;
++ }
++
+ if (WARN_ON(!msgs || num < 1))
+ return -EINVAL;
+
+@@ -2086,11 +2091,6 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ {
+ int ret;
+
+- if (!adap->algo->master_xfer) {
+- dev_dbg(&adap->dev, "I2C level transfers not supported\n");
+- return -EOPNOTSUPP;
+- }
+-
+ /* REVISIT the fault reporting model here is weak:
+ *
+ * - When we get an error after receiving N bytes from a slave,
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 2a30b25c5e7e5..26c66685a43dd 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -1057,23 +1057,26 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
+ }
+ }
+
+-static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id)
++static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
++ enum ib_cm_state old_state)
+ {
+ struct cm_id_private *cm_id_priv;
+
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+- pr_err("%s: cm_id=%p timed out. state=%d refcnt=%d\n", __func__,
+- cm_id, cm_id->state, refcount_read(&cm_id_priv->refcount));
++ pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
++ cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
+ }
+
+ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
+ {
+ struct cm_id_private *cm_id_priv;
++ enum ib_cm_state old_state;
+ struct cm_work *work;
+ int ret;
+
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ spin_lock_irq(&cm_id_priv->lock);
++ old_state = cm_id->state;
+ retest:
+ switch (cm_id->state) {
+ case IB_CM_LISTEN:
+@@ -1187,7 +1190,7 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
+ msecs_to_jiffies(
+ CM_DESTROY_ID_WAIT_TIMEOUT));
+ if (!ret) /* timeout happened */
+- cm_destroy_id_wait_timeout(cm_id);
++ cm_destroy_id_wait_timeout(cm_id, old_state);
+ } while (!ret);
+
+ while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
+diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
+index cca7a4a6bd82d..7f12a9b05c872 100644
+--- a/drivers/infiniband/hw/mlx5/mad.c
++++ b/drivers/infiniband/hw/mlx5/mad.c
+@@ -166,7 +166,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+- if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
++ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
++ !mlx5_core_mp_enabled(mdev)) {
+ /* set local port to one for Function-Per-Port HCA. */
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
+index 95f0de0c8b49c..0505c81aa8d04 100644
+--- a/drivers/infiniband/sw/rxe/rxe.c
++++ b/drivers/infiniband/sw/rxe/rxe.c
+@@ -35,6 +35,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
+
+ if (rxe->tfm)
+ crypto_free_shash(rxe->tfm);
++
++ mutex_destroy(&rxe->usdev_lock);
+ }
+
+ /* initialize rxe device parameters */
+diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
+index aabf56272b86d..02e3183a4c67e 100644
+--- a/drivers/iommu/intel/svm.c
++++ b/drivers/iommu/intel/svm.c
+@@ -33,7 +33,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
+ struct page *pages;
+ int irq, ret;
+
+- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
++ pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
+ if (!pages) {
+ pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
+ iommu->name);
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index c1f3cd82caf33..4e486cccc4cc6 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -4508,13 +4508,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
+ set_bit(i, bitmap);
+ }
+
+- if (err) {
+- if (i > 0)
+- its_vpe_irq_domain_free(domain, virq, i);
+-
+- its_lpi_free(bitmap, base, nr_ids);
+- its_free_prop_table(vprop_page);
+- }
++ if (err)
++ its_vpe_irq_domain_free(domain, virq, i);
+
+ return err;
+ }
+diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
+index c5663398c6b7d..28f5450e41306 100644
+--- a/drivers/mailbox/imx-mailbox.c
++++ b/drivers/mailbox/imx-mailbox.c
+@@ -331,8 +331,6 @@ static int imx_mu_startup(struct mbox_chan *chan)
+ break;
+ }
+
+- priv->suspend = true;
+-
+ return 0;
+ }
+
+@@ -550,8 +548,6 @@ static int imx_mu_probe(struct platform_device *pdev)
+
+ clk_disable_unprepare(priv->clk);
+
+- priv->suspend = false;
+-
+ return 0;
+
+ disable_runtime_pm:
+@@ -614,6 +610,8 @@ static int __maybe_unused imx_mu_suspend_noirq(struct device *dev)
+ if (!priv->clk)
+ priv->xcr = imx_mu_read(priv, priv->dcfg->xCR);
+
++ priv->suspend = true;
++
+ return 0;
+ }
+
+@@ -632,6 +630,8 @@ static int __maybe_unused imx_mu_resume_noirq(struct device *dev)
+ if (!imx_mu_read(priv, priv->dcfg->xCR) && !priv->clk)
+ imx_mu_write(priv, priv->xcr, priv->dcfg->xCR);
+
++ priv->suspend = false;
++
+ return 0;
+ }
+
+diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
+index 97b479223fe52..2f7ab5df1c584 100644
+--- a/drivers/media/cec/core/cec-adap.c
++++ b/drivers/media/cec/core/cec-adap.c
+@@ -1116,20 +1116,6 @@ void cec_received_msg_ts(struct cec_adapter *adap,
+ if (valid_la && min_len) {
+ /* These messages have special length requirements */
+ switch (cmd) {
+- case CEC_MSG_TIMER_STATUS:
+- if (msg->msg[2] & 0x10) {
+- switch (msg->msg[2] & 0xf) {
+- case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
+- case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
+- if (msg->len < 5)
+- valid_la = false;
+- break;
+- }
+- } else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
+- if (msg->len < 5)
+- valid_la = false;
+- }
+- break;
+ case CEC_MSG_RECORD_ON:
+ switch (msg->msg[2]) {
+ case CEC_OP_RECORD_SRC_OWN:
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 91586d8afcaa0..2809338a5c3ae 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -115,7 +115,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
+
+- {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
++ {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
+diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
+index 26b265e4384a1..9ee7daa6fa3c8 100644
+--- a/drivers/mtd/nand/raw/diskonchip.c
++++ b/drivers/mtd/nand/raw/diskonchip.c
+@@ -53,7 +53,7 @@ static unsigned long doc_locations[] __initdata = {
+ 0xe8000, 0xea000, 0xec000, 0xee000,
+ #endif
+ #endif
+- 0xffffffff };
++};
+
+ static struct mtd_info *doclist = NULL;
+
+@@ -1552,7 +1552,7 @@ static int __init init_nanddoc(void)
+ if (ret < 0)
+ return ret;
+ } else {
+- for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
++ for (i = 0; i < ARRAY_SIZE(doc_locations); i++) {
+ doc_probe(doc_locations[i]);
+ }
+ }
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index d59ea5148c16c..60645ea7c0f80 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -352,7 +352,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+ io_sq->bounce_buf_ctrl.next_to_use = 0;
+
+- size = io_sq->bounce_buf_ctrl.buffer_size *
++ size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
+ io_sq->bounce_buf_ctrl.buffers_num;
+
+ dev_node = dev_to_node(ena_dev->dmadev);
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index fa65971949fce..9149c82c0a564 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -1105,8 +1105,11 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
+ {
+ bool print_once = true;
++ bool is_xdp_ring;
+ u32 i;
+
++ is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);
++
+ for (i = 0; i < tx_ring->ring_size; i++) {
+ struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
+
+@@ -1126,10 +1129,15 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
+
+ ena_unmap_tx_buff(tx_ring, tx_info);
+
+- dev_kfree_skb_any(tx_info->skb);
++ if (is_xdp_ring)
++ xdp_return_frame(tx_info->xdpf);
++ else
++ dev_kfree_skb_any(tx_info->skb);
+ }
+- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+- tx_ring->qid));
++
++ if (!is_xdp_ring)
++ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
++ tx_ring->qid));
+ }
+
+ static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
+@@ -3672,10 +3680,11 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
+ {
+ struct ena_ring *tx_ring;
+ struct ena_ring *rx_ring;
+- int i, budget, rc;
++ int qid, budget, rc;
+ int io_queue_count;
+
+ io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
++
+ /* Make sure the driver doesn't turn the device in other process */
+ smp_rmb();
+
+@@ -3688,27 +3697,29 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
+ if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
+ return;
+
+- budget = ENA_MONITORED_TX_QUEUES;
++ budget = min_t(u32, io_queue_count, ENA_MONITORED_TX_QUEUES);
+
+- for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
+- tx_ring = &adapter->tx_ring[i];
+- rx_ring = &adapter->rx_ring[i];
++ qid = adapter->last_monitored_tx_qid;
++
++ while (budget) {
++ qid = (qid + 1) % io_queue_count;
++
++ tx_ring = &adapter->tx_ring[qid];
++ rx_ring = &adapter->rx_ring[qid];
+
+ rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
+ if (unlikely(rc))
+ return;
+
+- rc = !ENA_IS_XDP_INDEX(adapter, i) ?
++ rc = !ENA_IS_XDP_INDEX(adapter, qid) ?
+ check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
+ if (unlikely(rc))
+ return;
+
+ budget--;
+- if (!budget)
+- break;
+ }
+
+- adapter->last_monitored_tx_qid = i % io_queue_count;
++ adapter->last_monitored_tx_qid = qid;
+ }
+
+ /* trigger napi schedule after 2 consecutive detections */
+diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
+index b455b60a5434b..7ad9a47156912 100644
+--- a/drivers/net/ethernet/broadcom/b44.c
++++ b/drivers/net/ethernet/broadcom/b44.c
+@@ -2027,12 +2027,14 @@ static int b44_set_pauseparam(struct net_device *dev,
+ bp->flags |= B44_FLAG_TX_PAUSE;
+ else
+ bp->flags &= ~B44_FLAG_TX_PAUSE;
+- if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+- b44_halt(bp);
+- b44_init_rings(bp);
+- b44_init_hw(bp, B44_FULL_RESET);
+- } else {
+- __b44_set_flow_ctrl(bp, bp->flags);
++ if (netif_running(dev)) {
++ if (bp->flags & B44_FLAG_PAUSE_AUTO) {
++ b44_halt(bp);
++ b44_init_rings(bp);
++ b44_init_hw(bp, B44_FULL_RESET);
++ } else {
++ __b44_set_flow_ctrl(bp, bp->flags);
++ }
+ }
+ spin_unlock_irq(&bp->lock);
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 6ea2d94c3ddea..35a903f6df215 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -15488,8 +15488,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
+ I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
+ if (val < MAX_FRAME_SIZE_DEFAULT)
+- dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
+- pf->hw.port, val);
++ dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
++ pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
+
+ /* Add a filter to drop all Flow control frames from any VSI from being
+ * transmitted. By doing so we stop a malicious VF from sending out
+@@ -16023,7 +16023,7 @@ static int __init i40e_init_module(void)
+ * since we need to be able to guarantee forward progress even under
+ * memory pressure.
+ */
+- i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
++ i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
+ if (!i40e_wq) {
+ pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
+ return -ENOMEM;
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index b64801bc216bb..65259722a5728 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -2642,6 +2642,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+ }
+
++/**
++ * iavf_is_tc_config_same - Compare the mqprio TC config with the
++ * TC config already configured on this adapter.
++ * @adapter: board private structure
++ * @mqprio_qopt: TC config received from kernel.
++ *
++ * This function compares the TC config received from the kernel
++ * with the config already configured on the adapter.
++ *
++ * Return: True if configuration is same, false otherwise.
++ **/
++static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
++ struct tc_mqprio_qopt *mqprio_qopt)
++{
++ struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
++ int i;
++
++ if (adapter->num_tc != mqprio_qopt->num_tc)
++ return false;
++
++ for (i = 0; i < adapter->num_tc; i++) {
++ if (ch[i].count != mqprio_qopt->count[i] ||
++ ch[i].offset != mqprio_qopt->offset[i])
++ return false;
++ }
++ return true;
++}
++
+ /**
+ * __iavf_setup_tc - configure multiple traffic classes
+ * @netdev: network interface device structure
+@@ -2698,7 +2726,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
+ if (ret)
+ return ret;
+ /* Return if same TC config is requested */
+- if (adapter->num_tc == num_tc)
++ if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
+ return 0;
+ adapter->num_tc = num_tc;
+
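The iavf hunk above fixes a stale-config check: comparing only the number of traffic classes misses requests that keep num_tc but move the queue ranges. A self-contained sketch of that failure mode, with made-up values:

	#include <stdbool.h>
	#include <stdio.h>

	struct tc_slice { int count, offset; };

	static bool tc_config_same(const struct tc_slice *cur, int cur_n,
				   const struct tc_slice *req, int req_n)
	{
		if (cur_n != req_n)
			return false;
		for (int i = 0; i < cur_n; i++)
			if (cur[i].count != req[i].count ||
			    cur[i].offset != req[i].offset)
				return false;
		return true;
	}

	int main(void)
	{
		struct tc_slice cur[] = { { 4, 0 }, { 4, 4 } };
		struct tc_slice req[] = { { 2, 0 }, { 6, 2 } };	/* same num_tc, new layout */

		/* a count-only check would wrongly report these as identical */
		printf("same=%d\n", tc_config_same(cur, 2, req, 2));	/* prints 0 */
		return 0;
	}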
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index e549b09c347a7..fb4b18be503c5 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -3146,18 +3146,18 @@ int rvu_nix_init(struct rvu *rvu)
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
++ }
+
+- /* Set chan/link to backpressure TL3 instead of TL2 */
+- rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
++ /* Set chan/link to backpressure TL3 instead of TL2 */
++ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+
+- /* Disable SQ manager's sticky mode operation (set TM6 = 0)
+- * This sticky mode is known to cause SQ stalls when multiple
+- * SQs are mapped to same SMQ and transmitting pkts at a time.
+- */
+- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+- cfg &= ~BIT_ULL(15);
+- rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
+- }
++ /* Disable SQ manager's sticky mode operation (set TM6 = 0)
++ * This sticky mode is known to cause SQ stalls when multiple
++ * SQs are mapped to same SMQ and transmitting pkts at a time.
++ */
++ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
++ cfg &= ~BIT_ULL(15);
++ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
+
+ ltdefs = rvu->kpu.lt_def;
+ /* Calibrate X2P bus to check if CGX/LBK links are fine */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index 39c17e9039157..0ba43c93abb26 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -114,15 +114,18 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
+ return token;
+ }
+
+-static int cmd_alloc_index(struct mlx5_cmd *cmd)
++static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
+ {
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+ ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
+- if (ret < cmd->max_reg_cmds)
++ if (ret < cmd->max_reg_cmds) {
+ clear_bit(ret, &cmd->bitmask);
++ ent->idx = ret;
++ cmd->ent_arr[ent->idx] = ent;
++ }
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+
+ return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
+@@ -912,7 +915,7 @@ static void cmd_work_handler(struct work_struct *work)
+ sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
+ down(sem);
+ if (!ent->page_queue) {
+- alloc_ret = cmd_alloc_index(cmd);
++ alloc_ret = cmd_alloc_index(cmd, ent);
+ if (alloc_ret < 0) {
+ mlx5_core_err_rl(dev, "failed to allocate command entry\n");
+ if (ent->callback) {
+@@ -927,15 +930,14 @@ static void cmd_work_handler(struct work_struct *work)
+ up(sem);
+ return;
+ }
+- ent->idx = alloc_ret;
+ } else {
+ ent->idx = cmd->max_reg_cmds;
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+ clear_bit(ent->idx, &cmd->bitmask);
++ cmd->ent_arr[ent->idx] = ent;
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+ }
+
+- cmd->ent_arr[ent->idx] = ent;
+ lay = get_inst(cmd, ent->idx);
+ ent->lay = lay;
+ memset(lay, 0, sizeof(*lay));
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 4e8e3797aed08..074c9eb44ab73 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1675,8 +1675,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
+ }
+ trace_mlx5_fs_set_fte(fte, false);
+
++ /* Link newly added rules into the tree. */
+ for (i = 0; i < handle->num_rules; i++) {
+- if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
++ if (!handle->rule[i]->node.parent) {
+ tree_add_node(&handle->rule[i]->node, &fte->node);
+ trace_mlx5_fs_add_rule(handle->rule[i]);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
+index 1a86535c49685..f568ae250393f 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
+@@ -697,7 +697,7 @@ static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
+
+ static const struct mlxsw_listener mlxsw_emad_rx_listener =
+ MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
+- EMAD, DISCARD);
++ EMAD, FORWARD);
+
+ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
+ {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+index 483c8b75bebb8..46b1120a8151e 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+@@ -713,7 +713,9 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
+ rehash.dw.work);
+ int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
+
++ mutex_lock(&vregion->lock);
+ mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
++ mutex_unlock(&vregion->lock);
+ if (credits < 0)
+ /* Rehash gone out of credits so it was interrupted.
+ * Schedule the work as soon as possible to continue.
+@@ -723,6 +725,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
+ mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
+ }
+
++static void
++mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
++{
++ /* The entry markers are relative to the current chunk and therefore
++	 * need to be reset together with the chunk marker.
++ */
++ ctx->current_vchunk = NULL;
++ ctx->start_ventry = NULL;
++ ctx->stop_ventry = NULL;
++}
++
+ static void
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
+ {
+@@ -745,7 +758,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *v
+ * the current chunk pointer to make sure all chunks
+ * are properly migrated.
+ */
+- vregion->rehash.ctx.current_vchunk = NULL;
++ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
+ }
+
+ static struct mlxsw_sp_acl_tcam_vregion *
+@@ -818,10 +831,14 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
+
+ if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
++ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
++
+ mutex_lock(&tcam->lock);
+ list_del(&vregion->tlist);
+ mutex_unlock(&tcam->lock);
+- cancel_delayed_work_sync(&vregion->rehash.dw);
++ if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
++ ctx->hints_priv)
++ ops->region_rehash_hints_put(ctx->hints_priv);
+ }
+ mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
+ if (vregion->region2)
+@@ -1187,8 +1204,14 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_ventry *ventry,
+ bool *activity)
+ {
+- return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
+- ventry->entry, activity);
++ struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
++ int err;
++
++ mutex_lock(&vregion->lock);
++ err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
++ activity);
++ mutex_unlock(&vregion->lock);
++ return err;
+ }
+
+ static int
+@@ -1222,6 +1245,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
+ {
+ struct mlxsw_sp_acl_tcam_chunk *new_chunk;
+
++ WARN_ON(vchunk->chunk2);
++
+ new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
+ if (IS_ERR(new_chunk))
+ return PTR_ERR(new_chunk);
+@@ -1240,7 +1265,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
+ {
+ mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
+ vchunk->chunk2 = NULL;
+- ctx->current_vchunk = NULL;
++ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
+ }
+
+ static int
+@@ -1263,6 +1288,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
+ return 0;
+ }
+
++ if (list_empty(&vchunk->ventry_list))
++ goto out;
++
+ /* If the migration got interrupted, we have the ventry to start from
+ * stored in context.
+ */
+@@ -1272,6 +1300,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
+ ventry = list_first_entry(&vchunk->ventry_list,
+ typeof(*ventry), list);
+
++ WARN_ON(ventry->vchunk != vchunk);
++
+ list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
+ /* During rollback, once we reach the ventry that failed
+ * to migrate, we are done.
+@@ -1312,6 +1342,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
+ }
+ }
+
++out:
+ mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
+ return 0;
+ }
+@@ -1325,6 +1356,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+ int err;
+
++ if (list_empty(&vregion->vchunk_list))
++ return 0;
++
+ /* If the migration got interrupted, we have the vchunk
+ * we are working on stored in context.
+ */
+@@ -1353,16 +1387,17 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
+ int err, err2;
+
+ trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
+- mutex_lock(&vregion->lock);
+ err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
+ ctx, credits);
+ if (err) {
++ if (ctx->this_is_rollback)
++ return err;
+ /* In case migration was not successful, we need to swap
+ * so the original region pointer is assigned again
+ * to vregion->region.
+ */
+ swap(vregion->region, vregion->region2);
+- ctx->current_vchunk = NULL;
++ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
+ ctx->this_is_rollback = true;
+ err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
+ ctx, credits);
+@@ -1373,7 +1408,6 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
+ /* Let the rollback to be continued later on. */
+ }
+ }
+- mutex_unlock(&vregion->lock);
+ trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
+ return err;
+ }
+@@ -1422,6 +1456,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
+
+ ctx->hints_priv = hints_priv;
+ ctx->this_is_rollback = false;
++ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
+
+ return 0;
+
+@@ -1474,7 +1509,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
+ err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
+ ctx, credits);
+ if (err) {
+- dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
++ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
++ return;
+ }
+
+ if (*credits >= 0)
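Several of the spectrum_acl_tcam.c changes above revolve around the rehash work's credit-based, resumable migration: do a bounded amount of work per invocation, record the cursor in a context, and reschedule when credits run out. A toy userspace model of that control flow (all names invented):

	#include <stdio.h>

	#define ITEMS            10
	#define CREDITS_PER_PASS  4

	struct migrate_ctx { int cursor; };	/* survives between work invocations */

	static int migrate_some(struct migrate_ctx *ctx, int *credits)
	{
		while (ctx->cursor < ITEMS) {
			if ((*credits)-- <= 0)
				return -1;	/* out of credits: interrupted, reschedule */
			printf("migrated item %d\n", ctx->cursor++);
		}
		return 0;			/* all items migrated */
	}

	int main(void)
	{
		struct migrate_ctx ctx = { 0 };
		int credits;

		do {	/* each iteration models one scheduled work invocation */
			credits = CREDITS_PER_PASS;
		} while (migrate_some(&ctx, &credits) != 0);
		return 0;
	}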
+diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
+index 5dc60ecabe561..e0fcdbadf2cba 100644
+--- a/drivers/net/ethernet/ti/am65-cpts.c
++++ b/drivers/net/ethernet/ti/am65-cpts.c
+@@ -649,6 +649,11 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
+ struct am65_cpts_skb_cb_data *skb_cb =
+ (struct am65_cpts_skb_cb_data *)skb->cb;
+
++ if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
++ ((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
++ (skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
++ mtype_seqid = skb_cb->skb_mtype_seqid;
++
+ if (mtype_seqid == skb_cb->skb_mtype_seqid) {
+ u64 ns = event->timestamp;
+
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 2bb9820c66641..af35361a3dcee 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -907,7 +907,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ __be16 sport;
+ int err;
+
+- if (!pskb_inet_may_pull(skb))
++ if (!skb_vlan_inet_prepare(skb))
+ return -EINVAL;
+
+ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+@@ -1004,7 +1004,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ __be16 sport;
+ int err;
+
+- if (!pskb_inet_may_pull(skb))
++ if (!skb_vlan_inet_prepare(skb))
+ return -EINVAL;
+
+ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 4e19760cddefe..c8246363d3832 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -700,11 +700,12 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
+ static void gtp_dellink(struct net_device *dev, struct list_head *head)
+ {
+ struct gtp_dev *gtp = netdev_priv(dev);
++ struct hlist_node *next;
+ struct pdp_ctx *pctx;
+ int i;
+
+ for (i = 0; i < gtp->hash_size; i++)
+- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
++ hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
+ pdp_context_delete(pctx);
+
+ list_del_rcu(&gtp->list);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index bb0368272a1bb..77e63e7366e78 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -2141,14 +2141,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ tun_is_little_endian(tun), true,
+ vlan_hlen)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+- pr_err("unexpected GSO type: "
+- "0x%x, gso_size %d, hdr_len %d\n",
+- sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+- tun16_to_cpu(tun, gso.hdr_len));
+- print_hex_dump(KERN_ERR, "tun: ",
+- DUMP_PREFIX_NONE,
+- 16, 1, skb->head,
+- min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
++
++ if (net_ratelimit()) {
++ netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
++ sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
++ tun16_to_cpu(tun, gso.hdr_len));
++ print_hex_dump(KERN_ERR, "tun: ",
++ DUMP_PREFIX_NONE,
++ 16, 1, skb->head,
++ min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
++ }
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index 38cb863ccb911..da4a2427b005f 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -1558,21 +1558,16 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ /* Skip IP alignment pseudo header */
+ skb_pull(skb, 2);
+
+- skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
+ ax88179_rx_checksum(skb, pkt_hdr);
+ return 1;
+ }
+
+- ax_skb = skb_clone(skb, GFP_ATOMIC);
++ ax_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
+ if (!ax_skb)
+ return 0;
+- skb_trim(ax_skb, pkt_len);
++ skb_put(ax_skb, pkt_len);
++ memcpy(ax_skb->data, skb->data + 2, pkt_len);
+
+- /* Skip IP alignment pseudo header */
+- skb_pull(ax_skb, 2);
+-
+- skb->truesize = pkt_len_plus_padd +
+- SKB_DATA_ALIGN(sizeof(struct sk_buff));
+ ax88179_rx_checksum(ax_skb, pkt_hdr);
+ usbnet_skb_return(dev, ax_skb);
+
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 3096769e718ed..b173497a3e0ca 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -1778,6 +1778,10 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
+ if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
+ return false;
+
++ /* Ignore packets from invalid src-address */
++ if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
++ return false;
++
+ /* Get address from the outer IP header */
+ if (vxlan_get_sk_family(vs) == AF_INET) {
+ saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+index b1335fe3b01a2..e2df8ddc1a56a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+@@ -106,6 +106,8 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ if (!pasn)
+ return -ENOBUFS;
+
++ iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
++
+ pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
+
+ switch (pasn->cipher) {
+diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
+index c70f62fe321eb..081ec1105572e 100644
+--- a/drivers/nfc/trf7970a.c
++++ b/drivers/nfc/trf7970a.c
+@@ -424,7 +424,8 @@ struct trf7970a {
+ enum trf7970a_state state;
+ struct device *dev;
+ struct spi_device *spi;
+- struct regulator *regulator;
++ struct regulator *vin_regulator;
++ struct regulator *vddio_regulator;
+ struct nfc_digital_dev *ddev;
+ u32 quirks;
+ bool is_initiator;
+@@ -1882,7 +1883,7 @@ static int trf7970a_power_up(struct trf7970a *trf)
+ if (trf->state != TRF7970A_ST_PWR_OFF)
+ return 0;
+
+- ret = regulator_enable(trf->regulator);
++ ret = regulator_enable(trf->vin_regulator);
+ if (ret) {
+ dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret);
+ return ret;
+@@ -1925,7 +1926,7 @@ static int trf7970a_power_down(struct trf7970a *trf)
+ if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW))
+ gpiod_set_value_cansleep(trf->en2_gpiod, 0);
+
+- ret = regulator_disable(trf->regulator);
++ ret = regulator_disable(trf->vin_regulator);
+ if (ret)
+ dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__,
+ ret);
+@@ -2064,37 +2065,37 @@ static int trf7970a_probe(struct spi_device *spi)
+ mutex_init(&trf->lock);
+ INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
+
+- trf->regulator = devm_regulator_get(&spi->dev, "vin");
+- if (IS_ERR(trf->regulator)) {
+- ret = PTR_ERR(trf->regulator);
++ trf->vin_regulator = devm_regulator_get(&spi->dev, "vin");
++ if (IS_ERR(trf->vin_regulator)) {
++ ret = PTR_ERR(trf->vin_regulator);
+ dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
+ goto err_destroy_lock;
+ }
+
+- ret = regulator_enable(trf->regulator);
++ ret = regulator_enable(trf->vin_regulator);
+ if (ret) {
+ dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
+ goto err_destroy_lock;
+ }
+
+- uvolts = regulator_get_voltage(trf->regulator);
++ uvolts = regulator_get_voltage(trf->vin_regulator);
+ if (uvolts > 4000000)
+ trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
+
+- trf->regulator = devm_regulator_get(&spi->dev, "vdd-io");
+- if (IS_ERR(trf->regulator)) {
+- ret = PTR_ERR(trf->regulator);
++ trf->vddio_regulator = devm_regulator_get(&spi->dev, "vdd-io");
++ if (IS_ERR(trf->vddio_regulator)) {
++ ret = PTR_ERR(trf->vddio_regulator);
+ dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret);
+- goto err_destroy_lock;
++ goto err_disable_vin_regulator;
+ }
+
+- ret = regulator_enable(trf->regulator);
++ ret = regulator_enable(trf->vddio_regulator);
+ if (ret) {
+ dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret);
+- goto err_destroy_lock;
++ goto err_disable_vin_regulator;
+ }
+
+- if (regulator_get_voltage(trf->regulator) == 1800000) {
++ if (regulator_get_voltage(trf->vddio_regulator) == 1800000) {
+ trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW;
+ dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n");
+ }
+@@ -2107,7 +2108,7 @@ static int trf7970a_probe(struct spi_device *spi)
+ if (!trf->ddev) {
+ dev_err(trf->dev, "Can't allocate NFC digital device\n");
+ ret = -ENOMEM;
+- goto err_disable_regulator;
++ goto err_disable_vddio_regulator;
+ }
+
+ nfc_digital_set_parent_dev(trf->ddev, trf->dev);
+@@ -2136,8 +2137,10 @@ static int trf7970a_probe(struct spi_device *spi)
+ trf7970a_shutdown(trf);
+ err_free_ddev:
+ nfc_digital_free_device(trf->ddev);
+-err_disable_regulator:
+- regulator_disable(trf->regulator);
++err_disable_vddio_regulator:
++ regulator_disable(trf->vddio_regulator);
++err_disable_vin_regulator:
++ regulator_disable(trf->vin_regulator);
+ err_destroy_lock:
+ mutex_destroy(&trf->lock);
+ return ret;
+@@ -2156,7 +2159,8 @@ static int trf7970a_remove(struct spi_device *spi)
+ nfc_digital_unregister_device(trf->ddev);
+ nfc_digital_free_device(trf->ddev);
+
+- regulator_disable(trf->regulator);
++ regulator_disable(trf->vddio_regulator);
++ regulator_disable(trf->vin_regulator);
+
+ mutex_destroy(&trf->lock);
+
+diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
+index ccc65cfc519f5..51b814e44783e 100644
+--- a/drivers/staging/comedi/drivers/vmk80xx.c
++++ b/drivers/staging/comedi/drivers/vmk80xx.c
+@@ -642,33 +642,22 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
+ struct vmk80xx_private *devpriv = dev->private;
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
+ struct usb_host_interface *iface_desc = intf->cur_altsetting;
+- struct usb_endpoint_descriptor *ep_desc;
+- int i;
+-
+- if (iface_desc->desc.bNumEndpoints != 2)
+- return -ENODEV;
+-
+- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
+- ep_desc = &iface_desc->endpoint[i].desc;
+-
+- if (usb_endpoint_is_int_in(ep_desc) ||
+- usb_endpoint_is_bulk_in(ep_desc)) {
+- if (!devpriv->ep_rx)
+- devpriv->ep_rx = ep_desc;
+- continue;
+- }
++ struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
++ int ret;
+
+- if (usb_endpoint_is_int_out(ep_desc) ||
+- usb_endpoint_is_bulk_out(ep_desc)) {
+- if (!devpriv->ep_tx)
+- devpriv->ep_tx = ep_desc;
+- continue;
+- }
+- }
++ if (devpriv->model == VMK8061_MODEL)
++ ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
++ &ep_tx_desc, NULL, NULL);
++ else
++ ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
++ &ep_rx_desc, &ep_tx_desc);
+
+- if (!devpriv->ep_rx || !devpriv->ep_tx)
++ if (ret)
+ return -ENODEV;
+
++ devpriv->ep_rx = ep_rx_desc;
++ devpriv->ep_tx = ep_tx_desc;
++
+ if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
+ return -EINVAL;
+
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index 22e5c4de345b5..b13a944eb4620 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -2402,22 +2402,29 @@ void tb_switch_unconfigure_link(struct tb_switch *sw)
+ {
+ struct tb_port *up, *down;
+
+- if (sw->is_unplugged)
+- return;
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
+ return;
+
++ /*
++ * Unconfigure downstream port so that wake-on-connect can be
++ * configured after router unplug. No need to unconfigure upstream port
++ * since its router is unplugged.
++ */
+ up = tb_upstream_port(sw);
+- if (tb_switch_is_usb4(up->sw))
+- usb4_port_unconfigure(up);
+- else
+- tb_lc_unconfigure_port(up);
+-
+ down = up->remote;
+ if (tb_switch_is_usb4(down->sw))
+ usb4_port_unconfigure(down);
+ else
+ tb_lc_unconfigure_port(down);
++
++ if (sw->is_unplugged)
++ return;
++
++ up = tb_upstream_port(sw);
++ if (tb_switch_is_usb4(up->sw))
++ usb4_port_unconfigure(up);
++ else
++ tb_lc_unconfigure_port(up);
+ }
+
+ static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
+@@ -2631,7 +2638,26 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+ return tb_lc_set_wake(sw, flags);
+ }
+
+-int tb_switch_resume(struct tb_switch *sw)
++static void tb_switch_check_wakes(struct tb_switch *sw)
++{
++ if (device_may_wakeup(&sw->dev)) {
++ if (tb_switch_is_usb4(sw))
++ usb4_switch_check_wakes(sw);
++ }
++}
++
++/**
++ * tb_switch_resume() - Resume a switch after sleep
++ * @sw: Switch to resume
++ * @runtime: Is this resume from runtime suspend or system sleep
++ *
++ * Resumes and re-enumerates the router (and all its children), if still
++ * plugged after suspend. Does not enumerate a device router whose UID changed
++ * during suspend. If this is a resume from system sleep, notifies the PM core
++ * about the wakes that occurred during suspend. Disables all wakes except the
++ * USB4 wake of the upstream port, which is always kept enabled for USB4
++ * routers.
++ */
++int tb_switch_resume(struct tb_switch *sw, bool runtime)
+ {
+ struct tb_port *port;
+ int err;
+@@ -2676,6 +2702,9 @@ int tb_switch_resume(struct tb_switch *sw)
+ if (err)
+ return err;
+
++ if (!runtime)
++ tb_switch_check_wakes(sw);
++
+ /* Disable wakes */
+ tb_switch_set_wake(sw, 0);
+
+@@ -2702,7 +2731,8 @@ int tb_switch_resume(struct tb_switch *sw)
+ */
+ if (tb_port_unlock(port))
+ tb_port_warn(port, "failed to unlock port\n");
+- if (port->remote && tb_switch_resume(port->remote->sw)) {
++ if (port->remote &&
++ tb_switch_resume(port->remote->sw, runtime)) {
+ tb_port_warn(port,
+ "lost during suspend, disconnecting\n");
+ tb_sw_set_unplugged(port->remote->sw);
+diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
+index a56ea540af00b..26a78847985ba 100644
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -1385,7 +1385,7 @@ static int tb_resume_noirq(struct tb *tb)
+ /* remove any pci devices the firmware might have setup */
+ tb_switch_reset(tb->root_switch);
+
+- tb_switch_resume(tb->root_switch);
++ tb_switch_resume(tb->root_switch, false);
+ tb_free_invalid_tunnels(tb);
+ tb_free_unplugged_children(tb->root_switch);
+ tb_restore_children(tb->root_switch);
+@@ -1488,7 +1488,7 @@ static int tb_runtime_resume(struct tb *tb)
+ struct tb_tunnel *tunnel, *n;
+
+ mutex_lock(&tb->lock);
+- tb_switch_resume(tb->root_switch);
++ tb_switch_resume(tb->root_switch, true);
+ tb_free_invalid_tunnels(tb);
+ tb_restore_children(tb->root_switch);
+ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
+diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
+index 266f3bf8ff5c6..f3df5655f7a1b 100644
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -653,7 +653,7 @@ int tb_switch_configure(struct tb_switch *sw);
+ int tb_switch_add(struct tb_switch *sw);
+ void tb_switch_remove(struct tb_switch *sw);
+ void tb_switch_suspend(struct tb_switch *sw, bool runtime);
+-int tb_switch_resume(struct tb_switch *sw);
++int tb_switch_resume(struct tb_switch *sw, bool runtime);
+ int tb_switch_reset(struct tb_switch *sw);
+ void tb_sw_set_unplugged(struct tb_switch *sw);
+ struct tb_port *tb_switch_find_port(struct tb_switch *sw,
+@@ -957,6 +957,7 @@ static inline struct tb_retimer *tb_to_retimer(struct device *dev)
+ return NULL;
+ }
+
++void usb4_switch_check_wakes(struct tb_switch *sw);
+ int usb4_switch_setup(struct tb_switch *sw);
+ int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
+ int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
+diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
+index 5b45c45e7c5bf..78c895abb1e48 100644
+--- a/drivers/thunderbolt/usb4.c
++++ b/drivers/thunderbolt/usb4.c
+@@ -197,15 +197,18 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
+ return 0;
+ }
+
+-static void usb4_switch_check_wakes(struct tb_switch *sw)
++/**
++ * usb4_switch_check_wakes() - Check for wakes and notify PM core about them
++ * @sw: Router whose wakes to check
++ *
++ * Checks for wakes that occurred during suspend and notifies the PM core
++ * about them.
++ */
++void usb4_switch_check_wakes(struct tb_switch *sw)
+ {
+ struct tb_port *port;
+ bool wakeup = false;
+ u32 val;
+
+- if (!device_may_wakeup(&sw->dev))
+- return;
+-
+ if (tb_route(sw)) {
+ if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
+ return;
+@@ -270,8 +273,6 @@ int usb4_switch_setup(struct tb_switch *sw)
+ u32 val = 0;
+ int ret;
+
+- usb4_switch_check_wakes(sw);
+-
+ if (!tb_route(sw))
+ return 0;
+
+diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
+index b784323a6a7b0..be6c8b9f1606e 100644
+--- a/drivers/tty/serial/mxs-auart.c
++++ b/drivers/tty/serial/mxs-auart.c
+@@ -1122,11 +1122,13 @@ static void mxs_auart_set_ldisc(struct uart_port *port,
+
+ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
+ {
+- u32 istat;
++ u32 istat, stat;
+ struct mxs_auart_port *s = context;
+ u32 mctrl_temp = s->mctrl_prev;
+- u32 stat = mxs_read(s, REG_STAT);
+
++ uart_port_lock(&s->port);
++
++ stat = mxs_read(s, REG_STAT);
+ istat = mxs_read(s, REG_INTR);
+
+ /* ack irq */
+@@ -1162,6 +1164,8 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
+ istat &= ~AUART_INTR_TXIS;
+ }
+
++ uart_port_unlock(&s->port);
++
+ return IRQ_HANDLED;
+ }
+
+diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
+index d6aef8a1f0a48..1d0717fc3729e 100644
+--- a/drivers/tty/serial/pmac_zilog.c
++++ b/drivers/tty/serial/pmac_zilog.c
+@@ -217,7 +217,6 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
+ {
+ struct tty_port *port;
+ unsigned char ch, r1, drop, flag;
+- int loops = 0;
+
+ /* Sanity check, make sure the old bug is no longer happening */
+ if (uap->port.state == NULL) {
+@@ -298,24 +297,11 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
+ if (r1 & Rx_OVR)
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
+ next_char:
+- /* We can get stuck in an infinite loop getting char 0 when the
+- * line is in a wrong HW state, we break that here.
+- * When that happens, I disable the receive side of the driver.
+- * Note that what I've been experiencing is a real irq loop where
+- * I'm getting flooded regardless of the actual port speed.
+- * Something strange is going on with the HW
+- */
+- if ((++loops) > 1000)
+- goto flood;
+ ch = read_zsreg(uap, R0);
+ if (!(ch & Rx_CH_AV))
+ break;
+ }
+
+- return true;
+- flood:
+- pmz_interrupt_control(uap, 0);
+- pmz_error("pmz: rx irq flood !\n");
+ return true;
+ }
+
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 58423b16022b4..80332b6a1963e 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -471,7 +471,6 @@ static ssize_t wdm_write
+ static int service_outstanding_interrupt(struct wdm_device *desc)
+ {
+ int rv = 0;
+- int used;
+
+ /* submit read urb only if the device is waiting for it */
+ if (!desc->resp_count || !--desc->resp_count)
+@@ -486,10 +485,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
+ goto out;
+ }
+
+- used = test_and_set_bit(WDM_RESPONDING, &desc->flags);
+- if (used)
+- goto out;
+-
++ set_bit(WDM_RESPONDING, &desc->flags);
+ spin_unlock_irq(&desc->iuspin);
+ rv = usb_submit_urb(desc->response, GFP_KERNEL);
+ spin_lock_irq(&desc->iuspin);
+diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
+index 336ecf6e19678..86e8585a55122 100644
+--- a/drivers/usb/core/port.c
++++ b/drivers/usb/core/port.c
+@@ -295,8 +295,10 @@ static void usb_port_shutdown(struct device *dev)
+ {
+ struct usb_port *port_dev = to_usb_port(dev);
+
+- if (port_dev->child)
++ if (port_dev->child) {
+ usb_disable_usb2_hardware_lpm(port_dev->child);
++ usb_unlocked_disable_lpm(port_dev->child);
++ }
+ }
+
+ static const struct dev_pm_ops usb_port_pm_ops = {
+diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
+index 6a4aa71da103f..d6fa02d851e49 100644
+--- a/drivers/usb/dwc2/hcd_ddma.c
++++ b/drivers/usb/dwc2/hcd_ddma.c
+@@ -897,13 +897,15 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
+ struct dwc2_dma_desc *dma_desc;
+ struct dwc2_hcd_iso_packet_desc *frame_desc;
+ u16 frame_desc_idx;
+- struct urb *usb_urb = qtd->urb->priv;
++ struct urb *usb_urb;
+ u16 remain = 0;
+ int rc = 0;
+
+ if (!qtd->urb)
+ return -EINVAL;
+
++ usb_urb = qtd->urb->priv;
++
+ dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index fb1eba835e508..400e3e10b435c 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -255,6 +255,10 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_EM061K_LMS 0x0124
+ #define QUECTEL_PRODUCT_EC25 0x0125
+ #define QUECTEL_PRODUCT_EM060K_128 0x0128
++#define QUECTEL_PRODUCT_EM060K_129 0x0129
++#define QUECTEL_PRODUCT_EM060K_12a 0x012a
++#define QUECTEL_PRODUCT_EM060K_12b 0x012b
++#define QUECTEL_PRODUCT_EM060K_12c 0x012c
+ #define QUECTEL_PRODUCT_EG91 0x0191
+ #define QUECTEL_PRODUCT_EG95 0x0195
+ #define QUECTEL_PRODUCT_BG96 0x0296
+@@ -1218,6 +1222,18 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x30) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0x00, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x30) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0x00, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x30) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0x00, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x30) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0x00, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
+@@ -1360,6 +1376,12 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */
++ .driver_info = RSVD(0) | NCTRL(3) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */
++ .driver_info = RSVD(0) | NCTRL(3) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */
++ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+@@ -2052,6 +2074,10 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
+ .driver_info = RSVD(4) },
++ { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b05), /* Longsung U8300 */
++ .driver_info = RSVD(4) | RSVD(5) },
++ { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b3c), /* Longsung U9300 */
++ .driver_info = RSVD(0) | RSVD(4) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+ { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
+@@ -2272,15 +2298,29 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff), /* Fibocom FM135 (laptop MBIM) */
++ .driver_info = RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
+ .driver_info = RSVD(4) },
++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a04, 0xff) }, /* Fibocom FM650-CN (ECM mode) */
++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) }, /* Fibocom FM650-CN (NCM mode) */
++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) }, /* Fibocom FM650-CN (RNDIS mode) */
++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) }, /* Fibocom FM650-CN (MBIM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
+ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
+ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
+ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
++ { USB_DEVICE(0x33f8, 0x0104), /* Rolling RW101-GL (laptop RMNET) */
++ .driver_info = RSVD(4) | RSVD(5) },
++ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a2, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
++ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a3, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
++ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a4, 0xff), /* Rolling RW101-GL (laptop MBIM) */
++ .driver_info = RSVD(4) },
++ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff), /* Rolling RW135-GL (laptop MBIM) */
++ .driver_info = RSVD(5) },
+ { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index ab67160f72841..8ed9c9b63eb16 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2513,9 +2513,19 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+ r = vhost_get_avail_idx(vq, &avail_idx);
+ if (unlikely(r))
+ return false;
++
+ vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
++ if (vq->avail_idx != vq->last_avail_idx) {
++ /* Since we have updated avail_idx, the following
++ * call to vhost_get_vq_desc() will read available
++ * ring entries. Make sure that read happens after
++ * the avail_idx read.
++ */
++ smp_rmb();
++ return false;
++ }
+
+- return vq->avail_idx == vq->last_avail_idx;
++ return true;
+ }
+ EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
+
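The smp_rmb() added above enforces a classic producer/consumer rule: once the consumer observes a newer available index, subsequent reads of ring entries must not be reordered before that index read. A userspace analogue using C11 acquire/release semantics (which subsume the explicit barrier); illustrative only, not vhost code:

	#include <stdatomic.h>
	#include <pthread.h>
	#include <stdio.h>

	#define RING_SIZE 16

	static int ring[RING_SIZE];
	static _Atomic unsigned int prod_idx;

	static void *producer(void *arg)
	{
		(void)arg;
		for (unsigned int i = 0; i < RING_SIZE; i++) {
			ring[i] = (int)(i * 10);	/* write the slot first */
			/* release store: the slot write is visible before the
			 * index bump, the role smp_wmb() plays on the kernel side */
			atomic_store_explicit(&prod_idx, i + 1, memory_order_release);
		}
		return NULL;
	}

	static void *consumer(void *arg)
	{
		unsigned int seen = 0;

		(void)arg;
		while (seen < RING_SIZE) {
			/* acquire load: pairs with the release store; the reads of
			 * ring[] below cannot be reordered before it (the smp_rmb() role) */
			unsigned int idx = atomic_load_explicit(&prod_idx,
								memory_order_acquire);

			while (seen < idx) {
				printf("slot %u = %d\n", seen, ring[seen]);
				seen++;
			}
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t p, c;

		pthread_create(&p, NULL, producer, NULL);
		pthread_create(&c, NULL, consumer, NULL);
		pthread_join(p, NULL);
		pthread_join(c, NULL);
		return 0;
	}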
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index d2f77b1242a13..f1731eeb86a7f 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -2315,20 +2315,14 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
+ size_t alloc_bytes;
+
+ alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
+- data = kvmalloc(alloc_bytes, GFP_KERNEL);
++ data = kvzalloc(alloc_bytes, GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+- if (total_bytes >= sizeof(*data)) {
++ if (total_bytes >= sizeof(*data))
+ data->bytes_left = total_bytes - sizeof(*data);
+- data->bytes_missing = 0;
+- } else {
++ else
+ data->bytes_missing = sizeof(*data) - total_bytes;
+- data->bytes_left = 0;
+- }
+-
+- data->elem_cnt = 0;
+- data->elem_missed = 0;
+
+ return data;
+ }
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index bcffe7886530a..cdfc791b3c405 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1135,6 +1135,9 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+ if (ret)
+ return ret;
+
++ ret = btrfs_record_root_in_trans(trans, node->root);
++ if (ret)
++ return ret;
+ ret = btrfs_update_delayed_inode(trans, node->root, path, node);
+ return ret;
+ }
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 7f849310303b1..50669ff9346c6 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -4114,6 +4114,8 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
+ BTRFS_QGROUP_RSV_META_PREALLOC);
+ trace_qgroup_meta_convert(root, num_bytes);
+ qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
++ if (!sb_rdonly(fs_info->sb))
++ add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
+ }
+
+ /*
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index 81394e22d0a09..eb7de9e2a384e 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -243,7 +243,7 @@ nilfs_filetype_table[NILFS_FT_MAX] = {
+
+ #define S_SHIFT 12
+ static unsigned char
+-nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
++nilfs_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
+ [S_IFREG >> S_SHIFT] = NILFS_FT_REG_FILE,
+ [S_IFDIR >> S_SHIFT] = NILFS_FT_DIR,
+ [S_IFCHR >> S_SHIFT] = NILFS_FT_CHRDEV,
+diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
+index 96d0da65e0887..f51b2396830f4 100644
+--- a/fs/sysfs/file.c
++++ b/fs/sysfs/file.c
+@@ -429,6 +429,8 @@ struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
+ kn = kernfs_find_and_get(kobj->sd, attr->name);
+ if (kn)
+ kernfs_break_active_protection(kn);
++ else
++ kobject_put(kobj);
+ return kn;
+ }
+ EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
+diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
+index b060514bf25d2..bbd3aeb2f9c5b 100644
+--- a/include/linux/etherdevice.h
++++ b/include/linux/etherdevice.h
+@@ -542,6 +542,31 @@ static inline unsigned long compare_ether_header(const void *a, const void *b)
+ #endif
+ }
+
++/**
++ * eth_skb_pkt_type - Assign packet type if destination address does not match
++ * @skb: Buffer whose packet type is assigned when its destination address
++ *       does not match the @dev address
++ * @dev: Network device whose address the packet's destination is compared
++ *       against
++ *
++ * If the destination MAC address of the packet does not match the network
++ * device address, assign an appropriate packet type.
++ */
++static inline void eth_skb_pkt_type(struct sk_buff *skb,
++ const struct net_device *dev)
++{
++ const struct ethhdr *eth = eth_hdr(skb);
++
++ if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
++ if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
++ if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
++ skb->pkt_type = PACKET_BROADCAST;
++ else
++ skb->pkt_type = PACKET_MULTICAST;
++ } else {
++ skb->pkt_type = PACKET_OTHERHOST;
++ }
++ }
++}
++
+ /**
+ * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
+ * @skb: Buffer to pad
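For reference, the classification eth_skb_pkt_type() performs can be modeled in plain C. This sketch replaces the 64-bit-optimized address compares with memcmp() and uses hypothetical addresses:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	enum pkt_type { PACKET_HOST, PACKET_BROADCAST, PACKET_MULTICAST, PACKET_OTHERHOST };

	static enum pkt_type classify(const uint8_t dest[6], const uint8_t dev_addr[6],
				      const uint8_t bcast[6])
	{
		if (!memcmp(dest, dev_addr, 6))
			return PACKET_HOST;	/* addressed to us: pkt_type stays as-is */
		if (dest[0] & 1)		/* I/G bit set: group address */
			return !memcmp(dest, bcast, 6) ? PACKET_BROADCAST
						       : PACKET_MULTICAST;
		return PACKET_OTHERHOST;
	}

	int main(void)
	{
		const uint8_t dev_addr[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
		const uint8_t bcast[6]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

		printf("%d\n", classify(bcast, dev_addr, bcast));	/* 1 == PACKET_BROADCAST */
		return 0;
	}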
+diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
+index 3ed4e8771b64e..a53adf4316a53 100644
+--- a/include/linux/irqflags.h
++++ b/include/linux/irqflags.h
+@@ -133,7 +133,7 @@ do { \
+ # define lockdep_softirq_enter() do { } while (0)
+ # define lockdep_softirq_exit() do { } while (0)
+ # define lockdep_hrtimer_enter(__hrtimer) false
+-# define lockdep_hrtimer_exit(__context) do { } while (0)
++# define lockdep_hrtimer_exit(__context) do { (void)(__context); } while (0)
+ # define lockdep_posixtimer_enter() do { } while (0)
+ # define lockdep_posixtimer_exit() do { } while (0)
+ # define lockdep_irq_work_enter(__work) do { } while (0)
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index 46a21984c0b22..48b9cd8fdcf7c 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -260,6 +260,85 @@ struct uart_port {
+ void *private_data; /* generic platform data pointer */
+ };
+
++/**
++ * uart_port_lock - Lock the UART port
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_lock(struct uart_port *up)
++{
++ spin_lock(&up->lock);
++}
++
++/**
++ * uart_port_lock_irq - Lock the UART port and disable interrupts
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_lock_irq(struct uart_port *up)
++{
++ spin_lock_irq(&up->lock);
++}
++
++/**
++ * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
++ * @up: Pointer to UART port structure
++ * @flags: Pointer to interrupt flags storage
++ */
++static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
++{
++ spin_lock_irqsave(&up->lock, *flags);
++}
++
++/**
++ * uart_port_trylock - Try to lock the UART port
++ * @up: Pointer to UART port structure
++ *
++ * Returns: True if lock was acquired, false otherwise
++ */
++static inline bool uart_port_trylock(struct uart_port *up)
++{
++ return spin_trylock(&up->lock);
++}
++
++/**
++ * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
++ * @up: Pointer to UART port structure
++ * @flags: Pointer to interrupt flags storage
++ *
++ * Returns: True if lock was acquired, false otherwise
++ */
++static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
++{
++ return spin_trylock_irqsave(&up->lock, *flags);
++}
++
++/**
++ * uart_port_unlock - Unlock the UART port
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_unlock(struct uart_port *up)
++{
++ spin_unlock(&up->lock);
++}
++
++/**
++ * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_unlock_irq(struct uart_port *up)
++{
++ spin_unlock_irq(&up->lock);
++}
++
++/**
++ * uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts
++ * @up: Pointer to UART port structure
++ * @flags: The saved interrupt flags for restore
++ */
++static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
++{
++ spin_unlock_irqrestore(&up->lock, flags);
++}
++
+ static inline int serial_port_in(struct uart_port *up, int offset)
+ {
+ return up->serial_in(up, offset);
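The mxs-auart change earlier in this patch shows uart_port_lock()/uart_port_unlock() in an interrupt handler; for completeness, a hypothetical snippet of the irqsave pair for paths that may run with interrupts enabled. This is a sketch against the wrappers above, not code from the patch:

	#include <linux/serial_core.h>

	static void demo_touch_port(struct uart_port *port)
	{
		unsigned long flags;

		uart_port_lock_irqsave(port, &flags);
		/* port registers and xmit state may be modified safely here */
		uart_port_unlock_irqrestore(port, flags);
	}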
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index f7ed0471d5a85..dbf0993153d35 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -577,7 +577,7 @@ struct trace_event_file {
+ } \
+ early_initcall(trace_init_perf_perm_##name);
+
+-#define PERF_MAX_TRACE_SIZE 2048
++#define PERF_MAX_TRACE_SIZE 8192
+
+ #define MAX_FILTER_STR_VAL 256U /* Should handle KSYM_SYMBOL_LEN */
+
+diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
+index e81856c0ba134..7c6f81b8971aa 100644
+--- a/include/linux/u64_stats_sync.h
++++ b/include/linux/u64_stats_sync.h
+@@ -116,7 +116,11 @@ static inline void u64_stats_inc(u64_stats_t *p)
+ #endif
+
+ #if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+-#define u64_stats_init(syncp) seqcount_init(&(syncp)->seq)
++#define u64_stats_init(syncp) \
++ do { \
++ struct u64_stats_sync *__s = (syncp); \
++ seqcount_init(&__s->seq); \
++ } while (0)
+ #else
+ static inline void u64_stats_init(struct u64_stats_sync *syncp)
+ {
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index 4d0c4cf1d4c88..f666d3628d6aa 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -437,6 +437,10 @@ static inline void in6_ifa_hold(struct inet6_ifaddr *ifp)
+ refcount_inc(&ifp->refcnt);
+ }
+
++static inline bool in6_ifa_hold_safe(struct inet6_ifaddr *ifp)
++{
++ return refcount_inc_not_zero(&ifp->refcnt);
++}
+
+ /*
+ * compute link-local solicited-node multicast address
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index a6b6ce8b918b7..349279c4d2672 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -56,7 +56,7 @@ struct unix_sock {
+ struct mutex iolock, bindlock;
+ struct sock *peer;
+ struct list_head link;
+- atomic_long_t inflight;
++ unsigned long inflight;
+ spinlock_t lock;
+ unsigned long gc_flags;
+ #define UNIX_GC_CANDIDATE 0
+@@ -77,6 +77,9 @@ enum unix_socket_lock_class {
+ U_LOCK_NORMAL,
+ U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */
+ U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
++	U_LOCK_GC_LISTENER, /* used on the listening socket while determining
++			     * gc candidates, to close a small race window.
++ */
+ };
+
+ static inline void unix_state_lock_nested(struct sock *sk,
+diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
+index 355835639ae58..7d2bd562da4be 100644
+--- a/include/net/bluetooth/bluetooth.h
++++ b/include/net/bluetooth/bluetooth.h
+@@ -487,6 +487,15 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
+ return skb;
+ }
+
++static inline int bt_copy_from_sockptr(void *dst, size_t dst_size,
++ sockptr_t src, size_t src_size)
++{
++ if (dst_size > src_size)
++ return -EINVAL;
++
++ return copy_from_sockptr(dst, src, dst_size);
++}
++
+ int bt_to_errno(u16 code);
+
+ void hci_sock_set_flag(struct sock *sk, int nr);
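bt_copy_from_sockptr() exists because several setsockopt handlers copied a fixed-size value without checking the caller's optlen. A userspace rendering of the bounds rule it enforces (names hypothetical):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	static int bounded_copy(void *dst, size_t dst_size,
				const void *src, size_t src_size)
	{
		if (dst_size > src_size)
			return -EINVAL;		/* caller's buffer too short: reject */
		memcpy(dst, src, dst_size);	/* never reads beyond what was supplied */
		return 0;
	}

	int main(void)
	{
		unsigned int opt = 0;
		unsigned short short_optval = 1;	/* caller handed over only 2 bytes */

		/* prints -22 (-EINVAL); the old pattern copied sizeof(u32) regardless */
		printf("%d\n", bounded_copy(&opt, sizeof(opt),
					    &short_optval, sizeof(short_optval)));
		return 0;
	}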
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index 58d8e6260aa13..1f016af0622bd 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -333,6 +333,39 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+ return pskb_network_may_pull(skb, nhlen);
+ }
+
++/* Variant of pskb_inet_may_pull() that also handles VLAN-tagged frames.
++ */
++static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
++{
++ int nhlen = 0, maclen = ETH_HLEN;
++ __be16 type = skb->protocol;
++
++	/* Essentially this is skb_protocol(skb, true), and it also
++	 * yields the MAC header length.
++ */
++ if (eth_type_vlan(type))
++ type = __vlan_get_protocol(skb, type, &maclen);
++
++ switch (type) {
++#if IS_ENABLED(CONFIG_IPV6)
++ case htons(ETH_P_IPV6):
++ nhlen = sizeof(struct ipv6hdr);
++ break;
++#endif
++ case htons(ETH_P_IP):
++ nhlen = sizeof(struct iphdr);
++ break;
++ }
++ /* For ETH_P_IPV6/ETH_P_IP we make sure to pull
++ * a base network header in skb->head.
++ */
++ if (!pskb_may_pull(skb, maclen + nhlen))
++ return false;
++
++ skb_set_network_header(skb, maclen);
++ return true;
++}
++
+ static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
+ {
+ const struct ip_tunnel_encap_ops *ops;
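skb_vlan_inet_prepare() differs from pskb_inet_may_pull() in that it counts the VLAN tag toward the MAC header before deciding how many linear bytes are needed. The arithmetic, stripped of all skb machinery (header sizes only, single 802.1Q tag assumed):

	#include <stdio.h>

	#define ETH_HLEN  14	/* untagged Ethernet header */
	#define VLAN_HLEN  4	/* one 802.1Q tag */

	int main(void)
	{
		int ipv4_hlen = 20, ipv6_hlen = 40;

		printf("IPv4: need %d linear bytes untagged, %d with one tag\n",
		       ETH_HLEN + ipv4_hlen, ETH_HLEN + VLAN_HLEN + ipv4_hlen);
		printf("IPv6: need %d linear bytes untagged, %d with one tag\n",
		       ETH_HLEN + ipv6_hlen, ETH_HLEN + VLAN_HLEN + ipv6_hlen);
		return 0;
	}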
+diff --git a/init/main.c b/init/main.c
+index 298989b0d4e88..b1593bdaf3b97 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -627,6 +627,8 @@ static void __init setup_command_line(char *command_line)
+ if (!saved_command_line)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, len + ilen);
+
++ len = xlen + strlen(command_line) + 1;
++
+ static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
+ if (!static_command_line)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+diff --git a/kernel/bounds.c b/kernel/bounds.c
+index a94e3769347ee..a3e1d3dfad312 100644
+--- a/kernel/bounds.c
++++ b/kernel/bounds.c
+@@ -19,7 +19,7 @@ int main(void)
+ DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
+ DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
+ #ifdef CONFIG_SMP
+- DEFINE(NR_CPUS_BITS, bits_per(CONFIG_NR_CPUS));
++ DEFINE(NR_CPUS_BITS, order_base_2(CONFIG_NR_CPUS));
+ #endif
+ DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
+ /* End of constants */
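The bounds.c change matters only when CONFIG_NR_CPUS is a power of two: bits_per(n) counts the bits needed to represent the value n itself, while order_base_2(n) counts the bits needed to index the range [0, n), which is what a CPU number field requires. A userspace reimplementation of both, assuming n >= 1 and simplified from the kernel helpers:

	#include <stdio.h>

	static int fls32(unsigned int x)	/* 1-based index of highest set bit */
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	static int bits_per(unsigned int n)	/* bits to represent the value n */
	{
		return fls32(n);
	}

	static int order_base_2(unsigned int n)	/* ceil(log2(n)) */
	{
		return n > 1 ? fls32(n - 1) : 0;
	}

	int main(void)
	{
		printf("n=1024: bits_per=%d order_base_2=%d\n",
		       bits_per(1024), order_base_2(1024));	/* 11 vs 10 */
		printf("n=24:   bits_per=%d order_base_2=%d\n",
		       bits_per(24), order_base_2(24));		/* 5 vs 5 */
		return 0;
	}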
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index abf717c4f57c2..d84ba5a13d171 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2600,7 +2600,8 @@ enum cpu_mitigations {
+ };
+
+ static enum cpu_mitigations cpu_mitigations __ro_after_init =
+- CPU_MITIGATIONS_AUTO;
++ IS_ENABLED(CONFIG_CPU_MITIGATIONS) ? CPU_MITIGATIONS_AUTO :
++ CPU_MITIGATIONS_OFF;
+
+ static int __init mitigations_parse_cmdline(char *arg)
+ {
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 05d3e156a7d63..dba6541c0fc3c 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1647,10 +1647,17 @@ static int check_kprobe_address_safe(struct kprobe *p,
+ jump_label_lock();
+ preempt_disable();
+
+- /* Ensure it is not in reserved area nor out of text */
+- if (!(core_kernel_text((unsigned long) p->addr) ||
+- is_module_text_address((unsigned long) p->addr)) ||
+- in_gate_area_no_mm((unsigned long) p->addr) ||
++ /* Ensure the address is in a text area, and find a module if exists. */
++ *probed_mod = NULL;
++ if (!core_kernel_text((unsigned long) p->addr)) {
++ *probed_mod = __module_text_address((unsigned long) p->addr);
++ if (!(*probed_mod)) {
++ ret = -EINVAL;
++ goto out;
++ }
++ }
++ /* Ensure it is not in reserved area. */
++ if (in_gate_area_no_mm((unsigned long) p->addr) ||
+ within_kprobe_blacklist((unsigned long) p->addr) ||
+ jump_label_text_reserved(p->addr, p->addr) ||
+ static_call_text_reserved(p->addr, p->addr) ||
+@@ -1660,8 +1667,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
+ goto out;
+ }
+
+- /* Check if are we probing a module */
+- *probed_mod = __module_text_address((unsigned long) p->addr);
++ /* Get module refcount and reject __init functions for loaded modules. */
+ if (*probed_mod) {
+ /*
+ * We must hold a refcount of the probed module while updating
+diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
+index 643e0b19920d2..eb81ad523a553 100644
+--- a/kernel/trace/trace_event_perf.c
++++ b/kernel/trace/trace_event_perf.c
+@@ -400,7 +400,8 @@ void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
+ BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
+
+ if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+- "perf buffer not large enough"))
++ "perf buffer not large enough, wanted %d, have %d",
++ size, PERF_MAX_TRACE_SIZE))
+ return NULL;
+
+ *rctxp = rctx = perf_swevent_get_recursion_context();
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index e4340958da2df..4bc90965abb25 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -1140,10 +1140,8 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
+ struct event_trigger_data *data,
+ struct trace_event_file *file)
+ {
+- int ret = tracing_alloc_snapshot_instance(file->tr);
+-
+- if (ret < 0)
+- return ret;
++ if (tracing_alloc_snapshot_instance(file->tr) != 0)
++ return 0;
+
+ return register_trigger(glob, ops, data, file);
+ }
+diff --git a/lib/stackdepot.c b/lib/stackdepot.c
+index 25bbac46605e9..308a7c8518e8f 100644
+--- a/lib/stackdepot.c
++++ b/lib/stackdepot.c
+@@ -271,10 +271,10 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
+ /*
+ * Zero out zone modifiers, as we don't have specific zone
+ * requirements. Keep the flags related to allocation in atomic
+- * contexts and I/O.
++ * contexts, I/O, nolockdep.
+ */
+ alloc_flags &= ~GFP_ZONEMASK;
+- alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
++ alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
+ alloc_flags |= __GFP_NOWARN;
+ page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
+ if (page)
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 9e8ebac9b7e7e..f5019f698105b 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -4188,7 +4188,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
+
+ spin_lock_bh(&bat_priv->tt.commit_lock);
+
+- while (true) {
++ while (timeout) {
+ table_size = batadv_tt_local_table_transmit_size(bat_priv);
+ if (packet_size_max >= table_size)
+ break;
+diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
+index a0f980e615052..7ce6db1ac558a 100644
+--- a/net/bluetooth/hci_request.c
++++ b/net/bluetooth/hci_request.c
+@@ -107,8 +107,10 @@ static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+ if (hdev->req_status == HCI_REQ_PEND) {
+ hdev->req_result = result;
+ hdev->req_status = HCI_REQ_DONE;
+- if (skb)
++ if (skb) {
++ kfree_skb(hdev->req_skb);
+ hdev->req_skb = skb_get(skb);
++ }
+ wake_up_interruptible(&hdev->req_wait_q);
+ }
+ }
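
The hci_request.c hunk above frees any previously stashed req_skb before storing the new reference, so a repeated completion cannot leak the old buffer. A small userspace sketch of that free-before-replace pattern, assuming nothing about the Bluetooth code beyond what the hunk shows: plain strdup()/free() stand in for skb_get()/kfree_skb(), and struct req_ctx is invented.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct req_ctx { char *req_buf; };

static void complete(struct req_ctx *ctx, const char *data)
{
	if (data) {
		free(ctx->req_buf);          /* drop any stale buffer first */
		ctx->req_buf = strdup(data); /* then keep the new one       */
	}
}

int main(void)
{
	struct req_ctx ctx = { NULL };

	complete(&ctx, "first reply");
	complete(&ctx, "second reply");  /* without the free(), "first reply" leaks */
	puts(ctx.req_buf);
	free(ctx.req_buf);
	return 0;
}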
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 756523e5402a8..3a2be1b4a5743 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -456,7 +456,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ struct l2cap_options opts;
+ struct l2cap_conninfo cinfo;
+- int len, err = 0;
++ int err = 0;
++ size_t len;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+@@ -503,7 +504,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
+
+ BT_DBG("mode 0x%2.2x", chan->mode);
+
+- len = min_t(unsigned int, len, sizeof(opts));
++ len = min(len, sizeof(opts));
+ if (copy_to_user(optval, (char *) &opts, len))
+ err = -EFAULT;
+
+@@ -553,7 +554,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
+ cinfo.hci_handle = chan->conn->hcon->handle;
+ memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
+
+- len = min_t(unsigned int, len, sizeof(cinfo));
++ len = min(len, sizeof(cinfo));
+ if (copy_to_user(optval, (char *) &cinfo, len))
+ err = -EFAULT;
+
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 8244d3ae185bf..2115ca6d7e178 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -825,7 +825,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
+ {
+ struct sock *sk = sock->sk;
+- int len, err = 0;
++ int err = 0;
+ struct bt_voice voice;
+ u32 opt;
+
+@@ -841,10 +841,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+@@ -861,11 +860,10 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+
+ voice.setting = sco_pi(sk)->setting;
+
+- len = min_t(unsigned int, sizeof(voice), optlen);
+- if (copy_from_sockptr(&voice, optval, len)) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&voice, sizeof(voice), optval,
++ optlen);
++ if (err)
+ break;
+- }
+
+ /* Explicitly check for these values */
+ if (voice.setting != BT_VOICE_TRANSPARENT &&
+@@ -878,10 +876,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+
+ case BT_PKT_STATUS:
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ sco_pi(sk)->cmsg_mask |= SCO_CMSG_PKT_STATUS;
+@@ -904,7 +901,8 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
+ struct sock *sk = sock->sk;
+ struct sco_options opts;
+ struct sco_conninfo cinfo;
+- int len, err = 0;
++ int err = 0;
++ size_t len;
+
+ BT_DBG("sk %p", sk);
+
+@@ -926,7 +924,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
+
+ BT_DBG("mtu %d", opts.mtu);
+
+- len = min_t(unsigned int, len, sizeof(opts));
++ len = min(len, sizeof(opts));
+ if (copy_to_user(optval, (char *)&opts, len))
+ err = -EFAULT;
+
+@@ -944,7 +942,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
+ cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
+ memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
+
+- len = min_t(unsigned int, len, sizeof(cinfo));
++ len = min(len, sizeof(cinfo));
+ if (copy_to_user(optval, (char *)&cinfo, len))
+ err = -EFAULT;
+
+diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
+index dac65180c4eff..61cb40368723c 100644
+--- a/net/ethernet/eth.c
++++ b/net/ethernet/eth.c
+@@ -164,17 +164,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
+ eth = (struct ethhdr *)skb->data;
+ skb_pull_inline(skb, ETH_HLEN);
+
+- if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
+- dev->dev_addr))) {
+- if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
+- if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+- skb->pkt_type = PACKET_BROADCAST;
+- else
+- skb->pkt_type = PACKET_MULTICAST;
+- } else {
+- skb->pkt_type = PACKET_OTHERHOST;
+- }
+- }
++ eth_skb_pkt_type(skb, dev);
+
+ /*
+ * Some variants of DSA tagging don't have an ethertype field
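
The eth.c hunk above replaces the inline destination-address classification with a call to the eth_skb_pkt_type() helper. As a reference for what the removed lines did, here is a userspace sketch of that classification; the PKT_* values and function name are illustrative, not the kernel's definitions.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

enum { PKT_HOST, PKT_BROADCAST, PKT_MULTICAST, PKT_OTHERHOST };

static int classify(const uint8_t dest[6], const uint8_t dev_addr[6],
		    const uint8_t broadcast[6])
{
	if (!memcmp(dest, dev_addr, 6))
		return PKT_HOST;                  /* addressed to this interface */
	if (dest[0] & 1)                          /* group (multicast) bit set    */
		return !memcmp(dest, broadcast, 6) ? PKT_BROADCAST : PKT_MULTICAST;
	return PKT_OTHERHOST;
}

int main(void)
{
	uint8_t dev[6]   = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("%d %d %d\n",
	       classify(dev, dev, bcast),     /* 0: for us     */
	       classify(bcast, dev, bcast),   /* 1: broadcast  */
	       classify(mcast, dev, bcast));  /* 2: multicast  */
	return 0;
}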
+diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
+index c411c87ae865f..85cb44bfa3bab 100644
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -254,12 +254,12 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
+ }
+ EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
+
++/* Remove all non full sockets (TIME_WAIT and NEW_SYN_RECV) for dead netns */
+ void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
+ {
+- struct inet_timewait_sock *tw;
+- struct sock *sk;
+ struct hlist_nulls_node *node;
+ unsigned int slot;
++ struct sock *sk;
+
+ for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
+ struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
+@@ -268,25 +268,35 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
+ rcu_read_lock();
+ restart:
+ sk_nulls_for_each_rcu(sk, node, &head->chain) {
+- if (sk->sk_state != TCP_TIME_WAIT)
++ int state = inet_sk_state_load(sk);
++
++ if ((1 << state) & ~(TCPF_TIME_WAIT |
++ TCPF_NEW_SYN_RECV))
+ continue;
+- tw = inet_twsk(sk);
+- if ((tw->tw_family != family) ||
+- refcount_read(&twsk_net(tw)->count))
++
++ if (sk->sk_family != family ||
++ refcount_read(&sock_net(sk)->count))
+ continue;
+
+- if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
++ if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
+ continue;
+
+- if (unlikely((tw->tw_family != family) ||
+- refcount_read(&twsk_net(tw)->count))) {
+- inet_twsk_put(tw);
++ if (unlikely(sk->sk_family != family ||
++ refcount_read(&sock_net(sk)->count))) {
++ sock_gen_put(sk);
+ goto restart;
+ }
+
+ rcu_read_unlock();
+ local_bh_disable();
+- inet_twsk_deschedule_put(tw);
++ if (state == TCP_TIME_WAIT) {
++ inet_twsk_deschedule_put(inet_twsk(sk));
++ } else {
++ struct request_sock *req = inet_reqsk(sk);
++
++ inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
++ req);
++ }
+ local_bh_enable();
+ goto restart_rcu;
+ }
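
The inet_twsk_purge() hunk above replaces a single "state == TCP_TIME_WAIT" comparison with a bitmask test covering both TIME_WAIT and NEW_SYN_RECV. A minimal sketch of that one-bit-per-state idiom; the numeric state values below are placeholders, not taken from the kernel headers.

#include <stdio.h>

enum { ST_ESTABLISHED = 1, ST_TIME_WAIT = 6, ST_NEW_SYN_RECV = 12 };

#define STF(s) (1 << (s))

static int purgeable(int state)
{
	/* true only when state is TIME_WAIT or NEW_SYN_RECV */
	return !((1 << state) & ~(STF(ST_TIME_WAIT) | STF(ST_NEW_SYN_RECV)));
}

int main(void)
{
	printf("TIME_WAIT=%d NEW_SYN_RECV=%d ESTABLISHED=%d\n",
	       purgeable(ST_TIME_WAIT), purgeable(ST_NEW_SYN_RECV),
	       purgeable(ST_ESTABLISHED));
	return 0;
}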
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index 48c6aa3d91ae8..5823e89b8a734 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -965,6 +965,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+@@ -1265,6 +1267,8 @@ static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
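
The arp_tables hunk above adds "(u64)len < (u64)tmp.size + sizeof(tmp)", and the same check recurs in the ip_tables and ip6_tables hunks below. The point of the u64 casts is that the addition cannot wrap where size_t is 32 bits. A self-contained sketch, with an invented struct and a forced 32-bit addition to emulate that case.

#include <stdio.h>
#include <stdint.h>

struct repl_hdr { uint32_t size; char name[32]; };

static int naive_ok(uint32_t len, const struct repl_hdr *h)
{
	uint32_t need = h->size + (uint32_t)sizeof(*h);   /* can wrap around */
	return len >= need;
}

static int safe_ok(uint64_t len, const struct repl_hdr *h)
{
	return len >= (uint64_t)h->size + sizeof(*h);     /* widened before adding */
}

int main(void)
{
	struct repl_hdr h = { .size = UINT32_MAX - 4 };   /* absurd claimed size */
	uint32_t len = 64;                                /* tiny actual buffer  */

	printf("naive accepts: %d, safe accepts: %d\n",
	       naive_ok(len, &h), safe_ok(len, &h));
	return 0;
}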
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index b46d58b9f3fe4..22e9ff592cd75 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -1119,6 +1119,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+@@ -1505,6 +1507,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index d360c7d70e8a2..cc409cc0789c8 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -955,13 +955,11 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
+ peer->rate_last = jiffies;
+ ++peer->n_redirects;
+-#ifdef CONFIG_IP_ROUTE_VERBOSE
+- if (log_martians &&
++ if (IS_ENABLED(CONFIG_IP_ROUTE_VERBOSE) && log_martians &&
+ peer->n_redirects == ip_rt_redirect_number)
+ net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
+ &ip_hdr(skb)->saddr, inet_iif(skb),
+ &ip_hdr(skb)->daddr, &gw);
+-#endif
+ }
+ out_put_peer:
+ inet_putpeer(peer);
+@@ -2090,6 +2088,9 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ int err = -EINVAL;
+ u32 tag = 0;
+
++ if (!in_dev)
++ return -EINVAL;
++
+ if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
+ goto martian_source;
+
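
The first route.c hunk above folds an #ifdef CONFIG_IP_ROUTE_VERBOSE block into an IS_ENABLED() test, so the logging branch is always parsed and type-checked but compiled out when the option is off. A minimal userspace sketch of the same style; CONFIG_VERBOSE here is a stand-in macro, not a kernel config symbol.

#include <stdio.h>

#define CONFIG_VERBOSE 0   /* flip to 1 to keep the branch */

int main(void)
{
	int log_martians = 1, n_redirects = 5, redirect_limit = 5;

	if (CONFIG_VERBOSE && log_martians && n_redirects == redirect_limit)
		printf("host ignores redirects\n");   /* dead code when 0, still checked */

	return 0;
}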
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 0b7e76e6f2028..16ff3962b24db 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1125,16 +1125,17 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+
+ if (msg->msg_controllen) {
+ err = udp_cmsg_send(sk, msg, &ipc.gso_size);
+- if (err > 0)
++ if (err > 0) {
+ err = ip_cmsg_send(sk, msg, &ipc,
+ sk->sk_family == AF_INET6);
++ connected = 0;
++ }
+ if (unlikely(err < 0)) {
+ kfree(ipc.opt);
+ return err;
+ }
+ if (ipc.opt)
+ free = 1;
+- connected = 0;
+ }
+ if (!ipc.opt) {
+ struct ip_options_rcu *inet_opt;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 0429c1d50fc92..8a6f4cdd5a486 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2044,9 +2044,10 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
+ if (ipv6_addr_equal(&ifp->addr, addr)) {
+ if (!dev || ifp->idev->dev == dev ||
+ !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
+- result = ifp;
+- in6_ifa_hold(ifp);
+- break;
++ if (in6_ifa_hold_safe(ifp)) {
++ result = ifp;
++ break;
++ }
+ }
+ }
+ }
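
The addrconf.c hunk above only returns the address after in6_ifa_hold_safe() succeeds, i.e. it takes a reference only if the refcount has not already dropped to zero. A sketch of that "increment unless zero" pattern using C11 atomics; hold_safe() and the counters are invented for illustration and do not reflect the kernel's refcount_t internals.

#include <stdatomic.h>
#include <stdio.h>

static int hold_safe(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	do {
		if (old == 0)
			return 0;   /* dying object: do not resurrect it */
	} while (!atomic_compare_exchange_weak(refcnt, &old, old + 1));

	return 1;
}

int main(void)
{
	atomic_int live = 2, dying = 0;

	printf("live:  %d (now %d)\n", hold_safe(&live), atomic_load(&live));
	printf("dying: %d (now %d)\n", hold_safe(&dying), atomic_load(&dying));
	return 0;
}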
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index d70783283a417..b79e571e5a863 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -1373,7 +1373,10 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
+ struct nl_info *info, struct netlink_ext_ack *extack)
+ {
+ struct fib6_table *table = rt->fib6_table;
+- struct fib6_node *fn, *pn = NULL;
++ struct fib6_node *fn;
++#ifdef CONFIG_IPV6_SUBTREES
++ struct fib6_node *pn = NULL;
++#endif
+ int err = -ENOMEM;
+ int allow_create = 1;
+ int replace_required = 0;
+@@ -1397,9 +1400,9 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
+ goto out;
+ }
+
++#ifdef CONFIG_IPV6_SUBTREES
+ pn = fn;
+
+-#ifdef CONFIG_IPV6_SUBTREES
+ if (rt->fib6_src.plen) {
+ struct fib6_node *sn;
+
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index d013395be05fc..df7cd3d285e4f 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -1137,6 +1137,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+@@ -1515,6 +1517,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index b5d879f2501da..8c9672e7a7dd6 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1453,9 +1453,11 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ ipc6.opt = opt;
+
+ err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
+- if (err > 0)
++ if (err > 0) {
+ err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
+ &ipc6);
++ connected = false;
++ }
+ if (err < 0) {
+ fl6_sock_release(flowlabel);
+ return err;
+@@ -1467,7 +1469,6 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ }
+ if (!(opt->opt_nflen|opt->opt_flen))
+ opt = NULL;
+- connected = false;
+ }
+ if (!opt) {
+ opt = txopt_get(np);
+diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
+index a0921adc31a9f..1e689c7141271 100644
+--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
++++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
+@@ -126,7 +126,8 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ if (sctph->source != cp->vport || payload_csum ||
+ skb->ip_summed == CHECKSUM_PARTIAL) {
+ sctph->source = cp->vport;
+- sctp_nat_csum(skb, sctph, sctphoff);
++ if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
++ sctp_nat_csum(skb, sctph, sctphoff);
+ } else {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+@@ -174,7 +175,8 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ (skb->ip_summed == CHECKSUM_PARTIAL &&
+ !(skb_dst(skb)->dev->features & NETIF_F_SCTP_CRC))) {
+ sctph->dest = cp->dport;
+- sctp_nat_csum(skb, sctph, sctphoff);
++ if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
++ sctp_nat_csum(skb, sctph, sctphoff);
+ } else if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index ab7f7e45b9846..858d09b54eaa4 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -2739,7 +2739,7 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
+ {
+ const struct nft_expr_type *type, *candidate = NULL;
+
+- list_for_each_entry(type, &nf_tables_expressions, list) {
++ list_for_each_entry_rcu(type, &nf_tables_expressions, list) {
+ if (!nla_strcmp(nla, type->name)) {
+ if (!type->family && !candidate)
+ candidate = type;
+@@ -2771,9 +2771,13 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
+ if (nla == NULL)
+ return ERR_PTR(-EINVAL);
+
++ rcu_read_lock();
+ type = __nft_expr_type_get(family, nla);
+- if (type != NULL && try_module_get(type->owner))
++ if (type != NULL && try_module_get(type->owner)) {
++ rcu_read_unlock();
+ return type;
++ }
++ rcu_read_unlock();
+
+ lockdep_nfnl_nft_mutex_not_held();
+ #ifdef CONFIG_MODULES
+diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
+index a18582a4ecf34..aad676402919b 100644
+--- a/net/netfilter/nft_chain_filter.c
++++ b/net/netfilter/nft_chain_filter.c
+@@ -339,7 +339,9 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
+ return;
+
+ if (n > 1) {
+- nf_unregister_net_hook(ctx->net, &found->ops);
++ if (!(ctx->chain->table->flags & NFT_TABLE_F_DORMANT))
++ nf_unregister_net_hook(ctx->net, &found->ops);
++
+ list_del_rcu(&found->list);
+ kfree_rcu(found, rcu);
+ return;
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index b9682e085fcef..5a8521abd8f5c 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1980,6 +1980,8 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
+ rules_fx = rules_f0;
+
+ nft_pipapo_for_each_field(f, i, m) {
++ bool last = i == m->field_count - 1;
++
+ if (!pipapo_match_field(f, start, rules_fx,
+ match_start, match_end))
+ break;
+@@ -1992,16 +1994,18 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
+
+ match_start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
+ match_end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
+- }
+
+- if (i == m->field_count) {
+- priv->dirty = true;
+- pipapo_drop(m, rulemap);
+- return;
++ if (last && f->mt[rulemap[i].to].e == e) {
++ priv->dirty = true;
++ pipapo_drop(m, rulemap);
++ return;
++ }
+ }
+
+ first_rule += rules_f0;
+ }
++
++ WARN_ON_ONCE(1); /* elem_priv not found */
+ }
+
+ /**
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index 0f0f380e81a40..30f5e414018b1 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -1692,8 +1692,9 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
+ if (ct_info.timeout[0]) {
+ if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
+ ct_info.timeout))
+- pr_info_ratelimited("Failed to associated timeout "
+- "policy `%s'\n", ct_info.timeout);
++ OVS_NLERR(log,
++ "Failed to associated timeout policy '%s'",
++ ct_info.timeout);
+ else
+ ct_info.nf_ct_timeout = rcu_dereference(
+ nf_ct_timeout_find(ct_info.ct)->timeout);
+@@ -1901,9 +1902,9 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
+ for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
+ struct hlist_head *head = &info->limits[i];
+ struct ovs_ct_limit *ct_limit;
++ struct hlist_node *next;
+
+- hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
+- lockdep_ovsl_is_held())
++ hlist_for_each_entry_safe(ct_limit, next, head, hlist_node)
+ kfree_rcu(ct_limit, rcu);
+ }
+ kfree(info->limits);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index b003d0597f4bd..224b1fdc82279 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -817,11 +817,11 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
+ sk->sk_write_space = unix_write_space;
+ sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
+ sk->sk_destruct = unix_sock_destructor;
+- u = unix_sk(sk);
++ u = unix_sk(sk);
++ u->inflight = 0;
+ u->path.dentry = NULL;
+ u->path.mnt = NULL;
+ spin_lock_init(&u->lock);
+- atomic_long_set(&u->inflight, 0);
+ INIT_LIST_HEAD(&u->link);
+ mutex_init(&u->iolock); /* single task reading lock */
+ mutex_init(&u->bindlock); /* single task binding lock */
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index 9121a4d5436d5..133ba5be4b580 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -166,17 +166,18 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
+
+ static void dec_inflight(struct unix_sock *usk)
+ {
+- atomic_long_dec(&usk->inflight);
++ usk->inflight--;
+ }
+
+ static void inc_inflight(struct unix_sock *usk)
+ {
+- atomic_long_inc(&usk->inflight);
++ usk->inflight++;
+ }
+
+ static void inc_inflight_move_tail(struct unix_sock *u)
+ {
+- atomic_long_inc(&u->inflight);
++ u->inflight++;
++
+ /* If this still might be part of a cycle, move it to the end
+ * of the list, so that it's checked even if it was already
+ * passed over
+@@ -234,20 +235,34 @@ void unix_gc(void)
+ * receive queues. Other, non candidate sockets _can_ be
+ * added to queue, so we must make sure only to touch
+ * candidates.
++ *
++ * Embryos, though never candidates themselves, affect which
++ * candidates are reachable by the garbage collector. Before
++ * being added to a listener's queue, an embryo may already
++ * receive data carrying SCM_RIGHTS, potentially making the
++ * passed socket a candidate that is not yet reachable by the
++ * collector. It becomes reachable once the embryo is
++ * enqueued. Therefore, we must ensure that no SCM-laden
++ * embryo appears in a (candidate) listener's queue between
++ * consecutive scan_children() calls.
+ */
+ list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
++ struct sock *sk = &u->sk;
+ long total_refs;
+- long inflight_refs;
+
+- total_refs = file_count(u->sk.sk_socket->file);
+- inflight_refs = atomic_long_read(&u->inflight);
++ total_refs = file_count(sk->sk_socket->file);
+
+- BUG_ON(inflight_refs < 1);
+- BUG_ON(total_refs < inflight_refs);
+- if (total_refs == inflight_refs) {
++ BUG_ON(!u->inflight);
++ BUG_ON(total_refs < u->inflight);
++ if (total_refs == u->inflight) {
+ list_move_tail(&u->link, &gc_candidates);
+ __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
+ __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
++
++ if (sk->sk_state == TCP_LISTEN) {
++ unix_state_lock_nested(sk, U_LOCK_GC_LISTENER);
++ unix_state_unlock(sk);
++ }
+ }
+ }
+
+@@ -271,7 +286,7 @@ void unix_gc(void)
+ /* Move cursor to after the current position. */
+ list_move(&cursor, &u->link);
+
+- if (atomic_long_read(&u->inflight) > 0) {
++ if (u->inflight) {
+ list_move_tail(&u->link, &not_cycle_list);
+ __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
+ scan_children(&u->sk, inc_inflight_move_tail, NULL);
+diff --git a/net/unix/scm.c b/net/unix/scm.c
+index d1048b4c2baaf..4eff7da9f6f96 100644
+--- a/net/unix/scm.c
++++ b/net/unix/scm.c
+@@ -52,12 +52,13 @@ void unix_inflight(struct user_struct *user, struct file *fp)
+ if (s) {
+ struct unix_sock *u = unix_sk(s);
+
+- if (atomic_long_inc_return(&u->inflight) == 1) {
++ if (!u->inflight) {
+ BUG_ON(!list_empty(&u->link));
+ list_add_tail(&u->link, &gc_inflight_list);
+ } else {
+ BUG_ON(list_empty(&u->link));
+ }
++ u->inflight++;
+ /* Paired with READ_ONCE() in wait_for_unix_gc() */
+ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
+ }
+@@ -74,10 +75,11 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
+ if (s) {
+ struct unix_sock *u = unix_sk(s);
+
+- BUG_ON(!atomic_long_read(&u->inflight));
++ BUG_ON(!u->inflight);
+ BUG_ON(list_empty(&u->link));
+
+- if (atomic_long_dec_and_test(&u->inflight))
++ u->inflight--;
++ if (!u->inflight)
+ list_del_init(&u->link);
+ /* Paired with READ_ONCE() in wait_for_unix_gc() */
+ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index d04f91f4d09df..562d69f17b4c0 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -895,6 +895,8 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
+ struct xsk_queue **q;
+ int entries;
+
++ if (optlen < sizeof(entries))
++ return -EINVAL;
+ if (copy_from_sockptr(&entries, optval, sizeof(entries)))
+ return -EFAULT;
+
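
The xsk.c hunk above rejects an option whose length is shorter than the value about to be copied, so the fixed-size copy never reads past the caller's buffer. A userspace sketch of that bound check; plain memcpy() stands in for copy_from_sockptr(), and set_ring_size() is an invented name.

#include <stdio.h>
#include <string.h>

static int set_ring_size(const void *optval, size_t optlen, int *entries)
{
	if (optlen < sizeof(*entries))
		return -1;                        /* -EINVAL in the kernel */
	memcpy(entries, optval, sizeof(*entries));
	return 0;
}

int main(void)
{
	int opt = 2048, entries = 0;
	short short_opt = 7;

	printf("full:  %d (entries=%d)\n",
	       set_ring_size(&opt, sizeof(opt), &entries), entries);
	printf("short: %d\n",
	       set_ring_size(&short_opt, sizeof(short_opt), &entries));
	return 0;
}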
+diff --git a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
+index b1ede62498667..b7c8f29c09a97 100644
+--- a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
++++ b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
+@@ -18,7 +18,7 @@ echo 'sched:*' > set_event
+
+ yield
+
+-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
++count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+ if [ $count -lt 3 ]; then
+ fail "at least fork, exec and exit events should be recorded"
+ fi
+@@ -29,7 +29,7 @@ echo 1 > events/sched/enable
+
+ yield
+
+-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
++count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+ if [ $count -lt 3 ]; then
+ fail "at least fork, exec and exit events should be recorded"
+ fi
+@@ -40,7 +40,7 @@ echo 0 > events/sched/enable
+
+ yield
+
+-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
++count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+ if [ $count -ne 0 ]; then
+ fail "any of scheduler events should not be recorded"
+ fi
+diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
+index 0ba500056e635..193a984f512c3 100644
+--- a/tools/testing/selftests/timers/posix_timers.c
++++ b/tools/testing/selftests/timers/posix_timers.c
+@@ -66,7 +66,7 @@ static int check_diff(struct timeval start, struct timeval end)
+ diff = end.tv_usec - start.tv_usec;
+ diff += (end.tv_sec - start.tv_sec) * USECS_PER_SEC;
+
+- if (abs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
++ if (llabs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
+ printf("Diff too high: %lld..", diff);
+ return -1;
+ }