author      Mike Pagano <mpagano@gentoo.org>    2021-11-02 15:30:02 -0400
committer   Mike Pagano <mpagano@gentoo.org>    2021-11-02 15:30:02 -0400
commit      727abb72d5c6700b0a552aacec38e628eaa69fdf (patch)
tree        ae34d3f5ef7e7b96d50c5c59fac7936aecc9e1fc
parent      Linux patch 5.14.15 (diff)
download    linux-patches-727abb72d5c6700b0a552aacec38e628eaa69fdf.tar.gz
            linux-patches-727abb72d5c6700b0a552aacec38e628eaa69fdf.tar.bz2
            linux-patches-727abb72d5c6700b0a552aacec38e628eaa69fdf.zip
Linux patch 5.14.16 (5.14-18, 5.14-17)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--   0000_README                  4
-rw-r--r--   1015_linux-5.14.16.patch  4422
2 files changed, 4426 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index ea788cb2..8bcce4cd 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch: 1014_linux-5.14.15.patch
From: http://www.kernel.org
Desc: Linux 5.14.15
+Patch: 1015_linux-5.14.16.patch
+From: http://www.kernel.org
+Desc: Linux 5.14.16
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1015_linux-5.14.16.patch b/1015_linux-5.14.16.patch
new file mode 100644
index 00000000..45842968
--- /dev/null
+++ b/1015_linux-5.14.16.patch
@@ -0,0 +1,4422 @@
+diff --git a/Makefile b/Makefile
+index e66341fba8a4e..02b6dab373ddb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 14
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = Opossums on Parade
+
+diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
+index aa075d8372ea2..74255e8198314 100644
+--- a/arch/arm/boot/compressed/decompress.c
++++ b/arch/arm/boot/compressed/decompress.c
+@@ -47,7 +47,10 @@ extern char * strchrnul(const char *, int);
+ #endif
+
+ #ifdef CONFIG_KERNEL_XZ
++/* Prevent KASAN override of string helpers in decompressor */
++#undef memmove
+ #define memmove memmove
++#undef memcpy
+ #define memcpy memcpy
+ #include "../../../../lib/decompress_unxz.c"
+ #endif
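
The hunk above neutralizes KASAN's interception of the string helpers before pulling in the xz decompressor: #undef drops any checking wrapper macro, and re-defining each name to itself keeps the name defined as a macro, so the included decompressor does not substitute its own fallback. A minimal userspace sketch of that preprocessor dance — all names here are hypothetical stand-ins, not kernel code:

#include <stdio.h>
#include <string.h>

static void *checked_memcpy(void *d, const void *s, size_t n)
{
        puts("instrumented path");
        return (memcpy)(d, s, n);       /* parentheses block macro expansion */
}

#define memcpy(d, s, n) checked_memcpy(d, s, n) /* stand-in for KASAN's wrapper */

static void instrumented_user(char *d, const char *s) { memcpy(d, s, 4); }

#undef memcpy           /* the hunk's trick: drop the wrapper... */
#define memcpy memcpy   /* ...but keep the name defined, as itself */

static void plain_user(char *d, const char *s) { memcpy(d, s, 4); }

int main(void)
{
        char buf[8];

        instrumented_user(buf, "abc");  /* prints "instrumented path" */
        plain_user(buf, "abc");         /* calls plain libc memcpy */
        return 0;
}
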
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index a13d902064722..d9db752c51fe2 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -200,6 +200,7 @@ extern int __get_user_64t_4(void *);
+ register unsigned long __l asm("r1") = __limit; \
+ register int __e asm("r0"); \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
++ int __tmp_e; \
+ switch (sizeof(*(__p))) { \
+ case 1: \
+ if (sizeof((x)) >= 8) \
+@@ -227,9 +228,10 @@ extern int __get_user_64t_4(void *);
+ break; \
+ default: __e = __get_user_bad(); break; \
+ } \
++ __tmp_e = __e; \
+ uaccess_restore(__ua_flags); \
+ x = (typeof(*(p))) __r2; \
+- __e; \
++ __tmp_e; \
+ })
+
+ #define get_user(x, p) \
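
The uaccess change fixes a statement-expression pitfall: __e lives in register r0, which the uaccess_restore() step can clobber (for instance via instrumented helper calls), so the macro must snapshot the error code into __tmp_e first and yield the snapshot. A contrived sketch of the pattern using GNU C statement expressions; every name below is a stand-in:

#include <stdio.h>

static int err;                         /* stands in for register r0 (__e) */

static void restore_state(void)         /* stands in for uaccess_restore() */
{
        err = -14;                      /* the clobber the caller must survive */
}

#define checked_op()                                            \
({                                                              \
        int __tmp_e;                                            \
        err = 0;                /* the access succeeded */      \
        __tmp_e = err;          /* snapshot before the clobbering call */ \
        restore_state();                                        \
        __tmp_e;                /* yield the snapshot, not err */ \
})

int main(void)
{
        printf("%d\n", checked_op());   /* 0, even though err is now -14 */
        return 0;
}
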
+diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
+index 29070eb8df7d9..3fc7f9750ce4b 100644
+--- a/arch/arm/kernel/head.S
++++ b/arch/arm/kernel/head.S
+@@ -253,7 +253,7 @@ __create_page_tables:
+ add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
+ ldr r6, =(_end - 1)
+ adr_l r5, kernel_sec_start @ _pa(kernel_sec_start)
+-#ifdef CONFIG_CPU_ENDIAN_BE8
++#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
+ str r8, [r5, #4] @ Save physical start of kernel (BE)
+ #else
+ str r8, [r5] @ Save physical start of kernel (LE)
+@@ -266,7 +266,7 @@ __create_page_tables:
+ bls 1b
+ eor r3, r3, r7 @ Remove the MMU flags
+ adr_l r5, kernel_sec_end @ _pa(kernel_sec_end)
+-#ifdef CONFIG_CPU_ENDIAN_BE8
++#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
+ str r3, [r5, #4] @ Save physical end of kernel (BE)
+ #else
+ str r3, [r5] @ Save physical end of kernel (LE)
+diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
+index 50136828f5b54..f14c2360ea0b1 100644
+--- a/arch/arm/kernel/vmlinux-xip.lds.S
++++ b/arch/arm/kernel/vmlinux-xip.lds.S
+@@ -40,6 +40,10 @@ SECTIONS
+ ARM_DISCARD
+ *(.alt.smp.init)
+ *(.pv_table)
++#ifndef CONFIG_ARM_UNWIND
++ *(.ARM.exidx) *(.ARM.exidx.*)
++ *(.ARM.extab) *(.ARM.extab.*)
++#endif
+ }
+
+ . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
+@@ -172,7 +176,7 @@ ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
+ ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA")
+ #endif
+
+-#ifdef CONFIG_ARM_MPU
++#if defined(CONFIG_ARM_MPU) && !defined(CONFIG_COMPILE_TEST)
+ /*
+ * Due to PMSAv7 restriction on base address and size we have to
+ * enforce minimal alignment restrictions. It was seen that weaker
+diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
+index e2c743aa2eb2b..d9f7dfe2a7ed3 100644
+--- a/arch/arm/mm/proc-macros.S
++++ b/arch/arm/mm/proc-macros.S
+@@ -340,6 +340,7 @@ ENTRY(\name\()_cache_fns)
+
+ .macro define_tlb_functions name:req, flags_up:req, flags_smp
+ .type \name\()_tlb_fns, #object
++ .align 2
+ ENTRY(\name\()_tlb_fns)
+ .long \name\()_flush_user_tlb_range
+ .long \name\()_flush_kern_tlb_range
+diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
+index 27e0af78e88b0..9d8634e2f12f7 100644
+--- a/arch/arm/probes/kprobes/core.c
++++ b/arch/arm/probes/kprobes/core.c
+@@ -439,7 +439,7 @@ static struct undef_hook kprobes_arm_break_hook = {
+
+ #endif /* !CONFIG_THUMB2_KERNEL */
+
+-int __init arch_init_kprobes()
++int __init arch_init_kprobes(void)
+ {
+ arm_probes_decode_init();
+ #ifdef CONFIG_THUMB2_KERNEL
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
+index 02f8e72f0cad1..05486cccee1c4 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
+@@ -75,7 +75,7 @@
+ pinctrl-0 = <&emac_rgmii_pins>;
+ phy-supply = <&reg_gmac_3v3>;
+ phy-handle = <&ext_rgmii_phy>;
+- phy-mode = "rgmii";
++ phy-mode = "rgmii-id";
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-s.dts b/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-s.dts
+index d17abb5158351..e99e7644ff392 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-s.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-s.dts
+@@ -70,7 +70,9 @@
+ regulator-name = "rst-usb-eth2";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb_eth2>;
+- gpio = <&gpio3 2 GPIO_ACTIVE_LOW>;
++ gpio = <&gpio3 2 GPIO_ACTIVE_HIGH>;
++ enable-active-high;
++ regulator-always-on;
+ };
+
+ reg_vdd_5v: regulator-5v {
+@@ -95,7 +97,7 @@
+ clocks = <&osc_can>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <28 IRQ_TYPE_EDGE_FALLING>;
+- spi-max-frequency = <100000>;
++ spi-max-frequency = <10000000>;
+ vdd-supply = <&reg_vdd_3v3>;
+ xceiver-supply = <&reg_vdd_5v>;
+ };
+@@ -111,7 +113,7 @@
+ &fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+- phy-connection-type = "rgmii";
++ phy-connection-type = "rgmii-rxid";
+ phy-handle = <&ethphy>;
+ status = "okay";
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-som.dtsi
+index 9db9b90bf2bc9..42bbbb3f532bc 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-som.dtsi
+@@ -91,10 +91,12 @@
+ reg_vdd_soc: BUCK1 {
+ regulator-name = "buck1";
+ regulator-min-microvolt = <800000>;
+- regulator-max-microvolt = <900000>;
++ regulator-max-microvolt = <850000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
++ nxp,dvs-run-voltage = <850000>;
++ nxp,dvs-standby-voltage = <800000>;
+ };
+
+ reg_vdd_arm: BUCK2 {
+@@ -111,7 +113,7 @@
+ reg_vdd_dram: BUCK3 {
+ regulator-name = "buck3";
+ regulator-min-microvolt = <850000>;
+- regulator-max-microvolt = <900000>;
++ regulator-max-microvolt = <950000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+@@ -150,7 +152,7 @@
+
+ reg_vdd_snvs: LDO2 {
+ regulator-name = "ldo2";
+- regulator-min-microvolt = <850000>;
++ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <900000>;
+ regulator-boot-on;
+ regulator-always-on;
+diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
+index 0e23e3a8df6b5..d55b73b18149e 100644
+--- a/arch/nds32/kernel/ftrace.c
++++ b/arch/nds32/kernel/ftrace.c
+@@ -6,7 +6,7 @@
+
+ #ifndef CONFIG_DYNAMIC_FTRACE
+ extern void (*ftrace_trace_function)(unsigned long, unsigned long,
+- struct ftrace_ops*, struct pt_regs*);
++ struct ftrace_ops*, struct ftrace_regs*);
+ extern void ftrace_graph_caller(void);
+
+ noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
+diff --git a/arch/nios2/platform/Kconfig.platform b/arch/nios2/platform/Kconfig.platform
+index 9e32fb7f3d4ce..e849daff6fd16 100644
+--- a/arch/nios2/platform/Kconfig.platform
++++ b/arch/nios2/platform/Kconfig.platform
+@@ -37,6 +37,7 @@ config NIOS2_DTB_PHYS_ADDR
+
+ config NIOS2_DTB_SOURCE_BOOL
+ bool "Compile and link device tree into kernel image"
++ depends on !COMPILE_TEST
+ help
+ This allows you to specify a dts (device tree source) file
+ which will be compiled and linked into the kernel image.
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index 4f7b70ae7c319..cd295e30b7acc 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -157,6 +157,12 @@ config PAGE_OFFSET
+ default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
+ default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
+
++config KASAN_SHADOW_OFFSET
++ hex
++ depends on KASAN_GENERIC
++ default 0xdfffffc800000000 if 64BIT
++ default 0xffffffff if 32BIT
++
+ config ARCH_FLATMEM_ENABLE
+ def_bool !NUMA
+
+diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h
+index a2b3d9cdbc868..b00f503ec1248 100644
+--- a/arch/riscv/include/asm/kasan.h
++++ b/arch/riscv/include/asm/kasan.h
+@@ -30,8 +30,7 @@
+ #define KASAN_SHADOW_SIZE (UL(1) << ((CONFIG_VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT))
+ #define KASAN_SHADOW_START KERN_VIRT_START
+ #define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+-#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << \
+- (64 - KASAN_SHADOW_SCALE_SHIFT)))
++#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+
+ void kasan_init(void);
+ asmlinkage void kasan_early_init(void);
+diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
+index fce5184b22c34..52c5ff9804c55 100644
+--- a/arch/riscv/kernel/head.S
++++ b/arch/riscv/kernel/head.S
+@@ -193,6 +193,7 @@ setup_trap_vector:
+ csrw CSR_SCRATCH, zero
+ ret
+
++.align 2
+ .Lsecondary_park:
+ /* We lack SMP support or have too many harts, so park this hart */
+ wfi
+diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
+index d7189c8714a95..54294f83513d1 100644
+--- a/arch/riscv/mm/kasan_init.c
++++ b/arch/riscv/mm/kasan_init.c
+@@ -17,6 +17,9 @@ asmlinkage void __init kasan_early_init(void)
+ uintptr_t i;
+ pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);
+
++ BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
++ KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
++
+ for (i = 0; i < PTRS_PER_PTE; ++i)
+ set_pte(kasan_early_shadow_pte + i,
+ mk_pte(virt_to_page(kasan_early_shadow_page),
+@@ -172,21 +175,10 @@ void __init kasan_init(void)
+ phys_addr_t p_start, p_end;
+ u64 i;
+
+- /*
+- * Populate all kernel virtual address space with kasan_early_shadow_page
+- * except for the linear mapping and the modules/kernel/BPF mapping.
+- */
+- kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
+- (void *)kasan_mem_to_shadow((void *)
+- VMEMMAP_END));
+ if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+ kasan_shallow_populate(
+ (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
+ (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+- else
+- kasan_populate_early_shadow(
+- (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
+- (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+
+ /* Populate the linear mapping */
+ for_each_mem_range(i, &p_start, &p_end) {
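
With KASAN_SHADOW_OFFSET now a Kconfig literal (usable directly wherever a plain constant is required, such as compiler instrumentation flags), the added BUILD_BUG_ON keeps that literal honest against the value the memory layout actually derives. The same compile-time guard in standard C11, with hypothetical layout values chosen so the assertion holds:

#define SHADOW_SCALE_SHIFT      3
#define SHADOW_END              0xe000000000000000ULL  /* hypothetical layout */
#define CONFIG_SHADOW_OFFSET    0xc000000000000000ULL  /* the "Kconfig" literal */

/* Fail the build, not the boot, if the hardcoded literal ever drifts
 * from the value the memory layout derives. */
_Static_assert(CONFIG_SHADOW_OFFSET ==
               SHADOW_END - (1ULL << (64 - SHADOW_SCALE_SHIFT)),
               "KASAN shadow offset must match the derived layout value");

int main(void) { return 0; }
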
+diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
+index fed86f42dfbe5..5d247198c30d3 100644
+--- a/arch/riscv/net/bpf_jit_core.c
++++ b/arch/riscv/net/bpf_jit_core.c
+@@ -125,7 +125,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+
+ if (i == NR_JIT_ITERATIONS) {
+ pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
+- bpf_jit_binary_free(jit_data->header);
++ if (jit_data->header)
++ bpf_jit_binary_free(jit_data->header);
+ prog = orig_prog;
+ goto out_offset;
+ }
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 16256e17a544a..ee9d052476b50 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -3053,13 +3053,14 @@ static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
+ int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+ struct kvm_vcpu *vcpu;
++ u8 vcpu_isc_mask;
+
+ for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
+ vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+ if (psw_ioint_disabled(vcpu))
+ continue;
+- deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
+- if (deliverable_mask) {
++ vcpu_isc_mask = (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
++ if (deliverable_mask & vcpu_isc_mask) {
+ /* lately kicked but not yet running */
+ if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
+ return;
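
The s390 hunk fixes a loop-invariant bug: and-ing the shared deliverable_mask in place on an early iteration could zero it and hide deliverable interrupts from the remaining idle vcpus, so the fix tests against a per-vcpu copy instead. A tiny standalone model of the bug class, with made-up mask values:

#include <stdio.h>

int main(void)
{
        unsigned char deliverable_mask = 0x0f;          /* ISCs with work */
        unsigned char vcpu_masks[] = { 0x10, 0x01, 0x02 };

        for (int i = 0; i < 3; i++) {
                unsigned char vcpu_isc_mask = vcpu_masks[i];

                /* Buggy form: deliverable_mask &= vcpu_isc_mask;
                 * iteration 0 would zero the mask and hide vcpus 1 and 2. */
                if (deliverable_mask & vcpu_isc_mask)
                        printf("kick vcpu %d\n", i);    /* i == 1 and i == 2 */
        }
        return 0;
}
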
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 8580543c5bc33..46ad1bdd53a27 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -3341,6 +3341,7 @@ out_free_sie_block:
+
+ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+ {
++ clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
+ return kvm_s390_vcpu_has_irq(vcpu, 0);
+ }
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 471b35d0b121e..41f7ee07271e1 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1084,7 +1084,7 @@ struct kvm_arch {
+ u64 cur_tsc_generation;
+ int nr_vcpus_matched_tsc;
+
+- spinlock_t pvclock_gtod_sync_lock;
++ raw_spinlock_t pvclock_gtod_sync_lock;
+ bool use_master_clock;
+ u64 master_kernel_ns;
+ u64 master_cycle_now;
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 9959888cb10c8..675a9bbf322e0 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -2592,11 +2592,20 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
+
+ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
+ {
+- if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
++ int count;
++ int bytes;
++
++ if (svm->vmcb->control.exit_info_2 > INT_MAX)
++ return -EINVAL;
++
++ count = svm->vmcb->control.exit_info_2;
++ if (unlikely(check_mul_overflow(count, size, &bytes)))
++ return -EINVAL;
++
++ if (!setup_vmgexit_scratch(svm, in, bytes))
+ return -EINVAL;
+
+- return kvm_sev_es_string_io(&svm->vcpu, size, port,
+- svm->ghcb_sa, svm->ghcb_sa_len / size, in);
++ return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->ghcb_sa, count, in);
+ }
+
+ void sev_es_init_vmcb(struct vcpu_svm *svm)
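
The SEV hunk stops trusting the guest-controlled exit_info_2 count: it is range-checked against INT_MAX, and the count * size multiplication is rejected on overflow via check_mul_overflow(). A userspace sketch using the compiler builtin that macro wraps; the function and values are hypothetical:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static int scratch_len(uint64_t exit_info, int size, int *bytes)
{
        int count;

        if (exit_info > INT_MAX)        /* untrusted 64-bit guest field */
                return -1;
        count = (int)exit_info;
        if (__builtin_mul_overflow(count, size, bytes))
                return -1;              /* count * size overflowed int */
        return 0;
}

int main(void)
{
        int bytes;

        printf("%d\n", scratch_len(1u << 30, 4, &bytes)); /* -1: overflow */
        printf("%d\n", scratch_len(512, 4, &bytes));      /* 0, bytes == 2048 */
        return 0;
}
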
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 8e9df0e00f3dd..6aea38dfb0bb0 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2537,7 +2537,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
+ kvm_vcpu_write_tsc_offset(vcpu, offset);
+ raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
+
+- spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
++ raw_spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
+ if (!matched) {
+ kvm->arch.nr_vcpus_matched_tsc = 0;
+ } else if (!already_matched) {
+@@ -2545,7 +2545,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
+ }
+
+ kvm_track_tsc_matching(vcpu);
+- spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
++ raw_spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
+ }
+
+ static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
+@@ -2775,9 +2775,9 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
+ kvm_make_mclock_inprogress_request(kvm);
+
+ /* no guest entries from this point */
+- spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
++ raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+ pvclock_update_vm_gtod_copy(kvm);
+- spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
++ raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+@@ -2795,15 +2795,15 @@ u64 get_kvmclock_ns(struct kvm *kvm)
+ unsigned long flags;
+ u64 ret;
+
+- spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
++ raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+ if (!ka->use_master_clock) {
+- spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
++ raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+ return get_kvmclock_base_ns() + ka->kvmclock_offset;
+ }
+
+ hv_clock.tsc_timestamp = ka->master_cycle_now;
+ hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
+- spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
++ raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+
+ /* both __this_cpu_read() and rdtsc() should be on the same cpu */
+ get_cpu();
+@@ -2897,13 +2897,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
+ * If the host uses TSC clock, then passthrough TSC as stable
+ * to the guest.
+ */
+- spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
++ raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+ use_master_clock = ka->use_master_clock;
+ if (use_master_clock) {
+ host_tsc = ka->master_cycle_now;
+ kernel_ns = ka->master_kernel_ns;
+ }
+- spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
++ raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+
+ /* Keep irq disabled to prevent changes to the clock */
+ local_irq_save(flags);
+@@ -6101,13 +6101,13 @@ set_pit2_out:
+ * is slightly ahead) here we risk going negative on unsigned
+ * 'system_time' when 'user_ns.clock' is very small.
+ */
+- spin_lock_irq(&ka->pvclock_gtod_sync_lock);
++ raw_spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+ if (kvm->arch.use_master_clock)
+ now_ns = ka->master_kernel_ns;
+ else
+ now_ns = get_kvmclock_base_ns();
+ ka->kvmclock_offset = user_ns.clock - now_ns;
+- spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
++ raw_spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
+
+ kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
+ break;
+@@ -8157,9 +8157,9 @@ static void kvm_hyperv_tsc_notifier(void)
+ list_for_each_entry(kvm, &vm_list, vm_list) {
+ struct kvm_arch *ka = &kvm->arch;
+
+- spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
++ raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+ pvclock_update_vm_gtod_copy(kvm);
+- spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
++ raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+
+ kvm_for_each_vcpu(cpu, vcpu, kvm)
+ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+@@ -8799,9 +8799,17 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
+
+ kvm_run->cr8 = kvm_get_cr8(vcpu);
+ kvm_run->apic_base = kvm_get_apic_base(vcpu);
++
++ /*
++ * The call to kvm_ready_for_interrupt_injection() may end up in
++ * kvm_xen_has_interrupt() which may require the srcu lock to be
++ * held, to protect against changes in the vcpu_info address.
++ */
++ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_run->ready_for_interrupt_injection =
+ pic_in_kernel(vcpu->kvm) ||
+ kvm_vcpu_ready_for_interrupt_injection(vcpu);
++ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+
+ if (is_smm(vcpu))
+ kvm_run->flags |= KVM_RUN_X86_SMM;
+@@ -11148,7 +11156,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+
+ raw_spin_lock_init(&kvm->arch.tsc_write_lock);
+ mutex_init(&kvm->arch.apic_map_lock);
+- spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
++ raw_spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
+
+ kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
+ pvclock_update_vm_gtod_copy(kvm);
+diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
+index ae17250e1efe0..283a4744a9e78 100644
+--- a/arch/x86/kvm/xen.c
++++ b/arch/x86/kvm/xen.c
+@@ -191,6 +191,7 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
+
+ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
+ {
++ int err;
+ u8 rc = 0;
+
+ /*
+@@ -217,13 +218,29 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
+ if (likely(slots->generation == ghc->generation &&
+ !kvm_is_error_hva(ghc->hva) && ghc->memslot)) {
+ /* Fast path */
+- __get_user(rc, (u8 __user *)ghc->hva + offset);
+- } else {
+- /* Slow path */
+- kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
+- sizeof(rc));
++ pagefault_disable();
++ err = __get_user(rc, (u8 __user *)ghc->hva + offset);
++ pagefault_enable();
++ if (!err)
++ return rc;
+ }
+
++ /* Slow path */
++
++ /*
++ * This function gets called from kvm_vcpu_block() after setting the
++ * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
++ * from a HLT. So we really mustn't sleep. If the page ended up absent
++ * at that point, just return 1 in order to trigger an immediate wake,
++ * and we'll end up getting called again from a context where we *can*
++ * fault in the page and wait for it.
++ */
++ if (in_atomic() || !task_is_running(current))
++ return 1;
++
++ kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
++ sizeof(rc));
++
+ return rc;
+ }
+
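
The Xen hunk splits __kvm_xen_has_interrupt() into a non-faulting fast path and a sleeping slow path, returning a conservative 1 when called from a context that must not sleep. A behavioral sketch of that split, with stub functions and hypothetical values:

#include <stdbool.h>
#include <stdio.h>

static bool nonblocking_read(unsigned char *out) { *out = 0; return false; }
static unsigned char blocking_read(void) { return 1; }

static int has_interrupt(bool may_sleep)
{
        unsigned char rc;

        if (nonblocking_read(&rc))      /* fast path hit: trust the value */
                return rc;
        if (!may_sleep)                 /* atomic context: report "pending" */
                return 1;               /* and recheck where we can fault in */
        return blocking_read();         /* slow path, may sleep */
}

int main(void)
{
        printf("%d\n", has_interrupt(false));   /* 1: conservative wake */
        printf("%d\n", has_interrupt(true));    /* 1: real slow-path read */
        return 0;
}
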
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 902c40d671202..4c7c0c17cb0a3 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -842,6 +842,24 @@ bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
+ }
+ EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
+
++static bool disk_has_partitions(struct gendisk *disk)
++{
++ unsigned long idx;
++ struct block_device *part;
++ bool ret = false;
++
++ rcu_read_lock();
++ xa_for_each(&disk->part_tbl, idx, part) {
++ if (bdev_is_partition(part)) {
++ ret = true;
++ break;
++ }
++ }
++ rcu_read_unlock();
++
++ return ret;
++}
++
+ /**
+ * blk_queue_set_zoned - configure a disk queue zoned model.
+ * @disk: the gendisk of the queue to configure
+@@ -876,7 +894,7 @@ void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
+ * we do nothing special as far as the block layer is concerned.
+ */
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
+- !xa_empty(&disk->part_tbl))
++ disk_has_partitions(disk))
+ model = BLK_ZONED_NONE;
+ break;
+ case BLK_ZONED_NONE:
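
blk_queue_set_zoned() previously tested !xa_empty(&disk->part_tbl), but that table also holds the whole-disk block device, so it is never empty; the new disk_has_partitions() scans for entries that are real partitions. The shape of that fix, modeled with a plain array and hypothetical types:

#include <stdbool.h>
#include <stdio.h>

struct bdev { int partno; };            /* partno 0 == the whole disk */

static bool has_partitions(const struct bdev *tbl, int n)
{
        for (int i = 0; i < n; i++)
                if (tbl[i].partno > 0)  /* a real partition, not the disk */
                        return true;
        return false;
}

int main(void)
{
        struct bdev only_disk[] = { { 0 } };
        struct bdev with_part[] = { { 0 }, { 1 } };

        printf("%d %d\n", has_partitions(only_disk, 1),
               has_partitions(with_part, 2));   /* prints "0 1" */
        return 0;
}
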
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index 9d86203e1e7a1..c53633d47bfb3 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -3896,8 +3896,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
+ break;
+
+ default:
+- dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
+- return 1;
++ dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
++ return -EINVAL;
+ }
+
+ hpriv->hp_flags = hp_flags;
+diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
+index cfa29dc89bbff..fabf87058d80b 100644
+--- a/drivers/base/regmap/regcache-rbtree.c
++++ b/drivers/base/regmap/regcache-rbtree.c
+@@ -281,14 +281,14 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
+ if (!blk)
+ return -ENOMEM;
+
++ rbnode->block = blk;
++
+ if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+ present = krealloc(rbnode->cache_present,
+ BITS_TO_LONGS(blklen) * sizeof(*present),
+ GFP_KERNEL);
+- if (!present) {
+- kfree(blk);
++ if (!present)
+ return -ENOMEM;
+- }
+
+ memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
+ (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
+@@ -305,7 +305,6 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
+ }
+
+ /* update the rbnode block, its size and the base register */
+- rbnode->block = blk;
+ rbnode->blklen = blklen;
+ rbnode->base_reg = base_reg;
+ rbnode->cache_present = present;
+diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
+index ad5489a65d542..dd40277b9d06d 100644
+--- a/drivers/gpio/gpio-xgs-iproc.c
++++ b/drivers/gpio/gpio-xgs-iproc.c
+@@ -224,7 +224,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
+ }
+
+ chip->gc.label = dev_name(dev);
+- if (of_property_read_u32(dn, "ngpios", &num_gpios))
++ if (!of_property_read_u32(dn, "ngpios", &num_gpios))
+ chip->gc.ngpio = num_gpios;
+
+ irq = platform_get_irq(pdev, 0);
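
The GPIO fix is a return-convention bug: of_property_read_u32() returns 0 on success, so the optional ngpios property was only honored when the read failed. A minimal model of the convention; the helper below is a stub standing in for the OF call:

#include <stdio.h>

static int read_u32(const char *prop, unsigned int *val)
{
        (void)prop;
        *val = 32;
        return 0;                       /* 0 == success, like the OF helper */
}

int main(void)
{
        unsigned int ngpio = 16;        /* driver default */
        unsigned int num_gpios;

        if (!read_u32("ngpios", &num_gpios))    /* note the '!': act on success */
                ngpio = num_gpios;
        printf("%u\n", ngpio);          /* 32; without the '!' it stays 16 */
        return 0;
}
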
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
+index 94d029dbf30da..fbcee5d7d6a11 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
+@@ -1237,7 +1237,7 @@ static int nv_common_early_init(void *handle)
+ AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_JPEG;
+ if (adev->pdev->device == 0x1681)
+- adev->external_rev_id = adev->rev_id + 0x19;
++ adev->external_rev_id = 0x20;
+ else
+ adev->external_rev_id = adev->rev_id + 0x01;
+ break;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index 1d15a9af99560..4c8edfdc3cac8 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -263,7 +263,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ if (!wr_buf)
+ return -ENOSPC;
+
+- if (parse_write_buffer_into_params(wr_buf, size,
++ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+@@ -487,7 +487,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
+ if (!wr_buf)
+ return -ENOSPC;
+
+- if (parse_write_buffer_into_params(wr_buf, size,
++ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+@@ -639,7 +639,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
+ if (!wr_buf)
+ return -ENOSPC;
+
+- if (parse_write_buffer_into_params(wr_buf, size,
++ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+@@ -914,7 +914,7 @@ static ssize_t dp_dsc_passthrough_set(struct file *f, const char __user *buf,
+ return -ENOSPC;
+ }
+
+- if (parse_write_buffer_into_params(wr_buf, size,
++ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ &param, buf,
+ max_param_num,
+ &param_nums)) {
+@@ -1211,7 +1211,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
+ return -ENOSPC;
+ }
+
+- if (parse_write_buffer_into_params(wr_buf, size,
++ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+@@ -1396,7 +1396,7 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
+ return -ENOSPC;
+ }
+
+- if (parse_write_buffer_into_params(wr_buf, size,
++ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+@@ -1581,7 +1581,7 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
+ return -ENOSPC;
+ }
+
+- if (parse_write_buffer_into_params(wr_buf, size,
++ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+@@ -1766,7 +1766,7 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
+ return -ENOSPC;
+ }
+
+- if (parse_write_buffer_into_params(wr_buf, size,
++ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+@@ -1944,7 +1944,7 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
+ return -ENOSPC;
+ }
+
+- if (parse_write_buffer_into_params(wr_buf, size,
++ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+@@ -2382,7 +2382,7 @@ static ssize_t dp_max_bpc_write(struct file *f, const char __user *buf,
+ return -ENOSPC;
+ }
+
+- if (parse_write_buffer_into_params(wr_buf, size,
++ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+index 4a4894e9d9c9a..377c4e53a2b37 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+@@ -366,32 +366,32 @@ static struct wm_table lpddr5_wm_table = {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+- .sr_exit_time_us = 5.32,
+- .sr_enter_plus_exit_time_us = 6.38,
++ .sr_exit_time_us = 11.5,
++ .sr_enter_plus_exit_time_us = 14.5,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+- .sr_exit_time_us = 9.82,
+- .sr_enter_plus_exit_time_us = 11.196,
++ .sr_exit_time_us = 11.5,
++ .sr_enter_plus_exit_time_us = 14.5,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+- .sr_exit_time_us = 9.89,
+- .sr_enter_plus_exit_time_us = 11.24,
++ .sr_exit_time_us = 11.5,
++ .sr_enter_plus_exit_time_us = 14.5,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+- .sr_exit_time_us = 9.748,
+- .sr_enter_plus_exit_time_us = 11.102,
++ .sr_exit_time_us = 11.5,
++ .sr_enter_plus_exit_time_us = 14.5,
+ .valid = true,
+ },
+ }
+@@ -518,14 +518,21 @@ static unsigned int find_clk_for_voltage(
+ unsigned int voltage)
+ {
+ int i;
++ int max_voltage = 0;
++ int clock = 0;
+
+ for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
+- if (clock_table->SocVoltage[i] == voltage)
++ if (clock_table->SocVoltage[i] == voltage) {
+ return clocks[i];
++ } else if (clock_table->SocVoltage[i] >= max_voltage &&
++ clock_table->SocVoltage[i] < voltage) {
++ max_voltage = clock_table->SocVoltage[i];
++ clock = clocks[i];
++ }
+ }
+
+- ASSERT(0);
+- return 0;
++ ASSERT(clock);
++ return clock;
+ }
+
+ void dcn31_clk_mgr_helper_populate_bw_params(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+index 8189606537c5a..f215f671210f8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+@@ -76,10 +76,6 @@ void dcn31_init_hw(struct dc *dc)
+ if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
+ dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
+
+- // Initialize the dccg
+- if (res_pool->dccg->funcs->dccg_init)
+- res_pool->dccg->funcs->dccg_init(res_pool->dccg);
+-
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+
+ REG_WRITE(REFCLK_CNTL, 0);
+@@ -106,6 +102,9 @@ void dcn31_init_hw(struct dc *dc)
+ hws->funcs.bios_golden_init(dc);
+ hws->funcs.disable_vga(dc->hwseq);
+ }
++ // Initialize the dccg
++ if (res_pool->dccg->funcs->dccg_init)
++ res_pool->dccg->funcs->dccg_init(res_pool->dccg);
+
+ if (dc->debug.enable_mem_low_power.bits.dmcu) {
+ // Force ERAM to shutdown if DMCU is not enabled
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+index 7ea362a864c43..4c9eed3f0713f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+@@ -217,8 +217,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
+ .num_states = 5,
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+- .sr_exit_z8_time_us = 402.0,
+- .sr_enter_plus_exit_z8_time_us = 520.0,
++ .sr_exit_z8_time_us = 442.0,
++ .sr_enter_plus_exit_z8_time_us = 560.0,
+ .writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
+ .round_trip_ping_latency_dcfclk_cycles = 106,
+@@ -928,7 +928,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ .disable_dcc = DCC_ENABLE,
+ .vsr_support = true,
+ .performance_trace = false,
+- .max_downscale_src_width = 3840,/*upto 4K*/
++ .max_downscale_src_width = 4096,/*upto true 4K*/
+ .disable_pplib_wm_range = false,
+ .scl_reset_length10 = true,
+ .sanity_checks = false,
+@@ -1591,6 +1591,13 @@ static int dcn31_populate_dml_pipes_from_context(
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
++ /*
++ * Immediate flip can be set dynamically after enabling the plane.
++ * We need to require support for immediate flip or underflow can be
++ * intermittently experienced depending on peak b/w requirements.
++ */
++ pipes[pipe_cnt].pipe.src.immediate_flip = true;
++
+ pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
+ pipes[pipe_cnt].pipe.src.gpuvm = true;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+index a9667068c6901..ab52dd7b330d4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+@@ -5399,9 +5399,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+
+ v->MaximumReadBandwidthWithPrefetch =
+ v->MaximumReadBandwidthWithPrefetch
+- + dml_max4(
+- v->VActivePixelBandwidth[i][j][k],
+- v->VActiveCursorBandwidth[i][j][k]
++ + dml_max3(
++ v->VActivePixelBandwidth[i][j][k]
++ + v->VActiveCursorBandwidth[i][j][k]
+ + v->NoOfDPP[i][j][k]
+ * (v->meta_row_bandwidth[i][j][k]
+ + v->dpte_row_bandwidth[i][j][k]),
+diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+index 5adc471bef57f..3d2f0817e40a1 100644
+--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
++++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+@@ -227,7 +227,7 @@ enum {
+ #define FAMILY_YELLOW_CARP 146
+
+ #define YELLOW_CARP_A0 0x01
+-#define YELLOW_CARP_B0 0x1A
++#define YELLOW_CARP_B0 0x20
+ #define YELLOW_CARP_UNKNOWN 0xFF
+
+ #ifndef ASICREV_IS_YELLOW_CARP
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+index 1b02056bc8bde..422759f9078d9 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+@@ -105,6 +105,7 @@ static enum mod_hdcp_status mod_hdcp_remove_display_from_topology_v3(
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
++ mutex_unlock(&psp->dtm_context.mutex);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+ status = mod_hdcp_remove_display_from_topology_v2(hdcp, index);
+@@ -115,8 +116,6 @@ static enum mod_hdcp_status mod_hdcp_remove_display_from_topology_v3(
+ HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+ }
+
+- mutex_unlock(&psp->dtm_context.mutex);
+-
+ return status;
+ }
+
+@@ -218,6 +217,7 @@ static enum mod_hdcp_status mod_hdcp_add_display_to_topology_v3(
+ dtm_cmd->dtm_in_message.topology_update_v3.link_hdcp_cap = link->hdcp_supported_informational;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
++ mutex_unlock(&psp->dtm_context.mutex);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+ status = mod_hdcp_add_display_to_topology_v2(hdcp, display);
+@@ -227,8 +227,6 @@ static enum mod_hdcp_status mod_hdcp_add_display_to_topology_v3(
+ HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+ }
+
+- mutex_unlock(&psp->dtm_context.mutex);
+-
+ return status;
+ }
+
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index d511e578ba79d..a1a150935264e 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -1924,6 +1924,9 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
+ {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
++ if (!crtc_state)
++ return;
++
+ /*
+ * Don't clobber DPCD if it's been already read out during output
+ * setup (eDP) or detect.
+diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
+index 1257f4f11e66f..438bbc7b81474 100644
+--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
++++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
+@@ -64,7 +64,7 @@ intel_timeline_pin_map(struct intel_timeline *timeline)
+
+ timeline->hwsp_map = vaddr;
+ timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES);
+- clflush(vaddr + ofs);
++ drm_clflush_virt_range(vaddr + ofs, TIMELINE_SEQNO_BYTES);
+
+ return 0;
+ }
+@@ -225,7 +225,7 @@ void intel_timeline_reset_seqno(const struct intel_timeline *tl)
+
+ memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno));
+ WRITE_ONCE(*hwsp_seqno, tl->seqno);
+- clflush(hwsp_seqno);
++ drm_clflush_virt_range(hwsp_seqno, TIMELINE_SEQNO_BYTES);
+ }
+
+ void intel_timeline_enter(struct intel_timeline *tl)
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
+index 1c5ffe2935af5..abf2d7a4fdf1a 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
+@@ -190,6 +190,7 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
+ struct ttm_transfer_obj *fbo;
+
+ fbo = container_of(bo, struct ttm_transfer_obj, base);
++ dma_resv_fini(&fbo->base.base._resv);
+ ttm_bo_put(fbo->bo);
+ kfree(fbo);
+ }
+diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
+index b61576f702b86..31fcf8e3bd728 100644
+--- a/drivers/infiniband/core/sa_query.c
++++ b/drivers/infiniband/core/sa_query.c
+@@ -760,8 +760,9 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
+
+ /* Construct the family header first */
+ header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
+- memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
+- LS_DEVICE_NAME_MAX);
++ strscpy_pad(header->device_name,
++ dev_name(&query->port->agent->device->dev),
++ LS_DEVICE_NAME_MAX);
+ header->port_num = query->port->port_num;
+
+ if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
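
The sa_query hunk replaces a fixed-length memcpy, which read past the end of the short device-name string, with strscpy_pad(), which NUL-terminates and zero-fills the rest of the wire field. A userspace equivalent of the padded copy, under hypothetical sizes:

#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN 16                 /* the fixed-size wire field */

static void copy_padded(char *dst, const char *src, size_t dstsize)
{
        size_t n = strnlen(src, dstsize - 1);

        memcpy(dst, src, n);
        memset(dst + n, 0, dstsize - n);        /* NUL + zero padding */
}

int main(void)
{
        char field[NAME_MAX_LEN];

        /* memcpy(field, "mlx5_0", NAME_MAX_LEN) would read 9 bytes past
         * the 7-byte string literal -- an out-of-bounds source access. */
        copy_padded(field, "mlx5_0", sizeof(field));
        printf("%s\n", field);
        return 0;
}
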
+diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
+index e276522104c6a..b00f687e1a7c7 100644
+--- a/drivers/infiniband/hw/hfi1/pio.c
++++ b/drivers/infiniband/hw/hfi1/pio.c
+@@ -920,6 +920,7 @@ void sc_disable(struct send_context *sc)
+ {
+ u64 reg;
+ struct pio_buf *pbuf;
++ LIST_HEAD(wake_list);
+
+ if (!sc)
+ return;
+@@ -954,19 +955,21 @@ void sc_disable(struct send_context *sc)
+ spin_unlock(&sc->release_lock);
+
+ write_seqlock(&sc->waitlock);
+- while (!list_empty(&sc->piowait)) {
++ if (!list_empty(&sc->piowait))
++ list_move(&sc->piowait, &wake_list);
++ write_sequnlock(&sc->waitlock);
++ while (!list_empty(&wake_list)) {
+ struct iowait *wait;
+ struct rvt_qp *qp;
+ struct hfi1_qp_priv *priv;
+
+- wait = list_first_entry(&sc->piowait, struct iowait, list);
++ wait = list_first_entry(&wake_list, struct iowait, list);
+ qp = iowait_to_qp(wait);
+ priv = qp->priv;
+ list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
+ hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
+ }
+- write_sequnlock(&sc->waitlock);
+
+ spin_unlock_irq(&sc->alloc_lock);
+ }
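
sc_disable() used to run QP wakeups while still holding the waitlock, inviting lock-ordering problems; the fix splices the waiters onto a local wake_list under the lock and wakes them after dropping it. The same splice-then-process pattern on a plain singly linked list, with the kernel locking calls reduced to comments:

#include <stdio.h>

struct waiter { struct waiter *next; int id; };

static struct waiter *protected_list;   /* guarded by the "waitlock" */

static void lock(void)   { /* write_seqlock(&sc->waitlock) */ }
static void unlock(void) { /* write_sequnlock(&sc->waitlock) */ }

static void disable(void)
{
        struct waiter *wake_list;

        lock();
        wake_list = protected_list;     /* the list_move() analogue */
        protected_list = NULL;
        unlock();

        /* Wakeups may take other locks; running them here, after the
         * unlock, avoids the inversion the loop-under-lock version had. */
        for (struct waiter *w = wake_list; w; w = w->next)
                printf("wake qp %d\n", w->id);
}

int main(void)
{
        struct waiter b = { NULL, 2 }, a = { &b, 1 };

        protected_list = &a;
        disable();                      /* wakes qp 1, then qp 2 */
        return 0;
}
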
+diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
+index 5fb92de1f015a..9b544a3b12886 100644
+--- a/drivers/infiniband/hw/irdma/uk.c
++++ b/drivers/infiniband/hw/irdma/uk.c
+@@ -1092,12 +1092,12 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
+ if (cq->avoid_mem_cflct) {
+ ext_cqe = (__le64 *)((u8 *)cqe + 32);
+ get_64bit_val(ext_cqe, 24, &qword7);
+- polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
++ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
+ } else {
+ peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
+ ext_cqe = cq->cq_base[peek_head].buf;
+ get_64bit_val(ext_cqe, 24, &qword7);
+- polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
++ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
+ if (!peek_head)
+ polarity ^= 1;
+ }
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index fa393c5ea3973..4261705fa19d5 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -3405,9 +3405,13 @@ static void irdma_process_cqe(struct ib_wc *entry,
+ }
+
+ if (cq_poll_info->ud_vlan_valid) {
+- entry->vlan_id = cq_poll_info->ud_vlan & VLAN_VID_MASK;
+- entry->wc_flags |= IB_WC_WITH_VLAN;
++ u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
++
+ entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
++ if (vlan) {
++ entry->vlan_id = vlan;
++ entry->wc_flags |= IB_WC_WITH_VLAN;
++ }
+ } else {
+ entry->sl = 0;
+ }
+diff --git a/drivers/infiniband/hw/irdma/ws.c b/drivers/infiniband/hw/irdma/ws.c
+index b68c575eb78e7..b0d6ee0739f53 100644
+--- a/drivers/infiniband/hw/irdma/ws.c
++++ b/drivers/infiniband/hw/irdma/ws.c
+@@ -330,8 +330,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
+
+ tc_node->enable = true;
+ ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
+- if (ret)
++ if (ret) {
++ vsi->unregister_qset(vsi, tc_node);
+ goto reg_err;
++ }
+ }
+ ibdev_dbg(to_ibdev(vsi->dev),
+ "WS: Using node %d which represents VSI %d TC %d\n",
+@@ -350,6 +352,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
+ }
+ goto exit;
+
++reg_err:
++ irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
++ list_del(&tc_node->siblings);
++ irdma_free_node(vsi, tc_node);
+ leaf_add_err:
+ if (list_empty(&vsi_node->child_list_head)) {
+ if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
+@@ -369,11 +375,6 @@ vsi_add_err:
+ exit:
+ mutex_unlock(&vsi->dev->ws_mutex);
+ return ret;
+-
+-reg_err:
+- mutex_unlock(&vsi->dev->ws_mutex);
+- irdma_ws_remove(vsi, user_pri);
+- return ret;
+ }
+
+ /**
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 061dbee55cac1..4b598ec72372d 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1338,7 +1338,6 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
+ goto err_2;
+ }
+ mr->mmkey.type = MLX5_MKEY_MR;
+- mr->desc_size = sizeof(struct mlx5_mtt);
+ mr->umem = umem;
+ set_mr_fields(dev, mr, umem->length, access_flags);
+ kvfree(in);
+@@ -1532,6 +1531,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
+ ib_umem_release(&odp->umem);
+ return ERR_CAST(mr);
+ }
++ xa_init(&mr->implicit_children);
+
+ odp->private = mr;
+ err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index fd88b9ae96fe8..80d989edb7dd8 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -4309,6 +4309,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
+ MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
+ MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
++ if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
++ MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);
+
+ err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
+ MLX5_ST_SZ_BYTES(create_dct_in), out,
+diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
+index a67599b5a550a..ac11943a5ddb0 100644
+--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
++++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
+@@ -602,7 +602,7 @@ done:
+ /*
+ * How many pages in this iovec element?
+ */
+-static int qib_user_sdma_num_pages(const struct iovec *iov)
++static size_t qib_user_sdma_num_pages(const struct iovec *iov)
+ {
+ const unsigned long addr = (unsigned long) iov->iov_base;
+ const unsigned long len = iov->iov_len;
+@@ -658,7 +658,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
+ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
+ struct qib_user_sdma_queue *pq,
+ struct qib_user_sdma_pkt *pkt,
+- unsigned long addr, int tlen, int npages)
++ unsigned long addr, int tlen, size_t npages)
+ {
+ struct page *pages[8];
+ int i, j;
+@@ -722,7 +722,7 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
+ unsigned long idx;
+
+ for (idx = 0; idx < niov; idx++) {
+- const int npages = qib_user_sdma_num_pages(iov + idx);
++ const size_t npages = qib_user_sdma_num_pages(iov + idx);
+ const unsigned long addr = (unsigned long) iov[idx].iov_base;
+
+ ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
+@@ -824,8 +824,8 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
+ unsigned pktnw;
+ unsigned pktnwc;
+ int nfrags = 0;
+- int npages = 0;
+- int bytes_togo = 0;
++ size_t npages = 0;
++ size_t bytes_togo = 0;
+ int tiddma = 0;
+ int cfur;
+
+@@ -885,7 +885,11 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
+
+ npages += qib_user_sdma_num_pages(&iov[idx]);
+
+- bytes_togo += slen;
++ if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
++ bytes_togo > type_max(typeof(pkt->bytes_togo))) {
++ ret = -EINVAL;
++ goto free_pbc;
++ }
+ pktnwc += slen >> 2;
+ idx++;
+ nfrags++;
+@@ -904,8 +908,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
+ }
+
+ if (frag_size) {
+- int tidsmsize, n;
+- size_t pktsize;
++ size_t tidsmsize, n, pktsize, sz, addrlimit;
+
+ n = npages*((2*PAGE_SIZE/frag_size)+1);
+ pktsize = struct_size(pkt, addr, n);
+@@ -923,14 +926,24 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
+ else
+ tidsmsize = 0;
+
+- pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
++ if (check_add_overflow(pktsize, tidsmsize, &sz)) {
++ ret = -EINVAL;
++ goto free_pbc;
++ }
++ pkt = kmalloc(sz, GFP_KERNEL);
+ if (!pkt) {
+ ret = -ENOMEM;
+ goto free_pbc;
+ }
+ pkt->largepkt = 1;
+ pkt->frag_size = frag_size;
+- pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
++ if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
++ &addrlimit) ||
++ addrlimit > type_max(typeof(pkt->addrlimit))) {
++ ret = -EINVAL;
++ goto free_pbc;
++ }
++ pkt->addrlimit = addrlimit;
+
+ if (tiddma) {
+ char *tidsm = (char *)pkt + pktsize;
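
The qib hunk widens the length accounting to size_t and bounds every accumulation with check_add_overflow() plus a type_max() check before the sum is stored into a narrower packet field. A userspace sketch using the builtin that check_add_overflow() wraps, with a hypothetical u16 destination field:

#include <stdint.h>
#include <stdio.h>

static int total_len(const size_t *slens, int n, uint16_t *bytes_togo)
{
        size_t total = 0;

        for (int i = 0; i < n; i++)
                if (__builtin_add_overflow(total, slens[i], &total))
                        return -1;      /* the size_t sum wrapped */
        if (total > UINT16_MAX)         /* type_max() of the u16 field */
                return -1;
        *bytes_togo = (uint16_t)total;
        return 0;
}

int main(void)
{
        size_t lens[] = { 40000, 30000 };
        uint16_t out;

        printf("%d\n", total_len(lens, 2, &out));       /* -1: 70000 > 65535 */
        return 0;
}
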
+diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
+index 38559a956330b..31f841231609a 100644
+--- a/drivers/mmc/host/cqhci-core.c
++++ b/drivers/mmc/host/cqhci-core.c
+@@ -282,6 +282,9 @@ static void __cqhci_enable(struct cqhci_host *cq_host)
+
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
++ if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
++ cqhci_writel(cq_host, 0, CQHCI_CTL);
++
+ mmc->cqe_on = true;
+
+ if (cq_host->ops->enable)
+diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
+index 0c75810812a0a..1f8a3c0ddfe11 100644
+--- a/drivers/mmc/host/dw_mmc-exynos.c
++++ b/drivers/mmc/host/dw_mmc-exynos.c
+@@ -464,6 +464,18 @@ static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates)
+ }
+ }
+
++ /*
++ * If there is no candidate value, return -EIO.
++ * If there are candidate values but no best clock sample value is
++ * found, use the first candidate clock sample value.
++ */
++ for (i = 0; i < iter; i++) {
++ __c = ror8(candiates, i);
++ if ((__c & 0x1) == 0x1) {
++ loc = i;
++ goto out;
++ }
++ }
+ out:
+ return loc;
+ }
+@@ -494,6 +506,8 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
+ priv->tuned_sample = found;
+ } else {
+ ret = -EIO;
++ dev_warn(&mmc->class_dev,
++ "There is no candiates value about clksmpl!\n");
+ }
+
+ return ret;
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 4dfc246c5f95d..b06b4dcb7c782 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -2577,6 +2577,25 @@ static int msdc_drv_probe(struct platform_device *pdev)
+ host->dma_mask = DMA_BIT_MASK(32);
+ mmc_dev(mmc)->dma_mask = &host->dma_mask;
+
++ host->timeout_clks = 3 * 1048576;
++ host->dma.gpd = dma_alloc_coherent(&pdev->dev,
++ 2 * sizeof(struct mt_gpdma_desc),
++ &host->dma.gpd_addr, GFP_KERNEL);
++ host->dma.bd = dma_alloc_coherent(&pdev->dev,
++ MAX_BD_NUM * sizeof(struct mt_bdma_desc),
++ &host->dma.bd_addr, GFP_KERNEL);
++ if (!host->dma.gpd || !host->dma.bd) {
++ ret = -ENOMEM;
++ goto release_mem;
++ }
++ msdc_init_gpd_bd(host, &host->dma);
++ INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
++ spin_lock_init(&host->lock);
++
++ platform_set_drvdata(pdev, mmc);
++ msdc_ungate_clock(host);
++ msdc_init_hw(host);
++
+ if (mmc->caps2 & MMC_CAP2_CQE) {
+ host->cq_host = devm_kzalloc(mmc->parent,
+ sizeof(*host->cq_host),
+@@ -2597,25 +2616,6 @@ static int msdc_drv_probe(struct platform_device *pdev)
+ mmc->max_seg_size = 64 * 1024;
+ }
+
+- host->timeout_clks = 3 * 1048576;
+- host->dma.gpd = dma_alloc_coherent(&pdev->dev,
+- 2 * sizeof(struct mt_gpdma_desc),
+- &host->dma.gpd_addr, GFP_KERNEL);
+- host->dma.bd = dma_alloc_coherent(&pdev->dev,
+- MAX_BD_NUM * sizeof(struct mt_bdma_desc),
+- &host->dma.bd_addr, GFP_KERNEL);
+- if (!host->dma.gpd || !host->dma.bd) {
+- ret = -ENOMEM;
+- goto release_mem;
+- }
+- msdc_init_gpd_bd(host, &host->dma);
+- INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
+- spin_lock_init(&host->lock);
+-
+- platform_set_drvdata(pdev, mmc);
+- msdc_ungate_clock(host);
+- msdc_init_hw(host);
+-
+ ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
+ IRQF_TRIGGER_NONE, pdev->name, host);
+ if (ret)
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 72c0bf0c18875..812c1f42a5eaf 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -1133,6 +1133,7 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+ u32 ctrl;
++ int ret;
+
+ /* Reset the tuning circuit */
+ if (esdhc_is_usdhc(imx_data)) {
+@@ -1145,7 +1146,22 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
+ } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
+ ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
+ ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
++ ctrl &= ~ESDHC_MIX_CTRL_EXE_TUNE;
+ writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
++ /* Make sure ESDHC_MIX_CTRL_EXE_TUNE is cleared */
++ ret = readl_poll_timeout(host->ioaddr + SDHCI_AUTO_CMD_STATUS,
++ ctrl, !(ctrl & ESDHC_MIX_CTRL_EXE_TUNE), 1, 50);
++ if (ret == -ETIMEDOUT)
++ dev_warn(mmc_dev(host->mmc),
++ "Warning! clear execute tuning bit failed\n");
++ /*
++ * SDHCI_INT_DATA_AVAIL is a W1C bit; setting it clears the usdhc
++ * IP's internal logic flag execute_tuning_with_clr_buf, which
++ * finally makes sure the normal data transfer logic is correct.
++ */
++ ctrl = readl(host->ioaddr + SDHCI_INT_STATUS);
++ ctrl |= SDHCI_INT_DATA_AVAIL;
++ writel(ctrl, host->ioaddr + SDHCI_INT_STATUS);
+ }
+ }
+ }
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index be19785227fe4..d0f2edfe296c8 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -616,16 +616,12 @@ static int intel_select_drive_strength(struct mmc_card *card,
+ return intel_host->drv_strength;
+ }
+
+-static int bxt_get_cd(struct mmc_host *mmc)
++static int sdhci_get_cd_nogpio(struct mmc_host *mmc)
+ {
+- int gpio_cd = mmc_gpio_get_cd(mmc);
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int ret = 0;
+
+- if (!gpio_cd)
+- return 0;
+-
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->flags & SDHCI_DEVICE_DEAD)
+@@ -638,6 +634,21 @@ out:
+ return ret;
+ }
+
++static int bxt_get_cd(struct mmc_host *mmc)
++{
++ int gpio_cd = mmc_gpio_get_cd(mmc);
++
++ if (!gpio_cd)
++ return 0;
++
++ return sdhci_get_cd_nogpio(mmc);
++}
++
++static int mrfld_get_cd(struct mmc_host *mmc)
++{
++ return sdhci_get_cd_nogpio(mmc);
++}
++
+ #define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
+ #define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
+
+@@ -1341,6 +1352,14 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
+ MMC_CAP_1_8V_DDR;
+ break;
+ case INTEL_MRFLD_SD:
++ slot->cd_idx = 0;
++ slot->cd_override_level = true;
++ /*
++ * There are two PCB designs of SD card slot with the opposite
++ * card detection sense. Quirk this out by ignoring GPIO state
++ * completely in the custom ->get_cd() callback.
++ */
++ slot->host->mmc_host_ops.get_cd = mrfld_get_cd;
+ slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ break;
+ case INTEL_MRFLD_SDIO:
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index fff6c39a343e9..c5287be9bbed4 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2042,6 +2042,12 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
+ break;
+ case MMC_VDD_32_33:
+ case MMC_VDD_33_34:
++ /*
++ * 3.4 ~ 3.6V are valid only for those platforms where it's
++ * known that the voltage range is supported by hardware.
++ */
++ case MMC_VDD_34_35:
++ case MMC_VDD_35_36:
+ pwr = SDHCI_POWER_330;
+ break;
+ default:
+diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
+index 7dfc26f48c18f..e2affa52ef469 100644
+--- a/drivers/mmc/host/tmio_mmc_core.c
++++ b/drivers/mmc/host/tmio_mmc_core.c
+@@ -195,6 +195,10 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
+ sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
+ host->sdcard_irq_mask = host->sdcard_irq_mask_all;
+
++ if (host->native_hotplug)
++ tmio_mmc_enable_mmc_irqs(host,
++ TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
++
+ tmio_mmc_set_bus_width(host, host->mmc->ios.bus_width);
+
+ if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
+@@ -956,8 +960,15 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ case MMC_POWER_OFF:
+ tmio_mmc_power_off(host);
+ /* For R-Car Gen2+, we need to reset SDHI specific SCC */
+- if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
++ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2) {
+ host->reset(host);
++
++ if (host->native_hotplug)
++ tmio_mmc_enable_mmc_irqs(host,
++ TMIO_STAT_CARD_REMOVE |
++ TMIO_STAT_CARD_INSERT);
++ }
++
+ host->set_clock(host, 0);
+ break;
+ case MMC_POWER_UP:
+@@ -1185,10 +1196,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
+ _host->set_clock(_host, 0);
+ tmio_mmc_reset(_host);
+
+- if (_host->native_hotplug)
+- tmio_mmc_enable_mmc_irqs(_host,
+- TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
+-
+ spin_lock_init(&_host->lock);
+ mutex_init(&_host->ios_lock);
+
+diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
+index 4950d10d3a191..97beece62fec4 100644
+--- a/drivers/mmc/host/vub300.c
++++ b/drivers/mmc/host/vub300.c
+@@ -576,7 +576,7 @@ static void check_vub300_port_status(struct vub300_mmc_host *vub300)
+ GET_SYSTEM_PORT_STATUS,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0x0000, 0x0000, &vub300->system_port_status,
+- sizeof(vub300->system_port_status), HZ);
++ sizeof(vub300->system_port_status), 1000);
+ if (sizeof(vub300->system_port_status) == retval)
+ new_system_port_status(vub300);
+ }
+@@ -1241,7 +1241,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
+ SET_INTERRUPT_PSEUDOCODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, 0x0000, 0x0000,
+- xfer_buffer, xfer_length, HZ);
++ xfer_buffer, xfer_length, 1000);
+ kfree(xfer_buffer);
+ if (retval < 0)
+ goto copy_error_message;
+@@ -1284,7 +1284,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
+ SET_TRANSFER_PSEUDOCODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, 0x0000, 0x0000,
+- xfer_buffer, xfer_length, HZ);
++ xfer_buffer, xfer_length, 1000);
+ kfree(xfer_buffer);
+ if (retval < 0)
+ goto copy_error_message;
+@@ -1991,7 +1991,7 @@ static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8],
+ usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
+ SET_CLOCK_SPEED,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+- 0x00, 0x00, buf, buf_array_size, HZ);
++ 0x00, 0x00, buf, buf_array_size, 1000);
+ if (retval != 8) {
+ dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED"
+ " %dkHz failed with retval=%d\n", kHzClock, retval);
+@@ -2013,14 +2013,14 @@ static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
+ SET_SD_POWER,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+- 0x0000, 0x0000, NULL, 0, HZ);
++ 0x0000, 0x0000, NULL, 0, 1000);
+ /* must wait for the VUB300 u-proc to boot up */
+ msleep(600);
+ } else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) {
+ usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
+ SET_SD_POWER,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+- 0x0001, 0x0000, NULL, 0, HZ);
++ 0x0001, 0x0000, NULL, 0, 1000);
+ msleep(600);
+ vub300->card_powered = 1;
+ } else if (ios->power_mode == MMC_POWER_ON) {
+@@ -2275,14 +2275,14 @@ static int vub300_probe(struct usb_interface *interface,
+ GET_HC_INF0,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0x0000, 0x0000, &vub300->hc_info,
+- sizeof(vub300->hc_info), HZ);
++ sizeof(vub300->hc_info), 1000);
+ if (retval < 0)
+ goto error5;
+ retval =
+ usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
+ SET_ROM_WAIT_STATES,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+- firmware_rom_wait_states, 0x0000, NULL, 0, HZ);
++ firmware_rom_wait_states, 0x0000, NULL, 0, 1000);
+ if (retval < 0)
+ goto error5;
+ dev_info(&vub300->udev->dev,
+@@ -2297,7 +2297,7 @@ static int vub300_probe(struct usb_interface *interface,
+ GET_SYSTEM_PORT_STATUS,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0x0000, 0x0000, &vub300->system_port_status,
+- sizeof(vub300->system_port_status), HZ);
++ sizeof(vub300->system_port_status), 1000);
+ if (retval < 0) {
+ goto error4;
+ } else if (sizeof(vub300->system_port_status) == retval) {
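Every vub300 change above is the same unit fix: the final argument of usb_control_msg() is a timeout in milliseconds, but the driver passed HZ, the kernel's timer-tick rate in ticks per second. The old timeout therefore varied with kernel configuration — 100 ms on a HZ=100 build, a full second only on HZ=1000 — and hard-coding 1000 gives one second everywhere. The arithmetic, as a trivial standalone program:

#include <stdio.h>

int main(void)
{
	const int hz_configs[] = { 100, 250, 300, 1000 };
	unsigned int i;

	for (i = 0; i < sizeof(hz_configs) / sizeof(hz_configs[0]); i++)
		printf("CONFIG_HZ=%-4d old timeout: %4d ms, fixed: 1000 ms\n",
		       hz_configs[i], hz_configs[i]);
	return 0;
}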
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+index dc5cce127d8ea..89b04703aacac 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -568,6 +568,7 @@ struct hnae3_ae_ops {
+ u32 *auto_neg, u32 *rx_en, u32 *tx_en);
+ int (*set_pauseparam)(struct hnae3_handle *handle,
+ u32 auto_neg, u32 rx_en, u32 tx_en);
++ int (*restore_pauseparam)(struct hnae3_handle *handle);
+
+ int (*set_autoneg)(struct hnae3_handle *handle, bool enable);
+ int (*get_autoneg)(struct hnae3_handle *handle);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+index 80461ab0ce9e7..b22b8baec54c0 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -138,7 +138,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
+ .name = "uc",
+ .cmd = HNAE3_DBG_CMD_MAC_UC,
+ .dentry = HNS3_DBG_DENTRY_MAC,
+- .buf_len = HNS3_DBG_READ_LEN,
++ .buf_len = HNS3_DBG_READ_LEN_128KB,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+@@ -257,7 +257,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
+ .name = "tqp",
+ .cmd = HNAE3_DBG_CMD_REG_TQP,
+ .dentry = HNS3_DBG_DENTRY_REG,
+- .buf_len = HNS3_DBG_READ_LEN,
++ .buf_len = HNS3_DBG_READ_LEN_128KB,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+@@ -299,7 +299,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
+ .name = "fd_tcam",
+ .cmd = HNAE3_DBG_CMD_FD_TCAM,
+ .dentry = HNS3_DBG_DENTRY_FD,
+- .buf_len = HNS3_DBG_READ_LEN,
++ .buf_len = HNS3_DBG_READ_LEN_1MB,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+@@ -463,7 +463,7 @@ static const struct hns3_dbg_item rx_queue_info_items[] = {
+ { "TAIL", 2 },
+ { "HEAD", 2 },
+ { "FBDNUM", 2 },
+- { "PKTNUM", 2 },
++ { "PKTNUM", 5 },
+ { "COPYBREAK", 2 },
+ { "RING_EN", 2 },
+ { "RX_RING_EN", 2 },
+@@ -566,7 +566,7 @@ static const struct hns3_dbg_item tx_queue_info_items[] = {
+ { "HEAD", 2 },
+ { "FBDNUM", 2 },
+ { "OFFSET", 2 },
+- { "PKTNUM", 2 },
++ { "PKTNUM", 5 },
+ { "RING_EN", 2 },
+ { "TX_RING_EN", 2 },
+ { "BASE_ADDR", 10 },
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+index 83ee0f41322c7..4e0cec9025e85 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -838,6 +838,26 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
+ return 0;
+ }
+
++static int hns3_set_phy_link_ksettings(struct net_device *netdev,
++ const struct ethtool_link_ksettings *cmd)
++{
++ struct hnae3_handle *handle = hns3_get_handle(netdev);
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++ int ret;
++
++ if (cmd->base.speed == SPEED_1000 &&
++ cmd->base.autoneg == AUTONEG_DISABLE)
++ return -EINVAL;
++
++ if (cmd->base.autoneg == AUTONEG_DISABLE && ops->restore_pauseparam) {
++ ret = ops->restore_pauseparam(handle);
++ if (ret)
++ return ret;
++ }
++
++ return phy_ethtool_ksettings_set(netdev->phydev, cmd);
++}
++
+ static int hns3_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+ {
+@@ -856,16 +876,11 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
+ cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
+
+ /* Only support ksettings_set for netdev with phy attached for now */
+- if (netdev->phydev) {
+- if (cmd->base.speed == SPEED_1000 &&
+- cmd->base.autoneg == AUTONEG_DISABLE)
+- return -EINVAL;
+-
+- return phy_ethtool_ksettings_set(netdev->phydev, cmd);
+- } else if (test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps) &&
+- ops->set_phy_link_ksettings) {
++ if (netdev->phydev)
++ return hns3_set_phy_link_ksettings(netdev, cmd);
++ else if (test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps) &&
++ ops->set_phy_link_ksettings)
+ return ops->set_phy_link_ksettings(handle, cmd);
+- }
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+index e6e617aba2a4c..04e7c8d469696 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+@@ -391,7 +391,7 @@ static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
+ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+ {
+- struct hclge_dbg_bitmap_cmd *bitmap;
++ struct hclge_dbg_bitmap_cmd req;
+ struct hclge_desc desc;
+ u16 qset_id, qset_num;
+ int ret;
+@@ -408,12 +408,12 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
+ if (ret)
+ return ret;
+
+- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
++ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%04u %#x %#x %#x %#x\n",
+- qset_id, bitmap->bit0, bitmap->bit1,
+- bitmap->bit2, bitmap->bit3);
++ qset_id, req.bit0, req.bit1, req.bit2,
++ req.bit3);
+ }
+
+ return 0;
+@@ -422,7 +422,7 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
+ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+ {
+- struct hclge_dbg_bitmap_cmd *bitmap;
++ struct hclge_dbg_bitmap_cmd req;
+ struct hclge_desc desc;
+ u8 pri_id, pri_num;
+ int ret;
+@@ -439,12 +439,11 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
+ if (ret)
+ return ret;
+
+- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
++ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%03u %#x %#x %#x\n",
+- pri_id, bitmap->bit0, bitmap->bit1,
+- bitmap->bit2);
++ pri_id, req.bit0, req.bit1, req.bit2);
+ }
+
+ return 0;
+@@ -453,7 +452,7 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
+ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+ {
+- struct hclge_dbg_bitmap_cmd *bitmap;
++ struct hclge_dbg_bitmap_cmd req;
+ struct hclge_desc desc;
+ u8 pg_id;
+ int ret;
+@@ -466,12 +465,11 @@ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
+ if (ret)
+ return ret;
+
+- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
++ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%03u %#x %#x %#x\n",
+- pg_id, bitmap->bit0, bitmap->bit1,
+- bitmap->bit2);
++ pg_id, req.bit0, req.bit1, req.bit2);
+ }
+
+ return 0;
+@@ -511,7 +509,7 @@ static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
+ static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+ {
+- struct hclge_dbg_bitmap_cmd *bitmap;
++ struct hclge_dbg_bitmap_cmd req;
+ struct hclge_desc desc;
+ u8 port_id = 0;
+ int ret;
+@@ -521,12 +519,12 @@ static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
+ if (ret)
+ return ret;
+
+- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
++ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
+
+ *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
+- bitmap->bit0);
++ req.bit0);
+ *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
+- bitmap->bit1);
++ req.bit1);
+
+ return 0;
+ }
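The hclge_debugfs hunks replace a pointer cast — reinterpreting the little-endian descriptor word at &desc.data[1] as a bitfield struct — with an explicit le32_to_cpu() copy into a local struct, which stays correct on big-endian hosts. Below is a standalone sketch of the pattern; the union mirrors hclge_dbg_bitmap_cmd but is re-declared here for a self-contained build, and note that bitfield allocation order is itself implementation-defined, which is part of why the raw cast was fragile:

#include <stdio.h>
#include <stdint.h>

union bitmap_cmd {
	uint8_t bitmap;
	struct {
		uint8_t bit0 : 1;
		uint8_t bit1 : 1;
		uint8_t bit2 : 1;
		uint8_t bit3 : 1;
	};
};

/* decode a little-endian wire word regardless of host endianness */
static uint32_t le32_to_cpu_demo(const uint8_t b[4])
{
	return b[0] | b[1] << 8 | (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	const uint8_t wire[4] = { 0x0a, 0x00, 0x00, 0x00 }; /* bits 1, 3 */
	union bitmap_cmd req;

	req.bitmap = (uint8_t)le32_to_cpu_demo(wire);
	printf("bit0=%u bit1=%u bit2=%u bit3=%u\n",
	       req.bit0, req.bit1, req.bit2, req.bit3);
	return 0;
}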
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index be46b164b0e2c..721eb4e92f618 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -11014,6 +11014,35 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
+ return -EOPNOTSUPP;
+ }
+
++static int hclge_restore_pauseparam(struct hnae3_handle *handle)
++{
++ struct hclge_vport *vport = hclge_get_vport(handle);
++ struct hclge_dev *hdev = vport->back;
++ u32 auto_neg, rx_pause, tx_pause;
++ int ret;
++
++ hclge_get_pauseparam(handle, &auto_neg, &rx_pause, &tx_pause);
++ /* when autoneg is disabled, the pause setting of phy has no effect
++ * unless the link goes down.
++ */
++ ret = phy_suspend(hdev->hw.mac.phydev);
++ if (ret)
++ return ret;
++
++ phy_set_asym_pause(hdev->hw.mac.phydev, rx_pause, tx_pause);
++
++ ret = phy_resume(hdev->hw.mac.phydev);
++ if (ret)
++ return ret;
++
++ ret = hclge_mac_pause_setup_hw(hdev);
++ if (ret)
++ dev_err(&hdev->pdev->dev,
++ "restore pauseparam error, ret = %d.\n", ret);
++
++ return ret;
++}
++
+ static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
+ u8 *auto_neg, u32 *speed, u8 *duplex)
+ {
+@@ -12943,6 +12972,7 @@ static const struct hnae3_ae_ops hclge_ops = {
+ .halt_autoneg = hclge_halt_autoneg,
+ .get_pauseparam = hclge_get_pauseparam,
+ .set_pauseparam = hclge_set_pauseparam,
++ .restore_pauseparam = hclge_restore_pauseparam,
+ .set_mtu = hclge_set_mtu,
+ .reset_queue = hclge_reset_tqp,
+ .get_stats = hclge_get_stats,
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+index 95074e91a8466..124791e4bfeed 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+@@ -1435,7 +1435,7 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
+ return 0;
+ }
+
+-static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
++int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
+ {
+ bool tx_en, rx_en;
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+index 2ee9b795f71dc..4b2c3a7889800 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+@@ -244,6 +244,7 @@ int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight);
+ int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_tm_shaper_para *para);
++int hclge_mac_pause_setup_hw(struct hclge_dev *hdev);
+ int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id);
+ int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id);
+ int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
+diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
+index 37c18c66b5c72..e375ac849aecd 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lag.c
++++ b/drivers/net/ethernet/intel/ice/ice_lag.c
+@@ -100,9 +100,9 @@ static void ice_display_lag_info(struct ice_lag *lag)
+ */
+ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
+ {
+- struct net_device *event_netdev, *netdev_tmp;
+ struct netdev_notifier_bonding_info *info;
+ struct netdev_bonding_info *bonding_info;
++ struct net_device *event_netdev;
+ const char *lag_netdev_name;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+@@ -123,19 +123,6 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
+ goto lag_out;
+ }
+
+- rcu_read_lock();
+- for_each_netdev_in_bond_rcu(lag->upper_netdev, netdev_tmp) {
+- if (!netif_is_ice(netdev_tmp))
+- continue;
+-
+- if (netdev_tmp && netdev_tmp != lag->netdev &&
+- lag->peer_netdev != netdev_tmp) {
+- dev_hold(netdev_tmp);
+- lag->peer_netdev = netdev_tmp;
+- }
+- }
+- rcu_read_unlock();
+-
+ if (bonding_info->slave.state)
+ ice_lag_set_backup(lag);
+ else
+@@ -319,6 +306,9 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
+ case NETDEV_BONDING_INFO:
+ ice_lag_info_event(lag, ptr);
+ break;
++ case NETDEV_UNREGISTER:
++ ice_lag_unlink(lag, ptr);
++ break;
+ default:
+ break;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index c2465b9d80567..545813657c939 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -1582,6 +1582,9 @@ err_kworker:
+ */
+ void ice_ptp_release(struct ice_pf *pf)
+ {
++ if (!test_bit(ICE_FLAG_PTP, pf->flags))
++ return;
++
+ /* Disable timestamping for both Tx and Rx */
+ ice_ptp_cfg_timestamp(pf, false);
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+index 9b2dfbf90e510..a606de56678d4 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+@@ -229,18 +229,85 @@ static const struct file_operations rvu_dbg_##name##_fops = { \
+
+ static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+
++static void get_lf_str_list(struct rvu_block block, int pcifunc,
++ char *lfs)
++{
++ int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
++
++ for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
++ if (lf >= block.lf.max)
++ break;
++
++ if (block.fn_map[lf] != pcifunc)
++ continue;
++
++ if (lf == prev_lf + 1) {
++ prev_lf = lf;
++ seq = 1;
++ continue;
++ }
++
++ if (seq)
++ len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
++ else
++ len += (len ? sprintf(lfs + len, ",%d", lf) :
++ sprintf(lfs + len, "%d", lf));
++
++ prev_lf = lf;
++ seq = 0;
++ }
++
++ if (seq)
++ len += sprintf(lfs + len, "-%d", prev_lf);
++
++ lfs[len] = '\0';
++}
++
++static int get_max_column_width(struct rvu *rvu)
++{
++ int index, pf, vf, lf_str_size = 12, buf_size = 256;
++ struct rvu_block block;
++ u16 pcifunc;
++ char *buf;
++
++ buf = kzalloc(buf_size, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
++ for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
++ pcifunc = pf << 10 | vf;
++ if (!pcifunc)
++ continue;
++
++ for (index = 0; index < BLK_COUNT; index++) {
++ block = rvu->hw->block[index];
++ if (!strlen(block.name))
++ continue;
++
++ get_lf_str_list(block, pcifunc, buf);
++ if (lf_str_size <= strlen(buf))
++ lf_str_size = strlen(buf) + 1;
++ }
++ }
++ }
++
++ kfree(buf);
++ return lf_str_size;
++}
++
+ /* Dumps current provisioning status of all RVU block LFs */
+ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+ {
+- int index, off = 0, flag = 0, go_back = 0, len = 0;
++ int index, off = 0, flag = 0, len = 0, i = 0;
+ struct rvu *rvu = filp->private_data;
+- int lf, pf, vf, pcifunc;
++ int bytes_not_copied = 0;
+ struct rvu_block block;
+- int bytes_not_copied;
+- int lf_str_size = 12;
++ int pf, vf, pcifunc;
+ int buf_size = 2048;
++ int lf_str_size;
+ char *lfs;
+ char *buf;
+
+@@ -252,6 +319,9 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
+ if (!buf)
+ return -ENOSPC;
+
++ /* Get the maximum width of a column */
++ lf_str_size = get_max_column_width(rvu);
++
+ lfs = kzalloc(lf_str_size, GFP_KERNEL);
+ if (!lfs) {
+ kfree(buf);
+@@ -265,65 +335,69 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
+ "%-*s", lf_str_size,
+ rvu->hw->block[index].name);
+ }
++
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
++ bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
++ if (bytes_not_copied)
++ goto out;
++
++ i++;
++ *ppos += off;
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
++ off = 0;
++ flag = 0;
+ pcifunc = pf << 10 | vf;
+ if (!pcifunc)
+ continue;
+
+ if (vf) {
+ sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
+- go_back = scnprintf(&buf[off],
+- buf_size - 1 - off,
+- "%-*s", lf_str_size, lfs);
++ off = scnprintf(&buf[off],
++ buf_size - 1 - off,
++ "%-*s", lf_str_size, lfs);
+ } else {
+ sprintf(lfs, "PF%d", pf);
+- go_back = scnprintf(&buf[off],
+- buf_size - 1 - off,
+- "%-*s", lf_str_size, lfs);
++ off = scnprintf(&buf[off],
++ buf_size - 1 - off,
++ "%-*s", lf_str_size, lfs);
+ }
+
+- off += go_back;
+- for (index = 0; index < BLKTYPE_MAX; index++) {
++ for (index = 0; index < BLK_COUNT; index++) {
+ block = rvu->hw->block[index];
+ if (!strlen(block.name))
+ continue;
+ len = 0;
+ lfs[len] = '\0';
+- for (lf = 0; lf < block.lf.max; lf++) {
+- if (block.fn_map[lf] != pcifunc)
+- continue;
++ get_lf_str_list(block, pcifunc, lfs);
++ if (strlen(lfs))
+ flag = 1;
+- len += sprintf(&lfs[len], "%d,", lf);
+- }
+
+- if (flag)
+- len--;
+- lfs[len] = '\0';
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "%-*s", lf_str_size, lfs);
+- if (!strlen(lfs))
+- go_back += lf_str_size;
+ }
+- if (!flag)
+- off -= go_back;
+- else
+- flag = 0;
+- off--;
+- off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
++ if (flag) {
++ off += scnprintf(&buf[off],
++ buf_size - 1 - off, "\n");
++ bytes_not_copied = copy_to_user(buffer +
++ (i * off),
++ buf, off);
++ if (bytes_not_copied)
++ goto out;
++
++ i++;
++ *ppos += off;
++ }
+ }
+ }
+
+- bytes_not_copied = copy_to_user(buffer, buf, off);
++out:
+ kfree(lfs);
+ kfree(buf);
+-
+ if (bytes_not_copied)
+ return -EFAULT;
+
+- *ppos = off;
+- return off;
++ return *ppos;
+ }
+
+ RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
+@@ -507,7 +581,7 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
+ if (cmd_buf)
+ ret = -EINVAL;
+
+- if (!strncmp(subtoken, "help", 4) || ret < 0) {
++ if (ret < 0 || !strncmp(subtoken, "help", 4)) {
+ dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
+ goto qsize_write_done;
+ }
+@@ -1722,6 +1796,10 @@ static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
+ u16 pcifunc;
+ char *str;
+
++ /* Ingress policers do not exist on all platforms */
++ if (!nix_hw->ipolicer)
++ return 0;
++
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+@@ -1771,6 +1849,10 @@ static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
+ int layer;
+ char *str;
+
++ /* Ingress policers do not exist on all platforms */
++ if (!nix_hw->ipolicer)
++ return 0;
++
+ seq_puts(m, "\nBandwidth profile resource free count\n");
+ seq_puts(m, "=====================================\n");
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
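Two things happen in the rvu_debugfs rewrite above: the column width is computed up front from the longest LF list instead of a fixed 12 bytes, and rows are copied to userspace as they are produced, so the 2048-byte staging buffer can no longer overflow. The new get_lf_str_list() renders a bitmap of LFs as compressed ranges such as "0-3,5,7". A standalone sketch of the same idea over a plain 32-bit bitmap (not a line-for-line copy of the kernel helper):

#include <stdio.h>
#include <stdint.h>

static void lf_list(uint32_t bmap, char *out)
{
	int len = 0, start = -1, bit;

	for (bit = 0; bit <= 32; bit++) {
		int set = bit < 32 && (bmap & (1u << bit));

		if (set && start < 0)
			start = bit;			/* open a run */
		if (!set && start >= 0) {		/* close it */
			len += sprintf(out + len, len ? ",%d" : "%d", start);
			if (bit - 1 > start)
				len += sprintf(out + len, "-%d", bit - 1);
			start = -1;
		}
	}
	out[len] = '\0';
}

int main(void)
{
	char buf[128];

	lf_list(0x000000af, buf);	/* bits 0,1,2,3,5,7 set */
	printf("%s\n", buf);		/* prints 0-3,5,7 */
	return 0;
}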
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 87af164951eae..05b4149f79a5c 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -2146,6 +2146,9 @@ static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
+ return;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
++ if (!nix_hw)
++ return;
++
+ vlan = &nix_hw->txvlan;
+
+ mutex_lock(&vlan->rsrc_lock);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
+index 13b0259f7ea69..fcace73eae40f 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
+@@ -353,13 +353,10 @@ static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
+ struct sk_buff *skb;
+ int err;
+
+- elem_info->u.rdq.skb = NULL;
+ skb = netdev_alloc_skb_ip_align(NULL, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+- /* Assume that wqe was previously zeroed. */
+-
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+ buf_len, DMA_FROM_DEVICE);
+ if (err)
+@@ -597,21 +594,26 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ struct mlxsw_rx_info rx_info = {};
+- char *wqe;
++ char wqe[MLXSW_PCI_WQE_SIZE];
+ struct sk_buff *skb;
+ u16 byte_count;
+ int err;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+- skb = elem_info->u.sdq.skb;
+- if (!skb)
+- return;
+- wqe = elem_info->elem;
+- mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
++ skb = elem_info->u.rdq.skb;
++ memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);
+
+ if (q->consumer_counter++ != consumer_counter_limit)
+ dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
+
++ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
++ if (err) {
++ dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
++ goto out;
++ }
++
++ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
++
+ if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
+ rx_info.is_lag = true;
+ rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
+@@ -647,10 +649,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+ skb_put(skb, byte_count);
+ mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
+
+- memset(wqe, 0, q->elem_size);
+- err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+- if (err)
+- dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
++out:
+ /* Everything is set up, ring doorbell to pass elem to HW */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
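The mlxsw receive path above is reordered to allocate the replacement skb before the current one is unmapped and handed to the stack (it also snapshots the WQE, since refilling rewrites the ring element, and fixes the u.sdq/u.rdq field mixup). If allocation fails, the old buffer stays in the ring and only one packet is dropped, rather than the slot being left empty. A sketch of that allocate-before-consume refill, with hypothetical types:

#include <stdio.h>
#include <stdlib.h>

struct rx_slot {
	void *buf;
};

/* deliver the slot's buffer only if a replacement can be installed */
static int rx_one(struct rx_slot *slot)
{
	void *fresh = malloc(2048);
	void *done;

	if (!fresh)
		return -1;	/* drop packet, slot keeps its buffer */

	done = slot->buf;
	slot->buf = fresh;	/* ring slot is never left empty */
	printf("delivered %p, refilled with %p\n", done, fresh);
	free(done);		/* stand-in for handing the skb upward */
	return 0;
}

int main(void)
{
	struct rx_slot slot = { .buf = malloc(2048) };

	rx_one(&slot);
	free(slot.buf);
	return 0;
}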
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index dae10328c6cf7..d1c19ad4229c1 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -1743,6 +1743,16 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx)
+ ret = -EINVAL;
+ goto cleanup;
+ }
++ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
++ DMA_BIT_MASK(64))) {
++ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
++ DMA_BIT_MASK(32))) {
++ dev_warn(&tx->adapter->pdev->dev,
++ "lan743x_: No suitable DMA available\n");
++ ret = -ENOMEM;
++ goto cleanup;
++ }
++ }
+ ring_allocation_size = ALIGN(tx->ring_size *
+ sizeof(struct lan743x_tx_descriptor),
+ PAGE_SIZE);
+@@ -1934,7 +1944,8 @@ static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
+ index);
+ }
+
+-static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
++static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
++ gfp_t gfp)
+ {
+ struct net_device *netdev = rx->adapter->netdev;
+ struct device *dev = &rx->adapter->pdev->dev;
+@@ -1948,7 +1959,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
+
+ descriptor = &rx->ring_cpu_ptr[index];
+ buffer_info = &rx->buffer_info[index];
+- skb = __netdev_alloc_skb(netdev, buffer_length, GFP_ATOMIC | GFP_DMA);
++ skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
+ if (!skb)
+ return -ENOMEM;
+ dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
+@@ -2110,7 +2121,8 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
+
+ /* save existing skb, allocate new skb and map to dma */
+ skb = buffer_info->skb;
+- if (lan743x_rx_init_ring_element(rx, rx->last_head)) {
++ if (lan743x_rx_init_ring_element(rx, rx->last_head,
++ GFP_ATOMIC | GFP_DMA)) {
+ /* failed to allocate next skb.
+ * Memory is very low.
+ * Drop this packet and reuse buffer.
+@@ -2276,6 +2288,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
+ ret = -EINVAL;
+ goto cleanup;
+ }
++ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
++ DMA_BIT_MASK(64))) {
++ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
++ DMA_BIT_MASK(32))) {
++ dev_warn(&rx->adapter->pdev->dev,
++ "lan743x_: No suitable DMA available\n");
++ ret = -ENOMEM;
++ goto cleanup;
++ }
++ }
+ ring_allocation_size = ALIGN(rx->ring_size *
+ sizeof(struct lan743x_rx_descriptor),
+ PAGE_SIZE);
+@@ -2315,13 +2337,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
+
+ rx->last_head = 0;
+ for (index = 0; index < rx->ring_size; index++) {
+- ret = lan743x_rx_init_ring_element(rx, index);
++ ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
+ if (ret)
+ goto cleanup;
+ }
+ return 0;
+
+ cleanup:
++ netif_warn(rx->adapter, ifup, rx->adapter->netdev,
++ "Error allocating memory for LAN743x\n");
++
+ lan743x_rx_ring_cleanup(rx);
+ return ret;
+ }
+@@ -3019,6 +3044,8 @@ static int lan743x_pm_resume(struct device *dev)
+ if (ret) {
+ netif_err(adapter, probe, adapter->netdev,
+ "lan743x_hardware_init returned %d\n", ret);
++ lan743x_pci_cleanup(adapter);
++ return ret;
+ }
+
+ /* open netdev when netdev is at running state while resume.
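The two lan743x ring hunks add the standard DMA-mask negotiation: request 64-bit addressing, retry with 32-bit, and fail ring setup only if neither works (the resume path now also bails out cleanly instead of continuing after a failed hardware init). A userspace simulation of the fallback ladder; set_mask() below merely stands in for dma_set_mask_and_coherent(), which returns 0 on success:

#include <stdio.h>

/* pretend the platform only supports 32-bit DMA */
static int set_mask(int bits)
{
	return bits <= 32 ? 0 : -5;	/* -EIO on unsupported width */
}

static int ring_init_dma(void)
{
	if (set_mask(64) && set_mask(32)) {
		fprintf(stderr, "no suitable DMA available\n");
		return -12;		/* -ENOMEM, as in the patch */
	}
	return 0;
}

int main(void)
{
	printf("ring_init_dma() = %d\n", ring_init_dma());
	return 0;
}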
+diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
+index 64c6842bd452c..6d8406cfd38a1 100644
+--- a/drivers/net/ethernet/nxp/lpc_eth.c
++++ b/drivers/net/ethernet/nxp/lpc_eth.c
+@@ -1015,9 +1015,6 @@ static int lpc_eth_close(struct net_device *ndev)
+ napi_disable(&pldat->napi);
+ netif_stop_queue(ndev);
+
+- if (ndev->phydev)
+- phy_stop(ndev->phydev);
+-
+ spin_lock_irqsave(&pldat->lock, flags);
+ __lpc_eth_reset(pldat);
+ netif_carrier_off(ndev);
+@@ -1025,6 +1022,8 @@ static int lpc_eth_close(struct net_device *ndev)
+ writel(0, LPC_ENET_MAC2(pldat->net_base));
+ spin_unlock_irqrestore(&pldat->lock, flags);
+
++ if (ndev->phydev)
++ phy_stop(ndev->phydev);
+ clk_disable_unprepare(pldat->clk);
+
+ return 0;
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
+index 3fa9c15ec81e2..6865d9319197f 100644
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -548,7 +548,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
+ err = device_register(&bus->dev);
+ if (err) {
+ pr_err("mii_bus %s failed to register\n", bus->id);
+- put_device(&bus->dev);
+ return -EINVAL;
+ }
+
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 8eeb26d8aeb7d..cbf344c5db610 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -243,62 +243,10 @@ static void phy_sanitize_settings(struct phy_device *phydev)
+ }
+ }
+
+-int phy_ethtool_ksettings_set(struct phy_device *phydev,
+- const struct ethtool_link_ksettings *cmd)
+-{
+- __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+- u8 autoneg = cmd->base.autoneg;
+- u8 duplex = cmd->base.duplex;
+- u32 speed = cmd->base.speed;
+-
+- if (cmd->base.phy_address != phydev->mdio.addr)
+- return -EINVAL;
+-
+- linkmode_copy(advertising, cmd->link_modes.advertising);
+-
+- /* We make sure that we don't pass unsupported values in to the PHY */
+- linkmode_and(advertising, advertising, phydev->supported);
+-
+- /* Verify the settings we care about. */
+- if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
+- return -EINVAL;
+-
+- if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
+- return -EINVAL;
+-
+- if (autoneg == AUTONEG_DISABLE &&
+- ((speed != SPEED_1000 &&
+- speed != SPEED_100 &&
+- speed != SPEED_10) ||
+- (duplex != DUPLEX_HALF &&
+- duplex != DUPLEX_FULL)))
+- return -EINVAL;
+-
+- phydev->autoneg = autoneg;
+-
+- if (autoneg == AUTONEG_DISABLE) {
+- phydev->speed = speed;
+- phydev->duplex = duplex;
+- }
+-
+- linkmode_copy(phydev->advertising, advertising);
+-
+- linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+- phydev->advertising, autoneg == AUTONEG_ENABLE);
+-
+- phydev->master_slave_set = cmd->base.master_slave_cfg;
+- phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
+-
+- /* Restart the PHY */
+- phy_start_aneg(phydev);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(phy_ethtool_ksettings_set);
+-
+ void phy_ethtool_ksettings_get(struct phy_device *phydev,
+ struct ethtool_link_ksettings *cmd)
+ {
++ mutex_lock(&phydev->lock);
+ linkmode_copy(cmd->link_modes.supported, phydev->supported);
+ linkmode_copy(cmd->link_modes.advertising, phydev->advertising);
+ linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising);
+@@ -317,6 +265,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
+ cmd->base.autoneg = phydev->autoneg;
+ cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
+ cmd->base.eth_tp_mdix = phydev->mdix;
++ mutex_unlock(&phydev->lock);
+ }
+ EXPORT_SYMBOL(phy_ethtool_ksettings_get);
+
+@@ -751,7 +700,7 @@ static int phy_check_link_status(struct phy_device *phydev)
+ }
+
+ /**
+- * phy_start_aneg - start auto-negotiation for this PHY device
++ * _phy_start_aneg - start auto-negotiation for this PHY device
+ * @phydev: the phy_device struct
+ *
+ * Description: Sanitizes the settings (if we're not autonegotiating
+@@ -759,25 +708,43 @@ static int phy_check_link_status(struct phy_device *phydev)
+ * If the PHYCONTROL Layer is operating, we change the state to
+ * reflect the beginning of Auto-negotiation or forcing.
+ */
+-int phy_start_aneg(struct phy_device *phydev)
++static int _phy_start_aneg(struct phy_device *phydev)
+ {
+ int err;
+
++ lockdep_assert_held(&phydev->lock);
++
+ if (!phydev->drv)
+ return -EIO;
+
+- mutex_lock(&phydev->lock);
+-
+ if (AUTONEG_DISABLE == phydev->autoneg)
+ phy_sanitize_settings(phydev);
+
+ err = phy_config_aneg(phydev);
+ if (err < 0)
+- goto out_unlock;
++ return err;
+
+ if (phy_is_started(phydev))
+ err = phy_check_link_status(phydev);
+-out_unlock:
++
++ return err;
++}
++
++/**
++ * phy_start_aneg - start auto-negotiation for this PHY device
++ * @phydev: the phy_device struct
++ *
++ * Description: Sanitizes the settings (if we're not autonegotiating
++ * them), and then calls the driver's config_aneg function.
++ * If the PHYCONTROL Layer is operating, we change the state to
++ * reflect the beginning of Auto-negotiation or forcing.
++ */
++int phy_start_aneg(struct phy_device *phydev)
++{
++ int err;
++
++ mutex_lock(&phydev->lock);
++ err = _phy_start_aneg(phydev);
+ mutex_unlock(&phydev->lock);
+
+ return err;
+@@ -800,6 +767,61 @@ static int phy_poll_aneg_done(struct phy_device *phydev)
+ return ret < 0 ? ret : 0;
+ }
+
++int phy_ethtool_ksettings_set(struct phy_device *phydev,
++ const struct ethtool_link_ksettings *cmd)
++{
++ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
++ u8 autoneg = cmd->base.autoneg;
++ u8 duplex = cmd->base.duplex;
++ u32 speed = cmd->base.speed;
++
++ if (cmd->base.phy_address != phydev->mdio.addr)
++ return -EINVAL;
++
++ linkmode_copy(advertising, cmd->link_modes.advertising);
++
++ /* We make sure that we don't pass unsupported values in to the PHY */
++ linkmode_and(advertising, advertising, phydev->supported);
++
++ /* Verify the settings we care about. */
++ if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
++ return -EINVAL;
++
++ if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
++ return -EINVAL;
++
++ if (autoneg == AUTONEG_DISABLE &&
++ ((speed != SPEED_1000 &&
++ speed != SPEED_100 &&
++ speed != SPEED_10) ||
++ (duplex != DUPLEX_HALF &&
++ duplex != DUPLEX_FULL)))
++ return -EINVAL;
++
++ mutex_lock(&phydev->lock);
++ phydev->autoneg = autoneg;
++
++ if (autoneg == AUTONEG_DISABLE) {
++ phydev->speed = speed;
++ phydev->duplex = duplex;
++ }
++
++ linkmode_copy(phydev->advertising, advertising);
++
++ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
++ phydev->advertising, autoneg == AUTONEG_ENABLE);
++
++ phydev->master_slave_set = cmd->base.master_slave_cfg;
++ phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
++
++ /* Restart the PHY */
++ _phy_start_aneg(phydev);
++
++ mutex_unlock(&phydev->lock);
++ return 0;
++}
++EXPORT_SYMBOL(phy_ethtool_ksettings_set);
++
+ /**
+ * phy_speed_down - set speed to lowest speed supported by both link partners
+ * @phydev: the phy_device struct
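The phy.c rework above is the locked/unlocked function-pair idiom: _phy_start_aneg() asserts that the caller already holds phydev->lock, while the public phy_start_aneg() takes the lock itself. That lets phy_ethtool_ksettings_set() update the link settings and restart autonegotiation inside one critical section without self-deadlocking, and lets the ksettings getter take the same lock for a consistent snapshot. A pthread sketch of the pattern, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static int dev_speed;

/* caller must hold dev_lock */
static int _dev_start_aneg(int speed)
{
	dev_speed = speed;
	return 0;
}

/* public variant takes the lock itself */
static int dev_start_aneg(int speed)
{
	int err;

	pthread_mutex_lock(&dev_lock);
	err = _dev_start_aneg(speed);
	pthread_mutex_unlock(&dev_lock);
	return err;
}

/* updates settings and restarts aneg atomically, no self-deadlock */
static int dev_ksettings_set(int speed)
{
	int err;

	pthread_mutex_lock(&dev_lock);
	dev_speed = speed;		/* update settings under the lock */
	err = _dev_start_aneg(speed);	/* then restart, lock already held */
	pthread_mutex_unlock(&dev_lock);
	return err;
}

int main(void)
{
	dev_start_aneg(100);
	dev_ksettings_set(1000);
	printf("speed=%d\n", dev_speed);
	return 0;
}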
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 6d092d78e0cbc..a7e58c327d7f6 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -3734,6 +3734,12 @@ static int lan78xx_probe(struct usb_interface *intf,
+
+ dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+
++ /* Reject broken descriptors. */
++ if (dev->maxpacket == 0) {
++ ret = -ENODEV;
++ goto out4;
++ }
++
+ /* driver requires remote-wakeup capability during autosuspend. */
+ intf->needs_remote_wakeup = 1;
+
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 470e1c1e63535..cf0728a63ace0 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -1788,6 +1788,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ if (!dev->rx_urb_size)
+ dev->rx_urb_size = dev->hard_mtu;
+ dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
++ if (dev->maxpacket == 0) {
++ /* that is a broken device */
++ status = -ENODEV;
++ goto out4;
++ }
+
+ /* let userspace know we have a random address */
+ if (ether_addr_equal(net->dev_addr, node_id))
+diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
+index 4df926cc37d03..2777c0dd23f70 100644
+--- a/drivers/nfc/port100.c
++++ b/drivers/nfc/port100.c
+@@ -1003,11 +1003,11 @@ static u64 port100_get_command_type_mask(struct port100 *dev)
+
+ skb = port100_alloc_skb(dev, 0);
+ if (!skb)
+- return -ENOMEM;
++ return 0;
+
+ resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_COMMAND_TYPE, skb);
+ if (IS_ERR(resp))
+- return PTR_ERR(resp);
++ return 0;
+
+ if (resp->len < 8)
+ mask = 0;
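The port100 fix above is about sign truncation: port100_get_command_type_mask() returns a u64 bitmask, so returning -ENOMEM or PTR_ERR() converted a negative errno into an almost-all-ones mask, making every command look supported. Returning 0 keeps "no commands" as the in-band failure value. The conversion, demonstrated:

#include <stdio.h>
#include <stdint.h>

static uint64_t get_mask_buggy(void) { return -12; /* -ENOMEM */ }
static uint64_t get_mask_fixed(void) { return 0;   /* no commands */ }

int main(void)
{
	printf("buggy: %#018llx\n", (unsigned long long)get_mask_buggy());
	printf("fixed: %#018llx\n", (unsigned long long)get_mask_fixed());
	return 0;
}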
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index fd28a23d45ed6..5e412c080101c 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -913,12 +913,14 @@ static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
+ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
+ {
+ struct nvme_tcp_queue *queue = req->queue;
++ int req_data_len = req->data_len;
+
+ while (true) {
+ struct page *page = nvme_tcp_req_cur_page(req);
+ size_t offset = nvme_tcp_req_cur_offset(req);
+ size_t len = nvme_tcp_req_cur_length(req);
+ bool last = nvme_tcp_pdu_last_send(req, len);
++ int req_data_sent = req->data_sent;
+ int ret, flags = MSG_DONTWAIT;
+
+ if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
+@@ -945,7 +947,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
+ * in the request where we don't want to modify it as we may
+ * compete with the RX path completing the request.
+ */
+- if (req->data_sent + ret < req->data_len)
++ if (req_data_sent + ret < req_data_len)
+ nvme_tcp_advance_req(req, ret);
+
+ /* fully successful last send in current PDU */
+@@ -1035,10 +1037,11 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
+ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
+ {
+ struct nvme_tcp_queue *queue = req->queue;
++ size_t offset = req->offset;
+ int ret;
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
+ struct kvec iov = {
+- .iov_base = &req->ddgst + req->offset,
++ .iov_base = (u8 *)&req->ddgst + req->offset,
+ .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
+ };
+
+@@ -1051,7 +1054,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
+ if (unlikely(ret <= 0))
+ return ret;
+
+- if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
++ if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
+ nvme_tcp_done_send_req(queue);
+ return 1;
+ }
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 07ee347ea3f3c..d641bfa07a801 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -702,7 +702,7 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
+ struct nvmet_tcp_queue *queue = cmd->queue;
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
+ struct kvec iov = {
+- .iov_base = &cmd->exp_ddgst + cmd->offset,
++ .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
+ .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
+ };
+ int ret;
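Both nvme-tcp hunks fix the same pointer-arithmetic bug in the digest send path: req->ddgst (and cmd->exp_ddgst on the target side) is a 32-bit field, so "&req->ddgst + offset" advanced by offset * 4 bytes; the u8 * cast makes the offset count bytes, which matters whenever a send is partial. The host side additionally snapshots offset and data_sent before kernel_sendmsg(), since the RX path can complete the request concurrently. The scaling bug in isolation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ddgst[2] = { 0, 0 };
	unsigned int offset = 1;	/* one digest byte already sent */

	const void *wrong = &ddgst[0] + offset;			 /* +4 bytes */
	const void *right = (const uint8_t *)&ddgst[0] + offset; /* +1 byte */

	printf("wrong advances %td bytes, right advances %td bytes\n",
	       (const char *)wrong - (const char *)&ddgst[0],
	       (const char *)right - (const char *)&ddgst[0]);
	return 0;
}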
+diff --git a/drivers/pinctrl/bcm/pinctrl-ns.c b/drivers/pinctrl/bcm/pinctrl-ns.c
+index e79690bd8b85f..d7f8175d2c1c8 100644
+--- a/drivers/pinctrl/bcm/pinctrl-ns.c
++++ b/drivers/pinctrl/bcm/pinctrl-ns.c
+@@ -5,7 +5,6 @@
+
+ #include <linux/err.h>
+ #include <linux/io.h>
+-#include <linux/mfd/syscon.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+@@ -13,7 +12,6 @@
+ #include <linux/pinctrl/pinctrl.h>
+ #include <linux/pinctrl/pinmux.h>
+ #include <linux/platform_device.h>
+-#include <linux/regmap.h>
+ #include <linux/slab.h>
+
+ #define FLAG_BCM4708 BIT(1)
+@@ -24,8 +22,7 @@ struct ns_pinctrl {
+ struct device *dev;
+ unsigned int chipset_flag;
+ struct pinctrl_dev *pctldev;
+- struct regmap *regmap;
+- u32 offset;
++ void __iomem *base;
+
+ struct pinctrl_desc pctldesc;
+ struct ns_pinctrl_group *groups;
+@@ -232,9 +229,9 @@ static int ns_pinctrl_set_mux(struct pinctrl_dev *pctrl_dev,
+ unset |= BIT(pin_number);
+ }
+
+- regmap_read(ns_pinctrl->regmap, ns_pinctrl->offset, &tmp);
++ tmp = readl(ns_pinctrl->base);
+ tmp &= ~unset;
+- regmap_write(ns_pinctrl->regmap, ns_pinctrl->offset, tmp);
++ writel(tmp, ns_pinctrl->base);
+
+ return 0;
+ }
+@@ -266,13 +263,13 @@ static const struct of_device_id ns_pinctrl_of_match_table[] = {
+ static int ns_pinctrl_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+- struct device_node *np = dev->of_node;
+ const struct of_device_id *of_id;
+ struct ns_pinctrl *ns_pinctrl;
+ struct pinctrl_desc *pctldesc;
+ struct pinctrl_pin_desc *pin;
+ struct ns_pinctrl_group *group;
+ struct ns_pinctrl_function *function;
++ struct resource *res;
+ int i;
+
+ ns_pinctrl = devm_kzalloc(dev, sizeof(*ns_pinctrl), GFP_KERNEL);
+@@ -290,18 +287,12 @@ static int ns_pinctrl_probe(struct platform_device *pdev)
+ return -EINVAL;
+ ns_pinctrl->chipset_flag = (uintptr_t)of_id->data;
+
+- ns_pinctrl->regmap = syscon_node_to_regmap(of_get_parent(np));
+- if (IS_ERR(ns_pinctrl->regmap)) {
+- int err = PTR_ERR(ns_pinctrl->regmap);
+-
+- dev_err(dev, "Failed to map pinctrl regs: %d\n", err);
+-
+- return err;
+- }
+-
+- if (of_property_read_u32(np, "offset", &ns_pinctrl->offset)) {
+- dev_err(dev, "Failed to get register offset\n");
+- return -ENOENT;
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
++ "cru_gpio_control");
++ ns_pinctrl->base = devm_ioremap_resource(dev, res);
++ if (IS_ERR(ns_pinctrl->base)) {
++ dev_err(dev, "Failed to map pinctrl regs\n");
++ return PTR_ERR(ns_pinctrl->base);
+ }
+
+ memcpy(pctldesc, &ns_pinctrl_desc, sizeof(*pctldesc));
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 5b764740b8298..c5fd75bbf5d97 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -832,6 +832,34 @@ static const struct pinconf_ops amd_pinconf_ops = {
+ .pin_config_group_set = amd_pinconf_group_set,
+ };
+
++static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
++{
++ struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
++ unsigned long flags;
++ u32 pin_reg, mask;
++ int i;
++
++ mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
++ BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
++ BIT(WAKE_CNTRL_OFF_S4);
++
++ for (i = 0; i < desc->npins; i++) {
++ int pin = desc->pins[i].number;
++ const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
++
++ if (!pd)
++ continue;
++
++ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
++
++ pin_reg = readl(gpio_dev->base + i * 4);
++ pin_reg &= ~mask;
++ writel(pin_reg, gpio_dev->base + i * 4);
++
++ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
++ }
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
+ {
+@@ -969,6 +997,9 @@ static int amd_gpio_probe(struct platform_device *pdev)
+ return PTR_ERR(gpio_dev->pctrl);
+ }
+
++ /* Disable and mask interrupts */
++ amd_gpio_irq_init(gpio_dev);
++
+ girq = &gpio_dev->gc.irq;
+ girq->chip = &amd_gpio_irqchip;
+ /* This will let us handle the parent IRQ in the driver */
+diff --git a/drivers/reset/reset-brcmstb-rescal.c b/drivers/reset/reset-brcmstb-rescal.c
+index b6f074d6a65f8..433fa0c40e477 100644
+--- a/drivers/reset/reset-brcmstb-rescal.c
++++ b/drivers/reset/reset-brcmstb-rescal.c
+@@ -38,7 +38,7 @@ static int brcm_rescal_reset_set(struct reset_controller_dev *rcdev,
+ }
+
+ ret = readl_poll_timeout(base + BRCM_RESCAL_STATUS, reg,
+- !(reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
++ (reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
+ if (ret) {
+ dev_err(data->dev, "time out on SATA/PCIe rescal\n");
+ return ret;
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index 935b01ee44b74..f100fe4e9b2a9 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -1696,6 +1696,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
+
+ spin_lock_irqsave(&evt->queue->l_lock, flags);
+ list_add_tail(&evt->queue_list, &evt->queue->sent);
++ atomic_set(&evt->active, 1);
+
+ mb();
+
+@@ -1710,6 +1711,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
+ be64_to_cpu(crq_as_u64[1]));
+
+ if (rc) {
++ atomic_set(&evt->active, 0);
+ list_del(&evt->queue_list);
+ spin_unlock_irqrestore(&evt->queue->l_lock, flags);
+ del_timer(&evt->timer);
+@@ -1737,7 +1739,6 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
+
+ evt->done(evt);
+ } else {
+- atomic_set(&evt->active, 1);
+ spin_unlock_irqrestore(&evt->queue->l_lock, flags);
+ ibmvfc_trc_start(evt);
+ }
+diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
+index 427a2ff7e9da1..9cdedbff5b884 100644
+--- a/drivers/scsi/ufs/ufs-exynos.c
++++ b/drivers/scsi/ufs/ufs-exynos.c
+@@ -642,9 +642,9 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
+ }
+
+ /* setting for three timeout values for traffic class #0 */
+- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 8064);
+- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 28224);
+- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 20160);
++ ufshcd_dme_set(hba, UIC_ARG_MIB(DL_FC0PROTTIMEOUTVAL), 8064);
++ ufshcd_dme_set(hba, UIC_ARG_MIB(DL_TC0REPLAYTIMEOUTVAL), 28224);
++ ufshcd_dme_set(hba, UIC_ARG_MIB(DL_AFC0REQTIMEOUTVAL), 20160);
+
+ return 0;
+ out:
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index dd95dfd85e980..3035bb6f54585 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -576,7 +576,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
+ /* Last one doesn't continue. */
+ desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
+ if (!indirect && vq->use_dma_api)
+- vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags =
++ vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
+ ~VRING_DESC_F_NEXT;
+
+ if (indirect) {
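The virtio_ring change is a one-character fix with a large effect: the old code assigned the complement mask ("flags = ~VRING_DESC_F_NEXT"), turning on every flag bit except NEXT in desc_extra, where the intent was to clear only the NEXT bit ("flags &= ~..."). Stripped of the cpu_to_virtio16 byte-swapping, the difference looks like this:

#include <stdio.h>
#include <stdint.h>

#define VRING_DESC_F_NEXT  1u
#define VRING_DESC_F_WRITE 2u

int main(void)
{
	uint16_t assigned = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE;
	uint16_t masked   = assigned;

	assigned = ~VRING_DESC_F_NEXT;	/* old: 0xfffe, WRITE bit lost */
	masked  &= ~VRING_DESC_F_NEXT;	/* new: 0x0002, WRITE kept */

	printf("assigned=%#06x masked=%#06x\n", assigned, masked);
	return 0;
}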
+diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
+index 643c6c2d0b728..ced2fc0deb8c4 100644
+--- a/drivers/watchdog/iTCO_wdt.c
++++ b/drivers/watchdog/iTCO_wdt.c
+@@ -71,8 +71,6 @@
+ #define TCOBASE(p) ((p)->tco_res->start)
+ /* SMI Control and Enable Register */
+ #define SMI_EN(p) ((p)->smi_res->start)
+-#define TCO_EN (1 << 13)
+-#define GBL_SMI_EN (1 << 0)
+
+ #define TCO_RLD(p) (TCOBASE(p) + 0x00) /* TCO Timer Reload/Curr. Value */
+ #define TCOv1_TMR(p) (TCOBASE(p) + 0x01) /* TCOv1 Timer Initial Value*/
+@@ -357,12 +355,8 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
+
+ tmrval = seconds_to_ticks(p, t);
+
+- /*
+- * If TCO SMIs are off, the timer counts down twice before rebooting.
+- * Otherwise, the BIOS generally reboots when the SMI triggers.
+- */
+- if (p->smi_res &&
+- (inl(SMI_EN(p)) & (TCO_EN | GBL_SMI_EN)) != (TCO_EN | GBL_SMI_EN))
++ /* For TCO v1 the timer counts down twice before rebooting */
++ if (p->iTCO_version == 1)
+ tmrval /= 2;
+
+ /* from the specs: */
+@@ -527,7 +521,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
+ * Disables TCO logic generating an SMI#
+ */
+ val32 = inl(SMI_EN(p));
+- val32 &= ~TCO_EN; /* Turn off SMI clearing watchdog */
++ val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
+ outl(val32, SMI_EN(p));
+ }
+
+diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
+index ee9ff38929eb5..6f4319bdbc500 100644
+--- a/drivers/watchdog/sbsa_gwdt.c
++++ b/drivers/watchdog/sbsa_gwdt.c
+@@ -130,7 +130,7 @@ static u64 sbsa_gwdt_reg_read(struct sbsa_gwdt *gwdt)
+ if (gwdt->version == 0)
+ return readl(gwdt->control_base + SBSA_GWDT_WOR);
+ else
+- return readq(gwdt->control_base + SBSA_GWDT_WOR);
++ return lo_hi_readq(gwdt->control_base + SBSA_GWDT_WOR);
+ }
+
+ static void sbsa_gwdt_reg_write(u64 val, struct sbsa_gwdt *gwdt)
+@@ -138,7 +138,7 @@ static void sbsa_gwdt_reg_write(u64 val, struct sbsa_gwdt *gwdt)
+ if (gwdt->version == 0)
+ writel((u32)val, gwdt->control_base + SBSA_GWDT_WOR);
+ else
+- writeq(val, gwdt->control_base + SBSA_GWDT_WOR);
++ lo_hi_writeq(val, gwdt->control_base + SBSA_GWDT_WOR);
+ }
+
+ /*
+diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
+index 8521942f5af2b..481017e1dac5a 100644
+--- a/fs/ocfs2/suballoc.c
++++ b/fs/ocfs2/suballoc.c
+@@ -1251,7 +1251,7 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
+ {
+ struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+ struct journal_head *jh;
+- int ret;
++ int ret = 1;
+
+ if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
+ return 0;
+@@ -1259,14 +1259,18 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
+ if (!buffer_jbd(bg_bh))
+ return 1;
+
+- jh = bh2jh(bg_bh);
+- spin_lock(&jh->b_state_lock);
+- bg = (struct ocfs2_group_desc *) jh->b_committed_data;
+- if (bg)
+- ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
+- else
+- ret = 1;
+- spin_unlock(&jh->b_state_lock);
++ jbd_lock_bh_journal_head(bg_bh);
++ if (buffer_jbd(bg_bh)) {
++ jh = bh2jh(bg_bh);
++ spin_lock(&jh->b_state_lock);
++ bg = (struct ocfs2_group_desc *) jh->b_committed_data;
++ if (bg)
++ ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
++ else
++ ret = 1;
++ spin_unlock(&jh->b_state_lock);
++ }
++ jbd_unlock_bh_journal_head(bg_bh);
+
+ return ret;
+ }
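The ocfs2 change above applies the check / lock / re-check idiom: buffer_jbd() is tested once cheaply, then again under jbd_lock_bh_journal_head(), because the journal head can be detached between the two tests; ret also now defaults to 1 so the function is well-defined when the race hits. A pthread sketch of the idiom, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t jh_lock = PTHREAD_MUTEX_INITIALIZER;
static int *journal_copy;	/* may be detached concurrently */

static int bit_allocatable(void)
{
	int ret = 1;		/* default when no journal copy exists */

	if (!journal_copy)	/* cheap unlocked check */
		return 1;

	pthread_mutex_lock(&jh_lock);
	if (journal_copy)	/* re-check under the lock */
		ret = (*journal_copy == 0);
	pthread_mutex_unlock(&jh_lock);
	return ret;
}

int main(void)
{
	int committed_bit = 0;

	journal_copy = &committed_bit;
	printf("allocatable=%d\n", bit_allocatable());
	return 0;
}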
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 11da5671d4f09..5c242e0477328 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -900,8 +900,11 @@ struct bpf_array_aux {
+ * stored in the map to make sure that all callers and callees have
+ * the same prog type and JITed flag.
+ */
+- enum bpf_prog_type type;
+- bool jited;
++ struct {
++ spinlock_t lock;
++ enum bpf_prog_type type;
++ bool jited;
++ } owner;
+ /* Programs with direct jumps into programs part of this array. */
+ struct list_head poke_progs;
+ struct bpf_map *map;
+diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
+index ae3ac3a2018ca..2eb9c53468e77 100644
+--- a/include/linux/bpf_types.h
++++ b/include/linux/bpf_types.h
+@@ -101,14 +101,14 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops)
+ #endif
+ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
+ BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
+-#ifdef CONFIG_NET
+-BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
+-BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
+-BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
+ #ifdef CONFIG_BPF_LSM
+ BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops)
+ #endif
+ BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops)
++#ifdef CONFIG_NET
++BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
++BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
++BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
+ BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
+ #if defined(CONFIG_XDP_SOCKETS)
+ BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
+diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
+index 5922031ffab6e..c573fccfc4751 100644
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -171,6 +171,15 @@ enum pageflags {
+ /* Compound pages. Stored in first tail page's flags */
+ PG_double_map = PG_workingset,
+
++#ifdef CONFIG_MEMORY_FAILURE
++ /*
++ * Compound pages. Stored in first tail page's flags.
++ * Indicates that at least one subpage is hwpoisoned in the
++ * THP.
++ */
++ PG_has_hwpoisoned = PG_mappedtodisk,
++#endif
++
+ /* non-lru isolated movable page */
+ PG_isolated = PG_reclaim,
+
+@@ -703,6 +712,20 @@ PAGEFLAG_FALSE(DoubleMap)
+ TESTSCFLAG_FALSE(DoubleMap)
+ #endif
+
++#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
++/*
++ * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
++ * compound page.
++ *
++ * This flag is set by hwpoison handler. Cleared by THP split or free page.
++ */
++PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
++ TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
++#else
++PAGEFLAG_FALSE(HasHWPoisoned)
++ TESTSCFLAG_FALSE(HasHWPoisoned)
++#endif
++
+ /*
+ * Check if a page is currently marked HWPoisoned. Note that this check is
+ * best effort only and inherently racy: there is no way to synchronize with
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 161cdf7df1a07..db581a761dcf6 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -5350,7 +5350,6 @@ static inline void wiphy_unlock(struct wiphy *wiphy)
+ * netdev and may otherwise be used by driver read-only, will be update
+ * by cfg80211 on change_interface
+ * @mgmt_registrations: list of registrations for management frames
+- * @mgmt_registrations_lock: lock for the list
+ * @mgmt_registrations_need_update: mgmt registrations were updated,
+ * need to propagate the update to the driver
+ * @mtx: mutex used to lock data in this struct, may be used by drivers
+@@ -5397,7 +5396,6 @@ struct wireless_dev {
+ u32 identifier;
+
+ struct list_head mgmt_registrations;
+- spinlock_t mgmt_registrations_lock;
+ u8 mgmt_registrations_need_update:1;
+
+ struct mutex mtx;
+diff --git a/include/net/tls.h b/include/net/tls.h
+index be4b3e1cac462..64217c9873c92 100644
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -358,6 +358,7 @@ int tls_sk_query(struct sock *sk, int optname, char __user *optval,
+ int __user *optlen);
+ int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
+ unsigned int optlen);
++void tls_err_abort(struct sock *sk, int err);
+
+ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
+ void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
+@@ -466,12 +467,6 @@ static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
+ #endif
+ }
+
+-static inline void tls_err_abort(struct sock *sk, int err)
+-{
+- sk->sk_err = err;
+- sk_error_report(sk);
+-}
+-
+ static inline bool tls_bigint_increment(unsigned char *seq, int len)
+ {
+ int i;
+@@ -512,7 +507,7 @@ static inline void tls_advance_record_sn(struct sock *sk,
+ struct cipher_context *ctx)
+ {
+ if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
+- tls_err_abort(sk, EBADMSG);
++ tls_err_abort(sk, -EBADMSG);
+
+ if (prot->version != TLS_1_3_VERSION &&
+ prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
+diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
+index 3c4105603f9db..db3c88fe08940 100644
+--- a/kernel/bpf/arraymap.c
++++ b/kernel/bpf/arraymap.c
+@@ -1051,6 +1051,7 @@ static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
+ INIT_WORK(&aux->work, prog_array_map_clear_deferred);
+ INIT_LIST_HEAD(&aux->poke_progs);
+ mutex_init(&aux->poke_mutex);
++ spin_lock_init(&aux->owner.lock);
+
+ map = array_map_alloc(attr);
+ if (IS_ERR(map)) {
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index c019611fbc8f4..4c0c0146f956c 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -1821,20 +1821,26 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
+ bool bpf_prog_array_compatible(struct bpf_array *array,
+ const struct bpf_prog *fp)
+ {
++ bool ret;
++
+ if (fp->kprobe_override)
+ return false;
+
+- if (!array->aux->type) {
++ spin_lock(&array->aux->owner.lock);
++
++ if (!array->aux->owner.type) {
+ /* There's no owner yet where we could check for
+ * compatibility.
+ */
+- array->aux->type = fp->type;
+- array->aux->jited = fp->jited;
+- return true;
++ array->aux->owner.type = fp->type;
++ array->aux->owner.jited = fp->jited;
++ ret = true;
++ } else {
++ ret = array->aux->owner.type == fp->type &&
++ array->aux->owner.jited == fp->jited;
+ }
+-
+- return array->aux->type == fp->type &&
+- array->aux->jited == fp->jited;
++ spin_unlock(&array->aux->owner.lock);
++ return ret;
+ }
+
+ static int bpf_check_tail_call(const struct bpf_prog *fp)
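bpf_prog_array_compatible() above had a race: two programs could both observe an unclaimed array and each record themselves as its owner. Moving type and jited into an owner struct guarded by a spinlock makes the first-user-claims check atomic, and the fdinfo reader takes the same lock. A pthread model of the claim-or-match logic, with hypothetical names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct owner {
	pthread_mutex_t lock;
	int type;		/* 0 = unclaimed */
	bool jited;
};

static bool owner_compatible(struct owner *o, int type, bool jited)
{
	bool ret;

	pthread_mutex_lock(&o->lock);
	if (!o->type) {		/* first user claims ownership */
		o->type = type;
		o->jited = jited;
		ret = true;
	} else {		/* later users must match the owner */
		ret = o->type == type && o->jited == jited;
	}
	pthread_mutex_unlock(&o->lock);
	return ret;
}

int main(void)
{
	struct owner o = { .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("%d %d %d\n",
	       owner_compatible(&o, 1, true),	/* claims: 1 */
	       owner_compatible(&o, 1, true),	/* matches: 1 */
	       owner_compatible(&o, 2, true));	/* mismatch: 0 */
	return 0;
}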
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index e343f158e5564..92ed4b2984b8d 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -543,8 +543,10 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
+
+ if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
+ array = container_of(map, struct bpf_array, map);
+- type = array->aux->type;
+- jited = array->aux->jited;
++ spin_lock(&array->aux->owner.lock);
++ type = array->aux->owner.type;
++ jited = array->aux->owner.jited;
++ spin_unlock(&array->aux->owner.lock);
+ }
+
+ seq_printf(m,
+@@ -1064,7 +1066,7 @@ static int map_lookup_elem(union bpf_attr *attr)
+ value_size = bpf_map_value_size(map);
+
+ err = -ENOMEM;
+- value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
++ value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
+ if (!value)
+ goto free_key;
+
+@@ -1079,7 +1081,7 @@ static int map_lookup_elem(union bpf_attr *attr)
+ err = 0;
+
+ free_value:
+- kfree(value);
++ kvfree(value);
+ free_key:
+ kfree(key);
+ err_put:
+@@ -1125,16 +1127,10 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
+ goto err_put;
+ }
+
+- if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+- map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
+- map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
+- map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
+- value_size = round_up(map->value_size, 8) * num_possible_cpus();
+- else
+- value_size = map->value_size;
++ value_size = bpf_map_value_size(map);
+
+ err = -ENOMEM;
+- value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
++ value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
+ if (!value)
+ goto free_key;
+
+@@ -1145,7 +1141,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
+ err = bpf_map_update_value(map, f, key, value, attr->flags);
+
+ free_value:
+- kfree(value);
++ kvfree(value);
+ free_key:
+ kfree(key);
+ err_put:
+@@ -1331,12 +1327,11 @@ int generic_map_update_batch(struct bpf_map *map,
+ void __user *values = u64_to_user_ptr(attr->batch.values);
+ void __user *keys = u64_to_user_ptr(attr->batch.keys);
+ u32 value_size, cp, max_count;
+- int ufd = attr->map_fd;
++ int ufd = attr->batch.map_fd;
+ void *key, *value;
+ struct fd f;
+ int err = 0;
+
+- f = fdget(ufd);
+ if (attr->batch.elem_flags & ~BPF_F_LOCK)
+ return -EINVAL;
+
+@@ -1355,12 +1350,13 @@ int generic_map_update_batch(struct bpf_map *map,
+ if (!key)
+ return -ENOMEM;
+
+- value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
++ value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
+ if (!value) {
+ kfree(key);
+ return -ENOMEM;
+ }
+
++ f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
+ for (cp = 0; cp < max_count; cp++) {
+ err = -EFAULT;
+ if (copy_from_user(key, keys + cp * map->key_size,
+@@ -1378,8 +1374,9 @@ int generic_map_update_batch(struct bpf_map *map,
+ if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
+ err = -EFAULT;
+
+- kfree(value);
++ kvfree(value);
+ kfree(key);
++ fdput(f);
+ return err;
+ }
+
+@@ -1417,7 +1414,7 @@ int generic_map_lookup_batch(struct bpf_map *map,
+ if (!buf_prevkey)
+ return -ENOMEM;
+
+- buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
++ buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
+ if (!buf) {
+ kfree(buf_prevkey);
+ return -ENOMEM;
+@@ -1480,7 +1477,7 @@ int generic_map_lookup_batch(struct bpf_map *map,
+
+ free_buf:
+ kfree(buf_prevkey);
+- kfree(buf);
++ kvfree(buf);
+ return err;
+ }
+
+@@ -1535,7 +1532,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
+ value_size = bpf_map_value_size(map);
+
+ err = -ENOMEM;
+- value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
++ value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
+ if (!value)
+ goto free_key;
+
+@@ -1567,7 +1564,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
+ err = 0;
+
+ free_value:
+- kfree(value);
++ kvfree(value);
+ free_key:
+ kfree(key);
+ err_put:
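+
The syscall.c hunks above switch the map value buffers from kmalloc()/kfree() to kvmalloc()/kvfree(): value_size comes from a user-created map and can be large enough that a contiguous kmalloc() fails or hammers the page allocator, whereas kvmalloc() falls back to vmalloc() transparently and kvfree() releases either kind of pointer. A minimal sketch of the pattern (hypothetical helper name, kernel-style C):

        #include <linux/errno.h>
        #include <linux/mm.h>           /* kvmalloc(), kvfree() */
        #include <linux/string.h>

        /* Hypothetical helper: stage a user-sized value in a kernel buffer. */
        static int stage_value(const void *src, size_t value_size)
        {
                void *value;

                /* Contiguous allocation if small, vmalloc fallback if large. */
                value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
                if (!value)
                        return -ENOMEM;

                memcpy(value, src, value_size);
                /* ... hand the buffer to the map implementation ... */

                kvfree(value);          /* correct for both backing stores */
                return 0;
        }

The generic_map_update_batch() hunk makes the same swap and additionally moves fdget() below the early parameter checks, so every path that takes the file reference now reaches the matching fdput().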
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 3a0161c21b6ba..38750c385dd2c 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -2174,8 +2174,10 @@ static void cgroup_kill_sb(struct super_block *sb)
+ * And don't kill the default root.
+ */
+ if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root &&
+- !percpu_ref_is_dying(&root->cgrp.self.refcnt))
++ !percpu_ref_is_dying(&root->cgrp.self.refcnt)) {
++ cgroup_bpf_offline(&root->cgrp);
+ percpu_ref_kill(&root->cgrp.self.refcnt);
++ }
+ cgroup_put(&root->cgrp);
+ kernfs_kill_sb(sb);
+ }
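+
The cgroup fix drops the bpf-held references (cgroup_bpf_offline()) before percpu_ref_kill(), so a root cgroup whose cgroup_bpf still pins it is not leaked when the superblock goes away. A percpu_ref only runs its release callback once it has been killed and every outstanding reference is dropped; a hedged sketch of that lifecycle (hypothetical object names):

        #include <linux/percpu-refcount.h>

        static void obj_release(struct percpu_ref *ref)
        {
                /* Runs once the ref was killed AND the last reference dropped. */
        }

        static int obj_init(struct percpu_ref *ref)
        {
                return percpu_ref_init(ref, obj_release, 0, GFP_KERNEL);
        }

        static void obj_teardown(struct percpu_ref *ref)
        {
                /*
                 * Whatever the object pins (as cgroup_bpf pins the cgroup
                 * here) must be released before or at kill time; otherwise
                 * obj_release() never runs and the pinned object leaks.
                 */
                percpu_ref_kill(ref);   /* atomic mode, drops the base ref */
        }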
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 163c2da2a6548..d4eb8590fa6bb 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2452,6 +2452,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
+ /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
+ lruvec = lock_page_lruvec(head);
+
++ ClearPageHasHWPoisoned(head);
++
+ for (i = nr - 1; i >= 1; i--) {
+ __split_huge_page_tail(head, i, lruvec, list);
+ /* Some pages can be beyond i_size: drop them from page cache */
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index b0412be08fa2c..b82b760acf949 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -445,22 +445,25 @@ static bool hugepage_vma_check(struct vm_area_struct *vma,
+ if (!transhuge_vma_enabled(vma, vm_flags))
+ return false;
+
++ if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
++ vma->vm_pgoff, HPAGE_PMD_NR))
++ return false;
++
+ /* Enabled via shmem mount options or sysfs settings. */
+- if (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) {
+- return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
+- HPAGE_PMD_NR);
+- }
++ if (shmem_file(vma->vm_file))
++ return shmem_huge_enabled(vma);
+
+ /* THP settings require madvise. */
+ if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
+ return false;
+
+- /* Read-only file mappings need to be aligned for THP to work. */
++ /* Only regular files are valid */
+ if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
+- !inode_is_open_for_write(vma->vm_file->f_inode) &&
+ (vm_flags & VM_EXEC)) {
+- return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
+- HPAGE_PMD_NR);
++ struct inode *inode = vma->vm_file->f_inode;
++
++ return !inode_is_open_for_write(inode) &&
++ S_ISREG(inode->i_mode);
+ }
+
+ if (!vma->anon_vma || vma->vm_ops)
+@@ -1763,6 +1766,10 @@ static void collapse_file(struct mm_struct *mm,
+ filemap_flush(mapping);
+ result = SCAN_FAIL;
+ goto xa_unlocked;
++ } else if (PageWriteback(page)) {
++ xas_unlock_irq(&xas);
++ result = SCAN_FAIL;
++ goto xa_unlocked;
+ } else if (trylock_page(page)) {
+ get_page(page);
+ xas_unlock_irq(&xas);
+@@ -1798,7 +1805,8 @@ static void collapse_file(struct mm_struct *mm,
+ goto out_unlock;
+ }
+
+- if (!is_shmem && PageDirty(page)) {
++ if (!is_shmem && (PageDirty(page) ||
++ PageWriteback(page))) {
+ /*
+ * khugepaged only works on read-only fd, so this
+ * page is dirty because it hasn't been flushed
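+
The reworked hugepage_vma_check() hoists the alignment test so it covers every file-backed VMA, not only shmem ones: collapsing a file mapping into a PMD-sized THP is only possible when the virtual start and the file offset sit on the same HPAGE_PMD_NR page boundary. The condition it enforces, in isolation (a sketch, not the kernel's exact helper):

        #include <linux/huge_mm.h>      /* HPAGE_PMD_NR */
        #include <linux/kernel.h>       /* IS_ALIGNED() */

        /*
         * A file VMA maps file page (vm_pgoff + n) at virtual page
         * ((vm_start >> PAGE_SHIFT) + n); a PMD mapping needs the virtual
         * page number and file page number congruent modulo HPAGE_PMD_NR.
         */
        static bool file_vma_pmd_aligned(unsigned long vm_start,
                                         unsigned long vm_pgoff)
        {
                return IS_ALIGNED((vm_start >> PAGE_SHIFT) - vm_pgoff,
                                  HPAGE_PMD_NR);
        }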
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 7df9fde18004c..c398d8524f6e0 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1148,20 +1148,6 @@ static int __get_hwpoison_page(struct page *page)
+ if (!HWPoisonHandlable(head))
+ return -EBUSY;
+
+- if (PageTransHuge(head)) {
+- /*
+- * Non anonymous thp exists only in allocation/free time. We
+- * can't handle such a case correctly, so let's give it up.
+- * This should be better than triggering BUG_ON when kernel
+- * tries to touch the "partially handled" page.
+- */
+- if (!PageAnon(head)) {
+- pr_err("Memory failure: %#lx: non anonymous thp\n",
+- page_to_pfn(page));
+- return 0;
+- }
+- }
+-
+ if (get_page_unless_zero(head)) {
+ if (head == compound_head(page))
+ return 1;
+@@ -1708,6 +1694,20 @@ try_again:
+ }
+
+ if (PageTransHuge(hpage)) {
++ /*
++ * The flag must be set after the refcount is bumped
++ * otherwise it may race with THP split.
++ * And the flag can't be set in get_hwpoison_page() since
++ * it is called by soft offline too and it is just called
++ * for !MF_COUNT_INCREASE. So here seems to be the best
++ * place.
++ *
++ * No need to care about the above error handling paths for
++ * get_hwpoison_page() since they handle either a free page
++ * or an unhandlable page. The refcount is bumped iff the
++ * page is a valid handlable page.
++ */
++ SetPageHasHWPoisoned(hpage);
+ if (try_to_split_thp_page(p, "Memory Failure") < 0) {
+ action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
+ res = -EBUSY;
+diff --git a/mm/memory.c b/mm/memory.c
+index 25fc46e872142..738f4e1df81ee 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3905,6 +3905,15 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
+ if (compound_order(page) != HPAGE_PMD_ORDER)
+ return ret;
+
++ /*
++ * Just back off if any subpage of a THP is corrupted; otherwise
++ * the corrupted page may be mapped by PMD silently to escape the
++ * check. This kind of THP can only be PTE mapped. Access to
++ * the corrupted subpage should trigger SIGBUS as expected.
++ */
++ if (unlikely(PageHasHWPoisoned(page)))
++ return ret;
++
+ /*
+ * Archs like ppc64 need additional space to store information
+ * related to pte entry. Use the preallocated table for that.
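+
Taken together, the mm hunks implement a small cross-file protocol around the PageHasHWPoisoned flag on a THP's head page: memory_failure() sets it only after bumping the refcount (so it cannot race with a concurrent split), do_set_pmd() refuses to PMD-map such a THP so the poisoned subpage still faults individually with SIGBUS, and the flag is cleared when the THP is actually split (__split_huge_page()) or freed (free_pages_prepare()). A condensed sketch of the producer side, assuming the page-flag helpers this backport introduces:

        #include <linux/mm.h>           /* get_page_unless_zero() */
        #include <linux/page-flags.h>   /* SetPageHasHWPoisoned() */

        /* Sketch: mark a THP as holding a poisoned subpage; order matters. */
        static bool mark_thp_hwpoisoned(struct page *hpage)
        {
                /* 1. Pin the compound page first ... */
                if (!get_page_unless_zero(hpage))
                        return false;   /* already on its way to the allocator */

                /*
                 * 2. ... then set the flag: with the refcount held, a
                 *    concurrent split cannot slip in between and strand the
                 *    flag on a page that is no longer compound.
                 */
                SetPageHasHWPoisoned(hpage);
                return true;
        }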
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 7a28f7db7d286..7db847fa62f89 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1320,8 +1320,10 @@ static __always_inline bool free_pages_prepare(struct page *page,
+
+ VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
+
+- if (compound)
++ if (compound) {
+ ClearPageDoubleMap(page);
++ ClearPageHasHWPoisoned(page);
++ }
+ for (i = 1; i < (1 << order); i++) {
+ if (compound)
+ bad += free_tail_pages_check(page, page + i);
+diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
+index 63d42dcc9324a..eb55b419faf88 100644
+--- a/net/batman-adv/bridge_loop_avoidance.c
++++ b/net/batman-adv/bridge_loop_avoidance.c
+@@ -1556,10 +1556,14 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
+ return 0;
+
+ bat_priv->bla.claim_hash = batadv_hash_new(128);
+- bat_priv->bla.backbone_hash = batadv_hash_new(32);
++ if (!bat_priv->bla.claim_hash)
++ return -ENOMEM;
+
+- if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
++ bat_priv->bla.backbone_hash = batadv_hash_new(32);
++ if (!bat_priv->bla.backbone_hash) {
++ batadv_hash_destroy(bat_priv->bla.claim_hash);
+ return -ENOMEM;
++ }
+
+ batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
+ &batadv_claim_hash_lock_class_key);
+diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
+index 3ddd66e4c29ef..5207cd8d6ad83 100644
+--- a/net/batman-adv/main.c
++++ b/net/batman-adv/main.c
+@@ -190,29 +190,41 @@ int batadv_mesh_init(struct net_device *soft_iface)
+
+ bat_priv->gw.generation = 0;
+
+- ret = batadv_v_mesh_init(bat_priv);
+- if (ret < 0)
+- goto err;
+-
+ ret = batadv_originator_init(bat_priv);
+- if (ret < 0)
+- goto err;
++ if (ret < 0) {
++ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++ goto err_orig;
++ }
+
+ ret = batadv_tt_init(bat_priv);
+- if (ret < 0)
+- goto err;
++ if (ret < 0) {
++ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++ goto err_tt;
++ }
++
++ ret = batadv_v_mesh_init(bat_priv);
++ if (ret < 0) {
++ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++ goto err_v;
++ }
+
+ ret = batadv_bla_init(bat_priv);
+- if (ret < 0)
+- goto err;
++ if (ret < 0) {
++ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++ goto err_bla;
++ }
+
+ ret = batadv_dat_init(bat_priv);
+- if (ret < 0)
+- goto err;
++ if (ret < 0) {
++ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++ goto err_dat;
++ }
+
+ ret = batadv_nc_mesh_init(bat_priv);
+- if (ret < 0)
+- goto err;
++ if (ret < 0) {
++ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
++ goto err_nc;
++ }
+
+ batadv_gw_init(bat_priv);
+ batadv_mcast_init(bat_priv);
+@@ -222,8 +234,20 @@ int batadv_mesh_init(struct net_device *soft_iface)
+
+ return 0;
+
+-err:
+- batadv_mesh_free(soft_iface);
++err_nc:
++ batadv_dat_free(bat_priv);
++err_dat:
++ batadv_bla_free(bat_priv);
++err_bla:
++ batadv_v_mesh_free(bat_priv);
++err_v:
++ batadv_tt_free(bat_priv);
++err_tt:
++ batadv_originator_free(bat_priv);
++err_orig:
++ batadv_purge_outstanding_packets(bat_priv, NULL);
++ atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
++
+ return ret;
+ }
+
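The batadv_mesh_init() rewrite replaces the catch-all "err: batadv_mesh_free()" with a reverse-order unwind ladder, so a failure in step N tears down only the steps that actually succeeded; the old path could free state that was never initialised. This is the standard kernel error-handling idiom, sketched here as self-contained C:

        #include <stdio.h>

        static int init_a(void) { return 0; }
        static int init_b(void) { return 0; }
        static int init_c(void) { return -1; }  /* pretend step C fails */
        static void free_a(void) { puts("undo a"); }
        static void free_b(void) { puts("undo b"); }

        static int setup(void)
        {
                int ret;

                ret = init_a();
                if (ret)
                        goto err_a;
                ret = init_b();
                if (ret)
                        goto err_b;
                ret = init_c();
                if (ret)
                        goto err_c;
                return 0;

                /* Unwind strictly in reverse order of construction. */
        err_c:
                free_b();
        err_b:
                free_a();
        err_a:
                return ret;
        }

        int main(void) { return setup() ? 1 : 0; }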
+diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
+index 4bb76b434d071..b175043efdaf6 100644
+--- a/net/batman-adv/network-coding.c
++++ b/net/batman-adv/network-coding.c
+@@ -152,8 +152,10 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
+ &batadv_nc_coding_hash_lock_class_key);
+
+ bat_priv->nc.decoding_hash = batadv_hash_new(128);
+- if (!bat_priv->nc.decoding_hash)
++ if (!bat_priv->nc.decoding_hash) {
++ batadv_hash_destroy(bat_priv->nc.coding_hash);
+ goto err;
++ }
+
+ batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
+ &batadv_nc_decoding_hash_lock_class_key);
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 434b4f0429092..87626894a3468 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -4193,8 +4193,10 @@ int batadv_tt_init(struct batadv_priv *bat_priv)
+ return ret;
+
+ ret = batadv_tt_global_init(bat_priv);
+- if (ret < 0)
++ if (ret < 0) {
++ batadv_tt_local_table_free(bat_priv);
+ return ret;
++ }
+
+ batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1,
+ batadv_tt_tvlv_unicast_handler_v1,
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 693f15a056304..9cb47618d4869 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3246,6 +3246,12 @@ static u16 skb_tx_hash(const struct net_device *dev,
+
+ qoffset = sb_dev->tc_to_txq[tc].offset;
+ qcount = sb_dev->tc_to_txq[tc].count;
++ if (unlikely(!qcount)) {
++ net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
++ sb_dev->name, qoffset, tc);
++ qoffset = 0;
++ qcount = dev->real_num_tx_queues;
++ }
+ }
+
+ if (skb_rx_queue_recorded(skb)) {
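+
skb_tx_hash() ultimately maps a hash into [0, qcount) (the kernel uses reciprocal_scale() for this), so a traffic class whose tc_to_txq entry was left with count == 0 would mean a division by zero or an out-of-range queue. The hunk warns and falls back to the device's full queue range; a generic sketch of the guard (plain C, with modulo standing in for reciprocal_scale()):

        #include <stdint.h>
        #include <stdio.h>

        /* Pick a queue index for hash, tolerating a misconfigured class. */
        static uint16_t pick_queue(uint32_t hash, uint16_t qoffset,
                                   uint16_t qcount,
                                   uint16_t real_num_tx_queues)
        {
                if (qcount == 0) {      /* empty class: would divide by zero */
                        fprintf(stderr, "invalid qcount, falling back\n");
                        qoffset = 0;
                        qcount = real_num_tx_queues;  /* >= 1 for a live dev */
                }
                return qoffset + (uint16_t)(hash % qcount);
        }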
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index f6197774048b6..b2e49eb7001d6 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1973,9 +1973,9 @@ int netdev_register_kobject(struct net_device *ndev)
+ int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
+ const struct net *net_new)
+ {
++ kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID;
++ kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID;
+ struct device *dev = &ndev->dev;
+- kuid_t old_uid, new_uid;
+- kgid_t old_gid, new_gid;
+ int error;
+
+ net_ns_get_ownership(net_old, &old_uid, &old_gid);
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index d3e9386b493eb..9d068153c3168 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -232,6 +232,7 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
+ bool cork = false, enospc = sk_msg_full(msg);
+ struct sock *sk_redir;
+ u32 tosend, delta = 0;
++ u32 eval = __SK_NONE;
+ int ret;
+
+ more_data:
+@@ -275,13 +276,24 @@ more_data:
+ case __SK_REDIRECT:
+ sk_redir = psock->sk_redir;
+ sk_msg_apply_bytes(psock, tosend);
++ if (!psock->apply_bytes) {
++ /* Clean up before releasing the sock lock. */
++ eval = psock->eval;
++ psock->eval = __SK_NONE;
++ psock->sk_redir = NULL;
++ }
+ if (psock->cork) {
+ cork = true;
+ psock->cork = NULL;
+ }
+ sk_msg_return(sk, msg, tosend);
+ release_sock(sk);
++
+ ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
++
++ if (eval == __SK_REDIRECT)
++ sock_put(sk_redir);
++
+ lock_sock(sk);
+ if (unlikely(ret < 0)) {
+ int free = sk_msg_free_nocharge(sk, msg);
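+
The tcp_bpf hunk closes a use-after-free window: once apply_bytes is exhausted, psock->sk_redir is about to be replaced, yet the old code still used it after release_sock() dropped the lock protecting it. The fix snapshots the verdict and clears the psock fields while still locked, then sock_put()s the stashed socket only after the redirected send has used it. The ownership-handoff pattern, sketched with a hypothetical do_redirect() helper:

        #include <linux/skmsg.h>        /* struct sk_psock */
        #include <net/sock.h>           /* lock_sock(), sock_put() */

        static void redirect_once(struct sock *sk, struct sk_psock *psock)
        {
                struct sock *sk_redir;

                lock_sock(sk);
                sk_redir = psock->sk_redir;     /* reference held by psock */
                psock->sk_redir = NULL;         /* take ownership under lock */
                release_sock(sk);

                if (!sk_redir)
                        return;

                do_redirect(sk_redir);  /* hypothetical; pointer is ours now */
                sock_put(sk_redir);     /* drop the reference exactly once */
        }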
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index 97095b7c9c648..5dcfd53a4ab6c 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -672,7 +672,7 @@ ieee80211_mesh_update_bss_params(struct ieee80211_sub_if_data *sdata,
+ u8 *ie, u8 ie_len)
+ {
+ struct ieee80211_supported_band *sband;
+- const u8 *cap;
++ const struct element *cap;
+ const struct ieee80211_he_operation *he_oper = NULL;
+
+ sband = ieee80211_get_sband(sdata);
+@@ -687,9 +687,10 @@ ieee80211_mesh_update_bss_params(struct ieee80211_sub_if_data *sdata,
+
+ sdata->vif.bss_conf.he_support = true;
+
+- cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, ie, ie_len);
+- if (cap && cap[1] >= ieee80211_he_oper_size(&cap[3]))
+- he_oper = (void *)(cap + 3);
++ cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ie_len);
++ if (cap && cap->datalen >= 1 + sizeof(*he_oper) &&
++ cap->datalen >= 1 + ieee80211_he_oper_size(cap->data + 1))
++ he_oper = (void *)(cap->data + 1);
+
+ if (he_oper)
+ sdata->vif.bss_conf.he_oper.params =
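+
The mesh fix switches from cfg80211_find_ext_ie(), which hands back a raw byte pointer, to cfg80211_find_ext_elem(), which returns a struct element whose datalen can be checked before anything behind the extension ID byte is dereferenced; the old code read cap[1] and cap[3] without first proving the element was that long. A generic sketch of length-checked TLV access (plain C, element layout matching the kernel's struct element):

        #include <stddef.h>
        #include <stdint.h>

        struct element {
                uint8_t id;
                uint8_t datalen;
                uint8_t data[]; /* for extension IEs, data[0] is the ext ID */
        };

        /* Return the payload after the extension ID, or NULL if the
         * element is too short to hold `need` payload bytes. */
        static const uint8_t *ext_elem_payload(const struct element *elem,
                                               size_t need)
        {
                if (!elem || elem->datalen < 1 + need)
                        return NULL;
                return elem->data + 1;
        }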
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 32df65f68c123..fb3da4d8f4a34 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -156,6 +156,12 @@ static enum sctp_disposition __sctp_sf_do_9_1_abort(
+ void *arg,
+ struct sctp_cmd_seq *commands);
+
++static enum sctp_disposition
++__sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
++ const struct sctp_association *asoc,
++ const union sctp_subtype type, void *arg,
++ struct sctp_cmd_seq *commands);
++
+ /* Small helper function that checks if the chunk length
+ * is of the appropriate length. The 'required_length' argument
+ * is set to be the size of a specific chunk we are testing.
+@@ -337,6 +343,14 @@ enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net,
+ if (!chunk->singleton)
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
++ /* Make sure that the INIT chunk has a valid length.
++ * Normally, this would cause an ABORT with a Protocol Violation
++ * error, but since we don't have an association, we'll
++ * just discard the packet.
++ */
++ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
+ /* If the packet is an OOTB packet which is temporarily on the
+ * control endpoint, respond with an ABORT.
+ */
+@@ -351,14 +365,6 @@ enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net,
+ if (chunk->sctp_hdr->vtag != 0)
+ return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
+
+- /* Make sure that the INIT chunk has a valid length.
+- * Normally, this would cause an ABORT with a Protocol Violation
+- * error, but since we don't have an association, we'll
+- * just discard the packet.
+- */
+- if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
+- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+-
+ /* If the INIT is coming toward a closing socket, we'll send back
+ * an ABORT. Essentially, this catches the race of INIT being
+ * backlogged to the socket at the same time as the user issues close().
+@@ -704,6 +710,9 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
+ struct sock *sk;
+ int error = 0;
+
++ if (asoc && !sctp_vtag_verify(chunk, asoc))
++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
+ /* If the packet is an OOTB packet which is temporarily on the
+ * control endpoint, respond with an ABORT.
+ */
+@@ -718,7 +727,8 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
+ * in sctp_unpack_cookie().
+ */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
+- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
++ commands);
+
+ /* If the endpoint is not listening or if the number of associations
+ * on the TCP-style socket exceed the max backlog, respond with an
+@@ -1524,20 +1534,16 @@ static enum sctp_disposition sctp_sf_do_unexpected_init(
+ if (!chunk->singleton)
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
++ /* Make sure that the INIT chunk has a valid length. */
++ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
+ /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
+ * Tag.
+ */
+ if (chunk->sctp_hdr->vtag != 0)
+ return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
+
+- /* Make sure that the INIT chunk has a valid length.
+- * In this case, we generate a protocol violation since we have
+- * an association established.
+- */
+- if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
+- return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+- commands);
+-
+ if (SCTP_INPUT_CB(chunk->skb)->encap_port != chunk->transport->encap_port)
+ return sctp_sf_new_encap_port(net, ep, asoc, type, arg, commands);
+
+@@ -1882,9 +1888,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
+ * its peer.
+ */
+ if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) {
+- disposition = sctp_sf_do_9_2_reshutack(net, ep, asoc,
+- SCTP_ST_CHUNK(chunk->chunk_hdr->type),
+- chunk, commands);
++ disposition = __sctp_sf_do_9_2_reshutack(net, ep, asoc,
++ SCTP_ST_CHUNK(chunk->chunk_hdr->type),
++ chunk, commands);
+ if (SCTP_DISPOSITION_NOMEM == disposition)
+ goto nomem;
+
+@@ -2202,9 +2208,11 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
+ * enough for the chunk header. Cookie length verification is
+ * done later.
+ */
+- if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
+- return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+- commands);
++ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) {
++ if (!sctp_vtag_verify(chunk, asoc))
++ asoc = NULL;
++ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands);
++ }
+
+ /* "Decode" the chunk. We have no optional parameters so we
+ * are in good shape.
+@@ -2341,7 +2349,7 @@ enum sctp_disposition sctp_sf_shutdown_pending_abort(
+ */
+ if (SCTP_ADDR_DEL ==
+ sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
+- return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ if (!sctp_err_chunk_valid(chunk))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+@@ -2387,7 +2395,7 @@ enum sctp_disposition sctp_sf_shutdown_sent_abort(
+ */
+ if (SCTP_ADDR_DEL ==
+ sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
+- return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ if (!sctp_err_chunk_valid(chunk))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+@@ -2657,7 +2665,7 @@ enum sctp_disposition sctp_sf_do_9_1_abort(
+ */
+ if (SCTP_ADDR_DEL ==
+ sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
+- return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ if (!sctp_err_chunk_valid(chunk))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+@@ -2970,13 +2978,11 @@ enum sctp_disposition sctp_sf_do_9_2_shut_ctsn(
+ * that belong to this association, it should discard the INIT chunk and
+ * retransmit the SHUTDOWN ACK chunk.
+ */
+-enum sctp_disposition sctp_sf_do_9_2_reshutack(
+- struct net *net,
+- const struct sctp_endpoint *ep,
+- const struct sctp_association *asoc,
+- const union sctp_subtype type,
+- void *arg,
+- struct sctp_cmd_seq *commands)
++static enum sctp_disposition
++__sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
++ const struct sctp_association *asoc,
++ const union sctp_subtype type, void *arg,
++ struct sctp_cmd_seq *commands)
+ {
+ struct sctp_chunk *chunk = arg;
+ struct sctp_chunk *reply;
+@@ -3010,6 +3016,26 @@ nomem:
+ return SCTP_DISPOSITION_NOMEM;
+ }
+
++enum sctp_disposition
++sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
++ const struct sctp_association *asoc,
++ const union sctp_subtype type, void *arg,
++ struct sctp_cmd_seq *commands)
++{
++ struct sctp_chunk *chunk = arg;
++
++ if (!chunk->singleton)
++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
++ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
++ if (chunk->sctp_hdr->vtag != 0)
++ return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
++
++ return __sctp_sf_do_9_2_reshutack(net, ep, asoc, type, arg, commands);
++}
++
+ /*
+ * sctp_sf_do_ecn_cwr
+ *
+@@ -3662,6 +3688,9 @@ enum sctp_disposition sctp_sf_ootb(struct net *net,
+
+ SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
+
++ if (asoc && !sctp_vtag_verify(chunk, asoc))
++ asoc = NULL;
++
+ ch = (struct sctp_chunkhdr *)chunk->chunk_hdr;
+ do {
+ /* Report violation if the chunk is less then minimal */
+@@ -3777,12 +3806,6 @@ static enum sctp_disposition sctp_sf_shut_8_4_5(
+
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+
+- /* If the chunk length is invalid, we don't want to process
+- * the reset of the packet.
+- */
+- if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
+- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+-
+ /* We need to discard the rest of the packet to prevent
+ * potential bombing attacks from additional bundled chunks.
+ * This is documented in the SCTP Threats I-D.
+@@ -3810,6 +3833,9 @@ enum sctp_disposition sctp_sf_do_8_5_1_E_sa(struct net *net,
+ {
+ struct sctp_chunk *chunk = arg;
+
++ if (!sctp_vtag_verify(chunk, asoc))
++ asoc = NULL;
++
+ /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+@@ -3845,6 +3871,11 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
++ /* Make sure that the ASCONF ADDIP chunk has a valid length. */
++ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_addip_chunk)))
++ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
++ commands);
++
+ /* ADD-IP: Section 4.1.1
+ * This chunk MUST be sent in an authenticated way by using
+ * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
+@@ -3853,13 +3884,7 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
+ */
+ if (!asoc->peer.asconf_capable ||
+ (!net->sctp.addip_noauth && !chunk->auth))
+- return sctp_sf_discard_chunk(net, ep, asoc, type, arg,
+- commands);
+-
+- /* Make sure that the ASCONF ADDIP chunk has a valid length. */
+- if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_addip_chunk)))
+- return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+- commands);
++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ hdr = (struct sctp_addiphdr *)chunk->skb->data;
+ serial = ntohl(hdr->serial);
+@@ -3988,6 +4013,12 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
++ /* Make sure that the ADDIP chunk has a valid length. */
++ if (!sctp_chunk_length_valid(asconf_ack,
++ sizeof(struct sctp_addip_chunk)))
++ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
++ commands);
++
+ /* ADD-IP, Section 4.1.2:
+ * This chunk MUST be sent in an authenticated way by using
+ * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
+@@ -3996,14 +4027,7 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
+ */
+ if (!asoc->peer.asconf_capable ||
+ (!net->sctp.addip_noauth && !asconf_ack->auth))
+- return sctp_sf_discard_chunk(net, ep, asoc, type, arg,
+- commands);
+-
+- /* Make sure that the ADDIP chunk has a valid length. */
+- if (!sctp_chunk_length_valid(asconf_ack,
+- sizeof(struct sctp_addip_chunk)))
+- return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+- commands);
++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ addip_hdr = (struct sctp_addiphdr *)asconf_ack->skb->data;
+ rcvd_serial = ntohl(addip_hdr->serial);
+@@ -4575,6 +4599,9 @@ enum sctp_disposition sctp_sf_discard_chunk(struct net *net,
+ {
+ struct sctp_chunk *chunk = arg;
+
++ if (asoc && !sctp_vtag_verify(chunk, asoc))
++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
+ /* Make sure that the chunk has a valid length.
+ * Since we don't know the chunk type, we use a general
+ * chunkhdr structure to make a comparison.
+@@ -4642,6 +4669,9 @@ enum sctp_disposition sctp_sf_violation(struct net *net,
+ {
+ struct sctp_chunk *chunk = arg;
+
++ if (!sctp_vtag_verify(chunk, asoc))
++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
+ /* Make sure that the chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+@@ -6348,6 +6378,7 @@ static struct sctp_packet *sctp_ootb_pkt_new(
+ * yet.
+ */
+ switch (chunk->chunk_hdr->type) {
++ case SCTP_CID_INIT:
+ case SCTP_CID_INIT_ACK:
+ {
+ struct sctp_initack_chunk *initack;
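+
A recurring shape in the sctp hunks: chunk-length validation moves ahead of any code that reads chunk fields (including the verification tag), and handlers that can run with a stale or mismatched association now verify the vtag first and fall back to sctp_sf_pdiscard(). The invariant is simply "never interpret bytes you have not bounds-checked"; a compact sketch of that ordering:

        #include <arpa/inet.h>          /* ntohs() */
        #include <stdint.h>
        #include <string.h>

        struct chunkhdr { uint8_t type; uint8_t flags; uint16_t length; };

        /* Return the validated header start, or NULL if the buffer cannot
         * contain a chunk of at least required_length bytes. */
        static const uint8_t *parse_chunk(const uint8_t *buf, size_t buflen,
                                          size_t required_length)
        {
                struct chunkhdr hdr;
                uint16_t len;

                if (buflen < sizeof(hdr))   /* 1. the header itself must fit */
                        return NULL;
                memcpy(&hdr, buf, sizeof(hdr)); /* unaligned-safe read */
                len = ntohs(hdr.length);
                /* 2. check the claimed length before acting on any field */
                if (len < required_length || len > buflen)
                        return NULL;
                return buf;
        }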
+diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
+index c9391d38de85c..dc60c32bb70df 100644
+--- a/net/tipc/crypto.c
++++ b/net/tipc/crypto.c
+@@ -2285,43 +2285,53 @@ static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
+ u16 key_gen = msg_key_gen(hdr);
+ u16 size = msg_data_sz(hdr);
+ u8 *data = msg_data(hdr);
++ unsigned int keylen;
++
++ /* Verify that the claimed size actually fits in the packet */
++ if (unlikely(size < sizeof(struct tipc_aead_key) + TIPC_AEAD_KEYLEN_MIN)) {
++ pr_debug("%s: message data size is too small\n", rx->name);
++ goto exit;
++ }
++
++ keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));
++
++ /* Verify the supplied size values */
++ if (unlikely(size != keylen + sizeof(struct tipc_aead_key) ||
++ keylen > TIPC_AEAD_KEY_SIZE_MAX)) {
++ pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name);
++ goto exit;
++ }
+
+ spin_lock(&rx->lock);
+ if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) {
+ pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name,
+ rx->skey, key_gen, rx->key_gen);
+- goto exit;
++ goto exit_unlock;
+ }
+
+ /* Allocate memory for the key */
+ skey = kmalloc(size, GFP_ATOMIC);
+ if (unlikely(!skey)) {
+ pr_err("%s: unable to allocate memory for skey\n", rx->name);
+- goto exit;
++ goto exit_unlock;
+ }
+
+ /* Copy key from msg data */
+- skey->keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));
++ skey->keylen = keylen;
+ memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME);
+ memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32),
+ skey->keylen);
+
+- /* Sanity check */
+- if (unlikely(size != tipc_aead_key_size(skey))) {
+- kfree(skey);
+- skey = NULL;
+- goto exit;
+- }
+-
+ rx->key_gen = key_gen;
+ rx->skey_mode = msg_key_mode(hdr);
+ rx->skey = skey;
+ rx->nokey = 0;
+ mb(); /* for nokey flag */
+
+-exit:
++exit_unlock:
+ spin_unlock(&rx->lock);
+
++exit:
+ /* Schedule the key attaching on this crypto */
+ if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0)))
+ return true;
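+
The tipc_crypto_key_rcv() fix applies the same discipline to a key descriptor: both attacker-controlled sizes (the message data size and the embedded keylen) are cross-checked against each other and against TIPC_AEAD_KEY_SIZE_MAX before kmalloc() and memcpy() run, instead of sanity-checking after the copy. A self-contained sketch of that validation order (hypothetical limits standing in for the TIPC constants):

        #include <arpa/inet.h>          /* ntohl() */
        #include <stddef.h>
        #include <stdint.h>
        #include <string.h>

        #define ALG_NAME_LEN    32      /* hypothetical TIPC_AEAD_ALG_NAME */
        #define KEYLEN_MAX      64      /* hypothetical TIPC_AEAD_KEY_SIZE_MAX */

        struct aead_key { char alg[ALG_NAME_LEN]; uint32_t keylen;
                          uint8_t key[]; };

        /* Both sizes are attacker-controlled: check them against each
         * other and a hard ceiling before allocating or copying. */
        static int key_size_ok(const uint8_t *data, size_t size)
        {
                uint32_t keylen;

                if (size < sizeof(struct aead_key)) /* header must fit first */
                        return 0;
                memcpy(&keylen, data + ALG_NAME_LEN, sizeof(keylen));
                keylen = ntohl(keylen);
                return keylen <= KEYLEN_MAX &&
                       size == sizeof(struct aead_key) + keylen;
        }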
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 4feb95e34b64b..3580e73fb317f 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -35,6 +35,7 @@
+ * SOFTWARE.
+ */
+
++#include <linux/bug.h>
+ #include <linux/sched/signal.h>
+ #include <linux/module.h>
+ #include <linux/splice.h>
+@@ -43,6 +44,14 @@
+ #include <net/strparser.h>
+ #include <net/tls.h>
+
++noinline void tls_err_abort(struct sock *sk, int err)
++{
++ WARN_ON_ONCE(err >= 0);
++ /* sk->sk_err should contain a positive error code. */
++ sk->sk_err = -err;
++ sk_error_report(sk);
++}
++
+ static int __skb_nsg(struct sk_buff *skb, int offset, int len,
+ unsigned int recursion_level)
+ {
+@@ -419,7 +428,7 @@ int tls_tx_records(struct sock *sk, int flags)
+
+ tx_err:
+ if (rc < 0 && rc != -EAGAIN)
+- tls_err_abort(sk, EBADMSG);
++ tls_err_abort(sk, -EBADMSG);
+
+ return rc;
+ }
+@@ -450,7 +459,7 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
+
+ /* If err is already set on socket, return the same code */
+ if (sk->sk_err) {
+- ctx->async_wait.err = sk->sk_err;
++ ctx->async_wait.err = -sk->sk_err;
+ } else {
+ ctx->async_wait.err = err;
+ tls_err_abort(sk, err);
+@@ -763,7 +772,7 @@ static int tls_push_record(struct sock *sk, int flags,
+ msg_pl->sg.size + prot->tail_size, i);
+ if (rc < 0) {
+ if (rc != -EINPROGRESS) {
+- tls_err_abort(sk, EBADMSG);
++ tls_err_abort(sk, -EBADMSG);
+ if (split) {
+ tls_ctx->pending_open_record_frags = true;
+ tls_merge_open_record(sk, rec, tmp, orig_end);
+@@ -1827,7 +1836,7 @@ int tls_sw_recvmsg(struct sock *sk,
+ err = decrypt_skb_update(sk, skb, &msg->msg_iter,
+ &chunk, &zc, async_capable);
+ if (err < 0 && err != -EINPROGRESS) {
+- tls_err_abort(sk, EBADMSG);
++ tls_err_abort(sk, -EBADMSG);
+ goto recv_end;
+ }
+
+@@ -2007,7 +2016,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
+ }
+
+ if (err < 0) {
+- tls_err_abort(sk, EBADMSG);
++ tls_err_abort(sk, -EBADMSG);
+ goto splice_read_end;
+ }
+ ctx->decrypted = 1;
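+
The tls_sw changes normalise the sign convention: kernel call sites report failure as negative errno values, while sk->sk_err (and the async wait error) must hold the positive code userspace will see. Callers therefore now pass -EBADMSG, and the new out-of-line tls_err_abort() negates it, warning once if a positive value slips through again. The convention in miniature:

        #include <assert.h>
        #include <errno.h>

        static int sock_err;    /* stand-in for sk->sk_err: positive errno */

        /* Callers report failure as negative errno; storage keeps it
         * positive. */
        static void err_abort(int err)
        {
                assert(err < 0);  /* mirrors the WARN_ON_ONCE(err >= 0) */
                sock_err = -err;
        }

        int main(void)
        {
                err_abort(-EBADMSG);
                return sock_err == EBADMSG ? 0 : 1;
        }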
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 03323121ca505..aaba847d79eb2 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -524,6 +524,7 @@ use_default_name:
+ INIT_WORK(&rdev->propagate_cac_done_wk, cfg80211_propagate_cac_done_wk);
+ INIT_WORK(&rdev->mgmt_registrations_update_wk,
+ cfg80211_mgmt_registrations_update_wk);
++ spin_lock_init(&rdev->mgmt_registrations_lock);
+
+ #ifdef CONFIG_CFG80211_DEFAULT_PS
+ rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+@@ -1279,7 +1280,6 @@ void cfg80211_init_wdev(struct wireless_dev *wdev)
+ INIT_LIST_HEAD(&wdev->event_list);
+ spin_lock_init(&wdev->event_lock);
+ INIT_LIST_HEAD(&wdev->mgmt_registrations);
+- spin_lock_init(&wdev->mgmt_registrations_lock);
+ INIT_LIST_HEAD(&wdev->pmsr_list);
+ spin_lock_init(&wdev->pmsr_lock);
+ INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk);
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index b35d0db12f1d5..1720abf36f92a 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -100,6 +100,8 @@ struct cfg80211_registered_device {
+ struct work_struct propagate_cac_done_wk;
+
+ struct work_struct mgmt_registrations_update_wk;
++ /* lock for all wdev lists */
++ spinlock_t mgmt_registrations_lock;
+
+ /* must be last because of the way we do wiphy_priv(),
+ * and it should at least be aligned to NETDEV_ALIGN */
+diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
+index 3aa69b375a107..783acd2c4211f 100644
+--- a/net/wireless/mlme.c
++++ b/net/wireless/mlme.c
+@@ -452,9 +452,9 @@ static void cfg80211_mgmt_registrations_update(struct wireless_dev *wdev)
+
+ lockdep_assert_held(&rdev->wiphy.mtx);
+
+- spin_lock_bh(&wdev->mgmt_registrations_lock);
++ spin_lock_bh(&rdev->mgmt_registrations_lock);
+ if (!wdev->mgmt_registrations_need_update) {
+- spin_unlock_bh(&wdev->mgmt_registrations_lock);
++ spin_unlock_bh(&rdev->mgmt_registrations_lock);
+ return;
+ }
+
+@@ -479,7 +479,7 @@ static void cfg80211_mgmt_registrations_update(struct wireless_dev *wdev)
+ rcu_read_unlock();
+
+ wdev->mgmt_registrations_need_update = 0;
+- spin_unlock_bh(&wdev->mgmt_registrations_lock);
++ spin_unlock_bh(&rdev->mgmt_registrations_lock);
+
+ rdev_update_mgmt_frame_registrations(rdev, wdev, &upd);
+ }
+@@ -503,6 +503,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
+ int match_len, bool multicast_rx,
+ struct netlink_ext_ack *extack)
+ {
++ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ struct cfg80211_mgmt_registration *reg, *nreg;
+ int err = 0;
+ u16 mgmt_type;
+@@ -548,7 +549,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
+ if (!nreg)
+ return -ENOMEM;
+
+- spin_lock_bh(&wdev->mgmt_registrations_lock);
++ spin_lock_bh(&rdev->mgmt_registrations_lock);
+
+ list_for_each_entry(reg, &wdev->mgmt_registrations, list) {
+ int mlen = min(match_len, reg->match_len);
+@@ -583,7 +584,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
+ list_add(&nreg->list, &wdev->mgmt_registrations);
+ }
+ wdev->mgmt_registrations_need_update = 1;
+- spin_unlock_bh(&wdev->mgmt_registrations_lock);
++ spin_unlock_bh(&rdev->mgmt_registrations_lock);
+
+ cfg80211_mgmt_registrations_update(wdev);
+
+@@ -591,7 +592,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
+
+ out:
+ kfree(nreg);
+- spin_unlock_bh(&wdev->mgmt_registrations_lock);
++ spin_unlock_bh(&rdev->mgmt_registrations_lock);
+
+ return err;
+ }
+@@ -602,7 +603,7 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+ struct cfg80211_mgmt_registration *reg, *tmp;
+
+- spin_lock_bh(&wdev->mgmt_registrations_lock);
++ spin_lock_bh(&rdev->mgmt_registrations_lock);
+
+ list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
+ if (reg->nlportid != nlportid)
+@@ -615,7 +616,7 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
+ schedule_work(&rdev->mgmt_registrations_update_wk);
+ }
+
+- spin_unlock_bh(&wdev->mgmt_registrations_lock);
++ spin_unlock_bh(&rdev->mgmt_registrations_lock);
+
+ if (nlportid && rdev->crit_proto_nlportid == nlportid) {
+ rdev->crit_proto_nlportid = 0;
+@@ -628,15 +629,16 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
+
+ void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
+ {
++ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ struct cfg80211_mgmt_registration *reg, *tmp;
+
+- spin_lock_bh(&wdev->mgmt_registrations_lock);
++ spin_lock_bh(&rdev->mgmt_registrations_lock);
+ list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
+ list_del(&reg->list);
+ kfree(reg);
+ }
+ wdev->mgmt_registrations_need_update = 1;
+- spin_unlock_bh(&wdev->mgmt_registrations_lock);
++ spin_unlock_bh(&rdev->mgmt_registrations_lock);
+
+ cfg80211_mgmt_registrations_update(wdev);
+ }
+@@ -784,7 +786,7 @@ bool cfg80211_rx_mgmt_khz(struct wireless_dev *wdev, int freq, int sig_dbm,
+ data = buf + ieee80211_hdrlen(mgmt->frame_control);
+ data_len = len - ieee80211_hdrlen(mgmt->frame_control);
+
+- spin_lock_bh(&wdev->mgmt_registrations_lock);
++ spin_lock_bh(&rdev->mgmt_registrations_lock);
+
+ list_for_each_entry(reg, &wdev->mgmt_registrations, list) {
+ if (reg->frame_type != ftype)
+@@ -808,7 +810,7 @@ bool cfg80211_rx_mgmt_khz(struct wireless_dev *wdev, int freq, int sig_dbm,
+ break;
+ }
+
+- spin_unlock_bh(&wdev->mgmt_registrations_lock);
++ spin_unlock_bh(&rdev->mgmt_registrations_lock);
+
+ trace_cfg80211_return_bool(result);
+ return result;
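+
The cfg80211 hunks widen the lock's scope rather than its hold time: the mgmt_registrations lists hang off each wdev, but registration updates walk and compare entries across all wdevs of one rdev, which a per-wdev spinlock could not order; the lock now lives in the shared parent. A sketch of the resulting locking shape (hypothetical structures):

        #include <linux/list.h>
        #include <linux/spinlock.h>

        struct parent {
                spinlock_t lists_lock;  /* one lock for all children's lists */
                struct list_head children;
        };

        struct child {
                struct list_head node;
                struct list_head registrations; /* guarded by lists_lock */
        };

        static void walk_all(struct parent *p)
        {
                struct child *c;

                spin_lock_bh(&p->lists_lock);
                list_for_each_entry(c, &p->children, node) {
                        /* every child's registrations may be inspected here */
                }
                spin_unlock_bh(&p->lists_lock);
        }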
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 7897b1478c3c0..d5bd9f015d8bc 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -418,14 +418,17 @@ cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss,
+ }
+ ssid_len = ssid[1];
+ ssid = ssid + 2;
+- rcu_read_unlock();
+
+ /* check if nontrans_bss is in the list */
+ list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) {
+- if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len))
++ if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) {
++ rcu_read_unlock();
+ return 0;
++ }
+ }
+
++ rcu_read_unlock();
++
+ /* add to the list */
+ list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list);
+ return 0;
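+
The scan.c fix is a classic RCU scoping bug: ssid points into RCU-protected IE data, so rcu_read_unlock() must not run until the last use of that pointer; the hunk simply moves the unlock past the list walk that still compares against ssid. The rule, in miniature (struct foo and use() are illustrative stubs):

        #include <linux/rcupdate.h>

        struct foo { int val; };
        static void use(struct foo *p) { (void)p->val; }

        static void reader(struct foo __rcu **slot)
        {
                struct foo *p;

                rcu_read_lock();
                p = rcu_dereference(*slot);
                if (p)
                        use(p);         /* every access stays inside */
                rcu_read_unlock();      /* only after the last use of p */
                /* p must not be touched here: it may already be freed */
        }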
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 18dba3d7c638b..a1a99a5749844 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -1028,14 +1028,14 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ !(rdev->wiphy.interface_modes & (1 << ntype)))
+ return -EOPNOTSUPP;
+
+- /* if it's part of a bridge, reject changing type to station/ibss */
+- if (netif_is_bridge_port(dev) &&
+- (ntype == NL80211_IFTYPE_ADHOC ||
+- ntype == NL80211_IFTYPE_STATION ||
+- ntype == NL80211_IFTYPE_P2P_CLIENT))
+- return -EBUSY;
+-
+ if (ntype != otype) {
++ /* if it's part of a bridge, reject changing type to station/ibss */
++ if (netif_is_bridge_port(dev) &&
++ (ntype == NL80211_IFTYPE_ADHOC ||
++ ntype == NL80211_IFTYPE_STATION ||
++ ntype == NL80211_IFTYPE_P2P_CLIENT))
++ return -EBUSY;
++
+ dev->ieee80211_ptr->use_4addr = false;
+ dev->ieee80211_ptr->mesh_id_up_len = 0;
+ wdev_lock(dev->ieee80211_ptr);
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index 064da7f3618d3..6b6dbd84cdeba 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -469,7 +469,7 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
+ return -EINVAL;
+
+ if (PRINT_FIELD(WEIGHT) &&
+- evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT, "WEIGHT", PERF_OUTPUT_WEIGHT))
++ evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT_TYPE, "WEIGHT", PERF_OUTPUT_WEIGHT))
+ return -EINVAL;
+
+ if (PRINT_FIELD(SYM) &&
+@@ -4024,11 +4024,15 @@ script_found:
+ goto out_delete;
+
+ uname(&uts);
+- if (data.is_pipe || /* assume pipe_mode indicates native_arch */
+- !strcmp(uts.machine, session->header.env.arch) ||
+- (!strcmp(uts.machine, "x86_64") &&
+- !strcmp(session->header.env.arch, "i386")))
++ if (data.is_pipe) { /* Assume pipe_mode indicates native_arch */
+ native_arch = true;
++ } else if (session->header.env.arch) {
++ if (!strcmp(uts.machine, session->header.env.arch))
++ native_arch = true;
++ else if (!strcmp(uts.machine, "x86_64") &&
++ !strcmp(session->header.env.arch, "i386"))
++ native_arch = true;
++ }
+
+ script.session = session;
+ script__setup_sample_type(&script);
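+
Finally, the perf change guards against session->header.env.arch being NULL (possible for older or pipe-fed perf.data files) before it ever reaches strcmp(), while preserving the special case of an x86_64 host reading an i386 report. The defensive shape, as a hypothetical helper:

        #include <stdbool.h>
        #include <string.h>

        static bool is_native_arch(const char *machine, const char *file_arch,
                                   bool is_pipe)
        {
                if (is_pipe)            /* pipe mode: assume native */
                        return true;
                if (!file_arch)         /* never strcmp() a NULL arch string */
                        return false;
                return !strcmp(machine, file_arch) ||
                       (!strcmp(machine, "x86_64") &&
                        !strcmp(file_arch, "i386"));
        }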