-rw-r--r--  0000_README               |    4
-rw-r--r--  1243_linux-5.4.244.patch  | 6926
2 files changed, 6930 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index b7bf2fd4..2bc77166 100644
--- a/0000_README
+++ b/0000_README
@@ -1015,6 +1015,10 @@ Patch: 1242_linux-5.4.243.patch
From: https://www.kernel.org
Desc: Linux 5.4.243
+Patch: 1243_linux-5.4.244.patch
+From: https://www.kernel.org
+Desc: Linux 5.4.244
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1243_linux-5.4.244.patch b/1243_linux-5.4.244.patch
new file mode 100644
index 00000000..baad88e5
--- /dev/null
+++ b/1243_linux-5.4.244.patch
@@ -0,0 +1,6926 @@
+diff --git a/Makefile b/Makefile
+index f660c3e224ec0..a12f1af9b03b1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 243
++SUBLEVEL = 244
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c
+index 1dbe98948ce30..9627c4cf3e41d 100644
+--- a/arch/arm/mach-sa1100/jornada720_ssp.c
++++ b/arch/arm/mach-sa1100/jornada720_ssp.c
+@@ -1,5 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+-/**
++/*
+ * arch/arm/mac-sa1100/jornada720_ssp.c
+ *
+ * Copyright (C) 2006/2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com>
+@@ -26,6 +26,7 @@ static unsigned long jornada_ssp_flags;
+
+ /**
+ * jornada_ssp_reverse - reverses input byte
++ * @byte: input byte to reverse
+ *
+ * we need to reverse all data we receive from the mcu due to its physical location
+ * returns : 01110111 -> 11101110
+@@ -46,6 +47,7 @@ EXPORT_SYMBOL(jornada_ssp_reverse);
+
+ /**
+ * jornada_ssp_byte - waits for ready ssp bus and sends byte
++ * @byte: input byte to transmit
+ *
+ * waits for fifo buffer to clear and then transmits, if it doesn't then we will
+ * timeout after <timeout> rounds. Needs mcu running before its called.
+@@ -77,6 +79,7 @@ EXPORT_SYMBOL(jornada_ssp_byte);
+
+ /**
+ * jornada_ssp_inout - decide if input is command or trading byte
++ * @byte: input byte to send (may be %TXDUMMY)
+ *
+ * returns : (jornada_ssp_byte(byte)) on success
+ * : %-ETIMEDOUT on timeout failure
+diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
+index f7121b775e5f0..ab5fec67e541c 100644
+--- a/arch/m68k/kernel/signal.c
++++ b/arch/m68k/kernel/signal.c
+@@ -883,11 +883,17 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
+ }
+
+ static inline void __user *
+-get_sigframe(struct ksignal *ksig, size_t frame_size)
++get_sigframe(struct ksignal *ksig, struct pt_regs *tregs, size_t frame_size)
+ {
+ unsigned long usp = sigsp(rdusp(), ksig);
++ unsigned long gap = 0;
+
+- return (void __user *)((usp - frame_size) & -8UL);
++ if (CPU_IS_020_OR_030 && tregs->format == 0xb) {
++ /* USP is unreliable so use worst-case value */
++ gap = 256;
++ }
++
++ return (void __user *)((usp - gap - frame_size) & -8UL);
+ }
+
+ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+@@ -905,7 +911,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+ return -EFAULT;
+ }
+
+- frame = get_sigframe(ksig, sizeof(*frame) + fsize);
++ frame = get_sigframe(ksig, tregs, sizeof(*frame) + fsize);
+
+ if (fsize)
+ err |= copy_to_user (frame + 1, regs + 1, fsize);
+@@ -976,7 +982,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ return -EFAULT;
+ }
+
+- frame = get_sigframe(ksig, sizeof(*frame));
++ frame = get_sigframe(ksig, tregs, sizeof(*frame));
+
+ if (fsize)
+ err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
+diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
+index 0c83644bfa5cb..b4076ac51005e 100644
+--- a/arch/parisc/include/asm/cacheflush.h
++++ b/arch/parisc/include/asm/cacheflush.h
+@@ -57,6 +57,11 @@ extern void flush_dcache_page(struct page *page);
+
+ #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
+ #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
++#define flush_dcache_mmap_lock_irqsave(mapping, flags) \
++ xa_lock_irqsave(&mapping->i_pages, flags)
++#define flush_dcache_mmap_unlock_irqrestore(mapping, flags) \
++ xa_unlock_irqrestore(&mapping->i_pages, flags)
++
+
+ #define flush_icache_page(vma,page) do { \
+ flush_kernel_dcache_page(page); \
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index a82b3eaa53989..fd84789b5ca4b 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -328,6 +328,7 @@ void flush_dcache_page(struct page *page)
+ struct vm_area_struct *mpnt;
+ unsigned long offset;
+ unsigned long addr, old_addr = 0;
++ unsigned long flags;
+ pgoff_t pgoff;
+
+ if (mapping && !mapping_mapped(mapping)) {
+@@ -347,7 +348,7 @@ void flush_dcache_page(struct page *page)
+ * declared as MAP_PRIVATE or MAP_SHARED), so we only need
+ * to flush one address here for them all to become coherent */
+
+- flush_dcache_mmap_lock(mapping);
++ flush_dcache_mmap_lock_irqsave(mapping, flags);
+ vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
+ offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
+ addr = mpnt->vm_start + offset;
+@@ -370,7 +371,7 @@ void flush_dcache_page(struct page *page)
+ old_addr = addr;
+ }
+ }
+- flush_dcache_mmap_unlock(mapping);
++ flush_dcache_mmap_unlock_irqrestore(mapping, flags);
+ }
+ EXPORT_SYMBOL(flush_dcache_page);
+
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index 230a6422b99f3..0e67ab681b4e9 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -124,13 +124,18 @@ void machine_power_off(void)
+ /* It seems we have no way to power the system off via
+ * software. The user has to press the button himself. */
+
+- printk(KERN_EMERG "System shut down completed.\n"
+- "Please power this system off now.");
++ printk("Power off or press RETURN to reboot.\n");
+
+ /* prevent soft lockup/stalled CPU messages for endless loop. */
+ rcu_sysrq_start();
+ lockup_detector_soft_poweroff();
+- for (;;);
++ while (1) {
++ /* reboot if user presses RETURN key */
++ if (pdc_iodc_getc() == 13) {
++ printk("Rebooting...\n");
++ machine_restart(NULL);
++ }
++ }
+ }
+
+ void (*pm_power_off)(void);
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index 37988f7f3abcb..10776991296a0 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -305,8 +305,8 @@ static void handle_break(struct pt_regs *regs)
+ #endif
+
+ #ifdef CONFIG_KGDB
+- if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
+- iir == PARISC_KGDB_BREAK_INSN)) {
++ if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
++ iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) {
+ kgdb_handle_exception(9, SIGTRAP, 0, regs);
+ return;
+ }
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index bdcb07a98cd37..dc8f3fb02ac21 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -1018,8 +1018,8 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
+ pte_t entry, unsigned long address, int psize)
+ {
+ struct mm_struct *mm = vma->vm_mm;
+- unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
+- _PAGE_RW | _PAGE_EXEC);
++ unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY |
++ _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+
+ unsigned long change = pte_val(entry) ^ pte_val(*ptep);
+ /*
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+index c1d6d8bbb7dad..6fdd863198ec2 100644
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -96,6 +96,11 @@
+ #define INTEL_FAM6_LAKEFIELD 0x8A
+ #define INTEL_FAM6_ALDERLAKE 0x97
+ #define INTEL_FAM6_ALDERLAKE_L 0x9A
++#define INTEL_FAM6_ALDERLAKE_N 0xBE
++
++#define INTEL_FAM6_RAPTORLAKE 0xB7
++#define INTEL_FAM6_RAPTORLAKE_P 0xBA
++#define INTEL_FAM6_RAPTORLAKE_S 0xBF
+
+ /* "Small Core" Processors (Atom) */
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 4bc476d7fa6c4..80239c84b4ddf 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -563,6 +563,7 @@ struct kvm_vcpu_arch {
+ u64 ia32_misc_enable_msr;
+ u64 smbase;
+ u64 smi_count;
++ bool at_instruction_boundary;
+ bool tpr_access_reporting;
+ u64 ia32_xss;
+ u64 microcode_version;
+@@ -981,6 +982,8 @@ struct kvm_vcpu_stat {
+ u64 irq_injections;
+ u64 nmi_injections;
+ u64 req_event;
++ u64 preemption_reported;
++ u64 preemption_other;
+ };
+
+ struct x86_instruction_info;
+diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
+index 24da5ee4f0220..5729ed7bb3e7d 100644
+--- a/arch/x86/kernel/cpu/topology.c
++++ b/arch/x86/kernel/cpu/topology.c
+@@ -79,7 +79,7 @@ int detect_extended_topology_early(struct cpuinfo_x86 *c)
+ * initial apic id, which also represents 32-bit extended x2apic id.
+ */
+ c->initial_apicid = edx;
+- smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
++ smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
+ #endif
+ return 0;
+ }
+@@ -107,7 +107,8 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
+ */
+ cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
+ c->initial_apicid = edx;
+- core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
++ core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
++ smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
+ core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+ die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
+ die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index e72042dc9487c..9b2bbb66d0c87 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -171,7 +171,6 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+ printk("%sCall Trace:\n", log_lvl);
+
+ unwind_start(&state, task, regs, stack);
+- stack = stack ? : get_stack_pointer(task, regs);
+ regs = unwind_get_entry_regs(&state, &partial);
+
+ /*
+@@ -190,9 +189,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+ * - hardirq stack
+ * - entry stack
+ */
+- for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
++ for (stack = stack ?: get_stack_pointer(task, regs);
++ stack;
++ stack = stack_info.next_sp) {
+ const char *stack_name;
+
++ stack = PTR_ALIGN(stack, sizeof(long));
++
+ if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
+ /*
+ * We weren't on a valid stack. It's possible that
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index c5a9de8d07250..e9444e202c334 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -6246,7 +6246,8 @@ out:
+
+ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+ {
+-
++ if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
++ vcpu->arch.at_instruction_boundary = true;
+ }
+
+ static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 9bd08d2646036..c930708297909 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6358,6 +6358,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
+ );
+
+ kvm_after_interrupt(vcpu);
++ vcpu->arch.at_instruction_boundary = true;
+ }
+ STACK_FRAME_NON_STANDARD(handle_external_interrupt_irqoff);
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index f5e9590a8f311..d152afdfa8b4f 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -207,6 +207,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
+ { "nmi_injections", VCPU_STAT(nmi_injections) },
+ { "req_event", VCPU_STAT(req_event) },
+ { "l1d_flush", VCPU_STAT(l1d_flush) },
++ { "preemption_reported", VCPU_STAT(preemption_reported) },
++ { "preemption_other", VCPU_STAT(preemption_other) },
+ { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
+ { "mmu_pte_write", VM_STAT(mmu_pte_write) },
+ { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
+@@ -3562,6 +3564,19 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
+ struct kvm_host_map map;
+ struct kvm_steal_time *st;
+
++ /*
++ * The vCPU can be marked preempted if and only if the VM-Exit was on
++ * an instruction boundary and will not trigger guest emulation of any
++ * kind (see vcpu_run). Vendor specific code controls (conservatively)
++ * when this is true, for example allowing the vCPU to be marked
++ * preempted if and only if the VM-Exit was due to a host interrupt.
++ */
++ if (!vcpu->arch.at_instruction_boundary) {
++ vcpu->stat.preemption_other++;
++ return;
++ }
++
++ vcpu->stat.preemption_reported++;
+ if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+ return;
+
+@@ -8446,6 +8461,13 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
+ vcpu->arch.l1tf_flush_l1d = true;
+
+ for (;;) {
++ /*
++ * If another guest vCPU requests a PV TLB flush in the middle
++ * of instruction emulation, the rest of the emulation could
++ * use a stale page translation. Assume that any code after
++ * this point can start executing an instruction.
++ */
++ vcpu->arch.at_instruction_boundary = false;
+ if (kvm_vcpu_running(vcpu)) {
+ r = vcpu_enter_guest(vcpu);
+ } else {
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index af352e228fa2b..38e6798ce44fc 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -9,6 +9,7 @@
+ #include <linux/sched/task.h>
+
+ #include <asm/set_memory.h>
++#include <asm/cpu_device_id.h>
+ #include <asm/e820/api.h>
+ #include <asm/init.h>
+ #include <asm/page.h>
+@@ -208,6 +209,24 @@ static void __init probe_page_size_mask(void)
+ }
+ }
+
++#define INTEL_MATCH(_model) { .vendor = X86_VENDOR_INTEL, \
++ .family = 6, \
++ .model = _model, \
++ }
++/*
++ * INVLPG may not properly flush Global entries
++ * on these CPUs when PCIDs are enabled.
++ */
++static const struct x86_cpu_id invlpg_miss_ids[] = {
++ INTEL_MATCH(INTEL_FAM6_ALDERLAKE ),
++ INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ),
++ INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ),
++ INTEL_MATCH(INTEL_FAM6_RAPTORLAKE ),
++ INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P),
++ INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S),
++ {}
++};
++
+ static void setup_pcid(void)
+ {
+ if (!IS_ENABLED(CONFIG_X86_64))
+@@ -216,6 +235,12 @@ static void setup_pcid(void)
+ if (!boot_cpu_has(X86_FEATURE_PCID))
+ return;
+
++ if (x86_match_cpu(invlpg_miss_ids)) {
++ pr_info("Incomplete global flushes, disabling PCID");
++ setup_clear_cpu_cap(X86_FEATURE_PCID);
++ return;
++ }
++
+ if (boot_cpu_has(X86_FEATURE_PGE)) {
+ /*
+ * This can't be cr4_set_bits_and_update_boot() -- the
+diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
+index 63fe30e868075..7f14403165dd1 100644
+--- a/drivers/acpi/acpica/dbnames.c
++++ b/drivers/acpi/acpica/dbnames.c
+@@ -571,6 +571,9 @@ acpi_status acpi_db_display_objects(char *obj_type_arg, char *display_count_arg)
+ object_info =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_object_info));
+
++ if (!object_info)
++ return (AE_NO_MEMORY);
++
+ /* Walk the namespace from the root */
+
+ (void)acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
+index de79f835a3737..7979d52dfbc96 100644
+--- a/drivers/acpi/acpica/dswstate.c
++++ b/drivers/acpi/acpica/dswstate.c
+@@ -576,9 +576,14 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
+ ACPI_FUNCTION_TRACE(ds_init_aml_walk);
+
+ walk_state->parser_state.aml =
+- walk_state->parser_state.aml_start = aml_start;
+- walk_state->parser_state.aml_end =
+- walk_state->parser_state.pkg_end = aml_start + aml_length;
++ walk_state->parser_state.aml_start =
++ walk_state->parser_state.aml_end =
++ walk_state->parser_state.pkg_end = aml_start;
++ /* Avoid undefined behavior: applying zero offset to null pointer */
++ if (aml_length != 0) {
++ walk_state->parser_state.aml_end += aml_length;
++ walk_state->parser_state.pkg_end += aml_length;
++ }
+
+ /* The next_op of the next_walk will be the beginning of the method */
+
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index defc5796b5084..c7baccd47b89f 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1118,6 +1118,7 @@ static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
+ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
+ {
+ acpi_ec_remove_query_handlers(ec, false, query_bit);
++ flush_workqueue(ec_query_wq);
+ }
+ EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
+
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 1b016fdd1a750..f8e157ede44f8 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -3474,6 +3474,13 @@ void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
+ }
+ EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
+
++void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
++{
++ dev->fwnode = fwnode;
++ dev->of_node = to_of_node(fwnode);
++}
++EXPORT_SYMBOL_GPL(device_set_node);
++
+ int device_match_name(struct device *dev, const void *name)
+ {
+ return sysfs_streq(dev_name(dev), name);
+diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
+index 7f4b3b62492ca..7fdd702e564ae 100644
+--- a/drivers/base/regmap/regcache.c
++++ b/drivers/base/regmap/regcache.c
+@@ -343,6 +343,9 @@ int regcache_sync(struct regmap *map)
+ const char *name;
+ bool bypass;
+
++ if (WARN_ON(map->cache_type == REGCACHE_NONE))
++ return -EINVAL;
++
+ BUG_ON(!map->cache_ops);
+
+ map->lock(map->lock_arg);
+@@ -412,6 +415,9 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
+ const char *name;
+ bool bypass;
+
++ if (WARN_ON(map->cache_type == REGCACHE_NONE))
++ return -EINVAL;
++
+ BUG_ON(!map->cache_ops);
+
+ map->lock(map->lock_arg);
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index 9c61be2afca7e..5456d8e2eef28 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -83,6 +83,22 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"),
+ },
+ },
++ {
++ .callback = tpm_tis_disable_irq,
++ .ident = "ThinkStation P360 Tiny",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P360 Tiny"),
++ },
++ },
++ {
++ .callback = tpm_tis_disable_irq,
++ .ident = "ThinkPad L490",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L490"),
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
+index 3e0f04f0e16e5..3f74497d73e58 100644
+--- a/drivers/clk/tegra/clk-tegra20.c
++++ b/drivers/clk/tegra/clk-tegra20.c
+@@ -18,24 +18,24 @@
+ #define MISC_CLK_ENB 0x48
+
+ #define OSC_CTRL 0x50
+-#define OSC_CTRL_OSC_FREQ_MASK (3<<30)
+-#define OSC_CTRL_OSC_FREQ_13MHZ (0<<30)
+-#define OSC_CTRL_OSC_FREQ_19_2MHZ (1<<30)
+-#define OSC_CTRL_OSC_FREQ_12MHZ (2<<30)
+-#define OSC_CTRL_OSC_FREQ_26MHZ (3<<30)
+-#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK)
+-
+-#define OSC_CTRL_PLL_REF_DIV_MASK (3<<28)
+-#define OSC_CTRL_PLL_REF_DIV_1 (0<<28)
+-#define OSC_CTRL_PLL_REF_DIV_2 (1<<28)
+-#define OSC_CTRL_PLL_REF_DIV_4 (2<<28)
++#define OSC_CTRL_OSC_FREQ_MASK (3u<<30)
++#define OSC_CTRL_OSC_FREQ_13MHZ (0u<<30)
++#define OSC_CTRL_OSC_FREQ_19_2MHZ (1u<<30)
++#define OSC_CTRL_OSC_FREQ_12MHZ (2u<<30)
++#define OSC_CTRL_OSC_FREQ_26MHZ (3u<<30)
++#define OSC_CTRL_MASK (0x3f2u | OSC_CTRL_OSC_FREQ_MASK)
++
++#define OSC_CTRL_PLL_REF_DIV_MASK (3u<<28)
++#define OSC_CTRL_PLL_REF_DIV_1 (0u<<28)
++#define OSC_CTRL_PLL_REF_DIV_2 (1u<<28)
++#define OSC_CTRL_PLL_REF_DIV_4 (2u<<28)
+
+ #define OSC_FREQ_DET 0x58
+-#define OSC_FREQ_DET_TRIG (1<<31)
++#define OSC_FREQ_DET_TRIG (1u<<31)
+
+ #define OSC_FREQ_DET_STATUS 0x5c
+-#define OSC_FREQ_DET_BUSY (1<<31)
+-#define OSC_FREQ_DET_CNT_MASK 0xFFFF
++#define OSC_FREQ_DET_BUSYu (1<<31)
++#define OSC_FREQ_DET_CNT_MASK 0xFFFFu
+
+ #define TEGRA20_CLK_PERIPH_BANKS 3
+
+diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
+index e497785cd99fe..b0e8752174c6f 100644
+--- a/drivers/firmware/arm_sdei.c
++++ b/drivers/firmware/arm_sdei.c
+@@ -44,6 +44,8 @@ static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
+ /* entry point from firmware to arch asm code */
+ static unsigned long sdei_entry_point;
+
++static int sdei_hp_state;
++
+ struct sdei_event {
+ /* These three are protected by the sdei_list_lock */
+ struct list_head list;
+@@ -305,8 +307,6 @@ int sdei_mask_local_cpu(void)
+ {
+ int err;
+
+- WARN_ON_ONCE(preemptible());
+-
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
+ if (err && err != -EIO) {
+ pr_warn_once("failed to mask CPU[%u]: %d\n",
+@@ -319,6 +319,7 @@ int sdei_mask_local_cpu(void)
+
+ static void _ipi_mask_cpu(void *ignored)
+ {
++ WARN_ON_ONCE(preemptible());
+ sdei_mask_local_cpu();
+ }
+
+@@ -326,8 +327,6 @@ int sdei_unmask_local_cpu(void)
+ {
+ int err;
+
+- WARN_ON_ONCE(preemptible());
+-
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
+ if (err && err != -EIO) {
+ pr_warn_once("failed to unmask CPU[%u]: %d\n",
+@@ -340,6 +339,7 @@ int sdei_unmask_local_cpu(void)
+
+ static void _ipi_unmask_cpu(void *ignored)
+ {
++ WARN_ON_ONCE(preemptible());
+ sdei_unmask_local_cpu();
+ }
+
+@@ -347,6 +347,8 @@ static void _ipi_private_reset(void *ignored)
+ {
+ int err;
+
++ WARN_ON_ONCE(preemptible());
++
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
+ NULL);
+ if (err && err != -EIO)
+@@ -393,8 +395,6 @@ static void _local_event_enable(void *data)
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+- WARN_ON_ONCE(preemptible());
+-
+ err = sdei_api_event_enable(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+@@ -485,8 +485,6 @@ static void _local_event_unregister(void *data)
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+- WARN_ON_ONCE(preemptible());
+-
+ err = sdei_api_event_unregister(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+@@ -575,8 +573,6 @@ static void _local_event_register(void *data)
+ struct sdei_registered_event *reg;
+ struct sdei_crosscall_args *arg = data;
+
+- WARN_ON(preemptible());
+-
+ reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
+ err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
+ reg, 0, 0);
+@@ -756,6 +752,8 @@ static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
+ {
+ int rv;
+
++ WARN_ON_ONCE(preemptible());
++
+ switch (action) {
+ case CPU_PM_ENTER:
+ rv = sdei_mask_local_cpu();
+@@ -804,7 +802,7 @@ static int sdei_device_freeze(struct device *dev)
+ int err;
+
+ /* unregister private events */
+- cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
++ cpuhp_remove_state(sdei_entry_point);
+
+ err = sdei_unregister_shared();
+ if (err)
+@@ -825,12 +823,15 @@ static int sdei_device_thaw(struct device *dev)
+ return err;
+ }
+
+- err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
++ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
+ &sdei_cpuhp_up, &sdei_cpuhp_down);
+- if (err)
++ if (err < 0) {
+ pr_warn("Failed to re-register CPU hotplug notifier...\n");
++ return err;
++ }
+
+- return err;
++ sdei_hp_state = err;
++ return 0;
+ }
+
+ static int sdei_device_restore(struct device *dev)
+@@ -862,7 +863,7 @@ static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
+ * We are going to reset the interface, after this there is no point
+ * doing work when we take CPUs offline.
+ */
+- cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
++ cpuhp_remove_state(sdei_hp_state);
+
+ sdei_platform_reset();
+
+@@ -1044,13 +1045,15 @@ static int sdei_probe(struct platform_device *pdev)
+ goto remove_cpupm;
+ }
+
+- err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
++ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
+ &sdei_cpuhp_up, &sdei_cpuhp_down);
+- if (err) {
++ if (err < 0) {
+ pr_warn("Failed to register CPU hotplug notifier...\n");
+ goto remove_reboot;
+ }
+
++ sdei_hp_state = err;
++
+ return 0;
+
+ remove_reboot:
+diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
+index 9c1c4d81aa7b6..3e983d98837b4 100644
+--- a/drivers/gpio/gpio-mockup.c
++++ b/drivers/gpio/gpio-mockup.c
+@@ -339,7 +339,7 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
+ priv->offset = i;
+ priv->desc = &gc->gpiodev->descs[i];
+
+- debugfs_create_file(name, 0200, chip->dbg_dir, priv,
++ debugfs_create_file(name, 0600, chip->dbg_dir, priv,
+ &gpio_mockup_debugfs_ops);
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+index 6fd57cfb112f5..96fdc18ecb3bf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+@@ -778,7 +778,7 @@ static void dce_transform_set_pixel_storage_depth(
+ color_depth = COLOR_DEPTH_101010;
+ pixel_depth = 0;
+ expan_mode = 1;
+- BREAK_TO_DEBUGGER();
++ DC_LOG_DC("The pixel depth %d is not valid, set COLOR_DEPTH_101010 instead.", depth);
+ break;
+ }
+
+@@ -792,8 +792,7 @@ static void dce_transform_set_pixel_storage_depth(
+ if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
+ /*we should use unsupported capabilities
+ * unless it is required by w/a*/
+- DC_LOG_WARNING("%s: Capability not supported",
+- __func__);
++ DC_LOG_DC("%s: Capability not supported", __func__);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
+index b942c69e9b489..17a96f1d9234c 100644
+--- a/drivers/gpu/drm/drm_mipi_dsi.c
++++ b/drivers/gpu/drm/drm_mipi_dsi.c
+@@ -221,7 +221,7 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host,
+ return dsi;
+ }
+
+- dsi->dev.of_node = info->node;
++ device_set_node(&dsi->dev, of_fwnode_handle(info->node));
+ dsi->channel = info->channel;
+ strlcpy(dsi->name, info->type, sizeof(dsi->name));
+
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
+index 74ea3c26deadc..1a5ae781b56c6 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.h
++++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
+@@ -34,11 +34,11 @@ static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
+ return -ENODEV;
+ }
+
+-int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
++static inline int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
+ {
+ return 0;
+ }
+
+-void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
++static inline void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
+ { }
+ #endif
+diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
+index 0419b6105c8a5..ccd084abc8c94 100644
+--- a/drivers/gpu/drm/tegra/sor.c
++++ b/drivers/gpu/drm/tegra/sor.c
+@@ -906,7 +906,7 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
+ struct drm_dp_link *link)
+ {
+ const u64 f = 100000, link_rate = link->rate * 1000;
+- const u64 pclk = mode->clock * 1000;
++ const u64 pclk = (u64)mode->clock * 1000;
+ u64 input, output, watermark, num;
+ struct tegra_sor_params params;
+ u32 num_syms_per_line;
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 919551ed5809c..b8558292801ec 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -809,8 +809,7 @@ static int hidpp_unifying_init(struct hidpp_device *hidpp)
+ if (ret)
+ return ret;
+
+- snprintf(hdev->uniq, sizeof(hdev->uniq), "%04x-%4phD",
+- hdev->product, &serial);
++ snprintf(hdev->uniq, sizeof(hdev->uniq), "%4phD", &serial);
+ dbg_hid("HID++ Unifying: Got serial: %s\n", hdev->uniq);
+
+ name = hidpp_unifying_get_name(hidpp);
+@@ -903,6 +902,54 @@ print_version:
+ return 0;
+ }
+
++/* -------------------------------------------------------------------------- */
++/* 0x0003: Device Information */
++/* -------------------------------------------------------------------------- */
++
++#define HIDPP_PAGE_DEVICE_INFORMATION 0x0003
++
++#define CMD_GET_DEVICE_INFO 0x00
++
++static int hidpp_get_serial(struct hidpp_device *hidpp, u32 *serial)
++{
++ struct hidpp_report response;
++ u8 feature_type;
++ u8 feature_index;
++ int ret;
++
++ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_DEVICE_INFORMATION,
++ &feature_index,
++ &feature_type);
++ if (ret)
++ return ret;
++
++ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
++ CMD_GET_DEVICE_INFO,
++ NULL, 0, &response);
++ if (ret)
++ return ret;
++
++ /* See hidpp_unifying_get_serial() */
++ *serial = *((u32 *)&response.rap.params[1]);
++ return 0;
++}
++
++static int hidpp_serial_init(struct hidpp_device *hidpp)
++{
++ struct hid_device *hdev = hidpp->hid_dev;
++ u32 serial;
++ int ret;
++
++ ret = hidpp_get_serial(hidpp, &serial);
++ if (ret)
++ return ret;
++
++ snprintf(hdev->uniq, sizeof(hdev->uniq), "%4phD", &serial);
++ dbg_hid("HID++ DeviceInformation: Got serial: %s\n", hdev->uniq);
++
++ return 0;
++}
++
+ /* -------------------------------------------------------------------------- */
+ /* 0x0005: GetDeviceNameType */
+ /* -------------------------------------------------------------------------- */
+@@ -3651,6 +3698,8 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+
+ if (hidpp->quirks & HIDPP_QUIRK_UNIFYING)
+ hidpp_unifying_init(hidpp);
++ else if (hid_is_usb(hidpp->hid_dev))
++ hidpp_serial_init(hidpp);
+
+ connected = hidpp_root_get_protocol_version(hidpp) == 0;
+ atomic_set(&hidpp->connected, connected);
+diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
+index 203d27d198b81..3f8b24a57014b 100644
+--- a/drivers/hid/wacom.h
++++ b/drivers/hid/wacom.h
+@@ -91,6 +91,7 @@
+ #include <linux/leds.h>
+ #include <linux/usb/input.h>
+ #include <linux/power_supply.h>
++#include <linux/timer.h>
+ #include <asm/unaligned.h>
+
+ /*
+@@ -167,6 +168,7 @@ struct wacom {
+ struct delayed_work init_work;
+ struct wacom_remote *remote;
+ struct work_struct mode_change_work;
++ struct timer_list idleprox_timer;
+ bool generic_has_leds;
+ struct wacom_leds {
+ struct wacom_group_leds *groups;
+@@ -239,4 +241,5 @@ struct wacom_led *wacom_led_find(struct wacom *wacom, unsigned int group,
+ struct wacom_led *wacom_led_next(struct wacom *wacom, struct wacom_led *cur);
+ int wacom_equivalent_usage(int usage);
+ int wacom_initialize_leds(struct wacom *wacom);
++void wacom_idleprox_timeout(struct timer_list *list);
+ #endif
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index b42785fdf7ed5..a93070f5b214c 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -2781,6 +2781,7 @@ static int wacom_probe(struct hid_device *hdev,
+ INIT_WORK(&wacom->battery_work, wacom_battery_work);
+ INIT_WORK(&wacom->remote_work, wacom_remote_work);
+ INIT_WORK(&wacom->mode_change_work, wacom_mode_change_work);
++ timer_setup(&wacom->idleprox_timer, &wacom_idleprox_timeout, TIMER_DEFERRABLE);
+
+ /* ask for the report descriptor to be loaded by HID */
+ error = hid_parse(hdev);
+@@ -2825,6 +2826,7 @@ static void wacom_remove(struct hid_device *hdev)
+ cancel_work_sync(&wacom->battery_work);
+ cancel_work_sync(&wacom->remote_work);
+ cancel_work_sync(&wacom->mode_change_work);
++ del_timer_sync(&wacom->idleprox_timer);
+ if (hdev->bus == BUS_BLUETOOTH)
+ device_remove_file(&hdev->dev, &dev_attr_speed);
+
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 339bc7f1fcedb..099f7cccc3a7a 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -11,6 +11,7 @@
+ #include "wacom_wac.h"
+ #include "wacom.h"
+ #include <linux/input/mt.h>
++#include <linux/jiffies.h>
+
+ /* resolution for penabled devices */
+ #define WACOM_PL_RES 20
+@@ -41,6 +42,43 @@ static int wacom_numbered_button_to_key(int n);
+
+ static void wacom_update_led(struct wacom *wacom, int button_count, int mask,
+ int group);
++
++static void wacom_force_proxout(struct wacom_wac *wacom_wac)
++{
++ struct input_dev *input = wacom_wac->pen_input;
++
++ wacom_wac->shared->stylus_in_proximity = 0;
++
++ input_report_key(input, BTN_TOUCH, 0);
++ input_report_key(input, BTN_STYLUS, 0);
++ input_report_key(input, BTN_STYLUS2, 0);
++ input_report_key(input, BTN_STYLUS3, 0);
++ input_report_key(input, wacom_wac->tool[0], 0);
++ if (wacom_wac->serial[0]) {
++ input_report_abs(input, ABS_MISC, 0);
++ }
++ input_report_abs(input, ABS_PRESSURE, 0);
++
++ wacom_wac->tool[0] = 0;
++ wacom_wac->id[0] = 0;
++ wacom_wac->serial[0] = 0;
++
++ input_sync(input);
++}
++
++void wacom_idleprox_timeout(struct timer_list *list)
++{
++ struct wacom *wacom = from_timer(wacom, list, idleprox_timer);
++ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
++
++ if (!wacom_wac->hid_data.sense_state) {
++ return;
++ }
++
++ hid_warn(wacom->hdev, "%s: tool appears to be hung in-prox. forcing it out.\n", __func__);
++ wacom_force_proxout(wacom_wac);
++}
++
+ /*
+ * Percent of battery capacity for Graphire.
+ * 8th value means AC online and show 100% capacity.
+@@ -675,11 +713,14 @@ static int wacom_intuos_get_tool_type(int tool_id)
+ case 0x802: /* Intuos4/5 13HD/24HD General Pen */
+ case 0x8e2: /* IntuosHT2 pen */
+ case 0x022:
++ case 0x200: /* Pro Pen 3 */
++ case 0x04200: /* Pro Pen 3 */
+ case 0x10842: /* MobileStudio Pro Pro Pen slim */
+ case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
+ case 0x16802: /* Cintiq 13HD Pro Pen */
+ case 0x18802: /* DTH2242 Pen */
+ case 0x10802: /* Intuos4/5 13HD/24HD General Pen */
++ case 0x80842: /* Intuos Pro and Cintiq Pro 3D Pen */
+ tool_type = BTN_TOOL_PEN;
+ break;
+
+@@ -1927,18 +1968,7 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
+ static void wacom_wac_battery_usage_mapping(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage)
+ {
+- struct wacom *wacom = hid_get_drvdata(hdev);
+- struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+- struct wacom_features *features = &wacom_wac->features;
+- unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+-
+- switch (equivalent_usage) {
+- case HID_DG_BATTERYSTRENGTH:
+- case WACOM_HID_WD_BATTERY_LEVEL:
+- case WACOM_HID_WD_BATTERY_CHARGING:
+- features->quirks |= WACOM_QUIRK_BATTERY;
+- break;
+- }
++ return;
+ }
+
+ static void wacom_wac_battery_event(struct hid_device *hdev, struct hid_field *field,
+@@ -1959,18 +1989,21 @@ static void wacom_wac_battery_event(struct hid_device *hdev, struct hid_field *f
+ wacom_wac->hid_data.bat_connected = 1;
+ wacom_wac->hid_data.bat_status = WACOM_POWER_SUPPLY_STATUS_AUTO;
+ }
++ wacom_wac->features.quirks |= WACOM_QUIRK_BATTERY;
+ break;
+ case WACOM_HID_WD_BATTERY_LEVEL:
+ value = value * 100 / (field->logical_maximum - field->logical_minimum);
+ wacom_wac->hid_data.battery_capacity = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ wacom_wac->hid_data.bat_status = WACOM_POWER_SUPPLY_STATUS_AUTO;
++ wacom_wac->features.quirks |= WACOM_QUIRK_BATTERY;
+ break;
+ case WACOM_HID_WD_BATTERY_CHARGING:
+ wacom_wac->hid_data.bat_charging = value;
+ wacom_wac->hid_data.ps_connected = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ wacom_wac->hid_data.bat_status = WACOM_POWER_SUPPLY_STATUS_AUTO;
++ wacom_wac->features.quirks |= WACOM_QUIRK_BATTERY;
+ break;
+ }
+ }
+@@ -1986,18 +2019,15 @@ static void wacom_wac_battery_report(struct hid_device *hdev,
+ {
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+- struct wacom_features *features = &wacom_wac->features;
+
+- if (features->quirks & WACOM_QUIRK_BATTERY) {
+- int status = wacom_wac->hid_data.bat_status;
+- int capacity = wacom_wac->hid_data.battery_capacity;
+- bool charging = wacom_wac->hid_data.bat_charging;
+- bool connected = wacom_wac->hid_data.bat_connected;
+- bool powered = wacom_wac->hid_data.ps_connected;
++ int status = wacom_wac->hid_data.bat_status;
++ int capacity = wacom_wac->hid_data.battery_capacity;
++ bool charging = wacom_wac->hid_data.bat_charging;
++ bool connected = wacom_wac->hid_data.bat_connected;
++ bool powered = wacom_wac->hid_data.ps_connected;
+
+- wacom_notify_battery(wacom_wac, status, capacity, charging,
+- connected, powered);
+- }
++ wacom_notify_battery(wacom_wac, status, capacity, charging,
++ connected, powered);
+ }
+
+ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
+@@ -2339,6 +2369,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
+ value = field->logical_maximum - value;
+ break;
+ case HID_DG_INRANGE:
++ mod_timer(&wacom->idleprox_timer, jiffies + msecs_to_jiffies(100));
+ wacom_wac->hid_data.inrange_state = value;
+ if (!(features->quirks & WACOM_QUIRK_SENSE))
+ wacom_wac->hid_data.sense_state = value;
+@@ -4812,6 +4843,10 @@ static const struct wacom_features wacom_features_0x3c6 =
+ static const struct wacom_features wacom_features_0x3c8 =
+ { "Wacom Intuos BT M", 21600, 13500, 4095, 63,
+ INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
++static const struct wacom_features wacom_features_0x3dd =
++ { "Wacom Intuos Pro S", 31920, 19950, 8191, 63,
++ INTUOSP2S_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7,
++ .touch_max = 10 };
+
+ static const struct wacom_features wacom_features_HID_ANY_ID =
+ { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
+@@ -4991,6 +5026,7 @@ const struct hid_device_id wacom_ids[] = {
+ { BT_DEVICE_WACOM(0x393) },
+ { BT_DEVICE_WACOM(0x3c6) },
+ { BT_DEVICE_WACOM(0x3c8) },
++ { BT_DEVICE_WACOM(0x3dd) },
+ { USB_DEVICE_WACOM(0x4001) },
+ { USB_DEVICE_WACOM(0x4004) },
+ { USB_DEVICE_WACOM(0x5000) },
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+index ed77c7f7b344b..763463776a0e1 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+@@ -909,7 +909,7 @@ tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
+
+ len = tmc_etr_buf_get_data(etr_buf, offset,
+ CORESIGHT_BARRIER_PKT_SIZE, &bufp);
+- if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
++ if (WARN_ON(len < 0 || len < CORESIGHT_BARRIER_PKT_SIZE))
+ return -EINVAL;
+ coresight_insert_barrier_packet(bufp);
+ return offset + CORESIGHT_BARRIER_PKT_SIZE;
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index ad3a092b8b5c3..390123f87658b 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -131,6 +131,11 @@ struct ib_umad_packet {
+ struct ib_user_mad mad;
+ };
+
++struct ib_rmpp_mad_hdr {
++ struct ib_mad_hdr mad_hdr;
++ struct ib_rmpp_hdr rmpp_hdr;
++} __packed;
++
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/ib_umad.h>
+
+@@ -494,11 +499,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+ {
+ struct ib_umad_file *file = filp->private_data;
++ struct ib_rmpp_mad_hdr *rmpp_mad_hdr;
+ struct ib_umad_packet *packet;
+ struct ib_mad_agent *agent;
+ struct rdma_ah_attr ah_attr;
+ struct ib_ah *ah;
+- struct ib_rmpp_mad *rmpp_mad;
+ __be64 *tid;
+ int ret, data_len, hdr_len, copy_offset, rmpp_active;
+ u8 base_version;
+@@ -506,7 +511,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
+ if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
+ return -EINVAL;
+
+- packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
++ packet = kzalloc(sizeof(*packet) + IB_MGMT_RMPP_HDR, GFP_KERNEL);
+ if (!packet)
+ return -ENOMEM;
+
+@@ -560,13 +565,13 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
+ goto err_up;
+ }
+
+- rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
+- hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
++ rmpp_mad_hdr = (struct ib_rmpp_mad_hdr *)packet->mad.data;
++ hdr_len = ib_get_mad_data_offset(rmpp_mad_hdr->mad_hdr.mgmt_class);
+
+- if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
++ if (ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
+ && ib_mad_kernel_rmpp_agent(agent)) {
+ copy_offset = IB_MGMT_RMPP_HDR;
+- rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
++ rmpp_active = ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) &
+ IB_MGMT_RMPP_FLAG_ACTIVE;
+ } else {
+ copy_offset = IB_MGMT_MAD_HDR;
+@@ -615,12 +620,12 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
+ tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
+ *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
+ (be64_to_cpup(tid) & 0xffffffff));
+- rmpp_mad->mad_hdr.tid = *tid;
++ rmpp_mad_hdr->mad_hdr.tid = *tid;
+ }
+
+ if (!ib_mad_kernel_rmpp_agent(agent)
+- && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
+- && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
++ && ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
++ && (ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
+ spin_lock_irq(&file->send_lock);
+ list_add_tail(&packet->list, &file->send_list);
+ spin_unlock_irq(&file->send_lock);
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 70dedc0f7827c..0bd55e1fca372 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -489,6 +489,9 @@ struct xboxone_init_packet {
+ }
+
+
++#define GIP_WIRED_INTF_DATA 0
++#define GIP_WIRED_INTF_AUDIO 1
++
+ /*
+ * This packet is required for all Xbox One pads with 2015
+ * or later firmware installed (or present from the factory).
+@@ -1813,7 +1816,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
+ }
+
+ if (xpad->xtype == XTYPE_XBOXONE &&
+- intf->cur_altsetting->desc.bInterfaceNumber != 0) {
++ intf->cur_altsetting->desc.bInterfaceNumber != GIP_WIRED_INTF_DATA) {
+ /*
+ * The Xbox One controller lists three interfaces all with the
+ * same interface class, subclass and protocol. Differentiate by
+diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
+index 4f64c3a9ee88d..b3c5d7b4547a4 100644
+--- a/drivers/iommu/arm-smmu-v3.c
++++ b/drivers/iommu/arm-smmu-v3.c
+@@ -760,6 +760,18 @@ static void queue_inc_cons(struct arm_smmu_ll_queue *q)
+ q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
+ }
+
++static void queue_sync_cons_ovf(struct arm_smmu_queue *q)
++{
++ struct arm_smmu_ll_queue *llq = &q->llq;
++
++ if (likely(Q_OVF(llq->prod) == Q_OVF(llq->cons)))
++ return;
++
++ llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
++ Q_IDX(llq, llq->cons);
++ queue_sync_cons_out(q);
++}
++
+ static int queue_sync_prod_in(struct arm_smmu_queue *q)
+ {
+ int ret = 0;
+@@ -1720,8 +1732,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
+ } while (!queue_empty(llq));
+
+ /* Sync our overflow flag, as we believe we're up to speed */
+- llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
+- Q_IDX(llq, llq->cons);
++ queue_sync_cons_ovf(q);
+ return IRQ_HANDLED;
+ }
+
+@@ -1779,9 +1790,7 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
+ } while (!queue_empty(llq));
+
+ /* Sync our overflow flag, as we believe we're up to speed */
+- llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
+- Q_IDX(llq, llq->cons);
+- queue_sync_cons_out(q);
++ queue_sync_cons_ovf(q);
+ return IRQ_HANDLED;
+ }
+
+diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
+index 14866aa22f753..22927c80ff469 100644
+--- a/drivers/mcb/mcb-pci.c
++++ b/drivers/mcb/mcb-pci.c
+@@ -31,7 +31,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ struct resource *res;
+ struct priv *priv;
+- int ret;
++ int ret, table_size;
+ unsigned long flags;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct priv), GFP_KERNEL);
+@@ -90,7 +90,30 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ if (ret < 0)
+ goto out_mcb_bus;
+
+- dev_dbg(&pdev->dev, "Found %d cells\n", ret);
++ table_size = ret;
++
++ if (table_size < CHAM_HEADER_SIZE) {
++ /* Release the previous resources */
++ devm_iounmap(&pdev->dev, priv->base);
++ devm_release_mem_region(&pdev->dev, priv->mapbase, CHAM_HEADER_SIZE);
++
++ /* Then, allocate it again with the actual chameleon table size */
++ res = devm_request_mem_region(&pdev->dev, priv->mapbase,
++ table_size,
++ KBUILD_MODNAME);
++ if (!res) {
++ dev_err(&pdev->dev, "Failed to request PCI memory\n");
++ ret = -EBUSY;
++ goto out_mcb_bus;
++ }
++
++ priv->base = devm_ioremap(&pdev->dev, priv->mapbase, table_size);
++ if (!priv->base) {
++ dev_err(&pdev->dev, "Cannot ioremap\n");
++ ret = -ENOMEM;
++ goto out_mcb_bus;
++ }
++ }
+
+ mcb_bus_add_devices(priv->bus);
+
+diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+index eb5621c9ebf85..129acf595410d 100644
+--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
++++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+@@ -697,7 +697,7 @@ static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
+ netup_unidvb_dma_enable(dma, 0);
+ msleep(50);
+ cancel_work_sync(&dma->work);
+- del_timer(&dma->timeout);
++ del_timer_sync(&dma->timeout);
+ }
+
+ static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
+diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
+index 8230da828d0ee..127a3be0e0f07 100644
+--- a/drivers/media/radio/radio-shark.c
++++ b/drivers/media/radio/radio-shark.c
+@@ -316,6 +316,16 @@ static int usb_shark_probe(struct usb_interface *intf,
+ {
+ struct shark_device *shark;
+ int retval = -ENOMEM;
++ static const u8 ep_addresses[] = {
++ SHARK_IN_EP | USB_DIR_IN,
++ SHARK_OUT_EP | USB_DIR_OUT,
++ 0};
++
++ /* Are the expected endpoints present? */
++ if (!usb_check_int_endpoints(intf, ep_addresses)) {
++ dev_err(&intf->dev, "Invalid radioSHARK device\n");
++ return -EINVAL;
++ }
+
+ shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);
+ if (!shark)
+diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
+index d150f12382c60..f1c5c0a6a335c 100644
+--- a/drivers/media/radio/radio-shark2.c
++++ b/drivers/media/radio/radio-shark2.c
+@@ -282,6 +282,16 @@ static int usb_shark_probe(struct usb_interface *intf,
+ {
+ struct shark_device *shark;
+ int retval = -ENOMEM;
++ static const u8 ep_addresses[] = {
++ SHARK_IN_EP | USB_DIR_IN,
++ SHARK_OUT_EP | USB_DIR_OUT,
++ 0};
++
++ /* Are the expected endpoints present? */
++ if (!usb_check_int_endpoints(intf, ep_addresses)) {
++ dev_err(&intf->dev, "Invalid radioSHARK2 device\n");
++ return -EINVAL;
++ }
+
+ shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);
+ if (!shark)
+diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
+index eaa2a94d18be4..dd06c18495eb6 100644
+--- a/drivers/memstick/host/r592.c
++++ b/drivers/memstick/host/r592.c
+@@ -828,7 +828,7 @@ static void r592_remove(struct pci_dev *pdev)
+ /* Stop the processing thread.
+ That ensures that we won't take any more requests */
+ kthread_stop(dev->io_thread);
+-
++ del_timer_sync(&dev->detect_timer);
+ r592_enable_device(dev, false);
+
+ while (!error && dev->req) {
+diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
+index ebc00d47abf52..624803a887d8f 100644
+--- a/drivers/message/fusion/mptlan.c
++++ b/drivers/message/fusion/mptlan.c
+@@ -1430,7 +1430,9 @@ mptlan_remove(struct pci_dev *pdev)
+ {
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+ struct net_device *dev = ioc->netdev;
++ struct mpt_lan_priv *priv = netdev_priv(dev);
+
++ cancel_delayed_work_sync(&priv->post_buckets_task);
+ if(dev != NULL) {
+ unregister_netdev(dev);
+ free_netdev(dev);
+diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
+index 707f4287ab4a0..80952237e4b43 100644
+--- a/drivers/mfd/dln2.c
++++ b/drivers/mfd/dln2.c
+@@ -797,6 +797,7 @@ out_stop_rx:
+ dln2_stop_rx_urbs(dln2);
+
+ out_free:
++ usb_put_dev(dln2->usb_dev);
+ dln2_free(dln2);
+
+ return ret;
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 0885991347d09..cdb9efae6032d 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3239,7 +3239,11 @@ static int bond_slave_netdev_event(unsigned long event,
+ unblock_netpoll_tx();
+ break;
+ case NETDEV_FEAT_CHANGE:
+- bond_compute_features(bond);
++ if (!bond->notifier_ctx) {
++ bond->notifier_ctx = true;
++ bond_compute_features(bond);
++ bond->notifier_ctx = false;
++ }
+ break;
+ case NETDEV_RESEND_IGMP:
+ /* Propagate to master device */
+@@ -4878,6 +4882,8 @@ static int bond_init(struct net_device *bond_dev)
+ if (!bond->wq)
+ return -ENOMEM;
+
++ bond->notifier_ctx = false;
++
+ spin_lock_init(&bond->stats_lock);
+ lockdep_register_key(&bond->stats_lock_key);
+ lockdep_set_class(&bond->stats_lock, &bond->stats_lock_key);
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index faa78d38d752b..560a0a5ba6f3f 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -70,10 +70,12 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
+ #define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
+ /* Shared receive buffer registers */
+ #define KVASER_PCIEFD_SRB_BASE 0x1f200
++#define KVASER_PCIEFD_SRB_FIFO_LAST_REG (KVASER_PCIEFD_SRB_BASE + 0x1f4)
+ #define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
+ #define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
+ #define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
+ #define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
++#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG (KVASER_PCIEFD_SRB_BASE + 0x214)
+ #define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
+ /* EPCS flash controller registers */
+ #define KVASER_PCIEFD_SPI_BASE 0x1fc00
+@@ -110,6 +112,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
+ /* DMA support */
+ #define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
+
++/* SRB current packet level */
++#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK 0xff
++
+ /* DMA Enable */
+ #define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
+
+@@ -528,7 +533,7 @@ static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
+ KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
+ KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
+ KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
+- KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;
++ KVASER_PCIEFD_KCAN_IRQ_TAR;
+
+ iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+
+@@ -556,6 +561,8 @@ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
+
+ if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
++ else
++ mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM;
+
+ mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
+ mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
+@@ -574,7 +581,7 @@ static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
+
+ spin_lock_irqsave(&can->lock, irq);
+ iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+- iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
++ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
+ can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+
+ status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
+@@ -617,7 +624,7 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
+ iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+
+- iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
++ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
+ can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+
+ mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+@@ -719,6 +726,7 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
+ iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ del_timer(&can->bec_poll_timer);
+ }
++ can->can.state = CAN_STATE_STOPPED;
+ close_candev(netdev);
+
+ return ret;
+@@ -1001,8 +1009,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
+ SET_NETDEV_DEV(netdev, &pcie->pci->dev);
+
+ iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+- iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
+- KVASER_PCIEFD_KCAN_IRQ_TFD,
++ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
+ can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+
+ pcie->can[i] = can;
+@@ -1052,6 +1059,7 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
+ {
+ int i;
+ u32 srb_status;
++ u32 srb_packet_count;
+ dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];
+
+ /* Disable the DMA */
+@@ -1079,6 +1087,15 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
+ KVASER_PCIEFD_SRB_CMD_RDB1,
+ pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+
++ /* Empty Rx FIFO */
++ srb_packet_count = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG) &
++ KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK;
++ while (srb_packet_count) {
++ /* Drop current packet in FIFO */
++ ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
++ srb_packet_count--;
++ }
++
+ srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
+ if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
+ dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
+@@ -1421,9 +1438,6 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
+ cmd = KVASER_PCIEFD_KCAN_CMD_AT;
+ cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
+ iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
+-
+- iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
+- can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
+ p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
+ cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
+@@ -1712,15 +1726,6 @@ static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
+ if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
+ netdev_err(can->can.dev, "Tx FIFO overflow\n");
+
+- if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
+- u8 count = ioread32(can->reg_base +
+- KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
+-
+- if (count == 0)
+- iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
+- can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
+- }
+-
+ if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
+ netdev_err(can->can.dev,
+ "Fail to change bittiming, when not in reset mode\n");
+@@ -1822,6 +1827,11 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
+ if (err)
+ goto err_teardown_can_ctrls;
+
++ err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
++ IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
++ if (err)
++ goto err_teardown_can_ctrls;
++
+ iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
+ pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
+
+@@ -1842,11 +1852,6 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
+ pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+
+- err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
+- IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
+- if (err)
+- goto err_teardown_can_ctrls;
+-
+ err = kvaser_pciefd_reg_candev(pcie);
+ if (err)
+ goto err_free_irq;
+@@ -1854,6 +1859,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
+ return 0;
+
+ err_free_irq:
++ /* Disable PCI interrupts */
++ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
+ free_irq(pcie->pci->irq, pcie);
+
+ err_teardown_can_ctrls:
+diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
+index 2b2695311bda9..aab26dbe76ff9 100644
+--- a/drivers/net/ethernet/3com/3c589_cs.c
++++ b/drivers/net/ethernet/3com/3c589_cs.c
+@@ -196,6 +196,7 @@ static int tc589_probe(struct pcmcia_device *link)
+ {
+ struct el3_private *lp;
+ struct net_device *dev;
++ int ret;
+
+ dev_dbg(&link->dev, "3c589_attach()\n");
+
+@@ -219,7 +220,15 @@ static int tc589_probe(struct pcmcia_device *link)
+
+ dev->ethtool_ops = &netdev_ethtool_ops;
+
+- return tc589_config(link);
++ ret = tc589_config(link);
++ if (ret)
++ goto err_free_netdev;
++
++ return 0;
++
++err_free_netdev:
++ free_netdev(dev);
++ return ret;
+ }
+
+ static void tc589_detach(struct pcmcia_device *link)
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 1b725a021455b..750acbf294640 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -2973,7 +2973,7 @@ err_clk_disable:
+ return ret;
+ }
+
+-static void bcmgenet_netif_stop(struct net_device *dev)
++static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
+ {
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+@@ -2988,7 +2988,8 @@ static void bcmgenet_netif_stop(struct net_device *dev)
+ /* Disable MAC transmit. TX DMA disabled must be done before this */
+ umac_enable_set(priv, CMD_TX_EN, false);
+
+- phy_stop(dev->phydev);
++ if (stop_phy)
++ phy_stop(dev->phydev);
+ bcmgenet_disable_rx_napi(priv);
+ bcmgenet_intr_disable(priv);
+
+@@ -3014,7 +3015,7 @@ static int bcmgenet_close(struct net_device *dev)
+
+ netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
+
+- bcmgenet_netif_stop(dev);
++ bcmgenet_netif_stop(dev, false);
+
+ /* Really kill the PHY state machine and disconnect from it */
+ phy_disconnect(dev->phydev);
+@@ -3712,7 +3713,7 @@ static int bcmgenet_suspend(struct device *d)
+
+ netif_device_detach(dev);
+
+- bcmgenet_netif_stop(dev);
++ bcmgenet_netif_stop(dev, true);
+
+ if (!device_may_wakeup(d))
+ phy_suspend(dev->phydev);
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index e1b8c58c4d6b2..f67f104049dba 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3769,7 +3769,9 @@ fec_drv_remove(struct platform_device *pdev)
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+- return ret;
++ dev_err(&pdev->dev,
++ "Failed to resume device in remove callback (%pe)\n",
++ ERR_PTR(ret));
+
+ cancel_work_sync(&fep->tx_timeout_work);
+ fec_ptp_stop(pdev);
+@@ -3782,8 +3784,13 @@ fec_drv_remove(struct platform_device *pdev)
+ of_phy_deregister_fixed_link(np);
+ of_node_put(fep->phy_node);
+
+- clk_disable_unprepare(fep->clk_ahb);
+- clk_disable_unprepare(fep->clk_ipg);
++ /* After pm_runtime_get_sync() failed, the clks are still off, so skip
++ * disabling them again.
++ */
++ if (ret >= 0) {
++ clk_disable_unprepare(fep->clk_ahb);
++ clk_disable_unprepare(fep->clk_ipg);
++ }
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index d58abdfdb9b7b..08277c3cf2806 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -6688,12 +6688,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
+ /* If it is not PF reset or FLR, the firmware will disable the MAC,
+ * so it only need to stop phy here.
+ */
+- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
+- hdev->reset_type != HNAE3_FUNC_RESET &&
+- hdev->reset_type != HNAE3_FLR_RESET) {
+- hclge_mac_stop_phy(hdev);
+- hclge_update_link_status(hdev);
+- return;
++ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
++ hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
++ HCLGE_PFC_DISABLE);
++ if (hdev->reset_type != HNAE3_FUNC_RESET &&
++ hdev->reset_type != HNAE3_FLR_RESET) {
++ hclge_mac_stop_phy(hdev);
++ hclge_update_link_status(hdev);
++ return;
++ }
+ }
+
+ for (i = 0; i < handle->kinfo.num_tqps; i++)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+index 8448607742a6b..2183e700f9d96 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+@@ -170,8 +170,8 @@ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+ }
+
+-static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+- u8 pfc_bitmap)
++int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
++ u8 pfc_bitmap)
+ {
+ struct hclge_desc desc;
+ struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+index 260f22d19d81a..406084bb23072 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+@@ -109,6 +109,9 @@ struct hclge_bp_to_qs_map_cmd {
+ u32 rsvd1;
+ };
+
++#define HCLGE_PFC_DISABLE 0
++#define HCLGE_PFC_TX_RX_DISABLE 0
++
+ struct hclge_pfc_en_cmd {
+ u8 tx_rx_en_bitmap;
+ u8 pri_en_bitmap;
+@@ -150,6 +153,8 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
+ void hclge_tm_pfc_info_update(struct hclge_dev *hdev);
+ int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
+ int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
++int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
++ u8 pfc_bitmap);
+ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
+ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
+ int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 48956c30d2eee..ec3d98595198e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1432,7 +1432,10 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
+ * might happen in case reset assertion was made by PF. Yes, this also
+ * means we might end up waiting bit more even for VF reset.
+ */
+- msleep(5000);
++ if (hdev->reset_type == HNAE3_VF_FULL_RESET)
++ msleep(5000);
++ else
++ msleep(500);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
+index 79ee0a7472608..4e69cb2c025fd 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
++++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
+@@ -425,7 +425,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
+ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+ {
+ u32 hash_value, hash_mask;
+- u8 bit_shift = 0;
++ u8 bit_shift = 1;
+
+ /* Register count multiplied by bits per register */
+ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+@@ -433,7 +433,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+ /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
+ * where 0xFF would still fall within the hash mask.
+ */
+- while (hash_mask >> bit_shift != 0xFF)
++ while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
+ bit_shift++;
+
+ /* The portion of the address that is used for the hash table
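
The hunk above starts bit_shift at 1 and caps the search at 4, so the loop can no longer spin if hash_mask never right-shifts down to 0xFF. A stand-alone sketch of the same arithmetic follows; the register count and multicast address are made-up example values, and the final bit selection follows the usual e1000-family scheme for mc_filter_type 0, which is only partly visible in this hunk.

#include <stdio.h>
#include <stdint.h>

/* Illustrative user-space copy of the bounded hash computation above. */
static uint32_t hash_mc_addr(const uint8_t *mc_addr, int mta_reg_count)
{
	uint32_t hash_mask = (mta_reg_count * 32) - 1;	/* register count * bits per register */
	uint8_t bit_shift = 1;

	/* Find how far 0xFF can be shifted and still fit inside hash_mask;
	 * the "< 4" cap keeps the loop bounded for small register counts. */
	while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
		bit_shift++;

	/* mc_filter_type 0: hash on the upper bits of the address. */
	return hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
			    ((uint16_t)mc_addr[5] << bit_shift));
}

int main(void)
{
	uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };

	printf("hash index: 0x%03x\n", hash_mc_addr(addr, 128));
	return 0;
}
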
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+index bced2efe9bef4..abd066e952286 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+@@ -110,7 +110,8 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
+ priv->devs[idx] = dev;
+ devcom = mlx5_devcom_alloc(priv, idx);
+ if (!devcom) {
+- kfree(priv);
++ if (new_priv)
++ kfree(priv);
+ return ERR_PTR(-ENOMEM);
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 83ee9429e7c65..ff9ac7cffc321 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -887,7 +887,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
+
+ dev->dm = mlx5_dm_create(dev);
+ if (IS_ERR(dev->dm))
+- mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
++ mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm));
+
+ dev->tracer = mlx5_fw_tracer_create(dev);
+ dev->hv_vhca = mlx5_hv_vhca_create(dev);
+diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
+index 05d2b478c99bd..d069017d43a3e 100644
+--- a/drivers/net/ethernet/nvidia/forcedeth.c
++++ b/drivers/net/ethernet/nvidia/forcedeth.c
+@@ -6099,6 +6099,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+ return 0;
+
+ out_error:
++ nv_mgmt_release_sema(dev);
+ if (phystate_orig)
+ writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
+ out_freering:
+diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
+index 040a15a828b41..c1d7bd168f1d1 100644
+--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
++++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
+@@ -1423,7 +1423,7 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
+ write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
+ }
+
+-static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct pasemi_mac * const mac = netdev_priv(dev);
+ struct pasemi_mac_txring * const txring = tx_ring(mac);
+diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
+index 6e78a33aa5e47..ecaa9beee76eb 100644
+--- a/drivers/net/ethernet/sun/cassini.c
++++ b/drivers/net/ethernet/sun/cassini.c
+@@ -5138,6 +5138,8 @@ err_out_iounmap:
+ cas_shutdown(cp);
+ mutex_unlock(&cp->pm_mutex);
+
++ vfree(cp->fw_data);
++
+ pci_iounmap(pdev, cp->regs);
+
+
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index a33149ee0ddcf..0a5b5ff597c6f 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -437,6 +437,9 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
+ goto err;
+ }
+ skb_dst_set(skb, &rt->dst);
++
++ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++
+ err = ip_local_out(net, skb->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+ dev->stats.tx_errors++;
+@@ -475,6 +478,9 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+ goto err;
+ }
+ skb_dst_set(skb, dst);
++
++ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++
+ err = ip6_local_out(net, skb->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+ dev->stats.tx_errors++;
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index a522d1673fa87..574c17aa4b09a 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -715,9 +715,8 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
+ skb_probe_transport_header(skb);
+
+ /* Move network header to the right position for VLAN tagged packets */
+- if ((skb->protocol == htons(ETH_P_8021Q) ||
+- skb->protocol == htons(ETH_P_8021AD)) &&
+- __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
++ if (eth_type_vlan(skb->protocol) &&
++ vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
+ skb_set_network_header(skb, depth);
+
+ rcu_read_lock();
+@@ -1177,9 +1176,8 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
+ }
+
+ /* Move network header to the right position for VLAN tagged packets */
+- if ((skb->protocol == htons(ETH_P_8021Q) ||
+- skb->protocol == htons(ETH_P_8021AD)) &&
+- __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
++ if (eth_type_vlan(skb->protocol) &&
++ vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
+ skb_set_network_header(skb, depth);
+
+ rcu_read_lock();
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index da74ec778b6e7..227d97b4dc224 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1624,6 +1624,7 @@ static int team_init(struct net_device *dev)
+
+ team->dev = dev;
+ team_set_no_mode(team);
++ team->notifier_ctx = false;
+
+ team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
+ if (!team->pcpu_stats)
+@@ -3015,7 +3016,11 @@ static int team_device_event(struct notifier_block *unused,
+ team_del_slave(port->team->dev, dev);
+ break;
+ case NETDEV_FEAT_CHANGE:
+- team_compute_features(port->team);
++ if (!port->team->notifier_ctx) {
++ port->team->notifier_ctx = true;
++ team_compute_features(port->team);
++ port->team->notifier_ctx = false;
++ }
+ break;
+ case NETDEV_PRECHANGEMTU:
+ /* Forbid to change mtu of underlaying device */
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index 5bfff309f5474..b7ceea0b3204d 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -1269,13 +1269,14 @@ static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
+ {
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_wsec_pmk_le pmk;
+- int i, err;
++ int err;
++
++ memset(&pmk, 0, sizeof(pmk));
+
+- /* convert to firmware key format */
+- pmk.key_len = cpu_to_le16(pmk_len << 1);
+- pmk.flags = cpu_to_le16(BRCMF_WSEC_PASSPHRASE);
+- for (i = 0; i < pmk_len; i++)
+- snprintf(&pmk.key[2 * i], 3, "%02x", pmk_data[i]);
++ /* pass pmk directly */
++ pmk.key_len = cpu_to_le16(pmk_len);
++ pmk.flags = cpu_to_le16(0);
++ memcpy(pmk.key, pmk_data, pmk_len);
+
+ /* store psk in firmware */
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK,
+diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+index 51158edce15b0..f30fdbedd7172 100644
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+@@ -1086,6 +1086,7 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
+ {
+ __le16 key_flags;
+ struct iwl_addsta_cmd sta_cmd;
++ size_t to_copy;
+ int i;
+
+ spin_lock_bh(&priv->sta_lock);
+@@ -1105,7 +1106,9 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
+ sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
+ for (i = 0; i < 5; i++)
+ sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
+- memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
++ /* keyconf may contain MIC rx/tx keys which iwl does not use */
++ to_copy = min_t(size_t, sizeof(sta_cmd.key.key), keyconf->keylen);
++ memcpy(sta_cmd.key.key, keyconf->key, to_copy);
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+index f49887379c43f..f485c0dd75d60 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+@@ -508,6 +508,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+ struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
+
+ n_channels = __le32_to_cpu(mcc_resp->n_channels);
++ if (iwl_rx_packet_payload_len(pkt) !=
++ struct_size(mcc_resp, channels, n_channels)) {
++ resp_cp = ERR_PTR(-EINVAL);
++ goto exit;
++ }
+ resp_len = sizeof(struct iwl_mcc_update_resp) +
+ n_channels * sizeof(__le32);
+ resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
+@@ -519,6 +524,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+ struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;
+
+ n_channels = __le32_to_cpu(mcc_resp_v3->n_channels);
++ if (iwl_rx_packet_payload_len(pkt) !=
++ struct_size(mcc_resp_v3, channels, n_channels)) {
++ resp_cp = ERR_PTR(-EINVAL);
++ goto exit;
++ }
+ resp_len = sizeof(struct iwl_mcc_update_resp) +
+ n_channels * sizeof(__le32);
+ resp_cp = kzalloc(resp_len, GFP_KERNEL);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index f34297fd453c0..5153314e85554 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -1173,6 +1173,9 @@ static void iwl_pci_remove(struct pci_dev *pdev)
+ {
+ struct iwl_trans *trans = pci_get_drvdata(pdev);
+
++ if (!trans)
++ return;
++
+ iwl_drv_stop(trans->drv);
+
+ iwl_trans_pcie_free(trans);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 3a93a7b8ba0af..e7b90cf1f28cf 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -2832,7 +2832,7 @@ static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
+ void *buf, ssize_t *size,
+ ssize_t *bytes_copied)
+ {
+- int buf_size_left = count - *bytes_copied;
++ ssize_t buf_size_left = count - *bytes_copied;
+
+ buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
+ if (*size > buf_size_left)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index a6c530b9ceee0..0c813e2b9d29a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -13,10 +13,7 @@
+ #include "../dma.h"
+ #include "mac.h"
+
+-static inline s8 to_rssi(u32 field, u32 rxv)
+-{
+- return (FIELD_GET(field, rxv) - 220) / 2;
+-}
++#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
+
+ static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
+ u8 idx, bool unicast)
+diff --git a/drivers/phy/st/phy-miphy28lp.c b/drivers/phy/st/phy-miphy28lp.c
+index 068160a34f5cc..e30305b77f0d1 100644
+--- a/drivers/phy/st/phy-miphy28lp.c
++++ b/drivers/phy/st/phy-miphy28lp.c
+@@ -9,6 +9,7 @@
+
+ #include <linux/platform_device.h>
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+@@ -484,19 +485,11 @@ static inline void miphy28lp_pcie_config_gen(struct miphy28lp_phy *miphy_phy)
+
+ static inline int miphy28lp_wait_compensation(struct miphy28lp_phy *miphy_phy)
+ {
+- unsigned long finish = jiffies + 5 * HZ;
+ u8 val;
+
+ /* Waiting for Compensation to complete */
+- do {
+- val = readb_relaxed(miphy_phy->base + MIPHY_COMP_FSM_6);
+-
+- if (time_after_eq(jiffies, finish))
+- return -EBUSY;
+- cpu_relax();
+- } while (!(val & COMP_DONE));
+-
+- return 0;
++ return readb_relaxed_poll_timeout(miphy_phy->base + MIPHY_COMP_FSM_6,
++ val, val & COMP_DONE, 1, 5 * USEC_PER_SEC);
+ }
+
+
+@@ -805,7 +798,6 @@ static inline void miphy28lp_configure_usb3(struct miphy28lp_phy *miphy_phy)
+
+ static inline int miphy_is_ready(struct miphy28lp_phy *miphy_phy)
+ {
+- unsigned long finish = jiffies + 5 * HZ;
+ u8 mask = HFC_PLL | HFC_RDY;
+ u8 val;
+
+@@ -816,21 +808,14 @@ static inline int miphy_is_ready(struct miphy28lp_phy *miphy_phy)
+ if (miphy_phy->type == PHY_TYPE_SATA)
+ mask |= PHY_RDY;
+
+- do {
+- val = readb_relaxed(miphy_phy->base + MIPHY_STATUS_1);
+- if ((val & mask) != mask)
+- cpu_relax();
+- else
+- return 0;
+- } while (!time_after_eq(jiffies, finish));
+-
+- return -EBUSY;
++ return readb_relaxed_poll_timeout(miphy_phy->base + MIPHY_STATUS_1,
++ val, (val & mask) == mask, 1,
++ 5 * USEC_PER_SEC);
+ }
+
+ static int miphy_osc_is_ready(struct miphy28lp_phy *miphy_phy)
+ {
+ struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
+- unsigned long finish = jiffies + 5 * HZ;
+ u32 val;
+
+ if (!miphy_phy->osc_rdy)
+@@ -839,17 +824,10 @@ static int miphy_osc_is_ready(struct miphy28lp_phy *miphy_phy)
+ if (!miphy_phy->syscfg_reg[SYSCFG_STATUS])
+ return -EINVAL;
+
+- do {
+- regmap_read(miphy_dev->regmap,
+- miphy_phy->syscfg_reg[SYSCFG_STATUS], &val);
+-
+- if ((val & MIPHY_OSC_RDY) != MIPHY_OSC_RDY)
+- cpu_relax();
+- else
+- return 0;
+- } while (!time_after_eq(jiffies, finish));
+-
+- return -EBUSY;
++ return regmap_read_poll_timeout(miphy_dev->regmap,
++ miphy_phy->syscfg_reg[SYSCFG_STATUS],
++ val, val & MIPHY_OSC_RDY, 1,
++ 5 * USEC_PER_SEC);
+ }
+
+ static int miphy28lp_get_resource_byname(struct device_node *child,
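
The three conversions above replace open-coded jiffies polling with the <linux/iopoll.h> helpers. A minimal sketch of the pattern, assuming the usual readx_poll_timeout()-style argument order (address, value variable, stop condition, sleep in µs, timeout in µs); STATUS_REG and READY_BIT are invented names:

/* Poll a status register until READY_BIT is set, sleeping ~1us between
 * reads and giving up after 5 seconds. Returns 0 on success or -ETIMEDOUT,
 * with 'val' holding the last value read either way. */
u8 val;
int ret;

ret = readb_relaxed_poll_timeout(base + STATUS_REG, val,
				 val & READY_BIT, 1, 5 * USEC_PER_SEC);
if (ret)
	return ret;
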
+diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
+index b1a37aa388800..b45cbd40294b9 100644
+--- a/drivers/power/supply/bq27xxx_battery.c
++++ b/drivers/power/supply/bq27xxx_battery.c
+@@ -1547,7 +1547,7 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
+ return POWER_SUPPLY_HEALTH_GOOD;
+ }
+
+-void bq27xxx_battery_update(struct bq27xxx_device_info *di)
++static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
+ {
+ struct bq27xxx_reg_cache cache = {0, };
+ bool has_ci_flag = di->opts & BQ27XXX_O_ZERO;
+@@ -1597,6 +1597,16 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
+ di->cache = cache;
+
+ di->last_update = jiffies;
++
++ if (!di->removed && poll_interval > 0)
++ mod_delayed_work(system_wq, &di->work, poll_interval * HZ);
++}
++
++void bq27xxx_battery_update(struct bq27xxx_device_info *di)
++{
++ mutex_lock(&di->lock);
++ bq27xxx_battery_update_unlocked(di);
++ mutex_unlock(&di->lock);
+ }
+ EXPORT_SYMBOL_GPL(bq27xxx_battery_update);
+
+@@ -1607,9 +1617,6 @@ static void bq27xxx_battery_poll(struct work_struct *work)
+ work.work);
+
+ bq27xxx_battery_update(di);
+-
+- if (poll_interval > 0)
+- schedule_delayed_work(&di->work, poll_interval * HZ);
+ }
+
+ /*
+@@ -1770,10 +1777,8 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
+ struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
+
+ mutex_lock(&di->lock);
+- if (time_is_before_jiffies(di->last_update + 5 * HZ)) {
+- cancel_delayed_work_sync(&di->work);
+- bq27xxx_battery_poll(&di->work.work);
+- }
++ if (time_is_before_jiffies(di->last_update + 5 * HZ))
++ bq27xxx_battery_update_unlocked(di);
+ mutex_unlock(&di->lock);
+
+ if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
+@@ -1910,22 +1915,18 @@ EXPORT_SYMBOL_GPL(bq27xxx_battery_setup);
+
+ void bq27xxx_battery_teardown(struct bq27xxx_device_info *di)
+ {
+- /*
+- * power_supply_unregister call bq27xxx_battery_get_property which
+- * call bq27xxx_battery_poll.
+- * Make sure that bq27xxx_battery_poll will not call
+- * schedule_delayed_work again after unregister (which cause OOPS).
+- */
+- poll_interval = 0;
+-
+- cancel_delayed_work_sync(&di->work);
+-
+- power_supply_unregister(di->bat);
+-
+ mutex_lock(&bq27xxx_list_lock);
+ list_del(&di->list);
+ mutex_unlock(&bq27xxx_list_lock);
+
++ /* Set removed to avoid bq27xxx_battery_update() re-queuing the work */
++ mutex_lock(&di->lock);
++ di->removed = true;
++ mutex_unlock(&di->lock);
++
++ cancel_delayed_work_sync(&di->work);
++
++ power_supply_unregister(di->bat);
+ mutex_destroy(&di->lock);
+ }
+ EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown);
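
The reordered teardown above follows the usual pattern for delayed work that re-queues itself: mark the device as going away under the same lock the re-queue check takes, cancel the work synchronously, and only then unregister. A generic sketch of that ordering with hypothetical names (struct my_dev and unregister_my_dev() are not driver API):

/* Called with d->lock held, like bq27xxx_battery_update_unlocked() above. */
static void my_dev_update(struct my_dev *d)
{
	/* ... read hardware, publish results ... */
	if (!d->removed && poll_interval > 0)
		mod_delayed_work(system_wq, &d->work, poll_interval * HZ);
}

static void my_dev_teardown(struct my_dev *d)
{
	mutex_lock(&d->lock);
	d->removed = true;			/* no re-queue after this point */
	mutex_unlock(&d->lock);

	cancel_delayed_work_sync(&d->work);	/* wait out a running update */

	unregister_my_dev(d);			/* work can no longer fire */
	mutex_destroy(&d->lock);
}
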
+diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
+index 34229c1f43e31..01800cd97e3af 100644
+--- a/drivers/power/supply/bq27xxx_battery_i2c.c
++++ b/drivers/power/supply/bq27xxx_battery_i2c.c
+@@ -187,7 +187,7 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
+ i2c_set_clientdata(client, di);
+
+ if (client->irq) {
+- ret = devm_request_threaded_irq(&client->dev, client->irq,
++ ret = request_threaded_irq(client->irq,
+ NULL, bq27xxx_battery_irq_handler_thread,
+ IRQF_ONESHOT,
+ di->name, di);
+@@ -217,6 +217,7 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
+ {
+ struct bq27xxx_device_info *di = i2c_get_clientdata(client);
+
++ free_irq(client->irq, di);
+ bq27xxx_battery_teardown(di);
+
+ mutex_lock(&battery_mutex);
+diff --git a/drivers/power/supply/power_supply_leds.c b/drivers/power/supply/power_supply_leds.c
+index d69880cc35931..b7a2778f878de 100644
+--- a/drivers/power/supply/power_supply_leds.c
++++ b/drivers/power/supply/power_supply_leds.c
+@@ -34,8 +34,9 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
+ led_trigger_event(psy->charging_full_trig, LED_FULL);
+ led_trigger_event(psy->charging_trig, LED_OFF);
+ led_trigger_event(psy->full_trig, LED_FULL);
+- led_trigger_event(psy->charging_blink_full_solid_trig,
+- LED_FULL);
++ /* Going from blink to LED on requires a LED_OFF event to stop blink */
++ led_trigger_event(psy->charging_blink_full_solid_trig, LED_OFF);
++ led_trigger_event(psy->charging_blink_full_solid_trig, LED_FULL);
+ break;
+ case POWER_SUPPLY_STATUS_CHARGING:
+ led_trigger_event(psy->charging_full_trig, LED_FULL);
+diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c
+index fbfb6a6209617..a30eb42e379a4 100644
+--- a/drivers/power/supply/sbs-charger.c
++++ b/drivers/power/supply/sbs-charger.c
+@@ -25,7 +25,7 @@
+ #define SBS_CHARGER_REG_STATUS 0x13
+ #define SBS_CHARGER_REG_ALARM_WARNING 0x16
+
+-#define SBS_CHARGER_STATUS_CHARGE_INHIBITED BIT(1)
++#define SBS_CHARGER_STATUS_CHARGE_INHIBITED BIT(0)
+ #define SBS_CHARGER_STATUS_RES_COLD BIT(9)
+ #define SBS_CHARGER_STATUS_RES_HOT BIT(10)
+ #define SBS_CHARGER_STATUS_BATTERY_PRESENT BIT(14)
+diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
+index 3b0a4483a2520..c78651be8d139 100644
+--- a/drivers/s390/cio/qdio.h
++++ b/drivers/s390/cio/qdio.h
+@@ -88,15 +88,15 @@ enum qdio_irq_states {
+ static inline int do_sqbs(u64 token, unsigned char state, int queue,
+ int *start, int *count)
+ {
+- register unsigned long _ccq asm ("0") = *count;
+- register unsigned long _token asm ("1") = token;
+ unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
++ unsigned long _ccq = *count;
+
+ asm volatile(
+- " .insn rsy,0xeb000000008A,%1,0,0(%2)"
+- : "+d" (_ccq), "+d" (_queuestart)
+- : "d" ((unsigned long)state), "d" (_token)
+- : "memory", "cc");
++ " lgr 1,%[token]\n"
++ " .insn rsy,0xeb000000008a,%[qs],%[ccq],0(%[state])"
++ : [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart)
++ : [state] "a" ((unsigned long)state), [token] "d" (token)
++ : "memory", "cc", "1");
+ *count = _ccq & 0xff;
+ *start = _queuestart & 0xff;
+
+@@ -106,16 +106,17 @@ static inline int do_sqbs(u64 token, unsigned char state, int queue,
+ static inline int do_eqbs(u64 token, unsigned char *state, int queue,
+ int *start, int *count, int ack)
+ {
+- register unsigned long _ccq asm ("0") = *count;
+- register unsigned long _token asm ("1") = token;
+ unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+ unsigned long _state = (unsigned long)ack << 63;
++ unsigned long _ccq = *count;
+
+ asm volatile(
+- " .insn rrf,0xB99c0000,%1,%2,0,0"
+- : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
+- : "d" (_token)
+- : "memory", "cc");
++ " lgr 1,%[token]\n"
++ " .insn rrf,0xb99c0000,%[qs],%[state],%[ccq],0"
++ : [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart),
++ [state] "+&d" (_state)
++ : [token] "d" (token)
++ : "memory", "cc", "1");
+ *count = _ccq & 0xff;
+ *start = _queuestart & 0xff;
+ *state = _state & 0xff;
+diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
+index 5b63c505a2f7c..620655bcbe80d 100644
+--- a/drivers/s390/cio/qdio_main.c
++++ b/drivers/s390/cio/qdio_main.c
+@@ -31,38 +31,41 @@ MODULE_DESCRIPTION("QDIO base support");
+ MODULE_LICENSE("GPL");
+
+ static inline int do_siga_sync(unsigned long schid,
+- unsigned int out_mask, unsigned int in_mask,
++ unsigned long out_mask, unsigned long in_mask,
+ unsigned int fc)
+ {
+- register unsigned long __fc asm ("0") = fc;
+- register unsigned long __schid asm ("1") = schid;
+- register unsigned long out asm ("2") = out_mask;
+- register unsigned long in asm ("3") = in_mask;
+ int cc;
+
+ asm volatile(
++ " lgr 0,%[fc]\n"
++ " lgr 1,%[schid]\n"
++ " lgr 2,%[out]\n"
++ " lgr 3,%[in]\n"
+ " siga 0\n"
+- " ipm %0\n"
+- " srl %0,28\n"
+- : "=d" (cc)
+- : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
++ " ipm %[cc]\n"
++ " srl %[cc],28\n"
++ : [cc] "=&d" (cc)
++ : [fc] "d" (fc), [schid] "d" (schid),
++ [out] "d" (out_mask), [in] "d" (in_mask)
++ : "cc", "0", "1", "2", "3");
+ return cc;
+ }
+
+-static inline int do_siga_input(unsigned long schid, unsigned int mask,
+- unsigned int fc)
++static inline int do_siga_input(unsigned long schid, unsigned long mask,
++ unsigned long fc)
+ {
+- register unsigned long __fc asm ("0") = fc;
+- register unsigned long __schid asm ("1") = schid;
+- register unsigned long __mask asm ("2") = mask;
+ int cc;
+
+ asm volatile(
++ " lgr 0,%[fc]\n"
++ " lgr 1,%[schid]\n"
++ " lgr 2,%[mask]\n"
+ " siga 0\n"
+- " ipm %0\n"
+- " srl %0,28\n"
+- : "=d" (cc)
+- : "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
++ " ipm %[cc]\n"
++ " srl %[cc],28\n"
++ : [cc] "=&d" (cc)
++ : [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask)
++ : "cc", "0", "1", "2");
+ return cc;
+ }
+
+@@ -78,23 +81,24 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask,
+ * Note: For IQDC unicast queues only the highest priority queue is processed.
+ */
+ static inline int do_siga_output(unsigned long schid, unsigned long mask,
+- unsigned int *bb, unsigned int fc,
++ unsigned int *bb, unsigned long fc,
+ unsigned long aob)
+ {
+- register unsigned long __fc asm("0") = fc;
+- register unsigned long __schid asm("1") = schid;
+- register unsigned long __mask asm("2") = mask;
+- register unsigned long __aob asm("3") = aob;
+ int cc;
+
+ asm volatile(
++ " lgr 0,%[fc]\n"
++ " lgr 1,%[schid]\n"
++ " lgr 2,%[mask]\n"
++ " lgr 3,%[aob]\n"
+ " siga 0\n"
+- " ipm %0\n"
+- " srl %0,28\n"
+- : "=d" (cc), "+d" (__fc), "+d" (__aob)
+- : "d" (__schid), "d" (__mask)
+- : "cc");
+- *bb = __fc >> 31;
++ " lgr %[fc],0\n"
++ " ipm %[cc]\n"
++ " srl %[cc],28\n"
++ : [cc] "=&d" (cc), [fc] "+&d" (fc)
++ : [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob)
++ : "cc", "0", "1", "2", "3");
++ *bb = fc >> 31;
+ return cc;
+ }
+
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index 69551132f304c..291fccf02d453 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -2046,6 +2046,7 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
+ char mybuf[64];
+ char *pbuf;
+ int i;
++ size_t bsize;
+
+ /* Protect copy from user */
+ if (!access_ok(buf, nbytes))
+@@ -2053,7 +2054,9 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
+
+ memset(mybuf, 0, sizeof(mybuf));
+
+- if (copy_from_user(mybuf, buf, nbytes))
++ bsize = min(nbytes, (sizeof(mybuf) - 1));
++
++ if (copy_from_user(mybuf, buf, bsize))
+ return -EFAULT;
+ pbuf = &mybuf[0];
+
+@@ -2074,7 +2077,7 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
+ qp->lock_conflict.wq_access = 0;
+ }
+ }
+- return nbytes;
++ return bsize;
+ }
+ #endif
+
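
The hunk above clamps the copy length to sizeof(mybuf) - 1 before copy_from_user() and returns the clamped count, so the local buffer always stays NUL-terminated. The same clamp in stand-alone form (BUF_SIZE stands in for the 64-byte buffer):

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 64

/* Copy at most BUF_SIZE - 1 bytes and NUL-terminate, mirroring
 * bsize = min(nbytes, sizeof(mybuf) - 1) in the hunk above. */
static size_t bounded_copy(char *dst, const char *src, size_t nbytes)
{
	size_t bsize = nbytes < BUF_SIZE - 1 ? nbytes : BUF_SIZE - 1;

	memcpy(dst, src, bsize);
	dst[bsize] = '\0';
	return bsize;
}

int main(void)
{
	char buf[BUF_SIZE];
	size_t n = bounded_copy(buf, "reset\n", 6);

	printf("copied %zu bytes: %s", n, buf);
	return 0;
}
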
+diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
+index 858f0544289e6..c7079a9cd577f 100644
+--- a/drivers/spi/spi-fsl-cpm.c
++++ b/drivers/spi/spi-fsl-cpm.c
+@@ -21,6 +21,7 @@
+ #include <linux/spi/spi.h>
+ #include <linux/types.h>
+ #include <linux/platform_device.h>
++#include <linux/byteorder/generic.h>
+
+ #include "spi-fsl-cpm.h"
+ #include "spi-fsl-lib.h"
+@@ -120,6 +121,21 @@ int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
+ mspi->rx_dma = mspi->dma_dummy_rx;
+ mspi->map_rx_dma = 0;
+ }
++ if (t->bits_per_word == 16 && t->tx_buf) {
++ const u16 *src = t->tx_buf;
++ u16 *dst;
++ int i;
++
++ dst = kmalloc(t->len, GFP_KERNEL);
++ if (!dst)
++ return -ENOMEM;
++
++ for (i = 0; i < t->len >> 1; i++)
++ dst[i] = cpu_to_le16p(src + i);
++
++ mspi->tx = dst;
++ mspi->map_tx_dma = 1;
++ }
+
+ if (mspi->map_tx_dma) {
+ void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */
+@@ -173,6 +189,13 @@ void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
+ if (mspi->map_rx_dma)
+ dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
+ mspi->xfer_in_progress = NULL;
++
++ if (t->bits_per_word == 16 && t->rx_buf) {
++ int i;
++
++ for (i = 0; i < t->len; i += 2)
++ le16_to_cpus(t->rx_buf + i);
++ }
+ }
+ EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
+
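
The CPM/QE block transfers words wider than 8 bits in little-endian order, so the hunks above byte-swap 16-bit TX words into a bounce buffer before DMA and swap the RX words back in place afterwards. A stand-alone sketch of the same conversion, using glibc's <endian.h> helpers in place of cpu_to_le16p()/le16_to_cpus():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Convert a buffer of 16-bit words to little-endian for the controller;
 * len is in bytes, as in the driver. */
static uint16_t *tx_to_le16(const uint16_t *src, size_t len)
{
	uint16_t *dst = malloc(len);
	size_t i;

	if (!dst)
		return NULL;
	for (i = 0; i < len / 2; i++)
		dst[i] = htole16(src[i]);
	return dst;
}

/* Convert received little-endian words back to CPU order in place. */
static void rx_from_le16(uint16_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len / 2; i++)
		buf[i] = le16toh(buf[i]);
}

int main(void)
{
	uint16_t words[2] = { 0x1234, 0xabcd };
	uint16_t *tx = tx_to_le16(words, sizeof(words));

	if (!tx)
		return 1;
	printf("0x%04x 0x%04x\n", tx[0], tx[1]);	/* swapped on big-endian hosts */
	rx_from_le16(tx, sizeof(words));
	printf("0x%04x 0x%04x\n", tx[0], tx[1]);	/* back to 0x1234 0xabcd */
	free(tx);
	return 0;
}
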
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index ae805f91eafa5..8006759c1a0c6 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -204,26 +204,6 @@ static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
+ return bits_per_word;
+ }
+
+-static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
+- struct spi_device *spi,
+- int bits_per_word)
+-{
+- /* CPM/QE uses Little Endian for words > 8
+- * so transform 16 and 32 bits words into 8 bits
+- * Unfortnatly that doesn't work for LSB so
+- * reject these for now */
+- /* Note: 32 bits word, LSB works iff
+- * tfcr/rfcr is set to CPMFCR_GBL */
+- if (spi->mode & SPI_LSB_FIRST &&
+- bits_per_word > 8)
+- return -EINVAL;
+- if (bits_per_word <= 8)
+- return bits_per_word;
+- if (bits_per_word == 16 || bits_per_word == 32)
+- return 8; /* pretend its 8 bits */
+- return -EINVAL;
+-}
+-
+ static int fsl_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+ {
+@@ -251,9 +231,6 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
+ bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
+ mpc8xxx_spi,
+ bits_per_word);
+- else
+- bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
+- bits_per_word);
+
+ if (bits_per_word < 0)
+ return bits_per_word;
+@@ -371,14 +348,30 @@ static int fsl_spi_do_one_msg(struct spi_master *master,
+ * In CPU mode, optimize large byte transfers to use larger
+ * bits_per_word values to reduce number of interrupts taken.
+ */
+- if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) {
+- list_for_each_entry(t, &m->transfers, transfer_list) {
++ list_for_each_entry(t, &m->transfers, transfer_list) {
++ if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) {
+ if (t->len < 256 || t->bits_per_word != 8)
+ continue;
+ if ((t->len & 3) == 0)
+ t->bits_per_word = 32;
+ else if ((t->len & 1) == 0)
+ t->bits_per_word = 16;
++ } else {
++ /*
++ * CPM/QE uses Little Endian for words > 8
++ * so transform 16 and 32 bits words into 8 bits
++			 * Unfortunately that doesn't work for LSB so
++ * reject these for now
++ * Note: 32 bits word, LSB works iff
++ * tfcr/rfcr is set to CPMFCR_GBL
++ */
++ if (m->spi->mode & SPI_LSB_FIRST && t->bits_per_word > 8)
++ return -EINVAL;
++ if (t->bits_per_word == 16 || t->bits_per_word == 32)
++ t->bits_per_word = 8; /* pretend its 8 bits */
++ if (t->bits_per_word == 8 && t->len >= 256 &&
++ (mpc8xxx_spi->flags & SPI_CPM1))
++ t->bits_per_word = 16;
+ }
+ }
+
+@@ -637,8 +630,14 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
+ if (mpc8xxx_spi->type == TYPE_GRLIB)
+ fsl_spi_grlib_probe(dev);
+
+- master->bits_per_word_mask =
+- (SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32)) &
++ if (mpc8xxx_spi->flags & SPI_CPM_MODE)
++ master->bits_per_word_mask =
++ (SPI_BPW_RANGE_MASK(4, 8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(32));
++ else
++ master->bits_per_word_mask =
++ (SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32));
++
++ master->bits_per_word_mask &=
+ SPI_BPW_RANGE_MASK(1, mpc8xxx_spi->max_bits_per_word);
+
+ if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
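
The probe change above narrows bits_per_word_mask in CPM/QE mode to 4-8, 16 and 32 bits, while CPU mode keeps the old 4-16 plus 32. Assuming the usual definitions (SPI_BPW_MASK(b) sets bit b-1, SPI_BPW_RANGE_MASK(min, max) sets bits min-1..max-1), the two masks work out as in this stand-alone check:

#include <stdio.h>
#include <stdint.h>

/* Illustrative re-implementation of the SPI core helpers: bit n-1 set
 * means "n bits per word is supported". */
#define BPW_MASK(bits)	(1u << ((bits) - 1))

static uint32_t bpw_range_mask(unsigned int min, unsigned int max)
{
	uint32_t m = 0;
	unsigned int b;

	for (b = min; b <= max; b++)
		m |= BPW_MASK(b);
	return m;
}

int main(void)
{
	uint32_t cpm = bpw_range_mask(4, 8) | BPW_MASK(16) | BPW_MASK(32);
	uint32_t cpu = bpw_range_mask(4, 16) | BPW_MASK(32);

	printf("CPM/QE mask: 0x%08x\n", cpm);	/* 0x800080f8 */
	printf("CPU mask:    0x%08x\n", cpu);	/* 0x8000fff8 */
	return 0;
}
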
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 9d593675257e0..67f31183c1180 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -239,6 +239,18 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
+ return true;
+ }
+
++/*
++ * Note the number of natively supported chip selects for MX51 is 4. Some
++ * devices may have fewer actual SS pins but the register map supports 4. When
++ * using gpio chip selects the cs values passed into the macros below can go
++ * outside the range 0 - 3. We therefore need to limit the cs value to avoid
++ * corrupting bits outside the allocated locations.
++ *
++ * The simplest way to do this is to just mask the cs bits to 2 bits. This
++ * still allows all 4 native chip selects to work as well as gpio chip selects
++ * (which can use any of the 4 chip select configurations).
++ */
++
+ #define MX51_ECSPI_CTRL 0x08
+ #define MX51_ECSPI_CTRL_ENABLE (1 << 0)
+ #define MX51_ECSPI_CTRL_XCH (1 << 2)
+@@ -247,16 +259,16 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
+ #define MX51_ECSPI_CTRL_DRCTL(drctl) ((drctl) << 16)
+ #define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
+ #define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
+-#define MX51_ECSPI_CTRL_CS(cs) ((cs) << 18)
++#define MX51_ECSPI_CTRL_CS(cs) ((cs & 3) << 18)
+ #define MX51_ECSPI_CTRL_BL_OFFSET 20
+ #define MX51_ECSPI_CTRL_BL_MASK (0xfff << 20)
+
+ #define MX51_ECSPI_CONFIG 0x0c
+-#define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0))
+-#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4))
+-#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8))
+-#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs) + 12))
+-#define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs) + 20))
++#define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs & 3) + 0))
++#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs & 3) + 4))
++#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs & 3) + 8))
++#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs & 3) + 12))
++#define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs & 3) + 20))
+
+ #define MX51_ECSPI_INT 0x10
+ #define MX51_ECSPI_INT_TEEN (1 << 0)
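
The new comment and the (cs & 3) masks above keep a GPIO chip-select number from shifting bits past the four configurations the MX51 register map provides. A small stand-alone check of the two macro forms with a made-up cs value of 5:

#include <stdio.h>
#include <stdint.h>

/* Illustrative copies of the chip-select field macro, with and without
 * the mask added in the hunk above. */
#define CTRL_CS_UNMASKED(cs)	((uint32_t)(cs) << 18)
#define CTRL_CS_MASKED(cs)	((uint32_t)((cs) & 3) << 18)

int main(void)
{
	unsigned int cs = 5;	/* hypothetical GPIO chip select outside 0-3 */

	/* Unmasked, bit 20 (the start of the burst-length field) is corrupted;
	 * masked, cs 5 folds onto configuration 1 and stays within bits 18-19. */
	printf("unmasked: 0x%08x\n", CTRL_CS_UNMASKED(cs));	/* 0x00140000 */
	printf("masked:   0x%08x\n", CTRL_CS_MASKED(cs));	/* 0x00040000 */
	return 0;
}
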
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+index be377e75703bf..ca3cea27489b2 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+@@ -50,9 +50,9 @@ static const struct rtl819x_ops rtl819xp_ops = {
+ };
+
+ static struct pci_device_id rtl8192_pci_id_tbl[] = {
+- {RTL_PCI_DEVICE(0x10ec, 0x8192, rtl819xp_ops)},
+- {RTL_PCI_DEVICE(0x07aa, 0x0044, rtl819xp_ops)},
+- {RTL_PCI_DEVICE(0x07aa, 0x0047, rtl819xp_ops)},
++ {PCI_DEVICE(0x10ec, 0x8192)},
++ {PCI_DEVICE(0x07aa, 0x0044)},
++ {PCI_DEVICE(0x07aa, 0x0047)},
+ {}
+ };
+
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+index 736f1a824cd2e..7bbd884aa5f13 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+@@ -55,11 +55,6 @@
+ #define IS_HARDWARE_TYPE_8192SE(_priv) \
+ (((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8192SE)
+
+-#define RTL_PCI_DEVICE(vend, dev, cfg) \
+- .vendor = (vend), .device = (dev), \
+- .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
+- .driver_data = (kernel_ulong_t)&(cfg)
+-
+ #define TOTAL_CAM_ENTRY 32
+ #define CAM_CONTENT_COUNT 8
+
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 8b8afa95fbbae..ab2f0ceb1e23b 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -4385,6 +4385,9 @@ int iscsit_close_session(struct iscsi_session *sess)
+ iscsit_stop_time2retain_timer(sess);
+ spin_unlock_bh(&se_tpg->session_lock);
+
++ if (sess->sess_ops->ErrorRecoveryLevel == 2)
++ iscsit_free_connection_recovery_entries(sess);
++
+ /*
+ * transport_deregister_session_configfs() will clear the
+ * struct se_node_acl->nacl_sess pointer now as a iscsi_np process context
+@@ -4413,9 +4416,6 @@ int iscsit_close_session(struct iscsi_session *sess)
+
+ transport_deregister_session(sess->se_sess);
+
+- if (sess->sess_ops->ErrorRecoveryLevel == 2)
+- iscsit_free_connection_recovery_entries(sess);
+-
+ iscsit_free_all_ooo_cmdsns(sess);
+
+ spin_lock_bh(&se_tpg->session_lock);
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index 2675771a03a0d..d7afff1e7685f 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -1138,6 +1138,7 @@ void serial8250_unregister_port(int line)
+ uart->port.type = PORT_UNKNOWN;
+ uart->port.dev = &serial8250_isa_devs->dev;
+ uart->capabilities = 0;
++ serial8250_init_port(uart);
+ serial8250_apply_quirks(uart);
+ uart_add_one_port(&serial8250_reg, &uart->port);
+ } else {
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 92e2ee2785239..4a3991ac2dd06 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1837,6 +1837,8 @@ pci_moxa_setup(struct serial_private *priv,
+ #define PCI_SUBDEVICE_ID_SIIG_DUAL_30 0x2530
+ #define PCI_VENDOR_ID_ADVANTECH 0x13fe
+ #define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66
++#define PCI_DEVICE_ID_ADVANTECH_PCI1600 0x1600
++#define PCI_DEVICE_ID_ADVANTECH_PCI1600_1611 0x1611
+ #define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620
+ #define PCI_DEVICE_ID_ADVANTECH_PCI3618 0x3618
+ #define PCI_DEVICE_ID_ADVANTECH_PCIf618 0xf618
+@@ -4157,6 +4159,9 @@ static SIMPLE_DEV_PM_OPS(pciserial_pm_ops, pciserial_suspend_one,
+ pciserial_resume_one);
+
+ static const struct pci_device_id serial_pci_tbl[] = {
++ { PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI1600,
++ PCI_DEVICE_ID_ADVANTECH_PCI1600_1611, PCI_ANY_ID, 0, 0,
++ pbn_b0_4_921600 },
+ /* Advantech use PCI_DEVICE_ID_ADVANTECH_PCI3620 (0x3620) as 'PCI_SUBVENDOR_ID' */
+ { PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI3620,
+ PCI_DEVICE_ID_ADVANTECH_PCI3620, 0x0001, 0, 0,
+diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
+index d904a3a345e74..dd4be3c8c049c 100644
+--- a/drivers/tty/serial/arc_uart.c
++++ b/drivers/tty/serial/arc_uart.c
+@@ -613,10 +613,11 @@ static int arc_serial_probe(struct platform_device *pdev)
+ }
+ uart->baud = val;
+
+- port->membase = of_iomap(np, 0);
+- if (!port->membase)
++ port->membase = devm_platform_ioremap_resource(pdev, 0);
++ if (IS_ERR(port->membase)) {
+ /* No point of dev_err since UART itself is hosed here */
+- return -ENXIO;
++ return PTR_ERR(port->membase);
++ }
+
+ port->irq = irq_of_parse_and_map(np, 0);
+
+diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
+index 90de3331e4a51..a6813e3393ece 100644
+--- a/drivers/tty/vt/vc_screen.c
++++ b/drivers/tty/vt/vc_screen.c
+@@ -200,39 +200,47 @@ vcs_vc(struct inode *inode, int *viewed)
+ return vc_cons[currcons].d;
+ }
+
+-/*
+- * Returns size for VC carried by inode.
++/**
++ * vcs_size -- return size for a VC in @vc
++ * @vc: which VC
++ * @attr: does it use attributes?
++ * @unicode: is it unicode?
++ *
+ * Must be called with console_lock.
+ */
+-static int
+-vcs_size(struct inode *inode)
++static int vcs_size(const struct vc_data *vc, bool attr, bool unicode)
+ {
+ int size;
+- struct vc_data *vc;
+
+ WARN_CONSOLE_UNLOCKED();
+
+- vc = vcs_vc(inode, NULL);
+- if (!vc)
+- return -ENXIO;
+-
+ size = vc->vc_rows * vc->vc_cols;
+
+- if (use_attributes(inode)) {
+- if (use_unicode(inode))
++ if (attr) {
++ if (unicode)
+ return -EOPNOTSUPP;
+- size = 2*size + HEADER_SIZE;
+- } else if (use_unicode(inode))
++
++ size = 2 * size + HEADER_SIZE;
++ } else if (unicode)
+ size *= 4;
++
+ return size;
+ }
+
+ static loff_t vcs_lseek(struct file *file, loff_t offset, int orig)
+ {
++ struct inode *inode = file_inode(file);
++ struct vc_data *vc;
+ int size;
+
+ console_lock();
+- size = vcs_size(file_inode(file));
++ vc = vcs_vc(inode, NULL);
++ if (!vc) {
++ console_unlock();
++ return -ENXIO;
++ }
++
++ size = vcs_size(vc, use_attributes(inode), use_unicode(inode));
+ console_unlock();
+ if (size < 0)
+ return size;
+@@ -294,7 +302,7 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+ * as copy_to_user at the end of this loop
+ * could sleep.
+ */
+- size = vcs_size(inode);
++ size = vcs_size(vc, attr, uni_mode);
+ if (size < 0) {
+ ret = size;
+ break;
+@@ -476,7 +484,11 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+ if (!vc)
+ goto unlock_out;
+
+- size = vcs_size(inode);
++ size = vcs_size(vc, attr, false);
++ if (size < 0) {
++ ret = size;
++ goto unlock_out;
++ }
+ ret = -EINVAL;
+ if (pos < 0 || pos > size)
+ goto unlock_out;
+@@ -511,11 +523,18 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+ }
+ }
+
+- /* The vcs_size might have changed while we slept to grab
+- * the user buffer, so recheck.
++ /* The vc might have been freed or vcs_size might have changed
++ * while we slept to grab the user buffer, so recheck.
+ * Return data written up to now on failure.
+ */
+- size = vcs_size(inode);
++ vc = vcs_vc(inode, &viewed);
++ if (!vc) {
++ if (written)
++ break;
++ ret = -ENXIO;
++ goto unlock_out;
++ }
++ size = vcs_size(vc, attr, false);
+ if (size < 0) {
+ if (written)
+ break;
+diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
+index 77b1802f829b3..fdfa2da24287a 100644
+--- a/drivers/usb/class/usbtmc.c
++++ b/drivers/usb/class/usbtmc.c
+@@ -1898,6 +1898,8 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
+
+ if (request.req.wLength > USBTMC_BUFSIZE)
+ return -EMSGSIZE;
++ if (request.req.wLength == 0) /* Length-0 requests are never IN */
++ request.req.bRequestType &= ~USB_DIR_IN;
+
+ is_in = request.req.bRequestType & USB_DIR_IN;
+
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index f16c26dc079d7..502d911f71fa6 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -208,6 +208,82 @@ int usb_find_common_endpoints_reverse(struct usb_host_interface *alt,
+ }
+ EXPORT_SYMBOL_GPL(usb_find_common_endpoints_reverse);
+
++/**
++ * usb_find_endpoint() - Given an endpoint address, search for the endpoint's
++ * usb_host_endpoint structure in an interface's current altsetting.
++ * @intf: the interface whose current altsetting should be searched
++ * @ep_addr: the endpoint address (number and direction) to find
++ *
++ * Search the altsetting's list of endpoints for one with the specified address.
++ *
++ * Return: Pointer to the usb_host_endpoint if found, %NULL otherwise.
++ */
++static const struct usb_host_endpoint *usb_find_endpoint(
++ const struct usb_interface *intf, unsigned int ep_addr)
++{
++ int n;
++ const struct usb_host_endpoint *ep;
++
++ n = intf->cur_altsetting->desc.bNumEndpoints;
++ ep = intf->cur_altsetting->endpoint;
++ for (; n > 0; (--n, ++ep)) {
++ if (ep->desc.bEndpointAddress == ep_addr)
++ return ep;
++ }
++ return NULL;
++}
++
++/**
++ * usb_check_bulk_endpoints - Check whether an interface's current altsetting
++ * contains a set of bulk endpoints with the given addresses.
++ * @intf: the interface whose current altsetting should be searched
++ * @ep_addrs: 0-terminated array of the endpoint addresses (number and
++ * direction) to look for
++ *
++ * Search for endpoints with the specified addresses and check their types.
++ *
++ * Return: %true if all the endpoints are found and are bulk, %false otherwise.
++ */
++bool usb_check_bulk_endpoints(
++ const struct usb_interface *intf, const u8 *ep_addrs)
++{
++ const struct usb_host_endpoint *ep;
++
++ for (; *ep_addrs; ++ep_addrs) {
++ ep = usb_find_endpoint(intf, *ep_addrs);
++ if (!ep || !usb_endpoint_xfer_bulk(&ep->desc))
++ return false;
++ }
++ return true;
++}
++EXPORT_SYMBOL_GPL(usb_check_bulk_endpoints);
++
++/**
++ * usb_check_int_endpoints - Check whether an interface's current altsetting
++ * contains a set of interrupt endpoints with the given addresses.
++ * @intf: the interface whose current altsetting should be searched
++ * @ep_addrs: 0-terminated array of the endpoint addresses (number and
++ * direction) to look for
++ *
++ * Search for endpoints with the specified addresses and check their types.
++ *
++ * Return: %true if all the endpoints are found and are interrupt,
++ * %false otherwise.
++ */
++bool usb_check_int_endpoints(
++ const struct usb_interface *intf, const u8 *ep_addrs)
++{
++ const struct usb_host_endpoint *ep;
++
++ for (; *ep_addrs; ++ep_addrs) {
++ ep = usb_find_endpoint(intf, *ep_addrs);
++ if (!ep || !usb_endpoint_xfer_int(&ep->desc))
++ return false;
++ }
++ return true;
++}
++EXPORT_SYMBOL_GPL(usb_check_int_endpoints);
++
+ /**
+ * usb_find_alt_setting() - Given a configuration, find the alternate setting
+ * for the given interface.
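
The two helpers added above let a driver confirm in one call that all of its expected endpoints exist with the right transfer type before it trusts them; the sisusbvga hunk later in this patch is the in-tree caller. A minimal hedged sketch of the same probe-time check, with invented endpoint addresses:

/* The 0-terminated address list and my_probe() are illustrative only. */
static const u8 ep_addrs[] = {
	0x02 | USB_DIR_OUT,	/* bulk out */
	0x01 | USB_DIR_IN,	/* bulk in */
	0
};

static int my_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	if (!usb_check_bulk_endpoints(intf, ep_addrs)) {
		dev_err(&intf->dev, "missing or non-bulk endpoints\n");
		return -ENODEV;
	}
	/* ... continue normal probing ... */
	return 0;
}
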
+diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
+index 9fb519b2efb45..9bc46aec08a39 100644
+--- a/drivers/usb/dwc3/debugfs.c
++++ b/drivers/usb/dwc3/debugfs.c
+@@ -327,6 +327,11 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused)
+ unsigned int current_mode;
+ unsigned long flags;
+ u32 reg;
++ int ret;
++
++ ret = pm_runtime_resume_and_get(dwc->dev);
++ if (ret < 0)
++ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ reg = dwc3_readl(dwc->regs, DWC3_GSTS);
+@@ -345,6 +350,8 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused)
+ }
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
++ pm_runtime_put_sync(dwc->dev);
++
+ return 0;
+ }
+
+@@ -390,6 +397,11 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
+ struct dwc3 *dwc = s->private;
+ unsigned long flags;
+ u32 reg;
++ int ret;
++
++ ret = pm_runtime_resume_and_get(dwc->dev);
++ if (ret < 0)
++ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+@@ -409,6 +421,8 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
+ seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg));
+ }
+
++ pm_runtime_put_sync(dwc->dev);
++
+ return 0;
+ }
+
+@@ -455,6 +469,11 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
+ struct dwc3 *dwc = s->private;
+ unsigned long flags;
+ u32 reg;
++ int ret;
++
++ ret = pm_runtime_resume_and_get(dwc->dev);
++ if (ret < 0)
++ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+@@ -485,6 +504,8 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
+ seq_printf(s, "UNKNOWN %d\n", reg);
+ }
+
++ pm_runtime_put_sync(dwc->dev);
++
+ return 0;
+ }
+
+@@ -501,6 +522,7 @@ static ssize_t dwc3_testmode_write(struct file *file,
+ unsigned long flags;
+ u32 testmode = 0;
+ char buf[32];
++ int ret;
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+@@ -518,10 +540,16 @@ static ssize_t dwc3_testmode_write(struct file *file,
+ else
+ testmode = 0;
+
++ ret = pm_runtime_resume_and_get(dwc->dev);
++ if (ret < 0)
++ return ret;
++
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_gadget_set_test_mode(dwc, testmode);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
++ pm_runtime_put_sync(dwc->dev);
++
+ return count;
+ }
+
+@@ -540,12 +568,18 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
+ enum dwc3_link_state state;
+ u32 reg;
+ u8 speed;
++ int ret;
++
++ ret = pm_runtime_resume_and_get(dwc->dev);
++ if (ret < 0)
++ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ reg = dwc3_readl(dwc->regs, DWC3_GSTS);
+ if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
+ seq_puts(s, "Not available\n");
+ spin_unlock_irqrestore(&dwc->lock, flags);
++ pm_runtime_put_sync(dwc->dev);
+ return 0;
+ }
+
+@@ -558,6 +592,8 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
+ dwc3_gadget_hs_link_string(state));
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
++ pm_runtime_put_sync(dwc->dev);
++
+ return 0;
+ }
+
+@@ -576,6 +612,7 @@ static ssize_t dwc3_link_state_write(struct file *file,
+ char buf[32];
+ u32 reg;
+ u8 speed;
++ int ret;
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+@@ -595,10 +632,15 @@ static ssize_t dwc3_link_state_write(struct file *file,
+ else
+ return -EINVAL;
+
++ ret = pm_runtime_resume_and_get(dwc->dev);
++ if (ret < 0)
++ return ret;
++
+ spin_lock_irqsave(&dwc->lock, flags);
+ reg = dwc3_readl(dwc->regs, DWC3_GSTS);
+ if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
++ pm_runtime_put_sync(dwc->dev);
+ return -EINVAL;
+ }
+
+@@ -608,12 +650,15 @@ static ssize_t dwc3_link_state_write(struct file *file,
+ if (speed < DWC3_DSTS_SUPERSPEED &&
+ state != DWC3_LINK_STATE_RECOV) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
++ pm_runtime_put_sync(dwc->dev);
+ return -EINVAL;
+ }
+
+ dwc3_gadget_set_link_state(dwc, state);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
++ pm_runtime_put_sync(dwc->dev);
++
+ return count;
+ }
+
+@@ -636,6 +681,11 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
++ int ret;
++
++ ret = pm_runtime_resume_and_get(dwc->dev);
++ if (ret < 0)
++ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_TXFIFO);
+@@ -646,6 +696,8 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
++ pm_runtime_put_sync(dwc->dev);
++
+ return 0;
+ }
+
+@@ -655,6 +707,11 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
++ int ret;
++
++ ret = pm_runtime_resume_and_get(dwc->dev);
++ if (ret < 0)
++ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_RXFIFO);
+@@ -665,6 +722,8 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
++ pm_runtime_put_sync(dwc->dev);
++
+ return 0;
+ }
+
+@@ -674,12 +733,19 @@ static int dwc3_tx_request_queue_show(struct seq_file *s, void *unused)
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
++ int ret;
++
++ ret = pm_runtime_resume_and_get(dwc->dev);
++ if (ret < 0)
++ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_TXREQQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
++ pm_runtime_put_sync(dwc->dev);
++
+ return 0;
+ }
+
+@@ -689,12 +755,19 @@ static int dwc3_rx_request_queue_show(struct seq_file *s, void *unused)
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
++ int ret;
++
++ ret = pm_runtime_resume_and_get(dwc->dev);
++ if (ret < 0)
++ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_RXREQQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
++ pm_runtime_put_sync(dwc->dev);
++
+ return 0;
+ }
+
+@@ -704,12 +777,19 @@ static int dwc3_rx_info_queue_show(struct seq_file *s, void *unused)
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
++ int ret;
++
++ ret = pm_runtime_resume_and_get(dwc->dev);
++ if (ret < 0)
++ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_RXINFOQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
++ pm_runtime_put_sync(dwc->dev);
++
+ return 0;
+ }
+
+@@ -719,12 +799,19 @@ static int dwc3_descriptor_fetch_queue_show(struct seq_file *s, void *unused)
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
++ int ret;
++
++ ret = pm_runtime_resume_and_get(dwc->dev);
++ if (ret < 0)
++ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_DESCFETCHQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
++ pm_runtime_put_sync(dwc->dev);
++
+ return 0;
+ }
+
+@@ -734,12 +821,19 @@ static int dwc3_event_queue_show(struct seq_file *s, void *unused)
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
++ int ret;
++
++ ret = pm_runtime_resume_and_get(dwc->dev);
++ if (ret < 0)
++ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_EVENTQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
++ pm_runtime_put_sync(dwc->dev);
++
+ return 0;
+ }
+
+@@ -785,6 +879,11 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ int i;
++ int ret;
++
++ ret = pm_runtime_resume_and_get(dwc->dev);
++ if (ret < 0)
++ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ if (dep->number <= 1) {
+@@ -814,6 +913,8 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
+ out:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
++ pm_runtime_put_sync(dwc->dev);
++
+ return 0;
+ }
+
+@@ -826,6 +927,11 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
+ u32 lower_32_bits;
+ u32 upper_32_bits;
+ u32 reg;
++ int ret;
++
++ ret = pm_runtime_resume_and_get(dwc->dev);
++ if (ret < 0)
++ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ reg = DWC3_GDBGLSPMUX_EPSELECT(dep->number);
+@@ -838,6 +944,8 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
+ seq_printf(s, "0x%016llx\n", ep_info);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
++ pm_runtime_put_sync(dwc->dev);
++
+ return 0;
+ }
+
+@@ -899,6 +1007,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
+ dwc->regset->regs = dwc3_regs;
+ dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
+ dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
++ dwc->regset->dev = dwc->dev;
+
+ root = debugfs_create_dir(dev_name(dwc->dev), NULL);
+ dwc->root = root;
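
Every show/write handler in the hunk above is now bracketed by pm_runtime_resume_and_get()/pm_runtime_put_sync(), so the debugfs code only touches registers while the controller is powered. A minimal sketch of that bracket with illustrative names (my_show(), struct my_dev):

static int my_show(struct seq_file *s, void *unused)
{
	struct my_dev *d = s->private;
	int ret;

	ret = pm_runtime_resume_and_get(d->dev);	/* power up, or fail with < 0 */
	if (ret < 0)
		return ret;

	/* ... registers are safe to read here ... */

	pm_runtime_put_sync(d->dev);			/* drop the usage count */
	return 0;
}
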
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index 3f053b11e2cee..af3cb3bfdc29a 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -17,6 +17,8 @@
+ #include <linux/etherdevice.h>
+ #include <linux/ethtool.h>
+ #include <linux/if_vlan.h>
++#include <linux/string_helpers.h>
++#include <linux/usb/composite.h>
+
+ #include "u_ether.h"
+
+@@ -102,41 +104,6 @@ static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
+
+ /*-------------------------------------------------------------------------*/
+
+-/* REVISIT there must be a better way than having two sets
+- * of debug calls ...
+- */
+-
+-#undef DBG
+-#undef VDBG
+-#undef ERROR
+-#undef INFO
+-
+-#define xprintk(d, level, fmt, args...) \
+- printk(level "%s: " fmt , (d)->net->name , ## args)
+-
+-#ifdef DEBUG
+-#undef DEBUG
+-#define DBG(dev, fmt, args...) \
+- xprintk(dev , KERN_DEBUG , fmt , ## args)
+-#else
+-#define DBG(dev, fmt, args...) \
+- do { } while (0)
+-#endif /* DEBUG */
+-
+-#ifdef VERBOSE_DEBUG
+-#define VDBG DBG
+-#else
+-#define VDBG(dev, fmt, args...) \
+- do { } while (0)
+-#endif /* DEBUG */
+-
+-#define ERROR(dev, fmt, args...) \
+- xprintk(dev , KERN_ERR , fmt , ## args)
+-#define INFO(dev, fmt, args...) \
+- xprintk(dev , KERN_INFO , fmt , ## args)
+-
+-/*-------------------------------------------------------------------------*/
+-
+ /* NETWORK DRIVER HOOKUP (to the layer above this driver) */
+
+ static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
+@@ -974,6 +941,8 @@ int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
+ dev = netdev_priv(net);
+ snprintf(host_addr, len, "%pm", dev->host_mac);
+
++ string_upper(host_addr, host_addr);
++
+ return strlen(host_addr);
+ }
+ EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);
+diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
+index 0fa3d72bae261..79a6f14421606 100644
+--- a/drivers/usb/host/uhci-pci.c
++++ b/drivers/usb/host/uhci-pci.c
+@@ -119,11 +119,13 @@ static int uhci_pci_init(struct usb_hcd *hcd)
+
+ uhci->rh_numports = uhci_count_ports(hcd);
+
+- /* Intel controllers report the OverCurrent bit active on.
+- * VIA controllers report it active off, so we'll adjust the
+- * bit value. (It's not standardized in the UHCI spec.)
++ /*
++ * Intel controllers report the OverCurrent bit active on. VIA
++ * and ZHAOXIN controllers report it active off, so we'll adjust
++ * the bit value. (It's not standardized in the UHCI spec.)
+ */
+- if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA)
++ if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA ||
++ to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_ZHAOXIN)
+ uhci->oc_low = 1;
+
+ /* HP's server management chip requires a longer port reset delay. */
+diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
+index 0734e6dd93862..d242b73cfdb2e 100644
+--- a/drivers/usb/misc/sisusbvga/sisusb.c
++++ b/drivers/usb/misc/sisusbvga/sisusb.c
+@@ -3014,6 +3014,20 @@ static int sisusb_probe(struct usb_interface *intf,
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct sisusb_usb_data *sisusb;
+ int retval = 0, i;
++ static const u8 ep_addresses[] = {
++ SISUSB_EP_GFX_IN | USB_DIR_IN,
++ SISUSB_EP_GFX_OUT | USB_DIR_OUT,
++ SISUSB_EP_GFX_BULK_OUT | USB_DIR_OUT,
++ SISUSB_EP_GFX_LBULK_OUT | USB_DIR_OUT,
++ SISUSB_EP_BRIDGE_IN | USB_DIR_IN,
++ SISUSB_EP_BRIDGE_OUT | USB_DIR_OUT,
++ 0};
++
++ /* Are the expected endpoints present? */
++ if (!usb_check_bulk_endpoints(intf, ep_addresses)) {
++ dev_err(&intf->dev, "Invalid USB2VGA device\n");
++ return -EINVAL;
++ }
+
+ dev_info(&dev->dev, "USB2VGA dongle found at address %d\n",
+ dev->devnum);
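
The probe change above rejects devices whose descriptors lack the expected bulk endpoints before any transfer is attempted. A rough userspace sketch of the same zero-terminated expected-list check (names are illustrative; the kernel helper usb_check_bulk_endpoints() also verifies the transfer type):

#include <stdbool.h>
#include <stdint.h>

/* Return true only if every address in the zero-terminated 'expected' list
 * appears in the interface's endpoint table. */
static bool check_endpoints(const uint8_t *present, int n_present,
                            const uint8_t *expected)
{
        for (; *expected; expected++) {
                bool found = false;
                int i;

                for (i = 0; i < n_present; i++) {
                        if (present[i] == *expected) {
                                found = true;
                                break;
                        }
                }
                if (!found)
                        return false;
        }
        return true;
}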
+diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
+index 2adcabe060c50..49d17988e2ce8 100644
+--- a/drivers/usb/storage/scsiglue.c
++++ b/drivers/usb/storage/scsiglue.c
+@@ -407,22 +407,25 @@ static DEF_SCSI_QCMD(queuecommand)
+ ***********************************************************************/
+
+ /* Command timeout and abort */
+-static int command_abort(struct scsi_cmnd *srb)
++static int command_abort_matching(struct us_data *us, struct scsi_cmnd *srb_match)
+ {
+- struct us_data *us = host_to_us(srb->device->host);
+-
+- usb_stor_dbg(us, "%s called\n", __func__);
+-
+ /*
+ * us->srb together with the TIMED_OUT, RESETTING, and ABORTING
+ * bits are protected by the host lock.
+ */
+ scsi_lock(us_to_host(us));
+
+- /* Is this command still active? */
+- if (us->srb != srb) {
++ /* is there any active pending command to abort ? */
++ if (!us->srb) {
+ scsi_unlock(us_to_host(us));
+ usb_stor_dbg(us, "-- nothing to abort\n");
++ return SUCCESS;
++ }
++
++ /* Does the command match the passed srb if any ? */
++ if (srb_match && us->srb != srb_match) {
++ scsi_unlock(us_to_host(us));
++ usb_stor_dbg(us, "-- pending command mismatch\n");
+ return FAILED;
+ }
+
+@@ -445,6 +448,14 @@ static int command_abort(struct scsi_cmnd *srb)
+ return SUCCESS;
+ }
+
++static int command_abort(struct scsi_cmnd *srb)
++{
++ struct us_data *us = host_to_us(srb->device->host);
++
++ usb_stor_dbg(us, "%s called\n", __func__);
++ return command_abort_matching(us, srb);
++}
++
+ /*
+ * This invokes the transport reset mechanism to reset the state of the
+ * device
+@@ -456,6 +467,9 @@ static int device_reset(struct scsi_cmnd *srb)
+
+ usb_stor_dbg(us, "%s called\n", __func__);
+
++ /* abort any pending command before reset */
++ command_abort_matching(us, NULL);
++
+ /* lock the device pointers and do the reset */
+ mutex_lock(&(us->dev_mutex));
+ result = us->transport_reset(us);
+diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
+index ca22a05179d1e..a2a1baabca933 100644
+--- a/drivers/usb/typec/altmodes/displayport.c
++++ b/drivers/usb/typec/altmodes/displayport.c
+@@ -501,6 +501,10 @@ static ssize_t pin_assignment_show(struct device *dev,
+
+ mutex_unlock(&dp->lock);
+
++ /* get_current_pin_assignments can return 0 when no matching pin assignments are found */
++ if (len == 0)
++ len++;
++
+ buf[len - 1] = '\n';
+ return len;
+ }
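
The hunk above guards the trailing-newline write for the case where no pin assignment matched and len is still 0. A minimal sketch of that guard in a show()-style formatter (function and names are illustrative):

#include <stdio.h>

/* Append items to 'buf' and terminate with '\n'; bump len when nothing was
 * written so buf[len - 1] never indexes before the buffer. */
static int format_list(char *buf, const char * const *items, int n)
{
        int len = 0;
        int i;

        for (i = 0; i < n; i++)
                len += sprintf(buf + len, "%s ", items[i]);

        if (len == 0)
                len++;
        buf[len - 1] = '\n';
        return len;
}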
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index fb18264b702e6..b259a4a28f81a 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -1018,7 +1018,21 @@ static bool svdm_consume_svids(struct tcpm_port *port, const __le32 *payload,
+ pmdata->svids[pmdata->nsvids++] = svid;
+ tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
+ }
+- return true;
++
++ /*
++ * PD3.0 Spec 6.4.4.3.2: The SVIDs are returned 2 per VDO (see Table
++ * 6-43), and can be returned maximum 6 VDOs per response (see Figure
++ * 6-19). If the Responder supports 12 or more SVIDs then the Discover
++ * SVIDs Command Shall be executed multiple times until a Discover
++ * SVIDs VDO is returned ending either with a SVID value of 0x0000 in
++ * the last part of the last VDO or with a VDO containing two SVIDs
++ * with values of 0x0000.
++ *
++ * However, some odd dockers support SVIDs less than 12 but without
++ * 0x0000 in the last VDO, so we need to break the Discover SVIDs
++ * request and return false here.
++ */
++ return cnt == 7;
+ abort:
+ tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
+ return false;
+diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
+index 8e8af408998ea..24e82fca19ad5 100644
+--- a/drivers/video/fbdev/udlfb.c
++++ b/drivers/video/fbdev/udlfb.c
+@@ -27,6 +27,8 @@
+ #include <video/udlfb.h>
+ #include "edid.h"
+
++#define OUT_EP_NUM 1 /* The endpoint number we will use */
++
+ static const struct fb_fix_screeninfo dlfb_fix = {
+ .id = "udlfb",
+ .type = FB_TYPE_PACKED_PIXELS,
+@@ -1652,7 +1654,7 @@ static int dlfb_usb_probe(struct usb_interface *intf,
+ struct fb_info *info;
+ int retval;
+ struct usb_device *usbdev = interface_to_usbdev(intf);
+- struct usb_endpoint_descriptor *out;
++ static u8 out_ep[] = {OUT_EP_NUM + USB_DIR_OUT, 0};
+
+ /* usb initialization */
+ dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL);
+@@ -1666,9 +1668,9 @@ static int dlfb_usb_probe(struct usb_interface *intf,
+ dlfb->udev = usb_get_dev(usbdev);
+ usb_set_intfdata(intf, dlfb);
+
+- retval = usb_find_common_endpoints(intf->cur_altsetting, NULL, &out, NULL, NULL);
+- if (retval) {
+- dev_err(&intf->dev, "Device should have at lease 1 bulk endpoint!\n");
++ if (!usb_check_bulk_endpoints(intf, out_ep)) {
++ dev_err(&intf->dev, "Invalid DisplayLink device!\n");
++ retval = -EINVAL;
+ goto error;
+ }
+
+@@ -1927,7 +1929,8 @@ retry:
+ }
+
+ /* urb->transfer_buffer_length set to actual before submit */
+- usb_fill_bulk_urb(urb, dlfb->udev, usb_sndbulkpipe(dlfb->udev, 1),
++ usb_fill_bulk_urb(urb, dlfb->udev,
++ usb_sndbulkpipe(dlfb->udev, OUT_EP_NUM),
+ buf, size, dlfb_urb_completion, unode);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
+index 93bd302ae7c5d..704d7c527fcb9 100644
+--- a/drivers/watchdog/sp5100_tco.c
++++ b/drivers/watchdog/sp5100_tco.c
+@@ -98,6 +98,10 @@ static int tco_timer_start(struct watchdog_device *wdd)
+ val |= SP5100_WDT_START_STOP_BIT;
+ writel(val, SP5100_WDT_CONTROL(tco->tcobase));
+
++ /* This must be a distinct write. */
++ val |= SP5100_WDT_TRIGGER_BIT;
++ writel(val, SP5100_WDT_CONTROL(tco->tcobase));
++
+ return 0;
+ }
+
+diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
+index 9c267e27d9d95..6c2afd8bbf522 100644
+--- a/drivers/xen/pvcalls-back.c
++++ b/drivers/xen/pvcalls-back.c
+@@ -321,8 +321,10 @@ static struct sock_mapping *pvcalls_new_active_socket(
+ void *page;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+- if (map == NULL)
++ if (map == NULL) {
++ sock_release(sock);
+ return NULL;
++ }
+
+ map->fedata = fedata;
+ map->sock = sock;
+@@ -414,10 +416,8 @@ static int pvcalls_back_connect(struct xenbus_device *dev,
+ req->u.connect.ref,
+ req->u.connect.evtchn,
+ sock);
+- if (!map) {
++ if (!map)
+ ret = -EFAULT;
+- sock_release(sock);
+- }
+
+ out:
+ rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+@@ -558,7 +558,6 @@ static void __pvcalls_back_accept(struct work_struct *work)
+ sock);
+ if (!map) {
+ ret = -EFAULT;
+- sock_release(sock);
+ goto out_error;
+ }
+
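
The pvcalls change above moves the error-path sock_release() into pvcalls_new_active_socket(), so callers no longer release a socket the helper already owns. A small sketch of that ownership rule (types and names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct conn {
        FILE *f;
};

/* On failure the helper releases the resource it was handed; callers must
 * not release it again. */
static struct conn *conn_create(FILE *f)
{
        struct conn *c = calloc(1, sizeof(*c));

        if (!c) {
                fclose(f);
                return NULL;
        }
        c->f = f;
        return c;
}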
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 4536d8aea1967..7e9d914369a02 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4408,7 +4408,11 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
+ */
+ inode = igrab(&btrfs_inode->vfs_inode);
+ if (inode) {
++ unsigned int nofs_flag;
++
++ nofs_flag = memalloc_nofs_save();
+ invalidate_inode_pages2(inode->i_mapping);
++ memalloc_nofs_restore(nofs_flag);
+ iput(inode);
+ }
+ spin_lock(&root->delalloc_lock);
+@@ -4526,7 +4530,12 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
+
+ inode = cache->io_ctl.inode;
+ if (inode) {
++ unsigned int nofs_flag;
++
++ nofs_flag = memalloc_nofs_save();
+ invalidate_inode_pages2(inode->i_mapping);
++ memalloc_nofs_restore(nofs_flag);
++
+ BTRFS_I(inode)->generation = 0;
+ cache->io_ctl.inode = NULL;
+ iput(inode);
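
The btrfs hunks above wrap invalidate_inode_pages2() in memalloc_nofs_save()/memalloc_nofs_restore() so any allocation it performs cannot recurse into the filesystem during teardown. A simplified model of that save/restore pattern (flag handling only; not the kernel implementation):

#define FLAG_NOFS 0x1u

static unsigned int task_flags;

/* Remember whether the NOFS bit was already set, then force it on. */
static unsigned int nofs_save(void)
{
        unsigned int old = task_flags & FLAG_NOFS;

        task_flags |= FLAG_NOFS;
        return old;
}

/* Restore only the NOFS bit to its previous state. */
static void nofs_restore(unsigned int old)
{
        task_flags = (task_flags & ~FLAG_NOFS) | old;
}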
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index d2d32fed8f2e9..0cb93f73acb2d 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -784,15 +784,16 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ }
+ spin_lock(&ctl->tree_lock);
+ ret = link_free_space(ctl, e);
+- ctl->total_bitmaps++;
+- ctl->op->recalc_thresholds(ctl);
+- spin_unlock(&ctl->tree_lock);
+ if (ret) {
++ spin_unlock(&ctl->tree_lock);
+ btrfs_err(fs_info,
+ "Duplicate entries in free space cache, dumping");
+ kmem_cache_free(btrfs_free_space_cachep, e);
+ goto free_cache;
+ }
++ ctl->total_bitmaps++;
++ ctl->op->recalc_thresholds(ctl);
++ spin_unlock(&ctl->tree_lock);
+ list_add_tail(&e->list, &bitmaps);
+ }
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 20c5db8ef8427..c89e85a7da7d4 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -6830,7 +6830,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+- err = btrfs_find_free_ino(root, &objectid);
++ err = btrfs_find_free_objectid(root, &objectid);
+ if (err)
+ goto out_unlock;
+
+@@ -6894,7 +6894,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+- err = btrfs_find_free_ino(root, &objectid);
++ err = btrfs_find_free_objectid(root, &objectid);
+ if (err)
+ goto out_unlock;
+
+@@ -7039,7 +7039,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+- err = btrfs_find_free_ino(root, &objectid);
++ err = btrfs_find_free_objectid(root, &objectid);
+ if (err)
+ goto out_fail;
+
+@@ -9930,7 +9930,7 @@ static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
+ u64 objectid;
+ u64 index;
+
+- ret = btrfs_find_free_ino(root, &objectid);
++ ret = btrfs_find_free_objectid(root, &objectid);
+ if (ret)
+ return ret;
+
+@@ -10416,7 +10416,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+- err = btrfs_find_free_ino(root, &objectid);
++ err = btrfs_find_free_objectid(root, &objectid);
+ if (err)
+ goto out_unlock;
+
+@@ -10699,7 +10699,7 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+- ret = btrfs_find_free_ino(root, &objectid);
++ ret = btrfs_find_free_objectid(root, &objectid);
+ if (ret)
+ goto out;
+
+diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
+index 97ce1bd13bad1..96c79d5e9f7e0 100644
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -1005,6 +1005,19 @@ skip_inode:
+ continue;
+ adjust_snap_realm_parent(mdsc, child, realm->ino);
+ }
++ } else {
++ /*
++ * In the non-split case both 'num_split_inos' and
++ * 'num_split_realms' should be 0, making this a no-op.
++ * However the MDS happens to populate 'split_realms' list
++ * in one of the UPDATE op cases by mistake.
++ *
++ * Skip both lists just in case to ensure that 'p' is
++ * positioned at the start of realm info, as expected by
++ * ceph_update_snap_trace().
++ */
++ p += sizeof(u64) * num_split_inos;
++ p += sizeof(u64) * num_split_realms;
+ }
+
+ /*
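
The ceph hunk above advances the decode cursor past the (normally empty) split-inode and split-realm arrays even in the non-split case, so parsing always resumes at the realm records. The pointer arithmetic, in isolation:

#include <stdint.h>

/* Skip two u64 arrays of the given lengths and return the new cursor. */
static const uint8_t *skip_split_lists(const uint8_t *p,
                                       uint32_t num_split_inos,
                                       uint32_t num_split_realms)
{
        p += sizeof(uint64_t) * num_split_inos;
        p += sizeof(uint64_t) * num_split_realms;
        return p;
}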
+diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
+index 10ab238de9a65..a89b43d759052 100644
+--- a/fs/ext2/ext2.h
++++ b/fs/ext2/ext2.h
+@@ -177,6 +177,7 @@ static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
+ #define EXT2_MIN_BLOCK_SIZE 1024
+ #define EXT2_MAX_BLOCK_SIZE 4096
+ #define EXT2_MIN_BLOCK_LOG_SIZE 10
++#define EXT2_MAX_BLOCK_LOG_SIZE 16
+ #define EXT2_BLOCK_SIZE(s) ((s)->s_blocksize)
+ #define EXT2_ADDR_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (__u32))
+ #define EXT2_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
+diff --git a/fs/ext2/super.c b/fs/ext2/super.c
+index 644c83c115bc2..6e8e47871fa26 100644
+--- a/fs/ext2/super.c
++++ b/fs/ext2/super.c
+@@ -967,6 +967,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+ goto failed_mount;
+ }
+
++ if (le32_to_cpu(es->s_log_block_size) >
++ (EXT2_MAX_BLOCK_LOG_SIZE - BLOCK_SIZE_BITS)) {
++ ext2_msg(sb, KERN_ERR,
++ "Invalid log block size: %u",
++ le32_to_cpu(es->s_log_block_size));
++ goto failed_mount;
++ }
+ blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
+
+ if (test_opt(sb, DAX)) {
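
The ext2 hunk above rejects an on-disk s_log_block_size that would push the block size past 64 KiB before it is used in a shift. A compact sketch of that validation (constants mirror the patch; names are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_MIN_BLOCK_LOG_SIZE 10      /* 1 KiB base block */
#define DEMO_MAX_BLOCK_LOG_SIZE 16      /* 64 KiB upper bound */

/* Validate the superblock field before computing BLOCK_SIZE << value. */
static bool block_size_valid(uint32_t s_log_block_size, uint32_t *bsize)
{
        if (s_log_block_size > DEMO_MAX_BLOCK_LOG_SIZE - DEMO_MIN_BLOCK_LOG_SIZE)
                return false;
        *bsize = 1024u << s_log_block_size;
        return true;
}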
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index caab9781bee7d..92c37fbbabc15 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -3091,6 +3091,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ struct ext4_allocation_request *ar)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
++ struct ext4_super_block *es = sbi->s_es;
+ int bsbits, max;
+ ext4_lblk_t end;
+ loff_t size, start_off;
+@@ -3271,18 +3272,21 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
+
+ /* define goal start in order to merge */
+- if (ar->pright && (ar->lright == (start + size))) {
++ if (ar->pright && (ar->lright == (start + size)) &&
++ ar->pright >= size &&
++ ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
+ /* merge to the right */
+ ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
+- &ac->ac_f_ex.fe_group,
+- &ac->ac_f_ex.fe_start);
++ &ac->ac_g_ex.fe_group,
++ &ac->ac_g_ex.fe_start);
+ ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
+ }
+- if (ar->pleft && (ar->lleft + 1 == start)) {
++ if (ar->pleft && (ar->lleft + 1 == start) &&
++ ar->pleft + 1 < ext4_blocks_count(es)) {
+ /* merge to the left */
+ ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
+- &ac->ac_f_ex.fe_group,
+- &ac->ac_f_ex.fe_start);
++ &ac->ac_g_ex.fe_group,
++ &ac->ac_g_ex.fe_start);
+ ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
+ }
+
+@@ -3374,6 +3378,7 @@ static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
+ BUG_ON(start < pa->pa_pstart);
+ BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
+ BUG_ON(pa->pa_free < len);
++ BUG_ON(ac->ac_b_ex.fe_len <= 0);
+ pa->pa_free -= len;
+
+ mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
+@@ -3678,10 +3683,8 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ return -ENOMEM;
+
+ if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
+- int winl;
+- int wins;
+- int win;
+- int offs;
++ int new_bex_start;
++ int new_bex_end;
+
+ /* we can't allocate as much as normalizer wants.
+ * so, found space must get proper lstart
+@@ -3689,26 +3692,40 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
+ BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
+
+- /* we're limited by original request in that
+- * logical block must be covered any way
+- * winl is window we can move our chunk within */
+- winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
++ /*
++ * Use the below logic for adjusting best extent as it keeps
++ * fragmentation in check while ensuring logical range of best
++ * extent doesn't overflow out of goal extent:
++ *
++ * 1. Check if best ex can be kept at end of goal and still
++ * cover original start
++ * 2. Else, check if best ex can be kept at start of goal and
++ * still cover original start
++ * 3. Else, keep the best ex at start of original request.
++ */
++ new_bex_end = ac->ac_g_ex.fe_logical +
++ EXT4_C2B(sbi, ac->ac_g_ex.fe_len);
++ new_bex_start = new_bex_end - EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
++ if (ac->ac_o_ex.fe_logical >= new_bex_start)
++ goto adjust_bex;
+
+- /* also, we should cover whole original request */
+- wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
++ new_bex_start = ac->ac_g_ex.fe_logical;
++ new_bex_end =
++ new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
++ if (ac->ac_o_ex.fe_logical < new_bex_end)
++ goto adjust_bex;
+
+- /* the smallest one defines real window */
+- win = min(winl, wins);
++ new_bex_start = ac->ac_o_ex.fe_logical;
++ new_bex_end =
++ new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
+
+- offs = ac->ac_o_ex.fe_logical %
+- EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
+- if (offs && offs < win)
+- win = offs;
++adjust_bex:
++ ac->ac_b_ex.fe_logical = new_bex_start;
+
+- ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
+- EXT4_NUM_B2C(sbi, win);
+ BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
+ BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
++ BUG_ON(new_bex_end > (ac->ac_g_ex.fe_logical +
++ EXT4_C2B(sbi, ac->ac_g_ex.fe_len)));
+ }
+
+ /* preallocation can change ac_b_ex, thus we store actually
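
The new comment in ext4_mb_new_inode_pa() above describes a three-step placement rule for the best extent. The same rule, written out as plain arithmetic over logical block numbers (helper name is illustrative):

#include <stdint.h>

/* Return the logical start for the best extent: at the end of the goal
 * range if that still covers the original start, else at the start of the
 * goal range, else at the original request itself. */
static uint64_t place_best_extent(uint64_t goal_start, uint64_t goal_len,
                                  uint64_t best_len, uint64_t orig_start)
{
        uint64_t end = goal_start + goal_len;
        uint64_t start = end - best_len;

        if (orig_start >= start)                /* case 1: end of goal */
                return start;

        start = goal_start;                     /* case 2: start of goal */
        if (orig_start < start + best_len)
                return start;

        return orig_start;                      /* case 3: original start */
}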
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index 57318010f8e6a..84e98dacd4524 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -306,8 +306,15 @@ static int __f2fs_write_meta_page(struct page *page,
+
+ trace_f2fs_writepage(page, META);
+
+- if (unlikely(f2fs_cp_error(sbi)))
++ if (unlikely(f2fs_cp_error(sbi))) {
++ if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
++ ClearPageUptodate(page);
++ dec_page_count(sbi, F2FS_DIRTY_META);
++ unlock_page(page);
++ return 0;
++ }
+ goto redirty_out;
++ }
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ goto redirty_out;
+ if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
+@@ -1277,7 +1284,8 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
+ if (!get_pages(sbi, type))
+ break;
+
+- if (unlikely(f2fs_cp_error(sbi)))
++ if (unlikely(f2fs_cp_error(sbi) &&
++ !is_sbi_flag_set(sbi, SBI_IS_CLOSE)))
+ break;
+
+ io_schedule_timeout(HZ/50);
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 03dffb126d5cc..8f78050c935d7 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2130,7 +2130,8 @@ static int __write_data_page(struct page *page, bool *submitted,
+ * don't drop any dirty dentry pages for keeping lastest
+ * directory structure.
+ */
+- if (S_ISDIR(inode->i_mode))
++ if (S_ISDIR(inode->i_mode) &&
++ !is_sbi_flag_set(sbi, SBI_IS_CLOSE))
+ goto redirty_out;
+ goto out;
+ }
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index 69106a1545fad..092223a8b1201 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -362,6 +362,7 @@ static int inode_go_demote_ok(const struct gfs2_glock *gl)
+
+ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
+ {
++ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ const struct gfs2_dinode *str = buf;
+ struct timespec64 atime;
+ u16 height, depth;
+@@ -401,7 +402,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
+ /* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
+ gfs2_set_inode_flags(&ip->i_inode);
+ height = be16_to_cpu(str->di_height);
+- if (unlikely(height > GFS2_MAX_META_HEIGHT))
++ if (unlikely(height > sdp->sd_max_height))
+ goto corrupt;
+ ip->i_height = (u8)height;
+
+diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
+index 76501d905099b..15c14a6a9f7fe 100644
+--- a/fs/hfsplus/inode.c
++++ b/fs/hfsplus/inode.c
+@@ -497,7 +497,11 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
+ if (type == HFSPLUS_FOLDER) {
+ struct hfsplus_cat_folder *folder = &entry.folder;
+
+- WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_folder));
++ if (fd->entrylength < sizeof(struct hfsplus_cat_folder)) {
++ pr_err("bad catalog folder entry\n");
++ res = -EIO;
++ goto out;
++ }
+ hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
+ sizeof(struct hfsplus_cat_folder));
+ hfsplus_get_perms(inode, &folder->permissions, 1);
+@@ -517,7 +521,11 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
+ } else if (type == HFSPLUS_FILE) {
+ struct hfsplus_cat_file *file = &entry.file;
+
+- WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_file));
++ if (fd->entrylength < sizeof(struct hfsplus_cat_file)) {
++ pr_err("bad catalog file entry\n");
++ res = -EIO;
++ goto out;
++ }
+ hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
+ sizeof(struct hfsplus_cat_file));
+
+@@ -548,6 +556,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
+ pr_err("bad catalog entry used to create inode\n");
+ res = -EIO;
+ }
++out:
+ return res;
+ }
+
+@@ -556,6 +565,7 @@ int hfsplus_cat_write_inode(struct inode *inode)
+ struct inode *main_inode = inode;
+ struct hfs_find_data fd;
+ hfsplus_cat_entry entry;
++ int res = 0;
+
+ if (HFSPLUS_IS_RSRC(inode))
+ main_inode = HFSPLUS_I(inode)->rsrc_inode;
+@@ -574,7 +584,11 @@ int hfsplus_cat_write_inode(struct inode *inode)
+ if (S_ISDIR(main_inode->i_mode)) {
+ struct hfsplus_cat_folder *folder = &entry.folder;
+
+- WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_folder));
++ if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
++ pr_err("bad catalog folder entry\n");
++ res = -EIO;
++ goto out;
++ }
+ hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+ sizeof(struct hfsplus_cat_folder));
+ /* simple node checks? */
+@@ -599,7 +613,11 @@ int hfsplus_cat_write_inode(struct inode *inode)
+ } else {
+ struct hfsplus_cat_file *file = &entry.file;
+
+- WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_file));
++ if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
++ pr_err("bad catalog file entry\n");
++ res = -EIO;
++ goto out;
++ }
+ hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+ sizeof(struct hfsplus_cat_file));
+ hfsplus_inode_write_fork(inode, &file->data_fork);
+@@ -620,5 +638,5 @@ int hfsplus_cat_write_inode(struct inode *inode)
+ set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags);
+ out:
+ hfs_find_exit(&fd);
+- return 0;
++ return res;
+ }
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index cf01aa55dd44c..53ec342eb787c 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -930,6 +930,7 @@ void nilfs_evict_inode(struct inode *inode)
+ struct nilfs_transaction_info ti;
+ struct super_block *sb = inode->i_sb;
+ struct nilfs_inode_info *ii = NILFS_I(inode);
++ struct the_nilfs *nilfs;
+ int ret;
+
+ if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
+@@ -942,6 +943,23 @@ void nilfs_evict_inode(struct inode *inode)
+
+ truncate_inode_pages_final(&inode->i_data);
+
++ nilfs = sb->s_fs_info;
++ if (unlikely(sb_rdonly(sb) || !nilfs->ns_writer)) {
++ /*
++ * If this inode is about to be disposed after the file system
++ * has been degraded to read-only due to file system corruption
++ * or after the writer has been detached, do not make any
++ * changes that cause writes, just clear it.
++ * Do this check after read-locking ns_segctor_sem by
++ * nilfs_transaction_begin() in order to avoid a race with
++ * the writer detach operation.
++ */
++ clear_inode(inode);
++ nilfs_clear_inode(inode);
++ nilfs_transaction_abort(sb);
++ return;
++ }
++
+ /* TODO: some of the following operations may fail. */
+ nilfs_truncate_bmap(ii, 0);
+ nilfs_mark_inode_dirty(inode);
+diff --git a/fs/statfs.c b/fs/statfs.c
+index 2616424012ea7..4960c69cc47cf 100644
+--- a/fs/statfs.c
++++ b/fs/statfs.c
+@@ -128,6 +128,7 @@ static int do_statfs_native(struct kstatfs *st, struct statfs __user *p)
+ if (sizeof(buf) == sizeof(*st))
+ memcpy(&buf, st, sizeof(*st));
+ else {
++ memset(&buf, 0, sizeof(buf));
+ if (sizeof buf.f_blocks == 4) {
+ if ((st->f_blocks | st->f_bfree | st->f_bavail |
+ st->f_bsize | st->f_frsize) &
+@@ -156,7 +157,6 @@ static int do_statfs_native(struct kstatfs *st, struct statfs __user *p)
+ buf.f_namelen = st->f_namelen;
+ buf.f_frsize = st->f_frsize;
+ buf.f_flags = st->f_flags;
+- memset(buf.f_spare, 0, sizeof(buf.f_spare));
+ }
+ if (copy_to_user(p, &buf, sizeof(buf)))
+ return -EFAULT;
+@@ -169,6 +169,7 @@ static int do_statfs64(struct kstatfs *st, struct statfs64 __user *p)
+ if (sizeof(buf) == sizeof(*st))
+ memcpy(&buf, st, sizeof(*st));
+ else {
++ memset(&buf, 0, sizeof(buf));
+ buf.f_type = st->f_type;
+ buf.f_bsize = st->f_bsize;
+ buf.f_blocks = st->f_blocks;
+@@ -180,7 +181,6 @@ static int do_statfs64(struct kstatfs *st, struct statfs64 __user *p)
+ buf.f_namelen = st->f_namelen;
+ buf.f_frsize = st->f_frsize;
+ buf.f_flags = st->f_flags;
+- memset(buf.f_spare, 0, sizeof(buf.f_spare));
+ }
+ if (copy_to_user(p, &buf, sizeof(buf)))
+ return -EFAULT;
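
The statfs hunks above zero the whole output structure up front instead of clearing only f_spare, so padding and any fields the copy skips can never leak stale kernel stack data to user space. The pattern in miniature (structure is illustrative):

#include <string.h>

struct demo_statfs {
        long f_type;
        long f_bsize;
        long f_blocks;
        long f_spare[4];
};

/* Zero everything first, then fill in the fields that are known. */
static void fill_statfs(struct demo_statfs *out, long type, long bsize,
                        long blocks)
{
        memset(out, 0, sizeof(*out));
        out->f_type = type;
        out->f_bsize = bsize;
        out->f_blocks = blocks;
}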
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 15835f37bd5f2..8134cc3b99cdc 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -111,7 +111,6 @@ enum cpuhp_state {
+ CPUHP_AP_PERF_X86_CSTATE_STARTING,
+ CPUHP_AP_PERF_XTENSA_STARTING,
+ CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
+- CPUHP_AP_ARM_SDEI_STARTING,
+ CPUHP_AP_ARM_VFP_STARTING,
+ CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
+ CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 3414b5a67b466..d74275e2047a4 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -1528,6 +1528,7 @@ extern int device_online(struct device *dev);
+ extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+ extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+ void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
++void device_set_node(struct device *dev, struct fwnode_handle *fwnode);
+
+ static inline int dev_num_vf(struct device *dev)
+ {
+diff --git a/include/linux/dim.h b/include/linux/dim.h
+index 2571da63877c5..ad5f219ce2ff2 100644
+--- a/include/linux/dim.h
++++ b/include/linux/dim.h
+@@ -233,8 +233,9 @@ void dim_park_tired(struct dim *dim);
+ *
+ * Calculate the delta between two samples (in data rates).
+ * Takes into consideration counter wrap-around.
++ * Returned boolean indicates whether curr_stats are reliable.
+ */
+-void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
++bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+ struct dim_stats *curr_stats);
+
+ /**
+diff --git a/include/linux/if_team.h b/include/linux/if_team.h
+index ec7e4bd07f825..b216a28920f29 100644
+--- a/include/linux/if_team.h
++++ b/include/linux/if_team.h
+@@ -211,6 +211,7 @@ struct team {
+ bool queue_override_enabled;
+ struct list_head *qom_lists; /* array of queue override mapping lists */
+ bool port_mtu_change_allowed;
++ bool notifier_ctx;
+ struct {
+ unsigned int count;
+ unsigned int interval; /* in ms */
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index 41a518336673b..4e7e72f3da5bd 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -626,6 +626,23 @@ static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
+ return __vlan_get_protocol(skb, skb->protocol, NULL);
+ }
+
++/* This version of __vlan_get_protocol() also pulls mac header in skb->head */
++static inline __be16 vlan_get_protocol_and_depth(struct sk_buff *skb,
++ __be16 type, int *depth)
++{
++ int maclen;
++
++ type = __vlan_get_protocol(skb, type, &maclen);
++
++ if (type) {
++ if (!pskb_may_pull(skb, maclen))
++ type = 0;
++ else if (depth)
++ *depth = maclen;
++ }
++ return type;
++}
++
+ /* A getter for the SKB protocol field which will handle VLAN tags consistently
+ * whether VLAN acceleration is enabled or not.
+ */
+diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
+index 7413779484d5a..3baabcd7fa3e6 100644
+--- a/include/linux/power/bq27xxx_battery.h
++++ b/include/linux/power/bq27xxx_battery.h
+@@ -64,6 +64,7 @@ struct bq27xxx_device_info {
+ struct bq27xxx_access_methods bus;
+ struct bq27xxx_reg_cache cache;
+ int charge_design_full;
++ bool removed;
+ unsigned long last_update;
+ struct delayed_work work;
+ struct power_supply *bat;
+diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
+index 1009b6b5ce403..879a5c8f930b6 100644
+--- a/include/linux/sched/task_stack.h
++++ b/include/linux/sched/task_stack.h
+@@ -23,7 +23,7 @@ static __always_inline void *task_stack_page(const struct task_struct *task)
+
+ #define setup_thread_stack(new,old) do { } while(0)
+
+-static inline unsigned long *end_of_stack(const struct task_struct *task)
++static __always_inline unsigned long *end_of_stack(const struct task_struct *task)
+ {
+ #ifdef CONFIG_STACK_GROWSUP
+ return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
+diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
+index c289551322342..86f150c2a6b66 100644
+--- a/include/linux/string_helpers.h
++++ b/include/linux/string_helpers.h
+@@ -2,6 +2,7 @@
+ #ifndef _LINUX_STRING_HELPERS_H_
+ #define _LINUX_STRING_HELPERS_H_
+
++#include <linux/ctype.h>
+ #include <linux/types.h>
+
+ struct file;
+@@ -75,6 +76,20 @@ static inline int string_escape_str_any_np(const char *src, char *dst,
+ return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only);
+ }
+
++static inline void string_upper(char *dst, const char *src)
++{
++ do {
++ *dst++ = toupper(*src);
++ } while (*src++);
++}
++
++static inline void string_lower(char *dst, const char *src)
++{
++ do {
++ *dst++ = tolower(*src);
++ } while (*src++);
++}
++
+ char *kstrdup_quotable(const char *src, gfp_t gfp);
+ char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp);
+ char *kstrdup_quotable_file(struct file *file, gfp_t gfp);
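
string_upper() and string_lower() added above copy until the terminating NUL, so the destination must be at least as large as the source, and in-place use (dst == src) is fine, which is how u_ether.c calls it. A small userspace copy of the helper with a usage example (the toupper() cast is an addition for portability):

#include <ctype.h>
#include <stdio.h>

static void string_upper(char *dst, const char *src)
{
        do {
                *dst++ = toupper((unsigned char)*src);
        } while (*src++);
}

int main(void)
{
        char mac[] = "0a:1b:2c:3d:4e:5f";

        string_upper(mac, mac);         /* in-place: dst == src */
        printf("%s\n", mac);            /* prints 0A:1B:2C:3D:4E:5F */
        return 0;
}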
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index c4e919cbbec7a..abcf1ce9bb068 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -279,6 +279,11 @@ void usb_put_intf(struct usb_interface *intf);
+ #define USB_MAXINTERFACES 32
+ #define USB_MAXIADS (USB_MAXINTERFACES/2)
+
++bool usb_check_bulk_endpoints(
++ const struct usb_interface *intf, const u8 *ep_addrs);
++bool usb_check_int_endpoints(
++ const struct usb_interface *intf, const u8 *ep_addrs);
++
+ /*
+ * USB Resume Timer: Every Host controller driver should drive the resume
+ * signalling on the bus for the amount of time defined by this macro.
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 69ceb5b4a8d68..a3698f0fb2a6d 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -207,6 +207,7 @@ struct bonding {
+ struct slave __rcu *primary_slave;
+ struct bond_up_slave __rcu *usable_slaves; /* Array of usable slaves */
+ bool force_primary;
++ bool notifier_ctx;
+ s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
+ int (*recv_probe)(const struct sk_buff *, struct bonding *,
+ struct slave *);
+diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
+index 028eaea1c8544..42d50856fcf24 100644
+--- a/include/net/ip6_tunnel.h
++++ b/include/net/ip6_tunnel.h
+@@ -57,7 +57,7 @@ struct ip6_tnl {
+
+ /* These fields used only by GRE */
+ __u32 i_seqno; /* The last seen seqno */
+- __u32 o_seqno; /* The last output seqno */
++ atomic_t o_seqno; /* The last output seqno */
+ int hlen; /* tun_hlen + encap_hlen */
+ int tun_hlen; /* Precalculated header length */
+ int encap_hlen; /* Encap header length (FOU,GUE) */
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index 56deb2501e962..6f75a84b47de5 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -113,7 +113,7 @@ struct ip_tunnel {
+
+ /* These four fields used only by GRE */
+ u32 i_seqno; /* The last seen seqno */
+- u32 o_seqno; /* The last output seqno */
++ atomic_t o_seqno; /* The last output seqno */
+ int tun_hlen; /* Precalculated header length */
+
+ /* These four fields used only by ERSPAN */
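
Both tunnel headers above turn the output sequence number into an atomic_t so concurrent transmitters each draw a unique value without a lock; the GRE callers switch to atomic_fetch_inc(). A userspace stand-in using C11 atomics:

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint o_seqno;

/* Each caller gets the previous value and the counter advances atomically. */
static uint32_t next_tx_seqno(void)
{
        return atomic_fetch_add_explicit(&o_seqno, 1, memory_order_relaxed);
}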
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index ad2a52a6c478b..a8cc2750990f9 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -205,14 +205,13 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
+ }
+
+ int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
+-unsigned int nft_parse_register(const struct nlattr *attr);
+ int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
+
+-int nft_validate_register_load(enum nft_registers reg, unsigned int len);
+-int nft_validate_register_store(const struct nft_ctx *ctx,
+- enum nft_registers reg,
+- const struct nft_data *data,
+- enum nft_data_types type, unsigned int len);
++int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len);
++int nft_parse_register_store(const struct nft_ctx *ctx,
++ const struct nlattr *attr, u8 *dreg,
++ const struct nft_data *data,
++ enum nft_data_types type, unsigned int len);
+
+ /**
+ * struct nft_userdata - user defined data associated with an object
+@@ -240,6 +239,10 @@ struct nft_set_elem {
+ u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
+ struct nft_data val;
+ } key;
++ union {
++ u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
++ struct nft_data val;
++ } data;
+ void *priv;
+ };
+
+diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
+index 7281895fa6d99..6a3fd54c69c17 100644
+--- a/include/net/netfilter/nf_tables_core.h
++++ b/include/net/netfilter/nf_tables_core.h
+@@ -25,13 +25,13 @@ void nf_tables_core_module_exit(void);
+
+ struct nft_cmp_fast_expr {
+ u32 data;
+- enum nft_registers sreg:8;
++ u8 sreg;
+ u8 len;
+ };
+
+ struct nft_immediate_expr {
+ struct nft_data data;
+- enum nft_registers dreg:8;
++ u8 dreg;
+ u8 dlen;
+ };
+
+@@ -51,14 +51,14 @@ struct nft_payload {
+ enum nft_payload_bases base:8;
+ u8 offset;
+ u8 len;
+- enum nft_registers dreg:8;
++ u8 dreg;
+ };
+
+ struct nft_payload_set {
+ enum nft_payload_bases base:8;
+ u8 offset;
+ u8 len;
+- enum nft_registers sreg:8;
++ u8 sreg;
+ u8 csum_type;
+ u8 csum_offset;
+ u8 csum_flags;
+diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
+index 628b6fa579cd8..237f3757637e1 100644
+--- a/include/net/netfilter/nft_fib.h
++++ b/include/net/netfilter/nft_fib.h
+@@ -5,7 +5,7 @@
+ #include <net/netfilter/nf_tables.h>
+
+ struct nft_fib {
+- enum nft_registers dreg:8;
++ u8 dreg;
+ u8 result;
+ u32 flags;
+ };
+diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
+index 07e2fd507963a..2dce55c736f40 100644
+--- a/include/net/netfilter/nft_meta.h
++++ b/include/net/netfilter/nft_meta.h
+@@ -7,8 +7,8 @@
+ struct nft_meta {
+ enum nft_meta_keys key:8;
+ union {
+- enum nft_registers dreg:8;
+- enum nft_registers sreg:8;
++ u8 dreg;
++ u8 sreg;
+ };
+ };
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 26dd07e47a7c7..fa19c6ba24441 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2448,7 +2448,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
+ __sock_recv_ts_and_drops(msg, sk, skb);
+ else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
+ sock_write_timestamp(sk, skb->tstamp);
+- else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
++ else if (unlikely(sock_read_timestamp(sk) == SK_DEFAULT_STAMP))
+ sock_write_timestamp(sk, 0);
+ }
+
+diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
+index a93c0decfdd53..215ce16b37d2b 100644
+--- a/include/uapi/sound/skl-tplg-interface.h
++++ b/include/uapi/sound/skl-tplg-interface.h
+@@ -66,7 +66,8 @@ enum skl_ch_cfg {
+ SKL_CH_CFG_DUAL_MONO = 9,
+ SKL_CH_CFG_I2S_DUAL_STEREO_0 = 10,
+ SKL_CH_CFG_I2S_DUAL_STEREO_1 = 11,
+- SKL_CH_CFG_4_CHANNEL = 12,
++ SKL_CH_CFG_7_1 = 12,
++ SKL_CH_CFG_4_CHANNEL = SKL_CH_CFG_7_1,
+ SKL_CH_CFG_INVALID
+ };
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 5476f61bad232..530664693ac48 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -8966,7 +8966,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
+ insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
+ insn->dst_reg,
+ shift);
+- insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
++ insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
+ (1ULL << size * 8) - 1);
+ }
+ }
+diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
+index 075f3788bbe4d..920403fa5b0de 100644
+--- a/lib/cpu_rmap.c
++++ b/lib/cpu_rmap.c
+@@ -232,7 +232,8 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
+
+ for (index = 0; index < rmap->used; index++) {
+ glue = rmap->obj[index];
+- irq_set_affinity_notifier(glue->notify.irq, NULL);
++ if (glue)
++ irq_set_affinity_notifier(glue->notify.irq, NULL);
+ }
+
+ cpu_rmap_put(rmap);
+@@ -268,6 +269,7 @@ static void irq_cpu_rmap_release(struct kref *ref)
+ container_of(ref, struct irq_glue, notify.kref);
+
+ cpu_rmap_put(glue->rmap);
++ glue->rmap->obj[glue->index] = NULL;
+ kfree(glue);
+ }
+
+@@ -297,6 +299,7 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
+ rc = irq_set_affinity_notifier(irq, &glue->notify);
+ if (rc) {
+ cpu_rmap_put(glue->rmap);
++ rmap->obj[glue->index] = NULL;
+ kfree(glue);
+ }
+ return rc;
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index bfb3eb8c98004..26fa04335537b 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -128,7 +128,7 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
+
+ static void fill_pool(void)
+ {
+- gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
++ gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+diff --git a/lib/dim/dim.c b/lib/dim/dim.c
+index 38045d6d05381..e89aaf07bde50 100644
+--- a/lib/dim/dim.c
++++ b/lib/dim/dim.c
+@@ -54,7 +54,7 @@ void dim_park_tired(struct dim *dim)
+ }
+ EXPORT_SYMBOL(dim_park_tired);
+
+-void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
++bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+ struct dim_stats *curr_stats)
+ {
+ /* u32 holds up to 71 minutes, should be enough */
+@@ -66,7 +66,7 @@ void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+ start->comp_ctr);
+
+ if (!delta_us)
+- return;
++ return false;
+
+ curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
+ curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
+@@ -79,5 +79,6 @@ void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+ else
+ curr_stats->cpe_ratio = 0;
+
++ return true;
+ }
+ EXPORT_SYMBOL(dim_calc_stats);
+diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
+index dae3b51ac3d9b..0e4f3a686f1de 100644
+--- a/lib/dim/net_dim.c
++++ b/lib/dim/net_dim.c
+@@ -227,7 +227,8 @@ void net_dim(struct dim *dim, struct dim_sample end_sample)
+ dim->start_sample.event_ctr);
+ if (nevents < DIM_NEVENTS)
+ break;
+- dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats);
++ if (!dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats))
++ break;
+ if (net_dim_decision(&curr_stats, dim)) {
+ dim->state = DIM_APPLY_NEW_PROFILE;
+ schedule_work(&dim->work);
+diff --git a/lib/dim/rdma_dim.c b/lib/dim/rdma_dim.c
+index f7e26c7b4749f..d32c8b105adc9 100644
+--- a/lib/dim/rdma_dim.c
++++ b/lib/dim/rdma_dim.c
+@@ -88,7 +88,8 @@ void rdma_dim(struct dim *dim, u64 completions)
+ nevents = curr_sample->event_ctr - dim->start_sample.event_ctr;
+ if (nevents < DIM_NEVENTS)
+ break;
+- dim_calc_stats(&dim->start_sample, curr_sample, &curr_stats);
++ if (!dim_calc_stats(&dim->start_sample, curr_sample, &curr_stats))
++ break;
+ if (rdma_dim_decision(&curr_stats, dim)) {
+ dim->state = DIM_APPLY_NEW_PROFILE;
+ schedule_work(&dim->work);
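
dim_calc_stats() now reports via its return value whether the sample window was long enough to produce meaningful rates, and both net_dim() and rdma_dim() bail out instead of deciding on unreliable statistics. A stripped-down sketch of that contract (names are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Packets-per-millisecond over the window; false if the window is empty. */
static bool calc_rate(uint32_t pkts, uint32_t delta_us, uint32_t *ppms)
{
        if (!delta_us)
                return false;
        *ppms = (pkts * 1000u + delta_us - 1) / delta_us;
        return true;
}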
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index b10f31f98cb87..0a3a167916218 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -109,8 +109,8 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
+ * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
+ * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
+ */
+- if (veth->h_vlan_proto != vlan->vlan_proto ||
+- vlan->flags & VLAN_FLAG_REORDER_HDR) {
++ if (vlan->flags & VLAN_FLAG_REORDER_HDR ||
++ veth->h_vlan_proto != vlan->vlan_proto) {
+ u16 vlan_tci;
+ vlan_tci = vlan->vlan_id;
+ vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 3c559a177761b..5f53e75d83024 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4410,7 +4410,6 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
+
+ chan = l2cap_get_chan_by_scid(conn, scid);
+ if (!chan) {
+- mutex_unlock(&conn->chan_lock);
+ return 0;
+ }
+
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
+index 86637000f275d..a92d5359b5c0c 100644
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -43,7 +43,7 @@ int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb
+ skb->protocol == htons(ETH_P_8021AD))) {
+ int depth;
+
+- if (!__vlan_get_protocol(skb, skb->protocol, &depth))
++ if (!vlan_get_protocol_and_depth(skb, skb->protocol, &depth))
+ goto drop;
+
+ skb_set_network_header(skb, depth);
+diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
+index 7c9e92b2f806c..0c28fa4647b73 100644
+--- a/net/bridge/netfilter/nft_meta_bridge.c
++++ b/net/bridge/netfilter/nft_meta_bridge.c
+@@ -87,9 +87,8 @@ static int nft_meta_bridge_get_init(const struct nft_ctx *ctx,
+ return nft_meta_get_init(ctx, expr, tb);
+ }
+
+- priv->dreg = nft_parse_register(tb[NFTA_META_DREG]);
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, len);
++ return nft_parse_register_store(ctx, tb[NFTA_META_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, len);
+ }
+
+ static struct nft_expr_type nft_meta_bridge_type;
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index 8817a258e73b9..904b167b07cf0 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -798,7 +798,7 @@ static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
+ struct j1939_sk_buff_cb *skcb;
+ int ret = 0;
+
+- if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE))
++ if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
+ return -EINVAL;
+
+ if (flags & MSG_ERRQUEUE)
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index b0488f30f2c4e..a5fc44448d600 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -778,18 +778,21 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
+ {
+ struct sock *sk = sock->sk;
+ __poll_t mask;
++ u8 shutdown;
+
+ sock_poll_wait(file, sock, wait);
+ mask = 0;
+
+ /* exceptional events? */
+- if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
++ if (READ_ONCE(sk->sk_err) ||
++ !skb_queue_empty_lockless(&sk->sk_error_queue))
+ mask |= EPOLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
+
+- if (sk->sk_shutdown & RCV_SHUTDOWN)
++ shutdown = READ_ONCE(sk->sk_shutdown);
++ if (shutdown & RCV_SHUTDOWN)
+ mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
+- if (sk->sk_shutdown == SHUTDOWN_MASK)
++ if (shutdown == SHUTDOWN_MASK)
+ mask |= EPOLLHUP;
+
+ /* readable? */
+@@ -798,10 +801,12 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
+
+ /* Connection-based need to check for termination and startup */
+ if (connection_based(sk)) {
+- if (sk->sk_state == TCP_CLOSE)
++ int state = READ_ONCE(sk->sk_state);
++
++ if (state == TCP_CLOSE)
+ mask |= EPOLLHUP;
+ /* connection hasn't started yet? */
+- if (sk->sk_state == TCP_SYN_SENT)
++ if (state == TCP_SYN_SENT)
+ return mask;
+ }
+
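
datagram_poll() above reads sk_err, sk_shutdown and sk_state once each into locals, so every later test works on one consistent snapshot even though other CPUs may update those fields concurrently (READ_ONCE() in the kernel). A userspace approximation with relaxed atomics (constants and bits are illustrative):

#include <stdatomic.h>

#define RCV_SHUTDOWN    1
#define SEND_SHUTDOWN   2
#define SHUTDOWN_MASK   (RCV_SHUTDOWN | SEND_SHUTDOWN)

static _Atomic int sk_shutdown;

/* Snapshot the field once, then derive all poll bits from that snapshot. */
static unsigned int poll_shutdown_bits(void)
{
        int shutdown = atomic_load_explicit(&sk_shutdown,
                                            memory_order_relaxed);
        unsigned int mask = 0;

        if (shutdown & RCV_SHUTDOWN)
                mask |= 0x1;    /* "read hang-up" style bit */
        if (shutdown == SHUTDOWN_MASK)
                mask |= 0x2;    /* "hang-up" style bit */
        return mask;
}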
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 1a4e20c4ba053..0cc0809628b08 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2244,6 +2244,8 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
+ bool active = false;
+ unsigned int nr_ids;
+
++ WARN_ON_ONCE(index >= dev->num_tx_queues);
++
+ if (dev->num_tc) {
+ /* Do not allow XPS on subordinate device directly */
+ num_tc = dev->num_tc;
+@@ -2936,7 +2938,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
+ type = eth->h_proto;
+ }
+
+- return __vlan_get_protocol(skb, type, depth);
++ return vlan_get_protocol_and_depth(skb, type, depth);
+ }
+
+ /**
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 291d7a9e04839..9e297853112c4 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4633,8 +4633,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
+ } else {
+ skb = skb_clone(orig_skb, GFP_ATOMIC);
+
+- if (skb_orphan_frags_rx(skb, GFP_ATOMIC))
++ if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) {
++ kfree_skb(skb);
+ return;
++ }
+ }
+ if (!skb)
+ return;
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 317fdb9f47e88..f8f008344273e 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -437,7 +437,7 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
+ /* Push GRE header. */
+ gre_build_header(skb, tunnel->tun_hlen,
+ flags, proto, tunnel->parms.o_key,
+- (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
++ (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
+
+ ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
+ }
+@@ -475,7 +475,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
+ (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
+ gre_build_header(skb, tunnel_hlen, flags, proto,
+ tunnel_id_to_key32(tun_info->key.tun_id),
+- (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
++ (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
+
+ ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
+
+@@ -557,7 +557,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+
+ gre_build_header(skb, 8, TUNNEL_SEQ,
+- proto, 0, htonl(tunnel->o_seqno++));
++ proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));
+
+ ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
+
+diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c
+index abf89b9720940..330349b5d6a4f 100644
+--- a/net/ipv4/netfilter/nft_dup_ipv4.c
++++ b/net/ipv4/netfilter/nft_dup_ipv4.c
+@@ -13,8 +13,8 @@
+ #include <net/netfilter/ipv4/nf_dup_ipv4.h>
+
+ struct nft_dup_ipv4 {
+- enum nft_registers sreg_addr:8;
+- enum nft_registers sreg_dev:8;
++ u8 sreg_addr;
++ u8 sreg_dev;
+ };
+
+ static void nft_dup_ipv4_eval(const struct nft_expr *expr,
+@@ -40,16 +40,16 @@ static int nft_dup_ipv4_init(const struct nft_ctx *ctx,
+ if (tb[NFTA_DUP_SREG_ADDR] == NULL)
+ return -EINVAL;
+
+- priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
+- err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in_addr));
++ err = nft_parse_register_load(tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr,
++ sizeof(struct in_addr));
+ if (err < 0)
+ return err;
+
+- if (tb[NFTA_DUP_SREG_DEV] != NULL) {
+- priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+- return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+- }
+- return 0;
++ if (tb[NFTA_DUP_SREG_DEV])
++ err = nft_parse_register_load(tb[NFTA_DUP_SREG_DEV],
++ &priv->sreg_dev, sizeof(int));
++
++ return err;
+ }
+
+ static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr)
+diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
+index f4fad9100749f..3a74a3675ca1d 100644
+--- a/net/ipv4/udplite.c
++++ b/net/ipv4/udplite.c
+@@ -62,6 +62,8 @@ struct proto udplite_prot = {
+ .get_port = udp_v4_get_port,
+ .memory_allocated = &udp_memory_allocated,
+ .sysctl_mem = sysctl_udp_mem,
++ .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
++ .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
+ .obj_size = sizeof(struct udp_sock),
+ .h.udp_table = &udplite_table,
+ #ifdef CONFIG_COMPAT
+diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
+index da46c42846765..49e31e4ae7b7f 100644
+--- a/net/ipv6/exthdrs_core.c
++++ b/net/ipv6/exthdrs_core.c
+@@ -143,6 +143,8 @@ int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type)
+ optlen = 1;
+ break;
+ default:
++ if (len < 2)
++ goto bad;
+ optlen = nh[offset + 1] + 2;
+ if (optlen > len)
+ goto bad;
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 85ec466b5735e..0977137b00dc4 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -711,6 +711,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
+ {
+ struct ip6_tnl *tunnel = netdev_priv(dev);
+ __be16 protocol;
++ __be16 flags;
+
+ if (dev->type == ARPHRD_ETHER)
+ IPCB(skb)->flags = 0;
+@@ -720,16 +721,12 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
+ else
+ fl6->daddr = tunnel->parms.raddr;
+
+- if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
+- return -ENOMEM;
+-
+ /* Push GRE header. */
+ protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
+
+ if (tunnel->parms.collect_md) {
+ struct ip_tunnel_info *tun_info;
+ const struct ip_tunnel_key *key;
+- __be16 flags;
+ int tun_hlen;
+
+ tun_info = skb_tunnel_info(skb);
+@@ -751,19 +748,25 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
+ (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
+ tun_hlen = gre_calc_hlen(flags);
+
++ if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen))
++ return -ENOMEM;
++
+ gre_build_header(skb, tun_hlen,
+ flags, protocol,
+ tunnel_id_to_key32(tun_info->key.tun_id),
+- (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
++ (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
+ : 0);
+
+ } else {
+- if (tunnel->parms.o_flags & TUNNEL_SEQ)
+- tunnel->o_seqno++;
++ if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
++ return -ENOMEM;
++
++ flags = tunnel->parms.o_flags;
+
+- gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
++ gre_build_header(skb, tunnel->tun_hlen, flags,
+ protocol, tunnel->parms.o_key,
+- htonl(tunnel->o_seqno));
++ (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
++ : 0);
+ }
+
+ return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
+@@ -1000,12 +1003,14 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ ntohl(tun_id),
+ ntohl(md->u.index), truncate,
+ false);
++ proto = htons(ETH_P_ERSPAN);
+ } else if (md->version == 2) {
+ erspan_build_header_v2(skb,
+ ntohl(tun_id),
+ md->u.md2.dir,
+ get_hwid(&md->u.md2),
+ truncate, false);
++ proto = htons(ETH_P_ERSPAN2);
+ } else {
+ goto tx_err;
+ }
+@@ -1028,25 +1033,26 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ break;
+ }
+
+- if (t->parms.erspan_ver == 1)
++ if (t->parms.erspan_ver == 1) {
+ erspan_build_header(skb, ntohl(t->parms.o_key),
+ t->parms.index,
+ truncate, false);
+- else if (t->parms.erspan_ver == 2)
++ proto = htons(ETH_P_ERSPAN);
++ } else if (t->parms.erspan_ver == 2) {
+ erspan_build_header_v2(skb, ntohl(t->parms.o_key),
+ t->parms.dir,
+ t->parms.hwid,
+ truncate, false);
+- else
++ proto = htons(ETH_P_ERSPAN2);
++ } else {
+ goto tx_err;
++ }
+
+ fl6.daddr = t->parms.raddr;
+ }
+
+ /* Push GRE header. */
+- proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
+- : htons(ETH_P_ERSPAN2);
+- gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
++ gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno)));
+
+ /* TooBig packet may have updated dst->dev's mtu */
+ if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
+diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
+index 2af32200507d0..c4aa8d27e0401 100644
+--- a/net/ipv6/netfilter/nft_dup_ipv6.c
++++ b/net/ipv6/netfilter/nft_dup_ipv6.c
+@@ -13,8 +13,8 @@
+ #include <net/netfilter/ipv6/nf_dup_ipv6.h>
+
+ struct nft_dup_ipv6 {
+- enum nft_registers sreg_addr:8;
+- enum nft_registers sreg_dev:8;
++ u8 sreg_addr;
++ u8 sreg_dev;
+ };
+
+ static void nft_dup_ipv6_eval(const struct nft_expr *expr,
+@@ -38,16 +38,16 @@ static int nft_dup_ipv6_init(const struct nft_ctx *ctx,
+ if (tb[NFTA_DUP_SREG_ADDR] == NULL)
+ return -EINVAL;
+
+- priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
+- err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in6_addr));
++ err = nft_parse_register_load(tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr,
++ sizeof(struct in6_addr));
+ if (err < 0)
+ return err;
+
+- if (tb[NFTA_DUP_SREG_DEV] != NULL) {
+- priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+- return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+- }
+- return 0;
++ if (tb[NFTA_DUP_SREG_DEV])
++ err = nft_parse_register_load(tb[NFTA_DUP_SREG_DEV],
++ &priv->sreg_dev, sizeof(int));
++
++ return err;
+ }
+
+ static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr)
+diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
+index 3466b8868331e..6f6215f4d81d4 100644
+--- a/net/ipv6/udplite.c
++++ b/net/ipv6/udplite.c
+@@ -57,6 +57,8 @@ struct proto udplitev6_prot = {
+ .get_port = udp_v6_get_port,
+ .memory_allocated = &udp_memory_allocated,
+ .sysctl_mem = sysctl_udp_mem,
++ .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
++ .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
+ .obj_size = sizeof(struct udp6_sock),
+ .h.udp_table = &udplite_table,
+ #ifdef CONFIG_COMPAT
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 92f71e8f321cd..1a33c46d9c894 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1944,7 +1944,8 @@ static u32 gen_reqid(struct net *net)
+ }
+
+ static int
+-parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
++parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_policy *pol,
++ struct sadb_x_ipsecrequest *rq)
+ {
+ struct net *net = xp_net(xp);
+ struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr;
+@@ -1962,9 +1963,12 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
+ if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
+ return -EINVAL;
+ t->mode = mode;
+- if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE)
++ if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE) {
++ if ((mode == XFRM_MODE_TUNNEL || mode == XFRM_MODE_BEET) &&
++ pol->sadb_x_policy_dir == IPSEC_DIR_OUTBOUND)
++ return -EINVAL;
+ t->optional = 1;
+- else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
++ } else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
+ t->reqid = rq->sadb_x_ipsecrequest_reqid;
+ if (t->reqid > IPSEC_MANUAL_REQID_MAX)
+ t->reqid = 0;
+@@ -2006,7 +2010,7 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
+ rq->sadb_x_ipsecrequest_len < sizeof(*rq))
+ return -EINVAL;
+
+- if ((err = parse_ipsecrequest(xp, rq)) < 0)
++ if ((err = parse_ipsecrequest(xp, pol, rq)) < 0)
+ return err;
+ len -= rq->sadb_x_ipsecrequest_len;
+ rq = (void*)((u8*)rq + rq->sadb_x_ipsecrequest_len);
+diff --git a/net/netfilter/core.c b/net/netfilter/core.c
+index 451b2df998ea7..c35f45afd394d 100644
+--- a/net/netfilter/core.c
++++ b/net/netfilter/core.c
+@@ -577,9 +577,11 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct)
+
+ rcu_read_lock();
+ ct_hook = rcu_dereference(nf_ct_hook);
+- BUG_ON(ct_hook == NULL);
+- ct_hook->destroy(nfct);
++ if (ct_hook)
++ ct_hook->destroy(nfct);
+ rcu_read_unlock();
++
++ WARN_ON(!ct_hook);
+ }
+ EXPORT_SYMBOL(nf_conntrack_destroy);
+
+diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
+index 43c3c3be6defc..1e3dbed9d7840 100644
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -1180,11 +1180,12 @@ static int __init nf_conntrack_standalone_init(void)
+ nf_conntrack_htable_size_user = nf_conntrack_htable_size;
+ #endif
+
++ nf_conntrack_init_end();
++
+ ret = register_pernet_subsys(&nf_conntrack_net_ops);
+ if (ret < 0)
+ goto out_pernet;
+
+- nf_conntrack_init_end();
+ return 0;
+
+ out_pernet:
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 7794fa4c669d6..909076ef157e8 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3839,6 +3839,12 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
+ return nft_delset(&ctx, set);
+ }
+
++static int nft_validate_register_store(const struct nft_ctx *ctx,
++ enum nft_registers reg,
++ const struct nft_data *data,
++ enum nft_data_types type,
++ unsigned int len);
++
+ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
+ struct nft_set *set,
+ const struct nft_set_iter *iter,
+@@ -4286,11 +4292,54 @@ static int nft_setelem_parse_flags(const struct nft_set *set,
+ return 0;
+ }
+
++static int nft_setelem_parse_key(struct nft_ctx *ctx, struct nft_set *set,
++ struct nft_data *key, struct nlattr *attr)
++{
++ struct nft_data_desc desc;
++ int err;
++
++ err = nft_data_init(ctx, key, NFT_DATA_VALUE_MAXLEN, &desc, attr);
++ if (err < 0)
++ return err;
++
++ if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) {
++ nft_data_release(key, desc.type);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int nft_setelem_parse_data(struct nft_ctx *ctx, struct nft_set *set,
++ struct nft_data_desc *desc,
++ struct nft_data *data,
++ struct nlattr *attr)
++{
++ u32 dtype;
++ int err;
++
++ err = nft_data_init(ctx, data, NFT_DATA_VALUE_MAXLEN, desc, attr);
++ if (err < 0)
++ return err;
++
++ if (set->dtype == NFT_DATA_VERDICT)
++ dtype = NFT_DATA_VERDICT;
++ else
++ dtype = NFT_DATA_VALUE;
++
++ if (dtype != desc->type ||
++ set->dlen != desc->len) {
++ nft_data_release(data, desc->type);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ const struct nlattr *attr)
+ {
+ struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
+- struct nft_data_desc desc;
+ struct nft_set_elem elem;
+ struct sk_buff *skb;
+ uint32_t flags = 0;
+@@ -4309,17 +4358,11 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ if (err < 0)
+ return err;
+
+- err = nft_data_init(ctx, &elem.key.val, sizeof(elem.key), &desc,
+- nla[NFTA_SET_ELEM_KEY]);
++ err = nft_setelem_parse_key(ctx, set, &elem.key.val,
++ nla[NFTA_SET_ELEM_KEY]);
+ if (err < 0)
+ return err;
+
+- err = -EINVAL;
+- if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) {
+- nft_data_release(&elem.key.val, desc.type);
+- return err;
+- }
+-
+ priv = set->ops->get(ctx->net, set, &elem, flags);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+@@ -4512,14 +4555,13 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ {
+ struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
+ u8 genmask = nft_genmask_next(ctx->net);
+- struct nft_data_desc d1, d2;
+ struct nft_set_ext_tmpl tmpl;
+ struct nft_set_ext *ext, *ext2;
+ struct nft_set_elem elem;
+ struct nft_set_binding *binding;
+ struct nft_object *obj = NULL;
+ struct nft_userdata *udata;
+- struct nft_data data;
++ struct nft_data_desc desc;
+ enum nft_registers dreg;
+ struct nft_trans *trans;
+ u32 flags = 0;
+@@ -4553,6 +4595,15 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ return -EINVAL;
+ }
+
++ if (set->flags & NFT_SET_OBJECT) {
++ if (!nla[NFTA_SET_ELEM_OBJREF] &&
++ !(flags & NFT_SET_ELEM_INTERVAL_END))
++ return -EINVAL;
++ } else {
++ if (nla[NFTA_SET_ELEM_OBJREF])
++ return -EINVAL;
++ }
++
+ if ((flags & NFT_SET_ELEM_INTERVAL_END) &&
+ (nla[NFTA_SET_ELEM_DATA] ||
+ nla[NFTA_SET_ELEM_OBJREF] ||
+@@ -4584,15 +4635,12 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ return err;
+ }
+
+- err = nft_data_init(ctx, &elem.key.val, sizeof(elem.key), &d1,
+- nla[NFTA_SET_ELEM_KEY]);
++ err = nft_setelem_parse_key(ctx, set, &elem.key.val,
++ nla[NFTA_SET_ELEM_KEY]);
+ if (err < 0)
+ goto err1;
+- err = -EINVAL;
+- if (d1.type != NFT_DATA_VALUE || d1.len != set->klen)
+- goto err2;
+
+- nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, d1.len);
++ nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
+ if (timeout > 0) {
+ nft_set_ext_add(&tmpl, NFT_SET_EXT_EXPIRATION);
+ if (timeout != set->timeout)
+@@ -4600,10 +4648,6 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ }
+
+ if (nla[NFTA_SET_ELEM_OBJREF] != NULL) {
+- if (!(set->flags & NFT_SET_OBJECT)) {
+- err = -EINVAL;
+- goto err2;
+- }
+ obj = nft_obj_lookup(ctx->net, ctx->table,
+ nla[NFTA_SET_ELEM_OBJREF],
+ set->objtype, genmask);
+@@ -4615,15 +4659,11 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ }
+
+ if (nla[NFTA_SET_ELEM_DATA] != NULL) {
+- err = nft_data_init(ctx, &data, sizeof(data), &d2,
+- nla[NFTA_SET_ELEM_DATA]);
++ err = nft_setelem_parse_data(ctx, set, &desc, &elem.data.val,
++ nla[NFTA_SET_ELEM_DATA]);
+ if (err < 0)
+ goto err2;
+
+- err = -EINVAL;
+- if (set->dtype != NFT_DATA_VERDICT && d2.len != set->dlen)
+- goto err3;
+-
+ dreg = nft_type_to_reg(set->dtype);
+ list_for_each_entry(binding, &set->bindings, list) {
+ struct nft_ctx bind_ctx = {
+@@ -4637,19 +4677,19 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ continue;
+
+ err = nft_validate_register_store(&bind_ctx, dreg,
+- &data,
+- d2.type, d2.len);
++ &elem.data.val,
++ desc.type, desc.len);
+ if (err < 0)
+ goto err3;
+
+- if (d2.type == NFT_DATA_VERDICT &&
+- (data.verdict.code == NFT_GOTO ||
+- data.verdict.code == NFT_JUMP))
++ if (desc.type == NFT_DATA_VERDICT &&
++ (elem.data.val.verdict.code == NFT_GOTO ||
++ elem.data.val.verdict.code == NFT_JUMP))
+ nft_validate_state_update(ctx->net,
+ NFT_VALIDATE_NEED);
+ }
+
+- nft_set_ext_add_length(&tmpl, NFT_SET_EXT_DATA, d2.len);
++ nft_set_ext_add_length(&tmpl, NFT_SET_EXT_DATA, desc.len);
+ }
+
+ /* The full maximum length of userdata can exceed the maximum
+@@ -4665,8 +4705,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ }
+
+ err = -ENOMEM;
+- elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data, data.data,
+- timeout, expiration, GFP_KERNEL);
++ elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data,
++ elem.data.val.data, timeout, expiration,
++ GFP_KERNEL);
+ if (elem.priv == NULL)
+ goto err3;
+
+@@ -4732,9 +4773,9 @@ err4:
+ kfree(elem.priv);
+ err3:
+ if (nla[NFTA_SET_ELEM_DATA] != NULL)
+- nft_data_release(&data, d2.type);
++ nft_data_release(&elem.data.val, desc.type);
+ err2:
+- nft_data_release(&elem.key.val, d1.type);
++ nft_data_release(&elem.key.val, NFT_DATA_VALUE);
+ err1:
+ return err;
+ }
+@@ -4830,7 +4871,6 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
+ {
+ struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
+ struct nft_set_ext_tmpl tmpl;
+- struct nft_data_desc desc;
+ struct nft_set_elem elem;
+ struct nft_set_ext *ext;
+ struct nft_trans *trans;
+@@ -4841,11 +4881,10 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
+ err = nla_parse_nested_deprecated(nla, NFTA_SET_ELEM_MAX, attr,
+ nft_set_elem_policy, NULL);
+ if (err < 0)
+- goto err1;
++ return err;
+
+- err = -EINVAL;
+ if (nla[NFTA_SET_ELEM_KEY] == NULL)
+- goto err1;
++ return -EINVAL;
+
+ nft_set_ext_prepare(&tmpl);
+
+@@ -4855,37 +4894,31 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
+ if (flags != 0)
+ nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
+
+- err = nft_data_init(ctx, &elem.key.val, sizeof(elem.key), &desc,
+- nla[NFTA_SET_ELEM_KEY]);
++ err = nft_setelem_parse_key(ctx, set, &elem.key.val,
++ nla[NFTA_SET_ELEM_KEY]);
+ if (err < 0)
+- goto err1;
+-
+- err = -EINVAL;
+- if (desc.type != NFT_DATA_VALUE || desc.len != set->klen)
+- goto err2;
++ return err;
+
+- nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, desc.len);
++ nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
+
+ err = -ENOMEM;
+ elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data, NULL, 0,
+ 0, GFP_KERNEL);
+ if (elem.priv == NULL)
+- goto err2;
++ goto fail_elem;
+
+ ext = nft_set_elem_ext(set, elem.priv);
+ if (flags)
+ *nft_set_ext_flags(ext) = flags;
+
+ trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set);
+- if (trans == NULL) {
+- err = -ENOMEM;
+- goto err3;
+- }
++ if (trans == NULL)
++ goto fail_trans;
+
+ priv = set->ops->deactivate(ctx->net, set, &elem);
+ if (priv == NULL) {
+ err = -ENOENT;
+- goto err4;
++ goto fail_ops;
+ }
+ kfree(elem.priv);
+ elem.priv = priv;
+@@ -4896,13 +4929,12 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
+ list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+ return 0;
+
+-err4:
++fail_ops:
+ kfree(trans);
+-err3:
++fail_trans:
+ kfree(elem.priv);
+-err2:
+- nft_data_release(&elem.key.val, desc.type);
+-err1:
++fail_elem:
++ nft_data_release(&elem.key.val, NFT_DATA_VALUE);
+ return err;
+ }
+
+@@ -7378,28 +7410,24 @@ int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
+ }
+ EXPORT_SYMBOL_GPL(nft_parse_u32_check);
+
+-/**
+- * nft_parse_register - parse a register value from a netlink attribute
+- *
+- * @attr: netlink attribute
+- *
+- * Parse and translate a register value from a netlink attribute.
+- * Registers used to be 128 bit wide, these register numbers will be
+- * mapped to the corresponding 32 bit register numbers.
+- */
+-unsigned int nft_parse_register(const struct nlattr *attr)
++static int nft_parse_register(const struct nlattr *attr, u32 *preg)
+ {
+ unsigned int reg;
+
+ reg = ntohl(nla_get_be32(attr));
+ switch (reg) {
+ case NFT_REG_VERDICT...NFT_REG_4:
+- return reg * NFT_REG_SIZE / NFT_REG32_SIZE;
++ *preg = reg * NFT_REG_SIZE / NFT_REG32_SIZE;
++ break;
++ case NFT_REG32_00...NFT_REG32_15:
++ *preg = reg + NFT_REG_SIZE / NFT_REG32_SIZE - NFT_REG32_00;
++ break;
+ default:
+- return reg + NFT_REG_SIZE / NFT_REG32_SIZE - NFT_REG32_00;
++ return -ERANGE;
+ }
++
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(nft_parse_register);
+
+ /**
+ * nft_dump_register - dump a register value to a netlink attribute
+@@ -7432,7 +7460,7 @@ EXPORT_SYMBOL_GPL(nft_dump_register);
+ * Validate that the input register is one of the general purpose
+ * registers and that the length of the load is within the bounds.
+ */
+-int nft_validate_register_load(enum nft_registers reg, unsigned int len)
++static int nft_validate_register_load(enum nft_registers reg, unsigned int len)
+ {
+ if (reg < NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE)
+ return -EINVAL;
+@@ -7443,7 +7471,24 @@ int nft_validate_register_load(enum nft_registers reg, unsigned int len)
+
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(nft_validate_register_load);
++
++int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len)
++{
++ u32 reg;
++ int err;
++
++ err = nft_parse_register(attr, &reg);
++ if (err < 0)
++ return err;
++
++ err = nft_validate_register_load(reg, len);
++ if (err < 0)
++ return err;
++
++ *sreg = reg;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(nft_parse_register_load);
+
+ /**
+ * nft_validate_register_store - validate an expressions' register store
+@@ -7459,10 +7504,11 @@ EXPORT_SYMBOL_GPL(nft_validate_register_load);
+ * A value of NULL for the data means that its runtime gathered
+ * data.
+ */
+-int nft_validate_register_store(const struct nft_ctx *ctx,
+- enum nft_registers reg,
+- const struct nft_data *data,
+- enum nft_data_types type, unsigned int len)
++static int nft_validate_register_store(const struct nft_ctx *ctx,
++ enum nft_registers reg,
++ const struct nft_data *data,
++ enum nft_data_types type,
++ unsigned int len)
+ {
+ int err;
+
+@@ -7494,7 +7540,27 @@ int nft_validate_register_store(const struct nft_ctx *ctx,
+ return 0;
+ }
+ }
+-EXPORT_SYMBOL_GPL(nft_validate_register_store);
++
++int nft_parse_register_store(const struct nft_ctx *ctx,
++ const struct nlattr *attr, u8 *dreg,
++ const struct nft_data *data,
++ enum nft_data_types type, unsigned int len)
++{
++ int err;
++ u32 reg;
++
++ err = nft_parse_register(attr, &reg);
++ if (err < 0)
++ return err;
++
++ err = nft_validate_register_store(ctx, reg, data, type, len);
++ if (err < 0)
++ return err;
++
++ *dreg = reg;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(nft_parse_register_store);
+
+ static const struct nla_policy nft_verdict_policy[NFTA_VERDICT_MAX + 1] = {
+ [NFTA_VERDICT_CODE] = { .type = NLA_U32 },
+@@ -7800,7 +7866,9 @@ static int __net_init nf_tables_init_net(struct net *net)
+
+ static void __net_exit nf_tables_pre_exit_net(struct net *net)
+ {
++ mutex_lock(&net->nft.commit_mutex);
+ __nft_release_hooks(net);
++ mutex_unlock(&net->nft.commit_mutex);
+ }
+
+ static void __net_exit nf_tables_exit_net(struct net *net)
+diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
+index 10e9d50e4e193..ccab2e66d754b 100644
+--- a/net/netfilter/nft_bitwise.c
++++ b/net/netfilter/nft_bitwise.c
+@@ -16,8 +16,8 @@
+ #include <net/netfilter/nf_tables_offload.h>
+
+ struct nft_bitwise {
+- enum nft_registers sreg:8;
+- enum nft_registers dreg:8;
++ u8 sreg;
++ u8 dreg;
+ u8 len;
+ struct nft_data mask;
+ struct nft_data xor;
+@@ -65,14 +65,14 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
+
+ priv->len = len;
+
+- priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]);
+- err = nft_validate_register_load(priv->sreg, priv->len);
++ err = nft_parse_register_load(tb[NFTA_BITWISE_SREG], &priv->sreg,
++ priv->len);
+ if (err < 0)
+ return err;
+
+- priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]);
+- err = nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, priv->len);
++ err = nft_parse_register_store(ctx, tb[NFTA_BITWISE_DREG],
++ &priv->dreg, NULL, NFT_DATA_VALUE,
++ priv->len);
+ if (err < 0)
+ return err;
+
+diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
+index 12bed3f7bbc6d..9d5947ab8d4ef 100644
+--- a/net/netfilter/nft_byteorder.c
++++ b/net/netfilter/nft_byteorder.c
+@@ -16,8 +16,8 @@
+ #include <net/netfilter/nf_tables.h>
+
+ struct nft_byteorder {
+- enum nft_registers sreg:8;
+- enum nft_registers dreg:8;
++ u8 sreg;
++ u8 dreg;
+ enum nft_byteorder_ops op:8;
+ u8 len;
+ u8 size;
+@@ -131,20 +131,20 @@ static int nft_byteorder_init(const struct nft_ctx *ctx,
+ return -EINVAL;
+ }
+
+- priv->sreg = nft_parse_register(tb[NFTA_BYTEORDER_SREG]);
+ err = nft_parse_u32_check(tb[NFTA_BYTEORDER_LEN], U8_MAX, &len);
+ if (err < 0)
+ return err;
+
+ priv->len = len;
+
+- err = nft_validate_register_load(priv->sreg, priv->len);
++ err = nft_parse_register_load(tb[NFTA_BYTEORDER_SREG], &priv->sreg,
++ priv->len);
+ if (err < 0)
+ return err;
+
+- priv->dreg = nft_parse_register(tb[NFTA_BYTEORDER_DREG]);
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, priv->len);
++ return nft_parse_register_store(ctx, tb[NFTA_BYTEORDER_DREG],
++ &priv->dreg, NULL, NFT_DATA_VALUE,
++ priv->len);
+ }
+
+ static int nft_byteorder_dump(struct sk_buff *skb, const struct nft_expr *expr)
+diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
+index ae730dba60c8e..a7c1e7c4381a1 100644
+--- a/net/netfilter/nft_cmp.c
++++ b/net/netfilter/nft_cmp.c
+@@ -17,7 +17,7 @@
+
+ struct nft_cmp_expr {
+ struct nft_data data;
+- enum nft_registers sreg:8;
++ u8 sreg;
+ u8 len;
+ enum nft_cmp_ops op:8;
+ };
+@@ -86,8 +86,7 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ return err;
+ }
+
+- priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
+- err = nft_validate_register_load(priv->sreg, desc.len);
++ err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
+ if (err < 0)
+ return err;
+
+@@ -169,8 +168,7 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx,
+ if (err < 0)
+ return err;
+
+- priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
+- err = nft_validate_register_load(priv->sreg, desc.len);
++ err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
+ if (err < 0)
+ return err;
+
+diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
+index 28991730728b9..7e269f7378cc0 100644
+--- a/net/netfilter/nft_ct.c
++++ b/net/netfilter/nft_ct.c
+@@ -27,8 +27,8 @@ struct nft_ct {
+ enum nft_ct_keys key:8;
+ enum ip_conntrack_dir dir:8;
+ union {
+- enum nft_registers dreg:8;
+- enum nft_registers sreg:8;
++ u8 dreg;
++ u8 sreg;
+ };
+ };
+
+@@ -498,9 +498,8 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
+ }
+ }
+
+- priv->dreg = nft_parse_register(tb[NFTA_CT_DREG]);
+- err = nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, len);
++ err = nft_parse_register_store(ctx, tb[NFTA_CT_DREG], &priv->dreg, NULL,
++ NFT_DATA_VALUE, len);
+ if (err < 0)
+ return err;
+
+@@ -600,8 +599,7 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
+ }
+ }
+
+- priv->sreg = nft_parse_register(tb[NFTA_CT_SREG]);
+- err = nft_validate_register_load(priv->sreg, len);
++ err = nft_parse_register_load(tb[NFTA_CT_SREG], &priv->sreg, len);
+ if (err < 0)
+ goto err1;
+
+diff --git a/net/netfilter/nft_dup_netdev.c b/net/netfilter/nft_dup_netdev.c
+index 6007089e1c2f7..a5b560ee0337a 100644
+--- a/net/netfilter/nft_dup_netdev.c
++++ b/net/netfilter/nft_dup_netdev.c
+@@ -14,7 +14,7 @@
+ #include <net/netfilter/nf_dup_netdev.h>
+
+ struct nft_dup_netdev {
+- enum nft_registers sreg_dev:8;
++ u8 sreg_dev;
+ };
+
+ static void nft_dup_netdev_eval(const struct nft_expr *expr,
+@@ -40,8 +40,8 @@ static int nft_dup_netdev_init(const struct nft_ctx *ctx,
+ if (tb[NFTA_DUP_SREG_DEV] == NULL)
+ return -EINVAL;
+
+- priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+- return nft_validate_register_load(priv->sreg_dev, sizeof(int));
++ return nft_parse_register_load(tb[NFTA_DUP_SREG_DEV], &priv->sreg_dev,
++ sizeof(int));
+ }
+
+ static int nft_dup_netdev_dump(struct sk_buff *skb, const struct nft_expr *expr)
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index 7f9e6c90f7271..9f064f7b31d6d 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -16,8 +16,8 @@ struct nft_dynset {
+ struct nft_set *set;
+ struct nft_set_ext_tmpl tmpl;
+ enum nft_dynset_ops op:8;
+- enum nft_registers sreg_key:8;
+- enum nft_registers sreg_data:8;
++ u8 sreg_key;
++ u8 sreg_data;
+ bool invert;
+ u64 timeout;
+ struct nft_expr *expr;
+@@ -177,8 +177,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
+ return err;
+ }
+
+- priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
+- err = nft_validate_register_load(priv->sreg_key, set->klen);
++ err = nft_parse_register_load(tb[NFTA_DYNSET_SREG_KEY], &priv->sreg_key,
++ set->klen);
+ if (err < 0)
+ return err;
+
+@@ -188,8 +188,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
+ if (set->dtype == NFT_DATA_VERDICT)
+ return -EOPNOTSUPP;
+
+- priv->sreg_data = nft_parse_register(tb[NFTA_DYNSET_SREG_DATA]);
+- err = nft_validate_register_load(priv->sreg_data, set->dlen);
++ err = nft_parse_register_load(tb[NFTA_DYNSET_SREG_DATA],
++ &priv->sreg_data, set->dlen);
+ if (err < 0)
+ return err;
+ } else if (set->flags & NFT_SET_MAP)
+diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
+index faa0844c01fb8..670dd146fb2b1 100644
+--- a/net/netfilter/nft_exthdr.c
++++ b/net/netfilter/nft_exthdr.c
+@@ -19,8 +19,8 @@ struct nft_exthdr {
+ u8 offset;
+ u8 len;
+ u8 op;
+- enum nft_registers dreg:8;
+- enum nft_registers sreg:8;
++ u8 dreg;
++ u8 sreg;
+ u8 flags;
+ };
+
+@@ -353,12 +353,12 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
+ priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
+ priv->offset = offset;
+ priv->len = len;
+- priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]);
+ priv->flags = flags;
+ priv->op = op;
+
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, priv->len);
++ return nft_parse_register_store(ctx, tb[NFTA_EXTHDR_DREG],
++ &priv->dreg, NULL, NFT_DATA_VALUE,
++ priv->len);
+ }
+
+ static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx,
+@@ -403,11 +403,11 @@ static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx,
+ priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
+ priv->offset = offset;
+ priv->len = len;
+- priv->sreg = nft_parse_register(tb[NFTA_EXTHDR_SREG]);
+ priv->flags = flags;
+ priv->op = op;
+
+- return nft_validate_register_load(priv->sreg, priv->len);
++ return nft_parse_register_load(tb[NFTA_EXTHDR_SREG], &priv->sreg,
++ priv->len);
+ }
+
+ static int nft_exthdr_ipv4_init(const struct nft_ctx *ctx,
+diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
+index cfac0964f48db..d2777aff5943d 100644
+--- a/net/netfilter/nft_fib.c
++++ b/net/netfilter/nft_fib.c
+@@ -86,7 +86,6 @@ int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ return -EINVAL;
+
+ priv->result = ntohl(nla_get_be32(tb[NFTA_FIB_RESULT]));
+- priv->dreg = nft_parse_register(tb[NFTA_FIB_DREG]);
+
+ switch (priv->result) {
+ case NFT_FIB_RESULT_OIF:
+@@ -106,8 +105,8 @@ int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ return -EINVAL;
+ }
+
+- err = nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, len);
++ err = nft_parse_register_store(ctx, tb[NFTA_FIB_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, len);
+ if (err < 0)
+ return err;
+
+diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
+index 3b0dcd170551b..7730409f6f091 100644
+--- a/net/netfilter/nft_fwd_netdev.c
++++ b/net/netfilter/nft_fwd_netdev.c
+@@ -18,7 +18,7 @@
+ #include <net/ip.h>
+
+ struct nft_fwd_netdev {
+- enum nft_registers sreg_dev:8;
++ u8 sreg_dev;
+ };
+
+ static void nft_fwd_netdev_eval(const struct nft_expr *expr,
+@@ -50,8 +50,8 @@ static int nft_fwd_netdev_init(const struct nft_ctx *ctx,
+ if (tb[NFTA_FWD_SREG_DEV] == NULL)
+ return -EINVAL;
+
+- priv->sreg_dev = nft_parse_register(tb[NFTA_FWD_SREG_DEV]);
+- return nft_validate_register_load(priv->sreg_dev, sizeof(int));
++ return nft_parse_register_load(tb[NFTA_FWD_SREG_DEV], &priv->sreg_dev,
++ sizeof(int));
+ }
+
+ static int nft_fwd_netdev_dump(struct sk_buff *skb, const struct nft_expr *expr)
+@@ -83,8 +83,8 @@ static bool nft_fwd_netdev_offload_action(const struct nft_expr *expr)
+ }
+
+ struct nft_fwd_neigh {
+- enum nft_registers sreg_dev:8;
+- enum nft_registers sreg_addr:8;
++ u8 sreg_dev;
++ u8 sreg_addr;
+ u8 nfproto;
+ };
+
+@@ -162,8 +162,6 @@ static int nft_fwd_neigh_init(const struct nft_ctx *ctx,
+ !tb[NFTA_FWD_NFPROTO])
+ return -EINVAL;
+
+- priv->sreg_dev = nft_parse_register(tb[NFTA_FWD_SREG_DEV]);
+- priv->sreg_addr = nft_parse_register(tb[NFTA_FWD_SREG_ADDR]);
+ priv->nfproto = ntohl(nla_get_be32(tb[NFTA_FWD_NFPROTO]));
+
+ switch (priv->nfproto) {
+@@ -177,11 +175,13 @@ static int nft_fwd_neigh_init(const struct nft_ctx *ctx,
+ return -EOPNOTSUPP;
+ }
+
+- err = nft_validate_register_load(priv->sreg_dev, sizeof(int));
++ err = nft_parse_register_load(tb[NFTA_FWD_SREG_DEV], &priv->sreg_dev,
++ sizeof(int));
+ if (err < 0)
+ return err;
+
+- return nft_validate_register_load(priv->sreg_addr, addr_len);
++ return nft_parse_register_load(tb[NFTA_FWD_SREG_ADDR], &priv->sreg_addr,
++ addr_len);
+ }
+
+ static int nft_fwd_neigh_dump(struct sk_buff *skb, const struct nft_expr *expr)
+diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
+index b836d550b9199..2ff6c7759494b 100644
+--- a/net/netfilter/nft_hash.c
++++ b/net/netfilter/nft_hash.c
+@@ -14,8 +14,8 @@
+ #include <linux/jhash.h>
+
+ struct nft_jhash {
+- enum nft_registers sreg:8;
+- enum nft_registers dreg:8;
++ u8 sreg;
++ u8 dreg;
+ u8 len;
+ bool autogen_seed:1;
+ u32 modulus;
+@@ -38,7 +38,7 @@ static void nft_jhash_eval(const struct nft_expr *expr,
+ }
+
+ struct nft_symhash {
+- enum nft_registers dreg:8;
++ u8 dreg;
+ u32 modulus;
+ u32 offset;
+ };
+@@ -83,9 +83,6 @@ static int nft_jhash_init(const struct nft_ctx *ctx,
+ if (tb[NFTA_HASH_OFFSET])
+ priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));
+
+- priv->sreg = nft_parse_register(tb[NFTA_HASH_SREG]);
+- priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
+-
+ err = nft_parse_u32_check(tb[NFTA_HASH_LEN], U8_MAX, &len);
+ if (err < 0)
+ return err;
+@@ -94,6 +91,10 @@ static int nft_jhash_init(const struct nft_ctx *ctx,
+
+ priv->len = len;
+
++ err = nft_parse_register_load(tb[NFTA_HASH_SREG], &priv->sreg, len);
++ if (err < 0)
++ return err;
++
+ priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
+ if (priv->modulus < 1)
+ return -ERANGE;
+@@ -108,9 +109,8 @@ static int nft_jhash_init(const struct nft_ctx *ctx,
+ get_random_bytes(&priv->seed, sizeof(priv->seed));
+ }
+
+- return nft_validate_register_load(priv->sreg, len) &&
+- nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, sizeof(u32));
++ return nft_parse_register_store(ctx, tb[NFTA_HASH_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, sizeof(u32));
+ }
+
+ static int nft_symhash_init(const struct nft_ctx *ctx,
+@@ -126,8 +126,6 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
+ if (tb[NFTA_HASH_OFFSET])
+ priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));
+
+- priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
+-
+ priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
+ if (priv->modulus < 1)
+ return -ERANGE;
+@@ -135,8 +133,9 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
+ if (priv->offset + priv->modulus - 1 < priv->offset)
+ return -EOVERFLOW;
+
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, sizeof(u32));
++ return nft_parse_register_store(ctx, tb[NFTA_HASH_DREG],
++ &priv->dreg, NULL, NFT_DATA_VALUE,
++ sizeof(u32));
+ }
+
+ static int nft_jhash_dump(struct sk_buff *skb,
+diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
+index 98a8149be094b..6a95d532eaecc 100644
+--- a/net/netfilter/nft_immediate.c
++++ b/net/netfilter/nft_immediate.c
+@@ -48,9 +48,9 @@ static int nft_immediate_init(const struct nft_ctx *ctx,
+
+ priv->dlen = desc.len;
+
+- priv->dreg = nft_parse_register(tb[NFTA_IMMEDIATE_DREG]);
+- err = nft_validate_register_store(ctx, priv->dreg, &priv->data,
+- desc.type, desc.len);
++ err = nft_parse_register_store(ctx, tb[NFTA_IMMEDIATE_DREG],
++ &priv->dreg, &priv->data, desc.type,
++ desc.len);
+ if (err < 0)
+ goto err1;
+
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index 4eb4d076927e4..e0ffd463a1320 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -17,8 +17,8 @@
+
+ struct nft_lookup {
+ struct nft_set *set;
+- enum nft_registers sreg:8;
+- enum nft_registers dreg:8;
++ u8 sreg;
++ u8 dreg;
+ bool invert;
+ struct nft_set_binding binding;
+ };
+@@ -73,8 +73,8 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
+ if (IS_ERR(set))
+ return PTR_ERR(set);
+
+- priv->sreg = nft_parse_register(tb[NFTA_LOOKUP_SREG]);
+- err = nft_validate_register_load(priv->sreg, set->klen);
++ err = nft_parse_register_load(tb[NFTA_LOOKUP_SREG], &priv->sreg,
++ set->klen);
+ if (err < 0)
+ return err;
+
+@@ -97,9 +97,9 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
+ if (!(set->flags & NFT_SET_MAP))
+ return -EINVAL;
+
+- priv->dreg = nft_parse_register(tb[NFTA_LOOKUP_DREG]);
+- err = nft_validate_register_store(ctx, priv->dreg, NULL,
+- set->dtype, set->dlen);
++ err = nft_parse_register_store(ctx, tb[NFTA_LOOKUP_DREG],
++ &priv->dreg, NULL, set->dtype,
++ set->dlen);
+ if (err < 0)
+ return err;
+ } else if (set->flags & NFT_SET_MAP)
+diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
+index 39dc94f2491e3..c2f04885347e7 100644
+--- a/net/netfilter/nft_masq.c
++++ b/net/netfilter/nft_masq.c
+@@ -15,8 +15,8 @@
+
+ struct nft_masq {
+ u32 flags;
+- enum nft_registers sreg_proto_min:8;
+- enum nft_registers sreg_proto_max:8;
++ u8 sreg_proto_min;
++ u8 sreg_proto_max;
+ };
+
+ static const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
+@@ -54,19 +54,15 @@ static int nft_masq_init(const struct nft_ctx *ctx,
+ }
+
+ if (tb[NFTA_MASQ_REG_PROTO_MIN]) {
+- priv->sreg_proto_min =
+- nft_parse_register(tb[NFTA_MASQ_REG_PROTO_MIN]);
+-
+- err = nft_validate_register_load(priv->sreg_proto_min, plen);
++ err = nft_parse_register_load(tb[NFTA_MASQ_REG_PROTO_MIN],
++ &priv->sreg_proto_min, plen);
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_MASQ_REG_PROTO_MAX]) {
+- priv->sreg_proto_max =
+- nft_parse_register(tb[NFTA_MASQ_REG_PROTO_MAX]);
+-
+- err = nft_validate_register_load(priv->sreg_proto_max,
+- plen);
++ err = nft_parse_register_load(tb[NFTA_MASQ_REG_PROTO_MAX],
++ &priv->sreg_proto_max,
++ plen);
+ if (err < 0)
+ return err;
+ } else {
+diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
+index dda1e55d5801a..ec2798ff822e6 100644
+--- a/net/netfilter/nft_meta.c
++++ b/net/netfilter/nft_meta.c
+@@ -380,9 +380,8 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
+ return -EOPNOTSUPP;
+ }
+
+- priv->dreg = nft_parse_register(tb[NFTA_META_DREG]);
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, len);
++ return nft_parse_register_store(ctx, tb[NFTA_META_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, len);
+ }
+ EXPORT_SYMBOL_GPL(nft_meta_get_init);
+
+@@ -475,8 +474,7 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
+ return -EOPNOTSUPP;
+ }
+
+- priv->sreg = nft_parse_register(tb[NFTA_META_SREG]);
+- err = nft_validate_register_load(priv->sreg, len);
++ err = nft_parse_register_load(tb[NFTA_META_SREG], &priv->sreg, len);
+ if (err < 0)
+ return err;
+
+diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
+index 0c5bc3c37ecf4..50fbd3c1d9f19 100644
+--- a/net/netfilter/nft_nat.c
++++ b/net/netfilter/nft_nat.c
+@@ -21,10 +21,10 @@
+ #include <net/ip.h>
+
+ struct nft_nat {
+- enum nft_registers sreg_addr_min:8;
+- enum nft_registers sreg_addr_max:8;
+- enum nft_registers sreg_proto_min:8;
+- enum nft_registers sreg_proto_max:8;
++ u8 sreg_addr_min;
++ u8 sreg_addr_max;
++ u8 sreg_proto_min;
++ u8 sreg_proto_max;
+ enum nf_nat_manip_type type:8;
+ u8 family;
+ u16 flags;
+@@ -154,18 +154,15 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ priv->family = family;
+
+ if (tb[NFTA_NAT_REG_ADDR_MIN]) {
+- priv->sreg_addr_min =
+- nft_parse_register(tb[NFTA_NAT_REG_ADDR_MIN]);
+- err = nft_validate_register_load(priv->sreg_addr_min, alen);
++ err = nft_parse_register_load(tb[NFTA_NAT_REG_ADDR_MIN],
++ &priv->sreg_addr_min, alen);
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_NAT_REG_ADDR_MAX]) {
+- priv->sreg_addr_max =
+- nft_parse_register(tb[NFTA_NAT_REG_ADDR_MAX]);
+-
+- err = nft_validate_register_load(priv->sreg_addr_max,
+- alen);
++ err = nft_parse_register_load(tb[NFTA_NAT_REG_ADDR_MAX],
++ &priv->sreg_addr_max,
++ alen);
+ if (err < 0)
+ return err;
+ } else {
+@@ -175,19 +172,15 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+
+ plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all);
+ if (tb[NFTA_NAT_REG_PROTO_MIN]) {
+- priv->sreg_proto_min =
+- nft_parse_register(tb[NFTA_NAT_REG_PROTO_MIN]);
+-
+- err = nft_validate_register_load(priv->sreg_proto_min, plen);
++ err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MIN],
++ &priv->sreg_proto_min, plen);
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_NAT_REG_PROTO_MAX]) {
+- priv->sreg_proto_max =
+- nft_parse_register(tb[NFTA_NAT_REG_PROTO_MAX]);
+-
+- err = nft_validate_register_load(priv->sreg_proto_max,
+- plen);
++ err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MAX],
++ &priv->sreg_proto_max,
++ plen);
+ if (err < 0)
+ return err;
+ } else {
+diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
+index 48edb9d5f0125..7bbca252e7fc5 100644
+--- a/net/netfilter/nft_numgen.c
++++ b/net/netfilter/nft_numgen.c
+@@ -16,7 +16,7 @@
+ static DEFINE_PER_CPU(struct rnd_state, nft_numgen_prandom_state);
+
+ struct nft_ng_inc {
+- enum nft_registers dreg:8;
++ u8 dreg;
+ u32 modulus;
+ atomic_t counter;
+ u32 offset;
+@@ -66,11 +66,10 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx,
+ if (priv->offset + priv->modulus - 1 < priv->offset)
+ return -EOVERFLOW;
+
+- priv->dreg = nft_parse_register(tb[NFTA_NG_DREG]);
+ atomic_set(&priv->counter, priv->modulus - 1);
+
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, sizeof(u32));
++ return nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, sizeof(u32));
+ }
+
+ static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
+@@ -100,7 +99,7 @@ static int nft_ng_inc_dump(struct sk_buff *skb, const struct nft_expr *expr)
+ }
+
+ struct nft_ng_random {
+- enum nft_registers dreg:8;
++ u8 dreg;
+ u32 modulus;
+ u32 offset;
+ };
+@@ -140,10 +139,8 @@ static int nft_ng_random_init(const struct nft_ctx *ctx,
+
+ prandom_init_once(&nft_numgen_prandom_state);
+
+- priv->dreg = nft_parse_register(tb[NFTA_NG_DREG]);
+-
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, sizeof(u32));
++ return nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, sizeof(u32));
+ }
+
+ static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
+diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
+index 74c61278e6bd3..7032b80592b20 100644
+--- a/net/netfilter/nft_objref.c
++++ b/net/netfilter/nft_objref.c
+@@ -95,7 +95,7 @@ static const struct nft_expr_ops nft_objref_ops = {
+
+ struct nft_objref_map {
+ struct nft_set *set;
+- enum nft_registers sreg:8;
++ u8 sreg;
+ struct nft_set_binding binding;
+ };
+
+@@ -137,8 +137,8 @@ static int nft_objref_map_init(const struct nft_ctx *ctx,
+ if (!(set->flags & NFT_SET_OBJECT))
+ return -EINVAL;
+
+- priv->sreg = nft_parse_register(tb[NFTA_OBJREF_SET_SREG]);
+- err = nft_validate_register_load(priv->sreg, set->klen);
++ err = nft_parse_register_load(tb[NFTA_OBJREF_SET_SREG], &priv->sreg,
++ set->klen);
+ if (err < 0)
+ return err;
+
+diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
+index d966a3aff1d33..b7c2bc01f8a27 100644
+--- a/net/netfilter/nft_osf.c
++++ b/net/netfilter/nft_osf.c
+@@ -6,7 +6,7 @@
+ #include <linux/netfilter/nfnetlink_osf.h>
+
+ struct nft_osf {
+- enum nft_registers dreg:8;
++ u8 dreg;
+ u8 ttl;
+ u32 flags;
+ };
+@@ -83,9 +83,9 @@ static int nft_osf_init(const struct nft_ctx *ctx,
+ priv->flags = flags;
+ }
+
+- priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]);
+- err = nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN);
++ err = nft_parse_register_store(ctx, tb[NFTA_OSF_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE,
++ NFT_OSF_MAXGENRELEN);
+ if (err < 0)
+ return err;
+
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 6ed6ccef5e1ad..54298fcd82f0e 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -135,10 +135,10 @@ static int nft_payload_init(const struct nft_ctx *ctx,
+ priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+ priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+ priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+- priv->dreg = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);
+
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, priv->len);
++ return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
++ &priv->dreg, NULL, NFT_DATA_VALUE,
++ priv->len);
+ }
+
+ static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
+@@ -564,7 +564,6 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
+ priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+ priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+ priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+- priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);
+
+ if (tb[NFTA_PAYLOAD_CSUM_TYPE])
+ csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
+@@ -595,7 +594,8 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
+ }
+ priv->csum_type = csum_type;
+
+- return nft_validate_register_load(priv->sreg, priv->len);
++ return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
++ priv->len);
+ }
+
+ static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
+diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
+index 5ece0a6aa8c3c..94a4f0a5a28e4 100644
+--- a/net/netfilter/nft_queue.c
++++ b/net/netfilter/nft_queue.c
+@@ -19,10 +19,10 @@
+ static u32 jhash_initval __read_mostly;
+
+ struct nft_queue {
+- enum nft_registers sreg_qnum:8;
+- u16 queuenum;
+- u16 queues_total;
+- u16 flags;
++ u8 sreg_qnum;
++ u16 queuenum;
++ u16 queues_total;
++ u16 flags;
+ };
+
+ static void nft_queue_eval(const struct nft_expr *expr,
+@@ -111,8 +111,8 @@ static int nft_queue_sreg_init(const struct nft_ctx *ctx,
+ struct nft_queue *priv = nft_expr_priv(expr);
+ int err;
+
+- priv->sreg_qnum = nft_parse_register(tb[NFTA_QUEUE_SREG_QNUM]);
+- err = nft_validate_register_load(priv->sreg_qnum, sizeof(u32));
++ err = nft_parse_register_load(tb[NFTA_QUEUE_SREG_QNUM],
++ &priv->sreg_qnum, sizeof(u32));
+ if (err < 0)
+ return err;
+
+diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
+index 89efcc5a533d2..e4a1c44d7f513 100644
+--- a/net/netfilter/nft_range.c
++++ b/net/netfilter/nft_range.c
+@@ -15,7 +15,7 @@
+ struct nft_range_expr {
+ struct nft_data data_from;
+ struct nft_data data_to;
+- enum nft_registers sreg:8;
++ u8 sreg;
+ u8 len;
+ enum nft_range_ops op:8;
+ };
+@@ -86,8 +86,8 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
+ goto err2;
+ }
+
+- priv->sreg = nft_parse_register(tb[NFTA_RANGE_SREG]);
+- err = nft_validate_register_load(priv->sreg, desc_from.len);
++ err = nft_parse_register_load(tb[NFTA_RANGE_SREG], &priv->sreg,
++ desc_from.len);
+ if (err < 0)
+ goto err2;
+
+diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
+index d75de63189b61..81a191eb5c368 100644
+--- a/net/netfilter/nft_redir.c
++++ b/net/netfilter/nft_redir.c
+@@ -14,8 +14,8 @@
+ #include <net/netfilter/nf_tables.h>
+
+ struct nft_redir {
+- enum nft_registers sreg_proto_min:8;
+- enum nft_registers sreg_proto_max:8;
++ u8 sreg_proto_min;
++ u8 sreg_proto_max;
+ u16 flags;
+ };
+
+@@ -50,19 +50,15 @@ static int nft_redir_init(const struct nft_ctx *ctx,
+
+ plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all);
+ if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
+- priv->sreg_proto_min =
+- nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MIN]);
+-
+- err = nft_validate_register_load(priv->sreg_proto_min, plen);
++ err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MIN],
++ &priv->sreg_proto_min, plen);
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_REDIR_REG_PROTO_MAX]) {
+- priv->sreg_proto_max =
+- nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MAX]);
+-
+- err = nft_validate_register_load(priv->sreg_proto_max,
+- plen);
++ err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MAX],
++ &priv->sreg_proto_max,
++ plen);
+ if (err < 0)
+ return err;
+ } else {
+diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
+index 7cfcb0e2f7ee1..bcd01a63e38f1 100644
+--- a/net/netfilter/nft_rt.c
++++ b/net/netfilter/nft_rt.c
+@@ -15,7 +15,7 @@
+
+ struct nft_rt {
+ enum nft_rt_keys key:8;
+- enum nft_registers dreg:8;
++ u8 dreg;
+ };
+
+ static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skbdst)
+@@ -141,9 +141,8 @@ static int nft_rt_get_init(const struct nft_ctx *ctx,
+ return -EOPNOTSUPP;
+ }
+
+- priv->dreg = nft_parse_register(tb[NFTA_RT_DREG]);
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, len);
++ return nft_parse_register_store(ctx, tb[NFTA_RT_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, len);
+ }
+
+ static int nft_rt_get_dump(struct sk_buff *skb,
+diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
+index 4026ec38526f6..7e4f7063f4811 100644
+--- a/net/netfilter/nft_socket.c
++++ b/net/netfilter/nft_socket.c
+@@ -10,7 +10,7 @@
+ struct nft_socket {
+ enum nft_socket_keys key:8;
+ union {
+- enum nft_registers dreg:8;
++ u8 dreg;
+ };
+ };
+
+@@ -119,9 +119,8 @@ static int nft_socket_init(const struct nft_ctx *ctx,
+ return -EOPNOTSUPP;
+ }
+
+- priv->dreg = nft_parse_register(tb[NFTA_SOCKET_DREG]);
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, len);
++ return nft_parse_register_store(ctx, tb[NFTA_SOCKET_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, len);
+ }
+
+ static int nft_socket_dump(struct sk_buff *skb,
+diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
+index a0e30bf4a845c..db780b5985abc 100644
+--- a/net/netfilter/nft_tproxy.c
++++ b/net/netfilter/nft_tproxy.c
+@@ -13,9 +13,9 @@
+ #endif
+
+ struct nft_tproxy {
+- enum nft_registers sreg_addr:8;
+- enum nft_registers sreg_port:8;
+- u8 family;
++ u8 sreg_addr;
++ u8 sreg_port;
++ u8 family;
+ };
+
+ static void nft_tproxy_eval_v4(const struct nft_expr *expr,
+@@ -254,15 +254,15 @@ static int nft_tproxy_init(const struct nft_ctx *ctx,
+ }
+
+ if (tb[NFTA_TPROXY_REG_ADDR]) {
+- priv->sreg_addr = nft_parse_register(tb[NFTA_TPROXY_REG_ADDR]);
+- err = nft_validate_register_load(priv->sreg_addr, alen);
++ err = nft_parse_register_load(tb[NFTA_TPROXY_REG_ADDR],
++ &priv->sreg_addr, alen);
+ if (err < 0)
+ return err;
+ }
+
+ if (tb[NFTA_TPROXY_REG_PORT]) {
+- priv->sreg_port = nft_parse_register(tb[NFTA_TPROXY_REG_PORT]);
+- err = nft_validate_register_load(priv->sreg_port, sizeof(u16));
++ err = nft_parse_register_load(tb[NFTA_TPROXY_REG_PORT],
++ &priv->sreg_port, sizeof(u16));
+ if (err < 0)
+ return err;
+ }
+diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
+index 4e850c81ad8d8..b2070f9f98ffa 100644
+--- a/net/netfilter/nft_tunnel.c
++++ b/net/netfilter/nft_tunnel.c
+@@ -14,7 +14,7 @@
+
+ struct nft_tunnel {
+ enum nft_tunnel_keys key:8;
+- enum nft_registers dreg:8;
++ u8 dreg;
+ enum nft_tunnel_mode mode:8;
+ };
+
+@@ -92,8 +92,6 @@ static int nft_tunnel_get_init(const struct nft_ctx *ctx,
+ return -EOPNOTSUPP;
+ }
+
+- priv->dreg = nft_parse_register(tb[NFTA_TUNNEL_DREG]);
+-
+ if (tb[NFTA_TUNNEL_MODE]) {
+ priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
+ if (priv->mode > NFT_TUNNEL_MODE_MAX)
+@@ -102,8 +100,8 @@ static int nft_tunnel_get_init(const struct nft_ctx *ctx,
+ priv->mode = NFT_TUNNEL_MODE_NONE;
+ }
+
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, len);
++ return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, len);
+ }
+
+ static int nft_tunnel_get_dump(struct sk_buff *skb,
+diff --git a/net/netfilter/nft_xfrm.c b/net/netfilter/nft_xfrm.c
+index 06d5cabf1d7c4..cbbbc4ecad3ae 100644
+--- a/net/netfilter/nft_xfrm.c
++++ b/net/netfilter/nft_xfrm.c
+@@ -24,7 +24,7 @@ static const struct nla_policy nft_xfrm_policy[NFTA_XFRM_MAX + 1] = {
+
+ struct nft_xfrm {
+ enum nft_xfrm_keys key:8;
+- enum nft_registers dreg:8;
++ u8 dreg;
+ u8 dir;
+ u8 spnum;
+ };
+@@ -86,9 +86,8 @@ static int nft_xfrm_get_init(const struct nft_ctx *ctx,
+
+ priv->spnum = spnum;
+
+- priv->dreg = nft_parse_register(tb[NFTA_XFRM_DREG]);
+- return nft_validate_register_store(ctx, priv->dreg, NULL,
+- NFT_DATA_VALUE, len);
++ return nft_parse_register_store(ctx, tb[NFTA_XFRM_DREG], &priv->dreg,
++ NULL, NFT_DATA_VALUE, len);
+ }
+
+ /* Return true if key asks for daddr/saddr and current
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 00f040fb46b9c..31a3a562854fc 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1991,7 +1991,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+
+ skb_free_datagram(sk, skb);
+
+- if (nlk->cb_running &&
++ if (READ_ONCE(nlk->cb_running) &&
+ atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+ ret = netlink_dump(sk);
+ if (ret) {
+@@ -2284,7 +2284,7 @@ static int netlink_dump(struct sock *sk)
+ if (cb->done)
+ cb->done(cb);
+
+- nlk->cb_running = false;
++ WRITE_ONCE(nlk->cb_running, false);
+ module = cb->module;
+ skb = cb->skb;
+ mutex_unlock(nlk->cb_mutex);
+@@ -2347,7 +2347,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ goto error_put;
+ }
+
+- nlk->cb_running = true;
++ WRITE_ONCE(nlk->cb_running, true);
+ nlk->dump_done_errno = INT_MAX;
+
+ mutex_unlock(nlk->cb_mutex);
+@@ -2636,7 +2636,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
+ nlk->groups ? (u32)nlk->groups[0] : 0,
+ sk_rmem_alloc_get(s),
+ sk_wmem_alloc_get(s),
+- nlk->cb_running,
++ READ_ONCE(nlk->cb_running),
+ refcount_read(&s->sk_refcnt),
+ atomic_read(&s->sk_drops),
+ sock_i_ino(s)
+diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
+index e9ca007718b7e..0f23e5e8e03eb 100644
+--- a/net/nsh/nsh.c
++++ b/net/nsh/nsh.c
+@@ -77,13 +77,12 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+ {
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
++ u16 mac_offset = skb->mac_header;
+ unsigned int nsh_len, mac_len;
+ __be16 proto;
+- int nhoff;
+
+ skb_reset_network_header(skb);
+
+- nhoff = skb->network_header - skb->mac_header;
+ mac_len = skb->mac_len;
+
+ if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
+@@ -108,15 +107,14 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
+ segs = skb_mac_gso_segment(skb, features);
+ if (IS_ERR_OR_NULL(segs)) {
+ skb_gso_error_unwind(skb, htons(ETH_P_NSH), nsh_len,
+- skb->network_header - nhoff,
+- mac_len);
++ mac_offset, mac_len);
+ goto out;
+ }
+
+ for (skb = segs; skb; skb = skb->next) {
+ skb->protocol = htons(ETH_P_NSH);
+ __skb_push(skb, nsh_len);
+- skb_set_mac_header(skb, -nhoff);
++ skb->mac_header = mac_offset;
+ skb->network_header = skb->mac_header + mac_len;
+ skb->mac_len = mac_len;
+ }
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 109a848aca151..7a940f2f30671 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1876,10 +1876,8 @@ static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
+ /* Move network header to the right position for VLAN tagged packets */
+ if (likely(skb->dev->type == ARPHRD_ETHER) &&
+ eth_type_vlan(skb->protocol) &&
+- __vlan_get_protocol(skb, skb->protocol, &depth) != 0) {
+- if (pskb_may_pull(skb, depth))
+- skb_set_network_header(skb, depth);
+- }
++ vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
++ skb_set_network_header(skb, depth);
+
+ skb_probe_transport_header(skb);
+ }
+diff --git a/net/socket.c b/net/socket.c
+index 02feaf5bd84a3..9dd4c7ce8343a 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -2723,7 +2723,7 @@ static int do_recvmmsg(int fd, struct mmsghdr __user *mmsg,
+ * error to return on the next call or if the
+ * app asks about it using getsockopt(SO_ERROR).
+ */
+- sock->sk->sk_err = -err;
++ WRITE_ONCE(sock->sk->sk_err, -err);
+ }
+ out_put:
+ fput_light(sock->file, fput_needed);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index f33e90bd0683b..01fd049da104a 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -531,7 +531,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ /* Clear state */
+ unix_state_lock(sk);
+ sock_orphan(sk);
+- sk->sk_shutdown = SHUTDOWN_MASK;
++ WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
+ path = u->path;
+ u->path.dentry = NULL;
+ u->path.mnt = NULL;
+@@ -549,7 +549,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
+ unix_state_lock(skpair);
+ /* No more writes */
+- skpair->sk_shutdown = SHUTDOWN_MASK;
++ WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
+ if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
+ skpair->sk_err = ECONNRESET;
+ unix_state_unlock(skpair);
+@@ -1227,7 +1227,7 @@ static long unix_wait_for_peer(struct sock *other, long timeo)
+
+ sched = !sock_flag(other, SOCK_DEAD) &&
+ !(other->sk_shutdown & RCV_SHUTDOWN) &&
+- unix_recvq_full(other);
++ unix_recvq_full_lockless(other);
+
+ unix_state_unlock(other);
+
+@@ -2546,7 +2546,7 @@ static int unix_shutdown(struct socket *sock, int mode)
+ ++mode;
+
+ unix_state_lock(sk);
+- sk->sk_shutdown |= mode;
++ WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
+ other = unix_peer(sk);
+ if (other)
+ sock_hold(other);
+@@ -2563,7 +2563,7 @@ static int unix_shutdown(struct socket *sock, int mode)
+ if (mode&SEND_SHUTDOWN)
+ peer_mode |= RCV_SHUTDOWN;
+ unix_state_lock(other);
+- other->sk_shutdown |= peer_mode;
++ WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
+ unix_state_unlock(other);
+ other->sk_state_change(other);
+ if (peer_mode == SHUTDOWN_MASK)
+@@ -2682,16 +2682,18 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
+ {
+ struct sock *sk = sock->sk;
+ __poll_t mask;
++ u8 shutdown;
+
+ sock_poll_wait(file, sock, wait);
+ mask = 0;
++ shutdown = READ_ONCE(sk->sk_shutdown);
+
+ /* exceptional events? */
+ if (sk->sk_err)
+ mask |= EPOLLERR;
+- if (sk->sk_shutdown == SHUTDOWN_MASK)
++ if (shutdown == SHUTDOWN_MASK)
+ mask |= EPOLLHUP;
+- if (sk->sk_shutdown & RCV_SHUTDOWN)
++ if (shutdown & RCV_SHUTDOWN)
+ mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
+
+ /* readable? */
+@@ -2719,18 +2721,20 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+ struct sock *sk = sock->sk, *other;
+ unsigned int writable;
+ __poll_t mask;
++ u8 shutdown;
+
+ sock_poll_wait(file, sock, wait);
+ mask = 0;
++ shutdown = READ_ONCE(sk->sk_shutdown);
+
+ /* exceptional events? */
+ if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+ mask |= EPOLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
+
+- if (sk->sk_shutdown & RCV_SHUTDOWN)
++ if (shutdown & RCV_SHUTDOWN)
+ mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
+- if (sk->sk_shutdown == SHUTDOWN_MASK)
++ if (shutdown == SHUTDOWN_MASK)
+ mask |= EPOLLHUP;
+
+ /* readable? */
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 28f6188458c42..4cd65a1a07f97 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1232,7 +1232,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
+ vsock_transport_cancel_pkt(vsk);
+ vsock_remove_connected(vsk);
+ goto out_wait;
+- } else if (timeout == 0) {
++ } else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) {
+ err = -ETIMEDOUT;
+ sk->sk_state = TCP_CLOSE;
+ sock->state = SS_UNCONNECTED;
+diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c
+index e0fbab9bec83e..6d6d4e4ea8437 100644
+--- a/samples/bpf/hbm.c
++++ b/samples/bpf/hbm.c
+@@ -307,6 +307,7 @@ static int run_bpf_prog(char *prog, int cg_id)
+ fout = fopen(fname, "w");
+ fprintf(fout, "id:%d\n", cg_id);
+ fprintf(fout, "ERROR: Could not lookup queue_stats\n");
++ fclose(fout);
+ } else if (stats_flag && qstats.lastPacketTime >
+ qstats.firstPacketTime) {
+ long long delta_us = (qstats.lastPacketTime -
+diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
+index cce12e1971d85..ec692af8ce9eb 100644
+--- a/scripts/recordmcount.c
++++ b/scripts/recordmcount.c
+@@ -102,6 +102,7 @@ static ssize_t uwrite(void const *const buf, size_t const count)
+ {
+ size_t cnt = count;
+ off_t idx = 0;
++ void *p = NULL;
+
+ file_updated = 1;
+
+@@ -109,7 +110,10 @@ static ssize_t uwrite(void const *const buf, size_t const count)
+ off_t aoffset = (file_ptr + count) - file_end;
+
+ if (aoffset > file_append_size) {
+- file_append = realloc(file_append, aoffset);
++ p = realloc(file_append, aoffset);
++ if (!p)
++ free(file_append);
++ file_append = p;
+ file_append_size = aoffset;
+ }
+ if (!file_append) {
+diff --git a/sound/firewire/digi00x/digi00x-stream.c b/sound/firewire/digi00x/digi00x-stream.c
+index d6a92460060f6..1a841c858e06e 100644
+--- a/sound/firewire/digi00x/digi00x-stream.c
++++ b/sound/firewire/digi00x/digi00x-stream.c
+@@ -259,8 +259,10 @@ int snd_dg00x_stream_init_duplex(struct snd_dg00x *dg00x)
+ return err;
+
+ err = init_stream(dg00x, &dg00x->tx_stream);
+- if (err < 0)
++ if (err < 0) {
+ destroy_stream(dg00x, &dg00x->rx_stream);
++ return err;
++ }
+
+ err = amdtp_domain_init(&dg00x->domain);
+ if (err < 0) {
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index e92fcb150e57c..77c0abd252eb0 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -1147,8 +1147,8 @@ static bool path_has_mixer(struct hda_codec *codec, int path_idx, int ctl_type)
+ return path && path->ctls[ctl_type];
+ }
+
+-static const char * const channel_name[4] = {
+- "Front", "Surround", "CLFE", "Side"
++static const char * const channel_name[] = {
++ "Front", "Surround", "CLFE", "Side", "Back",
+ };
+
+ /* give some appropriate ctl name prefix for the given line out channel */
+@@ -1174,7 +1174,7 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
+
+ /* multi-io channels */
+ if (ch >= cfg->line_outs)
+- return channel_name[ch];
++ goto fixed_name;
+
+ switch (cfg->line_out_type) {
+ case AUTO_PIN_SPEAKER_OUT:
+@@ -1226,6 +1226,7 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
+ if (cfg->line_outs == 1 && !spec->multi_ios)
+ return "Line Out";
+
++ fixed_name:
+ if (ch >= ARRAY_SIZE(channel_name)) {
+ snd_BUG();
+ return "PCM";
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index f9582053878df..40f50571ad63c 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -1182,6 +1182,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
+ SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
+ SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
+ SND_PCI_QUIRK(0x3842, 0x1038, "EVGA X99 Classified", QUIRK_R3DI),
++ SND_PCI_QUIRK(0x3842, 0x104b, "EVGA X299 Dark", QUIRK_R3DI),
+ SND_PCI_QUIRK(0x3842, 0x1055, "EVGA Z390 DARK", QUIRK_R3DI),
+ SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
+ SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D),
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 58e9a0171fe13..c72abdaac96eb 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -4197,6 +4197,11 @@ HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00a3, "GPU a3 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00a4, "GPU a4 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00a5, "GPU a5 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00a6, "GPU a6 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00a7, "GPU a7 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b6b1440cc04a6..31f5ff74bb1c2 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -8273,6 +8273,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++ SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
+ SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
+@@ -10342,6 +10343,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+ SND_PCI_QUIRK(0x103c, 0x870c, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
+ SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
++ SND_PCI_QUIRK(0x103c, 0x872b, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
+ SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
+ SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2),
+ SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2),
+@@ -10368,6 +10370,8 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
++ SND_PCI_QUIRK(0x17aa, 0x3321, "Lenovo ThinkCentre M70 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
++ SND_PCI_QUIRK(0x17aa, 0x331b, "Lenovo ThinkCentre M90 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2),
+ SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
+index b33746d586337..6285ee8f829e5 100644
+--- a/sound/soc/fsl/fsl_micfil.c
++++ b/sound/soc/fsl/fsl_micfil.c
+@@ -740,18 +740,23 @@ static int fsl_micfil_probe(struct platform_device *pdev)
+
+ pm_runtime_enable(&pdev->dev);
+
++ /*
++	 * Register the platform component before registering the cpu dai, since
++	 * snd_soc_add_pcm_runtime() has no deferred probe for the platform component.
++ */
++ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to pcm register\n");
++ return ret;
++ }
++
+ ret = devm_snd_soc_register_component(&pdev->dev, &fsl_micfil_component,
+ &fsl_micfil_dai, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register component %s\n",
+ fsl_micfil_component.name);
+- return ret;
+ }
+
+- ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+- if (ret)
+- dev_err(&pdev->dev, "failed to pcm register\n");
+-
+ return ret;
+ }
+
+diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+index 44806a6dae11a..7a76d63003748 100644
+--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
++++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+@@ -66,8 +66,8 @@ static int max_freq_mode;
+ */
+ static unsigned long max_frequency;
+
+-static unsigned long long tsc_at_measure_start;
+-static unsigned long long tsc_at_measure_end;
++static unsigned long long *tsc_at_measure_start;
++static unsigned long long *tsc_at_measure_end;
+ static unsigned long long *mperf_previous_count;
+ static unsigned long long *aperf_previous_count;
+ static unsigned long long *mperf_current_count;
+@@ -130,7 +130,7 @@ static int mperf_get_count_percent(unsigned int id, double *percent,
+ aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu];
+
+ if (max_freq_mode == MAX_FREQ_TSC_REF) {
+- tsc_diff = tsc_at_measure_end - tsc_at_measure_start;
++ tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
+ *percent = 100.0 * mperf_diff / tsc_diff;
+ dprint("%s: TSC Ref - mperf_diff: %llu, tsc_diff: %llu\n",
+ mperf_cstates[id].name, mperf_diff, tsc_diff);
+@@ -167,7 +167,7 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
+
+ if (max_freq_mode == MAX_FREQ_TSC_REF) {
+ /* Calculate max_freq from TSC count */
+- tsc_diff = tsc_at_measure_end - tsc_at_measure_start;
++ tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
+ time_diff = timespec_diff_us(time_start, time_end);
+ max_frequency = tsc_diff / time_diff;
+ }
+@@ -186,33 +186,27 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
+ static int mperf_start(void)
+ {
+ int cpu;
+- unsigned long long dbg;
+
+ clock_gettime(CLOCK_REALTIME, &time_start);
+- mperf_get_tsc(&tsc_at_measure_start);
+
+- for (cpu = 0; cpu < cpu_count; cpu++)
++ for (cpu = 0; cpu < cpu_count; cpu++) {
++ mperf_get_tsc(&tsc_at_measure_start[cpu]);
+ mperf_init_stats(cpu);
++ }
+
+- mperf_get_tsc(&dbg);
+- dprint("TSC diff: %llu\n", dbg - tsc_at_measure_start);
+ return 0;
+ }
+
+ static int mperf_stop(void)
+ {
+- unsigned long long dbg;
+ int cpu;
+
+- for (cpu = 0; cpu < cpu_count; cpu++)
++ for (cpu = 0; cpu < cpu_count; cpu++) {
+ mperf_measure_stats(cpu);
++ mperf_get_tsc(&tsc_at_measure_end[cpu]);
++ }
+
+- mperf_get_tsc(&tsc_at_measure_end);
+ clock_gettime(CLOCK_REALTIME, &time_end);
+-
+- mperf_get_tsc(&dbg);
+- dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end);
+-
+ return 0;
+ }
+
+@@ -311,7 +305,8 @@ struct cpuidle_monitor *mperf_register(void)
+ aperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
+ mperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
+ aperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
+-
++ tsc_at_measure_start = calloc(cpu_count, sizeof(unsigned long long));
++ tsc_at_measure_end = calloc(cpu_count, sizeof(unsigned long long));
+ mperf_monitor.name_len = strlen(mperf_monitor.name);
+ return &mperf_monitor;
+ }
+@@ -322,6 +317,8 @@ void mperf_unregister(void)
+ free(aperf_previous_count);
+ free(mperf_current_count);
+ free(aperf_current_count);
++ free(tsc_at_measure_start);
++ free(tsc_at_measure_end);
+ free(is_valid);
+ }
+
+diff --git a/tools/testing/selftests/memfd/fuse_test.c b/tools/testing/selftests/memfd/fuse_test.c
+index b018e835737df..cda63164d9d35 100644
+--- a/tools/testing/selftests/memfd/fuse_test.c
++++ b/tools/testing/selftests/memfd/fuse_test.c
+@@ -22,6 +22,7 @@
+ #include <linux/falloc.h>
+ #include <linux/fcntl.h>
+ #include <linux/memfd.h>
++#include <linux/types.h>
+ #include <sched.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
+index 24d67fa66d037..782db6ccef323 100755
+--- a/tools/testing/selftests/net/fib_tests.sh
++++ b/tools/testing/selftests/net/fib_tests.sh
+@@ -68,7 +68,7 @@ setup()
+ cleanup()
+ {
+ $IP link del dev dummy0 &> /dev/null
+- ip netns del ns1
++ ip netns del ns1 &> /dev/null
+ ip netns del ns2 &> /dev/null
+ }
+