author    Mike Pagano <mpagano@gentoo.org>  2019-05-26 13:08:02 -0400
committer Mike Pagano <mpagano@gentoo.org>  2019-05-26 13:08:02 -0400
commit    15741aee1cf7932e3be5a4e02b6d5867bdcb4bd0 (patch)
tree      06f2d27b8060c27deb60aae9a00cd3bf860c6365
parent    Linux patch 5.0.18 (diff)
download  linux-patches-5.0-20.tar.gz
          linux-patches-5.0-20.tar.bz2
          linux-patches-5.0-20.zip

Linux patch 5.0.19 (tag: 5.0-20)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README             |    4
-rw-r--r--  1018_linux-5.0.19.patch | 5315
2 files changed, 5319 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 396a4db5..599546cc 100644
--- a/0000_README
+++ b/0000_README
@@ -115,6 +115,10 @@ Patch: 1017_linux-5.0.18.patch
From: http://www.kernel.org
Desc: Linux 5.0.18
+Patch: 1018_linux-5.0.19.patch
+From: http://www.kernel.org
+Desc: Linux 5.0.19
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1018_linux-5.0.19.patch b/1018_linux-5.0.19.patch
new file mode 100644
index 00000000..0c8c9775
--- /dev/null
+++ b/1018_linux-5.0.19.patch
@@ -0,0 +1,5315 @@
+diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
+index cf43bc4dbf31..a60fa516d4cb 100644
+--- a/Documentation/filesystems/porting
++++ b/Documentation/filesystems/porting
+@@ -638,3 +638,8 @@ in your dentry operations instead.
+ inode to d_splice_alias() will also do the right thing (equivalent of
+ d_add(dentry, NULL); return NULL;), so that kind of special cases
+ also doesn't need a separate treatment.
++--
++[mandatory]
++ DCACHE_RCUACCESS is gone; having an RCU delay on dentry freeing is the
++ default. DCACHE_NORCU opts out, and only d_alloc_pseudo() has any
++ business doing so.
+diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
+index ba8927c0d45c..a1b8e6d92298 100644
+--- a/Documentation/virtual/kvm/api.txt
++++ b/Documentation/virtual/kvm/api.txt
+@@ -3790,8 +3790,9 @@ The ioctl clears the dirty status of pages in a memory slot, according to
+ the bitmap that is passed in struct kvm_clear_dirty_log's dirty_bitmap
+ field. Bit 0 of the bitmap corresponds to page "first_page" in the
+ memory slot, and num_pages is the size in bits of the input bitmap.
+-Both first_page and num_pages must be a multiple of 64. For each bit
+-that is set in the input bitmap, the corresponding page is marked "clean"
++first_page must be a multiple of 64; num_pages must also be a multiple of
++64 unless first_page + num_pages is the size of the memory slot. For each
++bit that is set in the input bitmap, the corresponding page is marked "clean"
+ in KVM's dirty bitmap, and dirty tracking is re-enabled for that page
+ (for example via write-protection, or by clearing the dirty bit in
+ a page table entry).
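
The corrected rule fits in a few lines of code. A minimal sketch of the check a userspace caller might apply before issuing KVM_CLEAR_DIRTY_LOG; the helper name and the slot_npages parameter are illustrative, not part of the KVM API:

#include <stdbool.h>
#include <stdint.h>

/* first_page must be 64-aligned; num_pages must be a multiple of 64
 * unless the range ends exactly at the end of the memory slot. */
static bool clear_dirty_log_range_ok(uint64_t first_page,
                                     uint64_t num_pages,
                                     uint64_t slot_npages)
{
        if (first_page % 64)
                return false;
        if (first_page + num_pages > slot_npages)
                return false;
        return (num_pages % 64 == 0) ||
               (first_page + num_pages == slot_npages);
}
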
+diff --git a/Makefile b/Makefile
+index bf21b5a86e4b..66efffc3fb41 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 0
+-SUBLEVEL = 18
++SUBLEVEL = 19
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+
+diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
+index 4135abec3fb0..63e6e6504699 100644
+--- a/arch/arc/mm/cache.c
++++ b/arch/arc/mm/cache.c
+@@ -113,10 +113,24 @@ static void read_decode_cache_bcr_arcv2(int cpu)
+ }
+
+ READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
+- if (cbcr.c)
++ if (cbcr.c) {
+ ioc_exists = 1;
+- else
++
++ /*
++ * As for today we don't support both IOC and ZONE_HIGHMEM enabled
++ * simultaneously. This happens because as of today IOC aperture covers
++ * only ZONE_NORMAL (low mem) and any dma transactions outside this
++ * region won't be HW coherent.
++ * If we want to use both IOC and ZONE_HIGHMEM we can use
++ * bounce_buffer to handle dma transactions to HIGHMEM.
++ * Also it is possible to modify dma_direct cache ops or increase IOC
++ * aperture size if we are planning to use HIGHMEM without PAE.
++ */
++ if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
++ ioc_enable = 0;
++ } else {
+ ioc_enable = 0;
++ }
+
+ /* HS 2.0 didn't have AUX_VOL */
+ if (cpuinfo_arc700[cpu].core.family > 0x51) {
+@@ -1158,19 +1172,6 @@ noinline void __init arc_ioc_setup(void)
+ if (!ioc_enable)
+ return;
+
+- /*
+- * As for today we don't support both IOC and ZONE_HIGHMEM enabled
+- * simultaneously. This happens because as of today IOC aperture covers
+- * only ZONE_NORMAL (low mem) and any dma transactions outside this
+- * region won't be HW coherent.
+- * If we want to use both IOC and ZONE_HIGHMEM we can use
+- * bounce_buffer to handle dma transactions to HIGHMEM.
+- * Also it is possible to modify dma_direct cache ops or increase IOC
+- * aperture size if we are planning to use HIGHMEM without PAE.
+- */
+- if (IS_ENABLED(CONFIG_HIGHMEM))
+- panic("IOC and HIGHMEM can't be used simultaneously");
+-
+ /* Flush + invalidate + disable L1 dcache */
+ __dc_disable();
+
+diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
+index 413863508f6f..d67fb64e908c 100644
+--- a/arch/mips/kernel/perf_event_mipsxx.c
++++ b/arch/mips/kernel/perf_event_mipsxx.c
+@@ -64,17 +64,11 @@ struct mips_perf_event {
+ #define CNTR_EVEN 0x55555555
+ #define CNTR_ODD 0xaaaaaaaa
+ #define CNTR_ALL 0xffffffff
+-#ifdef CONFIG_MIPS_MT_SMP
+ enum {
+ T = 0,
+ V = 1,
+ P = 2,
+ } range;
+-#else
+- #define T
+- #define V
+- #define P
+-#endif
+ };
+
+ static struct mips_perf_event raw_event;
+@@ -325,9 +319,7 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+ {
+ struct perf_event *event = container_of(evt, struct perf_event, hw);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+-#ifdef CONFIG_MIPS_MT_SMP
+ unsigned int range = evt->event_base >> 24;
+-#endif /* CONFIG_MIPS_MT_SMP */
+
+ WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
+
+@@ -336,21 +328,15 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+ /* Make sure interrupt enabled. */
+ MIPS_PERFCTRL_IE;
+
+-#ifdef CONFIG_CPU_BMIPS5000
+- {
++ if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
+ /* enable the counter for the calling thread */
+ cpuc->saved_ctrl[idx] |=
+ (1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
+- }
+-#else
+-#ifdef CONFIG_MIPS_MT_SMP
+- if (range > V) {
++ } else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
+ /* The counter is processor wide. Set it up to count all TCs. */
+ pr_debug("Enabling perf counter for all TCs\n");
+ cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
+- } else
+-#endif /* CONFIG_MIPS_MT_SMP */
+- {
++ } else {
+ unsigned int cpu, ctrl;
+
+ /*
+@@ -365,7 +351,6 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+ cpuc->saved_ctrl[idx] |= ctrl;
+ pr_debug("Enabling perf counter for CPU%d\n", cpu);
+ }
+-#endif /* CONFIG_CPU_BMIPS5000 */
+ /*
+ * We do not actually let the counter run. Leave it until start().
+ */
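
The conversion above trades #ifdef blocks for IS_ENABLED(), so every branch is compiled and type-checked on every configuration while the optimizer discards the dead ones. A generic before/after sketch of the idiom; CONFIG_FOO, do_foo() and do_bar() are placeholders:

/* Before: the disabled branch is never even parsed and can bit-rot. */
static void setup_old(struct device *dev)
{
#ifdef CONFIG_FOO
	do_foo(dev);
#else
	do_bar(dev);
#endif
}

/* After: IS_ENABLED(CONFIG_FOO) is a compile-time constant, so the
 * untaken branch is folded away -- but it must still compile, which
 * is exactly what catches stale code on rarely-built configs. */
static void setup_new(struct device *dev)
{
	if (IS_ENABLED(CONFIG_FOO))
		do_foo(dev);
	else
		do_bar(dev);
}
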
+diff --git a/arch/parisc/boot/compressed/head.S b/arch/parisc/boot/compressed/head.S
+index 5aba20fa48aa..e8b798fd0cf0 100644
+--- a/arch/parisc/boot/compressed/head.S
++++ b/arch/parisc/boot/compressed/head.S
+@@ -22,7 +22,7 @@
+ __HEAD
+
+ ENTRY(startup)
+- .level LEVEL
++ .level PA_ASM_LEVEL
+
+ #define PSW_W_SM 0x200
+ #define PSW_W_BIT 36
+@@ -63,7 +63,7 @@ $bss_loop:
+ load32 BOOTADDR(decompress_kernel),%r3
+
+ #ifdef CONFIG_64BIT
+- .level LEVEL
++ .level PA_ASM_LEVEL
+ ssm PSW_W_SM, %r0 /* set W-bit */
+ depdi 0, 31, 32, %r3
+ #endif
+@@ -72,7 +72,7 @@ $bss_loop:
+
+ startup_continue:
+ #ifdef CONFIG_64BIT
+- .level LEVEL
++ .level PA_ASM_LEVEL
+ rsm PSW_W_SM, %r0 /* clear W-bit */
+ #endif
+
+diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
+index c17ec0ee6e7c..d85738a7bbe6 100644
+--- a/arch/parisc/include/asm/assembly.h
++++ b/arch/parisc/include/asm/assembly.h
+@@ -61,14 +61,14 @@
+ #define LDCW ldcw,co
+ #define BL b,l
+ # ifdef CONFIG_64BIT
+-# define LEVEL 2.0w
++# define PA_ASM_LEVEL 2.0w
+ # else
+-# define LEVEL 2.0
++# define PA_ASM_LEVEL 2.0
+ # endif
+ #else
+ #define LDCW ldcw
+ #define BL bl
+-#define LEVEL 1.1
++#define PA_ASM_LEVEL 1.1
+ #endif
+
+ #ifdef __ASSEMBLY__
+diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
+index 006fb939cac8..4016fe1c65a9 100644
+--- a/arch/parisc/include/asm/cache.h
++++ b/arch/parisc/include/asm/cache.h
+@@ -44,22 +44,22 @@ void parisc_setup_cache_timing(void);
+
+ #define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" \
+ ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
+- : : "r" (addr))
++ : : "r" (addr) : "memory")
+ #define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" \
+ ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
+ ALTERNATIVE(ALT_COND_NO_SPLIT_TLB, INSN_NOP) \
+- : : "r" (addr))
++ : : "r" (addr) : "memory")
+ #define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" \
+ ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
+- : : "r" (addr))
++ : : "r" (addr) : "memory")
+
+ #define asm_io_fdc(addr) asm volatile("fdc %%r0(%0)" \
+ ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
+ ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) \
+- : : "r" (addr))
++ : : "r" (addr) : "memory")
+ #define asm_io_sync() asm volatile("sync" \
+ ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
+- ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :: )
++ ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :::"memory")
+
+ #endif /* ! __ASSEMBLY__ */
+
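
The added "memory" clobbers matter for correctness: without one, the compiler may cache values in registers across the TLB purge or sink stores past it, because it assumes the asm touches nothing it cannot see. A generic illustration of the effect, not parisc-specific:

int ready;

void publish(int *data)
{
	*data = 42;
	/* The clobber forces the store above to be emitted before the
	 * asm, and any register-cached memory values to be reloaded
	 * after it; drop it and the two stores may be reordered. */
	asm volatile("" : : : "memory");
	ready = 1;
}
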
+diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
+index fbb4e43fda05..f56cbab64ac1 100644
+--- a/arch/parisc/kernel/head.S
++++ b/arch/parisc/kernel/head.S
+@@ -22,7 +22,7 @@
+ #include <linux/linkage.h>
+ #include <linux/init.h>
+
+- .level LEVEL
++ .level PA_ASM_LEVEL
+
+ __INITDATA
+ ENTRY(boot_args)
+@@ -258,7 +258,7 @@ stext_pdc_ret:
+ ldo R%PA(fault_vector_11)(%r10),%r10
+
+ $is_pa20:
+- .level LEVEL /* restore 1.1 || 2.0w */
++ .level PA_ASM_LEVEL /* restore 1.1 || 2.0w */
+ #endif /*!CONFIG_64BIT*/
+ load32 PA(fault_vector_20),%r10
+
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index 841db71958cd..97c206734e24 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -193,6 +193,7 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
+ */
+
+ int running_on_qemu __read_mostly;
++EXPORT_SYMBOL(running_on_qemu);
+
+ void __cpuidle arch_cpu_idle_dead(void)
+ {
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 4f77bd9be66b..93cc36d98875 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -48,7 +48,7 @@ registers).
+ */
+ #define KILL_INSN break 0,0
+
+- .level LEVEL
++ .level PA_ASM_LEVEL
+
+ .text
+
+diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
+index 059187a3ded7..3d1305aa64b6 100644
+--- a/arch/parisc/mm/init.c
++++ b/arch/parisc/mm/init.c
+@@ -512,7 +512,7 @@ static void __init map_pages(unsigned long start_vaddr,
+
+ void __init set_kernel_text_rw(int enable_read_write)
+ {
+- unsigned long start = (unsigned long) _text;
++ unsigned long start = (unsigned long) __init_begin;
+ unsigned long end = (unsigned long) &data_start;
+
+ map_pages(start, __pa(start), end-start,
+diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
+index 6ee8195a2ffb..4a6dd3ba0b0b 100644
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -237,7 +237,6 @@ extern void arch_exit_mmap(struct mm_struct *mm);
+ #endif
+
+ static inline void arch_unmap(struct mm_struct *mm,
+- struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
+ if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index 532ab79734c7..d43e8fe6d424 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -543,14 +543,14 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ if (ret != H_SUCCESS)
+ return ret;
+
++ idx = srcu_read_lock(&vcpu->kvm->srcu);
++
+ ret = kvmppc_tce_validate(stt, tce);
+ if (ret != H_SUCCESS)
+- return ret;
++ goto unlock_exit;
+
+ dir = iommu_tce_direction(tce);
+
+- idx = srcu_read_lock(&vcpu->kvm->srcu);
+-
+ if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
+ ret = H_PARAMETER;
+ goto unlock_exit;
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 5a066fc299e1..f17065f2c962 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -3407,7 +3407,9 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
+ vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
+ vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
+
+- mtspr(SPRN_PSSCR, host_psscr);
++ /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
++ mtspr(SPRN_PSSCR, host_psscr |
++ (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+ mtspr(SPRN_HFSCR, host_hfscr);
+ mtspr(SPRN_CIABR, host_ciabr);
+ mtspr(SPRN_DAWR, host_dawr);
+diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
+index fca34b2177e2..9f4b4bb78120 100644
+--- a/arch/um/include/asm/mmu_context.h
++++ b/arch/um/include/asm/mmu_context.h
+@@ -22,7 +22,6 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+ }
+ extern void arch_exit_mmap(struct mm_struct *mm);
+ static inline void arch_unmap(struct mm_struct *mm,
+- struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
+ }
+diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
+index 5c205a9cb5a6..9f06ea5466dd 100644
+--- a/arch/unicore32/include/asm/mmu_context.h
++++ b/arch/unicore32/include/asm/mmu_context.h
+@@ -88,7 +88,6 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm,
+ }
+
+ static inline void arch_unmap(struct mm_struct *mm,
+- struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
+ }
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 4fe27b67d7e2..b1d59a7c556e 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -881,7 +881,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
+ * @paranoid == 2 is special: the stub will never switch stacks. This is for
+ * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
+ */
+-.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
++.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 create_gap=0
+ ENTRY(\sym)
+ UNWIND_HINT_IRET_REGS offset=\has_error_code*8
+
+@@ -901,6 +901,20 @@ ENTRY(\sym)
+ jnz .Lfrom_usermode_switch_stack_\@
+ .endif
+
++ .if \create_gap == 1
++ /*
++ * If coming from kernel space, create a 6-word gap to allow the
++ * int3 handler to emulate a call instruction.
++ */
++ testb $3, CS-ORIG_RAX(%rsp)
++ jnz .Lfrom_usermode_no_gap_\@
++ .rept 6
++ pushq 5*8(%rsp)
++ .endr
++ UNWIND_HINT_IRET_REGS offset=8
++.Lfrom_usermode_no_gap_\@:
++ .endif
++
+ .if \paranoid
+ call paranoid_entry
+ .else
+@@ -1132,7 +1146,7 @@ apicinterrupt3 HYPERV_STIMER0_VECTOR \
+ #endif /* CONFIG_HYPERV */
+
+ idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
+-idtentry int3 do_int3 has_error_code=0
++idtentry int3 do_int3 has_error_code=0 create_gap=1
+ idtentry stack_segment do_stack_segment has_error_code=1
+
+ #ifdef CONFIG_XEN_PV
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 71fb8b7b2954..c87b06ad9f86 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2090,15 +2090,19 @@ static void intel_pmu_disable_event(struct perf_event *event)
+ cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
+ cpuc->intel_cp_status &= ~(1ull << hwc->idx);
+
+- if (unlikely(event->attr.precise_ip))
+- intel_pmu_pebs_disable(event);
+-
+ if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+ intel_pmu_disable_fixed(hwc);
+ return;
+ }
+
+ x86_pmu_disable_event(event);
++
++ /*
++ * Needs to be called after x86_pmu_disable_event,
++ * so we don't trigger the event without PEBS bit set.
++ */
++ if (unlikely(event->attr.precise_ip))
++ intel_pmu_pebs_disable(event);
+ }
+
+ static void intel_pmu_del_event(struct perf_event *event)
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 19d18fae6ec6..41019af68adf 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -277,8 +277,8 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
+ mpx_mm_init(mm);
+ }
+
+-static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+- unsigned long start, unsigned long end)
++static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
++ unsigned long end)
+ {
+ /*
+ * mpx_notify_unmap() goes and reads a rarely-hot
+@@ -298,7 +298,7 @@ static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+ * consistently wrong.
+ */
+ if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
+- mpx_notify_unmap(mm, vma, start, end);
++ mpx_notify_unmap(mm, start, end);
+ }
+
+ /*
+diff --git a/arch/x86/include/asm/mpx.h b/arch/x86/include/asm/mpx.h
+index d0b1434fb0b6..143a5c193ed3 100644
+--- a/arch/x86/include/asm/mpx.h
++++ b/arch/x86/include/asm/mpx.h
+@@ -64,12 +64,15 @@ struct mpx_fault_info {
+ };
+
+ #ifdef CONFIG_X86_INTEL_MPX
+-int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
+-int mpx_handle_bd_fault(void);
++
++extern int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
++extern int mpx_handle_bd_fault(void);
++
+ static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
+ {
+ return (mm->context.bd_addr != MPX_INVALID_BOUNDS_DIR);
+ }
++
+ static inline void mpx_mm_init(struct mm_struct *mm)
+ {
+ /*
+@@ -78,11 +81,10 @@ static inline void mpx_mm_init(struct mm_struct *mm)
+ */
+ mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
+ }
+-void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+- unsigned long start, unsigned long end);
+
+-unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len,
+- unsigned long flags);
++extern void mpx_notify_unmap(struct mm_struct *mm, unsigned long start, unsigned long end);
++extern unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len, unsigned long flags);
++
+ #else
+ static inline int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs)
+ {
+@@ -100,7 +102,6 @@ static inline void mpx_mm_init(struct mm_struct *mm)
+ {
+ }
+ static inline void mpx_notify_unmap(struct mm_struct *mm,
+- struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
+ }
+diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
+index 9c85b54bf03c..0bb566315621 100644
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -259,8 +259,7 @@ extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
+ extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
+
+ #define gup_fast_permitted gup_fast_permitted
+-static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
+- int write)
++static inline bool gup_fast_permitted(unsigned long start, int nr_pages)
+ {
+ unsigned long len, end;
+
+diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
+index e85ff65c43c3..05861cc08787 100644
+--- a/arch/x86/include/asm/text-patching.h
++++ b/arch/x86/include/asm/text-patching.h
+@@ -39,4 +39,32 @@ extern int poke_int3_handler(struct pt_regs *regs);
+ extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+ extern int after_bootmem;
+
++static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
++{
++ regs->ip = ip;
++}
++
++#define INT3_INSN_SIZE 1
++#define CALL_INSN_SIZE 5
++
++#ifdef CONFIG_X86_64
++static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
++{
++ /*
++ * The int3 handler in entry_64.S adds a gap between the
++ * stack where the break point happened, and the saving of
++ * pt_regs. We can extend the original stack because of
++ * this gap. See the idtentry macro's create_gap option.
++ */
++ regs->sp -= sizeof(unsigned long);
++ *(unsigned long *)regs->sp = val;
++}
++
++static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
++{
++ int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
++ int3_emulate_jmp(regs, func);
++}
++#endif
++
+ #endif /* _ASM_X86_TEXT_PATCHING_H */
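
Taken together: the create_gap option in entry_64.S leaves six spare words under pt_regs so int3_emulate_push() can grow the pre-trap stack, and int3_emulate_call() uses that to fake the call the int3 replaced. A hedged sketch of a handler built on these primitives (text_is_being_patched() is a hypothetical predicate); the real consumer is ftrace_int3_handler() in the next hunk:

/* Sketch: emulate the 5-byte call an int3 temporarily overwrites. */
static int example_bp_handler(struct pt_regs *regs, unsigned long target)
{
	unsigned long ip = regs->ip - INT3_INSN_SIZE; /* patched insn */

	if (!text_is_being_patched(ip))	/* hypothetical check */
		return 0;		/* not our breakpoint */

	/* Pushes ip + CALL_INSN_SIZE as the return address (into the
	 * gap), then redirects regs->ip to the call target. */
	int3_emulate_call(regs, target);
	return 1;
}
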
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index 763d4264d16a..2ee4b12a70e8 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -29,6 +29,7 @@
+ #include <asm/kprobes.h>
+ #include <asm/ftrace.h>
+ #include <asm/nops.h>
++#include <asm/text-patching.h>
+
+ #ifdef CONFIG_DYNAMIC_FTRACE
+
+@@ -231,6 +232,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ }
+
+ static unsigned long ftrace_update_func;
++static unsigned long ftrace_update_func_call;
+
+ static int update_ftrace_func(unsigned long ip, void *new)
+ {
+@@ -259,6 +261,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
+ unsigned char *new;
+ int ret;
+
++ ftrace_update_func_call = (unsigned long)func;
++
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = update_ftrace_func(ip, new);
+
+@@ -294,13 +298,28 @@ int ftrace_int3_handler(struct pt_regs *regs)
+ if (WARN_ON_ONCE(!regs))
+ return 0;
+
+- ip = regs->ip - 1;
+- if (!ftrace_location(ip) && !is_ftrace_caller(ip))
+- return 0;
++ ip = regs->ip - INT3_INSN_SIZE;
+
+- regs->ip += MCOUNT_INSN_SIZE - 1;
++#ifdef CONFIG_X86_64
++ if (ftrace_location(ip)) {
++ int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
++ return 1;
++ } else if (is_ftrace_caller(ip)) {
++ if (!ftrace_update_func_call) {
++ int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
++ return 1;
++ }
++ int3_emulate_call(regs, ftrace_update_func_call);
++ return 1;
++ }
++#else
++ if (ftrace_location(ip) || is_ftrace_caller(ip)) {
++ int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
++ return 1;
++ }
++#endif
+
+- return 1;
++ return 0;
+ }
+
+ static int ftrace_write(unsigned long ip, const char *val, int size)
+@@ -858,6 +877,8 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
+
+ func = ftrace_ops_get_func(ops);
+
++ ftrace_update_func_call = (unsigned long)func;
++
+ /* Do a safe modify in case the trampoline is executing */
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = update_ftrace_func(ip, new);
+@@ -959,6 +980,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func)
+ {
+ unsigned char *new;
+
++ ftrace_update_func_call = 0UL;
+ new = ftrace_jmp_replace(ip, (unsigned long)func);
+
+ return update_ftrace_func(ip, new);
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index 371c669696d7..610c0f1fbdd7 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -1371,7 +1371,16 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
+
+ valid_bank_mask = BIT_ULL(0);
+ sparse_banks[0] = flush.processor_mask;
+- all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS;
++
++ /*
++ * Work around possible WS2012 bug: it sends hypercalls
++ * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
++ * while also expecting us to flush something and crashing if
++ * we don't. Let's treat processor_mask == 0 same as
++ * HV_FLUSH_ALL_PROCESSORS.
++ */
++ all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
++ flush.processor_mask == 0;
+ } else {
+ if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
+ sizeof(flush_ex))))
+diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
+index 140e61843a07..3cb3af51ec89 100644
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -6,6 +6,18 @@
+ # Produces uninteresting flaky coverage.
+ KCOV_INSTRUMENT_delay.o := n
+
++# Early boot use of cmdline; don't instrument it
++ifdef CONFIG_AMD_MEM_ENCRYPT
++KCOV_INSTRUMENT_cmdline.o := n
++KASAN_SANITIZE_cmdline.o := n
++
++ifdef CONFIG_FUNCTION_TRACER
++CFLAGS_REMOVE_cmdline.o = -pg
++endif
++
++CFLAGS_cmdline.o := $(call cc-option, -fno-stack-protector)
++endif
++
+ inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
+ inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
+ quiet_cmd_inat_tables = GEN $@
+diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
+index de1851d15699..ea17ff6c8588 100644
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -881,9 +881,10 @@ static int mpx_unmap_tables(struct mm_struct *mm,
+ * the virtual address region start...end have already been split if
+ * necessary, and the 'vma' is the first vma in this range (start -> end).
+ */
+-void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+- unsigned long start, unsigned long end)
++void mpx_notify_unmap(struct mm_struct *mm, unsigned long start,
++ unsigned long end)
+ {
++ struct vm_area_struct *vma;
+ int ret;
+
+ /*
+@@ -902,11 +903,12 @@ void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+ * which should not occur normally. Being strict about it here
+ * helps ensure that we do not have an exploitable stack overflow.
+ */
+- do {
++ vma = find_vma(mm, start);
++ while (vma && vma->vm_start < end) {
+ if (vma->vm_flags & VM_MPX)
+ return;
+ vma = vma->vm_next;
+- } while (vma && vma->vm_start < end);
++ }
+
+ ret = mpx_unmap_tables(mm, start, end);
+ if (ret)
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 5bde73a49399..6ba6d8805697 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -375,7 +375,7 @@ void blk_cleanup_queue(struct request_queue *q)
+ blk_exit_queue(q);
+
+ if (queue_is_mq(q))
+- blk_mq_free_queue(q);
++ blk_mq_exit_queue(q);
+
+ percpu_ref_exit(&q->q_usage_counter);
+
+diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
+index 3f9c3f4ac44c..4040e62c3737 100644
+--- a/block/blk-mq-sysfs.c
++++ b/block/blk-mq-sysfs.c
+@@ -10,6 +10,7 @@
+ #include <linux/smp.h>
+
+ #include <linux/blk-mq.h>
++#include "blk.h"
+ #include "blk-mq.h"
+ #include "blk-mq-tag.h"
+
+@@ -33,6 +34,11 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
+ {
+ struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
+ kobj);
++
++ if (hctx->flags & BLK_MQ_F_BLOCKING)
++ cleanup_srcu_struct(hctx->srcu);
++ blk_free_flush_queue(hctx->fq);
++ sbitmap_free(&hctx->ctx_map);
+ free_cpumask_var(hctx->cpumask);
+ kfree(hctx->ctxs);
+ kfree(hctx);
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 5b920a82bfe6..9957e0fc17fc 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2270,12 +2270,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
+ if (set->ops->exit_hctx)
+ set->ops->exit_hctx(hctx, hctx_idx);
+
+- if (hctx->flags & BLK_MQ_F_BLOCKING)
+- cleanup_srcu_struct(hctx->srcu);
+-
+ blk_mq_remove_cpuhp(hctx);
+- blk_free_flush_queue(hctx->fq);
+- sbitmap_free(&hctx->ctx_map);
+ }
+
+ static void blk_mq_exit_hw_queues(struct request_queue *q,
+@@ -2904,7 +2899,8 @@ err_exit:
+ }
+ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
+
+-void blk_mq_free_queue(struct request_queue *q)
++/* tags can _not_ be used after returning from blk_mq_exit_queue */
++void blk_mq_exit_queue(struct request_queue *q)
+ {
+ struct blk_mq_tag_set *set = q->tag_set;
+
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index a3a684a8c633..39bc1d5d4637 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -36,7 +36,7 @@ struct blk_mq_ctx {
+ struct kobject kobj;
+ } ____cacheline_aligned_in_smp;
+
+-void blk_mq_free_queue(struct request_queue *q);
++void blk_mq_exit_queue(struct request_queue *q);
+ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+ void blk_mq_wake_waiters(struct request_queue *q);
+ bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
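
Moving the frees from blk_mq_exit_hctx() into blk_mq_hw_sysfs_release() follows the standard kobject rule: an object exposed through sysfs can outlive queue teardown while a file is held open, so memory may only be reclaimed from the release callback, after the last reference drops. A generic sketch of the pattern; struct my_obj is illustrative:

struct my_obj {
	struct kobject kobj;
	void *buf;
};

/* Runs only when the final kobject_put() drops the refcount to zero,
 * i.e. after every sysfs open file on the object has been closed. */
static void my_obj_release(struct kobject *kobj)
{
	struct my_obj *obj = container_of(kobj, struct my_obj, kobj);

	kfree(obj->buf);
	kfree(obj);
}

static void my_obj_teardown(struct my_obj *obj)
{
	/* Quiesce the object here, but do not free: just drop our ref. */
	kobject_put(&obj->kobj);
}
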
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index d62487d02455..4add909e1a91 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -486,7 +486,7 @@ re_probe:
+ if (dev->bus->dma_configure) {
+ ret = dev->bus->dma_configure(dev);
+ if (ret)
+- goto dma_failed;
++ goto probe_failed;
+ }
+
+ if (driver_sysfs_add(dev)) {
+@@ -542,14 +542,13 @@ re_probe:
+ goto done;
+
+ probe_failed:
+- arch_teardown_dma_ops(dev);
+-dma_failed:
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+ pinctrl_bind_failed:
+ device_links_no_driver(dev);
+ devres_release_all(dev);
++ arch_teardown_dma_ops(dev);
+ driver_sysfs_remove(dev);
+ dev->driver = NULL;
+ dev_set_drvdata(dev, NULL);
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index c18586fccb6f..17defbf4f332 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -96,13 +96,8 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
+ /*
+ * Must use NOIO because we don't want to recurse back into the
+ * block or filesystem layers from page reclaim.
+- *
+- * Cannot support DAX and highmem, because our ->direct_access
+- * routine for DAX must return memory that is always addressable.
+- * If DAX was reworked to use pfns and kmap throughout, this
+- * restriction might be able to be lifted.
+ */
+- gfp_flags = GFP_NOIO | __GFP_ZERO;
++ gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
+ page = alloc_page(gfp_flags);
+ if (!page)
+ return NULL;
+diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c
+index f40419959656..794eeff0d5d2 100644
+--- a/drivers/clk/hisilicon/clk-hi3660.c
++++ b/drivers/clk/hisilicon/clk-hi3660.c
+@@ -163,8 +163,12 @@ static const struct hisi_gate_clock hi3660_crgctrl_gate_sep_clks[] = {
+ "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 17, 0, },
+ { HI3660_CLK_GATE_ISP_SNCLK2, "clk_gate_isp_snclk2",
+ "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 18, 0, },
++ /*
++ * clk_gate_ufs_subsys is a system bus clock, mark it as critical
++ * clock and keep it on for system suspend and resume.
++ */
+ { HI3660_CLK_GATE_UFS_SUBSYS, "clk_gate_ufs_subsys", "clk_div_sysbus",
+- CLK_SET_RATE_PARENT, 0x50, 21, 0, },
++ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0x50, 21, 0, },
+ { HI3660_PCLK_GATE_DSI0, "pclk_gate_dsi0", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x50, 28, 0, },
+ { HI3660_PCLK_GATE_DSI1, "pclk_gate_dsi1", "clk_div_cfgbus",
+diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
+index f54e4015b0b1..18842d660317 100644
+--- a/drivers/clk/mediatek/clk-pll.c
++++ b/drivers/clk/mediatek/clk-pll.c
+@@ -88,6 +88,32 @@ static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
+ return ((unsigned long)vco + postdiv - 1) / postdiv;
+ }
+
++static void __mtk_pll_tuner_enable(struct mtk_clk_pll *pll)
++{
++ u32 r;
++
++ if (pll->tuner_en_addr) {
++ r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
++ writel(r, pll->tuner_en_addr);
++ } else if (pll->tuner_addr) {
++ r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
++ writel(r, pll->tuner_addr);
++ }
++}
++
++static void __mtk_pll_tuner_disable(struct mtk_clk_pll *pll)
++{
++ u32 r;
++
++ if (pll->tuner_en_addr) {
++ r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
++ writel(r, pll->tuner_en_addr);
++ } else if (pll->tuner_addr) {
++ r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
++ writel(r, pll->tuner_addr);
++ }
++}
++
+ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
+ int postdiv)
+ {
+@@ -96,6 +122,9 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
+
+ pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN;
+
++ /* disable tuner */
++ __mtk_pll_tuner_disable(pll);
++
+ /* set postdiv */
+ val = readl(pll->pd_addr);
+ val &= ~(POSTDIV_MASK << pll->data->pd_shift);
+@@ -122,6 +151,9 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
+ if (pll->tuner_addr)
+ writel(con1 + 1, pll->tuner_addr);
+
++ /* restore tuner_en */
++ __mtk_pll_tuner_enable(pll);
++
+ if (pll_en)
+ udelay(20);
+ }
+@@ -228,13 +260,7 @@ static int mtk_pll_prepare(struct clk_hw *hw)
+ r |= pll->data->en_mask;
+ writel(r, pll->base_addr + REG_CON0);
+
+- if (pll->tuner_en_addr) {
+- r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
+- writel(r, pll->tuner_en_addr);
+- } else if (pll->tuner_addr) {
+- r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
+- writel(r, pll->tuner_addr);
+- }
++ __mtk_pll_tuner_enable(pll);
+
+ udelay(20);
+
+@@ -258,13 +284,7 @@ static void mtk_pll_unprepare(struct clk_hw *hw)
+ writel(r, pll->base_addr + REG_CON0);
+ }
+
+- if (pll->tuner_en_addr) {
+- r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
+- writel(r, pll->tuner_en_addr);
+- } else if (pll->tuner_addr) {
+- r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
+- writel(r, pll->tuner_addr);
+- }
++ __mtk_pll_tuner_disable(pll);
+
+ r = readl(pll->base_addr + REG_CON0);
+ r &= ~CON0_BASE_EN;
+diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
+index 65ab5c2f48b0..f12142d9cea2 100644
+--- a/drivers/clk/rockchip/clk-rk3328.c
++++ b/drivers/clk/rockchip/clk-rk3328.c
+@@ -458,7 +458,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
+ RK3328_CLKSEL_CON(35), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3328_CLKGATE_CON(2), 12, GFLAGS),
+ COMPOSITE(SCLK_CRYPTO, "clk_crypto", mux_2plls_p, 0,
+- RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 7, DFLAGS,
++ RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3328_CLKGATE_CON(2), 4, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_TSADC, "clk_tsadc", "clk_24m", 0,
+ RK3328_CLKSEL_CON(22), 0, 10, DFLAGS,
+@@ -550,15 +550,15 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
+ GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc", 0,
+ RK3328_CLKGATE_CON(25), 1, GFLAGS),
+ GATE(ACLK_H265, "aclk_h265", "aclk_rkvenc", 0,
+- RK3328_CLKGATE_CON(25), 0, GFLAGS),
++ RK3328_CLKGATE_CON(25), 2, GFLAGS),
+ GATE(PCLK_H265, "pclk_h265", "hclk_rkvenc", 0,
+- RK3328_CLKGATE_CON(25), 1, GFLAGS),
++ RK3328_CLKGATE_CON(25), 3, GFLAGS),
+ GATE(ACLK_H264, "aclk_h264", "aclk_rkvenc", 0,
+- RK3328_CLKGATE_CON(25), 0, GFLAGS),
++ RK3328_CLKGATE_CON(25), 4, GFLAGS),
+ GATE(HCLK_H264, "hclk_h264", "hclk_rkvenc", 0,
+- RK3328_CLKGATE_CON(25), 1, GFLAGS),
++ RK3328_CLKGATE_CON(25), 5, GFLAGS),
+ GATE(ACLK_AXISRAM, "aclk_axisram", "aclk_rkvenc", CLK_IGNORE_UNUSED,
+- RK3328_CLKGATE_CON(25), 0, GFLAGS),
++ RK3328_CLKGATE_CON(25), 6, GFLAGS),
+
+ COMPOSITE(SCLK_VENC_CORE, "sclk_venc_core", mux_4plls_p, 0,
+ RK3328_CLKSEL_CON(51), 14, 2, MFLAGS, 8, 5, DFLAGS,
+@@ -663,7 +663,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
+
+ /* PD_GMAC */
+ COMPOSITE(ACLK_GMAC, "aclk_gmac", mux_2plls_hdmiphy_p, 0,
+- RK3328_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
++ RK3328_CLKSEL_CON(25), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3328_CLKGATE_CON(3), 2, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_GMAC, "pclk_gmac", "aclk_gmac", 0,
+ RK3328_CLKSEL_CON(25), 8, 3, DFLAGS,
+@@ -733,7 +733,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
+
+ /* PD_PERI */
+ GATE(0, "aclk_peri_noc", "aclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 11, GFLAGS),
+- GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 4, GFLAGS),
++ GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 14, GFLAGS),
+
+ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 0, GFLAGS),
+ GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 1, GFLAGS),
+@@ -913,7 +913,7 @@ static void __init rk3328_clk_init(struct device_node *np)
+ &rk3328_cpuclk_data, rk3328_cpuclk_rates,
+ ARRAY_SIZE(rk3328_cpuclk_rates));
+
+- rockchip_register_softrst(np, 11, reg_base + RK3328_SOFTRST_CON(0),
++ rockchip_register_softrst(np, 12, reg_base + RK3328_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+ rockchip_register_restart_notifier(ctx, RK3328_GLB_SRST_FST, NULL);
+diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
+index 9b49adb20d07..69dfc6de1c4e 100644
+--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
++++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
+@@ -167,7 +167,7 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+ {
+ struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
+- u32 n_mask, k_mask, m_mask, p_mask;
++ u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0;
+ struct _ccu_nkmp _nkmp;
+ unsigned long flags;
+ u32 reg;
+@@ -186,10 +186,18 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
+
+ ccu_nkmp_find_best(parent_rate, rate, &_nkmp);
+
+- n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift);
+- k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift);
+- m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift);
+- p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift);
++ if (nkmp->n.width)
++ n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1,
++ nkmp->n.shift);
++ if (nkmp->k.width)
++ k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1,
++ nkmp->k.shift);
++ if (nkmp->m.width)
++ m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1,
++ nkmp->m.shift);
++ if (nkmp->p.width)
++ p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1,
++ nkmp->p.shift);
+
+ spin_lock_irqsave(nkmp->common.lock, flags);
+
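
The new width checks exist because GENMASK(h, l) assumes h >= l. For a factor the PLL does not implement, width and shift are both 0, and the old unconditional code expanded to GENMASK(-1, 0), i.e. ~0UL >> BITS_PER_LONG: a shift by the full type width, undefined behaviour in C that in practice yields an all-ones mask and wipes the whole register. A small standalone demonstration of the intended case (the UB case is only described, not executed):

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
/* Kernel-style GENMASK: set bits h..l inclusive; valid only for h >= l. */
#define GENMASK(h, l) \
	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	unsigned int width = 5, shift = 8;

	/* Intended use: a 5-bit field at bit 8 -> prints 0x1f00. */
	printf("%#lx\n", GENMASK(width + shift - 1, shift));

	/* With width == 0 and shift == 0 this would be GENMASK(-1, 0):
	 * ~0UL >> BITS_PER_LONG, undefined behaviour. */
	return 0;
}
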
+diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
+index b50b7460014b..3e67cbcd80da 100644
+--- a/drivers/clk/tegra/clk-pll.c
++++ b/drivers/clk/tegra/clk-pll.c
+@@ -663,8 +663,8 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
+ pll_override_writel(val, params->pmc_divp_reg, pll);
+
+ val = pll_override_readl(params->pmc_divnm_reg, pll);
+- val &= ~(divm_mask(pll) << div_nmp->override_divm_shift) |
+- ~(divn_mask(pll) << div_nmp->override_divn_shift);
++ val &= ~((divm_mask(pll) << div_nmp->override_divm_shift) |
++ (divn_mask(pll) << div_nmp->override_divn_shift));
+ val |= (cfg->m << div_nmp->override_divm_shift) |
+ (cfg->n << div_nmp->override_divn_shift);
+ pll_override_writel(val, params->pmc_divnm_reg, pll);
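
The one-character fix above is an operator-precedence bug: by De Morgan, ~m | ~n equals ~(m & n), so the old expression cleared only the bits common to both masks, usually none, and stale divm/divn bits survived the update. A two-line illustration with sample masks:

unsigned int val = 0xffff;
const unsigned int m = 0x00f0, n = 0x0f00;	/* two disjoint fields */

unsigned int wrong = val & (~m | ~n);	/* == val & ~(m & n) == 0xffff */
unsigned int right = val & ~(m | n);	/* == 0xf00f: both fields cleared */
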
+diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
+index ba7aaf421f36..8ff326c0c406 100644
+--- a/drivers/hwtracing/intel_th/msu.c
++++ b/drivers/hwtracing/intel_th/msu.c
+@@ -84,6 +84,7 @@ struct msc_iter {
+ * @reg_base: register window base address
+ * @thdev: intel_th_device pointer
+ * @win_list: list of windows in multiblock mode
++ * @single_sgt: single mode buffer
+ * @nr_pages: total number of pages allocated for this buffer
+ * @single_sz: amount of data in single mode
+ * @single_wrap: single mode wrap occurred
+@@ -104,6 +105,7 @@ struct msc {
+ struct intel_th_device *thdev;
+
+ struct list_head win_list;
++ struct sg_table single_sgt;
+ unsigned long nr_pages;
+ unsigned long single_sz;
+ unsigned int single_wrap : 1;
+@@ -617,22 +619,45 @@ static void intel_th_msc_deactivate(struct intel_th_device *thdev)
+ */
+ static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
+ {
++ unsigned long nr_pages = size >> PAGE_SHIFT;
+ unsigned int order = get_order(size);
+ struct page *page;
++ int ret;
+
+ if (!size)
+ return 0;
+
++ ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
++ if (ret)
++ goto err_out;
++
++ ret = -ENOMEM;
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+- return -ENOMEM;
++ goto err_free_sgt;
+
+ split_page(page, order);
+- msc->nr_pages = size >> PAGE_SHIFT;
++ sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
++
++ ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
++ DMA_FROM_DEVICE);
++ if (ret < 0)
++ goto err_free_pages;
++
++ msc->nr_pages = nr_pages;
+ msc->base = page_address(page);
+- msc->base_addr = page_to_phys(page);
++ msc->base_addr = sg_dma_address(msc->single_sgt.sgl);
+
+ return 0;
++
++err_free_pages:
++ __free_pages(page, order);
++
++err_free_sgt:
++ sg_free_table(&msc->single_sgt);
++
++err_out:
++ return ret;
+ }
+
+ /**
+@@ -643,6 +668,10 @@ static void msc_buffer_contig_free(struct msc *msc)
+ {
+ unsigned long off;
+
++ dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
++ 1, DMA_FROM_DEVICE);
++ sg_free_table(&msc->single_sgt);
++
+ for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
+ struct page *page = virt_to_page(msc->base + off);
+
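
The msu change stops assuming page_to_phys() is a usable bus address, which breaks behind an IOMMU, and routes the single-mode buffer through the streaming DMA API instead. A condensed sketch of the new allocate-and-map sequence with its unwind path; dev and size stand in for the driver's own values:

static int map_single_buffer(struct device *dev, size_t size,
			     struct sg_table *sgt, void **base)
{
	struct page *page;
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (ret)
		return ret;

	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(size));
	if (!page) {
		sg_free_table(sgt);
		return -ENOMEM;
	}

	sg_set_buf(sgt->sgl, page_address(page), size);
	if (dma_map_sg(dev, sgt->sgl, 1, DMA_FROM_DEVICE) != 1) {
		__free_pages(page, get_order(size));
		sg_free_table(sgt);
		return -ENOMEM;
	}

	*base = page_address(page);	/* CPU view of the buffer */
	/* Device view: sg_dma_address(sgt->sgl), not page_to_phys(). */
	return 0;
}
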
+diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
+index c7ba8acfd4d5..e55b902560de 100644
+--- a/drivers/hwtracing/stm/core.c
++++ b/drivers/hwtracing/stm/core.c
+@@ -166,11 +166,10 @@ stm_master(struct stm_device *stm, unsigned int idx)
+ static int stp_master_alloc(struct stm_device *stm, unsigned int idx)
+ {
+ struct stp_master *master;
+- size_t size;
+
+- size = ALIGN(stm->data->sw_nchannels, 8) / 8;
+- size += sizeof(struct stp_master);
+- master = kzalloc(size, GFP_ATOMIC);
++ master = kzalloc(struct_size(master, chan_map,
++ BITS_TO_LONGS(stm->data->sw_nchannels)),
++ GFP_ATOMIC);
+ if (!master)
+ return -ENOMEM;
+
+@@ -218,8 +217,8 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
+ bitmap_release_region(&master->chan_map[0], output->channel,
+ ilog2(output->nr_chans));
+
+- output->nr_chans = 0;
+ master->nr_free += output->nr_chans;
++ output->nr_chans = 0;
+ }
+
+ /*
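
The stp_master allocation above swaps hand-rolled size arithmetic for struct_size(), which adds n trailing flexible-array elements to sizeof(*p) with overflow checking; note it also sizes the channel bitmap in whole longs via BITS_TO_LONGS, as the bitmap_* helpers expect. A minimal usage sketch with an illustrative type:

struct master_like {
	unsigned int nr_free;
	unsigned long chan_map[];	/* one bit per channel */
};

static struct master_like *alloc_master(unsigned int nchannels)
{
	struct master_like *m;

	/* sizeof(struct master_like) + BITS_TO_LONGS(nchannels) longs,
	 * with the multiply saturated rather than wrapped on overflow. */
	m = kzalloc(struct_size(m, chan_map, BITS_TO_LONGS(nchannels)),
		    GFP_KERNEL);
	return m;	/* NULL on allocation failure */
}
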
+diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
+index bb8e3f149979..d464799e40a3 100644
+--- a/drivers/i2c/busses/i2c-designware-master.c
++++ b/drivers/i2c/busses/i2c-designware-master.c
+@@ -426,8 +426,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+
+ pm_runtime_get_sync(dev->dev);
+
+- if (dev->suspended) {
+- dev_err(dev->dev, "Error %s call while suspended\n", __func__);
++ if (dev_WARN_ONCE(dev->dev, dev->suspended, "Transfer while suspended\n")) {
+ ret = -ESHUTDOWN;
+ goto done_nolock;
+ }
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index c6bdd0d16c4b..ca91f90b4ccc 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -1986,11 +1986,12 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
+ return -EPERM;
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+- if (!dev->mdev->clock_info_page)
++ if (!dev->mdev->clock_info)
+ return -EOPNOTSUPP;
+
+ return rdma_user_mmap_page(&context->ibucontext, vma,
+- dev->mdev->clock_info_page, PAGE_SIZE);
++ virt_to_page(dev->mdev->clock_info),
++ PAGE_SIZE);
+ }
+
+ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index d932f99201d1..1851bc5e05ae 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -2402,7 +2402,18 @@ static ssize_t dev_id_show(struct device *dev,
+ {
+ struct net_device *ndev = to_net_dev(dev);
+
+- if (ndev->dev_id == ndev->dev_port)
++ /*
++ * ndev->dev_port will be equal to 0 in old kernel prior to commit
++ * 9b8b2a323008 ("IB/ipoib: Use dev_port to expose network interface
++ * port numbers") Zero was chosen as special case for user space
++ * applications to fallback and query dev_id to check if it has
++ * different value or not.
++ *
++ * Don't print warning in such scenario.
++ *
++ * https://github.com/systemd/systemd/blob/master/src/udev/udev-builtin-net_id.c#L358
++ */
++ if (ndev->dev_port && ndev->dev_id == ndev->dev_port)
+ netdev_info_once(ndev,
+ "\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
+ current->comm);
+diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
+index 3a5c7dc6dc57..43fe59642930 100644
+--- a/drivers/iommu/tegra-smmu.c
++++ b/drivers/iommu/tegra-smmu.c
+@@ -102,7 +102,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
+ #define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
+ #define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
+ #define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
+-#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
+ #define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
+ SMMU_TLB_FLUSH_VA_MATCH_SECTION)
+ #define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
+@@ -205,8 +204,12 @@ static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
+ {
+ u32 value;
+
+- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
+- SMMU_TLB_FLUSH_VA_MATCH_ALL;
++ if (smmu->soc->num_asids == 4)
++ value = (asid & 0x3) << 29;
++ else
++ value = (asid & 0x7f) << 24;
++
++ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
+ smmu_writel(smmu, value, SMMU_TLB_FLUSH);
+ }
+
+@@ -216,8 +219,12 @@ static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
+ {
+ u32 value;
+
+- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
+- SMMU_TLB_FLUSH_VA_SECTION(iova);
++ if (smmu->soc->num_asids == 4)
++ value = (asid & 0x3) << 29;
++ else
++ value = (asid & 0x7f) << 24;
++
++ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
+ smmu_writel(smmu, value, SMMU_TLB_FLUSH);
+ }
+
+@@ -227,8 +234,12 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
+ {
+ u32 value;
+
+- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
+- SMMU_TLB_FLUSH_VA_GROUP(iova);
++ if (smmu->soc->num_asids == 4)
++ value = (asid & 0x3) << 29;
++ else
++ value = (asid & 0x7f) << 24;
++
++ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
+ smmu_writel(smmu, value, SMMU_TLB_FLUSH);
+ }
+
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index 6fc93834da44..151aa95775be 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -1167,11 +1167,18 @@ static int __load_discards(struct dm_cache_metadata *cmd,
+ if (r)
+ return r;
+
+- for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
++ for (b = 0; ; b++) {
+ r = fn(context, cmd->discard_block_size, to_dblock(b),
+ dm_bitset_cursor_get_value(&c));
+ if (r)
+ break;
++
++ if (b >= (from_dblock(cmd->discard_nr_blocks) - 1))
++ break;
++
++ r = dm_bitset_cursor_next(&c);
++ if (r)
++ break;
+ }
+
+ dm_bitset_cursor_end(&c);
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index dd538e6b2748..df39b07de800 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -949,6 +949,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
+ {
+ #ifdef CONFIG_BLK_DEV_INTEGRITY
+ struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
++ struct mapped_device *md = dm_table_get_md(ti->table);
+
+ /* From now we require underlying device with our integrity profile */
+ if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
+@@ -968,7 +969,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
+
+ if (crypt_integrity_aead(cc)) {
+ cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
+- DMINFO("Integrity AEAD, tag size %u, IV size %u.",
++ DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
+ cc->integrity_tag_size, cc->integrity_iv_size);
+
+ if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
+@@ -976,7 +977,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
+ return -EINVAL;
+ }
+ } else if (cc->integrity_iv_size)
+- DMINFO("Additional per-sector space %u bytes for IV.",
++ DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
+ cc->integrity_iv_size);
+
+ if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
+@@ -1890,7 +1891,7 @@ static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
+ * algorithm implementation is used. Help people debug performance
+ * problems by logging the ->cra_driver_name.
+ */
+- DMINFO("%s using implementation \"%s\"", ciphermode,
++ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
+ crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
+ return 0;
+ }
+@@ -1910,7 +1911,7 @@ static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
+ return err;
+ }
+
+- DMINFO("%s using implementation \"%s\"", ciphermode,
++ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
+ crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
+ return 0;
+ }
+diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
+index fddffe251bf6..f496213f8b67 100644
+--- a/drivers/md/dm-delay.c
++++ b/drivers/md/dm-delay.c
+@@ -121,7 +121,8 @@ static void delay_dtr(struct dm_target *ti)
+ {
+ struct delay_c *dc = ti->private;
+
+- destroy_workqueue(dc->kdelayd_wq);
++ if (dc->kdelayd_wq)
++ destroy_workqueue(dc->kdelayd_wq);
+
+ if (dc->read.dev)
+ dm_put_device(ti, dc->read.dev);
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index f535fd8ac82d..a4fe187d50d0 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -2568,7 +2568,7 @@ static int calculate_device_limits(struct dm_integrity_c *ic)
+ if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
+ return -EINVAL;
+ } else {
+- __u64 meta_size = ic->provided_data_sectors * ic->tag_size;
++ __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
+ meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
+ >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
+ meta_size <<= ic->log2_buffer_sectors;
+@@ -3439,7 +3439,7 @@ try_smaller_buffer:
+ DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
+ DEBUG_print(" journal_entries %u\n", ic->journal_entries);
+ DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
+- DEBUG_print(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors);
++ DEBUG_print(" data_device_sectors 0x%llx\n", (unsigned long long)ic->data_device_sectors);
+ DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
+ DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
+ DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index 2ee5e357a0a7..cc5173dfd466 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -882,6 +882,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
+ if (attached_handler_name || m->hw_handler_name) {
+ INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
+ r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
++ kfree(attached_handler_name);
+ if (r) {
+ dm_put_device(ti, p->path.dev);
+ goto bad;
+@@ -896,7 +897,6 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
+
+ return p;
+ bad:
+- kfree(attached_handler_name);
+ free_pgpath(p);
+ return ERR_PTR(r);
+ }
+diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
+index fa68336560c3..d8334cd45d7c 100644
+--- a/drivers/md/dm-zoned-metadata.c
++++ b/drivers/md/dm-zoned-metadata.c
+@@ -1169,6 +1169,9 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
+ goto out;
+ }
+
++ if (!nr_blkz)
++ break;
++
+ /* Process report */
+ for (i = 0; i < nr_blkz; i++) {
+ ret = dmz_init_zone(zmd, zone, &blkz[i]);
+@@ -1204,6 +1207,8 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
+ /* Get zone information from disk */
+ ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
+ &blkz, &nr_blkz, GFP_NOIO);
++ if (!nr_blkz)
++ ret = -EIO;
+ if (ret) {
+ dmz_dev_err(zmd->dev, "Get zone %u report failed",
+ dmz_id(zmd, zone));
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 05ffffb8b769..295ff09cff4c 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -132,24 +132,6 @@ static inline int speed_max(struct mddev *mddev)
+ mddev->sync_speed_max : sysctl_speed_limit_max;
+ }
+
+-static void * flush_info_alloc(gfp_t gfp_flags, void *data)
+-{
+- return kzalloc(sizeof(struct flush_info), gfp_flags);
+-}
+-static void flush_info_free(void *flush_info, void *data)
+-{
+- kfree(flush_info);
+-}
+-
+-static void * flush_bio_alloc(gfp_t gfp_flags, void *data)
+-{
+- return kzalloc(sizeof(struct flush_bio), gfp_flags);
+-}
+-static void flush_bio_free(void *flush_bio, void *data)
+-{
+- kfree(flush_bio);
+-}
+-
+ static struct ctl_table_header *raid_table_header;
+
+ static struct ctl_table raid_table[] = {
+@@ -423,54 +405,31 @@ static int md_congested(void *data, int bits)
+ /*
+ * Generic flush handling for md
+ */
+-static void submit_flushes(struct work_struct *ws)
+-{
+- struct flush_info *fi = container_of(ws, struct flush_info, flush_work);
+- struct mddev *mddev = fi->mddev;
+- struct bio *bio = fi->bio;
+-
+- bio->bi_opf &= ~REQ_PREFLUSH;
+- md_handle_request(mddev, bio);
+-
+- mempool_free(fi, mddev->flush_pool);
+-}
+
+-static void md_end_flush(struct bio *fbio)
++static void md_end_flush(struct bio *bio)
+ {
+- struct flush_bio *fb = fbio->bi_private;
+- struct md_rdev *rdev = fb->rdev;
+- struct flush_info *fi = fb->fi;
+- struct bio *bio = fi->bio;
+- struct mddev *mddev = fi->mddev;
++ struct md_rdev *rdev = bio->bi_private;
++ struct mddev *mddev = rdev->mddev;
+
+ rdev_dec_pending(rdev, mddev);
+
+- if (atomic_dec_and_test(&fi->flush_pending)) {
+- if (bio->bi_iter.bi_size == 0) {
+- /* an empty barrier - all done */
+- bio_endio(bio);
+- mempool_free(fi, mddev->flush_pool);
+- } else {
+- INIT_WORK(&fi->flush_work, submit_flushes);
+- queue_work(md_wq, &fi->flush_work);
+- }
++ if (atomic_dec_and_test(&mddev->flush_pending)) {
++ /* The pre-request flush has finished */
++ queue_work(md_wq, &mddev->flush_work);
+ }
+-
+- mempool_free(fb, mddev->flush_bio_pool);
+- bio_put(fbio);
++ bio_put(bio);
+ }
+
+-void md_flush_request(struct mddev *mddev, struct bio *bio)
++static void md_submit_flush_data(struct work_struct *ws);
++
++static void submit_flushes(struct work_struct *ws)
+ {
++ struct mddev *mddev = container_of(ws, struct mddev, flush_work);
+ struct md_rdev *rdev;
+- struct flush_info *fi;
+-
+- fi = mempool_alloc(mddev->flush_pool, GFP_NOIO);
+-
+- fi->bio = bio;
+- fi->mddev = mddev;
+- atomic_set(&fi->flush_pending, 1);
+
++ mddev->start_flush = ktime_get_boottime();
++ INIT_WORK(&mddev->flush_work, md_submit_flush_data);
++ atomic_set(&mddev->flush_pending, 1);
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev)
+ if (rdev->raid_disk >= 0 &&
+@@ -480,37 +439,74 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
+ * we reclaim rcu_read_lock
+ */
+ struct bio *bi;
+- struct flush_bio *fb;
+ atomic_inc(&rdev->nr_pending);
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+-
+- fb = mempool_alloc(mddev->flush_bio_pool, GFP_NOIO);
+- fb->fi = fi;
+- fb->rdev = rdev;
+-
+ bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
+- bio_set_dev(bi, rdev->bdev);
+ bi->bi_end_io = md_end_flush;
+- bi->bi_private = fb;
++ bi->bi_private = rdev;
++ bio_set_dev(bi, rdev->bdev);
+ bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+-
+- atomic_inc(&fi->flush_pending);
++ atomic_inc(&mddev->flush_pending);
+ submit_bio(bi);
+-
+ rcu_read_lock();
+ rdev_dec_pending(rdev, mddev);
+ }
+ rcu_read_unlock();
++ if (atomic_dec_and_test(&mddev->flush_pending))
++ queue_work(md_wq, &mddev->flush_work);
++}
++
++static void md_submit_flush_data(struct work_struct *ws)
++{
++ struct mddev *mddev = container_of(ws, struct mddev, flush_work);
++ struct bio *bio = mddev->flush_bio;
++
++ /*
++ * must reset flush_bio before calling into md_handle_request to avoid a
++ * deadlock, because other bios passed md_handle_request suspend check
++ * could wait for this and below md_handle_request could wait for those
++ * bios because of suspend check
++ */
++ mddev->last_flush = mddev->start_flush;
++ mddev->flush_bio = NULL;
++ wake_up(&mddev->sb_wait);
++
++ if (bio->bi_iter.bi_size == 0) {
++ /* an empty barrier - all done */
++ bio_endio(bio);
++ } else {
++ bio->bi_opf &= ~REQ_PREFLUSH;
++ md_handle_request(mddev, bio);
++ }
++}
+
+- if (atomic_dec_and_test(&fi->flush_pending)) {
+- if (bio->bi_iter.bi_size == 0) {
++void md_flush_request(struct mddev *mddev, struct bio *bio)
++{
++ ktime_t start = ktime_get_boottime();
++ spin_lock_irq(&mddev->lock);
++ wait_event_lock_irq(mddev->sb_wait,
++ !mddev->flush_bio ||
++ ktime_after(mddev->last_flush, start),
++ mddev->lock);
++ if (!ktime_after(mddev->last_flush, start)) {
++ WARN_ON(mddev->flush_bio);
++ mddev->flush_bio = bio;
++ bio = NULL;
++ }
++ spin_unlock_irq(&mddev->lock);
++
++ if (!bio) {
++ INIT_WORK(&mddev->flush_work, submit_flushes);
++ queue_work(md_wq, &mddev->flush_work);
++ } else {
++ /* flush was performed for some other bio while we waited. */
++ if (bio->bi_iter.bi_size == 0)
+ /* an empty barrier - all done */
+ bio_endio(bio);
+- mempool_free(fi, mddev->flush_pool);
+- } else {
+- INIT_WORK(&fi->flush_work, submit_flushes);
+- queue_work(md_wq, &fi->flush_work);
++ else {
++ bio->bi_opf &= ~REQ_PREFLUSH;
++ mddev->pers->make_request(mddev, bio);
+ }
+ }
+ }
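
The reworked path keeps at most one flush in flight per array and deduplicates by time: a request whose flush started before the last completed flush began is already covered by that flush and can skip submitting a new one. A minimal sketch of just that rule, assuming the field semantics from the hunk above (the helper name is illustrative, not from the driver):

	/*
	 * last_flush is the start time of the most recently *completed*
	 * flush; start is when the current request began waiting.
	 */
	static bool flush_already_covered(ktime_t start, ktime_t last_flush)
	{
		/* a flush that started after us has already flushed our data */
		return ktime_after(last_flush, start);
	}
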
+@@ -560,6 +556,7 @@ void mddev_init(struct mddev *mddev)
+ atomic_set(&mddev->openers, 0);
+ atomic_set(&mddev->active_io, 0);
+ spin_lock_init(&mddev->lock);
++ atomic_set(&mddev->flush_pending, 0);
+ init_waitqueue_head(&mddev->sb_wait);
+ init_waitqueue_head(&mddev->recovery_wait);
+ mddev->reshape_position = MaxSector;
+@@ -2855,8 +2852,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
+ err = 0;
+ }
+ } else if (cmd_match(buf, "re-add")) {
+- if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
+- rdev->saved_raid_disk >= 0) {
++ if (!rdev->mddev->pers)
++ err = -EINVAL;
++ else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
++ rdev->saved_raid_disk >= 0) {
+ /* clear_bit is performed _after_ all the devices
+ * have their local Faulty bit cleared. If any writes
+ * happen in the meantime in the local node, they
+@@ -5511,22 +5510,6 @@ int md_run(struct mddev *mddev)
+ if (err)
+ return err;
+ }
+- if (mddev->flush_pool == NULL) {
+- mddev->flush_pool = mempool_create(NR_FLUSH_INFOS, flush_info_alloc,
+- flush_info_free, mddev);
+- if (!mddev->flush_pool) {
+- err = -ENOMEM;
+- goto abort;
+- }
+- }
+- if (mddev->flush_bio_pool == NULL) {
+- mddev->flush_bio_pool = mempool_create(NR_FLUSH_BIOS, flush_bio_alloc,
+- flush_bio_free, mddev);
+- if (!mddev->flush_bio_pool) {
+- err = -ENOMEM;
+- goto abort;
+- }
+- }
+
+ spin_lock(&pers_lock);
+ pers = find_pers(mddev->level, mddev->clevel);
+@@ -5686,11 +5669,8 @@ int md_run(struct mddev *mddev)
+ return 0;
+
+ abort:
+- mempool_destroy(mddev->flush_bio_pool);
+- mddev->flush_bio_pool = NULL;
+- mempool_destroy(mddev->flush_pool);
+- mddev->flush_pool = NULL;
+-
++ bioset_exit(&mddev->bio_set);
++ bioset_exit(&mddev->sync_set);
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(md_run);
+@@ -5894,14 +5874,6 @@ static void __md_stop(struct mddev *mddev)
+ mddev->to_remove = &md_redundancy_group;
+ module_put(pers->owner);
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+- if (mddev->flush_bio_pool) {
+- mempool_destroy(mddev->flush_bio_pool);
+- mddev->flush_bio_pool = NULL;
+- }
+- if (mddev->flush_pool) {
+- mempool_destroy(mddev->flush_pool);
+- mddev->flush_pool = NULL;
+- }
+ }
+
+ void md_stop(struct mddev *mddev)
+@@ -9257,7 +9229,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
+ * reshape is happening in the remote node, we need to
+ * update reshape_position and call start_reshape.
+ */
+- mddev->reshape_position = sb->reshape_position;
++ mddev->reshape_position = le64_to_cpu(sb->reshape_position);
+ if (mddev->pers->update_reshape_pos)
+ mddev->pers->update_reshape_pos(mddev);
+ if (mddev->pers->start_reshape)
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index c52afb52c776..257cb4c9e22b 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -252,19 +252,6 @@ enum mddev_sb_flags {
+ MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
+ };
+
+-#define NR_FLUSH_INFOS 8
+-#define NR_FLUSH_BIOS 64
+-struct flush_info {
+- struct bio *bio;
+- struct mddev *mddev;
+- struct work_struct flush_work;
+- atomic_t flush_pending;
+-};
+-struct flush_bio {
+- struct flush_info *fi;
+- struct md_rdev *rdev;
+-};
+-
+ struct mddev {
+ void *private;
+ struct md_personality *pers;
+@@ -470,8 +457,16 @@ struct mddev {
+ * metadata and bitmap writes
+ */
+
+- mempool_t *flush_pool;
+- mempool_t *flush_bio_pool;
++ /* Generic flush handling.
++	 * The last device to finish its preflush schedules a worker to submit
++ * the rest of the request (without the REQ_PREFLUSH flag).
++ */
++ struct bio *flush_bio;
++ atomic_t flush_pending;
++ ktime_t start_flush, last_flush; /* last_flush is when the last completed
++ * flush was started.
++ */
++ struct work_struct flush_work;
+ struct work_struct event_work; /* used by dm to report failure event */
+ void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
+ struct md_cluster_info *cluster_info;
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 3ae13c06b200..f9c90ab220b9 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -4197,7 +4197,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
+ /* now write out any block on a failed drive,
+ * or P or Q if they were recomputed
+ */
+- BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
++ dev = NULL;
+ if (s->failed == 2) {
+ dev = &sh->dev[s->failed_num[1]];
+ s->locked++;
+@@ -4222,6 +4222,14 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantwrite, &dev->flags);
+ }
++ if (WARN_ONCE(dev && !test_bit(R5_UPTODATE, &dev->flags),
++ "%s: disk%td not up to date\n",
++ mdname(conf->mddev),
++ dev - (struct r5dev *) &sh->dev)) {
++ clear_bit(R5_LOCKED, &dev->flags);
++ clear_bit(R5_Wantwrite, &dev->flags);
++ s->locked--;
++ }
+ clear_bit(STRIPE_DEGRADED, &sh->state);
+
+ set_bit(STRIPE_INSYNC, &sh->state);
+@@ -4233,15 +4241,26 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
+ case check_state_check_result:
+ sh->check_state = check_state_idle;
+
+- if (s->failed > 1)
+- break;
+ /* handle a successful check operation, if parity is correct
+ * we are done. Otherwise update the mismatch count and repair
+ * parity if !MD_RECOVERY_CHECK
+ */
+ if (sh->ops.zero_sum_result == 0) {
+- /* Any parity checked was correct */
+- set_bit(STRIPE_INSYNC, &sh->state);
++ /* both parities are correct */
++ if (!s->failed)
++ set_bit(STRIPE_INSYNC, &sh->state);
++ else {
++			/* in contrast to the raid5 case we can validate
++			 * parity even with a failed device, but we still
++			 * have that device's blocks to write back
++ */
++ sh->check_state = check_state_compute_result;
++ /* Returning at this point means that we may go
++ * off and bring p and/or q uptodate again so
++ * we make sure to check zero_sum_result again
++ * to verify if p or q need writeback
++ */
++ }
+ } else {
+ atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
+ if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
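
In short, the fixed raid6 state machine treats a clean zero_sum_result with failed drives as "parity is trustworthy, but the failed blocks still need recomputation and writeback" instead of bailing out early. A hedged simplification of that dispatch (the enum and helper are illustrative and omit details such as the MD_RECOVERY_CHECK test):

	enum raid6_next { NEXT_INSYNC, NEXT_COMPUTE_WRITEBACK, NEXT_REPAIR };

	/* simplified: the real code also honours MD_RECOVERY_CHECK before repairing */
	static enum raid6_next raid6_check_next(int zero_sum_result, int failed)
	{
		if (zero_sum_result == 0)
			return failed ? NEXT_COMPUTE_WRITEBACK : NEXT_INSYNC;
		return NEXT_REPAIR;	/* mismatch: bump counters, maybe repair */
	}
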
+diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
+index 5d1b218bb7f0..2d3f7e00b129 100644
+--- a/drivers/media/i2c/ov6650.c
++++ b/drivers/media/i2c/ov6650.c
+@@ -814,6 +814,8 @@ static int ov6650_video_probe(struct i2c_client *client)
+ if (ret < 0)
+ return ret;
+
++ msleep(20);
++
+ /*
+ * check and show product ID and manufacturer ID
+ */
+diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
+index 24afc36833bf..1608a482f681 100644
+--- a/drivers/memory/tegra/mc.c
++++ b/drivers/memory/tegra/mc.c
+@@ -280,7 +280,7 @@ static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
+ u32 value;
+
+ /* compute the number of MC clock cycles per tick */
+- tick = mc->tick * clk_get_rate(mc->clk);
++ tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
+ do_div(tick, NSEC_PER_SEC);
+
+ value = readl(mc->regs + MC_EMEM_ARB_CFG);
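
The cast is an integer-promotion fix: both operands are 32-bit, so the multiplication was done in 32 bits and overflowed before the 64-bit do_div() ever ran. A standalone illustration (the 30 ns tick and 408 MHz rate are assumed example values, not taken from the driver):

	#include <stdint.h>

	static uint64_t cycles_per_tick(uint32_t tick_ns, uint32_t rate_hz)
	{
		/* cast first so the multiply happens in 64 bits:
		 * 30 * 408000000 = 12240000000, which wraps in uint32_t */
		uint64_t cycles = (uint64_t)tick_ns * rate_hz;

		return cycles / 1000000000ULL;	/* ns * Hz -> cycles per tick */
	}
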
+diff --git a/drivers/net/Makefile b/drivers/net/Makefile
+index 21cde7e78621..0d3ba056cda3 100644
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -40,7 +40,7 @@ obj-$(CONFIG_ARCNET) += arcnet/
+ obj-$(CONFIG_DEV_APPLETALK) += appletalk/
+ obj-$(CONFIG_CAIF) += caif/
+ obj-$(CONFIG_CAN) += can/
+-obj-$(CONFIG_NET_DSA) += dsa/
++obj-y += dsa/
+ obj-$(CONFIG_ETHERNET) += ethernet/
+ obj-$(CONFIG_FDDI) += fddi/
+ obj-$(CONFIG_HIPPI) += hippi/
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
+index ffed2d4c9403..9c481823b3e8 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
++++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
+@@ -1492,7 +1492,7 @@ int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
+ rule.port = port;
+ rule.qpn = qpn;
+ INIT_LIST_HEAD(&rule.list);
+- mlx4_err(dev, "going promisc on %x\n", port);
++ mlx4_info(dev, "going promisc on %x\n", port);
+
+ return mlx4_flow_attach(dev, &rule, regid_p);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+index 37a551436e4a..b7e3b8902e7e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
++++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+@@ -8,6 +8,7 @@ config MLX5_CORE
+ depends on PCI
+ imply PTP_1588_CLOCK
+ imply VXLAN
++ imply MLXFW
+ default n
+ ---help---
+ Core driver for low level functionality of the ConnectX-4 and
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 253496c4a3db..a908e29ddb7b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1802,6 +1802,22 @@ static int mlx5e_flash_device(struct net_device *dev,
+ return mlx5e_ethtool_flash_device(priv, flash);
+ }
+
++#ifndef CONFIG_MLX5_EN_RXNFC
++/* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS;
++ * otherwise this function is defined in en_fs_ethtool.c.
++ */
++static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
++{
++ struct mlx5e_priv *priv = netdev_priv(dev);
++
++ if (info->cmd != ETHTOOL_GRXRINGS)
++ return -EOPNOTSUPP;
++ /* ring_count is needed by ethtool -x */
++ info->data = priv->channels.params.num_channels;
++ return 0;
++}
++#endif
++
+ const struct ethtool_ops mlx5e_ethtool_ops = {
+ .get_drvinfo = mlx5e_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+@@ -1820,8 +1836,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
+ .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
+ .get_rxfh = mlx5e_get_rxfh,
+ .set_rxfh = mlx5e_set_rxfh,
+-#ifdef CONFIG_MLX5_EN_RXNFC
+ .get_rxnfc = mlx5e_get_rxnfc,
++#ifdef CONFIG_MLX5_EN_RXNFC
+ .set_rxnfc = mlx5e_set_rxnfc,
+ #endif
+ .flash_device = mlx5e_flash_device,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index ef9e472daffb..3977f763b6ed 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -64,9 +64,26 @@ static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
+ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+ {
++ struct mlx5e_priv *priv = netdev_priv(dev);
++ struct mlx5_core_dev *mdev = priv->mdev;
++
+ strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%d.%d.%04d (%.16s)",
++ fw_rev_maj(mdev), fw_rev_min(mdev),
++ fw_rev_sub(mdev), mdev->board_id);
++}
++
++static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,
++ struct ethtool_drvinfo *drvinfo)
++{
++ struct mlx5e_priv *priv = netdev_priv(dev);
++
++ mlx5e_rep_get_drvinfo(dev, drvinfo);
++ strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev),
++ sizeof(drvinfo->bus_info));
+ }
+
+ static const struct counter_desc sw_rep_stats_desc[] = {
+@@ -374,7 +391,7 @@ static const struct ethtool_ops mlx5e_vf_rep_ethtool_ops = {
+ };
+
+ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
+- .get_drvinfo = mlx5e_rep_get_drvinfo,
++ .get_drvinfo = mlx5e_uplink_rep_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlx5e_rep_get_strings,
+ .get_sset_count = mlx5e_rep_get_sset_count,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 79f122b45def..abbdd4906984 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1375,6 +1375,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
+ if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ d1->vport.num == d2->vport.num &&
+ d1->vport.flags == d2->vport.flags &&
++ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
++ (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
+ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
+ (d1->vport.reformat_id == d2->vport.reformat_id) : true)) ||
+ (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index ca0ee9916e9e..0059b290e095 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -535,23 +535,16 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
+ do_div(ns, NSEC_PER_SEC / HZ);
+ clock->overflow_period = ns;
+
+- mdev->clock_info_page = alloc_page(GFP_KERNEL);
+- if (mdev->clock_info_page) {
+- mdev->clock_info = kmap(mdev->clock_info_page);
+- if (!mdev->clock_info) {
+- __free_page(mdev->clock_info_page);
+- mlx5_core_warn(mdev, "failed to map clock page\n");
+- } else {
+- mdev->clock_info->sign = 0;
+- mdev->clock_info->nsec = clock->tc.nsec;
+- mdev->clock_info->cycles = clock->tc.cycle_last;
+- mdev->clock_info->mask = clock->cycles.mask;
+- mdev->clock_info->mult = clock->nominal_c_mult;
+- mdev->clock_info->shift = clock->cycles.shift;
+- mdev->clock_info->frac = clock->tc.frac;
+- mdev->clock_info->overflow_period =
+- clock->overflow_period;
+- }
++ mdev->clock_info =
++ (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
++ if (mdev->clock_info) {
++ mdev->clock_info->nsec = clock->tc.nsec;
++ mdev->clock_info->cycles = clock->tc.cycle_last;
++ mdev->clock_info->mask = clock->cycles.mask;
++ mdev->clock_info->mult = clock->nominal_c_mult;
++ mdev->clock_info->shift = clock->cycles.shift;
++ mdev->clock_info->frac = clock->tc.frac;
++ mdev->clock_info->overflow_period = clock->overflow_period;
+ }
+
+ INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
+@@ -599,8 +592,7 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+ cancel_delayed_work_sync(&clock->overflow_work);
+
+ if (mdev->clock_info) {
+- kunmap(mdev->clock_info_page);
+- __free_page(mdev->clock_info_page);
++ free_page((unsigned long)mdev->clock_info);
+ mdev->clock_info = NULL;
+ }
+
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+index 2d9f26a725c2..37bd2dbcd206 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+@@ -164,6 +164,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
+ return;
+ }
+
++ rcu_read_lock();
+ for (i = 0; i < count; i++) {
+ ipv4_addr = payload->tun_info[i].ipv4;
+ port = be32_to_cpu(payload->tun_info[i].egress_port);
+@@ -179,6 +180,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ }
++ rcu_read_unlock();
+ }
+
+ static int
+@@ -362,9 +364,10 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
+
+ payload = nfp_flower_cmsg_get_data(skb);
+
++ rcu_read_lock();
+ netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
+ if (!netdev)
+- goto route_fail_warning;
++ goto fail_rcu_unlock;
+
+ flow.daddr = payload->ipv4_addr;
+ flow.flowi4_proto = IPPROTO_UDP;
+@@ -374,21 +377,23 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
+ rt = ip_route_output_key(dev_net(netdev), &flow);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (err)
+- goto route_fail_warning;
++ goto fail_rcu_unlock;
+ #else
+- goto route_fail_warning;
++ goto fail_rcu_unlock;
+ #endif
+
+ /* Get the neighbour entry for the lookup */
+ n = dst_neigh_lookup(&rt->dst, &flow.daddr);
+ ip_rt_put(rt);
+ if (!n)
+- goto route_fail_warning;
+- nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
++ goto fail_rcu_unlock;
++ nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
+ neigh_release(n);
++ rcu_read_unlock();
+ return;
+
+-route_fail_warning:
++fail_rcu_unlock:
++ rcu_read_unlock();
+ nfp_flower_cmsg_warn(app, "Requested route not found.\n");
+ }
+
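
The second half of this fix follows from the first: once the lookup runs under rcu_read_lock(), nothing on that path may sleep, so the GFP_KERNEL allocation inside nfp_tun_write_neigh() has to become GFP_ATOMIC. The constraint in isolation, as a minimal sketch (struct foo and the helper name are placeholders):

	struct foo { int val; };

	static struct foo *copy_under_rcu(const struct foo *src)
	{
		struct foo *copy;

		rcu_read_lock();
		/* GFP_KERNEL can sleep on reclaim, which is illegal under RCU */
		copy = kmemdup(src, sizeof(*copy), GFP_ATOMIC);
		rcu_read_unlock();

		return copy;
	}
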
+diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
+index c589f5ae75bb..8bb53ec8d9cf 100644
+--- a/drivers/net/ieee802154/mcr20a.c
++++ b/drivers/net/ieee802154/mcr20a.c
+@@ -533,6 +533,8 @@ mcr20a_start(struct ieee802154_hw *hw)
+ dev_dbg(printdev(lp), "no slotted operation\n");
+ ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
+ DAR_PHY_CTRL1_SLOTTED, 0x0);
++ if (ret < 0)
++ return ret;
+
+ /* enable irq */
+ enable_irq(lp->spi->irq);
+@@ -540,11 +542,15 @@ mcr20a_start(struct ieee802154_hw *hw)
+ /* Unmask SEQ interrupt */
+ ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
+ DAR_PHY_CTRL2_SEQMSK, 0x0);
++ if (ret < 0)
++ return ret;
+
+ /* Start the RX sequence */
+ dev_dbg(printdev(lp), "start the RX sequence\n");
+ ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
+ DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
++ if (ret < 0)
++ return ret;
+
+ return 0;
+ }
+diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
+index b5edc7f96a39..685e875f5164 100644
+--- a/drivers/net/ppp/ppp_deflate.c
++++ b/drivers/net/ppp/ppp_deflate.c
+@@ -610,12 +610,20 @@ static struct compressor ppp_deflate_draft = {
+
+ static int __init deflate_init(void)
+ {
+- int answer = ppp_register_compressor(&ppp_deflate);
+- if (answer == 0)
+- printk(KERN_INFO
+- "PPP Deflate Compression module registered\n");
+- ppp_register_compressor(&ppp_deflate_draft);
+- return answer;
++ int rc;
++
++ rc = ppp_register_compressor(&ppp_deflate);
++ if (rc)
++ return rc;
++
++ rc = ppp_register_compressor(&ppp_deflate_draft);
++ if (rc) {
++ ppp_unregister_compressor(&ppp_deflate);
++ return rc;
++ }
++
++ pr_info("PPP Deflate Compression module registered\n");
++ return 0;
+ }
+
+ static void __exit deflate_cleanup(void)
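
The deflate fix is the standard two-resource registration pattern: fail hard on the first error, and unwind the first registration if the second fails, so the module never ends up half-registered. A generic sketch of the shape (register_a/register_b/unregister_a are hypothetical placeholders):

	int register_a(void);		/* hypothetical */
	int register_b(void);		/* hypothetical */
	void unregister_a(void);	/* hypothetical */

	static int register_pair(void)
	{
		int rc;

		rc = register_a();
		if (rc)
			return rc;		/* nothing to unwind yet */

		rc = register_b();
		if (rc) {
			unregister_a();		/* unwind the partial success */
			return rc;
		}

		return 0;			/* both registered */
	}
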
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 9195f3476b1d..366217263d70 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1122,9 +1122,16 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
+ {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
+ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
++ {QMI_FIXED_INTF(0x1435, 0x0918, 3)}, /* Wistron NeWeb D16Q1 */
++ {QMI_FIXED_INTF(0x1435, 0x0918, 4)}, /* Wistron NeWeb D16Q1 */
++ {QMI_FIXED_INTF(0x1435, 0x0918, 5)}, /* Wistron NeWeb D16Q1 */
++ {QMI_FIXED_INTF(0x1435, 0x3185, 4)}, /* Wistron NeWeb M18Q5 */
++ {QMI_FIXED_INTF(0x1435, 0xd111, 4)}, /* M9615A DM11-1 D51QC */
+ {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
+ {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
+ {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
++ {QMI_FIXED_INTF(0x1435, 0xd182, 4)}, /* Wistron NeWeb D18 */
++ {QMI_FIXED_INTF(0x1435, 0xd182, 5)}, /* Wistron NeWeb D18 */
+ {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
+ {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
+ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
+@@ -1180,6 +1187,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */
+ {QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */
+ {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
++ {QMI_FIXED_INTF(0x19d2, 0x0396, 3)}, /* ZTE ZM8620 */
+ {QMI_FIXED_INTF(0x19d2, 0x0412, 4)}, /* Telewell TW-LTE 4G */
+ {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
+ {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
+@@ -1200,7 +1208,9 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
+ {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
+ {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
++ {QMI_FIXED_INTF(0x19d2, 0x1432, 3)}, /* ZTE ME3620 */
+ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
++ {QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */
+ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
+ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
+ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
+@@ -1240,6 +1250,8 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
+ {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
+ {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+index 51d76ac45075..188d7961584e 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+@@ -31,6 +31,10 @@ struct brcmf_dmi_data {
+
+ /* NOTE: Please keep all entries sorted alphabetically */
+
++static const struct brcmf_dmi_data acepc_t8_data = {
++ BRCM_CC_4345_CHIP_ID, 6, "acepc-t8"
++};
++
+ static const struct brcmf_dmi_data gpd_win_pocket_data = {
+ BRCM_CC_4356_CHIP_ID, 2, "gpd-win-pocket"
+ };
+@@ -44,6 +48,28 @@ static const struct brcmf_dmi_data meegopad_t08_data = {
+ };
+
+ static const struct dmi_system_id dmi_platform_data[] = {
++ {
++ /* ACEPC T8 Cherry Trail Z8350 mini PC */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T8"),
++ /* also match on somewhat unique bios-version */
++ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
++ },
++ .driver_data = (void *)&acepc_t8_data,
++ },
++ {
++ /* ACEPC T11 Cherry Trail Z8350 mini PC, same wifi as the T8 */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T11"),
++ /* also match on somewhat unique bios-version */
++ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
++ },
++ .driver_data = (void *)&acepc_t8_data,
++ },
+ {
+ /* Match for the GPDwin which unfortunately uses somewhat
+ * generic dmi strings, which is why we test for 4 strings.
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+index 7bd8676508f5..519c7dd47f69 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -143,9 +143,9 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
+ }
+
+ /* iwl_mvm_create_skb Adds the rxb to a new skb */
+-static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
+- u16 len, u8 crypt_len,
+- struct iwl_rx_cmd_buffer *rxb)
++static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
++ struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
++ struct iwl_rx_cmd_buffer *rxb)
+ {
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
+@@ -178,6 +178,20 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
+ * present before copying packet data.
+ */
+ hdrlen += crypt_len;
++
++ if (WARN_ONCE(headlen < hdrlen,
++ "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
++ hdrlen, len, crypt_len)) {
++ /*
++ * We warn and trace because we want to be able to see
++ * it in trace-cmd as well.
++ */
++ IWL_DEBUG_RX(mvm,
++ "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
++ hdrlen, len, crypt_len);
++ return -EINVAL;
++ }
++
+ skb_put_data(skb, hdr, hdrlen);
+ skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
+
+@@ -190,6 +204,8 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
+ skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+ fraglen, rxb->truesize);
+ }
++
++ return 0;
+ }
+
+ /* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */
+@@ -1600,7 +1616,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
+ rx_status->boottime_ns = ktime_get_boot_ns();
+ }
+
+- iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
++ if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
++ kfree_skb(skb);
++ goto out;
++ }
++
+ if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+ out:
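
The new return value turns a silent out-of-bounds copy into a dropped frame: the device-reported header length is validated against the bytes actually available before anything is copied. The check in isolation looks like the following sketch (names are illustrative):

	static int copy_hdr_checked(u8 *dst, const u8 *src,
				    size_t hdrlen, size_t headlen)
	{
		if (WARN_ON_ONCE(headlen < hdrlen))
			return -EINVAL;		/* malformed frame, drop it */

		memcpy(dst, src, hdrlen);
		return 0;
	}
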
+diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c
+index 27a49068d32d..57ad56435dda 100644
+--- a/drivers/net/wireless/intersil/p54/p54pci.c
++++ b/drivers/net/wireless/intersil/p54/p54pci.c
+@@ -554,7 +554,7 @@ static int p54p_probe(struct pci_dev *pdev,
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable new PCI device\n");
+- return err;
++ goto err_put;
+ }
+
+ mem_addr = pci_resource_start(pdev, 0);
+@@ -639,6 +639,7 @@ static int p54p_probe(struct pci_dev *pdev,
+ pci_release_regions(pdev);
+ err_disable_dev:
+ pci_disable_device(pdev);
++err_put:
+ pci_dev_put(pdev);
+ return err;
+ }
+diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
+index 0c6e8b44b4ed..c60b465f6fe4 100644
+--- a/drivers/parisc/led.c
++++ b/drivers/parisc/led.c
+@@ -568,6 +568,9 @@ int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long d
+ break;
+
+ case DISPLAY_MODEL_LASI:
++		/* Skip LED registration when running on QEMU */
++ if (running_on_qemu)
++ return 1;
+ LED_DATA_REG = data_reg;
+ led_func_ptr = led_LASI_driver;
+ printk(KERN_INFO "LED display at %lx registered\n", LED_DATA_REG);
+diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
+index c8febb009454..6a4e435bd35f 100644
+--- a/drivers/pci/controller/pcie-rcar.c
++++ b/drivers/pci/controller/pcie-rcar.c
+@@ -46,6 +46,7 @@
+
+ /* Transfer control */
+ #define PCIETCTLR 0x02000
++#define DL_DOWN BIT(3)
+ #define CFINIT 1
+ #define PCIETSTR 0x02004
+ #define DATA_LINK_ACTIVE 1
+@@ -94,6 +95,7 @@
+ #define MACCTLR 0x011058
+ #define SPEED_CHANGE BIT(24)
+ #define SCRAMBLE_DISABLE BIT(27)
++#define PMSR 0x01105c
+ #define MACS2R 0x011078
+ #define MACCGSPSETR 0x011084
+ #define SPCNGRSN BIT(31)
+@@ -1130,6 +1132,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
+ pcie = pci_host_bridge_priv(bridge);
+
+ pcie->dev = dev;
++ platform_set_drvdata(pdev, pcie);
+
+ err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
+ if (err)
+@@ -1221,10 +1224,28 @@ err_free_bridge:
+ return err;
+ }
+
++static int rcar_pcie_resume_noirq(struct device *dev)
++{
++ struct rcar_pcie *pcie = dev_get_drvdata(dev);
++
++ if (rcar_pci_read_reg(pcie, PMSR) &&
++ !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
++ return 0;
++
++ /* Re-establish the PCIe link */
++ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
++ return rcar_pcie_wait_for_dl(pcie);
++}
++
++static const struct dev_pm_ops rcar_pcie_pm_ops = {
++ .resume_noirq = rcar_pcie_resume_noirq,
++};
++
+ static struct platform_driver rcar_pcie_driver = {
+ .driver = {
+ .name = "rcar-pcie",
+ .of_match_table = rcar_pcie_of_match,
++ .pm = &rcar_pcie_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rcar_pcie_probe,
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index e91005d0f20c..3f77bab698ce 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -6266,8 +6266,7 @@ static int __init pci_setup(char *str)
+ } else if (!strncmp(str, "pcie_scan_all", 13)) {
+ pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
+ } else if (!strncmp(str, "disable_acs_redir=", 18)) {
+- disable_acs_redir_param =
+- kstrdup(str + 18, GFP_KERNEL);
++ disable_acs_redir_param = str + 18;
+ } else {
+ printk(KERN_ERR "PCI: Unknown option `%s'\n",
+ str);
+@@ -6278,3 +6277,19 @@ static int __init pci_setup(char *str)
+ return 0;
+ }
+ early_param("pci", pci_setup);
++
++/*
++ * 'disable_acs_redir_param' is initialized in pci_setup(), above, to point
++ * to data in the __initdata section which will be freed after the init
++ * sequence is complete. We can't allocate memory in pci_setup() because some
++ * architectures do not have any memory allocation service available during
++ * an early_param() call. So we allocate memory and copy the variable here
++ * before the init section is freed.
++ */
++static int __init pci_realloc_setup_params(void)
++{
++ disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
++
++ return 0;
++}
++pure_initcall(pci_realloc_setup_params);
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 224d88634115..17c4ed2021de 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -596,7 +596,7 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev);
+ void pci_aer_clear_device_status(struct pci_dev *dev);
+ #else
+ static inline void pci_no_aer(void) { }
+-static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
++static inline void pci_aer_init(struct pci_dev *d) { }
+ static inline void pci_aer_exit(struct pci_dev *d) { }
+ static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
+ static inline void pci_aer_clear_device_status(struct pci_dev *dev) { }
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 727e3c1ef9a4..38e7017478b5 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -196,6 +196,38 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
+ link->clkpm_capable = (blacklist) ? 0 : capable;
+ }
+
++static bool pcie_retrain_link(struct pcie_link_state *link)
++{
++ struct pci_dev *parent = link->pdev;
++ unsigned long start_jiffies;
++ u16 reg16;
++
++ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
++ reg16 |= PCI_EXP_LNKCTL_RL;
++ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
++ if (parent->clear_retrain_link) {
++ /*
++ * Due to an erratum in some devices the Retrain Link bit
++ * needs to be cleared again manually to allow the link
++ * training to succeed.
++ */
++ reg16 &= ~PCI_EXP_LNKCTL_RL;
++ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
++ }
++
++	/* Wait for link training to end; break out after the timeout expires */
++ start_jiffies = jiffies;
++ for (;;) {
++ pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
++ if (!(reg16 & PCI_EXP_LNKSTA_LT))
++ break;
++ if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
++ break;
++ msleep(1);
++ }
++ return !(reg16 & PCI_EXP_LNKSTA_LT);
++}
++
+ /*
+ * pcie_aspm_configure_common_clock: check if the 2 ends of a link
+ * could use common clock. If they are, configure them to use the
+@@ -205,7 +237,6 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
+ {
+ int same_clock = 1;
+ u16 reg16, parent_reg, child_reg[8];
+- unsigned long start_jiffies;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
+ /*
+@@ -263,21 +294,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
+ reg16 &= ~PCI_EXP_LNKCTL_CCC;
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+
+- /* Retrain link */
+- reg16 |= PCI_EXP_LNKCTL_RL;
+- pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+-
+- /* Wait for link training end. Break out after waiting for timeout */
+- start_jiffies = jiffies;
+- for (;;) {
+- pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
+- if (!(reg16 & PCI_EXP_LNKSTA_LT))
+- break;
+- if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
+- break;
+- msleep(1);
+- }
+- if (!(reg16 & PCI_EXP_LNKSTA_LT))
++ if (pcie_retrain_link(link))
+ return;
+
+ /* Training failed. Restore common clock configurations */
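
pcie_retrain_link() also encodes the common bounded-poll idiom: trigger the operation, then poll a status bit against a jiffies deadline and report whether it cleared. Reduced to its skeleton (read_lnksta is a hypothetical accessor standing in for the capability read):

	static bool wait_bit_clear(u16 (*read_lnksta)(void), unsigned long timeout)
	{
		unsigned long deadline = jiffies + timeout;
		u16 reg16;

		for (;;) {
			reg16 = read_lnksta();
			if (!(reg16 & PCI_EXP_LNKSTA_LT))
				break;			/* training finished */
			if (time_after(jiffies, deadline))
				break;			/* give up, report last state */
			msleep(1);
		}
		return !(reg16 & PCI_EXP_LNKSTA_LT);
	}
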
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index c46a3fcb341e..3bb9bdb884e5 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -535,16 +535,9 @@ static void pci_release_host_bridge_dev(struct device *dev)
+ kfree(to_pci_host_bridge(dev));
+ }
+
+-struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
++static void pci_init_host_bridge(struct pci_host_bridge *bridge)
+ {
+- struct pci_host_bridge *bridge;
+-
+- bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
+- if (!bridge)
+- return NULL;
+-
+ INIT_LIST_HEAD(&bridge->windows);
+- bridge->dev.release = pci_release_host_bridge_dev;
+
+ /*
+ * We assume we can manage these PCIe features. Some systems may
+@@ -557,6 +550,18 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
+ bridge->native_shpc_hotplug = 1;
+ bridge->native_pme = 1;
+ bridge->native_ltr = 1;
++}
++
++struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
++{
++ struct pci_host_bridge *bridge;
++
++ bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
++ if (!bridge)
++ return NULL;
++
++ pci_init_host_bridge(bridge);
++ bridge->dev.release = pci_release_host_bridge_dev;
+
+ return bridge;
+ }
+@@ -571,7 +576,7 @@ struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
+ if (!bridge)
+ return NULL;
+
+- INIT_LIST_HEAD(&bridge->windows);
++ pci_init_host_bridge(bridge);
+ bridge->dev.release = devm_pci_release_host_bridge_dev;
+
+ return bridge;
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index fba03a7d5c7f..c2c54dc4433e 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2245,6 +2245,23 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+
++/*
++ * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
++ * Link bit cleared after starting the link retrain process to allow this
++ * process to finish.
++ *
++ * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130. See also the
++ * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf.
++ */
++static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
++{
++ dev->clear_retrain_link = 1;
++ pci_info(dev, "Enable PCIe Retrain Link quirk\n");
++}
++DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link);
++DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link);
++DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link);
++
+ static void fixup_rev1_53c810(struct pci_dev *dev)
+ {
+ u32 class = dev->class;
+@@ -3408,6 +3425,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
+
+ /*
+ * Root port on some Cavium CN8xxx chips do not successfully complete a bus
+@@ -4903,6 +4921,7 @@ static void quirk_no_ats(struct pci_dev *pdev)
+
+ /* AMD Stoney platform GPU */
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
+ #endif /* CONFIG_PCI_ATS */
+
+ /* Freescale PCIe doesn't support MSI in RC mode */
+@@ -5120,3 +5139,61 @@ SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */
+ SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */
+ SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */
+ SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */
++
++/*
++ * On Lenovo Thinkpad P50 SKUs with a Nvidia Quadro M1000M, the BIOS does
++ * not always reset the secondary Nvidia GPU between reboots if the system
++ * is configured to use Hybrid Graphics mode. This results in the GPU
++ * being left in whatever state it was in during the *previous* boot, which
++ * causes spurious interrupts from the GPU, which in turn causes us to
++ * disable the wrong IRQ and end up breaking the touchpad. Unsurprisingly,
++ * this also completely breaks nouveau.
++ *
++ * Luckily, it seems a simple reset of the Nvidia GPU brings it back to a
++ * clean state and fixes all these issues.
++ *
++ * When the machine is configured in Dedicated display mode, the issue
++ * doesn't occur. Fortunately the GPU advertises NoReset+ when in this
++ * mode, so we can detect that and avoid resetting it.
++ */
++static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
++{
++ void __iomem *map;
++ int ret;
++
++ if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
++ pdev->subsystem_device != 0x222e ||
++ !pdev->reset_fn)
++ return;
++
++ if (pci_enable_device_mem(pdev))
++ return;
++
++ /*
++ * Based on nvkm_device_ctor() in
++ * drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
++ */
++ map = pci_iomap(pdev, 0, 0x23000);
++ if (!map) {
++ pci_err(pdev, "Can't map MMIO space\n");
++ goto out_disable;
++ }
++
++ /*
++ * Make sure the GPU looks like it's been POSTed before resetting
++ * it.
++ */
++ if (ioread32(map + 0x2240c) & 0x2) {
++ pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
++ ret = pci_reset_function(pdev);
++ if (ret < 0)
++ pci_err(pdev, "Failed to reset GPU: %d\n", ret);
++ }
++
++ iounmap(map);
++out_disable:
++ pci_disable_device(pdev);
++}
++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
++ PCI_CLASS_DISPLAY_VGA, 8,
++ quirk_reset_lenovo_thinkpad_p50_nvgpu);
+diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
+index 68ce4a082b9b..693acc167351 100644
+--- a/drivers/phy/ti/phy-ti-pipe3.c
++++ b/drivers/phy/ti/phy-ti-pipe3.c
+@@ -303,7 +303,7 @@ static void ti_pipe3_calibrate(struct ti_pipe3 *phy)
+
+ val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY);
+ val &= ~(INTERFACE_MASK | LOSD_MASK | MEM_PLLDIV);
+- val = (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT);
++ val |= (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT);
+ ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY, val);
+
+ val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_DIGITAL_MODES);
+diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
+index 08d5037fd052..6887870ba32c 100644
+--- a/drivers/power/supply/cpcap-battery.c
++++ b/drivers/power/supply/cpcap-battery.c
+@@ -221,6 +221,9 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
+ int avg_current;
+ u32 cc_lsb;
+
++ if (!divider)
++ return 0;
++
+ sample &= 0xffffff; /* 24-bits, unsigned */
+ offset &= 0x7ff; /* 10-bits, signed */
+
+diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
+index dce24f596160..5358a80d854f 100644
+--- a/drivers/power/supply/power_supply_sysfs.c
++++ b/drivers/power/supply/power_supply_sysfs.c
+@@ -383,15 +383,11 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
+ char *prop_buf;
+ char *attrname;
+
+- dev_dbg(dev, "uevent\n");
+-
+ if (!psy || !psy->desc) {
+ dev_dbg(dev, "No power supply yet\n");
+ return ret;
+ }
+
+- dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->desc->name);
+-
+ ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->desc->name);
+ if (ret)
+ return ret;
+@@ -427,8 +423,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
+ goto out;
+ }
+
+- dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf);
+-
+ ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf);
+ kfree(attrname);
+ if (ret)
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index e2caf11598c7..fb9fe26fd0fa 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -3360,15 +3360,12 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
+
+ /* for not coupled regulators this will just set the voltage */
+ ret = regulator_balance_voltage(rdev, state);
+- if (ret < 0)
+- goto out2;
++ if (ret < 0) {
++ voltage->min_uV = old_min_uV;
++ voltage->max_uV = old_max_uV;
++ }
+
+ out:
+- return 0;
+-out2:
+- voltage->min_uV = old_min_uV;
+- voltage->max_uV = old_max_uV;
+-
+ return ret;
+ }
+
+diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
+index be1e9e52b2a0..cbccb9b38503 100644
+--- a/drivers/staging/media/imx/imx-media-csi.c
++++ b/drivers/staging/media/imx/imx-media-csi.c
+@@ -153,9 +153,10 @@ static inline bool requires_passthrough(struct v4l2_fwnode_endpoint *ep,
+ /*
+ * Parses the fwnode endpoint from the source pad of the entity
+ * connected to this CSI. This will either be the entity directly
+- * upstream from the CSI-2 receiver, or directly upstream from the
+- * video mux. The endpoint is needed to determine the bus type and
+- * bus config coming into the CSI.
++ * upstream from the CSI-2 receiver, directly upstream from the
++ * video mux, or directly upstream from the CSI itself. The endpoint
++ * is needed to determine the bus type and bus config coming into
++ * the CSI.
+ */
+ static int csi_get_upstream_endpoint(struct csi_priv *priv,
+ struct v4l2_fwnode_endpoint *ep)
+@@ -171,7 +172,8 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
+ if (!priv->src_sd)
+ return -EPIPE;
+
+- src = &priv->src_sd->entity;
++ sd = priv->src_sd;
++ src = &sd->entity;
+
+ if (src->function == MEDIA_ENT_F_VID_MUX) {
+ /*
+@@ -185,6 +187,14 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
+ src = &sd->entity;
+ }
+
++ /*
++ * If the source is neither the video mux nor the CSI-2 receiver,
++ * get the source pad directly upstream from CSI itself.
++ */
++ if (src->function != MEDIA_ENT_F_VID_MUX &&
++ sd->grp_id != IMX_MEDIA_GRP_ID_CSI2)
++ src = &priv->sd.entity;
++
+ /* get source pad of entity directly upstream from src */
+ pad = imx_media_find_upstream_pad(priv->md, src, 0);
+ if (IS_ERR(pad))
+diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
+index a01327f6e045..2da81a5af274 100644
+--- a/drivers/staging/media/imx/imx-media-of.c
++++ b/drivers/staging/media/imx/imx-media-of.c
+@@ -143,15 +143,18 @@ int imx_media_create_csi_of_links(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *csi)
+ {
+ struct device_node *csi_np = csi->dev->of_node;
+- struct fwnode_handle *fwnode, *csi_ep;
+- struct v4l2_fwnode_link link;
+ struct device_node *ep;
+- int ret;
+-
+- link.local_node = of_fwnode_handle(csi_np);
+- link.local_port = CSI_SINK_PAD;
+
+ for_each_child_of_node(csi_np, ep) {
++ struct fwnode_handle *fwnode, *csi_ep;
++ struct v4l2_fwnode_link link;
++ int ret;
++
++ memset(&link, 0, sizeof(link));
++
++ link.local_node = of_fwnode_handle(csi_np);
++ link.local_port = CSI_SINK_PAD;
++
+ csi_ep = of_fwnode_handle(ep);
+
+ fwnode = fwnode_graph_get_remote_endpoint(csi_ep);
+diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
+index ba906876cc45..fd02e8a4841d 100644
+--- a/drivers/video/fbdev/efifb.c
++++ b/drivers/video/fbdev/efifb.c
+@@ -476,8 +476,12 @@ static int efifb_probe(struct platform_device *dev)
+ * If the UEFI memory map covers the efifb region, we may only
+ * remap it using the attributes the memory map prescribes.
+ */
+- mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
+- mem_flags &= md.attribute;
++ md.attribute &= EFI_MEMORY_UC | EFI_MEMORY_WC |
++ EFI_MEMORY_WT | EFI_MEMORY_WB;
++ if (md.attribute) {
++ mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
++ mem_flags &= md.attribute;
++ }
+ }
+ if (mem_flags & EFI_MEMORY_WC)
+ info->screen_base = ioremap_wc(efifb_fix.smem_start,
+diff --git a/drivers/video/fbdev/sm712.h b/drivers/video/fbdev/sm712.h
+index aad1cc4be34a..c7ebf03b8d53 100644
+--- a/drivers/video/fbdev/sm712.h
++++ b/drivers/video/fbdev/sm712.h
+@@ -15,14 +15,10 @@
+
+ #define FB_ACCEL_SMI_LYNX 88
+
+-#define SCREEN_X_RES 1024
+-#define SCREEN_Y_RES 600
+-#define SCREEN_BPP 16
+-
+-/*Assume SM712 graphics chip has 4MB VRAM */
+-#define SM712_VIDEOMEMORYSIZE 0x00400000
+-/*Assume SM722 graphics chip has 8MB VRAM */
+-#define SM722_VIDEOMEMORYSIZE 0x00800000
++#define SCREEN_X_RES 1024
++#define SCREEN_Y_RES_PC 768
++#define SCREEN_Y_RES_NETBOOK 600
++#define SCREEN_BPP 16
+
+ #define dac_reg (0x3c8)
+ #define dac_val (0x3c9)
+diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
+index 502d0de2feec..f1dcc6766d1e 100644
+--- a/drivers/video/fbdev/sm712fb.c
++++ b/drivers/video/fbdev/sm712fb.c
+@@ -530,6 +530,65 @@ static const struct modeinit vgamode[] = {
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+ },
+ },
++ { /* 1024 x 768 16Bpp 60Hz */
++ 1024, 768, 16, 60,
++ /* Init_MISC */
++ 0xEB,
++ { /* Init_SR0_SR4 */
++ 0x03, 0x01, 0x0F, 0x03, 0x0E,
++ },
++ { /* Init_SR10_SR24 */
++ 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
++ 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0xC4, 0x30, 0x02, 0x01, 0x01,
++ },
++ { /* Init_SR30_SR75 */
++ 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
++ 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
++ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
++ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
++ 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
++ 0x0F, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
++ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
++ 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
++ 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
++ },
++ { /* Init_SR80_SR93 */
++ 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
++ 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
++ 0x00, 0x00, 0x00, 0x00,
++ },
++ { /* Init_SRA0_SRAF */
++ 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
++ 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
++ },
++ { /* Init_GR00_GR08 */
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
++ 0xFF,
++ },
++ { /* Init_AR00_AR14 */
++ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
++ 0x41, 0x00, 0x0F, 0x00, 0x00,
++ },
++ { /* Init_CR00_CR18 */
++ 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
++ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
++ 0xFF,
++ },
++ { /* Init_CR30_CR4D */
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
++ 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
++ 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
++ 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
++ },
++ { /* Init_CR90_CRA7 */
++ 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
++ 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
++ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
++ },
++ },
+ { /* mode#5: 1024 x 768 24Bpp 60Hz */
+ 1024, 768, 24, 60,
+ /* Init_MISC */
+@@ -827,67 +886,80 @@ static inline unsigned int chan_to_field(unsigned int chan,
+
+ static int smtc_blank(int blank_mode, struct fb_info *info)
+ {
++ struct smtcfb_info *sfb = info->par;
++
+ /* clear DPMS setting */
+ switch (blank_mode) {
+ case FB_BLANK_UNBLANK:
+ /* Screen On: HSync: On, VSync : On */
++
++ switch (sfb->chip_id) {
++ case 0x710:
++ case 0x712:
++ smtc_seqw(0x6a, 0x16);
++ smtc_seqw(0x6b, 0x02);
++ break;
++ case 0x720:
++ smtc_seqw(0x6a, 0x0d);
++ smtc_seqw(0x6b, 0x02);
++ break;
++ }
++
++ smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
+ smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
+- smtc_seqw(0x6a, 0x16);
+- smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77));
+ smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
+- smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
+- smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
+ smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03));
++ smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
+ break;
+ case FB_BLANK_NORMAL:
+ /* Screen Off: HSync: On, VSync : On Soft blank */
++ smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
++ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
++ smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
+ smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
++ smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
+ smtc_seqw(0x6a, 0x16);
+ smtc_seqw(0x6b, 0x02);
+- smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
+- smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
+- smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
+- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ /* Screen On: HSync: On, VSync : Off */
++ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
++ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
++ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
+ smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+- smtc_seqw(0x6a, 0x0c);
+- smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
++ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+ smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
+- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
+- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
++ smtc_seqw(0x6a, 0x0c);
++ smtc_seqw(0x6b, 0x02);
+ break;
+ case FB_BLANK_HSYNC_SUSPEND:
+ /* Screen On: HSync: Off, VSync : On */
++ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
++ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
++ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
+ smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+- smtc_seqw(0x6a, 0x0c);
+- smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
++ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+ smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
+- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
+- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
++ smtc_seqw(0x6a, 0x0c);
++ smtc_seqw(0x6b, 0x02);
+ break;
+ case FB_BLANK_POWERDOWN:
+ /* Screen On: HSync: Off, VSync : Off */
++ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
++ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
++ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
+ smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+- smtc_seqw(0x6a, 0x0c);
+- smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
++ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+ smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
+- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
+- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
++ smtc_seqw(0x6a, 0x0c);
++ smtc_seqw(0x6b, 0x02);
+ break;
+ default:
+ return -EINVAL;
+@@ -1145,8 +1217,10 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
+
+ /* init SEQ register SR30 - SR75 */
+ for (i = 0; i < SIZE_SR30_SR75; i++)
+- if ((i + 0x30) != 0x62 && (i + 0x30) != 0x6a &&
+- (i + 0x30) != 0x6b)
++ if ((i + 0x30) != 0x30 && (i + 0x30) != 0x62 &&
++ (i + 0x30) != 0x6a && (i + 0x30) != 0x6b &&
++ (i + 0x30) != 0x70 && (i + 0x30) != 0x71 &&
++ (i + 0x30) != 0x74 && (i + 0x30) != 0x75)
+ smtc_seqw(i + 0x30,
+ vgamode[j].init_sr30_sr75[i]);
+
+@@ -1171,8 +1245,12 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
+ smtc_crtcw(i, vgamode[j].init_cr00_cr18[i]);
+
+ /* init CRTC register CR30 - CR4D */
+- for (i = 0; i < SIZE_CR30_CR4D; i++)
++ for (i = 0; i < SIZE_CR30_CR4D; i++) {
++ if ((i + 0x30) >= 0x3B && (i + 0x30) <= 0x3F)
++ /* side-effect, don't write to CR3B-CR3F */
++ continue;
+ smtc_crtcw(i + 0x30, vgamode[j].init_cr30_cr4d[i]);
++ }
+
+ /* init CRTC register CR90 - CRA7 */
+ for (i = 0; i < SIZE_CR90_CRA7; i++)
+@@ -1323,6 +1401,11 @@ static int smtc_map_smem(struct smtcfb_info *sfb,
+ {
+ sfb->fb->fix.smem_start = pci_resource_start(pdev, 0);
+
++ if (sfb->chip_id == 0x720)
++ /* on SM720, the framebuffer starts at the 1 MB offset */
++ sfb->fb->fix.smem_start += 0x00200000;
++
++ /* XXX: is it safe for SM720 on Big-Endian? */
+ if (sfb->fb->var.bits_per_pixel == 32)
+ sfb->fb->fix.smem_start += big_addr;
+
+@@ -1360,12 +1443,82 @@ static inline void sm7xx_init_hw(void)
+ outb_p(0x11, 0x3c5);
+ }
+
++static u_long sm7xx_vram_probe(struct smtcfb_info *sfb)
++{
++ u8 vram;
++
++ switch (sfb->chip_id) {
++ case 0x710:
++ case 0x712:
++ /*
++ * Assume SM712 graphics chip has 4MB VRAM.
++ *
++ * FIXME: SM712 can have 2MB VRAM, which is used on earlier
++ * laptops, such as IBM Thinkpad 240X. This driver would
++ * probably crash on those machines. If anyone gets one of
++ * those and is willing to help, run "git blame" and send me
++ * an E-mail.
++ */
++ return 0x00400000;
++ case 0x720:
++ outb_p(0x76, 0x3c4);
++ vram = inb_p(0x3c5) >> 6;
++
++ if (vram == 0x00)
++ return 0x00800000; /* 8 MB */
++ else if (vram == 0x01)
++ return 0x01000000; /* 16 MB */
++ else if (vram == 0x02)
++ return 0x00400000; /* illegal, fallback to 4 MB */
++ else if (vram == 0x03)
++ return 0x00400000; /* 4 MB */
++ }
++ return 0; /* unknown hardware */
++}
++
++static void sm7xx_resolution_probe(struct smtcfb_info *sfb)
++{
++ /* get mode parameter from smtc_scr_info */
++ if (smtc_scr_info.lfb_width != 0) {
++ sfb->fb->var.xres = smtc_scr_info.lfb_width;
++ sfb->fb->var.yres = smtc_scr_info.lfb_height;
++ sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
++ goto final;
++ }
++
++ /*
++ * No parameter, default resolution is 1024x768-16.
++ *
++	 * FIXME: earlier laptops, such as the IBM Thinkpad 240X, have an
++	 * 800x600 panel; see also the comments about the Thinkpad 240X above.
++ */
++ sfb->fb->var.xres = SCREEN_X_RES;
++ sfb->fb->var.yres = SCREEN_Y_RES_PC;
++ sfb->fb->var.bits_per_pixel = SCREEN_BPP;
++
++#ifdef CONFIG_MIPS
++ /*
++ * Loongson MIPS netbooks use 1024x600 LCD panels, which is the original
++ * target platform of this driver, but nearly all old x86 laptops have
++	 * 1024x768. Driving a 768-line panel with 600-line timings would
++	 * partially garble the display, so we don't want that. But the two
++	 * panel types cannot be distinguished reliably.
++ *
++ * So we change the default to 768, but keep 600 as-is on MIPS.
++ */
++ sfb->fb->var.yres = SCREEN_Y_RES_NETBOOK;
++#endif
++
++final:
++ big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
++}
++
+ static int smtcfb_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+ {
+ struct smtcfb_info *sfb;
+ struct fb_info *info;
+- u_long smem_size = 0x00800000; /* default 8MB */
++ u_long smem_size;
+ int err;
+ unsigned long mmio_base;
+
+@@ -1405,29 +1558,19 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
+
+ sm7xx_init_hw();
+
+- /* get mode parameter from smtc_scr_info */
+- if (smtc_scr_info.lfb_width != 0) {
+- sfb->fb->var.xres = smtc_scr_info.lfb_width;
+- sfb->fb->var.yres = smtc_scr_info.lfb_height;
+- sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
+- } else {
+- /* default resolution 1024x600 16bit mode */
+- sfb->fb->var.xres = SCREEN_X_RES;
+- sfb->fb->var.yres = SCREEN_Y_RES;
+- sfb->fb->var.bits_per_pixel = SCREEN_BPP;
+- }
+-
+- big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
+ /* Map address and memory detection */
+ mmio_base = pci_resource_start(pdev, 0);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id);
+
++ smem_size = sm7xx_vram_probe(sfb);
++ dev_info(&pdev->dev, "%lu MiB of VRAM detected.\n",
++ smem_size / 1048576);
++
+ switch (sfb->chip_id) {
+ case 0x710:
+ case 0x712:
+ sfb->fb->fix.mmio_start = mmio_base + 0x00400000;
+ sfb->fb->fix.mmio_len = 0x00400000;
+- smem_size = SM712_VIDEOMEMORYSIZE;
+ sfb->lfb = ioremap(mmio_base, mmio_addr);
+ if (!sfb->lfb) {
+ dev_err(&pdev->dev,
+@@ -1459,8 +1602,7 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
+ case 0x720:
+ sfb->fb->fix.mmio_start = mmio_base;
+ sfb->fb->fix.mmio_len = 0x00200000;
+- smem_size = SM722_VIDEOMEMORYSIZE;
+- sfb->dp_regs = ioremap(mmio_base, 0x00a00000);
++ sfb->dp_regs = ioremap(mmio_base, 0x00200000 + smem_size);
+ sfb->lfb = sfb->dp_regs + 0x00200000;
+ sfb->mmio = (smtc_regbaseaddress =
+ sfb->dp_regs + 0x000c0000);
+@@ -1477,6 +1619,9 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
+ goto failed_fb;
+ }
+
++ /* probe and decide resolution */
++ sm7xx_resolution_probe(sfb);
++
+ /* can support 32 bpp */
+ if (sfb->fb->var.bits_per_pixel == 15)
+ sfb->fb->var.bits_per_pixel = 16;
+@@ -1487,7 +1632,11 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
+ if (err)
+ goto failed;
+
+- smtcfb_setmode(sfb);
++ /*
++ * The screen would be temporarily garbled when sm712fb takes over
++ * vesafb or VGA text mode. Zero the framebuffer.
++ */
++ memset_io(sfb->lfb, 0, sfb->fb->fix.smem_len);
+
+ err = register_framebuffer(info);
+ if (err < 0)
+diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
+index 1d034dddc556..5a0d6fb02bbc 100644
+--- a/drivers/video/fbdev/udlfb.c
++++ b/drivers/video/fbdev/udlfb.c
+@@ -594,8 +594,7 @@ static int dlfb_render_hline(struct dlfb_data *dlfb, struct urb **urb_ptr,
+ return 0;
+ }
+
+-static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
+- int width, int height, char *data)
++static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y, int width, int height)
+ {
+ int i, ret;
+ char *cmd;
+@@ -607,21 +606,29 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
+
+ start_cycles = get_cycles();
+
++ mutex_lock(&dlfb->render_mutex);
++
+ aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
+ width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
+ x = aligned_x;
+
+ if ((width <= 0) ||
+ (x + width > dlfb->info->var.xres) ||
+- (y + height > dlfb->info->var.yres))
+- return -EINVAL;
++ (y + height > dlfb->info->var.yres)) {
++ ret = -EINVAL;
++ goto unlock_ret;
++ }
+
+- if (!atomic_read(&dlfb->usb_active))
+- return 0;
++ if (!atomic_read(&dlfb->usb_active)) {
++ ret = 0;
++ goto unlock_ret;
++ }
+
+ urb = dlfb_get_urb(dlfb);
+- if (!urb)
+- return 0;
++ if (!urb) {
++ ret = 0;
++ goto unlock_ret;
++ }
+ cmd = urb->transfer_buffer;
+
+ for (i = y; i < y + height ; i++) {
+@@ -641,7 +648,7 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
+ *cmd++ = 0xAF;
+ /* Send partial buffer remaining before exiting */
+ len = cmd - (char *) urb->transfer_buffer;
+- ret = dlfb_submit_urb(dlfb, urb, len);
++ dlfb_submit_urb(dlfb, urb, len);
+ bytes_sent += len;
+ } else
+ dlfb_urb_completion(urb);
+@@ -655,7 +662,55 @@ error:
+ >> 10)), /* Kcycles */
+ &dlfb->cpu_kcycles_used);
+
+- return 0;
++ ret = 0;
++
++unlock_ret:
++ mutex_unlock(&dlfb->render_mutex);
++ return ret;
++}
++
++static void dlfb_init_damage(struct dlfb_data *dlfb)
++{
++ dlfb->damage_x = INT_MAX;
++ dlfb->damage_x2 = 0;
++ dlfb->damage_y = INT_MAX;
++ dlfb->damage_y2 = 0;
++}
++
++static void dlfb_damage_work(struct work_struct *w)
++{
++ struct dlfb_data *dlfb = container_of(w, struct dlfb_data, damage_work);
++ int x, x2, y, y2;
++
++ spin_lock_irq(&dlfb->damage_lock);
++ x = dlfb->damage_x;
++ x2 = dlfb->damage_x2;
++ y = dlfb->damage_y;
++ y2 = dlfb->damage_y2;
++ dlfb_init_damage(dlfb);
++ spin_unlock_irq(&dlfb->damage_lock);
++
++ if (x < x2 && y < y2)
++ dlfb_handle_damage(dlfb, x, y, x2 - x, y2 - y);
++}
++
++static void dlfb_offload_damage(struct dlfb_data *dlfb, int x, int y, int width, int height)
++{
++ unsigned long flags;
++ int x2 = x + width;
++ int y2 = y + height;
++
++ if (x >= x2 || y >= y2)
++ return;
++
++ spin_lock_irqsave(&dlfb->damage_lock, flags);
++ dlfb->damage_x = min(x, dlfb->damage_x);
++ dlfb->damage_x2 = max(x2, dlfb->damage_x2);
++ dlfb->damage_y = min(y, dlfb->damage_y);
++ dlfb->damage_y2 = max(y2, dlfb->damage_y2);
++ spin_unlock_irqrestore(&dlfb->damage_lock, flags);
++
++ schedule_work(&dlfb->damage_work);
+ }
+
+ /*
+@@ -679,7 +734,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
+ (u32)info->var.yres);
+
+ dlfb_handle_damage(dlfb, 0, start, info->var.xres,
+- lines, info->screen_base);
++ lines);
+ }
+
+ return result;
+@@ -694,8 +749,8 @@ static void dlfb_ops_copyarea(struct fb_info *info,
+
+ sys_copyarea(info, area);
+
+- dlfb_handle_damage(dlfb, area->dx, area->dy,
+- area->width, area->height, info->screen_base);
++ dlfb_offload_damage(dlfb, area->dx, area->dy,
++ area->width, area->height);
+ }
+
+ static void dlfb_ops_imageblit(struct fb_info *info,
+@@ -705,8 +760,8 @@ static void dlfb_ops_imageblit(struct fb_info *info,
+
+ sys_imageblit(info, image);
+
+- dlfb_handle_damage(dlfb, image->dx, image->dy,
+- image->width, image->height, info->screen_base);
++ dlfb_offload_damage(dlfb, image->dx, image->dy,
++ image->width, image->height);
+ }
+
+ static void dlfb_ops_fillrect(struct fb_info *info,
+@@ -716,8 +771,8 @@ static void dlfb_ops_fillrect(struct fb_info *info,
+
+ sys_fillrect(info, rect);
+
+- dlfb_handle_damage(dlfb, rect->dx, rect->dy, rect->width,
+- rect->height, info->screen_base);
++ dlfb_offload_damage(dlfb, rect->dx, rect->dy, rect->width,
++ rect->height);
+ }
+
+ /*
+@@ -739,17 +794,19 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
+ int bytes_identical = 0;
+ int bytes_rendered = 0;
+
++ mutex_lock(&dlfb->render_mutex);
++
+ if (!fb_defio)
+- return;
++ goto unlock_ret;
+
+ if (!atomic_read(&dlfb->usb_active))
+- return;
++ goto unlock_ret;
+
+ start_cycles = get_cycles();
+
+ urb = dlfb_get_urb(dlfb);
+ if (!urb)
+- return;
++ goto unlock_ret;
+
+ cmd = urb->transfer_buffer;
+
+@@ -782,6 +839,8 @@ error:
+ atomic_add(((unsigned int) ((end_cycles - start_cycles)
+ >> 10)), /* Kcycles */
+ &dlfb->cpu_kcycles_used);
++unlock_ret:
++ mutex_unlock(&dlfb->render_mutex);
+ }
+
+ static int dlfb_get_edid(struct dlfb_data *dlfb, char *edid, int len)
+@@ -859,8 +918,7 @@ static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
+ if (area.y > info->var.yres)
+ area.y = info->var.yres;
+
+- dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h,
+- info->screen_base);
++ dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h);
+ }
+
+ return 0;
+@@ -942,6 +1000,10 @@ static void dlfb_ops_destroy(struct fb_info *info)
+ {
+ struct dlfb_data *dlfb = info->par;
+
++ cancel_work_sync(&dlfb->damage_work);
++
++ mutex_destroy(&dlfb->render_mutex);
++
+ if (info->cmap.len != 0)
+ fb_dealloc_cmap(&info->cmap);
+ if (info->monspecs.modedb)
+@@ -1065,8 +1127,7 @@ static int dlfb_ops_set_par(struct fb_info *info)
+ pix_framebuffer[i] = 0x37e6;
+ }
+
+- dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres,
+- info->screen_base);
++ dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres);
+
+ return 0;
+ }
+@@ -1639,6 +1700,11 @@ static int dlfb_usb_probe(struct usb_interface *intf,
+ dlfb->ops = dlfb_ops;
+ info->fbops = &dlfb->ops;
+
++ mutex_init(&dlfb->render_mutex);
++ dlfb_init_damage(dlfb);
++ spin_lock_init(&dlfb->damage_lock);
++ INIT_WORK(&dlfb->damage_work, dlfb_damage_work);
++
+ INIT_LIST_HEAD(&info->modelist);
+
+ if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
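The udlfb hunks above stop rendering synchronously from every blit: fillrect, copyarea and imageblit now only union their rectangle into a shared damage bounding box under damage_lock and kick damage_work, which snapshots and resets the box before rendering once. A minimal user-space sketch of that accumulate-and-flush pattern, with a pthread mutex standing in for the driver's spinlock/workqueue pair (all names here are hypothetical, not the driver's API):

#include <limits.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t damage_lock = PTHREAD_MUTEX_INITIALIZER;
static int dx = INT_MAX, dy = INT_MAX, dx2, dy2;

/* blit paths call this instead of rendering themselves */
static void offload_damage(int x, int y, int w, int h)
{
	pthread_mutex_lock(&damage_lock);
	if (x < dx)      dx  = x;
	if (y < dy)      dy  = y;
	if (x + w > dx2) dx2 = x + w;
	if (y + h > dy2) dy2 = y + h;
	pthread_mutex_unlock(&damage_lock);
	/* in the driver: schedule_work(&dlfb->damage_work) */
}

/* the worker snapshots and resets the box, then renders once */
static void damage_work(void)
{
	int x, y, x2, y2;

	pthread_mutex_lock(&damage_lock);
	x = dx; y = dy; x2 = dx2; y2 = dy2;
	dx = dy = INT_MAX;
	dx2 = dy2 = 0;
	pthread_mutex_unlock(&damage_lock);

	if (x < x2 && y < y2)
		printf("render %dx%d at (%d,%d)\n", x2 - x, y2 - y, x, y);
}

int main(void)
{
	offload_damage(10, 10, 20, 20);
	offload_damage(5, 15, 10, 10);
	damage_work();	/* prints: render 25x20 at (5,10) */
	return 0;
}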
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index da2cd8e89062..950919411460 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -832,6 +832,12 @@ static void ceph_umount_begin(struct super_block *sb)
+ return;
+ }
+
++static int ceph_remount(struct super_block *sb, int *flags, char *data)
++{
++ sync_filesystem(sb);
++ return 0;
++}
++
+ static const struct super_operations ceph_super_ops = {
+ .alloc_inode = ceph_alloc_inode,
+ .destroy_inode = ceph_destroy_inode,
+@@ -839,6 +845,7 @@ static const struct super_operations ceph_super_ops = {
+ .drop_inode = ceph_drop_inode,
+ .sync_fs = ceph_sync_fs,
+ .put_super = ceph_put_super,
++ .remount_fs = ceph_remount,
+ .show_options = ceph_show_options,
+ .statfs = ceph_statfs,
+ .umount_begin = ceph_umount_begin,
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index d5434ac0571b..105ddbad00e5 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -2652,26 +2652,28 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache)
+ {
+ char message[5] = {0};
++ unsigned int new_oplock = 0;
+
+ oplock &= 0xFF;
+ if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
+ return;
+
+- cinode->oplock = 0;
+ if (oplock & SMB2_LEASE_READ_CACHING_HE) {
+- cinode->oplock |= CIFS_CACHE_READ_FLG;
++ new_oplock |= CIFS_CACHE_READ_FLG;
+ strcat(message, "R");
+ }
+ if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
+- cinode->oplock |= CIFS_CACHE_HANDLE_FLG;
++ new_oplock |= CIFS_CACHE_HANDLE_FLG;
+ strcat(message, "H");
+ }
+ if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
+- cinode->oplock |= CIFS_CACHE_WRITE_FLG;
++ new_oplock |= CIFS_CACHE_WRITE_FLG;
+ strcat(message, "W");
+ }
+- if (!cinode->oplock)
+- strcat(message, "None");
++ if (!new_oplock)
++ strncpy(message, "None", sizeof(message));
++
++ cinode->oplock = new_oplock;
+ cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
+ &cinode->vfs_inode);
+ }
+diff --git a/fs/dcache.c b/fs/dcache.c
+index aac41adf4743..c663c602f9ef 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -344,7 +344,7 @@ static void dentry_free(struct dentry *dentry)
+ }
+ }
+ /* if dentry was never visible to RCU, immediate free is OK */
+- if (!(dentry->d_flags & DCACHE_RCUACCESS))
++ if (dentry->d_flags & DCACHE_NORCU)
+ __d_free(&dentry->d_u.d_rcu);
+ else
+ call_rcu(&dentry->d_u.d_rcu, __d_free);
+@@ -1701,7 +1701,6 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
+ struct dentry *dentry = __d_alloc(parent->d_sb, name);
+ if (!dentry)
+ return NULL;
+- dentry->d_flags |= DCACHE_RCUACCESS;
+ spin_lock(&parent->d_lock);
+ /*
+ * don't need child lock because it is not subject
+@@ -1726,7 +1725,7 @@ struct dentry *d_alloc_cursor(struct dentry * parent)
+ {
+ struct dentry *dentry = d_alloc_anon(parent->d_sb);
+ if (dentry) {
+- dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
++ dentry->d_flags |= DCACHE_DENTRY_CURSOR;
+ dentry->d_parent = dget(parent);
+ }
+ return dentry;
+@@ -1739,10 +1738,17 @@ struct dentry *d_alloc_cursor(struct dentry * parent)
+ *
+ * For a filesystem that just pins its dentries in memory and never
+ * performs lookups at all, return an unhashed IS_ROOT dentry.
++ * This is used for pipes, sockets et al. - the stuff that should
++ * never be anyone's children or parents. Unlike all other
++ * dentries, these will not have an RCU delay between dropping the
++ * last reference and freeing them.
+ */
+ struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
+ {
+- return __d_alloc(sb, name);
++ struct dentry *dentry = __d_alloc(sb, name);
++ if (likely(dentry))
++ dentry->d_flags |= DCACHE_NORCU;
++ return dentry;
+ }
+ EXPORT_SYMBOL(d_alloc_pseudo);
+
+@@ -1911,12 +1917,10 @@ struct dentry *d_make_root(struct inode *root_inode)
+
+ if (root_inode) {
+ res = d_alloc_anon(root_inode->i_sb);
+- if (res) {
+- res->d_flags |= DCACHE_RCUACCESS;
++ if (res)
+ d_instantiate(res, root_inode);
+- } else {
++ else
+ iput(root_inode);
+- }
+ }
+ return res;
+ }
+@@ -2781,9 +2785,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
+ copy_name(dentry, target);
+ target->d_hash.pprev = NULL;
+ dentry->d_parent->d_lockref.count++;
+- if (dentry == old_parent)
+- dentry->d_flags |= DCACHE_RCUACCESS;
+- else
++ if (dentry != old_parent) /* wasn't IS_ROOT */
+ WARN_ON(!--old_parent->d_lockref.count);
+ } else {
+ target->d_parent = old_parent;
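The dcache hunks above invert the old default: an RCU grace period before freeing is now assumed for every dentry, and only callers that can guarantee a dentry was never RCU-visible (d_alloc_pseudo()) opt out with DCACHE_NORCU. A user-space analogue of the dentry_free() dispatch, with a stub standing in for call_rcu():

#include <stdio.h>
#include <stdlib.h>

#define DCACHE_NORCU 0x40000000	/* value from the dcache.h hunk */

struct dentry { unsigned int d_flags; };

/* stand-in for call_rcu(): in the kernel the free happens only after
 * a grace period, so lockless RCU walkers can still touch the dentry */
static void deferred_free(struct dentry *d)
{
	puts("free deferred past a grace period");
	free(d);
}

static void dentry_free(struct dentry *d)
{
	if (d->d_flags & DCACHE_NORCU)
		free(d);		/* never RCU-visible: free now */
	else
		deferred_free(d);	/* the new default for everyone else */
}

int main(void)
{
	struct dentry *pseudo = calloc(1, sizeof(*pseudo));

	pseudo->d_flags |= DCACHE_NORCU;	/* pipe/socket-style dentry */
	dentry_free(pseudo);
	return 0;
}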
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index a59c16bd90ac..d2926ac44f83 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -181,7 +181,9 @@ void fuse_finish_open(struct inode *inode, struct file *file)
+ file->f_op = &fuse_direct_io_file_operations;
+ if (!(ff->open_flags & FOPEN_KEEP_CACHE))
+ invalidate_inode_pages2(inode->i_mapping);
+- if (ff->open_flags & FOPEN_NONSEEKABLE)
++ if (ff->open_flags & FOPEN_STREAM)
++ stream_open(inode, file);
++ else if (ff->open_flags & FOPEN_NONSEEKABLE)
+ nonseekable_open(inode, file);
+ if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
+ struct fuse_inode *fi = get_fuse_inode(inode);
+@@ -1530,7 +1532,7 @@ __acquires(fc->lock)
+ {
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_inode *fi = get_fuse_inode(inode);
+- size_t crop = i_size_read(inode);
++ loff_t crop = i_size_read(inode);
+ struct fuse_req *req;
+
+ while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
+@@ -2987,6 +2989,13 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
+ }
+ }
+
++ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
++ offset + length > i_size_read(inode)) {
++ err = inode_newsize_ok(inode, offset + length);
++ if (err)
++ return err;
++ }
++
+ if (!(mode & FALLOC_FL_KEEP_SIZE))
+ set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+
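fuse_finish_open() above gains a third case: FOPEN_STREAM (defined in the uapi hunk later in this patch) marks the file as having no file position at all via stream_open(), which is stronger than FOPEN_NONSEEKABLE, where a position exists but seeking is denied. A small user-space sketch of the dispatch, using the flag values from this patch:

#include <stdio.h>

#define FOPEN_NONSEEKABLE (1 << 2)	/* values from the uapi hunk */
#define FOPEN_STREAM      (1 << 4)

static void finish_open(unsigned int open_flags)
{
	if (open_flags & FOPEN_STREAM)
		puts("stream_open(): no file position at all");
	else if (open_flags & FOPEN_NONSEEKABLE)
		puts("nonseekable_open(): position exists, seeking denied");
	else
		puts("ordinary seekable open");
}

int main(void)
{
	finish_open(FOPEN_STREAM);
	finish_open(FOPEN_NONSEEKABLE);
	return 0;
}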
+diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
+index 61f46facb39c..b3e8ba3bd654 100644
+--- a/fs/nfs/filelayout/filelayout.c
++++ b/fs/nfs/filelayout/filelayout.c
+@@ -904,7 +904,7 @@ fl_pnfs_update_layout(struct inode *ino,
+ status = filelayout_check_deviceid(lo, fl, gfp_flags);
+ if (status) {
+ pnfs_put_lseg(lseg);
+- lseg = ERR_PTR(status);
++ lseg = NULL;
+ }
+ out:
+ return lseg;
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 02488b50534a..6999e870baa9 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -159,6 +159,10 @@ int nfs40_discover_server_trunking(struct nfs_client *clp,
+ /* Sustain the lease, even if it's empty. If the clientid4
+ * goes stale it's of no use for trunking discovery. */
+ nfs4_schedule_state_renewal(*result);
++
++ /* If the client state need to recover, do it. */
++ if (clp->cl_state)
++ nfs4_schedule_state_manager(clp);
+ }
+ out:
+ return status;
+diff --git a/fs/nsfs.c b/fs/nsfs.c
+index 60702d677bd4..30d150a4f0c6 100644
+--- a/fs/nsfs.c
++++ b/fs/nsfs.c
+@@ -85,13 +85,12 @@ slow:
+ inode->i_fop = &ns_file_operations;
+ inode->i_private = ns;
+
+- dentry = d_alloc_pseudo(mnt->mnt_sb, &empty_name);
++ dentry = d_alloc_anon(mnt->mnt_sb);
+ if (!dentry) {
+ iput(inode);
+ return ERR_PTR(-ENOMEM);
+ }
+ d_instantiate(dentry, inode);
+- dentry->d_flags |= DCACHE_RCUACCESS;
+ dentry->d_fsdata = (void *)ns->ops;
+ d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
+ if (d) {
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 68b3303e4b46..56feaa739979 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -909,14 +909,14 @@ static bool ovl_open_need_copy_up(struct dentry *dentry, int flags)
+ return true;
+ }
+
+-int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags)
++int ovl_maybe_copy_up(struct dentry *dentry, int flags)
+ {
+ int err = 0;
+
+- if (ovl_open_need_copy_up(dentry, file_flags)) {
++ if (ovl_open_need_copy_up(dentry, flags)) {
+ err = ovl_want_write(dentry);
+ if (!err) {
+- err = ovl_copy_up_flags(dentry, file_flags);
++ err = ovl_copy_up_flags(dentry, flags);
+ ovl_drop_write(dentry);
+ }
+ }
+diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
+index 84dd957efa24..50e4407398d8 100644
+--- a/fs/overlayfs/file.c
++++ b/fs/overlayfs/file.c
+@@ -116,11 +116,10 @@ static int ovl_real_fdget(const struct file *file, struct fd *real)
+
+ static int ovl_open(struct inode *inode, struct file *file)
+ {
+- struct dentry *dentry = file_dentry(file);
+ struct file *realfile;
+ int err;
+
+- err = ovl_open_maybe_copy_up(dentry, file->f_flags);
++ err = ovl_maybe_copy_up(file_dentry(file), file->f_flags);
+ if (err)
+ return err;
+
+@@ -390,7 +389,7 @@ static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ if (ret)
+ return ret;
+
+- ret = ovl_copy_up_with_data(file_dentry(file));
++ ret = ovl_maybe_copy_up(file_dentry(file), O_WRONLY);
+ if (!ret) {
+ ret = ovl_real_ioctl(file, cmd, arg);
+
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index 9c6018287d57..d26efed9f80a 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -421,7 +421,7 @@ extern const struct file_operations ovl_file_operations;
+ int ovl_copy_up(struct dentry *dentry);
+ int ovl_copy_up_with_data(struct dentry *dentry);
+ int ovl_copy_up_flags(struct dentry *dentry, int flags);
+-int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags);
++int ovl_maybe_copy_up(struct dentry *dentry, int flags);
+ int ovl_copy_xattr(struct dentry *old, struct dentry *new);
+ int ovl_set_attr(struct dentry *upper, struct kstat *stat);
+ struct ovl_fh *ovl_encode_real_fh(struct dentry *real, bool is_upper);
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index f5ed9512d193..ef11c54ad712 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -2550,6 +2550,11 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
+ rcu_read_unlock();
+ return -EACCES;
+ }
++ /* Prevent changes to overridden credentials. */
++ if (current_cred() != current_real_cred()) {
++ rcu_read_unlock();
++ return -EBUSY;
++ }
+ rcu_read_unlock();
+
+ if (count > PAGE_SIZE)
+diff --git a/fs/ufs/util.h b/fs/ufs/util.h
+index 1fd3011ea623..7fd4802222b8 100644
+--- a/fs/ufs/util.h
++++ b/fs/ufs/util.h
+@@ -229,7 +229,7 @@ ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
+ case UFS_UID_44BSD:
+ return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
+ case UFS_UID_EFT:
+- if (inode->ui_u1.oldids.ui_suid == 0xFFFF)
++ if (inode->ui_u1.oldids.ui_sgid == 0xFFFF)
+ return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
+ /* Fall through */
+ default:
+diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
+index 8ac4e68a12f0..6736ed2f632b 100644
+--- a/include/asm-generic/mm_hooks.h
++++ b/include/asm-generic/mm_hooks.h
+@@ -18,7 +18,6 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
+ }
+
+ static inline void arch_unmap(struct mm_struct *mm,
+- struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
+ }
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index e734f163bd0b..bd8c322fd92a 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -35,6 +35,7 @@ struct bpf_map_ops {
+ void (*map_free)(struct bpf_map *map);
+ int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
+ void (*map_release_uref)(struct bpf_map *map);
++ void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
+
+ /* funcs callable from userspace and from eBPF programs */
+ void *(*map_lookup_elem)(struct bpf_map *map, void *key);
+@@ -455,7 +456,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
+ } \
+ _out: \
+ rcu_read_unlock(); \
+- preempt_enable_no_resched(); \
++ preempt_enable(); \
+ _ret; \
+ })
+
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index 60996e64c579..6e1e8e6602c6 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -176,7 +176,6 @@ struct dentry_operations {
+ * typically using d_splice_alias. */
+
+ #define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */
+-#define DCACHE_RCUACCESS 0x00000080 /* Entry has ever been RCU-visible */
+
+ #define DCACHE_CANT_MOUNT 0x00000100
+ #define DCACHE_GENOCIDE 0x00000200
+@@ -217,6 +216,7 @@ struct dentry_operations {
+
+ #define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
+ #define DCACHE_DENTRY_CURSOR 0x20000000
++#define DCACHE_NORCU 0x40000000 /* No RCU delay for freeing */
+
+ extern seqlock_t rename_lock;
+
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 4f001619f854..a6d4436c76b5 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -677,7 +677,6 @@ struct mlx5_core_dev {
+ #endif
+ struct mlx5_clock clock;
+ struct mlx5_ib_clock_info *clock_info;
+- struct page *clock_info_page;
+ struct mlx5_fw_tracer *tracer;
+ };
+
+diff --git a/include/linux/of.h b/include/linux/of.h
+index e240992e5cb6..074913002e39 100644
+--- a/include/linux/of.h
++++ b/include/linux/of.h
+@@ -234,8 +234,8 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev);
+ static inline u64 of_read_number(const __be32 *cell, int size)
+ {
+ u64 r = 0;
+- while (size--)
+- r = (r << 32) | be32_to_cpu(*(cell++));
++ for (; size--; cell++)
++ r = (r << 32) | be32_to_cpu(*cell);
+ return r;
+ }
+
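The of_read_number() rewrite above is behavior-preserving: it still folds size big-endian 32-bit cells into a u64, but moves the post-increment into the loop header, presumably so it no longer sits inside the be32_to_cpu() macro argument. A user-space illustration of the same folding, with ntohl() standing in for be32_to_cpu() and invented cell values:

#include <arpa/inet.h>	/* ntohl()/htonl() stand in for be32_to_cpu() */
#include <stdint.h>
#include <stdio.h>

static uint64_t read_number(const uint32_t *cell, int size)
{
	uint64_t r = 0;

	for (; size--; cell++)		/* increment in the loop header */
		r = (r << 32) | ntohl(*cell);
	return r;
}

int main(void)
{
	/* a 2-cell (64-bit) value as it would sit in a flat device tree */
	uint32_t cells[2] = { htonl(0x00000001), htonl(0x80000000) };

	printf("0x%llx\n", (unsigned long long)read_number(cells, 2));
	return 0;	/* prints 0x180000000 */
}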
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 65f1d8c2f082..0e5e1ceae27d 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -348,6 +348,8 @@ struct pci_dev {
+ unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
+ controlled exclusively by
+ user sysfs */
++ unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
++ bit manually */
+ unsigned int d3_delay; /* D3->D0 transition time in ms */
+ unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
+
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index b8679dcba96f..3b1a8f38a1ef 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1366,10 +1366,12 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
+ struct ubuf_info *uarg = skb_zcopy(skb);
+
+ if (uarg) {
+- if (uarg->callback == sock_zerocopy_callback) {
++ if (skb_zcopy_is_nouarg(skb)) {
++ /* no notification callback */
++ } else if (uarg->callback == sock_zerocopy_callback) {
+ uarg->zerocopy = uarg->zerocopy && zerocopy;
+ sock_zerocopy_put(uarg);
+- } else if (!skb_zcopy_is_nouarg(skb)) {
++ } else {
+ uarg->callback(uarg, zerocopy);
+ }
+
+@@ -2627,7 +2629,8 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
+ {
+ if (likely(!skb_zcopy(skb)))
+ return 0;
+- if (skb_uarg(skb)->callback == sock_zerocopy_callback)
++ if (!skb_zcopy_is_nouarg(skb) &&
++ skb_uarg(skb)->callback == sock_zerocopy_callback)
+ return 0;
+ return skb_copy_ubufs(skb, gfp_mask);
+ }
+diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
+index 84097010237c..b5e3add90e99 100644
+--- a/include/net/ip6_fib.h
++++ b/include/net/ip6_fib.h
+@@ -171,7 +171,8 @@ struct fib6_info {
+ dst_nocount:1,
+ dst_nopolicy:1,
+ dst_host:1,
+- unused:3;
++ fib6_destroying:1,
++ unused:2;
+
+ struct fib6_nh fib6_nh;
+ struct rcu_head rcu;
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index 85386becbaea..c9b0b2b5d672 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -295,7 +295,8 @@ struct xfrm_replay {
+ };
+
+ struct xfrm_if_cb {
+- struct xfrm_if *(*decode_session)(struct sk_buff *skb);
++ struct xfrm_if *(*decode_session)(struct sk_buff *skb,
++ unsigned short family);
+ };
+
+ void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
+@@ -1404,6 +1405,23 @@ static inline int xfrm_state_kern(const struct xfrm_state *x)
+ return atomic_read(&x->tunnel_users);
+ }
+
++static inline bool xfrm_id_proto_valid(u8 proto)
++{
++ switch (proto) {
++ case IPPROTO_AH:
++ case IPPROTO_ESP:
++ case IPPROTO_COMP:
++#if IS_ENABLED(CONFIG_IPV6)
++ case IPPROTO_ROUTING:
++ case IPPROTO_DSTOPTS:
++#endif
++ return true;
++ default:
++ return false;
++ }
++}
++
++/* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
+ static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
+ {
+ return (!userproto || proto == userproto ||
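xfrm_id_proto_valid() above centralizes the template-protocol whitelist that af_key and xfrm_user previously open-coded (see their hunks later in this patch); notably, IPSEC_PROTO_ANY no longer passes where a single concrete protocol is required. A user-space copy of the check, dropping the kernel's CONFIG_IPV6 guard since glibc defines all five constants:

#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>

#define IPSEC_PROTO_ANY 255

static bool xfrm_id_proto_valid(unsigned char proto)
{
	switch (proto) {
	case IPPROTO_AH:
	case IPPROTO_ESP:
	case IPPROTO_COMP:
	case IPPROTO_ROUTING:	/* IPv6-only in the kernel version */
	case IPPROTO_DSTOPTS:	/* IPv6-only in the kernel version */
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("ESP: %d, ANY: %d\n",
	       xfrm_id_proto_valid(IPPROTO_ESP),
	       xfrm_id_proto_valid(IPSEC_PROTO_ANY));	/* ESP: 1, ANY: 0 */
	return 0;
}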
+diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
+index b4967d48bfda..5f7c3a221894 100644
+--- a/include/uapi/linux/fuse.h
++++ b/include/uapi/linux/fuse.h
+@@ -226,11 +226,13 @@ struct fuse_file_lock {
+ * FOPEN_KEEP_CACHE: don't invalidate the data cache on open
+ * FOPEN_NONSEEKABLE: the file is not seekable
+ * FOPEN_CACHE_DIR: allow caching this directory
++ * FOPEN_STREAM: the file is stream-like (no file position at all)
+ */
+ #define FOPEN_DIRECT_IO (1 << 0)
+ #define FOPEN_KEEP_CACHE (1 << 1)
+ #define FOPEN_NONSEEKABLE (1 << 2)
+ #define FOPEN_CACHE_DIR (1 << 3)
++#define FOPEN_STREAM (1 << 4)
+
+ /**
+ * INIT request/reply flags
+diff --git a/include/video/udlfb.h b/include/video/udlfb.h
+index 7d09e54ae54e..58fb5732831a 100644
+--- a/include/video/udlfb.h
++++ b/include/video/udlfb.h
+@@ -48,6 +48,13 @@ struct dlfb_data {
+ int base8;
+ u32 pseudo_palette[256];
+ int blank_mode; /*one of FB_BLANK_ */
++ struct mutex render_mutex;
++ int damage_x;
++ int damage_y;
++ int damage_x2;
++ int damage_y2;
++ spinlock_t damage_lock;
++ struct work_struct damage_work;
+ struct fb_ops ops;
+ /* blit-only rendering path metrics, exposed through sysfs */
+ atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index f9274114c88d..be5747a5337a 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -527,18 +527,30 @@ static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+ return insn - insn_buf;
+ }
+
+-static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
++static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
++ void *key, const bool mark)
+ {
+ struct htab_elem *l = __htab_map_lookup_elem(map, key);
+
+ if (l) {
+- bpf_lru_node_set_ref(&l->lru_node);
++ if (mark)
++ bpf_lru_node_set_ref(&l->lru_node);
+ return l->key + round_up(map->key_size, 8);
+ }
+
+ return NULL;
+ }
+
++static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
++{
++ return __htab_lru_map_lookup_elem(map, key, true);
++}
++
++static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
++{
++ return __htab_lru_map_lookup_elem(map, key, false);
++}
++
+ static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
+ struct bpf_insn *insn_buf)
+ {
+@@ -1215,6 +1227,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
+ .map_free = htab_map_free,
+ .map_get_next_key = htab_map_get_next_key,
+ .map_lookup_elem = htab_lru_map_lookup_elem,
++ .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
+ .map_update_elem = htab_lru_map_update_elem,
+ .map_delete_elem = htab_lru_map_delete_elem,
+ .map_gen_lookup = htab_lru_map_gen_lookup,
+@@ -1246,7 +1259,6 @@ static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
+
+ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
+ {
+- struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ struct htab_elem *l;
+ void __percpu *pptr;
+ int ret = -ENOENT;
+@@ -1262,8 +1274,9 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
+ l = __htab_map_lookup_elem(map, key);
+ if (!l)
+ goto out;
+- if (htab_is_lru(htab))
+- bpf_lru_node_set_ref(&l->lru_node);
++	/* We do not mark the LRU map element here, so that a user-space
++	 * map walk does not mess up the eviction heuristics.
++ */
+ pptr = htab_elem_get_ptr(l, map->key_size);
+ for_each_possible_cpu(cpu) {
+ bpf_long_memcpy(value + off,
+diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
+index 4a8f390a2b82..dc9d7ac8228d 100644
+--- a/kernel/bpf/inode.c
++++ b/kernel/bpf/inode.c
+@@ -518,7 +518,7 @@ out:
+ static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
+ {
+ struct bpf_prog *prog;
+- int ret = inode_permission(inode, MAY_READ | MAY_WRITE);
++ int ret = inode_permission(inode, MAY_READ);
+ if (ret)
+ return ERR_PTR(ret);
+
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 84470d1480aa..07d9b76e90ce 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -738,7 +738,10 @@ static int map_lookup_elem(union bpf_attr *attr)
+ err = map->ops->map_peek_elem(map, value);
+ } else {
+ rcu_read_lock();
+- ptr = map->ops->map_lookup_elem(map, key);
++ if (map->ops->map_lookup_elem_sys_only)
++ ptr = map->ops->map_lookup_elem_sys_only(map, key);
++ else
++ ptr = map->ops->map_lookup_elem(map, key);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ } else if (!ptr) {
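The syscall path above now prefers map_lookup_elem_sys_only(), which the hashtab hunks wire to a lookup that skips bpf_lru_node_set_ref(): a user-space map walk should not refresh recency and shield cold entries from LRU eviction. A tiny LRU analogue of the mark/no-mark split (structure and names are illustrative only):

#include <stdbool.h>
#include <stdio.h>

struct elem { int key; int value; bool referenced; };

static struct elem table[] = { { 1, 100, false }, { 2, 200, false } };

static struct elem *lookup(int key, bool mark)
{
	for (int i = 0; i < 2; i++) {
		if (table[i].key != key)
			continue;
		if (mark)			/* datapath: counts as a use */
			table[i].referenced = true;
		return &table[i];
	}
	return NULL;
}

int main(void)
{
	lookup(1, true);	/* eBPF program lookup: refresh recency */
	lookup(2, false);	/* syscall map walk: leave recency alone */
	printf("%d %d\n", table[0].referenced, table[1].referenced);	/* 1 0 */
	return 0;
}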
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 1ccf77f6d346..d4ab9245e016 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -771,6 +771,7 @@ out:
+ return 0;
+
+ fail:
++ kobject_put(&tunables->attr_set.kobj);
+ policy->governor_data = NULL;
+ sugov_tunables_free(tunables);
+
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 5b3b0c3c8a47..d910e36c34b5 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -1318,9 +1318,6 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+ char buf[32];
+ int len;
+
+- if (*ppos)
+- return 0;
+-
+ if (unlikely(!id))
+ return -ENODEV;
+
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index 9962cb5da8ac..44f078cda0ac 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -405,13 +405,14 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
+ return -E2BIG;
+ }
+ }
+- /*
+- * The default type of $comm should be "string", and it can't be
+- * dereferenced.
+- */
+- if (!t && strcmp(arg, "$comm") == 0)
++
++	/* Since $comm cannot be dereferenced, we can find $comm by strcmp */
++ if (strcmp(arg, "$comm") == 0) {
++ /* The type of $comm must be "string", and not an array. */
++ if (parg->count || (t && strcmp(t, "string")))
++ return -EINVAL;
+ parg->type = find_fetch_type("string");
+- else
++ } else
+ parg->type = find_fetch_type(t);
+ if (!parg->type) {
+ pr_info("Unsupported type: %s\n", t);
+diff --git a/lib/Makefile b/lib/Makefile
+index e1b59da71418..d1f312096bec 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -17,6 +17,17 @@ KCOV_INSTRUMENT_list_debug.o := n
+ KCOV_INSTRUMENT_debugobjects.o := n
+ KCOV_INSTRUMENT_dynamic_debug.o := n
+
++# Early boot use of cmdline, don't instrument it
++ifdef CONFIG_AMD_MEM_ENCRYPT
++KASAN_SANITIZE_string.o := n
++
++ifdef CONFIG_FUNCTION_TRACER
++CFLAGS_REMOVE_string.o = -pg
++endif
++
++CFLAGS_string.o := $(call cc-option, -fno-stack-protector)
++endif
++
+ lib-y := ctype.o string.o vsprintf.o cmdline.o \
+ rbtree.o radix-tree.o timerqueue.o xarray.o \
+ idr.o int_sqrt.o extable.o \
+diff --git a/mm/gup.c b/mm/gup.c
+index 81e0bdefa2cc..1a42b4367c3a 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1811,7 +1811,7 @@ static void gup_pgd_range(unsigned long addr, unsigned long end,
+ * Check if it's allowed to use __get_user_pages_fast() for the range, or
+ * we need to fall back to the slow version:
+ */
+-bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
++bool gup_fast_permitted(unsigned long start, int nr_pages)
+ {
+ unsigned long len, end;
+
+@@ -1853,7 +1853,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ * block IPIs that come from THPs splitting.
+ */
+
+- if (gup_fast_permitted(start, nr_pages, write)) {
++ if (gup_fast_permitted(start, nr_pages)) {
+ local_irq_save(flags);
+ gup_pgd_range(start, end, write, pages, &nr);
+ local_irq_restore(flags);
+@@ -1895,7 +1895,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ if (unlikely(!access_ok((void __user *)start, len)))
+ return -EFAULT;
+
+- if (gup_fast_permitted(start, nr_pages, write)) {
++ if (gup_fast_permitted(start, nr_pages)) {
+ local_irq_disable();
+ gup_pgd_range(addr, end, write, pages, &nr);
+ local_irq_enable();
+diff --git a/mm/mmap.c b/mm/mmap.c
+index da9236a5022e..446698476e4c 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2736,9 +2736,17 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
+ return -EINVAL;
+
+ len = PAGE_ALIGN(len);
++ end = start + len;
+ if (len == 0)
+ return -EINVAL;
+
++ /*
++	 * arch_unmap() might do unmaps itself. It must be called,
++	 * and must finish any rbtree manipulation, before this code
++	 * runs and starts manipulating the rbtree itself.
++ */
++ arch_unmap(mm, start, end);
++
+ /* Find the first overlapping VMA */
+ vma = find_vma(mm, start);
+ if (!vma)
+@@ -2747,7 +2755,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
+ /* we have start < vma->vm_end */
+
+ /* if it doesn't overlap, we have nothing.. */
+- end = start + len;
+ if (vma->vm_start >= end)
+ return 0;
+
+@@ -2817,12 +2824,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
+ /* Detach vmas from rbtree */
+ detach_vmas_to_be_unmapped(mm, vma, prev, end);
+
+- /*
+- * mpx unmap needs to be called with mmap_sem held for write.
+- * It is safe to call it before unmap_region().
+- */
+- arch_unmap(mm, vma, start, end);
+-
+ if (downgrade)
+ downgrade_write(&mm->mmap_sem);
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 7277dd393c00..c8e672ac32cb 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -8829,7 +8829,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
+
+ refcnt = netdev_refcnt_read(dev);
+
+- if (time_after(jiffies, warning_time + 10 * HZ)) {
++ if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
+ pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
+ dev->name, refcnt);
+ warning_time = jiffies;
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 5ea1bed08ede..fd449017c55e 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1502,14 +1502,15 @@ static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
+ return ret;
+ }
+
+-static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev)
++static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
++ bool force)
+ {
+ int ifindex = dev_get_iflink(dev);
+
+- if (dev->ifindex == ifindex)
+- return 0;
++ if (force || dev->ifindex != ifindex)
++ return nla_put_u32(skb, IFLA_LINK, ifindex);
+
+- return nla_put_u32(skb, IFLA_LINK, ifindex);
++ return 0;
+ }
+
+ static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
+@@ -1526,6 +1527,8 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
+ const struct net_device *dev,
+ struct net *src_net)
+ {
++ bool put_iflink = false;
++
+ if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
+ struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
+
+@@ -1534,10 +1537,12 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
+
+ if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
+ return -EMSGSIZE;
++
++ put_iflink = true;
+ }
+ }
+
+- return 0;
++ return nla_put_iflink(skb, dev, put_iflink);
+ }
+
+ static int rtnl_fill_link_af(struct sk_buff *skb,
+@@ -1623,7 +1628,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
+ #ifdef CONFIG_RPS
+ nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
+ #endif
+- nla_put_iflink(skb, dev) ||
+ put_master_ifindex(skb, dev) ||
+ nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
+ (dev->qdisc &&
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index 10e809b296ec..fb065a8937ea 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -226,7 +226,7 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
+ tail[plen - 1] = proto;
+ }
+
+-static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
++static int esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
+ {
+ int encap_type;
+ struct udphdr *uh;
+@@ -234,6 +234,7 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
+ __be16 sport, dport;
+ struct xfrm_encap_tmpl *encap = x->encap;
+ struct ip_esp_hdr *esph = esp->esph;
++ unsigned int len;
+
+ spin_lock_bh(&x->lock);
+ sport = encap->encap_sport;
+@@ -241,11 +242,14 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
+ encap_type = encap->encap_type;
+ spin_unlock_bh(&x->lock);
+
++ len = skb->len + esp->tailen - skb_transport_offset(skb);
++ if (len + sizeof(struct iphdr) >= IP_MAX_MTU)
++ return -EMSGSIZE;
++
+ uh = (struct udphdr *)esph;
+ uh->source = sport;
+ uh->dest = dport;
+- uh->len = htons(skb->len + esp->tailen
+- - skb_transport_offset(skb));
++ uh->len = htons(len);
+ uh->check = 0;
+
+ switch (encap_type) {
+@@ -262,6 +266,8 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
+
+ *skb_mac_header(skb) = IPPROTO_UDP;
+ esp->esph = esph;
++
++ return 0;
+ }
+
+ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
+@@ -275,8 +281,12 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
+ int tailen = esp->tailen;
+
+ /* this is non-NULL only with UDP Encapsulation */
+- if (x->encap)
+- esp_output_udp_encap(x, skb, esp);
++ if (x->encap) {
++ int err = esp_output_udp_encap(x, skb, esp);
++
++ if (err < 0)
++ return err;
++ }
+
+ if (!skb_cloned(skb)) {
+ if (tailen <= skb_tailroom(skb)) {
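esp_output_udp_encap() can now fail: the added check rejects packets whose UDP-encapsulated ESP payload plus IPv4 header would reach IP_MAX_MTU (0xFFFF), since neither the 16-bit uh->len field nor the IPv4 total length could represent them. A user-space sketch of the same bound check (the header size and names are simplified):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define IP_MAX_MTU 0xFFFFU	/* 16-bit IPv4 total-length ceiling */
#define IPHDR_LEN  20U		/* minimal IPv4 header, simplified */

static int udp_encap_len(unsigned int payload, uint16_t *uh_len)
{
	if (payload + IPHDR_LEN >= IP_MAX_MTU)
		return -EMSGSIZE;	/* uh->len / tot_len would overflow */
	*uh_len = (uint16_t)payload;
	return 0;
}

int main(void)
{
	uint16_t len;

	printf("%d\n", udp_encap_len(1400, &len));	/* 0 */
	printf("%d\n", udp_encap_len(65600, &len));	/* -EMSGSIZE (-90) */
	return 0;
}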
+diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
+index 8756e0e790d2..d3170a8001b2 100644
+--- a/net/ipv4/esp4_offload.c
++++ b/net/ipv4/esp4_offload.c
+@@ -52,13 +52,13 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
+ goto out;
+
+ if (sp->len == XFRM_MAX_DEPTH)
+- goto out;
++ goto out_reset;
+
+ x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
+ (xfrm_address_t *)&ip_hdr(skb)->daddr,
+ spi, IPPROTO_ESP, AF_INET);
+ if (!x)
+- goto out;
++ goto out_reset;
+
+ sp->xvec[sp->len++] = x;
+ sp->olen++;
+@@ -66,7 +66,7 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
+ xo = xfrm_offload(skb);
+ if (!xo) {
+ xfrm_state_put(x);
+- goto out;
++ goto out_reset;
+ }
+ }
+
+@@ -82,6 +82,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
+ xfrm_input(skb, IPPROTO_ESP, spi, -2);
+
+ return ERR_PTR(-EINPROGRESS);
++out_reset:
++ secpath_reset(skb);
+ out:
+ skb_push(skb, offset);
+ NAPI_GRO_CB(skb)->same_flow = 0;
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index 68a21bf75dd0..b6235ca09fa5 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -659,9 +659,9 @@ static int __init vti_init(void)
+ return err;
+
+ rtnl_link_failed:
+- xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
+-xfrm_tunnel_failed:
+ xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
++xfrm_tunnel_failed:
++ xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
+ xfrm_proto_comp_failed:
+ xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
+ xfrm_proto_ah_failed:
+@@ -676,6 +676,7 @@ pernet_dev_failed:
+ static void __exit vti_fini(void)
+ {
+ rtnl_link_unregister(&vti_link_ops);
++ xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
+ xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
+ xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
+ xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
+diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
+index d73a6d6652f6..2b144b92ae46 100644
+--- a/net/ipv4/xfrm4_policy.c
++++ b/net/ipv4/xfrm4_policy.c
+@@ -111,7 +111,8 @@ static void
+ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
+ {
+ const struct iphdr *iph = ip_hdr(skb);
+- u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
++ int ihl = iph->ihl;
++ u8 *xprth = skb_network_header(skb) + ihl * 4;
+ struct flowi4 *fl4 = &fl->u.ip4;
+ int oif = 0;
+
+@@ -122,6 +123,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
+ fl4->flowi4_mark = skb->mark;
+ fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
+
++ fl4->flowi4_proto = iph->protocol;
++ fl4->daddr = reverse ? iph->saddr : iph->daddr;
++ fl4->saddr = reverse ? iph->daddr : iph->saddr;
++ fl4->flowi4_tos = iph->tos;
++
+ if (!ip_is_fragment(iph)) {
+ switch (iph->protocol) {
+ case IPPROTO_UDP:
+@@ -133,7 +139,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
+ pskb_may_pull(skb, xprth + 4 - skb->data)) {
+ __be16 *ports;
+
+- xprth = skb_network_header(skb) + iph->ihl * 4;
++ xprth = skb_network_header(skb) + ihl * 4;
+ ports = (__be16 *)xprth;
+
+ fl4->fl4_sport = ports[!!reverse];
+@@ -146,7 +152,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
+ pskb_may_pull(skb, xprth + 2 - skb->data)) {
+ u8 *icmp;
+
+- xprth = skb_network_header(skb) + iph->ihl * 4;
++ xprth = skb_network_header(skb) + ihl * 4;
+ icmp = xprth;
+
+ fl4->fl4_icmp_type = icmp[0];
+@@ -159,7 +165,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
+ pskb_may_pull(skb, xprth + 4 - skb->data)) {
+ __be32 *ehdr;
+
+- xprth = skb_network_header(skb) + iph->ihl * 4;
++ xprth = skb_network_header(skb) + ihl * 4;
+ ehdr = (__be32 *)xprth;
+
+ fl4->fl4_ipsec_spi = ehdr[0];
+@@ -171,7 +177,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
+ pskb_may_pull(skb, xprth + 8 - skb->data)) {
+ __be32 *ah_hdr;
+
+- xprth = skb_network_header(skb) + iph->ihl * 4;
++ xprth = skb_network_header(skb) + ihl * 4;
+ ah_hdr = (__be32 *)xprth;
+
+ fl4->fl4_ipsec_spi = ah_hdr[1];
+@@ -183,7 +189,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
+ pskb_may_pull(skb, xprth + 4 - skb->data)) {
+ __be16 *ipcomp_hdr;
+
+- xprth = skb_network_header(skb) + iph->ihl * 4;
++ xprth = skb_network_header(skb) + ihl * 4;
+ ipcomp_hdr = (__be16 *)xprth;
+
+ fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
+@@ -196,7 +202,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
+ __be16 *greflags;
+ __be32 *gre_hdr;
+
+- xprth = skb_network_header(skb) + iph->ihl * 4;
++ xprth = skb_network_header(skb) + ihl * 4;
+ greflags = (__be16 *)xprth;
+ gre_hdr = (__be32 *)xprth;
+
+@@ -213,10 +219,6 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
+ break;
+ }
+ }
+- fl4->flowi4_proto = iph->protocol;
+- fl4->daddr = reverse ? iph->saddr : iph->daddr;
+- fl4->saddr = reverse ? iph->daddr : iph->saddr;
+- fl4->flowi4_tos = iph->tos;
+ }
+
+ static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
+diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
+index d46b4eb645c2..cb99f6fb79b7 100644
+--- a/net/ipv6/esp6_offload.c
++++ b/net/ipv6/esp6_offload.c
+@@ -74,13 +74,13 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
+ goto out;
+
+ if (sp->len == XFRM_MAX_DEPTH)
+- goto out;
++ goto out_reset;
+
+ x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
+ (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
+ spi, IPPROTO_ESP, AF_INET6);
+ if (!x)
+- goto out;
++ goto out_reset;
+
+ sp->xvec[sp->len++] = x;
+ sp->olen++;
+@@ -88,7 +88,7 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
+ xo = xfrm_offload(skb);
+ if (!xo) {
+ xfrm_state_put(x);
+- goto out;
++ goto out_reset;
+ }
+ }
+
+@@ -109,6 +109,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
+ xfrm_input(skb, IPPROTO_ESP, spi, -2);
+
+ return ERR_PTR(-EINPROGRESS);
++out_reset:
++ secpath_reset(skb);
+ out:
+ skb_push(skb, offset);
+ NAPI_GRO_CB(skb)->same_flow = 0;
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 91247a6fc67f..9915f64b38a0 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -909,6 +909,12 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i,
+ {
+ int cpu;
+
++	/* Make sure rt6_make_pcpu_route() won't add other percpu routes
++ * while we are cleaning them here.
++ */
++ f6i->fib6_destroying = 1;
++ mb(); /* paired with the cmpxchg() in rt6_make_pcpu_route() */
++
+ /* release the reference to this fib entry from
+ * all of its cached pcpu routes
+ */
+@@ -932,6 +938,9 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
+ {
+ struct fib6_table *table = rt->fib6_table;
+
++ if (rt->rt6i_pcpu)
++ fib6_drop_pcpu_from(rt, table);
++
+ if (atomic_read(&rt->fib6_ref) != 1) {
+ /* This route is used as dummy address holder in some split
+ * nodes. It is not leaked, but it still holds other resources,
+@@ -953,9 +962,6 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
+ fn = rcu_dereference_protected(fn->parent,
+ lockdep_is_held(&table->tb6_lock));
+ }
+-
+- if (rt->rt6i_pcpu)
+- fib6_drop_pcpu_from(rt, table);
+ }
+ }
+
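The fib6_drop_pcpu_from() hunk above closes a race with rt6_make_pcpu_route() (patched in the net/ipv6/route.c hunk that follows): the destroyer raises fib6_destroying and issues a full barrier before walking the per-CPU cache, and the creator, after publishing with cmpxchg(), re-checks the flag and releases its own reference if destruction already began. A C11-atomics sketch of that flag-and-barrier handshake (types and names are stand-ins):

#include <stdatomic.h>
#include <stddef.h>

struct route { int ref; };

static _Atomic int destroying;
static _Atomic(struct route *) pcpu_slot;

static void destroyer(void)
{
	atomic_store(&destroying, 1);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with the CAS */
	/* ... now walk the slots and release any references found ... */
}

static void creator(struct route *rt)
{
	struct route *expected = NULL;

	if (!atomic_compare_exchange_strong(&pcpu_slot, &expected, rt))
		return;		/* lost the race to publish */

	/* Publication may have raced with destruction: the destroyer
	 * could have walked past an empty slot, so drop our own ref. */
	if (atomic_load(&destroying))
		rt->ref--;	/* stand-in for fib6_info_release() */
}

int main(void)
{
	struct route r = { .ref = 1 };

	creator(&r);
	destroyer();
	return 0;
}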
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 59c90bba048c..b471afce1330 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -110,8 +110,8 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
+ int iif, int type, u32 portid, u32 seq,
+ unsigned int flags);
+ static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
+- struct in6_addr *daddr,
+- struct in6_addr *saddr);
++ const struct in6_addr *daddr,
++ const struct in6_addr *saddr);
+
+ #ifdef CONFIG_IPV6_ROUTE_INFO
+ static struct fib6_info *rt6_add_route_info(struct net *net,
+@@ -1260,6 +1260,13 @@ static struct rt6_info *rt6_make_pcpu_route(struct net *net,
+ prev = cmpxchg(p, NULL, pcpu_rt);
+ BUG_ON(prev);
+
++ if (rt->fib6_destroying) {
++ struct fib6_info *from;
++
++ from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
++ fib6_info_release(from);
++ }
++
+ return pcpu_rt;
+ }
+
+@@ -1529,31 +1536,44 @@ out:
+ * Caller has to hold rcu_read_lock()
+ */
+ static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
+- struct in6_addr *daddr,
+- struct in6_addr *saddr)
++ const struct in6_addr *daddr,
++ const struct in6_addr *saddr)
+ {
++ const struct in6_addr *src_key = NULL;
+ struct rt6_exception_bucket *bucket;
+- struct in6_addr *src_key = NULL;
+ struct rt6_exception *rt6_ex;
+ struct rt6_info *res = NULL;
+
+- bucket = rcu_dereference(rt->rt6i_exception_bucket);
+-
+ #ifdef CONFIG_IPV6_SUBTREES
+ /* rt6i_src.plen != 0 indicates rt is in subtree
+ * and exception table is indexed by a hash of
+ * both rt6i_dst and rt6i_src.
+- * Otherwise, the exception table is indexed by
+- * a hash of only rt6i_dst.
++	 * However, the src addr used to create the hash
++	 * might not be exactly the passed-in saddr, which
++	 * is a /128 addr from the flow.
++	 * So we need to use f6i->fib6_src to redo the
++	 * lookup if the passed-in saddr finds nothing.
++	 * (See the logic in ip6_rt_cache_alloc() for how
++	 * rt->rt6i_src is updated.)
+ */
+ if (rt->fib6_src.plen)
+ src_key = saddr;
++find_ex:
+ #endif
++ bucket = rcu_dereference(rt->rt6i_exception_bucket);
+ rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
+
+ if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
+ res = rt6_ex->rt6i;
+
++#ifdef CONFIG_IPV6_SUBTREES
++ /* Use fib6_src as src_key and redo lookup */
++ if (!res && src_key && src_key != &rt->fib6_src.addr) {
++ src_key = &rt->fib6_src.addr;
++ goto find_ex;
++ }
++#endif
++
+ return res;
+ }
+
+@@ -2614,10 +2634,8 @@ out:
+ u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
+ struct in6_addr *saddr)
+ {
+- struct rt6_exception_bucket *bucket;
+- struct rt6_exception *rt6_ex;
+- struct in6_addr *src_key;
+ struct inet6_dev *idev;
++ struct rt6_info *rt;
+ u32 mtu = 0;
+
+ if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
+@@ -2626,18 +2644,10 @@ u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
+ goto out;
+ }
+
+- src_key = NULL;
+-#ifdef CONFIG_IPV6_SUBTREES
+- if (f6i->fib6_src.plen)
+- src_key = saddr;
+-#endif
+-
+- bucket = rcu_dereference(f6i->rt6i_exception_bucket);
+- rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
+- if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
+- mtu = dst_metric_raw(&rt6_ex->rt6i->dst, RTAX_MTU);
+-
+- if (likely(!mtu)) {
++ rt = rt6_find_cached_rt(f6i, daddr, saddr);
++ if (unlikely(rt)) {
++ mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
++ } else {
+ struct net_device *dev = fib6_info_nh_dev(f6i);
+
+ mtu = IPV6_MIN_MTU;
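rt6_find_cached_rt() above now retries: the exception bucket may be hashed by the route's own fib6_src rather than the flow's /128 source, so a miss on the passed-in saddr is redone with rt->fib6_src.addr, and ip6_mtu_from_fib6() is converted to reuse this helper instead of open-coding one lookup. A generic two-key fallback sketch (the table contents and key format are invented for illustration):

#include <stdio.h>
#include <string.h>

struct exception { const char *src_key; int mtu; };

/* the cached exception was hashed by the route's own source prefix */
static struct exception cache[] = { { "2001:db8::/64", 1400 } };

static struct exception *find(const char *key)
{
	for (size_t i = 0; i < sizeof(cache) / sizeof(cache[0]); i++)
		if (strcmp(cache[i].src_key, key) == 0)
			return &cache[i];
	return NULL;
}

int main(void)
{
	/* first the flow's /128 source, then the route's fib6_src */
	const char *keys[] = { "2001:db8::1/128", "2001:db8::/64" };
	struct exception *e = NULL;

	for (int i = 0; !e && i < 2; i++)
		e = find(keys[i]);
	printf("mtu = %d\n", e ? e->mtu : -1);	/* mtu = 1400 */
	return 0;
}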
+diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
+index bc65db782bfb..d9e5f6808811 100644
+--- a/net/ipv6/xfrm6_tunnel.c
++++ b/net/ipv6/xfrm6_tunnel.c
+@@ -345,7 +345,7 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
+ unsigned int i;
+
+ xfrm_flush_gc();
+- xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
++ xfrm_state_flush(net, 0, false, true);
+
+ for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
+ WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
+@@ -402,6 +402,10 @@ static void __exit xfrm6_tunnel_fini(void)
+ xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
+ xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
+ unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
++	/* Someone may still hold an xfrm6_tunnel_spi, so wait for
++	 * outstanding RCU readers before destroying the cache.
++ */
++ rcu_barrier();
+ kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
+ }
+
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 5651c29cb5bd..4af1e1d60b9f 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1951,8 +1951,10 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
+
+ if (rq->sadb_x_ipsecrequest_mode == 0)
+ return -EINVAL;
++ if (!xfrm_id_proto_valid(rq->sadb_x_ipsecrequest_proto))
++ return -EINVAL;
+
+- t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */
++ t->id.proto = rq->sadb_x_ipsecrequest_proto;
+ if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
+ return -EINVAL;
+ t->mode = mode;
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 4a6ff1482a9f..02d2e6f11e93 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -1908,6 +1908,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
+ list_del_rcu(&sdata->list);
+ mutex_unlock(&sdata->local->iflist_mtx);
+
++ if (sdata->vif.txq)
++ ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq));
++
+ synchronize_rcu();
+
+ if (sdata->dev) {
+diff --git a/net/tipc/core.c b/net/tipc/core.c
+index 5b38f5164281..d7b0688c98dd 100644
+--- a/net/tipc/core.c
++++ b/net/tipc/core.c
+@@ -66,6 +66,10 @@ static int __net_init tipc_init_net(struct net *net)
+ INIT_LIST_HEAD(&tn->node_list);
+ spin_lock_init(&tn->node_list_lock);
+
++ err = tipc_socket_init();
++ if (err)
++ goto out_socket;
++
+ err = tipc_sk_rht_init(net);
+ if (err)
+ goto out_sk_rht;
+@@ -92,6 +96,8 @@ out_subscr:
+ out_nametbl:
+ tipc_sk_rht_destroy(net);
+ out_sk_rht:
++ tipc_socket_stop();
++out_socket:
+ return err;
+ }
+
+@@ -102,6 +108,7 @@ static void __net_exit tipc_exit_net(struct net *net)
+ tipc_bcast_stop(net);
+ tipc_nametbl_stop(net);
+ tipc_sk_rht_destroy(net);
++ tipc_socket_stop();
+ }
+
+ static struct pernet_operations tipc_net_ops = {
+@@ -129,10 +136,6 @@ static int __init tipc_init(void)
+ if (err)
+ goto out_netlink_compat;
+
+- err = tipc_socket_init();
+- if (err)
+- goto out_socket;
+-
+ err = tipc_register_sysctl();
+ if (err)
+ goto out_sysctl;
+@@ -152,8 +155,6 @@ out_bearer:
+ out_pernet:
+ tipc_unregister_sysctl();
+ out_sysctl:
+- tipc_socket_stop();
+-out_socket:
+ tipc_netlink_compat_stop();
+ out_netlink_compat:
+ tipc_netlink_stop();
+@@ -168,7 +169,6 @@ static void __exit tipc_exit(void)
+ unregister_pernet_subsys(&tipc_net_ops);
+ tipc_netlink_stop();
+ tipc_netlink_compat_stop();
+- tipc_socket_stop();
+ tipc_unregister_sysctl();
+
+ pr_info("Deactivated\n");
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index 15eb5d3d4750..96ab344f17bb 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -702,28 +702,27 @@ static int __init virtio_vsock_init(void)
+ if (!virtio_vsock_workqueue)
+ return -ENOMEM;
+
+- ret = register_virtio_driver(&virtio_vsock_driver);
++ ret = vsock_core_init(&virtio_transport.transport);
+ if (ret)
+ goto out_wq;
+
+- ret = vsock_core_init(&virtio_transport.transport);
++ ret = register_virtio_driver(&virtio_vsock_driver);
+ if (ret)
+- goto out_vdr;
++ goto out_vci;
+
+ return 0;
+
+-out_vdr:
+- unregister_virtio_driver(&virtio_vsock_driver);
++out_vci:
++ vsock_core_exit();
+ out_wq:
+ destroy_workqueue(virtio_vsock_workqueue);
+ return ret;
+-
+ }
+
+ static void __exit virtio_vsock_exit(void)
+ {
+- vsock_core_exit();
+ unregister_virtio_driver(&virtio_vsock_driver);
++ vsock_core_exit();
+ destroy_workqueue(virtio_vsock_workqueue);
+ }
+
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index 602715fc9a75..f3f3d06cb6d8 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -786,12 +786,19 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
+
+ void virtio_transport_release(struct vsock_sock *vsk)
+ {
++ struct virtio_vsock_sock *vvs = vsk->trans;
++ struct virtio_vsock_pkt *pkt, *tmp;
+ struct sock *sk = &vsk->sk;
+ bool remove_sock = true;
+
+ lock_sock(sk);
+ if (sk->sk_type == SOCK_STREAM)
+ remove_sock = virtio_transport_close(vsk);
++
++ list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
++ list_del(&pkt->list);
++ virtio_transport_free_pkt(pkt);
++ }
+ release_sock(sk);
+
+ if (remove_sock)
+diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
+index dbb3c1945b5c..85fec98676d3 100644
+--- a/net/xfrm/xfrm_interface.c
++++ b/net/xfrm/xfrm_interface.c
+@@ -70,17 +70,28 @@ static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
+ return NULL;
+ }
+
+-static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
++static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
++ unsigned short family)
+ {
+ struct xfrmi_net *xfrmn;
+- int ifindex;
+ struct xfrm_if *xi;
++ int ifindex = 0;
+
+ if (!secpath_exists(skb) || !skb->dev)
+ return NULL;
+
++ switch (family) {
++ case AF_INET6:
++ ifindex = inet6_sdif(skb);
++ break;
++ case AF_INET:
++ ifindex = inet_sdif(skb);
++ break;
++ }
++ if (!ifindex)
++ ifindex = skb->dev->ifindex;
++
+ xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
+- ifindex = skb->dev->ifindex;
+
+ for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
+ if (ifindex == xi->dev->ifindex &&
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 8d1a898d0ba5..a6b58df7a70f 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -3313,7 +3313,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
+ ifcb = xfrm_if_get_cb();
+
+ if (ifcb) {
+- xi = ifcb->decode_session(skb);
++ xi = ifcb->decode_session(skb, family);
+ if (xi) {
+ if_id = xi->p.if_id;
+ net = xi->net;
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 1bb971f46fc6..178baaa037e5 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -2384,7 +2384,7 @@ void xfrm_state_fini(struct net *net)
+
+ flush_work(&net->xfrm.state_hash_work);
+ flush_work(&xfrm_state_gc_work);
+- xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
++ xfrm_state_flush(net, 0, false, true);
+
+ WARN_ON(!list_empty(&net->xfrm.state_all));
+
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index a131f9ff979e..6916931b1de1 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1424,7 +1424,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
+ ret = verify_policy_dir(p->dir);
+ if (ret)
+ return ret;
+- if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir))
++ if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
+ return -EINVAL;
+
+ return 0;
+@@ -1513,20 +1513,8 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
+ return -EINVAL;
+ }
+
+- switch (ut[i].id.proto) {
+- case IPPROTO_AH:
+- case IPPROTO_ESP:
+- case IPPROTO_COMP:
+-#if IS_ENABLED(CONFIG_IPV6)
+- case IPPROTO_ROUTING:
+- case IPPROTO_DSTOPTS:
+-#endif
+- case IPSEC_PROTO_ANY:
+- break;
+- default:
++ if (!xfrm_id_proto_valid(ut[i].id.proto))
+ return -EINVAL;
+- }
+-
+ }
+
+ return 0;
+diff --git a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
+index 89c47f57d1ce..8c1af9bdcb1b 100644
+--- a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
++++ b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
+@@ -36,7 +36,7 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
+ mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode)));
+ masked_sp = gen_reg_rtx(Pmode);
+
+- emit_insn_before(gen_rtx_SET(masked_sp,
++ emit_insn_before(gen_rtx_set(masked_sp,
+ gen_rtx_AND(Pmode,
+ stack_pointer_rtx,
+ mask)),
+diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
+index 3f80a684c232..665853dd517c 100644
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -123,17 +123,22 @@ static int aafs_show_path(struct seq_file *seq, struct dentry *dentry)
+ return 0;
+ }
+
+-static void aafs_evict_inode(struct inode *inode)
++static void aafs_i_callback(struct rcu_head *head)
+ {
+- truncate_inode_pages_final(&inode->i_data);
+- clear_inode(inode);
++ struct inode *inode = container_of(head, struct inode, i_rcu);
+ if (S_ISLNK(inode->i_mode))
+ kfree(inode->i_link);
++ free_inode_nonrcu(inode);
++}
++
++static void aafs_destroy_inode(struct inode *inode)
++{
++ call_rcu(&inode->i_rcu, aafs_i_callback);
+ }
+
+ static const struct super_operations aafs_super_ops = {
+ .statfs = simple_statfs,
+- .evict_inode = aafs_evict_inode,
++ .destroy_inode = aafs_destroy_inode,
+ .show_path = aafs_show_path,
+ };
+
+diff --git a/security/inode.c b/security/inode.c
+index b7772a9b315e..421dd72b5876 100644
+--- a/security/inode.c
++++ b/security/inode.c
+@@ -27,17 +27,22 @@
+ static struct vfsmount *mount;
+ static int mount_count;
+
+-static void securityfs_evict_inode(struct inode *inode)
++static void securityfs_i_callback(struct rcu_head *head)
+ {
+- truncate_inode_pages_final(&inode->i_data);
+- clear_inode(inode);
++ struct inode *inode = container_of(head, struct inode, i_rcu);
+ if (S_ISLNK(inode->i_mode))
+ kfree(inode->i_link);
++ free_inode_nonrcu(inode);
++}
++
++static void securityfs_destroy_inode(struct inode *inode)
++{
++ call_rcu(&inode->i_rcu, securityfs_i_callback);
+ }
+
+ static const struct super_operations securityfs_super_operations = {
+ .statfs = simple_statfs,
+- .evict_inode = securityfs_evict_inode,
++ .destroy_inode = securityfs_destroy_inode,
+ };
+
+ static int fill_super(struct super_block *sb, void *data, int silent)
+diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
+index 1ef1ee2280a2..227766d9f43b 100644
+--- a/tools/bpf/bpftool/map.c
++++ b/tools/bpf/bpftool/map.c
+@@ -1111,6 +1111,9 @@ static int do_create(int argc, char **argv)
+ return -1;
+ }
+ NEXT_ARG();
++ } else {
++ p_err("unknown arg %s", *argv);
++ return -1;
+ }
+ }
+
+diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
+index 53f8be0f4a1f..88158239622b 100644
+--- a/tools/objtool/Makefile
++++ b/tools/objtool/Makefile
+@@ -7,11 +7,12 @@ ARCH := x86
+ endif
+
+ # always use the host compiler
++HOSTAR ?= ar
+ HOSTCC ?= gcc
+ HOSTLD ?= ld
++AR = $(HOSTAR)
+ CC = $(HOSTCC)
+ LD = $(HOSTLD)
+-AR = ar
+
+ ifeq ($(srctree),)
+ srctree := $(patsubst %/,%,$(dir $(CURDIR)))
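
CC and LD already honored the HOSTCC/HOSTLD overrides, but AR was hardwired to the system ar, so a cross or LLVM host toolchain could end up archiving objtool's libraries with a mismatched tool. With the hunk applied, all three can be pinned consistently, e.g.:

	make -C tools/objtool HOSTCC=clang HOSTLD=ld.lld HOSTAR=llvm-ar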
+diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
+index 44195514b19e..fa56fde6e8d8 100644
+--- a/tools/perf/bench/numa.c
++++ b/tools/perf/bench/numa.c
+@@ -38,6 +38,10 @@
+ #include <numa.h>
+ #include <numaif.h>
+
++#ifndef RUSAGE_THREAD
++# define RUSAGE_THREAD 1
++#endif
++
+ /*
+ * Regular printout to the terminal, suppressed if -q is specified:
+ */
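
RUSAGE_THREAD is a Linux-specific getrusage() selector that glibc only exposes under _GNU_SOURCE and that some older C libraries never gained; the fallback pins it to 1, the value in the kernel's UAPI headers, so the benchmark keeps building against such libcs. A self-contained usage sketch:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/resource.h>

	#ifndef RUSAGE_THREAD
	# define RUSAGE_THREAD 1	/* matches include/uapi/linux/resource.h */
	#endif

	int main(void)
	{
		struct rusage ru;

		/* usage of the calling thread only, not the whole process */
		if (getrusage(RUSAGE_THREAD, &ru) == 0)
			printf("utime: %ld.%06ld s\n",
			       (long)ru.ru_utime.tv_sec,
			       (long)ru.ru_utime.tv_usec);
		return 0;
	}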
+diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
+index 27a374ddf661..947f1bb2fbdf 100644
+--- a/tools/perf/util/cs-etm.c
++++ b/tools/perf/util/cs-etm.c
+@@ -345,11 +345,9 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
+ if (!etmq->packet)
+ goto out_free;
+
+- if (etm->synth_opts.last_branch || etm->sample_branches) {
+- etmq->prev_packet = zalloc(szp);
+- if (!etmq->prev_packet)
+- goto out_free;
+- }
++ etmq->prev_packet = zalloc(szp);
++ if (!etmq->prev_packet)
++ goto out_free;
+
+ if (etm->synth_opts.last_branch) {
+ size_t sz = sizeof(struct branch_stack);
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+index 7c0b975dd2f0..73fc4abee302 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -58,6 +58,7 @@ enum intel_pt_pkt_state {
+ INTEL_PT_STATE_NO_IP,
+ INTEL_PT_STATE_ERR_RESYNC,
+ INTEL_PT_STATE_IN_SYNC,
++ INTEL_PT_STATE_TNT_CONT,
+ INTEL_PT_STATE_TNT,
+ INTEL_PT_STATE_TIP,
+ INTEL_PT_STATE_TIP_PGD,
+@@ -72,8 +73,9 @@ static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
+ case INTEL_PT_STATE_NO_IP:
+ case INTEL_PT_STATE_ERR_RESYNC:
+ case INTEL_PT_STATE_IN_SYNC:
+- case INTEL_PT_STATE_TNT:
++ case INTEL_PT_STATE_TNT_CONT:
+ return true;
++ case INTEL_PT_STATE_TNT:
+ case INTEL_PT_STATE_TIP:
+ case INTEL_PT_STATE_TIP_PGD:
+ case INTEL_PT_STATE_FUP:
+@@ -888,16 +890,20 @@ static uint64_t intel_pt_next_period(struct intel_pt_decoder *decoder)
+ timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
+ masked_timestamp = timestamp & decoder->period_mask;
+ if (decoder->continuous_period) {
+- if (masked_timestamp != decoder->last_masked_timestamp)
++ if (masked_timestamp > decoder->last_masked_timestamp)
+ return 1;
+ } else {
+ timestamp += 1;
+ masked_timestamp = timestamp & decoder->period_mask;
+- if (masked_timestamp != decoder->last_masked_timestamp) {
++ if (masked_timestamp > decoder->last_masked_timestamp) {
+ decoder->last_masked_timestamp = masked_timestamp;
+ decoder->continuous_period = true;
+ }
+ }
++
++ if (masked_timestamp < decoder->last_masked_timestamp)
++ return decoder->period_ticks;
++
+ return decoder->period_ticks - (timestamp - masked_timestamp);
+ }
+
+@@ -926,7 +932,10 @@ static void intel_pt_sample_insn(struct intel_pt_decoder *decoder)
+ case INTEL_PT_PERIOD_TICKS:
+ timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
+ masked_timestamp = timestamp & decoder->period_mask;
+- decoder->last_masked_timestamp = masked_timestamp;
++ if (masked_timestamp > decoder->last_masked_timestamp)
++ decoder->last_masked_timestamp = masked_timestamp;
++ else
++ decoder->last_masked_timestamp += decoder->period_ticks;
+ break;
+ case INTEL_PT_PERIOD_NONE:
+ case INTEL_PT_PERIOD_MTC:
+@@ -1254,7 +1263,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
+ return -ENOENT;
+ }
+ decoder->tnt.count -= 1;
+- if (!decoder->tnt.count)
++ if (decoder->tnt.count)
++ decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
++ else
+ decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+ decoder->tnt.payload <<= 1;
+ decoder->state.from_ip = decoder->ip;
+@@ -1285,7 +1296,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
+
+ if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
+ decoder->tnt.count -= 1;
+- if (!decoder->tnt.count)
++ if (decoder->tnt.count)
++ decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
++ else
+ decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+ if (decoder->tnt.payload & BIT63) {
+ decoder->tnt.payload <<= 1;
+@@ -1305,8 +1318,11 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
+ return 0;
+ }
+ decoder->ip += intel_pt_insn.length;
+- if (!decoder->tnt.count)
++ if (!decoder->tnt.count) {
++ decoder->sample_timestamp = decoder->timestamp;
++ decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
+ return -EAGAIN;
++ }
+ decoder->tnt.payload <<= 1;
+ continue;
+ }
+@@ -2365,6 +2381,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
+ err = intel_pt_walk_trace(decoder);
+ break;
+ case INTEL_PT_STATE_TNT:
++ case INTEL_PT_STATE_TNT_CONT:
+ err = intel_pt_walk_tnt(decoder);
+ if (err == -EAGAIN)
+ err = intel_pt_walk_trace(decoder);
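
Two related repairs here. INTEL_PT_STATE_TNT_CONT separates "resuming inside a partially consumed TNT packet" from a fresh TNT walk, so only the former counts as a point where the estimated timestamp is trustworthy for sampling. The period logic then stops trusting a timestamp estimate that runs backwards, which it can, since it is built from instruction counts rather than a clock. Worked numbers (illustrative, with period_ticks = 64 and period_mask = ~63):

	/*
	 * last_masked_timestamp = 192
	 * estimated timestamp   = 150  ->  masked_timestamp = 128
	 *
	 * old: masked != last, so intel_pt_next_period() returned 1 and a
	 *      sample fired after nearly every instruction until the
	 *      estimate caught back up to 192;
	 * new: masked < last returns a full period (64), and
	 *      intel_pt_sample_insn() only ever moves last_masked_timestamp
	 *      forward, stepping it by period_ticks when the estimate lags.
	 */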
+diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
+index 4715cfba20dc..93f99c6b7d79 100644
+--- a/tools/testing/selftests/kvm/dirty_log_test.c
++++ b/tools/testing/selftests/kvm/dirty_log_test.c
+@@ -288,8 +288,11 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
+ #endif
+ max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1;
+ guest_page_size = (1ul << guest_page_shift);
+- /* 1G of guest page sized pages */
+- guest_num_pages = (1ul << (30 - guest_page_shift));
++ /*
++ * A little more than 1G of guest page sized pages. Cover the
++ * case where the size is not aligned to 64 pages.
++ */
++ guest_num_pages = (1ul << (30 - guest_page_shift)) + 3;
+ host_page_size = getpagesize();
+ host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
+ !!((guest_num_pages * guest_page_size) % host_page_size);
+@@ -359,7 +362,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
+ kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
+ #ifdef USE_CLEAR_DIRTY_LOG
+ kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
+- DIV_ROUND_UP(host_num_pages, 64) * 64);
++ host_num_pages);
+ #endif
+ vm_dirty_log_verify(bmap);
+ iteration++;
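
The test now sizes the memslot so its page count is deliberately not a multiple of 64, and passes the exact count instead of rounding up. With 4 KiB guest pages (guest_page_shift = 12, an assumption for the arithmetic):

	guest_num_pages = (1 << (30 - 12)) + 3 = 262144 + 3 = 262147
	262147 mod 64 = 3

so the KVM_CLEAR_DIRTY_LOG call exercises precisely the case the new kernel rule permits: an unaligned num_pages that runs to the end of the slot. The old DIV_ROUND_UP(..., 64) * 64 rounding would instead ask to clear pages past the slot boundary and fail.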
+diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+index 264425f75806..9a21e912097c 100644
+--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
++++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+@@ -141,7 +141,13 @@ int main(int argc, char *argv[])
+
+ free(hv_cpuid_entries);
+
+- vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
++ rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
++
++ if (rv) {
++ fprintf(stderr,
++ "Enlightened VMCS is unsupported, skip related test\n");
++ goto vm_free;
++ }
+
+ hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
+ if (!hv_cpuid_entries)
+@@ -151,6 +157,7 @@ int main(int argc, char *argv[])
+
+ free(hv_cpuid_entries);
+
++vm_free:
+ kvm_vm_free(vm);
+
+ return 0;
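
In this selftest harness the plain vcpu_ioctl() asserts that the ioctl succeeded, so on hosts without Enlightened VMCS support (non-Intel hardware, or a KVM lacking the capability) the whole test died instead of skipping. The underscore-prefixed variant reports the result instead. A sketch of the convention being relied on here (signatures approximate, inferred from context):

	/* asserts on failure */
	void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
			unsigned long cmd, void *arg);
	/* returns the raw ioctl() result */
	int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
			unsigned long cmd, void *arg);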
+diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
+index 9c486fad3f9f..6202b4f718ce 100644
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -949,7 +949,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init)
+ {
+- unsigned int i;
++ unsigned int i, ret;
+ int phys_target = kvm_target_cpu();
+
+ if (init->target != phys_target)
+@@ -984,9 +984,14 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+ vcpu->arch.target = phys_target;
+
+ /* Now we know what it is, we can reset it. */
+- return kvm_reset_vcpu(vcpu);
+-}
++ ret = kvm_reset_vcpu(vcpu);
++ if (ret) {
++ vcpu->arch.target = -1;
++ bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
++ }
+
++ return ret;
++}
+
+ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_init *init)
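
Previously a failed kvm_reset_vcpu() left vcpu->arch.target set, so the vcpu looked initialized even though its requested feature set had been rejected; the rollback returns it to the uninitialized state so userspace can retry the init cleanly. An illustrative userspace fragment (vcpu_fd comes from KVM_CREATE_VCPU; the feature chosen here is hypothetical):

	struct kvm_vcpu_init init = {
		.target = KVM_ARM_TARGET_GENERIC_V8,
	};

	init.features[0] = 1 << KVM_ARM_VCPU_PMU_V3;	/* may be unsupported */
	if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0) {
		/* with this fix the vcpu is guaranteed to be back in the
		 * "no target" state, so retrying with fewer features works */
		init.features[0] = 0;
		ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
	}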
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index b5238bcba72c..4cc0d8a46891 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1241,7 +1241,7 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
+ if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+ return -EINVAL;
+
+- if ((log->first_page & 63) || (log->num_pages & 63))
++ if (log->first_page & 63)
+ return -EINVAL;
+
+ slots = __kvm_memslots(kvm, as_id);
+@@ -1254,8 +1254,9 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
+ n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
+
+ if (log->first_page > memslot->npages ||
+- log->num_pages > memslot->npages - log->first_page)
+- return -EINVAL;
++ log->num_pages > memslot->npages - log->first_page ||
++ (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
++ return -EINVAL;
+
+ *flush = false;
+ dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
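
Net effect of the relaxed check: first_page must still be 64-aligned, and num_pages must be too, except when first_page + num_pages reaches the end of the memslot. With the selftest's numbers:

	npages = 262147, first_page = 262144, num_pages = 3

	first_page & 63 == 0                          still required
	num_pages < npages - first_page  ->  3 < 3    false, so the
	    (num_pages & 63) test is skipped

	result: -EINVAL before this change, accepted after it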