author     Alice Ferrazzi <alicef@gentoo.org>  2017-06-14 15:32:16 +0100
committer  Alice Ferrazzi <alicef@gentoo.org>  2017-06-14 15:35:06 +0100
commit     fedff6dac940be4dd044e8439321065d7f6ead7c (patch)
tree       2aaa84f551322a6a0f08466b3e445e55ed9a2c99
parent     linux kernel 4.11.4 (diff)
download   linux-patches-fedff6dac940be4dd044e8439321065d7f6ead7c.tar.gz
           linux-patches-fedff6dac940be4dd044e8439321065d7f6ead7c.tar.bz2
           linux-patches-fedff6dac940be4dd044e8439321065d7f6ead7c.zip
linux kernel 4.11.5 (4.11-7)
-rw-r--r--  0000_README              |    4
-rw-r--r--  1004_linux-4.11.5.patch  | 5240
2 files changed, 5244 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 1afb734b..12a51296 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch: 1003_linux-4.11.4.patch
From: http://www.kernel.org
Desc: Linux 4.11.4
+Patch: 1004_linux-4.11.5.patch
+From: http://www.kernel.org
+Desc: Linux 4.11.5
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1004_linux-4.11.5.patch b/1004_linux-4.11.5.patch
new file mode 100644
index 00000000..d590c0e1
--- /dev/null
+++ b/1004_linux-4.11.5.patch
@@ -0,0 +1,5240 @@
+diff --git a/Makefile b/Makefile
+index 741814dca844..5b3a81d3262e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 11
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/arm/boot/dts/keystone-k2l-netcp.dtsi b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
+index b6f26824e83a..66f615a74118 100644
+--- a/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
++++ b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
+@@ -137,8 +137,8 @@ netcp: netcp@26000000 {
+ /* NetCP address range */
+ ranges = <0 0x26000000 0x1000000>;
+
+- clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>, <&clkosr>;
+- clock-names = "pa_clk", "ethss_clk", "cpts", "osr_clk";
++ clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>;
++ clock-names = "pa_clk", "ethss_clk", "cpts";
+ dma-coherent;
+
+ ti,navigator-dmas = <&dma_gbe 0>,
+diff --git a/arch/arm/boot/dts/keystone-k2l.dtsi b/arch/arm/boot/dts/keystone-k2l.dtsi
+index b58e7ebc0919..148650406cf7 100644
+--- a/arch/arm/boot/dts/keystone-k2l.dtsi
++++ b/arch/arm/boot/dts/keystone-k2l.dtsi
+@@ -232,6 +232,14 @@
+ };
+ };
+
++ osr: sram@70000000 {
++ compatible = "mmio-sram";
++ reg = <0x70000000 0x10000>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++ clocks = <&clkosr>;
++ };
++
+ dspgpio0: keystone_dsp_gpio@02620240 {
+ compatible = "ti,keystone-dsp-gpio";
+ gpio-controller;
+diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
+index bf89c919efc1..bd0ee7fc304c 100644
+--- a/arch/arm/kvm/init.S
++++ b/arch/arm/kvm/init.S
+@@ -95,7 +95,6 @@ __do_hyp_init:
+ @ - Write permission implies XN: disabled
+ @ - Instruction cache: enabled
+ @ - Data/Unified cache: enabled
+- @ - Memory alignment checks: enabled
+ @ - MMU: enabled (this code must be run from an identity mapping)
+ mrc p15, 4, r0, c1, c0, 0 @ HSCR
+ ldr r2, =HSCTLR_MASK
+@@ -103,8 +102,8 @@ __do_hyp_init:
+ mrc p15, 0, r1, c1, c0, 0 @ SCTLR
+ ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
+ and r1, r1, r2
+- ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) )
+- THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) )
++ ARM( ldr r2, =(HSCTLR_M) )
++ THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) )
+ orr r1, r1, r2
+ orr r0, r0, r1
+ mcr p15, 4, r0, c1, c0, 0 @ HSCR
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 3837b096e1a6..b97bc12812ab 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -879,6 +879,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
+ pmd_t *pmd;
+
+ pud = stage2_get_pud(kvm, cache, addr);
++ if (!pud)
++ return NULL;
++
+ if (stage2_pud_none(*pud)) {
+ if (!cache)
+ return NULL;
+diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
+index ac24b6e798b1..2d3e155b185f 100644
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -138,6 +138,10 @@
+ #define SCTLR_ELx_A (1 << 1)
+ #define SCTLR_ELx_M 1
+
++#define SCTLR_EL2_RES1 ((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \
++ (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
++ (1 << 28) | (1 << 29))
++
+ #define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
+ SCTLR_ELx_SA | SCTLR_ELx_I)
+
+diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
+index 6b29d3d9e1f2..4bbff904169d 100644
+--- a/arch/arm64/kvm/hyp-init.S
++++ b/arch/arm64/kvm/hyp-init.S
+@@ -102,10 +102,13 @@ __do_hyp_init:
+ tlbi alle2
+ dsb sy
+
+- mrs x4, sctlr_el2
+- and x4, x4, #SCTLR_ELx_EE // preserve endianness of EL2
+- ldr x5, =SCTLR_ELx_FLAGS
+- orr x4, x4, x5
++ /*
++ * Preserve all the RES1 bits while setting the default flags,
++ * as well as the EE bit on BE. Drop the A flag since the compiler
++ * is allowed to generate unaligned accesses.
++ */
++ ldr x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
++CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
+ msr sctlr_el2, x4
+ isb
+
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index b68e10fc453d..0f88015f3bfa 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -120,7 +120,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
+ struct thread_info *ti = task_thread_info(p);
+ struct pt_regs *childregs, *regs = current_pt_regs();
+ unsigned long childksp;
+- p->set_child_tid = p->clear_child_tid = NULL;
+
+ childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
+
+diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
+index f8da545854f9..106859ae27ff 100644
+--- a/arch/openrisc/kernel/process.c
++++ b/arch/openrisc/kernel/process.c
+@@ -167,8 +167,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
+
+ top_of_kernel_stack = sp;
+
+- p->set_child_tid = p->clear_child_tid = NULL;
+-
+ /* Locate userspace context on stack... */
+ sp -= STACK_FRAME_OVERHEAD; /* redzone */
+ sp -= sizeof(struct pt_regs);
+diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
+index 8b3b46b7b0f2..329771559cbb 100644
+--- a/arch/powerpc/include/asm/topology.h
++++ b/arch/powerpc/include/asm/topology.h
+@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
+ extern int sysfs_add_device_to_node(struct device *dev, int nid);
+ extern void sysfs_remove_device_from_node(struct device *dev, int nid);
+
++static inline int early_cpu_to_node(int cpu)
++{
++ int nid;
++
++ nid = numa_cpu_lookup_table[cpu];
++
++ /*
++ * Fall back to node 0 if nid is unset (it should be, except bugs).
++ * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
++ */
++ return (nid < 0) ? 0 : nid;
++}
+ #else
+
++static inline int early_cpu_to_node(int cpu) { return 0; }
++
+ static inline void dump_numa_cpu_topology(void) {}
+
+ static inline int sysfs_add_device_to_node(struct device *dev, int nid)
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index baae104b16c7..2ad725ef4368 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1666,6 +1666,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
+ #ifdef CONFIG_VSX
+ current->thread.used_vsr = 0;
+ #endif
++ current->thread.load_fp = 0;
+ memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
+ current->thread.fp_save_area = NULL;
+ #ifdef CONFIG_ALTIVEC
+@@ -1674,6 +1675,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
+ current->thread.vr_save_area = NULL;
+ current->thread.vrsave = 0;
+ current->thread.used_vr = 0;
++ current->thread.load_vec = 0;
+ #endif /* CONFIG_ALTIVEC */
+ #ifdef CONFIG_SPE
+ memset(current->thread.evr, 0, sizeof(current->thread.evr));
+@@ -1685,6 +1687,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
+ current->thread.tm_tfhar = 0;
+ current->thread.tm_texasr = 0;
+ current->thread.tm_tfiar = 0;
++ current->thread.load_tm = 0;
+ #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+ }
+ EXPORT_SYMBOL(start_thread);
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index f997154dfc41..7183c43d4e81 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -650,7 +650,7 @@ void __init emergency_stack_init(void)
+
+ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
+ {
+- return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
++ return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
+ __pa(MAX_DMA_ADDRESS));
+ }
+
+@@ -661,7 +661,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
+
+ static int pcpu_cpu_distance(unsigned int from, unsigned int to)
+ {
+- if (cpu_to_node(from) == cpu_to_node(to))
++ if (early_cpu_to_node(from) == early_cpu_to_node(to))
+ return LOCAL_DISTANCE;
+ else
+ return REMOTE_DISTANCE;
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index e104c71ea44a..1fb162ba9d1c 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
+ for (i = 0; i < num_lmbs; i++) {
+ lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
+ lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
++ lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
+ lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
+ }
+
+@@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
+ for (i = 0; i < num_lmbs; i++) {
+ lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
+ lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
++ lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
+ lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
+ }
+
+diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
+index ef470b470b04..6afddae2fb47 100644
+--- a/arch/powerpc/sysdev/simple_gpio.c
++++ b/arch/powerpc/sysdev/simple_gpio.c
+@@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+
+ static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
+ {
+- struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
++ struct u8_gpio_chip *u8_gc =
++ container_of(mm_gc, struct u8_gpio_chip, mm_gc);
+
+ u8_gc->data = in_8(mm_gc->regs);
+ }
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 3db2543733a5..1384d4c9764b 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -192,9 +192,9 @@ config NR_CPUS
+ int "Maximum number of CPUs"
+ depends on SMP
+ range 2 32 if SPARC32
+- range 2 1024 if SPARC64
++ range 2 4096 if SPARC64
+ default 32 if SPARC32
+- default 64 if SPARC64
++ default 4096 if SPARC64
+
+ source kernel/Kconfig.hz
+
+diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
+index f7de0dbc38af..83b36a5371ff 100644
+--- a/arch/sparc/include/asm/mmu_64.h
++++ b/arch/sparc/include/asm/mmu_64.h
+@@ -52,7 +52,7 @@
+ #define CTX_NR_MASK TAG_CONTEXT_BITS
+ #define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK)
+
+-#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
++#define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT)
+ #define CTX_VALID(__ctx) \
+ (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
+ #define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
+diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
+index 22fede6eba11..2cddcda4f85f 100644
+--- a/arch/sparc/include/asm/mmu_context_64.h
++++ b/arch/sparc/include/asm/mmu_context_64.h
+@@ -19,13 +19,8 @@ extern spinlock_t ctx_alloc_lock;
+ extern unsigned long tlb_context_cache;
+ extern unsigned long mmu_context_bmap[];
+
++DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
+ void get_new_mmu_context(struct mm_struct *mm);
+-#ifdef CONFIG_SMP
+-void smp_new_mmu_context_version(void);
+-#else
+-#define smp_new_mmu_context_version() do { } while (0)
+-#endif
+-
+ int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+ void destroy_context(struct mm_struct *mm);
+
+@@ -76,8 +71,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
+ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
+ {
+ unsigned long ctx_valid, flags;
+- int cpu;
++ int cpu = smp_processor_id();
+
++ per_cpu(per_cpu_secondary_mm, cpu) = mm;
+ if (unlikely(mm == &init_mm))
+ return;
+
+@@ -123,7 +119,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
+ * for the first time, we must flush that context out of the
+ * local TLB.
+ */
+- cpu = smp_processor_id();
+ if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+ __flush_tlb_mm(CTX_HWBITS(mm->context),
+@@ -133,26 +128,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
+ }
+
+ #define deactivate_mm(tsk,mm) do { } while (0)
+-
+-/* Activate a new MM instance for the current task. */
+-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
+-{
+- unsigned long flags;
+- int cpu;
+-
+- spin_lock_irqsave(&mm->context.lock, flags);
+- if (!CTX_VALID(mm->context))
+- get_new_mmu_context(mm);
+- cpu = smp_processor_id();
+- if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
+- cpumask_set_cpu(cpu, mm_cpumask(mm));
+-
+- load_secondary_context(mm);
+- __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
+- tsb_context_switch(mm);
+- spin_unlock_irqrestore(&mm->context.lock, flags);
+-}
+-
++#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
+ #endif /* !(__ASSEMBLY__) */
+
+ #endif /* !(__SPARC64_MMU_CONTEXT_H) */
+diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
+index 266937030546..522b43db2ed3 100644
+--- a/arch/sparc/include/asm/pil.h
++++ b/arch/sparc/include/asm/pil.h
+@@ -20,7 +20,6 @@
+ #define PIL_SMP_CALL_FUNC 1
+ #define PIL_SMP_RECEIVE_SIGNAL 2
+ #define PIL_SMP_CAPTURE 3
+-#define PIL_SMP_CTX_NEW_VERSION 4
+ #define PIL_DEVICE_IRQ 5
+ #define PIL_SMP_CALL_FUNC_SNGL 6
+ #define PIL_DEFERRED_PCR_WORK 7
+diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
+index 8174f6cdbbbb..9dca7a892978 100644
+--- a/arch/sparc/include/asm/vio.h
++++ b/arch/sparc/include/asm/vio.h
+@@ -327,6 +327,7 @@ struct vio_dev {
+ int compat_len;
+
+ u64 dev_no;
++ u64 id;
+
+ unsigned long channel_id;
+
+diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
+index 4d0248aa0928..99dd133a029f 100644
+--- a/arch/sparc/kernel/irq_64.c
++++ b/arch/sparc/kernel/irq_64.c
+@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
+ {
+ #ifdef CONFIG_SMP
+ unsigned long page;
++ void *mondo, *p;
+
+- BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
++ BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
++
++ /* Make sure mondo block is 64byte aligned */
++ p = kzalloc(127, GFP_KERNEL);
++ if (!p) {
++ prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
++ prom_halt();
++ }
++ mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
++ tb->cpu_mondo_block_pa = __pa(mondo);
+
+ page = get_zeroed_page(GFP_KERNEL);
+ if (!page) {
+- prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
++ prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
+ prom_halt();
+ }
+
+- tb->cpu_mondo_block_pa = __pa(page);
+- tb->cpu_list_pa = __pa(page + 64);
++ tb->cpu_list_pa = __pa(page);
+ #endif
+ }
+
+diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
+index c9804551262c..6ae1e77be0bf 100644
+--- a/arch/sparc/kernel/kernel.h
++++ b/arch/sparc/kernel/kernel.h
+@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
+ /* smp_64.c */
+ void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
+ void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
+-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
+ void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
+ void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
+
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index b3bc0ac757cc..fdf31040a7dc 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -964,37 +964,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+ preempt_enable();
+ }
+
+-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
+-{
+- struct mm_struct *mm;
+- unsigned long flags;
+-
+- clear_softint(1 << irq);
+-
+- /* See if we need to allocate a new TLB context because
+- * the version of the one we are using is now out of date.
+- */
+- mm = current->active_mm;
+- if (unlikely(!mm || (mm == &init_mm)))
+- return;
+-
+- spin_lock_irqsave(&mm->context.lock, flags);
+-
+- if (unlikely(!CTX_VALID(mm->context)))
+- get_new_mmu_context(mm);
+-
+- spin_unlock_irqrestore(&mm->context.lock, flags);
+-
+- load_secondary_context(mm);
+- __flush_tlb_mm(CTX_HWBITS(mm->context),
+- SECONDARY_CONTEXT);
+-}
+-
+-void smp_new_mmu_context_version(void)
+-{
+- smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
+-}
+-
+ #ifdef CONFIG_KGDB
+ void kgdb_roundup_cpus(unsigned long flags)
+ {
+diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
+index 10689cfd0ad4..07c0df924960 100644
+--- a/arch/sparc/kernel/tsb.S
++++ b/arch/sparc/kernel/tsb.S
+@@ -455,13 +455,16 @@ __tsb_context_switch:
+ .type copy_tsb,#function
+ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
+ * %o2=new_tsb_base, %o3=new_tsb_size
++ * %o4=page_size_shift
+ */
+ sethi %uhi(TSB_PASS_BITS), %g7
+ srlx %o3, 4, %o3
+- add %o0, %o1, %g1 /* end of old tsb */
++ add %o0, %o1, %o1 /* end of old tsb */
+ sllx %g7, 32, %g7
+ sub %o3, 1, %o3 /* %o3 == new tsb hash mask */
+
++ mov %o4, %g1 /* page_size_shift */
++
+ 661: prefetcha [%o0] ASI_N, #one_read
+ .section .tsb_phys_patch, "ax"
+ .word 661b
+@@ -486,9 +489,9 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
+ /* This can definitely be computed faster... */
+ srlx %o0, 4, %o5 /* Build index */
+ and %o5, 511, %o5 /* Mask index */
+- sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
++ sllx %o5, %g1, %o5 /* Put into vaddr position */
+ or %o4, %o5, %o4 /* Full VADDR. */
+- srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */
++ srlx %o4, %g1, %o4 /* Shift down to create index */
+ and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */
+ sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */
+ TSB_STORE(%o2 + %o4, %g2) /* Store TAG */
+@@ -496,7 +499,7 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
+ TSB_STORE(%o2 + %o4, %g3) /* Store TTE */
+
+ 80: add %o0, 16, %o0
+- cmp %o0, %g1
++ cmp %o0, %o1
+ bne,pt %xcc, 90b
+ nop
+
+diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
+index 7bd8f6556352..efe93ab4a9c0 100644
+--- a/arch/sparc/kernel/ttable_64.S
++++ b/arch/sparc/kernel/ttable_64.S
+@@ -50,7 +50,7 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
+ tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
+ tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
+ tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
+-tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4)
++tl0_irq4: BTRAP(0x44)
+ #else
+ tl0_irq1: BTRAP(0x41)
+ tl0_irq2: BTRAP(0x42)
+diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
+index f6bb857254fc..075d38980dee 100644
+--- a/arch/sparc/kernel/vio.c
++++ b/arch/sparc/kernel/vio.c
+@@ -302,13 +302,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
+ if (!id) {
+ dev_set_name(&vdev->dev, "%s", bus_id_name);
+ vdev->dev_no = ~(u64)0;
++ vdev->id = ~(u64)0;
+ } else if (!cfg_handle) {
+ dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
+ vdev->dev_no = *id;
++ vdev->id = ~(u64)0;
+ } else {
+ dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
+ *cfg_handle, *id);
+ vdev->dev_no = *cfg_handle;
++ vdev->id = *id;
+ }
+
+ vdev->dev.parent = parent;
+@@ -351,27 +354,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
+ (void) vio_create_one(hp, node, &root_vdev->dev);
+ }
+
++struct vio_md_node_query {
++ const char *type;
++ u64 dev_no;
++ u64 id;
++};
++
+ static int vio_md_node_match(struct device *dev, void *arg)
+ {
++ struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
+ struct vio_dev *vdev = to_vio_dev(dev);
+
+- if (vdev->mp == (u64) arg)
+- return 1;
++ if (vdev->dev_no != query->dev_no)
++ return 0;
++ if (vdev->id != query->id)
++ return 0;
++ if (strcmp(vdev->type, query->type))
++ return 0;
+
+- return 0;
++ return 1;
+ }
+
+ static void vio_remove(struct mdesc_handle *hp, u64 node)
+ {
++ const char *type;
++ const u64 *id, *cfg_handle;
++ u64 a;
++ struct vio_md_node_query query;
+ struct device *dev;
+
+- dev = device_find_child(&root_vdev->dev, (void *) node,
++ type = mdesc_get_property(hp, node, "device-type", NULL);
++ if (!type) {
++ type = mdesc_get_property(hp, node, "name", NULL);
++ if (!type)
++ type = mdesc_node_name(hp, node);
++ }
++
++ query.type = type;
++
++ id = mdesc_get_property(hp, node, "id", NULL);
++ cfg_handle = NULL;
++ mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
++ u64 target;
++
++ target = mdesc_arc_target(hp, a);
++ cfg_handle = mdesc_get_property(hp, target,
++ "cfg-handle", NULL);
++ if (cfg_handle)
++ break;
++ }
++
++ if (!id) {
++ query.dev_no = ~(u64)0;
++ query.id = ~(u64)0;
++ } else if (!cfg_handle) {
++ query.dev_no = *id;
++ query.id = ~(u64)0;
++ } else {
++ query.dev_no = *cfg_handle;
++ query.id = *id;
++ }
++
++ dev = device_find_child(&root_vdev->dev, &query,
+ vio_md_node_match);
+ if (dev) {
+ printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
+
+ device_unregister(dev);
+ put_device(dev);
++ } else {
++ if (!id)
++ printk(KERN_ERR "VIO: Removed unknown %s node.\n",
++ type);
++ else if (!cfg_handle)
++ printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
++ type, *id);
++ else
++ printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
++ type, *cfg_handle, *id);
+ }
+ }
+
+diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
+index 69912d2f8b54..07c03e72d812 100644
+--- a/arch/sparc/lib/Makefile
++++ b/arch/sparc/lib/Makefile
+@@ -15,6 +15,7 @@ lib-$(CONFIG_SPARC32) += copy_user.o locks.o
+ lib-$(CONFIG_SPARC64) += atomic_64.o
+ lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
+ lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
++lib-$(CONFIG_SPARC64) += multi3.o
+
+ lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
+ lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
+diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
+new file mode 100644
+index 000000000000..d6b6c97fe3c7
+--- /dev/null
++++ b/arch/sparc/lib/multi3.S
+@@ -0,0 +1,35 @@
++#include <linux/linkage.h>
++#include <asm/export.h>
++
++ .text
++ .align 4
++ENTRY(__multi3) /* %o0 = u, %o1 = v */
++ mov %o1, %g1
++ srl %o3, 0, %g4
++ mulx %g4, %g1, %o1
++ srlx %g1, 0x20, %g3
++ mulx %g3, %g4, %g5
++ sllx %g5, 0x20, %o5
++ srl %g1, 0, %g4
++ sub %o1, %o5, %o5
++ srlx %o5, 0x20, %o5
++ addcc %g5, %o5, %g5
++ srlx %o3, 0x20, %o5
++ mulx %g4, %o5, %g4
++ mulx %g3, %o5, %o5
++ sethi %hi(0x80000000), %g3
++ addcc %g5, %g4, %g5
++ srlx %g5, 0x20, %g5
++ add %g3, %g3, %g3
++ movcc %xcc, %g0, %g3
++ addcc %o5, %g5, %o5
++ sllx %g4, 0x20, %g4
++ add %o1, %g4, %o1
++ add %o5, %g3, %g2
++ mulx %g1, %o2, %g1
++ add %g1, %g2, %g1
++ mulx %o0, %o3, %o0
++ retl
++ add %g1, %o0, %o0
++ENDPROC(__multi3)
++EXPORT_SYMBOL(__multi3)
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 0cda653ae007..3c40ebd50f92 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string)
+ }
+
+ if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
+- pr_warn("hugepagesz=%llu not supported by MMU.\n",
++ hugetlb_bad_size();
++ pr_err("hugepagesz=%llu not supported by MMU.\n",
+ hugepage_size);
+ goto out;
+ }
+@@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
+
+ /* get_new_mmu_context() uses "cache + 1". */
+ DEFINE_SPINLOCK(ctx_alloc_lock);
+-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
++unsigned long tlb_context_cache = CTX_FIRST_VERSION;
+ #define MAX_CTX_NR (1UL << CTX_NR_BITS)
+ #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
+ DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
++DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
++
++static void mmu_context_wrap(void)
++{
++ unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
++ unsigned long new_ver, new_ctx, old_ctx;
++ struct mm_struct *mm;
++ int cpu;
++
++ bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
++
++ /* Reserve kernel context */
++ set_bit(0, mmu_context_bmap);
++
++ new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
++ if (unlikely(new_ver == 0))
++ new_ver = CTX_FIRST_VERSION;
++ tlb_context_cache = new_ver;
++
++ /*
++ * Make sure that any new mm that are added into per_cpu_secondary_mm,
++ * are going to go through get_new_mmu_context() path.
++ */
++ mb();
++
++ /*
++ * Updated versions to current on those CPUs that had valid secondary
++ * contexts
++ */
++ for_each_online_cpu(cpu) {
++ /*
++ * If a new mm is stored after we took this mm from the array,
++ * it will go into get_new_mmu_context() path, because we
++ * already bumped the version in tlb_context_cache.
++ */
++ mm = per_cpu(per_cpu_secondary_mm, cpu);
++
++ if (unlikely(!mm || mm == &init_mm))
++ continue;
++
++ old_ctx = mm->context.sparc64_ctx_val;
++ if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
++ new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
++ set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
++ mm->context.sparc64_ctx_val = new_ctx;
++ }
++ }
++}
+
+ /* Caller does TLB context flushing on local CPU if necessary.
+ * The caller also ensures that CTX_VALID(mm->context) is false.
+@@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm)
+ {
+ unsigned long ctx, new_ctx;
+ unsigned long orig_pgsz_bits;
+- int new_version;
+
+ spin_lock(&ctx_alloc_lock);
++retry:
++ /* wrap might have happened, test again if our context became valid */
++ if (unlikely(CTX_VALID(mm->context)))
++ goto out;
+ orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
+ ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
+ new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
+- new_version = 0;
+ if (new_ctx >= (1 << CTX_NR_BITS)) {
+ new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
+ if (new_ctx >= ctx) {
+- int i;
+- new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
+- CTX_FIRST_VERSION;
+- if (new_ctx == 1)
+- new_ctx = CTX_FIRST_VERSION;
+-
+- /* Don't call memset, for 16 entries that's just
+- * plain silly...
+- */
+- mmu_context_bmap[0] = 3;
+- mmu_context_bmap[1] = 0;
+- mmu_context_bmap[2] = 0;
+- mmu_context_bmap[3] = 0;
+- for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
+- mmu_context_bmap[i + 0] = 0;
+- mmu_context_bmap[i + 1] = 0;
+- mmu_context_bmap[i + 2] = 0;
+- mmu_context_bmap[i + 3] = 0;
+- }
+- new_version = 1;
+- goto out;
++ mmu_context_wrap();
++ goto retry;
+ }
+ }
++ if (mm->context.sparc64_ctx_val)
++ cpumask_clear(mm_cpumask(mm));
+ mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
+ new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
+-out:
+ tlb_context_cache = new_ctx;
+ mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
++out:
+ spin_unlock(&ctx_alloc_lock);
+-
+- if (unlikely(new_version))
+- smp_new_mmu_context_version();
+ }
+
+ static int numa_enabled = 1;
+diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
+index bedf08b22a47..0d4b998c7d7b 100644
+--- a/arch/sparc/mm/tsb.c
++++ b/arch/sparc/mm/tsb.c
+@@ -496,7 +496,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
+ extern void copy_tsb(unsigned long old_tsb_base,
+ unsigned long old_tsb_size,
+ unsigned long new_tsb_base,
+- unsigned long new_tsb_size);
++ unsigned long new_tsb_size,
++ unsigned long page_size_shift);
+ unsigned long old_tsb_base = (unsigned long) old_tsb;
+ unsigned long new_tsb_base = (unsigned long) new_tsb;
+
+@@ -504,7 +505,9 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
+ old_tsb_base = __pa(old_tsb_base);
+ new_tsb_base = __pa(new_tsb_base);
+ }
+- copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
++ copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
++ tsb_index == MM_TSB_BASE ?
++ PAGE_SHIFT : REAL_HPAGE_SHIFT);
+ }
+
+ mm->context.tsb_block[tsb_index].tsb = new_tsb;
+diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
+index 5d2fd6cd3189..fcf4d27a38fb 100644
+--- a/arch/sparc/mm/ultra.S
++++ b/arch/sparc/mm/ultra.S
+@@ -971,11 +971,6 @@ xcall_capture:
+ wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
+ retry
+
+- .globl xcall_new_mmu_context_version
+-xcall_new_mmu_context_version:
+- wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
+- retry
+-
+ #ifdef CONFIG_KGDB
+ .globl xcall_kgdb_capture
+ xcall_kgdb_capture:
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index 8325d8a09ab0..91eb813e8917 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -619,6 +619,9 @@ int __init save_microcode_in_initrd_intel(void)
+
+ show_saved_mc();
+
++ /* initrd is going away, clear patch ptr. */
++ intel_ucode_patch = NULL;
++
+ return 0;
+ }
+
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 14f65a5f938e..2a7835932b71 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token)
+ */
+ rcu_irq_exit();
+ native_safe_halt();
+- rcu_irq_enter();
+ local_irq_disable();
++ rcu_irq_enter();
+ }
+ }
+ if (!n.halted)
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index efde6cc50875..3665f755baa3 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -780,18 +780,20 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
+ static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
+ {
+ struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
+- int j, nent = vcpu->arch.cpuid_nent;
++ struct kvm_cpuid_entry2 *ej;
++ int j = i;
++ int nent = vcpu->arch.cpuid_nent;
+
+ e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
+ /* when no next entry is found, the current entry[i] is reselected */
+- for (j = i + 1; ; j = (j + 1) % nent) {
+- struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
+- if (ej->function == e->function) {
+- ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+- return j;
+- }
+- }
+- return 0; /* silence gcc, even though control never reaches here */
++ do {
++ j = (j + 1) % nent;
++ ej = &vcpu->arch.cpuid_entries[j];
++ } while (ej->function != e->function);
++
++ ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
++
++ return j;
+ }
+
+ /* find an entry with matching function, matching index (if needed), and that
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index ac7810513d0e..732c0270a489 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3683,12 +3683,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
+ return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
+ }
+
+-static bool can_do_async_pf(struct kvm_vcpu *vcpu)
++bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
+ {
+ if (unlikely(!lapic_in_kernel(vcpu) ||
+ kvm_event_needs_reinjection(vcpu)))
+ return false;
+
++ if (is_guest_mode(vcpu))
++ return false;
++
+ return kvm_x86_ops->interrupt_allowed(vcpu);
+ }
+
+@@ -3704,7 +3707,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
+ if (!async)
+ return false; /* *pfn has correct page already */
+
+- if (!prefault && can_do_async_pf(vcpu)) {
++ if (!prefault && kvm_can_do_async_pf(vcpu)) {
+ trace_kvm_try_async_get_page(gva, gfn);
+ if (kvm_find_async_pf_gfn(vcpu, gfn)) {
+ trace_kvm_async_pf_doublefault(gva, gfn);
+diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
+index ddc56e91f2e4..c92834c55c59 100644
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -75,6 +75,7 @@ enum {
+ int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
+ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
+ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
++bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
+
+ static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+ {
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index a4a2bae7c274..6557c790c8c1 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8623,8 +8623,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
+ if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
+ return true;
+ else
+- return !kvm_event_needs_reinjection(vcpu) &&
+- kvm_x86_ops->interrupt_allowed(vcpu);
++ return kvm_can_do_async_pf(vcpu);
+ }
+
+ void kvm_arch_start_assignment(struct kvm *kvm)
+diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
+index 04ca8764f0c0..8bf27323f7a3 100644
+--- a/arch/x86/platform/efi/efi-bgrt.c
++++ b/arch/x86/platform/efi/efi-bgrt.c
+@@ -36,6 +36,9 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
+ if (acpi_disabled)
+ return;
+
++ if (!efi_enabled(EFI_BOOT))
++ return;
++
+ if (table->length < sizeof(bgrt_tab)) {
+ pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n",
+ table->length, sizeof(bgrt_tab));
+diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
+index cdfe8c628959..393a0c0288d1 100644
+--- a/arch/x86/platform/efi/quirks.c
++++ b/arch/x86/platform/efi/quirks.c
+@@ -358,6 +358,9 @@ void __init efi_free_boot_services(void)
+ free_bootmem_late(start, size);
+ }
+
++ if (!num_entries)
++ return;
++
+ new_size = efi.memmap.desc_size * num_entries;
+ new_phys = efi_memmap_alloc(num_entries);
+ if (!new_phys) {
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index bbe7ee00bd3d..a981cc916a13 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -74,7 +74,7 @@ static void blkg_free(struct blkcg_gq *blkg)
+ blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
+
+ if (blkg->blkcg != &blkcg_root)
+- blk_exit_rl(&blkg->rl);
++ blk_exit_rl(blkg->q, &blkg->rl);
+
+ blkg_rwstat_exit(&blkg->stat_ios);
+ blkg_rwstat_exit(&blkg->stat_bytes);
+diff --git a/block/blk-core.c b/block/blk-core.c
+index d772c221cc17..1fb277501017 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -643,13 +643,19 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
+ if (!rl->rq_pool)
+ return -ENOMEM;
+
++ if (rl != &q->root_rl)
++ WARN_ON_ONCE(!blk_get_queue(q));
++
+ return 0;
+ }
+
+-void blk_exit_rl(struct request_list *rl)
++void blk_exit_rl(struct request_queue *q, struct request_list *rl)
+ {
+- if (rl->rq_pool)
++ if (rl->rq_pool) {
+ mempool_destroy(rl->rq_pool);
++ if (rl != &q->root_rl)
++ blk_put_queue(q);
++ }
+ }
+
+ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 37f0b3ad635e..6a13d0924a66 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -819,7 +819,7 @@ static void blk_release_queue(struct kobject *kobj)
+ elevator_exit(q, q->elevator);
+ }
+
+- blk_exit_rl(&q->root_rl);
++ blk_exit_rl(q, &q->root_rl);
+
+ if (q->queue_tags)
+ __blk_queue_free_tags(q);
+diff --git a/block/blk.h b/block/blk.h
+index d1ea4bd9b9a3..8701d0a74eb1 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -59,7 +59,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
+
+ int blk_init_rl(struct request_list *rl, struct request_queue *q,
+ gfp_t gfp_mask);
+-void blk_exit_rl(struct request_list *rl);
++void blk_exit_rl(struct request_queue *q, struct request_list *rl);
+ void init_request_from_bio(struct request *req, struct bio *bio);
+ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+ struct bio *bio);
+diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
+index 440b95ee593c..2762505664a6 100644
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -38,9 +38,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
+ static const int cfq_hist_divisor = 4;
+
+ /*
+- * offset from end of service tree
++ * offset from end of queue service tree for idle class
+ */
+ #define CFQ_IDLE_DELAY (NSEC_PER_SEC / 5)
++/* offset from end of group service tree under time slice mode */
++#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
++/* offset from end of group service under IOPS mode */
++#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
+
+ /*
+ * below this threshold, we consider thinktime immediate
+@@ -1362,6 +1366,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
+ cfqg->vfraction = max_t(unsigned, vfr, 1);
+ }
+
++static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
++{
++ if (!iops_mode(cfqd))
++ return CFQ_SLICE_MODE_GROUP_DELAY;
++ else
++ return CFQ_IOPS_MODE_GROUP_DELAY;
++}
++
+ static void
+ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
+ {
+@@ -1381,7 +1393,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
+ n = rb_last(&st->rb);
+ if (n) {
+ __cfqg = rb_entry_cfqg(n);
+- cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
++ cfqg->vdisktime = __cfqg->vdisktime +
++ cfq_get_cfqg_vdisktime_delay(cfqd);
+ } else
+ cfqg->vdisktime = st->min_vdisktime;
+ cfq_group_service_tree_add(st, cfqg);
+diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
+index d3a989e718f5..3cd6e12cfc46 100644
+--- a/crypto/asymmetric_keys/public_key.c
++++ b/crypto/asymmetric_keys/public_key.c
+@@ -141,7 +141,7 @@ int public_key_verify_signature(const struct public_key *pkey,
+ * signature and returns that to us.
+ */
+ ret = crypto_akcipher_verify(req);
+- if (ret == -EINPROGRESS) {
++ if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
+ wait_for_completion(&compl.completion);
+ ret = compl.err;
+ }
+diff --git a/crypto/drbg.c b/crypto/drbg.c
+index 8a4d98b4adba..5efc2b22a831 100644
+--- a/crypto/drbg.c
++++ b/crypto/drbg.c
+@@ -1768,9 +1768,8 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+- ret = wait_for_completion_interruptible(
+- &drbg->ctr_completion);
+- if (!ret && !drbg->ctr_async_err) {
++ wait_for_completion(&drbg->ctr_completion);
++ if (!drbg->ctr_async_err) {
+ reinit_completion(&drbg->ctr_completion);
+ break;
+ }
+diff --git a/crypto/gcm.c b/crypto/gcm.c
+index b7ad808be3d4..3841b5eafa7e 100644
+--- a/crypto/gcm.c
++++ b/crypto/gcm.c
+@@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+
+ err = crypto_skcipher_encrypt(&data->req);
+ if (err == -EINPROGRESS || err == -EBUSY) {
+- err = wait_for_completion_interruptible(
+- &data->result.completion);
+- if (!err)
+- err = data->result.err;
++ wait_for_completion(&data->result.completion);
++ err = data->result.err;
+ }
+
+ if (err)
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 2fc52407306c..c69954023c2e 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1364,6 +1364,40 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
+ {}
+ #endif
+
++/*
++ * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected
++ * as DUMMY, or detected but eventually get a "link down" and never get up
++ * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the
++ * port_map may hold a value of 0x00.
++ *
++ * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports
++ * and can significantly reduce the occurrence of the problem.
++ *
++ * https://bugzilla.kernel.org/show_bug.cgi?id=189471
++ */
++static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv,
++ struct pci_dev *pdev)
++{
++ static const struct dmi_system_id sysids[] = {
++ {
++ .ident = "Acer Switch Alpha 12",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271")
++ },
++ },
++ { }
++ };
++
++ if (dmi_check_system(sysids)) {
++ dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n");
++ if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) {
++ hpriv->port_map = 0x7;
++ hpriv->cap = 0xC734FF02;
++ }
++ }
++}
++
+ #ifdef CONFIG_ARM64
+ /*
+ * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
+@@ -1636,6 +1670,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ "online status unreliable, applying workaround\n");
+ }
+
++
++ /* Acer SA5-271 workaround modifies private_data */
++ acer_sa5_271_workaround(hpriv, pdev);
++
+ /* CAP.NP sometimes indicate the index of the last enabled
+ * port, at other times, that of the last possible port, so
+ * determining the maximum port number requires looking at
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index 00ce26d0c047..6eed4a72d328 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -4067,7 +4067,6 @@ static int mv_platform_probe(struct platform_device *pdev)
+ struct ata_host *host;
+ struct mv_host_priv *hpriv;
+ struct resource *res;
+- void __iomem *mmio;
+ int n_ports = 0, irq = 0;
+ int rc;
+ int port;
+@@ -4086,9 +4085,8 @@ static int mv_platform_probe(struct platform_device *pdev)
+ * Get the register base first
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- mmio = devm_ioremap_resource(&pdev->dev, res);
+- if (IS_ERR(mmio))
+- return PTR_ERR(mmio);
++ if (res == NULL)
++ return -EINVAL;
+
+ /* allocate host */
+ if (pdev->dev.of_node) {
+@@ -4132,7 +4130,12 @@ static int mv_platform_probe(struct platform_device *pdev)
+ hpriv->board_idx = chip_soc;
+
+ host->iomap = NULL;
+- hpriv->base = mmio - SATAHC0_REG_BASE;
++ hpriv->base = devm_ioremap(&pdev->dev, res->start,
++ resource_size(res));
++ if (!hpriv->base)
++ return -ENOMEM;
++
++ hpriv->base -= SATAHC0_REG_BASE;
+
+ hpriv->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hpriv->clk))
+diff --git a/drivers/char/mem.c b/drivers/char/mem.c
+index 6e0cbe092220..593a8818aca9 100644
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -343,7 +343,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
+ phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+
+ /* It's illegal to wrap around the end of the physical address space. */
+- if (offset + (phys_addr_t)size < offset)
++ if (offset + (phys_addr_t)size - 1 < offset)
+ return -EINVAL;
+
+ if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 0ab024918907..2291e6224ed3 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1,6 +1,9 @@
+ /*
+ * random.c -- A strong random number generator
+ *
++ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
++ * Rights Reserved.
++ *
+ * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
+ *
+ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
+@@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
+ static struct crng_state **crng_node_pool __read_mostly;
+ #endif
+
++static void invalidate_batched_entropy(void);
++
+ static void crng_initialize(struct crng_state *crng)
+ {
+ int i;
+@@ -799,6 +804,7 @@ static int crng_fast_load(const char *cp, size_t len)
+ cp++; crng_init_cnt++; len--;
+ }
+ if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
++ invalidate_batched_entropy();
+ crng_init = 1;
+ wake_up_interruptible(&crng_init_wait);
+ pr_notice("random: fast init done\n");
+@@ -836,6 +842,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
+ memzero_explicit(&buf, sizeof(buf));
+ crng->init_time = jiffies;
+ if (crng == &primary_crng && crng_init < 2) {
++ invalidate_batched_entropy();
+ crng_init = 2;
+ process_random_ready_list();
+ wake_up_interruptible(&crng_init_wait);
+@@ -2019,6 +2026,7 @@ struct batched_entropy {
+ };
+ unsigned int position;
+ };
++static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
+
+ /*
+ * Get a random word for internal kernel use only. The quality of the random
+@@ -2029,6 +2037,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+ u64 get_random_u64(void)
+ {
+ u64 ret;
++ bool use_lock = crng_init < 2;
++ unsigned long flags;
+ struct batched_entropy *batch;
+
+ #if BITS_PER_LONG == 64
+@@ -2041,11 +2051,15 @@ u64 get_random_u64(void)
+ #endif
+
+ batch = &get_cpu_var(batched_entropy_u64);
++ if (use_lock)
++ read_lock_irqsave(&batched_entropy_reset_lock, flags);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+ extract_crng((u8 *)batch->entropy_u64);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u64[batch->position++];
++ if (use_lock)
++ read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+ put_cpu_var(batched_entropy_u64);
+ return ret;
+ }
+@@ -2055,22 +2069,45 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+ u32 get_random_u32(void)
+ {
+ u32 ret;
++ bool use_lock = crng_init < 2;
++ unsigned long flags;
+ struct batched_entropy *batch;
+
+ if (arch_get_random_int(&ret))
+ return ret;
+
+ batch = &get_cpu_var(batched_entropy_u32);
++ if (use_lock)
++ read_lock_irqsave(&batched_entropy_reset_lock, flags);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+ extract_crng((u8 *)batch->entropy_u32);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u32[batch->position++];
++ if (use_lock)
++ read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+ put_cpu_var(batched_entropy_u32);
+ return ret;
+ }
+ EXPORT_SYMBOL(get_random_u32);
+
++/* It's important to invalidate all potential batched entropy that might
++ * be stored before the crng is initialized, which we can do lazily by
++ * simply resetting the counter to zero so that it's re-extracted on the
++ * next usage. */
++static void invalidate_batched_entropy(void)
++{
++ int cpu;
++ unsigned long flags;
++
++ write_lock_irqsave(&batched_entropy_reset_lock, flags);
++ for_each_possible_cpu (cpu) {
++ per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
++ per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
++ }
++ write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
++}
++
+ /**
+ * randomize_page - Generate a random, page aligned address
+ * @start: The smallest acceptable address the caller will take.
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 0e3f6496524d..26b643d57847 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -2468,6 +2468,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+ if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
+ list_empty(&cpufreq_policy_list)) {
+ /* if all ->init() calls failed, unregister */
++ ret = -ENODEV;
+ pr_debug("%s: No CPU initialized for driver %s\n", __func__,
+ driver_data->name);
+ goto err_if_unreg;
+diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
+index d37e8dda8079..ec240592f5c8 100644
+--- a/drivers/dma/ep93xx_dma.c
++++ b/drivers/dma/ep93xx_dma.c
+@@ -201,6 +201,7 @@ struct ep93xx_dma_engine {
+ struct dma_device dma_dev;
+ bool m2m;
+ int (*hw_setup)(struct ep93xx_dma_chan *);
++ void (*hw_synchronize)(struct ep93xx_dma_chan *);
+ void (*hw_shutdown)(struct ep93xx_dma_chan *);
+ void (*hw_submit)(struct ep93xx_dma_chan *);
+ int (*hw_interrupt)(struct ep93xx_dma_chan *);
+@@ -323,6 +324,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
+ | M2P_CONTROL_ENABLE;
+ m2p_set_control(edmac, control);
+
++ edmac->buffer = 0;
++
+ return 0;
+ }
+
+@@ -331,21 +334,27 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
+ return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
+ }
+
+-static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
++static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
+ {
++ unsigned long flags;
+ u32 control;
+
++ spin_lock_irqsave(&edmac->lock, flags);
+ control = readl(edmac->regs + M2P_CONTROL);
+ control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+ m2p_set_control(edmac, control);
++ spin_unlock_irqrestore(&edmac->lock, flags);
+
+ while (m2p_channel_state(edmac) >= M2P_STATE_ON)
+- cpu_relax();
++ schedule();
++}
+
++static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
++{
+ m2p_set_control(edmac, 0);
+
+- while (m2p_channel_state(edmac) == M2P_STATE_STALL)
+- cpu_relax();
++ while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
++ dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
+ }
+
+ static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
+@@ -1161,6 +1170,26 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
+ }
+
+ /**
++ * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
++ * current context.
++ * @chan: channel
++ *
++ * Synchronizes the DMA channel termination to the current context. When this
++ * function returns it is guaranteed that all transfers for previously issued
++ * descriptors have stopped and and it is safe to free the memory associated
++ * with them. Furthermore it is guaranteed that all complete callback functions
++ * for a previously submitted descriptor have finished running and it is safe to
++ * free resources accessed from within the complete callbacks.
++ */
++static void ep93xx_dma_synchronize(struct dma_chan *chan)
++{
++ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
++
++ if (edmac->edma->hw_synchronize)
++ edmac->edma->hw_synchronize(edmac);
++}
++
++/**
+ * ep93xx_dma_terminate_all - terminate all transactions
+ * @chan: channel
+ *
+@@ -1323,6 +1352,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
+ dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
+ dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
+ dma_dev->device_config = ep93xx_dma_slave_config;
++ dma_dev->device_synchronize = ep93xx_dma_synchronize;
+ dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
+ dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
+ dma_dev->device_tx_status = ep93xx_dma_tx_status;
+@@ -1340,6 +1370,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
+ } else {
+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
++ edma->hw_synchronize = m2p_hw_synchronize;
+ edma->hw_setup = m2p_hw_setup;
+ edma->hw_shutdown = m2p_hw_shutdown;
+ edma->hw_submit = m2p_hw_submit;
+diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
+index a28a01fcba67..f3e211f8f6c5 100644
+--- a/drivers/dma/mv_xor_v2.c
++++ b/drivers/dma/mv_xor_v2.c
+@@ -161,6 +161,7 @@ struct mv_xor_v2_device {
+ struct mv_xor_v2_sw_desc *sw_desq;
+ int desc_size;
+ unsigned int npendings;
++ unsigned int hw_queue_idx;
+ };
+
+ /**
+@@ -214,18 +215,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
+ }
+
+ /*
+- * Return the next available index in the DESQ.
+- */
+-static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
+-{
+- /* read the index for the next available descriptor in the DESQ */
+- u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
+-
+- return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
+- & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
+-}
+-
+-/*
+ * notify the engine of new descriptors, and update the available index.
+ */
+ static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
+@@ -257,22 +246,6 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
+ return MV_XOR_V2_EXT_DESC_SIZE;
+ }
+
+-/*
+- * Set the IMSG threshold
+- */
+-static inline
+-void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
+-{
+- u32 reg;
+-
+- reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
+-
+- reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+- reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+-
+- writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
+-}
+-
+ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
+ {
+ struct mv_xor_v2_device *xor_dev = data;
+@@ -288,12 +261,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
+ if (!ndescs)
+ return IRQ_NONE;
+
+- /*
+- * Update IMSG threshold, to disable new IMSG interrupts until
+- * end of the tasklet
+- */
+- mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
+-
+ /* schedule a tasklet to handle descriptors callbacks */
+ tasklet_schedule(&xor_dev->irq_tasklet);
+
+@@ -306,7 +273,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
+ static dma_cookie_t
+ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
+ {
+- int desq_ptr;
+ void *dest_hw_desc;
+ dma_cookie_t cookie;
+ struct mv_xor_v2_sw_desc *sw_desc =
+@@ -322,15 +288,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
+ spin_lock_bh(&xor_dev->lock);
+ cookie = dma_cookie_assign(tx);
+
+- /* get the next available slot in the DESQ */
+- desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
+-
+ /* copy the HW descriptor from the SW descriptor to the DESQ */
+- dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
++ dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
+
+ memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
+
+ xor_dev->npendings++;
++ xor_dev->hw_queue_idx++;
++ if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
++ xor_dev->hw_queue_idx = 0;
+
+ spin_unlock_bh(&xor_dev->lock);
+
+@@ -344,6 +310,7 @@ static struct mv_xor_v2_sw_desc *
+ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
+ {
+ struct mv_xor_v2_sw_desc *sw_desc;
++ bool found = false;
+
+ /* Lock the channel */
+ spin_lock_bh(&xor_dev->lock);
+@@ -355,19 +322,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
+ return NULL;
+ }
+
+- /* get a free SW descriptor from the SW DESQ */
+- sw_desc = list_first_entry(&xor_dev->free_sw_desc,
+- struct mv_xor_v2_sw_desc, free_list);
++ list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
++ if (async_tx_test_ack(&sw_desc->async_tx)) {
++ found = true;
++ break;
++ }
++ }
++
++ if (!found) {
++ spin_unlock_bh(&xor_dev->lock);
++ return NULL;
++ }
++
+ list_del(&sw_desc->free_list);
+
+ /* Release the channel */
+ spin_unlock_bh(&xor_dev->lock);
+
+- /* set the async tx descriptor */
+- dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
+- sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+- async_tx_ack(&sw_desc->async_tx);
+-
+ return sw_desc;
+ }
+
+@@ -389,6 +360,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
+ __func__, len, &src, &dest, flags);
+
+ sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
++ if (!sw_desc)
++ return NULL;
+
+ sw_desc->async_tx.flags = flags;
+
+@@ -443,6 +416,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+ __func__, src_cnt, len, &dest, flags);
+
+ sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
++ if (!sw_desc)
++ return NULL;
+
+ sw_desc->async_tx.flags = flags;
+
+@@ -491,6 +466,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
+ container_of(chan, struct mv_xor_v2_device, dmachan);
+
+ sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
++ if (!sw_desc)
++ return NULL;
+
+ /* set the HW descriptor */
+ hw_descriptor = &sw_desc->hw_desc;
+@@ -554,7 +531,6 @@ static void mv_xor_v2_tasklet(unsigned long data)
+ {
+ struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
+ int pending_ptr, num_of_pending, i;
+- struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
+ struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
+
+ dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
+@@ -562,17 +538,10 @@ static void mv_xor_v2_tasklet(unsigned long data)
+ /* get the pending descriptors parameters */
+ num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
+
+- /* next HW descriptor */
+- next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
+-
+ /* loop over free descriptors */
+ for (i = 0; i < num_of_pending; i++) {
+-
+- if (pending_ptr > MV_XOR_V2_DESC_NUM)
+- pending_ptr = 0;
+-
+- if (next_pending_sw_desc != NULL)
+- next_pending_hw_desc++;
++ struct mv_xor_v2_descriptor *next_pending_hw_desc =
++ xor_dev->hw_desq_virt + pending_ptr;
+
+ /* get the SW descriptor related to the HW descriptor */
+ next_pending_sw_desc =
+@@ -608,15 +577,14 @@ static void mv_xor_v2_tasklet(unsigned long data)
+
+ /* increment the next descriptor */
+ pending_ptr++;
++ if (pending_ptr >= MV_XOR_V2_DESC_NUM)
++ pending_ptr = 0;
+ }
+
+ if (num_of_pending != 0) {
+ /* free the descriptores */
+ mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
+ }
+-
+- /* Update IMSG threshold, to enable new IMSG interrupts */
+- mv_xor_v2_set_imsg_thrd(xor_dev, 0);
+ }
+
+ /*
+@@ -648,9 +616,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
+ writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
+ xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
+
+- /* enable the DMA engine */
+- writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+-
+ /*
+ * This is a temporary solution, until we activate the
+ * SMMU. Set the attributes for reading & writing data buffers
+@@ -694,6 +659,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
+ reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
+ writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
+
++ /* enable the DMA engine */
++ writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
++
+ return 0;
+ }
+
+@@ -725,6 +693,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, xor_dev);
+
++ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
++ if (ret)
++ return ret;
++
+ xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+@@ -785,8 +757,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
+
+ /* add all SW descriptors to the free list */
+ for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
+- xor_dev->sw_desq[i].idx = i;
+- list_add(&xor_dev->sw_desq[i].free_list,
++ struct mv_xor_v2_sw_desc *sw_desc =
++ xor_dev->sw_desq + i;
++ sw_desc->idx = i;
++ dma_async_tx_descriptor_init(&sw_desc->async_tx,
++ &xor_dev->dmachan);
++ sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
++ async_tx_ack(&sw_desc->async_tx);
++
++ list_add(&sw_desc->free_list,
+ &xor_dev->free_sw_desc);
+ }
+
+diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
+index 72c649713ace..31a145154e9f 100644
+--- a/drivers/dma/sh/usb-dmac.c
++++ b/drivers/dma/sh/usb-dmac.c
+@@ -117,7 +117,7 @@ struct usb_dmac {
+ #define USB_DMASWR 0x0008
+ #define USB_DMASWR_SWR (1 << 0)
+ #define USB_DMAOR 0x0060
+-#define USB_DMAOR_AE (1 << 2)
++#define USB_DMAOR_AE (1 << 1)
+ #define USB_DMAOR_DME (1 << 0)
+
+ #define USB_DMASAR 0x0000
+diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+index f97ecb49972e..167f029f5fad 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+@@ -906,6 +906,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
+ u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
+
++ /* disable mclk switching if the refresh is >120Hz, even if the
++ * blanking period would allow it
++ */
++ if (amdgpu_dpm_get_vrefresh(adev) > 120)
++ return true;
++
+ if (vblank_time < switch_limit)
+ return true;
+ else
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index b5c6bb46a425..37b8ad3e30d8 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -358,7 +358,12 @@ EXPORT_SYMBOL(drm_put_dev);
+ void drm_unplug_dev(struct drm_device *dev)
+ {
+ /* for a USB device */
+- drm_dev_unregister(dev);
++ if (drm_core_check_feature(dev, DRIVER_MODESET))
++ drm_modeset_unregister_all(dev);
++
++ drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
++ drm_minor_unregister(dev, DRM_MINOR_RENDER);
++ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+
+ mutex_lock(&drm_global_mutex);
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 5c089b3c2a7e..66dbb3c4c6d8 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -565,9 +565,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
+- ret = intel_bios_init(dev_priv);
+- if (ret)
+- DRM_INFO("failed to find VBIOS tables\n");
++ intel_bios_init(dev_priv);
+
+ /* If we have > 1 VGA cards, then we need to arbitrate access
+ * to the common VGA resources.
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 46fcd8b7080a..959e22dc94ba 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -3629,7 +3629,7 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+ extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
+
+ /* intel_bios.c */
+-int intel_bios_init(struct drm_i915_private *dev_priv);
++void intel_bios_init(struct drm_i915_private *dev_priv);
+ bool intel_bios_is_valid_vbt(const void *buf, size_t size);
+ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
+ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index e144f033f4b5..639d45c1dd2e 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -1341,6 +1341,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
+ return;
+ }
+
++/* Common defaults which may be overridden by VBT. */
+ static void
+ init_vbt_defaults(struct drm_i915_private *dev_priv)
+ {
+@@ -1377,6 +1378,18 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
+ &dev_priv->vbt.ddi_port_info[port];
+
+ info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
++ }
++}
++
++/* Defaults to initialize only if there is no VBT. */
++static void
++init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
++{
++ enum port port;
++
++ for (port = PORT_A; port < I915_MAX_PORTS; port++) {
++ struct ddi_vbt_port_info *info =
++ &dev_priv->vbt.ddi_port_info[port];
+
+ info->supports_dvi = (port != PORT_A && port != PORT_E);
+ info->supports_hdmi = info->supports_dvi;
+@@ -1462,36 +1475,35 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
+ * intel_bios_init - find VBT and initialize settings from the BIOS
+ * @dev_priv: i915 device instance
+ *
+- * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
+- * to appropriate values.
+- *
+- * Returns 0 on success, nonzero on failure.
++ * Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT
++ * was not found in ACPI OpRegion, try to find it in PCI ROM first. Also
++ * initialize some defaults if the VBT is not present at all.
+ */
+-int
+-intel_bios_init(struct drm_i915_private *dev_priv)
++void intel_bios_init(struct drm_i915_private *dev_priv)
+ {
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ const struct vbt_header *vbt = dev_priv->opregion.vbt;
+ const struct bdb_header *bdb;
+ u8 __iomem *bios = NULL;
+
+- if (HAS_PCH_NOP(dev_priv))
+- return -ENODEV;
++ if (HAS_PCH_NOP(dev_priv)) {
++ DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
++ return;
++ }
+
+ init_vbt_defaults(dev_priv);
+
++ /* If the OpRegion does not have VBT, look in PCI ROM. */
+ if (!vbt) {
+ size_t size;
+
+ bios = pci_map_rom(pdev, &size);
+ if (!bios)
+- return -1;
++ goto out;
+
+ vbt = find_vbt(bios, size);
+- if (!vbt) {
+- pci_unmap_rom(pdev, bios);
+- return -1;
+- }
++ if (!vbt)
++ goto out;
+
+ DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
+ }
+@@ -1516,10 +1528,14 @@ intel_bios_init(struct drm_i915_private *dev_priv)
+ parse_mipi_sequence(dev_priv, bdb);
+ parse_ddi_ports(dev_priv, bdb);
+
++out:
++ if (!vbt) {
++ DRM_INFO("Failed to find VBIOS tables (VBT)\n");
++ init_vbt_missing_defaults(dev_priv);
++ }
++
+ if (bios)
+ pci_unmap_rom(pdev, bios);
+-
+- return 0;
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
+index 7a5b41b1c024..999cb31ba63d 100644
+--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
++++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
+@@ -63,6 +63,7 @@
+ #include <linux/acpi.h>
+ #include <linux/device.h>
+ #include <linux/pci.h>
++#include <linux/pm_runtime.h>
+
+ #include "i915_drv.h"
+ #include <linux/delay.h>
+@@ -121,6 +122,10 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
+
+ kfree(rsc);
+
++ pm_runtime_forbid(&platdev->dev);
++ pm_runtime_set_active(&platdev->dev);
++ pm_runtime_enable(&platdev->dev);
++
+ return platdev;
+
+ err:
+diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+index 0ffb8affef35..4a81d67b0d69 100644
+--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
++++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+@@ -220,9 +220,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
+
+ mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
+ sizeof(*mdp5_state), GFP_KERNEL);
++ if (!mdp5_state)
++ return NULL;
+
+- if (mdp5_state && mdp5_state->base.fb)
+- drm_framebuffer_reference(mdp5_state->base.fb);
++ __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
+
+ return &mdp5_state->base;
+ }
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 70226eaa5cac..eba4c3e8e156 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -828,6 +828,7 @@ static struct drm_driver msm_driver = {
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
++ .gem_prime_res_obj = msm_gem_prime_res_obj,
+ .gem_prime_pin = msm_gem_prime_pin,
+ .gem_prime_unpin = msm_gem_prime_unpin,
+ .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
+diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
+index c3b14876edaa..0e56a8bb7b59 100644
+--- a/drivers/gpu/drm/msm/msm_drv.h
++++ b/drivers/gpu/drm/msm/msm_drv.h
+@@ -223,6 +223,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
+ void *msm_gem_prime_vmap(struct drm_gem_object *obj);
+ void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+ int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
++struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
+ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach, struct sg_table *sg);
+ int msm_gem_prime_pin(struct drm_gem_object *obj);
+diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
+index 60bb290700ce..13403c6da6c7 100644
+--- a/drivers/gpu/drm/msm/msm_gem_prime.c
++++ b/drivers/gpu/drm/msm/msm_gem_prime.c
+@@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
+ if (!obj->import_attach)
+ msm_gem_put_pages(obj);
+ }
++
++struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
++{
++ struct msm_gem_object *msm_obj = to_msm_bo(obj);
++
++ return msm_obj->resv;
++}
+diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+index 6a567fe347b3..820a4805916f 100644
+--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
++++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+@@ -4,6 +4,7 @@
+
+ struct nvkm_alarm {
+ struct list_head head;
++ struct list_head exec;
+ u64 timestamp;
+ void (*func)(struct nvkm_alarm *);
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+index f2a86eae0a0d..2437f7d41ca2 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+@@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
+ /* Move to completed list. We'll drop the lock before
+ * executing the callback so it can reschedule itself.
+ */
+- list_move_tail(&alarm->head, &exec);
++ list_del_init(&alarm->head);
++ list_add(&alarm->exec, &exec);
+ }
+
+ /* Shut down interrupt if no more pending alarms. */
+@@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
+ spin_unlock_irqrestore(&tmr->lock, flags);
+
+ /* Execute completed callbacks. */
+- list_for_each_entry_safe(alarm, atemp, &exec, head) {
+- list_del_init(&alarm->head);
++ list_for_each_entry_safe(alarm, atemp, &exec, exec) {
++ list_del(&alarm->exec);
+ alarm->func(alarm);
+ }
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+index b6a0806b06bf..a1c68e6a689e 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+@@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
+ return fifo_state->static_buffer;
+ else {
+ fifo_state->dynamic_buffer = vmalloc(bytes);
++ if (!fifo_state->dynamic_buffer)
++ goto out_err;
+ return fifo_state->dynamic_buffer;
+ }
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index 05fa092c942b..56b803384ea2 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -1275,11 +1275,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+ uint32_t size;
+- uint32_t backup_handle;
++ uint32_t backup_handle = 0;
+
+ if (req->multisample_count != 0)
+ return -EINVAL;
+
++ if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
++ return -EINVAL;
++
+ if (unlikely(vmw_user_surface_size == 0))
+ vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+ 128;
+@@ -1315,12 +1318,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+ ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
+ &res->backup,
+ &user_srf->backup_base);
+- if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
+- res->backup_size) {
+- DRM_ERROR("Surface backup buffer is too small.\n");
+- vmw_dmabuf_unreference(&res->backup);
+- ret = -EINVAL;
+- goto out_unlock;
++ if (ret == 0) {
++ if (res->backup->base.num_pages * PAGE_SIZE <
++ res->backup_size) {
++ DRM_ERROR("Surface backup buffer is too small.\n");
++ vmw_dmabuf_unreference(&res->backup);
++ ret = -EINVAL;
++ goto out_unlock;
++ } else {
++ backup_handle = req->buffer_handle;
++ }
+ }
+ } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
+ ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 3ac4c03ba77b..c13a4fd86b3c 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -605,6 +605,13 @@ static int coretemp_cpu_online(unsigned int cpu)
+ struct platform_data *pdata;
+
+ /*
++ * Don't execute this on resume as the offline callback did
++ * not get executed on suspend.
++ */
++ if (cpuhp_tasks_frozen)
++ return 0;
++
++ /*
+ * CPUID.06H.EAX[0] indicates whether the CPU has thermal
+ * sensors. We check this bit only, all the early CPUs
+ * without thermal sensors will be filtered out.
+@@ -654,6 +661,13 @@ static int coretemp_cpu_offline(unsigned int cpu)
+ struct temp_data *tdata;
+ int indx, target;
+
++ /*
++ * Don't execute this on suspend as the device remove locks
++ * up the machine.
++ */
++ if (cpuhp_tasks_frozen)
++ return 0;
++
+ /* If the physical CPU device does not exist, just return */
+ if (!pdev)
+ return 0;
+diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
+index 21d38c8af21e..7f4f9c4150e3 100644
+--- a/drivers/iio/adc/bcm_iproc_adc.c
++++ b/drivers/iio/adc/bcm_iproc_adc.c
+@@ -143,7 +143,7 @@ static void iproc_adc_reg_dump(struct iio_dev *indio_dev)
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA);
+ }
+
+-static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
++static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
+ {
+ u32 channel_intr_status;
+ u32 intr_status;
+@@ -167,7 +167,7 @@ static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
+ return IRQ_NONE;
+ }
+
+-static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
++static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
+ {
+ irqreturn_t retval = IRQ_NONE;
+ struct iproc_adc_priv *adc_priv;
+@@ -181,7 +181,7 @@ static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
+ adc_priv = iio_priv(indio_dev);
+
+ regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status);
+- dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n",
++ dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_handler(),INTRPT_STS:%x\n",
+ intr_status);
+
+ intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR;
+@@ -566,8 +566,8 @@ static int iproc_adc_probe(struct platform_device *pdev)
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno,
+- iproc_adc_interrupt_thread,
+ iproc_adc_interrupt_handler,
++ iproc_adc_interrupt_thread,
+ IRQF_SHARED, "iproc-adc", indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq error %d\n", ret);
+diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
+index 978e1592c2a3..4061fed93f1f 100644
+--- a/drivers/iio/industrialio-trigger.c
++++ b/drivers/iio/industrialio-trigger.c
+@@ -451,7 +451,8 @@ static ssize_t iio_trigger_write_current(struct device *dev,
+ return len;
+
+ out_trigger_put:
+- iio_trigger_put(trig);
++ if (trig)
++ iio_trigger_put(trig);
+ return ret;
+ }
+
+diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
+index b30e0c1c6cc4..67838edd8b37 100644
+--- a/drivers/iio/light/ltr501.c
++++ b/drivers/iio/light/ltr501.c
+@@ -74,9 +74,9 @@ static const int int_time_mapping[] = {100000, 50000, 200000, 400000};
+ static const struct reg_field reg_field_it =
+ REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4);
+ static const struct reg_field reg_field_als_intr =
+- REG_FIELD(LTR501_INTR, 0, 0);
+-static const struct reg_field reg_field_ps_intr =
+ REG_FIELD(LTR501_INTR, 1, 1);
++static const struct reg_field reg_field_ps_intr =
++ REG_FIELD(LTR501_INTR, 0, 0);
+ static const struct reg_field reg_field_als_rate =
+ REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2);
+ static const struct reg_field reg_field_ps_rate =
+diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
+index 020459513384..268210ea4990 100644
+--- a/drivers/iio/proximity/as3935.c
++++ b/drivers/iio/proximity/as3935.c
+@@ -40,9 +40,9 @@
+ #define AS3935_AFE_PWR_BIT BIT(0)
+
+ #define AS3935_INT 0x03
+-#define AS3935_INT_MASK 0x07
++#define AS3935_INT_MASK 0x0f
+ #define AS3935_EVENT_INT BIT(3)
+-#define AS3935_NOISE_INT BIT(1)
++#define AS3935_NOISE_INT BIT(0)
+
+ #define AS3935_DATA 0x07
+ #define AS3935_DATA_MASK 0x3F
+@@ -215,7 +215,7 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
+
+ st->buffer[0] = val & AS3935_DATA_MASK;
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
+- pf->timestamp);
++ iio_get_time_ns(indio_dev));
+ err_read:
+ iio_trigger_notify_done(indio_dev->trig);
+
+@@ -244,7 +244,7 @@ static void as3935_event_work(struct work_struct *work)
+
+ switch (val) {
+ case AS3935_EVENT_INT:
+- iio_trigger_poll(st->trig);
++ iio_trigger_poll_chained(st->trig);
+ break;
+ case AS3935_NOISE_INT:
+ dev_warn(&st->spi->dev, "noise level is too high\n");
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index e73d968023f7..f1fa1f172107 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1118,8 +1118,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
+ * Asus UX32VD 0x361f02 00, 15, 0e clickpad
+ * Avatar AVIU-145A2 0x361f00 ? clickpad
+ * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
++ * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
++ * Fujitsu LIFEBOOK E557 0x570f01 40, 14, 0c 2 hw buttons
+ * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
+ * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
+ * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
+@@ -1525,6 +1527,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
+ },
+ },
+ {
++ /* Fujitsu LIFEBOOK E546 does not work with crc_enabled == 0 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
++ },
++ },
++ {
+ /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+@@ -1546,6 +1555,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
+ },
+ },
+ {
++ /* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"),
++ },
++ },
++ {
+ /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
+index 7fa84b64a2ae..0bdc161c76cd 100644
+--- a/drivers/media/rc/rc-ir-raw.c
++++ b/drivers/media/rc/rc-ir-raw.c
+@@ -211,7 +211,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
+ */
+ void ir_raw_event_handle(struct rc_dev *dev)
+ {
+- if (!dev->raw)
++ if (!dev->raw || !dev->raw->thread)
+ return;
+
+ wake_up_process(dev->raw->thread);
+@@ -490,6 +490,7 @@ int ir_raw_event_register(struct rc_dev *dev)
+ {
+ int rc;
+ struct ir_raw_handler *handler;
++ struct task_struct *thread;
+
+ if (!dev)
+ return -EINVAL;
+@@ -507,13 +508,15 @@ int ir_raw_event_register(struct rc_dev *dev)
+ * because the event is coming from userspace
+ */
+ if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
+- dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
+- "rc%u", dev->minor);
++ thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u",
++ dev->minor);
+
+- if (IS_ERR(dev->raw->thread)) {
+- rc = PTR_ERR(dev->raw->thread);
++ if (IS_ERR(thread)) {
++ rc = PTR_ERR(thread);
+ goto out;
+ }
++
++ dev->raw->thread = thread;
+ }
+
+ mutex_lock(&ir_raw_handler_lock);
+diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
+index e7139c76f961..072064220707 100644
+--- a/drivers/misc/cxl/file.c
++++ b/drivers/misc/cxl/file.c
+@@ -158,11 +158,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
+
+ /* Do this outside the status_mutex to avoid a circular dependency with
+ * the locking in cxl_mmap_fault() */
+- if (copy_from_user(&work, uwork,
+- sizeof(struct cxl_ioctl_start_work))) {
+- rc = -EFAULT;
+- goto out;
+- }
++ if (copy_from_user(&work, uwork, sizeof(work)))
++ return -EFAULT;
+
+ mutex_lock(&ctx->status_mutex);
+ if (ctx->status != OPENED) {
+diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
+index 7ae710585267..47b777234c54 100644
+--- a/drivers/misc/cxl/native.c
++++ b/drivers/misc/cxl/native.c
+@@ -1075,13 +1075,16 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter)
+
+ void cxl_native_release_psl_err_irq(struct cxl *adapter)
+ {
+- if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
++ if (adapter->native->err_virq == 0 ||
++ adapter->native->err_virq !=
++ irq_find_mapping(NULL, adapter->native->err_hwirq))
+ return;
+
+ cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
+ cxl_unmap_irq(adapter->native->err_virq, adapter);
+ cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
+ kfree(adapter->irq_name);
++ adapter->native->err_virq = 0;
+ }
+
+ int cxl_native_register_serr_irq(struct cxl_afu *afu)
+@@ -1111,13 +1114,15 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu)
+
+ void cxl_native_release_serr_irq(struct cxl_afu *afu)
+ {
+- if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
++ if (afu->serr_virq == 0 ||
++ afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
+ return;
+
+ cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
+ cxl_unmap_irq(afu->serr_virq, afu);
+ cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
+ kfree(afu->err_irq_name);
++ afu->serr_virq = 0;
+ }
+
+ int cxl_native_register_psl_irq(struct cxl_afu *afu)
+@@ -1140,12 +1145,15 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)
+
+ void cxl_native_release_psl_irq(struct cxl_afu *afu)
+ {
+- if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
++ if (afu->native->psl_virq == 0 ||
++ afu->native->psl_virq !=
++ irq_find_mapping(NULL, afu->native->psl_hwirq))
+ return;
+
+ cxl_unmap_irq(afu->native->psl_virq, afu);
+ cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
+ kfree(afu->psl_irq_name);
++ afu->native->psl_virq = 0;
+ }
+
+ static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
+diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
+index df5f78ae3d25..8b96c074799c 100644
+--- a/drivers/misc/mei/bus.c
++++ b/drivers/misc/mei/bus.c
+@@ -763,8 +763,10 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
+ {
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
++ u8 version = mei_me_cl_ver(cldev->me_cl);
+
+- return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid);
++ return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
++ cldev->name, uuid, version);
+ }
+ static DEVICE_ATTR_RO(modalias);
+
+diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
+index 4a5e948c62df..25b7b930e02a 100644
+--- a/drivers/mtd/nand/tango_nand.c
++++ b/drivers/mtd/nand/tango_nand.c
+@@ -55,10 +55,10 @@
+ * byte 1 for other packets in the page (PKT_N, for N > 0)
+ * ERR_COUNT_PKT_N is the max error count over all but the first packet.
+ */
+-#define DECODE_OK_PKT_0(v) ((v) & BIT(7))
+-#define DECODE_OK_PKT_N(v) ((v) & BIT(15))
+ #define ERR_COUNT_PKT_0(v) (((v) >> 0) & 0x3f)
+ #define ERR_COUNT_PKT_N(v) (((v) >> 8) & 0x3f)
++#define DECODE_FAIL_PKT_0(v) (((v) & BIT(7)) == 0)
++#define DECODE_FAIL_PKT_N(v) (((v) & BIT(15)) == 0)
+
+ /* Offsets relative to pbus_base */
+ #define PBUS_CS_CTRL 0x83c
+@@ -193,6 +193,8 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf)
+ chip->ecc.strength);
+ if (res < 0)
+ mtd->ecc_stats.failed++;
++ else
++ mtd->ecc_stats.corrected += res;
+
+ bitflips = max(res, bitflips);
+ buf += pkt_size;
+@@ -202,9 +204,11 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf)
+ return bitflips;
+ }
+
+-static int decode_error_report(struct tango_nfc *nfc)
++static int decode_error_report(struct nand_chip *chip)
+ {
+ u32 status, res;
++ struct mtd_info *mtd = nand_to_mtd(chip);
++ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+
+ status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS);
+ if (status & PAGE_IS_EMPTY)
+@@ -212,10 +216,14 @@ static int decode_error_report(struct tango_nfc *nfc)
+
+ res = readl_relaxed(nfc->mem_base + ERROR_REPORT);
+
+- if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res))
+- return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
++ if (DECODE_FAIL_PKT_0(res) || DECODE_FAIL_PKT_N(res))
++ return -EBADMSG;
++
++ /* ERR_COUNT_PKT_N is max, not sum, but that's all we have */
++ mtd->ecc_stats.corrected +=
++ ERR_COUNT_PKT_0(res) + ERR_COUNT_PKT_N(res);
+
+- return -EBADMSG;
++ return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
+ }
+
+ static void tango_dma_callback(void *arg)
+@@ -280,7 +288,7 @@ static int tango_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ if (err)
+ return err;
+
+- res = decode_error_report(nfc);
++ res = decode_error_report(chip);
+ if (res < 0) {
+ chip->ecc.read_oob_raw(mtd, chip, page);
+ res = check_erased_page(chip, buf);
+@@ -661,6 +669,7 @@ static const struct of_device_id tango_nand_ids[] = {
+ { .compatible = "sigma,smp8758-nand" },
+ { /* sentinel */ }
+ };
++MODULE_DEVICE_TABLE(of, tango_nand_ids);
+
+ static struct platform_driver tango_nand_driver = {
+ .probe = tango_nand_probe,
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index a68d4889f5db..a96916a63fa3 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -1968,9 +1968,12 @@ static int bcm_sysport_probe(struct platform_device *pdev)
+ priv->num_rx_desc_words = params->num_rx_desc_words;
+
+ priv->irq0 = platform_get_irq(pdev, 0);
+- if (!priv->is_lite)
++ if (!priv->is_lite) {
+ priv->irq1 = platform_get_irq(pdev, 1);
+- priv->wol_irq = platform_get_irq(pdev, 2);
++ priv->wol_irq = platform_get_irq(pdev, 2);
++ } else {
++ priv->wol_irq = platform_get_irq(pdev, 1);
++ }
+ if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
+ dev_err(&pdev->dev, "invalid interrupts\n");
+ ret = -EINVAL;
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 9e8c06130c09..c2f9a1f93c70 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -1926,7 +1926,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+ }
+
+ /* select a non-FCoE queue */
+- return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
++ return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+ }
+
+ void bnx2x_set_num_queues(struct bnx2x *bp)
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index afb0967d2ce6..012194bc92d3 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -2217,10 +2217,14 @@ static int cxgb_up(struct adapter *adap)
+ if (err)
+ goto irq_err;
+ }
++
++ mutex_lock(&uld_mutex);
+ enable_rx(adap);
+ t4_sge_start(adap);
+ t4_intr_enable(adap);
+ adap->flags |= FULL_INIT_DONE;
++ mutex_unlock(&uld_mutex);
++
+ notify_ulds(adap, CXGB4_STATE_UP);
+ #if IS_ENABLED(CONFIG_IPV6)
+ update_clip(adap);
+diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
+index 23d82748f52b..4f33660134b8 100644
+--- a/drivers/net/ethernet/ethoc.c
++++ b/drivers/net/ethernet/ethoc.c
+@@ -739,6 +739,8 @@ static int ethoc_open(struct net_device *dev)
+ if (ret)
+ return ret;
+
++ napi_enable(&priv->napi);
++
+ ethoc_init_ring(priv, dev->mem_start);
+ ethoc_reset(priv);
+
+@@ -754,7 +756,6 @@ static int ethoc_open(struct net_device *dev)
+ priv->old_duplex = -1;
+
+ phy_start(dev->phydev);
+- napi_enable(&priv->napi);
+
+ if (netif_msg_ifup(priv)) {
+ dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
+diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+index cc065ffbe4b5..bcd4708b3745 100644
+--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
++++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+@@ -931,7 +931,7 @@ int emac_mac_up(struct emac_adapter *adpt)
+ emac_mac_config(adpt);
+ emac_mac_rx_descs_refill(adpt, &adpt->rx_q);
+
+- adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
++ adpt->phydev->irq = PHY_POLL;
+ ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
+ PHY_INTERFACE_MODE_SGMII);
+ if (ret) {
+diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+index 441c19366489..18461fcb9815 100644
+--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
++++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+@@ -13,15 +13,11 @@
+ /* Qualcomm Technologies, Inc. EMAC PHY Controller driver.
+ */
+
+-#include <linux/module.h>
+-#include <linux/of.h>
+-#include <linux/of_net.h>
+ #include <linux/of_mdio.h>
+ #include <linux/phy.h>
+ #include <linux/iopoll.h>
+ #include <linux/acpi.h>
+ #include "emac.h"
+-#include "emac-mac.h"
+
+ /* EMAC base register offsets */
+ #define EMAC_MDIO_CTRL 0x001414
+@@ -52,62 +48,10 @@
+
+ #define MDIO_WAIT_TIMES 1000
+
+-#define EMAC_LINK_SPEED_DEFAULT (\
+- EMAC_LINK_SPEED_10_HALF |\
+- EMAC_LINK_SPEED_10_FULL |\
+- EMAC_LINK_SPEED_100_HALF |\
+- EMAC_LINK_SPEED_100_FULL |\
+- EMAC_LINK_SPEED_1GB_FULL)
+-
+-/**
+- * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
+- * @adpt: the emac adapter
+- *
+- * The autopoll feature takes over the MDIO bus. In order for
+- * the PHY driver to be able to talk to the PHY over the MDIO
+- * bus, we need to temporarily disable the autopoll feature.
+- */
+-static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt)
+-{
+- u32 val;
+-
+- /* disable autopoll */
+- emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0);
+-
+- /* wait for any mdio polling to complete */
+- if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val,
+- !(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100))
+- return 0;
+-
+- /* failed to disable; ensure it is enabled before returning */
+- emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
+-
+- return -EBUSY;
+-}
+-
+-/**
+- * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
+- * @adpt: the emac adapter
+- *
+- * The EMAC has the ability to poll the external PHY on the MDIO
+- * bus for link state changes. This eliminates the need for the
+- * driver to poll the phy. If if the link state does change,
+- * the EMAC issues an interrupt on behalf of the PHY.
+- */
+-static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt)
+-{
+- emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
+-}
+-
+ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
+ {
+ struct emac_adapter *adpt = bus->priv;
+ u32 reg;
+- int ret;
+-
+- ret = emac_phy_mdio_autopoll_disable(adpt);
+- if (ret)
+- return ret;
+
+ emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
+ (addr << PHY_ADDR_SHFT));
+@@ -122,24 +66,15 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
+ if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
+ !(reg & (MDIO_START | MDIO_BUSY)),
+ 100, MDIO_WAIT_TIMES * 100))
+- ret = -EIO;
+- else
+- ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
++ return -EIO;
+
+- emac_phy_mdio_autopoll_enable(adpt);
+-
+- return ret;
++ return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
+ }
+
+ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+ {
+ struct emac_adapter *adpt = bus->priv;
+ u32 reg;
+- int ret;
+-
+- ret = emac_phy_mdio_autopoll_disable(adpt);
+- if (ret)
+- return ret;
+
+ emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
+ (addr << PHY_ADDR_SHFT));
+@@ -155,11 +90,9 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+ if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
+ !(reg & (MDIO_START | MDIO_BUSY)), 100,
+ MDIO_WAIT_TIMES * 100))
+- ret = -EIO;
++ return -EIO;
+
+- emac_phy_mdio_autopoll_enable(adpt);
+-
+- return ret;
++ return 0;
+ }
+
+ /* Configure the MDIO bus and connect the external PHY */
+diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
+index 28a8cdc36485..98a326faea29 100644
+--- a/drivers/net/ethernet/qualcomm/emac/emac.c
++++ b/drivers/net/ethernet/qualcomm/emac/emac.c
+@@ -50,19 +50,7 @@
+ #define DMAR_DLY_CNT_DEF 15
+ #define DMAW_DLY_CNT_DEF 4
+
+-#define IMR_NORMAL_MASK (\
+- ISR_ERROR |\
+- ISR_GPHY_LINK |\
+- ISR_TX_PKT |\
+- GPHY_WAKEUP_INT)
+-
+-#define IMR_EXTENDED_MASK (\
+- SW_MAN_INT |\
+- ISR_OVER |\
+- ISR_ERROR |\
+- ISR_GPHY_LINK |\
+- ISR_TX_PKT |\
+- GPHY_WAKEUP_INT)
++#define IMR_NORMAL_MASK (ISR_ERROR | ISR_OVER | ISR_TX_PKT)
+
+ #define ISR_TX_PKT (\
+ TX_PKT_INT |\
+@@ -70,10 +58,6 @@
+ TX_PKT_INT2 |\
+ TX_PKT_INT3)
+
+-#define ISR_GPHY_LINK (\
+- GPHY_LINK_UP_INT |\
+- GPHY_LINK_DOWN_INT)
+-
+ #define ISR_OVER (\
+ RFD0_UR_INT |\
+ RFD1_UR_INT |\
+@@ -187,10 +171,6 @@ irqreturn_t emac_isr(int _irq, void *data)
+ if (status & ISR_OVER)
+ net_warn_ratelimited("warning: TX/RX overflow\n");
+
+- /* link event */
+- if (status & ISR_GPHY_LINK)
+- phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT));
+-
+ exit:
+ /* enable the interrupt */
+ writel(irq->mask, adpt->base + EMAC_INT_MASK);
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 3cd7989c007d..784782da3a85 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -230,18 +230,6 @@ static void ravb_ring_free(struct net_device *ndev, int q)
+ int ring_size;
+ int i;
+
+- /* Free RX skb ringbuffer */
+- if (priv->rx_skb[q]) {
+- for (i = 0; i < priv->num_rx_ring[q]; i++)
+- dev_kfree_skb(priv->rx_skb[q][i]);
+- }
+- kfree(priv->rx_skb[q]);
+- priv->rx_skb[q] = NULL;
+-
+- /* Free aligned TX buffers */
+- kfree(priv->tx_align[q]);
+- priv->tx_align[q] = NULL;
+-
+ if (priv->rx_ring[q]) {
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+@@ -270,6 +258,18 @@ static void ravb_ring_free(struct net_device *ndev, int q)
+ priv->tx_ring[q] = NULL;
+ }
+
++ /* Free RX skb ringbuffer */
++ if (priv->rx_skb[q]) {
++ for (i = 0; i < priv->num_rx_ring[q]; i++)
++ dev_kfree_skb(priv->rx_skb[q][i]);
++ }
++ kfree(priv->rx_skb[q]);
++ priv->rx_skb[q] = NULL;
++
++ /* Free aligned TX buffers */
++ kfree(priv->tx_align[q]);
++ priv->tx_align[q] = NULL;
++
+ /* Free TX skb ringbuffer.
+ * SKBs are freed by ravb_tx_free() call above.
+ */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 4498a3861aa3..67e86ace5d92 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1950,7 +1950,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
+
+ priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
+ 0, 1,
+- (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
++ (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
+ 0, 0);
+
+ tmp_len -= TSO_MAX_BUFF_SIZE;
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 959fd12d2e67..6ebb0f559a42 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -1133,7 +1133,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
+
+ /* make enough headroom for basic scenario */
+ encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
+- if (ip_tunnel_info_af(info) == AF_INET) {
++ if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
+ encap_len += sizeof(struct iphdr);
+ dev->max_mtu -= sizeof(struct iphdr);
+ } else {
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index bdb6ae16d4a8..70dbd5a48b6b 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -59,6 +59,8 @@ static const u8 all_zeros_mac[ETH_ALEN + 2];
+
+ static int vxlan_sock_add(struct vxlan_dev *vxlan);
+
++static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
++
+ /* per-network namespace private data for this module */
+ struct vxlan_net {
+ struct list_head vxlan_list;
+@@ -740,6 +742,22 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
+ call_rcu(&f->rcu, vxlan_fdb_free);
+ }
+
++static void vxlan_dst_free(struct rcu_head *head)
++{
++ struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
++
++ dst_cache_destroy(&rd->dst_cache);
++ kfree(rd);
++}
++
++static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
++ struct vxlan_rdst *rd)
++{
++ list_del_rcu(&rd->list);
++ vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
++ call_rcu(&rd->rcu, vxlan_dst_free);
++}
++
+ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
+ union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
+ __be32 *vni, u32 *ifindex)
+@@ -864,9 +882,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
+ * otherwise destroy the fdb entry
+ */
+ if (rd && !list_is_singular(&f->remotes)) {
+- list_del_rcu(&rd->list);
+- vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
+- kfree_rcu(rd, rcu);
++ vxlan_fdb_dst_destroy(vxlan, f, rd);
+ goto out;
+ }
+
+@@ -1067,6 +1083,8 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
+ rcu_assign_pointer(vxlan->vn4_sock, NULL);
+ synchronize_net();
+
++ vxlan_vs_del_dev(vxlan);
++
+ if (__vxlan_sock_release_prep(sock4)) {
+ udp_tunnel_sock_release(sock4->sock);
+ kfree(sock4);
+@@ -2338,6 +2356,15 @@ static void vxlan_cleanup(unsigned long arg)
+ mod_timer(&vxlan->age_timer, next_timer);
+ }
+
++static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
++{
++ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
++
++ spin_lock(&vn->sock_lock);
++ hlist_del_init_rcu(&vxlan->hlist);
++ spin_unlock(&vn->sock_lock);
++}
++
+ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
+ {
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+@@ -3275,15 +3302,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
+ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
+ {
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+
+ vxlan_flush(vxlan, true);
+
+- spin_lock(&vn->sock_lock);
+- if (!hlist_unhashed(&vxlan->hlist))
+- hlist_del_rcu(&vxlan->hlist);
+- spin_unlock(&vn->sock_lock);
+-
+ gro_cells_destroy(&vxlan->gro_cells);
+ list_del(&vxlan->next);
+ unregister_netdevice_queue(dev, head);
+diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
+index 9ff790174906..1d09097dec88 100644
+--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
+@@ -1542,7 +1542,8 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
+ DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
+ },
+- }
++ },
++ {}
+ };
+
+ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
+diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
+index 84c9098cc089..aea10682e0fc 100644
+--- a/drivers/scsi/qla2xxx/qla_bsg.c
++++ b/drivers/scsi/qla2xxx/qla_bsg.c
+@@ -730,6 +730,8 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
+ return -EIO;
+ }
+
++ memset(&elreq, 0, sizeof(elreq));
++
+ elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
+ DMA_TO_DEVICE);
+@@ -795,10 +797,9 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
+
+ if (atomic_read(&vha->loop_state) == LOOP_READY &&
+ (ha->current_topology == ISP_CFG_F ||
+- ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
+- le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
+- && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
+- elreq.options == EXTERNAL_LOOPBACK) {
++ (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
++ req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
++ elreq.options == EXTERNAL_LOOPBACK) {
+ type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
+ ql_dbg(ql_dbg_user, vha, 0x701e,
+ "BSG request type: %s.\n", type);
+diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
+index 51b4179469d1..88748a6ab73f 100644
+--- a/drivers/scsi/qla2xxx/qla_dbg.c
++++ b/drivers/scsi/qla2xxx/qla_dbg.c
+@@ -1131,7 +1131,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+
+ /* Mailbox registers. */
+ mbx_reg = &reg->mailbox0;
+- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++)
++ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+
+ /* Transfer sequence registers. */
+@@ -2090,7 +2090,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+
+ /* Mailbox registers. */
+ mbx_reg = &reg->mailbox0;
+- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++)
++ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+
+ /* Transfer sequence registers. */
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index ae119018dfaa..eddbc1218a39 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -3425,6 +3425,7 @@ struct qla_hw_data {
+ uint8_t max_req_queues;
+ uint8_t max_rsp_queues;
+ uint8_t max_qpairs;
++ uint8_t num_qpairs;
+ struct qla_qpair *base_qpair;
+ struct qla_npiv_entry *npiv_info;
+ uint16_t nvram_npiv_size;
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index f9d2fe7b1ade..98a2ca4fe03c 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -7543,12 +7543,13 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
+ /* Assign available que pair id */
+ mutex_lock(&ha->mq_lock);
+ qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
+- if (qpair_id >= ha->max_qpairs) {
++ if (ha->num_qpairs >= ha->max_qpairs) {
+ mutex_unlock(&ha->mq_lock);
+ ql_log(ql_log_warn, vha, 0x0183,
+ "No resources to create additional q pair.\n");
+ goto fail_qid_map;
+ }
++ ha->num_qpairs++;
+ set_bit(qpair_id, ha->qpair_qid_map);
+ ha->queue_pair_map[qpair_id] = qpair;
+ qpair->id = qpair_id;
+@@ -7635,6 +7636,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
+ fail_msix:
+ ha->queue_pair_map[qpair_id] = NULL;
+ clear_bit(qpair_id, ha->qpair_qid_map);
++ ha->num_qpairs--;
+ mutex_unlock(&ha->mq_lock);
+ fail_qid_map:
+ kfree(qpair);
+@@ -7660,6 +7662,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
+ mutex_lock(&ha->mq_lock);
+ ha->queue_pair_map[qpair->id] = NULL;
+ clear_bit(qpair->id, ha->qpair_qid_map);
++ ha->num_qpairs--;
+ list_del(&qpair->qp_list_elem);
+ if (list_empty(&vha->qp_list))
+ vha->flags.qpairs_available = 0;
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 3203367a4f42..189f72d5aa4f 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -3282,7 +3282,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+ }
+
+ /* Enable MSI-X vector for response queue update for queue 0 */
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++ if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (ha->msixbase && ha->mqiobase &&
+ (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+ ql2xmqsupport))
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
+index a113ab3592a7..cba1fc5e8be9 100644
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -3676,15 +3676,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
+ qlt_update_host_map(vha, id);
+ }
+
+- fc_host_port_name(vha->host) =
+- wwn_to_u64(vha->port_name);
+-
+- if (qla_ini_mode_enabled(vha))
+- ql_dbg(ql_dbg_mbx, vha, 0x1018,
+- "FA-WWN portname %016llx (%x)\n",
+- fc_host_port_name(vha->host),
+- rptid_entry->vp_status);
+-
+ set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+ } else {
+@@ -4821,9 +4812,9 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
+
+ memset(mcp->mb, 0 , sizeof(mcp->mb));
+ mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
+- mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
++ /* BIT_6 specifies 64bit address */
++ mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
+ if (IS_CNA_CAPABLE(ha)) {
+- mcp->mb[1] |= BIT_15;
+ mcp->mb[2] = vha->fcoe_fcf_idx;
+ }
+ mcp->mb[16] = LSW(mreq->rcv_dma);
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 83d61d2142e9..190f609317af 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -2626,10 +2626,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ if (mem_only) {
+ if (pci_enable_device_mem(pdev))
+- goto probe_out;
++ return ret;
+ } else {
+ if (pci_enable_device(pdev))
+- goto probe_out;
++ return ret;
+ }
+
+ /* This may fail but that's ok */
+@@ -2639,7 +2639,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ if (!ha) {
+ ql_log_pci(ql_log_fatal, pdev, 0x0009,
+ "Unable to allocate memory for ha.\n");
+- goto probe_out;
++ goto disable_device;
+ }
+ ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
+ "Memory allocated for ha=%p.\n", ha);
+@@ -3258,7 +3258,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ kfree(ha);
+ ha = NULL;
+
+-probe_out:
++disable_device:
+ pci_disable_device(pdev);
+ return ret;
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
+index 8a58ef3adab4..c197972a3e2d 100644
+--- a/drivers/scsi/qla2xxx/qla_tmpl.c
++++ b/drivers/scsi/qla2xxx/qla_tmpl.c
+@@ -371,7 +371,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
+ goto done;
+ }
+
+- if (end <= start || start == 0 || end == 0) {
++ if (end < start || start == 0 || end == 0) {
+ ql_dbg(ql_dbg_misc, vha, 0xd023,
+ "%s: unusable range (start=%x end=%x)\n", __func__,
+ ent->t262.end_addr, ent->t262.start_addr);
+diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
+index 2e1bd47337fd..e6727cefde05 100644
+--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
++++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
+@@ -293,18 +293,10 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
+ size_t lmmk_size;
+ size_t lum_size;
+ int rc;
+- mm_segment_t seg;
+
+ if (!lsm)
+ return -ENODATA;
+
+- /*
+- * "Switch to kernel segment" to allow copying from kernel space by
+- * copy_{to,from}_user().
+- */
+- seg = get_fs();
+- set_fs(KERNEL_DS);
+-
+ if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
+ CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
+ lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
+@@ -406,6 +398,5 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
+ out_free:
+ kvfree(lmmk);
+ out:
+- set_fs(seg);
+ return rc;
+ }
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index a0cd56ee5fe9..ff26626d94ef 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1160,15 +1160,28 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
+ if (cmd->unknown_data_length) {
+ cmd->data_length = size;
+ } else if (size != cmd->data_length) {
+- pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
++ pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
+ " %u does not match SCSI CDB Length: %u for SAM Opcode:"
+ " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
+ cmd->data_length, size, cmd->t_task_cdb[0]);
+
+- if (cmd->data_direction == DMA_TO_DEVICE &&
+- cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+- pr_err("Rejecting underflow/overflow WRITE data\n");
+- return TCM_INVALID_CDB_FIELD;
++ if (cmd->data_direction == DMA_TO_DEVICE) {
++ if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
++ pr_err_ratelimited("Rejecting underflow/overflow"
++ " for WRITE data CDB\n");
++ return TCM_INVALID_CDB_FIELD;
++ }
++ /*
++ * Some fabric drivers like iscsi-target still expect to
++ * always reject overflow writes. Reject this case until
++ * full fabric driver level support for overflow writes
++ * is introduced tree-wide.
++ */
++ if (size > cmd->data_length) {
++ pr_err_ratelimited("Rejecting overflow for"
++ " WRITE control CDB\n");
++ return TCM_INVALID_CDB_FIELD;
++ }
+ }
+ /*
+ * Reject READ_* or WRITE_* with overflow/underflow for
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 4c26d15ad7d9..579706d36f5c 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -47,6 +47,7 @@
+ /*
+ * These are definitions for the Exar XR17V35X and XR17(C|D)15X
+ */
++#define UART_EXAR_INT0 0x80
+ #define UART_EXAR_SLEEP 0x8b /* Sleep mode */
+ #define UART_EXAR_DVID 0x8d /* Device identification */
+
+@@ -1869,17 +1870,13 @@ static int serial8250_default_handle_irq(struct uart_port *port)
+ static int exar_handle_irq(struct uart_port *port)
+ {
+ unsigned int iir = serial_port_in(port, UART_IIR);
+- int ret;
++ int ret = 0;
+
+- ret = serial8250_handle_irq(port, iir);
++ if (((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) &&
++ serial_port_in(port, UART_EXAR_INT0) != 0)
++ ret = 1;
+
+- if ((port->type == PORT_XR17V35X) ||
+- (port->type == PORT_XR17D15X)) {
+- serial_port_in(port, 0x80);
+- serial_port_in(port, 0x81);
+- serial_port_in(port, 0x82);
+- serial_port_in(port, 0x83);
+- }
++ ret |= serial8250_handle_irq(port, iir);
+
+ return ret;
+ }
+@@ -2177,6 +2174,8 @@ int serial8250_do_startup(struct uart_port *port)
+ serial_port_in(port, UART_RX);
+ serial_port_in(port, UART_IIR);
+ serial_port_in(port, UART_MSR);
++ if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X))
++ serial_port_in(port, UART_EXAR_INT0);
+
+ /*
+ * At this point, there's no way the LSR could still be 0xff;
+@@ -2335,6 +2334,8 @@ int serial8250_do_startup(struct uart_port *port)
+ serial_port_in(port, UART_RX);
+ serial_port_in(port, UART_IIR);
+ serial_port_in(port, UART_MSR);
++ if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X))
++ serial_port_in(port, UART_EXAR_INT0);
+ up->lsr_saved_flags = 0;
+ up->msr_saved_flags = 0;
+
+diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
+index 157883653256..f190a84a0246 100644
+--- a/drivers/tty/serial/ifx6x60.c
++++ b/drivers/tty/serial/ifx6x60.c
+@@ -1382,9 +1382,9 @@ static struct spi_driver ifx_spi_driver = {
+ static void __exit ifx_spi_exit(void)
+ {
+ /* unregister */
++ spi_unregister_driver(&ifx_spi_driver);
+ tty_unregister_driver(tty_drv);
+ put_tty_driver(tty_drv);
+- spi_unregister_driver(&ifx_spi_driver);
+ unregister_reboot_notifier(&ifx_modem_reboot_notifier_block);
+ }
+
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 3fe56894974a..7f9139445f2a 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -2083,7 +2083,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
+ mutex_lock(&port->mutex);
+
+ tty_dev = device_find_child(uport->dev, &match, serial_match_port);
+- if (device_may_wakeup(tty_dev)) {
++ if (tty_dev && device_may_wakeup(tty_dev)) {
+ if (!enable_irq_wake(uport->irq))
+ uport->irq_wake = 1;
+ put_device(tty_dev);
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index 9a47cc4f16a2..1df57461ece4 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -1985,11 +1985,13 @@ static int sci_startup(struct uart_port *port)
+
+ dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+
++ sci_request_dma(port);
++
+ ret = sci_request_irq(s);
+- if (unlikely(ret < 0))
++ if (unlikely(ret < 0)) {
++ sci_free_dma(port);
+ return ret;
+-
+- sci_request_dma(port);
++ }
+
+ return 0;
+ }
+@@ -2021,8 +2023,8 @@ static void sci_shutdown(struct uart_port *port)
+ }
+ #endif
+
+- sci_free_dma(port);
+ sci_free_irq(s);
++ sci_free_dma(port);
+ }
+
+ static int sci_sck_calc(struct sci_port *s, unsigned int bps,
+diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
+index 6d23eede4d8c..1c31e8a08810 100644
+--- a/drivers/usb/chipidea/debug.c
++++ b/drivers/usb/chipidea/debug.c
+@@ -294,7 +294,8 @@ static int ci_role_show(struct seq_file *s, void *data)
+ {
+ struct ci_hdrc *ci = s->private;
+
+- seq_printf(s, "%s\n", ci_role(ci)->name);
++ if (ci->role != CI_ROLE_END)
++ seq_printf(s, "%s\n", ci_role(ci)->name);
+
+ return 0;
+ }
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index f88e9157fad0..60a786c87c06 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -1984,6 +1984,7 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci)
+ int ci_hdrc_gadget_init(struct ci_hdrc *ci)
+ {
+ struct ci_role_driver *rdrv;
++ int ret;
+
+ if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
+ return -ENXIO;
+@@ -1996,7 +1997,10 @@ int ci_hdrc_gadget_init(struct ci_hdrc *ci)
+ rdrv->stop = udc_id_switch_for_host;
+ rdrv->irq = udc_irq;
+ rdrv->name = "gadget";
+- ci->roles[CI_ROLE_GADGET] = rdrv;
+
+- return udc_start(ci);
++ ret = udc_start(ci);
++ if (!ret)
++ ci->roles[CI_ROLE_GADGET] = rdrv;
++
++ return ret;
+ }
+diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
+index e77a4ed4f021..9f4a0185dd60 100644
+--- a/drivers/usb/chipidea/usbmisc_imx.c
++++ b/drivers/usb/chipidea/usbmisc_imx.c
+@@ -108,6 +108,8 @@ struct imx_usbmisc {
+ const struct usbmisc_ops *ops;
+ };
+
++static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data);
++
+ static int usbmisc_imx25_init(struct imx_usbmisc_data *data)
+ {
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+@@ -242,10 +244,15 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
+ val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
+ | MX53_USB_UHx_CTRL_ULPI_INT_EN;
+ writel(val, reg);
+- /* Disable internal 60Mhz clock */
+- reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
+- val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
+- writel(val, reg);
++ if (is_imx53_usbmisc(data)) {
++ /* Disable internal 60Mhz clock */
++ reg = usbmisc->base +
++ MX53_USB_CLKONOFF_CTRL_OFFSET;
++ val = readl(reg) |
++ MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
++ writel(val, reg);
++ }
++
+ }
+ if (data->disable_oc) {
+ reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET;
+@@ -267,10 +274,15 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
+ val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
+ | MX53_USB_UHx_CTRL_ULPI_INT_EN;
+ writel(val, reg);
+- /* Disable internal 60Mhz clock */
+- reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
+- val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
+- writel(val, reg);
++
++ if (is_imx53_usbmisc(data)) {
++ /* Disable internal 60Mhz clock */
++ reg = usbmisc->base +
++ MX53_USB_CLKONOFF_CTRL_OFFSET;
++ val = readl(reg) |
++ MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
++ writel(val, reg);
++ }
+ }
+ if (data->disable_oc) {
+ reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET;
+@@ -456,6 +468,10 @@ static const struct usbmisc_ops imx27_usbmisc_ops = {
+ .init = usbmisc_imx27_init,
+ };
+
++static const struct usbmisc_ops imx51_usbmisc_ops = {
++ .init = usbmisc_imx53_init,
++};
++
+ static const struct usbmisc_ops imx53_usbmisc_ops = {
+ .init = usbmisc_imx53_init,
+ };
+@@ -479,6 +495,13 @@ static const struct usbmisc_ops imx7d_usbmisc_ops = {
+ .set_wakeup = usbmisc_imx7d_set_wakeup,
+ };
+
++static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data)
++{
++ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
++
++ return usbmisc->ops == &imx53_usbmisc_ops;
++}
++
+ int imx_usbmisc_init(struct imx_usbmisc_data *data)
+ {
+ struct imx_usbmisc *usbmisc;
+@@ -536,7 +559,7 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = {
+ },
+ {
+ .compatible = "fsl,imx51-usbmisc",
+- .data = &imx53_usbmisc_ops,
++ .data = &imx51_usbmisc_ops,
+ },
+ {
+ .compatible = "fsl,imx53-usbmisc",
+diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
+index 4c8aacc232c0..74d57d6994da 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.c
++++ b/drivers/usb/gadget/function/f_mass_storage.c
+@@ -396,7 +396,11 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
+ /* Caller must hold fsg->lock */
+ static void wakeup_thread(struct fsg_common *common)
+ {
+- smp_wmb(); /* ensure the write of bh->state is complete */
++ /*
++ * Ensure the reading of thread_wakeup_needed
++ * and the writing of bh->state are completed
++ */
++ smp_mb();
+ /* Tell the main thread that something has happened */
+ common->thread_wakeup_needed = 1;
+ if (common->thread_task)
+@@ -627,7 +631,12 @@ static int sleep_thread(struct fsg_common *common, bool can_freeze)
+ }
+ __set_current_state(TASK_RUNNING);
+ common->thread_wakeup_needed = 0;
+- smp_rmb(); /* ensure the latest bh->state is visible */
++
++ /*
++ * Ensure the writing of thread_wakeup_needed
++ * and the reading of bh->state are completed
++ */
++ smp_mb();
+ return rc;
+ }
+
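
The wakeup_thread()/sleep_thread() change above swaps the smp_wmb()/smp_rmb() pair for full smp_mb() barriers: each side stores its own flag and then loads the other side's state, and only a full barrier forbids that store-to-load reordering. Below is a minimal user-space analogue of the pattern, with hypothetical names and C11 seq_cst fences standing in for smp_mb(); it is an illustration, not part of the patch.

/* Illustrative user-space analogue of the store->load ordering issue;
 * thread and variable names are hypothetical, not kernel code. */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int state, wakeup_needed;
static int r_waker, r_sleeper;

static void *waker(void *arg)
{
        atomic_store_explicit(&state, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);       /* like smp_mb() */
        r_waker = atomic_load_explicit(&wakeup_needed, memory_order_relaxed);
        return NULL;
}

static void *sleeper(void *arg)
{
        atomic_store_explicit(&wakeup_needed, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);       /* like smp_mb() */
        r_sleeper = atomic_load_explicit(&state, memory_order_relaxed);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, waker, NULL);
        pthread_create(&b, NULL, sleeper, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        /* With the full fences at least one thread observes the other's
         * store; with only a release/acquire pair (the wmb/rmb analogue)
         * both loads may still return 0. */
        printf("r_waker=%d r_sleeper=%d\n", r_waker, r_sleeper);
        return 0;
}
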
+diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
+index 9c7ee26ef388..bc6a9be2ccc5 100644
+--- a/drivers/usb/musb/musb_dsps.c
++++ b/drivers/usb/musb/musb_dsps.c
+@@ -245,6 +245,11 @@ static int dsps_check_status(struct musb *musb, void *unused)
+ dsps_mod_timer_optional(glue);
+ break;
+ case OTG_STATE_A_WAIT_BCON:
++ /* keep VBUS on for host-only mode */
++ if (musb->port_mode == MUSB_PORT_MODE_HOST) {
++ dsps_mod_timer_optional(glue);
++ break;
++ }
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ skip_session = 1;
+ /* fall */
+diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
+index 7a92a5e1d40c..feca75b07fdd 100644
+--- a/drivers/xen/privcmd.c
++++ b/drivers/xen/privcmd.c
+@@ -362,8 +362,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
+ st->global_error = 1;
+ }
+ }
+- st->va += PAGE_SIZE * nr;
+- st->index += nr;
++ st->va += XEN_PAGE_SIZE * nr;
++ st->index += nr / XEN_PFN_PER_PAGE;
+
+ return 0;
+ }
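
The privcmd change above accounts progress in Xen-sized frames rather than kernel pages, which only differs once PAGE_SIZE exceeds XEN_PAGE_SIZE (for instance arm64 built with 64 KiB pages over 4 KiB Xen frames). A worked example with assumed page sizes follows; the sizes and counts are illustrative, not taken from the patch.

/* Worked example of the corrected bookkeeping; 64 KiB kernel pages over
 * 4 KiB Xen frames are assumed for illustration. */
#include <stdio.h>

#define PAGE_SIZE        65536UL                        /* kernel page, assumed */
#define XEN_PAGE_SIZE    4096UL                         /* Xen frame size       */
#define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)    /* = 16                 */

int main(void)
{
        unsigned long nr = 32;          /* Xen frames handled in one callback */
        unsigned long va = 0, index = 0;

        va    += XEN_PAGE_SIZE * nr;    /* advances 128 KiB, not 2 MiB */
        index += nr / XEN_PFN_PER_PAGE; /* advances 2 kernel pages     */

        printf("va advanced by %lu bytes, index by %lu pages\n", va, index);
        return 0;
}
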
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index c4115901d906..7a945a1f076b 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -2547,7 +2547,7 @@ u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes);
+ static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
+ unsigned num_items)
+ {
+- return fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
++ return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
+ }
+
+ /*
+@@ -2557,7 +2557,7 @@ static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
+ static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
+ unsigned num_items)
+ {
+- return fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
++ return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
+ }
+
+ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
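
The two casts above matter because fs_info->nodesize is a 32-bit quantity; without the (u64) cast the whole product is evaluated in 32-bit arithmetic and can wrap before being widened to the u64 return type. A stand-alone illustration with made-up numbers:

/* Demonstrates the 32-bit wrap the (u64) cast prevents; the values are
 * illustrative only. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t nodesize  = 65536;     /* e.g. a 64 KiB metadata node */
        unsigned num_items = 10000;

        uint64_t wrapped = nodesize * 8 * 2 * num_items;            /* 32-bit math */
        uint64_t correct = (uint64_t)nodesize * 8 * 2 * num_items;  /* 64-bit math */

        printf("wrapped=%llu correct=%llu\n",
               (unsigned long long)wrapped, (unsigned long long)correct);
        /* wrapped = 1895825408 (10485760000 mod 2^32)
         * correct = 10485760000 */
        return 0;
}
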
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index be5477676cc8..ed3fefc9e5e7 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3983,6 +3983,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
+ info->space_info_kobj, "%s",
+ alloc_name(found->flags));
+ if (ret) {
++ percpu_counter_destroy(&found->total_bytes_pinned);
+ kfree(found);
+ return ret;
+ }
+@@ -4834,7 +4835,7 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
+ spin_unlock(&delayed_rsv->lock);
+
+ commit:
+- trans = btrfs_join_transaction(fs_info->fs_root);
++ trans = btrfs_join_transaction(fs_info->extent_root);
+ if (IS_ERR(trans))
+ return -ENOSPC;
+
+@@ -4852,7 +4853,7 @@ static int flush_space(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info, u64 num_bytes,
+ u64 orig_bytes, int state)
+ {
+- struct btrfs_root *root = fs_info->fs_root;
++ struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_trans_handle *trans;
+ int nr;
+ int ret = 0;
+@@ -5052,7 +5053,7 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
+ int flush_state = FLUSH_DELAYED_ITEMS_NR;
+
+ spin_lock(&space_info->lock);
+- to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
++ to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->extent_root,
+ space_info);
+ if (!to_reclaim) {
+ spin_unlock(&space_info->lock);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 5e71f1ea3391..65fc76a47094 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -7359,8 +7359,8 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
+ int found = false;
+ void **pagep = NULL;
+ struct page *page = NULL;
+- int start_idx;
+- int end_idx;
++ unsigned long start_idx;
++ unsigned long end_idx;
+
+ start_idx = start >> PAGE_SHIFT;
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 2a97dff87b96..3e36508610b7 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3413,13 +3413,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ struct ext4_sb_info *sbi;
+ struct ext4_extent_header *eh;
+ struct ext4_map_blocks split_map;
+- struct ext4_extent zero_ex;
++ struct ext4_extent zero_ex1, zero_ex2;
+ struct ext4_extent *ex, *abut_ex;
+ ext4_lblk_t ee_block, eof_block;
+ unsigned int ee_len, depth, map_len = map->m_len;
+ int allocated = 0, max_zeroout = 0;
+ int err = 0;
+- int split_flag = 0;
++ int split_flag = EXT4_EXT_DATA_VALID2;
+
+ ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
+ "block %llu, max_blocks %u\n", inode->i_ino,
+@@ -3436,7 +3436,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ ex = path[depth].p_ext;
+ ee_block = le32_to_cpu(ex->ee_block);
+ ee_len = ext4_ext_get_actual_len(ex);
+- zero_ex.ee_len = 0;
++ zero_ex1.ee_len = 0;
++ zero_ex2.ee_len = 0;
+
+ trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
+
+@@ -3576,62 +3577,52 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ if (ext4_encrypted_inode(inode))
+ max_zeroout = 0;
+
+- /* If extent is less than s_max_zeroout_kb, zeroout directly */
+- if (max_zeroout && (ee_len <= max_zeroout)) {
+- err = ext4_ext_zeroout(inode, ex);
+- if (err)
+- goto out;
+- zero_ex.ee_block = ex->ee_block;
+- zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
+- ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
+-
+- err = ext4_ext_get_access(handle, inode, path + depth);
+- if (err)
+- goto out;
+- ext4_ext_mark_initialized(ex);
+- ext4_ext_try_to_merge(handle, inode, path, ex);
+- err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+- goto out;
+- }
+-
+ /*
+- * four cases:
++ * five cases:
+ * 1. split the extent into three extents.
+- * 2. split the extent into two extents, zeroout the first half.
+- * 3. split the extent into two extents, zeroout the second half.
++ * 2. split the extent into two extents, zeroout the head of the first
++ * extent.
++ * 3. split the extent into two extents, zeroout the tail of the second
++ * extent.
+ * 4. split the extent into two extents with out zeroout.
++ * 5. no splitting needed, just possibly zeroout the head and / or the
++ * tail of the extent.
+ */
+ split_map.m_lblk = map->m_lblk;
+ split_map.m_len = map->m_len;
+
+- if (max_zeroout && (allocated > map->m_len)) {
++ if (max_zeroout && (allocated > split_map.m_len)) {
+ if (allocated <= max_zeroout) {
+- /* case 3 */
+- zero_ex.ee_block =
+- cpu_to_le32(map->m_lblk);
+- zero_ex.ee_len = cpu_to_le16(allocated);
+- ext4_ext_store_pblock(&zero_ex,
+- ext4_ext_pblock(ex) + map->m_lblk - ee_block);
+- err = ext4_ext_zeroout(inode, &zero_ex);
++ /* case 3 or 5 */
++ zero_ex1.ee_block =
++ cpu_to_le32(split_map.m_lblk +
++ split_map.m_len);
++ zero_ex1.ee_len =
++ cpu_to_le16(allocated - split_map.m_len);
++ ext4_ext_store_pblock(&zero_ex1,
++ ext4_ext_pblock(ex) + split_map.m_lblk +
++ split_map.m_len - ee_block);
++ err = ext4_ext_zeroout(inode, &zero_ex1);
+ if (err)
+ goto out;
+- split_map.m_lblk = map->m_lblk;
+ split_map.m_len = allocated;
+- } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
+- /* case 2 */
+- if (map->m_lblk != ee_block) {
+- zero_ex.ee_block = ex->ee_block;
+- zero_ex.ee_len = cpu_to_le16(map->m_lblk -
++ }
++ if (split_map.m_lblk - ee_block + split_map.m_len <
++ max_zeroout) {
++ /* case 2 or 5 */
++ if (split_map.m_lblk != ee_block) {
++ zero_ex2.ee_block = ex->ee_block;
++ zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
+ ee_block);
+- ext4_ext_store_pblock(&zero_ex,
++ ext4_ext_store_pblock(&zero_ex2,
+ ext4_ext_pblock(ex));
+- err = ext4_ext_zeroout(inode, &zero_ex);
++ err = ext4_ext_zeroout(inode, &zero_ex2);
+ if (err)
+ goto out;
+ }
+
++ split_map.m_len += split_map.m_lblk - ee_block;
+ split_map.m_lblk = ee_block;
+- split_map.m_len = map->m_lblk - ee_block + map->m_len;
+ allocated = map->m_len;
+ }
+ }
+@@ -3642,8 +3633,11 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ err = 0;
+ out:
+ /* If we have gotten a failure, don't zero out status tree */
+- if (!err)
+- err = ext4_zeroout_es(inode, &zero_ex);
++ if (!err) {
++ err = ext4_zeroout_es(inode, &zero_ex1);
++ if (!err)
++ err = ext4_zeroout_es(inode, &zero_ex2);
++ }
+ return err ? err : allocated;
+ }
+
+@@ -4883,6 +4877,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+
+ /* Zero out partial block at the edges of the range */
+ ret = ext4_zero_partial_blocks(handle, inode, offset, len);
++ if (ret >= 0)
++ ext4_update_inode_fsync_trans(handle, inode, 1);
+
+ if (file->f_flags & O_SYNC)
+ ext4_handle_sync(handle);
+@@ -5569,6 +5565,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+ ext4_handle_sync(handle);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ ext4_mark_inode_dirty(handle, inode);
++ ext4_update_inode_fsync_trans(handle, inode, 1);
+
+ out_stop:
+ ext4_journal_stop(handle);
+@@ -5742,6 +5739,8 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
+ up_write(&EXT4_I(inode)->i_data_sem);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
++ if (ret >= 0)
++ ext4_update_inode_fsync_trans(handle, inode, 1);
+
+ out_stop:
+ ext4_journal_stop(handle);
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 831fd6beebf0..bbea2dccd584 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -484,47 +484,27 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
+ num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+ nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
+ (pgoff_t)num);
+- if (nr_pages == 0) {
+- if (whence == SEEK_DATA)
+- break;
+-
+- BUG_ON(whence != SEEK_HOLE);
+- /*
+- * If this is the first time to go into the loop and
+- * offset is not beyond the end offset, it will be a
+- * hole at this offset
+- */
+- if (lastoff == startoff || lastoff < endoff)
+- found = 1;
+- break;
+- }
+-
+- /*
+- * If this is the first time to go into the loop and
+- * offset is smaller than the first page offset, it will be a
+- * hole at this offset.
+- */
+- if (lastoff == startoff && whence == SEEK_HOLE &&
+- lastoff < page_offset(pvec.pages[0])) {
+- found = 1;
++ if (nr_pages == 0)
+ break;
+- }
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+ struct buffer_head *bh, *head;
+
+ /*
+- * If the current offset is not beyond the end of given
+- * range, it will be a hole.
++ * If current offset is smaller than the page offset,
++ * there is a hole at this offset.
+ */
+- if (lastoff < endoff && whence == SEEK_HOLE &&
+- page->index > end) {
++ if (whence == SEEK_HOLE && lastoff < endoff &&
++ lastoff < page_offset(pvec.pages[i])) {
+ found = 1;
+ *offset = lastoff;
+ goto out;
+ }
+
++ if (page->index > end)
++ goto out;
++
+ lock_page(page);
+
+ if (unlikely(page->mapping != inode->i_mapping)) {
+@@ -564,20 +544,18 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
+ unlock_page(page);
+ }
+
+- /*
+- * The no. of pages is less than our desired, that would be a
+- * hole in there.
+- */
+- if (nr_pages < num && whence == SEEK_HOLE) {
+- found = 1;
+- *offset = lastoff;
++ /* The no. of pages is less than our desired, we are done. */
++ if (nr_pages < num)
+ break;
+- }
+
+ index = pvec.pages[i - 1]->index + 1;
+ pagevec_release(&pvec);
+ } while (index <= end);
+
++ if (whence == SEEK_HOLE && lastoff < endoff) {
++ found = 1;
++ *offset = lastoff;
++ }
+ out:
+ pagevec_release(&pvec);
+ return found;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 88203ae5b154..7090752ec2cb 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4165,6 +4165,8 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ ext4_mark_inode_dirty(handle, inode);
++ if (ret >= 0)
++ ext4_update_inode_fsync_trans(handle, inode, 1);
+ out_stop:
+ ext4_journal_stop(handle);
+ out_dio:
+@@ -5621,8 +5623,9 @@ static int ext4_expand_extra_isize(struct inode *inode,
+ /* No extended attributes present */
+ if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
+ header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
+- memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
+- new_extra_isize);
++ memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
++ EXT4_I(inode)->i_extra_isize, 0,
++ new_extra_isize - EXT4_I(inode)->i_extra_isize);
+ EXT4_I(inode)->i_extra_isize = new_extra_isize;
+ return 0;
+ }
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index f865b96374df..d2955daf17a4 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -659,7 +659,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
+ struct gfs2_log_header *lh;
+ unsigned int tail;
+ u32 hash;
+- int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META;
++ int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
+ struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
+ enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
+ lh = page_address(page);
+diff --git a/fs/iomap.c b/fs/iomap.c
+index 1c25ae30500e..258fb4100b1d 100644
+--- a/fs/iomap.c
++++ b/fs/iomap.c
+@@ -909,6 +909,9 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ break;
+ }
+ pos += ret;
++
++ if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
++ break;
+ } while ((count = iov_iter_count(iter)) > 0);
+ blk_finish_plug(&plug);
+
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index c453a1998e00..dadb3bf305b2 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1769,6 +1769,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
+ opdesc->op_get_currentstateid(cstate, &op->u);
+ op->status = opdesc->op_func(rqstp, cstate, &op->u);
+
++ /* Only from SEQUENCE */
++ if (cstate->status == nfserr_replay_cache) {
++ dprintk("%s NFS4.1 replay from cache\n", __func__);
++ status = op->status;
++ goto out;
++ }
+ if (!op->status) {
+ if (opdesc->op_set_currentstateid)
+ opdesc->op_set_currentstateid(cstate, &op->u);
+@@ -1779,14 +1785,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
+ if (need_wrongsec_check(rqstp))
+ op->status = check_nfsd_access(current_fh->fh_export, rqstp);
+ }
+-
+ encode_op:
+- /* Only from SEQUENCE */
+- if (cstate->status == nfserr_replay_cache) {
+- dprintk("%s NFS4.1 replay from cache\n", __func__);
+- status = op->status;
+- goto out;
+- }
+ if (op->status == nfserr_replay_me) {
+ op->replay = &cstate->replay_owner->so_replay;
+ nfsd4_encode_replay(&resp->xdr, op);
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 906ea6c93260..5b14c16d1b77 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -269,12 +269,13 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
+ temp = ovl_do_tmpfile(upperdir, stat->mode);
+ else
+ temp = ovl_lookup_temp(workdir, dentry);
+- err = PTR_ERR(temp);
+- if (IS_ERR(temp))
+- goto out1;
+-
+ err = 0;
+- if (!tmpfile)
++ if (IS_ERR(temp)) {
++ err = PTR_ERR(temp);
++ temp = NULL;
++ }
++
++ if (!err && !tmpfile)
+ err = ovl_create_real(wdir, temp, &cattr, NULL, true);
+
+ if (new_creds) {
+diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
+index aa40c242f1db..64a4d3c82125 100644
+--- a/fs/reiserfs/journal.c
++++ b/fs/reiserfs/journal.c
+@@ -1112,7 +1112,7 @@ static int flush_commit_list(struct super_block *s,
+ depth = reiserfs_write_unlock_nested(s);
+ if (reiserfs_barrier_flush(s))
+ __sync_dirty_buffer(jl->j_commit_bh,
+- REQ_PREFLUSH | REQ_FUA);
++ REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
+ else
+ sync_dirty_buffer(jl->j_commit_bh);
+ reiserfs_write_lock_nested(s, depth);
+@@ -1271,7 +1271,7 @@ static int _update_journal_header_block(struct super_block *sb,
+
+ if (reiserfs_barrier_flush(sb))
+ __sync_dirty_buffer(journal->j_header_bh,
+- REQ_PREFLUSH | REQ_FUA);
++ REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
+ else
+ sync_dirty_buffer(journal->j_header_bh);
+
+diff --git a/fs/stat.c b/fs/stat.c
+index a257b872a53d..ea6235a31ec8 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -586,6 +586,7 @@ void __inode_add_bytes(struct inode *inode, loff_t bytes)
+ inode->i_bytes -= 512;
+ }
+ }
++EXPORT_SYMBOL(__inode_add_bytes);
+
+ void inode_add_bytes(struct inode *inode, loff_t bytes)
+ {
+diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
+index a0376a2c1c29..d642cc0a8271 100644
+--- a/fs/ufs/balloc.c
++++ b/fs/ufs/balloc.c
+@@ -82,7 +82,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
+ ufs_error (sb, "ufs_free_fragments",
+ "bit already cleared for fragment %u", i);
+ }
+-
++
++ inode_sub_bytes(inode, count << uspi->s_fshift);
+ fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
+ uspi->cs_total.cs_nffree += count;
+ fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
+@@ -184,6 +185,7 @@ void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
+ ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
+ }
+ ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
++ inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
+ if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
+ ufs_clusteracct (sb, ucpi, blkno, 1);
+
+@@ -494,6 +496,20 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
+ return 0;
+ }
+
++static bool try_add_frags(struct inode *inode, unsigned frags)
++{
++ unsigned size = frags * i_blocksize(inode);
++ spin_lock(&inode->i_lock);
++ __inode_add_bytes(inode, size);
++ if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
++ __inode_sub_bytes(inode, size);
++ spin_unlock(&inode->i_lock);
++ return false;
++ }
++ spin_unlock(&inode->i_lock);
++ return true;
++}
++
+ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
+ unsigned oldcount, unsigned newcount)
+ {
+@@ -530,6 +546,9 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
+ for (i = oldcount; i < newcount; i++)
+ if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
+ return 0;
++
++ if (!try_add_frags(inode, count))
++ return 0;
+ /*
+ * Block can be extended
+ */
+@@ -647,6 +666,7 @@ static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
+ ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
+ i = uspi->s_fpb - count;
+
++ inode_sub_bytes(inode, i << uspi->s_fshift);
+ fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
+ uspi->cs_total.cs_nffree += i;
+ fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
+@@ -657,6 +677,8 @@ static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
+ result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
+ if (result == INVBLOCK)
+ return 0;
++ if (!try_add_frags(inode, count))
++ return 0;
+ for (i = 0; i < count; i++)
+ ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
+
+@@ -716,6 +738,8 @@ static u64 ufs_alloccg_block(struct inode *inode,
+ return INVBLOCK;
+ ucpi->c_rotor = result;
+ gotit:
++ if (!try_add_frags(inode, uspi->s_fpb))
++ return 0;
+ blkno = ufs_fragstoblks(result);
+ ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
+ if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
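
try_add_frags() above charges the new fragments to i_blocks first and only keeps the charge if the resulting sector count still fits the on-disk 32-bit counter; the (u32) comparison is the truncation test. The same check-and-roll-back idiom reduced to plain integers (limits simplified, locking omitted):

/* The speculative-add-then-check idiom from try_add_frags(), reduced to
 * plain integers; UFS's real limit is the 32-bit on-disk block counter
 * and the real code does this under inode->i_lock. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint64_t i_blocks = 0xfffffff0ULL;       /* close to the 32-bit limit */

static bool try_add(uint64_t nblocks)
{
        i_blocks += nblocks;                    /* charge speculatively       */
        if ((uint32_t)i_blocks != i_blocks) {   /* would truncate on disk     */
                i_blocks -= nblocks;            /* roll the charge back       */
                return false;
        }
        return true;
}

int main(void)
{
        printf("add 8:  %s\n", try_add(8)  ? "ok" : "refused");
        printf("add 64: %s\n", try_add(64) ? "ok" : "refused");
        printf("i_blocks = %llu\n", (unsigned long long)i_blocks);
        return 0;
}
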
+diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
+index 7e41aee7b69a..34f11cf0900a 100644
+--- a/fs/ufs/inode.c
++++ b/fs/ufs/inode.c
+@@ -235,7 +235,8 @@ ufs_extend_tail(struct inode *inode, u64 writes_to,
+
+ p = ufs_get_direct_data_ptr(uspi, ufsi, block);
+ tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
+- new_size, err, locked_page);
++ new_size - (lastfrag & uspi->s_fpbmask), err,
++ locked_page);
+ return tmp != 0;
+ }
+
+@@ -284,7 +285,7 @@ ufs_inode_getfrag(struct inode *inode, unsigned index,
+ goal += uspi->s_fpb;
+ }
+ tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
+- goal, uspi->s_fpb, err, locked_page);
++ goal, nfrags, err, locked_page);
+
+ if (!tmp) {
+ *err = -ENOSPC;
+@@ -402,7 +403,9 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff
+
+ if (!create) {
+ phys64 = ufs_frag_map(inode, offsets, depth);
+- goto out;
++ if (phys64)
++ map_bh(bh_result, sb, phys64 + frag);
++ return 0;
+ }
+
+ /* This code entered only while writing ....? */
+@@ -841,7 +844,9 @@ void ufs_evict_inode(struct inode * inode)
+ truncate_inode_pages_final(&inode->i_data);
+ if (want_delete) {
+ inode->i_size = 0;
+- if (inode->i_blocks)
++ if (inode->i_blocks &&
++ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
++ S_ISLNK(inode->i_mode)))
+ ufs_truncate_blocks(inode);
+ }
+
+@@ -1100,7 +1105,7 @@ static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
+ return err;
+ }
+
+-static void __ufs_truncate_blocks(struct inode *inode)
++static void ufs_truncate_blocks(struct inode *inode)
+ {
+ struct ufs_inode_info *ufsi = UFS_I(inode);
+ struct super_block *sb = inode->i_sb;
+@@ -1183,7 +1188,7 @@ static int ufs_truncate(struct inode *inode, loff_t size)
+
+ truncate_setsize(inode, size);
+
+- __ufs_truncate_blocks(inode);
++ ufs_truncate_blocks(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ mark_inode_dirty(inode);
+ out:
+@@ -1191,16 +1196,6 @@ static int ufs_truncate(struct inode *inode, loff_t size)
+ return err;
+ }
+
+-static void ufs_truncate_blocks(struct inode *inode)
+-{
+- if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+- S_ISLNK(inode->i_mode)))
+- return;
+- if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+- return;
+- __ufs_truncate_blocks(inode);
+-}
+-
+ int ufs_setattr(struct dentry *dentry, struct iattr *attr)
+ {
+ struct inode *inode = d_inode(dentry);
+diff --git a/fs/ufs/super.c b/fs/ufs/super.c
+index 29ecaf739449..878cc6264f1a 100644
+--- a/fs/ufs/super.c
++++ b/fs/ufs/super.c
+@@ -746,6 +746,23 @@ static void ufs_put_super(struct super_block *sb)
+ return;
+ }
+
++static u64 ufs_max_bytes(struct super_block *sb)
++{
++ struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
++ int bits = uspi->s_apbshift;
++ u64 res;
++
++ if (bits > 21)
++ res = ~0ULL;
++ else
++ res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) +
++ (1LL << (3*bits));
++
++ if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift))
++ return MAX_LFS_FILESIZE;
++ return res << uspi->s_bshift;
++}
++
+ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
+ {
+ struct ufs_sb_info * sbi;
+@@ -1211,6 +1228,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
+ "fast symlink size (%u)\n", uspi->s_maxsymlinklen);
+ uspi->s_maxsymlinklen = maxsymlen;
+ }
++ sb->s_maxbytes = ufs_max_bytes(sb);
+ sb->s_max_links = UFS_LINK_MAX;
+
+ inode = ufs_iget(sb, UFS_ROOTINO);
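
ufs_max_bytes() above caps s_maxbytes at what the inode can actually address: UFS_NDADDR direct blocks plus 2^b, 2^(2b) and 2^(3b) indirect blocks, where b = s_apbshift, shifted by the block-size shift and clamped to MAX_LFS_FILESIZE. The same arithmetic for one assumed geometry (4 KiB blocks with 4-byte block pointers; the geometry is illustrative):

/* Reproduces the ufs_max_bytes() arithmetic for one assumed geometry:
 * 4 KiB blocks with 4-byte pointers, i.e. 1024 pointers per block. */
#include <stdio.h>
#include <stdint.h>

#define UFS_NDADDR 12           /* direct pointers in the inode */

int main(void)
{
        int bits   = 10;        /* s_apbshift: log2(4096 / 4) */
        int bshift = 12;        /* s_bshift:   log2(4096)     */

        uint64_t blocks = UFS_NDADDR
                        + (1ULL << bits)        /* single indirect */
                        + (1ULL << (2 * bits))  /* double indirect */
                        + (1ULL << (3 * bits)); /* triple indirect */

        printf("%llu addressable blocks, max file size %llu bytes\n",
               (unsigned long long)blocks,
               (unsigned long long)(blocks << bshift));
        /* ~1.07e9 blocks, roughly 4 TiB before the MAX_LFS_FILESIZE clamp */
        return 0;
}
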
+diff --git a/fs/ufs/util.h b/fs/ufs/util.h
+index b7fbf53dbc81..398019fb1448 100644
+--- a/fs/ufs/util.h
++++ b/fs/ufs/util.h
+@@ -473,15 +473,19 @@ static inline unsigned _ubh_find_last_zero_bit_(
+ static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
+ struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
+ {
++ u8 mask;
+ switch (uspi->s_fpb) {
+ case 8:
+ return (*ubh_get_addr (ubh, begin + block) == 0xff);
+ case 4:
+- return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2)));
++ mask = 0x0f << ((block & 0x01) << 2);
++ return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask;
+ case 2:
+- return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1)));
++ mask = 0x03 << ((block & 0x03) << 1);
++ return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask;
+ case 1:
+- return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07)));
++ mask = 0x01 << (block & 0x07);
++ return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask;
+ }
+ return 0;
+ }
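
The _ubh_isblockset_() change above matters whenever a bitmap byte covers more than one block: comparing the whole byte with == succeeds only when the neighbouring block's bits are all clear, while masking first tests just the block being asked about. A stand-alone demonstration of the s_fpb == 4 case (two blocks per byte):

/* Shows why the old whole-byte '==' test misreports a fully-set block when
 * the neighbouring block in the same bitmap byte is also set. */
#include <stdio.h>
#include <stdbool.h>

static bool old_isblockset(unsigned char byte, unsigned block)
{
        return byte == (0x0f << ((block & 0x01) << 2));
}

static bool new_isblockset(unsigned char byte, unsigned block)
{
        unsigned char mask = 0x0f << ((block & 0x01) << 2);

        return (byte & mask) == mask;
}

int main(void)
{
        unsigned char byte = 0xff;      /* both blocks in this byte are set */

        printf("old: block0=%d block1=%d\n",
               old_isblockset(byte, 0), old_isblockset(byte, 1));   /* 0 0 */
        printf("new: block0=%d block1=%d\n",
               new_isblockset(byte, 0), new_isblockset(byte, 1));   /* 1 1 */
        return 0;
}
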
+diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
+index a1dd21d6b723..466c71592a6f 100644
+--- a/include/drm/i915_pciids.h
++++ b/include/drm/i915_pciids.h
+@@ -265,7 +265,8 @@
+ INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
+- INTEL_VGA_DEVICE(0x192B, info) /* Halo GT3 */ \
++ INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
++ INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3 */
+
+ #define INTEL_SKL_GT4_IDS(info) \
+ INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 6a3f850cabab..14db95e9b529 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -47,6 +47,7 @@ enum {
+ CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
+ CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
+ CSS_VISIBLE = (1 << 3), /* css is visible to userland */
++ CSS_DYING = (1 << 4), /* css is dying */
+ };
+
+ /* bits in struct cgroup flags field */
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index af9c86e958bd..b48579d9b806 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -344,6 +344,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css)
+ }
+
+ /**
++ * css_is_dying - test whether the specified css is dying
++ * @css: target css
++ *
++ * Test whether @css is in the process of offlining or already offline. In
++ * most cases, ->css_online() and ->css_offline() callbacks should be
++ * enough; however, the actual offline operations are RCU delayed and this
++ * test returns %true also when @css is scheduled to be offlined.
++ *
++ * This is useful, for example, when the use case requires synchronous
++ * behavior with respect to cgroup removal. cgroup removal schedules css
++ * offlining but the css can seem alive while the operation is being
++ * delayed. If the delay affects user visible semantics, this test can be
++ * used to resolve the situation.
++ */
++static inline bool css_is_dying(struct cgroup_subsys_state *css)
++{
++ return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
++}
++
++/**
+ * css_put - put a css reference
+ * @css: target css
+ *
+diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
+index 422bc2e4cb6a..ef3eb8bbfee4 100644
+--- a/include/linux/ptrace.h
++++ b/include/linux/ptrace.h
+@@ -54,7 +54,8 @@ extern int ptrace_request(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data);
+ extern void ptrace_notify(int exit_code);
+ extern void __ptrace_link(struct task_struct *child,
+- struct task_struct *new_parent);
++ struct task_struct *new_parent,
++ const struct cred *ptracer_cred);
+ extern void __ptrace_unlink(struct task_struct *child);
+ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
+ #define PTRACE_MODE_READ 0x01
+@@ -206,7 +207,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
+
+ if (unlikely(ptrace) && current->ptrace) {
+ child->ptrace = current->ptrace;
+- __ptrace_link(child, current->parent);
++ __ptrace_link(child, current->parent, current->ptracer_cred);
+
+ if (child->ptrace & PT_SEIZED)
+ task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
+@@ -215,6 +216,8 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
+
+ set_tsk_thread_flag(child, TIF_SIGPENDING);
+ }
++ else
++ child->ptracer_cred = NULL;
+ }
+
+ /**
+diff --git a/include/linux/srcu.h b/include/linux/srcu.h
+index a598cf3ac70c..8a95e5d0fdf9 100644
+--- a/include/linux/srcu.h
++++ b/include/linux/srcu.h
+@@ -232,9 +232,7 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
+ {
+ int retval;
+
+- preempt_disable();
+ retval = __srcu_read_lock(sp);
+- preempt_enable();
+ rcu_lock_acquire(&(sp)->dep_map);
+ return retval;
+ }
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index dbf0abba33b8..3e505bbff8ca 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -1007,6 +1007,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
+ */
+ extern const struct proto_ops inet6_stream_ops;
+ extern const struct proto_ops inet6_dgram_ops;
++extern const struct proto_ops inet6_sockraw_ops;
+
+ struct group_source_req;
+ struct group_filter;
+diff --git a/kernel/audit.c b/kernel/audit.c
+index a871bf80fde1..dd2c339c8eb9 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -110,18 +110,19 @@ struct audit_net {
+ * @pid: auditd PID
+ * @portid: netlink portid
+ * @net: the associated network namespace
+- * @lock: spinlock to protect write access
++ * @rcu: RCU head
+ *
+ * Description:
+ * This struct is RCU protected; you must either hold the RCU lock for reading
+- * or the included spinlock for writing.
++ * or the associated spinlock for writing.
+ */
+ static struct auditd_connection {
+ int pid;
+ u32 portid;
+ struct net *net;
+- spinlock_t lock;
+-} auditd_conn;
++ struct rcu_head rcu;
++} *auditd_conn = NULL;
++static DEFINE_SPINLOCK(auditd_conn_lock);
+
+ /* If audit_rate_limit is non-zero, limit the rate of sending audit records
+ * to that number per second. This prevents DoS attacks, but results in
+@@ -223,15 +224,39 @@ struct audit_reply {
+ int auditd_test_task(const struct task_struct *task)
+ {
+ int rc;
++ struct auditd_connection *ac;
+
+ rcu_read_lock();
+- rc = (auditd_conn.pid && task->tgid == auditd_conn.pid ? 1 : 0);
++ ac = rcu_dereference(auditd_conn);
++ rc = (ac && ac->pid == task->tgid ? 1 : 0);
+ rcu_read_unlock();
+
+ return rc;
+ }
+
+ /**
++ * auditd_pid_vnr - Return the auditd PID relative to the namespace
++ *
++ * Description:
++ * Returns the PID in relation to the namespace, 0 on failure.
++ */
++static pid_t auditd_pid_vnr(void)
++{
++ pid_t pid;
++ const struct auditd_connection *ac;
++
++ rcu_read_lock();
++ ac = rcu_dereference(auditd_conn);
++ if (!ac)
++ pid = 0;
++ else
++ pid = ac->pid;
++ rcu_read_unlock();
++
++ return pid;
++}
++
++/**
+ * audit_get_sk - Return the audit socket for the given network namespace
+ * @net: the destination network namespace
+ *
+@@ -427,6 +452,23 @@ static int audit_set_failure(u32 state)
+ }
+
+ /**
++ * auditd_conn_free - RCU helper to release an auditd connection struct
++ * @rcu: RCU head
++ *
++ * Description:
++ * Drop any references inside the auditd connection tracking struct and free
++ * the memory.
++ */
++static void auditd_conn_free(struct rcu_head *rcu)
++{
++ struct auditd_connection *ac;
++
++ ac = container_of(rcu, struct auditd_connection, rcu);
++ put_net(ac->net);
++ kfree(ac);
++}
++
++/**
+ * auditd_set - Set/Reset the auditd connection state
+ * @pid: auditd PID
+ * @portid: auditd netlink portid
+@@ -434,22 +476,33 @@ static int audit_set_failure(u32 state)
+ *
+ * Description:
+ * This function will obtain and drop network namespace references as
+- * necessary.
++ * necessary. Returns zero on success, negative values on failure.
+ */
+-static void auditd_set(int pid, u32 portid, struct net *net)
++static int auditd_set(int pid, u32 portid, struct net *net)
+ {
+ unsigned long flags;
++ struct auditd_connection *ac_old, *ac_new;
+
+- spin_lock_irqsave(&auditd_conn.lock, flags);
+- auditd_conn.pid = pid;
+- auditd_conn.portid = portid;
+- if (auditd_conn.net)
+- put_net(auditd_conn.net);
+- if (net)
+- auditd_conn.net = get_net(net);
+- else
+- auditd_conn.net = NULL;
+- spin_unlock_irqrestore(&auditd_conn.lock, flags);
++ if (!pid || !net)
++ return -EINVAL;
++
++ ac_new = kzalloc(sizeof(*ac_new), GFP_KERNEL);
++ if (!ac_new)
++ return -ENOMEM;
++ ac_new->pid = pid;
++ ac_new->portid = portid;
++ ac_new->net = get_net(net);
++
++ spin_lock_irqsave(&auditd_conn_lock, flags);
++ ac_old = rcu_dereference_protected(auditd_conn,
++ lockdep_is_held(&auditd_conn_lock));
++ rcu_assign_pointer(auditd_conn, ac_new);
++ spin_unlock_irqrestore(&auditd_conn_lock, flags);
++
++ if (ac_old)
++ call_rcu(&ac_old->rcu, auditd_conn_free);
++
++ return 0;
+ }
+
+ /**
+@@ -544,13 +597,19 @@ static void kauditd_retry_skb(struct sk_buff *skb)
+ */
+ static void auditd_reset(void)
+ {
++ unsigned long flags;
+ struct sk_buff *skb;
++ struct auditd_connection *ac_old;
+
+ /* if it isn't already broken, break the connection */
+- rcu_read_lock();
+- if (auditd_conn.pid)
+- auditd_set(0, 0, NULL);
+- rcu_read_unlock();
++ spin_lock_irqsave(&auditd_conn_lock, flags);
++ ac_old = rcu_dereference_protected(auditd_conn,
++ lockdep_is_held(&auditd_conn_lock));
++ rcu_assign_pointer(auditd_conn, NULL);
++ spin_unlock_irqrestore(&auditd_conn_lock, flags);
++
++ if (ac_old)
++ call_rcu(&ac_old->rcu, auditd_conn_free);
+
+ /* flush all of the main and retry queues to the hold queue */
+ while ((skb = skb_dequeue(&audit_retry_queue)))
+@@ -576,6 +635,7 @@ static int auditd_send_unicast_skb(struct sk_buff *skb)
+ u32 portid;
+ struct net *net;
+ struct sock *sk;
++ struct auditd_connection *ac;
+
+ /* NOTE: we can't call netlink_unicast while in the RCU section so
+ * take a reference to the network namespace and grab local
+@@ -585,15 +645,15 @@ static int auditd_send_unicast_skb(struct sk_buff *skb)
+ * section netlink_unicast() should safely return an error */
+
+ rcu_read_lock();
+- if (!auditd_conn.pid) {
++ ac = rcu_dereference(auditd_conn);
++ if (!ac) {
+ rcu_read_unlock();
+ rc = -ECONNREFUSED;
+ goto err;
+ }
+- net = auditd_conn.net;
+- get_net(net);
++ net = get_net(ac->net);
+ sk = audit_get_sk(net);
+- portid = auditd_conn.portid;
++ portid = ac->portid;
+ rcu_read_unlock();
+
+ rc = netlink_unicast(sk, skb, portid, 0);
+@@ -728,6 +788,7 @@ static int kauditd_thread(void *dummy)
+ u32 portid = 0;
+ struct net *net = NULL;
+ struct sock *sk = NULL;
++ struct auditd_connection *ac;
+
+ #define UNICAST_RETRIES 5
+
+@@ -735,14 +796,14 @@ static int kauditd_thread(void *dummy)
+ while (!kthread_should_stop()) {
+ /* NOTE: see the lock comments in auditd_send_unicast_skb() */
+ rcu_read_lock();
+- if (!auditd_conn.pid) {
++ ac = rcu_dereference(auditd_conn);
++ if (!ac) {
+ rcu_read_unlock();
+ goto main_queue;
+ }
+- net = auditd_conn.net;
+- get_net(net);
++ net = get_net(ac->net);
+ sk = audit_get_sk(net);
+- portid = auditd_conn.portid;
++ portid = ac->portid;
+ rcu_read_unlock();
+
+ /* attempt to flush the hold queue */
+@@ -1102,9 +1163,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ memset(&s, 0, sizeof(s));
+ s.enabled = audit_enabled;
+ s.failure = audit_failure;
+- rcu_read_lock();
+- s.pid = auditd_conn.pid;
+- rcu_read_unlock();
++ s.pid = auditd_pid_vnr();
+ s.rate_limit = audit_rate_limit;
+ s.backlog_limit = audit_backlog_limit;
+ s.lost = atomic_read(&audit_lost);
+@@ -1143,38 +1202,44 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ /* test the auditd connection */
+ audit_replace(requesting_pid);
+
+- rcu_read_lock();
+- auditd_pid = auditd_conn.pid;
++ auditd_pid = auditd_pid_vnr();
+ /* only the current auditd can unregister itself */
+ if ((!new_pid) && (requesting_pid != auditd_pid)) {
+- rcu_read_unlock();
+ audit_log_config_change("audit_pid", new_pid,
+ auditd_pid, 0);
+ return -EACCES;
+ }
+ /* replacing a healthy auditd is not allowed */
+ if (auditd_pid && new_pid) {
+- rcu_read_unlock();
+ audit_log_config_change("audit_pid", new_pid,
+ auditd_pid, 0);
+ return -EEXIST;
+ }
+- rcu_read_unlock();
+-
+- if (audit_enabled != AUDIT_OFF)
+- audit_log_config_change("audit_pid", new_pid,
+- auditd_pid, 1);
+
+ if (new_pid) {
+ /* register a new auditd connection */
+- auditd_set(new_pid,
+- NETLINK_CB(skb).portid,
+- sock_net(NETLINK_CB(skb).sk));
++ err = auditd_set(new_pid,
++ NETLINK_CB(skb).portid,
++ sock_net(NETLINK_CB(skb).sk));
++ if (audit_enabled != AUDIT_OFF)
++ audit_log_config_change("audit_pid",
++ new_pid,
++ auditd_pid,
++ err ? 0 : 1);
++ if (err)
++ return err;
++
+ /* try to process any backlog */
+ wake_up_interruptible(&kauditd_wait);
+- } else
++ } else {
++ if (audit_enabled != AUDIT_OFF)
++ audit_log_config_change("audit_pid",
++ new_pid,
++ auditd_pid, 1);
++
+ /* unregister the auditd connection */
+ auditd_reset();
++ }
+ }
+ if (s.mask & AUDIT_STATUS_RATE_LIMIT) {
+ err = audit_set_rate_limit(s.rate_limit);
+@@ -1447,10 +1512,11 @@ static void __net_exit audit_net_exit(struct net *net)
+ {
+ struct audit_net *aunet = net_generic(net, audit_net_id);
+
+- rcu_read_lock();
+- if (net == auditd_conn.net)
+- auditd_reset();
+- rcu_read_unlock();
++ /* NOTE: you would think that we would want to check the auditd
++ * connection and potentially reset it here if it lives in this
++ * namespace, but since the auditd connection tracking struct holds a
++ * reference to this namespace (see auditd_set()) we are only ever
++ * going to get here after that connection has been released */
+
+ netlink_kernel_release(aunet->sk);
+ }
+@@ -1470,9 +1536,6 @@ static int __init audit_init(void)
+ if (audit_initialized == AUDIT_DISABLED)
+ return 0;
+
+- memset(&auditd_conn, 0, sizeof(auditd_conn));
+- spin_lock_init(&auditd_conn.lock);
+-
+ skb_queue_head_init(&audit_queue);
+ skb_queue_head_init(&audit_retry_queue);
+ skb_queue_head_init(&audit_hold_queue);
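
The audit changes above stop updating a shared static struct in place and instead publish a freshly allocated copy through an RCU-managed pointer: readers always see a consistent snapshot under rcu_read_lock(), writers swap the pointer under a spinlock, and the old copy is freed only after a grace period. A condensed kernel-style sketch of that publish/replace pattern, with shortened names, trimmed error handling and a plain spin_lock() where the patch uses the irqsave variant; illustrative only, not the patch itself.

/* Condensed, illustrative sketch of the RCU publish/replace pattern the
 * audit code adopts. */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

struct conn {
        int pid;
        struct rcu_head rcu;
};

static struct conn __rcu *active_conn;
static DEFINE_SPINLOCK(conn_lock);

static void conn_free(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct conn, rcu));
}

static int conn_replace(int pid)
{
        struct conn *old, *new = kzalloc(sizeof(*new), GFP_KERNEL);

        if (!new)
                return -ENOMEM;
        new->pid = pid;

        spin_lock(&conn_lock);                          /* writers serialize */
        old = rcu_dereference_protected(active_conn,
                                        lockdep_is_held(&conn_lock));
        rcu_assign_pointer(active_conn, new);           /* publish new copy  */
        spin_unlock(&conn_lock);

        if (old)
                call_rcu(&old->rcu, conn_free);         /* free after grace  */
        return 0;
}

static int conn_pid(void)
{
        struct conn *c;
        int pid = 0;

        rcu_read_lock();                                /* lockless reader   */
        c = rcu_dereference(active_conn);
        if (c)
                pid = c->pid;
        rcu_read_unlock();
        return pid;
}
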
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index b507f1889a72..e21c9321101f 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -436,7 +436,7 @@ struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
+ return css;
+ }
+
+-static void cgroup_get(struct cgroup *cgrp)
++static void __maybe_unused cgroup_get(struct cgroup *cgrp)
+ {
+ css_get(&cgrp->self);
+ }
+@@ -4265,6 +4265,11 @@ static void kill_css(struct cgroup_subsys_state *css)
+ {
+ lockdep_assert_held(&cgroup_mutex);
+
++ if (css->flags & CSS_DYING)
++ return;
++
++ css->flags |= CSS_DYING;
++
+ /*
+ * This must happen before css is disassociated with its cgroup.
+ * See seq_css() for details.
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 0f41292be0fb..943481230cf8 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -176,9 +176,9 @@ typedef enum {
+ } cpuset_flagbits_t;
+
+ /* convenient tests for these bits */
+-static inline bool is_cpuset_online(const struct cpuset *cs)
++static inline bool is_cpuset_online(struct cpuset *cs)
+ {
+- return test_bit(CS_ONLINE, &cs->flags);
++ return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
+ }
+
+ static inline int is_cpu_exclusive(const struct cpuset *cs)
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 37b223e4fc05..e27838ab275d 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1656,13 +1656,13 @@ static ssize_t write_cpuhp_target(struct device *dev,
+ ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
+ mutex_unlock(&cpuhp_state_mutex);
+ if (ret)
+- return ret;
++ goto out;
+
+ if (st->state < target)
+ ret = do_cpu_up(dev->id, target);
+ else
+ ret = do_cpu_down(dev->id, target);
+-
++out:
+ unlock_device_hotplug();
+ return ret ? ret : count;
+ }
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index ff01cba86f43..95c7fa675009 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7184,6 +7184,21 @@ int perf_event_account_interrupt(struct perf_event *event)
+ return __perf_event_account_interrupt(event, 1);
+ }
+
++static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
++{
++ /*
++ * Due to interrupt latency (AKA "skid"), we may enter the
++ * kernel before taking an overflow, even if the PMU is only
++ * counting user events.
++ * To avoid leaking information to userspace, we must always
++ * reject kernel samples when exclude_kernel is set.
++ */
++ if (event->attr.exclude_kernel && !user_mode(regs))
++ return false;
++
++ return true;
++}
++
+ /*
+ * Generic event overflow handling, sampling.
+ */
+@@ -7205,6 +7220,12 @@ static int __perf_event_overflow(struct perf_event *event,
+ ret = __perf_event_account_interrupt(event, throttle);
+
+ /*
++ * For security, drop the skid kernel samples if necessary.
++ */
++ if (!sample_is_allowed(event, regs))
++ return ret;
++
++ /*
+ * XXX event_limit might not quite work as expected on inherited
+ * events
+ */
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 4cc564ece2cf..4f7151d1716b 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1552,6 +1552,18 @@ static __latent_entropy struct task_struct *copy_process(
+ if (!p)
+ goto fork_out;
+
++ /*
++ * This _must_ happen before we call free_task(), i.e. before we jump
++ * to any of the bad_fork_* labels. This is to avoid freeing
++ * p->set_child_tid which is (ab)used as a kthread's data pointer for
++ * kernel threads (PF_KTHREAD).
++ */
++ p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
++ /*
++ * Clear TID on mm_release()?
++ */
++ p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
++
+ ftrace_graph_init_task(p);
+
+ rt_mutex_init_task(p);
+@@ -1715,11 +1727,6 @@ static __latent_entropy struct task_struct *copy_process(
+ }
+ }
+
+- p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
+- /*
+- * Clear TID on mm_release()?
+- */
+- p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
+ #ifdef CONFIG_BLOCK
+ p->plug = NULL;
+ #endif
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 266ddcc1d8bb..60f356d91060 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -60,19 +60,25 @@ int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
+ }
+
+
++void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
++ const struct cred *ptracer_cred)
++{
++ BUG_ON(!list_empty(&child->ptrace_entry));
++ list_add(&child->ptrace_entry, &new_parent->ptraced);
++ child->parent = new_parent;
++ child->ptracer_cred = get_cred(ptracer_cred);
++}
++
+ /*
+ * ptrace a task: make the debugger its new parent and
+ * move it to the ptrace list.
+ *
+ * Must be called with the tasklist lock write-held.
+ */
+-void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
++static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
+ {
+- BUG_ON(!list_empty(&child->ptrace_entry));
+- list_add(&child->ptrace_entry, &new_parent->ptraced);
+- child->parent = new_parent;
+ rcu_read_lock();
+- child->ptracer_cred = get_cred(__task_cred(new_parent));
++ __ptrace_link(child, new_parent, __task_cred(new_parent));
+ rcu_read_unlock();
+ }
+
+@@ -386,7 +392,7 @@ static int ptrace_attach(struct task_struct *task, long request,
+ flags |= PT_SEIZED;
+ task->ptrace = flags;
+
+- __ptrace_link(task, current);
++ ptrace_link(task, current);
+
+ /* SEIZE doesn't trap tracee on attach */
+ if (!seize)
+@@ -459,7 +465,7 @@ static int ptrace_traceme(void)
+ */
+ if (!ret && !(current->real_parent->flags & PF_EXITING)) {
+ current->ptrace = PT_PTRACED;
+- __ptrace_link(current, current->real_parent);
++ ptrace_link(current, current->real_parent);
+ }
+ }
+ write_unlock_irq(&tasklist_lock);
+diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
+index ef3bcfb15b39..6e48a6b6a564 100644
+--- a/kernel/rcu/srcu.c
++++ b/kernel/rcu/srcu.c
+@@ -257,7 +257,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
+
+ /*
+ * Counts the new reader in the appropriate per-CPU element of the
+- * srcu_struct. Must be called from process context.
++ * srcu_struct.
+ * Returns an index that must be passed to the matching srcu_read_unlock().
+ */
+ int __srcu_read_lock(struct srcu_struct *sp)
+@@ -265,7 +265,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
+ int idx;
+
+ idx = READ_ONCE(sp->completed) & 0x1;
+- __this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
++ this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
+ smp_mb(); /* B */ /* Avoid leaking the critical section. */
+ return idx;
+ }
+@@ -275,7 +275,6 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
+ * Removes the count for the old reader from the appropriate per-CPU
+ * element of the srcu_struct. Note that this may well be a different
+ * CPU than that which was incremented by the corresponding srcu_read_lock().
+- * Must be called from process context.
+ */
+ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
+ {
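
The kernel/rcu/srcu.c change above pairs with the earlier include/linux/srcu.h hunk: srcu_read_lock() can stop disabling preemption because __srcu_read_lock() now bumps the per-CPU counter with this_cpu_inc(), which is safe against preemption and interrupts, whereas __this_cpu_inc() is a plain increment whose caller must keep the task pinned to one CPU. A short kernel-style sketch of the distinction; illustrative only, not from the patch.

/* Illustrative contrast between the two per-CPU increment flavours. */
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, demo_count);

static void needs_caller_protection(void)
{
        preempt_disable();              /* caller keeps us on one CPU        */
        __this_cpu_inc(demo_count);     /* non-atomic load/add/store         */
        preempt_enable();
}

static void safe_anywhere(void)
{
        this_cpu_inc(demo_count);       /* RMW safe w.r.t. preemption/irqs   */
}
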
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index dd3e91d68dc7..8827ee31cbf5 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -4859,7 +4859,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
+ }
+
+ out:
+- kfree(fgd->new_hash);
++ free_ftrace_hash(fgd->new_hash);
+ kfree(fgd);
+
+ return ret;
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index 0488c6735c46..8a05a98a8666 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -591,7 +591,7 @@ static int br_afspec(struct net_bridge *br,
+ err = 0;
+ switch (nla_type(attr)) {
+ case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
+- if (!(p->flags & BR_VLAN_TUNNEL))
++ if (!p || !(p->flags & BR_VLAN_TUNNEL))
+ return -EINVAL;
+ err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
+ if (err)
+diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
+index 0db8102995a5..6f12a5271219 100644
+--- a/net/bridge/br_stp_if.c
++++ b/net/bridge/br_stp_if.c
+@@ -179,7 +179,8 @@ static void br_stp_start(struct net_bridge *br)
+ br_debug(br, "using kernel STP\n");
+
+ /* To start timers on any ports left in blocking */
+- mod_timer(&br->hello_timer, jiffies + br->hello_time);
++ if (br->dev->flags & IFF_UP)
++ mod_timer(&br->hello_timer, jiffies + br->hello_time);
+ br_port_state_selection(br);
+ }
+
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index f1d04592ace0..ac5059aad313 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3755,8 +3755,11 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
+
+ spin_lock_irqsave(&q->lock, flags);
+ skb = __skb_dequeue(q);
+- if (skb && (skb_next = skb_peek(q)))
++ if (skb && (skb_next = skb_peek(q))) {
+ icmp_next = is_icmp_err_skb(skb_next);
++ if (icmp_next)
++ sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
++ }
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ if (is_icmp_err_skb(skb) && !icmp_next)
+diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
+index 737be6470c7f..ffaa4fb33d0a 100644
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -440,8 +440,10 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
+ dsa_ds_unapply(dst, ds);
+ }
+
+- if (dst->cpu_switch)
++ if (dst->cpu_switch) {
+ dsa_cpu_port_ethtool_restore(dst->cpu_switch);
++ dst->cpu_switch = NULL;
++ }
+
+ pr_info("DSA: tree %d unapplied\n", dst->tree);
+ dst->applied = false;
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 13a9a3297eae..b9c1bc5e54db 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1043,7 +1043,7 @@ static struct inet_protosw inetsw_array[] =
+ .type = SOCK_DGRAM,
+ .protocol = IPPROTO_ICMP,
+ .prot = &ping_prot,
+- .ops = &inet_dgram_ops,
++ .ops = &inet_sockraw_ops,
+ .flags = INET_PROTOSW_REUSE,
+ },
+
+diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
+index 6e3c512054a6..324c9bcc5456 100644
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -180,6 +180,7 @@ void tcp_init_congestion_control(struct sock *sk)
+ {
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+
++ tcp_sk(sk)->prior_ssthresh = 0;
+ if (icsk->icsk_ca_ops->init)
+ icsk->icsk_ca_ops->init(sk);
+ if (tcp_ca_needs_ecn(sk))
+diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
+index 37ac9de713c6..8d772fea1dde 100644
+--- a/net/ipv6/calipso.c
++++ b/net/ipv6/calipso.c
+@@ -1319,7 +1319,7 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
+ struct ipv6hdr *ip6_hdr;
+ struct ipv6_opt_hdr *hop;
+ unsigned char buf[CALIPSO_MAX_BUFFER];
+- int len_delta, new_end, pad;
++ int len_delta, new_end, pad, payload;
+ unsigned int start, end;
+
+ ip6_hdr = ipv6_hdr(skb);
+@@ -1346,6 +1346,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
+ if (ret_val < 0)
+ return ret_val;
+
++ ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */
++
+ if (len_delta) {
+ if (len_delta > 0)
+ skb_push(skb, len_delta);
+@@ -1355,6 +1357,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
+ sizeof(*ip6_hdr) + start);
+ skb_reset_network_header(skb);
+ ip6_hdr = ipv6_hdr(skb);
++ payload = ntohs(ip6_hdr->payload_len);
++ ip6_hdr->payload_len = htons(payload + len_delta);
+ }
+
+ hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index 280268f1dd7b..cdb3728faca7 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -116,8 +116,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
+
+ if (udpfrag) {
+ int err = ip6_find_1stfragopt(skb, &prevhdr);
+- if (err < 0)
++ if (err < 0) {
++ kfree_skb_list(segs);
+ return ERR_PTR(err);
++ }
+ fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
+ fptr->frag_off = htons(offset);
+ if (skb->next)
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 15ff33934f79..e2d7867f3112 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1095,6 +1095,9 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+
+ if (!dst) {
+ route_lookup:
++ /* add dsfield to flowlabel for route lookup */
++ fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
++
+ dst = ip6_route_output(net, NULL, fl6);
+
+ if (dst->error)
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 9b522fa90e6d..ac826dd338ff 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -192,7 +192,7 @@ static struct inet_protosw pingv6_protosw = {
+ .type = SOCK_DGRAM,
+ .protocol = IPPROTO_ICMPV6,
+ .prot = &pingv6_prot,
+- .ops = &inet6_dgram_ops,
++ .ops = &inet6_sockraw_ops,
+ .flags = INET_PROTOSW_REUSE,
+ };
+
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 1f992d9e261d..60be012fe708 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -1338,7 +1338,7 @@ void raw6_proc_exit(void)
+ #endif /* CONFIG_PROC_FS */
+
+ /* Same as inet6_dgram_ops, sans udp_poll. */
+-static const struct proto_ops inet6_sockraw_ops = {
++const struct proto_ops inet6_sockraw_ops = {
+ .family = PF_INET6,
+ .owner = THIS_MODULE,
+ .release = inet6_release,
+diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c
+index 0e015906f9ca..07d36573f50b 100644
+--- a/net/ipv6/xfrm6_mode_ro.c
++++ b/net/ipv6/xfrm6_mode_ro.c
+@@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
+ iph = ipv6_hdr(skb);
+
+ hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
++ if (hdr_len < 0)
++ return hdr_len;
+ skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
+ skb_set_network_header(skb, -x->props.header_len);
+ skb->transport_header = skb->network_header + hdr_len;
+diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
+index 4439ee44c8b0..5e304102287c 100644
+--- a/net/ipv6/xfrm6_mode_transport.c
++++ b/net/ipv6/xfrm6_mode_transport.c
+@@ -28,6 +28,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
+ iph = ipv6_hdr(skb);
+
+ hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
++ if (hdr_len < 0)
++ return hdr_len;
+ skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
+ skb_set_network_header(skb, -x->props.header_len);
+ skb->transport_header = skb->network_header + hdr_len;
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 78dfbf9588b3..99aff9618fb8 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -117,17 +117,17 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ else if (d > 0)
+ p = &parent->rb_right;
+ else {
+- if (nft_set_elem_active(&rbe->ext, genmask)) {
+- if (nft_rbtree_interval_end(rbe) &&
+- !nft_rbtree_interval_end(new))
+- p = &parent->rb_left;
+- else if (!nft_rbtree_interval_end(rbe) &&
+- nft_rbtree_interval_end(new))
+- p = &parent->rb_right;
+- else {
+- *ext = &rbe->ext;
+- return -EEXIST;
+- }
++ if (nft_rbtree_interval_end(rbe) &&
++ !nft_rbtree_interval_end(new)) {
++ p = &parent->rb_left;
++ } else if (!nft_rbtree_interval_end(rbe) &&
++ nft_rbtree_interval_end(new)) {
++ p = &parent->rb_right;
++ } else if (nft_set_elem_active(&rbe->ext, genmask)) {
++ *ext = &rbe->ext;
++ return -EEXIST;
++ } else {
++ p = &parent->rb_left;
+ }
+ }
+ }
+diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
+index 0010955d7876..1845d47474a0 100644
+--- a/security/keys/encrypted-keys/encrypted.c
++++ b/security/keys/encrypted-keys/encrypted.c
+@@ -480,12 +480,9 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
+ struct skcipher_request *req;
+ unsigned int encrypted_datalen;
+ u8 iv[AES_BLOCK_SIZE];
+- unsigned int padlen;
+- char pad[16];
+ int ret;
+
+ encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+- padlen = encrypted_datalen - epayload->decrypted_datalen;
+
+ req = init_skcipher_req(derived_key, derived_keylen);
+ ret = PTR_ERR(req);
+@@ -493,11 +490,10 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
+ goto out;
+ dump_decrypted_data(epayload);
+
+- memset(pad, 0, sizeof pad);
+ sg_init_table(sg_in, 2);
+ sg_set_buf(&sg_in[0], epayload->decrypted_data,
+ epayload->decrypted_datalen);
+- sg_set_buf(&sg_in[1], pad, padlen);
++ sg_set_page(&sg_in[1], ZERO_PAGE(0), AES_BLOCK_SIZE, 0);
+
+ sg_init_table(sg_out, 1);
+ sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
+@@ -584,9 +580,14 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
+ struct skcipher_request *req;
+ unsigned int encrypted_datalen;
+ u8 iv[AES_BLOCK_SIZE];
+- char pad[16];
++ u8 *pad;
+ int ret;
+
++ /* Throwaway buffer to hold the unused zero padding at the end */
++ pad = kmalloc(AES_BLOCK_SIZE, GFP_KERNEL);
++ if (!pad)
++ return -ENOMEM;
++
+ encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+ req = init_skcipher_req(derived_key, derived_keylen);
+ ret = PTR_ERR(req);
+@@ -594,13 +595,12 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
+ goto out;
+ dump_encrypted_data(epayload, encrypted_datalen);
+
+- memset(pad, 0, sizeof pad);
+ sg_init_table(sg_in, 1);
+ sg_init_table(sg_out, 2);
+ sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
+ sg_set_buf(&sg_out[0], epayload->decrypted_data,
+ epayload->decrypted_datalen);
+- sg_set_buf(&sg_out[1], pad, sizeof pad);
++ sg_set_buf(&sg_out[1], pad, AES_BLOCK_SIZE);
+
+ memcpy(iv, epayload->iv, sizeof(iv));
+ skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
+@@ -612,6 +612,7 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
+ goto out;
+ dump_decrypted_data(epayload);
+ out:
++ kfree(pad);
+ return ret;
+ }
+
+diff --git a/security/keys/key.c b/security/keys/key.c
+index 346fbf201c22..2f4ce35ae2aa 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -962,12 +962,11 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
+ /* the key must be writable */
+ ret = key_permission(key_ref, KEY_NEED_WRITE);
+ if (ret < 0)
+- goto error;
++ return ret;
+
+ /* attempt to update it if supported */
+- ret = -EOPNOTSUPP;
+ if (!key->type->update)
+- goto error;
++ return -EOPNOTSUPP;
+
+ memset(&prep, 0, sizeof(prep));
+ prep.data = payload;
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 4ad3212adebe..3663a98b473d 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -99,7 +99,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
+ /* pull the payload in if one was supplied */
+ payload = NULL;
+
+- if (_payload) {
++ if (plen) {
+ ret = -ENOMEM;
+ payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
+ if (!payload) {
+@@ -329,7 +329,7 @@ long keyctl_update_key(key_serial_t id,
+
+ /* pull the payload in if one was supplied */
+ payload = NULL;
+- if (_payload) {
++ if (plen) {
+ ret = -ENOMEM;
+ payload = kmalloc(plen, GFP_KERNEL);
+ if (!payload)
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 6d4fbc439246..171c01ad9375 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -1623,6 +1623,7 @@ static int snd_timer_user_tselect(struct file *file,
+ if (err < 0)
+ goto __err;
+
++ tu->qhead = tu->qtail = tu->qused = 0;
+ kfree(tu->queue);
+ tu->queue = NULL;
+ kfree(tu->tqueue);
+@@ -1964,6 +1965,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+
+ tu = file->private_data;
+ unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
++ mutex_lock(&tu->ioctl_lock);
+ spin_lock_irq(&tu->qlock);
+ while ((long)count - result >= unit) {
+ while (!tu->qused) {
+@@ -1979,7 +1981,9 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ add_wait_queue(&tu->qchange_sleep, &wait);
+
+ spin_unlock_irq(&tu->qlock);
++ mutex_unlock(&tu->ioctl_lock);
+ schedule();
++ mutex_lock(&tu->ioctl_lock);
+ spin_lock_irq(&tu->qlock);
+
+ remove_wait_queue(&tu->qchange_sleep, &wait);
+@@ -1999,7 +2003,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ tu->qused--;
+ spin_unlock_irq(&tu->qlock);
+
+- mutex_lock(&tu->ioctl_lock);
+ if (tu->tread) {
+ if (copy_to_user(buffer, &tu->tqueue[qhead],
+ sizeof(struct snd_timer_tread)))
+@@ -2009,7 +2012,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ sizeof(struct snd_timer_read)))
+ err = -EFAULT;
+ }
+- mutex_unlock(&tu->ioctl_lock);
+
+ spin_lock_irq(&tu->qlock);
+ if (err < 0)
+@@ -2019,6 +2021,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ }
+ _error:
+ spin_unlock_irq(&tu->qlock);
++ mutex_unlock(&tu->ioctl_lock);
+ return result > 0 ? result : err;
+ }
+
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index 2722bb0c5573..98d60f471c5d 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -2286,6 +2286,9 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
+ list_for_each_entry(rtd, &card->rtd_list, list)
+ flush_delayed_work(&rtd->delayed_work);
+
++ /* free the ALSA card at first; this syncs with pending operations */
++ snd_card_free(card->snd_card);
++
+ /* remove and free each DAI */
+ soc_remove_dai_links(card);
+ soc_remove_pcm_runtimes(card);
+@@ -2300,9 +2303,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
+ if (card->remove)
+ card->remove(card);
+
+- snd_card_free(card->snd_card);
+ return 0;
+-
+ }
+
+ /* removes a socdev */
+diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
+index c505b019e09c..bfac6f21ae5e 100644
+--- a/sound/x86/intel_hdmi_audio.c
++++ b/sound/x86/intel_hdmi_audio.c
+@@ -1809,10 +1809,6 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
+ pdata->notify_pending = false;
+ spin_unlock_irq(&pdata->lpe_audio_slock);
+
+- /* runtime PM isn't enabled as default, since it won't save much on
+- * BYT/CHT devices; user who want the runtime PM should adjust the
+- * power/ontrol and power/autosuspend_delay_ms sysfs entries instead
+- */
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);