author | Mike Pagano <mpagano@gentoo.org> | 2019-04-03 06:57:54 -0400
committer | Mike Pagano <mpagano@gentoo.org> | 2019-04-03 06:57:54 -0400
commit | 86c7214cb6309544bbd293287019dfdcd331f87e (patch)
tree | ddcfe11e1da7355ccf9af96eb178fef7c4def14f
parent | Linux patch 4.14.109 (diff)
download | linux-patches-86c7214cb6309544bbd293287019dfdcd331f87e.tar.gz linux-patches-86c7214cb6309544bbd293287019dfdcd331f87e.tar.bz2 linux-patches-86c7214cb6309544bbd293287019dfdcd331f87e.zip
Linux patch 4.14.110
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4
-rw-r--r-- | 1109_linux-4.14.110.patch | 3409
2 files changed, 3413 insertions, 0 deletions
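The patch body below opens with an update to Documentation/virtual/kvm/api.txt describing the kvm file-descriptor hierarchy (system fd -> VM fd -> vcpu and device fds) and which process or thread may issue each ioctl class. As a rough userspace illustration of the hierarchy that text describes (a sketch, not part of the patch itself; error handling is omitted and KVM_DEV_TYPE_VFIO is only an arbitrary example of a device type):

```c
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	struct kvm_create_device cd = {
		.type = KVM_DEV_TYPE_VFIO,               /* example device type */
	};

	int sys_fd  = open("/dev/kvm", O_RDWR);         /* system fd: system ioctls */
	int vm_fd   = ioctl(sys_fd, KVM_CREATE_VM, 0);  /* VM fd: VM ioctls (memory layout, vcpu/device creation) */
	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0); /* vcpu fd: vcpu ioctls, e.g. KVM_RUN */
	int dev_fd  = -1;

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0)  /* device fd is returned in cd.fd */
		dev_fd = cd.fd;

	/* Per the documentation change: VM and device ioctls must come from the
	 * process that created the VM; vcpu ioctls from the thread that created
	 * the vcpu. */
	(void)vcpu_fd;
	(void)dev_fd;
	return 0;
}
```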
diff --git a/0000_README b/0000_README index 4a5cbb0b..a696c1c1 100644 --- a/0000_README +++ b/0000_README @@ -479,6 +479,10 @@ Patch: 1108_4.14.109.patch From: http://www.kernel.org Desc: Linux 4.14.109 +Patch: 1109_4.14.110.patch +From: http://www.kernel.org +Desc: Linux 4.14.110 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1109_linux-4.14.110.patch b/1109_linux-4.14.110.patch new file mode 100644 index 00000000..34378efd --- /dev/null +++ b/1109_linux-4.14.110.patch @@ -0,0 +1,3409 @@ +diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt +index 5d12166bd66b..f67ed33d1054 100644 +--- a/Documentation/virtual/kvm/api.txt ++++ b/Documentation/virtual/kvm/api.txt +@@ -13,7 +13,7 @@ of a virtual machine. The ioctls belong to three classes + + - VM ioctls: These query and set attributes that affect an entire virtual + machine, for example memory layout. In addition a VM ioctl is used to +- create virtual cpus (vcpus). ++ create virtual cpus (vcpus) and devices. + + Only run VM ioctls from the same process (address space) that was used + to create the VM. +@@ -24,6 +24,11 @@ of a virtual machine. The ioctls belong to three classes + Only run vcpu ioctls from the same thread that was used to create the + vcpu. + ++ - device ioctls: These query and set attributes that control the operation ++ of a single device. ++ ++ device ioctls must be issued from the same process (address space) that ++ was used to create the VM. + + 2. File descriptors + ------------------- +@@ -32,10 +37,11 @@ The kvm API is centered around file descriptors. An initial + open("/dev/kvm") obtains a handle to the kvm subsystem; this handle + can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this + handle will create a VM file descriptor which can be used to issue VM +-ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu +-and return a file descriptor pointing to it. Finally, ioctls on a vcpu +-fd can be used to control the vcpu, including the important task of +-actually running guest code. ++ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will ++create a virtual cpu or device and return a file descriptor pointing to ++the new resource. Finally, ioctls on a vcpu or device fd can be used ++to control the vcpu or device. For vcpus, this includes the important ++task of actually running guest code. + + In general file descriptors can be migrated among processes by means + of fork() and the SCM_RIGHTS facility of unix domain socket. 
These +diff --git a/Makefile b/Makefile +index e02bced59a57..37bd0b40876d 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 14 +-SUBLEVEL = 109 ++SUBLEVEL = 110 + EXTRAVERSION = + NAME = Petit Gorille + +diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c +index bfeb25aaf9a2..326e870d7123 100644 +--- a/arch/arm/mach-imx/cpuidle-imx6q.c ++++ b/arch/arm/mach-imx/cpuidle-imx6q.c +@@ -16,30 +16,23 @@ + #include "cpuidle.h" + #include "hardware.h" + +-static atomic_t master = ATOMIC_INIT(0); +-static DEFINE_SPINLOCK(master_lock); ++static int num_idle_cpus = 0; ++static DEFINE_SPINLOCK(cpuidle_lock); + + static int imx6q_enter_wait(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) + { +- if (atomic_inc_return(&master) == num_online_cpus()) { +- /* +- * With this lock, we prevent other cpu to exit and enter +- * this function again and become the master. +- */ +- if (!spin_trylock(&master_lock)) +- goto idle; ++ spin_lock(&cpuidle_lock); ++ if (++num_idle_cpus == num_online_cpus()) + imx6_set_lpm(WAIT_UNCLOCKED); +- cpu_do_idle(); +- imx6_set_lpm(WAIT_CLOCKED); +- spin_unlock(&master_lock); +- goto done; +- } ++ spin_unlock(&cpuidle_lock); + +-idle: + cpu_do_idle(); +-done: +- atomic_dec(&master); ++ ++ spin_lock(&cpuidle_lock); ++ if (num_idle_cpus-- == num_online_cpus()) ++ imx6_set_lpm(WAIT_CLOCKED); ++ spin_unlock(&cpuidle_lock); + + return index; + } +diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig +index fe418226df7f..de3b07c7be30 100644 +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig +@@ -164,7 +164,7 @@ config PPC + select GENERIC_CLOCKEVENTS_BROADCAST if SMP + select GENERIC_CMOS_UPDATE + select GENERIC_CPU_AUTOPROBE +- select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64 ++ select GENERIC_CPU_VULNERABILITIES if PPC_BARRIER_NOSPEC + select GENERIC_IRQ_SHOW + select GENERIC_IRQ_SHOW_LEVEL + select GENERIC_SMP_IDLE_THREAD +@@ -236,6 +236,11 @@ config PPC + # Please keep this list sorted alphabetically. + # + ++config PPC_BARRIER_NOSPEC ++ bool ++ default y ++ depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E ++ + config GENERIC_CSUM + def_bool n + +diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h +index 7330150bfe34..ba4c75062d49 100644 +--- a/arch/powerpc/include/asm/asm-prototypes.h ++++ b/arch/powerpc/include/asm/asm-prototypes.h +@@ -126,4 +126,10 @@ extern int __ucmpdi2(u64, u64); + void _mcount(void); + unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip); + ++/* Patch sites */ ++extern s32 patch__call_flush_count_cache; ++extern s32 patch__flush_count_cache_return; ++ ++extern long flush_count_cache; ++ + #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ +diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h +index e582d2c88092..449474f667c4 100644 +--- a/arch/powerpc/include/asm/barrier.h ++++ b/arch/powerpc/include/asm/barrier.h +@@ -77,19 +77,25 @@ do { \ + }) + + #ifdef CONFIG_PPC_BOOK3S_64 ++#define NOSPEC_BARRIER_SLOT nop ++#elif defined(CONFIG_PPC_FSL_BOOK3E) ++#define NOSPEC_BARRIER_SLOT nop; nop ++#endif ++ ++#ifdef CONFIG_PPC_BARRIER_NOSPEC + /* + * Prevent execution of subsequent instructions until preceding branches have + * been fully resolved and are no longer executing speculatively. 
+ */ +-#define barrier_nospec_asm ori 31,31,0 ++#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT + + // This also acts as a compiler barrier due to the memory clobber. + #define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory") + +-#else /* !CONFIG_PPC_BOOK3S_64 */ ++#else /* !CONFIG_PPC_BARRIER_NOSPEC */ + #define barrier_nospec_asm + #define barrier_nospec() +-#endif ++#endif /* CONFIG_PPC_BARRIER_NOSPEC */ + + #include <asm-generic/barrier.h> + +diff --git a/arch/powerpc/include/asm/code-patching-asm.h b/arch/powerpc/include/asm/code-patching-asm.h +new file mode 100644 +index 000000000000..ed7b1448493a +--- /dev/null ++++ b/arch/powerpc/include/asm/code-patching-asm.h +@@ -0,0 +1,18 @@ ++/* SPDX-License-Identifier: GPL-2.0+ */ ++/* ++ * Copyright 2018, Michael Ellerman, IBM Corporation. ++ */ ++#ifndef _ASM_POWERPC_CODE_PATCHING_ASM_H ++#define _ASM_POWERPC_CODE_PATCHING_ASM_H ++ ++/* Define a "site" that can be patched */ ++.macro patch_site label name ++ .pushsection ".rodata" ++ .balign 4 ++ .global \name ++\name: ++ .4byte \label - . ++ .popsection ++.endm ++ ++#endif /* _ASM_POWERPC_CODE_PATCHING_ASM_H */ +diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h +index 812535f40124..b2051234ada8 100644 +--- a/arch/powerpc/include/asm/code-patching.h ++++ b/arch/powerpc/include/asm/code-patching.h +@@ -32,6 +32,8 @@ unsigned int create_cond_branch(const unsigned int *addr, + int patch_branch(unsigned int *addr, unsigned long target, int flags); + int patch_instruction(unsigned int *addr, unsigned int instr); + int raw_patch_instruction(unsigned int *addr, unsigned int instr); ++int patch_instruction_site(s32 *addr, unsigned int instr); ++int patch_branch_site(s32 *site, unsigned long target, int flags); + + int instr_is_relative_branch(unsigned int instr); + int instr_is_relative_link_branch(unsigned int instr); +diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h +index a9b64df34e2a..b1d478acbaec 100644 +--- a/arch/powerpc/include/asm/feature-fixups.h ++++ b/arch/powerpc/include/asm/feature-fixups.h +@@ -211,6 +211,25 @@ label##3: \ + FTR_ENTRY_OFFSET 951b-952b; \ + .popsection; + ++#define NOSPEC_BARRIER_FIXUP_SECTION \ ++953: \ ++ .pushsection __barrier_nospec_fixup,"a"; \ ++ .align 2; \ ++954: \ ++ FTR_ENTRY_OFFSET 953b-954b; \ ++ .popsection; ++ ++#define START_BTB_FLUSH_SECTION \ ++955: \ ++ ++#define END_BTB_FLUSH_SECTION \ ++956: \ ++ .pushsection __btb_flush_fixup,"a"; \ ++ .align 2; \ ++957: \ ++ FTR_ENTRY_OFFSET 955b-957b; \ ++ FTR_ENTRY_OFFSET 956b-957b; \ ++ .popsection; + + #ifndef __ASSEMBLY__ + #include <linux/types.h> +@@ -219,6 +238,8 @@ extern long stf_barrier_fallback; + extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup; + extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup; + extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; ++extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup; ++extern long __start__btb_flush_fixup, __stop__btb_flush_fixup; + + void apply_feature_fixups(void); + void setup_feature_keys(void); +diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h +index 5a740feb7bd7..15cef59092c7 100644 +--- a/arch/powerpc/include/asm/hvcall.h ++++ b/arch/powerpc/include/asm/hvcall.h +@@ -340,10 +340,12 @@ + #define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5 + #define 
H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6 + #define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7 ++#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54) // IBM bit 9 + + #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0 + #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1 + #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2 ++#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5 + + /* Flag values used in H_REGISTER_PROC_TBL hcall */ + #define PROC_TABLE_OP_MASK 0x18 +diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h +index ce0930d68857..b991bd31b383 100644 +--- a/arch/powerpc/include/asm/ppc-opcode.h ++++ b/arch/powerpc/include/asm/ppc-opcode.h +@@ -288,6 +288,7 @@ + /* Misc instructions for BPF compiler */ + #define PPC_INST_LBZ 0x88000000 + #define PPC_INST_LD 0xe8000000 ++#define PPC_INST_LDX 0x7c00002a + #define PPC_INST_LHZ 0xa0000000 + #define PPC_INST_LWZ 0x80000000 + #define PPC_INST_LHBRX 0x7c00062c +@@ -295,6 +296,7 @@ + #define PPC_INST_STB 0x98000000 + #define PPC_INST_STH 0xb0000000 + #define PPC_INST_STD 0xf8000000 ++#define PPC_INST_STDX 0x7c00012a + #define PPC_INST_STDU 0xf8000001 + #define PPC_INST_STW 0x90000000 + #define PPC_INST_STWU 0x94000000 +diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h +index 36f3e41c9fbe..3e1b8de72776 100644 +--- a/arch/powerpc/include/asm/ppc_asm.h ++++ b/arch/powerpc/include/asm/ppc_asm.h +@@ -802,4 +802,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) + stringify_in_c(.long (_target) - . ;) \ + stringify_in_c(.previous) + ++#ifdef CONFIG_PPC_FSL_BOOK3E ++#define BTB_FLUSH(reg) \ ++ lis reg,BUCSR_INIT@h; \ ++ ori reg,reg,BUCSR_INIT@l; \ ++ mtspr SPRN_BUCSR,reg; \ ++ isync; ++#else ++#define BTB_FLUSH(reg) ++#endif /* CONFIG_PPC_FSL_BOOK3E */ ++ + #endif /* _ASM_POWERPC_PPC_ASM_H */ +diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h +index 44989b22383c..759597bf0fd8 100644 +--- a/arch/powerpc/include/asm/security_features.h ++++ b/arch/powerpc/include/asm/security_features.h +@@ -22,6 +22,7 @@ enum stf_barrier_type { + + void setup_stf_barrier(void); + void do_stf_barrier_fixups(enum stf_barrier_type types); ++void setup_count_cache_flush(void); + + static inline void security_ftr_set(unsigned long feature) + { +@@ -59,6 +60,9 @@ static inline bool security_ftr_enabled(unsigned long feature) + // Indirect branch prediction cache disabled + #define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull + ++// bcctr 2,0,0 triggers a hardware assisted count cache flush ++#define SEC_FTR_BCCTR_FLUSH_ASSIST 0x0000000000000800ull ++ + + // Features indicating need for Spectre/Meltdown mitigations + +@@ -74,6 +78,9 @@ static inline bool security_ftr_enabled(unsigned long feature) + // Firmware configuration indicates user favours security over performance + #define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull + ++// Software required to flush count cache on context switch ++#define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull ++ + + // Features enabled by default + #define SEC_FTR_DEFAULT \ +diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h +index a5e919e34c42..5ceab440ecb9 100644 +--- a/arch/powerpc/include/asm/setup.h ++++ b/arch/powerpc/include/asm/setup.h +@@ -52,6 +52,26 @@ enum l1d_flush_type { + + void setup_rfi_flush(enum l1d_flush_type, bool enable); + void do_rfi_flush_fixups(enum l1d_flush_type types); ++#ifdef 
CONFIG_PPC_BARRIER_NOSPEC ++void setup_barrier_nospec(void); ++#else ++static inline void setup_barrier_nospec(void) { }; ++#endif ++void do_barrier_nospec_fixups(bool enable); ++extern bool barrier_nospec_enabled; ++ ++#ifdef CONFIG_PPC_BARRIER_NOSPEC ++void do_barrier_nospec_fixups_range(bool enable, void *start, void *end); ++#else ++static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }; ++#endif ++ ++#ifdef CONFIG_PPC_FSL_BOOK3E ++void setup_spectre_v2(void); ++#else ++static inline void setup_spectre_v2(void) {}; ++#endif ++void do_btb_flush_fixups(void); + + #endif /* !__ASSEMBLY__ */ + +diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h +index cf26e62b268d..bd6d0fb5be9f 100644 +--- a/arch/powerpc/include/asm/uaccess.h ++++ b/arch/powerpc/include/asm/uaccess.h +@@ -238,6 +238,7 @@ do { \ + __chk_user_ptr(ptr); \ + if (!is_kernel_addr((unsigned long)__gu_addr)) \ + might_fault(); \ ++ barrier_nospec(); \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +@@ -249,8 +250,10 @@ do { \ + __long_type(*(ptr)) __gu_val = 0; \ + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + might_fault(); \ +- if (access_ok(VERIFY_READ, __gu_addr, (size))) \ ++ if (access_ok(VERIFY_READ, __gu_addr, (size))) { \ ++ barrier_nospec(); \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ ++ } \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ + __gu_err; \ + }) +@@ -261,6 +264,7 @@ do { \ + __long_type(*(ptr)) __gu_val; \ + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + __chk_user_ptr(ptr); \ ++ barrier_nospec(); \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +@@ -288,15 +292,19 @@ static inline unsigned long raw_copy_from_user(void *to, + + switch (n) { + case 1: ++ barrier_nospec(); + __get_user_size(*(u8 *)to, from, 1, ret); + break; + case 2: ++ barrier_nospec(); + __get_user_size(*(u16 *)to, from, 2, ret); + break; + case 4: ++ barrier_nospec(); + __get_user_size(*(u32 *)to, from, 4, ret); + break; + case 8: ++ barrier_nospec(); + __get_user_size(*(u64 *)to, from, 8, ret); + break; + } +@@ -304,6 +312,7 @@ static inline unsigned long raw_copy_from_user(void *to, + return 0; + } + ++ barrier_nospec(); + return __copy_tofrom_user((__force void __user *)to, from, n); + } + +diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile +index a1089c9a9aa5..142b08d40642 100644 +--- a/arch/powerpc/kernel/Makefile ++++ b/arch/powerpc/kernel/Makefile +@@ -45,9 +45,10 @@ obj-$(CONFIG_VDSO32) += vdso32/ + obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o + obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o + obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o +-obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o ++obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o + obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o + obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o ++obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o + obj-$(CONFIG_PPC64) += vdso64/ + obj-$(CONFIG_ALTIVEC) += vecemu.o + obj-$(CONFIG_PPC_970_NAP) += idle_power4.o +diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S +index 4ae464b9d490..a2999cd73a82 100644 +--- a/arch/powerpc/kernel/entry_32.S ++++ b/arch/powerpc/kernel/entry_32.S +@@ -33,6 +33,7 @@ + #include <asm/unistd.h> + #include <asm/ptrace.h> + #include <asm/export.h> ++#include <asm/barrier.h> + + /* + * 
MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE. +@@ -358,6 +359,15 @@ syscall_dotrace_cont: + ori r10,r10,sys_call_table@l + slwi r0,r0,2 + bge- 66f ++ ++ barrier_nospec_asm ++ /* ++ * Prevent the load of the handler below (based on the user-passed ++ * system call number) being speculatively executed until the test ++ * against NR_syscalls and branch to .66f above has ++ * committed. ++ */ ++ + lwzx r10,r10,r0 /* Fetch system call handler [ptr] */ + mtlr r10 + addi r9,r1,STACK_FRAME_OVERHEAD +diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S +index c194f4c8e66b..12395895b9aa 100644 +--- a/arch/powerpc/kernel/entry_64.S ++++ b/arch/powerpc/kernel/entry_64.S +@@ -25,6 +25,7 @@ + #include <asm/page.h> + #include <asm/mmu.h> + #include <asm/thread_info.h> ++#include <asm/code-patching-asm.h> + #include <asm/ppc_asm.h> + #include <asm/asm-offsets.h> + #include <asm/cputable.h> +@@ -36,6 +37,7 @@ + #include <asm/context_tracking.h> + #include <asm/tm.h> + #include <asm/ppc-opcode.h> ++#include <asm/barrier.h> + #include <asm/export.h> + #ifdef CONFIG_PPC_BOOK3S + #include <asm/exception-64s.h> +@@ -76,6 +78,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) + std r0,GPR0(r1) + std r10,GPR1(r1) + beq 2f /* if from kernel mode */ ++#ifdef CONFIG_PPC_FSL_BOOK3E ++START_BTB_FLUSH_SECTION ++ BTB_FLUSH(r10) ++END_BTB_FLUSH_SECTION ++#endif + ACCOUNT_CPU_USER_ENTRY(r13, r10, r11) + 2: std r2,GPR2(r1) + std r3,GPR3(r1) +@@ -179,6 +186,15 @@ system_call: /* label this so stack traces look sane */ + clrldi r8,r8,32 + 15: + slwi r0,r0,4 ++ ++ barrier_nospec_asm ++ /* ++ * Prevent the load of the handler below (based on the user-passed ++ * system call number) being speculatively executed until the test ++ * against NR_syscalls and branch to .Lsyscall_enosys above has ++ * committed. ++ */ ++ + ldx r12,r11,r0 /* Fetch system call handler [ptr] */ + mtctr r12 + bctrl /* Call handler */ +@@ -487,6 +503,57 @@ _GLOBAL(ret_from_kernel_thread) + li r3,0 + b .Lsyscall_exit + ++#ifdef CONFIG_PPC_BOOK3S_64 ++ ++#define FLUSH_COUNT_CACHE \ ++1: nop; \ ++ patch_site 1b, patch__call_flush_count_cache ++ ++ ++#define BCCTR_FLUSH .long 0x4c400420 ++ ++.macro nops number ++ .rept \number ++ nop ++ .endr ++.endm ++ ++.balign 32 ++.global flush_count_cache ++flush_count_cache: ++ /* Save LR into r9 */ ++ mflr r9 ++ ++ .rept 64 ++ bl .+4 ++ .endr ++ b 1f ++ nops 6 ++ ++ .balign 32 ++ /* Restore LR */ ++1: mtlr r9 ++ li r9,0x7fff ++ mtctr r9 ++ ++ BCCTR_FLUSH ++ ++2: nop ++ patch_site 2b patch__flush_count_cache_return ++ ++ nops 3 ++ ++ .rept 278 ++ .balign 32 ++ BCCTR_FLUSH ++ nops 7 ++ .endr ++ ++ blr ++#else ++#define FLUSH_COUNT_CACHE ++#endif /* CONFIG_PPC_BOOK3S_64 */ ++ + /* + * This routine switches between two different tasks. The process + * state of one is saved on its kernel stack. Then the state +@@ -518,6 +585,8 @@ _GLOBAL(_switch) + std r23,_CCR(r1) + std r1,KSP(r3) /* Set old stack pointer */ + ++ FLUSH_COUNT_CACHE ++ + /* + * On SMP kernels, care must be taken because a task may be + * scheduled off CPUx and on to CPUy. Memory ordering must be +diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S +index acd8ca76233e..2edc1b7b34cc 100644 +--- a/arch/powerpc/kernel/exceptions-64e.S ++++ b/arch/powerpc/kernel/exceptions-64e.S +@@ -295,7 +295,8 @@ ret_from_mc_except: + andi. 
r10,r11,MSR_PR; /* save stack pointer */ \ + beq 1f; /* branch around if supervisor */ \ + ld r1,PACAKSAVE(r13); /* get kernel stack coming from usr */\ +-1: cmpdi cr1,r1,0; /* check if SP makes sense */ \ ++1: type##_BTB_FLUSH \ ++ cmpdi cr1,r1,0; /* check if SP makes sense */ \ + bge- cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \ + mfspr r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */ + +@@ -327,6 +328,30 @@ ret_from_mc_except: + #define SPRN_MC_SRR0 SPRN_MCSRR0 + #define SPRN_MC_SRR1 SPRN_MCSRR1 + ++#ifdef CONFIG_PPC_FSL_BOOK3E ++#define GEN_BTB_FLUSH \ ++ START_BTB_FLUSH_SECTION \ ++ beq 1f; \ ++ BTB_FLUSH(r10) \ ++ 1: \ ++ END_BTB_FLUSH_SECTION ++ ++#define CRIT_BTB_FLUSH \ ++ START_BTB_FLUSH_SECTION \ ++ BTB_FLUSH(r10) \ ++ END_BTB_FLUSH_SECTION ++ ++#define DBG_BTB_FLUSH CRIT_BTB_FLUSH ++#define MC_BTB_FLUSH CRIT_BTB_FLUSH ++#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH ++#else ++#define GEN_BTB_FLUSH ++#define CRIT_BTB_FLUSH ++#define DBG_BTB_FLUSH ++#define MC_BTB_FLUSH ++#define GDBELL_BTB_FLUSH ++#endif ++ + #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \ + EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n)) + +diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h +index d0862a100d29..306e26c073a0 100644 +--- a/arch/powerpc/kernel/head_booke.h ++++ b/arch/powerpc/kernel/head_booke.h +@@ -32,6 +32,16 @@ + */ + #define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4)) + ++#ifdef CONFIG_PPC_FSL_BOOK3E ++#define BOOKE_CLEAR_BTB(reg) \ ++START_BTB_FLUSH_SECTION \ ++ BTB_FLUSH(reg) \ ++END_BTB_FLUSH_SECTION ++#else ++#define BOOKE_CLEAR_BTB(reg) ++#endif ++ ++ + #define NORMAL_EXCEPTION_PROLOG(intno) \ + mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \ + mfspr r10, SPRN_SPRG_THREAD; \ +@@ -43,6 +53,7 @@ + andi. r11, r11, MSR_PR; /* check whether user or kernel */\ + mr r11, r1; \ + beq 1f; \ ++ BOOKE_CLEAR_BTB(r11) \ + /* if from user, start at top of this thread's kernel stack */ \ + lwz r11, THREAD_INFO-THREAD(r10); \ + ALLOC_STACK_FRAME(r11, THREAD_SIZE); \ +@@ -128,6 +139,7 @@ + stw r9,_CCR(r8); /* save CR on stack */\ + mfspr r11,exc_level_srr1; /* check whether user or kernel */\ + DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \ ++ BOOKE_CLEAR_BTB(r10) \ + andi. r11,r11,MSR_PR; \ + mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ + lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ +diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S +index bf4c6021515f..60a0aeefc4a7 100644 +--- a/arch/powerpc/kernel/head_fsl_booke.S ++++ b/arch/powerpc/kernel/head_fsl_booke.S +@@ -452,6 +452,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) + mfcr r13 + stw r13, THREAD_NORMSAVE(3)(r10) + DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1 ++START_BTB_FLUSH_SECTION ++ mfspr r11, SPRN_SRR1 ++ andi. r10,r11,MSR_PR ++ beq 1f ++ BTB_FLUSH(r10) ++1: ++END_BTB_FLUSH_SECTION + mfspr r10, SPRN_DEAR /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the +@@ -546,6 +553,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) + mfcr r13 + stw r13, THREAD_NORMSAVE(3)(r10) + DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1 ++START_BTB_FLUSH_SECTION ++ mfspr r11, SPRN_SRR1 ++ andi. 
r10,r11,MSR_PR ++ beq 1f ++ BTB_FLUSH(r10) ++1: ++END_BTB_FLUSH_SECTION ++ + mfspr r10, SPRN_SRR0 /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the +diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c +index 3f7ba0f5bf29..77371c9ef3d8 100644 +--- a/arch/powerpc/kernel/module.c ++++ b/arch/powerpc/kernel/module.c +@@ -72,7 +72,15 @@ int module_finalize(const Elf_Ehdr *hdr, + do_feature_fixups(powerpc_firmware_features, + (void *)sect->sh_addr, + (void *)sect->sh_addr + sect->sh_size); +-#endif ++#endif /* CONFIG_PPC64 */ ++ ++#ifdef CONFIG_PPC_BARRIER_NOSPEC ++ sect = find_section(hdr, sechdrs, "__spec_barrier_fixup"); ++ if (sect != NULL) ++ do_barrier_nospec_fixups_range(barrier_nospec_enabled, ++ (void *)sect->sh_addr, ++ (void *)sect->sh_addr + sect->sh_size); ++#endif /* CONFIG_PPC_BARRIER_NOSPEC */ + + sect = find_section(hdr, sechdrs, "__lwsync_fixup"); + if (sect != NULL) +diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c +index b98a722da915..48b50fb8dc4b 100644 +--- a/arch/powerpc/kernel/security.c ++++ b/arch/powerpc/kernel/security.c +@@ -9,11 +9,120 @@ + #include <linux/seq_buf.h> + + #include <asm/debugfs.h> ++#include <asm/asm-prototypes.h> ++#include <asm/code-patching.h> + #include <asm/security_features.h> ++#include <asm/setup.h> + + + unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; + ++enum count_cache_flush_type { ++ COUNT_CACHE_FLUSH_NONE = 0x1, ++ COUNT_CACHE_FLUSH_SW = 0x2, ++ COUNT_CACHE_FLUSH_HW = 0x4, ++}; ++static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; ++ ++bool barrier_nospec_enabled; ++static bool no_nospec; ++static bool btb_flush_enabled; ++#ifdef CONFIG_PPC_FSL_BOOK3E ++static bool no_spectrev2; ++#endif ++ ++static void enable_barrier_nospec(bool enable) ++{ ++ barrier_nospec_enabled = enable; ++ do_barrier_nospec_fixups(enable); ++} ++ ++void setup_barrier_nospec(void) ++{ ++ bool enable; ++ ++ /* ++ * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well. ++ * But there's a good reason not to. The two flags we check below are ++ * both are enabled by default in the kernel, so if the hcall is not ++ * functional they will be enabled. ++ * On a system where the host firmware has been updated (so the ori ++ * functions as a barrier), but on which the hypervisor (KVM/Qemu) has ++ * not been updated, we would like to enable the barrier. Dropping the ++ * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is ++ * we potentially enable the barrier on systems where the host firmware ++ * is not updated, but that's harmless as it's a no-op. ++ */ ++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && ++ security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR); ++ ++ if (!no_nospec) ++ enable_barrier_nospec(enable); ++} ++ ++static int __init handle_nospectre_v1(char *p) ++{ ++ no_nospec = true; ++ ++ return 0; ++} ++early_param("nospectre_v1", handle_nospectre_v1); ++ ++#ifdef CONFIG_DEBUG_FS ++static int barrier_nospec_set(void *data, u64 val) ++{ ++ switch (val) { ++ case 0: ++ case 1: ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ if (!!val == !!barrier_nospec_enabled) ++ return 0; ++ ++ enable_barrier_nospec(!!val); ++ ++ return 0; ++} ++ ++static int barrier_nospec_get(void *data, u64 *val) ++{ ++ *val = barrier_nospec_enabled ? 
1 : 0; ++ return 0; ++} ++ ++DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec, ++ barrier_nospec_get, barrier_nospec_set, "%llu\n"); ++ ++static __init int barrier_nospec_debugfs_init(void) ++{ ++ debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL, ++ &fops_barrier_nospec); ++ return 0; ++} ++device_initcall(barrier_nospec_debugfs_init); ++#endif /* CONFIG_DEBUG_FS */ ++ ++#ifdef CONFIG_PPC_FSL_BOOK3E ++static int __init handle_nospectre_v2(char *p) ++{ ++ no_spectrev2 = true; ++ ++ return 0; ++} ++early_param("nospectre_v2", handle_nospectre_v2); ++void setup_spectre_v2(void) ++{ ++ if (no_spectrev2) ++ do_btb_flush_fixups(); ++ else ++ btb_flush_enabled = true; ++} ++#endif /* CONFIG_PPC_FSL_BOOK3E */ ++ ++#ifdef CONFIG_PPC_BOOK3S_64 + ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) + { + bool thread_priv; +@@ -46,25 +155,39 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha + + return sprintf(buf, "Vulnerable\n"); + } ++#endif + + ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) + { +- if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) +- return sprintf(buf, "Not affected\n"); ++ struct seq_buf s; + +- return sprintf(buf, "Vulnerable\n"); ++ seq_buf_init(&s, buf, PAGE_SIZE - 1); ++ ++ if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) { ++ if (barrier_nospec_enabled) ++ seq_buf_printf(&s, "Mitigation: __user pointer sanitization"); ++ else ++ seq_buf_printf(&s, "Vulnerable"); ++ ++ if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31)) ++ seq_buf_printf(&s, ", ori31 speculation barrier enabled"); ++ ++ seq_buf_printf(&s, "\n"); ++ } else ++ seq_buf_printf(&s, "Not affected\n"); ++ ++ return s.len; + } + + ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) + { +- bool bcs, ccd, ori; + struct seq_buf s; ++ bool bcs, ccd; + + seq_buf_init(&s, buf, PAGE_SIZE - 1); + + bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); + ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED); +- ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31); + + if (bcs || ccd) { + seq_buf_printf(&s, "Mitigation: "); +@@ -77,17 +200,23 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c + + if (ccd) + seq_buf_printf(&s, "Indirect branch cache disabled"); +- } else ++ } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) { ++ seq_buf_printf(&s, "Mitigation: Software count cache flush"); ++ ++ if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW) ++ seq_buf_printf(&s, " (hardware accelerated)"); ++ } else if (btb_flush_enabled) { ++ seq_buf_printf(&s, "Mitigation: Branch predictor state flush"); ++ } else { + seq_buf_printf(&s, "Vulnerable"); +- +- if (ori) +- seq_buf_printf(&s, ", ori31 speculation barrier enabled"); ++ } + + seq_buf_printf(&s, "\n"); + + return s.len; + } + ++#ifdef CONFIG_PPC_BOOK3S_64 + /* + * Store-forwarding barrier support. 
+ */ +@@ -235,3 +364,71 @@ static __init int stf_barrier_debugfs_init(void) + } + device_initcall(stf_barrier_debugfs_init); + #endif /* CONFIG_DEBUG_FS */ ++ ++static void toggle_count_cache_flush(bool enable) ++{ ++ if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { ++ patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP); ++ count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; ++ pr_info("count-cache-flush: software flush disabled.\n"); ++ return; ++ } ++ ++ patch_branch_site(&patch__call_flush_count_cache, ++ (u64)&flush_count_cache, BRANCH_SET_LINK); ++ ++ if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { ++ count_cache_flush_type = COUNT_CACHE_FLUSH_SW; ++ pr_info("count-cache-flush: full software flush sequence enabled.\n"); ++ return; ++ } ++ ++ patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR); ++ count_cache_flush_type = COUNT_CACHE_FLUSH_HW; ++ pr_info("count-cache-flush: hardware assisted flush sequence enabled\n"); ++} ++ ++void setup_count_cache_flush(void) ++{ ++ toggle_count_cache_flush(true); ++} ++ ++#ifdef CONFIG_DEBUG_FS ++static int count_cache_flush_set(void *data, u64 val) ++{ ++ bool enable; ++ ++ if (val == 1) ++ enable = true; ++ else if (val == 0) ++ enable = false; ++ else ++ return -EINVAL; ++ ++ toggle_count_cache_flush(enable); ++ ++ return 0; ++} ++ ++static int count_cache_flush_get(void *data, u64 *val) ++{ ++ if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE) ++ *val = 0; ++ else ++ *val = 1; ++ ++ return 0; ++} ++ ++DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get, ++ count_cache_flush_set, "%llu\n"); ++ ++static __init int count_cache_flush_debugfs_init(void) ++{ ++ debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root, ++ NULL, &fops_count_cache_flush); ++ return 0; ++} ++device_initcall(count_cache_flush_debugfs_init); ++#endif /* CONFIG_DEBUG_FS */ ++#endif /* CONFIG_PPC_BOOK3S_64 */ +diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c +index 008447664643..c58364c74dad 100644 +--- a/arch/powerpc/kernel/setup-common.c ++++ b/arch/powerpc/kernel/setup-common.c +@@ -937,6 +937,9 @@ void __init setup_arch(char **cmdline_p) + if (ppc_md.setup_arch) + ppc_md.setup_arch(); + ++ setup_barrier_nospec(); ++ setup_spectre_v2(); ++ + paging_init(); + + /* Initialize the MMU context management stuff. */ +diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S +index c89ffb88fa3b..b0cf4af7ba84 100644 +--- a/arch/powerpc/kernel/vmlinux.lds.S ++++ b/arch/powerpc/kernel/vmlinux.lds.S +@@ -153,8 +153,25 @@ SECTIONS + *(__rfi_flush_fixup) + __stop___rfi_flush_fixup = .; + } +-#endif ++#endif /* CONFIG_PPC64 */ ++ ++#ifdef CONFIG_PPC_BARRIER_NOSPEC ++ . = ALIGN(8); ++ __spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) { ++ __start___barrier_nospec_fixup = .; ++ *(__barrier_nospec_fixup) ++ __stop___barrier_nospec_fixup = .; ++ } ++#endif /* CONFIG_PPC_BARRIER_NOSPEC */ + ++#ifdef CONFIG_PPC_FSL_BOOK3E ++ . 
= ALIGN(8); ++ __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) { ++ __start__btb_flush_fixup = .; ++ *(__btb_flush_fixup) ++ __stop__btb_flush_fixup = .; ++ } ++#endif + EXCEPTION_TABLE(0) + + NOTES :kernel :notes +diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S +index 81bd8a07aa51..612b7f6a887f 100644 +--- a/arch/powerpc/kvm/bookehv_interrupts.S ++++ b/arch/powerpc/kvm/bookehv_interrupts.S +@@ -75,6 +75,10 @@ + PPC_LL r1, VCPU_HOST_STACK(r4) + PPC_LL r2, HOST_R2(r1) + ++START_BTB_FLUSH_SECTION ++ BTB_FLUSH(r10) ++END_BTB_FLUSH_SECTION ++ + mfspr r10, SPRN_PID + lwz r8, VCPU_HOST_PID(r4) + PPC_LL r11, VCPU_SHARED(r4) +diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c +index 990db69a1d0b..fa88f641ac03 100644 +--- a/arch/powerpc/kvm/e500_emulate.c ++++ b/arch/powerpc/kvm/e500_emulate.c +@@ -277,6 +277,13 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va + vcpu->arch.pwrmgtcr0 = spr_val; + break; + ++ case SPRN_BUCSR: ++ /* ++ * If we are here, it means that we have already flushed the ++ * branch predictor, so just return to guest. ++ */ ++ break; ++ + /* extra exceptions */ + #ifdef CONFIG_SPE_POSSIBLE + case SPRN_IVOR32: +diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c +index 130405158afa..c5154817178b 100644 +--- a/arch/powerpc/lib/code-patching.c ++++ b/arch/powerpc/lib/code-patching.c +@@ -206,6 +206,22 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags) + return patch_instruction(addr, create_branch(addr, target, flags)); + } + ++int patch_branch_site(s32 *site, unsigned long target, int flags) ++{ ++ unsigned int *addr; ++ ++ addr = (unsigned int *)((unsigned long)site + *site); ++ return patch_instruction(addr, create_branch(addr, target, flags)); ++} ++ ++int patch_instruction_site(s32 *site, unsigned int instr) ++{ ++ unsigned int *addr; ++ ++ addr = (unsigned int *)((unsigned long)site + *site); ++ return patch_instruction(addr, instr); ++} ++ + bool is_offset_in_branch_range(long offset) + { + /* +diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c +index e1bcdc32a851..de7861e09b41 100644 +--- a/arch/powerpc/lib/feature-fixups.c ++++ b/arch/powerpc/lib/feature-fixups.c +@@ -277,8 +277,101 @@ void do_rfi_flush_fixups(enum l1d_flush_type types) + (types & L1D_FLUSH_MTTRIG) ? 
"mttrig type" + : "unknown"); + } ++ ++void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end) ++{ ++ unsigned int instr, *dest; ++ long *start, *end; ++ int i; ++ ++ start = fixup_start; ++ end = fixup_end; ++ ++ instr = 0x60000000; /* nop */ ++ ++ if (enable) { ++ pr_info("barrier-nospec: using ORI speculation barrier\n"); ++ instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */ ++ } ++ ++ for (i = 0; start < end; start++, i++) { ++ dest = (void *)start + *start; ++ ++ pr_devel("patching dest %lx\n", (unsigned long)dest); ++ patch_instruction(dest, instr); ++ } ++ ++ printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); ++} ++ + #endif /* CONFIG_PPC_BOOK3S_64 */ + ++#ifdef CONFIG_PPC_BARRIER_NOSPEC ++void do_barrier_nospec_fixups(bool enable) ++{ ++ void *start, *end; ++ ++ start = PTRRELOC(&__start___barrier_nospec_fixup), ++ end = PTRRELOC(&__stop___barrier_nospec_fixup); ++ ++ do_barrier_nospec_fixups_range(enable, start, end); ++} ++#endif /* CONFIG_PPC_BARRIER_NOSPEC */ ++ ++#ifdef CONFIG_PPC_FSL_BOOK3E ++void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end) ++{ ++ unsigned int instr[2], *dest; ++ long *start, *end; ++ int i; ++ ++ start = fixup_start; ++ end = fixup_end; ++ ++ instr[0] = PPC_INST_NOP; ++ instr[1] = PPC_INST_NOP; ++ ++ if (enable) { ++ pr_info("barrier-nospec: using isync; sync as speculation barrier\n"); ++ instr[0] = PPC_INST_ISYNC; ++ instr[1] = PPC_INST_SYNC; ++ } ++ ++ for (i = 0; start < end; start++, i++) { ++ dest = (void *)start + *start; ++ ++ pr_devel("patching dest %lx\n", (unsigned long)dest); ++ patch_instruction(dest, instr[0]); ++ patch_instruction(dest + 1, instr[1]); ++ } ++ ++ printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); ++} ++ ++static void patch_btb_flush_section(long *curr) ++{ ++ unsigned int *start, *end; ++ ++ start = (void *)curr + *curr; ++ end = (void *)curr + *(curr + 1); ++ for (; start < end; start++) { ++ pr_devel("patching dest %lx\n", (unsigned long)start); ++ patch_instruction(start, PPC_INST_NOP); ++ } ++} ++ ++void do_btb_flush_fixups(void) ++{ ++ long *start, *end; ++ ++ start = PTRRELOC(&__start__btb_flush_fixup); ++ end = PTRRELOC(&__stop__btb_flush_fixup); ++ ++ for (; start < end; start += 2) ++ patch_btb_flush_section(start); ++} ++#endif /* CONFIG_PPC_FSL_BOOK3E */ ++ + void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) + { + long *start, *end; +diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S +index eb82d787d99a..b7e9c09dfe19 100644 +--- a/arch/powerpc/mm/tlb_low_64e.S ++++ b/arch/powerpc/mm/tlb_low_64e.S +@@ -69,6 +69,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) + std r15,EX_TLB_R15(r12) + std r10,EX_TLB_CR(r12) + #ifdef CONFIG_PPC_FSL_BOOK3E ++START_BTB_FLUSH_SECTION ++ mfspr r11, SPRN_SRR1 ++ andi. 
r10,r11,MSR_PR ++ beq 1f ++ BTB_FLUSH(r10) ++1: ++END_BTB_FLUSH_SECTION + std r7,EX_TLB_R7(r12) + #endif + TLB_MISS_PROLOG_STATS +diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h +index 47fc6660845d..68dece206048 100644 +--- a/arch/powerpc/net/bpf_jit.h ++++ b/arch/powerpc/net/bpf_jit.h +@@ -51,6 +51,8 @@ + #define PPC_LIS(r, i) PPC_ADDIS(r, 0, i) + #define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \ + ___PPC_RA(base) | ((i) & 0xfffc)) ++#define PPC_STDX(r, base, b) EMIT(PPC_INST_STDX | ___PPC_RS(r) | \ ++ ___PPC_RA(base) | ___PPC_RB(b)) + #define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \ + ___PPC_RA(base) | ((i) & 0xfffc)) + #define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \ +@@ -65,7 +67,9 @@ + #define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \ + ___PPC_RA(base) | IMM_L(i)) + #define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \ +- ___PPC_RA(base) | IMM_L(i)) ++ ___PPC_RA(base) | ((i) & 0xfffc)) ++#define PPC_LDX(r, base, b) EMIT(PPC_INST_LDX | ___PPC_RT(r) | \ ++ ___PPC_RA(base) | ___PPC_RB(b)) + #define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \ + ___PPC_RA(base) | IMM_L(i)) + #define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \ +@@ -85,17 +89,6 @@ + ___PPC_RA(a) | ___PPC_RB(b)) + #define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +- +-#ifdef CONFIG_PPC64 +-#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0) +-#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0) +-#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0) +-#else +-#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0) +-#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0) +-#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0) +-#endif +- + #define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i)) + #define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i)) + #define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \ +diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h +index a8cd7e289ecd..81a9045d8410 100644 +--- a/arch/powerpc/net/bpf_jit32.h ++++ b/arch/powerpc/net/bpf_jit32.h +@@ -122,6 +122,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); + #define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i) + #endif + ++#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0) ++#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0) ++#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0) ++ + #define SEEN_DATAREF 0x10000 /* might call external helpers */ + #define SEEN_XREG 0x20000 /* X reg is used */ + #define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary +diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h +index 62fa7589db2b..bb944b6018d7 100644 +--- a/arch/powerpc/net/bpf_jit64.h ++++ b/arch/powerpc/net/bpf_jit64.h +@@ -86,6 +86,26 @@ DECLARE_LOAD_FUNC(sk_load_byte); + (imm >= SKF_LL_OFF ? func##_negative_offset : func) : \ + func##_positive_offset) + ++/* ++ * WARNING: These can use TMP_REG_2 if the offset is not at word boundary, ++ * so ensure that it isn't in use already. 
++ */ ++#define PPC_BPF_LL(r, base, i) do { \ ++ if ((i) % 4) { \ ++ PPC_LI(b2p[TMP_REG_2], (i)); \ ++ PPC_LDX(r, base, b2p[TMP_REG_2]); \ ++ } else \ ++ PPC_LD(r, base, i); \ ++ } while(0) ++#define PPC_BPF_STL(r, base, i) do { \ ++ if ((i) % 4) { \ ++ PPC_LI(b2p[TMP_REG_2], (i)); \ ++ PPC_STDX(r, base, b2p[TMP_REG_2]); \ ++ } else \ ++ PPC_STD(r, base, i); \ ++ } while(0) ++#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0) ++ + #define SEEN_FUNC 0x1000 /* might call external helpers */ + #define SEEN_STACK 0x2000 /* uses BPF stack */ + #define SEEN_SKB 0x4000 /* uses sk_buff */ +diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c +index fee1e1f8c9d3..3a21d3956ad4 100644 +--- a/arch/powerpc/net/bpf_jit_comp64.c ++++ b/arch/powerpc/net/bpf_jit_comp64.c +@@ -261,7 +261,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 + * if (tail_call_cnt > MAX_TAIL_CALL_CNT) + * goto out; + */ +- PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)); ++ PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)); + PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT); + PPC_BCC(COND_GT, out); + +@@ -274,7 +274,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 + /* prog = array->ptrs[index]; */ + PPC_MULI(b2p[TMP_REG_1], b2p_index, 8); + PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array); +- PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs)); ++ PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs)); + + /* + * if (prog == NULL) +@@ -284,7 +284,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 + PPC_BCC(COND_EQ, out); + + /* goto *(prog->bpf_func + prologue_size); */ +- PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func)); ++ PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func)); + #ifdef PPC64_ELF_ABI_v1 + /* skip past the function descriptor */ + PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], +@@ -616,7 +616,7 @@ bpf_alu32_trunc: + * the instructions generated will remain the + * same across all passes + */ +- PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx)); ++ PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx)); + PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)); + PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]); + break; +@@ -672,7 +672,7 @@ emit_clear: + PPC_LI32(b2p[TMP_REG_1], imm); + src_reg = b2p[TMP_REG_1]; + } +- PPC_STD(src_reg, dst_reg, off); ++ PPC_BPF_STL(src_reg, dst_reg, off); + break; + + /* +@@ -719,7 +719,7 @@ emit_clear: + break; + /* dst = *(u64 *)(ul) (src + off) */ + case BPF_LDX | BPF_MEM | BPF_DW: +- PPC_LD(dst_reg, src_reg, off); ++ PPC_BPF_LL(dst_reg, src_reg, off); + break; + + /* +diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c +index fd143c934768..888aa9584e94 100644 +--- a/arch/powerpc/platforms/powernv/setup.c ++++ b/arch/powerpc/platforms/powernv/setup.c +@@ -77,6 +77,12 @@ static void init_fw_feat_flags(struct device_node *np) + if (fw_feature_is("enabled", "fw-count-cache-disabled", np)) + security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED); + ++ if (fw_feature_is("enabled", "fw-count-cache-flush-bcctr2,0,0", np)) ++ security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST); ++ ++ if (fw_feature_is("enabled", "needs-count-cache-flush-on-context-switch", np)) ++ security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE); ++ + /* + * The features below are enabled by default, so we instead look to see + * if firmware has 
*disabled* them, and clear them if so. +@@ -123,6 +129,7 @@ static void pnv_setup_rfi_flush(void) + security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV)); + + setup_rfi_flush(type, enable); ++ setup_count_cache_flush(); + } + + static void __init pnv_setup_arch(void) +diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c +index 45f814041448..6a0ad56e89b9 100644 +--- a/arch/powerpc/platforms/pseries/setup.c ++++ b/arch/powerpc/platforms/pseries/setup.c +@@ -484,6 +484,12 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result) + if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED) + security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED); + ++ if (result->character & H_CPU_CHAR_BCCTR_FLUSH_ASSIST) ++ security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST); ++ ++ if (result->behaviour & H_CPU_BEHAV_FLUSH_COUNT_CACHE) ++ security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE); ++ + /* + * The features below are enabled by default, so we instead look to see + * if firmware has *disabled* them, and clear them if so. +@@ -534,6 +540,7 @@ void pseries_setup_rfi_flush(void) + security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR); + + setup_rfi_flush(types, enable); ++ setup_count_cache_flush(); + } + + static void __init pSeries_setup_arch(void) +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index 4f393eb9745f..8fec1585ac7a 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -2139,14 +2139,8 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING + If unsure, leave at the default value. + + config HOTPLUG_CPU +- bool "Support for hot-pluggable CPUs" ++ def_bool y + depends on SMP +- ---help--- +- Say Y here to allow turning CPUs off and on. CPUs can be +- controlled through /sys/devices/system/cpu. +- ( Note: power management support will enable this option +- automatically on SMP systems. ) +- Say N if you want to disable CPU hotplug. 
+ + config BOOTPARAM_HOTPLUG_CPU0 + bool "Set default setting of cpu0_hotpluggable" +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index d2ae93faafe8..f9a4b85d7309 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -509,6 +509,7 @@ struct kvm_vcpu_arch { + bool tpr_access_reporting; + u64 ia32_xss; + u64 microcode_version; ++ u64 arch_capabilities; + + /* + * Paging state of the vcpu +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 229d5e39f5c0..4bd878c9f7d2 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -740,7 +740,6 @@ struct vcpu_vmx { + u64 msr_guest_kernel_gs_base; + #endif + +- u64 arch_capabilities; + u64 spec_ctrl; + + u32 vm_entry_controls_shadow; +@@ -3493,12 +3492,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + + msr_info->data = to_vmx(vcpu)->spec_ctrl; + break; +- case MSR_IA32_ARCH_CAPABILITIES: +- if (!msr_info->host_initiated && +- !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) +- return 1; +- msr_info->data = to_vmx(vcpu)->arch_capabilities; +- break; + case MSR_IA32_SYSENTER_CS: + msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); + break; +@@ -3663,11 +3656,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD, + MSR_TYPE_W); + break; +- case MSR_IA32_ARCH_CAPABILITIES: +- if (!msr_info->host_initiated) +- return 1; +- vmx->arch_capabilities = data; +- break; + case MSR_IA32_CR_PAT: + if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { + if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) +@@ -5929,8 +5917,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) + ++vmx->nmsrs; + } + +- vmx->arch_capabilities = kvm_get_arch_capabilities(); +- + vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); + + /* 22.2.1, 20.8.1 */ +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index ce5b3dc348ce..5f85f17ffb75 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -2234,6 +2234,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + if (msr_info->host_initiated) + vcpu->arch.microcode_version = data; + break; ++ case MSR_IA32_ARCH_CAPABILITIES: ++ if (!msr_info->host_initiated) ++ return 1; ++ vcpu->arch.arch_capabilities = data; ++ break; + case MSR_EFER: + return set_efer(vcpu, data); + case MSR_K7_HWCR: +@@ -2523,6 +2528,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + case MSR_IA32_UCODE_REV: + msr_info->data = vcpu->arch.microcode_version; + break; ++ case MSR_IA32_ARCH_CAPABILITIES: ++ if (!msr_info->host_initiated && ++ !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) ++ return 1; ++ msr_info->data = vcpu->arch.arch_capabilities; ++ break; + case MSR_MTRRcap: + case 0x200 ... 
0x2ff: + return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); +@@ -7918,6 +7929,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) + { + int r; + ++ vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); + kvm_vcpu_mtrr_init(vcpu); + r = vcpu_load(vcpu); + if (r) +diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c +index 89863ea25de1..ee923b1b820c 100644 +--- a/drivers/gpio/gpio-adnp.c ++++ b/drivers/gpio/gpio-adnp.c +@@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset) + if (err < 0) + goto out; + +- if (err & BIT(pos)) +- err = -EACCES; ++ if (value & BIT(pos)) { ++ err = -EPERM; ++ goto out; ++ } + + err = 0; + +diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c +index 0ecd2369c2ca..a09d2f9ebacc 100644 +--- a/drivers/gpio/gpio-exar.c ++++ b/drivers/gpio/gpio-exar.c +@@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev) + mutex_init(&exar_gpio->lock); + + index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); ++ if (index < 0) ++ goto err_destroy; + + sprintf(exar_gpio->name, "exar_gpio%d", index); + exar_gpio->gpio_chip.label = exar_gpio->name; +diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c +index 81c7ab10c083..aa592277d510 100644 +--- a/drivers/gpu/drm/vgem/vgem_drv.c ++++ b/drivers/gpu/drm/vgem/vgem_drv.c +@@ -192,13 +192,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev, + ret = drm_gem_handle_create(file, &obj->base, handle); + drm_gem_object_put_unlocked(&obj->base); + if (ret) +- goto err; ++ return ERR_PTR(ret); + + return &obj->base; +- +-err: +- __vgem_gem_destroy(obj); +- return ERR_PTR(ret); + } + + static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, +diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c +index 3cf07b8ced1c..df01018acff1 100644 +--- a/drivers/isdn/hardware/mISDN/hfcmulti.c ++++ b/drivers/isdn/hardware/mISDN/hfcmulti.c +@@ -4367,7 +4367,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev, + if (m->clock2) + test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip); + +- if (ent->device == 0xB410) { ++ if (ent->vendor == PCI_VENDOR_ID_DIGIUM && ++ ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) { + test_and_set_bit(HFC_CHIP_B410P, &hc->chip); + test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip); + test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip); +diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c +index 9645c8f05c7f..c3c9d7e33bd6 100644 +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -629,22 +629,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy) + qca8k_port_set_status(priv, port, 1); + } + +-static int +-qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum) +-{ +- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; +- +- return mdiobus_read(priv->bus, phy, regnum); +-} +- +-static int +-qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val) +-{ +- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; +- +- return mdiobus_write(priv->bus, phy, regnum, val); +-} +- + static void + qca8k_get_strings(struct dsa_switch *ds, int port, uint8_t *data) + { +@@ -879,8 +863,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = { + .setup = qca8k_setup, + .adjust_link = qca8k_adjust_link, + .get_strings = qca8k_get_strings, +- .phy_read = qca8k_phy_read, +- .phy_write = qca8k_phy_write, + .get_ethtool_stats = qca8k_get_ethtool_stats, + .get_sset_count = 
qca8k_get_sset_count, + .get_mac_eee = qca8k_get_mac_eee, +diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c +index 9497f18eaba0..e95a7567bb23 100644 +--- a/drivers/net/ethernet/8390/mac8390.c ++++ b/drivers/net/ethernet/8390/mac8390.c +@@ -156,8 +156,6 @@ static void dayna_block_output(struct net_device *dev, int count, + #define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c)) + #define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c)) + +-#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c)) +- + /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ + static void slow_sane_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page); +@@ -237,19 +235,26 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev) + + static enum mac8390_access __init mac8390_testio(volatile unsigned long membase) + { +- unsigned long outdata = 0xA5A0B5B0; +- unsigned long indata = 0x00000000; ++ u32 outdata = 0xA5A0B5B0; ++ u32 indata = 0; ++ + /* Try writing 32 bits */ +- memcpy_toio(membase, &outdata, 4); +- /* Now compare them */ +- if (memcmp_withio(&outdata, membase, 4) == 0) ++ nubus_writel(outdata, membase); ++ /* Now read it back */ ++ indata = nubus_readl(membase); ++ if (outdata == indata) + return ACCESS_32; ++ ++ outdata = 0xC5C0D5D0; ++ indata = 0; ++ + /* Write 16 bit output */ + word_memcpy_tocard(membase, &outdata, 4); + /* Now read it back */ + word_memcpy_fromcard(&indata, membase, 4); + if (outdata == indata) + return ACCESS_16; ++ + return ACCESS_UNKNOWN; + } + +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +index 640babf752ea..784c3522aaa3 100644 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +@@ -270,11 +270,12 @@ int aq_ring_rx_clean(struct aq_ring_s *self, + } else { + if (buff->is_ip_cso) { + __skb_incr_checksum_unnecessary(skb); +- if (buff->is_udp_cso || buff->is_tcp_cso) +- __skb_incr_checksum_unnecessary(skb); + } else { + skb->ip_summed = CHECKSUM_NONE; + } ++ ++ if (buff->is_udp_cso || buff->is_tcp_cso) ++ __skb_incr_checksum_unnecessary(skb); + } + + skb_set_hash(skb, buff->rss_hash, +diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +index 09494e1c77c5..7ad1d56d8389 100644 +--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c ++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic, + /* Check if page can be recycled */ + if (page) { + ref_count = page_ref_count(page); +- /* Check if this page has been used once i.e 'put_page' +- * called after packet transmission i.e internal ref_count +- * and page's ref_count are equal i.e page can be recycled. ++ /* This page can be recycled if internal ref_count and page's ++ * ref_count are equal, indicating that the page has been used ++ * once for packet transmission. For non-XDP mode, internal ++ * ref_count is always '1'. + */ +- if (rbdr->is_xdp && (ref_count == pgcache->ref_count)) +- pgcache->ref_count--; +- else +- page = NULL; +- +- /* In non-XDP mode, page's ref_count needs to be '1' for it +- * to be recycled. 
+- */ +- if (!rbdr->is_xdp && (ref_count != 1)) ++ if (rbdr->is_xdp) { ++ if (ref_count == pgcache->ref_count) ++ pgcache->ref_count--; ++ else ++ page = NULL; ++ } else if (ref_count != 1) { + page = NULL; ++ } + } + + if (!page) { +@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) + while (head < rbdr->pgcnt) { + pgcache = &rbdr->pgcache[head]; + if (pgcache->page && page_ref_count(pgcache->page) != 0) { +- if (!rbdr->is_xdp) { +- put_page(pgcache->page); +- continue; ++ if (rbdr->is_xdp) { ++ page_ref_sub(pgcache->page, ++ pgcache->ref_count - 1); + } +- page_ref_sub(pgcache->page, pgcache->ref_count - 1); + put_page(pgcache->page); + } + head++; +diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +index 1af7b078b94d..d4c3bf78d928 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c ++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +@@ -114,10 +114,11 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc) + + static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) + { +- struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; ++ struct stmmac_rx_queue *rx_q = priv_ptr; ++ struct stmmac_priv *priv = rx_q->priv_data; + + /* Fill DES3 in case of RING mode */ +- if (priv->dma_buf_sz >= BUF_SIZE_8KiB) ++ if (priv->dma_buf_sz == BUF_SIZE_16KiB) + p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); + } + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 0cc83e8417ef..4a9dbee6f054 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -3787,6 +3787,20 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) + return ret; + } + ++static int stmmac_set_mac_address(struct net_device *ndev, void *addr) ++{ ++ struct stmmac_priv *priv = netdev_priv(ndev); ++ int ret = 0; ++ ++ ret = eth_mac_addr(ndev, addr); ++ if (ret) ++ return ret; ++ ++ priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0); ++ ++ return ret; ++} ++ + #ifdef CONFIG_DEBUG_FS + static struct dentry *stmmac_fs_dir; + +@@ -4014,7 +4028,7 @@ static const struct net_device_ops stmmac_netdev_ops = { + #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = stmmac_poll_controller, + #endif +- .ndo_set_mac_address = eth_mac_addr, ++ .ndo_set_mac_address = stmmac_set_mac_address, + }; + + /** +diff --git a/drivers/net/tun.c b/drivers/net/tun.c +index 4227ee33ef19..3b13d9e4030a 100644 +--- a/drivers/net/tun.c ++++ b/drivers/net/tun.c +@@ -1403,9 +1403,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, + u32 rxhash; + int skb_xdp = 1; + +- if (!(tun->dev->flags & IFF_UP)) +- return -EIO; +- + if (!(tun->flags & IFF_NO_PI)) { + if (len < sizeof(pi)) + return -EINVAL; +@@ -1493,9 +1490,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, + err = skb_copy_datagram_from_iter(skb, 0, from, len); + + if (err) { ++ err = -EFAULT; ++drop: + this_cpu_inc(tun->pcpu_stats->rx_dropped); + kfree_skb(skb); +- return -EFAULT; ++ return err; + } + } + +@@ -1566,11 +1565,20 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, + } + + rxhash = __skb_get_hash_symmetric(skb); ++ ++ rcu_read_lock(); ++ if (unlikely(!(tun->dev->flags & IFF_UP))) { ++ err = -EIO; ++ rcu_read_unlock(); ++ goto drop; ++ } ++ + #ifndef CONFIG_4KSTACKS + tun_rx_batched(tun, tfile, skb, 
more); + #else + netif_rx_ni(skb); + #endif ++ rcu_read_unlock(); + + stats = get_cpu_ptr(tun->pcpu_stats); + u64_stats_update_begin(&stats->syncp); +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c +index df48f65c4f90..2fbaa279988e 100644 +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -3793,10 +3793,8 @@ static void __net_exit vxlan_exit_net(struct net *net) + /* If vxlan->dev is in the same netns, it has already been added + * to the list by the previous loop. + */ +- if (!net_eq(dev_net(vxlan->dev), net)) { +- gro_cells_destroy(&vxlan->gro_cells); ++ if (!net_eq(dev_net(vxlan->dev), net)) + unregister_netdevice_queue(vxlan->dev, &list); +- } + } + + unregister_netdevice_many(&list); +diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c +index d1ccff527756..4d34dfb64998 100644 +--- a/drivers/phy/allwinner/phy-sun4i-usb.c ++++ b/drivers/phy/allwinner/phy-sun4i-usb.c +@@ -480,8 +480,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy, enum phy_mode mode) + struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy); + int new_mode; + +- if (phy->index != 0) ++ if (phy->index != 0) { ++ if (mode == PHY_MODE_USB_HOST) ++ return 0; + return -EINVAL; ++ } + + switch (mode) { + case PHY_MODE_USB_HOST: +diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c +index ae7a49ade414..d22759eb6640 100644 +--- a/drivers/s390/cio/vfio_ccw_drv.c ++++ b/drivers/s390/cio/vfio_ccw_drv.c +@@ -70,20 +70,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) + { + struct vfio_ccw_private *private; + struct irb *irb; ++ bool is_final; + + private = container_of(work, struct vfio_ccw_private, io_work); + irb = &private->irb; + ++ is_final = !(scsw_actl(&irb->scsw) & ++ (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)); + if (scsw_is_solicited(&irb->scsw)) { + cp_update_scsw(&private->cp, &irb->scsw); +- cp_free(&private->cp); ++ if (is_final) ++ cp_free(&private->cp); + } + memcpy(private->io_region.irb_area, irb, sizeof(*irb)); + + if (private->io_trigger) + eventfd_signal(private->io_trigger, 1); + +- if (private->mdev) ++ if (private->mdev && is_final) + private->state = VFIO_CCW_STATE_IDLE; + } + +diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c +index 7aa243a6cdbf..6d5065f679ac 100644 +--- a/drivers/s390/scsi/zfcp_erp.c ++++ b/drivers/s390/scsi/zfcp_erp.c +@@ -652,6 +652,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action) + add_timer(&erp_action->timer); + } + ++void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter, ++ int clear, char *dbftag) ++{ ++ unsigned long flags; ++ struct zfcp_port *port; ++ ++ write_lock_irqsave(&adapter->erp_lock, flags); ++ read_lock(&adapter->port_list_lock); ++ list_for_each_entry(port, &adapter->port_list, list) ++ _zfcp_erp_port_forced_reopen(port, clear, dbftag); ++ read_unlock(&adapter->port_list_lock); ++ write_unlock_irqrestore(&adapter->erp_lock, flags); ++} ++ + static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, + int clear, char *id) + { +@@ -1306,6 +1320,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port) + struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev); + int lun_status; + ++ if (sdev->sdev_state == SDEV_DEL || ++ sdev->sdev_state == SDEV_CANCEL) ++ continue; + if (zsdev->port != port) + continue; + /* LUN under port of interest */ +diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h +index c1092a11e728..1b2e2541b1de 100644 +--- a/drivers/s390/scsi/zfcp_ext.h ++++ 
b/drivers/s390/scsi/zfcp_ext.h +@@ -68,6 +68,8 @@ extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32); + extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *); + extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *); + extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *); ++extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter, ++ int clear, char *dbftag); + extern void zfcp_erp_set_lun_status(struct scsi_device *, u32); + extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32); + extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *); +diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c +index 0b6f51424745..6f6bc73a3a10 100644 +--- a/drivers/s390/scsi/zfcp_scsi.c ++++ b/drivers/s390/scsi/zfcp_scsi.c +@@ -327,6 +327,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; + int ret = SUCCESS, fc_ret; + ++ if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) { ++ zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p"); ++ zfcp_erp_wait(adapter); ++ } + zfcp_erp_adapter_reopen(adapter, 0, "schrh_1"); + zfcp_erp_wait(adapter); + fc_ret = fc_block_scsi_eh(scpnt); +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c +index d0cc8fb40f63..e0c0fea227c1 100644 +--- a/drivers/scsi/sd.c ++++ b/drivers/scsi/sd.c +@@ -1420,11 +1420,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode) + scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); + } + +- /* +- * XXX and what if there are packets in flight and this close() +- * XXX is followed by a "rmmod sd_mod"? +- */ +- + scsi_disk_put(sdkp); + } + +@@ -3089,6 +3084,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, + unsigned int opt_xfer_bytes = + logical_to_bytes(sdp, sdkp->opt_xfer_blocks); + ++ if (sdkp->opt_xfer_blocks == 0) ++ return false; ++ + if (sdkp->opt_xfer_blocks > dev_max) { + sd_first_printk(KERN_WARNING, sdkp, + "Optimal transfer size %u logical blocks " \ +@@ -3521,11 +3519,23 @@ static void scsi_disk_release(struct device *dev) + { + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct gendisk *disk = sdkp->disk; +- ++ struct request_queue *q = disk->queue; ++ + spin_lock(&sd_index_lock); + ida_remove(&sd_index_ida, sdkp->index); + spin_unlock(&sd_index_lock); + ++ /* ++ * Wait until all requests that are in progress have completed. ++ * This is necessary to avoid that e.g. scsi_end_request() crashes ++ * due to clearing the disk->private_data pointer. Wait from inside ++ * scsi_disk_release() instead of from sd_release() to avoid that ++ * freezing and unfreezing the request queue affects user space I/O ++ * in case multiple processes open a /dev/sd... node concurrently. 
++ */ ++ blk_mq_freeze_queue(q); ++ blk_mq_unfreeze_queue(q); ++ + disk->private_data = NULL; + put_disk(disk); + put_device(&sdkp->device->sdev_gendev); +diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h +index 1bb9986f865e..33f249af0063 100644 +--- a/drivers/staging/comedi/comedidev.h ++++ b/drivers/staging/comedi/comedidev.h +@@ -992,6 +992,8 @@ int comedi_dio_insn_config(struct comedi_device *dev, + unsigned int mask); + unsigned int comedi_dio_update_state(struct comedi_subdevice *s, + unsigned int *data); ++unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s, ++ struct comedi_cmd *cmd); + unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s); + unsigned int comedi_nscans_left(struct comedi_subdevice *s, + unsigned int nscans); +diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c +index c11c22bd6d13..2e532219f08b 100644 +--- a/drivers/staging/comedi/drivers.c ++++ b/drivers/staging/comedi/drivers.c +@@ -390,11 +390,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s, + EXPORT_SYMBOL_GPL(comedi_dio_update_state); + + /** +- * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes ++ * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in ++ * bytes + * @s: COMEDI subdevice. ++ * @cmd: COMEDI command. + * + * Determines the overall scan length according to the subdevice type and the +- * number of channels in the scan. ++ * number of channels in the scan for the specified command. + * + * For digital input, output or input/output subdevices, samples for + * multiple channels are assumed to be packed into one or more unsigned +@@ -404,9 +406,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state); + * + * Returns the overall scan length in bytes. + */ +-unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) ++unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s, ++ struct comedi_cmd *cmd) + { +- struct comedi_cmd *cmd = &s->async->cmd; + unsigned int num_samples; + unsigned int bits_per_sample; + +@@ -423,6 +425,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) + } + return comedi_samples_to_bytes(s, num_samples); + } ++EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd); ++ ++/** ++ * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes ++ * @s: COMEDI subdevice. ++ * ++ * Determines the overall scan length according to the subdevice type and the ++ * number of channels in the scan for the current command. ++ * ++ * For digital input, output or input/output subdevices, samples for ++ * multiple channels are assumed to be packed into one or more unsigned ++ * short or unsigned int values according to the subdevice's %SDF_LSAMPL ++ * flag. For other types of subdevice, samples are assumed to occupy a ++ * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag. ++ * ++ * Returns the overall scan length in bytes. 
++ */ ++unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) ++{ ++ struct comedi_cmd *cmd = &s->async->cmd; ++ ++ return comedi_bytes_per_scan_cmd(s, cmd); ++} + EXPORT_SYMBOL_GPL(comedi_bytes_per_scan); + + static unsigned int __comedi_nscans_left(struct comedi_subdevice *s, +diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c +index 158f3e83efb6..36361bdf934a 100644 +--- a/drivers/staging/comedi/drivers/ni_mio_common.c ++++ b/drivers/staging/comedi/drivers/ni_mio_common.c +@@ -3523,6 +3523,7 @@ static int ni_cdio_check_chanlist(struct comedi_device *dev, + static int ni_cdio_cmdtest(struct comedi_device *dev, + struct comedi_subdevice *s, struct comedi_cmd *cmd) + { ++ unsigned int bytes_per_scan; + int err = 0; + int tmp; + +@@ -3552,9 +3553,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev, + err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0); + err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, + cmd->chanlist_len); +- err |= comedi_check_trigger_arg_max(&cmd->stop_arg, +- s->async->prealloc_bufsz / +- comedi_bytes_per_scan(s)); ++ bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd); ++ if (bytes_per_scan) { ++ err |= comedi_check_trigger_arg_max(&cmd->stop_arg, ++ s->async->prealloc_bufsz / ++ bytes_per_scan); ++ } + + if (err) + return 3; +diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c +index 1123b4f1e1d6..84a915199e64 100644 +--- a/drivers/staging/vt6655/device_main.c ++++ b/drivers/staging/vt6655/device_main.c +@@ -973,8 +973,6 @@ static void vnt_interrupt_process(struct vnt_private *priv) + return; + } + +- MACvIntDisable(priv->PortOffset); +- + spin_lock_irqsave(&priv->lock, flags); + + /* Read low level stats */ +@@ -1062,8 +1060,6 @@ static void vnt_interrupt_process(struct vnt_private *priv) + } + + spin_unlock_irqrestore(&priv->lock, flags); +- +- MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE); + } + + static void vnt_interrupt_work(struct work_struct *work) +@@ -1073,14 +1069,17 @@ static void vnt_interrupt_work(struct work_struct *work) + + if (priv->vif) + vnt_interrupt_process(priv); ++ ++ MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE); + } + + static irqreturn_t vnt_interrupt(int irq, void *arg) + { + struct vnt_private *priv = arg; + +- if (priv->vif) +- schedule_work(&priv->interrupt_work); ++ schedule_work(&priv->interrupt_work); ++ ++ MACvIntDisable(priv->PortOffset); + + return IRQ_HANDLED; + } +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c +index 2286e9d73115..9f1cef59fa28 100644 +--- a/drivers/tty/serial/atmel_serial.c ++++ b/drivers/tty/serial/atmel_serial.c +@@ -1163,6 +1163,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port) + sg_dma_len(&atmel_port->sg_rx)/2, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT); ++ if (!desc) { ++ dev_err(port->dev, "Preparing DMA cyclic failed\n"); ++ goto chan_err; ++ } + desc->callback = atmel_complete_rx_dma; + desc->callback_param = port; + atmel_port->desc_rx = desc; +diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c +index f2b0d8cee8ef..0314e78e31ff 100644 +--- a/drivers/tty/serial/kgdboc.c ++++ b/drivers/tty/serial/kgdboc.c +@@ -148,8 +148,10 @@ static int configure_kgdboc(void) + char *cptr = config; + struct console *cons; + +- if (!strlen(config) || isspace(config[0])) ++ if (!strlen(config) || isspace(config[0])) { ++ err = 0; + goto noconfig; ++ } + + kgdboc_io_ops.is_console = 0; + kgdb_tty_driver = NULL; +diff --git 
a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c +index 9dfedbe6c071..54660002271a 100644 +--- a/drivers/tty/serial/max310x.c ++++ b/drivers/tty/serial/max310x.c +@@ -1323,6 +1323,8 @@ static int max310x_spi_probe(struct spi_device *spi) + if (spi->dev.of_node) { + const struct of_device_id *of_id = + of_match_device(max310x_dt_ids, &spi->dev); ++ if (!of_id) ++ return -ENODEV; + + devtype = (struct max310x_devtype *)of_id->data; + } else { +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c +index 37dba940d898..d5f933ec153c 100644 +--- a/drivers/tty/serial/sh-sci.c ++++ b/drivers/tty/serial/sh-sci.c +@@ -806,19 +806,9 @@ static void sci_transmit_chars(struct uart_port *port) + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +- if (uart_circ_empty(xmit)) { ++ if (uart_circ_empty(xmit)) + sci_stop_tx(port); +- } else { +- ctrl = serial_port_in(port, SCSCR); +- +- if (port->type != PORT_SCI) { +- serial_port_in(port, SCxSR); /* Dummy read */ +- sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port)); +- } + +- ctrl |= SCSCR_TIE; +- serial_port_out(port, SCSCR, ctrl); +- } + } + + /* On SH3, SCIF may read end-of-break as a space->mark char */ +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 8ab0195f8d32..f736c8895089 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -570,10 +570,8 @@ static void acm_softint(struct work_struct *work) + clear_bit(EVENT_RX_STALL, &acm->flags); + } + +- if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) { ++ if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags)) + tty_port_tty_wakeup(&acm->port); +- clear_bit(EVENT_TTY_WAKEUP, &acm->flags); +- } + } + + /* +diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c +index 552ff7ac5a6b..c1ab14145f62 100644 +--- a/drivers/usb/common/common.c ++++ b/drivers/usb/common/common.c +@@ -148,6 +148,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0) + + do { + controller = of_find_node_with_property(controller, "phys"); ++ if (!of_device_is_available(controller)) ++ continue; + index = 0; + do { + if (arg0 == -1) { +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c +index 1a6ccdd5a5fc..bd749e78df59 100644 +--- a/drivers/usb/core/config.c ++++ b/drivers/usb/core/config.c +@@ -768,21 +768,18 @@ void usb_destroy_configuration(struct usb_device *dev) + return; + + if (dev->rawdescriptors) { +- for (i = 0; i < dev->descriptor.bNumConfigurations && +- i < USB_MAXCONFIG; i++) ++ for (i = 0; i < dev->descriptor.bNumConfigurations; i++) + kfree(dev->rawdescriptors[i]); + + kfree(dev->rawdescriptors); + dev->rawdescriptors = NULL; + } + +- for (c = 0; c < dev->descriptor.bNumConfigurations && +- c < USB_MAXCONFIG; c++) { ++ for (c = 0; c < dev->descriptor.bNumConfigurations; c++) { + struct usb_host_config *cf = &dev->config[c]; + + kfree(cf->string); +- for (i = 0; i < cf->desc.bNumInterfaces && +- i < USB_MAXINTERFACES; i++) { ++ for (i = 0; i < cf->desc.bNumInterfaces; i++) { + if (cf->intf_cache[i]) + kref_put(&cf->intf_cache[i]->ref, + usb_release_interface_cache); +diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c +index d8e359ef6eb1..63f6e344d5b0 100644 +--- a/drivers/usb/gadget/function/f_hid.c ++++ b/drivers/usb/gadget/function/f_hid.c +@@ -395,20 +395,20 @@ try_again: + req->complete = f_hidg_req_complete; + req->context = hidg; + ++ spin_unlock_irqrestore(&hidg->write_spinlock, flags); ++ + status = 
usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC); + if (status < 0) { + ERROR(hidg->func.config->cdev, + "usb_ep_queue error on int endpoint %zd\n", status); +- goto release_write_pending_unlocked; ++ goto release_write_pending; + } else { + status = count; + } +- spin_unlock_irqrestore(&hidg->write_spinlock, flags); + + return status; + release_write_pending: + spin_lock_irqsave(&hidg->write_spinlock, flags); +-release_write_pending_unlocked: + hidg->write_pending = 0; + spin_unlock_irqrestore(&hidg->write_spinlock, flags); + +diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c +index 97f23cc31f4c..425c2edfd6ea 100644 +--- a/drivers/usb/host/xhci-rcar.c ++++ b/drivers/usb/host/xhci-rcar.c +@@ -236,6 +236,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd) + xhci_rcar_is_gen3(hcd->self.controller)) + xhci->quirks |= XHCI_NO_64BIT_SUPPORT; + ++ xhci->quirks |= XHCI_TRUST_TX_LENGTH; + return xhci_rcar_download_firmware(hcd); + } + +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index aa230706b875..9a7e77a09080 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -1715,10 +1715,13 @@ static void handle_port_status(struct xhci_hcd *xhci, + } + } + +- if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 && +- DEV_SUPERSPEED_ANY(portsc)) { ++ if ((portsc & PORT_PLC) && ++ DEV_SUPERSPEED_ANY(portsc) && ++ ((portsc & PORT_PLS_MASK) == XDEV_U0 || ++ (portsc & PORT_PLS_MASK) == XDEV_U1 || ++ (portsc & PORT_PLS_MASK) == XDEV_U2)) { + xhci_dbg(xhci, "resume SS port %d finished\n", port_id); +- /* We've just brought the device into U0 through either the ++ /* We've just brought the device into U0/1/2 through either the + * Resume state after a device remote wakeup, or through the + * U3Exit state after a host-initiated resume. 
If it's a device + * initiated remote wake, don't pass up the link state change, +diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig +index 25cd61947bee..a213ce94f6eb 100644 +--- a/drivers/usb/mtu3/Kconfig ++++ b/drivers/usb/mtu3/Kconfig +@@ -4,6 +4,7 @@ config USB_MTU3 + tristate "MediaTek USB3 Dual Role controller" + depends on EXTCON && (USB || USB_GADGET) && HAS_DMA + depends on ARCH_MEDIATEK || COMPILE_TEST ++ depends on EXTCON || !EXTCON + select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD + help + Say Y or M here if your system runs on MediaTek SoCs with +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index d8e6790ccffe..98e466c3cfca 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -83,6 +83,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */ + { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ + { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */ ++ { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */ + { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ + { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ + { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index d45a2c352c98..e76395d7f17d 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -604,6 +604,8 @@ static const struct usb_device_id id_table_combined[] = { + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, ++ { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) }, ++ { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) }, +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index b863bedb55a1..5755f0df0025 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -567,7 +567,9 @@ + /* + * NovaTech product ids (FTDI_VID) + */ +-#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ ++#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ ++#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */ ++#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */ + + /* + * Synapse Wireless product ids (FTDI_VID) +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c +index a453965f9e9a..393a91ab56ed 100644 +--- a/drivers/usb/serial/mos7720.c ++++ b/drivers/usb/serial/mos7720.c +@@ -368,8 +368,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, + if (!urbtrack) + return -ENOMEM; + +- kref_get(&mos_parport->ref_count); +- urbtrack->mos_parport = mos_parport; + urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urbtrack->urb) { + kfree(urbtrack); +@@ -390,6 +388,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, + usb_sndctrlpipe(usbdev, 0), + (unsigned char *)urbtrack->setup, + NULL, 0, async_complete, urbtrack); ++ kref_get(&mos_parport->ref_count); ++ urbtrack->mos_parport = mos_parport; + kref_init(&urbtrack->ref_count); + INIT_LIST_HEAD(&urbtrack->urblist_entry); + +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c 
+index bf72245f1cea..3311f569aa17 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -249,6 +249,7 @@ static void option_instat_callback(struct urb *urb); + #define QUECTEL_PRODUCT_EC25 0x0125 + #define QUECTEL_PRODUCT_BG96 0x0296 + #define QUECTEL_PRODUCT_EP06 0x0306 ++#define QUECTEL_PRODUCT_EM12 0x0512 + + #define CMOTECH_VENDOR_ID 0x16d8 + #define CMOTECH_PRODUCT_6001 0x6001 +@@ -1069,7 +1070,8 @@ static const struct usb_device_id option_ids[] = { + .driver_info = RSVD(3) }, + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ +- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */ ++ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) }, + /* Quectel products using Qualcomm vendor ID */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, + { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), +@@ -1090,6 +1092,9 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), + .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), ++ .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), +@@ -1942,10 +1947,12 @@ static const struct usb_device_id option_ids[] = { + .driver_info = RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ + .driver_info = RSVD(4) }, +- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ +- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ +- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ +- { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ ++ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */ ++ .driver_info = RSVD(4) }, ++ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ + { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, + { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, + { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, +diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c +index 14a93cb21310..66d58e93bc32 100644 +--- a/drivers/video/fbdev/goldfishfb.c ++++ b/drivers/video/fbdev/goldfishfb.c +@@ -234,7 +234,7 @@ static int goldfish_fb_probe(struct platform_device *pdev) + fb->fb.var.activate = FB_ACTIVATE_NOW; + fb->fb.var.height = readl(fb->reg_base + FB_GET_PHYS_HEIGHT); + fb->fb.var.width = readl(fb->reg_base + FB_GET_PHYS_WIDTH); +- fb->fb.var.pixclock = 10000; ++ fb->fb.var.pixclock 
= 0; + + fb->fb.var.red.offset = 11; + fb->fb.var.red.length = 5; +diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c +index 2e995e565633..1e35a2327478 100644 +--- a/fs/btrfs/raid56.c ++++ b/fs/btrfs/raid56.c +@@ -2414,8 +2414,9 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, + bitmap_clear(rbio->dbitmap, pagenr, 1); + kunmap(p); + +- for (stripe = 0; stripe < rbio->real_stripes; stripe++) ++ for (stripe = 0; stripe < nr_data; stripe++) + kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); ++ kunmap(p_page); + } + + __free_page(p_page); +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index 179a383a4aaa..9d72882b0f72 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -3422,9 +3422,16 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, + } + btrfs_release_path(path); + +- /* find the first key from this transaction again */ ++ /* ++ * Find the first key from this transaction again. See the note for ++ * log_new_dir_dentries, if we're logging a directory recursively we ++ * won't be holding its i_mutex, which means we can modify the directory ++ * while we're logging it. If we remove an entry between our first ++ * search and this search we'll not find the key again and can just ++ * bail. ++ */ + ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); +- if (WARN_ON(ret != 0)) ++ if (ret != 0) + goto done; + + /* +@@ -4501,6 +4508,19 @@ static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode, + item = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_inode_item); + *size_ret = btrfs_inode_size(path->nodes[0], item); ++ /* ++ * If the in-memory inode's i_size is smaller then the inode ++ * size stored in the btree, return the inode's i_size, so ++ * that we get a correct inode size after replaying the log ++ * when before a power failure we had a shrinking truncate ++ * followed by addition of a new name (rename / new hard link). ++ * Otherwise return the inode size from the btree, to avoid ++ * data loss when replaying a log due to previously doing a ++ * write that expands the inode's size and logging a new name ++ * immediately after. ++ */ ++ if (*size_ret > inode->vfs_inode.i_size) ++ *size_ret = inode->vfs_inode.i_size; + } + + btrfs_release_path(path); +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 9041a892701f..a225f98c9903 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -2746,7 +2746,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, + nfs4_schedule_stateid_recovery(server, state); + } + out: +- nfs4_sequence_free_slot(&opendata->o_res.seq_res); ++ if (!opendata->cancelled) ++ nfs4_sequence_free_slot(&opendata->o_res.seq_res); + return ret; + } + +diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c +index 824f407df1db..3a4e1bca5e31 100644 +--- a/fs/ocfs2/refcounttree.c ++++ b/fs/ocfs2/refcounttree.c +@@ -4716,22 +4716,23 @@ out: + + /* Lock an inode and grab a bh pointing to the inode. 
*/ + static int ocfs2_reflink_inodes_lock(struct inode *s_inode, +- struct buffer_head **bh1, ++ struct buffer_head **bh_s, + struct inode *t_inode, +- struct buffer_head **bh2) ++ struct buffer_head **bh_t) + { +- struct inode *inode1; +- struct inode *inode2; ++ struct inode *inode1 = s_inode; ++ struct inode *inode2 = t_inode; + struct ocfs2_inode_info *oi1; + struct ocfs2_inode_info *oi2; ++ struct buffer_head *bh1 = NULL; ++ struct buffer_head *bh2 = NULL; + bool same_inode = (s_inode == t_inode); ++ bool need_swap = (inode1->i_ino > inode2->i_ino); + int status; + + /* First grab the VFS and rw locks. */ + lock_two_nondirectories(s_inode, t_inode); +- inode1 = s_inode; +- inode2 = t_inode; +- if (inode1->i_ino > inode2->i_ino) ++ if (need_swap) + swap(inode1, inode2); + + status = ocfs2_rw_lock(inode1, 1); +@@ -4754,17 +4755,13 @@ static int ocfs2_reflink_inodes_lock(struct inode *s_inode, + trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno, + (unsigned long long)oi2->ip_blkno); + +- if (*bh1) +- *bh1 = NULL; +- if (*bh2) +- *bh2 = NULL; +- + /* We always want to lock the one with the lower lockid first. */ + if (oi1->ip_blkno > oi2->ip_blkno) + mlog_errno(-ENOLCK); + + /* lock id1 */ +- status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_REFLINK_TARGET); ++ status = ocfs2_inode_lock_nested(inode1, &bh1, 1, ++ OI_LS_REFLINK_TARGET); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); +@@ -4773,15 +4770,25 @@ static int ocfs2_reflink_inodes_lock(struct inode *s_inode, + + /* lock id2 */ + if (!same_inode) { +- status = ocfs2_inode_lock_nested(inode2, bh2, 1, ++ status = ocfs2_inode_lock_nested(inode2, &bh2, 1, + OI_LS_REFLINK_TARGET); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + goto out_cl1; + } +- } else +- *bh2 = *bh1; ++ } else { ++ bh2 = bh1; ++ } ++ ++ /* ++ * If we swapped inode order above, we have to swap the buffer heads ++ * before passing them back to the caller. ++ */ ++ if (need_swap) ++ swap(bh1, bh2); ++ *bh_s = bh1; ++ *bh_t = bh2; + + trace_ocfs2_double_lock_end( + (unsigned long long)OCFS2_I(inode1)->ip_blkno, +@@ -4791,8 +4798,7 @@ static int ocfs2_reflink_inodes_lock(struct inode *s_inode, + + out_cl1: + ocfs2_inode_unlock(inode1, 1); +- brelse(*bh1); +- *bh1 = NULL; ++ brelse(bh1); + out_rw2: + ocfs2_rw_unlock(inode2, 1); + out_i2: +diff --git a/fs/open.c b/fs/open.c +index 7ea118471dce..28a3956c4479 100644 +--- a/fs/open.c ++++ b/fs/open.c +@@ -716,6 +716,12 @@ static int do_dentry_open(struct file *f, + return 0; + } + ++ /* Any file opened for execve()/uselib() has to be a regular file. 
*/ ++ if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) { ++ error = -EACCES; ++ goto cleanup_file; ++ } ++ + if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) { + error = get_write_access(inode); + if (unlikely(error)) +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c +index f69c545f5868..8d5422bb9c1a 100644 +--- a/fs/proc/proc_sysctl.c ++++ b/fs/proc/proc_sysctl.c +@@ -1620,7 +1620,8 @@ static void drop_sysctl_table(struct ctl_table_header *header) + if (--header->nreg) + return; + +- put_links(header); ++ if (parent) ++ put_links(header); + start_unregistering(header); + if (!--header->count) + kfree_rcu(header, rcu); +diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h +index 4a5b9a306c69..803fc26ef0ba 100644 +--- a/include/net/sctp/checksum.h ++++ b/include/net/sctp/checksum.h +@@ -60,7 +60,7 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2, + static inline __le32 sctp_compute_cksum(const struct sk_buff *skb, + unsigned int offset) + { +- struct sctphdr *sh = sctp_hdr(skb); ++ struct sctphdr *sh = (struct sctphdr *)(skb->data + offset); + __le32 ret, old = sh->checksum; + const struct skb_checksum_ops ops = { + .update = sctp_csum_update, +diff --git a/include/net/sock.h b/include/net/sock.h +index 4280e96d4b46..60eef7f1ac05 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -682,6 +682,12 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) + hlist_add_head_rcu(&sk->sk_node, list); + } + ++static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list) ++{ ++ sock_hold(sk); ++ hlist_add_tail_rcu(&sk->sk_node, list); ++} ++ + static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) + { + hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); +diff --git a/kernel/cpu.c b/kernel/cpu.c +index 0171754db32b..32f0432f0c26 100644 +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -538,6 +538,20 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) + } + } + ++static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st) ++{ ++ if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) ++ return true; ++ /* ++ * When CPU hotplug is disabled, then taking the CPU down is not ++ * possible because takedown_cpu() and the architecture and ++ * subsystem specific mechanisms are not available. So the CPU ++ * which would be completely unplugged again needs to stay around ++ * in the current state. 
++ */ ++ return st->state <= CPUHP_BRINGUP_CPU; ++} ++ + static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, + enum cpuhp_state target) + { +@@ -548,8 +562,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, + st->state++; + ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); + if (ret) { +- st->target = prev_state; +- undo_cpu_up(cpu, st); ++ if (can_rollback_cpu(st)) { ++ st->target = prev_state; ++ undo_cpu_up(cpu, st); ++ } + break; + } + } +diff --git a/lib/rhashtable.c b/lib/rhashtable.c +index cebbcec877d7..cb577ca65fa9 100644 +--- a/lib/rhashtable.c ++++ b/lib/rhashtable.c +@@ -459,8 +459,12 @@ static void rht_deferred_worker(struct work_struct *work) + else if (tbl->nest) + err = rhashtable_rehash_alloc(ht, tbl, tbl->size); + +- if (!err) +- err = rhashtable_rehash_table(ht); ++ if (!err || err == -EEXIST) { ++ int nerr; ++ ++ nerr = rhashtable_rehash_table(ht); ++ err = err ?: nerr; ++ } + + mutex_unlock(&ht->mutex); + +diff --git a/mm/migrate.c b/mm/migrate.c +index 877269339fa7..9a3ce8847308 100644 +--- a/mm/migrate.c ++++ b/mm/migrate.c +@@ -247,10 +247,8 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, + pte = swp_entry_to_pte(entry); + } else if (is_device_public_page(new)) { + pte = pte_mkdevmap(pte); +- flush_dcache_page(new); + } +- } else +- flush_dcache_page(new); ++ } + + #ifdef CONFIG_HUGETLB_PAGE + if (PageHuge(new)) { +@@ -971,6 +969,13 @@ static int move_to_new_page(struct page *newpage, struct page *page, + */ + if (!PageMappingFlags(page)) + page->mapping = NULL; ++ ++ if (unlikely(is_zone_device_page(newpage))) { ++ if (is_device_public_page(newpage)) ++ flush_dcache_page(newpage); ++ } else ++ flush_dcache_page(newpage); ++ + } + out: + return rc; +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c +index 9b7907ebfa01..b510da76170e 100644 +--- a/net/bluetooth/l2cap_core.c ++++ b/net/bluetooth/l2cap_core.c +@@ -3336,16 +3336,22 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data + + while (len >= L2CAP_CONF_OPT_SIZE) { + len -= l2cap_get_conf_opt(&req, &type, &olen, &val); ++ if (len < 0) ++ break; + + hint = type & L2CAP_CONF_HINT; + type &= L2CAP_CONF_MASK; + + switch (type) { + case L2CAP_CONF_MTU: ++ if (olen != 2) ++ break; + mtu = val; + break; + + case L2CAP_CONF_FLUSH_TO: ++ if (olen != 2) ++ break; + chan->flush_to = val; + break; + +@@ -3353,26 +3359,30 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data + break; + + case L2CAP_CONF_RFC: +- if (olen == sizeof(rfc)) +- memcpy(&rfc, (void *) val, olen); ++ if (olen != sizeof(rfc)) ++ break; ++ memcpy(&rfc, (void *) val, olen); + break; + + case L2CAP_CONF_FCS: ++ if (olen != 1) ++ break; + if (val == L2CAP_FCS_NONE) + set_bit(CONF_RECV_NO_FCS, &chan->conf_state); + break; + + case L2CAP_CONF_EFS: +- if (olen == sizeof(efs)) { +- remote_efs = 1; +- memcpy(&efs, (void *) val, olen); +- } ++ if (olen != sizeof(efs)) ++ break; ++ remote_efs = 1; ++ memcpy(&efs, (void *) val, olen); + break; + + case L2CAP_CONF_EWS: ++ if (olen != 2) ++ break; + if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP)) + return -ECONNREFUSED; +- + set_bit(FLAG_EXT_CTRL, &chan->flags); + set_bit(CONF_EWS_RECV, &chan->conf_state); + chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; +@@ -3382,7 +3392,6 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data + default: + if (hint) + break; +- + result = L2CAP_CONF_UNKNOWN; + *((u8 *) 
ptr++) = type; + break; +@@ -3547,58 +3556,65 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, + + while (len >= L2CAP_CONF_OPT_SIZE) { + len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); ++ if (len < 0) ++ break; + + switch (type) { + case L2CAP_CONF_MTU: ++ if (olen != 2) ++ break; + if (val < L2CAP_DEFAULT_MIN_MTU) { + *result = L2CAP_CONF_UNACCEPT; + chan->imtu = L2CAP_DEFAULT_MIN_MTU; + } else + chan->imtu = val; +- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr); ++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, ++ endptr - ptr); + break; + + case L2CAP_CONF_FLUSH_TO: ++ if (olen != 2) ++ break; + chan->flush_to = val; +- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, +- 2, chan->flush_to, endptr - ptr); ++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, ++ chan->flush_to, endptr - ptr); + break; + + case L2CAP_CONF_RFC: +- if (olen == sizeof(rfc)) +- memcpy(&rfc, (void *)val, olen); +- ++ if (olen != sizeof(rfc)) ++ break; ++ memcpy(&rfc, (void *)val, olen); + if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) && + rfc.mode != chan->mode) + return -ECONNREFUSED; +- + chan->fcs = 0; +- +- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, +- sizeof(rfc), (unsigned long) &rfc, endptr - ptr); ++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), ++ (unsigned long) &rfc, endptr - ptr); + break; + + case L2CAP_CONF_EWS: ++ if (olen != 2) ++ break; + chan->ack_win = min_t(u16, val, chan->ack_win); + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, + chan->tx_win, endptr - ptr); + break; + + case L2CAP_CONF_EFS: +- if (olen == sizeof(efs)) { +- memcpy(&efs, (void *)val, olen); +- +- if (chan->local_stype != L2CAP_SERV_NOTRAFIC && +- efs.stype != L2CAP_SERV_NOTRAFIC && +- efs.stype != chan->local_stype) +- return -ECONNREFUSED; +- +- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), +- (unsigned long) &efs, endptr - ptr); +- } ++ if (olen != sizeof(efs)) ++ break; ++ memcpy(&efs, (void *)val, olen); ++ if (chan->local_stype != L2CAP_SERV_NOTRAFIC && ++ efs.stype != L2CAP_SERV_NOTRAFIC && ++ efs.stype != chan->local_stype) ++ return -ECONNREFUSED; ++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), ++ (unsigned long) &efs, endptr - ptr); + break; + + case L2CAP_CONF_FCS: ++ if (olen != 1) ++ break; + if (*result == L2CAP_CONF_PENDING) + if (val == L2CAP_FCS_NONE) + set_bit(CONF_RECV_NO_FCS, +@@ -3727,13 +3743,18 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) + + while (len >= L2CAP_CONF_OPT_SIZE) { + len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); ++ if (len < 0) ++ break; + + switch (type) { + case L2CAP_CONF_RFC: +- if (olen == sizeof(rfc)) +- memcpy(&rfc, (void *)val, olen); ++ if (olen != sizeof(rfc)) ++ break; ++ memcpy(&rfc, (void *)val, olen); + break; + case L2CAP_CONF_EWS: ++ if (olen != 2) ++ break; + txwin_ext = val; + break; + } +diff --git a/net/core/datagram.c b/net/core/datagram.c +index d8a0774f7608..dcb333e95702 100644 +--- a/net/core/datagram.c ++++ b/net/core/datagram.c +@@ -281,7 +281,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags, + break; + + sk_busy_loop(sk, flags & MSG_DONTWAIT); +- } while (!skb_queue_empty(&sk->sk_receive_queue)); ++ } while (sk->sk_receive_queue.prev != *last); + + error = -EAGAIN; + +diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c +index c6a2655cc28a..dee57c5ff738 100644 +--- a/net/core/net-sysfs.c ++++ b/net/core/net-sysfs.c +@@ -917,6 +917,8 @@ static int rx_queue_add_kobject(struct net_device *dev, int 
index) + if (error) + return error; + ++ dev_hold(queue->dev); ++ + if (dev->sysfs_rx_queue_group) { + error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group); + if (error) { +@@ -926,7 +928,6 @@ static int rx_queue_add_kobject(struct net_device *dev, int index) + } + + kobject_uevent(kobj, KOBJ_ADD); +- dev_hold(queue->dev); + + return error; + } +@@ -1327,6 +1328,8 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index) + if (error) + return error; + ++ dev_hold(queue->dev); ++ + #ifdef CONFIG_BQL + error = sysfs_create_group(kobj, &dql_group); + if (error) { +@@ -1336,7 +1339,6 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index) + #endif + + kobject_uevent(kobj, KOBJ_ADD); +- dev_hold(queue->dev); + + return 0; + } +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c +index 6344f1b18a6a..58a401e9cf09 100644 +--- a/net/dccp/ipv6.c ++++ b/net/dccp/ipv6.c +@@ -433,8 +433,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, + newnp->ipv6_mc_list = NULL; + newnp->ipv6_ac_list = NULL; + newnp->ipv6_fl_list = NULL; +- newnp->mcast_oif = inet6_iif(skb); +- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; ++ newnp->mcast_oif = inet_iif(skb); ++ newnp->mcast_hops = ip_hdr(skb)->ttl; + + /* + * No need to charge this sock to the relevant IPv6 refcnt debug socks count +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index ba8586aadffa..7b4ce3f9e2f4 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -1083,11 +1083,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * + newnp->ipv6_fl_list = NULL; + newnp->pktoptions = NULL; + newnp->opt = NULL; +- newnp->mcast_oif = tcp_v6_iif(skb); +- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; +- newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb)); ++ newnp->mcast_oif = inet_iif(skb); ++ newnp->mcast_hops = ip_hdr(skb)->ttl; ++ newnp->rcv_flowinfo = 0; + if (np->repflow) +- newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb)); ++ newnp->flow_label = 0; + + /* + * No need to charge this sock to the relevant IPv6 refcnt debug socks count +diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c +index b9ce82c9440f..e9b8b0b0ac43 100644 +--- a/net/netlink/genetlink.c ++++ b/net/netlink/genetlink.c +@@ -365,7 +365,7 @@ int genl_register_family(struct genl_family *family) + start, end + 1, GFP_KERNEL); + if (family->id < 0) { + err = family->id; +- goto errout_locked; ++ goto errout_free; + } + + err = genl_validate_assign_mc_groups(family); +@@ -384,6 +384,7 @@ int genl_register_family(struct genl_family *family) + + errout_remove: + idr_remove(&genl_fam_idr, family->id); ++errout_free: + kfree(family->attrbuf); + errout_locked: + genl_unlock_all(); +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index a2bd5917a2a9..e8ca6aa3a32f 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -3281,7 +3281,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, + } + + mutex_lock(&net->packet.sklist_lock); +- sk_add_node_rcu(sk, &net->packet.sklist); ++ sk_add_node_tail_rcu(sk, &net->packet.sklist); + mutex_unlock(&net->packet.sklist_lock); + + preempt_disable(); +@@ -4232,7 +4232,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) + struct pgv *pg_vec; + int i; + +- pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL); ++ pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN); + if (unlikely(!pg_vec)) + goto out; + +diff --git a/net/rose/rose_subr.c 
b/net/rose/rose_subr.c +index 7ca57741b2fb..7849f286bb93 100644 +--- a/net/rose/rose_subr.c ++++ b/net/rose/rose_subr.c +@@ -105,16 +105,17 @@ void rose_write_internal(struct sock *sk, int frametype) + struct sk_buff *skb; + unsigned char *dptr; + unsigned char lci1, lci2; +- char buffer[100]; +- int len, faclen = 0; ++ int maxfaclen = 0; ++ int len, faclen; ++ int reserve; + +- len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1; ++ reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1; ++ len = ROSE_MIN_LEN; + + switch (frametype) { + case ROSE_CALL_REQUEST: + len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN; +- faclen = rose_create_facilities(buffer, rose); +- len += faclen; ++ maxfaclen = 256; + break; + case ROSE_CALL_ACCEPTED: + case ROSE_CLEAR_REQUEST: +@@ -123,15 +124,16 @@ void rose_write_internal(struct sock *sk, int frametype) + break; + } + +- if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) ++ skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC); ++ if (!skb) + return; + + /* + * Space for AX.25 header and PID. + */ +- skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1); ++ skb_reserve(skb, reserve); + +- dptr = skb_put(skb, skb_tailroom(skb)); ++ dptr = skb_put(skb, len); + + lci1 = (rose->lci >> 8) & 0x0F; + lci2 = (rose->lci >> 0) & 0xFF; +@@ -146,7 +148,8 @@ void rose_write_internal(struct sock *sk, int frametype) + dptr += ROSE_ADDR_LEN; + memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN); + dptr += ROSE_ADDR_LEN; +- memcpy(dptr, buffer, faclen); ++ faclen = rose_create_facilities(dptr, rose); ++ skb_put(skb, faclen); + dptr += faclen; + break; + +diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c +index e36a673833ae..c22041a4fc36 100644 +--- a/scripts/mod/modpost.c ++++ b/scripts/mod/modpost.c +@@ -645,7 +645,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info, + info->sechdrs[sym->st_shndx].sh_offset - + (info->hdr->e_type != ET_REL ? 
+ info->sechdrs[sym->st_shndx].sh_addr : 0); +- crc = *crcp; ++ crc = TO_NATIVE(*crcp); + } + sym_update_crc(symname + strlen(CRC_PFX), mod, crc, + export); +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c +index df358e838b5b..bb0ab0f6ce9d 100644 +--- a/sound/core/oss/pcm_oss.c ++++ b/sound/core/oss/pcm_oss.c +@@ -940,6 +940,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) + oss_frame_size = snd_pcm_format_physical_width(params_format(params)) * + params_channels(params) / 8; + ++ err = snd_pcm_oss_period_size(substream, params, sparams); ++ if (err < 0) ++ goto failure; ++ ++ n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size); ++ err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL); ++ if (err < 0) ++ goto failure; ++ ++ err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS, ++ runtime->oss.periods, NULL); ++ if (err < 0) ++ goto failure; ++ ++ snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); ++ ++ err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams); ++ if (err < 0) { ++ pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err); ++ goto failure; ++ } ++ + #ifdef CONFIG_SND_PCM_OSS_PLUGINS + snd_pcm_oss_plugin_clear(substream); + if (!direct) { +@@ -974,27 +996,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) + } + #endif + +- err = snd_pcm_oss_period_size(substream, params, sparams); +- if (err < 0) +- goto failure; +- +- n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size); +- err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL); +- if (err < 0) +- goto failure; +- +- err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS, +- runtime->oss.periods, NULL); +- if (err < 0) +- goto failure; +- +- snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); +- +- if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) { +- pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err); +- goto failure; +- } +- + if (runtime->oss.trigger) { + sw_params->start_threshold = 1; + } else { +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c +index 966ac384c3f4..1a63d456a3dc 100644 +--- a/sound/core/pcm_native.c ++++ b/sound/core/pcm_native.c +@@ -1395,8 +1395,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push) + static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state) + { + struct snd_pcm_runtime *runtime = substream->runtime; +- if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) ++ switch (runtime->status->state) { ++ case SNDRV_PCM_STATE_SUSPENDED: + return -EBUSY; ++ /* unresumable PCM state; return -EBUSY for skipping suspend */ ++ case SNDRV_PCM_STATE_OPEN: ++ case SNDRV_PCM_STATE_SETUP: ++ case SNDRV_PCM_STATE_DISCONNECTED: ++ return -EBUSY; ++ } + runtime->trigger_master = substream; + return 0; + } +diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c +index abacbbc0b0e8..d22472ba211e 100644 +--- a/sound/core/rawmidi.c ++++ b/sound/core/rawmidi.c +@@ -29,6 +29,7 @@ + #include <linux/mutex.h> + #include <linux/module.h> + #include <linux/delay.h> ++#include <linux/nospec.h> + #include <sound/rawmidi.h> + #include <sound/info.h> + #include <sound/control.h> +@@ -591,6 +592,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card, + return -ENXIO; + if (info->stream < 0 || info->stream > 1) + return 
-EINVAL; ++ info->stream = array_index_nospec(info->stream, 2); + pstr = &rmidi->streams[info->stream]; + if (pstr->substream_count == 0) + return -ENOENT; +diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c +index 278ebb993122..c93945917235 100644 +--- a/sound/core/seq/oss/seq_oss_synth.c ++++ b/sound/core/seq/oss/seq_oss_synth.c +@@ -617,13 +617,14 @@ int + snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf) + { + struct seq_oss_synth *rec; ++ struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev); + +- if (dev < 0 || dev >= dp->max_synthdev) ++ if (!info) + return -ENXIO; + +- if (dp->synths[dev].is_midi) { ++ if (info->is_midi) { + struct midi_info minf; +- snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf); ++ snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf); + inf->synth_type = SYNTH_TYPE_MIDI; + inf->synth_subtype = 0; + inf->nr_voices = 16; +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 972fd95f08ca..9637d0bbdeb5 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -5436,6 +5436,9 @@ enum { + ALC298_FIXUP_TPT470_DOCK, + ALC255_FIXUP_DUMMY_LINEOUT_VERB, + ALC255_FIXUP_DELL_HEADSET_MIC, ++ ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE, ++ ALC225_FIXUP_WYSE_AUTO_MUTE, ++ ALC225_FIXUP_WYSE_DISABLE_MIC_VREF, + }; + + static const struct hda_fixup alc269_fixups[] = { +@@ -6311,6 +6314,28 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MIC + }, ++ [ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE] = { ++ .type = HDA_FIXUP_PINS, ++ .v.pins = (const struct hda_pintbl[]) { ++ { 0x16, 0x01011020 }, /* Rear Line out */ ++ { 0x19, 0x01a1913c }, /* use as Front headset mic, without its own jack detect */ ++ { } ++ }, ++ .chained = true, ++ .chain_id = ALC225_FIXUP_WYSE_AUTO_MUTE ++ }, ++ [ALC225_FIXUP_WYSE_AUTO_MUTE] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc_fixup_auto_mute_via_amp, ++ .chained = true, ++ .chain_id = ALC225_FIXUP_WYSE_DISABLE_MIC_VREF ++ }, ++ [ALC225_FIXUP_WYSE_DISABLE_MIC_VREF] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc_fixup_disable_mic_vref, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC ++ }, + }; + + static const struct snd_pci_quirk alc269_fixup_tbl[] = { +@@ -6369,6 +6394,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), + SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), + SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB), ++ SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE), ++ SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), +diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +index f3db68abbd9a..0bc3e6e93c31 100644 +--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c ++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +@@ -251,19 +251,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params) + if (!(decoder->tsc_ctc_ratio_n % 
decoder->tsc_ctc_ratio_d)) + decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n / + decoder->tsc_ctc_ratio_d; +- +- /* +- * Allow for timestamps appearing to backwards because a TSC +- * packet has slipped past a MTC packet, so allow 2 MTC ticks +- * or ... +- */ +- decoder->tsc_slip = multdiv(2 << decoder->mtc_shift, +- decoder->tsc_ctc_ratio_n, +- decoder->tsc_ctc_ratio_d); + } +- /* ... or 0x100 paranoia */ +- if (decoder->tsc_slip < 0x100) +- decoder->tsc_slip = 0x100; ++ ++ /* ++ * A TSC packet can slip past MTC packets so that the timestamp appears ++ * to go backwards. One estimate is that can be up to about 40 CPU ++ * cycles, which is certainly less than 0x1000 TSC ticks, but accept ++ * slippage an order of magnitude more to be on the safe side. ++ */ ++ decoder->tsc_slip = 0x10000; + + intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift); + intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n); +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index 66cc315efa6d..a373c60ef1c0 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -2812,6 +2812,9 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, + { + struct kvm_device *dev = filp->private_data; + ++ if (dev->kvm->mm != current->mm) ++ return -EIO; ++ + switch (ioctl) { + case KVM_SET_DEVICE_ATTR: + return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); |