author     Mike Pagano <mpagano@gentoo.org>  2019-02-20 06:17:26 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2019-02-20 06:17:26 -0500
commit     04bedcff79ab8090728a041e9acff364903c4239 (patch)
tree       9c542a27262b93b6e6ebcb2cb13129b51743d9ed
parent     proj/linux-patches: Rename patch for clarity
proj/linux-patches: Linux patch 4.14.102
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README               |    4
-rw-r--r--  1101_linux-4.14.102.patch | 2404
2 files changed, 2408 insertions, 0 deletions
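
For orientation (not part of the commit itself): the 0000_README hunk below registers 1101_linux-4.14.102.patch as an incremental unified diff taken from kernel.org, and the Makefile hunk bumps SUBLEVEL from 101 to 102. A minimal sketch of applying such an entry by hand — the source-tree path is an assumption for illustration, not something this commit specifies:

    cd /usr/src/linux-4.14.101               # assumed location of a tree already at 4.14.101
    patch -p1 < 1101_linux-4.14.102.patch    # applies the kernel.org 4.14.101 -> 4.14.102 diff

In normal Gentoo use the ebuild applies the numbered patches in 0000_README order automatically; the manual invocation above only illustrates that these are ordinary -p1 unified diffs.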
diff --git a/0000_README b/0000_README index 62f0745b..439bffe5 100644 --- a/0000_README +++ b/0000_README @@ -447,6 +447,10 @@ Patch: 1100_4.14.101.patch From: http://www.kernel.org Desc: Linux 4.14.101 +Patch: 1101_4.14.102.patch +From: http://www.kernel.org +Desc: Linux 4.14.102 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1101_linux-4.14.102.patch b/1101_linux-4.14.102.patch new file mode 100644 index 00000000..56ab0620 --- /dev/null +++ b/1101_linux-4.14.102.patch @@ -0,0 +1,2404 @@ +diff --git a/Documentation/devicetree/bindings/eeprom/eeprom.txt b/Documentation/devicetree/bindings/eeprom/eeprom.txt +index afc04589eadf..3c9a822d576c 100644 +--- a/Documentation/devicetree/bindings/eeprom/eeprom.txt ++++ b/Documentation/devicetree/bindings/eeprom/eeprom.txt +@@ -6,7 +6,8 @@ Required properties: + + "atmel,24c00", "atmel,24c01", "atmel,24c02", "atmel,24c04", + "atmel,24c08", "atmel,24c16", "atmel,24c32", "atmel,24c64", +- "atmel,24c128", "atmel,24c256", "atmel,24c512", "atmel,24c1024" ++ "atmel,24c128", "atmel,24c256", "atmel,24c512", "atmel,24c1024", ++ "atmel,24c2048" + + "catalyst,24c32" + +@@ -23,7 +24,7 @@ Required properties: + device with <type> and manufacturer "atmel" should be used. + Possible types are: + "24c00", "24c01", "24c02", "24c04", "24c08", "24c16", "24c32", "24c64", +- "24c128", "24c256", "24c512", "24c1024", "spd" ++ "24c128", "24c256", "24c512", "24c1024", "24c2048", "spd" + + - reg : the I2C address of the EEPROM + +diff --git a/Makefile b/Makefile +index d5b20b618517..837059a07bb3 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 14 +-SUBLEVEL = 101 ++SUBLEVEL = 102 + EXTRAVERSION = + NAME = Petit Gorille + +diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h +index 4d17cacd1462..432402c8e47f 100644 +--- a/arch/alpha/include/asm/irq.h ++++ b/arch/alpha/include/asm/irq.h +@@ -56,15 +56,15 @@ + + #elif defined(CONFIG_ALPHA_DP264) || \ + defined(CONFIG_ALPHA_LYNX) || \ +- defined(CONFIG_ALPHA_SHARK) || \ +- defined(CONFIG_ALPHA_EIGER) ++ defined(CONFIG_ALPHA_SHARK) + # define NR_IRQS 64 + + #elif defined(CONFIG_ALPHA_TITAN) + #define NR_IRQS 80 + + #elif defined(CONFIG_ALPHA_RAWHIDE) || \ +- defined(CONFIG_ALPHA_TAKARA) ++ defined(CONFIG_ALPHA_TAKARA) || \ ++ defined(CONFIG_ALPHA_EIGER) + # define NR_IRQS 128 + + #elif defined(CONFIG_ALPHA_WILDFIRE) +diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c +index cd3c572ee912..e9392302c5da 100644 +--- a/arch/alpha/mm/fault.c ++++ b/arch/alpha/mm/fault.c +@@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm) + /* Macro for exception fixup code to access integer registers. */ + #define dpf_reg(r) \ + (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ +- (r) <= 18 ? (r)+8 : (r)-10]) ++ (r) <= 18 ? 
(r)+10 : (r)-10]) + + asmlinkage void + do_page_fault(unsigned long address, unsigned long mmcsr, +diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts +index c75507922f7d..f5902bd1a972 100644 +--- a/arch/arm/boot/dts/da850-evm.dts ++++ b/arch/arm/boot/dts/da850-evm.dts +@@ -169,7 +169,7 @@ + + sound { + compatible = "simple-audio-card"; +- simple-audio-card,name = "DA850/OMAP-L138 EVM"; ++ simple-audio-card,name = "DA850-OMAPL138 EVM"; + simple-audio-card,widgets = + "Line", "Line In", + "Line", "Line Out"; +diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts +index a0f0916156e6..c9d4cb212b72 100644 +--- a/arch/arm/boot/dts/da850-lcdk.dts ++++ b/arch/arm/boot/dts/da850-lcdk.dts +@@ -28,7 +28,7 @@ + + sound { + compatible = "simple-audio-card"; +- simple-audio-card,name = "DA850/OMAP-L138 LCDK"; ++ simple-audio-card,name = "DA850-OMAPL138 LCDK"; + simple-audio-card,widgets = + "Line", "Line In", + "Line", "Line Out"; +diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi +index cbaf06f2f78e..eb917462b219 100644 +--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi ++++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi +@@ -36,8 +36,8 @@ + compatible = "gpio-fan"; + pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>; + pinctrl-names = "default"; +- gpios = <&gpio1 14 GPIO_ACTIVE_LOW +- &gpio1 13 GPIO_ACTIVE_LOW>; ++ gpios = <&gpio1 14 GPIO_ACTIVE_HIGH ++ &gpio1 13 GPIO_ACTIVE_HIGH>; + gpio-fan,speed-map = <0 0 + 3000 1 + 6000 2>; +diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h +index b17ee03d280b..88286dd483ff 100644 +--- a/arch/arm/include/asm/assembler.h ++++ b/arch/arm/include/asm/assembler.h +@@ -467,6 +467,17 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) + #endif + .endm + ++ .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req ++#ifdef CONFIG_CPU_SPECTRE ++ sub \tmp, \limit, #1 ++ subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr ++ addhs \tmp, \tmp, #1 @ if (tmp >= 0) { ++ subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) } ++ movlo \addr, #0 @ if (tmp < 0) addr = NULL ++ csdb ++#endif ++ .endm ++ + .macro uaccess_disable, tmp, isb=1 + #ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* +diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h +index 3379c2c684c2..25d523185c6a 100644 +--- a/arch/arm/include/asm/cputype.h ++++ b/arch/arm/include/asm/cputype.h +@@ -107,6 +107,7 @@ + #define ARM_CPU_PART_SCORPION 0x510002d0 + + extern unsigned int processor_id; ++struct proc_info_list *lookup_processor(u32 midr); + + #ifdef CONFIG_CPU_CP15 + #define read_cpuid(reg) \ +diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h +index e25f4392e1b2..e1b6f280ab08 100644 +--- a/arch/arm/include/asm/proc-fns.h ++++ b/arch/arm/include/asm/proc-fns.h +@@ -23,7 +23,7 @@ struct mm_struct; + /* + * Don't change this structure - ASM code relies on it. 
+ */ +-extern struct processor { ++struct processor { + /* MISC + * get data abort address/flags + */ +@@ -79,9 +79,13 @@ extern struct processor { + unsigned int suspend_size; + void (*do_suspend)(void *); + void (*do_resume)(void *); +-} processor; ++}; + + #ifndef MULTI_CPU ++static inline void init_proc_vtable(const struct processor *p) ++{ ++} ++ + extern void cpu_proc_init(void); + extern void cpu_proc_fin(void); + extern int cpu_do_idle(void); +@@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn)); + extern void cpu_do_suspend(void *); + extern void cpu_do_resume(void *); + #else +-#define cpu_proc_init processor._proc_init +-#define cpu_proc_fin processor._proc_fin +-#define cpu_reset processor.reset +-#define cpu_do_idle processor._do_idle +-#define cpu_dcache_clean_area processor.dcache_clean_area +-#define cpu_set_pte_ext processor.set_pte_ext +-#define cpu_do_switch_mm processor.switch_mm + +-/* These three are private to arch/arm/kernel/suspend.c */ +-#define cpu_do_suspend processor.do_suspend +-#define cpu_do_resume processor.do_resume ++extern struct processor processor; ++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ++#include <linux/smp.h> ++/* ++ * This can't be a per-cpu variable because we need to access it before ++ * per-cpu has been initialised. We have a couple of functions that are ++ * called in a pre-emptible context, and so can't use smp_processor_id() ++ * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the ++ * function pointers for these are identical across all CPUs. ++ */ ++extern struct processor *cpu_vtable[]; ++#define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f ++#define PROC_TABLE(f) cpu_vtable[0]->f ++static inline void init_proc_vtable(const struct processor *p) ++{ ++ unsigned int cpu = smp_processor_id(); ++ *cpu_vtable[cpu] = *p; ++ WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area != ++ cpu_vtable[0]->dcache_clean_area); ++ WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext != ++ cpu_vtable[0]->set_pte_ext); ++} ++#else ++#define PROC_VTABLE(f) processor.f ++#define PROC_TABLE(f) processor.f ++static inline void init_proc_vtable(const struct processor *p) ++{ ++ processor = *p; ++} ++#endif ++ ++#define cpu_proc_init PROC_VTABLE(_proc_init) ++#define cpu_check_bugs PROC_VTABLE(check_bugs) ++#define cpu_proc_fin PROC_VTABLE(_proc_fin) ++#define cpu_reset PROC_VTABLE(reset) ++#define cpu_do_idle PROC_VTABLE(_do_idle) ++#define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area) ++#define cpu_set_pte_ext PROC_TABLE(set_pte_ext) ++#define cpu_do_switch_mm PROC_VTABLE(switch_mm) ++ ++/* These two are private to arch/arm/kernel/suspend.c */ ++#define cpu_do_suspend PROC_VTABLE(do_suspend) ++#define cpu_do_resume PROC_VTABLE(do_resume) + #endif + + extern void cpu_resume(void); +diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h +index 57d2ad9c75ca..df8420672c7e 100644 +--- a/arch/arm/include/asm/thread_info.h ++++ b/arch/arm/include/asm/thread_info.h +@@ -124,8 +124,8 @@ extern void vfp_flush_hwstate(struct thread_info *); + struct user_vfp; + struct user_vfp_exc; + +-extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *, +- struct user_vfp_exc __user *); ++extern int vfp_preserve_user_clear_hwstate(struct user_vfp *, ++ struct user_vfp_exc *); + extern int vfp_restore_user_hwstate(struct user_vfp *, + struct user_vfp_exc *); + #endif +diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h +index 
4140be431087..a5807b67ca8a 100644 +--- a/arch/arm/include/asm/uaccess.h ++++ b/arch/arm/include/asm/uaccess.h +@@ -69,6 +69,14 @@ extern int __put_user_bad(void); + static inline void set_fs(mm_segment_t fs) + { + current_thread_info()->addr_limit = fs; ++ ++ /* ++ * Prevent a mispredicted conditional call to set_fs from forwarding ++ * the wrong address limit to access_ok under speculation. ++ */ ++ dsb(nsh); ++ isb(); ++ + modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); + } + +@@ -91,6 +99,32 @@ static inline void set_fs(mm_segment_t fs) + #define __inttype(x) \ + __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) + ++/* ++ * Sanitise a uaccess pointer such that it becomes NULL if addr+size ++ * is above the current addr_limit. ++ */ ++#define uaccess_mask_range_ptr(ptr, size) \ ++ ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size)) ++static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr, ++ size_t size) ++{ ++ void __user *safe_ptr = (void __user *)ptr; ++ unsigned long tmp; ++ ++ asm volatile( ++ " sub %1, %3, #1\n" ++ " subs %1, %1, %0\n" ++ " addhs %1, %1, #1\n" ++ " subhss %1, %1, %2\n" ++ " movlo %0, #0\n" ++ : "+r" (safe_ptr), "=&r" (tmp) ++ : "r" (size), "r" (current_thread_info()->addr_limit) ++ : "cc"); ++ ++ csdb(); ++ return safe_ptr; ++} ++ + /* + * Single-value transfer routines. They automatically use the right + * size if we just have the right pointer type. Note that the functions +@@ -362,6 +396,14 @@ do { \ + __pu_err; \ + }) + ++#ifdef CONFIG_CPU_SPECTRE ++/* ++ * When mitigating Spectre variant 1.1, all accessors need to include ++ * verification of the address space. ++ */ ++#define __put_user(x, ptr) put_user(x, ptr) ++ ++#else + #define __put_user(x, ptr) \ + ({ \ + long __pu_err = 0; \ +@@ -369,12 +411,6 @@ do { \ + __pu_err; \ + }) + +-#define __put_user_error(x, ptr, err) \ +-({ \ +- __put_user_switch((x), (ptr), (err), __put_user_nocheck); \ +- (void) 0; \ +-}) +- + #define __put_user_nocheck(x, __pu_ptr, __err, __size) \ + do { \ + unsigned long __pu_addr = (unsigned long)__pu_ptr; \ +@@ -454,6 +490,7 @@ do { \ + : "r" (x), "i" (-EFAULT) \ + : "cc") + ++#endif /* !CONFIG_CPU_SPECTRE */ + + #ifdef CONFIG_MMU + extern unsigned long __must_check +diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c +index 7be511310191..d41d3598e5e5 100644 +--- a/arch/arm/kernel/bugs.c ++++ b/arch/arm/kernel/bugs.c +@@ -6,8 +6,8 @@ + void check_other_bugs(void) + { + #ifdef MULTI_CPU +- if (processor.check_bugs) +- processor.check_bugs(); ++ if (cpu_check_bugs) ++ cpu_check_bugs(); + #endif + } + +diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S +index 8733012d231f..7e662bdd5cb3 100644 +--- a/arch/arm/kernel/head-common.S ++++ b/arch/arm/kernel/head-common.S +@@ -122,6 +122,9 @@ __mmap_switched_data: + .long init_thread_union + THREAD_START_SP @ sp + .size __mmap_switched_data, . - __mmap_switched_data + ++ __FINIT ++ .text ++ + /* + * This provides a C-API version of __lookup_processor_type + */ +@@ -133,9 +136,6 @@ ENTRY(lookup_processor_type) + ldmfd sp!, {r4 - r6, r9, pc} + ENDPROC(lookup_processor_type) + +- __FINIT +- .text +- + /* + * Read processor ID register (CP#15, CR0), and look up in the linker-built + * supported processor list. 
Note that we can't use the absolute addresses +diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c +index 8e9a3e40d949..a6d27284105a 100644 +--- a/arch/arm/kernel/setup.c ++++ b/arch/arm/kernel/setup.c +@@ -115,6 +115,11 @@ EXPORT_SYMBOL(elf_hwcap2); + + #ifdef MULTI_CPU + struct processor processor __ro_after_init; ++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ++struct processor *cpu_vtable[NR_CPUS] = { ++ [0] = &processor, ++}; ++#endif + #endif + #ifdef MULTI_TLB + struct cpu_tlb_fns cpu_tlb __ro_after_init; +@@ -667,28 +672,33 @@ static void __init smp_build_mpidr_hash(void) + } + #endif + +-static void __init setup_processor(void) ++/* ++ * locate processor in the list of supported processor types. The linker ++ * builds this table for us from the entries in arch/arm/mm/proc-*.S ++ */ ++struct proc_info_list *lookup_processor(u32 midr) + { +- struct proc_info_list *list; ++ struct proc_info_list *list = lookup_processor_type(midr); + +- /* +- * locate processor in the list of supported processor +- * types. The linker builds this table for us from the +- * entries in arch/arm/mm/proc-*.S +- */ +- list = lookup_processor_type(read_cpuid_id()); + if (!list) { +- pr_err("CPU configuration botched (ID %08x), unable to continue.\n", +- read_cpuid_id()); +- while (1); ++ pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n", ++ smp_processor_id(), midr); ++ while (1) ++ /* can't use cpu_relax() here as it may require MMU setup */; + } + ++ return list; ++} ++ ++static void __init setup_processor(void) ++{ ++ unsigned int midr = read_cpuid_id(); ++ struct proc_info_list *list = lookup_processor(midr); ++ + cpu_name = list->cpu_name; + __cpu_architecture = __get_cpu_architecture(); + +-#ifdef MULTI_CPU +- processor = *list->proc; +-#endif ++ init_proc_vtable(list->proc); + #ifdef MULTI_TLB + cpu_tlb = *list->tlb; + #endif +@@ -700,7 +710,7 @@ static void __init setup_processor(void) + #endif + + pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", +- cpu_name, read_cpuid_id(), read_cpuid_id() & 15, ++ list->cpu_name, midr, midr & 15, + proc_arch[cpu_architecture()], get_cr()); + + snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c", +diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c +index cdfe52b15a0a..02e6b6dfffa7 100644 +--- a/arch/arm/kernel/signal.c ++++ b/arch/arm/kernel/signal.c +@@ -76,8 +76,6 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame) + kframe->magic = IWMMXT_MAGIC; + kframe->size = IWMMXT_STORAGE_SIZE; + iwmmxt_task_copy(current_thread_info(), &kframe->storage); +- +- err = __copy_to_user(frame, kframe, sizeof(*frame)); + } else { + /* + * For bug-compatibility with older kernels, some space +@@ -85,10 +83,14 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame) + * Set the magic and size appropriately so that properly + * written userspace can skip it reliably: + */ +- __put_user_error(DUMMY_MAGIC, &frame->magic, err); +- __put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err); ++ *kframe = (struct iwmmxt_sigframe) { ++ .magic = DUMMY_MAGIC, ++ .size = IWMMXT_STORAGE_SIZE, ++ }; + } + ++ err = __copy_to_user(frame, kframe, sizeof(*kframe)); ++ + return err; + } + +@@ -134,17 +136,18 @@ static int restore_iwmmxt_context(char __user **auxp) + + static int preserve_vfp_context(struct vfp_sigframe __user *frame) + { +- const unsigned long magic = VFP_MAGIC; +- const unsigned long size = VFP_STORAGE_SIZE; ++ struct vfp_sigframe kframe; + int err = 0; + +- 
__put_user_error(magic, &frame->magic, err); +- __put_user_error(size, &frame->size, err); ++ memset(&kframe, 0, sizeof(kframe)); ++ kframe.magic = VFP_MAGIC; ++ kframe.size = VFP_STORAGE_SIZE; + ++ err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc); + if (err) +- return -EFAULT; ++ return err; + +- return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc); ++ return __copy_to_user(frame, &kframe, sizeof(kframe)); + } + + static int restore_vfp_context(char __user **auxp) +@@ -296,30 +299,35 @@ static int + setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) + { + struct aux_sigframe __user *aux; ++ struct sigcontext context; + int err = 0; + +- __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err); +- __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err); +- __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err); +- __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err); +- __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err); +- __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err); +- __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err); +- __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err); +- __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err); +- __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err); +- __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err); +- __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err); +- __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err); +- __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err); +- __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err); +- __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err); +- __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err); +- +- __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err); +- __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err); +- __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err); +- __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err); ++ context = (struct sigcontext) { ++ .arm_r0 = regs->ARM_r0, ++ .arm_r1 = regs->ARM_r1, ++ .arm_r2 = regs->ARM_r2, ++ .arm_r3 = regs->ARM_r3, ++ .arm_r4 = regs->ARM_r4, ++ .arm_r5 = regs->ARM_r5, ++ .arm_r6 = regs->ARM_r6, ++ .arm_r7 = regs->ARM_r7, ++ .arm_r8 = regs->ARM_r8, ++ .arm_r9 = regs->ARM_r9, ++ .arm_r10 = regs->ARM_r10, ++ .arm_fp = regs->ARM_fp, ++ .arm_ip = regs->ARM_ip, ++ .arm_sp = regs->ARM_sp, ++ .arm_lr = regs->ARM_lr, ++ .arm_pc = regs->ARM_pc, ++ .arm_cpsr = regs->ARM_cpsr, ++ ++ .trap_no = current->thread.trap_no, ++ .error_code = current->thread.error_code, ++ .fault_address = current->thread.address, ++ .oldmask = set->sig[0], ++ }; ++ ++ err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context)); + + err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); + +@@ -336,7 +344,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) + if (err == 0) + err |= preserve_vfp_context(&aux->vfp); + #endif +- __put_user_error(0, &aux->end_magic, err); ++ err |= __put_user(0, &aux->end_magic); + + return err; + } +@@ -468,7 +476,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) + /* + * Set uc.uc_flags to a value which sc.trap_no would never have. 
+ */ +- __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err); ++ err = __put_user(0x5ac3c35a, &frame->uc.uc_flags); + + err |= setup_sigframe(frame, regs, set); + if (err == 0) +@@ -488,8 +496,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) + + err |= copy_siginfo_to_user(&frame->info, &ksig->info); + +- __put_user_error(0, &frame->sig.uc.uc_flags, err); +- __put_user_error(NULL, &frame->sig.uc.uc_link, err); ++ err |= __put_user(0, &frame->sig.uc.uc_flags); ++ err |= __put_user(NULL, &frame->sig.uc.uc_link); + + err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp); + err |= setup_sigframe(&frame->sig, regs, set); +diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c +index 5e31c62127a0..f57333f46242 100644 +--- a/arch/arm/kernel/smp.c ++++ b/arch/arm/kernel/smp.c +@@ -42,6 +42,7 @@ + #include <asm/mmu_context.h> + #include <asm/pgtable.h> + #include <asm/pgalloc.h> ++#include <asm/procinfo.h> + #include <asm/processor.h> + #include <asm/sections.h> + #include <asm/tlbflush.h> +@@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd) + #endif + } + ++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ++static int secondary_biglittle_prepare(unsigned int cpu) ++{ ++ if (!cpu_vtable[cpu]) ++ cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL); ++ ++ return cpu_vtable[cpu] ? 0 : -ENOMEM; ++} ++ ++static void secondary_biglittle_init(void) ++{ ++ init_proc_vtable(lookup_processor(read_cpuid_id())->proc); ++} ++#else ++static int secondary_biglittle_prepare(unsigned int cpu) ++{ ++ return 0; ++} ++ ++static void secondary_biglittle_init(void) ++{ ++} ++#endif ++ + int __cpu_up(unsigned int cpu, struct task_struct *idle) + { + int ret; +@@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) + if (!smp_ops.smp_boot_secondary) + return -ENOSYS; + ++ ret = secondary_biglittle_prepare(cpu); ++ if (ret) ++ return ret; ++ + /* + * We need to tell the secondary core where to find + * its stack and the page tables. +@@ -360,6 +389,8 @@ asmlinkage void secondary_start_kernel(void) + struct mm_struct *mm = &init_mm; + unsigned int cpu; + ++ secondary_biglittle_init(); ++ + /* + * The identity mapping is uncached (strongly ordered), so + * switch away from it before attempting any exclusive accesses. 
+diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c +index 4abe4909417f..a87684532327 100644 +--- a/arch/arm/kernel/sys_oabi-compat.c ++++ b/arch/arm/kernel/sys_oabi-compat.c +@@ -277,6 +277,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd, + int maxevents, int timeout) + { + struct epoll_event *kbuf; ++ struct oabi_epoll_event e; + mm_segment_t fs; + long ret, err, i; + +@@ -295,8 +296,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd, + set_fs(fs); + err = 0; + for (i = 0; i < ret; i++) { +- __put_user_error(kbuf[i].events, &events->events, err); +- __put_user_error(kbuf[i].data, &events->data, err); ++ e.events = kbuf[i].events; ++ e.data = kbuf[i].data; ++ err = __copy_to_user(events, &e, sizeof(e)); ++ if (err) ++ break; + events++; + } + kfree(kbuf); +diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S +index a826df3d3814..6709a8d33963 100644 +--- a/arch/arm/lib/copy_from_user.S ++++ b/arch/arm/lib/copy_from_user.S +@@ -93,11 +93,7 @@ ENTRY(arm_copy_from_user) + #ifdef CONFIG_CPU_SPECTRE + get_thread_info r3 + ldr r3, [r3, #TI_ADDR_LIMIT] +- adds ip, r1, r2 @ ip=addr+size +- sub r3, r3, #1 @ addr_limit - 1 +- cmpcc ip, r3 @ if (addr+size > addr_limit - 1) +- movcs r1, #0 @ addr = NULL +- csdb ++ uaccess_mask_range_ptr r1, r2, r3, ip + #endif + + #include "copy_template.S" +diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S +index caf5019d8161..970abe521197 100644 +--- a/arch/arm/lib/copy_to_user.S ++++ b/arch/arm/lib/copy_to_user.S +@@ -94,6 +94,11 @@ + + ENTRY(__copy_to_user_std) + WEAK(arm_copy_to_user) ++#ifdef CONFIG_CPU_SPECTRE ++ get_thread_info r3 ++ ldr r3, [r3, #TI_ADDR_LIMIT] ++ uaccess_mask_range_ptr r0, r2, r3, ip ++#endif + + #include "copy_template.S" + +@@ -108,4 +113,3 @@ ENDPROC(__copy_to_user_std) + rsb r0, r0, r2 + copy_abort_end + .popsection +- +diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c +index 9b4ed1728616..73dc7360cbdd 100644 +--- a/arch/arm/lib/uaccess_with_memcpy.c ++++ b/arch/arm/lib/uaccess_with_memcpy.c +@@ -152,7 +152,8 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n) + n = __copy_to_user_std(to, from, n); + uaccess_restore(ua_flags); + } else { +- n = __copy_to_user_memcpy(to, from, n); ++ n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n), ++ from, n); + } + return n; + } +diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c +index a109f6482413..0f916c245a2e 100644 +--- a/arch/arm/mach-integrator/impd1.c ++++ b/arch/arm/mach-integrator/impd1.c +@@ -393,7 +393,11 @@ static int __ref impd1_probe(struct lm_device *dev) + sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup), + GFP_KERNEL); + chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL); +- mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id); ++ mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL, ++ "lm%x:00700", dev->id); ++ if (!lookup || !chipname || !mmciname) ++ return -ENOMEM; ++ + lookup->dev_id = mmciname; + /* + * Offsets on GPIO block 1: +diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S +index 81d0efb055c6..5461d589a1e2 100644 +--- a/arch/arm/mm/proc-macros.S ++++ b/arch/arm/mm/proc-macros.S +@@ -274,6 +274,13 @@ + .endm + + .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0 ++/* ++ * If we are building for big.Little with branch predictor hardening, ++ * we need the processor function tables to remain available after boot. 
++ */ ++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ++ .section ".rodata" ++#endif + .type \name\()_processor_functions, #object + .align 2 + ENTRY(\name\()_processor_functions) +@@ -309,6 +316,9 @@ ENTRY(\name\()_processor_functions) + .endif + + .size \name\()_processor_functions, . - \name\()_processor_functions ++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ++ .previous ++#endif + .endm + + .macro define_cache_functions name:req +diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c +index 5544b82a2e7a..9a07916af8dd 100644 +--- a/arch/arm/mm/proc-v7-bugs.c ++++ b/arch/arm/mm/proc-v7-bugs.c +@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void) + case ARM_CPU_PART_CORTEX_A17: + case ARM_CPU_PART_CORTEX_A73: + case ARM_CPU_PART_CORTEX_A75: +- if (processor.switch_mm != cpu_v7_bpiall_switch_mm) +- goto bl_error; + per_cpu(harden_branch_predictor_fn, cpu) = + harden_branch_predictor_bpiall; + spectre_v2_method = "BPIALL"; +@@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void) + + case ARM_CPU_PART_CORTEX_A15: + case ARM_CPU_PART_BRAHMA_B15: +- if (processor.switch_mm != cpu_v7_iciallu_switch_mm) +- goto bl_error; + per_cpu(harden_branch_predictor_fn, cpu) = + harden_branch_predictor_iciallu; + spectre_v2_method = "ICIALLU"; +@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void) + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + if ((int)res.a0 != 0) + break; +- if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu) +- goto bl_error; + per_cpu(harden_branch_predictor_fn, cpu) = + call_hvc_arch_workaround_1; +- processor.switch_mm = cpu_v7_hvc_switch_mm; ++ cpu_do_switch_mm = cpu_v7_hvc_switch_mm; + spectre_v2_method = "hypervisor"; + break; + +@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void) + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + if ((int)res.a0 != 0) + break; +- if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu) +- goto bl_error; + per_cpu(harden_branch_predictor_fn, cpu) = + call_smc_arch_workaround_1; +- processor.switch_mm = cpu_v7_smc_switch_mm; ++ cpu_do_switch_mm = cpu_v7_smc_switch_mm; + spectre_v2_method = "firmware"; + break; + +@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void) + if (spectre_v2_method) + pr_info("CPU%u: Spectre v2: using %s workaround\n", + smp_processor_id(), spectre_v2_method); +- return; +- +-bl_error: +- pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n", +- cpu); + } + #else + static void cpu_v7_spectre_init(void) +diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c +index 6abcd4af8274..8e11223d32a1 100644 +--- a/arch/arm/vfp/vfpmodule.c ++++ b/arch/arm/vfp/vfpmodule.c +@@ -554,12 +554,11 @@ void vfp_flush_hwstate(struct thread_info *thread) + * Save the current VFP state into the provided structures and prepare + * for entry into a new function (signal handler). + */ +-int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp, +- struct user_vfp_exc __user *ufp_exc) ++int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp, ++ struct user_vfp_exc *ufp_exc) + { + struct thread_info *thread = current_thread_info(); + struct vfp_hard_struct *hwstate = &thread->vfpstate.hard; +- int err = 0; + + /* Ensure that the saved hwstate is up-to-date. */ + vfp_sync_hwstate(thread); +@@ -568,22 +567,19 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp, + * Copy the floating point registers. There can be unused + * registers see asm/hwcap.h for details. 
+ */ +- err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs, +- sizeof(hwstate->fpregs)); ++ memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs)); ++ + /* + * Copy the status and control register. + */ +- __put_user_error(hwstate->fpscr, &ufp->fpscr, err); ++ ufp->fpscr = hwstate->fpscr; + + /* + * Copy the exception registers. + */ +- __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err); +- __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err); +- __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err); +- +- if (err) +- return -EFAULT; ++ ufp_exc->fpexc = hwstate->fpexc; ++ ufp_exc->fpinst = hwstate->fpinst; ++ ufp_exc->fpinst2 = hwstate->fpinst2; + + /* Ensure that VFP is disabled. */ + vfp_flush_hwstate(thread); +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c +index 7d12b0d1f359..e14a39598e8a 100644 +--- a/arch/x86/events/core.c ++++ b/arch/x86/events/core.c +@@ -2250,6 +2250,19 @@ void perf_check_microcode(void) + x86_pmu.check_microcode(); + } + ++static int x86_pmu_check_period(struct perf_event *event, u64 value) ++{ ++ if (x86_pmu.check_period && x86_pmu.check_period(event, value)) ++ return -EINVAL; ++ ++ if (value && x86_pmu.limit_period) { ++ if (x86_pmu.limit_period(event, value) > value) ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ + static struct pmu pmu = { + .pmu_enable = x86_pmu_enable, + .pmu_disable = x86_pmu_disable, +@@ -2274,6 +2287,7 @@ static struct pmu pmu = { + .event_idx = x86_pmu_event_idx, + .sched_task = x86_pmu_sched_task, + .task_ctx_size = sizeof(struct x86_perf_task_context), ++ .check_period = x86_pmu_check_period, + }; + + void arch_perf_update_userpage(struct perf_event *event, +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c +index 1cb5ff3ee728..9f556c94a0b8 100644 +--- a/arch/x86/events/intel/core.c ++++ b/arch/x86/events/intel/core.c +@@ -3445,6 +3445,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx, + intel_pmu_lbr_sched_task(ctx, sched_in); + } + ++static int intel_pmu_check_period(struct perf_event *event, u64 value) ++{ ++ return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0; ++} ++ + PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); + + PMU_FORMAT_ATTR(ldlat, "config1:0-15"); +@@ -3525,6 +3530,8 @@ static __initconst const struct x86_pmu core_pmu = { + .cpu_starting = intel_pmu_cpu_starting, + .cpu_dying = intel_pmu_cpu_dying, + .cpu_dead = intel_pmu_cpu_dead, ++ ++ .check_period = intel_pmu_check_period, + }; + + static struct attribute *intel_pmu_attrs[]; +@@ -3568,6 +3575,8 @@ static __initconst const struct x86_pmu intel_pmu = { + + .guest_get_msrs = intel_guest_get_msrs, + .sched_task = intel_pmu_sched_task, ++ ++ .check_period = intel_pmu_check_period, + }; + + static __init void intel_clovertown_quirk(void) +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h +index 3c51fcaf1e34..fbbc10338987 100644 +--- a/arch/x86/events/perf_event.h ++++ b/arch/x86/events/perf_event.h +@@ -639,6 +639,11 @@ struct x86_pmu { + * Intel host/guest support (KVM) + */ + struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); ++ ++ /* ++ * Check period value for PERF_EVENT_IOC_PERIOD ioctl. 
++ */ ++ int (*check_period) (struct perf_event *event, u64 period); + }; + + struct x86_perf_task_context { +@@ -848,7 +853,7 @@ static inline int amd_pmu_init(void) + + #ifdef CONFIG_CPU_SUP_INTEL + +-static inline bool intel_pmu_has_bts(struct perf_event *event) ++static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period) + { + struct hw_perf_event *hwc = &event->hw; + unsigned int hw_event, bts_event; +@@ -859,7 +864,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event) + hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; + bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); + +- return hw_event == bts_event && hwc->sample_period == 1; ++ return hw_event == bts_event && period == 1; ++} ++ ++static inline bool intel_pmu_has_bts(struct perf_event *event) ++{ ++ struct hw_perf_event *hwc = &event->hw; ++ ++ return intel_pmu_has_bts_period(event, hwc->sample_period); + } + + int intel_pmu_save_and_restart(struct perf_event *event); +diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c +index 8e02b30cf08e..3ebd77770f98 100644 +--- a/arch/x86/ia32/ia32_aout.c ++++ b/arch/x86/ia32/ia32_aout.c +@@ -51,7 +51,7 @@ static unsigned long get_dr(int n) + /* + * fill in the user structure for a core dump.. + */ +-static void dump_thread32(struct pt_regs *regs, struct user32 *dump) ++static void fill_dump(struct pt_regs *regs, struct user32 *dump) + { + u32 fs, gs; + memset(dump, 0, sizeof(*dump)); +@@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm) + fs = get_fs(); + set_fs(KERNEL_DS); + has_dumped = 1; ++ ++ fill_dump(cprm->regs, &dump); ++ + strncpy(dump.u_comm, current->comm, sizeof(current->comm)); + dump.u_ar0 = offsetof(struct user32, regs); + dump.signal = cprm->siginfo->si_signo; +- dump_thread32(cprm->regs, &dump); + + /* + * If the size of the dump file exceeds the rlimit, then see +diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h +index e652a7cc6186..3f697a9e3f59 100644 +--- a/arch/x86/include/asm/uv/bios.h ++++ b/arch/x86/include/asm/uv/bios.h +@@ -48,7 +48,8 @@ enum { + BIOS_STATUS_SUCCESS = 0, + BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, + BIOS_STATUS_EINVAL = -EINVAL, +- BIOS_STATUS_UNAVAIL = -EBUSY ++ BIOS_STATUS_UNAVAIL = -EBUSY, ++ BIOS_STATUS_ABORT = -EINTR, + }; + + /* Address map parameters */ +@@ -167,4 +168,9 @@ extern long system_serial_number; + + extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ + ++/* ++ * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details ++ */ ++extern struct semaphore __efi_uv_runtime_lock; ++ + #endif /* _ASM_X86_UV_BIOS_H */ +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 1f5de4314291..8e5a977bf50e 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -2230,7 +2230,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, + if (!entry_only) + j = find_msr(&m->host, msr); + +- if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { ++ if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) || ++ (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) { + printk_once(KERN_WARNING "Not enough msr switch entries. 
" + "Can't add msr %x\n", msr); + return; +diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c +index 4a6a5a26c582..eb33432f2f24 100644 +--- a/arch/x86/platform/uv/bios_uv.c ++++ b/arch/x86/platform/uv/bios_uv.c +@@ -29,7 +29,8 @@ + + struct uv_systab *uv_systab; + +-s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) ++static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, ++ u64 a4, u64 a5) + { + struct uv_systab *tab = uv_systab; + s64 ret; +@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) + + return ret; + } ++ ++s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) ++{ ++ s64 ret; ++ ++ if (down_interruptible(&__efi_uv_runtime_lock)) ++ return BIOS_STATUS_ABORT; ++ ++ ret = __uv_bios_call(which, a1, a2, a3, a4, a5); ++ up(&__efi_uv_runtime_lock); ++ ++ return ret; ++} + EXPORT_SYMBOL_GPL(uv_bios_call); + + s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, +@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, + unsigned long bios_flags; + s64 ret; + ++ if (down_interruptible(&__efi_uv_runtime_lock)) ++ return BIOS_STATUS_ABORT; ++ + local_irq_save(bios_flags); +- ret = uv_bios_call(which, a1, a2, a3, a4, a5); ++ ret = __uv_bios_call(which, a1, a2, a3, a4, a5); + local_irq_restore(bios_flags); + ++ up(&__efi_uv_runtime_lock); ++ + return ret; + } + +diff --git a/block/blk-flush.c b/block/blk-flush.c +index 4938bec8cfef..6603352879e7 100644 +--- a/block/blk-flush.c ++++ b/block/blk-flush.c +@@ -402,7 +402,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error) + blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error); + spin_unlock_irqrestore(&fq->mq_flush_lock, flags); + +- blk_mq_run_hw_queue(hctx, true); ++ blk_mq_sched_restart(hctx); + } + + /** +diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c +index 8fb74d9011da..a7907b58562a 100644 +--- a/drivers/acpi/numa.c ++++ b/drivers/acpi/numa.c +@@ -147,9 +147,9 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header) + { + struct acpi_srat_mem_affinity *p = + (struct acpi_srat_mem_affinity *)header; +- pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n", +- (unsigned long)p->base_address, +- (unsigned long)p->length, ++ pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n", ++ (unsigned long long)p->base_address, ++ (unsigned long long)p->length, + p->proximity_domain, + (p->flags & ACPI_SRAT_MEM_ENABLED) ? + "enabled" : "disabled", +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index 93754300cb57..66c2790dcc5f 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -1523,17 +1523,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy) + { + unsigned int ret_freq = 0; + +- if (!cpufreq_driver->get) ++ if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get) + return ret_freq; + + ret_freq = cpufreq_driver->get(policy->cpu); + + /* +- * Updating inactive policies is invalid, so avoid doing that. Also +- * if fast frequency switching is used with the given policy, the check ++ * If fast frequency switching is used with the given policy, the check + * against policy->cur is pointless, so skip it in that case too. 
+ */ +- if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled) ++ if (policy->fast_switch_enabled) + return ret_freq; + + if (ret_freq && policy->cur && +@@ -1562,10 +1561,7 @@ unsigned int cpufreq_get(unsigned int cpu) + + if (policy) { + down_read(&policy->rwsem); +- +- if (!policy_is_inactive(policy)) +- ret_freq = __cpufreq_get(policy); +- ++ ret_freq = __cpufreq_get(policy); + up_read(&policy->rwsem); + + cpufreq_cpu_put(policy); +diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c +index ae54870b2788..dd7f63354ca0 100644 +--- a/drivers/firmware/efi/runtime-wrappers.c ++++ b/drivers/firmware/efi/runtime-wrappers.c +@@ -49,6 +49,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call) + local_irq_restore(flags); + } + ++/* ++ * Expose the EFI runtime lock to the UV platform ++ */ ++#ifdef CONFIG_X86_UV ++extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock); ++#endif ++ + /* + * According to section 7.1 of the UEFI spec, Runtime Services are not fully + * reentrant, and there are particular combinations of calls that need to be +diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c +index 8636e7eeb731..6eebd8ad0c52 100644 +--- a/drivers/gpu/drm/bridge/tc358767.c ++++ b/drivers/gpu/drm/bridge/tc358767.c +@@ -96,6 +96,8 @@ + #define DP0_STARTVAL 0x064c + #define DP0_ACTIVEVAL 0x0650 + #define DP0_SYNCVAL 0x0654 ++#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15) ++#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31) + #define DP0_MISC 0x0658 + #define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ + #define BPC_6 (0 << 5) +@@ -140,6 +142,8 @@ + #define DP0_LTLOOPCTRL 0x06d8 + #define DP0_SNKLTCTRL 0x06e4 + ++#define DP1_SRCCTRL 0x07a0 ++ + /* PHY */ + #define DP_PHY_CTRL 0x0800 + #define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */ +@@ -148,6 +152,7 @@ + #define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */ + #define PHY_RDY BIT(16) /* PHY Main Channels Ready */ + #define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */ ++#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */ + #define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */ + #define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */ + +@@ -538,6 +543,7 @@ static int tc_aux_link_setup(struct tc_data *tc) + unsigned long rate; + u32 value; + int ret; ++ u32 dp_phy_ctrl; + + rate = clk_get_rate(tc->refclk); + switch (rate) { +@@ -562,7 +568,10 @@ static int tc_aux_link_setup(struct tc_data *tc) + value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; + tc_write(SYS_PLLPARAM, value); + +- tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN); ++ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN; ++ if (tc->link.base.num_lanes == 2) ++ dp_phy_ctrl |= PHY_2LANE; ++ tc_write(DP_PHY_CTRL, dp_phy_ctrl); + + /* + * Initially PLLs are in bypass. Force PLL parameter update, +@@ -717,7 +726,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) + + tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay)); + +- tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0)); ++ tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) | ++ ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) | ++ ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? 
SYNCVAL_VS_POL_ACTIVE_LOW : 0)); + + tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | + DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); +@@ -827,12 +838,11 @@ static int tc_main_link_setup(struct tc_data *tc) + if (!tc->mode) + return -EINVAL; + +- /* from excel file - DP0_SrcCtrl */ +- tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B | +- DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 | +- DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT); +- /* from excel file - DP1_SrcCtrl */ +- tc_write(0x07a0, 0x00003083); ++ tc_write(DP0_SRCCTRL, tc_srcctrl(tc)); ++ /* SSCG and BW27 on DP1 must be set to the same as on DP0 */ ++ tc_write(DP1_SRCCTRL, ++ (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) | ++ ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0)); + + rate = clk_get_rate(tc->refclk); + switch (rate) { +@@ -853,8 +863,11 @@ static int tc_main_link_setup(struct tc_data *tc) + } + value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; + tc_write(SYS_PLLPARAM, value); ++ + /* Setup Main Link */ +- dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN; ++ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN; ++ if (tc->link.base.num_lanes == 2) ++ dp_phy_ctrl |= PHY_2LANE; + tc_write(DP_PHY_CTRL, dp_phy_ctrl); + msleep(100); + +@@ -1103,10 +1116,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge, + static int tc_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { ++ struct tc_data *tc = connector_to_tc(connector); ++ u32 req, avail; ++ u32 bits_per_pixel = 24; ++ + /* DPI interface clock limitation: upto 154 MHz */ + if (mode->clock > 154000) + return MODE_CLOCK_HIGH; + ++ req = mode->clock * bits_per_pixel / 8; ++ avail = tc->link.base.num_lanes * tc->link.base.rate; ++ ++ if (req > avail) ++ return MODE_BAD; ++ + return MODE_OK; + } + +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 1f19e6d9a717..5d8a67c65141 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -1633,6 +1633,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, + return 0; + } + ++static inline bool ++__vma_matches(struct vm_area_struct *vma, struct file *filp, ++ unsigned long addr, unsigned long size) ++{ ++ if (vma->vm_file != filp) ++ return false; ++ ++ return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size; ++} ++ + /** + * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address + * it is mapped to. 
+@@ -1691,7 +1701,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, + return -EINTR; + } + vma = find_vma(mm, addr); +- if (vma) ++ if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) + vma->vm_page_prot = + pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + else +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c +index 2e7b4e2105ef..62cb376e2c01 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c +@@ -22,6 +22,7 @@ + #include <engine/falcon.h> + + #include <core/gpuobj.h> ++#include <subdev/mc.h> + #include <subdev/timer.h> + #include <engine/fifo.h> + +@@ -107,8 +108,10 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend) + } + } + +- nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000); +- nvkm_wr32(device, base + 0x014, 0xffffffff); ++ if (nvkm_mc_enabled(device, engine->subdev.index)) { ++ nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000); ++ nvkm_wr32(device, base + 0x014, 0xffffffff); ++ } + return 0; + } + +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c +index 952a7cb0a59a..692d4d96766a 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c +@@ -131,11 +131,12 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode) + duty = nvkm_therm_update_linear(therm); + break; + case NVBIOS_THERM_FAN_OTHER: +- if (therm->cstate) ++ if (therm->cstate) { + duty = therm->cstate; +- else ++ poll = false; ++ } else { + duty = nvkm_therm_update_linear_fallback(therm); +- poll = false; ++ } + break; + } + immd = false; +diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c +index 1efcfdf9f8a8..dd9dd4e40827 100644 +--- a/drivers/input/misc/bma150.c ++++ b/drivers/input/misc/bma150.c +@@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150) + idev->close = bma150_irq_close; + input_set_drvdata(idev, bma150); + ++ bma150->input = idev; ++ + error = input_register_device(idev); + if (error) { + input_free_device(idev); + return error; + } + +- bma150->input = idev; + return 0; + } + +@@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150) + + bma150_init_input_device(bma150, ipoll_dev->input); + ++ bma150->input_polled = ipoll_dev; ++ bma150->input = ipoll_dev->input; ++ + error = input_register_polled_device(ipoll_dev); + if (error) { + input_free_polled_device(ipoll_dev); + return error; + } + +- bma150->input_polled = ipoll_dev; +- bma150->input = ipoll_dev->input; +- + return 0; + } + +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c +index f2bf8fa1ab04..fce70f4ef004 100644 +--- a/drivers/input/mouse/elan_i2c_core.c ++++ b/drivers/input/mouse/elan_i2c_core.c +@@ -1251,7 +1251,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id); + static const struct acpi_device_id elan_acpi_id[] = { + { "ELAN0000", 0 }, + { "ELAN0100", 0 }, +- { "ELAN0501", 0 }, + { "ELAN0600", 0 }, + { "ELAN0602", 0 }, + { "ELAN0605", 0 }, +@@ -1262,6 +1261,7 @@ static const struct acpi_device_id elan_acpi_id[] = { + { "ELAN060C", 0 }, + { "ELAN0611", 0 }, + { "ELAN0612", 0 }, ++ { "ELAN0617", 0 }, + { "ELAN0618", 0 }, + { "ELAN061C", 0 }, + { "ELAN061D", 0 }, +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c +index 84c69e962230..fda33fc3ffcc 100644 +--- a/drivers/input/mouse/elantech.c ++++ 
b/drivers/input/mouse/elantech.c +@@ -1121,6 +1121,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, + * Asus UX31 0x361f00 20, 15, 0e clickpad + * Asus UX32VD 0x361f02 00, 15, 0e clickpad + * Avatar AVIU-145A2 0x361f00 ? clickpad ++ * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**) ++ * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**) + * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons + * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons + * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons +@@ -1173,6 +1175,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), + }, + }, ++ { ++ /* Fujitsu H780 also has a middle button */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"), ++ }, ++ }, + #endif + { } + }; +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c +index 1f6d8b6be5c7..0d2005e5b24c 100644 +--- a/drivers/md/dm-crypt.c ++++ b/drivers/md/dm-crypt.c +@@ -935,7 +935,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio) + if (IS_ERR(bip)) + return PTR_ERR(bip); + +- tag_len = io->cc->on_disk_tag_size * bio_sectors(bio); ++ tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift); + + bip->bip_iter.bi_size = tag_len; + bip->bip_iter.bi_sector = io->cc->start + io->sector; +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c +index 40b624d8255d..18d6a8a10d5d 100644 +--- a/drivers/md/dm-thin.c ++++ b/drivers/md/dm-thin.c +@@ -257,6 +257,7 @@ struct pool { + + spinlock_t lock; + struct bio_list deferred_flush_bios; ++ struct bio_list deferred_flush_completions; + struct list_head prepared_mappings; + struct list_head prepared_discards; + struct list_head prepared_discards_pt2; +@@ -950,6 +951,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) + mempool_free(m, m->tc->pool->mapping_pool); + } + ++static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio) ++{ ++ struct pool *pool = tc->pool; ++ unsigned long flags; ++ ++ /* ++ * If the bio has the REQ_FUA flag set we must commit the metadata ++ * before signaling its completion. ++ */ ++ if (!bio_triggers_commit(tc, bio)) { ++ bio_endio(bio); ++ return; ++ } ++ ++ /* ++ * Complete bio with an error if earlier I/O caused changes to the ++ * metadata that can't be committed, e.g, due to I/O errors on the ++ * metadata device. ++ */ ++ if (dm_thin_aborted_changes(tc->td)) { ++ bio_io_error(bio); ++ return; ++ } ++ ++ /* ++ * Batch together any bios that trigger commits and then issue a ++ * single commit for them in process_deferred_bios(). 
++ */ ++ spin_lock_irqsave(&pool->lock, flags); ++ bio_list_add(&pool->deferred_flush_completions, bio); ++ spin_unlock_irqrestore(&pool->lock, flags); ++} ++ + static void process_prepared_mapping(struct dm_thin_new_mapping *m) + { + struct thin_c *tc = m->tc; +@@ -982,7 +1016,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) + */ + if (bio) { + inc_remap_and_issue_cell(tc, m->cell, m->data_block); +- bio_endio(bio); ++ complete_overwrite_bio(tc, bio); + } else { + inc_all_io_entry(tc->pool, m->cell->holder); + remap_and_issue(tc, m->cell->holder, m->data_block); +@@ -2328,7 +2362,7 @@ static void process_deferred_bios(struct pool *pool) + { + unsigned long flags; + struct bio *bio; +- struct bio_list bios; ++ struct bio_list bios, bio_completions; + struct thin_c *tc; + + tc = get_first_thin(pool); +@@ -2339,26 +2373,36 @@ static void process_deferred_bios(struct pool *pool) + } + + /* +- * If there are any deferred flush bios, we must commit +- * the metadata before issuing them. ++ * If there are any deferred flush bios, we must commit the metadata ++ * before issuing them or signaling their completion. + */ + bio_list_init(&bios); ++ bio_list_init(&bio_completions); ++ + spin_lock_irqsave(&pool->lock, flags); + bio_list_merge(&bios, &pool->deferred_flush_bios); + bio_list_init(&pool->deferred_flush_bios); ++ ++ bio_list_merge(&bio_completions, &pool->deferred_flush_completions); ++ bio_list_init(&pool->deferred_flush_completions); + spin_unlock_irqrestore(&pool->lock, flags); + +- if (bio_list_empty(&bios) && ++ if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) && + !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) + return; + + if (commit(pool)) { ++ bio_list_merge(&bios, &bio_completions); ++ + while ((bio = bio_list_pop(&bios))) + bio_io_error(bio); + return; + } + pool->last_commit_jiffies = jiffies; + ++ while ((bio = bio_list_pop(&bio_completions))) ++ bio_endio(bio); ++ + while ((bio = bio_list_pop(&bios))) + generic_make_request(bio); + } +@@ -2965,6 +3009,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, + INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); + spin_lock_init(&pool->lock); + bio_list_init(&pool->deferred_flush_bios); ++ bio_list_init(&pool->deferred_flush_completions); + INIT_LIST_HEAD(&pool->prepared_mappings); + INIT_LIST_HEAD(&pool->prepared_discards); + INIT_LIST_HEAD(&pool->prepared_discards_pt2); +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index 205f86f1a6cb..31c4391f6a62 100644 +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -1854,6 +1854,20 @@ static void end_sync_read(struct bio *bio) + reschedule_retry(r1_bio); + } + ++static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) ++{ ++ sector_t sync_blocks = 0; ++ sector_t s = r1_bio->sector; ++ long sectors_to_go = r1_bio->sectors; ++ ++ /* make sure these bits don't get cleared. */ ++ do { ++ bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); ++ s += sync_blocks; ++ sectors_to_go -= sync_blocks; ++ } while (sectors_to_go > 0); ++} ++ + static void end_sync_write(struct bio *bio) + { + int uptodate = !bio->bi_status; +@@ -1865,16 +1879,7 @@ static void end_sync_write(struct bio *bio) + struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; + + if (!uptodate) { +- sector_t sync_blocks = 0; +- sector_t s = r1_bio->sector; +- long sectors_to_go = r1_bio->sectors; +- /* make sure these bits doesn't get cleared. 
*/ +- do { +- bitmap_end_sync(mddev->bitmap, s, +- &sync_blocks, 1); +- s += sync_blocks; +- sectors_to_go -= sync_blocks; +- } while (sectors_to_go > 0); ++ abort_sync_write(mddev, r1_bio); + set_bit(WriteErrorSeen, &rdev->flags); + if (!test_and_set_bit(WantReplacement, &rdev->flags)) + set_bit(MD_RECOVERY_NEEDED, & +@@ -2164,8 +2169,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) + (i == r1_bio->read_disk || + !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) + continue; +- if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) ++ if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { ++ abort_sync_write(mddev, r1_bio); + continue; ++ } + + bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); + if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) +diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig +index de58762097c4..3f93e4564cab 100644 +--- a/drivers/misc/eeprom/Kconfig ++++ b/drivers/misc/eeprom/Kconfig +@@ -12,7 +12,7 @@ config EEPROM_AT24 + ones like at24c64, 24lc02 or fm24c04: + + 24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08, +- 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024 ++ 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024, 24c2048 + + Unless you like data loss puzzles, always be sure that any chip + you configure as a 24c32 (32 kbit) or larger is NOT really a +diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c +index ded48a0c77ee..59dcd97ee3de 100644 +--- a/drivers/misc/eeprom/at24.c ++++ b/drivers/misc/eeprom/at24.c +@@ -170,6 +170,7 @@ static const struct i2c_device_id at24_ids[] = { + { "24c256", AT24_DEVICE_MAGIC(262144 / 8, AT24_FLAG_ADDR16) }, + { "24c512", AT24_DEVICE_MAGIC(524288 / 8, AT24_FLAG_ADDR16) }, + { "24c1024", AT24_DEVICE_MAGIC(1048576 / 8, AT24_FLAG_ADDR16) }, ++ { "24c2048", AT24_DEVICE_MAGIC(2097152 / 8, AT24_FLAG_ADDR16) }, + { "at24", 0 }, + { /* END OF LIST */ } + }; +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +index 022b06e770d1..41ac9a2bc153 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +@@ -12978,6 +12978,24 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) + { ++ /* ++ * A skb with gso_size + header length > 9700 will cause a ++ * firmware panic. Drop GSO support. ++ * ++ * Eventually the upper layer should not pass these packets down. ++ * ++ * For speed, if the gso_size is <= 9000, assume there will ++ * not be 700 bytes of headers and pass it through. Only do a ++ * full (slow) validation if the gso_size is > 9000. ++ * ++ * (Due to the way SKB_BY_FRAGS works this will also do a full ++ * validation in that case.) 
++ */ ++ if (unlikely(skb_is_gso(skb) && ++ (skb_shinfo(skb)->gso_size > 9000) && ++ !skb_gso_validate_mac_len(skb, 9700))) ++ features &= ~NETIF_F_GSO_MASK; ++ + features = vlan_features_check(skb, features); + return vxlan_features_check(skb, features); + } +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index afb99876fa9e..06355ca832db 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -1624,8 +1624,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev) + struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; + size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size; + +- dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i], +- le64_to_cpu(desc->addr)); ++ dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], ++ le64_to_cpu(desc->addr), ++ DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); + } + + kfree(dev->host_mem_desc_bufs); +@@ -1691,8 +1692,9 @@ out_free_bufs: + while (--i >= 0) { + size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size; + +- dma_free_coherent(dev->dev, size, bufs[i], +- le64_to_cpu(descs[i].addr)); ++ dma_free_attrs(dev->dev, size, bufs[i], ++ le64_to_cpu(descs[i].addr), ++ DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); + } + + kfree(bufs); +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c +index 31632c087504..8f0368330a04 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c +@@ -839,11 +839,24 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) + return ret; + } + +- ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), 0, 0, chip->ngpio); +- if (ret) { +- dev_err(pctrl->dev, "Failed to add pin range\n"); +- gpiochip_remove(&pctrl->chip); +- return ret; ++ /* ++ * For DeviceTree-supported systems, the gpio core checks the ++ * pinctrl's device node for the "gpio-ranges" property. ++ * If it is present, it takes care of adding the pin ranges ++ * for the driver. In this case the driver can skip ahead. ++ * ++ * In order to remain compatible with older, existing DeviceTree ++ * files which don't set the "gpio-ranges" property or systems that ++ * utilize ACPI the driver has to call gpiochip_add_pin_range(). 
++ */ ++ if (!of_property_read_bool(pctrl->dev->of_node, "gpio-ranges")) { ++ ret = gpiochip_add_pin_range(&pctrl->chip, ++ dev_name(pctrl->dev), 0, 0, chip->ngpio); ++ if (ret) { ++ dev_err(pctrl->dev, "Failed to add pin range\n"); ++ gpiochip_remove(&pctrl->chip); ++ return ret; ++ } + } + + ret = gpiochip_irqchip_add(chip, +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index 1e176e11dbfa..852d7d1dcbbd 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -1128,6 +1128,10 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) + return -EINVAL; + } + ++ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) > ++ PAGE_SIZE); ++ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr), ++ PAGE_SIZE); + max_num = (max_buf - sizeof(struct smb_hdr)) / + sizeof(LOCKING_ANDX_RANGE); + buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); +@@ -1466,6 +1470,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, + if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) + return -EINVAL; + ++ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) > ++ PAGE_SIZE); ++ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr), ++ PAGE_SIZE); + max_num = (max_buf - sizeof(struct smb_hdr)) / + sizeof(LOCKING_ANDX_RANGE); + buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); +diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c +index 79078533f807..1add404618f0 100644 +--- a/fs/cifs/smb2file.c ++++ b/fs/cifs/smb2file.c +@@ -130,6 +130,8 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, + if (max_buf < sizeof(struct smb2_lock_element)) + return -EINVAL; + ++ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE); ++ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE); + max_num = max_buf / sizeof(struct smb2_lock_element); + buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL); + if (!buf) +@@ -266,6 +268,8 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile) + return -EINVAL; + } + ++ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE); ++ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE); + max_num = max_buf / sizeof(struct smb2_lock_element); + buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL); + if (!buf) { +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index 2b47757c9c68..5e63c459dc61 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -459,7 +459,7 @@ struct mem_size_stats { + }; + + static void smaps_account(struct mem_size_stats *mss, struct page *page, +- bool compound, bool young, bool dirty) ++ bool compound, bool young, bool dirty, bool locked) + { + int i, nr = compound ? 
1 << compound_order(page) : 1; + unsigned long size = nr * PAGE_SIZE; +@@ -486,24 +486,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page, + else + mss->private_clean += size; + mss->pss += (u64)size << PSS_SHIFT; ++ if (locked) ++ mss->pss_locked += (u64)size << PSS_SHIFT; + return; + } + + for (i = 0; i < nr; i++, page++) { + int mapcount = page_mapcount(page); ++ unsigned long pss = (PAGE_SIZE << PSS_SHIFT); + + if (mapcount >= 2) { + if (dirty || PageDirty(page)) + mss->shared_dirty += PAGE_SIZE; + else + mss->shared_clean += PAGE_SIZE; +- mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount; ++ mss->pss += pss / mapcount; ++ if (locked) ++ mss->pss_locked += pss / mapcount; + } else { + if (dirty || PageDirty(page)) + mss->private_dirty += PAGE_SIZE; + else + mss->private_clean += PAGE_SIZE; +- mss->pss += PAGE_SIZE << PSS_SHIFT; ++ mss->pss += pss; ++ if (locked) ++ mss->pss_locked += pss; + } + } + } +@@ -526,6 +533,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, + { + struct mem_size_stats *mss = walk->private; + struct vm_area_struct *vma = walk->vma; ++ bool locked = !!(vma->vm_flags & VM_LOCKED); + struct page *page = NULL; + + if (pte_present(*pte)) { +@@ -568,7 +576,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, + if (!page) + return; + +- smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte)); ++ smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked); + } + + #ifdef CONFIG_TRANSPARENT_HUGEPAGE +@@ -577,6 +585,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, + { + struct mem_size_stats *mss = walk->private; + struct vm_area_struct *vma = walk->vma; ++ bool locked = !!(vma->vm_flags & VM_LOCKED); + struct page *page; + + /* FOLL_DUMP will return -EFAULT on huge zero page */ +@@ -591,7 +600,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, + /* pass */; + else + VM_BUG_ON_PAGE(1, page); +- smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd)); ++ smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked); + } + #else + static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, +@@ -792,11 +801,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) + } + } + #endif +- + /* mmap_sem is held in m_start */ + walk_page_vma(vma, &smaps_walk); +- if (vma->vm_flags & VM_LOCKED) +- mss->pss_locked += mss->pss; + + if (!rollup_mode) { + show_map_vma(m, vma, is_pid); +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h +index 8e22f24ded6a..956d76744c91 100644 +--- a/include/linux/perf_event.h ++++ b/include/linux/perf_event.h +@@ -446,6 +446,11 @@ struct pmu { + * Filter events for PMU-specific reasons. + */ + int (*filter_match) (struct perf_event *event); /* optional */ ++ ++ /* ++ * Check period value for PERF_EVENT_IOC_PERIOD ioctl. 
++ */ ++ int (*check_period) (struct perf_event *event, u64 value); /* optional */ + }; + + /** +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h +index 39c2570ddcf6..50a4a5968f3a 100644 +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -3317,6 +3317,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); + void skb_scrub_packet(struct sk_buff *skb, bool xnet); + unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); + bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu); ++bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len); + struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); + struct sk_buff *skb_vlan_untag(struct sk_buff *skb); + int skb_ensure_writable(struct sk_buff *skb, int write_len); +@@ -4087,6 +4088,21 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb) + return hdr_len + skb_gso_transport_seglen(skb); + } + ++/** ++ * skb_gso_mac_seglen - Return length of individual segments of a gso packet ++ * ++ * @skb: GSO skb ++ * ++ * skb_gso_mac_seglen is used to determine the real size of the ++ * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 ++ * headers (TCP/UDP). ++ */ ++static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) ++{ ++ unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); ++ return hdr_len + skb_gso_transport_seglen(skb); ++} ++ + /* Local Checksum Offload. + * Compute outer checksum based on the assumption that the + * inner checksum will be offloaded later. +diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h +index 0812cd5408c9..6e692a52936c 100644 +--- a/include/trace/events/sched.h ++++ b/include/trace/events/sched.h +@@ -107,6 +107,8 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, + #ifdef CREATE_TRACE_POINTS + static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p) + { ++ unsigned int state; ++ + #ifdef CONFIG_SCHED_DEBUG + BUG_ON(p != current); + #endif /* CONFIG_SCHED_DEBUG */ +@@ -118,7 +120,15 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct * + if (preempt) + return TASK_REPORT_MAX; + +- return 1 << __get_task_state(p); ++ /* ++ * task_state_index() uses fls() and returns a value from 0-8 range. ++ * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using ++ * it for left shift operation to get the correct task->state ++ * mapping. ++ */ ++ state = __get_task_state(p); ++ ++ return state ? (1 << (state - 1)) : state; + } + #endif /* CREATE_TRACE_POINTS */ + +diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h +index 244e3213ecb0..1d1157edcf40 100644 +--- a/include/uapi/linux/if_ether.h ++++ b/include/uapi/linux/if_ether.h +@@ -150,11 +150,18 @@ + * This is an Ethernet frame header. + */ + ++/* allow libcs like musl to deactivate this, glibc does not implement this. 
*/ ++#ifndef __UAPI_DEF_ETHHDR ++#define __UAPI_DEF_ETHHDR 1 ++#endif ++ ++#if __UAPI_DEF_ETHHDR + struct ethhdr { + unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ + unsigned char h_source[ETH_ALEN]; /* source ether addr */ + __be16 h_proto; /* packet type ID field */ + } __attribute__((packed)); ++#endif + + + #endif /* _UAPI_LINUX_IF_ETHER_H */ +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 991af683ef9e..17d5d41464c6 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -4738,6 +4738,11 @@ static void __perf_event_period(struct perf_event *event, + } + } + ++static int perf_event_check_period(struct perf_event *event, u64 value) ++{ ++ return event->pmu->check_period(event, value); ++} ++ + static int perf_event_period(struct perf_event *event, u64 __user *arg) + { + u64 value; +@@ -4754,6 +4759,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) + if (event->attr.freq && value > sysctl_perf_event_sample_rate) + return -EINVAL; + ++ if (perf_event_check_period(event, value)) ++ return -EINVAL; ++ + event_function_call(event, __perf_event_period, &value); + + return 0; +@@ -8951,6 +8959,11 @@ static int perf_pmu_nop_int(struct pmu *pmu) + return 0; + } + ++static int perf_event_nop_int(struct perf_event *event, u64 value) ++{ ++ return 0; ++} ++ + static DEFINE_PER_CPU(unsigned int, nop_txn_flags); + + static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) +@@ -9251,6 +9264,9 @@ got_cpu_context: + pmu->pmu_disable = perf_pmu_nop_void; + } + ++ if (!pmu->check_period) ++ pmu->check_period = perf_event_nop_int; ++ + if (!pmu->event_idx) + pmu->event_idx = perf_event_idx_default; + +diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c +index 8b311340b241..489dc6b60053 100644 +--- a/kernel/events/ring_buffer.c ++++ b/kernel/events/ring_buffer.c +@@ -719,7 +719,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags) + size = sizeof(struct ring_buffer); + size += nr_pages * sizeof(void *); + +- if (order_base_2(size) >= MAX_ORDER) ++ if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER) + goto fail; + + rb = kzalloc(size, GFP_KERNEL); +diff --git a/kernel/futex.c b/kernel/futex.c +index abe04a2bb5b9..29d708d0b3d1 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -1166,11 +1166,65 @@ out_error: + return ret; + } + ++static int handle_exit_race(u32 __user *uaddr, u32 uval, ++ struct task_struct *tsk) ++{ ++ u32 uval2; ++ ++ /* ++ * If PF_EXITPIDONE is not yet set, then try again. ++ */ ++ if (tsk && !(tsk->flags & PF_EXITPIDONE)) ++ return -EAGAIN; ++ ++ /* ++ * Reread the user space value to handle the following situation: ++ * ++ * CPU0 CPU1 ++ * ++ * sys_exit() sys_futex() ++ * do_exit() futex_lock_pi() ++ * futex_lock_pi_atomic() ++ * exit_signals(tsk) No waiters: ++ * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID ++ * mm_release(tsk) Set waiter bit ++ * exit_robust_list(tsk) { *uaddr = 0x80000PID; ++ * Set owner died attach_to_pi_owner() { ++ * *uaddr = 0xC0000000; tsk = get_task(PID); ++ * } if (!tsk->flags & PF_EXITING) { ++ * ... attach(); ++ * tsk->flags |= PF_EXITPIDONE; } else { ++ * if (!(tsk->flags & PF_EXITPIDONE)) ++ * return -EAGAIN; ++ * return -ESRCH; <--- FAIL ++ * } ++ * ++ * Returning ESRCH unconditionally is wrong here because the ++ * user space value has been changed by the exiting task. ++ * ++ * The same logic applies to the case where the exiting task is ++ * already gone. 
++ */
++ if (get_futex_value_locked(&uval2, uaddr))
++ return -EFAULT;
++
++ /* If the user space value has changed, try again. */
++ if (uval2 != uval)
++ return -EAGAIN;
++
++ /*
++ * The exiting task did not have a robust list, the robust list was
++ * corrupted or the user space value in *uaddr is simply bogus.
++ * Give up and tell user space.
++ */
++ return -ESRCH;
++}
++
+ /*
+ * Lookup the task for the TID provided from user space and attach to
+ * it after doing proper sanity checks.
+ */
+-static int attach_to_pi_owner(u32 uval, union futex_key *key,
++static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
+ struct futex_pi_state **ps)
+ {
+ pid_t pid = uval & FUTEX_TID_MASK;
+@@ -1180,12 +1234,15 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
+ /*
+ * We are the first waiter - try to look up the real owner and attach
+ * the new pi_state to it, but bail out when TID = 0 [1]
++ *
++ * The !pid check is paranoid. None of the call sites should end up
++ * with pid == 0, but better safe than sorry. Let the caller retry
+ */
+ if (!pid)
+- return -ESRCH;
++ return -EAGAIN;
+ p = futex_find_get_task(pid);
+ if (!p)
+- return -ESRCH;
++ return handle_exit_race(uaddr, uval, NULL);
+
+ if (unlikely(p->flags & PF_KTHREAD)) {
+ put_task_struct(p);
+@@ -1205,7 +1262,7 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
+ * set, we know that the task has finished the
+ * cleanup:
+ */
+- int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
++ int ret = handle_exit_race(uaddr, uval, p);
+
+ raw_spin_unlock_irq(&p->pi_lock);
+ put_task_struct(p);
+@@ -1262,7 +1319,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
+ * We are the first waiter - try to look up the owner based on
+ * @uval and attach to it.
+ */
+- return attach_to_pi_owner(uval, key, ps);
++ return attach_to_pi_owner(uaddr, uval, key, ps);
+ }
+
+ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
+@@ -1370,7 +1427,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
+ * attach to the owner. If that fails, no harm done, we only
+ * set the FUTEX_WAITERS bit in the user space variable.
+ */
+- return attach_to_pi_owner(uval, key, ps);
++ return attach_to_pi_owner(uaddr, newval, key, ps);
+ }
+
+ /**
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 04b3a621b3cc..619c6160f64f 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2268,9 +2268,12 @@ relock:
+ }
+
+ /* Has this task already been marked for death? */
+- ksig->info.si_signo = signr = SIGKILL;
+- if (signal_group_exit(signal))
++ if (signal_group_exit(signal)) {
++ ksig->info.si_signo = signr = SIGKILL;
++ sigdelset(&current->pending.signal, SIGKILL);
++ recalc_sigpending();
+ goto fatal;
++ }
+
+ for (;;) {
+ struct k_sigaction *ka;
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index 86718c85d8d3..fdf2ea4d64ec 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -153,7 +153,14 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
+
+ ret = strncpy_from_user(dst, src, maxlen);
+ if (ret == maxlen)
+- dst[--ret] = '\0';
++ dst[ret - 1] = '\0';
++ else if (ret >= 0)
++ /*
++ * Include the terminating null byte. In this case it
++ * was copied by strncpy_from_user but not accounted
++ * for in ret. 
++ */ ++ ret++; + + if (ret < 0) { /* Failed to fetch string */ + ((u8 *)get_rloc_data(dest))[0] = '\0'; +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index 873032d1a083..6dbd2c54b2c9 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -4930,37 +4930,74 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) + EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); + + /** +- * skb_gso_validate_mtu - Return in case such skb fits a given MTU ++ * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS + * +- * @skb: GSO skb +- * @mtu: MTU to validate against ++ * There are a couple of instances where we have a GSO skb, and we ++ * want to determine what size it would be after it is segmented. + * +- * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU +- * once split. ++ * We might want to check: ++ * - L3+L4+payload size (e.g. IP forwarding) ++ * - L2+L3+L4+payload size (e.g. sanity check before passing to driver) ++ * ++ * This is a helper to do that correctly considering GSO_BY_FRAGS. ++ * ++ * @seg_len: The segmented length (from skb_gso_*_seglen). In the ++ * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS]. ++ * ++ * @max_len: The maximum permissible length. ++ * ++ * Returns true if the segmented length <= max length. + */ +-bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu) +-{ ++static inline bool skb_gso_size_check(const struct sk_buff *skb, ++ unsigned int seg_len, ++ unsigned int max_len) { + const struct skb_shared_info *shinfo = skb_shinfo(skb); + const struct sk_buff *iter; +- unsigned int hlen; +- +- hlen = skb_gso_network_seglen(skb); + + if (shinfo->gso_size != GSO_BY_FRAGS) +- return hlen <= mtu; ++ return seg_len <= max_len; + + /* Undo this so we can re-use header sizes */ +- hlen -= GSO_BY_FRAGS; ++ seg_len -= GSO_BY_FRAGS; + + skb_walk_frags(skb, iter) { +- if (hlen + skb_headlen(iter) > mtu) ++ if (seg_len + skb_headlen(iter) > max_len) + return false; + } + + return true; + } ++ ++/** ++ * skb_gso_validate_mtu - Return in case such skb fits a given MTU ++ * ++ * @skb: GSO skb ++ * @mtu: MTU to validate against ++ * ++ * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU ++ * once split. ++ */ ++bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu) ++{ ++ return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); ++} + EXPORT_SYMBOL_GPL(skb_gso_validate_mtu); + ++/** ++ * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? ++ * ++ * @skb: GSO skb ++ * @len: length to validate against ++ * ++ * skb_gso_validate_mac_len validates if a given skb will fit a wanted ++ * length once split, including L2, L3 and L4 headers and the payload. 
++ */
++bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
++{
++ return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
++}
++EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
++
+ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
+ {
+ int mac_len;
+diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
+index b36ecb58aa6e..107cc76b6e24 100644
+--- a/net/sched/sch_tbf.c
++++ b/net/sched/sch_tbf.c
+@@ -142,16 +142,6 @@ static u64 psched_ns_t2l(const struct psched_ratecfg *r,
+ return len;
+ }
+
+-/*
+- * Return length of individual segments of a gso packet,
+- * including all headers (MAC, IP, TCP/UDP)
+- */
+-static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
+-{
+- unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+- return hdr_len + skb_gso_transport_seglen(skb);
+-}
+-
+ /* GSO packet is too big, segment it so that tbf can transmit
+ * each segment in time
+ */
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index fb1cec46380d..d14516f31679 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -962,6 +962,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
++ SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index d8a46d46bcd2..b1a1eb1f65aa 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -313,6 +313,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
+ return 0;
+ }
+
++/* Setup an implicit feedback endpoint from a quirk. Returns 0 if no quirk
++ * applies. Returns 1 if a quirk was found.
++ */
+ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ struct usb_device *dev,
+ struct usb_interface_descriptor *altsd,
+@@ -391,7 +394,7 @@ add_sync_ep:
+
+ subs->data_endpoint->sync_master = subs->sync_endpoint;
+
+- return 0;
++ return 1;
+ }
+
+ static int set_sync_endpoint(struct snd_usb_substream *subs,
+@@ -430,6 +433,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
+ if (err < 0)
+ return err;
+
++ /* endpoint set by quirk */
++ if (err > 0)
++ return 0;
++
+ if (altsd->bNumEndpoints < 2)
+ return 0;
+
+diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+index 30a950c9d407..068d463e5cbf 100644
+--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
++++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+@@ -13,7 +13,8 @@ add_probe_vfs_getname() {
+ local verbose=$1
+ if [ $had_vfs_getname -eq 1 ] ; then
+ line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
+- perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string"
++ perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
++ perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
+ fi
+ }
+
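
The GSO pieces of this patch (skb_gso_mac_seglen(), skb_gso_size_check(), and the bnx2x features_check hook) all reduce to one comparison: per-segment header bytes plus gso_size measured against a limit. Below is a minimal standalone userspace C sketch of that arithmetic, assuming the common case where gso_size != GSO_BY_FRAGS; struct gso_meta and seg_fits() are invented illustration names, not kernel API, and the frag-list walk is omitted.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the two skb quantities the kernel helpers read. */
struct gso_meta {
	unsigned int hdr_len;	/* MAC+L3+L4 header bytes, cf. skb_gso_mac_seglen() */
	unsigned int gso_size;	/* payload bytes carried by each segment */
};

/* Common-case equivalent of skb_gso_size_check(): does each segment,
 * headers included, stay within max_len? (GSO_BY_FRAGS path omitted.) */
static bool seg_fits(const struct gso_meta *m, unsigned int max_len)
{
	return m->hdr_len + m->gso_size <= max_len;
}

int main(void)
{
	/* bnx2x case: segments over 9700 bytes would panic the firmware. */
	struct gso_meta ok = { .hdr_len = 54, .gso_size = 9000 };
	struct gso_meta bad = { .hdr_len = 754, .gso_size = 9000 };

	printf("ok fits 9700: %d\n", seg_fits(&ok, 9700));	/* 1: 9054 <= 9700 */
	printf("bad fits 9700: %d\n", seg_fits(&bad, 9700));	/* 0: 9754 > 9700 */
	return 0;
}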
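
Likewise, the include/trace/events/sched.h hunk fixes an off-by-one: __get_task_state() yields a 0-based index where 0 means TASK_RUNNING, so the old `1 << state` shifted every reported state one bit too far. The small userspace sketch below shows the corrected mapping; the loop indices are demo inputs, not the kernel's actual state table.

#include <stdio.h>

int main(void)
{
	/* Demo indices standing in for __get_task_state() results:
	 * 0 = TASK_RUNNING, 1 and 2 = the next two state bits. */
	for (unsigned int state = 0; state < 3; state++) {
		long old_map = 1L << state;				/* pre-patch mapping */
		long new_map = state ? 1L << (state - 1) : state;	/* patched mapping */
		printf("index %u -> old %ld, new %ld\n", state, old_map, new_map);
	}
	return 0;
}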