diff options
author | Thomas Deutschmann <whissi@whissi.de> | 2019-12-01 15:08:19 +0100 |
---|---|---|
committer | Thomas Deutschmann <whissi@whissi.de> | 2019-12-01 15:08:19 +0100 |
commit | b2b0eeb65d78aca5c04e40e87175b97bd02e67d8 (patch) | |
tree | 6398bf41f1ca06b69461a55b7d337bad901d8f30 | |
parent | Linux patch 4.14.156 (diff) | |
download | linux-patches-b2b0eeb65d78aca5c04e40e87175b97bd02e67d8.tar.gz linux-patches-b2b0eeb65d78aca5c04e40e87175b97bd02e67d8.tar.bz2 linux-patches-b2b0eeb65d78aca5c04e40e87175b97bd02e67d8.zip |
Linux patch 4.14.157 (tag: 4.14-166)
Signed-off-by: Thomas Deutschmann <whissi@whissi.de>
-rw-r--r-- | 1156_linux-4.14.157.patch | 7579 |
1 files changed, 7579 insertions, 0 deletions
diff --git a/1156_linux-4.14.157.patch b/1156_linux-4.14.157.patch new file mode 100644 index 00000000..ec8cede8 --- /dev/null +++ b/1156_linux-4.14.157.patch @@ -0,0 +1,7579 @@ +diff --git a/Documentation/admin-guide/hw-vuln/mds.rst b/Documentation/admin-guide/hw-vuln/mds.rst +index e3a796c0d3a2..2d19c9f4c1fe 100644 +--- a/Documentation/admin-guide/hw-vuln/mds.rst ++++ b/Documentation/admin-guide/hw-vuln/mds.rst +@@ -265,8 +265,11 @@ time with the option "mds=". The valid arguments for this option are: + + ============ ============================================================= + +-Not specifying this option is equivalent to "mds=full". +- ++Not specifying this option is equivalent to "mds=full". For processors ++that are affected by both TAA (TSX Asynchronous Abort) and MDS, ++specifying just "mds=off" without an accompanying "tsx_async_abort=off" ++will have no effect as the same mitigation is used for both ++vulnerabilities. + + Mitigation selection guide + -------------------------- +diff --git a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst +index fddbd7579c53..af6865b822d2 100644 +--- a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst ++++ b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst +@@ -174,7 +174,10 @@ the option "tsx_async_abort=". The valid arguments for this option are: + CPU is not vulnerable to cross-thread TAA attacks. + ============ ============================================================= + +-Not specifying this option is equivalent to "tsx_async_abort=full". ++Not specifying this option is equivalent to "tsx_async_abort=full". For ++processors that are affected by both TAA and MDS, specifying just ++"tsx_async_abort=off" without an accompanying "mds=off" will have no ++effect as the same mitigation is used for both vulnerabilities. + + The kernel command line also allows to control the TSX feature using the + parameter "tsx=" on CPUs which support TSX control. 
MSR_IA32_TSX_CTRL is used +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index 05596e05bc71..b0da6050a254 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2254,6 +2254,12 @@ + SMT on vulnerable CPUs + off - Unconditionally disable MDS mitigation + ++ On TAA-affected machines, mds=off can be prevented by ++ an active TAA mitigation as both vulnerabilities are ++ mitigated with the same mechanism so in order to disable ++ this mitigation, you need to specify tsx_async_abort=off ++ too. ++ + Not specifying this option is equivalent to + mds=full. + +@@ -4588,6 +4594,11 @@ + vulnerable to cross-thread TAA attacks. + off - Unconditionally disable TAA mitigation + ++ On MDS-affected machines, tsx_async_abort=off can be ++ prevented by an active MDS mitigation as both vulnerabilities ++ are mitigated with the same mechanism so in order to disable ++ this mitigation, you need to specify mds=off too. ++ + Not specifying this option is equivalent to + tsx_async_abort=full. 
On CPUs which are MDS affected + and deploy MDS mitigation, TAA mitigation is not +diff --git a/Makefile b/Makefile +index b1db48ad832e..dad90f53faeb 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 14 +-SUBLEVEL = 156 ++SUBLEVEL = 157 + EXTRAVERSION = + NAME = Petit Gorille + +diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c +index 2ce24e74f879..a509b77ef80d 100644 +--- a/arch/arc/kernel/perf_event.c ++++ b/arch/arc/kernel/perf_event.c +@@ -488,8 +488,8 @@ static int arc_pmu_device_probe(struct platform_device *pdev) + /* loop thru all available h/w condition indexes */ + for (j = 0; j < cc_bcr.c; j++) { + write_aux_reg(ARC_REG_CC_INDEX, j); +- cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0); +- cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1); ++ cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0)); ++ cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1)); + + /* See if it has been mapped to a perf event_id */ + for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) { +diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c +index 70e560cf8ca0..d8cbe772f690 100644 +--- a/arch/arm/mm/mmu.c ++++ b/arch/arm/mm/mmu.c +@@ -1195,6 +1195,9 @@ void __init adjust_lowmem_bounds(void) + phys_addr_t block_start = reg->base; + phys_addr_t block_end = reg->base + reg->size; + ++ if (memblock_is_nomap(reg)) ++ continue; ++ + if (reg->base < vmalloc_limit) { + if (block_end > lowmem_limit) + /* +diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile +index 0c5f70e6d5cf..8c4bc5a2c61f 100644 +--- a/arch/arm64/Makefile ++++ b/arch/arm64/Makefile +@@ -149,6 +149,7 @@ archclean: + $(Q)$(MAKE) $(clean)=$(boot) + $(Q)$(MAKE) $(clean)=$(boot)/dts + ++ifeq ($(KBUILD_EXTMOD),) + # We need to generate vdso-offsets.h before compiling certain files in kernel/. 
+ # In order to do that, we should use the archprepare target, but we can't since + # asm-offsets.h is included in some files used to generate vdso-offsets.h, and +@@ -158,6 +159,7 @@ archclean: + prepare: vdso_prepare + vdso_prepare: prepare0 + $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h ++endif + + define archhelp + echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)' +diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h +index dd95d33a5bd5..03a6c256b7ec 100644 +--- a/arch/arm64/include/asm/string.h ++++ b/arch/arm64/include/asm/string.h +@@ -16,6 +16,7 @@ + #ifndef __ASM_STRING_H + #define __ASM_STRING_H + ++#ifndef CONFIG_KASAN + #define __HAVE_ARCH_STRRCHR + extern char *strrchr(const char *, int c); + +@@ -34,6 +35,13 @@ extern __kernel_size_t strlen(const char *); + #define __HAVE_ARCH_STRNLEN + extern __kernel_size_t strnlen(const char *, __kernel_size_t); + ++#define __HAVE_ARCH_MEMCMP ++extern int memcmp(const void *, const void *, size_t); ++ ++#define __HAVE_ARCH_MEMCHR ++extern void *memchr(const void *, int, __kernel_size_t); ++#endif ++ + #define __HAVE_ARCH_MEMCPY + extern void *memcpy(void *, const void *, __kernel_size_t); + extern void *__memcpy(void *, const void *, __kernel_size_t); +@@ -42,16 +50,10 @@ extern void *__memcpy(void *, const void *, __kernel_size_t); + extern void *memmove(void *, const void *, __kernel_size_t); + extern void *__memmove(void *, const void *, __kernel_size_t); + +-#define __HAVE_ARCH_MEMCHR +-extern void *memchr(const void *, int, __kernel_size_t); +- + #define __HAVE_ARCH_MEMSET + extern void *memset(void *, int, __kernel_size_t); + extern void *__memset(void *, int, __kernel_size_t); + +-#define __HAVE_ARCH_MEMCMP +-extern int memcmp(const void *, const void *, size_t); +- + #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE + #define __HAVE_ARCH_MEMCPY_FLUSHCACHE + void memcpy_flushcache(void *dst, const void *src, size_t cnt); +diff --git 
a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c +index 66be504edb6c..9eedf839e739 100644 +--- a/arch/arm64/kernel/arm64ksyms.c ++++ b/arch/arm64/kernel/arm64ksyms.c +@@ -44,20 +44,23 @@ EXPORT_SYMBOL(__arch_copy_in_user); + EXPORT_SYMBOL(memstart_addr); + + /* string / mem functions */ ++#ifndef CONFIG_KASAN + EXPORT_SYMBOL(strchr); + EXPORT_SYMBOL(strrchr); + EXPORT_SYMBOL(strcmp); + EXPORT_SYMBOL(strncmp); + EXPORT_SYMBOL(strlen); + EXPORT_SYMBOL(strnlen); ++EXPORT_SYMBOL(memcmp); ++EXPORT_SYMBOL(memchr); ++#endif ++ + EXPORT_SYMBOL(memset); + EXPORT_SYMBOL(memcpy); + EXPORT_SYMBOL(memmove); + EXPORT_SYMBOL(__memset); + EXPORT_SYMBOL(__memcpy); + EXPORT_SYMBOL(__memmove); +-EXPORT_SYMBOL(memchr); +-EXPORT_SYMBOL(memcmp); + + /* atomic bitops */ + EXPORT_SYMBOL(set_bit); +diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c +index a4e49e947684..5ae9c86c30d1 100644 +--- a/arch/arm64/kernel/traps.c ++++ b/arch/arm64/kernel/traps.c +@@ -648,7 +648,6 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) + handler[reason], smp_processor_id(), esr, + esr_get_class_string(esr)); + +- die("Oops - bad mode", regs, 0); + local_irq_disable(); + panic("bad mode"); + } +diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S +index 4444c1d25f4b..0f164a4baf52 100644 +--- a/arch/arm64/lib/memchr.S ++++ b/arch/arm64/lib/memchr.S +@@ -30,7 +30,7 @@ + * Returns: + * x0 - address of first occurrence of 'c' or 0 + */ +-ENTRY(memchr) ++WEAK(memchr) + and w1, w1, #0xff + 1: subs x2, x2, #1 + b.mi 2f +diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S +index 2a4e239bd17a..fb295f52e9f8 100644 +--- a/arch/arm64/lib/memcmp.S ++++ b/arch/arm64/lib/memcmp.S +@@ -58,7 +58,7 @@ pos .req x11 + limit_wd .req x12 + mask .req x13 + +-ENTRY(memcmp) ++WEAK(memcmp) + cbz limit, .Lret0 + eor tmp1, src1, src2 + tst tmp1, #7 +diff --git a/arch/arm64/lib/strchr.S b/arch/arm64/lib/strchr.S +index dae0cf5591f9..7c83091d1bcd 100644 
+--- a/arch/arm64/lib/strchr.S ++++ b/arch/arm64/lib/strchr.S +@@ -29,7 +29,7 @@ + * Returns: + * x0 - address of first occurrence of 'c' or 0 + */ +-ENTRY(strchr) ++WEAK(strchr) + and w1, w1, #0xff + 1: ldrb w2, [x0], #1 + cmp w2, w1 +diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S +index 471fe61760ef..7d5d15398bfb 100644 +--- a/arch/arm64/lib/strcmp.S ++++ b/arch/arm64/lib/strcmp.S +@@ -60,7 +60,7 @@ tmp3 .req x9 + zeroones .req x10 + pos .req x11 + +-ENTRY(strcmp) ++WEAK(strcmp) + eor tmp1, src1, src2 + mov zeroones, #REP8_01 + tst tmp1, #7 +diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S +index 55ccc8e24c08..8e0b14205dcb 100644 +--- a/arch/arm64/lib/strlen.S ++++ b/arch/arm64/lib/strlen.S +@@ -56,7 +56,7 @@ pos .req x12 + #define REP8_7f 0x7f7f7f7f7f7f7f7f + #define REP8_80 0x8080808080808080 + +-ENTRY(strlen) ++WEAK(strlen) + mov zeroones, #REP8_01 + bic src, srcin, #15 + ands tmp1, srcin, #15 +diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S +index e267044761c6..66bd145935d9 100644 +--- a/arch/arm64/lib/strncmp.S ++++ b/arch/arm64/lib/strncmp.S +@@ -64,7 +64,7 @@ limit_wd .req x13 + mask .req x14 + endloop .req x15 + +-ENTRY(strncmp) ++WEAK(strncmp) + cbz limit, .Lret0 + eor tmp1, src1, src2 + mov zeroones, #REP8_01 +diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S +index eae38da6e0bb..355be04441fe 100644 +--- a/arch/arm64/lib/strnlen.S ++++ b/arch/arm64/lib/strnlen.S +@@ -59,7 +59,7 @@ limit_wd .req x14 + #define REP8_7f 0x7f7f7f7f7f7f7f7f + #define REP8_80 0x8080808080808080 + +-ENTRY(strnlen) ++WEAK(strnlen) + cbz limit, .Lhit_limit + mov zeroones, #REP8_01 + bic src, srcin, #15 +diff --git a/arch/arm64/lib/strrchr.S b/arch/arm64/lib/strrchr.S +index 61eabd9a289a..f3b9f8e2917c 100644 +--- a/arch/arm64/lib/strrchr.S ++++ b/arch/arm64/lib/strrchr.S +@@ -29,7 +29,7 @@ + * Returns: + * x0 - address of last occurrence of 'c' or 0 + */ +-ENTRY(strrchr) ++WEAK(strrchr) + mov x3, #0 + and w1, w1, #0xff 
+ 1: ldrb w2, [x0], #1 +diff --git a/arch/m68k/kernel/uboot.c b/arch/m68k/kernel/uboot.c +index b29c3b241e1b..107082877064 100644 +--- a/arch/m68k/kernel/uboot.c ++++ b/arch/m68k/kernel/uboot.c +@@ -102,5 +102,5 @@ __init void process_uboot_commandline(char *commandp, int size) + } + + parse_uboot_commandline(commandp, len); +- commandp[size - 1] = 0; ++ commandp[len - 1] = 0; + } +diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile +index e2a5a932c24a..5807c9d8e56d 100644 +--- a/arch/powerpc/boot/Makefile ++++ b/arch/powerpc/boot/Makefile +@@ -24,8 +24,8 @@ compress-$(CONFIG_KERNEL_GZIP) := CONFIG_KERNEL_GZIP + compress-$(CONFIG_KERNEL_XZ) := CONFIG_KERNEL_XZ + + BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ +- -fno-strict-aliasing -Os -msoft-float -pipe \ +- -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ ++ -fno-strict-aliasing -Os -msoft-float -mno-altivec -mno-vsx \ ++ -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ + -D$(compress-y) + + BOOTCC := $(CC) +diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h +index ba4c75062d49..2d4444981c2c 100644 +--- a/arch/powerpc/include/asm/asm-prototypes.h ++++ b/arch/powerpc/include/asm/asm-prototypes.h +@@ -129,7 +129,10 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip); + /* Patch sites */ + extern s32 patch__call_flush_count_cache; + extern s32 patch__flush_count_cache_return; ++extern s32 patch__flush_link_stack_return; ++extern s32 patch__call_kvm_flush_link_stack; + + extern long flush_count_cache; ++extern long kvm_flush_link_stack; + + #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ +diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h +index 759597bf0fd8..ccf44c135389 100644 +--- a/arch/powerpc/include/asm/security_features.h ++++ b/arch/powerpc/include/asm/security_features.h +@@ -81,6 +81,9 @@ static inline bool security_ftr_enabled(unsigned 
long feature) + // Software required to flush count cache on context switch + #define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull + ++// Software required to flush link stack on context switch ++#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull ++ + + // Features enabled by default + #define SEC_FTR_DEFAULT \ +diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c +index 8545a9523b9b..7339ca4fdc19 100644 +--- a/arch/powerpc/kernel/eeh_pe.c ++++ b/arch/powerpc/kernel/eeh_pe.c +@@ -381,7 +381,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) + while (parent) { + if (!(parent->type & EEH_PE_INVALID)) + break; +- parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP); ++ parent->type &= ~EEH_PE_INVALID; + parent = parent->parent; + } + +diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S +index 12395895b9aa..02a0bf52aec0 100644 +--- a/arch/powerpc/kernel/entry_64.S ++++ b/arch/powerpc/kernel/entry_64.S +@@ -524,6 +524,7 @@ flush_count_cache: + /* Save LR into r9 */ + mflr r9 + ++ // Flush the link stack + .rept 64 + bl .+4 + .endr +@@ -533,6 +534,11 @@ flush_count_cache: + .balign 32 + /* Restore LR */ + 1: mtlr r9 ++ ++ // If we're just flushing the link stack, return here ++3: nop ++ patch_site 3b patch__flush_link_stack_return ++ + li r9,0x7fff + mtctr r9 + +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c +index 5e5da2073fdf..ba0d4f9a99ba 100644 +--- a/arch/powerpc/kernel/process.c ++++ b/arch/powerpc/kernel/process.c +@@ -567,12 +567,11 @@ void flush_all_to_thread(struct task_struct *tsk) + if (tsk->thread.regs) { + preempt_disable(); + BUG_ON(tsk != current); +- save_all(tsk); +- + #ifdef CONFIG_SPE + if (tsk->thread.regs->msr & MSR_SPE) + tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); + #endif ++ save_all(tsk); + + preempt_enable(); + } +diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c +index 68d4ec373cfc..f5d6541bf8c2 100644 +--- a/arch/powerpc/kernel/security.c 
++++ b/arch/powerpc/kernel/security.c +@@ -24,11 +24,12 @@ enum count_cache_flush_type { + COUNT_CACHE_FLUSH_HW = 0x4, + }; + static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; ++static bool link_stack_flush_enabled; + + bool barrier_nospec_enabled; + static bool no_nospec; + static bool btb_flush_enabled; +-#ifdef CONFIG_PPC_FSL_BOOK3E ++#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) + static bool no_spectrev2; + #endif + +@@ -106,7 +107,7 @@ static __init int barrier_nospec_debugfs_init(void) + device_initcall(barrier_nospec_debugfs_init); + #endif /* CONFIG_DEBUG_FS */ + +-#ifdef CONFIG_PPC_FSL_BOOK3E ++#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) + static int __init handle_nospectre_v2(char *p) + { + no_spectrev2 = true; +@@ -114,6 +115,9 @@ static int __init handle_nospectre_v2(char *p) + return 0; + } + early_param("nospectre_v2", handle_nospectre_v2); ++#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */ ++ ++#ifdef CONFIG_PPC_FSL_BOOK3E + void setup_spectre_v2(void) + { + if (no_spectrev2 || cpu_mitigations_off()) +@@ -201,11 +205,19 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c + + if (ccd) + seq_buf_printf(&s, "Indirect branch cache disabled"); ++ ++ if (link_stack_flush_enabled) ++ seq_buf_printf(&s, ", Software link stack flush"); ++ + } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) { + seq_buf_printf(&s, "Mitigation: Software count cache flush"); + + if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW) + seq_buf_printf(&s, " (hardware accelerated)"); ++ ++ if (link_stack_flush_enabled) ++ seq_buf_printf(&s, ", Software link stack flush"); ++ + } else if (btb_flush_enabled) { + seq_buf_printf(&s, "Mitigation: Branch predictor state flush"); + } else { +@@ -366,18 +378,49 @@ static __init int stf_barrier_debugfs_init(void) + device_initcall(stf_barrier_debugfs_init); + #endif /* CONFIG_DEBUG_FS */ + ++static void 
no_count_cache_flush(void) ++{ ++ count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; ++ pr_info("count-cache-flush: software flush disabled.\n"); ++} ++ + static void toggle_count_cache_flush(bool enable) + { +- if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { ++ if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) && ++ !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) ++ enable = false; ++ ++ if (!enable) { + patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP); +- count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; +- pr_info("count-cache-flush: software flush disabled.\n"); ++#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE ++ patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP); ++#endif ++ pr_info("link-stack-flush: software flush disabled.\n"); ++ link_stack_flush_enabled = false; ++ no_count_cache_flush(); + return; + } + ++ // This enables the branch from _switch to flush_count_cache + patch_branch_site(&patch__call_flush_count_cache, + (u64)&flush_count_cache, BRANCH_SET_LINK); + ++#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE ++ // This enables the branch from guest_exit_cont to kvm_flush_link_stack ++ patch_branch_site(&patch__call_kvm_flush_link_stack, ++ (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); ++#endif ++ ++ pr_info("link-stack-flush: software flush enabled.\n"); ++ link_stack_flush_enabled = true; ++ ++ // If we just need to flush the link stack, patch an early return ++ if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { ++ patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR); ++ no_count_cache_flush(); ++ return; ++ } ++ + if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { + count_cache_flush_type = COUNT_CACHE_FLUSH_SW; + pr_info("count-cache-flush: full software flush sequence enabled.\n"); +@@ -391,7 +434,26 @@ static void toggle_count_cache_flush(bool enable) + + void setup_count_cache_flush(void) + { +- toggle_count_cache_flush(true); ++ bool enable = true; ++ ++ if (no_spectrev2 || 
cpu_mitigations_off()) { ++ if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) || ++ security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED)) ++ pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n"); ++ ++ enable = false; ++ } ++ ++ /* ++ * There's no firmware feature flag/hypervisor bit to tell us we need to ++ * flush the link stack on context switch. So we set it here if we see ++ * either of the Spectre v2 mitigations that aim to protect userspace. ++ */ ++ if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) || ++ security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) ++ security_ftr_set(SEC_FTR_FLUSH_LINK_STACK); ++ ++ toggle_count_cache_flush(enable); + } + + #ifdef CONFIG_DEBUG_FS +diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S +index 663a398449b7..46ea42f40334 100644 +--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S ++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S +@@ -18,6 +18,7 @@ + */ + + #include <asm/ppc_asm.h> ++#include <asm/code-patching-asm.h> + #include <asm/kvm_asm.h> + #include <asm/reg.h> + #include <asm/mmu.h> +@@ -1445,6 +1446,10 @@ mc_cont: + 1: + #endif /* CONFIG_KVM_XICS */ + ++ /* Possibly flush the link stack here. */ ++1: nop ++ patch_site 1b patch__call_kvm_flush_link_stack ++ + stw r12, STACK_SLOT_TRAP(r1) + mr r3, r12 + /* Increment exit count, poke other threads to exit */ +@@ -1957,6 +1962,28 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) + mtlr r0 + blr + ++.balign 32 ++.global kvm_flush_link_stack ++kvm_flush_link_stack: ++ /* Save LR into r0 */ ++ mflr r0 ++ ++ /* Flush the link stack. On Power8 it's up to 32 entries in size. */ ++ .rept 32 ++ bl .+4 ++ .endr ++ ++ /* And on Power9 it's up to 64. */ ++BEGIN_FTR_SECTION ++ .rept 32 ++ bl .+4 ++ .endr ++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) ++ ++ /* Restore LR */ ++ mtlr r0 ++ blr ++ + /* + * Check whether an HDSI is an HPTE not found fault or something else. 
+ * If it is an HPTE not found fault that is due to the guest accessing +diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c +index 3db53e8aff92..9b2ef76578f0 100644 +--- a/arch/powerpc/platforms/ps3/os-area.c ++++ b/arch/powerpc/platforms/ps3/os-area.c +@@ -664,7 +664,7 @@ static int update_flash_db(void) + db_set_64(db, &os_area_db_id_rtc_diff, saved_params.rtc_diff); + + count = os_area_flash_write(db, sizeof(struct os_area_db), pos); +- if (count < sizeof(struct os_area_db)) { ++ if (count < 0 || count < sizeof(struct os_area_db)) { + pr_debug("%s: os_area_flash_write failed %zd\n", __func__, + count); + error = count < 0 ? count : -EIO; +diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c +index 93e09f108ca1..99a3cf51c5ba 100644 +--- a/arch/powerpc/platforms/pseries/hotplug-memory.c ++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c +@@ -787,7 +787,7 @@ static int dlpar_add_lmb(struct of_drconf_cell *lmb) + nid = memory_add_physaddr_to_nid(lmb->base_addr); + + /* Add the memory */ +- rc = add_memory(nid, lmb->base_addr, block_sz); ++ rc = __add_memory(nid, lmb->base_addr, block_sz); + if (rc) { + dlpar_remove_device_tree_lmb(lmb); + return rc; +diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c +index eb738ef57792..c0ae3847b8db 100644 +--- a/arch/powerpc/platforms/pseries/lpar.c ++++ b/arch/powerpc/platforms/pseries/lpar.c +@@ -48,6 +48,7 @@ + #include <asm/kexec.h> + #include <asm/fadump.h> + #include <asm/asm-prototypes.h> ++#include <asm/debugfs.h> + + #include "pseries.h" + +@@ -1036,3 +1037,56 @@ static int __init reserve_vrma_context_id(void) + return 0; + } + machine_device_initcall(pseries, reserve_vrma_context_id); ++ ++#ifdef CONFIG_DEBUG_FS ++/* debugfs file interface for vpa data */ ++static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len, ++ loff_t *pos) ++{ ++ int cpu = 
(long)filp->private_data; ++ struct lppaca *lppaca = &lppaca_of(cpu); ++ ++ return simple_read_from_buffer(buf, len, pos, lppaca, ++ sizeof(struct lppaca)); ++} ++ ++static const struct file_operations vpa_fops = { ++ .open = simple_open, ++ .read = vpa_file_read, ++ .llseek = default_llseek, ++}; ++ ++static int __init vpa_debugfs_init(void) ++{ ++ char name[16]; ++ long i; ++ static struct dentry *vpa_dir; ++ ++ if (!firmware_has_feature(FW_FEATURE_SPLPAR)) ++ return 0; ++ ++ vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root); ++ if (!vpa_dir) { ++ pr_warn("%s: can't create vpa root dir\n", __func__); ++ return -ENOMEM; ++ } ++ ++ /* set up the per-cpu vpa file*/ ++ for_each_possible_cpu(i) { ++ struct dentry *d; ++ ++ sprintf(name, "cpu-%ld", i); ++ ++ d = debugfs_create_file(name, 0400, vpa_dir, (void *)i, ++ &vpa_fops); ++ if (!d) { ++ pr_warn("%s: can't create per-cpu vpa file\n", ++ __func__); ++ return -ENOMEM; ++ } ++ } ++ ++ return 0; ++} ++machine_arch_initcall(pseries, vpa_debugfs_init); ++#endif /* CONFIG_DEBUG_FS */ +diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile +index 549e99e71112..ac5ee067aa51 100644 +--- a/arch/powerpc/xmon/Makefile ++++ b/arch/powerpc/xmon/Makefile +@@ -13,6 +13,12 @@ UBSAN_SANITIZE := n + ORIG_CFLAGS := $(KBUILD_CFLAGS) + KBUILD_CFLAGS = $(subst -mno-sched-epilog,,$(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))) + ++ifdef CONFIG_CC_IS_CLANG ++# clang stores addresses on the stack causing the frame size to blow ++# out. 
See https://github.com/ClangBuiltLinux/linux/issues/252 ++KBUILD_CFLAGS += -Wframe-larger-than=4096 ++endif ++ + ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) + + obj-y += xmon.o nonstdio.o spr_access.o +diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c +index d99155793c26..2e2fd9535f86 100644 +--- a/arch/s390/kernel/perf_cpum_sf.c ++++ b/arch/s390/kernel/perf_cpum_sf.c +@@ -1610,14 +1610,17 @@ static int __init init_cpum_sampling_pmu(void) + } + + sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); +- if (!sfdbg) ++ if (!sfdbg) { + pr_err("Registering for s390dbf failed\n"); ++ return -ENOMEM; ++ } + debug_register_view(sfdbg, &debug_sprintf_view); + + err = register_external_irq(EXT_IRQ_MEASURE_ALERT, + cpumf_measurement_alert); + if (err) { + pr_cpumsf_err(RS_INIT_FAILURE_ALRT); ++ debug_unregister(sfdbg); + goto out; + } + +@@ -1626,6 +1629,7 @@ static int __init init_cpum_sampling_pmu(void) + pr_cpumsf_err(RS_INIT_FAILURE_PERF); + unregister_external_irq(EXT_IRQ_MEASURE_ALERT, + cpumf_measurement_alert); ++ debug_unregister(sfdbg); + goto out; + } + +diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h +index f71ef3729888..316faa0130ba 100644 +--- a/arch/sparc/include/asm/cmpxchg_64.h ++++ b/arch/sparc/include/asm/cmpxchg_64.h +@@ -52,7 +52,12 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long + return val; + } + +-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) ++#define xchg(ptr,x) \ ++({ __typeof__(*(ptr)) __ret; \ ++ __ret = (__typeof__(*(ptr))) \ ++ __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \ ++ __ret; \ ++}) + + void __xchg_called_with_bad_pointer(void); + +diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h +index 05df5f043053..3c5a1c620f0f 100644 +--- a/arch/sparc/include/asm/parport.h ++++ b/arch/sparc/include/asm/parport.h +@@ -21,6 +21,7 @@ + */ + #define HAS_DMA + ++#ifdef 
CONFIG_PARPORT_PC_FIFO + static DEFINE_SPINLOCK(dma_spin_lock); + + #define claim_dma_lock() \ +@@ -31,6 +32,7 @@ static DEFINE_SPINLOCK(dma_spin_lock); + + #define release_dma_lock(__flags) \ + spin_unlock_irqrestore(&dma_spin_lock, __flags); ++#endif + + static struct sparc_ebus_info { + struct ebus_dma_info info; +diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c +index 366e57f5e8d6..7e524efed584 100644 +--- a/arch/um/drivers/line.c ++++ b/arch/um/drivers/line.c +@@ -261,7 +261,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data) + if (err == 0) { + spin_unlock(&line->lock); + return IRQ_NONE; +- } else if (err < 0) { ++ } else if ((err < 0) && (err != -EAGAIN)) { + line->head = line->buffer; + line->tail = line->buffer; + } +diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c +index 2653b7b25d17..924fa9c07368 100644 +--- a/arch/x86/hyperv/hv_init.c ++++ b/arch/x86/hyperv/hv_init.c +@@ -125,7 +125,7 @@ static int __init hv_pci_init(void) + * 1. Setup the hypercall page. + * 2. Register Hyper-V specific clocksource. + */ +-void hyperv_init(void) ++void __init hyperv_init(void) + { + u64 guest_id, required_msrs; + union hv_x64_msr_hypercall_contents hypercall_msr; +diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h +index 14131dd06b29..8603d127f73c 100644 +--- a/arch/x86/include/asm/ptrace.h ++++ b/arch/x86/include/asm/ptrace.h +@@ -231,24 +231,52 @@ static inline int regs_within_kernel_stack(struct pt_regs *regs, + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); + } + ++/** ++ * regs_get_kernel_stack_nth_addr() - get the address of the Nth entry on stack ++ * @regs: pt_regs which contains kernel stack pointer. ++ * @n: stack entry number. ++ * ++ * regs_get_kernel_stack_nth() returns the address of the @n th entry of the ++ * kernel stack which is specified by @regs. If the @n th entry is NOT in ++ * the kernel stack, this returns NULL. 
++ */ ++static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n) ++{ ++ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); ++ ++ addr += n; ++ if (regs_within_kernel_stack(regs, (unsigned long)addr)) ++ return addr; ++ else ++ return NULL; ++} ++ ++/* To avoid include hell, we can't include uaccess.h */ ++extern long probe_kernel_read(void *dst, const void *src, size_t size); ++ + /** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which +- * is specified by @regs. If the @n th entry is NOT in the kernel stack, ++ * is specified by @regs. If the @n th entry is NOT in the kernel stack + * this returns 0. + */ + static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n) + { +- unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); +- addr += n; +- if (regs_within_kernel_stack(regs, (unsigned long)addr)) +- return *addr; +- else +- return 0; ++ unsigned long *addr; ++ unsigned long val; ++ long ret; ++ ++ addr = regs_get_kernel_stack_nth_addr(regs, n); ++ if (addr) { ++ ret = probe_kernel_read(&val, addr, sizeof(val)); ++ if (!ret) ++ return val; ++ } ++ return 0; + } + + #define arch_has_single_step() (1) +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 8596811843cc..7896a34f53b5 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -39,6 +39,7 @@ static void __init spectre_v2_select_mitigation(void); + static void __init ssb_select_mitigation(void); + static void __init l1tf_select_mitigation(void); + static void __init mds_select_mitigation(void); ++static void __init mds_print_mitigation(void); + static void __init taa_select_mitigation(void); + + /* The base value of the SPEC_CTRL MSR that always has to be preserved. 
*/ +@@ -108,6 +109,12 @@ void __init check_bugs(void) + mds_select_mitigation(); + taa_select_mitigation(); + ++ /* ++ * As MDS and TAA mitigations are inter-related, print MDS ++ * mitigation until after TAA mitigation selection is done. ++ */ ++ mds_print_mitigation(); ++ + arch_smt_update(); + + #ifdef CONFIG_X86_32 +@@ -245,6 +252,12 @@ static void __init mds_select_mitigation(void) + (mds_nosmt || cpu_mitigations_auto_nosmt())) + cpu_smt_disable(false); + } ++} ++ ++static void __init mds_print_mitigation(void) ++{ ++ if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) ++ return; + + pr_info("%s\n", mds_strings[mds_mitigation]); + } +@@ -304,8 +317,12 @@ static void __init taa_select_mitigation(void) + return; + } + +- /* TAA mitigation is turned off on the cmdline (tsx_async_abort=off) */ +- if (taa_mitigation == TAA_MITIGATION_OFF) ++ /* ++ * TAA mitigation via VERW is turned off if both ++ * tsx_async_abort=off and mds=off are specified. ++ */ ++ if (taa_mitigation == TAA_MITIGATION_OFF && ++ mds_mitigation == MDS_MITIGATION_OFF) + goto out; + + if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) +@@ -339,6 +356,15 @@ static void __init taa_select_mitigation(void) + if (taa_nosmt || cpu_mitigations_auto_nosmt()) + cpu_smt_disable(false); + ++ /* ++ * Update MDS mitigation, if necessary, as the mds_user_clear is ++ * now enabled for TAA mitigation. ++ */ ++ if (mds_mitigation == MDS_MITIGATION_OFF && ++ boot_cpu_has_bug(X86_BUG_MDS)) { ++ mds_mitigation = MDS_MITIGATION_FULL; ++ mds_select_mitigation(); ++ } + out: + pr_info("%s\n", taa_strings[taa_mitigation]); + } +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c +index 8cd26e50d41c..c0b0135ef07f 100644 +--- a/arch/x86/kvm/mmu.c ++++ b/arch/x86/kvm/mmu.c +@@ -3177,7 +3177,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, + * here. 
+ */ + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) && +- level == PT_PAGE_TABLE_LEVEL && ++ !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL && + PageTransCompoundMap(pfn_to_page(pfn)) && + !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) { + unsigned long mask; +@@ -5344,9 +5344,9 @@ restart: + * the guest, and the guest page table is using 4K page size + * mapping if the indirect sp has level = 1. + */ +- if (sp->role.direct && +- !kvm_is_reserved_pfn(pfn) && +- PageTransCompoundMap(pfn_to_page(pfn))) { ++ if (sp->role.direct && !kvm_is_reserved_pfn(pfn) && ++ !kvm_is_zone_device_pfn(pfn) && ++ PageTransCompoundMap(pfn_to_page(pfn))) { + drop_spte(kvm, sptep); + need_tlb_flush = 1; + goto restart; +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index ab6384efc791..f67fc0f359ff 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -1602,7 +1602,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) + return -1; + } + +-static inline void __invvpid(int ext, u16 vpid, gva_t gva) ++static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva) + { + struct { + u64 vpid : 16; +@@ -1616,7 +1616,7 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva) + : : "a"(&operand), "c"(ext) : "cc", "memory"); + } + +-static inline void __invept(int ext, u64 eptp, gpa_t gpa) ++static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa) + { + struct { + u64 eptp, gpa; +@@ -10000,6 +10000,10 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) + vmx_vcpu_load(vcpu, cpu); + vcpu->cpu = cpu; + put_cpu(); ++ ++ vm_entry_controls_reset_shadow(vmx); ++ vm_exit_controls_reset_shadow(vmx); ++ vmx_segment_cache_clear(vmx); + } + + /* +@@ -11428,7 +11432,6 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) + vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); + + vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); +- vmx_segment_cache_clear(vmx); + 
+ if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) { + leave_guest_mode(vcpu); +@@ -12172,9 +12175,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, + } + + vmx_switch_vmcs(vcpu, &vmx->vmcs01); +- vm_entry_controls_reset_shadow(vmx); +- vm_exit_controls_reset_shadow(vmx); +- vmx_segment_cache_clear(vmx); + + /* Update any VMCS fields that might have changed while L2 ran */ + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); +diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk +index b02a36b2c14f..a42015b305f4 100644 +--- a/arch/x86/tools/gen-insn-attr-x86.awk ++++ b/arch/x86/tools/gen-insn-attr-x86.awk +@@ -69,7 +69,7 @@ BEGIN { + + lprefix1_expr = "\\((66|!F3)\\)" + lprefix2_expr = "\\(F3\\)" +- lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)" ++ lprefix3_expr = "\\((F2|!F3|66&F2)\\)" + lprefix_expr = "\\((66|F2|F3)\\)" + max_lprefix = 4 + +@@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod) + return add_flags(imm, mod) + } + +-/^[0-9a-f]+\:/ { ++/^[0-9a-f]+:/ { + if (NR == 1) + next + # get index +diff --git a/block/blk-merge.c b/block/blk-merge.c +index 8d60a5bbcef9..94650cdf2924 100644 +--- a/block/blk-merge.c ++++ b/block/blk-merge.c +@@ -659,6 +659,31 @@ static void blk_account_io_merge(struct request *req) + part_stat_unlock(); + } + } ++/* ++ * Two cases of handling DISCARD merge: ++ * If max_discard_segments > 1, the driver takes every bio ++ * as a range and send them to controller together. The ranges ++ * needn't to be contiguous. ++ * Otherwise, the bios/requests will be handled as same as ++ * others which should be contiguous. 
++ */ ++static inline bool blk_discard_mergable(struct request *req) ++{ ++ if (req_op(req) == REQ_OP_DISCARD && ++ queue_max_discard_segments(req->q) > 1) ++ return true; ++ return false; ++} ++ ++enum elv_merge blk_try_req_merge(struct request *req, struct request *next) ++{ ++ if (blk_discard_mergable(req)) ++ return ELEVATOR_DISCARD_MERGE; ++ else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next)) ++ return ELEVATOR_BACK_MERGE; ++ ++ return ELEVATOR_NO_MERGE; ++} + + /* + * For non-mq, this has to be called with the request spinlock acquired. +@@ -676,12 +701,6 @@ static struct request *attempt_merge(struct request_queue *q, + if (req_op(req) != req_op(next)) + return NULL; + +- /* +- * not contiguous +- */ +- if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) +- return NULL; +- + if (rq_data_dir(req) != rq_data_dir(next) + || req->rq_disk != next->rq_disk + || req_no_special_merge(next)) +@@ -705,11 +724,19 @@ static struct request *attempt_merge(struct request_queue *q, + * counts here. Handle DISCARDs separately, as they + * have separate settings. 
+ */ +- if (req_op(req) == REQ_OP_DISCARD) { ++ ++ switch (blk_try_req_merge(req, next)) { ++ case ELEVATOR_DISCARD_MERGE: + if (!req_attempt_discard_merge(q, req, next)) + return NULL; +- } else if (!ll_merge_requests_fn(q, req, next)) ++ break; ++ case ELEVATOR_BACK_MERGE: ++ if (!ll_merge_requests_fn(q, req, next)) ++ return NULL; ++ break; ++ default: + return NULL; ++ } + + /* + * If failfast settings disagree or any of the two is already +@@ -834,8 +861,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) + + enum elv_merge blk_try_merge(struct request *rq, struct bio *bio) + { +- if (req_op(rq) == REQ_OP_DISCARD && +- queue_max_discard_segments(rq->q) > 1) ++ if (blk_discard_mergable(rq)) + return ELEVATOR_DISCARD_MERGE; + else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) + return ELEVATOR_BACK_MERGE; +diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c +index 6b0d3ef7309c..2ccfbb61ca89 100644 +--- a/drivers/acpi/acpi_memhotplug.c ++++ b/drivers/acpi/acpi_memhotplug.c +@@ -228,7 +228,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) + if (node < 0) + node = memory_add_physaddr_to_nid(info->start_addr); + +- result = add_memory(node, info->start_addr, info->length); ++ result = __add_memory(node, info->start_addr, info->length); + + /* + * If the memory block has been used by the kernel, add_memory() +diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c +index 2c288d1f42bb..817c7edfec0b 100644 +--- a/drivers/atm/zatm.c ++++ b/drivers/atm/zatm.c +@@ -126,7 +126,7 @@ static unsigned long dummy[2] = {0,0}; + #define zin_n(r) inl(zatm_dev->base+r*4) + #define zin(r) inl(zatm_dev->base+uPD98401_##r*4) + #define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4) +-#define zwait while (zin(CMR) & uPD98401_BUSY) ++#define zwait() do {} while (zin(CMR) & uPD98401_BUSY) + + /* RX0, RX1, TX0, TX1 */ + static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 }; +@@ -140,7 +140,7 @@ 
static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */ + + static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr) + { +- zwait; ++ zwait(); + zout(value,CER); + zout(uPD98401_IND_ACC | uPD98401_IA_BALL | + (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); +@@ -149,10 +149,10 @@ static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr) + + static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr) + { +- zwait; ++ zwait(); + zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW | + (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); +- zwait; ++ zwait(); + return zin(CER); + } + +@@ -241,7 +241,7 @@ static void refill_pool(struct atm_dev *dev,int pool) + } + if (first) { + spin_lock_irqsave(&zatm_dev->lock, flags); +- zwait; ++ zwait(); + zout(virt_to_bus(first),CER); + zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count, + CMR); +@@ -508,9 +508,9 @@ static int open_rx_first(struct atm_vcc *vcc) + } + if (zatm_vcc->pool < 0) return -EMSGSIZE; + spin_lock_irqsave(&zatm_dev->lock, flags); +- zwait; ++ zwait(); + zout(uPD98401_OPEN_CHAN,CMR); +- zwait; ++ zwait(); + DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); + chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; + spin_unlock_irqrestore(&zatm_dev->lock, flags); +@@ -571,21 +571,21 @@ static void close_rx(struct atm_vcc *vcc) + pos = vcc->vci >> 1; + shift = (1-(vcc->vci & 1)) << 4; + zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos); +- zwait; ++ zwait(); + zout(uPD98401_NOP,CMR); +- zwait; ++ zwait(); + zout(uPD98401_NOP,CMR); + spin_unlock_irqrestore(&zatm_dev->lock, flags); + } + spin_lock_irqsave(&zatm_dev->lock, flags); +- zwait; ++ zwait(); + zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << + uPD98401_CHAN_ADDR_SHIFT),CMR); +- zwait; ++ zwait(); + udelay(10); /* why oh why ... ? 
*/ + zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << + uPD98401_CHAN_ADDR_SHIFT),CMR); +- zwait; ++ zwait(); + if (!(zin(CMR) & uPD98401_CHAN_ADDR)) + printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel " + "%d\n",vcc->dev->number,zatm_vcc->rx_chan); +@@ -699,7 +699,7 @@ printk("NONONONOO!!!!\n"); + skb_queue_tail(&zatm_vcc->tx_queue,skb); + DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+ + uPD98401_TXVC_QRP)); +- zwait; ++ zwait(); + zout(uPD98401_TX_READY | (zatm_vcc->tx_chan << + uPD98401_CHAN_ADDR_SHIFT),CMR); + spin_unlock_irqrestore(&zatm_dev->lock, flags); +@@ -891,12 +891,12 @@ static void close_tx(struct atm_vcc *vcc) + } + spin_lock_irqsave(&zatm_dev->lock, flags); + #if 0 +- zwait; ++ zwait(); + zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); + #endif +- zwait; ++ zwait(); + zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); +- zwait; ++ zwait(); + if (!(zin(CMR) & uPD98401_CHAN_ADDR)) + printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel " + "%d\n",vcc->dev->number,chan); +@@ -926,9 +926,9 @@ static int open_tx_first(struct atm_vcc *vcc) + zatm_vcc->tx_chan = 0; + if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0; + spin_lock_irqsave(&zatm_dev->lock, flags); +- zwait; ++ zwait(); + zout(uPD98401_OPEN_CHAN,CMR); +- zwait; ++ zwait(); + DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); + chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; + spin_unlock_irqrestore(&zatm_dev->lock, flags); +@@ -1559,7 +1559,7 @@ static void zatm_phy_put(struct atm_dev *dev,unsigned char value, + struct zatm_dev *zatm_dev; + + zatm_dev = ZATM_DEV(dev); +- zwait; ++ zwait(); + zout(value,CER); + zout(uPD98401_IND_ACC | uPD98401_IA_B0 | + (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR); +@@ -1571,10 +1571,10 @@ static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr) + struct zatm_dev *zatm_dev; + + zatm_dev = ZATM_DEV(dev); +- zwait; ++ 
zwait(); + zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW | + (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR); +- zwait; ++ zwait(); + return zin(CER) & 0xff; + } + +diff --git a/drivers/base/memory.c b/drivers/base/memory.c +index c617e00f4361..fe1557aa9b10 100644 +--- a/drivers/base/memory.c ++++ b/drivers/base/memory.c +@@ -517,15 +517,20 @@ memory_probe_store(struct device *dev, struct device_attribute *attr, + if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1)) + return -EINVAL; + ++ ret = lock_device_hotplug_sysfs(); ++ if (ret) ++ return ret; ++ + nid = memory_add_physaddr_to_nid(phys_addr); +- ret = add_memory(nid, phys_addr, +- MIN_MEMORY_BLOCK_SIZE * sections_per_block); ++ ret = __add_memory(nid, phys_addr, ++ MIN_MEMORY_BLOCK_SIZE * sections_per_block); + + if (ret) + goto out; + + ret = count; + out: ++ unlock_device_hotplug(); + return ret; + } + +diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c +index c276ba1c0a19..e811f2414889 100644 +--- a/drivers/base/power/domain.c ++++ b/drivers/base/power/domain.c +@@ -369,6 +369,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, + return -EAGAIN; + } + ++ /* Default to shallowest state. 
*/ ++ if (!genpd->gov) ++ genpd->state_idx = 0; ++ + if (genpd->power_off) { + int ret; + +@@ -1598,6 +1602,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd, + ret = genpd_set_default_power_state(genpd); + if (ret) + return ret; ++ } else if (!gov) { ++ pr_warn("%s : no governor for states\n", genpd->name); + } + + mutex_lock(&gpd_list_lock); +diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c +index 49908c74bfcb..b0e8d8364876 100644 +--- a/drivers/block/amiflop.c ++++ b/drivers/block/amiflop.c +@@ -1699,11 +1699,41 @@ static const struct block_device_operations floppy_fops = { + .check_events = amiga_check_events, + }; + ++static struct gendisk *fd_alloc_disk(int drive) ++{ ++ struct gendisk *disk; ++ ++ disk = alloc_disk(1); ++ if (!disk) ++ goto out; ++ ++ disk->queue = blk_init_queue(do_fd_request, &amiflop_lock); ++ if (IS_ERR(disk->queue)) { ++ disk->queue = NULL; ++ goto out_put_disk; ++ } ++ ++ unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL); ++ if (!unit[drive].trackbuf) ++ goto out_cleanup_queue; ++ ++ return disk; ++ ++out_cleanup_queue: ++ blk_cleanup_queue(disk->queue); ++ disk->queue = NULL; ++out_put_disk: ++ put_disk(disk); ++out: ++ unit[drive].type->code = FD_NODRIVE; ++ return NULL; ++} ++ + static int __init fd_probe_drives(void) + { + int drive,drives,nomem; + +- printk(KERN_INFO "FD: probing units\nfound "); ++ pr_info("FD: probing units\nfound"); + drives=0; + nomem=0; + for(drive=0;drive<FD_MAX_UNITS;drive++) { +@@ -1711,27 +1741,17 @@ static int __init fd_probe_drives(void) + fd_probe(drive); + if (unit[drive].type->code == FD_NODRIVE) + continue; +- disk = alloc_disk(1); ++ ++ disk = fd_alloc_disk(drive); + if (!disk) { +- unit[drive].type->code = FD_NODRIVE; ++ pr_cont(" no mem for fd%d", drive); ++ nomem = 1; + continue; + } + unit[drive].gendisk = disk; +- +- disk->queue = blk_init_queue(do_fd_request, &amiflop_lock); +- if (!disk->queue) { +- unit[drive].type->code = FD_NODRIVE; +- continue; +- } 
+- + drives++; +- if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) { +- printk("no mem for "); +- unit[drive].type = &drive_types[num_dr_types - 1]; /* FD_NODRIVE */ +- drives--; +- nomem = 1; +- } +- printk("fd%d ",drive); ++ ++ pr_cont(" fd%d",drive); + disk->major = FLOPPY_MAJOR; + disk->first_minor = drive; + disk->fops = &floppy_fops; +@@ -1742,11 +1762,11 @@ static int __init fd_probe_drives(void) + } + if ((drives > 0) || (nomem == 0)) { + if (drives == 0) +- printk("no drives"); +- printk("\n"); ++ pr_cont(" no drives"); ++ pr_cont("\n"); + return drives; + } +- printk("\n"); ++ pr_cont("\n"); + return -ENOMEM; + } + +@@ -1837,30 +1857,6 @@ out_blkdev: + return ret; + } + +-#if 0 /* not safe to unload */ +-static int __exit amiga_floppy_remove(struct platform_device *pdev) +-{ +- int i; +- +- for( i = 0; i < FD_MAX_UNITS; i++) { +- if (unit[i].type->code != FD_NODRIVE) { +- struct request_queue *q = unit[i].gendisk->queue; +- del_gendisk(unit[i].gendisk); +- put_disk(unit[i].gendisk); +- kfree(unit[i].trackbuf); +- if (q) +- blk_cleanup_queue(q); +- } +- } +- blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); +- free_irq(IRQ_AMIGA_CIAA_TB, NULL); +- free_irq(IRQ_AMIGA_DSKBLK, NULL); +- custom.dmacon = DMAF_DISK; /* disable DMA */ +- amiga_chip_free(raw_buf); +- unregister_blkdev(FLOPPY_MAJOR, "fd"); +-} +-#endif +- + static struct platform_driver amiga_floppy_driver = { + .driver = { + .name = "amiga-floppy", +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c +index 34dfadd4dcd4..929bd255a290 100644 +--- a/drivers/block/nbd.c ++++ b/drivers/block/nbd.c +@@ -931,6 +931,7 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd, + if (sock->ops->shutdown == sock_no_shutdown) { + dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n"); + *err = -EINVAL; ++ sockfd_put(sock); + return NULL; + } + +@@ -969,14 +970,15 @@ static int nbd_add_socket(struct nbd_device 
*nbd, unsigned long arg, + sockfd_put(sock); + return -ENOMEM; + } ++ ++ config->socks = socks; ++ + nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL); + if (!nsock) { + sockfd_put(sock); + return -ENOMEM; + } + +- config->socks = socks; +- + nsock->fallback_index = -1; + nsock->dead = false; + mutex_init(&nsock->tx_lock); +diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c +index 64d0fc17c174..95649025cde7 100644 +--- a/drivers/block/skd_main.c ++++ b/drivers/block/skd_main.c +@@ -1417,7 +1417,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev, + + case SKD_CHECK_STATUS_BUSY_IMMINENT: + skd_log_skreq(skdev, skreq, "retry(busy)"); +- blk_requeue_request(skdev->queue, req); ++ blk_mq_requeue_request(req, true); + dev_info(&skdev->pdev->dev, "drive BUSY imminent\n"); + skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT; + skdev->timer_countdown = SKD_TIMER_MINUTES(20); +@@ -1427,7 +1427,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev, + case SKD_CHECK_STATUS_REQUEUE_REQUEST: + if ((unsigned long) ++req->special < SKD_MAX_RETRIES) { + skd_log_skreq(skdev, skreq, "retry"); +- blk_requeue_request(skdev->queue, req); ++ blk_mq_requeue_request(req, true); + break; + } + /* fall through */ +diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c +index 57a7f4255ac0..ee6c403de6af 100644 +--- a/drivers/bluetooth/hci_bcsp.c ++++ b/drivers/bluetooth/hci_bcsp.c +@@ -605,6 +605,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) + if (*ptr == 0xc0) { + BT_ERR("Short BCSP packet"); + kfree_skb(bcsp->rx_skb); ++ bcsp->rx_skb = NULL; + bcsp->rx_state = BCSP_W4_PKT_START; + bcsp->rx_count = 0; + } else +@@ -620,6 +621,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) + bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) { + BT_ERR("Error in BCSP hdr checksum"); + kfree_skb(bcsp->rx_skb); ++ bcsp->rx_skb = NULL; + bcsp->rx_state = BCSP_W4_PKT_DELIMITER; + bcsp->rx_count = 
0; + continue; +@@ -644,6 +646,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) + bscp_get_crc(bcsp)); + + kfree_skb(bcsp->rx_skb); ++ bcsp->rx_skb = NULL; + bcsp->rx_state = BCSP_W4_PKT_DELIMITER; + bcsp->rx_count = 0; + continue; +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c +index ea6558d4864c..90dd8e7291da 100644 +--- a/drivers/cdrom/cdrom.c ++++ b/drivers/cdrom/cdrom.c +@@ -410,10 +410,10 @@ static int cdrom_get_disc_info(struct cdrom_device_info *cdi, + * hack to have the capability flags defined const, while we can still + * change it here without gcc complaining at every line. + */ +-#define ENSURE(call, bits) \ +-do { \ +- if (cdo->call == NULL) \ +- *change_capability &= ~(bits); \ ++#define ENSURE(cdo, call, bits) \ ++do { \ ++ if (cdo->call == NULL) \ ++ WARN_ON_ONCE((cdo)->capability & (bits)); \ + } while (0) + + /* +@@ -589,7 +589,6 @@ int register_cdrom(struct cdrom_device_info *cdi) + { + static char banner_printed; + const struct cdrom_device_ops *cdo = cdi->ops; +- int *change_capability = (int *)&cdo->capability; /* hack */ + + cd_dbg(CD_OPEN, "entering register_cdrom\n"); + +@@ -601,16 +600,16 @@ int register_cdrom(struct cdrom_device_info *cdi) + cdrom_sysctl_register(); + } + +- ENSURE(drive_status, CDC_DRIVE_STATUS); ++ ENSURE(cdo, drive_status, CDC_DRIVE_STATUS); + if (cdo->check_events == NULL && cdo->media_changed == NULL) +- *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC); +- ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY); +- ENSURE(lock_door, CDC_LOCK); +- ENSURE(select_speed, CDC_SELECT_SPEED); +- ENSURE(get_last_session, CDC_MULTI_SESSION); +- ENSURE(get_mcn, CDC_MCN); +- ENSURE(reset, CDC_RESET); +- ENSURE(generic_packet, CDC_GENERIC_PACKET); ++ WARN_ON_ONCE(cdo->capability & (CDC_MEDIA_CHANGED | CDC_SELECT_DISC)); ++ ENSURE(cdo, tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY); ++ ENSURE(cdo, lock_door, CDC_LOCK); ++ ENSURE(cdo, select_speed, CDC_SELECT_SPEED); ++ ENSURE(cdo, 
get_last_session, CDC_MULTI_SESSION); ++ ENSURE(cdo, get_mcn, CDC_MCN); ++ ENSURE(cdo, reset, CDC_RESET); ++ ENSURE(cdo, generic_packet, CDC_GENERIC_PACKET); + cdi->mc_flags = 0; + cdi->options = CDO_USE_FFLAGS; + +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c +index 65454acd4b97..5200772ab0bd 100644 +--- a/drivers/char/virtio_console.c ++++ b/drivers/char/virtio_console.c +@@ -1366,24 +1366,24 @@ static void set_console_size(struct port *port, u16 rows, u16 cols) + port->cons.ws.ws_col = cols; + } + +-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) ++static int fill_queue(struct virtqueue *vq, spinlock_t *lock) + { + struct port_buffer *buf; +- unsigned int nr_added_bufs; ++ int nr_added_bufs; + int ret; + + nr_added_bufs = 0; + do { + buf = alloc_buf(vq->vdev, PAGE_SIZE, 0); + if (!buf) +- break; ++ return -ENOMEM; + + spin_lock_irq(lock); + ret = add_inbuf(vq, buf); + if (ret < 0) { + spin_unlock_irq(lock); + free_buf(buf, true); +- break; ++ return ret; + } + nr_added_bufs++; + spin_unlock_irq(lock); +@@ -1403,7 +1403,6 @@ static int add_port(struct ports_device *portdev, u32 id) + char debugfs_name[16]; + struct port *port; + dev_t devt; +- unsigned int nr_added_bufs; + int err; + + port = kmalloc(sizeof(*port), GFP_KERNEL); +@@ -1462,11 +1461,13 @@ static int add_port(struct ports_device *portdev, u32 id) + spin_lock_init(&port->outvq_lock); + init_waitqueue_head(&port->waitqueue); + +- /* Fill the in_vq with buffers so the host can send us data. */ +- nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); +- if (!nr_added_bufs) { ++ /* We can safely ignore ENOSPC because it means ++ * the queue already has buffers. 
Buffers are removed ++ * only by virtcons_remove(), not by unplug_port() ++ */ ++ err = fill_queue(port->in_vq, &port->inbuf_lock); ++ if (err < 0 && err != -ENOSPC) { + dev_err(port->dev, "Error allocating inbufs\n"); +- err = -ENOMEM; + goto free_device; + } + +@@ -2099,14 +2100,11 @@ static int virtcons_probe(struct virtio_device *vdev) + INIT_WORK(&portdev->control_work, &control_work_handler); + + if (multiport) { +- unsigned int nr_added_bufs; +- + spin_lock_init(&portdev->c_ivq_lock); + spin_lock_init(&portdev->c_ovq_lock); + +- nr_added_bufs = fill_queue(portdev->c_ivq, +- &portdev->c_ivq_lock); +- if (!nr_added_bufs) { ++ err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); ++ if (err < 0) { + dev_err(&vdev->dev, + "Error allocating buffers for control queue\n"); + /* +@@ -2117,7 +2115,7 @@ static int virtcons_probe(struct virtio_device *vdev) + VIRTIO_CONSOLE_DEVICE_READY, 0); + /* Device was functional: we need full cleanup. */ + virtcons_remove(vdev); +- return -ENOMEM; ++ return err; + } + } else { + /* +diff --git a/drivers/clk/at91/clk-audio-pll.c b/drivers/clk/at91/clk-audio-pll.c +index da7bafcfbe70..b3eaf654fac9 100644 +--- a/drivers/clk/at91/clk-audio-pll.c ++++ b/drivers/clk/at91/clk-audio-pll.c +@@ -509,7 +509,7 @@ static void __init of_sama5d2_clk_audio_pll_pad_setup(struct device_node *np) + + static void __init of_sama5d2_clk_audio_pll_pmc_setup(struct device_node *np) + { +- struct clk_audio_pad *apmc_ck; ++ struct clk_audio_pmc *apmc_ck; + struct clk_init_data init = {}; + + apmc_ck = kzalloc(sizeof(*apmc_ck), GFP_KERNEL); +diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c +index 0fc75c395957..d083b860f083 100644 +--- a/drivers/clk/mmp/clk-of-mmp2.c ++++ b/drivers/clk/mmp/clk-of-mmp2.c +@@ -227,8 +227,8 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = { + /* The gate clocks has mux parent. 
*/ + {MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, + {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, +- {MMP2_CLK_SDH1, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, +- {MMP2_CLK_SDH1, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, ++ {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, ++ {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, + {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock}, + {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock}, + {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock}, +diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +index 2bb4cabf802f..36a30a3cfad7 100644 +--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c ++++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +@@ -158,7 +158,12 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu", + #define SUN50I_A64_PLL_MIPI_REG 0x040 + + static struct ccu_nkm pll_mipi_clk = { +- .enable = BIT(31), ++ /* ++ * The bit 23 and 22 are called "LDO{1,2}_EN" on the SoC's ++ * user manual, and by experiments the PLL doesn't work without ++ * these bits toggled. 
++ */ ++ .enable = BIT(31) | BIT(23) | BIT(22), + .lock = BIT(28), + .n = _SUNXI_CCU_MULT(8, 4), + .k = _SUNXI_CCU_MULT_MIN(4, 2, 2), +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index 4aa3c5331666..480e8c13567c 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -911,6 +911,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) + struct freq_attr *fattr = to_attr(attr); + ssize_t ret; + ++ if (!fattr->show) ++ return -EIO; ++ + down_read(&policy->rwsem); + ret = fattr->show(policy, buf); + up_read(&policy->rwsem); +@@ -925,6 +928,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, + struct freq_attr *fattr = to_attr(attr); + ssize_t ret = -EINVAL; + ++ if (!fattr->store) ++ return -EIO; ++ + cpus_read_lock(); + + if (cpu_online(policy->cpu)) { +@@ -1673,6 +1679,9 @@ void cpufreq_resume(void) + if (!cpufreq_driver) + return; + ++ if (unlikely(!cpufreq_suspended)) ++ return; ++ + cpufreq_suspended = false; + + if (!has_target() && !cpufreq_driver->resume) +diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c +index f35d87519a3e..dfefa39e9351 100644 +--- a/drivers/edac/thunderx_edac.c ++++ b/drivers/edac/thunderx_edac.c +@@ -1905,7 +1905,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id) + default: + dev_err(&l2c->pdev->dev, "Unsupported device: %04x\n", + l2c->pdev->device); +- return IRQ_NONE; ++ goto err_free; + } + + while (CIRC_CNT(l2c->ring_head, l2c->ring_tail, +@@ -1927,7 +1927,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id) + l2c->ring_tail++; + } + +- return IRQ_HANDLED; ++ ret = IRQ_HANDLED; + + err_free: + kfree(other); +diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c +index c8f169bf2e27..62337be07afc 100644 +--- a/drivers/firmware/google/gsmi.c ++++ b/drivers/firmware/google/gsmi.c +@@ -480,11 +480,10 @@ static ssize_t eventlog_write(struct file *filp, struct 
kobject *kobj, + if (count < sizeof(u32)) + return -EINVAL; + param.type = *(u32 *)buf; +- count -= sizeof(u32); + buf += sizeof(u32); + + /* The remaining buffer is the data payload */ +- if (count > gsmi_dev.data_buf->length) ++ if ((count - sizeof(u32)) > gsmi_dev.data_buf->length) + return -EINVAL; + param.data_len = count - sizeof(u32); + +@@ -504,7 +503,7 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj, + + spin_unlock_irqrestore(&gsmi_dev.lock, flags); + +- return rc; ++ return (rc == 0) ? count : rc; + + } + +diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c +index ac6c1c0548b6..78254ed93206 100644 +--- a/drivers/gpio/gpio-max77620.c ++++ b/drivers/gpio/gpio-max77620.c +@@ -163,13 +163,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio, + case 0: + val = MAX77620_CNFG_GPIO_DBNC_None; + break; +- case 1000 ... 8000: ++ case 1 ... 8000: + val = MAX77620_CNFG_GPIO_DBNC_8ms; + break; +- case 9000 ... 16000: ++ case 8001 ... 16000: + val = MAX77620_CNFG_GPIO_DBNC_16ms; + break; +- case 17000 ... 32000: ++ case 16001 ... 32000: + val = MAX77620_CNFG_GPIO_DBNC_32ms; + break; + default: +diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c +index 05ae8c4a8a1b..480d20758324 100644 +--- a/drivers/gpu/drm/i915/i915_gem_userptr.c ++++ b/drivers/gpu/drm/i915/i915_gem_userptr.c +@@ -690,8 +690,28 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, + i915_gem_gtt_finish_pages(obj, pages); + + for_each_sgt_page(page, sgt_iter, pages) { +- if (obj->mm.dirty) ++ if (obj->mm.dirty && trylock_page(page)) { ++ /* ++ * As this may not be anonymous memory (e.g. shmem) ++ * but exist on a real mapping, we have to lock ++ * the page in order to dirty it -- holding ++ * the page reference is not sufficient to ++ * prevent the inode from being truncated. ++ * Play safe and take the lock. ++ * ++ * However...! 
++ * ++ * The mmu-notifier can be invalidated for a ++ * migrate_page, that is alreadying holding the lock ++ * on the page. Such a try_to_unmap() will result ++ * in us calling put_pages() and so recursively try ++ * to lock the page. We avoid that deadlock with ++ * a trylock_page() and in exchange we risk missing ++ * some page dirtying. ++ */ + set_page_dirty(page); ++ unlock_page(page); ++ } + + mark_page_accessed(page); + put_page(page); +diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c +index bc26ec822e26..dd0687e36a47 100644 +--- a/drivers/i2c/busses/i2c-uniphier-f.c ++++ b/drivers/i2c/busses/i2c-uniphier-f.c +@@ -98,6 +98,7 @@ struct uniphier_fi2c_priv { + unsigned int flags; + unsigned int busy_cnt; + unsigned int clk_cycle; ++ spinlock_t lock; /* IRQ synchronization */ + }; + + static void uniphier_fi2c_fill_txfifo(struct uniphier_fi2c_priv *priv, +@@ -142,9 +143,10 @@ static void uniphier_fi2c_set_irqs(struct uniphier_fi2c_priv *priv) + writel(priv->enabled_irqs, priv->membase + UNIPHIER_FI2C_IE); + } + +-static void uniphier_fi2c_clear_irqs(struct uniphier_fi2c_priv *priv) ++static void uniphier_fi2c_clear_irqs(struct uniphier_fi2c_priv *priv, ++ u32 mask) + { +- writel(-1, priv->membase + UNIPHIER_FI2C_IC); ++ writel(mask, priv->membase + UNIPHIER_FI2C_IC); + } + + static void uniphier_fi2c_stop(struct uniphier_fi2c_priv *priv) +@@ -162,7 +164,10 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id) + struct uniphier_fi2c_priv *priv = dev_id; + u32 irq_status; + ++ spin_lock(&priv->lock); ++ + irq_status = readl(priv->membase + UNIPHIER_FI2C_INT); ++ irq_status &= priv->enabled_irqs; + + dev_dbg(&priv->adap.dev, + "interrupt: enabled_irqs=%04x, irq_status=%04x\n", +@@ -207,7 +212,13 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id) + + if (irq_status & (UNIPHIER_FI2C_INT_RF | UNIPHIER_FI2C_INT_RB)) { + uniphier_fi2c_drain_rxfifo(priv); +- if (!priv->len) ++ /* ++ * If the number 
of bytes to read is multiple of the FIFO size ++ * (msg->len == 8, 16, 24, ...), the INT_RF bit is set a little ++ * earlier than INT_RB. We wait for INT_RB to confirm the ++ * completion of the current message. ++ */ ++ if (!priv->len && (irq_status & UNIPHIER_FI2C_INT_RB)) + goto data_done; + + if (unlikely(priv->flags & UNIPHIER_FI2C_MANUAL_NACK)) { +@@ -230,6 +241,8 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id) + goto handled; + } + ++ spin_unlock(&priv->lock); ++ + return IRQ_NONE; + + data_done: +@@ -244,7 +257,14 @@ complete: + } + + handled: +- uniphier_fi2c_clear_irqs(priv); ++ /* ++ * This controller makes a pause while any bit of the IRQ status is ++ * asserted. Clear the asserted bit to kick the controller just before ++ * exiting the handler. ++ */ ++ uniphier_fi2c_clear_irqs(priv, irq_status); ++ ++ spin_unlock(&priv->lock); + + return IRQ_HANDLED; + } +@@ -252,6 +272,8 @@ handled: + static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr) + { + priv->enabled_irqs |= UNIPHIER_FI2C_INT_TE; ++ uniphier_fi2c_set_irqs(priv); ++ + /* do not use TX byte counter */ + writel(0, priv->membase + UNIPHIER_FI2C_TBC); + /* set slave address */ +@@ -284,6 +306,8 @@ static void uniphier_fi2c_rx_init(struct uniphier_fi2c_priv *priv, u16 addr) + priv->enabled_irqs |= UNIPHIER_FI2C_INT_RF; + } + ++ uniphier_fi2c_set_irqs(priv); ++ + /* set slave address with RD bit */ + writel(UNIPHIER_FI2C_DTTX_CMD | UNIPHIER_FI2C_DTTX_RD | addr << 1, + priv->membase + UNIPHIER_FI2C_DTTX); +@@ -307,14 +331,16 @@ static void uniphier_fi2c_recover(struct uniphier_fi2c_priv *priv) + } + + static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap, +- struct i2c_msg *msg, bool stop) ++ struct i2c_msg *msg, bool repeat, ++ bool stop) + { + struct uniphier_fi2c_priv *priv = i2c_get_adapdata(adap); + bool is_read = msg->flags & I2C_M_RD; +- unsigned long time_left; ++ unsigned long time_left, flags; + +- dev_dbg(&adap->dev, "%s: addr=0x%02x, 
len=%d, stop=%d\n", +- is_read ? "receive" : "transmit", msg->addr, msg->len, stop); ++ dev_dbg(&adap->dev, "%s: addr=0x%02x, len=%d, repeat=%d, stop=%d\n", ++ is_read ? "receive" : "transmit", msg->addr, msg->len, ++ repeat, stop); + + priv->len = msg->len; + priv->buf = msg->buf; +@@ -326,22 +352,36 @@ static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap, + priv->flags |= UNIPHIER_FI2C_STOP; + + reinit_completion(&priv->comp); +- uniphier_fi2c_clear_irqs(priv); ++ uniphier_fi2c_clear_irqs(priv, U32_MAX); + writel(UNIPHIER_FI2C_RST_TBRST | UNIPHIER_FI2C_RST_RBRST, + priv->membase + UNIPHIER_FI2C_RST); /* reset TX/RX FIFO */ + ++ spin_lock_irqsave(&priv->lock, flags); ++ + if (is_read) + uniphier_fi2c_rx_init(priv, msg->addr); + else + uniphier_fi2c_tx_init(priv, msg->addr); + +- uniphier_fi2c_set_irqs(priv); +- + dev_dbg(&adap->dev, "start condition\n"); +- writel(UNIPHIER_FI2C_CR_MST | UNIPHIER_FI2C_CR_STA, +- priv->membase + UNIPHIER_FI2C_CR); ++ /* ++ * For a repeated START condition, writing a slave address to the FIFO ++ * kicks the controller. So, the UNIPHIER_FI2C_CR register should be ++ * written only for a non-repeated START condition. 
++ */ ++ if (!repeat) ++ writel(UNIPHIER_FI2C_CR_MST | UNIPHIER_FI2C_CR_STA, ++ priv->membase + UNIPHIER_FI2C_CR); ++ ++ spin_unlock_irqrestore(&priv->lock, flags); + + time_left = wait_for_completion_timeout(&priv->comp, adap->timeout); ++ ++ spin_lock_irqsave(&priv->lock, flags); ++ priv->enabled_irqs = 0; ++ uniphier_fi2c_set_irqs(priv); ++ spin_unlock_irqrestore(&priv->lock, flags); ++ + if (!time_left) { + dev_err(&adap->dev, "transaction timeout.\n"); + uniphier_fi2c_recover(priv); +@@ -394,6 +434,7 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) + { + struct i2c_msg *msg, *emsg = msgs + num; ++ bool repeat = false; + int ret; + + ret = uniphier_fi2c_check_bus_busy(adap); +@@ -404,9 +445,11 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap, + /* Emit STOP if it is the last message or I2C_M_STOP is set. */ + bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP); + +- ret = uniphier_fi2c_master_xfer_one(adap, msg, stop); ++ ret = uniphier_fi2c_master_xfer_one(adap, msg, repeat, stop); + if (ret) + return ret; ++ ++ repeat = !stop; + } + + return num; +@@ -546,6 +589,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev) + + priv->clk_cycle = clk_rate / bus_speed; + init_completion(&priv->comp); ++ spin_lock_init(&priv->lock); + priv->adap.owner = THIS_MODULE; + priv->adap.algo = &uniphier_fi2c_algo; + priv->adap.dev.parent = dev; +diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c +index bf811b23bc95..7d00b6a53ed8 100644 +--- a/drivers/infiniband/hw/bnxt_re/main.c ++++ b/drivers/infiniband/hw/bnxt_re/main.c +@@ -782,12 +782,17 @@ static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp, + struct ib_event ib_event; + + ib_event.device = ibdev; +- if (qp) ++ if (qp) { + ib_event.element.qp = qp; +- else ++ ib_event.event = event; ++ if (qp->event_handler) ++ qp->event_handler(&ib_event, qp->qp_context); ++ ++ } else { + 
ib_event.element.port_num = port_num; +- ib_event.event = event; +- ib_dispatch_event(&ib_event); ++ ib_event.event = event; ++ ib_dispatch_event(&ib_event); ++ } + } + + #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN 0x02 +diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c +index 12d9e5f4beb1..58635b5f296f 100644 +--- a/drivers/isdn/mISDN/tei.c ++++ b/drivers/isdn/mISDN/tei.c +@@ -1180,8 +1180,7 @@ static int + ctrl_teimanager(struct manager *mgr, void *arg) + { + /* currently we only have one option */ +- int *val = (int *)arg; +- int ret = 0; ++ unsigned int *val = (unsigned int *)arg; + + switch (val[0]) { + case IMCLEAR_L2: +@@ -1197,9 +1196,9 @@ ctrl_teimanager(struct manager *mgr, void *arg) + test_and_clear_bit(OPTION_L1_HOLD, &mgr->options); + break; + default: +- ret = -EINVAL; ++ return -EINVAL; + } +- return ret; ++ return 0; + } + + /* This function does create a L2 for fixed TEI in NT Mode */ +diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c +index da7f4fc1a51d..a0f61eb853c5 100644 +--- a/drivers/macintosh/windfarm_smu_sat.c ++++ b/drivers/macintosh/windfarm_smu_sat.c +@@ -22,14 +22,6 @@ + + #define VERSION "1.0" + +-#define DEBUG +- +-#ifdef DEBUG +-#define DBG(args...) printk(args) +-#else +-#define DBG(args...) 
do { } while(0) +-#endif +- + /* If the cache is older than 800ms we'll refetch it */ + #define MAX_AGE msecs_to_jiffies(800) + +@@ -106,13 +98,10 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id, + buf[i+2] = data[3]; + buf[i+3] = data[2]; + } +-#ifdef DEBUG +- DBG(KERN_DEBUG "sat %d partition %x:", sat_id, id); +- for (i = 0; i < len; ++i) +- DBG(" %x", buf[i]); +- DBG("\n"); +-#endif + ++ printk(KERN_DEBUG "sat %d partition %x:", sat_id, id); ++ print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET, ++ 16, 1, buf, len, false); + if (size) + *size = len; + return (struct smu_sdbp_header *) buf; +@@ -132,13 +121,13 @@ static int wf_sat_read_cache(struct wf_sat *sat) + if (err < 0) + return err; + sat->last_read = jiffies; ++ + #ifdef LOTSA_DEBUG + { + int i; +- DBG(KERN_DEBUG "wf_sat_get: data is"); +- for (i = 0; i < 16; ++i) +- DBG(" %.2x", sat->cache[i]); +- DBG("\n"); ++ printk(KERN_DEBUG "wf_sat_get: data is"); ++ print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET, ++ 16, 1, sat->cache, 16, false); + } + #endif + return 0; +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c +index 151211b4cb1b..2c5912e75514 100644 +--- a/drivers/md/dm-raid.c ++++ b/drivers/md/dm-raid.c +@@ -2441,7 +2441,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev) + } + + /* Enable bitmap creation for RAID levels != 0 */ +- mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096); ++ mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 
0 : to_sector(4096); + mddev->bitmap_info.default_offset = mddev->bitmap_info.offset; + + if (!test_and_clear_bit(FirstUse, &rdev->flags)) { +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c +index 433e78f453da..d08d77b9674f 100644 +--- a/drivers/md/raid10.c ++++ b/drivers/md/raid10.c +@@ -226,7 +226,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) + + out_free_pages: + while (--j >= 0) +- resync_free_pages(&rps[j * 2]); ++ resync_free_pages(&rps[j]); + + j = 0; + out_free_bio: +diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c +index d300e5e7eadc..2ca9c928ed2f 100644 +--- a/drivers/media/platform/vivid/vivid-kthread-cap.c ++++ b/drivers/media/platform/vivid/vivid-kthread-cap.c +@@ -777,7 +777,11 @@ static int vivid_thread_vid_cap(void *data) + if (kthread_should_stop()) + break; + +- mutex_lock(&dev->mutex); ++ if (!mutex_trylock(&dev->mutex)) { ++ schedule_timeout_uninterruptible(1); ++ continue; ++ } ++ + cur_jiffies = jiffies; + if (dev->cap_seq_resync) { + dev->jiffies_vid_cap = cur_jiffies; +@@ -930,8 +934,6 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming) + + /* shutdown control thread */ + vivid_grab_controls(dev, false); +- mutex_unlock(&dev->mutex); + kthread_stop(dev->kthread_vid_cap); + dev->kthread_vid_cap = NULL; +- mutex_lock(&dev->mutex); + } +diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c +index 7c8d75852816..ed5d8fb854b4 100644 +--- a/drivers/media/platform/vivid/vivid-kthread-out.c ++++ b/drivers/media/platform/vivid/vivid-kthread-out.c +@@ -147,7 +147,11 @@ static int vivid_thread_vid_out(void *data) + if (kthread_should_stop()) + break; + +- mutex_lock(&dev->mutex); ++ if (!mutex_trylock(&dev->mutex)) { ++ schedule_timeout_uninterruptible(1); ++ continue; ++ } ++ + cur_jiffies = jiffies; + if (dev->out_seq_resync) { + dev->jiffies_vid_out = cur_jiffies; +@@ -301,8 
+305,6 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming) + + /* shutdown control thread */ + vivid_grab_controls(dev, false); +- mutex_unlock(&dev->mutex); + kthread_stop(dev->kthread_vid_out); + dev->kthread_vid_out = NULL; +- mutex_lock(&dev->mutex); + } +diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c +index ebd7b9c4dd83..4f49c9a47d49 100644 +--- a/drivers/media/platform/vivid/vivid-sdr-cap.c ++++ b/drivers/media/platform/vivid/vivid-sdr-cap.c +@@ -149,7 +149,11 @@ static int vivid_thread_sdr_cap(void *data) + if (kthread_should_stop()) + break; + +- mutex_lock(&dev->mutex); ++ if (!mutex_trylock(&dev->mutex)) { ++ schedule_timeout_uninterruptible(1); ++ continue; ++ } ++ + cur_jiffies = jiffies; + if (dev->sdr_cap_seq_resync) { + dev->jiffies_sdr_cap = cur_jiffies; +@@ -309,10 +313,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq) + } + + /* shutdown control thread */ +- mutex_unlock(&dev->mutex); + kthread_stop(dev->kthread_sdr_cap); + dev->kthread_sdr_cap = NULL; +- mutex_lock(&dev->mutex); + } + + const struct vb2_ops vivid_sdr_cap_qops = { +diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c +index 4ca3d600aa84..c66568e8f388 100644 +--- a/drivers/media/platform/vivid/vivid-vid-cap.c ++++ b/drivers/media/platform/vivid/vivid-vid-cap.c +@@ -239,9 +239,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count) + if (vb2_is_streaming(&dev->vb_vid_out_q)) + dev->can_loop_video = vivid_vid_can_loop(dev); + +- if (dev->kthread_vid_cap) +- return 0; +- + dev->vid_cap_seq_count = 0; + dprintk(dev, 1, "%s\n", __func__); + for (i = 0; i < VIDEO_MAX_FRAME; i++) +diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c +index 0b1b6218ede8..3e7a26d15074 100644 +--- a/drivers/media/platform/vivid/vivid-vid-out.c ++++ 
b/drivers/media/platform/vivid/vivid-vid-out.c +@@ -158,9 +158,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count) + if (vb2_is_streaming(&dev->vb_vid_cap_q)) + dev->can_loop_video = vivid_vid_can_loop(dev); + +- if (dev->kthread_vid_out) +- return 0; +- + dev->vid_out_seq_count = 0; + dprintk(dev, 1, "%s\n", __func__); + if (dev->start_streaming_error) { +diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c +index a7547c88e4c3..edf8a7a76e86 100644 +--- a/drivers/media/rc/imon.c ++++ b/drivers/media/rc/imon.c +@@ -1737,8 +1737,7 @@ static void imon_incoming_scancode(struct imon_context *ictx, + spin_unlock_irqrestore(&ictx->kc_lock, flags); + + /* send touchscreen events through input subsystem if touchpad data */ +- if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 && +- buf[7] == 0x86) { ++ if (ictx->touch && len == 8 && buf[7] == 0x86) { + imon_touch_event(ictx, buf); + return; + +diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c +index a8f3169e30b3..ac4fddfd0a43 100644 +--- a/drivers/media/usb/b2c2/flexcop-usb.c ++++ b/drivers/media/usb/b2c2/flexcop-usb.c +@@ -537,6 +537,9 @@ static int flexcop_usb_probe(struct usb_interface *intf, + struct flexcop_device *fc = NULL; + int ret; + ++ if (intf->cur_altsetting->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) { + err("out of memory\n"); + return -ENOMEM; +diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c +index cfe86b4864b3..47a9a791ee7d 100644 +--- a/drivers/media/usb/dvb-usb/cxusb.c ++++ b/drivers/media/usb/dvb-usb/cxusb.c +@@ -455,7 +455,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d) + { + u8 ircode[4]; + +- cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4); ++ if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0) ++ return 0; + + if (ircode[2] || ircode[3]) + rc_keydown(d->rc_dev, RC_PROTO_NEC, +diff 
--git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c +index 960272d3c924..4c39c502d616 100644 +--- a/drivers/media/usb/usbvision/usbvision-video.c ++++ b/drivers/media/usb/usbvision/usbvision-video.c +@@ -328,6 +328,10 @@ static int usbvision_v4l2_open(struct file *file) + if (mutex_lock_interruptible(&usbvision->v4l2_lock)) + return -ERESTARTSYS; + ++ if (usbvision->remove_pending) { ++ err_code = -ENODEV; ++ goto unlock; ++ } + if (usbvision->user) { + err_code = -EBUSY; + } else { +@@ -391,6 +395,7 @@ unlock: + static int usbvision_v4l2_close(struct file *file) + { + struct usb_usbvision *usbvision = video_drvdata(file); ++ int r; + + PDEBUG(DBG_IO, "close"); + +@@ -405,9 +410,10 @@ static int usbvision_v4l2_close(struct file *file) + usbvision_scratch_free(usbvision); + + usbvision->user--; ++ r = usbvision->remove_pending; + mutex_unlock(&usbvision->v4l2_lock); + +- if (usbvision->remove_pending) { ++ if (r) { + printk(KERN_INFO "%s: Final disconnect\n", __func__); + usbvision_release(usbvision); + return 0; +@@ -1091,6 +1097,11 @@ static int usbvision_radio_open(struct file *file) + + if (mutex_lock_interruptible(&usbvision->v4l2_lock)) + return -ERESTARTSYS; ++ ++ if (usbvision->remove_pending) { ++ err_code = -ENODEV; ++ goto out; ++ } + err_code = v4l2_fh_open(file); + if (err_code) + goto out; +@@ -1123,6 +1134,7 @@ out: + static int usbvision_radio_close(struct file *file) + { + struct usb_usbvision *usbvision = video_drvdata(file); ++ int r; + + PDEBUG(DBG_IO, ""); + +@@ -1135,9 +1147,10 @@ static int usbvision_radio_close(struct file *file) + usbvision_audio_off(usbvision); + usbvision->radio = 0; + usbvision->user--; ++ r = usbvision->remove_pending; + mutex_unlock(&usbvision->v4l2_lock); + +- if (usbvision->remove_pending) { ++ if (r) { + printk(KERN_INFO "%s: Final disconnect\n", __func__); + v4l2_fh_release(file); + usbvision_release(usbvision); +@@ -1562,6 +1575,7 @@ err_usb: + static void 
usbvision_disconnect(struct usb_interface *intf) + { + struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf)); ++ int u; + + PDEBUG(DBG_PROBE, ""); + +@@ -1578,13 +1592,14 @@ static void usbvision_disconnect(struct usb_interface *intf) + v4l2_device_disconnect(&usbvision->v4l2_dev); + usbvision_i2c_unregister(usbvision); + usbvision->remove_pending = 1; /* Now all ISO data will be ignored */ ++ u = usbvision->user; + + usb_put_dev(usbvision->dev); + usbvision->dev = NULL; /* USB device is no more */ + + mutex_unlock(&usbvision->v4l2_lock); + +- if (usbvision->user) { ++ if (u) { + printk(KERN_INFO "%s: In use, disconnect pending\n", + __func__); + wake_up_interruptible(&usbvision->wait_frame); +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c +index c2939d080997..6445b638f207 100644 +--- a/drivers/media/usb/uvc/uvc_driver.c ++++ b/drivers/media/usb/uvc/uvc_driver.c +@@ -2059,6 +2059,20 @@ static int uvc_probe(struct usb_interface *intf, + sizeof(dev->name) - len); + } + ++ /* Initialize the media device. */ ++#ifdef CONFIG_MEDIA_CONTROLLER ++ dev->mdev.dev = &intf->dev; ++ strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model)); ++ if (udev->serial) ++ strscpy(dev->mdev.serial, udev->serial, ++ sizeof(dev->mdev.serial)); ++ usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info)); ++ dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); ++ media_device_init(&dev->mdev); ++ ++ dev->vdev.mdev = &dev->mdev; ++#endif ++ + /* Parse the Video Class control descriptor. */ + if (uvc_parse_control(dev) < 0) { + uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC " +@@ -2079,19 +2093,7 @@ static int uvc_probe(struct usb_interface *intf, + "linux-uvc-devel mailing list.\n"); + } + +- /* Initialize the media device and register the V4L2 device. 
*/ +-#ifdef CONFIG_MEDIA_CONTROLLER +- dev->mdev.dev = &intf->dev; +- strlcpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model)); +- if (udev->serial) +- strlcpy(dev->mdev.serial, udev->serial, +- sizeof(dev->mdev.serial)); +- strcpy(dev->mdev.bus_info, udev->devpath); +- dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); +- media_device_init(&dev->mdev); +- +- dev->vdev.mdev = &dev->mdev; +-#endif ++ /* Register the V4L2 device. */ + if (v4l2_device_register(&intf->dev, &dev->vdev) < 0) + goto error; + +diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c +index d8e3184bd27c..ad8a5296c50b 100644 +--- a/drivers/mfd/arizona-core.c ++++ b/drivers/mfd/arizona-core.c +@@ -52,8 +52,10 @@ int arizona_clk32k_enable(struct arizona *arizona) + if (ret != 0) + goto err_ref; + ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]); +- if (ret != 0) +- goto err_pm; ++ if (ret != 0) { ++ pm_runtime_put_sync(arizona->dev); ++ goto err_ref; ++ } + break; + case ARIZONA_32KZ_MCLK2: + ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK2]); +@@ -67,8 +69,6 @@ int arizona_clk32k_enable(struct arizona *arizona) + ARIZONA_CLK_32K_ENA); + } + +-err_pm: +- pm_runtime_put_sync(arizona->dev); + err_ref: + if (ret != 0) + arizona->clk32k_ref--; +diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c +index 15bc052704a6..9ca1f8c015de 100644 +--- a/drivers/mfd/intel_soc_pmic_bxtwc.c ++++ b/drivers/mfd/intel_soc_pmic_bxtwc.c +@@ -31,8 +31,8 @@ + + /* Interrupt Status Registers */ + #define BXTWC_IRQLVL1 0x4E02 +-#define BXTWC_PWRBTNIRQ 0x4E03 + ++#define BXTWC_PWRBTNIRQ 0x4E03 + #define BXTWC_THRM0IRQ 0x4E04 + #define BXTWC_THRM1IRQ 0x4E05 + #define BXTWC_THRM2IRQ 0x4E06 +@@ -47,10 +47,9 @@ + + /* Interrupt MASK Registers */ + #define BXTWC_MIRQLVL1 0x4E0E +-#define BXTWC_MPWRTNIRQ 0x4E0F +- + #define BXTWC_MIRQLVL1_MCHGR BIT(5) + ++#define BXTWC_MPWRBTNIRQ 0x4E0F + #define BXTWC_MTHRM0IRQ 0x4E12 + #define BXTWC_MTHRM1IRQ 0x4E13 + 
#define BXTWC_MTHRM2IRQ 0x4E14 +@@ -66,9 +65,7 @@ + /* Whiskey Cove PMIC share same ACPI ID between different platforms */ + #define BROXTON_PMIC_WC_HRV 4 + +-/* Manage in two IRQ chips since mask registers are not consecutive */ + enum bxtwc_irqs { +- /* Level 1 */ + BXTWC_PWRBTN_LVL1_IRQ = 0, + BXTWC_TMU_LVL1_IRQ, + BXTWC_THRM_LVL1_IRQ, +@@ -77,9 +74,11 @@ enum bxtwc_irqs { + BXTWC_CHGR_LVL1_IRQ, + BXTWC_GPIO_LVL1_IRQ, + BXTWC_CRIT_LVL1_IRQ, ++}; + +- /* Level 2 */ +- BXTWC_PWRBTN_IRQ, ++enum bxtwc_irqs_pwrbtn { ++ BXTWC_PWRBTN_IRQ = 0, ++ BXTWC_UIBTN_IRQ, + }; + + enum bxtwc_irqs_bcu { +@@ -113,7 +112,10 @@ static const struct regmap_irq bxtwc_regmap_irqs[] = { + REGMAP_IRQ_REG(BXTWC_CHGR_LVL1_IRQ, 0, BIT(5)), + REGMAP_IRQ_REG(BXTWC_GPIO_LVL1_IRQ, 0, BIT(6)), + REGMAP_IRQ_REG(BXTWC_CRIT_LVL1_IRQ, 0, BIT(7)), +- REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 1, 0x03), ++}; ++ ++static const struct regmap_irq bxtwc_regmap_irqs_pwrbtn[] = { ++ REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 0, 0x01), + }; + + static const struct regmap_irq bxtwc_regmap_irqs_bcu[] = { +@@ -125,7 +127,7 @@ static const struct regmap_irq bxtwc_regmap_irqs_adc[] = { + }; + + static const struct regmap_irq bxtwc_regmap_irqs_chgr[] = { +- REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, BIT(5)), ++ REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, 0x20), + REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, 0x1f), + REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, 0x1f), + }; +@@ -144,7 +146,16 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip = { + .mask_base = BXTWC_MIRQLVL1, + .irqs = bxtwc_regmap_irqs, + .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs), +- .num_regs = 2, ++ .num_regs = 1, ++}; ++ ++static struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = { ++ .name = "bxtwc_irq_chip_pwrbtn", ++ .status_base = BXTWC_PWRBTNIRQ, ++ .mask_base = BXTWC_MPWRBTNIRQ, ++ .irqs = bxtwc_regmap_irqs_pwrbtn, ++ .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs_pwrbtn), ++ .num_regs = 1, + }; + + static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = { +@@ -472,6 +483,16 @@ static int 
bxtwc_probe(struct platform_device *pdev) + return ret; + } + ++ ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data, ++ BXTWC_PWRBTN_LVL1_IRQ, ++ IRQF_ONESHOT, ++ &bxtwc_regmap_irq_chip_pwrbtn, ++ &pmic->irq_chip_data_pwrbtn); ++ if (ret) { ++ dev_err(&pdev->dev, "Failed to add PWRBTN IRQ chip\n"); ++ return ret; ++ } ++ + ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data, + BXTWC_TMU_LVL1_IRQ, + IRQF_ONESHOT, +diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c +index 2d6e2c392786..4a2fc59d5901 100644 +--- a/drivers/mfd/max8997.c ++++ b/drivers/mfd/max8997.c +@@ -155,12 +155,6 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata( + + pd->ono = irq_of_parse_and_map(dev->of_node, 1); + +- /* +- * ToDo: the 'wakeup' member in the platform data is more of a linux +- * specfic information. Hence, there is no binding for that yet and +- * not parsed here. +- */ +- + return pd; + } + +@@ -248,7 +242,7 @@ static int max8997_i2c_probe(struct i2c_client *i2c, + */ + + /* MAX8997 has a power button input. 
*/ +- device_init_wakeup(max8997->dev, pdata->wakeup); ++ device_init_wakeup(max8997->dev, true); + + return ret; + +diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c +index 6c16f170529f..75d52034f89d 100644 +--- a/drivers/mfd/mc13xxx-core.c ++++ b/drivers/mfd/mc13xxx-core.c +@@ -278,7 +278,8 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode, + if (ret) + goto out; + +- adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2; ++ adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 | ++ MC13XXX_ADC0_CHRGRAWDIV; + adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC; + + if (channel > 7) +diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c +index cac3bcc308a7..7bb929f05d85 100644 +--- a/drivers/misc/mic/scif/scif_fence.c ++++ b/drivers/misc/mic/scif/scif_fence.c +@@ -272,7 +272,7 @@ static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val) + dma_fail: + if (!x100) + dma_pool_free(ep->remote_dev->signal_pool, status, +- status->src_dma_addr); ++ src - offsetof(struct scif_status, val)); + alloc_fail: + return err; + } +diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c +index 267f7ab08420..a2ac9938d945 100644 +--- a/drivers/mmc/host/mtk-sd.c ++++ b/drivers/mmc/host/mtk-sd.c +@@ -885,6 +885,7 @@ static void msdc_start_command(struct msdc_host *host, + WARN_ON(host->cmd); + host->cmd = cmd; + ++ mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT); + if (!msdc_cmd_is_ready(host, mrq, cmd)) + return; + +@@ -896,7 +897,6 @@ static void msdc_start_command(struct msdc_host *host, + + cmd->error = 0; + rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd); +- mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT); + + sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask); + writel(cmd->arg, host->base + SDC_ARG); +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c +index 604c5abc08eb..af666951a959 100644 +--- a/drivers/net/dsa/bcm_sf2.c ++++ 
b/drivers/net/dsa/bcm_sf2.c +@@ -1196,12 +1196,16 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) + return ret; + } + ++ bcm_sf2_gphy_enable_set(priv->dev->ds, true); ++ + ret = bcm_sf2_mdio_register(ds); + if (ret) { + pr_err("failed to register MDIO bus\n"); + return ret; + } + ++ bcm_sf2_gphy_enable_set(priv->dev->ds, false); ++ + ret = bcm_sf2_cfp_rst(priv); + if (ret) { + pr_err("failed to reset CFP\n"); +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c +index 0fff1502267a..be17194487c6 100644 +--- a/drivers/net/dsa/mv88e6xxx/chip.c ++++ b/drivers/net/dsa/mv88e6xxx/chip.c +@@ -2527,7 +2527,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { + .port_set_link = mv88e6xxx_port_set_link, + .port_set_duplex = mv88e6xxx_port_set_duplex, + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, +- .port_set_speed = mv88e6390_port_set_speed, ++ .port_set_speed = mv88e6341_port_set_speed, + .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_frame_mode = mv88e6351_port_set_frame_mode, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, +@@ -3029,7 +3029,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { + .port_set_link = mv88e6xxx_port_set_link, + .port_set_duplex = mv88e6xxx_port_set_duplex, + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, +- .port_set_speed = mv88e6390_port_set_speed, ++ .port_set_speed = mv88e6341_port_set_speed, + .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_frame_mode = mv88e6351_port_set_frame_mode, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, +diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c +index 2cffecfe86e3..fd0a88c56031 100644 +--- a/drivers/net/dsa/mv88e6xxx/port.c ++++ b/drivers/net/dsa/mv88e6xxx/port.c +@@ -203,8 +203,11 @@ static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port, + ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000; + break; + case 2500: +- ctrl = 
MV88E6390_PORT_MAC_CTL_SPEED_10000 | +- MV88E6390_PORT_MAC_CTL_ALTSPEED; ++ if (alt_bit) ++ ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 | ++ MV88E6390_PORT_MAC_CTL_ALTSPEED; ++ else ++ ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000; + break; + case 10000: + /* all bits set, fall through... */ +@@ -266,6 +269,24 @@ int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed) + return mv88e6xxx_port_set_speed(chip, port, speed, false, false); + } + ++/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6341) */ ++int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed) ++{ ++ if (speed == SPEED_MAX) ++ speed = port < 5 ? 1000 : 2500; ++ ++ if (speed > 2500) ++ return -EOPNOTSUPP; ++ ++ if (speed == 200 && port != 0) ++ return -EOPNOTSUPP; ++ ++ if (speed == 2500 && port < 5) ++ return -EOPNOTSUPP; ++ ++ return mv88e6xxx_port_set_speed(chip, port, speed, !port, true); ++} ++ + /* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */ + int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed) + { +diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h +index ccdc67fe9079..8a645683cf6b 100644 +--- a/drivers/net/dsa/mv88e6xxx/port.h ++++ b/drivers/net/dsa/mv88e6xxx/port.h +@@ -262,6 +262,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup); + + int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); + int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); ++int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); + int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); + int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); + int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); +diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig +index 99b30353541a..9e87d7b8360f 100644 +--- 
a/drivers/net/ethernet/amazon/Kconfig ++++ b/drivers/net/ethernet/amazon/Kconfig +@@ -17,7 +17,7 @@ if NET_VENDOR_AMAZON + + config ENA_ETHERNET + tristate "Elastic Network Adapter (ENA) support" +- depends on (PCI_MSI && X86) ++ depends on PCI_MSI && !CPU_BIG_ENDIAN + ---help--- + This driver supports Elastic Network Adapter (ENA)" + +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +index 1cc4fb27c13b..b6af286fa5c7 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +@@ -1138,7 +1138,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv, + break; + } + +- return 0; ++ return ret; + } + + static void bcmgenet_power_up(struct bcmgenet_priv *priv, +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +index 5483cb23c08a..e9cff8ed5e07 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +@@ -2300,7 +2300,7 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + chain = devm_kzalloc(&pdev->dev, sizeof(*chain), + GFP_KERNEL); + if (!chain) +- return -ENOMEM; ++ goto err_free_chain; + + cur_chain->next = chain; + chain->tqp_index = tx_ring->tqp->tqp_index; +@@ -2324,7 +2324,7 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + while (rx_ring) { + chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); + if (!chain) +- return -ENOMEM; ++ goto err_free_chain; + + cur_chain->next = chain; + chain->tqp_index = rx_ring->tqp->tqp_index; +@@ -2336,6 +2336,16 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + } + + return 0; ++ ++err_free_chain: ++ cur_chain = head->next; ++ while (cur_chain) { ++ chain = cur_chain->next; ++ devm_kfree(&pdev->dev, chain); ++ cur_chain = chain; ++ } ++ ++ return -ENOMEM; 
+ } + + static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, +@@ -2530,8 +2540,10 @@ static int hns3_queue_to_ring(struct hnae3_queue *tqp, + return ret; + + ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); +- if (ret) ++ if (ret) { ++ devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring); + return ret; ++ } + + return 0; + } +@@ -2556,6 +2568,12 @@ static int hns3_get_ring_config(struct hns3_nic_priv *priv) + + return 0; + err: ++ while (i--) { ++ devm_kfree(priv->dev, priv->ring_data[i].ring); ++ devm_kfree(priv->dev, ++ priv->ring_data[i + h->kinfo.num_tqps].ring); ++ } ++ + devm_kfree(&pdev->dev, priv->ring_data); + return ret; + } +diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c +index 0746b19ec6d3..295d27f33104 100644 +--- a/drivers/net/ethernet/intel/igb/igb_ptp.c ++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c +@@ -65,9 +65,15 @@ + * + * The 40 bit 82580 SYSTIM overflows every + * 2^40 * 10^-9 / 60 = 18.3 minutes. ++ * ++ * SYSTIM is converted to real time using a timecounter. As ++ * timecounter_cyc2time() allows old timestamps, the timecounter ++ * needs to be updated at least once per half of the SYSTIM interval. ++ * Scheduling of delayed work is not very accurate, so we aim for 8 ++ * minutes to be sure the actual interval is shorter than 9.16 minutes. 
+ */ + +-#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) ++#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8) + #define IGB_PTP_TX_TIMEOUT (HZ * 15) + #define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT) + #define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0) +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +index d631cd94ee63..25a15bdc125e 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +@@ -1722,6 +1722,7 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + err = mlx4_en_get_flow(dev, cmd, cmd->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: ++ cmd->data = MAX_NUM_OF_FS_RULES; + while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) { + err = mlx4_en_get_flow(dev, cmd, i); + if (!err) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 090d54275a7d..387758fc6be4 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -1783,7 +1783,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, + + unlock: + mutex_unlock(&esw->state_lock); +- return 0; ++ return err; + } + + int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, +diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c +index 2cf89126fb23..d765e7a69d6b 100644 +--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c ++++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c +@@ -86,6 +86,8 @@ retry: + return err; + + if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) { ++ fsm_state_err = min_t(enum mlxfw_fsm_state_err, ++ fsm_state_err, MLXFW_FSM_STATE_ERR_MAX); + pr_err("Firmware flash failed: %s\n", + mlxfw_fsm_state_err_str[fsm_state_err]); + return -EINVAL; +diff --git a/drivers/net/ethernet/qlogic/qed/qed.h 
b/drivers/net/ethernet/qlogic/qed/qed.h +index 91003bc6f00b..6c4714a8b54c 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed.h ++++ b/drivers/net/ethernet/qlogic/qed/qed.h +@@ -829,7 +829,7 @@ u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf); + /* Prototypes */ + int qed_fill_dev_info(struct qed_dev *cdev, + struct qed_dev_info *dev_info); +-void qed_link_update(struct qed_hwfn *hwfn); ++void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt); + u32 qed_unzip_data(struct qed_hwfn *p_hwfn, + u32 input_len, u8 *input_buf, + u32 max_size, u8 *unzip_buf); +diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c +index 557332f1f886..52e747fd9c83 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_main.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c +@@ -1389,6 +1389,7 @@ static int qed_get_link_data(struct qed_hwfn *hwfn, + } + + static void qed_fill_link(struct qed_hwfn *hwfn, ++ struct qed_ptt *ptt, + struct qed_link_output *if_link) + { + struct qed_mcp_link_params params; +@@ -1469,7 +1470,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn, + + /* TODO - fill duplex properly */ + if_link->duplex = DUPLEX_FULL; +- qed_mcp_get_media_type(hwfn->cdev, &media_type); ++ qed_mcp_get_media_type(hwfn, ptt, &media_type); + if_link->port = qed_get_port_type(media_type); + + if_link->autoneg = params.speed.autoneg; +@@ -1525,21 +1526,34 @@ static void qed_fill_link(struct qed_hwfn *hwfn, + static void qed_get_current_link(struct qed_dev *cdev, + struct qed_link_output *if_link) + { ++ struct qed_hwfn *hwfn; ++ struct qed_ptt *ptt; + int i; + +- qed_fill_link(&cdev->hwfns[0], if_link); ++ hwfn = &cdev->hwfns[0]; ++ if (IS_PF(cdev)) { ++ ptt = qed_ptt_acquire(hwfn); ++ if (ptt) { ++ qed_fill_link(hwfn, ptt, if_link); ++ qed_ptt_release(hwfn, ptt); ++ } else { ++ DP_NOTICE(hwfn, "Failed to fill link; No PTT\n"); ++ } ++ } else { ++ qed_fill_link(hwfn, NULL, if_link); ++ } + + for_each_hwfn(cdev, i) + 
qed_inform_vf_link_state(&cdev->hwfns[i]); + } + +-void qed_link_update(struct qed_hwfn *hwfn) ++void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) + { + void *cookie = hwfn->cdev->ops_cookie; + struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; + struct qed_link_output if_link; + +- qed_fill_link(hwfn, &if_link); ++ qed_fill_link(hwfn, ptt, &if_link); + qed_inform_vf_link_state(hwfn); + + if (IS_LEAD_HWFN(hwfn) && cookie) +diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c +index 7938abe9a301..ef17ca09d303 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c +@@ -1352,7 +1352,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) + qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link); + +- qed_link_update(p_hwfn); ++ qed_link_update(p_hwfn, p_ptt); + out: + spin_unlock_bh(&p_hwfn->mcp_info->link_lock); + } +@@ -1722,12 +1722,10 @@ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, + return 0; + } + +-int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type) ++int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn, ++ struct qed_ptt *p_ptt, u32 *p_media_type) + { +- struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; +- struct qed_ptt *p_ptt; +- +- if (IS_VF(cdev)) ++ if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + if (!qed_mcp_is_init(p_hwfn)) { +@@ -1735,16 +1733,15 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type) + return -EBUSY; + } + +- *p_media_type = MEDIA_UNSPECIFIED; +- +- p_ptt = qed_ptt_acquire(p_hwfn); +- if (!p_ptt) +- return -EBUSY; +- +- *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + +- offsetof(struct public_port, media_type)); ++ if (!p_ptt) { ++ *p_media_type = MEDIA_UNSPECIFIED; ++ return -EINVAL; ++ } + +- qed_ptt_release(p_hwfn, p_ptt); ++ *p_media_type = qed_rd(p_hwfn, p_ptt, ++ 
p_hwfn->mcp_info->port_addr + ++ offsetof(struct public_port, ++ media_type)); + + return 0; + } +diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h +index f1fe5e3427ea..8fcdb2c3e5db 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h ++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h +@@ -284,14 +284,15 @@ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, + * @brief Get media type value of the port. + * + * @param cdev - qed dev pointer ++ * @param p_ptt + * @param mfw_ver - media type value + * + * @return int - + * 0 - Operation was successul. + * -EBUSY - Operation failed + */ +-int qed_mcp_get_media_type(struct qed_dev *cdev, +- u32 *media_type); ++int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn, ++ struct qed_ptt *p_ptt, u32 *media_type); + + /** + * @brief General function for sending commands to the MCP +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c +index 3220086f99de..a2a9921b467b 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c +@@ -1669,7 +1669,7 @@ static void qed_handle_bulletin_change(struct qed_hwfn *hwfn) + ops->ports_update(cookie, vxlan_port, geneve_port); + + /* Always update link configuration according to bulletin */ +- qed_link_update(hwfn); ++ qed_link_update(hwfn, NULL); + } + + void qed_iov_vf_task(struct work_struct *work) +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c +index 4b76c69fe86d..834208e55f7b 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c +@@ -883,7 +883,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid, + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) +- return 0; ++ return 1; + + switch (capid) { + case DCB_CAP_ATTR_PG: +diff --git 
a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c +index 60cdb97f58e2..f22690792697 100644 +--- a/drivers/net/ethernet/sfc/ptp.c ++++ b/drivers/net/ethernet/sfc/ptp.c +@@ -1320,7 +1320,8 @@ void efx_ptp_remove(struct efx_nic *efx) + (void)efx_ptp_disable(efx); + + cancel_work_sync(&efx->ptp_data->work); +- cancel_work_sync(&efx->ptp_data->pps_work); ++ if (efx->ptp_data->pps_workwq) ++ cancel_work_sync(&efx->ptp_data->pps_work); + + skb_queue_purge(&efx->ptp_data->rxq); + skb_queue_purge(&efx->ptp_data->txq); +diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c +index 8cb44eabc283..a44838aac97d 100644 +--- a/drivers/net/ethernet/ti/cpsw.c ++++ b/drivers/net/ethernet/ti/cpsw.c +@@ -601,6 +601,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) + + /* Clear all mcast from ALE */ + cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1); ++ __dev_mc_unsync(ndev, NULL); + + /* Flood All Unicast Packets to Host port */ + cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c +index 9bcb7c3e879f..9bb65e0af7dd 100644 +--- a/drivers/net/macsec.c ++++ b/drivers/net/macsec.c +@@ -2798,9 +2798,6 @@ static int macsec_dev_open(struct net_device *dev) + struct net_device *real_dev = macsec->real_dev; + int err; + +- if (!(real_dev->flags & IFF_UP)) +- return -ENETDOWN; +- + err = dev_uc_add(real_dev, dev->dev_addr); + if (err < 0) + return err; +@@ -3273,6 +3270,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev, + if (err < 0) + goto del_dev; + ++ netif_stacked_transfer_operstate(real_dev, dev); ++ linkwatch_fire_event(dev); ++ + macsec_generation++; + + return 0; +@@ -3444,6 +3444,20 @@ static int macsec_notify(struct notifier_block *this, unsigned long event, + return NOTIFY_DONE; + + switch (event) { ++ case NETDEV_DOWN: ++ case NETDEV_UP: ++ case NETDEV_CHANGE: { ++ struct macsec_dev *m, *n; ++ struct macsec_rxh_data *rxd; ++ ++ rxd = 
macsec_data_rtnl(real_dev); ++ list_for_each_entry_safe(m, n, &rxd->secys, secys) { ++ struct net_device *dev = m->secy.netdev; ++ ++ netif_stacked_transfer_operstate(real_dev, dev); ++ } ++ break; ++ } + case NETDEV_UNREGISTER: { + struct macsec_dev *m, *n; + struct macsec_rxh_data *rxd; +diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c +index 0250aa9ae2cb..97bf49ad81a6 100644 +--- a/drivers/net/ntb_netdev.c ++++ b/drivers/net/ntb_netdev.c +@@ -236,7 +236,7 @@ static void ntb_netdev_tx_timer(unsigned long data) + struct ntb_netdev *dev = netdev_priv(ndev); + + if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) { +- mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(tx_time)); ++ mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time)); + } else { + /* Make sure anybody stopping the queue after this sees the new + * value of ntb_transport_tx_free_entry() +diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c +index 12b09e6e03ba..e03e91d5f1b1 100644 +--- a/drivers/net/phy/dp83867.c ++++ b/drivers/net/phy/dp83867.c +@@ -33,10 +33,18 @@ + + /* Extended Registers */ + #define DP83867_CFG4 0x0031 ++#define DP83867_CFG4_SGMII_ANEG_MASK (BIT(5) | BIT(6)) ++#define DP83867_CFG4_SGMII_ANEG_TIMER_11MS (3 << 5) ++#define DP83867_CFG4_SGMII_ANEG_TIMER_800US (2 << 5) ++#define DP83867_CFG4_SGMII_ANEG_TIMER_2US (1 << 5) ++#define DP83867_CFG4_SGMII_ANEG_TIMER_16MS (0 << 5) ++ + #define DP83867_RGMIICTL 0x0032 + #define DP83867_STRAP_STS1 0x006E + #define DP83867_RGMIIDCTL 0x0086 + #define DP83867_IO_MUX_CFG 0x0170 ++#define DP83867_10M_SGMII_CFG 0x016F ++#define DP83867_10M_SGMII_RATE_ADAPT_MASK BIT(7) + + #define DP83867_SW_RESET BIT(15) + #define DP83867_SW_RESTART BIT(14) +@@ -283,6 +291,35 @@ static int dp83867_config_init(struct phy_device *phydev) + } + } + ++ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { ++ /* For support SPEED_10 in SGMII mode ++ * DP83867_10M_SGMII_RATE_ADAPT bit ++ * has to be cleared by software. 
That ++ * does not affect SPEED_100 and ++ * SPEED_1000. ++ */ ++ val = phy_read_mmd(phydev, DP83867_DEVADDR, ++ DP83867_10M_SGMII_CFG); ++ val &= ~DP83867_10M_SGMII_RATE_ADAPT_MASK; ++ ret = phy_write_mmd(phydev, DP83867_DEVADDR, ++ DP83867_10M_SGMII_CFG, val); ++ ++ if (ret) ++ return ret; ++ ++ /* After reset SGMII Autoneg timer is set to 2us (bits 6 and 5 ++ * are 01). That is not enough to finalize autoneg on some ++ * devices. Increase this timer duration to maximum 16ms. ++ */ ++ val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4); ++ val &= ~DP83867_CFG4_SGMII_ANEG_MASK; ++ val |= DP83867_CFG4_SGMII_ANEG_TIMER_16MS; ++ ret = phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val); ++ ++ if (ret) ++ return ret; ++ } ++ + /* Enable Interrupt output INT_OE in CFG3 register */ + if (phy_interrupt_is_valid(phydev)) { + val = phy_read(phydev, DP83867_CFG3); +diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c +index 03e4fcdfeab7..e0cea5c05f0e 100644 +--- a/drivers/net/vrf.c ++++ b/drivers/net/vrf.c +@@ -996,24 +996,23 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, + struct sk_buff *skb) + { + int orig_iif = skb->skb_iif; +- bool need_strict; ++ bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); ++ bool is_ndisc = ipv6_ndisc_frame(skb); + +- /* loopback traffic; do not push through packet taps again. +- * Reset pkt_type for upper layers to process skb ++ /* loopback, multicast & non-ND link-local traffic; do not push through ++ * packet taps again. 
Reset pkt_type for upper layers to process skb + */ +- if (skb->pkt_type == PACKET_LOOPBACK) { ++ if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) { + skb->dev = vrf_dev; + skb->skb_iif = vrf_dev->ifindex; + IP6CB(skb)->flags |= IP6SKB_L3SLAVE; +- skb->pkt_type = PACKET_HOST; ++ if (skb->pkt_type == PACKET_LOOPBACK) ++ skb->pkt_type = PACKET_HOST; + goto out; + } + +- /* if packet is NDISC or addressed to multicast or link-local +- * then keep the ingress interface +- */ +- need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); +- if (!ipv6_ndisc_frame(skb) && !need_strict) { ++ /* if packet is NDISC then keep the ingress interface */ ++ if (!is_ndisc) { + vrf_rx_stats(vrf_dev, skb->len); + skb->dev = vrf_dev; + skb->skb_iif = vrf_dev->ifindex; +diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c +index 27ab3eb47534..0298ddc1ff06 100644 +--- a/drivers/net/wireless/ath/ath10k/pci.c ++++ b/drivers/net/wireless/ath/ath10k/pci.c +@@ -1039,10 +1039,9 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, + struct ath10k_ce *ce = ath10k_ce_priv(ar); + int ret = 0; + u32 *buf; +- unsigned int completed_nbytes, orig_nbytes, remaining_bytes; ++ unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; + struct ath10k_ce_pipe *ce_diag; + void *data_buf = NULL; +- u32 ce_data; /* Host buffer address in CE space */ + dma_addr_t ce_data_base = 0; + int i; + +@@ -1056,9 +1055,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, + * 1) 4-byte alignment + * 2) Buffer in DMA-able space + */ +- orig_nbytes = nbytes; ++ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); ++ + data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, +- orig_nbytes, ++ alloc_nbytes, + &ce_data_base, + GFP_ATOMIC); + if (!data_buf) { +@@ -1066,9 +1066,6 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, + goto done; + } + +- /* Copy caller's data to allocated DMA buf */ +- memcpy(data_buf, 
data, orig_nbytes); +- + /* + * The address supplied by the caller is in the + * Target CPU virtual address space. +@@ -1081,12 +1078,14 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, + */ + address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); + +- remaining_bytes = orig_nbytes; +- ce_data = ce_data_base; ++ remaining_bytes = nbytes; + while (remaining_bytes) { + /* FIXME: check cast */ + nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); + ++ /* Copy caller's data to allocated DMA buf */ ++ memcpy(data_buf, data, nbytes); ++ + /* Set up to receive directly into Target(!) address */ + ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address); + if (ret != 0) +@@ -1096,7 +1095,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, + * Request CE to send caller-supplied data that + * was copied to bounce buffer to Target(!) address. + */ +- ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data, ++ ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base, + nbytes, 0, 0); + if (ret != 0) + goto done; +@@ -1137,12 +1136,12 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, + + remaining_bytes -= nbytes; + address += nbytes; +- ce_data += nbytes; ++ data += nbytes; + } + + done: + if (data_buf) { +- dma_free_coherent(ar->dev, orig_nbytes, data_buf, ++ dma_free_coherent(ar->dev, alloc_nbytes, data_buf, + ce_data_base); + } + +diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c +index f09a4ad2e9de..f9c79e21ab22 100644 +--- a/drivers/net/wireless/ath/ath10k/usb.c ++++ b/drivers/net/wireless/ath/ath10k/usb.c +@@ -49,6 +49,10 @@ ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe) + struct ath10k_urb_context *urb_context = NULL; + unsigned long flags; + ++ /* bail if this pipe is not initialized */ ++ if (!pipe->ar_usb) ++ return NULL; ++ + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); + if (!list_empty(&pipe->urb_list_head)) { + urb_context = 
list_first_entry(&pipe->urb_list_head, +@@ -66,6 +70,10 @@ static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe, + { + unsigned long flags; + ++ /* bail if this pipe is not initialized */ ++ if (!pipe->ar_usb) ++ return; ++ + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); + + pipe->urb_cnt++; +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +index 3dbfd86ebe36..76385834a7de 100644 +--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c ++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +@@ -4116,7 +4116,7 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah) + + static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah) + { +- u32 data, ko, kg; ++ u32 data = 0, ko, kg; + + if (!AR_SREV_9462_20_OR_LATER(ah)) + return; +diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c +index d63d7c326801..798516f42f2f 100644 +--- a/drivers/net/wireless/ath/wil6210/wmi.c ++++ b/drivers/net/wireless/ath/wil6210/wmi.c +@@ -1002,15 +1002,16 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, + { + int rc; + unsigned long remain; ++ ulong flags; + + mutex_lock(&wil->wmi_mutex); + +- spin_lock(&wil->wmi_ev_lock); ++ spin_lock_irqsave(&wil->wmi_ev_lock, flags); + wil->reply_id = reply_id; + wil->reply_buf = reply; + wil->reply_size = reply_size; + reinit_completion(&wil->wmi_call); +- spin_unlock(&wil->wmi_ev_lock); ++ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); + + rc = __wmi_send(wil, cmdid, buf, len); + if (rc) +@@ -1030,11 +1031,11 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, + } + + out: +- spin_lock(&wil->wmi_ev_lock); ++ spin_lock_irqsave(&wil->wmi_ev_lock, flags); + wil->reply_id = 0; + wil->reply_buf = NULL; + wil->reply_size = 0; +- spin_unlock(&wil->wmi_ev_lock); ++ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); + + mutex_unlock(&wil->wmi_mutex); + +diff --git 
a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +index ddfdfe177e24..66f1f41b1380 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +@@ -502,6 +502,7 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) + } + + spin_lock_bh(&wl->lock); ++ wl->wlc->vif = vif; + wl->mute_tx = false; + brcms_c_mute(wl->wlc, false); + if (vif->type == NL80211_IFTYPE_STATION) +@@ -519,6 +520,11 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) + static void + brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) + { ++ struct brcms_info *wl = hw->priv; ++ ++ spin_lock_bh(&wl->lock); ++ wl->wlc->vif = NULL; ++ spin_unlock_bh(&wl->lock); + } + + static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed) +@@ -840,8 +846,8 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw, + status = brcms_c_aggregatable(wl->wlc, tid); + spin_unlock_bh(&wl->lock); + if (!status) { +- brcms_err(wl->wlc->hw->d11core, +- "START: tid %d is not agg\'able\n", tid); ++ brcms_dbg_ht(wl->wlc->hw->d11core, ++ "START: tid %d is not agg\'able\n", tid); + return -EINVAL; + } + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); +@@ -937,6 +943,25 @@ static void brcms_ops_set_tsf(struct ieee80211_hw *hw, + spin_unlock_bh(&wl->lock); + } + ++static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw, ++ struct ieee80211_sta *sta, bool set) ++{ ++ struct brcms_info *wl = hw->priv; ++ struct sk_buff *beacon = NULL; ++ u16 tim_offset = 0; ++ ++ spin_lock_bh(&wl->lock); ++ if (wl->wlc->vif) ++ beacon = ieee80211_beacon_get_tim(hw, wl->wlc->vif, ++ &tim_offset, NULL); ++ if (beacon) ++ brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset, ++ wl->wlc->vif->bss_conf.dtim_period); ++ spin_unlock_bh(&wl->lock); ++ ++ return 0; ++} ++ + static const struct 
ieee80211_ops brcms_ops = { + .tx = brcms_ops_tx, + .start = brcms_ops_start, +@@ -955,6 +980,7 @@ static const struct ieee80211_ops brcms_ops = { + .flush = brcms_ops_flush, + .get_tsf = brcms_ops_get_tsf, + .set_tsf = brcms_ops_set_tsf, ++ .set_tim = brcms_ops_beacon_set_tim, + }; + + void brcms_dpc(unsigned long data) +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h +index c4d135cff04a..9f76b880814e 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h +@@ -563,6 +563,7 @@ struct brcms_c_info { + + struct wiphy *wiphy; + struct scb pri_scb; ++ struct ieee80211_vif *vif; + + struct sk_buff *beacon; + u16 beacon_tim_offset; +diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c +index 54201c02fdb8..fc49255bab00 100644 +--- a/drivers/net/wireless/cisco/airo.c ++++ b/drivers/net/wireless/cisco/airo.c +@@ -5464,7 +5464,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) { + we have to add a spin lock... 
*/ + rc = readBSSListRid(ai, doLoseSync, &BSSList_rid); + while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) { +- ptr += sprintf(ptr, "%pM %*s rssi = %d", ++ ptr += sprintf(ptr, "%pM %.*s rssi = %d", + BSSList_rid.bssid, + (int)BSSList_rid.ssidLen, + BSSList_rid.ssid, +diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +index dde47c548818..5e8e34a08b2d 100644 +--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c ++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +@@ -362,11 +362,20 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy, + struct mwifiex_power_cfg power_cfg; + int dbm = MBM_TO_DBM(mbm); + +- if (type == NL80211_TX_POWER_FIXED) { ++ switch (type) { ++ case NL80211_TX_POWER_FIXED: + power_cfg.is_power_auto = 0; ++ power_cfg.is_power_fixed = 1; + power_cfg.power_level = dbm; +- } else { ++ break; ++ case NL80211_TX_POWER_LIMITED: ++ power_cfg.is_power_auto = 0; ++ power_cfg.is_power_fixed = 0; ++ power_cfg.power_level = dbm; ++ break; ++ case NL80211_TX_POWER_AUTOMATIC: + power_cfg.is_power_auto = 1; ++ break; + } + + priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); +diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h +index 48e154e1865d..0dd592ea6e83 100644 +--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h ++++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h +@@ -267,6 +267,7 @@ struct mwifiex_ds_encrypt_key { + + struct mwifiex_power_cfg { + u32 is_power_auto; ++ u32 is_power_fixed; + u32 power_level; + }; + +diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +index 82828a207963..a8043d76152a 100644 +--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c ++++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +@@ -728,6 +728,9 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, + txp_cfg = (struct host_cmd_ds_txpwr_cfg *) buf; + txp_cfg->action = 
cpu_to_le16(HostCmd_ACT_GEN_SET); + if (!power_cfg->is_power_auto) { ++ u16 dbm_min = power_cfg->is_power_fixed ? ++ dbm : priv->min_tx_power_level; ++ + txp_cfg->mode = cpu_to_le32(1); + pg_tlv = (struct mwifiex_types_power_group *) + (buf + sizeof(struct host_cmd_ds_txpwr_cfg)); +@@ -742,7 +745,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, + pg->last_rate_code = 0x03; + pg->modulation_class = MOD_CLASS_HR_DSSS; + pg->power_step = 0; +- pg->power_min = (s8) dbm; ++ pg->power_min = (s8) dbm_min; + pg->power_max = (s8) dbm; + pg++; + /* Power group for modulation class OFDM */ +@@ -750,7 +753,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, + pg->last_rate_code = 0x07; + pg->modulation_class = MOD_CLASS_OFDM; + pg->power_step = 0; +- pg->power_min = (s8) dbm; ++ pg->power_min = (s8) dbm_min; + pg->power_max = (s8) dbm; + pg++; + /* Power group for modulation class HTBW20 */ +@@ -758,7 +761,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, + pg->last_rate_code = 0x20; + pg->modulation_class = MOD_CLASS_HT; + pg->power_step = 0; +- pg->power_min = (s8) dbm; ++ pg->power_min = (s8) dbm_min; + pg->power_max = (s8) dbm; + pg->ht_bandwidth = HT_BW_20; + pg++; +@@ -767,7 +770,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, + pg->last_rate_code = 0x20; + pg->modulation_class = MOD_CLASS_HT; + pg->power_step = 0; +- pg->power_min = (s8) dbm; ++ pg->power_min = (s8) dbm_min; + pg->power_max = (s8) dbm; + pg->ht_bandwidth = HT_BW_40; + } +diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +index 7806a4d2b1fc..91b01ca32e75 100644 +--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c ++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +@@ -5691,6 +5691,7 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + break; + case WLAN_CIPHER_SUITE_TKIP: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; ++ break; + default: + 
return -EOPNOTSUPP; + } +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c +index f4129cf96e7c..bad70a4206fb 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c +@@ -173,7 +173,7 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw) + rtl_read_byte(rtlpriv, FW_MAC1_READY)); + } + RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, +- "Polling FW ready fail!! REG_MCUFWDL:0x%08ul\n", ++ "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n", + rtl_read_dword(rtlpriv, REG_MCUFWDL)); + return -1; + } +diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c +index 5c0bcb1fe1a1..e75c3cee0252 100644 +--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c ++++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c +@@ -66,7 +66,7 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy, + out: + mutex_unlock(&wl->mutex); + +- return 0; ++ return ret; + } + + static int +diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c +index bb43cebda9dc..60ae382f50da 100644 +--- a/drivers/nfc/port100.c ++++ b/drivers/nfc/port100.c +@@ -792,7 +792,7 @@ static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out, + + rc = port100_submit_urb_for_ack(dev, GFP_KERNEL); + if (rc) +- usb_unlink_urb(dev->out_urb); ++ usb_kill_urb(dev->out_urb); + + exit: + mutex_unlock(&dev->out_urb_lock); +diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c +index 2557e2c05b90..58068f1447bb 100644 +--- a/drivers/ntb/hw/intel/ntb_hw_intel.c ++++ b/drivers/ntb/hw/intel/ntb_hw_intel.c +@@ -348,7 +348,7 @@ static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits, + return 0; + } + +-static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector) ++static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector) + { + u64 shift, mask; + +diff --git 
a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c +index 0b0a4825b3eb..096523d8dd42 100644 +--- a/drivers/nvme/target/fcloop.c ++++ b/drivers/nvme/target/fcloop.c +@@ -535,6 +535,7 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport, + break; + + /* Fall-Thru to RSP handling */ ++ /* FALLTHRU */ + + case NVMET_FCOP_RSP: + if (fcpreq) { +diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c +index 87650d42682f..9d204649c963 100644 +--- a/drivers/of/unittest.c ++++ b/drivers/of/unittest.c +@@ -910,20 +910,44 @@ static void __init of_unittest_platform_populate(void) + * of np into dup node (present in live tree) and + * updates parent of children of np to dup. + * +- * @np: node already present in live tree ++ * @np: node whose properties are being added to the live tree + * @dup: node present in live tree to be updated + */ + static void update_node_properties(struct device_node *np, + struct device_node *dup) + { + struct property *prop; ++ struct property *save_next; + struct device_node *child; +- +- for_each_property_of_node(np, prop) +- of_add_property(dup, prop); ++ int ret; + + for_each_child_of_node(np, child) + child->parent = dup; ++ ++ /* ++ * "unittest internal error: unable to add testdata property" ++ * ++ * If this message reports a property in node '/__symbols__' then ++ * the respective unittest overlay contains a label that has the ++ * same name as a label in the live devicetree. The label will ++ * be in the live devicetree only if the devicetree source was ++ * compiled with the '-@' option. If you encounter this error, ++ * please consider renaming __all__ of the labels in the unittest ++ * overlay dts files with an odd prefix that is unlikely to be ++ * used in a real devicetree. 
++ */ ++ ++ /* ++ * open code for_each_property_of_node() because of_add_property() ++ * sets prop->next to NULL ++ */ ++ for (prop = np->properties; prop != NULL; prop = save_next) { ++ save_next = prop->next; ++ ret = of_add_property(dup, prop); ++ if (ret) ++ pr_err("unittest internal error: unable to add testdata property %pOF/%s", ++ np, prop->name); ++ } + } + + /** +@@ -932,18 +956,23 @@ static void update_node_properties(struct device_node *np, + * + * @np: Node to attach to live tree + */ +-static int attach_node_and_children(struct device_node *np) ++static void attach_node_and_children(struct device_node *np) + { + struct device_node *next, *dup, *child; + unsigned long flags; + const char *full_name; + + full_name = kasprintf(GFP_KERNEL, "%pOF", np); ++ ++ if (!strcmp(full_name, "/__local_fixups__") || ++ !strcmp(full_name, "/__fixups__")) ++ return; ++ + dup = of_find_node_by_path(full_name); + kfree(full_name); + if (dup) { + update_node_properties(np, dup); +- return 0; ++ return; + } + + child = np->child; +@@ -964,8 +993,6 @@ static int attach_node_and_children(struct device_node *np) + attach_node_and_children(child); + child = next; + } +- +- return 0; + } + + /** +diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c +index 9bc52e4cf52a..3ea8288c1605 100644 +--- a/drivers/pci/dwc/pci-keystone.c ++++ b/drivers/pci/dwc/pci-keystone.c +@@ -39,6 +39,7 @@ + #define PCIE_RC_K2HK 0xb008 + #define PCIE_RC_K2E 0xb009 + #define PCIE_RC_K2L 0xb00a ++#define PCIE_RC_K2G 0xb00b + + #define to_keystone_pcie(x) dev_get_drvdata((x)->dev) + +@@ -53,6 +54,8 @@ static void quirk_limit_mrrs(struct pci_dev *dev) + .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L), + .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, ++ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G), ++ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + { 0, }, + }; + +diff --git a/drivers/pci/host/vmd.c 
b/drivers/pci/host/vmd.c +index 2537b022f42d..af6d5da10ea5 100644 +--- a/drivers/pci/host/vmd.c ++++ b/drivers/pci/host/vmd.c +@@ -753,12 +753,12 @@ static void vmd_remove(struct pci_dev *dev) + { + struct vmd_dev *vmd = pci_get_drvdata(dev); + +- vmd_detach_resources(vmd); + sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); + pci_stop_root_bus(vmd->bus); + pci_remove_root_bus(vmd->bus); + vmd_cleanup_srcu(vmd); + vmd_teardown_dma_ops(vmd); ++ vmd_detach_resources(vmd); + irq_domain_remove(vmd->irq_domain); + } + +diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c +index d090f37ca4a1..8b4e3582af6e 100644 +--- a/drivers/pinctrl/pinctrl-lpc18xx.c ++++ b/drivers/pinctrl/pinctrl-lpc18xx.c +@@ -630,14 +630,8 @@ static const struct pinctrl_pin_desc lpc18xx_pins[] = { + LPC18XX_PIN(i2c0_sda, PIN_I2C0_SDA), + }; + +-/** +- * enum lpc18xx_pin_config_param - possible pin configuration parameters +- * @PIN_CONFIG_GPIO_PIN_INT: route gpio to the gpio pin interrupt +- * controller. +- */ +-enum lpc18xx_pin_config_param { +- PIN_CONFIG_GPIO_PIN_INT = PIN_CONFIG_END + 1, +-}; ++/* PIN_CONFIG_GPIO_PIN_INT: route gpio to the gpio pin interrupt controller */ ++#define PIN_CONFIG_GPIO_PIN_INT (PIN_CONFIG_END + 1) + + static const struct pinconf_generic_params lpc18xx_params[] = { + {"nxp,gpio-pin-interrupt", PIN_CONFIG_GPIO_PIN_INT, 0}, +diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c +index a0daf27042bd..90fd37e8207b 100644 +--- a/drivers/pinctrl/pinctrl-zynq.c ++++ b/drivers/pinctrl/pinctrl-zynq.c +@@ -971,15 +971,12 @@ enum zynq_io_standards { + zynq_iostd_max + }; + +-/** +- * enum zynq_pin_config_param - possible pin configuration parameters +- * @PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard, the argument to ++/* ++ * PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard, the argument to + * this parameter (on a custom format) tells the driver which alternative + * IO standard to use. 
+ */ +-enum zynq_pin_config_param { +- PIN_CONFIG_IOSTANDARD = PIN_CONFIG_END + 1, +-}; ++#define PIN_CONFIG_IOSTANDARD (PIN_CONFIG_END + 1) + + static const struct pinconf_generic_params zynq_dt_params[] = { + {"io-standard", PIN_CONFIG_IOSTANDARD, zynq_iostd_lvcmos18}, +diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +index 22aaf4375fac..0f0049dfaa3a 100644 +--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c ++++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +@@ -1023,10 +1023,23 @@ static int pmic_gpio_probe(struct platform_device *pdev) + return ret; + } + +- ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins); +- if (ret) { +- dev_err(dev, "failed to add pin range\n"); +- goto err_range; ++ /* ++ * For DeviceTree-supported systems, the gpio core checks the ++ * pinctrl's device node for the "gpio-ranges" property. ++ * If it is present, it takes care of adding the pin ranges ++ * for the driver. In this case the driver can skip ahead. ++ * ++ * In order to remain compatible with older, existing DeviceTree ++ * files which don't set the "gpio-ranges" property or systems that ++ * utilize ACPI the driver has to call gpiochip_add_pin_range(). 
++ */ ++ if (!of_property_read_bool(dev->of_node, "gpio-ranges")) { ++ ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, ++ npins); ++ if (ret) { ++ dev_err(dev, "failed to add pin range\n"); ++ goto err_range; ++ } + } + + return 0; +diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c +index 52edf3b5988d..cc8b86a16da0 100644 +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c +@@ -1039,6 +1039,7 @@ static int sunxi_pinctrl_add_function(struct sunxi_pinctrl *pctl, + static int sunxi_pinctrl_build_state(struct platform_device *pdev) + { + struct sunxi_pinctrl *pctl = platform_get_drvdata(pdev); ++ void *ptr; + int i; + + /* +@@ -1105,13 +1106,15 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev) + } + + /* And now allocated and fill the array for real */ +- pctl->functions = krealloc(pctl->functions, +- pctl->nfunctions * sizeof(*pctl->functions), +- GFP_KERNEL); +- if (!pctl->functions) { ++ ptr = krealloc(pctl->functions, ++ pctl->nfunctions * sizeof(*pctl->functions), ++ GFP_KERNEL); ++ if (!ptr) { + kfree(pctl->functions); ++ pctl->functions = NULL; + return -ENOMEM; + } ++ pctl->functions = ptr; + + for (i = 0; i < pctl->desc->npins; i++) { + const struct sunxi_desc_pin *pin = pctl->desc->pins + i; +diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c +index 9c4b0d7f15c3..59f3a37a44d7 100644 +--- a/drivers/platform/x86/asus-nb-wmi.c ++++ b/drivers/platform/x86/asus-nb-wmi.c +@@ -78,10 +78,12 @@ static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str, + + static struct quirk_entry quirk_asus_unknown = { + .wapf = 0, ++ .wmi_backlight_set_devstate = true, + }; + + static struct quirk_entry quirk_asus_q500a = { + .i8042_filter = asus_q500a_i8042_filter, ++ .wmi_backlight_set_devstate = true, + }; + + /* +@@ -92,26 +94,32 @@ static struct quirk_entry quirk_asus_q500a = { + static struct quirk_entry 
quirk_asus_x55u = { + .wapf = 4, + .wmi_backlight_power = true, ++ .wmi_backlight_set_devstate = true, + .no_display_toggle = true, + }; + + static struct quirk_entry quirk_asus_wapf4 = { + .wapf = 4, ++ .wmi_backlight_set_devstate = true, + }; + + static struct quirk_entry quirk_asus_x200ca = { + .wapf = 2, ++ .wmi_backlight_set_devstate = true, + }; + + static struct quirk_entry quirk_asus_ux303ub = { + .wmi_backlight_native = true, ++ .wmi_backlight_set_devstate = true, + }; + + static struct quirk_entry quirk_asus_x550lb = { ++ .wmi_backlight_set_devstate = true, + .xusb2pr = 0x01D9, + }; + +-static struct quirk_entry quirk_asus_ux330uak = { ++static struct quirk_entry quirk_asus_forceals = { ++ .wmi_backlight_set_devstate = true, + .wmi_force_als_set = true, + }; + +@@ -387,7 +395,7 @@ static const struct dmi_system_id asus_quirks[] = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "UX330UAK"), + }, +- .driver_data = &quirk_asus_ux330uak, ++ .driver_data = &quirk_asus_forceals, + }, + { + .callback = dmi_matched, +@@ -398,6 +406,15 @@ static const struct dmi_system_id asus_quirks[] = { + }, + .driver_data = &quirk_asus_x550lb, + }, ++ { ++ .callback = dmi_matched, ++ .ident = "ASUSTeK COMPUTER INC. 
UX430UQ", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "UX430UQ"), ++ }, ++ .driver_data = &quirk_asus_forceals, ++ }, + {}, + }; + +diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c +index 3f662cd774d7..1c1999600717 100644 +--- a/drivers/platform/x86/asus-wmi.c ++++ b/drivers/platform/x86/asus-wmi.c +@@ -2147,7 +2147,7 @@ static int asus_wmi_add(struct platform_device *pdev) + err = asus_wmi_backlight_init(asus); + if (err && err != -ENODEV) + goto fail_backlight; +- } else ++ } else if (asus->driver->quirks->wmi_backlight_set_devstate) + err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL); + + status = wmi_install_notify_handler(asus->driver->event_guid, +diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h +index 6c1311f4b04d..57a79bddb286 100644 +--- a/drivers/platform/x86/asus-wmi.h ++++ b/drivers/platform/x86/asus-wmi.h +@@ -44,6 +44,7 @@ struct quirk_entry { + bool store_backlight_power; + bool wmi_backlight_power; + bool wmi_backlight_native; ++ bool wmi_backlight_set_devstate; + bool wmi_force_als_set; + int wapf; + /* +diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c +index 4721a264bac2..1e69c1c9ec09 100644 +--- a/drivers/pwm/pwm-lpss.c ++++ b/drivers/pwm/pwm-lpss.c +@@ -97,7 +97,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, + unsigned long long on_time_div; + unsigned long c = lpwm->info->clk_rate, base_unit_range; + unsigned long long base_unit, freq = NSEC_PER_SEC; +- u32 ctrl; ++ u32 orig_ctrl, ctrl; + + do_div(freq, period_ns); + +@@ -114,13 +114,17 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, + do_div(on_time_div, period_ns); + on_time_div = 255ULL - on_time_div; + +- ctrl = pwm_lpss_read(pwm); ++ orig_ctrl = ctrl = pwm_lpss_read(pwm); + ctrl &= ~PWM_ON_TIME_DIV_MASK; + ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT); + base_unit &= 
base_unit_range; + ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT; + ctrl |= on_time_div; +- pwm_lpss_write(pwm, ctrl); ++ ++ if (orig_ctrl != ctrl) { ++ pwm_lpss_write(pwm, ctrl); ++ pwm_lpss_write(pwm, ctrl | PWM_SW_UPDATE); ++ } + } + + static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond) +@@ -144,7 +148,6 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, + return ret; + } + pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); +- pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE); + pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false); + ret = pwm_lpss_wait_for_update(pwm); + if (ret) { +@@ -157,7 +160,6 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, + if (ret) + return ret; + pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); +- pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE); + return pwm_lpss_wait_for_update(pwm); + } + } else if (pwm_is_enabled(pwm)) { +diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c +index 7067bca5c20d..6bfff0a6d655 100644 +--- a/drivers/rtc/rtc-s35390a.c ++++ b/drivers/rtc/rtc-s35390a.c +@@ -108,7 +108,7 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len) + + static int s35390a_init(struct s35390a *s35390a) + { +- char buf; ++ u8 buf; + int ret; + unsigned initcount = 0; + +diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c +index 5ee7f44cf869..830b2d2dcf20 100644 +--- a/drivers/scsi/dc395x.c ++++ b/drivers/scsi/dc395x.c +@@ -1972,6 +1972,11 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left) + xferred -= psge->length; + } else { + /* Partial SG entry done */ ++ pci_dma_sync_single_for_cpu(srb->dcb-> ++ acb->dev, ++ srb->sg_bus_addr, ++ SEGMENTX_LEN, ++ PCI_DMA_TODEVICE); + psge->length -= xferred; + psge->address += xferred; + srb->sg_index = idx; +@@ -3450,14 +3455,12 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, + } + } + +- 
if (dir != PCI_DMA_NONE && scsi_sg_count(cmd)) +- pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd), +- scsi_sg_count(cmd), dir); +- + ckc_only = 0; + /* Check Error Conditions */ + ckc_e: + ++ pci_unmap_srb(acb, srb); ++ + if (cmd->cmnd[0] == INQUIRY) { + unsigned char *base = NULL; + struct ScsiInqData *ptr; +@@ -3511,7 +3514,6 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, + cmd, cmd->result); + srb_free_insert(acb, srb); + } +- pci_unmap_srb(acb, srb); + + cmd->scsi_done(cmd); + waiting_process_next(acb); +diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c +index 67621308eb9c..ea652f1e2071 100644 +--- a/drivers/scsi/ips.c ++++ b/drivers/scsi/ips.c +@@ -3497,6 +3497,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb) + + case START_STOP: + scb->scsi_cmd->result = DID_OK << 16; ++ break; + + case TEST_UNIT_READY: + case INQUIRY: +diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c +index 609dafd661d1..da4583a2fa23 100644 +--- a/drivers/scsi/isci/host.c ++++ b/drivers/scsi/isci/host.c +@@ -2717,9 +2717,9 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq) + * the task management request. + * @task_request: the handle to the task request object to start. 
+ */ +-enum sci_task_status sci_controller_start_task(struct isci_host *ihost, +- struct isci_remote_device *idev, +- struct isci_request *ireq) ++enum sci_status sci_controller_start_task(struct isci_host *ihost, ++ struct isci_remote_device *idev, ++ struct isci_request *ireq) + { + enum sci_status status; + +@@ -2728,7 +2728,7 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost, + "%s: SCIC Controller starting task from invalid " + "state\n", + __func__); +- return SCI_TASK_FAILURE_INVALID_STATE; ++ return SCI_FAILURE_INVALID_STATE; + } + + status = sci_remote_device_start_task(ihost, idev, ireq); +diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h +index b3539928073c..6bc3f022630a 100644 +--- a/drivers/scsi/isci/host.h ++++ b/drivers/scsi/isci/host.h +@@ -489,7 +489,7 @@ enum sci_status sci_controller_start_io( + struct isci_remote_device *idev, + struct isci_request *ireq); + +-enum sci_task_status sci_controller_start_task( ++enum sci_status sci_controller_start_task( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); +diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c +index ed197bc8e801..2f151708b59a 100644 +--- a/drivers/scsi/isci/request.c ++++ b/drivers/scsi/isci/request.c +@@ -1626,9 +1626,9 @@ static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq, + + if (status == SCI_SUCCESS) { + if (ireq->stp.rsp.status & ATA_ERR) +- status = SCI_IO_FAILURE_RESPONSE_VALID; ++ status = SCI_FAILURE_IO_RESPONSE_VALID; + } else { +- status = SCI_IO_FAILURE_RESPONSE_VALID; ++ status = SCI_FAILURE_IO_RESPONSE_VALID; + } + + if (status != SCI_SUCCESS) { +diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c +index 6dcaed0c1fc8..fb6eba331ac6 100644 +--- a/drivers/scsi/isci/task.c ++++ b/drivers/scsi/isci/task.c +@@ -258,7 +258,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost, + struct isci_tmf *tmf, unsigned long timeout_ms) + { + 
DECLARE_COMPLETION_ONSTACK(completion); +- enum sci_task_status status = SCI_TASK_FAILURE; ++ enum sci_status status = SCI_FAILURE; + struct isci_request *ireq; + int ret = TMF_RESP_FUNC_FAILED; + unsigned long flags; +@@ -301,7 +301,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost, + /* start the TMF io. */ + status = sci_controller_start_task(ihost, idev, ireq); + +- if (status != SCI_TASK_SUCCESS) { ++ if (status != SCI_SUCCESS) { + dev_dbg(&ihost->pdev->dev, + "%s: start_io failed - status = 0x%x, request = %p\n", + __func__, +diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c +index e11eff6b0e97..045207b5560e 100644 +--- a/drivers/scsi/iscsi_tcp.c ++++ b/drivers/scsi/iscsi_tcp.c +@@ -798,7 +798,8 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, + return rc; + + return iscsi_conn_get_addr_param((struct sockaddr_storage *) +- &addr, param, buf); ++ &addr, ++ (enum iscsi_param)param, buf); + default: + return iscsi_host_get_param(shost, param, buf); + } +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c +index ddd29752d96d..e5db20e8979d 100644 +--- a/drivers/scsi/lpfc/lpfc_els.c ++++ b/drivers/scsi/lpfc/lpfc_els.c +@@ -1152,6 +1152,7 @@ stop_rr_fcf_flogi: + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); + spin_unlock_irq(&phba->hbalock); ++ phba->fcf.fcf_redisc_attempted = 0; /* reset */ + goto out; + } + if (!rc) { +@@ -1166,6 +1167,7 @@ stop_rr_fcf_flogi: + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); + spin_unlock_irq(&phba->hbalock); ++ phba->fcf.fcf_redisc_attempted = 0; /* reset */ + goto out; + } + } +@@ -1548,8 +1550,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, + */ + new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); + ++ /* return immediately if the WWPN matches ndlp */ + if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp)) + return ndlp; ++ + if (phba->sli_rev == 
LPFC_SLI_REV4) { + active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, + GFP_KERNEL); +@@ -1558,9 +1562,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, + phba->cfg_rrq_xri_bitmap_sz); + } + +- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, +- "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n", +- ndlp, ndlp->nlp_DID, new_ndlp); ++ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, ++ "3178 PLOGI confirm: ndlp x%x x%x x%x: " ++ "new_ndlp x%x x%x x%x\n", ++ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, ++ (new_ndlp ? new_ndlp->nlp_DID : 0), ++ (new_ndlp ? new_ndlp->nlp_flag : 0), ++ (new_ndlp ? new_ndlp->nlp_fc4_type : 0)); + + if (!new_ndlp) { + rc = memcmp(&ndlp->nlp_portname, name, +@@ -1609,6 +1617,14 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, + phba->cfg_rrq_xri_bitmap_sz); + } + ++ /* At this point in this routine, we know new_ndlp will be ++ * returned. however, any previous GID_FTs that were done ++ * would have updated nlp_fc4_type in ndlp, so we must ensure ++ * new_ndlp has the right value. 
++ */ ++ if (vport->fc_flag & FC_FABRIC) ++ new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; ++ + lpfc_unreg_rpi(vport, new_ndlp); + new_ndlp->nlp_DID = ndlp->nlp_DID; + new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; +@@ -1730,6 +1746,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, + active_rrqs_xri_bitmap) + mempool_free(active_rrqs_xri_bitmap, + phba->active_rrq_pool); ++ ++ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, ++ "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", ++ new_ndlp->nlp_DID, new_ndlp->nlp_flag, ++ new_ndlp->nlp_fc4_type); ++ + return new_ndlp; + } + +diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c +index b970933a218d..d850077c5e22 100644 +--- a/drivers/scsi/lpfc/lpfc_hbadisc.c ++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c +@@ -1999,6 +1999,26 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) + "failover and change port state:x%x/x%x\n", + phba->pport->port_state, LPFC_VPORT_UNKNOWN); + phba->pport->port_state = LPFC_VPORT_UNKNOWN; ++ ++ if (!phba->fcf.fcf_redisc_attempted) { ++ lpfc_unregister_fcf(phba); ++ ++ rc = lpfc_sli4_redisc_fcf_table(phba); ++ if (!rc) { ++ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, ++ "3195 Rediscover FCF table\n"); ++ phba->fcf.fcf_redisc_attempted = 1; ++ lpfc_sli4_clear_fcf_rr_bmask(phba); ++ } else { ++ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, ++ "3196 Rediscover FCF table " ++ "failed. Status:x%x\n", rc); ++ } ++ } else { ++ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, ++ "3197 Already rediscover FCF table " ++ "attempted. 
No more retry\n"); ++ } + goto stop_flogi_current_fcf; + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c +index 25612ccf6ff2..15bcd00dd7a2 100644 +--- a/drivers/scsi/lpfc/lpfc_init.c ++++ b/drivers/scsi/lpfc/lpfc_init.c +@@ -4997,7 +4997,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, + break; + } + /* If fast FCF failover rescan event is pending, do nothing */ +- if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { ++ if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { + spin_unlock_irq(&phba->hbalock); + break; + } +diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c +index a0658d158228..043bca6449cd 100644 +--- a/drivers/scsi/lpfc/lpfc_nportdisc.c ++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c +@@ -2829,8 +2829,9 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0211 DSM in event x%x on NPort x%x in " +- "state %d Data: x%x\n", +- evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag); ++ "state %d Data: x%x x%x\n", ++ evt, ndlp->nlp_DID, cur_state, ++ ndlp->nlp_flag, ndlp->nlp_fc4_type); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, + "DSM in: evt:%d ste:%d did:x%x", +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c +index 6c2b098b7609..ebf7d3cda367 100644 +--- a/drivers/scsi/lpfc/lpfc_sli.c ++++ b/drivers/scsi/lpfc/lpfc_sli.c +@@ -18056,15 +18056,8 @@ next_priority: + goto initial_priority; + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2844 No roundrobin failover FCF available\n"); +- if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) +- return LPFC_FCOE_FCF_NEXT_NONE; +- else { +- lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, +- "3063 Only FCF available idx %d, flag %x\n", +- next_fcf_index, +- phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag); +- return 
next_fcf_index; +- } ++ ++ return LPFC_FCOE_FCF_NEXT_NONE; + } + + if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && +diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h +index 60200385fe00..a132a83ef233 100644 +--- a/drivers/scsi/lpfc/lpfc_sli4.h ++++ b/drivers/scsi/lpfc/lpfc_sli4.h +@@ -265,6 +265,7 @@ struct lpfc_fcf { + #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ + #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ + #define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT) ++ uint16_t fcf_redisc_attempted; + uint32_t addr_mode; + uint32_t eligible_fcf_cnt; + struct lpfc_fcf_rec current_rec; +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c +index 8595d83229b7..577513649afb 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c +@@ -3823,12 +3823,12 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) + /* + * The cur_state should not last for more than max_wait secs + */ +- for (i = 0; i < (max_wait * 1000); i++) { ++ for (i = 0; i < max_wait; i++) { + curr_abs_state = instance->instancet-> + read_fw_status_reg(instance->reg_set); + + if (abs_state == curr_abs_state) { +- msleep(1); ++ msleep(1000); + } else + break; + } +@@ -5324,7 +5324,7 @@ static int megasas_init_fw(struct megasas_instance *instance) + if (!instance->msix_vectors) { + i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); + if (i < 0) +- goto fail_setup_irqs; ++ goto fail_init_adapter; + } + + megasas_setup_reply_map(instance); +@@ -5541,9 +5541,8 @@ static int megasas_init_fw(struct megasas_instance *instance) + + fail_get_ld_pd_list: + instance->instancet->disable_intr(instance); +-fail_init_adapter: + megasas_destroy_irqs(instance); +-fail_setup_irqs: ++fail_init_adapter: + if (instance->msix_vectors) + pci_free_irq_vectors(instance->pdev); + instance->msix_vectors = 0; +diff --git 
a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c +index 7bfe53f48d1d..817a7963a038 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c +@@ -3140,7 +3140,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) + * flag unset in NVDATA. + */ + mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11); +- if (ioc->manu_pg11.EEDPTagMode == 0) { ++ if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) { + pr_err("%s: overriding NVDATA EEDPTagMode setting\n", + ioc->name); + ioc->manu_pg11.EEDPTagMode &= ~0x3; +diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c +index dd6270125614..58acbff40abc 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_config.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_config.c +@@ -674,10 +674,6 @@ mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; +- r = _config_request(ioc, &mpi_request, mpi_reply, +- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, +- sizeof(*config_page)); + out: + return r; + } +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +index b28efddab7b1..9ef0c6265cd2 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +@@ -3328,6 +3328,40 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + return _scsih_check_for_pending_tm(ioc, smid); + } + ++/** _scsih_allow_scmd_to_device - check whether scmd needs to ++ * issue to IOC or not. ++ * @ioc: per adapter object ++ * @scmd: pointer to scsi command object ++ * ++ * Returns true if scmd can be issued to IOC otherwise returns false. 
++ */ ++inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc, ++ struct scsi_cmnd *scmd) ++{ ++ ++ if (ioc->pci_error_recovery) ++ return false; ++ ++ if (ioc->hba_mpi_version_belonged == MPI2_VERSION) { ++ if (ioc->remove_host) ++ return false; ++ ++ return true; ++ } ++ ++ if (ioc->remove_host) { ++ ++ switch (scmd->cmnd[0]) { ++ case SYNCHRONIZE_CACHE: ++ case START_STOP: ++ return true; ++ default: ++ return false; ++ } ++ } ++ ++ return true; ++} + + /** + * _scsih_sas_control_complete - completion routine +@@ -4100,7 +4134,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) + return 0; + } + +- if (ioc->pci_error_recovery || ioc->remove_host) { ++ if (!(_scsih_allow_scmd_to_device(ioc, scmd))) { + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + return 0; +diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c +index 9bf64e6eca9b..1db4d3c1d2bf 100644 +--- a/drivers/spi/spi-omap2-mcspi.c ++++ b/drivers/spi/spi-omap2-mcspi.c +@@ -298,7 +298,7 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi, + struct omap2_mcspi_cs *cs = spi->controller_state; + struct omap2_mcspi *mcspi; + unsigned int wcnt; +- int max_fifo_depth, fifo_depth, bytes_per_word; ++ int max_fifo_depth, bytes_per_word; + u32 chconf, xferlevel; + + mcspi = spi_master_get_devdata(master); +@@ -314,10 +314,6 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi, + else + max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH; + +- fifo_depth = gcd(t->len, max_fifo_depth); +- if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0) +- goto disable_fifo; +- + wcnt = t->len / bytes_per_word; + if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT) + goto disable_fifo; +@@ -325,16 +321,17 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi, + xferlevel = wcnt << 16; + if (t->rx_buf != NULL) { + chconf |= OMAP2_MCSPI_CHCONF_FFER; +- xferlevel |= (fifo_depth - 1) << 8; ++ xferlevel |= (bytes_per_word - 1) << 8; + } ++ + if (t->tx_buf != 
NULL) { + chconf |= OMAP2_MCSPI_CHCONF_FFET; +- xferlevel |= fifo_depth - 1; ++ xferlevel |= bytes_per_word - 1; + } + + mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel); + mcspi_write_chconf0(spi, chconf); +- mcspi->fifo_depth = fifo_depth; ++ mcspi->fifo_depth = max_fifo_depth; + + return; + } +@@ -601,7 +598,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) + struct dma_slave_config cfg; + enum dma_slave_buswidth width; + unsigned es; +- u32 burst; + void __iomem *chstat_reg; + void __iomem *irqstat_reg; + int wait_res; +@@ -623,22 +619,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) + } + + count = xfer->len; +- burst = 1; +- +- if (mcspi->fifo_depth > 0) { +- if (count > mcspi->fifo_depth) +- burst = mcspi->fifo_depth / es; +- else +- burst = count / es; +- } + + memset(&cfg, 0, sizeof(cfg)); + cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0; + cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0; + cfg.src_addr_width = width; + cfg.dst_addr_width = width; +- cfg.src_maxburst = burst; +- cfg.dst_maxburst = burst; ++ cfg.src_maxburst = 1; ++ cfg.dst_maxburst = 1; + + rx = xfer->rx_buf; + tx = xfer->tx_buf; +diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c +index db2a529accae..a7bd3c92356b 100644 +--- a/drivers/spi/spi-sh-msiof.c ++++ b/drivers/spi/spi-sh-msiof.c +@@ -1283,8 +1283,8 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) + + i = platform_get_irq(pdev, 0); + if (i < 0) { +- dev_err(&pdev->dev, "cannot get platform IRQ\n"); +- ret = -ENOENT; ++ dev_err(&pdev->dev, "cannot get IRQ\n"); ++ ret = i; + goto err1; + } + +diff --git a/drivers/staging/ccree/cc_hw_queue_defs.h b/drivers/staging/ccree/cc_hw_queue_defs.h +index 2ae0f655e7a0..b86f47712e30 100644 +--- a/drivers/staging/ccree/cc_hw_queue_defs.h ++++ b/drivers/staging/ccree/cc_hw_queue_defs.h +@@ -467,8 +467,7 @@ static inline void set_flow_mode(struct cc_hw_desc *pdesc, + * @pdesc: pointer HW descriptor struct + * 
@mode: Any one of the modes defined in [CC7x-DESC] + */ +-static inline void set_cipher_mode(struct cc_hw_desc *pdesc, +- enum drv_cipher_mode mode) ++static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode) + { + pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode); + } +@@ -479,8 +478,7 @@ static inline void set_cipher_mode(struct cc_hw_desc *pdesc, + * @pdesc: pointer HW descriptor struct + * @mode: Any one of the modes defined in [CC7x-DESC] + */ +-static inline void set_cipher_config0(struct cc_hw_desc *pdesc, +- enum drv_crypto_direction mode) ++static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode) + { + pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode); + } +diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c +index 608403c7586b..f0572d6a5f63 100644 +--- a/drivers/staging/comedi/drivers/usbduxfast.c ++++ b/drivers/staging/comedi/drivers/usbduxfast.c +@@ -1,5 +1,5 @@ + /* +- * Copyright (C) 2004-2014 Bernd Porr, mail@berndporr.me.uk ++ * Copyright (C) 2004-2019 Bernd Porr, mail@berndporr.me.uk + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -17,7 +17,7 @@ + * Description: University of Stirling USB DAQ & INCITE Technology Limited + * Devices: [ITL] USB-DUX-FAST (usbduxfast) + * Author: Bernd Porr <mail@berndporr.me.uk> +- * Updated: 10 Oct 2014 ++ * Updated: 16 Nov 2019 + * Status: stable + */ + +@@ -31,6 +31,7 @@ + * + * + * Revision history: ++ * 1.0: Fixed a rounding error in usbduxfast_ai_cmdtest + * 0.9: Dropping the first data packet which seems to be from the last transfer. + * Buffer overflows in the FX2 are handed over to comedi. + * 0.92: Dropping now 4 packets. The quad buffer has to be emptied. 
+@@ -359,6 +360,7 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev, + struct comedi_cmd *cmd) + { + int err = 0; ++ int err2 = 0; + unsigned int steps; + unsigned int arg; + +@@ -408,11 +410,16 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev, + */ + steps = (cmd->convert_arg * 30) / 1000; + if (cmd->chanlist_len != 1) +- err |= comedi_check_trigger_arg_min(&steps, +- MIN_SAMPLING_PERIOD); +- err |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD); +- arg = (steps * 1000) / 30; +- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg); ++ err2 |= comedi_check_trigger_arg_min(&steps, ++ MIN_SAMPLING_PERIOD); ++ else ++ err2 |= comedi_check_trigger_arg_min(&steps, 1); ++ err2 |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD); ++ if (err2) { ++ err |= err2; ++ arg = (steps * 1000) / 30; ++ err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg); ++ } + + if (cmd->stop_src == TRIG_COUNT) + err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1); +diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c +index 73e5fee6cf1d..83126e2dce36 100644 +--- a/drivers/thermal/rcar_thermal.c ++++ b/drivers/thermal/rcar_thermal.c +@@ -401,8 +401,8 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data) + rcar_thermal_for_each_priv(priv, common) { + if (rcar_thermal_had_changed(priv, status)) { + rcar_thermal_irq_disable(priv); +- schedule_delayed_work(&priv->work, +- msecs_to_jiffies(300)); ++ queue_delayed_work(system_freezable_wq, &priv->work, ++ msecs_to_jiffies(300)); + } + } + +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c +index 9e26c530d2dd..b3208b1b1028 100644 +--- a/drivers/tty/pty.c ++++ b/drivers/tty/pty.c +@@ -28,6 +28,7 @@ + #include <linux/mount.h> + #include <linux/file.h> + #include <linux/ioctl.h> ++#include <linux/compat.h> + + #undef TTY_DEBUG_HANGUP + #ifdef TTY_DEBUG_HANGUP +@@ -488,6 +489,7 @@ static int pty_bsd_ioctl(struct tty_struct *tty, + return -ENOIOCTLCMD; + } + ++#ifdef 
CONFIG_COMPAT + static long pty_bsd_compat_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) + { +@@ -495,8 +497,11 @@ static long pty_bsd_compat_ioctl(struct tty_struct *tty, + * PTY ioctls don't require any special translation between 32-bit and + * 64-bit userspace, they are already compatible. + */ +- return pty_bsd_ioctl(tty, cmd, arg); ++ return pty_bsd_ioctl(tty, cmd, (unsigned long)compat_ptr(arg)); + } ++#else ++#define pty_bsd_compat_ioctl NULL ++#endif + + static int legacy_count = CONFIG_LEGACY_PTY_COUNT; + /* +@@ -676,6 +681,7 @@ static int pty_unix98_ioctl(struct tty_struct *tty, + return -ENOIOCTLCMD; + } + ++#ifdef CONFIG_COMPAT + static long pty_unix98_compat_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) + { +@@ -683,8 +689,12 @@ static long pty_unix98_compat_ioctl(struct tty_struct *tty, + * PTY ioctls don't require any special translation between 32-bit and + * 64-bit userspace, they are already compatible. + */ +- return pty_unix98_ioctl(tty, cmd, arg); ++ return pty_unix98_ioctl(tty, cmd, ++ cmd == TIOCSIG ? 
arg : (unsigned long)compat_ptr(arg)); + } ++#else ++#define pty_unix98_compat_ioctl NULL ++#endif + + /** + * ptm_unix98_lookup - find a pty master +diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c +index 636b8ae29b46..344e8c427c7e 100644 +--- a/drivers/tty/synclink_gt.c ++++ b/drivers/tty/synclink_gt.c +@@ -1187,14 +1187,13 @@ static long slgt_compat_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) + { + struct slgt_info *info = tty->driver_data; +- int rc = -ENOIOCTLCMD; ++ int rc; + + if (sanity_check(info, tty->name, "compat_ioctl")) + return -ENODEV; + DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd)); + + switch (cmd) { +- + case MGSL_IOCSPARAMS32: + rc = set_params32(info, compat_ptr(arg)); + break; +@@ -1214,18 +1213,11 @@ static long slgt_compat_ioctl(struct tty_struct *tty, + case MGSL_IOCWAITGPIO: + case MGSL_IOCGXSYNC: + case MGSL_IOCGXCTRL: +- case MGSL_IOCSTXIDLE: +- case MGSL_IOCTXENABLE: +- case MGSL_IOCRXENABLE: +- case MGSL_IOCTXABORT: +- case TIOCMIWAIT: +- case MGSL_IOCSIF: +- case MGSL_IOCSXSYNC: +- case MGSL_IOCSXCTRL: +- rc = ioctl(tty, cmd, arg); ++ rc = ioctl(tty, cmd, (unsigned long)compat_ptr(arg)); + break; ++ default: ++ rc = ioctl(tty, cmd, arg); + } +- + DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc)); + return rc; + } +diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c +index 03be7c75c5be..aad7963e40e7 100644 +--- a/drivers/usb/misc/appledisplay.c ++++ b/drivers/usb/misc/appledisplay.c +@@ -160,8 +160,11 @@ static int appledisplay_bl_update_status(struct backlight_device *bd) + pdata->msgdata, 2, + ACD_USB_TIMEOUT); + mutex_unlock(&pdata->sysfslock); +- +- return retval; ++ ++ if (retval < 0) ++ return retval; ++ else ++ return 0; + } + + static int appledisplay_bl_get_brightness(struct backlight_device *bd) +@@ -179,7 +182,12 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd) + 0, + pdata->msgdata, 2, 
+ ACD_USB_TIMEOUT); +- brightness = pdata->msgdata[1]; ++ if (retval < 2) { ++ if (retval >= 0) ++ retval = -EMSGSIZE; ++ } else { ++ brightness = pdata->msgdata[1]; ++ } + mutex_unlock(&pdata->sysfslock); + + if (retval < 0) +@@ -314,6 +322,7 @@ error: + if (pdata) { + if (pdata->urb) { + usb_kill_urb(pdata->urb); ++ cancel_delayed_work_sync(&pdata->work); + if (pdata->urbdata) + usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN, + pdata->urbdata, pdata->urb->transfer_dma); +diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c +index eb0795c5ff7a..3a701c1e9e75 100644 +--- a/drivers/usb/misc/chaoskey.c ++++ b/drivers/usb/misc/chaoskey.c +@@ -396,13 +396,17 @@ static int _chaoskey_fill(struct chaoskey *dev) + !dev->reading, + (started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) ); + +- if (result < 0) ++ if (result < 0) { ++ usb_kill_urb(dev->urb); + goto out; ++ } + +- if (result == 0) ++ if (result == 0) { + result = -ETIMEDOUT; +- else ++ usb_kill_urb(dev->urb); ++ } else { + result = dev->valid; ++ } + out: + /* Let the device go back to sleep eventually */ + usb_autopm_put_interface(dev->interface); +@@ -538,7 +542,21 @@ static int chaoskey_suspend(struct usb_interface *interface, + + static int chaoskey_resume(struct usb_interface *interface) + { ++ struct chaoskey *dev; ++ struct usb_device *udev = interface_to_usbdev(interface); ++ + usb_dbg(interface, "resume"); ++ dev = usb_get_intfdata(interface); ++ ++ /* ++ * We may have lost power. 
++ * In that case the device that needs a long time ++ * for the first requests needs an extended timeout ++ * again ++ */ ++ if (le16_to_cpu(udev->descriptor.idVendor) == ALEA_VENDOR_ID) ++ dev->reads_started = false; ++ + return 0; + } + #else +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 98e466c3cfca..8dd9852f399d 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -128,6 +128,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ + { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ + { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ ++ { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */ + { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */ + { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ + { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c +index 393a91ab56ed..37967f4d93fd 100644 +--- a/drivers/usb/serial/mos7720.c ++++ b/drivers/usb/serial/mos7720.c +@@ -1905,10 +1905,6 @@ static int mos7720_startup(struct usb_serial *serial) + product = le16_to_cpu(serial->dev->descriptor.idProduct); + dev = serial->dev; + +- /* setting configuration feature to one */ +- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), +- (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000); +- + if (product == MOSCHIP_DEVICE_ID_7715) { + struct urb *urb = serial->port[0]->interrupt_in_urb; + +diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c +index 5e490177cf75..285527f115dd 100644 +--- a/drivers/usb/serial/mos7840.c ++++ b/drivers/usb/serial/mos7840.c +@@ -131,11 +131,15 @@ + /* This driver also supports + * ATEN UC2324 device using Moschip MCS7840 + * ATEN UC2322 device using Moschip MCS7820 ++ * MOXA UPort 2210 device using Moschip MCS7820 + */ + 
#define USB_VENDOR_ID_ATENINTL 0x0557 + #define ATENINTL_DEVICE_ID_UC2324 0x2011 + #define ATENINTL_DEVICE_ID_UC2322 0x7820 + ++#define USB_VENDOR_ID_MOXA 0x110a ++#define MOXA_DEVICE_ID_2210 0x2210 ++ + /* Interrupt Routine Defines */ + + #define SERIAL_IIR_RLS 0x06 +@@ -206,6 +210,7 @@ static const struct usb_device_id id_table[] = { + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, + {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, + {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, ++ {USB_DEVICE(USB_VENDOR_ID_MOXA, MOXA_DEVICE_ID_2210)}, + {} /* terminating entry */ + }; + MODULE_DEVICE_TABLE(usb, id_table); +@@ -2065,6 +2070,7 @@ static int mos7840_probe(struct usb_serial *serial, + const struct usb_device_id *id) + { + u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); ++ u16 vid = le16_to_cpu(serial->dev->descriptor.idVendor); + u8 *buf; + int device_type; + +@@ -2074,6 +2080,11 @@ static int mos7840_probe(struct usb_serial *serial, + goto out; + } + ++ if (vid == USB_VENDOR_ID_MOXA && product == MOXA_DEVICE_ID_2210) { ++ device_type = MOSCHIP_DEVICE_ID_7820; ++ goto out; ++ } ++ + buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL); + if (!buf) + return -ENOMEM; +@@ -2327,11 +2338,6 @@ out: + goto error; + } else + dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status); +- +- /* setting configuration feature to one */ +- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), +- 0x03, 0x00, 0x01, 0x00, NULL, 0x00, +- MOS_WDR_TIMEOUT); + } + return 0; + error: +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index b9fad046828d..8d349f2e5656 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -200,6 +200,7 @@ static void option_instat_callback(struct urb *urb); + #define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */ + + #define DELL_PRODUCT_5821E 0x81d7 ++#define DELL_PRODUCT_5821E_ESIM 0x81e0 + + #define KYOCERA_VENDOR_ID 
0x0c88 + #define KYOCERA_PRODUCT_KPC650 0x17da +@@ -1047,6 +1048,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E), + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, ++ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM), ++ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, +@@ -1992,6 +1995,10 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) }, ++ { USB_DEVICE(0x0489, 0xe0b4), /* Foxconn T77W968 */ ++ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, ++ { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */ ++ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */ + .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, + { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */ +diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c +index 8812d3edade1..cb24b22252e4 100644 +--- a/drivers/usb/usbip/stub_rx.c ++++ b/drivers/usb/usbip/stub_rx.c +@@ -487,18 +487,50 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, + if (pipe == -1) + return; + ++ /* ++ * Smatch reported the error case where use_sg is true and buf_len is 0. ++ * In this case, It adds SDEV_EVENT_ERROR_MALLOC and stub_priv will be ++ * released by stub event handler and connection will be shut down. 
++ */ + priv = stub_priv_alloc(sdev, pdu); + if (!priv) + return; + + buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length; + ++ if (use_sg && !buf_len) { ++ dev_err(&udev->dev, "sg buffer with zero length\n"); ++ goto err_malloc; ++ } ++ + /* allocate urb transfer buffer, if needed */ + if (buf_len) { + if (use_sg) { + sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents); + if (!sgl) + goto err_malloc; ++ ++ /* Check if the server's HCD supports SG */ ++ if (!udev->bus->sg_tablesize) { ++ /* ++ * If the server's HCD doesn't support SG, break ++ * a single SG request into several URBs and map ++ * each SG list entry to corresponding URB ++ * buffer. The previously allocated SG list is ++ * stored in priv->sgl (If the server's HCD ++ * support SG, SG list is stored only in ++ * urb->sg) and it is used as an indicator that ++ * the server split single SG request into ++ * several URBs. Later, priv->sgl is used by ++ * stub_complete() and stub_send_ret_submit() to ++ * reassemble the divied URBs. ++ */ ++ support_sg = 0; ++ num_urbs = nents; ++ priv->completed_urbs = 0; ++ pdu->u.cmd_submit.transfer_flags &= ++ ~URB_DMA_MAP_SG; ++ } + } else { + buffer = kzalloc(buf_len, GFP_KERNEL); + if (!buffer) +@@ -506,24 +538,6 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, + } + } + +- /* Check if the server's HCD supports SG */ +- if (use_sg && !udev->bus->sg_tablesize) { +- /* +- * If the server's HCD doesn't support SG, break a single SG +- * request into several URBs and map each SG list entry to +- * corresponding URB buffer. The previously allocated SG +- * list is stored in priv->sgl (If the server's HCD support SG, +- * SG list is stored only in urb->sg) and it is used as an +- * indicator that the server split single SG request into +- * several URBs. Later, priv->sgl is used by stub_complete() and +- * stub_send_ret_submit() to reassemble the divied URBs. 
+- */ +- support_sg = 0; +- num_urbs = nents; +- priv->completed_urbs = 0; +- pdu->u.cmd_submit.transfer_flags &= ~URB_DMA_MAP_SG; +- } +- + /* allocate urb array */ + priv->num_urbs = num_urbs; + priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL); +diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c +index 5b9db5deffbb..491de830b8d9 100644 +--- a/drivers/vhost/vsock.c ++++ b/drivers/vhost/vsock.c +@@ -103,7 +103,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, + struct iov_iter iov_iter; + unsigned out, in; + size_t nbytes; +- size_t len; ++ size_t iov_len, payload_len; + int head; + + spin_lock_bh(&vsock->send_pkt_list_lock); +@@ -148,8 +148,24 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, + break; + } + +- len = iov_length(&vq->iov[out], in); +- iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len); ++ iov_len = iov_length(&vq->iov[out], in); ++ if (iov_len < sizeof(pkt->hdr)) { ++ virtio_transport_free_pkt(pkt); ++ vq_err(vq, "Buffer len [%zu] too small\n", iov_len); ++ break; ++ } ++ ++ iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len); ++ payload_len = pkt->len - pkt->off; ++ ++ /* If the packet is greater than the space available in the ++ * buffer, we split it using multiple buffers. 
++ */ ++ if (payload_len > iov_len - sizeof(pkt->hdr)) ++ payload_len = iov_len - sizeof(pkt->hdr); ++ ++ /* Set the correct length in the header */ ++ pkt->hdr.len = cpu_to_le32(payload_len); + + nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter); + if (nbytes != sizeof(pkt->hdr)) { +@@ -158,33 +174,47 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, + break; + } + +- nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter); +- if (nbytes != pkt->len) { ++ nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len, ++ &iov_iter); ++ if (nbytes != payload_len) { + virtio_transport_free_pkt(pkt); + vq_err(vq, "Faulted on copying pkt buf\n"); + break; + } + +- vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len); ++ vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); + added = true; + +- if (pkt->reply) { +- int val; +- +- val = atomic_dec_return(&vsock->queued_replies); +- +- /* Do we have resources to resume tx processing? */ +- if (val + 1 == tx_vq->num) +- restart_tx = true; +- } +- + /* Deliver to monitoring devices all correctly transmitted + * packets. + */ + virtio_transport_deliver_tap_pkt(pkt); + +- total_len += pkt->len; +- virtio_transport_free_pkt(pkt); ++ pkt->off += payload_len; ++ total_len += payload_len; ++ ++ /* If we didn't send all the payload we can requeue the packet ++ * to send it with the next available buffer. ++ */ ++ if (pkt->off < pkt->len) { ++ spin_lock_bh(&vsock->send_pkt_list_lock); ++ list_add(&pkt->list, &vsock->send_pkt_list); ++ spin_unlock_bh(&vsock->send_pkt_list_lock); ++ } else { ++ if (pkt->reply) { ++ int val; ++ ++ val = atomic_dec_return(&vsock->queued_replies); ++ ++ /* Do we have resources to resume tx ++ * processing? 
++ */ ++ if (val + 1 == tx_vq->num) ++ restart_tx = true; ++ } ++ ++ virtio_transport_free_pkt(pkt); ++ } + } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len))); + if (added) + vhost_signal(&vsock->dev, vq); +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c +index cc9d421c0929..b82bb0b08161 100644 +--- a/drivers/virtio/virtio_ring.c ++++ b/drivers/virtio/virtio_ring.c +@@ -432,7 +432,7 @@ unmap_release: + kfree(desc); + + END_USE(vq); +- return -EIO; ++ return -ENOMEM; + } + + /** +diff --git a/drivers/w1/slaves/w1_ds2438.c b/drivers/w1/slaves/w1_ds2438.c +index bf641a191d07..7c4e33dbee4d 100644 +--- a/drivers/w1/slaves/w1_ds2438.c ++++ b/drivers/w1/slaves/w1_ds2438.c +@@ -186,8 +186,8 @@ static int w1_ds2438_change_config_bit(struct w1_slave *sl, u8 mask, u8 value) + return -1; + } + +-static uint16_t w1_ds2438_get_voltage(struct w1_slave *sl, +- int adc_input, uint16_t *voltage) ++static int w1_ds2438_get_voltage(struct w1_slave *sl, ++ int adc_input, uint16_t *voltage) + { + unsigned int retries = W1_DS2438_RETRIES; + u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/]; +@@ -235,6 +235,25 @@ post_unlock: + return ret; + } + ++static int w1_ds2438_get_current(struct w1_slave *sl, int16_t *voltage) ++{ ++ u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/]; ++ int ret; ++ ++ mutex_lock(&sl->master->bus_mutex); ++ ++ if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) { ++ /* The voltage measured across current sense resistor RSENS. 
*/ ++ *voltage = (((int16_t) w1_buf[DS2438_CURRENT_MSB]) << 8) | ((int16_t) w1_buf[DS2438_CURRENT_LSB]); ++ ret = 0; ++ } else ++ ret = -1; ++ ++ mutex_unlock(&sl->master->bus_mutex); ++ ++ return ret; ++} ++ + static ssize_t iad_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +@@ -257,6 +276,27 @@ static ssize_t iad_write(struct file *filp, struct kobject *kobj, + return ret; + } + ++static ssize_t iad_read(struct file *filp, struct kobject *kobj, ++ struct bin_attribute *bin_attr, char *buf, ++ loff_t off, size_t count) ++{ ++ struct w1_slave *sl = kobj_to_w1_slave(kobj); ++ int ret; ++ int16_t voltage; ++ ++ if (off != 0) ++ return 0; ++ if (!buf) ++ return -EINVAL; ++ ++ if (w1_ds2438_get_current(sl, &voltage) == 0) { ++ ret = snprintf(buf, count, "%i\n", voltage); ++ } else ++ ret = -EIO; ++ ++ return ret; ++} ++ + static ssize_t page0_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +@@ -272,9 +312,13 @@ static ssize_t page0_read(struct file *filp, struct kobject *kobj, + + mutex_lock(&sl->master->bus_mutex); + ++ /* Read no more than page0 size */ ++ if (count > DS2438_PAGE_SIZE) ++ count = DS2438_PAGE_SIZE; ++ + if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) { +- memcpy(buf, &w1_buf, DS2438_PAGE_SIZE); +- ret = DS2438_PAGE_SIZE; ++ memcpy(buf, &w1_buf, count); ++ ret = count; + } else + ret = -EIO; + +@@ -289,7 +333,6 @@ static ssize_t temperature_read(struct file *filp, struct kobject *kobj, + { + struct w1_slave *sl = kobj_to_w1_slave(kobj); + int ret; +- ssize_t c = PAGE_SIZE; + int16_t temp; + + if (off != 0) +@@ -298,8 +341,7 @@ static ssize_t temperature_read(struct file *filp, struct kobject *kobj, + return -EINVAL; + + if (w1_ds2438_get_temperature(sl, &temp) == 0) { +- c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", temp); +- ret = PAGE_SIZE - c; ++ ret = snprintf(buf, count, "%i\n", temp); + } else + ret = -EIO; 
+ +@@ -312,7 +354,6 @@ static ssize_t vad_read(struct file *filp, struct kobject *kobj, + { + struct w1_slave *sl = kobj_to_w1_slave(kobj); + int ret; +- ssize_t c = PAGE_SIZE; + uint16_t voltage; + + if (off != 0) +@@ -321,8 +362,7 @@ static ssize_t vad_read(struct file *filp, struct kobject *kobj, + return -EINVAL; + + if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VAD, &voltage) == 0) { +- c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", voltage); +- ret = PAGE_SIZE - c; ++ ret = snprintf(buf, count, "%u\n", voltage); + } else + ret = -EIO; + +@@ -335,7 +375,6 @@ static ssize_t vdd_read(struct file *filp, struct kobject *kobj, + { + struct w1_slave *sl = kobj_to_w1_slave(kobj); + int ret; +- ssize_t c = PAGE_SIZE; + uint16_t voltage; + + if (off != 0) +@@ -344,15 +383,14 @@ static ssize_t vdd_read(struct file *filp, struct kobject *kobj, + return -EINVAL; + + if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VDD, &voltage) == 0) { +- c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", voltage); +- ret = PAGE_SIZE - c; ++ ret = snprintf(buf, count, "%u\n", voltage); + } else + ret = -EIO; + + return ret; + } + +-static BIN_ATTR(iad, S_IRUGO | S_IWUSR | S_IWGRP, NULL, iad_write, 1); ++static BIN_ATTR(iad, S_IRUGO | S_IWUSR | S_IWGRP, iad_read, iad_write, 0); + static BIN_ATTR_RO(page0, DS2438_PAGE_SIZE); + static BIN_ATTR_RO(temperature, 0/* real length varies */); + static BIN_ATTR_RO(vad, 0/* real length varies */); +diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c +index 7d521babc020..71a6deeb4e71 100644 +--- a/drivers/xen/balloon.c ++++ b/drivers/xen/balloon.c +@@ -356,7 +356,10 @@ static enum bp_state reserve_additional_memory(void) + * callers drop the mutex before trying again. 
+ */ + mutex_unlock(&balloon_mutex); ++ /* add_memory_resource() requires the device_hotplug lock */ ++ lock_device_hotplug(); + rc = add_memory_resource(nid, resource, memhp_auto_online); ++ unlock_device_hotplug(); + mutex_lock(&balloon_mutex); + + if (rc) { +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c +index 27983fd657ab..d2263caff307 100644 +--- a/fs/btrfs/ctree.c ++++ b/fs/btrfs/ctree.c +@@ -2988,6 +2988,10 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, + + again: + b = get_old_root(root, time_seq); ++ if (!b) { ++ ret = -EIO; ++ goto done; ++ } + level = btrfs_header_level(b); + p->locks[level] = BTRFS_READ_LOCK; + +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c +index 49a02bf091ae..204d585e012a 100644 +--- a/fs/btrfs/super.c ++++ b/fs/btrfs/super.c +@@ -1863,7 +1863,7 @@ restore: + } + + /* Used to sort the devices by max_avail(descending sort) */ +-static int btrfs_cmp_device_free_bytes(const void *dev_info1, ++static inline int btrfs_cmp_device_free_bytes(const void *dev_info1, + const void *dev_info2) + { + if (((struct btrfs_device_info *)dev_info1)->max_avail > +@@ -1892,8 +1892,8 @@ static inline void btrfs_descending_sort_devices( + * The helper to calc the free space on the devices that can be used to store + * file data. 
+ */ +-static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info, +- u64 *free_bytes) ++static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info, ++ u64 *free_bytes) + { + struct btrfs_device_info *devices_info; + struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c +index 3818027c12f5..5999d806de78 100644 +--- a/fs/ceph/inode.c ++++ b/fs/ceph/inode.c +@@ -1631,7 +1631,6 @@ retry_lookup: + if (IS_ERR(realdn)) { + err = PTR_ERR(realdn); + d_drop(dn); +- dn = NULL; + goto next_item; + } + dn = realdn; +diff --git a/fs/dlm/member.c b/fs/dlm/member.c +index 3fda3832cf6a..cad6d85911a8 100644 +--- a/fs/dlm/member.c ++++ b/fs/dlm/member.c +@@ -680,7 +680,7 @@ int dlm_ls_start(struct dlm_ls *ls) + + error = dlm_config_nodes(ls->ls_name, &nodes, &count); + if (error < 0) +- goto fail; ++ goto fail_rv; + + spin_lock(&ls->ls_recover_lock); + +@@ -712,8 +712,9 @@ int dlm_ls_start(struct dlm_ls *ls) + return 0; + + fail: +- kfree(rv); + kfree(nodes); ++ fail_rv: ++ kfree(rv); + return error; + } + +diff --git a/fs/dlm/user.c b/fs/dlm/user.c +index d18e7a539f11..1f0c071d4a86 100644 +--- a/fs/dlm/user.c ++++ b/fs/dlm/user.c +@@ -702,7 +702,7 @@ static int copy_result_to_user(struct dlm_user_args *ua, int compat, + result.version[0] = DLM_DEVICE_VERSION_MAJOR; + result.version[1] = DLM_DEVICE_VERSION_MINOR; + result.version[2] = DLM_DEVICE_VERSION_PATCH; +- memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb)); ++ memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr)); + result.user_lksb = ua->user_lksb; + + /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c +index cc5729445194..ac3fa4bbed2d 100644 +--- a/fs/f2fs/data.c ++++ b/fs/f2fs/data.c +@@ -1445,6 +1445,7 @@ int do_write_data_page(struct f2fs_io_info *fio) + /* This page is already truncated */ + if (fio->old_blkaddr == NULL_ADDR) { + ClearPageUptodate(page); 
++ clear_cold_data(page); + goto out_writepage; + } + got_it: +@@ -1597,8 +1598,10 @@ done: + + out: + inode_dec_dirty_pages(inode); +- if (err) ++ if (err) { + ClearPageUptodate(page); ++ clear_cold_data(page); ++ } + + if (wbc->for_reclaim) { + f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA); +@@ -2158,6 +2161,8 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset, + } + } + ++ clear_cold_data(page); ++ + /* This is atomic written page, keep Private */ + if (IS_ATOMIC_WRITTEN_PAGE(page)) + return drop_inmem_page(inode, page); +@@ -2176,6 +2181,7 @@ int f2fs_release_page(struct page *page, gfp_t wait) + if (IS_ATOMIC_WRITTEN_PAGE(page)) + return 0; + ++ clear_cold_data(page); + set_page_private(page, 0); + ClearPagePrivate(page); + return 1; +diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c +index c0c933ad43c8..4abefd841b6c 100644 +--- a/fs/f2fs/dir.c ++++ b/fs/f2fs/dir.c +@@ -745,6 +745,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, + clear_page_dirty_for_io(page); + ClearPagePrivate(page); + ClearPageUptodate(page); ++ clear_cold_data(page); + inode_dec_dirty_pages(dir); + remove_dirty_inode(dir); + } +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c +index 9e5fca35e47d..2cd0d126ef8f 100644 +--- a/fs/f2fs/segment.c ++++ b/fs/f2fs/segment.c +@@ -251,8 +251,10 @@ retry: + } + next: + /* we don't need to invalidate this in the sccessful status */ +- if (drop || recover) ++ if (drop || recover) { + ClearPageUptodate(page); ++ clear_cold_data(page); ++ } + set_page_private(page, 0); + ClearPagePrivate(page); + f2fs_put_page(page, 1); +diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c +index 0d72baae5150..7cb0672294df 100644 +--- a/fs/gfs2/rgrp.c ++++ b/fs/gfs2/rgrp.c +@@ -623,7 +623,10 @@ static void __rs_deltree(struct gfs2_blkreserv *rs) + RB_CLEAR_NODE(&rs->rs_node); + + if (rs->rs_free) { +- struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm); ++ u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) + ++ rs->rs_free - 1; ++ 
struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, }; ++ struct gfs2_bitmap *start, *last; + + /* return reserved blocks to the rgrp */ + BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free); +@@ -634,7 +637,13 @@ static void __rs_deltree(struct gfs2_blkreserv *rs) + it will force the number to be recalculated later. */ + rgd->rd_extfail_pt += rs->rs_free; + rs->rs_free = 0; +- clear_bit(GBF_FULL, &bi->bi_flags); ++ if (gfs2_rbm_from_block(&last_rbm, last_block)) ++ return; ++ start = rbm_bi(&rs->rs_rbm); ++ last = rbm_bi(&last_rbm); ++ do ++ clear_bit(GBF_FULL, &start->bi_flags); ++ while (start++ != last); + } + } + +diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c +index da25c49203cc..896396554bcc 100644 +--- a/fs/hfs/brec.c ++++ b/fs/hfs/brec.c +@@ -445,6 +445,7 @@ skip: + /* restore search_key */ + hfs_bnode_read_key(node, fd->search_key, 14); + } ++ new_node = NULL; + } + + if (!rec && node->parent) +diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c +index 9bdff5e40626..19017d296173 100644 +--- a/fs/hfs/btree.c ++++ b/fs/hfs/btree.c +@@ -220,25 +220,17 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx) + return node; + } + +-struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) ++/* Make sure @tree has enough space for the @rsvd_nodes */ ++int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes) + { +- struct hfs_bnode *node, *next_node; +- struct page **pagep; +- u32 nidx, idx; +- unsigned off; +- u16 off16; +- u16 len; +- u8 *data, byte, m; +- int i; +- +- while (!tree->free_nodes) { +- struct inode *inode = tree->inode; +- u32 count; +- int res; ++ struct inode *inode = tree->inode; ++ u32 count; ++ int res; + ++ while (tree->free_nodes < rsvd_nodes) { + res = hfs_extend_file(inode); + if (res) +- return ERR_PTR(res); ++ return res; + HFS_I(inode)->phys_size = inode->i_size = + (loff_t)HFS_I(inode)->alloc_blocks * + HFS_SB(tree->sb)->alloc_blksz; +@@ -246,9 +238,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) + 
tree->sb->s_blocksize_bits; + inode_set_bytes(inode, inode->i_size); + count = inode->i_size >> tree->node_size_shift; +- tree->free_nodes = count - tree->node_count; ++ tree->free_nodes += count - tree->node_count; + tree->node_count = count; + } ++ return 0; ++} ++ ++struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) ++{ ++ struct hfs_bnode *node, *next_node; ++ struct page **pagep; ++ u32 nidx, idx; ++ unsigned off; ++ u16 off16; ++ u16 len; ++ u8 *data, byte, m; ++ int i, res; ++ ++ res = hfs_bmap_reserve(tree, 1); ++ if (res) ++ return ERR_PTR(res); + + nidx = 0; + node = hfs_bnode_find(tree, nidx); +diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h +index c8b252dbb26c..dcc2aab1b2c4 100644 +--- a/fs/hfs/btree.h ++++ b/fs/hfs/btree.h +@@ -82,6 +82,7 @@ struct hfs_find_data { + extern struct hfs_btree *hfs_btree_open(struct super_block *, u32, btree_keycmp); + extern void hfs_btree_close(struct hfs_btree *); + extern void hfs_btree_write(struct hfs_btree *); ++extern int hfs_bmap_reserve(struct hfs_btree *, int); + extern struct hfs_bnode * hfs_bmap_alloc(struct hfs_btree *); + extern void hfs_bmap_free(struct hfs_bnode *node); + +diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c +index 8a66405b0f8b..d365bf0b8c77 100644 +--- a/fs/hfs/catalog.c ++++ b/fs/hfs/catalog.c +@@ -97,6 +97,14 @@ int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct i + if (err) + return err; + ++ /* ++ * Fail early and avoid ENOSPC during the btree operations. We may ++ * have to split the root node at most once. ++ */ ++ err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth); ++ if (err) ++ goto err2; ++ + hfs_cat_build_key(sb, fd.search_key, cnid, NULL); + entry_size = hfs_cat_build_thread(sb, &entry, S_ISDIR(inode->i_mode) ? + HFS_CDR_THD : HFS_CDR_FTH, +@@ -295,6 +303,14 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name, + return err; + dst_fd = src_fd; + ++ /* ++ * Fail early and avoid ENOSPC during the btree operations. 
We may ++ * have to split the root node at most once. ++ */ ++ err = hfs_bmap_reserve(src_fd.tree, 2 * src_fd.tree->depth); ++ if (err) ++ goto out; ++ + /* find the old dir entry and read the data */ + hfs_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name); + err = hfs_brec_find(&src_fd); +diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c +index 5d0182654580..263d5028d9d1 100644 +--- a/fs/hfs/extent.c ++++ b/fs/hfs/extent.c +@@ -117,6 +117,10 @@ static int __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd) + if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) { + if (res != -ENOENT) + return res; ++ /* Fail early and avoid ENOSPC during the btree operation */ ++ res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1); ++ if (res) ++ return res; + hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec)); + HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW); + } else { +@@ -300,7 +304,7 @@ int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type) + return 0; + + blocks = 0; +- for (i = 0; i < 3; extent++, i++) ++ for (i = 0; i < 3; i++) + blocks += be16_to_cpu(extent[i].count); + + res = hfs_free_extents(sb, extent, blocks, blocks); +@@ -341,7 +345,9 @@ int hfs_get_block(struct inode *inode, sector_t block, + ablock = (u32)block / HFS_SB(sb)->fs_div; + + if (block >= HFS_I(inode)->fs_blocks) { +- if (block > HFS_I(inode)->fs_blocks || !create) ++ if (!create) ++ return 0; ++ if (block > HFS_I(inode)->fs_blocks) + return -EIO; + if (ablock >= HFS_I(inode)->alloc_blocks) { + res = hfs_extend_file(inode); +diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c +index 2538b49cc349..350afd67bd69 100644 +--- a/fs/hfs/inode.c ++++ b/fs/hfs/inode.c +@@ -642,6 +642,8 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr) + + truncate_setsize(inode, attr->ia_size); + hfs_file_truncate(inode); ++ inode->i_atime = inode->i_mtime = inode->i_ctime = ++ current_time(inode); + } + + setattr_copy(inode, attr); +diff 
--git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c +index 2bab6b3cdba4..e6d554476db4 100644 +--- a/fs/hfsplus/attributes.c ++++ b/fs/hfsplus/attributes.c +@@ -217,6 +217,11 @@ int hfsplus_create_attr(struct inode *inode, + if (err) + goto failed_init_create_attr; + ++ /* Fail early and avoid ENOSPC during the btree operation */ ++ err = hfs_bmap_reserve(fd.tree, fd.tree->depth + 1); ++ if (err) ++ goto failed_create_attr; ++ + if (name) { + err = hfsplus_attr_build_key(sb, fd.search_key, + inode->i_ino, name); +@@ -313,6 +318,11 @@ int hfsplus_delete_attr(struct inode *inode, const char *name) + if (err) + return err; + ++ /* Fail early and avoid ENOSPC during the btree operation */ ++ err = hfs_bmap_reserve(fd.tree, fd.tree->depth); ++ if (err) ++ goto out; ++ + if (name) { + err = hfsplus_attr_build_key(sb, fd.search_key, + inode->i_ino, name); +diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c +index d3f36982f685..0f53a486d2c1 100644 +--- a/fs/hfsplus/brec.c ++++ b/fs/hfsplus/brec.c +@@ -448,6 +448,7 @@ skip: + /* restore search_key */ + hfs_bnode_read_key(node, fd->search_key, 14); + } ++ new_node = NULL; + } + + if (!rec && node->parent) +diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c +index 3de3bc4918b5..66774f4cb4fd 100644 +--- a/fs/hfsplus/btree.c ++++ b/fs/hfsplus/btree.c +@@ -342,26 +342,21 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx) + return node; + } + +-struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) ++/* Make sure @tree has enough space for the @rsvd_nodes */ ++int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes) + { +- struct hfs_bnode *node, *next_node; +- struct page **pagep; +- u32 nidx, idx; +- unsigned off; +- u16 off16; +- u16 len; +- u8 *data, byte, m; +- int i; ++ struct inode *inode = tree->inode; ++ struct hfsplus_inode_info *hip = HFSPLUS_I(inode); ++ u32 count; ++ int res; + +- while (!tree->free_nodes) { +- struct inode *inode = tree->inode; +- struct hfsplus_inode_info 
*hip = HFSPLUS_I(inode); +- u32 count; +- int res; ++ if (rsvd_nodes <= 0) ++ return 0; + ++ while (tree->free_nodes < rsvd_nodes) { + res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree)); + if (res) +- return ERR_PTR(res); ++ return res; + hip->phys_size = inode->i_size = + (loff_t)hip->alloc_blocks << + HFSPLUS_SB(tree->sb)->alloc_blksz_shift; +@@ -369,9 +364,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) + hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift; + inode_set_bytes(inode, inode->i_size); + count = inode->i_size >> tree->node_size_shift; +- tree->free_nodes = count - tree->node_count; ++ tree->free_nodes += count - tree->node_count; + tree->node_count = count; + } ++ return 0; ++} ++ ++struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) ++{ ++ struct hfs_bnode *node, *next_node; ++ struct page **pagep; ++ u32 nidx, idx; ++ unsigned off; ++ u16 off16; ++ u16 len; ++ u8 *data, byte, m; ++ int i, res; ++ ++ res = hfs_bmap_reserve(tree, 1); ++ if (res) ++ return ERR_PTR(res); + + nidx = 0; + node = hfs_bnode_find(tree, nidx); +diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c +index a196369ba779..35472cba750e 100644 +--- a/fs/hfsplus/catalog.c ++++ b/fs/hfsplus/catalog.c +@@ -265,6 +265,14 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, + if (err) + return err; + ++ /* ++ * Fail early and avoid ENOSPC during the btree operations. We may ++ * have to split the root node at most once. ++ */ ++ err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth); ++ if (err) ++ goto err2; ++ + hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid); + entry_size = hfsplus_fill_cat_thread(sb, &entry, + S_ISDIR(inode->i_mode) ? +@@ -333,6 +341,14 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str) + if (err) + return err; + ++ /* ++ * Fail early and avoid ENOSPC during the btree operations. We may ++ * have to split the root node at most once. 
++ */ ++ err = hfs_bmap_reserve(fd.tree, 2 * (int)fd.tree->depth - 2); ++ if (err) ++ goto out; ++ + if (!str) { + int len; + +@@ -433,6 +449,14 @@ int hfsplus_rename_cat(u32 cnid, + return err; + dst_fd = src_fd; + ++ /* ++ * Fail early and avoid ENOSPC during the btree operations. We may ++ * have to split the root node at most twice. ++ */ ++ err = hfs_bmap_reserve(src_fd.tree, 4 * (int)src_fd.tree->depth - 1); ++ if (err) ++ goto out; ++ + /* find the old dir entry and read the data */ + err = hfsplus_cat_build_key(sb, src_fd.search_key, + src_dir->i_ino, src_name); +diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c +index e8770935ce6d..58f296bfd438 100644 +--- a/fs/hfsplus/extents.c ++++ b/fs/hfsplus/extents.c +@@ -100,6 +100,10 @@ static int __hfsplus_ext_write_extent(struct inode *inode, + if (hip->extent_state & HFSPLUS_EXT_NEW) { + if (res != -ENOENT) + return res; ++ /* Fail early and avoid ENOSPC during the btree operation */ ++ res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1); ++ if (res) ++ return res; + hfs_brec_insert(fd, hip->cached_extents, + sizeof(hfsplus_extent_rec)); + hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW); +@@ -233,7 +237,9 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock, + ablock = iblock >> sbi->fs_shift; + + if (iblock >= hip->fs_blocks) { +- if (iblock > hip->fs_blocks || !create) ++ if (!create) ++ return 0; ++ if (iblock > hip->fs_blocks) + return -EIO; + if (ablock >= hip->alloc_blocks) { + res = hfsplus_file_extend(inode, false); +diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h +index a015044daa05..dbb55d823385 100644 +--- a/fs/hfsplus/hfsplus_fs.h ++++ b/fs/hfsplus/hfsplus_fs.h +@@ -312,6 +312,7 @@ static inline unsigned short hfsplus_min_io_size(struct super_block *sb) + #define hfs_btree_open hfsplus_btree_open + #define hfs_btree_close hfsplus_btree_close + #define hfs_btree_write hfsplus_btree_write ++#define hfs_bmap_reserve hfsplus_bmap_reserve + #define 
hfs_bmap_alloc hfsplus_bmap_alloc + #define hfs_bmap_free hfsplus_bmap_free + #define hfs_bnode_read hfsplus_bnode_read +@@ -396,6 +397,7 @@ u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size, u64 sectors, + struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id); + void hfs_btree_close(struct hfs_btree *tree); + int hfs_btree_write(struct hfs_btree *tree); ++int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes); + struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree); + void hfs_bmap_free(struct hfs_bnode *node); + +diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c +index 190c60efbc99..5b31f4730ee9 100644 +--- a/fs/hfsplus/inode.c ++++ b/fs/hfsplus/inode.c +@@ -262,6 +262,7 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr) + } + truncate_setsize(inode, attr->ia_size); + hfsplus_file_truncate(inode); ++ inode->i_mtime = inode->i_ctime = current_time(inode); + } + + setattr_copy(inode, attr); +diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c +index 9f8250df99f1..f9b84f7a3e4b 100644 +--- a/fs/ocfs2/buffer_head_io.c ++++ b/fs/ocfs2/buffer_head_io.c +@@ -99,25 +99,34 @@ out: + return ret; + } + ++/* Caller must provide a bhs[] with all NULL or non-NULL entries, so it ++ * will be easier to handle read failure. ++ */ + int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, + unsigned int nr, struct buffer_head *bhs[]) + { + int status = 0; + unsigned int i; + struct buffer_head *bh; ++ int new_bh = 0; + + trace_ocfs2_read_blocks_sync((unsigned long long)block, nr); + + if (!nr) + goto bail; + ++ /* Don't put buffer head and re-assign it to NULL if it is allocated ++ * outside since the caller can't be aware of this alternation! 
++ */ ++ new_bh = (bhs[0] == NULL); ++ + for (i = 0 ; i < nr ; i++) { + if (bhs[i] == NULL) { + bhs[i] = sb_getblk(osb->sb, block++); + if (bhs[i] == NULL) { + status = -ENOMEM; + mlog_errno(status); +- goto bail; ++ break; + } + } + bh = bhs[i]; +@@ -157,9 +166,26 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, + submit_bh(REQ_OP_READ, 0, bh); + } + ++read_failure: + for (i = nr; i > 0; i--) { + bh = bhs[i - 1]; + ++ if (unlikely(status)) { ++ if (new_bh && bh) { ++ /* If middle bh fails, let previous bh ++ * finish its read and then put it to ++ * aovoid bh leak ++ */ ++ if (!buffer_jbd(bh)) ++ wait_on_buffer(bh); ++ put_bh(bh); ++ bhs[i - 1] = NULL; ++ } else if (bh && buffer_uptodate(bh)) { ++ clear_buffer_uptodate(bh); ++ } ++ continue; ++ } ++ + /* No need to wait on the buffer if it's managed by JBD. */ + if (!buffer_jbd(bh)) + wait_on_buffer(bh); +@@ -169,8 +195,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, + * so we can safely record this and loop back + * to cleanup the other buffers. */ + status = -EIO; +- put_bh(bh); +- bhs[i - 1] = NULL; ++ goto read_failure; + } + } + +@@ -178,6 +203,9 @@ bail: + return status; + } + ++/* Caller must provide a bhs[] with all NULL or non-NULL entries, so it ++ * will be easier to handle read failure. 
++ */ + int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, + struct buffer_head *bhs[], int flags, + int (*validate)(struct super_block *sb, +@@ -187,6 +215,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, + int i, ignore_cache = 0; + struct buffer_head *bh; + struct super_block *sb = ocfs2_metadata_cache_get_super(ci); ++ int new_bh = 0; + + trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags); + +@@ -212,6 +241,11 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, + goto bail; + } + ++ /* Don't put buffer head and re-assign it to NULL if it is allocated ++ * outside since the caller can't be aware of this alternation! ++ */ ++ new_bh = (bhs[0] == NULL); ++ + ocfs2_metadata_cache_io_lock(ci); + for (i = 0 ; i < nr ; i++) { + if (bhs[i] == NULL) { +@@ -220,7 +254,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, + ocfs2_metadata_cache_io_unlock(ci); + status = -ENOMEM; + mlog_errno(status); +- goto bail; ++ /* Don't forget to put previous bh! 
*/ ++ break; + } + } + bh = bhs[i]; +@@ -314,16 +349,27 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, + } + } + +- status = 0; +- ++read_failure: + for (i = (nr - 1); i >= 0; i--) { + bh = bhs[i]; + + if (!(flags & OCFS2_BH_READAHEAD)) { +- if (status) { +- /* Clear the rest of the buffers on error */ +- put_bh(bh); +- bhs[i] = NULL; ++ if (unlikely(status)) { ++ /* Clear the buffers on error including those ++ * ever succeeded in reading ++ */ ++ if (new_bh && bh) { ++ /* If middle bh fails, let previous bh ++ * finish its read and then put it to ++ * aovoid bh leak ++ */ ++ if (!buffer_jbd(bh)) ++ wait_on_buffer(bh); ++ put_bh(bh); ++ bhs[i] = NULL; ++ } else if (bh && buffer_uptodate(bh)) { ++ clear_buffer_uptodate(bh); ++ } + continue; + } + /* We know this can't have changed as we hold the +@@ -341,9 +387,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, + * uptodate. */ + status = -EIO; + clear_buffer_needs_validate(bh); +- put_bh(bh); +- bhs[i] = NULL; +- continue; ++ goto read_failure; + } + + if (buffer_needs_validate(bh)) { +@@ -353,11 +397,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, + BUG_ON(buffer_jbd(bh)); + clear_buffer_needs_validate(bh); + status = validate(sb, bh); +- if (status) { +- put_bh(bh); +- bhs[i] = NULL; +- continue; +- } ++ if (status) ++ goto read_failure; + } + } + +diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c +index 9b984cae4c4e..1d6dc8422899 100644 +--- a/fs/ocfs2/dlm/dlmdebug.c ++++ b/fs/ocfs2/dlm/dlmdebug.c +@@ -329,7 +329,7 @@ void dlm_print_one_mle(struct dlm_master_list_entry *mle) + { + char *buf; + +- buf = (char *) get_zeroed_page(GFP_NOFS); ++ buf = (char *) get_zeroed_page(GFP_ATOMIC); + if (buf) { + dump_mle(mle, buf, PAGE_SIZE - 1); + free_page((unsigned long)buf); +diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c +index 5193218f5889..e961015fb484 100644 +--- a/fs/ocfs2/dlmglue.c ++++ b/fs/ocfs2/dlmglue.c +@@ 
-3422,7 +3422,7 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb, + * we can recover correctly from node failure. Otherwise, we may get + * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set. + */ +- if (!ocfs2_is_o2cb_active() && ++ if (ocfs2_userspace_stack(osb) && + lockres->l_ops->flags & LOCK_TYPE_USES_LVB) + lvb = 1; + +diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c +index f55f82ca3425..1565dd8e8856 100644 +--- a/fs/ocfs2/move_extents.c ++++ b/fs/ocfs2/move_extents.c +@@ -25,6 +25,7 @@ + #include "ocfs2_ioctl.h" + + #include "alloc.h" ++#include "localalloc.h" + #include "aops.h" + #include "dlmglue.h" + #include "extent_map.h" +@@ -222,6 +223,7 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context, + struct ocfs2_refcount_tree *ref_tree = NULL; + u32 new_phys_cpos, new_len; + u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); ++ int need_free = 0; + + if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) { + BUG_ON(!ocfs2_is_refcount_inode(inode)); +@@ -312,6 +314,7 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context, + if (!partial) { + context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE; + ret = -ENOSPC; ++ need_free = 1; + goto out_commit; + } + } +@@ -336,6 +339,20 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context, + mlog_errno(ret); + + out_commit: ++ if (need_free && context->data_ac) { ++ struct ocfs2_alloc_context *data_ac = context->data_ac; ++ ++ if (context->data_ac->ac_which == OCFS2_AC_USE_LOCAL) ++ ocfs2_free_local_alloc_bits(osb, handle, data_ac, ++ new_phys_cpos, new_len); ++ else ++ ocfs2_free_clusters(handle, ++ data_ac->ac_inode, ++ data_ac->ac_bh, ++ ocfs2_clusters_to_blocks(osb->sb, new_phys_cpos), ++ new_len); ++ } ++ + ocfs2_commit_trans(osb, handle); + + out_unlock_mutex: +diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c +index d6c350ba25b9..c4b029c43464 100644 +--- a/fs/ocfs2/stackglue.c ++++ 
b/fs/ocfs2/stackglue.c +@@ -48,12 +48,6 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl"; + */ + static struct ocfs2_stack_plugin *active_stack; + +-inline int ocfs2_is_o2cb_active(void) +-{ +- return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB); +-} +-EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active); +- + static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name) + { + struct ocfs2_stack_plugin *p; +diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h +index e3036e1790e8..f2dce10fae54 100644 +--- a/fs/ocfs2/stackglue.h ++++ b/fs/ocfs2/stackglue.h +@@ -298,9 +298,6 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p + int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin); + void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin); + +-/* In ocfs2_downconvert_lock(), we need to know which stack we are using */ +-int ocfs2_is_o2cb_active(void); +- + extern struct kset *ocfs2_kset; + + #endif /* STACKGLUE_H */ +diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c +index eca49da6d7e0..77740ef5a8e8 100644 +--- a/fs/ocfs2/xattr.c ++++ b/fs/ocfs2/xattr.c +@@ -1497,6 +1497,18 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc, + return loc->xl_ops->xlo_check_space(loc, xi); + } + ++static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash) ++{ ++ loc->xl_ops->xlo_add_entry(loc, name_hash); ++ loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash); ++ /* ++ * We can't leave the new entry's xe_name_offset at zero or ++ * add_namevalue() will go nuts. We set it to the size of our ++ * storage so that it can never be less than any other entry. 
++ */ ++ loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size); ++} ++ + static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi) + { +@@ -2128,31 +2140,29 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc, + if (rc) + goto out; + +- if (!loc->xl_entry) { +- rc = -EINVAL; +- goto out; +- } +- +- if (ocfs2_xa_can_reuse_entry(loc, xi)) { +- orig_value_size = loc->xl_entry->xe_value_size; +- rc = ocfs2_xa_reuse_entry(loc, xi, ctxt); +- if (rc) +- goto out; +- goto alloc_value; +- } ++ if (loc->xl_entry) { ++ if (ocfs2_xa_can_reuse_entry(loc, xi)) { ++ orig_value_size = loc->xl_entry->xe_value_size; ++ rc = ocfs2_xa_reuse_entry(loc, xi, ctxt); ++ if (rc) ++ goto out; ++ goto alloc_value; ++ } + +- if (!ocfs2_xattr_is_local(loc->xl_entry)) { +- orig_clusters = ocfs2_xa_value_clusters(loc); +- rc = ocfs2_xa_value_truncate(loc, 0, ctxt); +- if (rc) { +- mlog_errno(rc); +- ocfs2_xa_cleanup_value_truncate(loc, +- "overwriting", +- orig_clusters); +- goto out; ++ if (!ocfs2_xattr_is_local(loc->xl_entry)) { ++ orig_clusters = ocfs2_xa_value_clusters(loc); ++ rc = ocfs2_xa_value_truncate(loc, 0, ctxt); ++ if (rc) { ++ mlog_errno(rc); ++ ocfs2_xa_cleanup_value_truncate(loc, ++ "overwriting", ++ orig_clusters); ++ goto out; ++ } + } +- } +- ocfs2_xa_wipe_namevalue(loc); ++ ocfs2_xa_wipe_namevalue(loc); ++ } else ++ ocfs2_xa_add_entry(loc, name_hash); + + /* + * If we get here, we have a blank entry. Fill it. We grow our +diff --git a/fs/read_write.c b/fs/read_write.c +index 38a8bcccf0dd..e8136a72c13f 100644 +--- a/fs/read_write.c ++++ b/fs/read_write.c +@@ -1709,6 +1709,34 @@ static int clone_verify_area(struct file *file, loff_t pos, u64 len, bool write) + + return security_file_permission(file, write ? MAY_WRITE : MAY_READ); + } ++/* ++ * Ensure that we don't remap a partial EOF block in the middle of something ++ * else. Assume that the offsets have already been checked for block ++ * alignment. 
++ * ++ * For deduplication we always scale down to the previous block because we ++ * can't meaningfully compare post-EOF contents. ++ * ++ * For clone we only link a partial EOF block above the destination file's EOF. ++ */ ++static int generic_remap_check_len(struct inode *inode_in, ++ struct inode *inode_out, ++ loff_t pos_out, ++ u64 *len, ++ bool is_dedupe) ++{ ++ u64 blkmask = i_blocksize(inode_in) - 1; ++ ++ if ((*len & blkmask) == 0) ++ return 0; ++ ++ if (is_dedupe) ++ *len &= ~blkmask; ++ else if (pos_out + *len < i_size_read(inode_out)) ++ return -EINVAL; ++ ++ return 0; ++} + + /* + * Check that the two inodes are eligible for cloning, the ranges make +@@ -1815,6 +1843,11 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in, + return -EBADE; + } + ++ ret = generic_remap_check_len(inode_in, inode_out, pos_out, len, ++ is_dedupe); ++ if (ret) ++ return ret; ++ + return 1; + } + EXPORT_SYMBOL(vfs_clone_file_prep_inodes); +diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c +index e4a623956df5..e5970ecdfd58 100644 +--- a/fs/xfs/xfs_buf.c ++++ b/fs/xfs/xfs_buf.c +@@ -58,6 +58,32 @@ static kmem_zone_t *xfs_buf_zone; + #define xb_to_gfp(flags) \ + ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN) + ++/* ++ * Locking orders ++ * ++ * xfs_buf_ioacct_inc: ++ * xfs_buf_ioacct_dec: ++ * b_sema (caller holds) ++ * b_lock ++ * ++ * xfs_buf_stale: ++ * b_sema (caller holds) ++ * b_lock ++ * lru_lock ++ * ++ * xfs_buf_rele: ++ * b_lock ++ * pag_buf_lock ++ * lru_lock ++ * ++ * xfs_buftarg_wait_rele ++ * lru_lock ++ * b_lock (trylock due to inversion) ++ * ++ * xfs_buftarg_isolate ++ * lru_lock ++ * b_lock (trylock due to inversion) ++ */ + + static inline int + xfs_buf_is_vmapped( +@@ -983,8 +1009,18 @@ xfs_buf_rele( + + ASSERT(atomic_read(&bp->b_hold) > 0); + +- release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock); ++ /* ++ * We grab the b_lock here first to serialise racing xfs_buf_rele() ++ * calls. 
The pag_buf_lock being taken on the last reference only ++ * serialises against racing lookups in xfs_buf_find(). IOWs, the second ++ * to last reference we drop here is not serialised against the last ++ * reference until we take bp->b_lock. Hence if we don't grab b_lock ++ * first, the last "release" reference can win the race to the lock and ++ * free the buffer before the second-to-last reference is processed, ++ * leading to a use-after-free scenario. ++ */ + spin_lock(&bp->b_lock); ++ release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock); + if (!release) { + /* + * Drop the in-flight state if the buffer is already on the LRU +diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h +index 835c2271196a..aec255fb62aa 100644 +--- a/include/linux/bitmap.h ++++ b/include/linux/bitmap.h +@@ -185,8 +185,13 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf, + #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) + #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) + ++/* ++ * The static inlines below do not handle constant nbits==0 correctly, ++ * so make such users (should any ever turn up) call the out-of-line ++ * versions. 
++ */ + #define small_const_nbits(nbits) \ +- (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG) ++ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0) + + static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) + { +@@ -350,7 +355,7 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start, + } + + static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src, +- unsigned int shift, int nbits) ++ unsigned int shift, unsigned int nbits) + { + if (small_const_nbits(nbits)) + *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift; +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h +index bb4758ffd403..7668c68ddb5b 100644 +--- a/include/linux/kvm_host.h ++++ b/include/linux/kvm_host.h +@@ -890,6 +890,7 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); + void kvm_vcpu_kick(struct kvm_vcpu *vcpu); + + bool kvm_is_reserved_pfn(kvm_pfn_t pfn); ++bool kvm_is_zone_device_pfn(kvm_pfn_t pfn); + + struct kvm_irq_ack_notifier { + struct hlist_node link; +diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h +index 58e110aee7ab..d36a02935391 100644 +--- a/include/linux/memory_hotplug.h ++++ b/include/linux/memory_hotplug.h +@@ -316,6 +316,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {} + + extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, + void *arg, int (*func)(struct memory_block *, void *)); ++extern int __add_memory(int nid, u64 start, u64 size); + extern int add_memory(int nid, u64 start, u64 size); + extern int add_memory_resource(int nid, struct resource *resource, bool online); + extern int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock); +diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h +index 5aacdb017a9f..806a4f095312 100644 +--- a/include/linux/mfd/intel_soc_pmic.h ++++ b/include/linux/mfd/intel_soc_pmic.h +@@ -25,6 +25,7 @@ struct 
intel_soc_pmic { + int irq; + struct regmap *regmap; + struct regmap_irq_chip_data *irq_chip_data; ++ struct regmap_irq_chip_data *irq_chip_data_pwrbtn; + struct regmap_irq_chip_data *irq_chip_data_tmu; + struct regmap_irq_chip_data *irq_chip_data_bcu; + struct regmap_irq_chip_data *irq_chip_data_adc; +diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h +index cf815577bd68..3ae1fe743bc3 100644 +--- a/include/linux/mfd/max8997.h ++++ b/include/linux/mfd/max8997.h +@@ -178,7 +178,6 @@ struct max8997_led_platform_data { + struct max8997_platform_data { + /* IRQ */ + int ono; +- int wakeup; + + /* ---- PMIC ---- */ + struct max8997_regulator_data *regulators; +diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h +index 638222e43e48..93011c61aafd 100644 +--- a/include/linux/mfd/mc13xxx.h ++++ b/include/linux/mfd/mc13xxx.h +@@ -247,6 +247,7 @@ struct mc13xxx_platform_data { + #define MC13XXX_ADC0_TSMOD0 (1 << 12) + #define MC13XXX_ADC0_TSMOD1 (1 << 13) + #define MC13XXX_ADC0_TSMOD2 (1 << 14) ++#define MC13XXX_ADC0_CHRGRAWDIV (1 << 15) + #define MC13XXX_ADC0_ADINC1 (1 << 16) + #define MC13XXX_ADC0_ADINC2 (1 << 17) + +diff --git a/kernel/auditsc.c b/kernel/auditsc.c +index 76d789d6cea0..ffa8d64f6fef 100644 +--- a/kernel/auditsc.c ++++ b/kernel/auditsc.c +@@ -1102,7 +1102,7 @@ static void audit_log_execve_info(struct audit_context *context, + } + + /* write as much as we can to the audit log */ +- if (len_buf > 0) { ++ if (len_buf >= 0) { + /* NOTE: some magic numbers here - basically if we + * can't fit a reasonable amount of data into the + * existing audit buffer, flush it and start with +diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c +index 482bf42e21a4..1060eee6c8d5 100644 +--- a/kernel/bpf/devmap.c ++++ b/kernel/bpf/devmap.c +@@ -388,8 +388,7 @@ static int dev_map_notification(struct notifier_block *notifier, + struct bpf_dtab_netdev *dev, *odev; + + dev = READ_ONCE(dtab->netdev_map[i]); +- if (!dev || +- dev->dev->ifindex 
!= netdev->ifindex) ++ if (!dev || netdev != dev->dev) + continue; + odev = cmpxchg(&dtab->netdev_map[i], dev, NULL); + if (dev == odev) +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c +index 5b33c14ab8b2..4e50beb162c0 100644 +--- a/kernel/printk/printk.c ++++ b/kernel/printk/printk.c +@@ -1099,7 +1099,7 @@ void __init setup_log_buf(int early) + { + unsigned long flags; + char *new_log_buf; +- int free; ++ unsigned int free; + + if (log_buf != __log_buf) + return; +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index feeb52880d35..67433fbdcb5a 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -8319,13 +8319,22 @@ out_all_pinned: + sd->nr_balance_failed = 0; + + out_one_pinned: ++ ld_moved = 0; ++ ++ /* ++ * idle_balance() disregards balance intervals, so we could repeatedly ++ * reach this code, which would lead to balance_interval skyrocketting ++ * in a short amount of time. Skip the balance_interval increase logic ++ * to avoid that. ++ */ ++ if (env.idle == CPU_NEWLY_IDLE) ++ goto out; ++ + /* tune up the balancing interval */ + if (((env.flags & LBF_ALL_PINNED) && + sd->balance_interval < MAX_PINNED_INTERVAL) || + (sd->balance_interval < sd->max_interval)) + sd->balance_interval *= 2; +- +- ld_moved = 0; + out: + return ld_moved; + } +diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c +index 9dcd80ed9d4c..867d173dab48 100644 +--- a/kernel/sched/topology.c ++++ b/kernel/sched/topology.c +@@ -1347,7 +1347,7 @@ void sched_init_numa(void) + int level = 0; + int i, j, k; + +- sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); ++ sched_domains_numa_distance = kzalloc(sizeof(int) * (nr_node_ids + 1), GFP_KERNEL); + if (!sched_domains_numa_distance) + return; + +diff --git a/mm/ksm.c b/mm/ksm.c +index f50cc573815f..764486ffcd16 100644 +--- a/mm/ksm.c ++++ b/mm/ksm.c +@@ -849,13 +849,13 @@ static int remove_stable_node(struct stable_node *stable_node) + return 0; + } + +- if 
(WARN_ON_ONCE(page_mapped(page))) { +- /* +- * This should not happen: but if it does, just refuse to let +- * merge_across_nodes be switched - there is no need to panic. +- */ +- err = -EBUSY; +- } else { ++ /* ++ * Page could be still mapped if this races with __mmput() running in ++ * between ksm_exit() and exit_mmap(). Just refuse to let ++ * merge_across_nodes/max_page_sharing be switched. ++ */ ++ err = -EBUSY; ++ if (!page_mapped(page)) { + /* + * The stable node did not yet appear stale to get_ksm_page(), + * since that allows for an unmapped ksm page to be recognized +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c +index d4affa9982ca..2d6626ab29d1 100644 +--- a/mm/memory_hotplug.c ++++ b/mm/memory_hotplug.c +@@ -343,12 +343,8 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, + unsigned long start_pfn, + unsigned long end_pfn) + { +- struct mem_section *ms; +- + for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) { +- ms = __pfn_to_section(start_pfn); +- +- if (unlikely(!valid_section(ms))) ++ if (unlikely(!pfn_to_online_page(start_pfn))) + continue; + + if (unlikely(pfn_to_nid(start_pfn) != nid)) +@@ -368,15 +364,12 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, + unsigned long start_pfn, + unsigned long end_pfn) + { +- struct mem_section *ms; + unsigned long pfn; + + /* pfn is the end pfn of a memory section. 
*/ + pfn = end_pfn - 1; + for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) { +- ms = __pfn_to_section(pfn); +- +- if (unlikely(!valid_section(ms))) ++ if (unlikely(!pfn_to_online_page(pfn))) + continue; + + if (unlikely(pfn_to_nid(pfn) != nid)) +@@ -398,7 +391,6 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, + unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */ + unsigned long zone_end_pfn = z; + unsigned long pfn; +- struct mem_section *ms; + int nid = zone_to_nid(zone); + + zone_span_writelock(zone); +@@ -436,9 +428,7 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, + */ + pfn = zone_start_pfn; + for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) { +- ms = __pfn_to_section(pfn); +- +- if (unlikely(!valid_section(ms))) ++ if (unlikely(!pfn_to_online_page(pfn))) + continue; + + if (page_zone(pfn_to_page(pfn)) != zone) +@@ -494,6 +484,16 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn) + int nr_pages = PAGES_PER_SECTION; + unsigned long flags; + ++#ifdef CONFIG_ZONE_DEVICE ++ /* ++ * Zone shrinking code cannot properly deal with ZONE_DEVICE. So ++ * we will not try to shrink the zones - which is okay as ++ * set_zone_contiguous() cannot deal with ZONE_DEVICE either way. ++ */ ++ if (zone_idx(zone) == ZONE_DEVICE) ++ return; ++#endif ++ + pgdat_resize_lock(zone->zone_pgdat, &flags); + shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); + update_pgdat_span(pgdat); +@@ -1073,7 +1073,12 @@ static int online_memory_block(struct memory_block *mem, void *arg) + return device_online(&mem->dev); + } + +-/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */ ++/* ++ * NOTE: The caller must call lock_device_hotplug() to serialize hotplug ++ * and online/offline operations (triggered e.g. by sysfs). 
++ * ++ * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG ++ */ + int __ref add_memory_resource(int nid, struct resource *res, bool online) + { + u64 start, size; +@@ -1166,9 +1171,9 @@ out: + mem_hotplug_done(); + return ret; + } +-EXPORT_SYMBOL_GPL(add_memory_resource); + +-int __ref add_memory(int nid, u64 start, u64 size) ++/* requires device_hotplug_lock, see add_memory_resource() */ ++int __ref __add_memory(int nid, u64 start, u64 size) + { + struct resource *res; + int ret; +@@ -1182,6 +1187,17 @@ int __ref add_memory(int nid, u64 start, u64 size) + release_memory_resource(res); + return ret; + } ++ ++int add_memory(int nid, u64 start, u64 size) ++{ ++ int rc; ++ ++ lock_device_hotplug(); ++ rc = __add_memory(nid, start, size); ++ unlock_device_hotplug(); ++ ++ return rc; ++} + EXPORT_SYMBOL_GPL(add_memory); + + #ifdef CONFIG_MEMORY_HOTREMOVE +diff --git a/mm/page-writeback.c b/mm/page-writeback.c +index e001de5ac50c..a40c075fd8f1 100644 +--- a/mm/page-writeback.c ++++ b/mm/page-writeback.c +@@ -2150,6 +2150,13 @@ EXPORT_SYMBOL(tag_pages_for_writeback); + * not miss some pages (e.g., because some other process has cleared TOWRITE + * tag we set). The rule we follow is that TOWRITE tag can be cleared only + * by the process clearing the DIRTY tag (and submitting the page for IO). ++ * ++ * To avoid deadlocks between range_cyclic writeback and callers that hold ++ * pages in PageWriteback to aggregate IO until write_cache_pages() returns, ++ * we do not loop back to the start of the file. Doing so causes a page ++ * lock/page writeback access order inversion - we should only ever lock ++ * multiple pages in ascending page->index order, and looping back to the start ++ * of the file violates that rule and causes deadlocks. 
+ */ + int write_cache_pages(struct address_space *mapping, + struct writeback_control *wbc, writepage_t writepage, +@@ -2164,7 +2171,6 @@ int write_cache_pages(struct address_space *mapping, + pgoff_t index; + pgoff_t end; /* Inclusive */ + pgoff_t done_index; +- int cycled; + int range_whole = 0; + int tag; + +@@ -2172,23 +2178,17 @@ int write_cache_pages(struct address_space *mapping, + if (wbc->range_cyclic) { + writeback_index = mapping->writeback_index; /* prev offset */ + index = writeback_index; +- if (index == 0) +- cycled = 1; +- else +- cycled = 0; + end = -1; + } else { + index = wbc->range_start >> PAGE_SHIFT; + end = wbc->range_end >> PAGE_SHIFT; + if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) + range_whole = 1; +- cycled = 1; /* ignore range_cyclic tests */ + } + if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) + tag = PAGECACHE_TAG_TOWRITE; + else + tag = PAGECACHE_TAG_DIRTY; +-retry: + if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) + tag_pages_for_writeback(mapping, index, end); + done_index = index; +@@ -2296,17 +2296,14 @@ continue_unlock: + pagevec_release(&pvec); + cond_resched(); + } +- if (!cycled && !done) { +- /* +- * range_cyclic: +- * We hit the last page and there is more work to be done: wrap +- * back to the start of the file +- */ +- cycled = 1; +- index = 0; +- end = writeback_index - 1; +- goto retry; +- } ++ ++ /* ++ * If we hit the last page and there is more work to be done: wrap ++ * back the index back to the start of the file for the next ++ * time we are called. 
++ */ ++ if (wbc->range_cyclic && !done) ++ done_index = 0; + if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) + mapping->writeback_index = done_index; + +diff --git a/net/core/dev.c b/net/core/dev.c +index 9d6beb9de924..3ce68484ed5a 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3029,7 +3029,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de + } + + skb = next; +- if (netif_xmit_stopped(txq) && skb) { ++ if (netif_tx_queue_stopped(txq) && skb) { + rc = NETDEV_TX_BUSY; + break; + } +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index 925af6b43017..b598e9909fec 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -1767,6 +1767,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) + if (tb[IFLA_VF_MAC]) { + struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); + ++ if (ivm->vf >= INT_MAX) ++ return -EINVAL; + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_mac) + err = ops->ndo_set_vf_mac(dev, ivm->vf, +@@ -1778,6 +1780,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) + if (tb[IFLA_VF_VLAN]) { + struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); + ++ if (ivv->vf >= INT_MAX) ++ return -EINVAL; + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_vlan) + err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, +@@ -1810,6 +1814,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) + if (len == 0) + return -EINVAL; + ++ if (ivvl[0]->vf >= INT_MAX) ++ return -EINVAL; + err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan, + ivvl[0]->qos, ivvl[0]->vlan_proto); + if (err < 0) +@@ -1820,6 +1826,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) + struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); + struct ifla_vf_info ivf; + ++ if (ivt->vf >= INT_MAX) ++ return -EINVAL; + err = -EOPNOTSUPP; + if (ops->ndo_get_vf_config) + err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); +@@ -1838,6 +1846,8 @@ static int 
do_setvfinfo(struct net_device *dev, struct nlattr **tb) + if (tb[IFLA_VF_RATE]) { + struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); + ++ if (ivt->vf >= INT_MAX) ++ return -EINVAL; + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_rate) + err = ops->ndo_set_vf_rate(dev, ivt->vf, +@@ -1850,6 +1860,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) + if (tb[IFLA_VF_SPOOFCHK]) { + struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); + ++ if (ivs->vf >= INT_MAX) ++ return -EINVAL; + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_spoofchk) + err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, +@@ -1861,6 +1873,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) + if (tb[IFLA_VF_LINK_STATE]) { + struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); + ++ if (ivl->vf >= INT_MAX) ++ return -EINVAL; + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_link_state) + err = ops->ndo_set_vf_link_state(dev, ivl->vf, +@@ -1874,6 +1888,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) + + err = -EOPNOTSUPP; + ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); ++ if (ivrssq_en->vf >= INT_MAX) ++ return -EINVAL; + if (ops->ndo_set_vf_rss_query_en) + err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, + ivrssq_en->setting); +@@ -1884,6 +1900,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) + if (tb[IFLA_VF_TRUST]) { + struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]); + ++ if (ivt->vf >= INT_MAX) ++ return -EINVAL; + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_trust) + err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting); +@@ -1894,15 +1912,18 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) + if (tb[IFLA_VF_IB_NODE_GUID]) { + struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]); + ++ if (ivt->vf >= INT_MAX) ++ return -EINVAL; + if (!ops->ndo_set_vf_guid) + return -EOPNOTSUPP; +- + return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID); + } + + if 
(tb[IFLA_VF_IB_PORT_GUID]) { + struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]); + ++ if (ivt->vf >= INT_MAX) ++ return -EINVAL; + if (!ops->ndo_set_vf_guid) + return -EOPNOTSUPP; + +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 7b4ce3f9e2f4..5ec73cf386df 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -718,6 +718,7 @@ static void tcp_v6_init_req(struct request_sock *req, + const struct sock *sk_listener, + struct sk_buff *skb) + { ++ bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags); + struct inet_request_sock *ireq = inet_rsk(req); + const struct ipv6_pinfo *np = inet6_sk(sk_listener); + +@@ -725,7 +726,7 @@ static void tcp_v6_init_req(struct request_sock *req, + ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; + + /* So that link locals have meaning */ +- if (!sk_listener->sk_bound_dev_if && ++ if ((!sk_listener->sk_bound_dev_if || l3_slave) && + ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL) + ireq->ir_iif = tcp_v6_iif(skb); + +diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c +index 0171b27a2b81..48d81857961c 100644 +--- a/net/openvswitch/conntrack.c ++++ b/net/openvswitch/conntrack.c +@@ -1083,7 +1083,8 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key, + &info->labels.mask); + if (err) + return err; +- } else if (labels_nonzero(&info->labels.mask)) { ++ } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && ++ labels_nonzero(&info->labels.mask)) { + err = ovs_ct_set_labels(ct, key, &info->labels.value, + &info->labels.mask); + if (err) +diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c +index b6f6bfad8b2a..fb0caa500ac8 100644 +--- a/net/sched/act_pedit.c ++++ b/net/sched/act_pedit.c +@@ -46,7 +46,7 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla, + int err = -EINVAL; + int rem; + +- if (!nla || !n) ++ if (!nla) + return NULL; + + keys_ex = kcalloc(n, sizeof(*k), GFP_KERNEL); +@@ -163,6 +163,9 @@ static int 
tcf_pedit_init(struct net *net, struct nlattr *nla, + return -EINVAL; + + parm = nla_data(pattr); ++ if (!parm->nkeys) ++ return -EINVAL; ++ + ksize = parm->nkeys * sizeof(struct tc_pedit_key); + if (nla_len(pattr) < sizeof(*parm) + ksize) + return -EINVAL; +@@ -172,8 +175,6 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, + return PTR_ERR(keys_ex); + + if (!tcf_idr_check(tn, parm->index, a, bind)) { +- if (!parm->nkeys) +- return -EINVAL; + ret = tcf_idr_create(tn, parm->index, est, a, + &act_pedit_ops, bind, false); + if (ret) +diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c +index 1d74d653e6c0..ad0dcb69395d 100644 +--- a/net/sunrpc/auth_gss/gss_krb5_seal.c ++++ b/net/sunrpc/auth_gss/gss_krb5_seal.c +@@ -63,6 +63,7 @@ + #include <linux/sunrpc/gss_krb5.h> + #include <linux/random.h> + #include <linux/crypto.h> ++#include <linux/atomic.h> + + #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) + # define RPCDBG_FACILITY RPCDBG_AUTH +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c +index a42871a59f3b..f75b5b7c1fc2 100644 +--- a/net/sunrpc/xprtsock.c ++++ b/net/sunrpc/xprtsock.c +@@ -127,7 +127,7 @@ static struct ctl_table xs_tunables_table[] = { + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xprt_min_resvport_limit, +- .extra2 = &xprt_max_resvport ++ .extra2 = &xprt_max_resvport_limit + }, + { + .procname = "max_resvport", +@@ -135,7 +135,7 @@ static struct ctl_table xs_tunables_table[] = { + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, +- .extra1 = &xprt_min_resvport, ++ .extra1 = &xprt_min_resvport_limit, + .extra2 = &xprt_max_resvport_limit + }, + { +@@ -1754,11 +1754,17 @@ static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task) + spin_unlock_bh(&xprt->transport_lock); + } + +-static unsigned short xs_get_random_port(void) ++static int xs_get_random_port(void) + { +- unsigned short range = xprt_max_resvport - xprt_min_resvport + 1; +- 
unsigned short rand = (unsigned short) prandom_u32() % range; +- return rand + xprt_min_resvport; ++ unsigned short min = xprt_min_resvport, max = xprt_max_resvport; ++ unsigned short range; ++ unsigned short rand; ++ ++ if (max < min) ++ return -EADDRINUSE; ++ range = max - min + 1; ++ rand = (unsigned short) prandom_u32() % range; ++ return rand + min; + } + + /** +@@ -1815,9 +1821,9 @@ static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock) + transport->srcport = xs_sock_getport(sock); + } + +-static unsigned short xs_get_srcport(struct sock_xprt *transport) ++static int xs_get_srcport(struct sock_xprt *transport) + { +- unsigned short port = transport->srcport; ++ int port = transport->srcport; + + if (port == 0 && transport->xprt.resvport) + port = xs_get_random_port(); +@@ -1838,7 +1844,7 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock) + { + struct sockaddr_storage myaddr; + int err, nloop = 0; +- unsigned short port = xs_get_srcport(transport); ++ int port = xs_get_srcport(transport); + unsigned short last; + + /* +@@ -1856,8 +1862,8 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock) + * transport->xprt.resvport == 1) xs_get_srcport above will + * ensure that port is non-zero and we will bind as needed. 
+ */ +- if (port == 0) +- return 0; ++ if (port <= 0) ++ return port; + + memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen); + do { +@@ -3286,12 +3292,8 @@ static int param_set_uint_minmax(const char *val, + + static int param_set_portnr(const char *val, const struct kernel_param *kp) + { +- if (kp->arg == &xprt_min_resvport) +- return param_set_uint_minmax(val, kp, +- RPC_MIN_RESVPORT, +- xprt_max_resvport); + return param_set_uint_minmax(val, kp, +- xprt_min_resvport, ++ RPC_MIN_RESVPORT, + RPC_MAX_RESVPORT); + } + +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index 4de9dfd14d09..99f581a61cfa 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -225,6 +225,8 @@ static inline void unix_release_addr(struct unix_address *addr) + + static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp) + { ++ *hashp = 0; ++ + if (len <= sizeof(short) || len > sizeof(*sunaddr)) + return -EINVAL; + if (!sunaddr || sunaddr->sun_family != AF_UNIX) +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c +index a8eb0657c1e8..d20f43057323 100644 +--- a/net/vmw_vsock/virtio_transport_common.c ++++ b/net/vmw_vsock/virtio_transport_common.c +@@ -92,8 +92,17 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque) + struct virtio_vsock_pkt *pkt = opaque; + struct af_vsockmon_hdr *hdr; + struct sk_buff *skb; ++ size_t payload_len; ++ void *payload_buf; + +- skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + pkt->len, ++ /* A packet could be split to fit the RX buffer, so we can retrieve ++ * the payload length from the header and the buffer pointer taking ++ * care of the offset in the original packet. 
++ */ ++ payload_len = le32_to_cpu(pkt->hdr.len); ++ payload_buf = pkt->buf + pkt->off; ++ ++ skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len, + GFP_ATOMIC); + if (!skb) + return NULL; +@@ -133,8 +142,8 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque) + + skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr)); + +- if (pkt->len) { +- skb_put_data(skb, pkt->buf, pkt->len); ++ if (payload_len) { ++ skb_put_data(skb, payload_buf, payload_len); + } + + return skb; +diff --git a/net/wireless/ap.c b/net/wireless/ap.c +index 63682176c96c..c4bd3ecef508 100644 +--- a/net/wireless/ap.c ++++ b/net/wireless/ap.c +@@ -40,6 +40,8 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, + cfg80211_sched_dfs_chan_update(rdev); + } + ++ schedule_work(&cfg80211_disconnect_work); ++ + return err; + } + +diff --git a/net/wireless/core.h b/net/wireless/core.h +index 90f90c7d8bf9..507ec6446eb6 100644 +--- a/net/wireless/core.h ++++ b/net/wireless/core.h +@@ -429,6 +429,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev); + bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, + u32 center_freq_khz, u32 bw_khz); + ++extern struct work_struct cfg80211_disconnect_work; ++ + /** + * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable + * @wiphy: the wiphy to validate against +diff --git a/net/wireless/sme.c b/net/wireless/sme.c +index d014aea07160..8344153800e2 100644 +--- a/net/wireless/sme.c ++++ b/net/wireless/sme.c +@@ -642,11 +642,15 @@ static bool cfg80211_is_all_idle(void) + * All devices must be idle as otherwise if you are actively + * scanning some new beacon hints could be learned and would + * count as new regulatory hints. ++ * Also if there is any other active beaconing interface we ++ * need not issue a disconnect hint and reset any info such ++ * as chan dfs state, etc. 
+ */ + list_for_each_entry(rdev, &cfg80211_rdev_list, list) { + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { + wdev_lock(wdev); +- if (wdev->conn || wdev->current_bss) ++ if (wdev->conn || wdev->current_bss || ++ cfg80211_beaconing_iface_active(wdev)) + is_all_idle = false; + wdev_unlock(wdev); + } +@@ -663,7 +667,7 @@ static void disconnect_work(struct work_struct *work) + rtnl_unlock(); + } + +-static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); ++DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); + + + /* +diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c +index 5826aa8362f1..9edb26ab16e9 100644 +--- a/sound/firewire/isight.c ++++ b/sound/firewire/isight.c +@@ -639,7 +639,7 @@ static int isight_probe(struct fw_unit *unit, + if (!isight->audio_base) { + dev_err(&unit->device, "audio unit base not found\n"); + err = -ENXIO; +- goto err_unit; ++ goto error; + } + fw_iso_resources_init(&isight->resources, unit); + +@@ -668,12 +668,12 @@ static int isight_probe(struct fw_unit *unit, + dev_set_drvdata(&unit->device, isight); + + return 0; +- +-err_unit: +- fw_unit_put(isight->unit); +- mutex_destroy(&isight->mutex); + error: + snd_card_free(card); ++ ++ mutex_destroy(&isight->mutex); ++ fw_unit_put(isight->unit); ++ + return err; + } + +diff --git a/sound/i2c/cs8427.c b/sound/i2c/cs8427.c +index 7e21621e492a..7fd1b4000883 100644 +--- a/sound/i2c/cs8427.c ++++ b/sound/i2c/cs8427.c +@@ -118,7 +118,7 @@ static int snd_cs8427_send_corudata(struct snd_i2c_device *device, + struct cs8427 *chip = device->private_data; + char *hw_data = udata ? 
+ chip->playback.hw_udata : chip->playback.hw_status; +- char data[32]; ++ unsigned char data[32]; + int err, idx; + + if (!memcmp(hw_data, ndata, count)) +diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c +index 45a4aa9d2a47..901457da25ec 100644 +--- a/sound/soc/tegra/tegra_sgtl5000.c ++++ b/sound/soc/tegra/tegra_sgtl5000.c +@@ -149,14 +149,14 @@ static int tegra_sgtl5000_driver_probe(struct platform_device *pdev) + dev_err(&pdev->dev, + "Property 'nvidia,i2s-controller' missing/invalid\n"); + ret = -EINVAL; +- goto err; ++ goto err_put_codec_of_node; + } + + tegra_sgtl5000_dai.platform_of_node = tegra_sgtl5000_dai.cpu_of_node; + + ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev); + if (ret) +- goto err; ++ goto err_put_cpu_of_node; + + ret = snd_soc_register_card(card); + if (ret) { +@@ -169,6 +169,13 @@ static int tegra_sgtl5000_driver_probe(struct platform_device *pdev) + + err_fini_utils: + tegra_asoc_utils_fini(&machine->util_data); ++err_put_cpu_of_node: ++ of_node_put(tegra_sgtl5000_dai.cpu_of_node); ++ tegra_sgtl5000_dai.cpu_of_node = NULL; ++ tegra_sgtl5000_dai.platform_of_node = NULL; ++err_put_codec_of_node: ++ of_node_put(tegra_sgtl5000_dai.codec_of_node); ++ tegra_sgtl5000_dai.codec_of_node = NULL; + err: + return ret; + } +@@ -183,6 +190,12 @@ static int tegra_sgtl5000_driver_remove(struct platform_device *pdev) + + tegra_asoc_utils_fini(&machine->util_data); + ++ of_node_put(tegra_sgtl5000_dai.cpu_of_node); ++ tegra_sgtl5000_dai.cpu_of_node = NULL; ++ tegra_sgtl5000_dai.platform_of_node = NULL; ++ of_node_put(tegra_sgtl5000_dai.codec_of_node); ++ tegra_sgtl5000_dai.codec_of_node = NULL; ++ + return ret; + } + +diff --git a/tools/gpio/Build b/tools/gpio/Build +index 620c1937d957..4141f35837db 100644 +--- a/tools/gpio/Build ++++ b/tools/gpio/Build +@@ -1,3 +1,4 @@ ++gpio-utils-y += gpio-utils.o + lsgpio-y += lsgpio.o gpio-utils.o + gpio-hammer-y += gpio-hammer.o gpio-utils.o + gpio-event-mon-y += 
gpio-event-mon.o gpio-utils.o +diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile +index f8bc8656a544..6a73c06e069c 100644 +--- a/tools/gpio/Makefile ++++ b/tools/gpio/Makefile +@@ -35,11 +35,15 @@ $(OUTPUT)include/linux/gpio.h: ../../include/uapi/linux/gpio.h + + prepare: $(OUTPUT)include/linux/gpio.h + ++GPIO_UTILS_IN := $(output)gpio-utils-in.o ++$(GPIO_UTILS_IN): prepare FORCE ++ $(Q)$(MAKE) $(build)=gpio-utils ++ + # + # lsgpio + # + LSGPIO_IN := $(OUTPUT)lsgpio-in.o +-$(LSGPIO_IN): prepare FORCE ++$(LSGPIO_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o + $(Q)$(MAKE) $(build)=lsgpio + $(OUTPUT)lsgpio: $(LSGPIO_IN) + $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ +@@ -48,7 +52,7 @@ $(OUTPUT)lsgpio: $(LSGPIO_IN) + # gpio-hammer + # + GPIO_HAMMER_IN := $(OUTPUT)gpio-hammer-in.o +-$(GPIO_HAMMER_IN): prepare FORCE ++$(GPIO_HAMMER_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o + $(Q)$(MAKE) $(build)=gpio-hammer + $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN) + $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ +@@ -57,7 +61,7 @@ $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN) + # gpio-event-mon + # + GPIO_EVENT_MON_IN := $(OUTPUT)gpio-event-mon-in.o +-$(GPIO_EVENT_MON_IN): prepare FORCE ++$(GPIO_EVENT_MON_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o + $(Q)$(MAKE) $(build)=gpio-event-mon + $(OUTPUT)gpio-event-mon: $(GPIO_EVENT_MON_IN) + $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ +diff --git a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk +index b02a36b2c14f..a42015b305f4 100644 +--- a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk ++++ b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk +@@ -69,7 +69,7 @@ BEGIN { + + lprefix1_expr = "\\((66|!F3)\\)" + lprefix2_expr = "\\(F3\\)" +- lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)" ++ lprefix3_expr = "\\((F2|!F3|66&F2)\\)" + lprefix_expr = "\\((66|F2|F3)\\)" + max_lprefix = 4 + +@@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod) + return add_flags(imm, mod) 
+ } + +-/^[0-9a-f]+\:/ { ++/^[0-9a-f]+:/ { + if (NR == 1) + next + # get index +diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c +index 943b6b614683..bed0794e3295 100644 +--- a/tools/power/acpi/tools/acpidump/apmain.c ++++ b/tools/power/acpi/tools/acpidump/apmain.c +@@ -139,7 +139,7 @@ static int ap_insert_action(char *argument, u32 to_be_done) + + current_action++; + if (current_action > AP_MAX_ACTIONS) { +- fprintf(stderr, "Too many table options (max %u)\n", ++ fprintf(stderr, "Too many table options (max %d)\n", + AP_MAX_ACTIONS); + return (-1); + } +diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc +index 231bcd2c4eb5..1e7ac6f3362f 100644 +--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc ++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc +@@ -71,8 +71,11 @@ test_badarg "\$stackp" "\$stack0+10" "\$stack1-10" + echo "r ${PROBEFUNC} \$retval" > kprobe_events + ! echo "p ${PROBEFUNC} \$retval" > kprobe_events + ++# $comm was introduced in 4.8, older kernels reject it. 
++if grep -A1 "fetcharg:" README | grep -q '\$comm' ; then + : "Comm access" + test_goodarg "\$comm" ++fi + + : "Indirect memory access" + test_goodarg "+0(${GOODREG})" "-0(${GOODREG})" "+10(\$stack)" \ +diff --git a/tools/testing/selftests/powerpc/cache_shape/Makefile b/tools/testing/selftests/powerpc/cache_shape/Makefile +index 1be547434a49..7e0c175b8297 100644 +--- a/tools/testing/selftests/powerpc/cache_shape/Makefile ++++ b/tools/testing/selftests/powerpc/cache_shape/Makefile +@@ -1,11 +1,6 @@ + # SPDX-License-Identifier: GPL-2.0 +-TEST_PROGS := cache_shape +- +-all: $(TEST_PROGS) +- +-$(TEST_PROGS): ../harness.c ../utils.c ++TEST_GEN_PROGS := cache_shape + + include ../../lib.mk + +-clean: +- rm -f $(TEST_PROGS) *.o ++$(TEST_GEN_PROGS): ../harness.c ../utils.c +diff --git a/tools/testing/selftests/powerpc/signal/Makefile b/tools/testing/selftests/powerpc/signal/Makefile +index a7cbd5082e27..4213978f3ee2 100644 +--- a/tools/testing/selftests/powerpc/signal/Makefile ++++ b/tools/testing/selftests/powerpc/signal/Makefile +@@ -1,14 +1,9 @@ + # SPDX-License-Identifier: GPL-2.0 +-TEST_PROGS := signal signal_tm +- +-all: $(TEST_PROGS) +- +-$(TEST_PROGS): ../harness.c ../utils.c signal.S ++TEST_GEN_PROGS := signal signal_tm + + CFLAGS += -maltivec +-signal_tm: CFLAGS += -mhtm ++$(OUTPUT)/signal_tm: CFLAGS += -mhtm + + include ../../lib.mk + +-clean: +- rm -f $(TEST_PROGS) *.o ++$(TEST_GEN_PROGS): ../harness.c ../utils.c signal.S +diff --git a/tools/testing/selftests/powerpc/switch_endian/Makefile b/tools/testing/selftests/powerpc/switch_endian/Makefile +index 30b8ff8fb82e..e4cedfe9753d 100644 +--- a/tools/testing/selftests/powerpc/switch_endian/Makefile ++++ b/tools/testing/selftests/powerpc/switch_endian/Makefile +@@ -7,6 +7,7 @@ EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S + + include ../../lib.mk + ++$(OUTPUT)/switch_endian_test: ASFLAGS += -I $(OUTPUT) + $(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S + + $(OUTPUT)/check-reversed.o: 
$(OUTPUT)/check.o +diff --git a/tools/testing/selftests/watchdog/watchdog-test.c b/tools/testing/selftests/watchdog/watchdog-test.c +index 6e290874b70e..f1c6e025cbe5 100644 +--- a/tools/testing/selftests/watchdog/watchdog-test.c ++++ b/tools/testing/selftests/watchdog/watchdog-test.c +@@ -89,7 +89,13 @@ int main(int argc, char *argv[]) + fd = open("/dev/watchdog", O_WRONLY); + + if (fd == -1) { +- printf("Watchdog device not enabled.\n"); ++ if (errno == ENOENT) ++ printf("Watchdog device not enabled.\n"); ++ else if (errno == EACCES) ++ printf("Run watchdog as root.\n"); ++ else ++ printf("Watchdog device open failed %s\n", ++ strerror(errno)); + exit(-1); + } + +@@ -103,7 +109,7 @@ int main(int argc, char *argv[]) + printf("Last boot is caused by: %s.\n", (flags != 0) ? + "Watchdog" : "Power-On-Reset"); + else +- printf("WDIOC_GETBOOTSTATUS errno '%s'\n", strerror(errno)); ++ printf("WDIOC_GETBOOTSTATUS error '%s'\n", strerror(errno)); + break; + case 'd': + flags = WDIOS_DISABLECARD; +@@ -111,7 +117,7 @@ int main(int argc, char *argv[]) + if (!ret) + printf("Watchdog card disabled.\n"); + else +- printf("WDIOS_DISABLECARD errno '%s'\n", strerror(errno)); ++ printf("WDIOS_DISABLECARD error '%s'\n", strerror(errno)); + break; + case 'e': + flags = WDIOS_ENABLECARD; +@@ -119,7 +125,7 @@ int main(int argc, char *argv[]) + if (!ret) + printf("Watchdog card enabled.\n"); + else +- printf("WDIOS_ENABLECARD errno '%s'\n", strerror(errno)); ++ printf("WDIOS_ENABLECARD error '%s'\n", strerror(errno)); + break; + case 'p': + ping_rate = strtoul(optarg, NULL, 0); +@@ -133,7 +139,7 @@ int main(int argc, char *argv[]) + if (!ret) + printf("Watchdog timeout set to %u seconds.\n", flags); + else +- printf("WDIOC_SETTIMEOUT errno '%s'\n", strerror(errno)); ++ printf("WDIOC_SETTIMEOUT error '%s'\n", strerror(errno)); + break; + default: + usage(argv[0]); +diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c +index 
6ff7b601f854..4bb905925b0e 100644 +--- a/tools/usb/usbip/libsrc/usbip_host_common.c ++++ b/tools/usb/usbip/libsrc/usbip_host_common.c +@@ -43,7 +43,7 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev) + int size; + int fd; + int length; +- char status; ++ char status[2] = { 0 }; + int value = 0; + + size = snprintf(status_attr_path, sizeof(status_attr_path), +@@ -61,15 +61,15 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev) + return -1; + } + +- length = read(fd, &status, 1); ++ length = read(fd, status, 1); + if (length < 0) { + err("error reading attribute %s", status_attr_path); + close(fd); + return -1; + } + +- value = atoi(&status); +- ++ value = atoi(status); ++ close(fd); + return value; + } + +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index ea61162b2b53..cdaacdf7bc87 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -142,10 +142,30 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + { + } + ++bool kvm_is_zone_device_pfn(kvm_pfn_t pfn) ++{ ++ /* ++ * The metadata used by is_zone_device_page() to determine whether or ++ * not a page is ZONE_DEVICE is guaranteed to be valid if and only if ++ * the device has been pinned, e.g. by get_user_pages(). WARN if the ++ * page_count() is zero to help detect bad usage of this helper. ++ */ ++ if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn)))) ++ return false; ++ ++ return is_zone_device_page(pfn_to_page(pfn)); ++} ++ + bool kvm_is_reserved_pfn(kvm_pfn_t pfn) + { ++ /* ++ * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting ++ * perspective they are "normal" pages, albeit with slightly different ++ * usage rules. 
++ */ + if (pfn_valid(pfn)) +- return PageReserved(pfn_to_page(pfn)); ++ return PageReserved(pfn_to_page(pfn)) && ++ !kvm_is_zone_device_pfn(pfn); + + return true; + } +@@ -1730,7 +1750,7 @@ static void kvm_release_pfn_dirty(kvm_pfn_t pfn) + + void kvm_set_pfn_dirty(kvm_pfn_t pfn) + { +- if (!kvm_is_reserved_pfn(pfn)) { ++ if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) { + struct page *page = pfn_to_page(pfn); + + if (!PageReserved(page)) +@@ -1741,7 +1761,7 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); + + void kvm_set_pfn_accessed(kvm_pfn_t pfn) + { +- if (!kvm_is_reserved_pfn(pfn)) ++ if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) + mark_page_accessed(pfn_to_page(pfn)); + } + EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); |