-rw-r--r--  0000_README              |    4
-rw-r--r--  1013_linux-4.15.14.patch | 3891
2 files changed, 3895 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 2b51c4d4..f4d8a807 100644 --- a/0000_README +++ b/0000_README @@ -95,6 +95,10 @@ Patch: 1012_linux-4.15.13.patch From: http://www.kernel.org Desc: Linux 4.15.13 +Patch: 1013_linux-4.15.14.patch +From: http://www.kernel.org +Desc: Linux 4.15.14 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1013_linux-4.15.14.patch b/1013_linux-4.15.14.patch new file mode 100644 index 00000000..9227e361 --- /dev/null +++ b/1013_linux-4.15.14.patch @@ -0,0 +1,3891 @@ +diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio +index 2e3f919485f4..fdec308a5041 100644 +--- a/Documentation/ABI/testing/sysfs-bus-iio ++++ b/Documentation/ABI/testing/sysfs-bus-iio +@@ -32,7 +32,7 @@ Description: + Description of the physical chip / device for device X. + Typically a part number. + +-What: /sys/bus/iio/devices/iio:deviceX/timestamp_clock ++What: /sys/bus/iio/devices/iio:deviceX/current_timestamp_clock + KernelVersion: 4.5 + Contact: linux-iio@vger.kernel.org + Description: +diff --git a/Makefile b/Makefile +index 82245e654d10..a5e561900daf 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 15 +-SUBLEVEL = 13 ++SUBLEVEL = 14 + EXTRAVERSION = + NAME = Fearless Coyote + +@@ -798,6 +798,15 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) + # disable invalid "can't wrap" optimizations for signed / pointers + KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) + ++# clang sets -fmerge-all-constants by default as optimization, but this ++# is non-conforming behavior for C and in fact breaks the kernel, so we ++# need to disable it here generally. ++KBUILD_CFLAGS += $(call cc-option,-fno-merge-all-constants) ++ ++# for gcc -fno-merge-all-constants disables everything, but it is fine ++# to have actual conforming behavior enabled. 
++KBUILD_CFLAGS += $(call cc-option,-fmerge-constants) ++ + # Make sure -fstack-check isn't enabled (like gentoo apparently did) + KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,) + +diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c +index 5bdc2c4db9ad..f15dc3dfecf8 100644 +--- a/arch/arm64/mm/mmu.c ++++ b/arch/arm64/mm/mmu.c +@@ -941,3 +941,13 @@ int pmd_clear_huge(pmd_t *pmd) + pmd_clear(pmd); + return 1; + } ++ ++int pud_free_pmd_page(pud_t *pud) ++{ ++ return pud_none(*pud); ++} ++ ++int pmd_free_pte_page(pmd_t *pmd) ++{ ++ return pmd_none(*pmd); ++} +diff --git a/arch/h8300/include/asm/byteorder.h b/arch/h8300/include/asm/byteorder.h +index ecff2d1ca5a3..6eaa7ad5fc2c 100644 +--- a/arch/h8300/include/asm/byteorder.h ++++ b/arch/h8300/include/asm/byteorder.h +@@ -2,7 +2,6 @@ + #ifndef __H8300_BYTEORDER_H__ + #define __H8300_BYTEORDER_H__ + +-#define __BIG_ENDIAN __ORDER_BIG_ENDIAN__ + #include <linux/byteorder/big_endian.h> + + #endif +diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig +index 692ae85a3e3d..8e3a1fc2bc39 100644 +--- a/arch/mips/lantiq/Kconfig ++++ b/arch/mips/lantiq/Kconfig +@@ -13,6 +13,8 @@ choice + config SOC_AMAZON_SE + bool "Amazon SE" + select SOC_TYPE_XWAY ++ select MFD_SYSCON ++ select MFD_CORE + + config SOC_XWAY + bool "XWAY" +diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c +index 52500d3b7004..e0af39b33e28 100644 +--- a/arch/mips/lantiq/xway/sysctrl.c ++++ b/arch/mips/lantiq/xway/sysctrl.c +@@ -549,9 +549,9 @@ void __init ltq_soc_init(void) + clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(), + ltq_ar9_fpi_hz(), CLOCK_250M); + clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P); +- clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0); ++ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM); + clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P); +- clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1); ++ clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM); + clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH); + clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); + clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); +@@ -560,7 +560,7 @@ void __init ltq_soc_init(void) + } else { + clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(), + ltq_danube_fpi_hz(), ltq_danube_pp32_hz()); +- clkdev_add_pmu("1f203018.usb2-phy", "ctrl", 1, 0, PMU_USB0); ++ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM); + clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P); + clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); + clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); +diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c +index 1b274742077d..d2718de60b9b 100644 +--- a/arch/mips/ralink/mt7621.c ++++ b/arch/mips/ralink/mt7621.c +@@ -170,6 +170,28 @@ void prom_soc_init(struct ralink_soc_info *soc_info) + u32 n1; + u32 rev; + ++ /* Early detection of CMP support */ ++ mips_cm_probe(); ++ mips_cpc_probe(); ++ ++ if (mips_cps_numiocu(0)) { ++ /* ++ * mips_cm_probe() wipes out bootloader ++ * config for CM regions and we have to configure them ++ * again. This SoC cannot talk to pamlbus devices ++ * witout proper iocu region set up. ++ * ++ * FIXME: it would be better to do this with values ++ * from DT, but we need this very early because ++ * without this we cannot talk to pretty much anything ++ * including serial. 
++ */ ++ write_gcr_reg0_base(MT7621_PALMBUS_BASE); ++ write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE | ++ CM_GCR_REGn_MASK_CMTGT_IOCU0); ++ __sync(); ++ } ++ + n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); + n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); + +@@ -194,26 +216,6 @@ void prom_soc_init(struct ralink_soc_info *soc_info) + + rt2880_pinmux_data = mt7621_pinmux_data; + +- /* Early detection of CMP support */ +- mips_cm_probe(); +- mips_cpc_probe(); +- +- if (mips_cps_numiocu(0)) { +- /* +- * mips_cm_probe() wipes out bootloader +- * config for CM regions and we have to configure them +- * again. This SoC cannot talk to pamlbus devices +- * witout proper iocu region set up. +- * +- * FIXME: it would be better to do this with values +- * from DT, but we need this very early because +- * without this we cannot talk to pretty much anything +- * including serial. +- */ +- write_gcr_reg0_base(MT7621_PALMBUS_BASE); +- write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE | +- CM_GCR_REGn_MASK_CMTGT_IOCU0); +- } + + if (!register_cps_smp_ops()) + return; +diff --git a/arch/mips/ralink/reset.c b/arch/mips/ralink/reset.c +index 64543d66e76b..e9531fea23a2 100644 +--- a/arch/mips/ralink/reset.c ++++ b/arch/mips/ralink/reset.c +@@ -96,16 +96,9 @@ static void ralink_restart(char *command) + unreachable(); + } + +-static void ralink_halt(void) +-{ +- local_irq_disable(); +- unreachable(); +-} +- + static int __init mips_reboot_setup(void) + { + _machine_restart = ralink_restart; +- _machine_halt = ralink_halt; + + return 0; + } +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index 498c1b812300..1c4d012550ec 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -223,6 +223,15 @@ KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) + + LDFLAGS := -m elf_$(UTS_MACHINE) + ++# ++# The 64-bit kernel must be aligned to 2MB. Pass -z max-page-size=0x200000 to ++# the linker to force 2MB page size regardless of the default page size used ++# by the linker. 
++# ++ifdef CONFIG_X86_64 ++LDFLAGS += $(call ld-option, -z max-page-size=0x200000) ++endif ++ + # Speed up the build + KBUILD_CFLAGS += -pipe + # Workaround for a gcc prelease that unfortunately was shipped in a suse release +diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c +index 98761a1576ce..252fee320816 100644 +--- a/arch/x86/boot/compressed/misc.c ++++ b/arch/x86/boot/compressed/misc.c +@@ -309,6 +309,10 @@ static void parse_elf(void *output) + + switch (phdr->p_type) { + case PT_LOAD: ++#ifdef CONFIG_X86_64 ++ if ((phdr->p_align % 0x200000) != 0) ++ error("Alignment of LOAD segment isn't multiple of 2MB"); ++#endif + #ifdef CONFIG_RELOCATABLE + dest = output; + dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S +index 50dcbf640850..3a7d58384479 100644 +--- a/arch/x86/entry/entry_64.S ++++ b/arch/x86/entry/entry_64.S +@@ -1097,7 +1097,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ + #endif /* CONFIG_HYPERV */ + + idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK +-idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK ++idtentry int3 do_int3 has_error_code=0 + idtentry stack_segment do_stack_segment has_error_code=1 + + #ifdef CONFIG_XEN +diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c +index 577fa8adb785..542392b6aab6 100644 +--- a/arch/x86/entry/vsyscall/vsyscall_64.c ++++ b/arch/x86/entry/vsyscall/vsyscall_64.c +@@ -355,7 +355,7 @@ void __init set_vsyscall_pgtable_user_bits(pgd_t *root) + set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER)); + p4d = p4d_offset(pgd, VSYSCALL_ADDR); + #if CONFIG_PGTABLE_LEVELS >= 5 +- p4d->p4d |= _PAGE_USER; ++ set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER)); + #endif + pud = pud_offset(p4d, VSYSCALL_ADDR); + set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER)); +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c +index 56457cb73448..9b18a227fff7 100644 +--- a/arch/x86/events/intel/core.c ++++ b/arch/x86/events/intel/core.c +@@ -3194,7 +3194,7 @@ static unsigned bdw_limit_period(struct perf_event *event, unsigned left) + X86_CONFIG(.event=0xc0, .umask=0x01)) { + if (left < 128) + left = 128; +- left &= ~0x3fu; ++ left &= ~0x3fULL; + } + return left; + } +diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c +index 6d8044ab1060..7da7a79eba53 100644 +--- a/arch/x86/events/intel/uncore_snbep.c ++++ b/arch/x86/events/intel/uncore_snbep.c +@@ -3562,24 +3562,27 @@ static struct intel_uncore_type *skx_msr_uncores[] = { + NULL, + }; + ++/* ++ * To determine the number of CHAs, it should read bits 27:0 in the CAPID6 ++ * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083. 
++ */ ++#define SKX_CAPID6 0x9c ++#define SKX_CHA_BIT_MASK GENMASK(27, 0) ++ + static int skx_count_chabox(void) + { +- struct pci_dev *chabox_dev = NULL; +- int bus, count = 0; ++ struct pci_dev *dev = NULL; ++ u32 val = 0; + +- while (1) { +- chabox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x208d, chabox_dev); +- if (!chabox_dev) +- break; +- if (count == 0) +- bus = chabox_dev->bus->number; +- if (bus != chabox_dev->bus->number) +- break; +- count++; +- } ++ dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev); ++ if (!dev) ++ goto out; + +- pci_dev_put(chabox_dev); +- return count; ++ pci_read_config_dword(dev, SKX_CAPID6, &val); ++ val &= SKX_CHA_BIT_MASK; ++out: ++ pci_dev_put(dev); ++ return hweight32(val); + } + + void skx_uncore_cpu_init(void) +@@ -3606,7 +3609,7 @@ static struct intel_uncore_type skx_uncore_imc = { + }; + + static struct attribute *skx_upi_uncore_formats_attr[] = { +- &format_attr_event_ext.attr, ++ &format_attr_event.attr, + &format_attr_umask_ext.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, +diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h +index 8b6780751132..5db8b0b10766 100644 +--- a/arch/x86/include/asm/vmx.h ++++ b/arch/x86/include/asm/vmx.h +@@ -352,6 +352,7 @@ enum vmcs_field { + #define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ + #define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */ + #define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ ++#define INTR_TYPE_PRIV_SW_EXCEPTION (5 << 8) /* ICE breakpoint - undocumented */ + #define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */ + + /* GUEST_INTERRUPTIBILITY_INFO flags. */ +diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c +index 56d99be3706a..50bee5fe1140 100644 +--- a/arch/x86/kernel/idt.c ++++ b/arch/x86/kernel/idt.c +@@ -160,7 +160,6 @@ static const __initconst struct idt_data early_pf_idts[] = { + */ + static const __initconst struct idt_data dbg_idts[] = { + INTG(X86_TRAP_DB, debug), +- INTG(X86_TRAP_BP, int3), + }; + #endif + +@@ -183,7 +182,6 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss; + static const __initconst struct idt_data ist_idts[] = { + ISTG(X86_TRAP_DB, debug, DEBUG_STACK), + ISTG(X86_TRAP_NMI, nmi, NMI_STACK), +- SISTG(X86_TRAP_BP, int3, DEBUG_STACK), + ISTG(X86_TRAP_DF, double_fault, DOUBLEFAULT_STACK), + #ifdef CONFIG_X86_MCE + ISTG(X86_TRAP_MC, &machine_check, MCE_STACK), +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index 3d9b2308e7fa..03f3d7695dac 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -577,7 +577,6 @@ do_general_protection(struct pt_regs *regs, long error_code) + } + NOKPROBE_SYMBOL(do_general_protection); + +-/* May run on IST stack. */ + dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) + { + #ifdef CONFIG_DYNAMIC_FTRACE +@@ -592,6 +591,13 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) + if (poke_int3_handler(regs)) + return; + ++ /* ++ * Use ist_enter despite the fact that we don't use an IST stack. ++ * We can be called from a kprobe in non-CONTEXT_KERNEL kernel ++ * mode or even during context tracking state changes. ++ * ++ * This means that we can't schedule. That's okay. 
++ */ + ist_enter(regs); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); + #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP +@@ -609,15 +615,10 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) + SIGTRAP) == NOTIFY_STOP) + goto exit; + +- /* +- * Let others (NMI) know that the debug stack is in use +- * as we may switch to the interrupt stack. +- */ +- debug_stack_usage_inc(); + cond_local_irq_enable(regs); + do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); + cond_local_irq_disable(regs); +- debug_stack_usage_dec(); ++ + exit: + ist_exit(regs); + } +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 87b453eeae40..2beb77c319e8 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -1079,6 +1079,13 @@ static inline bool is_machine_check(u32 intr_info) + (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK); + } + ++/* Undocumented: icebp/int1 */ ++static inline bool is_icebp(u32 intr_info) ++{ ++ return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) ++ == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK); ++} ++ + static inline bool cpu_has_vmx_msr_bitmap(void) + { + return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS; +@@ -6173,7 +6180,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { + vcpu->arch.dr6 &= ~15; + vcpu->arch.dr6 |= dr6 | DR6_RTM; +- if (!(dr6 & ~DR6_RESERVED)) /* icebp */ ++ if (is_icebp(intr_info)) + skip_emulated_instruction(vcpu); + + kvm_queue_exception(vcpu, DB_VECTOR); +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c +index 004abf9ebf12..34cda7e0551b 100644 +--- a/arch/x86/mm/pgtable.c ++++ b/arch/x86/mm/pgtable.c +@@ -702,4 +702,52 @@ int pmd_clear_huge(pmd_t *pmd) + + return 0; + } ++ ++/** ++ * pud_free_pmd_page - Clear pud entry and free pmd page. ++ * @pud: Pointer to a PUD. ++ * ++ * Context: The pud range has been unmaped and TLB purged. ++ * Return: 1 if clearing the entry succeeded. 0 otherwise. ++ */ ++int pud_free_pmd_page(pud_t *pud) ++{ ++ pmd_t *pmd; ++ int i; ++ ++ if (pud_none(*pud)) ++ return 1; ++ ++ pmd = (pmd_t *)pud_page_vaddr(*pud); ++ ++ for (i = 0; i < PTRS_PER_PMD; i++) ++ if (!pmd_free_pte_page(&pmd[i])) ++ return 0; ++ ++ pud_clear(pud); ++ free_page((unsigned long)pmd); ++ ++ return 1; ++} ++ ++/** ++ * pmd_free_pte_page - Clear pmd entry and free pte page. ++ * @pmd: Pointer to a PMD. ++ * ++ * Context: The pmd range has been unmaped and TLB purged. ++ * Return: 1 if clearing the entry succeeded. 0 otherwise. ++ */ ++int pmd_free_pte_page(pmd_t *pmd) ++{ ++ pte_t *pte; ++ ++ if (pmd_none(*pmd)) ++ return 1; ++ ++ pte = (pte_t *)pmd_page_vaddr(*pmd); ++ pmd_clear(pmd); ++ free_page((unsigned long)pte); ++ ++ return 1; ++} + #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ +diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c +index 940aac70b4da..bb77606d04e0 100644 +--- a/arch/x86/net/bpf_jit_comp.c ++++ b/arch/x86/net/bpf_jit_comp.c +@@ -1156,7 +1156,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) + * may converge on the last pass. 
In such case do one more + * pass to emit the final image + */ +- for (pass = 0; pass < 10 || image; pass++) { ++ for (pass = 0; pass < 20 || image; pass++) { + proglen = do_jit(prog, addrs, image, oldproglen, &ctx); + if (proglen <= 0) { + image = NULL; +@@ -1183,6 +1183,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) + } + } + oldproglen = proglen; ++ cond_resched(); + } + + if (bpf_jit_enable > 1) +diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c +index 2dd15e967c3f..0167e526b04f 100644 +--- a/arch/x86/platform/efi/efi_64.c ++++ b/arch/x86/platform/efi/efi_64.c +@@ -228,7 +228,7 @@ int __init efi_alloc_page_tables(void) + if (!pud) { + if (CONFIG_PGTABLE_LEVELS > 4) + free_page((unsigned long) pgd_page_vaddr(*pgd)); +- free_page((unsigned long)efi_pgd); ++ free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER); + return -ENOMEM; + } + +diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c +index 11b113f8e367..ebb626ffb5fa 100644 +--- a/drivers/acpi/acpi_watchdog.c ++++ b/drivers/acpi/acpi_watchdog.c +@@ -74,10 +74,10 @@ void __init acpi_watchdog_init(void) + res.start = gas->address; + if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + res.flags = IORESOURCE_MEM; +- res.end = res.start + ALIGN(gas->access_width, 4); ++ res.end = res.start + ALIGN(gas->access_width, 4) - 1; + } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + res.flags = IORESOURCE_IO; +- res.end = res.start + gas->access_width; ++ res.end = res.start + gas->access_width - 1; + } else { + pr_warn("Unsupported address space: %u\n", + gas->space_id); +diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c +index 917f1cc0fda4..8fb74d9011da 100644 +--- a/drivers/acpi/numa.c ++++ b/drivers/acpi/numa.c +@@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm) + */ + int acpi_map_pxm_to_online_node(int pxm) + { +- int node, n, dist, min_dist; ++ int node, min_node; + + node = acpi_map_pxm_to_node(pxm); + + if (node == NUMA_NO_NODE) + node = 0; + ++ min_node = node; + if (!node_online(node)) { +- min_dist = INT_MAX; ++ int min_dist = INT_MAX, dist, n; ++ + for_each_online_node(n) { + dist = node_distance(node, n); + if (dist < min_dist) { + min_dist = dist; +- node = n; ++ min_node = n; + } + } + } + +- return node; ++ return min_node; + } + EXPORT_SYMBOL(acpi_map_pxm_to_online_node); + +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c +index 44a9d630b7ac..2badef1271fd 100644 +--- a/drivers/ata/ahci.c ++++ b/drivers/ata/ahci.c +@@ -542,7 +542,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { + .driver_data = board_ahci_yes_fbs }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230), + .driver_data = board_ahci_yes_fbs }, +- { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), ++ { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */ ++ .driver_data = board_ahci_yes_fbs }, ++ { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */ + .driver_data = board_ahci_yes_fbs }, + + /* Promise */ +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index 3c09122bf038..7431ccd03316 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -4530,6 +4530,25 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { + { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, + { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, + ++ /* Crucial BX100 SSD 500GB has broken LPM support */ ++ { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM }, ++ ++ /* 512GB MX100 with MU01 firmware has both 
queued TRIM and LPM issues */ ++ { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | ++ ATA_HORKAGE_ZERO_AFTER_TRIM | ++ ATA_HORKAGE_NOLPM, }, ++ /* 512GB MX100 with newer firmware has only LPM issues */ ++ { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM | ++ ATA_HORKAGE_NOLPM, }, ++ ++ /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */ ++ { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ++ ATA_HORKAGE_ZERO_AFTER_TRIM | ++ ATA_HORKAGE_NOLPM, }, ++ { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ++ ATA_HORKAGE_ZERO_AFTER_TRIM | ++ ATA_HORKAGE_NOLPM, }, ++ + /* devices that don't properly handle queued TRIM commands */ + { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, +@@ -4541,7 +4560,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { + ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, +- { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ++ { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ++ ATA_HORKAGE_ZERO_AFTER_TRIM, }, ++ { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, +@@ -5401,8 +5422,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc) + * We guarantee to LLDs that they will have at least one + * non-zero sg if the command is a data command. + */ +- if (WARN_ON_ONCE(ata_is_data(prot) && +- (!qc->sg || !qc->n_elem || !qc->nbytes))) ++ if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)) + goto sys_err; + + if (ata_is_dma(prot) || (ata_is_pio(prot) && +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 66be961c93a4..197e110f8ac7 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -3316,6 +3316,12 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) + goto invalid_fld; + } + ++ /* We may not issue NCQ commands to devices not supporting NCQ */ ++ if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) { ++ fp = 1; ++ goto invalid_fld; ++ } ++ + /* sanity check for pio multi commands */ + if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) { + fp = 1; +@@ -4309,7 +4315,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, + if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { + /* relay SCSI command to ATAPI device */ + int len = COMMAND_SIZE(scsi_op); +- if (unlikely(len > scmd->cmd_len || len > dev->cdb_len)) ++ if (unlikely(len > scmd->cmd_len || ++ len > dev->cdb_len || ++ scmd->cmd_len > ATAPI_CDB_LEN)) + goto bad_cdb_len; + + xlat_func = atapi_xlat; +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index e71e54c478da..2f57e8b88a7a 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -230,7 +230,6 @@ static const struct usb_device_id blacklist_table[] = { + { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, +@@ -263,6 +262,7 @@ static const struct usb_device_id blacklist_table[] = { + { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, + + /* QCA ROME chipset 
*/ ++ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME }, +@@ -383,10 +383,10 @@ static const struct usb_device_id blacklist_table[] = { + */ + static const struct dmi_system_id btusb_needs_reset_resume_table[] = { + { +- /* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */ ++ /* Dell OptiPlex 3060 (QCA ROME device 0cf3:e007) */ + .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), +- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"), ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"), + }, + }, + {} +diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c +index 44301a3d9963..a07f6451694a 100644 +--- a/drivers/clk/bcm/clk-bcm2835.c ++++ b/drivers/clk/bcm/clk-bcm2835.c +@@ -449,17 +449,17 @@ struct bcm2835_pll_ana_bits { + static const struct bcm2835_pll_ana_bits bcm2835_ana_default = { + .mask0 = 0, + .set0 = 0, +- .mask1 = (u32)~(A2W_PLL_KI_MASK | A2W_PLL_KP_MASK), ++ .mask1 = A2W_PLL_KI_MASK | A2W_PLL_KP_MASK, + .set1 = (2 << A2W_PLL_KI_SHIFT) | (8 << A2W_PLL_KP_SHIFT), +- .mask3 = (u32)~A2W_PLL_KA_MASK, ++ .mask3 = A2W_PLL_KA_MASK, + .set3 = (2 << A2W_PLL_KA_SHIFT), + .fb_prediv_mask = BIT(14), + }; + + static const struct bcm2835_pll_ana_bits bcm2835_ana_pllh = { +- .mask0 = (u32)~(A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK), ++ .mask0 = A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK, + .set0 = (2 << A2W_PLLH_KA_SHIFT) | (2 << A2W_PLLH_KI_LOW_SHIFT), +- .mask1 = (u32)~(A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK), ++ .mask1 = A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK, + .set1 = (6 << A2W_PLLH_KP_SHIFT), + .mask3 = 0, + .set3 = 0, +@@ -623,8 +623,10 @@ static int bcm2835_pll_on(struct clk_hw *hw) + ~A2W_PLL_CTRL_PWRDN); + + /* Take the PLL out of reset. */ ++ spin_lock(&cprman->regs_lock); + cprman_write(cprman, data->cm_ctrl_reg, + cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST); ++ spin_unlock(&cprman->regs_lock); + + /* Wait for the PLL to lock. */ + timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS); +@@ -701,9 +703,11 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw, + } + + /* Unmask the reference clock from the oscillator. 
*/ ++ spin_lock(&cprman->regs_lock); + cprman_write(cprman, A2W_XOSC_CTRL, + cprman_read(cprman, A2W_XOSC_CTRL) | + data->reference_enable_mask); ++ spin_unlock(&cprman->regs_lock); + + if (do_ana_setup_first) + bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana); +diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +index 72b16ed1012b..3b97f60540ad 100644 +--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c ++++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +@@ -762,7 +762,7 @@ static struct ccu_mp out_a_clk = { + .features = CCU_FEATURE_FIXED_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("out-a", + clk_out_parents, +- &ccu_div_ops, ++ &ccu_mp_ops, + 0), + }, + }; +@@ -783,7 +783,7 @@ static struct ccu_mp out_b_clk = { + .features = CCU_FEATURE_FIXED_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("out-b", + clk_out_parents, +- &ccu_div_ops, ++ &ccu_mp_ops, + 0), + }, + }; +@@ -804,7 +804,7 @@ static struct ccu_mp out_c_clk = { + .features = CCU_FEATURE_FIXED_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("out-c", + clk_out_parents, +- &ccu_div_ops, ++ &ccu_mp_ops, + 0), + }, + }; +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index bb5fa895fb64..97ecfd16ee82 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -3090,8 +3090,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, + + switch (aplane->base.type) { + case DRM_PLANE_TYPE_PRIMARY: +- aplane->base.format_default = true; +- + res = drm_universal_plane_init( + dm->adev->ddev, + &aplane->base, +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +index 9bd142f65f9b..e1acc10e35a2 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +@@ -109,7 +109,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps( + struct cea_sad *sad = &sads[i]; + + edid_caps->audio_modes[i].format_code = sad->format; +- edid_caps->audio_modes[i].channel_count = sad->channels; ++ edid_caps->audio_modes[i].channel_count = sad->channels + 1; + edid_caps->audio_modes[i].sample_rate = sad->freq; + edid_caps->audio_modes[i].sample_size = sad->byte2; + } +diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c +index 5e1f1e2deb52..343cb5fa9b03 100644 +--- a/drivers/gpu/drm/drm_framebuffer.c ++++ b/drivers/gpu/drm/drm_framebuffer.c +@@ -458,6 +458,12 @@ int drm_mode_getfb(struct drm_device *dev, + if (!fb) + return -ENOENT; + ++ /* Multi-planar framebuffers need getfb2. 
*/ ++ if (fb->format->num_planes > 1) { ++ ret = -EINVAL; ++ goto out; ++ } ++ + r->height = fb->height; + r->width = fb->width; + r->depth = fb->format->depth; +@@ -481,6 +487,7 @@ int drm_mode_getfb(struct drm_device *dev, + ret = -ENODEV; + } + ++out: + drm_framebuffer_put(fb); + + return ret; +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c +index 30e129684c7c..fd0d0c758a94 100644 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c +@@ -90,25 +90,18 @@ void radeon_connector_hotplug(struct drm_connector *connector) + /* don't do anything if sink is not display port, i.e., + * passive dp->(dvi|hdmi) adaptor + */ +- if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { +- int saved_dpms = connector->dpms; +- /* Only turn off the display if it's physically disconnected */ +- if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { +- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); +- } else if (radeon_dp_needs_link_train(radeon_connector)) { +- /* Don't try to start link training before we +- * have the dpcd */ +- if (!radeon_dp_getdpcd(radeon_connector)) +- return; +- +- /* set it to OFF so that drm_helper_connector_dpms() +- * won't return immediately since the current state +- * is ON at this point. +- */ +- connector->dpms = DRM_MODE_DPMS_OFF; +- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); +- } +- connector->dpms = saved_dpms; ++ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT && ++ radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && ++ radeon_dp_needs_link_train(radeon_connector)) { ++ /* Don't start link training before we have the DPCD */ ++ if (!radeon_dp_getdpcd(radeon_connector)) ++ return; ++ ++ /* Turn the connector off and back on immediately, which ++ * will trigger link training ++ */ ++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); ++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + } + } + } +diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c +index b5b335c9b2bb..2ebdc6d5a76e 100644 +--- a/drivers/gpu/drm/udl/udl_fb.c ++++ b/drivers/gpu/drm/udl/udl_fb.c +@@ -159,10 +159,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) + { + unsigned long start = vma->vm_start; + unsigned long size = vma->vm_end - vma->vm_start; +- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; ++ unsigned long offset; + unsigned long page, pos; + +- if (offset + size > info->fix.smem_len) ++ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) ++ return -EINVAL; ++ ++ offset = vma->vm_pgoff << PAGE_SHIFT; ++ ++ if (offset > info->fix.smem_len || size > info->fix.smem_len - offset) + return -EINVAL; + + pos = (unsigned long)info->fix.smem_start + offset; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +index 184340d486c3..86d25f18aa99 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +@@ -1337,6 +1337,19 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv) + */ + void vmw_svga_disable(struct vmw_private *dev_priv) + { ++ /* ++ * Disabling SVGA will turn off device modesetting capabilities, so ++ * notify KMS about that so that it doesn't cache atomic state that ++ * isn't valid anymore, for example crtcs turned on. 
++ * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex), ++ * but vmw_kms_lost_device() takes the reservation sem and thus we'll ++ * end up with lock order reversal. Thus, a master may actually perform ++ * a new modeset just after we call vmw_kms_lost_device() and race with ++ * vmw_svga_disable(), but that should at worst cause atomic KMS state ++ * to be inconsistent with the device, causing modesetting problems. ++ * ++ */ ++ vmw_kms_lost_device(dev_priv->dev); + ttm_write_lock(&dev_priv->reservation_sem, false); + spin_lock(&dev_priv->svga_lock); + if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +index 7e5f30e234b1..8c65cc3b0dda 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +@@ -938,6 +938,7 @@ int vmw_kms_present(struct vmw_private *dev_priv, + int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv); ++void vmw_kms_lost_device(struct drm_device *dev); + + int vmw_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +index fcd58145d0da..dfaaf9a2d81e 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +@@ -31,7 +31,6 @@ + #include <drm/drm_atomic_helper.h> + #include <drm/drm_rect.h> + +- + /* Might need a hrtimer here? */ + #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) + +@@ -2531,9 +2530,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, + * Helper to be used if an error forces the caller to undo the actions of + * vmw_kms_helper_resource_prepare. + */ +-void vmw_kms_helper_resource_revert(struct vmw_resource *res) ++void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx) + { +- vmw_kms_helper_buffer_revert(res->backup); ++ struct vmw_resource *res = ctx->res; ++ ++ vmw_kms_helper_buffer_revert(ctx->buf); ++ vmw_dmabuf_unreference(&ctx->buf); + vmw_resource_unreserve(res, false, NULL, 0); + mutex_unlock(&res->dev_priv->cmdbuf_mutex); + } +@@ -2550,10 +2552,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res) + * interrupted by a signal. + */ + int vmw_kms_helper_resource_prepare(struct vmw_resource *res, +- bool interruptible) ++ bool interruptible, ++ struct vmw_validation_ctx *ctx) + { + int ret = 0; + ++ ctx->buf = NULL; ++ ctx->res = res; ++ + if (interruptible) + ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex); + else +@@ -2572,6 +2578,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, + res->dev_priv->has_mob); + if (ret) + goto out_unreserve; ++ ++ ctx->buf = vmw_dmabuf_reference(res->backup); + } + ret = vmw_resource_validate(res); + if (ret) +@@ -2579,7 +2587,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, + return 0; + + out_revert: +- vmw_kms_helper_buffer_revert(res->backup); ++ vmw_kms_helper_buffer_revert(ctx->buf); + out_unreserve: + vmw_resource_unreserve(res, false, NULL, 0); + out_unlock: +@@ -2595,11 +2603,13 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, + * @out_fence: Optional pointer to a fence pointer. If non-NULL, a + * ref-counted fence pointer is returned here. 
+ */ +-void vmw_kms_helper_resource_finish(struct vmw_resource *res, +- struct vmw_fence_obj **out_fence) ++void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, ++ struct vmw_fence_obj **out_fence) + { +- if (res->backup || out_fence) +- vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup, ++ struct vmw_resource *res = ctx->res; ++ ++ if (ctx->buf || out_fence) ++ vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf, + out_fence, NULL); + + vmw_resource_unreserve(res, false, NULL, 0); +@@ -2865,3 +2875,14 @@ int vmw_kms_set_config(struct drm_mode_set *set, + + return drm_atomic_helper_set_config(set, ctx); + } ++ ++ ++/** ++ * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost ++ * ++ * @dev: Pointer to the drm device ++ */ ++void vmw_kms_lost_device(struct drm_device *dev) ++{ ++ drm_atomic_helper_shutdown(dev); ++} +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +index cd9da2dd79af..3d2ca280eaa7 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +@@ -240,6 +240,11 @@ struct vmw_display_unit { + int set_gui_y; + }; + ++struct vmw_validation_ctx { ++ struct vmw_resource *res; ++ struct vmw_dma_buffer *buf; ++}; ++ + #define vmw_crtc_to_du(x) \ + container_of(x, struct vmw_display_unit, crtc) + #define vmw_connector_to_du(x) \ +@@ -296,9 +301,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, + struct drm_vmw_fence_rep __user * + user_fence_rep); + int vmw_kms_helper_resource_prepare(struct vmw_resource *res, +- bool interruptible); +-void vmw_kms_helper_resource_revert(struct vmw_resource *res); +-void vmw_kms_helper_resource_finish(struct vmw_resource *res, ++ bool interruptible, ++ struct vmw_validation_ctx *ctx); ++void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx); ++void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, + struct vmw_fence_obj **out_fence); + int vmw_kms_readback(struct vmw_private *dev_priv, + struct drm_file *file_priv, +@@ -439,5 +445,4 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv, + + int vmw_kms_set_config(struct drm_mode_set *set, + struct drm_modeset_acquire_ctx *ctx); +- + #endif +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +index 63a4cd794b73..3ec9eae831b8 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +@@ -909,12 +909,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, + struct vmw_framebuffer_surface *vfbs = + container_of(framebuffer, typeof(*vfbs), base); + struct vmw_kms_sou_surface_dirty sdirty; ++ struct vmw_validation_ctx ctx; + int ret; + + if (!srf) + srf = &vfbs->surface->res; + +- ret = vmw_kms_helper_resource_prepare(srf, true); ++ ret = vmw_kms_helper_resource_prepare(srf, true, &ctx); + if (ret) + return ret; + +@@ -933,7 +934,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, + ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, + dest_x, dest_y, num_clips, inc, + &sdirty.base); +- vmw_kms_helper_resource_finish(srf, out_fence); ++ vmw_kms_helper_resource_finish(&ctx, out_fence); + + return ret; + } +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +index b68d74888ab1..6b969e5dea2a 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +@@ -980,12 +980,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, + struct 
vmw_framebuffer_surface *vfbs = + container_of(framebuffer, typeof(*vfbs), base); + struct vmw_stdu_dirty sdirty; ++ struct vmw_validation_ctx ctx; + int ret; + + if (!srf) + srf = &vfbs->surface->res; + +- ret = vmw_kms_helper_resource_prepare(srf, true); ++ ret = vmw_kms_helper_resource_prepare(srf, true, &ctx); + if (ret) + return ret; + +@@ -1008,7 +1009,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, + dest_x, dest_y, num_clips, inc, + &sdirty.base); + out_finish: +- vmw_kms_helper_resource_finish(srf, out_fence); ++ vmw_kms_helper_resource_finish(&ctx, out_fence); + + return ret; + } +diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c +index 12eb8caa4263..3f8dde8d59ba 100644 +--- a/drivers/hv/ring_buffer.c ++++ b/drivers/hv/ring_buffer.c +@@ -394,13 +394,24 @@ __hv_pkt_iter_next(struct vmbus_channel *channel, + } + EXPORT_SYMBOL_GPL(__hv_pkt_iter_next); + ++/* How many bytes were read in this iterator cycle */ ++static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi, ++ u32 start_read_index) ++{ ++ if (rbi->priv_read_index >= start_read_index) ++ return rbi->priv_read_index - start_read_index; ++ else ++ return rbi->ring_datasize - start_read_index + ++ rbi->priv_read_index; ++} ++ + /* + * Update host ring buffer after iterating over packets. + */ + void hv_pkt_iter_close(struct vmbus_channel *channel) + { + struct hv_ring_buffer_info *rbi = &channel->inbound; +- u32 orig_write_sz = hv_get_bytes_to_write(rbi); ++ u32 curr_write_sz, pending_sz, bytes_read, start_read_index; + + /* + * Make sure all reads are done before we update the read index since +@@ -408,8 +419,12 @@ void hv_pkt_iter_close(struct vmbus_channel *channel) + * is updated. + */ + virt_rmb(); ++ start_read_index = rbi->ring_buffer->read_index; + rbi->ring_buffer->read_index = rbi->priv_read_index; + ++ if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz) ++ return; ++ + /* + * Issue a full memory barrier before making the signaling decision. + * Here is the reason for having this barrier: +@@ -423,26 +438,29 @@ void hv_pkt_iter_close(struct vmbus_channel *channel) + */ + virt_mb(); + +- /* If host has disabled notifications then skip */ +- if (rbi->ring_buffer->interrupt_mask) ++ pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); ++ if (!pending_sz) + return; + +- if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) { +- u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); ++ /* ++ * Ensure the read of write_index in hv_get_bytes_to_write() ++ * happens after the read of pending_send_sz. ++ */ ++ virt_rmb(); ++ curr_write_sz = hv_get_bytes_to_write(rbi); ++ bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index); + +- /* +- * If there was space before we began iteration, +- * then host was not blocked. Also handles case where +- * pending_sz is zero then host has nothing pending +- * and does not need to be signaled. +- */ +- if (orig_write_sz > pending_sz) +- return; ++ /* ++ * If there was space before we began iteration, ++ * then host was not blocked. ++ */ + +- /* If pending write will not fit, don't give false hope. */ +- if (hv_get_bytes_to_write(rbi) < pending_sz) +- return; +- } ++ if (curr_write_sz - bytes_read > pending_sz) ++ return; ++ ++ /* If pending write will not fit, don't give false hope. 
*/ ++ if (curr_write_sz <= pending_sz) ++ return; + + vmbus_setevent(channel); + } +diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c +index b960015cb073..051a72eecb24 100644 +--- a/drivers/hwmon/k10temp.c ++++ b/drivers/hwmon/k10temp.c +@@ -86,6 +86,7 @@ static const struct tctl_offset tctl_offset_table[] = { + { 0x17, "AMD Ryzen 7 1800X", 20000 }, + { 0x17, "AMD Ryzen Threadripper 1950X", 27000 }, + { 0x17, "AMD Ryzen Threadripper 1920X", 27000 }, ++ { 0x17, "AMD Ryzen Threadripper 1900X", 27000 }, + { 0x17, "AMD Ryzen Threadripper 1950", 10000 }, + { 0x17, "AMD Ryzen Threadripper 1920", 10000 }, + { 0x17, "AMD Ryzen Threadripper 1910", 10000 }, +@@ -128,7 +129,10 @@ static ssize_t temp1_input_show(struct device *dev, + + data->read_tempreg(data->pdev, ®val); + temp = (regval >> 21) * 125; +- temp -= data->temp_offset; ++ if (temp > data->temp_offset) ++ temp -= data->temp_offset; ++ else ++ temp = 0; + + return sprintf(buf, "%u\n", temp); + } +diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c +index 460aa58e0159..3e6fd5a8ac5b 100644 +--- a/drivers/iio/accel/st_accel_core.c ++++ b/drivers/iio/accel/st_accel_core.c +@@ -951,7 +951,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev) + if (!pdata) + pdata = (struct st_sensors_platform_data *)&default_accel_pdata; + +- err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data); ++ err = st_sensors_init_sensor(indio_dev, pdata); + if (err < 0) + goto st_accel_power_off; + +diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c +index 36047147ce7c..0d237fd69769 100644 +--- a/drivers/iio/adc/meson_saradc.c ++++ b/drivers/iio/adc/meson_saradc.c +@@ -462,8 +462,10 @@ static int meson_sar_adc_lock(struct iio_dev *indio_dev) + regmap_read(priv->regmap, MESON_SAR_ADC_DELAY, &val); + } while (val & MESON_SAR_ADC_DELAY_BL30_BUSY && timeout--); + +- if (timeout < 0) ++ if (timeout < 0) { ++ mutex_unlock(&indio_dev->mlock); + return -ETIMEDOUT; ++ } + } + + return 0; +diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c +index fbe2431f5b81..1ea9f5513b02 100644 +--- a/drivers/iio/chemical/ccs811.c ++++ b/drivers/iio/chemical/ccs811.c +@@ -133,6 +133,9 @@ static int ccs811_start_sensor_application(struct i2c_client *client) + if (ret < 0) + return ret; + ++ if ((ret & CCS811_STATUS_FW_MODE_APPLICATION)) ++ return 0; ++ + if ((ret & CCS811_STATUS_APP_VALID_MASK) != + CCS811_STATUS_APP_VALID_LOADED) + return -EIO; +diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h +index 4fdb7fcc3ea8..cebc6bd31b79 100644 +--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h ++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h +@@ -130,6 +130,7 @@ struct st_lsm6dsx_sensor { + * @irq: Device interrupt line (I2C or SPI). + * @lock: Mutex to protect read and write operations. + * @fifo_lock: Mutex to prevent concurrent access to the hw FIFO. ++ * @conf_lock: Mutex to prevent concurrent FIFO configuration update. + * @fifo_mode: FIFO operating mode supported by the device. + * @enable_mask: Enabled sensor bitmask. + * @sip: Total number of samples (acc/gyro) in a given pattern. 
+@@ -144,6 +145,7 @@ struct st_lsm6dsx_hw { + + struct mutex lock; + struct mutex fifo_lock; ++ struct mutex conf_lock; + + enum st_lsm6dsx_fifo_mode fifo_mode; + u8 enable_mask; +diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +index 755c472e8a05..c899d658f6be 100644 +--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c ++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +@@ -325,38 +325,40 @@ static int st_lsm6dsx_update_fifo(struct iio_dev *iio_dev, bool enable) + struct st_lsm6dsx_hw *hw = sensor->hw; + int err; + ++ mutex_lock(&hw->conf_lock); ++ + if (hw->fifo_mode != ST_LSM6DSX_FIFO_BYPASS) { + err = st_lsm6dsx_flush_fifo(hw); + if (err < 0) +- return err; ++ goto out; + } + + if (enable) { + err = st_lsm6dsx_sensor_enable(sensor); + if (err < 0) +- return err; ++ goto out; + } else { + err = st_lsm6dsx_sensor_disable(sensor); + if (err < 0) +- return err; ++ goto out; + } + + err = st_lsm6dsx_set_fifo_odr(sensor, enable); + if (err < 0) +- return err; ++ goto out; + + err = st_lsm6dsx_update_decimators(hw); + if (err < 0) +- return err; ++ goto out; + + err = st_lsm6dsx_update_watermark(sensor, sensor->watermark); + if (err < 0) +- return err; ++ goto out; + + if (hw->enable_mask) { + err = st_lsm6dsx_set_fifo_mode(hw, ST_LSM6DSX_FIFO_CONT); + if (err < 0) +- return err; ++ goto out; + + /* + * store enable buffer timestamp as reference to compute +@@ -365,7 +367,10 @@ static int st_lsm6dsx_update_fifo(struct iio_dev *iio_dev, bool enable) + sensor->ts = iio_get_time_ns(iio_dev); + } + +- return 0; ++out: ++ mutex_unlock(&hw->conf_lock); ++ ++ return err; + } + + static irqreturn_t st_lsm6dsx_handler_irq(int irq, void *private) +diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +index 239c735242be..4d43c956d676 100644 +--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c ++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +@@ -448,7 +448,7 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor, + + st_lsm6dsx_sensor_disable(sensor); + +- *val = (s16)data; ++ *val = (s16)le16_to_cpu(data); + + return IIO_VAL_INT; + } +@@ -528,7 +528,12 @@ static int st_lsm6dsx_set_watermark(struct iio_dev *iio_dev, unsigned int val) + if (val < 1 || val > hw->settings->max_fifo_size) + return -EINVAL; + ++ mutex_lock(&hw->conf_lock); ++ + err = st_lsm6dsx_update_watermark(sensor, val); ++ ++ mutex_unlock(&hw->conf_lock); ++ + if (err < 0) + return err; + +@@ -739,6 +744,7 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, const char *name, + + mutex_init(&hw->lock); + mutex_init(&hw->fifo_lock); ++ mutex_init(&hw->conf_lock); + + hw->dev = dev; + hw->irq = irq; +diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c +index 349e5c713c03..4ddb6cf7d401 100644 +--- a/drivers/iio/pressure/st_pressure_core.c ++++ b/drivers/iio/pressure/st_pressure_core.c +@@ -640,7 +640,7 @@ int st_press_common_probe(struct iio_dev *indio_dev) + press_data->sensor_settings->drdy_irq.int2.addr)) + pdata = (struct st_sensors_platform_data *)&default_press_pdata; + +- err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data); ++ err = st_sensors_init_sensor(indio_dev, pdata); + if (err < 0) + goto st_press_power_off; + +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c +index 1961c6a45437..c51c602f06d6 100644 +--- a/drivers/infiniband/hw/mlx5/mr.c ++++ b/drivers/infiniband/hw/mlx5/mr.c +@@ -838,7 
+838,8 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length, + *umem = ib_umem_get(pd->uobject->context, start, length, + access_flags, 0); + err = PTR_ERR_OR_ZERO(*umem); +- if (err < 0) { ++ if (err) { ++ *umem = NULL; + mlx5_ib_err(dev, "umem get failed (%d)\n", err); + return err; + } +@@ -1415,6 +1416,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, + if (err) { + mlx5_ib_warn(dev, "Failed to rereg UMR\n"); + ib_umem_release(mr->umem); ++ mr->umem = NULL; + clean_mr(dev, mr); + return err; + } +@@ -1498,14 +1500,11 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) + u32 key = mr->mmkey.key; + + err = destroy_mkey(dev, mr); +- kfree(mr); + if (err) { + mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", + key, err); + return err; + } +- } else { +- mlx5_mr_cache_free(dev, mr); + } + + return 0; +@@ -1548,6 +1547,11 @@ static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) + atomic_sub(npages, &dev->mdev->priv.reg_pages); + } + ++ if (!mr->allocated_from_cache) ++ kfree(mr); ++ else ++ mlx5_mr_cache_free(dev, mr); ++ + return 0; + } + +diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c +index 92f93a880015..aba488cd0e64 100644 +--- a/drivers/media/platform/tegra-cec/tegra_cec.c ++++ b/drivers/media/platform/tegra-cec/tegra_cec.c +@@ -172,16 +172,13 @@ static irqreturn_t tegra_cec_irq_handler(int irq, void *data) + } + } + +- if (status & (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN | +- TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED | +- TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED | +- TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED)) { ++ if (status & TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED) { + cec_write(cec, TEGRA_CEC_INT_STAT, +- (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN | +- TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED | +- TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED | +- TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED)); +- } else if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) { ++ TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED); ++ cec->rx_done = false; ++ cec->rx_buf_cnt = 0; ++ } ++ if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) { + u32 v; + + cec_write(cec, TEGRA_CEC_INT_STAT, +@@ -255,7 +252,7 @@ static int tegra_cec_adap_enable(struct cec_adapter *adap, bool enable) + TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED | + TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED | + TEGRA_CEC_INT_MASK_RX_REGISTER_FULL | +- TEGRA_CEC_INT_MASK_RX_REGISTER_OVERRUN); ++ TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED); + + cec_write(cec, TEGRA_CEC_HW_CONTROL, TEGRA_CEC_HWCTRL_TX_RX_MODE); + return 0; +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c +index b737a9540331..df5fe43072d6 100644 +--- a/drivers/mmc/core/block.c ++++ b/drivers/mmc/core/block.c +@@ -66,6 +66,7 @@ MODULE_ALIAS("mmc:block"); + #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ + #define MMC_SANITIZE_REQ_TIMEOUT 240000 + #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) ++#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8) + + #define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ + (rq_data_dir(req) == WRITE)) +@@ -579,6 +580,24 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, + return data.error; + } + ++ /* ++ * Make sure the cache of the PARTITION_CONFIG register and ++ * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write ++ * changed it successfully. 
++ */ ++ if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) && ++ (cmd.opcode == MMC_SWITCH)) { ++ struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); ++ u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg); ++ ++ /* ++ * Update cache so the next mmc_blk_part_switch call operates ++ * on up-to-date data. ++ */ ++ card->ext_csd.part_config = value; ++ main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK; ++ } ++ + /* + * According to the SD specs, some commands require a delay after + * issuing the command. +diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h +index 79a5b985ccf5..9c821eedd156 100644 +--- a/drivers/mmc/core/card.h ++++ b/drivers/mmc/core/card.h +@@ -82,6 +82,7 @@ struct mmc_fixup { + #define CID_MANFID_APACER 0x27 + #define CID_MANFID_KINGSTON 0x70 + #define CID_MANFID_HYNIX 0x90 ++#define CID_MANFID_NUMONYX 0xFE + + #define END_FIXUP { NULL } + +diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h +index 75d317623852..5153577754f0 100644 +--- a/drivers/mmc/core/quirks.h ++++ b/drivers/mmc/core/quirks.h +@@ -109,6 +109,12 @@ static const struct mmc_fixup mmc_ext_csd_fixups[] = { + */ + MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX, + 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5), ++ /* ++ * Certain Micron (Numonyx) eMMC 4.5 cards might get broken when HPI ++ * feature is used so disable the HPI feature for such buggy cards. ++ */ ++ MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_NUMONYX, ++ 0x014e, add_quirk, MMC_QUIRK_BROKEN_HPI, 6), + + END_FIXUP + }; +diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c +index fa41d9422d57..a84aa3f1ae85 100644 +--- a/drivers/mmc/host/dw_mmc-exynos.c ++++ b/drivers/mmc/host/dw_mmc-exynos.c +@@ -165,9 +165,15 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing) + static int dw_mci_exynos_runtime_resume(struct device *dev) + { + struct dw_mci *host = dev_get_drvdata(dev); ++ int ret; ++ ++ ret = dw_mci_runtime_resume(dev); ++ if (ret) ++ return ret; + + dw_mci_exynos_config_smu(host); +- return dw_mci_runtime_resume(dev); ++ ++ return ret; + } + + /** +diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c +index d9b4acefed31..06d47414d0c1 100644 +--- a/drivers/mmc/host/dw_mmc.c ++++ b/drivers/mmc/host/dw_mmc.c +@@ -413,7 +413,9 @@ static inline void dw_mci_set_cto(struct dw_mci *host) + cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; + if (cto_div == 0) + cto_div = 1; +- cto_ms = DIV_ROUND_UP(MSEC_PER_SEC * cto_clks * cto_div, host->bus_hz); ++ ++ cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div, ++ host->bus_hz); + + /* add a bit spare time */ + cto_ms += 10; +@@ -562,6 +564,7 @@ static int dw_mci_idmac_init(struct dw_mci *host) + (sizeof(struct idmac_desc_64addr) * + (i + 1))) >> 32; + /* Initialize reserved and buffer size fields to "0" */ ++ p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +@@ -584,6 +587,7 @@ static int dw_mci_idmac_init(struct dw_mci *host) + i++, p++) { + p->des3 = cpu_to_le32(host->sg_dma + + (sizeof(struct idmac_desc) * (i + 1))); ++ p->des0 = 0; + p->des1 = 0; + } + +@@ -1799,8 +1803,8 @@ static bool dw_mci_reset(struct dw_mci *host) + } + + if (host->use_dma == TRANS_MODE_IDMAC) +- /* It is also recommended that we reset and reprogram idmac */ +- dw_mci_idmac_reset(host); ++ /* It is also required that we reinit idmac */ ++ dw_mci_idmac_init(host); + + ret = true; + +@@ -1948,8 +1952,9 @@ static void dw_mci_set_drto(struct dw_mci *host) + drto_div = (mci_readl(host, 
CLKDIV) & 0xff) * 2; + if (drto_div == 0) + drto_div = 1; +- drto_ms = DIV_ROUND_UP(MSEC_PER_SEC * drto_clks * drto_div, +- host->bus_hz); ++ ++ drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div, ++ host->bus_hz); + + /* add a bit spare time */ + drto_ms += 10; +diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c +index de8c902059b8..7d80a8bb96fe 100644 +--- a/drivers/mtd/mtdchar.c ++++ b/drivers/mtd/mtdchar.c +@@ -479,7 +479,7 @@ static int shrink_ecclayout(struct mtd_info *mtd, + for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) { + u32 eccpos; + +- ret = mtd_ooblayout_ecc(mtd, section, &oobregion); ++ ret = mtd_ooblayout_ecc(mtd, section++, &oobregion); + if (ret < 0) { + if (ret != -ERANGE) + return ret; +@@ -526,7 +526,7 @@ static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to) + for (i = 0; i < ARRAY_SIZE(to->eccpos);) { + u32 eccpos; + +- ret = mtd_ooblayout_ecc(mtd, section, &oobregion); ++ ret = mtd_ooblayout_ecc(mtd, section++, &oobregion); + if (ret < 0) { + if (ret != -ERANGE) + return ret; +diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c +index bbdd68a54d68..4005b427023c 100644 +--- a/drivers/mtd/nand/fsl_ifc_nand.c ++++ b/drivers/mtd/nand/fsl_ifc_nand.c +@@ -173,14 +173,9 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob) + + /* returns nonzero if entire page is blank */ + static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl, +- u32 *eccstat, unsigned int bufnum) ++ u32 eccstat, unsigned int bufnum) + { +- u32 reg = eccstat[bufnum / 4]; +- int errors; +- +- errors = (reg >> ((3 - bufnum % 4) * 8)) & 15; +- +- return errors; ++ return (eccstat >> ((3 - bufnum % 4) * 8)) & 15; + } + + /* +@@ -193,7 +188,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) + struct fsl_ifc_ctrl *ctrl = priv->ctrl; + struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; + struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; +- u32 eccstat[4]; ++ u32 eccstat; + int i; + + /* set the chip select for NAND Transaction */ +@@ -228,19 +223,17 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) + if (nctrl->eccread) { + int errors; + int bufnum = nctrl->page & priv->bufnum_mask; +- int sector = bufnum * chip->ecc.steps; +- int sector_end = sector + chip->ecc.steps - 1; ++ int sector_start = bufnum * chip->ecc.steps; ++ int sector_end = sector_start + chip->ecc.steps - 1; + __be32 *eccstat_regs; + +- if (ctrl->version >= FSL_IFC_VERSION_2_0_0) +- eccstat_regs = ifc->ifc_nand.v2_nand_eccstat; +- else +- eccstat_regs = ifc->ifc_nand.v1_nand_eccstat; ++ eccstat_regs = ifc->ifc_nand.nand_eccstat; ++ eccstat = ifc_in32(&eccstat_regs[sector_start / 4]); + +- for (i = sector / 4; i <= sector_end / 4; i++) +- eccstat[i] = ifc_in32(&eccstat_regs[i]); ++ for (i = sector_start; i <= sector_end; i++) { ++ if (i != sector_start && !(i % 4)) ++ eccstat = ifc_in32(&eccstat_regs[i / 4]); + +- for (i = sector; i <= sector_end; i++) { + errors = check_read_ecc(mtd, ctrl, eccstat, i); + + if (errors == 15) { +@@ -626,6 +619,7 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) + struct fsl_ifc_ctrl *ctrl = priv->ctrl; + struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; + u32 nand_fsr; ++ int status; + + /* Use READ_STATUS command, but wait for the device to be ready */ + ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +@@ -640,12 +634,12 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) + fsl_ifc_run_command(mtd); + + nand_fsr = 
ifc_in32(&ifc->ifc_nand.nand_fsr); +- ++ status = nand_fsr >> 24; + /* + * The chip always seems to report that it is + * write-protected, even when it is not. + */ +- return nand_fsr | NAND_STATUS_WP; ++ return status | NAND_STATUS_WP; + } + + /* +diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c +index 1e37313054f3..6da69af103e6 100644 +--- a/drivers/net/can/cc770/cc770.c ++++ b/drivers/net/can/cc770/cc770.c +@@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev, + return 0; + } + +-static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) ++static void cc770_tx(struct net_device *dev, int mo) + { + struct cc770_priv *priv = netdev_priv(dev); +- struct net_device_stats *stats = &dev->stats; +- struct can_frame *cf = (struct can_frame *)skb->data; +- unsigned int mo = obj2msgobj(CC770_OBJ_TX); ++ struct can_frame *cf = (struct can_frame *)priv->tx_skb->data; + u8 dlc, rtr; + u32 id; + int i; + +- if (can_dropped_invalid_skb(dev, skb)) +- return NETDEV_TX_OK; +- +- if ((cc770_read_reg(priv, +- msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { +- netdev_err(dev, "TX register is still occupied!\n"); +- return NETDEV_TX_BUSY; +- } +- +- netif_stop_queue(dev); +- + dlc = cf->can_dlc; + id = cf->can_id; +- if (cf->can_id & CAN_RTR_FLAG) +- rtr = 0; +- else +- rtr = MSGCFG_DIR; ++ rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR; ++ ++ cc770_write_reg(priv, msgobj[mo].ctrl0, ++ MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); + cc770_write_reg(priv, msgobj[mo].ctrl1, + RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES); +- cc770_write_reg(priv, msgobj[mo].ctrl0, +- MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES); ++ + if (id & CAN_EFF_FLAG) { + id &= CAN_EFF_MASK; + cc770_write_reg(priv, msgobj[mo].config, +@@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) + for (i = 0; i < dlc; i++) + cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]); + +- /* Store echo skb before starting the transfer */ +- can_put_echo_skb(skb, dev, 0); +- + cc770_write_reg(priv, msgobj[mo].ctrl1, +- RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); ++ RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); ++ cc770_write_reg(priv, msgobj[mo].ctrl0, ++ MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC); ++} + +- stats->tx_bytes += dlc; ++static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) ++{ ++ struct cc770_priv *priv = netdev_priv(dev); ++ unsigned int mo = obj2msgobj(CC770_OBJ_TX); + ++ if (can_dropped_invalid_skb(dev, skb)) ++ return NETDEV_TX_OK; + +- /* +- * HM: We had some cases of repeated IRQs so make sure the +- * INT is acknowledged I know it's already further up, but +- * doing again fixed the issue +- */ +- cc770_write_reg(priv, msgobj[mo].ctrl0, +- MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); ++ netif_stop_queue(dev); ++ ++ if ((cc770_read_reg(priv, ++ msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { ++ netdev_err(dev, "TX register is still occupied!\n"); ++ return NETDEV_TX_BUSY; ++ } ++ ++ priv->tx_skb = skb; ++ cc770_tx(dev, mo); + + return NETDEV_TX_OK; + } +@@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o) + struct cc770_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + unsigned int mo = obj2msgobj(o); ++ struct can_frame *cf; ++ u8 ctrl1; ++ ++ ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1); + +- /* Nothing more to send, switch off interrupts */ + cc770_write_reg(priv, 
msgobj[mo].ctrl0, + MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); +- /* +- * We had some cases of repeated IRQ so make sure the +- * INT is acknowledged ++ cc770_write_reg(priv, msgobj[mo].ctrl1, ++ RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES); ++ ++ if (unlikely(!priv->tx_skb)) { ++ netdev_err(dev, "missing tx skb in tx interrupt\n"); ++ return; ++ } ++ ++ if (unlikely(ctrl1 & MSGLST_SET)) { ++ stats->rx_over_errors++; ++ stats->rx_errors++; ++ } ++ ++ /* When the CC770 is sending an RTR message and it receives a regular ++ * message that matches the id of the RTR message, it will overwrite the ++ * outgoing message in the TX register. When this happens we must ++ * process the received message and try to transmit the outgoing skb ++ * again. + */ +- cc770_write_reg(priv, msgobj[mo].ctrl0, +- MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); ++ if (unlikely(ctrl1 & NEWDAT_SET)) { ++ cc770_rx(dev, mo, ctrl1); ++ cc770_tx(dev, mo); ++ return; ++ } + ++ cf = (struct can_frame *)priv->tx_skb->data; ++ stats->tx_bytes += cf->can_dlc; + stats->tx_packets++; ++ ++ can_put_echo_skb(priv->tx_skb, dev, 0); + can_get_echo_skb(dev, 0); ++ priv->tx_skb = NULL; ++ + netif_wake_queue(dev); + } + +@@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv) + priv->can.do_set_bittiming = cc770_set_bittiming; + priv->can.do_set_mode = cc770_set_mode; + priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; ++ priv->tx_skb = NULL; + + memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags)); + +diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h +index a1739db98d91..95752e1d1283 100644 +--- a/drivers/net/can/cc770/cc770.h ++++ b/drivers/net/can/cc770/cc770.h +@@ -193,6 +193,8 @@ struct cc770_priv { + u8 cpu_interface; /* CPU interface register */ + u8 clkout; /* Clock out register */ + u8 bus_config; /* Bus conffiguration register */ ++ ++ struct sk_buff *tx_skb; + }; + + struct net_device *alloc_cc770dev(int sizeof_priv); +diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c +index 2772d05ff11c..fedd927ba6ed 100644 +--- a/drivers/net/can/ifi_canfd/ifi_canfd.c ++++ b/drivers/net/can/ifi_canfd/ifi_canfd.c +@@ -30,6 +30,7 @@ + #define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2) + #define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3) + #define IFI_CANFD_STCMD_BUSOFF BIT(4) ++#define IFI_CANFD_STCMD_ERROR_WARNING BIT(5) + #define IFI_CANFD_STCMD_BUSMONITOR BIT(16) + #define IFI_CANFD_STCMD_LOOPBACK BIT(18) + #define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24) +@@ -52,7 +53,10 @@ + #define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13) + + #define IFI_CANFD_INTERRUPT 0xc ++#define IFI_CANFD_INTERRUPT_ERROR_BUSOFF BIT(0) + #define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1) ++#define IFI_CANFD_INTERRUPT_ERROR_STATE_CHG BIT(2) ++#define IFI_CANFD_INTERRUPT_ERROR_REC_TEC_INC BIT(3) + #define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10) + #define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16) + #define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22) +@@ -61,6 +65,10 @@ + #define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31)) + + #define IFI_CANFD_IRQMASK 0x10 ++#define IFI_CANFD_IRQMASK_ERROR_BUSOFF BIT(0) ++#define IFI_CANFD_IRQMASK_ERROR_WARNING BIT(1) ++#define IFI_CANFD_IRQMASK_ERROR_STATE_CHG BIT(2) ++#define IFI_CANFD_IRQMASK_ERROR_REC_TEC_INC BIT(3) + #define IFI_CANFD_IRQMASK_SET_ERR BIT(7) + #define IFI_CANFD_IRQMASK_SET_TS BIT(15) + #define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16) +@@ -136,6 +144,8 @@ + #define IFI_CANFD_SYSCLOCK 0x50 + + #define IFI_CANFD_VER 0x54 ++#define 
IFI_CANFD_VER_REV_MASK 0xff ++#define IFI_CANFD_VER_REV_MIN_SUPPORTED 0x15 + + #define IFI_CANFD_IP_ID 0x58 + #define IFI_CANFD_IP_ID_VALUE 0xD073CAFD +@@ -220,7 +230,10 @@ static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable) + + if (enable) { + enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY | +- IFI_CANFD_IRQMASK_RXFIFO_NEMPTY; ++ IFI_CANFD_IRQMASK_RXFIFO_NEMPTY | ++ IFI_CANFD_IRQMASK_ERROR_STATE_CHG | ++ IFI_CANFD_IRQMASK_ERROR_WARNING | ++ IFI_CANFD_IRQMASK_ERROR_BUSOFF; + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) + enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER; + } +@@ -361,12 +374,13 @@ static int ifi_canfd_handle_lost_msg(struct net_device *ndev) + return 1; + } + +-static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr) ++static int ifi_canfd_handle_lec_err(struct net_device *ndev) + { + struct ifi_canfd_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; ++ u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); + const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST | + IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST | + IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST | +@@ -449,6 +463,11 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, + + switch (new_state) { + case CAN_STATE_ERROR_ACTIVE: ++ /* error active state */ ++ priv->can.can_stats.error_warning++; ++ priv->can.state = CAN_STATE_ERROR_ACTIVE; ++ break; ++ case CAN_STATE_ERROR_WARNING: + /* error warning state */ + priv->can.can_stats.error_warning++; + priv->can.state = CAN_STATE_ERROR_WARNING; +@@ -477,7 +496,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, + ifi_canfd_get_berr_counter(ndev, &bec); + + switch (new_state) { +- case CAN_STATE_ERROR_ACTIVE: ++ case CAN_STATE_ERROR_WARNING: + /* error warning state */ + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (bec.txerr > bec.rxerr) ? +@@ -510,22 +529,21 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, + return 1; + } + +-static int ifi_canfd_handle_state_errors(struct net_device *ndev, u32 stcmd) ++static int ifi_canfd_handle_state_errors(struct net_device *ndev) + { + struct ifi_canfd_priv *priv = netdev_priv(ndev); ++ u32 stcmd = readl(priv->base + IFI_CANFD_STCMD); + int work_done = 0; +- u32 isr; + +- /* +- * The ErrWarn condition is a little special, since the bit is +- * located in the INTERRUPT register instead of STCMD register. 
+- */ +- isr = readl(priv->base + IFI_CANFD_INTERRUPT); +- if ((isr & IFI_CANFD_INTERRUPT_ERROR_WARNING) && ++ if ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) && ++ (priv->can.state != CAN_STATE_ERROR_ACTIVE)) { ++ netdev_dbg(ndev, "Error, entered active state\n"); ++ work_done += ifi_canfd_handle_state_change(ndev, ++ CAN_STATE_ERROR_ACTIVE); ++ } ++ ++ if ((stcmd & IFI_CANFD_STCMD_ERROR_WARNING) && + (priv->can.state != CAN_STATE_ERROR_WARNING)) { +- /* Clear the interrupt */ +- writel(IFI_CANFD_INTERRUPT_ERROR_WARNING, +- priv->base + IFI_CANFD_INTERRUPT); + netdev_dbg(ndev, "Error, entered warning state\n"); + work_done += ifi_canfd_handle_state_change(ndev, + CAN_STATE_ERROR_WARNING); +@@ -552,18 +570,11 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota) + { + struct net_device *ndev = napi->dev; + struct ifi_canfd_priv *priv = netdev_priv(ndev); +- const u32 stcmd_state_mask = IFI_CANFD_STCMD_ERROR_PASSIVE | +- IFI_CANFD_STCMD_BUSOFF; +- int work_done = 0; +- +- u32 stcmd = readl(priv->base + IFI_CANFD_STCMD); + u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD); +- u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); ++ int work_done = 0; + + /* Handle bus state changes */ +- if ((stcmd & stcmd_state_mask) || +- ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) == 0)) +- work_done += ifi_canfd_handle_state_errors(ndev, stcmd); ++ work_done += ifi_canfd_handle_state_errors(ndev); + + /* Handle lost messages on RX */ + if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW) +@@ -571,7 +582,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota) + + /* Handle lec errors on the bus */ + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) +- work_done += ifi_canfd_handle_lec_err(ndev, errctr); ++ work_done += ifi_canfd_handle_lec_err(ndev); + + /* Handle normal messages on RX */ + if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY)) +@@ -592,12 +603,13 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id) + struct net_device_stats *stats = &ndev->stats; + const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY | + IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER | ++ IFI_CANFD_INTERRUPT_ERROR_COUNTER | ++ IFI_CANFD_INTERRUPT_ERROR_STATE_CHG | + IFI_CANFD_INTERRUPT_ERROR_WARNING | +- IFI_CANFD_INTERRUPT_ERROR_COUNTER; ++ IFI_CANFD_INTERRUPT_ERROR_BUSOFF; + const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY | + IFI_CANFD_INTERRUPT_TXFIFO_REMOVE; +- const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ | +- IFI_CANFD_INTERRUPT_ERROR_WARNING)); ++ const u32 clr_irq_mask = ~((u32)IFI_CANFD_INTERRUPT_SET_IRQ); + u32 isr; + + isr = readl(priv->base + IFI_CANFD_INTERRUPT); +@@ -933,7 +945,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) + struct resource *res; + void __iomem *addr; + int irq, ret; +- u32 id; ++ u32 id, rev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + addr = devm_ioremap_resource(dev, res); +@@ -947,6 +959,13 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) + return -EINVAL; + } + ++ rev = readl(addr + IFI_CANFD_VER) & IFI_CANFD_VER_REV_MASK; ++ if (rev < IFI_CANFD_VER_REV_MIN_SUPPORTED) { ++ dev_err(dev, "This block is too old (rev %i), minimum supported is rev %i\n", ++ rev, IFI_CANFD_VER_REV_MIN_SUPPORTED); ++ return -EINVAL; ++ } ++ + ndev = alloc_candev(sizeof(*priv), 1); + if (!ndev) + return -ENOMEM; +diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c +index 55513411a82e..ed8561d4a90f 100644 +--- a/drivers/net/can/peak_canfd/peak_canfd.c ++++ 
b/drivers/net/can/peak_canfd/peak_canfd.c +@@ -262,7 +262,6 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, + + spin_lock_irqsave(&priv->echo_lock, flags); + can_get_echo_skb(priv->ndev, msg->client); +- spin_unlock_irqrestore(&priv->echo_lock, flags); + + /* count bytes of the echo instead of skb */ + stats->tx_bytes += cf_len; +@@ -271,6 +270,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, + /* restart tx queue (a slot is free) */ + netif_wake_queue(priv->ndev); + ++ spin_unlock_irqrestore(&priv->echo_lock, flags); + return 0; + } + +@@ -333,7 +333,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, + + /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */ + if (pucan_status_is_rx_barrier(msg)) { +- unsigned long flags; + + if (priv->enable_tx_path) { + int err = priv->enable_tx_path(priv); +@@ -342,16 +341,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, + return err; + } + +- /* restart network queue only if echo skb array is free */ +- spin_lock_irqsave(&priv->echo_lock, flags); +- +- if (!priv->can.echo_skb[priv->echo_idx]) { +- spin_unlock_irqrestore(&priv->echo_lock, flags); +- +- netif_wake_queue(ndev); +- } else { +- spin_unlock_irqrestore(&priv->echo_lock, flags); +- } ++ /* start network queue (echo_skb array is empty) */ ++ netif_start_queue(ndev); + + return 0; + } +@@ -726,11 +717,6 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, + */ + should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]); + +- spin_unlock_irqrestore(&priv->echo_lock, flags); +- +- /* write the skb on the interface */ +- priv->write_tx_msg(priv, msg); +- + /* stop network tx queue if not enough room to save one more msg too */ + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + should_stop_tx_queue |= (room_left < +@@ -742,6 +728,11 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, + if (should_stop_tx_queue) + netif_stop_queue(ndev); + ++ spin_unlock_irqrestore(&priv->echo_lock, flags); ++ ++ /* write the skb on the interface */ ++ priv->write_tx_msg(priv, msg); ++ + return NETDEV_TX_OK; + } + +diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c +index 788c3464a3b0..3c51a884db87 100644 +--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c ++++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c +@@ -349,8 +349,12 @@ static irqreturn_t pciefd_irq_handler(int irq, void *arg) + priv->tx_pages_free++; + spin_unlock_irqrestore(&priv->tx_lock, flags); + +- /* wake producer up */ +- netif_wake_queue(priv->ucan.ndev); ++ /* wake producer up (only if enough room in echo_skb array) */ ++ spin_lock_irqsave(&priv->ucan.echo_lock, flags); ++ if (!priv->ucan.can.echo_skb[priv->ucan.echo_idx]) ++ netif_wake_queue(priv->ucan.ndev); ++ ++ spin_unlock_irqrestore(&priv->ucan.echo_lock, flags); + } + + /* re-enable Rx DMA transfer for this CAN */ +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +index 2ee54133efa1..82064e909784 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +@@ -462,25 +462,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac) + * @dev_addr: optional device address. + * + * P2P needs mac addresses for P2P device and interface. If no device +- * address it specified, these are derived from the primary net device, ie. +- * the permanent ethernet address of the device. 
++ * address it specified, these are derived from a random ethernet ++ * address. + */ + static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr) + { +- struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; +- bool local_admin = false; ++ bool random_addr = false; + +- if (!dev_addr || is_zero_ether_addr(dev_addr)) { +- dev_addr = pri_ifp->mac_addr; +- local_admin = true; +- } ++ if (!dev_addr || is_zero_ether_addr(dev_addr)) ++ random_addr = true; + +- /* Generate the P2P Device Address. This consists of the device's +- * primary MAC address with the locally administered bit set. ++ /* Generate the P2P Device Address obtaining a random ethernet ++ * address with the locally administered bit set. + */ +- memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); +- if (local_admin) +- p2p->dev_addr[0] |= 0x02; ++ if (random_addr) ++ eth_random_addr(p2p->dev_addr); ++ else ++ memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); + + /* Generate the P2P Interface Address. If the discovery and connection + * BSSCFGs need to simultaneously co-exist, then this address must be +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +index 7cd1ffa7d4a7..7df0a02f9b34 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +@@ -1124,7 +1124,8 @@ static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw) + + /* Configuration Space offset 0x70f BIT7 is used to control L0S */ + tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f); +- _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7)); ++ _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7) | ++ ASPM_L1_LATENCY << 3); + + /* Configuration Space offset 0x719 Bit3 is for L1 + * BIT4 is for clock request +diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c +index 345acca576b3..1bd7b3734751 100644 +--- a/drivers/nvdimm/blk.c ++++ b/drivers/nvdimm/blk.c +@@ -278,8 +278,6 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) + disk->queue = q; + disk->flags = GENHD_FL_EXT_DEVT; + nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name); +- set_capacity(disk, 0); +- device_add_disk(dev, disk); + + if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk)) + return -ENOMEM; +@@ -292,6 +290,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) + } + + set_capacity(disk, available_disk_size >> SECTOR_SHIFT); ++ device_add_disk(dev, disk); + revalidate_disk(disk); + return 0; + } +diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c +index c586bcdb5190..c625df951fa1 100644 +--- a/drivers/nvdimm/btt.c ++++ b/drivers/nvdimm/btt.c +@@ -1545,8 +1545,6 @@ static int btt_blk_init(struct btt *btt) + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue); + btt->btt_queue->queuedata = btt; + +- set_capacity(btt->btt_disk, 0); +- device_add_disk(&btt->nd_btt->dev, btt->btt_disk); + if (btt_meta_size(btt)) { + int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt)); + +@@ -1558,6 +1556,7 @@ static int btt_blk_init(struct btt *btt) + } + } + set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); ++ device_add_disk(&btt->nd_btt->dev, btt->btt_disk); + btt->nd_btt->size = btt->nlba * (u64)btt->sector_size; + revalidate_disk(btt->btt_disk); + +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index d7135140bf40..a2f18c4a0346 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -3906,6 +3906,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230, 
+ quirk_dma_func1_alias); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642, + quirk_dma_func1_alias); ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645, ++ quirk_dma_func1_alias); + /* https://bugs.gentoo.org/show_bug.cgi?id=497630 */ + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON, + PCI_DEVICE_ID_JMICRON_JMB388_ESD, +diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c +index 071084d3ee9c..92aeea174a56 100644 +--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c ++++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c +@@ -129,7 +129,7 @@ static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = { + EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gph3", 0x0c), + }; + +-const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = { ++static const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = { + { + /* pin-controller instance 0 data */ + .pin_banks = s5pv210_pin_bank, +@@ -142,6 +142,11 @@ const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = { + }, + }; + ++const struct samsung_pinctrl_of_match_data s5pv210_of_data __initconst = { ++ .ctrl = s5pv210_pin_ctrl, ++ .num_ctrl = ARRAY_SIZE(s5pv210_pin_ctrl), ++}; ++ + /* Pad retention control code for accessing PMU regmap */ + static atomic_t exynos_shared_retention_refcnt; + +@@ -204,7 +209,7 @@ static const struct samsung_retention_data exynos3250_retention_data __initconst + * Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes + * two gpio/pin-mux/pinconfig controllers. + */ +-const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = { ++static const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = { + { + /* pin-controller instance 0 data */ + .pin_banks = exynos3250_pin_banks0, +@@ -225,6 +230,11 @@ const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = { + }, + }; + ++const struct samsung_pinctrl_of_match_data exynos3250_of_data __initconst = { ++ .ctrl = exynos3250_pin_ctrl, ++ .num_ctrl = ARRAY_SIZE(exynos3250_pin_ctrl), ++}; ++ + /* pin banks of exynos4210 pin-controller 0 */ + static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst = { + EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), +@@ -308,7 +318,7 @@ static const struct samsung_retention_data exynos4_audio_retention_data __initco + * Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes + * three gpio/pin-mux/pinconfig controllers. + */ +-const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = { ++static const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = { + { + /* pin-controller instance 0 data */ + .pin_banks = exynos4210_pin_banks0, +@@ -334,6 +344,11 @@ const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = { + }, + }; + ++const struct samsung_pinctrl_of_match_data exynos4210_of_data __initconst = { ++ .ctrl = exynos4210_pin_ctrl, ++ .num_ctrl = ARRAY_SIZE(exynos4210_pin_ctrl), ++}; ++ + /* pin banks of exynos4x12 pin-controller 0 */ + static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst = { + EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), +@@ -396,7 +411,7 @@ static const struct samsung_pin_bank_data exynos4x12_pin_banks3[] __initconst = + * Samsung pinctrl driver data for Exynos4x12 SoC. Exynos4x12 SoC includes + * four gpio/pin-mux/pinconfig controllers. 
+ */ +-const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = { ++static const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = { + { + /* pin-controller instance 0 data */ + .pin_banks = exynos4x12_pin_banks0, +@@ -432,6 +447,11 @@ const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = { + }, + }; + ++const struct samsung_pinctrl_of_match_data exynos4x12_of_data __initconst = { ++ .ctrl = exynos4x12_pin_ctrl, ++ .num_ctrl = ARRAY_SIZE(exynos4x12_pin_ctrl), ++}; ++ + /* pin banks of exynos5250 pin-controller 0 */ + static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = { + EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), +@@ -492,7 +512,7 @@ static const struct samsung_pin_bank_data exynos5250_pin_banks3[] __initconst = + * Samsung pinctrl driver data for Exynos5250 SoC. Exynos5250 SoC includes + * four gpio/pin-mux/pinconfig controllers. + */ +-const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = { ++static const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = { + { + /* pin-controller instance 0 data */ + .pin_banks = exynos5250_pin_banks0, +@@ -528,6 +548,11 @@ const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = { + }, + }; + ++const struct samsung_pinctrl_of_match_data exynos5250_of_data __initconst = { ++ .ctrl = exynos5250_pin_ctrl, ++ .num_ctrl = ARRAY_SIZE(exynos5250_pin_ctrl), ++}; ++ + /* pin banks of exynos5260 pin-controller 0 */ + static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst = { + EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00), +@@ -572,7 +597,7 @@ static const struct samsung_pin_bank_data exynos5260_pin_banks2[] __initconst = + * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes + * three gpio/pin-mux/pinconfig controllers. + */ +-const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = { ++static const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = { + { + /* pin-controller instance 0 data */ + .pin_banks = exynos5260_pin_banks0, +@@ -592,6 +617,11 @@ const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = { + }, + }; + ++const struct samsung_pinctrl_of_match_data exynos5260_of_data __initconst = { ++ .ctrl = exynos5260_pin_ctrl, ++ .num_ctrl = ARRAY_SIZE(exynos5260_pin_ctrl), ++}; ++ + /* pin banks of exynos5410 pin-controller 0 */ + static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = { + EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), +@@ -662,7 +692,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks3[] __initconst = + * Samsung pinctrl driver data for Exynos5410 SoC. Exynos5410 SoC includes + * four gpio/pin-mux/pinconfig controllers. 
+ */ +-const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = { ++static const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = { + { + /* pin-controller instance 0 data */ + .pin_banks = exynos5410_pin_banks0, +@@ -695,6 +725,11 @@ const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = { + }, + }; + ++const struct samsung_pinctrl_of_match_data exynos5410_of_data __initconst = { ++ .ctrl = exynos5410_pin_ctrl, ++ .num_ctrl = ARRAY_SIZE(exynos5410_pin_ctrl), ++}; ++ + /* pin banks of exynos5420 pin-controller 0 */ + static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = { + EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00), +@@ -779,7 +814,7 @@ static const struct samsung_retention_data exynos5420_retention_data __initconst + * Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes + * four gpio/pin-mux/pinconfig controllers. + */ +-const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { ++static const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { + { + /* pin-controller instance 0 data */ + .pin_banks = exynos5420_pin_banks0, +@@ -813,3 +848,8 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { + .retention_data = &exynos4_audio_retention_data, + }, + }; ++ ++const struct samsung_pinctrl_of_match_data exynos5420_of_data __initconst = { ++ .ctrl = exynos5420_pin_ctrl, ++ .num_ctrl = ARRAY_SIZE(exynos5420_pin_ctrl), ++}; +diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c +index 08e9fdb58fd2..0ab88fc268ea 100644 +--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c ++++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c +@@ -180,7 +180,7 @@ static const struct samsung_retention_data exynos5433_fsys_retention_data __init + * Samsung pinctrl driver data for Exynos5433 SoC. Exynos5433 SoC includes + * ten gpio/pin-mux/pinconfig controllers. 
+ */ +-const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = { ++static const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = { + { + /* pin-controller instance 0 data */ + .pin_banks = exynos5433_pin_banks0, +@@ -265,6 +265,11 @@ const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = { + }, + }; + ++const struct samsung_pinctrl_of_match_data exynos5433_of_data __initconst = { ++ .ctrl = exynos5433_pin_ctrl, ++ .num_ctrl = ARRAY_SIZE(exynos5433_pin_ctrl), ++}; ++ + /* pin banks of exynos7 pin-controller - ALIVE */ + static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = { + EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), +@@ -344,7 +349,7 @@ static const struct samsung_pin_bank_data exynos7_pin_banks9[] __initconst = { + EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), + }; + +-const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = { ++static const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = { + { + /* pin-controller instance 0 Alive data */ + .pin_banks = exynos7_pin_banks0, +@@ -397,3 +402,8 @@ const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = { + .eint_gpio_init = exynos_eint_gpio_init, + }, + }; ++ ++const struct samsung_pinctrl_of_match_data exynos7_of_data __initconst = { ++ .ctrl = exynos7_pin_ctrl, ++ .num_ctrl = ARRAY_SIZE(exynos7_pin_ctrl), ++}; +diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c +index edf27264b603..67da1cf18b68 100644 +--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c ++++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c +@@ -570,7 +570,7 @@ static const struct samsung_pin_bank_data s3c2412_pin_banks[] __initconst = { + PIN_BANK_2BIT(13, 0x080, "gpj"), + }; + +-const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = { ++static const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = { + { + .pin_banks = s3c2412_pin_banks, + .nr_banks = ARRAY_SIZE(s3c2412_pin_banks), +@@ -578,6 +578,11 @@ const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = { + }, + }; + ++const struct samsung_pinctrl_of_match_data s3c2412_of_data __initconst = { ++ .ctrl = s3c2412_pin_ctrl, ++ .num_ctrl = ARRAY_SIZE(s3c2412_pin_ctrl), ++}; ++ + static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = { + PIN_BANK_A(27, 0x000, "gpa"), + PIN_BANK_2BIT(11, 0x010, "gpb"), +@@ -592,7 +597,7 @@ static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = { + PIN_BANK_2BIT(2, 0x100, "gpm"), + }; + +-const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = { ++static const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = { + { + .pin_banks = s3c2416_pin_banks, + .nr_banks = ARRAY_SIZE(s3c2416_pin_banks), +@@ -600,6 +605,11 @@ const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = { + }, + }; + ++const struct samsung_pinctrl_of_match_data s3c2416_of_data __initconst = { ++ .ctrl = s3c2416_pin_ctrl, ++ .num_ctrl = ARRAY_SIZE(s3c2416_pin_ctrl), ++}; ++ + static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = { + PIN_BANK_A(25, 0x000, "gpa"), + PIN_BANK_2BIT(11, 0x010, "gpb"), +@@ -612,7 +622,7 @@ static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = { + PIN_BANK_2BIT(13, 0x0d0, "gpj"), + }; + +-const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = { ++static const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = { + { + .pin_banks = s3c2440_pin_banks, + .nr_banks = ARRAY_SIZE(s3c2440_pin_banks), +@@ -620,6 +630,11 @@ const struct 
samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = { + }, + }; + ++const struct samsung_pinctrl_of_match_data s3c2440_of_data __initconst = { ++ .ctrl = s3c2440_pin_ctrl, ++ .num_ctrl = ARRAY_SIZE(s3c2440_pin_ctrl), ++}; ++ + static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = { + PIN_BANK_A(28, 0x000, "gpa"), + PIN_BANK_2BIT(11, 0x010, "gpb"), +@@ -635,10 +650,15 @@ static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = { + PIN_BANK_2BIT(2, 0x100, "gpm"), + }; + +-const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = { ++static const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = { + { + .pin_banks = s3c2450_pin_banks, + .nr_banks = ARRAY_SIZE(s3c2450_pin_banks), + .eint_wkup_init = s3c24xx_eint_init, + }, + }; ++ ++const struct samsung_pinctrl_of_match_data s3c2450_of_data __initconst = { ++ .ctrl = s3c2450_pin_ctrl, ++ .num_ctrl = ARRAY_SIZE(s3c2450_pin_ctrl), ++}; +diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c +index e63663b32907..0bdc1e683181 100644 +--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c ++++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c +@@ -794,7 +794,7 @@ static const struct samsung_pin_bank_data s3c64xx_pin_banks0[] __initconst = { + * Samsung pinctrl driver data for S3C64xx SoC. S3C64xx SoC includes + * one gpio/pin-mux/pinconfig controller. + */ +-const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = { ++static const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = { + { + /* pin-controller instance 1 data */ + .pin_banks = s3c64xx_pin_banks0, +@@ -803,3 +803,8 @@ const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = { + .eint_wkup_init = s3c64xx_eint_eint0_init, + }, + }; ++ ++const struct samsung_pinctrl_of_match_data s3c64xx_of_data __initconst = { ++ .ctrl = s3c64xx_pin_ctrl, ++ .num_ctrl = ARRAY_SIZE(s3c64xx_pin_ctrl), ++}; +diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c +index e04f7fe0a65d..26e8fab736f1 100644 +--- a/drivers/pinctrl/samsung/pinctrl-samsung.c ++++ b/drivers/pinctrl/samsung/pinctrl-samsung.c +@@ -947,12 +947,33 @@ static int samsung_gpiolib_register(struct platform_device *pdev, + return 0; + } + ++static const struct samsung_pin_ctrl * ++samsung_pinctrl_get_soc_data_for_of_alias(struct platform_device *pdev) ++{ ++ struct device_node *node = pdev->dev.of_node; ++ const struct samsung_pinctrl_of_match_data *of_data; ++ int id; ++ ++ id = of_alias_get_id(node, "pinctrl"); ++ if (id < 0) { ++ dev_err(&pdev->dev, "failed to get alias id\n"); ++ return NULL; ++ } ++ ++ of_data = of_device_get_match_data(&pdev->dev); ++ if (id >= of_data->num_ctrl) { ++ dev_err(&pdev->dev, "invalid alias id %d\n", id); ++ return NULL; ++ } ++ ++ return &(of_data->ctrl[id]); ++} ++ + /* retrieve the soc specific data */ + static const struct samsung_pin_ctrl * + samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, + struct platform_device *pdev) + { +- int id; + struct device_node *node = pdev->dev.of_node; + struct device_node *np; + const struct samsung_pin_bank_data *bdata; +@@ -962,13 +983,9 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, + void __iomem *virt_base[SAMSUNG_PINCTRL_NUM_RESOURCES]; + unsigned int i; + +- id = of_alias_get_id(node, "pinctrl"); +- if (id < 0) { +- dev_err(&pdev->dev, "failed to get alias id\n"); ++ ctrl = samsung_pinctrl_get_soc_data_for_of_alias(pdev); ++ if (!ctrl) + return ERR_PTR(-ENOENT); +- } +- ctrl = 
of_device_get_match_data(&pdev->dev); +- ctrl += id; + + d->suspend = ctrl->suspend; + d->resume = ctrl->resume; +@@ -1193,41 +1210,41 @@ static int __maybe_unused samsung_pinctrl_resume(struct device *dev) + static const struct of_device_id samsung_pinctrl_dt_match[] = { + #ifdef CONFIG_PINCTRL_EXYNOS_ARM + { .compatible = "samsung,exynos3250-pinctrl", +- .data = exynos3250_pin_ctrl }, ++ .data = &exynos3250_of_data }, + { .compatible = "samsung,exynos4210-pinctrl", +- .data = exynos4210_pin_ctrl }, ++ .data = &exynos4210_of_data }, + { .compatible = "samsung,exynos4x12-pinctrl", +- .data = exynos4x12_pin_ctrl }, ++ .data = &exynos4x12_of_data }, + { .compatible = "samsung,exynos5250-pinctrl", +- .data = exynos5250_pin_ctrl }, ++ .data = &exynos5250_of_data }, + { .compatible = "samsung,exynos5260-pinctrl", +- .data = exynos5260_pin_ctrl }, ++ .data = &exynos5260_of_data }, + { .compatible = "samsung,exynos5410-pinctrl", +- .data = exynos5410_pin_ctrl }, ++ .data = &exynos5410_of_data }, + { .compatible = "samsung,exynos5420-pinctrl", +- .data = exynos5420_pin_ctrl }, ++ .data = &exynos5420_of_data }, + { .compatible = "samsung,s5pv210-pinctrl", +- .data = s5pv210_pin_ctrl }, ++ .data = &s5pv210_of_data }, + #endif + #ifdef CONFIG_PINCTRL_EXYNOS_ARM64 + { .compatible = "samsung,exynos5433-pinctrl", +- .data = exynos5433_pin_ctrl }, ++ .data = &exynos5433_of_data }, + { .compatible = "samsung,exynos7-pinctrl", +- .data = exynos7_pin_ctrl }, ++ .data = &exynos7_of_data }, + #endif + #ifdef CONFIG_PINCTRL_S3C64XX + { .compatible = "samsung,s3c64xx-pinctrl", +- .data = s3c64xx_pin_ctrl }, ++ .data = &s3c64xx_of_data }, + #endif + #ifdef CONFIG_PINCTRL_S3C24XX + { .compatible = "samsung,s3c2412-pinctrl", +- .data = s3c2412_pin_ctrl }, ++ .data = &s3c2412_of_data }, + { .compatible = "samsung,s3c2416-pinctrl", +- .data = s3c2416_pin_ctrl }, ++ .data = &s3c2416_of_data }, + { .compatible = "samsung,s3c2440-pinctrl", +- .data = s3c2440_pin_ctrl }, ++ .data = &s3c2440_of_data }, + { .compatible = "samsung,s3c2450-pinctrl", +- .data = s3c2450_pin_ctrl }, ++ .data = &s3c2450_of_data }, + #endif + {}, + }; +diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h +index 9af07af6cad6..ae932e0c05f2 100644 +--- a/drivers/pinctrl/samsung/pinctrl-samsung.h ++++ b/drivers/pinctrl/samsung/pinctrl-samsung.h +@@ -285,6 +285,16 @@ struct samsung_pinctrl_drv_data { + void (*resume)(struct samsung_pinctrl_drv_data *); + }; + ++/** ++ * struct samsung_pinctrl_of_match_data: OF match device specific configuration data. ++ * @ctrl: array of pin controller data. ++ * @num_ctrl: size of array @ctrl. ++ */ ++struct samsung_pinctrl_of_match_data { ++ const struct samsung_pin_ctrl *ctrl; ++ unsigned int num_ctrl; ++}; ++ + /** + * struct samsung_pin_group: represent group of pins of a pinmux function. + * @name: name of the pin group, used to lookup the group. 
+@@ -313,20 +323,20 @@ struct samsung_pmx_func { + }; + + /* list of all exported SoC specific data */ +-extern const struct samsung_pin_ctrl exynos3250_pin_ctrl[]; +-extern const struct samsung_pin_ctrl exynos4210_pin_ctrl[]; +-extern const struct samsung_pin_ctrl exynos4x12_pin_ctrl[]; +-extern const struct samsung_pin_ctrl exynos5250_pin_ctrl[]; +-extern const struct samsung_pin_ctrl exynos5260_pin_ctrl[]; +-extern const struct samsung_pin_ctrl exynos5410_pin_ctrl[]; +-extern const struct samsung_pin_ctrl exynos5420_pin_ctrl[]; +-extern const struct samsung_pin_ctrl exynos5433_pin_ctrl[]; +-extern const struct samsung_pin_ctrl exynos7_pin_ctrl[]; +-extern const struct samsung_pin_ctrl s3c64xx_pin_ctrl[]; +-extern const struct samsung_pin_ctrl s3c2412_pin_ctrl[]; +-extern const struct samsung_pin_ctrl s3c2416_pin_ctrl[]; +-extern const struct samsung_pin_ctrl s3c2440_pin_ctrl[]; +-extern const struct samsung_pin_ctrl s3c2450_pin_ctrl[]; +-extern const struct samsung_pin_ctrl s5pv210_pin_ctrl[]; ++extern const struct samsung_pinctrl_of_match_data exynos3250_of_data; ++extern const struct samsung_pinctrl_of_match_data exynos4210_of_data; ++extern const struct samsung_pinctrl_of_match_data exynos4x12_of_data; ++extern const struct samsung_pinctrl_of_match_data exynos5250_of_data; ++extern const struct samsung_pinctrl_of_match_data exynos5260_of_data; ++extern const struct samsung_pinctrl_of_match_data exynos5410_of_data; ++extern const struct samsung_pinctrl_of_match_data exynos5420_of_data; ++extern const struct samsung_pinctrl_of_match_data exynos5433_of_data; ++extern const struct samsung_pinctrl_of_match_data exynos7_of_data; ++extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data; ++extern const struct samsung_pinctrl_of_match_data s3c2412_of_data; ++extern const struct samsung_pinctrl_of_match_data s3c2416_of_data; ++extern const struct samsung_pinctrl_of_match_data s3c2440_of_data; ++extern const struct samsung_pinctrl_of_match_data s3c2450_of_data; ++extern const struct samsung_pinctrl_of_match_data s5pv210_of_data; + + #endif /* __PINCTRL_SAMSUNG_H */ +diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c +index 86196ffd2faf..fa3e4b7e0c9f 100644 +--- a/drivers/staging/android/ion/ion_cma_heap.c ++++ b/drivers/staging/android/ion/ion_cma_heap.c +@@ -21,6 +21,7 @@ + #include <linux/err.h> + #include <linux/cma.h> + #include <linux/scatterlist.h> ++#include <linux/highmem.h> + + #include "ion.h" + +@@ -51,6 +52,22 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, + if (!pages) + return -ENOMEM; + ++ if (PageHighMem(pages)) { ++ unsigned long nr_clear_pages = nr_pages; ++ struct page *page = pages; ++ ++ while (nr_clear_pages > 0) { ++ void *vaddr = kmap_atomic(page); ++ ++ memset(vaddr, 0, PAGE_SIZE); ++ kunmap_atomic(vaddr); ++ page++; ++ nr_clear_pages--; ++ } ++ } else { ++ memset(page_address(pages), 0, size); ++ } ++ + table = kmalloc(sizeof(*table), GFP_KERNEL); + if (!table) + goto err; +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index 88b902c525d7..b4e57c5a8bba 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -1727,7 +1727,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear) + default_attr(vc); + update_attr(vc); + +- vc->vc_tab_stop[0] = 0x01010100; ++ vc->vc_tab_stop[0] = + vc->vc_tab_stop[1] = + vc->vc_tab_stop[2] = + vc->vc_tab_stop[3] = +@@ -1771,7 +1771,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) + 
vc->vc_pos -= (vc->vc_x << 1); + while (vc->vc_x < vc->vc_cols - 1) { + vc->vc_x++; +- if (vc->vc_tab_stop[vc->vc_x >> 5] & (1 << (vc->vc_x & 31))) ++ if (vc->vc_tab_stop[7 & (vc->vc_x >> 5)] & (1 << (vc->vc_x & 31))) + break; + } + vc->vc_pos += (vc->vc_x << 1); +@@ -1831,7 +1831,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) + lf(vc); + return; + case 'H': +- vc->vc_tab_stop[vc->vc_x >> 5] |= (1 << (vc->vc_x & 31)); ++ vc->vc_tab_stop[7 & (vc->vc_x >> 5)] |= (1 << (vc->vc_x & 31)); + return; + case 'Z': + respond_ID(tty); +@@ -2024,7 +2024,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) + return; + case 'g': + if (!vc->vc_par[0]) +- vc->vc_tab_stop[vc->vc_x >> 5] &= ~(1 << (vc->vc_x & 31)); ++ vc->vc_tab_stop[7 & (vc->vc_x >> 5)] &= ~(1 << (vc->vc_x & 31)); + else if (vc->vc_par[0] == 3) { + vc->vc_tab_stop[0] = + vc->vc_tab_stop[1] = +diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c +index 6d1fbda0f461..0da9943d405f 100644 +--- a/drivers/watchdog/wdat_wdt.c ++++ b/drivers/watchdog/wdat_wdt.c +@@ -392,7 +392,7 @@ static int wdat_wdt_probe(struct platform_device *pdev) + + memset(&r, 0, sizeof(r)); + r.start = gas->address; +- r.end = r.start + gas->access_width; ++ r.end = r.start + gas->access_width - 1; + if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + r.flags = IORESOURCE_MEM; + } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c +index 8a85f3f53446..8a5bde8b1444 100644 +--- a/fs/hugetlbfs/inode.c ++++ b/fs/hugetlbfs/inode.c +@@ -118,6 +118,16 @@ static void huge_pagevec_release(struct pagevec *pvec) + pagevec_reinit(pvec); + } + ++/* ++ * Mask used when checking the page offset value passed in via system ++ * calls. This value will be converted to a loff_t which is signed. ++ * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the ++ * value. The extra bit (- 1 in the shift value) is to take the sign ++ * bit into account. ++ */ ++#define PGOFF_LOFFT_MAX \ ++ (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1))) ++ + static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) + { + struct inode *inode = file_inode(file); +@@ -137,12 +147,13 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) + vma->vm_ops = &hugetlb_vm_ops; + + /* +- * Offset passed to mmap (before page shift) could have been +- * negative when represented as a (l)off_t. ++ * page based offset in vm_pgoff could be sufficiently large to ++ * overflow a (l)off_t when converted to byte offset. 
+ */ +- if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0) ++ if (vma->vm_pgoff & PGOFF_LOFFT_MAX) + return -EINVAL; + ++ /* must be huge page aligned */ + if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) + return -EINVAL; + +diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c +index 804adfebba2f..3e047eb4cc7c 100644 +--- a/fs/ncpfs/ncplib_kernel.c ++++ b/fs/ncpfs/ncplib_kernel.c +@@ -981,6 +981,10 @@ ncp_read_kernel(struct ncp_server *server, const char *file_id, + goto out; + } + *bytes_read = ncp_reply_be16(server, 0); ++ if (*bytes_read > to_read) { ++ result = -EINVAL; ++ goto out; ++ } + source = ncp_reply_data(server, 2 + (offset & 1)); + + memcpy(target, source, *bytes_read); +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index 5a75135f5f53..974b1be7f148 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -268,6 +268,35 @@ free_blocked_lock(struct nfsd4_blocked_lock *nbl) + kfree(nbl); + } + ++static void ++remove_blocked_locks(struct nfs4_lockowner *lo) ++{ ++ struct nfs4_client *clp = lo->lo_owner.so_client; ++ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); ++ struct nfsd4_blocked_lock *nbl; ++ LIST_HEAD(reaplist); ++ ++ /* Dequeue all blocked locks */ ++ spin_lock(&nn->blocked_locks_lock); ++ while (!list_empty(&lo->lo_blocked)) { ++ nbl = list_first_entry(&lo->lo_blocked, ++ struct nfsd4_blocked_lock, ++ nbl_list); ++ list_del_init(&nbl->nbl_list); ++ list_move(&nbl->nbl_lru, &reaplist); ++ } ++ spin_unlock(&nn->blocked_locks_lock); ++ ++ /* Now free them */ ++ while (!list_empty(&reaplist)) { ++ nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock, ++ nbl_lru); ++ list_del_init(&nbl->nbl_lru); ++ posix_unblock_lock(&nbl->nbl_lock); ++ free_blocked_lock(nbl); ++ } ++} ++ + static int + nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task) + { +@@ -1866,6 +1895,7 @@ static __be32 mark_client_expired_locked(struct nfs4_client *clp) + static void + __destroy_client(struct nfs4_client *clp) + { ++ int i; + struct nfs4_openowner *oo; + struct nfs4_delegation *dp; + struct list_head reaplist; +@@ -1895,6 +1925,16 @@ __destroy_client(struct nfs4_client *clp) + nfs4_get_stateowner(&oo->oo_owner); + release_openowner(oo); + } ++ for (i = 0; i < OWNER_HASH_SIZE; i++) { ++ struct nfs4_stateowner *so, *tmp; ++ ++ list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i], ++ so_strhash) { ++ /* Should be no openowners at this point */ ++ WARN_ON_ONCE(so->so_is_open_owner); ++ remove_blocked_locks(lockowner(so)); ++ } ++ } + nfsd4_return_all_client_layouts(clp); + nfsd4_shutdown_callback(clp); + if (clp->cl_cb_conn.cb_xprt) +@@ -6358,6 +6398,7 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp, + } + spin_unlock(&clp->cl_lock); + free_ol_stateid_reaplist(&reaplist); ++ remove_blocked_locks(lo); + nfs4_put_stateowner(&lo->lo_owner); + + return status; +@@ -7143,6 +7184,8 @@ nfs4_state_destroy_net(struct net *net) + } + } + ++ WARN_ON(!list_empty(&nn->blocked_locks_lru)); ++ + for (i = 0; i < CLIENT_HASH_SIZE; i++) { + while (!list_empty(&nn->unconf_id_hashtbl[i])) { + clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); +@@ -7209,7 +7252,6 @@ nfs4_state_shutdown_net(struct net *net) + struct nfs4_delegation *dp = NULL; + struct list_head *pos, *next, reaplist; + struct nfsd_net *nn = net_generic(net, nfsd_net_id); +- struct nfsd4_blocked_lock *nbl; + + cancel_delayed_work_sync(&nn->laundromat_work); + locks_end_grace(&nn->nfsd4_manager); +@@ -7230,24 +7272,6 @@ 
nfs4_state_shutdown_net(struct net *net) + nfs4_put_stid(&dp->dl_stid); + } + +- BUG_ON(!list_empty(&reaplist)); +- spin_lock(&nn->blocked_locks_lock); +- while (!list_empty(&nn->blocked_locks_lru)) { +- nbl = list_first_entry(&nn->blocked_locks_lru, +- struct nfsd4_blocked_lock, nbl_lru); +- list_move(&nbl->nbl_lru, &reaplist); +- list_del_init(&nbl->nbl_list); +- } +- spin_unlock(&nn->blocked_locks_lock); +- +- while (!list_empty(&reaplist)) { +- nbl = list_first_entry(&reaplist, +- struct nfsd4_blocked_lock, nbl_lru); +- list_del_init(&nbl->nbl_lru); +- posix_unblock_lock(&nbl->nbl_lock); +- free_blocked_lock(nbl); +- } +- + nfsd4_client_tracking_exit(net); + nfs4_state_destroy_net(net); + } +diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h +index 868e68561f91..dfc861caa478 100644 +--- a/include/asm-generic/pgtable.h ++++ b/include/asm-generic/pgtable.h +@@ -976,6 +976,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); + int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); + int pud_clear_huge(pud_t *pud); + int pmd_clear_huge(pmd_t *pmd); ++int pud_free_pmd_page(pud_t *pud); ++int pmd_free_pte_page(pmd_t *pmd); + #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ + static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) + { +@@ -1001,6 +1003,14 @@ static inline int pmd_clear_huge(pmd_t *pmd) + { + return 0; + } ++static inline int pud_free_pmd_page(pud_t *pud) ++{ ++ return 0; ++} ++static inline int pmd_free_pte_page(pmd_t *pmd) ++{ ++ return 0; ++} + #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ + + #ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE +diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h +index c332f0a45607..3fdfede2f0f3 100644 +--- a/include/linux/fsl_ifc.h ++++ b/include/linux/fsl_ifc.h +@@ -734,11 +734,7 @@ struct fsl_ifc_nand { + u32 res19[0x10]; + __be32 nand_fsr; + u32 res20; +- /* The V1 nand_eccstat is actually 4 words that overlaps the +- * V2 nand_eccstat. 
+- */ +- __be32 v1_nand_eccstat[2]; +- __be32 v2_nand_eccstat[6]; ++ __be32 nand_eccstat[8]; + u32 res21[0x1c]; + __be32 nanndcr; + u32 res22[0x2]; +diff --git a/include/linux/memblock.h b/include/linux/memblock.h +index 7ed0f7782d16..9efd592c5da4 100644 +--- a/include/linux/memblock.h ++++ b/include/linux/memblock.h +@@ -187,7 +187,6 @@ int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, + unsigned long *end_pfn); + void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, + unsigned long *out_end_pfn, int *out_nid); +-unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn); + + /** + * for_each_mem_pfn_range - early memory pfn range iterator +diff --git a/include/trace/events/mmc.h b/include/trace/events/mmc.h +index 200f731be557..7b706ff21335 100644 +--- a/include/trace/events/mmc.h ++++ b/include/trace/events/mmc.h +@@ -86,8 +86,8 @@ TRACE_EVENT(mmc_request_start, + __entry->stop_flags, __entry->stop_retries, + __entry->sbc_opcode, __entry->sbc_arg, + __entry->sbc_flags, __entry->sbc_retries, +- __entry->blocks, __entry->blk_addr, +- __entry->blksz, __entry->data_flags, __entry->tag, ++ __entry->blocks, __entry->blksz, ++ __entry->blk_addr, __entry->data_flags, __entry->tag, + __entry->can_retune, __entry->doing_retune, + __entry->retune_now, __entry->need_retune, + __entry->hold_retune, __entry->retune_period) +diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h +index 17a022c5b414..da3315ed1bcd 100644 +--- a/include/uapi/linux/usb/audio.h ++++ b/include/uapi/linux/usb/audio.h +@@ -370,7 +370,7 @@ static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_d + { + return (protocol == UAC_VERSION_1) ? + desc->baSourceID[desc->bNrInPins + 4] : +- desc->baSourceID[desc->bNrInPins + 6]; ++ 2; /* in UAC2, this value is constant */ + } + + static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc, +@@ -378,7 +378,7 @@ static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_de + { + return (protocol == UAC_VERSION_1) ? + &desc->baSourceID[desc->bNrInPins + 5] : +- &desc->baSourceID[desc->bNrInPins + 7]; ++ &desc->baSourceID[desc->bNrInPins + 6]; + } + + static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc, +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c +index 5cb783fc8224..b719351b48d2 100644 +--- a/kernel/bpf/syscall.c ++++ b/kernel/bpf/syscall.c +@@ -1687,7 +1687,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz + union bpf_attr attr = {}; + int err; + +- if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled) ++ if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN)) + return -EPERM; + + err = check_uarg_tail_zero(uattr, sizeof(attr), size); +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c +index 7e4c44538119..2522fac782af 100644 +--- a/kernel/cgroup/cgroup.c ++++ b/kernel/cgroup/cgroup.c +@@ -3183,6 +3183,16 @@ static int cgroup_enable_threaded(struct cgroup *cgrp) + if (cgroup_is_threaded(cgrp)) + return 0; + ++ /* ++ * If @cgroup is populated or has domain controllers enabled, it ++ * can't be switched. While the below cgroup_can_be_thread_root() ++ * test can catch the same conditions, that's only when @parent is ++ * not mixable, so let's check it explicitly. 
++ */ ++ if (cgroup_is_populated(cgrp) || ++ cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask) ++ return -EOPNOTSUPP; ++ + /* we're joining the parent's domain, ensure its validity */ + if (!cgroup_is_valid_domain(dom_cgrp) || + !cgroup_can_be_thread_root(dom_cgrp)) +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 5d8f4031f8d5..385480a5aa45 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -2246,7 +2246,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx, + struct perf_event_context *task_ctx, + enum event_type_t event_type) + { +- enum event_type_t ctx_event_type = event_type & EVENT_ALL; ++ enum event_type_t ctx_event_type; + bool cpu_event = !!(event_type & EVENT_CPU); + + /* +@@ -2256,6 +2256,8 @@ static void ctx_resched(struct perf_cpu_context *cpuctx, + if (event_type & EVENT_PINNED) + event_type |= EVENT_FLEXIBLE; + ++ ctx_event_type = event_type & EVENT_ALL; ++ + perf_pmu_disable(cpuctx->ctx.pmu); + if (task_ctx) + task_ctx_sched_out(cpuctx, task_ctx, event_type); +diff --git a/kernel/module.c b/kernel/module.c +index 09e48eee4d55..0c4530763f02 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -4223,7 +4223,7 @@ static int modules_open(struct inode *inode, struct file *file) + m->private = kallsyms_show_value() ? NULL : (void *)8ul; + } + +- return 0; ++ return err; + } + + static const struct file_operations proc_modules_operations = { +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 5a31a85bbd84..b4f7f890c1b9 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -6611,13 +6611,18 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data) + parent_quota = parent_b->hierarchical_quota; + + /* +- * Ensure max(child_quota) <= parent_quota, inherit when no ++ * Ensure max(child_quota) <= parent_quota. On cgroup2, ++ * always take the min. On cgroup1, only inherit when no + * limit is set: + */ +- if (quota == RUNTIME_INF) +- quota = parent_quota; +- else if (parent_quota != RUNTIME_INF && quota > parent_quota) +- return -EINVAL; ++ if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) { ++ quota = min(quota, parent_quota); ++ } else { ++ if (quota == RUNTIME_INF) ++ quota = parent_quota; ++ else if (parent_quota != RUNTIME_INF && quota > parent_quota) ++ return -EINVAL; ++ } + } + cfs_b->hierarchical_quota = quota; + +diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c +index ec999f32c840..708992708332 100644 +--- a/kernel/time/posix-timers.c ++++ b/kernel/time/posix-timers.c +@@ -50,6 +50,7 @@ + #include <linux/export.h> + #include <linux/hashtable.h> + #include <linux/compat.h> ++#include <linux/nospec.h> + + #include "timekeeping.h" + #include "posix-timers.h" +@@ -1346,11 +1347,15 @@ static const struct k_clock * const posix_clocks[] = { + + static const struct k_clock *clockid_to_kclock(const clockid_t id) + { +- if (id < 0) ++ clockid_t idx = id; ++ ++ if (id < 0) { + return (id & CLOCKFD_MASK) == CLOCKFD ? 
+ &clock_posix_dynamic : &clock_posix_cpu; ++ } + +- if (id >= ARRAY_SIZE(posix_clocks) || !posix_clocks[id]) ++ if (id >= ARRAY_SIZE(posix_clocks)) + return NULL; +- return posix_clocks[id]; ++ ++ return posix_clocks[array_index_nospec(idx, ARRAY_SIZE(posix_clocks))]; + } +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c +index 40207c2a4113..fe2429b382ca 100644 +--- a/kernel/trace/bpf_trace.c ++++ b/kernel/trace/bpf_trace.c +@@ -636,7 +636,41 @@ static const struct bpf_func_proto bpf_get_stackid_proto_tp = { + .arg3_type = ARG_ANYTHING, + }; + +-BPF_CALL_3(bpf_perf_prog_read_value_tp, struct bpf_perf_event_data_kern *, ctx, ++static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id) ++{ ++ switch (func_id) { ++ case BPF_FUNC_perf_event_output: ++ return &bpf_perf_event_output_proto_tp; ++ case BPF_FUNC_get_stackid: ++ return &bpf_get_stackid_proto_tp; ++ default: ++ return tracing_func_proto(func_id); ++ } ++} ++ ++static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, ++ struct bpf_insn_access_aux *info) ++{ ++ if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) ++ return false; ++ if (type != BPF_READ) ++ return false; ++ if (off % size != 0) ++ return false; ++ ++ BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64)); ++ return true; ++} ++ ++const struct bpf_verifier_ops tracepoint_verifier_ops = { ++ .get_func_proto = tp_prog_func_proto, ++ .is_valid_access = tp_prog_is_valid_access, ++}; ++ ++const struct bpf_prog_ops tracepoint_prog_ops = { ++}; ++ ++BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx, + struct bpf_perf_event_value *, buf, u32, size) + { + int err = -EINVAL; +@@ -653,8 +687,8 @@ BPF_CALL_3(bpf_perf_prog_read_value_tp, struct bpf_perf_event_data_kern *, ctx, + return err; + } + +-static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = { +- .func = bpf_perf_prog_read_value_tp, ++static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { ++ .func = bpf_perf_prog_read_value, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +@@ -662,7 +696,7 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = { + .arg3_type = ARG_CONST_SIZE, + }; + +-static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id) ++static const struct bpf_func_proto *pe_prog_func_proto(enum bpf_func_id func_id) + { + switch (func_id) { + case BPF_FUNC_perf_event_output: +@@ -670,34 +704,12 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id) + case BPF_FUNC_get_stackid: + return &bpf_get_stackid_proto_tp; + case BPF_FUNC_perf_prog_read_value: +- return &bpf_perf_prog_read_value_proto_tp; ++ return &bpf_perf_prog_read_value_proto; + default: + return tracing_func_proto(func_id); + } + } + +-static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, +- struct bpf_insn_access_aux *info) +-{ +- if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) +- return false; +- if (type != BPF_READ) +- return false; +- if (off % size != 0) +- return false; +- +- BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64)); +- return true; +-} +- +-const struct bpf_verifier_ops tracepoint_verifier_ops = { +- .get_func_proto = tp_prog_func_proto, +- .is_valid_access = tp_prog_is_valid_access, +-}; +- +-const struct bpf_prog_ops tracepoint_prog_ops = { +-}; +- + static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, + struct bpf_insn_access_aux *info) + { +@@ -754,7 
+766,7 @@ static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, + } + + const struct bpf_verifier_ops perf_event_verifier_ops = { +- .get_func_proto = tp_prog_func_proto, ++ .get_func_proto = pe_prog_func_proto, + .is_valid_access = pe_prog_is_valid_access, + .convert_ctx_access = pe_prog_convert_ctx_access, + }; +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index 492700c5fb4d..fccf00a66298 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -635,7 +635,7 @@ static int create_trace_kprobe(int argc, char **argv) + char *symbol = NULL, *event = NULL, *group = NULL; + int maxactive = 0; + char *arg; +- unsigned long offset = 0; ++ long offset = 0; + void *addr = NULL; + char buf[MAX_EVENT_NAME_LEN]; + +@@ -723,7 +723,7 @@ static int create_trace_kprobe(int argc, char **argv) + symbol = argv[1]; + /* TODO: support .init module functions */ + ret = traceprobe_split_symbol_offset(symbol, &offset); +- if (ret) { ++ if (ret || offset < 0 || offset > UINT_MAX) { + pr_info("Failed to parse either an address or a symbol.\n"); + return ret; + } +diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c +index d59357308677..daf54bda4dc8 100644 +--- a/kernel/trace/trace_probe.c ++++ b/kernel/trace/trace_probe.c +@@ -320,7 +320,7 @@ static fetch_func_t get_fetch_size_function(const struct fetch_type *type, + } + + /* Split symbol and offset. */ +-int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset) ++int traceprobe_split_symbol_offset(char *symbol, long *offset) + { + char *tmp; + int ret; +@@ -328,13 +328,11 @@ int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset) + if (!offset) + return -EINVAL; + +- tmp = strchr(symbol, '+'); ++ tmp = strpbrk(symbol, "+-"); + if (tmp) { +- /* skip sign because kstrtoul doesn't accept '+' */ +- ret = kstrtoul(tmp + 1, 0, offset); ++ ret = kstrtol(tmp, 0, offset); + if (ret) + return ret; +- + *tmp = '\0'; + } else + *offset = 0; +diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h +index fb66e3eaa192..a0d750e3d17c 100644 +--- a/kernel/trace/trace_probe.h ++++ b/kernel/trace/trace_probe.h +@@ -353,7 +353,7 @@ extern int traceprobe_conflict_field_name(const char *name, + extern void traceprobe_update_arg(struct probe_arg *arg); + extern void traceprobe_free_probe_arg(struct probe_arg *arg); + +-extern int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset); ++extern int traceprobe_split_symbol_offset(char *symbol, long *offset); + + /* Sum up total data length for dynamic arraies (strings) */ + static nokprobe_inline int +diff --git a/lib/ioremap.c b/lib/ioremap.c +index b808a390e4c3..54e5bbaa3200 100644 +--- a/lib/ioremap.c ++++ b/lib/ioremap.c +@@ -91,7 +91,8 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, + + if (ioremap_pmd_enabled() && + ((next - addr) == PMD_SIZE) && +- IS_ALIGNED(phys_addr + addr, PMD_SIZE)) { ++ IS_ALIGNED(phys_addr + addr, PMD_SIZE) && ++ pmd_free_pte_page(pmd)) { + if (pmd_set_huge(pmd, phys_addr + addr, prot)) + continue; + } +@@ -117,7 +118,8 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr, + + if (ioremap_pud_enabled() && + ((next - addr) == PUD_SIZE) && +- IS_ALIGNED(phys_addr + addr, PUD_SIZE)) { ++ IS_ALIGNED(phys_addr + addr, PUD_SIZE) && ++ pud_free_pmd_page(pud)) { + if (pud_set_huge(pud, phys_addr + addr, prot)) + continue; + } +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 0e7ded98d114..4ed6c89e95c3 100644 +--- a/mm/huge_memory.c ++++ 
b/mm/huge_memory.c +@@ -2791,11 +2791,13 @@ static unsigned long deferred_split_scan(struct shrinker *shrink, + + list_for_each_safe(pos, next, &list) { + page = list_entry((void *)pos, struct page, mapping); +- lock_page(page); ++ if (!trylock_page(page)) ++ goto next; + /* split_huge_page() removes page from list on success */ + if (!split_huge_page(page)) + split++; + unlock_page(page); ++next: + put_page(page); + } + +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 9a334f5fb730..d01912be98da 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -18,6 +18,7 @@ + #include <linux/bootmem.h> + #include <linux/sysfs.h> + #include <linux/slab.h> ++#include <linux/mmdebug.h> + #include <linux/sched/signal.h> + #include <linux/rmap.h> + #include <linux/string_helpers.h> +@@ -4354,6 +4355,12 @@ int hugetlb_reserve_pages(struct inode *inode, + struct resv_map *resv_map; + long gbl_reserve; + ++ /* This should never happen */ ++ if (from > to) { ++ VM_WARN(1, "%s called with a negative range\n", __func__); ++ return -EINVAL; ++ } ++ + /* + * Only apply hugepage reservation if asked. At fault time, an + * attempt will be made for VM_NORESERVE to allocate a page +diff --git a/mm/khugepaged.c b/mm/khugepaged.c +index ea4ff259b671..33255bf91074 100644 +--- a/mm/khugepaged.c ++++ b/mm/khugepaged.c +@@ -530,7 +530,12 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, + goto out; + } + +- VM_BUG_ON_PAGE(PageCompound(page), page); ++ /* TODO: teach khugepaged to collapse THP mapped with pte */ ++ if (PageCompound(page)) { ++ result = SCAN_PAGE_COMPOUND; ++ goto out; ++ } ++ + VM_BUG_ON_PAGE(!PageAnon(page), page); + + /* +diff --git a/mm/memblock.c b/mm/memblock.c +index d25b5a456cca..028429d5bc38 100644 +--- a/mm/memblock.c ++++ b/mm/memblock.c +@@ -1101,34 +1101,6 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid, + *out_nid = r->nid; + } + +-unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn, +- unsigned long max_pfn) +-{ +- struct memblock_type *type = &memblock.memory; +- unsigned int right = type->cnt; +- unsigned int mid, left = 0; +- phys_addr_t addr = PFN_PHYS(++pfn); +- +- do { +- mid = (right + left) / 2; +- +- if (addr < type->regions[mid].base) +- right = mid; +- else if (addr >= (type->regions[mid].base + +- type->regions[mid].size)) +- left = mid + 1; +- else { +- /* addr is within the region, so pfn is valid */ +- return pfn; +- } +- } while (left < right); +- +- if (right == type->cnt) +- return -1UL; +- else +- return PHYS_PFN(type->regions[right].base); +-} +- + /** + * memblock_set_node - set node ID on memblock regions + * @base: base of area to set node ID for +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 9f927497f2f5..d8ee1effa4a6 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -3588,7 +3588,7 @@ static bool __need_fs_reclaim(gfp_t gfp_mask) + return false; + + /* this guy won't enter reclaim */ +- if ((current->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC)) ++ if (current->flags & PF_MEMALLOC) + return false; + + /* We're only interested __GFP_FS allocations for now */ +@@ -5348,17 +5348,8 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, + if (context != MEMMAP_EARLY) + goto not_early; + +- if (!early_pfn_valid(pfn)) { +-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +- /* +- * Skip to the pfn preceding the next valid one (or +- * end_pfn), such that we hit a valid pfn (or end_pfn) +- * on our next iteration of the loop. 
+- */ +- pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1; +-#endif ++ if (!early_pfn_valid(pfn)) + continue; +- } + if (!early_pfn_in_nid(pfn, nid)) + continue; + if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised)) +diff --git a/mm/shmem.c b/mm/shmem.c +index 7fbe67be86fa..29a369c2067f 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -493,36 +493,45 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, + info = list_entry(pos, struct shmem_inode_info, shrinklist); + inode = &info->vfs_inode; + +- if (nr_to_split && split >= nr_to_split) { +- iput(inode); +- continue; +- } ++ if (nr_to_split && split >= nr_to_split) ++ goto leave; + +- page = find_lock_page(inode->i_mapping, ++ page = find_get_page(inode->i_mapping, + (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT); + if (!page) + goto drop; + ++ /* No huge page at the end of the file: nothing to split */ + if (!PageTransHuge(page)) { +- unlock_page(page); + put_page(page); + goto drop; + } + ++ /* ++ * Leave the inode on the list if we failed to lock ++ * the page at this time. ++ * ++ * Waiting for the lock may lead to deadlock in the ++ * reclaim path. ++ */ ++ if (!trylock_page(page)) { ++ put_page(page); ++ goto leave; ++ } ++ + ret = split_huge_page(page); + unlock_page(page); + put_page(page); + +- if (ret) { +- /* split failed: leave it on the list */ +- iput(inode); +- continue; +- } ++ /* If split failed leave the inode on the list */ ++ if (ret) ++ goto leave; + + split++; + drop: + list_del_init(&info->shrinklist); + removed++; ++leave: + iput(inode); + } + +diff --git a/mm/vmscan.c b/mm/vmscan.c +index 47d5ced51f2d..503fa224c6b6 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -1846,6 +1846,20 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, + if (stat.nr_writeback && stat.nr_writeback == nr_taken) + set_bit(PGDAT_WRITEBACK, &pgdat->flags); + ++ /* ++ * If dirty pages are scanned that are not queued for IO, it ++ * implies that flushers are not doing their job. This can ++ * happen when memory pressure pushes dirty pages to the end of ++ * the LRU before the dirty limits are breached and the dirty ++ * data has expired. It can also happen when the proportion of ++ * dirty pages grows not through writes but through memory ++ * pressure reclaiming all the clean cache. And in some cases, ++ * the flushers simply cannot keep up with the allocation ++ * rate. Nudge the flusher threads in case they are asleep. ++ */ ++ if (stat.nr_unqueued_dirty == nr_taken) ++ wakeup_flusher_threads(WB_REASON_VMSCAN); ++ + /* + * Legacy memcg will stall in page writeback so avoid forcibly + * stalling here. +@@ -1858,22 +1872,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, + if (stat.nr_dirty && stat.nr_dirty == stat.nr_congested) + set_bit(PGDAT_CONGESTED, &pgdat->flags); + +- /* +- * If dirty pages are scanned that are not queued for IO, it +- * implies that flushers are not doing their job. This can +- * happen when memory pressure pushes dirty pages to the end of +- * the LRU before the dirty limits are breached and the dirty +- * data has expired. It can also happen when the proportion of +- * dirty pages grows not through writes but through memory +- * pressure reclaiming all the clean cache. And in some cases, +- * the flushers simply cannot keep up with the allocation +- * rate. Nudge the flusher threads in case they are asleep, but +- * also allow kswapd to start writing pages during reclaim. 
+- */ +- if (stat.nr_unqueued_dirty == nr_taken) { +- wakeup_flusher_threads(WB_REASON_VMSCAN); ++ /* Allow kswapd to start writing pages during reclaim. */ ++ if (stat.nr_unqueued_dirty == nr_taken) + set_bit(PGDAT_DIRTY, &pgdat->flags); +- } + + /* + * If kswapd scans pages marked marked for immediate +diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c +index 0333143a1fa7..1063a4377502 100644 +--- a/sound/drivers/aloop.c ++++ b/sound/drivers/aloop.c +@@ -192,6 +192,11 @@ static inline void loopback_timer_stop(struct loopback_pcm *dpcm) + dpcm->timer.expires = 0; + } + ++static inline void loopback_timer_stop_sync(struct loopback_pcm *dpcm) ++{ ++ del_timer_sync(&dpcm->timer); ++} ++ + #define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK) + #define CABLE_VALID_CAPTURE (1 << SNDRV_PCM_STREAM_CAPTURE) + #define CABLE_VALID_BOTH (CABLE_VALID_PLAYBACK|CABLE_VALID_CAPTURE) +@@ -326,6 +331,8 @@ static int loopback_prepare(struct snd_pcm_substream *substream) + struct loopback_cable *cable = dpcm->cable; + int bps, salign; + ++ loopback_timer_stop_sync(dpcm); ++ + salign = (snd_pcm_format_width(runtime->format) * + runtime->channels) / 8; + bps = salign * runtime->rate; +@@ -659,7 +666,9 @@ static void free_cable(struct snd_pcm_substream *substream) + return; + if (cable->streams[!substream->stream]) { + /* other stream is still alive */ ++ spin_lock_irq(&cable->lock); + cable->streams[substream->stream] = NULL; ++ spin_unlock_irq(&cable->lock); + } else { + /* free the cable */ + loopback->cables[substream->number][dev] = NULL; +@@ -698,7 +707,6 @@ static int loopback_open(struct snd_pcm_substream *substream) + loopback->cables[substream->number][dev] = cable; + } + dpcm->cable = cable; +- cable->streams[substream->stream] = dpcm; + + snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); + +@@ -730,6 +738,11 @@ static int loopback_open(struct snd_pcm_substream *substream) + runtime->hw = loopback_pcm_hardware; + else + runtime->hw = cable->hw; ++ ++ spin_lock_irq(&cable->lock); ++ cable->streams[substream->stream] = dpcm; ++ spin_unlock_irq(&cable->lock); ++ + unlock: + if (err < 0) { + free_cable(substream); +@@ -744,7 +757,7 @@ static int loopback_close(struct snd_pcm_substream *substream) + struct loopback *loopback = substream->private_data; + struct loopback_pcm *dpcm = substream->runtime->private_data; + +- loopback_timer_stop(dpcm); ++ loopback_timer_stop_sync(dpcm); + mutex_lock(&loopback->cable_lock); + free_cable(substream); + mutex_unlock(&loopback->cable_lock); +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index d5017adf9feb..c507c69029e3 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -375,6 +375,7 @@ enum { + ((pci)->device == 0x160c)) + + #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) ++#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348) + + static char *driver_short_names[] = { + [AZX_DRIVER_ICH] = "HDA Intel", +@@ -1744,6 +1745,10 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci, + else + chip->bdl_pos_adj = bdl_pos_adj[dev]; + ++ /* Workaround for a communication error on CFL (bko#199007) */ ++ if (IS_CFL(pci)) ++ chip->polling_mode = 1; ++ + err = azx_bus_init(chip, model[dev], &pci_hda_io_ops); + if (err < 0) { + kfree(hda); +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 18bab5ffbe4a..206774703a33 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -3130,6 
+3130,8 @@ static void alc256_init(struct hda_codec *codec) + + alc_update_coef_idx(codec, 0x46, 3 << 12, 0); + alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */ ++ alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 1 << 15); /* Clear bit */ ++ alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 0 << 15); + } + + static void alc256_shutup(struct hda_codec *codec) +@@ -3509,8 +3511,12 @@ static void alc269_fixup_mic_mute_hook(void *private_data, int enabled) + pinval = snd_hda_codec_get_pin_target(codec, spec->mute_led_nid); + pinval &= ~AC_PINCTL_VREFEN; + pinval |= enabled ? AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80; +- if (spec->mute_led_nid) ++ if (spec->mute_led_nid) { ++ /* temporarily power up/down for setting VREF */ ++ snd_hda_power_up_pm(codec); + snd_hda_set_pin_ctl_cache(codec, spec->mute_led_nid, pinval); ++ snd_hda_power_down_pm(codec); ++ } + } + + /* Make sure the led works even in runtime suspend */ +@@ -5375,6 +5381,7 @@ enum { + ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, + ALC298_FIXUP_TPT470_DOCK, + ALC255_FIXUP_DUMMY_LINEOUT_VERB, ++ ALC255_FIXUP_DELL_HEADSET_MIC, + }; + + static const struct hda_fixup alc269_fixups[] = { +@@ -6235,6 +6242,13 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE + }, ++ [ALC255_FIXUP_DELL_HEADSET_MIC] = { ++ .type = HDA_FIXUP_PINS, ++ .v.pins = (const struct hda_pintbl[]) { ++ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ ++ { } ++ }, ++ }, + }; + + static const struct snd_pci_quirk alc269_fixup_tbl[] = { +@@ -6289,6 +6303,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), + SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), + SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), ++ SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), ++ SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), + SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB), + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), +@@ -7032,6 +7048,8 @@ static int patch_alc269(struct hda_codec *codec) + break; + case 0x10ec0257: + spec->codec_variant = ALC269_TYPE_ALC257; ++ spec->shutup = alc256_shutup; ++ spec->init_hook = alc256_init; + spec->gen.mixer_nid = 0; + break; + case 0x10ec0215: +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c +index 59af5a8419e2..00be6e8c35a2 100644 +--- a/tools/perf/builtin-stat.c ++++ b/tools/perf/builtin-stat.c +@@ -967,7 +967,7 @@ static void print_metric_csv(void *ctx, + char buf[64], *vals, *ends; + + if (unit == NULL || fmt == NULL) { +- fprintf(out, "%s%s%s%s", csv_sep, csv_sep, csv_sep, csv_sep); ++ fprintf(out, "%s%s", csv_sep, csv_sep); + return; + } + snprintf(buf, sizeof(buf), fmt, val); +diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c +index 1ae1c5a7392e..6f22238f3217 100644 +--- a/tools/testing/selftests/x86/ptrace_syscall.c ++++ b/tools/testing/selftests/x86/ptrace_syscall.c +@@ -183,8 +183,10 @@ static void test_ptrace_syscall_restart(void) + if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0) + err(1, "PTRACE_TRACEME"); + ++ pid_t pid = getpid(), tid = syscall(SYS_gettid); ++ + printf("\tChild will make one 
syscall\n"); +- raise(SIGSTOP); ++ syscall(SYS_tgkill, pid, tid, SIGSTOP); + + syscall(SYS_gettid, 10, 11, 12, 13, 14, 15); + _exit(0); +@@ -301,9 +303,11 @@ static void test_restart_under_ptrace(void) + if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0) + err(1, "PTRACE_TRACEME"); + ++ pid_t pid = getpid(), tid = syscall(SYS_gettid); ++ + printf("\tChild will take a nap until signaled\n"); + setsigign(SIGUSR1, SA_RESTART); +- raise(SIGSTOP); ++ syscall(SYS_tgkill, pid, tid, SIGSTOP); + + syscall(SYS_pause, 0, 0, 0, 0, 0, 0); + _exit(0); |