author | Mike Pagano <mpagano@gentoo.org> | 2019-06-22 15:00:45 -0400 |
---|---|---|
committer | Mike Pagano <mpagano@gentoo.org> | 2019-06-22 15:00:45 -0400 |
commit | 0ed25f4179a9ef56b3f6dd8fdf7d92535b79ea2d (patch) | |
tree | d412c74d98444600505ad48ac36f39fa65ae5dce | |
parent | Linux patch 4.4.182 (diff) | |
download | linux-patches-0ed25f4179a9ef56b3f6dd8fdf7d92535b79ea2d.tar.gz linux-patches-0ed25f4179a9ef56b3f6dd8fdf7d92535b79ea2d.tar.bz2 linux-patches-0ed25f4179a9ef56b3f6dd8fdf7d92535b79ea2d.zip |
Linux patch 4.4.183
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4 |
-rw-r--r-- | 1182_linux-4.4.183.patch | 2216 |
2 files changed, 2220 insertions(+), 0 deletions(-)
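For reference, the new file 1182_linux-4.4.183.patch is a -p1 incremental patch that takes a 4.4.182 tree to 4.4.183. The following is only a hedged sketch of a manual application (the Gentoo kernel eclasses apply the numbered patches from this repository automatically during emerge); the source path is an assumption:

    # Manual application of the incremental patch added by this commit.
    # Assumes an unpacked linux-4.4.182 tree; paths are hypothetical.
    cd /usr/src/linux-4.4.182
    patch -p1 < 1182_linux-4.4.183.patch
    grep -E '^(VERSION|PATCHLEVEL|SUBLEVEL)' Makefile   # should now report 4 / 4 / 183

The first hunk of the patch below is exactly that Makefile SUBLEVEL bump from 182 to 183.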
diff --git a/0000_README b/0000_README index 4539ced9..eb4744ab 100644 --- a/0000_README +++ b/0000_README @@ -771,6 +771,10 @@ Patch: 1181_linux-4.4.182.patch From: http://www.kernel.org Desc: Linux 4.4.182 +Patch: 1182_linux-4.4.183.patch +From: http://www.kernel.org +Desc: Linux 4.4.183 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1182_linux-4.4.183.patch b/1182_linux-4.4.183.patch new file mode 100644 index 00000000..acd1ebca --- /dev/null +++ b/1182_linux-4.4.183.patch @@ -0,0 +1,2216 @@ +diff --git a/Makefile b/Makefile +index fcfede5e39de..4ac762e01e60 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 182 ++SUBLEVEL = 183 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts +index 4ecef6981d5c..b54c0b8a5b34 100644 +--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts ++++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts +@@ -97,6 +97,7 @@ + regulator-name = "PVDD_APIO_1V8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; ++ regulator-always-on; + }; + + ldo3_reg: LDO3 { +@@ -135,6 +136,7 @@ + regulator-name = "PVDD_ABB_1V8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; ++ regulator-always-on; + }; + + ldo9_reg: LDO9 { +diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi +index e6af41c4bbc1..3992b8ea1c48 100644 +--- a/arch/arm/boot/dts/imx6qdl.dtsi ++++ b/arch/arm/boot/dts/imx6qdl.dtsi +@@ -853,7 +853,7 @@ + compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma"; + reg = <0x020ec000 0x4000>; + interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>; +- clocks = <&clks IMX6QDL_CLK_SDMA>, ++ clocks = <&clks IMX6QDL_CLK_IPG>, + <&clks IMX6QDL_CLK_SDMA>; + clock-names = "ipg", "ahb"; + #dma-cells = <3>; +diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi +index d8ba99f1d87b..ac820dfef977 100644 +--- a/arch/arm/boot/dts/imx6sl.dtsi ++++ b/arch/arm/boot/dts/imx6sl.dtsi +@@ -657,7 +657,7 @@ + reg = <0x020ec000 0x4000>; + interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clks IMX6SL_CLK_SDMA>, +- <&clks IMX6SL_CLK_SDMA>; ++ <&clks IMX6SL_CLK_AHB>; + clock-names = "ipg", "ahb"; + #dma-cells = <3>; + /* imx6sl reuses imx6q sdma firmware */ +diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi +index 6963dff815dc..5783eb8541ed 100644 +--- a/arch/arm/boot/dts/imx6sx.dtsi ++++ b/arch/arm/boot/dts/imx6sx.dtsi +@@ -732,7 +732,7 @@ + compatible = "fsl,imx6sx-sdma", "fsl,imx6q-sdma"; + reg = <0x020ec000 0x4000>; + interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>; +- clocks = <&clks IMX6SX_CLK_SDMA>, ++ clocks = <&clks IMX6SX_CLK_IPG>, + <&clks IMX6SX_CLK_SDMA>; + clock-names = "ipg", "ahb"; + #dma-cells = <3>; +diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c +index a003833ac112..013f4d55ede8 100644 +--- a/arch/arm/mach-exynos/suspend.c ++++ b/arch/arm/mach-exynos/suspend.c +@@ -508,8 +508,27 @@ early_wakeup: + + static void exynos5420_prepare_pm_resume(void) + { ++ unsigned int mpidr, cluster; ++ ++ mpidr = read_cpuid_mpidr(); ++ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); ++ + if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) + WARN_ON(mcpm_cpu_powered_up()); ++ ++ if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) { ++ /* ++ * When system is resumed on the LITTLE/KFC core (cluster 1), ++ * the DSCR is not properly 
updated until the power is turned ++ * on also for the cluster 0. Enable it for a while to ++ * propagate the SPNIDEN and SPIDEN signals from Secure JTAG ++ * block and avoid undefined instruction issue on CP14 reset. ++ */ ++ pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN, ++ EXYNOS_COMMON_CONFIGURATION(0)); ++ pmu_raw_writel(0, ++ EXYNOS_COMMON_CONFIGURATION(0)); ++ } + } + + static void exynos5420_pm_resume(void) +diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c +index aa19b7ac8222..476c7b4be378 100644 +--- a/arch/ia64/mm/numa.c ++++ b/arch/ia64/mm/numa.c +@@ -49,6 +49,7 @@ paddr_to_nid(unsigned long paddr) + + return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0); + } ++EXPORT_SYMBOL(paddr_to_nid); + + #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA) + /* +diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h +index a92d95aee42d..1883627eb12c 100644 +--- a/arch/powerpc/include/asm/kvm_host.h ++++ b/arch/powerpc/include/asm/kvm_host.h +@@ -250,6 +250,7 @@ struct kvm_arch { + #ifdef CONFIG_PPC_BOOK3S_64 + struct list_head spapr_tce_tables; + struct list_head rtas_tokens; ++ struct mutex rtas_token_lock; + DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1); + #endif + #ifdef CONFIG_KVM_MPIC +diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c +index 099c79d8c160..4aab1c9c83e1 100644 +--- a/arch/powerpc/kvm/book3s.c ++++ b/arch/powerpc/kvm/book3s.c +@@ -809,6 +809,7 @@ int kvmppc_core_init_vm(struct kvm *kvm) + #ifdef CONFIG_PPC64 + INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); + INIT_LIST_HEAD(&kvm->arch.rtas_tokens); ++ mutex_init(&kvm->arch.rtas_token_lock); + #endif + + return kvm->arch.kvm_ops->init_vm(kvm); +diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c +index ef27fbd5d9c5..b1b2273d1f6d 100644 +--- a/arch/powerpc/kvm/book3s_rtas.c ++++ b/arch/powerpc/kvm/book3s_rtas.c +@@ -133,7 +133,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name) + { + struct rtas_token_definition *d, *tmp; + +- lockdep_assert_held(&kvm->lock); ++ lockdep_assert_held(&kvm->arch.rtas_token_lock); + + list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) { + if (rtas_name_matches(d->handler->name, name)) { +@@ -154,7 +154,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token) + bool found; + int i; + +- lockdep_assert_held(&kvm->lock); ++ lockdep_assert_held(&kvm->arch.rtas_token_lock); + + list_for_each_entry(d, &kvm->arch.rtas_tokens, list) { + if (d->token == token) +@@ -193,14 +193,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp) + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + +- mutex_lock(&kvm->lock); ++ mutex_lock(&kvm->arch.rtas_token_lock); + + if (args.token) + rc = rtas_token_define(kvm, args.name, args.token); + else + rc = rtas_token_undefine(kvm, args.name); + +- mutex_unlock(&kvm->lock); ++ mutex_unlock(&kvm->arch.rtas_token_lock); + + return rc; + } +@@ -232,7 +232,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) + orig_rets = args.rets; + args.rets = &args.args[be32_to_cpu(args.nargs)]; + +- mutex_lock(&vcpu->kvm->lock); ++ mutex_lock(&vcpu->kvm->arch.rtas_token_lock); + + rc = -ENOENT; + list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) { +@@ -243,7 +243,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) + } + } + +- mutex_unlock(&vcpu->kvm->lock); ++ mutex_unlock(&vcpu->kvm->arch.rtas_token_lock); + + if (rc == 0) { + args.rets = orig_rets; +@@ -269,8 +269,6 @@ void 
kvmppc_rtas_tokens_free(struct kvm *kvm) + { + struct rtas_token_definition *d, *tmp; + +- lockdep_assert_held(&kvm->lock); +- + list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) { + list_del(&d->list); + kfree(d); +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c +index 5ddb1debba95..23911ecfbad6 100644 +--- a/arch/s390/kvm/kvm-s390.c ++++ b/arch/s390/kvm/kvm-s390.c +@@ -2721,21 +2721,28 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, + const struct kvm_memory_slot *new, + enum kvm_mr_change change) + { +- int rc; +- +- /* If the basics of the memslot do not change, we do not want +- * to update the gmap. Every update causes several unnecessary +- * segment translation exceptions. This is usually handled just +- * fine by the normal fault handler + gmap, but it will also +- * cause faults on the prefix page of running guest CPUs. +- */ +- if (old->userspace_addr == mem->userspace_addr && +- old->base_gfn * PAGE_SIZE == mem->guest_phys_addr && +- old->npages * PAGE_SIZE == mem->memory_size) +- return; ++ int rc = 0; + +- rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, +- mem->guest_phys_addr, mem->memory_size); ++ switch (change) { ++ case KVM_MR_DELETE: ++ rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, ++ old->npages * PAGE_SIZE); ++ break; ++ case KVM_MR_MOVE: ++ rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, ++ old->npages * PAGE_SIZE); ++ if (rc) ++ break; ++ /* FALLTHROUGH */ ++ case KVM_MR_CREATE: ++ rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, ++ mem->guest_phys_addr, mem->memory_size); ++ break; ++ case KVM_MR_FLAGS_ONLY: ++ break; ++ default: ++ WARN(1, "Unknown KVM MR CHANGE: %d\n", change); ++ } + if (rc) + pr_warn("failed to commit memory region\n"); + return; +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index e94e6f16172b..6f2483292de0 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -717,8 +717,11 @@ static void init_amd_zn(struct cpuinfo_x86 *c) + { + set_cpu_cap(c, X86_FEATURE_ZEN); + +- /* Fix erratum 1076: CPB feature bit not being set in CPUID. */ +- if (!cpu_has(c, X86_FEATURE_CPB)) ++ /* ++ * Fix erratum 1076: CPB feature bit not being set in CPUID. ++ * Always set it, except when running under a hypervisor. 
++ */ ++ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB)) + set_cpu_cap(c, X86_FEATURE_CPB); + } + +diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c +index 325ed90511cf..3572434a73cb 100644 +--- a/arch/x86/kernel/cpu/perf_event_intel.c ++++ b/arch/x86/kernel/cpu/perf_event_intel.c +@@ -2513,7 +2513,7 @@ static int intel_pmu_hw_config(struct perf_event *event) + return ret; + + if (event->attr.precise_ip) { +- if (!(event->attr.freq || event->attr.wakeup_events)) { ++ if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) { + event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; + if (!(event->attr.sample_type & + ~intel_pmu_free_running_flags(event))) +diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c +index 23a7c7ba377a..8fc07ea23344 100644 +--- a/arch/x86/kvm/pmu_intel.c ++++ b/arch/x86/kvm/pmu_intel.c +@@ -235,11 +235,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + } + break; + default: +- if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || +- (pmc = get_fixed_pmc(pmu, msr))) { +- if (!msr_info->host_initiated) +- data = (s64)(s32)data; +- pmc->counter += data - pmc_read_counter(pmc); ++ if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) { ++ if (msr_info->host_initiated) ++ pmc->counter = data; ++ else ++ pmc->counter = (s32)data; ++ return 0; ++ } else if ((pmc = get_fixed_pmc(pmu, msr))) { ++ pmc->counter = data; + return 0; + } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { + if (data == pmc->eventsel) +diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c +index 9bd115484745..5f0e596b0519 100644 +--- a/arch/x86/pci/irq.c ++++ b/arch/x86/pci/irq.c +@@ -1117,6 +1117,8 @@ static struct dmi_system_id __initdata pciirq_dmi_table[] = { + + void __init pcibios_irq_init(void) + { ++ struct irq_routing_table *rtable = NULL; ++ + DBG(KERN_DEBUG "PCI: IRQ init\n"); + + if (raw_pci_ops == NULL) +@@ -1127,8 +1129,10 @@ void __init pcibios_irq_init(void) + pirq_table = pirq_find_routing_table(); + + #ifdef CONFIG_PCI_BIOS +- if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) ++ if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) { + pirq_table = pcibios_get_irq_routing_table(); ++ rtable = pirq_table; ++ } + #endif + if (pirq_table) { + pirq_peer_trick(); +@@ -1143,8 +1147,10 @@ void __init pcibios_irq_init(void) + * If we're using the I/O APIC, avoid using the PCI IRQ + * routing table + */ +- if (io_apic_assign_pci_irqs) ++ if (io_apic_assign_pci_irqs) { ++ kfree(rtable); + pirq_table = NULL; ++ } + } + + x86_init.pci.fixup_irqs(); +diff --git a/drivers/android/binder.c b/drivers/android/binder.c +index 7dc52ba27eac..01eb2a2a3746 100644 +--- a/drivers/android/binder.c ++++ b/drivers/android/binder.c +@@ -570,6 +570,12 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, + + if (mm) { + down_write(&mm->mmap_sem); ++ if (!mmget_still_valid(mm)) { ++ if (allocate == 0) ++ goto free_range; ++ goto err_no_vma; ++ } ++ + vma = proc->vma; + if (vma && mm != proc->vma_vm_mm) { + pr_err("%d: vma mm and task mm mismatch\n", +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index d543172b20b3..a352f09baef6 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -4176,9 +4176,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { + { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + +- /* drives which fail FPDMA_AA activation (some 
may freeze afterwards) */ +- { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, +- { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, ++ /* drives which fail FPDMA_AA activation (some may freeze afterwards) ++ the ST disks also have LPM issues */ ++ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA | ++ ATA_HORKAGE_NOLPM, }, ++ { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA | ++ ATA_HORKAGE_NOLPM, }, + { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA }, + + /* Blacklist entries taken from Silicon Image 3124/3132 +diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c +index 9040878e3e2b..a6cda84b67da 100644 +--- a/drivers/clk/rockchip/clk-rk3288.c ++++ b/drivers/clk/rockchip/clk-rk3288.c +@@ -797,6 +797,9 @@ static const int rk3288_saved_cru_reg_ids[] = { + RK3288_CLKSEL_CON(10), + RK3288_CLKSEL_CON(33), + RK3288_CLKSEL_CON(37), ++ ++ /* We turn aclk_dmac1 on for suspend; this will restore it */ ++ RK3288_CLKGATE_CON(10), + }; + + static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)]; +@@ -812,6 +815,14 @@ static int rk3288_clk_suspend(void) + readl_relaxed(rk3288_cru_base + reg_id); + } + ++ /* ++ * Going into deep sleep (specifically setting PMU_CLR_DMA in ++ * RK3288_PMU_PWRMODE_CON1) appears to fail unless ++ * "aclk_dmac1" is on. ++ */ ++ writel_relaxed(1 << (12 + 16), ++ rk3288_cru_base + RK3288_CLKGATE_CON(10)); ++ + /* + * Switch PLLs other than DPLL (for SDRAM) to slow mode to + * avoid crashes on resume. The Mask ROM on the system will +diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c +index e3b8bebfdd30..4afca3968773 100644 +--- a/drivers/crypto/amcc/crypto4xx_alg.c ++++ b/drivers/crypto/amcc/crypto4xx_alg.c +@@ -138,8 +138,7 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher, + sa = (struct dynamic_sa_ctl *) ctx->sa_in; + ctx->hash_final = 0; + +- set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ? 
+- SA_SAVE_IV : SA_NOT_SAVE_IV), ++ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV, + SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, + SA_NO_HEADER_PROC, SA_HASH_ALG_NULL, + SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO, +diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c +index 1e810f5f03fa..78d0722feacb 100644 +--- a/drivers/crypto/amcc/crypto4xx_core.c ++++ b/drivers/crypto/amcc/crypto4xx_core.c +@@ -645,15 +645,6 @@ static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev, + addr = dma_map_page(dev->core_dev->device, sg_page(dst), + dst->offset, dst->length, DMA_FROM_DEVICE); + } +- +- if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) { +- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); +- +- crypto4xx_memcpy_from_le32((u32 *)req->iv, +- pd_uinfo->sr_va->save_iv, +- crypto_skcipher_ivsize(skcipher)); +- } +- + crypto4xx_ret_sg_desc(dev, pd_uinfo); + if (ablk_req->base.complete != NULL) + ablk_req->base.complete(&ablk_req->base, 0); +diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c +index 7d56b47e4fcf..25e25b64bc89 100644 +--- a/drivers/dma/idma64.c ++++ b/drivers/dma/idma64.c +@@ -594,7 +594,7 @@ static int idma64_probe(struct idma64_chip *chip) + idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + +- idma64->dma.dev = chip->dev; ++ idma64->dma.dev = chip->sysdev; + + ret = dma_async_device_register(&idma64->dma); + if (ret) +@@ -632,6 +632,7 @@ static int idma64_platform_probe(struct platform_device *pdev) + { + struct idma64_chip *chip; + struct device *dev = &pdev->dev; ++ struct device *sysdev = dev->parent; + struct resource *mem; + int ret; + +@@ -648,11 +649,12 @@ static int idma64_platform_probe(struct platform_device *pdev) + if (IS_ERR(chip->regs)) + return PTR_ERR(chip->regs); + +- ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); ++ ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + chip->dev = dev; ++ chip->sysdev = sysdev; + + ret = idma64_probe(chip); + if (ret) +diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h +index f6aeff0af8a5..e40c69bd1fb5 100644 +--- a/drivers/dma/idma64.h ++++ b/drivers/dma/idma64.h +@@ -215,12 +215,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value) + /** + * struct idma64_chip - representation of iDMA 64-bit controller hardware + * @dev: struct device of the DMA controller ++ * @sysdev: struct device of the physical device that does DMA + * @irq: irq line + * @regs: memory mapped I/O space + * @idma64: struct idma64 that is filed by idma64_probe() + */ + struct idma64_chip { + struct device *dev; ++ struct device *sysdev; + int irq; + void __iomem *regs; + struct idma64 *idma64; +diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig +index 469dc378adeb..aaae6040b4c8 100644 +--- a/drivers/gpio/Kconfig ++++ b/drivers/gpio/Kconfig +@@ -579,6 +579,7 @@ config GPIO_ADP5588 + config GPIO_ADP5588_IRQ + bool "Interrupt controller support for ADP5588" + depends on GPIO_ADP5588=y ++ select GPIOLIB_IRQCHIP + help + Say yes here to enable the adp5588 to be used as an interrupt + controller. It requires the driver to be built in the kernel. 
+diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c +index 9943273ec981..c8c49b1d5f9f 100644 +--- a/drivers/gpio/gpio-omap.c ++++ b/drivers/gpio/gpio-omap.c +@@ -292,6 +292,22 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset) + } + } + ++/* ++ * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain. ++ * See TRM section for GPIO for "Wake-Up Generation" for the list of GPIOs ++ * in wakeup domain. If bank->non_wakeup_gpios is not configured, assume none ++ * are capable waking up the system from off mode. ++ */ ++static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask) ++{ ++ u32 no_wake = bank->non_wakeup_gpios; ++ ++ if (no_wake) ++ return !!(~no_wake & gpio_mask); ++ ++ return false; ++} ++ + static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, + unsigned trigger) + { +@@ -323,13 +339,7 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, + } + + /* This part needs to be executed always for OMAP{34xx, 44xx} */ +- if (!bank->regs->irqctrl) { +- /* On omap24xx proceed only when valid GPIO bit is set */ +- if (bank->non_wakeup_gpios) { +- if (!(bank->non_wakeup_gpios & gpio_bit)) +- goto exit; +- } +- ++ if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) { + /* + * Log the edge gpio and manually trigger the IRQ + * after resume if the input level changes +@@ -342,7 +352,6 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, + bank->enabled_non_wakeup_gpios &= ~gpio_bit; + } + +-exit: + bank->level_mask = + readl_relaxed(bank->base + bank->regs->leveldetect0) | + readl_relaxed(bank->base + bank->regs->leveldetect1); +diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c +index c7c243e9b808..4300e27ed113 100644 +--- a/drivers/gpu/drm/i2c/adv7511.c ++++ b/drivers/gpu/drm/i2c/adv7511.c +@@ -781,11 +781,11 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder, + vsync_polarity = 1; + } + +- if (mode->vrefresh <= 24000) ++ if (drm_mode_vrefresh(mode) <= 24) + low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ; +- else if (mode->vrefresh <= 25000) ++ else if (drm_mode_vrefresh(mode) <= 25) + low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ; +- else if (mode->vrefresh <= 30000) ++ else if (drm_mode_vrefresh(mode) <= 30) + low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ; + else + low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +index ad0dd566aded..8dba10135d53 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +@@ -2442,7 +2442,8 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, + + cmd = container_of(header, typeof(*cmd), header); + +- if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) { ++ if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX || ++ cmd->body.type < SVGA3D_SHADERTYPE_MIN) { + DRM_ERROR("Illegal shader type %u.\n", + (unsigned) cmd->body.type); + return -EINVAL; +@@ -2681,6 +2682,10 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, + if (view_type == vmw_view_max) + return -EINVAL; + cmd = container_of(header, typeof(*cmd), header); ++ if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) { ++ DRM_ERROR("Invalid surface id.\n"); ++ return -EINVAL; ++ } + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->sid, &srf_node); +diff --git 
a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c +index 9d7be5af2bf2..6618db75fa25 100644 +--- a/drivers/i2c/busses/i2c-acorn.c ++++ b/drivers/i2c/busses/i2c-acorn.c +@@ -83,6 +83,7 @@ static struct i2c_algo_bit_data ioc_data = { + + static struct i2c_adapter ioc_ops = { + .nr = 0, ++ .name = "ioc", + .algo_data = &ioc_data, + }; + +diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c +index 57e3790c87b1..e56b774e7cf9 100644 +--- a/drivers/i2c/i2c-dev.c ++++ b/drivers/i2c/i2c-dev.c +@@ -295,6 +295,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client, + rdwr_pa[i].buf[0] < 1 || + rdwr_pa[i].len < rdwr_pa[i].buf[0] + + I2C_SMBUS_BLOCK_MAX) { ++ i++; + res = -EINVAL; + break; + } +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c +index 67c4c73343d4..6968154a073e 100644 +--- a/drivers/infiniband/hw/mlx4/main.c ++++ b/drivers/infiniband/hw/mlx4/main.c +@@ -1042,6 +1042,8 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) + * mlx4_ib_vma_close(). + */ + down_write(&owning_mm->mmap_sem); ++ if (!mmget_still_valid(owning_mm)) ++ goto skip_mm; + for (i = 0; i < HW_BAR_COUNT; i++) { + vma = context->hw_bar_info[i].vma; + if (!vma) +@@ -1061,6 +1063,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) + context->hw_bar_info[i].vma->vm_ops = NULL; + } + ++skip_mm: + up_write(&owning_mm->mmap_sem); + mmput(owning_mm); + put_task_struct(owning_process); +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index 3e97c4b2ebed..b965561a4162 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -3983,9 +3983,7 @@ static void __init init_no_remapping_devices(void) + + /* This IOMMU has *only* gfx devices. 
Either bypass it or + set the gfx_mapped flag, as appropriate */ +- if (dmar_map_gfx) { +- intel_iommu_gfx_mapped = 1; +- } else { ++ if (!dmar_map_gfx) { + drhd->ignored = 1; + for_each_active_dev_scope(drhd->devices, + drhd->devices_cnt, i, dev) +@@ -4694,6 +4692,9 @@ int __init intel_iommu_init(void) + goto out_free_reserved_range; + } + ++ if (dmar_map_gfx) ++ intel_iommu_gfx_mapped = 1; ++ + init_no_remapping_devices(); + + ret = init_dmars(); +diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c +index 0d29b5a6356d..8cbb75d09a1d 100644 +--- a/drivers/isdn/mISDN/socket.c ++++ b/drivers/isdn/mISDN/socket.c +@@ -394,7 +394,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) + memcpy(di.channelmap, dev->channelmap, + sizeof(di.channelmap)); + di.nrbchan = dev->nrbchan; +- strcpy(di.name, dev_name(&dev->dev)); ++ strscpy(di.name, dev_name(&dev->dev), sizeof(di.name)); + if (copy_to_user((void __user *)arg, &di, sizeof(di))) + err = -EFAULT; + } else +@@ -678,7 +678,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) + memcpy(di.channelmap, dev->channelmap, + sizeof(di.channelmap)); + di.nrbchan = dev->nrbchan; +- strcpy(di.name, dev_name(&dev->dev)); ++ strscpy(di.name, dev_name(&dev->dev), sizeof(di.name)); + if (copy_to_user((void __user *)arg, &di, sizeof(di))) + err = -EFAULT; + } else +@@ -692,6 +692,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) + err = -EFAULT; + break; + } ++ dn.name[sizeof(dn.name) - 1] = '\0'; + dev = get_mdevice(dn.id); + if (dev) + err = device_rename(&dev->dev, dn.name); +diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c +index 646fe85261c1..158eae17031c 100644 +--- a/drivers/md/bcache/bset.c ++++ b/drivers/md/bcache/bset.c +@@ -823,12 +823,22 @@ unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k, + struct bset *i = bset_tree_last(b)->data; + struct bkey *m, *prev = NULL; + struct btree_iter iter; ++ struct bkey preceding_key_on_stack = ZERO_KEY; ++ struct bkey *preceding_key_p = &preceding_key_on_stack; + + BUG_ON(b->ops->is_extents && !KEY_SIZE(k)); + +- m = bch_btree_iter_init(b, &iter, b->ops->is_extents +- ? PRECEDING_KEY(&START_KEY(k)) +- : PRECEDING_KEY(k)); ++ /* ++ * If k has preceding key, preceding_key_p will be set to address ++ * of k's preceding key; otherwise preceding_key_p will be set ++ * to NULL inside preceding_key(). ++ */ ++ if (b->ops->is_extents) ++ preceding_key(&START_KEY(k), &preceding_key_p); ++ else ++ preceding_key(k, &preceding_key_p); ++ ++ m = bch_btree_iter_init(b, &iter, preceding_key_p); + + if (b->ops->insert_fixup(b, k, &iter, replace_key)) + return status; +diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h +index ae964624efb2..b935839ab79c 100644 +--- a/drivers/md/bcache/bset.h ++++ b/drivers/md/bcache/bset.h +@@ -417,20 +417,26 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k) + return __bch_cut_back(where, k); + } + +-#define PRECEDING_KEY(_k) \ +-({ \ +- struct bkey *_ret = NULL; \ +- \ +- if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \ +- _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \ +- \ +- if (!_ret->low) \ +- _ret->high--; \ +- _ret->low--; \ +- } \ +- \ +- _ret; \ +-}) ++/* ++ * Pointer '*preceding_key_p' points to a memory object to store preceding ++ * key of k. If the preceding key does not exist, set '*preceding_key_p' to ++ * NULL. 
So the caller of preceding_key() needs to take care of memory ++ * which '*preceding_key_p' pointed to before calling preceding_key(). ++ * Currently the only caller of preceding_key() is bch_btree_insert_key(), ++ * and it points to an on-stack variable, so the memory release is handled ++ * by stackframe itself. ++ */ ++static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p) ++{ ++ if (KEY_INODE(k) || KEY_OFFSET(k)) { ++ (**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0); ++ if (!(*preceding_key_p)->low) ++ (*preceding_key_p)->high--; ++ (*preceding_key_p)->low--; ++ } else { ++ (*preceding_key_p) = NULL; ++ } ++} + + static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k) + { +diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c +index ac867489b5a9..498875193386 100644 +--- a/drivers/mfd/intel-lpss.c ++++ b/drivers/mfd/intel-lpss.c +@@ -267,6 +267,9 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss) + { + u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN; + ++ /* Set the device in reset state */ ++ writel(0, lpss->priv + LPSS_PRIV_RESETS); ++ + intel_lpss_deassert_reset(lpss); + + intel_lpss_set_remap_addr(lpss); +diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c +index 72aab60ae846..db8684430f02 100644 +--- a/drivers/mfd/twl6040.c ++++ b/drivers/mfd/twl6040.c +@@ -316,8 +316,19 @@ int twl6040_power(struct twl6040 *twl6040, int on) + } + } + ++ /* ++ * Register access can produce errors after power-up unless we ++ * wait at least 8ms based on measurements on duovero. ++ */ ++ usleep_range(10000, 12000); ++ + /* Sync with the HW */ +- regcache_sync(twl6040->regmap); ++ ret = regcache_sync(twl6040->regmap); ++ if (ret) { ++ dev_err(twl6040->dev, "Failed to sync with the HW: %i\n", ++ ret); ++ goto out; ++ } + + /* Default PLL configuration after power up */ + twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL; +diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c +index 99635dd9dbac..bb3a76ad80da 100644 +--- a/drivers/misc/kgdbts.c ++++ b/drivers/misc/kgdbts.c +@@ -1132,7 +1132,7 @@ static void kgdbts_put_char(u8 chr) + + static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp) + { +- int len = strlen(kmessage); ++ size_t len = strlen(kmessage); + + if (len >= MAX_CONFIG_LEN) { + printk(KERN_ERR "kgdbts: config string too long\n"); +@@ -1152,7 +1152,7 @@ static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp) + + strcpy(config, kmessage); + /* Chop out \n char as a result of echo */ +- if (config[len - 1] == '\n') ++ if (len && config[len - 1] == '\n') + config[len - 1] = '\0'; + + /* Go and configure with the new params. 
*/ +diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c +index 3acde3b9b767..7799cf33cc6e 100644 +--- a/drivers/net/ethernet/dec/tulip/de4x5.c ++++ b/drivers/net/ethernet/dec/tulip/de4x5.c +@@ -2106,7 +2106,6 @@ static struct eisa_driver de4x5_eisa_driver = { + .remove = de4x5_eisa_remove, + } + }; +-MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids); + #endif + + #ifdef CONFIG_PCI +diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c +index 734f655c99c1..51bfe74be8d4 100644 +--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c ++++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c +@@ -1050,7 +1050,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, + cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type); + break; + case ETHTOOL_GRXRINGS: +- cmd->data = adapter->num_rx_qs - 1; ++ cmd->data = adapter->num_rx_qs; + break; + default: + return -EINVAL; +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c +index afaf79b8761f..2d9f4ed9a65e 100644 +--- a/drivers/net/ethernet/renesas/sh_eth.c ++++ b/drivers/net/ethernet/renesas/sh_eth.c +@@ -1408,6 +1408,10 @@ static void sh_eth_dev_exit(struct net_device *ndev) + sh_eth_get_stats(ndev); + sh_eth_reset(ndev); + ++ /* Set the RMII mode again if required */ ++ if (mdp->cd->rmiimode) ++ sh_eth_write(ndev, 0x1, RMIIMODE); ++ + /* Set MAC address again */ + update_mac_address(ndev); + } +diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c +index 01f95d192d25..2b16a5fed9de 100644 +--- a/drivers/net/usb/ipheth.c ++++ b/drivers/net/usb/ipheth.c +@@ -437,17 +437,18 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net) + dev); + dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + ++ netif_stop_queue(net); + retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC); + if (retval) { + dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n", + __func__, retval); + dev->net->stats.tx_errors++; + dev_kfree_skb_any(skb); ++ netif_wake_queue(net); + } else { + dev->net->stats.tx_packets++; + dev->net->stats.tx_bytes += skb->len; + dev_consume_skb_any(skb); +- netif_stop_queue(net); + } + + return NETDEV_TX_OK; +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c +index 6fd4e5a5ef4a..931cc33e46f0 100644 +--- a/drivers/nvmem/core.c ++++ b/drivers/nvmem/core.c +@@ -789,7 +789,7 @@ static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, + void *buf) + { + u8 *p, *b; +- int i, bit_offset = cell->bit_offset; ++ int i, extra, bit_offset = cell->bit_offset; + + p = b = buf; + if (bit_offset) { +@@ -804,11 +804,16 @@ static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, + p = b; + *b++ >>= bit_offset; + } +- +- /* result fits in less bytes */ +- if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE)) +- *p-- = 0; ++ } else { ++ /* point to the msb */ ++ p += cell->bytes - 1; + } ++ ++ /* result fits in less bytes */ ++ extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE); ++ while (--extra >= 0) ++ *p-- = 0; ++ + /* clear msb bits if any leftover in the last byte */ + *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0); + } +diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c +index 414c33686621..b18cf12731ee 100644 +--- a/drivers/pci/host/pcie-rcar.c ++++ b/drivers/pci/host/pcie-rcar.c +@@ -737,6 +737,10 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) + + /* setup MSI data target */ + msi->pages = 
__get_free_pages(GFP_KERNEL, 0); ++ if (!msi->pages) { ++ err = -ENOMEM; ++ goto err; ++ } + base = virt_to_phys((void *)msi->pages); + + rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR); +diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c +index 4cfa46360d12..6a2499f4d610 100644 +--- a/drivers/pci/host/pcie-xilinx.c ++++ b/drivers/pci/host/pcie-xilinx.c +@@ -349,14 +349,19 @@ static const struct irq_domain_ops msi_domain_ops = { + * xilinx_pcie_enable_msi - Enable MSI support + * @port: PCIe port information + */ +-static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) ++static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) + { + phys_addr_t msg_addr; + + port->msi_pages = __get_free_pages(GFP_KERNEL, 0); ++ if (!port->msi_pages) ++ return -ENOMEM; ++ + msg_addr = virt_to_phys((void *)port->msi_pages); + pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1); + pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); ++ ++ return 0; + } + + /* INTx Functions */ +@@ -555,6 +560,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) + struct device *dev = port->dev; + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node; ++ int ret; + + /* Setup INTx */ + pcie_intc_node = of_get_next_child(node, NULL); +@@ -582,7 +588,9 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) + return PTR_ERR(port->irq_domain); + } + +- xilinx_pcie_enable_msi(port); ++ ret = xilinx_pcie_enable_msi(port); ++ if (ret) ++ return ret; + } + + return 0; +diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c +index f2fcbe944d94..aae295708ea7 100644 +--- a/drivers/pci/hotplug/rpadlpar_core.c ++++ b/drivers/pci/hotplug/rpadlpar_core.c +@@ -55,6 +55,7 @@ static struct device_node *find_vio_slot_node(char *drc_name) + if ((rc == 0) && (!strcmp(drc_name, name))) + break; + } ++ of_node_put(parent); + + return dn; + } +@@ -78,6 +79,7 @@ static struct device_node *find_php_slot_pci_node(char *drc_name, + return np; + } + ++/* Returns a device_node with its reference count incremented */ + static struct device_node *find_dlpar_node(char *drc_name, int *node_type) + { + struct device_node *dn; +@@ -314,6 +316,7 @@ int dlpar_add_slot(char *drc_name) + rc = dlpar_add_phb(drc_name, dn); + break; + } ++ of_node_put(dn); + + printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name); + exit: +@@ -447,6 +450,7 @@ int dlpar_remove_slot(char *drc_name) + rc = dlpar_remove_pci_slot(drc_name, dn); + break; + } ++ of_node_put(dn); + vm_unmap_aliases(); + + printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); +diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c +index a0b8c8a8c323..5c285f2b3a65 100644 +--- a/drivers/platform/chrome/cros_ec_proto.c ++++ b/drivers/platform/chrome/cros_ec_proto.c +@@ -66,6 +66,17 @@ static int send_command(struct cros_ec_device *ec_dev, + else + xfer_fxn = ec_dev->cmd_xfer; + ++ if (!xfer_fxn) { ++ /* ++ * This error can happen if a communication error happened and ++ * the EC is trying to use protocol v2, on an underlying ++ * communication mechanism that does not support v2. 
++ */ ++ dev_err_once(ec_dev->dev, ++ "missing EC transfer API, cannot send command\n"); ++ return -EIO; ++ } ++ + ret = (*xfer_fxn)(ec_dev, msg); + if (msg->result == EC_RES_IN_PROGRESS) { + int i; +diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c +index ec84ff8ad1b4..6911f9662300 100644 +--- a/drivers/pwm/core.c ++++ b/drivers/pwm/core.c +@@ -284,10 +284,12 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip, + if (IS_ENABLED(CONFIG_OF)) + of_pwmchip_add(chip); + +- pwmchip_sysfs_export(chip); +- + out: + mutex_unlock(&pwm_lock); ++ ++ if (!ret) ++ pwmchip_sysfs_export(chip); ++ + return ret; + } + EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity); +@@ -321,7 +323,7 @@ int pwmchip_remove(struct pwm_chip *chip) + unsigned int i; + int ret = 0; + +- pwmchip_sysfs_unexport_children(chip); ++ pwmchip_sysfs_unexport(chip); + + mutex_lock(&pwm_lock); + +@@ -341,8 +343,6 @@ int pwmchip_remove(struct pwm_chip *chip) + + free_pwms(chip); + +- pwmchip_sysfs_unexport(chip); +- + out: + mutex_unlock(&pwm_lock); + return ret; +diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c +index 062dff1c902d..ede17f89d57f 100644 +--- a/drivers/pwm/pwm-tiehrpwm.c ++++ b/drivers/pwm/pwm-tiehrpwm.c +@@ -385,6 +385,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) + } + + /* Update shadow register first before modifying active register */ ++ ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK, ++ AQSFRC_RLDCSF_ZRO); + ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val); + /* + * Changes to immediate action on Action Qualifier. This puts +diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c +index 375008e2be20..199370e41da9 100644 +--- a/drivers/pwm/sysfs.c ++++ b/drivers/pwm/sysfs.c +@@ -338,19 +338,6 @@ void pwmchip_sysfs_export(struct pwm_chip *chip) + } + + void pwmchip_sysfs_unexport(struct pwm_chip *chip) +-{ +- struct device *parent; +- +- parent = class_find_device(&pwm_class, NULL, chip, +- pwmchip_sysfs_match); +- if (parent) { +- /* for class_find_device() */ +- put_device(parent); +- device_unregister(parent); +- } +-} +- +-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) + { + struct device *parent; + unsigned int i; +@@ -368,6 +355,7 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) + } + + put_device(parent); ++ device_unregister(parent); + } + + static int __init pwm_sysfs_init(void) +diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c +index 28c671b609b2..0c71b69b9f88 100644 +--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c ++++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c +@@ -829,7 +829,7 @@ ret_err_rqe: + ((u64)err_entry->data.err_warn_bitmap_hi << 32) | + (u64)err_entry->data.err_warn_bitmap_lo; + for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) { +- if (err_warn_bit_map & (u64) (1 << i)) { ++ if (err_warn_bit_map & ((u64)1 << i)) { + err_warn = i; + break; + } +diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c +index f3bb7af4e984..5eaf14c15590 100644 +--- a/drivers/scsi/cxgbi/libcxgbi.c ++++ b/drivers/scsi/cxgbi/libcxgbi.c +@@ -634,6 +634,10 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) + + if (ndev->flags & IFF_LOOPBACK) { + ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr); ++ if (!ndev) { ++ err = -ENETUNREACH; ++ goto rel_neigh; ++ } + mtu = ndev->mtu; + pr_info("rt dev %s, loopback -> %s, mtu %u.\n", + n->dev->name, ndev->name, mtu); +diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c +index 
ee1f9ee995e5..400eee9d7783 100644 +--- a/drivers/scsi/libsas/sas_expander.c ++++ b/drivers/scsi/libsas/sas_expander.c +@@ -978,6 +978,8 @@ static struct domain_device *sas_ex_discover_expander( + list_del(&child->dev_list_node); + spin_unlock_irq(&parent->port->dev_list_lock); + sas_put_device(child); ++ sas_port_delete(phy->port); ++ phy->port = NULL; + return NULL; + } + list_add_tail(&child->siblings, &parent->ex_dev.children); +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c +index 398c9a0a5ade..82a690924f5e 100644 +--- a/drivers/scsi/lpfc/lpfc_els.c ++++ b/drivers/scsi/lpfc/lpfc_els.c +@@ -6498,7 +6498,10 @@ int + lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) + { + struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, +- rrq->nlp_DID); ++ rrq->nlp_DID); ++ if (!ndlp) ++ return 1; ++ + if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) + return lpfc_issue_els_rrq(rrq->vport, ndlp, + rrq->nlp_DID, rrq); +diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c +index 105597a885cb..33b10dd7d87e 100644 +--- a/drivers/soc/mediatek/mtk-pmic-wrap.c ++++ b/drivers/soc/mediatek/mtk-pmic-wrap.c +@@ -591,7 +591,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp) + static int pwrap_init_cipher(struct pmic_wrapper *wrp) + { + int ret; +- u32 rdata; ++ u32 rdata = 0; + + pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST); + pwrap_writel(wrp, 0x0, PWRAP_CIPHER_SWRST); +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c +index e87b6fc9f4c6..193aa3da5033 100644 +--- a/drivers/spi/spi-pxa2xx.c ++++ b/drivers/spi/spi-pxa2xx.c +@@ -1371,12 +1371,7 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { + + static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param) + { +- struct device *dev = param; +- +- if (dev != chan->device->dev->parent) +- return false; +- +- return true; ++ return param == chan->device->dev; + } + + static struct pxa2xx_spi_master * +diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c +index a30d68c4b689..039837db65fc 100644 +--- a/drivers/tty/serial/8250/8250_dw.c ++++ b/drivers/tty/serial/8250/8250_dw.c +@@ -258,7 +258,7 @@ static bool dw8250_fallback_dma_filter(struct dma_chan *chan, void *param) + + static bool dw8250_idma_filter(struct dma_chan *chan, void *param) + { +- return param == chan->device->dev->parent; ++ return param == chan->device->dev; + } + + static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data) +@@ -290,7 +290,7 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data) + data->uart_16550_compatible = true; + } + +- /* Platforms with iDMA */ ++ /* Platforms with iDMA 64-bit */ + if (platform_get_resource_byname(to_platform_device(p->dev), + IORESOURCE_MEM, "lpss_priv")) { + p->set_termios = dw8250_set_termios; +diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c +index 59828d819145..5ad978acd90c 100644 +--- a/drivers/tty/serial/sunhv.c ++++ b/drivers/tty/serial/sunhv.c +@@ -392,7 +392,7 @@ static struct uart_ops sunhv_pops = { + static struct uart_driver sunhv_reg = { + .owner = THIS_MODULE, + .driver_name = "sunhv", +- .dev_name = "ttyS", ++ .dev_name = "ttyHV", + .major = TTY_MAJOR, + }; + +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index 38c7676e7a82..19e819aa2419 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -70,6 +70,9 @@ static const struct usb_device_id usb_quirk_list[] = { + /* Cherry 
Stream G230 2.0 (G85-231) and 3.0 (G85-232) */ + { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME }, + ++ /* Logitech HD Webcam C270 */ ++ { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, ++ + /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ + { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 9f96dd274370..1effe74ec638 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -1166,6 +1166,10 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, ++ { USB_DEVICE(TELIT_VENDOR_ID, 0x1260), ++ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, ++ { USB_DEVICE(TELIT_VENDOR_ID, 0x1261), ++ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */ + .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */ +@@ -1767,6 +1771,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E), + .driver_info = RSVD(5) | RSVD(6) }, + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */ ++ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */ ++ .driver_info = RSVD(7) }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), + .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c +index 9706d214c409..8fd5e19846ef 100644 +--- a/drivers/usb/serial/pl2303.c ++++ b/drivers/usb/serial/pl2303.c +@@ -101,6 +101,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, + { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, + { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, ++ { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) }, + { } /* Terminating entry */ + }; + +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h +index d84c3b3d477b..496cbccbf26c 100644 +--- a/drivers/usb/serial/pl2303.h ++++ b/drivers/usb/serial/pl2303.h +@@ -159,3 +159,6 @@ + #define SMART_VENDOR_ID 0x0b8c + #define SMART_PRODUCT_ID 0x2303 + ++/* Allied Telesis VT-Kit3 */ ++#define AT_VENDOR_ID 0x0caa ++#define AT_VTKIT3_PRODUCT_ID 0x3001 +diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h +index f5fc3271e19c..e2c5491a411b 100644 +--- a/drivers/usb/storage/unusual_realtek.h ++++ b/drivers/usb/storage/unusual_realtek.h +@@ -28,6 +28,11 @@ UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999, + "USB Card Reader", + USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0), + ++UNUSUAL_DEV(0x0bda, 0x0153, 0x0000, 0x9999, ++ "Realtek", ++ "USB Card Reader", ++ USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0), ++ + UNUSUAL_DEV(0x0bda, 0x0158, 0x0000, 0x9999, + "Realtek", + "USB Card Reader", +diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c +index 15d3ccff2965..4a397c7c1b56 100644 +--- a/drivers/video/fbdev/hgafb.c ++++ b/drivers/video/fbdev/hgafb.c +@@ -285,6 +285,8 @@ static int hga_card_detect(void) + 
hga_vram_len = 0x08000; + + hga_vram = ioremap(0xb0000, hga_vram_len); ++ if (!hga_vram) ++ goto error; + + if (request_region(0x3b0, 12, "hgafb")) + release_io_ports = 1; +diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c +index 9b167f7ef6c6..4994a540f680 100644 +--- a/drivers/video/fbdev/imsttfb.c ++++ b/drivers/video/fbdev/imsttfb.c +@@ -1517,6 +1517,11 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + info->fix.smem_start = addr; + info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ? + 0x400000 : 0x800000); ++ if (!info->screen_base) { ++ release_mem_region(addr, size); ++ framebuffer_release(info); ++ return -ENOMEM; ++ } + info->fix.mmio_start = addr + 0x800000; + par->dc_regs = ioremap(addr + 0x800000, 0x1000); + par->cmap_regs_phys = addr + 0x840000; +diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c +index a7a1b218f308..8e709b641b55 100644 +--- a/fs/configfs/dir.c ++++ b/fs/configfs/dir.c +@@ -58,15 +58,13 @@ static void configfs_d_iput(struct dentry * dentry, + if (sd) { + /* Coordinate with configfs_readdir */ + spin_lock(&configfs_dirent_lock); +- /* Coordinate with configfs_attach_attr where will increase +- * sd->s_count and update sd->s_dentry to new allocated one. +- * Only set sd->dentry to null when this dentry is the only +- * sd owner. +- * If not do so, configfs_d_iput may run just after +- * configfs_attach_attr and set sd->s_dentry to null +- * even it's still in use. ++ /* ++ * Set sd->s_dentry to null only when this dentry is the one ++ * that is going to be killed. Otherwise configfs_d_iput may ++ * run just after configfs_attach_attr and set sd->s_dentry to ++ * NULL even it's still in use. + */ +- if (atomic_read(&sd->s_count) <= 2) ++ if (sd->s_dentry == dentry) + sd->s_dentry = NULL; + + spin_unlock(&configfs_dirent_lock); +diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c +index 2878be3e448f..410354c334d7 100644 +--- a/fs/f2fs/recovery.c ++++ b/fs/f2fs/recovery.c +@@ -413,7 +413,15 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, + + get_node_info(sbi, dn.nid, &ni); + f2fs_bug_on(sbi, ni.ino != ino_of_node(page)); +- f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page)); ++ ++ if (ofs_of_node(dn.node_page) != ofs_of_node(page)) { ++ f2fs_msg(sbi->sb, KERN_WARNING, ++ "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u", ++ inode->i_ino, ofs_of_node(dn.node_page), ++ ofs_of_node(page)); ++ err = -EFAULT; ++ goto err; ++ } + + for (; start < end; start++, dn.ofs_in_node++) { + block_t src, dest; +diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h +index 08b08ae6ba9d..f461fecf0e54 100644 +--- a/fs/f2fs/segment.h ++++ b/fs/f2fs/segment.h +@@ -598,7 +598,6 @@ static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr) + static inline int check_block_count(struct f2fs_sb_info *sbi, + int segno, struct f2fs_sit_entry *raw_sit) + { +-#ifdef CONFIG_F2FS_CHECK_FS + bool is_valid = test_bit_le(0, raw_sit->valid_map) ? 
true : false; + int valid_blocks = 0; + int cur_pos = 0, next_pos; +@@ -625,7 +624,7 @@ static inline int check_block_count(struct f2fs_sb_info *sbi, + set_sbi_flag(sbi, SBI_NEED_FSCK); + return -EINVAL; + } +-#endif ++ + /* check segment usage, and check boundary of a given segment number */ + if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg + || segno > TOTAL_SEGS(sbi) - 1)) { +diff --git a/fs/fat/file.c b/fs/fat/file.c +index a08f1039909a..d3f655ae020b 100644 +--- a/fs/fat/file.c ++++ b/fs/fat/file.c +@@ -156,12 +156,17 @@ static int fat_file_release(struct inode *inode, struct file *filp) + int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) + { + struct inode *inode = filp->f_mapping->host; +- int res, err; ++ int err; ++ ++ err = __generic_file_fsync(filp, start, end, datasync); ++ if (err) ++ return err; + +- res = generic_file_fsync(filp, start, end, datasync); + err = sync_mapping_buffers(MSDOS_SB(inode->i_sb)->fat_inode->i_mapping); ++ if (err) ++ return err; + +- return res ? res : err; ++ return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); + } + + +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c +index 341196338e48..f5d2d2340b44 100644 +--- a/fs/fuse/dev.c ++++ b/fs/fuse/dev.c +@@ -1724,7 +1724,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, + offset = outarg->offset & ~PAGE_CACHE_MASK; + file_size = i_size_read(inode); + +- num = outarg->size; ++ num = min(outarg->size, fc->max_write); + if (outarg->offset > file_size) + num = 0; + else if (outarg->offset + num > file_size) +diff --git a/fs/inode.c b/fs/inode.c +index b5c3a6473aaa..00ec6db1cad5 100644 +--- a/fs/inode.c ++++ b/fs/inode.c +@@ -1744,8 +1744,13 @@ int file_remove_privs(struct file *file) + int kill; + int error = 0; + +- /* Fast path for nothing security related */ +- if (IS_NOSEC(inode)) ++ /* ++ * Fast path for nothing security related. ++ * As well for non-regular files, e.g. blkdev inodes. ++ * For example, blkdev_write_iter() might get here ++ * trying to remove privs which it is not allowed to. ++ */ ++ if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode)) + return 0; + + kill = dentry_needs_remove_privs(dentry); +diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h +index fcfc48cbe136..128d6e216fd7 100644 +--- a/fs/nfsd/vfs.h ++++ b/fs/nfsd/vfs.h +@@ -109,8 +109,11 @@ void nfsd_put_raparams(struct file *file, struct raparms *ra); + + static inline int fh_want_write(struct svc_fh *fh) + { +- int ret = mnt_want_write(fh->fh_export->ex_path.mnt); ++ int ret; + ++ if (fh->fh_want_write) ++ return 0; ++ ret = mnt_want_write(fh->fh_export->ex_path.mnt); + if (!ret) + fh->fh_want_write = true; + return ret; +diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c +index 290373024d9d..e8ace3b54e9c 100644 +--- a/fs/ocfs2/dcache.c ++++ b/fs/ocfs2/dcache.c +@@ -310,6 +310,18 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry, + + out_attach: + spin_lock(&dentry_attach_lock); ++ if (unlikely(dentry->d_fsdata && !alias)) { ++ /* d_fsdata is set by a racing thread which is doing ++ * the same thing as this thread is doing. Leave the racing ++ * thread going ahead and we return here. 
++ */ ++ spin_unlock(&dentry_attach_lock); ++ iput(dl->dl_inode); ++ ocfs2_lock_res_free(&dl->dl_lockres); ++ kfree(dl); ++ return 0; ++ } ++ + dentry->d_fsdata = dl; + dl->dl_count++; + spin_unlock(&dentry_attach_lock); +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index 75691a20313c..ad1ccdcef74e 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -947,6 +947,24 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, + continue; + up_read(&mm->mmap_sem); + down_write(&mm->mmap_sem); ++ /* ++ * Avoid to modify vma->vm_flags ++ * without locked ops while the ++ * coredump reads the vm_flags. ++ */ ++ if (!mmget_still_valid(mm)) { ++ /* ++ * Silently return "count" ++ * like if get_task_mm() ++ * failed. FIXME: should this ++ * function have returned ++ * -ESRCH if get_task_mm() ++ * failed like if ++ * get_proc_task() fails? ++ */ ++ up_write(&mm->mmap_sem); ++ goto out_mm; ++ } + for (vma = mm->mmap; vma; vma = vma->vm_next) { + vma->vm_flags &= ~VM_SOFTDIRTY; + vma_set_page_prot(vma); +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c +index e93541282aa1..f187e02d267e 100644 +--- a/fs/userfaultfd.c ++++ b/fs/userfaultfd.c +@@ -446,6 +446,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file) + * taking the mmap_sem for writing. + */ + down_write(&mm->mmap_sem); ++ if (!mmget_still_valid(mm)) ++ goto skip_mm; + prev = NULL; + for (vma = mm->mmap; vma; vma = vma->vm_next) { + cond_resched(); +@@ -468,6 +470,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file) + vma->vm_flags = new_flags; + vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; + } ++skip_mm: + up_write(&mm->mmap_sem); + mmput(mm); + wakeup: +@@ -769,6 +772,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, + goto out; + + down_write(&mm->mmap_sem); ++ if (!mmget_still_valid(mm)) ++ goto out_unlock; + vma = find_vma_prev(mm, start, &prev); + if (!vma) + goto out_unlock; +@@ -914,6 +919,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, + goto out; + + down_write(&mm->mmap_sem); ++ if (!mmget_still_valid(mm)) ++ goto out_unlock; + vma = find_vma_prev(mm, start, &prev); + if (!vma) + goto out_unlock; +diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h +index 210ccc4ea44b..8607c937145f 100644 +--- a/include/linux/cgroup.h ++++ b/include/linux/cgroup.h +@@ -453,7 +453,7 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task, + * + * Find the css for the (@task, @subsys_id) combination, increment a + * reference on and return it. This function is guaranteed to return a +- * valid css. ++ * valid css. The returned css may already have been offlined. + */ + static inline struct cgroup_subsys_state * + task_get_css(struct task_struct *task, int subsys_id) +@@ -463,7 +463,13 @@ task_get_css(struct task_struct *task, int subsys_id) + rcu_read_lock(); + while (true) { + css = task_css(task, subsys_id); +- if (likely(css_tryget_online(css))) ++ /* ++ * Can't use css_tryget_online() here. A task which has ++ * PF_EXITING set may stay associated with an offline css. ++ * If such task calls this function, css_tryget_online() ++ * will keep failing. 
++ */ ++ if (likely(css_tryget(css))) + break; + cpu_relax(); + } +diff --git a/include/linux/mm.h b/include/linux/mm.h +index 251adf4d8a71..ed653ba47c46 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -1098,6 +1098,27 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address, + void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, + unsigned long start, unsigned long end); + ++/* ++ * This has to be called after a get_task_mm()/mmget_not_zero() ++ * followed by taking the mmap_sem for writing before modifying the ++ * vmas or anything the coredump pretends not to change from under it. ++ * ++ * NOTE: find_extend_vma() called from GUP context is the only place ++ * that can modify the "mm" (notably the vm_start/end) under mmap_sem ++ * for reading and outside the context of the process, so it is also ++ * the only case that holds the mmap_sem for reading that must call ++ * this function. Generally if the mmap_sem is hold for reading ++ * there's no need of this check after get_task_mm()/mmget_not_zero(). ++ * ++ * This function can be obsoleted and the check can be removed, after ++ * the coredump code will hold the mmap_sem for writing before ++ * invoking the ->core_dump methods. ++ */ ++static inline bool mmget_still_valid(struct mm_struct *mm) ++{ ++ return likely(!mm->core_state); ++} ++ + /** + * mm_walk - callbacks for walk_page_range + * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry +diff --git a/include/linux/pwm.h b/include/linux/pwm.h +index aa8736d5b2f3..cfc3ed46cad2 100644 +--- a/include/linux/pwm.h ++++ b/include/linux/pwm.h +@@ -331,7 +331,6 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num) + #ifdef CONFIG_PWM_SYSFS + void pwmchip_sysfs_export(struct pwm_chip *chip); + void pwmchip_sysfs_unexport(struct pwm_chip *chip); +-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip); + #else + static inline void pwmchip_sysfs_export(struct pwm_chip *chip) + { +@@ -340,10 +339,6 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip) + static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip) + { + } +- +-static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) +-{ +-} + #endif /* CONFIG_PWM_SYSFS */ + + #endif /* __LINUX_PWM_H */ +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h +index 7c0c83dfe86e..876688b5a356 100644 +--- a/include/net/bluetooth/hci_core.h ++++ b/include/net/bluetooth/hci_core.h +@@ -174,9 +174,6 @@ struct adv_info { + + #define HCI_MAX_SHORT_NAME_LENGTH 10 + +-/* Min encryption key size to match with SMP */ +-#define HCI_MIN_ENC_KEY_SIZE 7 +- + /* Default LE RPA expiry time, 15 minutes */ + #define HCI_DEFAULT_RPA_TIMEOUT (15 * 60) + +diff --git a/ipc/mqueue.c b/ipc/mqueue.c +index 5e24eb0ab5dd..6ed74825ab54 100644 +--- a/ipc/mqueue.c ++++ b/ipc/mqueue.c +@@ -373,7 +373,8 @@ static void mqueue_evict_inode(struct inode *inode) + struct user_struct *user; + unsigned long mq_bytes, mq_treesize; + struct ipc_namespace *ipc_ns; +- struct msg_msg *msg; ++ struct msg_msg *msg, *nmsg; ++ LIST_HEAD(tmp_msg); + + clear_inode(inode); + +@@ -384,10 +385,15 @@ static void mqueue_evict_inode(struct inode *inode) + info = MQUEUE_I(inode); + spin_lock(&info->lock); + while ((msg = msg_get(info)) != NULL) +- free_msg(msg); ++ list_add_tail(&msg->m_list, &tmp_msg); + kfree(info->node_cache); + spin_unlock(&info->lock); + ++ list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) { ++ list_del(&msg->m_list); ++ 
free_msg(msg); ++ } ++ + /* Total amount of bytes accounted for the mqueue */ + mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * +diff --git a/ipc/msgutil.c b/ipc/msgutil.c +index ed81aafd2392..9467307487f7 100644 +--- a/ipc/msgutil.c ++++ b/ipc/msgutil.c +@@ -18,6 +18,7 @@ + #include <linux/utsname.h> + #include <linux/proc_ns.h> + #include <linux/uaccess.h> ++#include <linux/sched.h> + + #include "util.h" + +@@ -66,6 +67,9 @@ static struct msg_msg *alloc_msg(size_t len) + pseg = &msg->next; + while (len > 0) { + struct msg_msgseg *seg; ++ ++ cond_resched(); ++ + alen = min(len, DATALEN_SEG); + seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL); + if (seg == NULL) +@@ -178,6 +182,8 @@ void free_msg(struct msg_msg *msg) + kfree(msg); + while (seg != NULL) { + struct msg_msgseg *tmp = seg->next; ++ ++ cond_resched(); + kfree(seg); + seg = tmp; + } +diff --git a/kernel/cred.c b/kernel/cred.c +index ff8606f77d90..098af0bc0b7e 100644 +--- a/kernel/cred.c ++++ b/kernel/cred.c +@@ -447,6 +447,15 @@ int commit_creds(struct cred *new) + if (task->mm) + set_dumpable(task->mm, suid_dumpable); + task->pdeath_signal = 0; ++ /* ++ * If a task drops privileges and becomes nondumpable, ++ * the dumpability change must become visible before ++ * the credential change; otherwise, a __ptrace_may_access() ++ * racing with this change may be able to attach to a task it ++ * shouldn't be able to attach to (as if the task had dropped ++ * privileges without becoming nondumpable). ++ * Pairs with a read barrier in __ptrace_may_access(). ++ */ + smp_wmb(); + } + +diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c +index 7324d83d6bd8..410f83cad06c 100644 +--- a/kernel/events/ring_buffer.c ++++ b/kernel/events/ring_buffer.c +@@ -49,14 +49,30 @@ static void perf_output_put_handle(struct perf_output_handle *handle) + unsigned long head; + + again: ++ /* ++ * In order to avoid publishing a head value that goes backwards, ++ * we must ensure the load of @rb->head happens after we've ++ * incremented @rb->nest. ++ * ++ * Otherwise we can observe a @rb->head value before one published ++ * by an IRQ/NMI happening between the load and the increment. ++ */ ++ barrier(); + head = local_read(&rb->head); + + /* +- * IRQ/NMI can happen here, which means we can miss a head update. ++ * IRQ/NMI can happen here and advance @rb->head, causing our ++ * load above to be stale. + */ + +- if (!local_dec_and_test(&rb->nest)) ++ /* ++ * If this isn't the outermost nesting, we don't have to update ++ * @rb->user_page->data_head. ++ */ ++ if (local_read(&rb->nest) > 1) { ++ local_dec(&rb->nest); + goto out; ++ } + + /* + * Since the mmap() consumer (userspace) can run on a different CPU: +@@ -88,9 +104,18 @@ again: + rb->user_page->data_head = head; + + /* +- * Now check if we missed an update -- rely on previous implied +- * compiler barriers to force a re-read. ++ * We must publish the head before decrementing the nest count, ++ * otherwise an IRQ/NMI can publish a more recent head value and our ++ * write will (temporarily) publish a stale value. ++ */ ++ barrier(); ++ local_set(&rb->nest, 0); ++ ++ /* ++ * Ensure we decrement @rb->nest before we validate the @rb->head. ++ * Otherwise we cannot be sure we caught the 'last' nested update. 
+ */ ++ barrier(); + if (unlikely(head != local_read(&rb->head))) { + local_inc(&rb->nest); + goto again; +diff --git a/kernel/futex.c b/kernel/futex.c +index ec9df5ba040b..15d850ffbe29 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -593,8 +593,8 @@ again: + * applies. If this is really a shmem page then the page lock + * will prevent unexpected transitions. + */ +- lock_page(page); +- shmem_swizzled = PageSwapCache(page) || page->mapping; ++ lock_page(page_head); ++ shmem_swizzled = PageSwapCache(page_head) || page_head->mapping; + unlock_page(page_head); + put_page(page_head); + +diff --git a/kernel/ptrace.c b/kernel/ptrace.c +index 8303874c2a06..1aa33fe37aa8 100644 +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -292,6 +292,16 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode) + return -EPERM; + ok: + rcu_read_unlock(); ++ /* ++ * If a task drops privileges and becomes nondumpable (through a syscall ++ * like setresuid()) while we are trying to access it, we must ensure ++ * that the dumpability is read after the credentials; otherwise, ++ * we may be able to attach to a task that we shouldn't be able to ++ * attach to (as if the task had dropped privileges without becoming ++ * nondumpable). ++ * Pairs with a write barrier in commit_creds(). ++ */ ++ smp_rmb(); + mm = task->mm; + if (mm && + ((get_dumpable(mm) != SUID_DUMP_USER) && +@@ -673,6 +683,10 @@ static int ptrace_peek_siginfo(struct task_struct *child, + if (arg.nr < 0) + return -EINVAL; + ++ /* Ensure arg.off fits in an unsigned long */ ++ if (arg.off > ULONG_MAX) ++ return 0; ++ + if (arg.flags & PTRACE_PEEKSIGINFO_SHARED) + pending = &child->signal->shared_pending; + else +@@ -680,18 +694,20 @@ static int ptrace_peek_siginfo(struct task_struct *child, + + for (i = 0; i < arg.nr; ) { + siginfo_t info; +- s32 off = arg.off + i; ++ unsigned long off = arg.off + i; ++ bool found = false; + + spin_lock_irq(&child->sighand->siglock); + list_for_each_entry(q, &pending->list, list) { + if (!off--) { ++ found = true; + copy_siginfo(&info, &q->info); + break; + } + } + spin_unlock_irq(&child->sighand->siglock); + +- if (off >= 0) /* beyond the end of the list */ ++ if (!found) /* beyond the end of the list */ + break; + + #ifdef CONFIG_COMPAT +diff --git a/kernel/sys.c b/kernel/sys.c +index e2446ade79ba..1855f1bf113e 100644 +--- a/kernel/sys.c ++++ b/kernel/sys.c +@@ -1762,7 +1762,7 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map) + ((unsigned long)prctl_map->__m1 __op \ + (unsigned long)prctl_map->__m2) ? 
0 : -EINVAL + error = __prctl_check_order(start_code, <, end_code); +- error |= __prctl_check_order(start_data, <, end_data); ++ error |= __prctl_check_order(start_data,<=, end_data); + error |= __prctl_check_order(start_brk, <=, brk); + error |= __prctl_check_order(arg_start, <=, arg_end); + error |= __prctl_check_order(env_start, <=, env_end); +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index c140659db669..24c7fe8608d0 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -2461,8 +2461,10 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int + if (neg) + continue; + val = convmul * val / convdiv; +- if ((min && val < *min) || (max && val > *max)) +- continue; ++ if ((min && val < *min) || (max && val > *max)) { ++ err = -EINVAL; ++ break; ++ } + *i = val; + } else { + val = convdiv * (*i) / convmul; +diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c +index ab861771e37f..0e0dc5d89911 100644 +--- a/kernel/time/ntp.c ++++ b/kernel/time/ntp.c +@@ -633,7 +633,7 @@ static inline void process_adjtimex_modes(struct timex *txc, + time_constant = max(time_constant, 0l); + } + +- if (txc->modes & ADJ_TAI && txc->constant > 0) ++ if (txc->modes & ADJ_TAI && txc->constant >= 0) + *time_tai = txc->constant; + + if (txc->modes & ADJ_OFFSET) +diff --git a/mm/cma.c b/mm/cma.c +index f0d91aca5a4c..5ae4452656cd 100644 +--- a/mm/cma.c ++++ b/mm/cma.c +@@ -100,8 +100,10 @@ static int __init cma_activate_area(struct cma *cma) + + cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL); + +- if (!cma->bitmap) ++ if (!cma->bitmap) { ++ cma->count = 0; + return -ENOMEM; ++ } + + WARN_ON_ONCE(!pfn_valid(pfn)); + zone = page_zone(pfn_to_page(pfn)); +diff --git a/mm/cma_debug.c b/mm/cma_debug.c +index f8e4b60db167..da50dab56b70 100644 +--- a/mm/cma_debug.c ++++ b/mm/cma_debug.c +@@ -57,7 +57,7 @@ static int cma_maxchunk_get(void *data, u64 *val) + mutex_lock(&cma->lock); + for (;;) { + start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end); +- if (start >= cma->count) ++ if (start >= bitmap_maxno) + break; + end = find_next_bit(cma->bitmap, bitmap_maxno, start); + maxchunk = max(end - start, maxchunk); +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index d7f65a8c629b..fd932e7a25dd 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -1221,12 +1221,23 @@ void free_huge_page(struct page *page) + ClearPagePrivate(page); + + /* +- * A return code of zero implies that the subpool will be under its +- * minimum size if the reservation is not restored after page is free. +- * Therefore, force restore_reserve operation. ++ * If PagePrivate() was set on page, page allocation consumed a ++ * reservation. If the page was associated with a subpool, there ++ * would have been a page reserved in the subpool before allocation ++ * via hugepage_subpool_get_pages(). Since we are 'restoring' the ++ * reservtion, do not call hugepage_subpool_put_pages() as this will ++ * remove the reserved page from the subpool. + */ +- if (hugepage_subpool_put_pages(spool, 1) == 0) +- restore_reserve = true; ++ if (!restore_reserve) { ++ /* ++ * A return code of zero implies that the subpool will be ++ * under its minimum size if the reservation is not restored ++ * after page is free. Therefore, force restore_reserve ++ * operation. 
++ */ ++ if (hugepage_subpool_put_pages(spool, 1) == 0) ++ restore_reserve = true; ++ } + + spin_lock(&hugetlb_lock); + clear_page_huge_active(page); +diff --git a/mm/list_lru.c b/mm/list_lru.c +index 732a066e3d3a..4aa714db2fcf 100644 +--- a/mm/list_lru.c ++++ b/mm/list_lru.c +@@ -313,7 +313,7 @@ static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus, + } + return 0; + fail: +- __memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1); ++ __memcg_destroy_list_lru_node(memcg_lrus, begin, i); + return -ENOMEM; + } + +diff --git a/mm/mmap.c b/mm/mmap.c +index baa4c1280bff..a24e42477001 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -42,6 +42,7 @@ + #include <linux/memory.h> + #include <linux/printk.h> + #include <linux/userfaultfd_k.h> ++#include <linux/mm.h> + + #include <asm/uaccess.h> + #include <asm/cacheflush.h> +@@ -2398,7 +2399,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) + vma = find_vma_prev(mm, addr, &prev); + if (vma && (vma->vm_start <= addr)) + return vma; +- if (!prev || expand_stack(prev, addr)) ++ /* don't alter vm_end if the coredump is running */ ++ if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr)) + return NULL; + if (prev->vm_flags & VM_LOCKED) + populate_vma_page_range(prev, addr, prev->vm_end, NULL); +@@ -2424,6 +2426,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) + return vma; + if (!(vma->vm_flags & VM_GROWSDOWN)) + return NULL; ++ /* don't alter vm_start if the coredump is running */ ++ if (!mmget_still_valid(mm)) ++ return NULL; + start = vma->vm_start; + if (expand_stack(vma, addr)) + return NULL; +diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c +index 149f82bd83fd..6ba56f215229 100644 +--- a/net/ax25/ax25_route.c ++++ b/net/ax25/ax25_route.c +@@ -443,9 +443,11 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) + } + + if (ax25->sk != NULL) { ++ local_bh_disable(); + bh_lock_sock(ax25->sk); + sock_reset_flag(ax25->sk, SOCK_ZAPPED); + bh_unlock_sock(ax25->sk); ++ local_bh_enable(); + } + + put: +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c +index 83d4d574fa44..80be0ee17ff3 100644 +--- a/net/bluetooth/hci_conn.c ++++ b/net/bluetooth/hci_conn.c +@@ -1177,14 +1177,6 @@ int hci_conn_check_link_mode(struct hci_conn *conn) + !test_bit(HCI_CONN_ENCRYPT, &conn->flags)) + return 0; + +- /* The minimum encryption key size needs to be enforced by the +- * host stack before establishing any L2CAP connections. The +- * specification in theory allows a minimum of 1, but to align +- * BR/EDR and LE transports, a minimum of 7 is chosen. +- */ +- if (conn->enc_key_size < HCI_MIN_ENC_KEY_SIZE) +- return 0; +- + return 1; + } + +diff --git a/net/core/neighbour.c b/net/core/neighbour.c +index 9d812ba38ff2..8b0908c7e9cc 100644 +--- a/net/core/neighbour.c ++++ b/net/core/neighbour.c +@@ -2705,6 +2705,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos) + } + + void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags) ++ __acquires(tbl->lock) + __acquires(rcu_bh) + { + struct neigh_seq_state *state = seq->private; +@@ -2715,6 +2716,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl + + rcu_read_lock_bh(); + state->nht = rcu_dereference_bh(tbl->nht); ++ read_lock(&tbl->lock); + + return *pos ? 
neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN; + } +@@ -2748,8 +2750,13 @@ out: + EXPORT_SYMBOL(neigh_seq_next); + + void neigh_seq_stop(struct seq_file *seq, void *v) ++ __releases(tbl->lock) + __releases(rcu_bh) + { ++ struct neigh_seq_state *state = seq->private; ++ struct neigh_table *tbl = state->tbl; ++ ++ read_unlock(&tbl->lock); + rcu_read_unlock_bh(); + } + EXPORT_SYMBOL(neigh_seq_stop); +diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c +index c6061f7343f1..8a6c682bfc22 100644 +--- a/net/ipv6/ip6_flowlabel.c ++++ b/net/ipv6/ip6_flowlabel.c +@@ -254,9 +254,9 @@ struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label) + rcu_read_lock_bh(); + for_each_sk_fl_rcu(np, sfl) { + struct ip6_flowlabel *fl = sfl->fl; +- if (fl->label == label) { ++ ++ if (fl->label == label && atomic_inc_not_zero(&fl->users)) { + fl->lastuse = jiffies; +- atomic_inc(&fl->users); + rcu_read_unlock_bh(); + return fl; + } +@@ -622,7 +622,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) + goto done; + } + fl1 = sfl->fl; +- atomic_inc(&fl1->users); ++ if (!atomic_inc_not_zero(&fl1->users)) ++ fl1 = NULL; + break; + } + } +diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c +index fc60d9d738b5..cdb913e7627e 100644 +--- a/net/lapb/lapb_iface.c ++++ b/net/lapb/lapb_iface.c +@@ -182,6 +182,7 @@ int lapb_unregister(struct net_device *dev) + lapb = __lapb_devtostruct(dev); + if (!lapb) + goto out; ++ lapb_put(lapb); + + lapb_stop_t1timer(lapb); + lapb_stop_t2timer(lapb); +diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c +index f04714d70bf7..a42e2ce4a726 100644 +--- a/sound/core/seq/seq_ports.c ++++ b/sound/core/seq/seq_ports.c +@@ -550,10 +550,10 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client, + list_del_init(list); + grp->exclusive = 0; + write_unlock_irq(&grp->list_lock); +- up_write(&grp->list_mutex); + + if (!empty) + unsubscribe_port(client, port, grp, &subs->info, ack); ++ up_write(&grp->list_mutex); + } + + /* connect two ports */ +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 74c9600876d6..ef8955abd918 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -1707,9 +1707,6 @@ static int azx_first_init(struct azx *chip) + chip->msi = 0; + } + +- if (azx_acquire_irq(chip, 0) < 0) +- return -EBUSY; +- + pci_set_master(pci); + synchronize_irq(bus->irq); + +@@ -1820,6 +1817,9 @@ static int azx_first_init(struct azx *chip) + return -ENODEV; + } + ++ if (azx_acquire_irq(chip, 0) < 0) ++ return -EBUSY; ++ + strcpy(card->driver, "HDA-Intel"); + strlcpy(card->shortname, driver_short_names[chip->driver_type], + sizeof(card->shortname)); +diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c +index d562e1b9a5d1..5b079709ec8a 100644 +--- a/sound/soc/codecs/cs42xx8.c ++++ b/sound/soc/codecs/cs42xx8.c +@@ -561,6 +561,7 @@ static int cs42xx8_runtime_resume(struct device *dev) + msleep(5); + + regcache_cache_only(cs42xx8->regmap, false); ++ regcache_mark_dirty(cs42xx8->regmap); + + ret = regcache_sync(cs42xx8->regmap); + if (ret) { |