author | Mike Pagano <mpagano@gentoo.org> | 2022-09-15 06:31:36 -0400 |
---|---|---|
committer | Mike Pagano <mpagano@gentoo.org> | 2022-09-15 06:31:36 -0400 |
commit | 8325c14c96a71a8cea99233517e770f90a444b29 (patch) | |
tree | eae702b1e235c5ac1cfe7787030cfdb9516a0991 | |
parent | Linux patch 5.4.212 (diff) | |
download | linux-patches-8325c14c96a71a8cea99233517e770f90a444b29.tar.gz linux-patches-8325c14c96a71a8cea99233517e770f90a444b29.tar.bz2 linux-patches-8325c14c96a71a8cea99233517e770f90a444b29.zip |
Linux patch 5.4.213 (5.4-218)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4 |
-rw-r--r-- | 1212_linux-5.4.213.patch | 2998 |
2 files changed, 3002 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README index daf9aa4b..1f79a793 100644 --- a/0000_README +++ b/0000_README @@ -891,6 +891,10 @@ Patch: 1211_linux-5.4.212.patch From: http://www.kernel.org Desc: Linux 5.4.212 +Patch: 1212_linux-5.4.213.patch +From: http://www.kernel.org +Desc: Linux 5.4.213 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1212_linux-5.4.213.patch b/1212_linux-5.4.213.patch new file mode 100644 index 00000000..522768b7 --- /dev/null +++ b/1212_linux-5.4.213.patch @@ -0,0 +1,2998 @@ +diff --git a/Makefile b/Makefile +index cecfe23f521f1..4a4c83d2b3f7c 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 212 ++SUBLEVEL = 213 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi +index eea317b41020d..5e454a694b78a 100644 +--- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi ++++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi +@@ -51,16 +51,6 @@ + vin-supply = <®_3p3v_s5>; + }; + +- reg_3p3v_s0: regulator-3p3v-s0 { +- compatible = "regulator-fixed"; +- regulator-name = "V_3V3_S0"; +- regulator-min-microvolt = <3300000>; +- regulator-max-microvolt = <3300000>; +- regulator-always-on; +- regulator-boot-on; +- vin-supply = <®_3p3v_s5>; +- }; +- + reg_3p3v_s5: regulator-3p3v-s5 { + compatible = "regulator-fixed"; + regulator-name = "V_3V3_S5"; +diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c +index 587543c6c51cb..97c42be71338a 100644 +--- a/arch/arm64/kernel/cacheinfo.c ++++ b/arch/arm64/kernel/cacheinfo.c +@@ -45,7 +45,8 @@ static void ci_leaf_init(struct cacheinfo *this_leaf, + + int init_cache_level(unsigned int cpu) + { +- unsigned int ctype, level, leaves, fw_level; ++ unsigned int ctype, level, leaves; ++ int fw_level; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + + for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) { +@@ -63,6 +64,9 @@ int init_cache_level(unsigned int cpu) + else + fw_level = acpi_find_last_cache_level(cpu); + ++ if (fw_level < 0) ++ return fw_level; ++ + if (level < fw_level) { + /* + * some external caches not specified in CLIDR_EL1 +diff --git a/arch/mips/loongson32/ls1c/board.c b/arch/mips/loongson32/ls1c/board.c +index e9de6da0ce51f..9dcfe9de55b0a 100644 +--- a/arch/mips/loongson32/ls1c/board.c ++++ b/arch/mips/loongson32/ls1c/board.c +@@ -15,7 +15,6 @@ static struct platform_device *ls1c_platform_devices[] __initdata = { + static int __init ls1c_platform_init(void) + { + ls1x_serial_set_uartclk(&ls1x_uart_pdev); +- ls1x_rtc_set_extclk(&ls1x_rtc_pdev); + + return platform_add_devices(ls1c_platform_devices, + ARRAY_SIZE(ls1c_platform_devices)); +diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S +index 951a339369dd5..b59a0c3d36921 100644 +--- a/arch/parisc/kernel/head.S ++++ b/arch/parisc/kernel/head.S +@@ -22,7 +22,7 @@ + #include <linux/linkage.h> + #include <linux/init.h> + +- .level PA_ASM_LEVEL ++ .level 1.1 + + __INITDATA + ENTRY(boot_args) +@@ -69,6 +69,47 @@ $bss_loop: + stw,ma %arg2,4(%r1) + stw,ma %arg3,4(%r1) + ++#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20) ++ /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU ++ * and halt kernel if we detect a PA1.x CPU. 
*/ ++ ldi 32,%r10 ++ mtctl %r10,%cr11 ++ .level 2.0 ++ mfctl,w %cr11,%r10 ++ .level 1.1 ++ comib,<>,n 0,%r10,$cpu_ok ++ ++ load32 PA(msg1),%arg0 ++ ldi msg1_end-msg1,%arg1 ++$iodc_panic: ++ copy %arg0, %r10 ++ copy %arg1, %r11 ++ load32 PA(init_stack),%sp ++#define MEM_CONS 0x3A0 ++ ldw MEM_CONS+32(%r0),%arg0 // HPA ++ ldi ENTRY_IO_COUT,%arg1 ++ ldw MEM_CONS+36(%r0),%arg2 // SPA ++ ldw MEM_CONS+8(%r0),%arg3 // layers ++ load32 PA(__bss_start),%r1 ++ stw %r1,-52(%sp) // arg4 ++ stw %r0,-56(%sp) // arg5 ++ stw %r10,-60(%sp) // arg6 = ptr to text ++ stw %r11,-64(%sp) // arg7 = len ++ stw %r0,-68(%sp) // arg8 ++ load32 PA(.iodc_panic_ret), %rp ++ ldw MEM_CONS+40(%r0),%r1 // ENTRY_IODC ++ bv,n (%r1) ++.iodc_panic_ret: ++ b . /* wait endless with ... */ ++ or %r10,%r10,%r10 /* qemu idle sleep */ ++msg1: .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n" ++msg1_end: ++ ++$cpu_ok: ++#endif ++ ++ .level PA_ASM_LEVEL ++ + /* Initialize startup VM. Just map first 16/32 MB of memory */ + load32 PA(swapper_pg_dir),%r4 + mtctl %r4,%cr24 /* Initialize kernel root pointer */ +diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S +index 5b905a2f4e4df..4a8f3526f5a53 100644 +--- a/arch/powerpc/kernel/systbl.S ++++ b/arch/powerpc/kernel/systbl.S +@@ -25,6 +25,7 @@ sys_call_table: + #include <asm/syscall_table_64.h> + #undef __SYSCALL + #else ++ .p2align 2 + #define __SYSCALL(nr, entry) .long entry + #include <asm/syscall_table_32.h> + #undef __SYSCALL +diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h +index de8f0bf5f238c..487725e49b961 100644 +--- a/arch/s390/include/asm/hugetlb.h ++++ b/arch/s390/include/asm/hugetlb.h +@@ -35,9 +35,11 @@ static inline bool is_hugepage_only_range(struct mm_struct *mm, + static inline int prepare_hugepage_range(struct file *file, + unsigned long addr, unsigned long len) + { +- if (len & ~HPAGE_MASK) ++ struct hstate *h = hstate_file(file); ++ ++ if (len & ~huge_page_mask(h)) + return -EINVAL; +- if (addr & ~HPAGE_MASK) ++ if (addr & ~huge_page_mask(h)) + return -EINVAL; + return 0; + } +diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S +index 7e0eb40209177..4df41695caec8 100644 +--- a/arch/s390/kernel/vmlinux.lds.S ++++ b/arch/s390/kernel/vmlinux.lds.S +@@ -124,6 +124,7 @@ SECTIONS + /* + * Table with the patch locations to undo expolines + */ ++ . = ALIGN(4); + .nospec_call_table : { + __nospec_call_start = . ; + *(.s390_indirect*) +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +index 1e5df3ccdd5cb..a1ee1a760c3eb 100644 +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -44,6 +44,7 @@ + * the optimal version — two calls, each with their own speculation + * trap should their return address end up getting used, in a loop. + */ ++#ifdef CONFIG_X86_64 + #define __FILL_RETURN_BUFFER(reg, nr, sp) \ + mov $(nr/2), reg; \ + 771: \ +@@ -64,6 +65,19 @@ + add $(BITS_PER_LONG/8) * nr, sp; \ + /* barrier for jnz misprediction */ \ + lfence; ++#else ++/* ++ * i386 doesn't unconditionally have LFENCE, as such it can't ++ * do a loop. 
++ */ ++#define __FILL_RETURN_BUFFER(reg, nr, sp) \ ++ .rept nr; \ ++ call 772f; \ ++ int3; \ ++772:; \ ++ .endr; \ ++ add $(BITS_PER_LONG/8) * nr, sp; ++#endif + + #define __ISSUE_UNBALANCED_RET_GUARD(sp) \ + call 881f; \ +diff --git a/drivers/android/binder.c b/drivers/android/binder.c +index c273d0df69394..807ee97254795 100644 +--- a/drivers/android/binder.c ++++ b/drivers/android/binder.c +@@ -1748,6 +1748,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc, + } + ret = binder_inc_ref_olocked(ref, strong, target_list); + *rdata = ref->data; ++ if (ret && ref == new_ref) { ++ /* ++ * Cleanup the failed reference here as the target ++ * could now be dead and have already released its ++ * references by now. Calling on the new reference ++ * with strong=0 and a tmp_refs will not decrement ++ * the node. The new_ref gets kfree'd below. ++ */ ++ binder_cleanup_ref_olocked(new_ref); ++ ref = NULL; ++ } ++ + binder_proc_unlock(proc); + if (new_ref && ref != new_ref) + /* +diff --git a/drivers/base/dd.c b/drivers/base/dd.c +index 4e45c87ed1778..10063d8a1b7d4 100644 +--- a/drivers/base/dd.c ++++ b/drivers/base/dd.c +@@ -818,6 +818,11 @@ static int __device_attach_driver(struct device_driver *drv, void *_data) + } else if (ret == -EPROBE_DEFER) { + dev_dbg(dev, "Device match requests probe deferral\n"); + driver_deferred_probe_add(dev); ++ /* ++ * Device can't match with a driver right now, so don't attempt ++ * to match or bind with other drivers on the bus. ++ */ ++ return ret; + } else if (ret < 0) { + dev_dbg(dev, "Bus failed to match device: %d", ret); + return ret; +@@ -1057,6 +1062,11 @@ static int __driver_attach(struct device *dev, void *data) + } else if (ret == -EPROBE_DEFER) { + dev_dbg(dev, "Device match requests probe deferral\n"); + driver_deferred_probe_add(dev); ++ /* ++ * Driver could not match with device, but may match with ++ * another device on the bus. ++ */ ++ return 0; + } else if (ret < 0) { + dev_dbg(dev, "Bus failed to match device: %d", ret); + return ret; +diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c +index 1654fd0eedc94..a790a8ca02ff4 100644 +--- a/drivers/clk/bcm/clk-raspberrypi.c ++++ b/drivers/clk/bcm/clk-raspberrypi.c +@@ -113,7 +113,7 @@ static unsigned long raspberrypi_fw_pll_get_rate(struct clk_hw *hw, + RPI_FIRMWARE_ARM_CLK_ID, + &val); + if (ret) +- return ret; ++ return 0; + + return val * RPI_FIRMWARE_PLLB_ARM_DIV_RATE; + } +diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c +index 13332f89e034b..c002f83adf573 100644 +--- a/drivers/clk/clk.c ++++ b/drivers/clk/clk.c +@@ -852,10 +852,9 @@ static void clk_core_unprepare(struct clk_core *core) + if (core->ops->unprepare) + core->ops->unprepare(core->hw); + +- clk_pm_runtime_put(core); +- + trace_clk_unprepare_complete(core); + clk_core_unprepare(core->parent); ++ clk_pm_runtime_put(core); + } + + static void clk_core_unprepare_lock(struct clk_core *core) +diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c +index b1395133389ed..b82cc8beac671 100644 +--- a/drivers/firmware/efi/capsule-loader.c ++++ b/drivers/firmware/efi/capsule-loader.c +@@ -241,29 +241,6 @@ failed: + return ret; + } + +-/** +- * efi_capsule_flush - called by file close or file flush +- * @file: file pointer +- * @id: not used +- * +- * If a capsule is being partially uploaded then calling this function +- * will be treated as upload termination and will free those completed +- * buffer pages and -ECANCELED will be returned. 
+- **/ +-static int efi_capsule_flush(struct file *file, fl_owner_t id) +-{ +- int ret = 0; +- struct capsule_info *cap_info = file->private_data; +- +- if (cap_info->index > 0) { +- pr_err("capsule upload not complete\n"); +- efi_free_all_buff_pages(cap_info); +- ret = -ECANCELED; +- } +- +- return ret; +-} +- + /** + * efi_capsule_release - called by file close + * @inode: not used +@@ -276,6 +253,13 @@ static int efi_capsule_release(struct inode *inode, struct file *file) + { + struct capsule_info *cap_info = file->private_data; + ++ if (cap_info->index > 0 && ++ (cap_info->header.headersize == 0 || ++ cap_info->count < cap_info->total_size)) { ++ pr_err("capsule upload not complete\n"); ++ efi_free_all_buff_pages(cap_info); ++ } ++ + kfree(cap_info->pages); + kfree(cap_info->phys); + kfree(file->private_data); +@@ -323,7 +307,6 @@ static const struct file_operations efi_capsule_fops = { + .owner = THIS_MODULE, + .open = efi_capsule_open, + .write = efi_capsule_write, +- .flush = efi_capsule_flush, + .release = efi_capsule_release, + .llseek = no_llseek, + }; +diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c +index 317f54f19477e..c81d73d5e0159 100644 +--- a/drivers/gpio/gpio-pca953x.c ++++ b/drivers/gpio/gpio-pca953x.c +@@ -1198,7 +1198,9 @@ static int pca953x_suspend(struct device *dev) + { + struct pca953x_chip *chip = dev_get_drvdata(dev); + ++ mutex_lock(&chip->i2c_lock); + regcache_cache_only(chip->regmap, true); ++ mutex_unlock(&chip->i2c_lock); + + if (atomic_read(&chip->wakeup_path)) + device_set_wakeup_path(dev); +@@ -1221,13 +1223,17 @@ static int pca953x_resume(struct device *dev) + } + } + ++ mutex_lock(&chip->i2c_lock); + regcache_cache_only(chip->regmap, false); + regcache_mark_dirty(chip->regmap); + ret = pca953x_regcache_sync(dev); +- if (ret) ++ if (ret) { ++ mutex_unlock(&chip->i2c_lock); + return ret; ++ } + + ret = regcache_sync(chip->regmap); ++ mutex_unlock(&chip->i2c_lock); + if (ret) { + dev_err(dev, "Failed to restore register map: %d\n", ret); + return ret; +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +index 5906a8951a6c6..685a2df01d096 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +@@ -2472,7 +2472,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) + + gfx_v9_0_tiling_mode_table_init(adev); + +- gfx_v9_0_setup_rb(adev); ++ if (adev->gfx.num_gfx_rings) ++ gfx_v9_0_setup_rb(adev); + gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info); + adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2); + +diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +index 641f1258f08dc..e60157fe7a7bf 100644 +--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +@@ -182,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); + WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); + ++ tmp = mmVM_L2_CNTL3_DEFAULT; + if (adev->gmc.translate_further) { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, +diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c +index 25a2d80287d67..d6a72f3cb1fbd 100644 +--- a/drivers/gpu/drm/drm_gem.c ++++ b/drivers/gpu/drm/drm_gem.c +@@ -167,21 +167,6 @@ void drm_gem_private_object_init(struct drm_device *dev, + } + EXPORT_SYMBOL(drm_gem_private_object_init); + +-static void 
+-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) +-{ +- /* +- * Note: obj->dma_buf can't disappear as long as we still hold a +- * handle reference in obj->handle_count. +- */ +- mutex_lock(&filp->prime.lock); +- if (obj->dma_buf) { +- drm_prime_remove_buf_handle_locked(&filp->prime, +- obj->dma_buf); +- } +- mutex_unlock(&filp->prime.lock); +-} +- + /** + * drm_gem_object_handle_free - release resources bound to userspace handles + * @obj: GEM object to clean up. +@@ -255,7 +240,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) + else if (dev->driver->gem_close_object) + dev->driver->gem_close_object(obj, file_priv); + +- drm_gem_remove_prime_handles(obj, file_priv); ++ drm_prime_remove_buf_handle(&file_priv->prime, id); + drm_vma_node_revoke(&obj->vma_node, file_priv); + + drm_gem_object_handle_put_unlocked(obj); +diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h +index 51a2055c8f18a..41a9a9bae5848 100644 +--- a/drivers/gpu/drm/drm_internal.h ++++ b/drivers/gpu/drm/drm_internal.h +@@ -59,8 +59,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, + + void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); + void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); +-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, +- struct dma_buf *dma_buf); ++void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, ++ uint32_t handle); + + /* drm_drv.c */ + struct drm_minor *drm_minor_acquire(unsigned int minor_id); +diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c +index 0a2316e0e8121..6b7cf0170f9d1 100644 +--- a/drivers/gpu/drm/drm_prime.c ++++ b/drivers/gpu/drm/drm_prime.c +@@ -187,29 +187,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri + return -ENOENT; + } + +-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, +- struct dma_buf *dma_buf) ++void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, ++ uint32_t handle) + { + struct rb_node *rb; + +- rb = prime_fpriv->dmabufs.rb_node; ++ mutex_lock(&prime_fpriv->lock); ++ ++ rb = prime_fpriv->handles.rb_node; + while (rb) { + struct drm_prime_member *member; + +- member = rb_entry(rb, struct drm_prime_member, dmabuf_rb); +- if (member->dma_buf == dma_buf) { ++ member = rb_entry(rb, struct drm_prime_member, handle_rb); ++ if (member->handle == handle) { + rb_erase(&member->handle_rb, &prime_fpriv->handles); + rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs); + +- dma_buf_put(dma_buf); ++ dma_buf_put(member->dma_buf); + kfree(member); +- return; +- } else if (member->dma_buf < dma_buf) { ++ break; ++ } else if (member->handle < handle) { + rb = rb->rb_right; + } else { + rb = rb->rb_left; + } + } ++ ++ mutex_unlock(&prime_fpriv->lock); + } + + void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv) +diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c +index 399b1542509f7..d79314992adad 100644 +--- a/drivers/gpu/drm/i915/display/intel_quirks.c ++++ b/drivers/gpu/drm/i915/display/intel_quirks.c +@@ -146,6 +146,9 @@ static struct intel_quirk intel_quirks[] = { + /* ASRock ITX*/ + { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, + { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, ++ /* ECS Liva Q2 */ ++ { 0x3185, 0x1019, 0xa94d, 
quirk_increase_ddi_disabled_time }, ++ { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time }, + }; + + void intel_init_quirks(struct drm_i915_private *i915) +diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c +index 245c20d36f1b2..45ffccdcd50e7 100644 +--- a/drivers/gpu/drm/i915/gvt/handlers.c ++++ b/drivers/gpu/drm/i915/gvt/handlers.c +@@ -654,7 +654,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu, + else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX) + index = FDI_RX_IMR_TO_PIPE(offset); + else { +- gvt_vgpu_err("Unsupport registers %x\n", offset); ++ gvt_vgpu_err("Unsupported registers %x\n", offset); + return -EINVAL; + } + +diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c +index b7b7c1a9164ab..726c881394576 100644 +--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c ++++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c +@@ -97,7 +97,7 @@ static const char * const dsi_8996_bus_clk_names[] = { + static const struct msm_dsi_config msm8996_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .reg_cfg = { +- .num = 2, ++ .num = 3, + .regs = { + {"vdda", 18160, 1 }, /* 1.25 V */ + {"vcca", 17000, 32 }, /* 0.925 V */ +diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +index 60d50643d0b5c..08a95c3a94444 100644 +--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c ++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +@@ -347,7 +347,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, + } else { + timing->shared_timings.clk_pre = + linear_inter(tmax, tmin, pcnt2, 0, false); +- timing->shared_timings.clk_pre_inc_by_2 = 0; ++ timing->shared_timings.clk_pre_inc_by_2 = 0; + } + + timing->ta_go = 3; +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c +index 5d017f0aec665..e892582e847b5 100644 +--- a/drivers/gpu/drm/radeon/radeon_device.c ++++ b/drivers/gpu/drm/radeon/radeon_device.c +@@ -1623,6 +1623,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, + if (r) { + /* delay GPU reset to resume */ + radeon_fence_driver_force_completion(rdev, i); ++ } else { ++ /* finish executing delayed work */ ++ flush_delayed_work(&rdev->fence_drv[i].lockup_work); + } + } + +diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c +index 3ea4021f267cf..d96e435cc42b1 100644 +--- a/drivers/hwmon/gpio-fan.c ++++ b/drivers/hwmon/gpio-fan.c +@@ -391,6 +391,9 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev, + if (!fan_data) + return -EINVAL; + ++ if (state >= fan_data->num_speed) ++ return -EINVAL; ++ + set_fan_speed(fan_data, state); + return 0; + } +diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c +index dd52f08ec82e2..4e2e8e819b1e7 100644 +--- a/drivers/iio/adc/mcp3911.c ++++ b/drivers/iio/adc/mcp3911.c +@@ -38,8 +38,8 @@ + #define MCP3911_CHANNEL(x) (MCP3911_REG_CHANNEL0 + x * 3) + #define MCP3911_OFFCAL(x) (MCP3911_REG_OFFCAL_CH0 + x * 6) + +-/* Internal voltage reference in uV */ +-#define MCP3911_INT_VREF_UV 1200000 ++/* Internal voltage reference in mV */ ++#define MCP3911_INT_VREF_MV 1200 + + #define MCP3911_REG_READ(reg, id) ((((reg) << 1) | ((id) << 5) | (1 << 0)) & 0xff) + #define MCP3911_REG_WRITE(reg, id) ((((reg) << 1) | ((id) << 5) | (0 << 0)) & 0xff) +@@ -111,6 +111,8 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev, + if (ret) + goto out; + ++ *val = sign_extend32(*val, 23); ++ + ret = IIO_VAL_INT; + break; + +@@ -135,11 +137,18 @@ static int mcp3911_read_raw(struct iio_dev 
*indio_dev, + + *val = ret / 1000; + } else { +- *val = MCP3911_INT_VREF_UV; ++ *val = MCP3911_INT_VREF_MV; + } + +- *val2 = 24; +- ret = IIO_VAL_FRACTIONAL_LOG2; ++ /* ++ * For 24bit Conversion ++ * Raw = ((Voltage)/(Vref) * 2^23 * Gain * 1.5 ++ * Voltage = Raw * (Vref)/(2^23 * Gain * 1.5) ++ */ ++ ++ /* val2 = (2^23 * 1.5) */ ++ *val2 = 12582912; ++ ret = IIO_VAL_FRACTIONAL; + break; + } + +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index de7df5ab06f3b..cf174aa7fe25b 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -1719,8 +1719,8 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id, + } + + if (!validate_net_dev(*net_dev, +- (struct sockaddr *)&req->listen_addr_storage, +- (struct sockaddr *)&req->src_addr_storage)) { ++ (struct sockaddr *)&req->src_addr_storage, ++ (struct sockaddr *)&req->listen_addr_storage)) { + id_priv = ERR_PTR(-EHOSTUNREACH); + goto err; + } +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +index 76a14db7028dd..b9ab3ca3079c7 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +@@ -89,7 +89,7 @@ + #define HNS_ROCE_V2_SCCC_ENTRY_SZ 32 + #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE + #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE +-#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000 ++#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000 + #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2 + #define HNS_ROCE_INVALID_LKEY 0x100 + #define HNS_ROCE_CMQ_TX_TIMEOUT 30000 +diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c +index 348c1df69cdc6..3897a3ce02ad0 100644 +--- a/drivers/infiniband/hw/mlx5/mad.c ++++ b/drivers/infiniband/hw/mlx5/mad.c +@@ -219,6 +219,12 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num, + mdev = dev->mdev; + mdev_port_num = 1; + } ++ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) { ++ /* set local port to one for Function-Per-Port HCA. */ ++ mdev = dev->mdev; ++ mdev_port_num = 1; ++ } ++ + /* Declaring support of extended counters */ + if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) { + struct ib_class_port_info cpi = {}; +diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c +index 424918eb1cd4a..5e6d96bd2eb12 100644 +--- a/drivers/infiniband/sw/siw/siw_qp_tx.c ++++ b/drivers/infiniband/sw/siw/siw_qp_tx.c +@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx) + dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); + + if (paddr) +- return virt_to_page(paddr); ++ return virt_to_page((void *)paddr); + + return NULL; + } +@@ -523,13 +523,23 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) + kunmap(p); + } + } else { +- u64 va = sge->laddr + sge_off; ++ /* ++ * Cast to an uintptr_t to preserve all 64 bits ++ * in sge->laddr. ++ */ ++ uintptr_t va = (uintptr_t)(sge->laddr + sge_off); + +- page_array[seg] = virt_to_page(va & PAGE_MASK); ++ /* ++ * virt_to_page() takes a (void *) pointer ++ * so cast to a (void *) meaning it will be 64 ++ * bits on a 64 bit platform and 32 bits on a ++ * 32 bit platform. 
++ */ ++ page_array[seg] = virt_to_page((void *)(va & PAGE_MASK)); + if (do_crc) + crypto_shash_update( + c_tx->mpa_crc_hd, +- (void *)(uintptr_t)va, ++ (void *)va, + plen); + } + +diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c +index f95a81b9fac72..2380546d79782 100644 +--- a/drivers/input/joystick/iforce/iforce-serio.c ++++ b/drivers/input/joystick/iforce/iforce-serio.c +@@ -39,7 +39,7 @@ static void iforce_serio_xmit(struct iforce *iforce) + + again: + if (iforce->xmit.head == iforce->xmit.tail) { +- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); ++ iforce_clear_xmit_and_wake(iforce); + spin_unlock_irqrestore(&iforce->xmit_lock, flags); + return; + } +@@ -64,7 +64,7 @@ again: + if (test_and_clear_bit(IFORCE_XMIT_AGAIN, iforce->xmit_flags)) + goto again; + +- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); ++ iforce_clear_xmit_and_wake(iforce); + + spin_unlock_irqrestore(&iforce->xmit_lock, flags); + } +@@ -169,7 +169,7 @@ static irqreturn_t iforce_serio_irq(struct serio *serio, + iforce_serio->cmd_response_len = iforce_serio->len; + + /* Signal that command is done */ +- wake_up(&iforce->wait); ++ wake_up_all(&iforce->wait); + } else if (likely(iforce->type)) { + iforce_process_packet(iforce, iforce_serio->id, + iforce_serio->data_in, +diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c +index ea58805c480fa..cba92bd590a8d 100644 +--- a/drivers/input/joystick/iforce/iforce-usb.c ++++ b/drivers/input/joystick/iforce/iforce-usb.c +@@ -30,7 +30,7 @@ static void __iforce_usb_xmit(struct iforce *iforce) + spin_lock_irqsave(&iforce->xmit_lock, flags); + + if (iforce->xmit.head == iforce->xmit.tail) { +- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); ++ iforce_clear_xmit_and_wake(iforce); + spin_unlock_irqrestore(&iforce->xmit_lock, flags); + return; + } +@@ -58,9 +58,9 @@ static void __iforce_usb_xmit(struct iforce *iforce) + XMIT_INC(iforce->xmit.tail, n); + + if ( (n=usb_submit_urb(iforce_usb->out, GFP_ATOMIC)) ) { +- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); + dev_warn(&iforce_usb->intf->dev, + "usb_submit_urb failed %d\n", n); ++ iforce_clear_xmit_and_wake(iforce); + } + + /* The IFORCE_XMIT_RUNNING bit is not cleared here. That's intended. 
+@@ -175,15 +175,15 @@ static void iforce_usb_out(struct urb *urb) + struct iforce *iforce = &iforce_usb->iforce; + + if (urb->status) { +- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); + dev_dbg(&iforce_usb->intf->dev, "urb->status %d, exiting\n", + urb->status); ++ iforce_clear_xmit_and_wake(iforce); + return; + } + + __iforce_usb_xmit(iforce); + +- wake_up(&iforce->wait); ++ wake_up_all(&iforce->wait); + } + + static int iforce_usb_probe(struct usb_interface *intf, +diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h +index 6aa761ebbdf77..9ccb9107ccbef 100644 +--- a/drivers/input/joystick/iforce/iforce.h ++++ b/drivers/input/joystick/iforce/iforce.h +@@ -119,6 +119,12 @@ static inline int iforce_get_id_packet(struct iforce *iforce, u8 id, + response_data, response_len); + } + ++static inline void iforce_clear_xmit_and_wake(struct iforce *iforce) ++{ ++ clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); ++ wake_up_all(&iforce->wait); ++} ++ + /* Public functions */ + /* iforce-main.c */ + int iforce_init_device(struct device *parent, u16 bustype, +diff --git a/drivers/input/misc/rk805-pwrkey.c b/drivers/input/misc/rk805-pwrkey.c +index 3fb64dbda1a21..76873aa005b41 100644 +--- a/drivers/input/misc/rk805-pwrkey.c ++++ b/drivers/input/misc/rk805-pwrkey.c +@@ -98,6 +98,7 @@ static struct platform_driver rk805_pwrkey_driver = { + }; + module_platform_driver(rk805_pwrkey_driver); + ++MODULE_ALIAS("platform:rk805-pwrkey"); + MODULE_AUTHOR("Joseph Chen <chenjh@rock-chips.com>"); + MODULE_DESCRIPTION("RK805 PMIC Power Key driver"); + MODULE_LICENSE("GPL"); +diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c +index fb5ddf3864fdd..5812ef3345feb 100644 +--- a/drivers/misc/fastrpc.c ++++ b/drivers/misc/fastrpc.c +@@ -1357,7 +1357,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev) + of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions); + + spin_lock_irqsave(&cctx->lock, flags); +- sess = &cctx->session[cctx->sesscount]; ++ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) { ++ dev_err(&pdev->dev, "too many sessions\n"); ++ spin_unlock_irqrestore(&cctx->lock, flags); ++ return -ENOSPC; ++ } ++ sess = &cctx->session[cctx->sesscount++]; + sess->used = false; + sess->valid = true; + sess->dev = dev; +@@ -1370,13 +1375,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev) + struct fastrpc_session_ctx *dup_sess; + + for (i = 1; i < sessions; i++) { +- if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS) ++ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) + break; +- dup_sess = &cctx->session[cctx->sesscount]; ++ dup_sess = &cctx->session[cctx->sesscount++]; + memcpy(dup_sess, sess, sizeof(*dup_sess)); + } + } +- cctx->sesscount++; + spin_unlock_irqrestore(&cctx->lock, flags); + rc = dma_set_mask(dev, DMA_BIT_MASK(32)); + if (rc) { +diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c +index 5706abb3c0eaa..10125b02d1543 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_client.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_client.c +@@ -178,6 +178,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset) + "Cannot locate client instance close routine\n"); + return; + } ++ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { ++ dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n"); ++ return; ++ } + cdev->client->ops->close(&cdev->lan_info, cdev->client, reset); + clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); + 
i40e_client_release_qvlist(&cdev->lan_info); +@@ -376,7 +380,6 @@ void i40e_client_subtask(struct i40e_pf *pf) + /* Remove failed client instance */ + clear_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state); +- i40e_client_del_instance(pf); + return; + } + } +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c +index 4a3baa7e01424..0eec05d905eb0 100644 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c +@@ -700,10 +700,10 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) + + void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name) + { +- struct dentry *mvpp2_dir, *mvpp2_root; ++ static struct dentry *mvpp2_root; ++ struct dentry *mvpp2_dir; + int ret, i; + +- mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL); + if (!mvpp2_root) + mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL); + +diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c +index 8157666209798..e4d919de7e3fc 100644 +--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c ++++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c +@@ -1273,7 +1273,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port, + bool removing; + int err = 0; + +- entry = kzalloc(sizeof(*entry), GFP_KERNEL); ++ entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return -ENOMEM; + +diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c +index 5945ac5f38eea..cb29da961e12b 100644 +--- a/drivers/net/ieee802154/adf7242.c ++++ b/drivers/net/ieee802154/adf7242.c +@@ -1310,10 +1310,11 @@ static int adf7242_remove(struct spi_device *spi) + + debugfs_remove_recursive(lp->debugfs_root); + ++ ieee802154_unregister_hw(lp->hw); ++ + cancel_delayed_work_sync(&lp->work); + destroy_workqueue(lp->wqueue); + +- ieee802154_unregister_hw(lp->hw); + mutex_destroy(&lp->bmux); + ieee802154_free_hw(lp->hw); + +diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c +index ae17d2f9d5347..cc1522550f2c4 100644 +--- a/drivers/net/phy/dp83822.c ++++ b/drivers/net/phy/dp83822.c +@@ -198,7 +198,6 @@ static int dp83822_config_intr(struct phy_device *phydev) + return misr_status; + + misr_status |= (DP83822_RX_ERR_HF_INT_EN | +- DP83822_FALSE_CARRIER_HF_INT_EN | + DP83822_ANEG_COMPLETE_INT_EN | + DP83822_DUP_MODE_CHANGE_INT_EN | + DP83822_SPEED_CHANGED_INT_EN | +diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c +index ce891ac32388f..b79a8aeab4c79 100644 +--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c ++++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c +@@ -2403,7 +2403,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, + /* Repeat initial/next rate. + * For legacy IL_NUMBER_TRY == 1, this loop will not execute. + * For HT IL_HT_NUMBER_TRY == 3, this executes twice. 
*/ +- while (repeat_rate > 0) { ++ while (repeat_rate > 0 && idx < (LINK_QUAL_MAX_RETRY_NUM - 1)) { + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) + ant_toggle_cnt++; +@@ -2422,8 +2422,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, + cpu_to_le32(new_rate); + repeat_rate--; + idx++; +- if (idx >= LINK_QUAL_MAX_RETRY_NUM) +- goto out; + } + + il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, +@@ -2468,7 +2466,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, + repeat_rate--; + } + +-out: + lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; + lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c +index 2a27ac9aedbaa..3169859cd3906 100644 +--- a/drivers/nvme/host/tcp.c ++++ b/drivers/nvme/host/tcp.c +@@ -1074,7 +1074,7 @@ static void nvme_tcp_io_work(struct work_struct *w) + if (result > 0) + pending = true; + +- if (!pending) ++ if (!pending || !queue->rd_enabled) + return; + + } while (!time_after(jiffies, deadline)); /* quota is exhausted */ +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c +index ee81d94fe810c..ff206faae775c 100644 +--- a/drivers/nvme/target/core.c ++++ b/drivers/nvme/target/core.c +@@ -709,6 +709,8 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status) + + static void __nvmet_req_complete(struct nvmet_req *req, u16 status) + { ++ struct nvmet_ns *ns = req->ns; ++ + if (!req->sq->sqhd_disabled) + nvmet_update_sq_head(req); + req->cqe->sq_id = cpu_to_le16(req->sq->qid); +@@ -719,9 +721,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) + + trace_nvmet_req_complete(req); + +- if (req->ns) +- nvmet_put_namespace(req->ns); + req->ops->queue_response(req); ++ if (ns) ++ nvmet_put_namespace(ns); + } + + void nvmet_req_complete(struct nvmet_req *req, u16 status) +diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c +index 5013568c571e5..6209d58e9492a 100644 +--- a/drivers/parisc/ccio-dma.c ++++ b/drivers/parisc/ccio-dma.c +@@ -1378,15 +1378,17 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr) + } + } + +-static void __init ccio_init_resources(struct ioc *ioc) ++static int __init ccio_init_resources(struct ioc *ioc) + { + struct resource *res = ioc->mmio_region; + char *name = kmalloc(14, GFP_KERNEL); +- ++ if (unlikely(!name)) ++ return -ENOMEM; + snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path); + + ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low); + ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv); ++ return 0; + } + + static int new_ioc_area(struct resource *res, unsigned long size, +@@ -1541,7 +1543,10 @@ static int __init ccio_probe(struct parisc_device *dev) + return -ENOMEM; + } + ccio_ioc_init(ioc); +- ccio_init_resources(ioc); ++ if (ccio_init_resources(ioc)) { ++ kfree(ioc); ++ return -ENOMEM; ++ } + hppa_dma_ops = &ccio_ops; + + hba = kzalloc(sizeof(*hba), GFP_KERNEL); +diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c +index 597cfabc0967c..ee349a16b73a4 100644 +--- a/drivers/platform/x86/pmc_atom.c ++++ b/drivers/platform/x86/pmc_atom.c +@@ -244,7 +244,7 @@ static void pmc_power_off(void) + pm1_cnt_port = acpi_base_addr + PM1_CNT; + + pm1_cnt_value = inl(pm1_cnt_port); +- pm1_cnt_value &= SLEEP_TYPE_MASK; ++ pm1_cnt_value &= ~SLEEP_TYPE_MASK; + pm1_cnt_value |= SLEEP_TYPE_S5; + pm1_cnt_value |= SLEEP_ENABLE; + +diff --git 
a/drivers/regulator/core.c b/drivers/regulator/core.c +index ae2addadb36f2..6ba3f6e7ea4f8 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -2486,13 +2486,18 @@ static int _regulator_do_enable(struct regulator_dev *rdev) + */ + static int _regulator_handle_consumer_enable(struct regulator *regulator) + { ++ int ret; + struct regulator_dev *rdev = regulator->rdev; + + lockdep_assert_held_once(&rdev->mutex.base); + + regulator->enable_count++; +- if (regulator->uA_load && regulator->enable_count == 1) +- return drms_uA_update(rdev); ++ if (regulator->uA_load && regulator->enable_count == 1) { ++ ret = drms_uA_update(rdev); ++ if (ret) ++ regulator->enable_count--; ++ return ret; ++ } + + return 0; + } +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c +index b5cee2a2ac66c..8930696021fbd 100644 +--- a/drivers/scsi/lpfc/lpfc_init.c ++++ b/drivers/scsi/lpfc/lpfc_init.c +@@ -6537,7 +6537,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) + /* Allocate device driver memory */ + rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); + if (rc) +- return -ENOMEM; ++ goto out_destroy_workqueue; + + /* IF Type 2 ports get initialized now. */ + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= +@@ -6911,6 +6911,9 @@ out_free_bsmbx: + lpfc_destroy_bootstrap_mbox(phba); + out_free_mem: + lpfc_mem_free(phba); ++out_destroy_workqueue: ++ destroy_workqueue(phba->wq); ++ phba->wq = NULL; + return rc; + } + +diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c +index a78a702511faa..944273f60d224 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c ++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c +@@ -5182,7 +5182,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance) + if (!fusion->log_to_span) { + dev_err(&instance->pdev->dev, "Failed from %s %d\n", + __func__, __LINE__); +- kfree(instance->ctrl_context); + return -ENOMEM; + } + } +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +index 97c1f242ef0a3..044a00edb5459 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +@@ -3238,6 +3238,7 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc) + fw_event = list_first_entry(&ioc->fw_event_list, + struct fw_event_work, list); + list_del_init(&fw_event->list); ++ fw_event_work_put(fw_event); + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + +@@ -3272,7 +3273,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) + if (cancel_work_sync(&fw_event->work)) + fw_event_work_put(fw_event); + +- fw_event_work_put(fw_event); + } + } + +diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c +index c6ec7d95bcfcc..722fd54e537cf 100644 +--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c ++++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c +@@ -681,13 +681,14 @@ static int brcmstb_pm_probe(struct platform_device *pdev) + const struct of_device_id *of_id = NULL; + struct device_node *dn; + void __iomem *base; +- int ret, i; ++ int ret, i, s; + + /* AON ctrl registers */ + base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL); + if (IS_ERR(base)) { + pr_err("error mapping AON_CTRL\n"); +- return PTR_ERR(base); ++ ret = PTR_ERR(base); ++ goto aon_err; + } + ctrl.aon_ctrl_base = base; + +@@ -697,8 +698,10 @@ static int brcmstb_pm_probe(struct platform_device *pdev) + /* Assume standard offset */ + ctrl.aon_sram = ctrl.aon_ctrl_base + + AON_CTRL_SYSTEM_DATA_RAM_OFS; 
++ s = 0; + } else { + ctrl.aon_sram = base; ++ s = 1; + } + + writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC); +@@ -708,7 +711,8 @@ static int brcmstb_pm_probe(struct platform_device *pdev) + (const void **)&ddr_phy_data); + if (IS_ERR(base)) { + pr_err("error mapping DDR PHY\n"); +- return PTR_ERR(base); ++ ret = PTR_ERR(base); ++ goto ddr_phy_err; + } + ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot; + ctrl.pll_status_offset = ddr_phy_data->pll_status_offset; +@@ -728,17 +732,20 @@ static int brcmstb_pm_probe(struct platform_device *pdev) + for_each_matching_node(dn, ddr_shimphy_dt_ids) { + i = ctrl.num_memc; + if (i >= MAX_NUM_MEMC) { ++ of_node_put(dn); + pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC); + break; + } + + base = of_io_request_and_map(dn, 0, dn->full_name); + if (IS_ERR(base)) { ++ of_node_put(dn); + if (!ctrl.support_warm_boot) + break; + + pr_err("error mapping DDR SHIMPHY %d\n", i); +- return PTR_ERR(base); ++ ret = PTR_ERR(base); ++ goto ddr_shimphy_err; + } + ctrl.memcs[i].ddr_shimphy_base = base; + ctrl.num_memc++; +@@ -749,14 +756,18 @@ static int brcmstb_pm_probe(struct platform_device *pdev) + for_each_matching_node(dn, brcmstb_memc_of_match) { + base = of_iomap(dn, 0); + if (!base) { ++ of_node_put(dn); + pr_err("error mapping DDR Sequencer %d\n", i); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto brcmstb_memc_err; + } + + of_id = of_match_node(brcmstb_memc_of_match, dn); + if (!of_id) { + iounmap(base); +- return -EINVAL; ++ of_node_put(dn); ++ ret = -EINVAL; ++ goto brcmstb_memc_err; + } + + ddr_seq_data = of_id->data; +@@ -776,21 +787,24 @@ static int brcmstb_pm_probe(struct platform_device *pdev) + dn = of_find_matching_node(NULL, sram_dt_ids); + if (!dn) { + pr_err("SRAM not found\n"); +- return -EINVAL; ++ ret = -EINVAL; ++ goto brcmstb_memc_err; + } + + ret = brcmstb_init_sram(dn); + of_node_put(dn); + if (ret) { + pr_err("error setting up SRAM for PM\n"); +- return ret; ++ goto brcmstb_memc_err; + } + + ctrl.pdev = pdev; + + ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL); +- if (!ctrl.s3_params) +- return -ENOMEM; ++ if (!ctrl.s3_params) { ++ ret = -ENOMEM; ++ goto s3_params_err; ++ } + ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params, + sizeof(*ctrl.s3_params), + DMA_TO_DEVICE); +@@ -810,7 +824,21 @@ static int brcmstb_pm_probe(struct platform_device *pdev) + + out: + kfree(ctrl.s3_params); +- ++s3_params_err: ++ iounmap(ctrl.boot_sram); ++brcmstb_memc_err: ++ for (i--; i >= 0; i--) ++ iounmap(ctrl.memcs[i].ddr_ctrl); ++ddr_shimphy_err: ++ for (i = 0; i < ctrl.num_memc; i++) ++ iounmap(ctrl.memcs[i].ddr_shimphy_base); ++ ++ iounmap(ctrl.memcs[0].ddr_phy_base); ++ddr_phy_err: ++ iounmap(ctrl.aon_ctrl_base); ++ if (s) ++ iounmap(ctrl.aon_sram); ++aon_err: + pr_warn("PM: initialization failed with code %d\n", ret); + + return ret; +diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c +index ff3cb09c57a63..30e965c410ffd 100644 +--- a/drivers/staging/rtl8712/rtl8712_cmd.c ++++ b/drivers/staging/rtl8712/rtl8712_cmd.c +@@ -117,34 +117,6 @@ static void r871x_internal_cmd_hdl(struct _adapter *padapter, u8 *pbuf) + kfree(pdrvcmd->pbuf); + } + +-static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf) +-{ +- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd); +- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; +- +- /* invoke cmd->callback function */ +- pcmd_callback = cmd_callback[pcmd->cmdcode].callback; +- if (!pcmd_callback) +- r8712_free_cmd_obj(pcmd); +- else 
+- pcmd_callback(padapter, pcmd); +- return H2C_SUCCESS; +-} +- +-static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf) +-{ +- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd); +- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; +- +- /* invoke cmd->callback function */ +- pcmd_callback = cmd_callback[pcmd->cmdcode].callback; +- if (!pcmd_callback) +- r8712_free_cmd_obj(pcmd); +- else +- pcmd_callback(padapter, pcmd); +- return H2C_SUCCESS; +-} +- + static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf) + { + struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; +@@ -213,14 +185,6 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter, + pcmd_r = NULL; + + switch (pcmd->cmdcode) { +- case GEN_CMD_CODE(_Read_MACREG): +- read_macreg_hdl(padapter, (u8 *)pcmd); +- pcmd_r = pcmd; +- break; +- case GEN_CMD_CODE(_Write_MACREG): +- write_macreg_hdl(padapter, (u8 *)pcmd); +- pcmd_r = pcmd; +- break; + case GEN_CMD_CODE(_Read_BBREG): + read_bbreg_hdl(padapter, (u8 *)pcmd); + break; +diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c +index 2ec1af8f7968d..fd074c6ebe0d3 100644 +--- a/drivers/thunderbolt/ctl.c ++++ b/drivers/thunderbolt/ctl.c +@@ -388,7 +388,7 @@ static void tb_ctl_rx_submit(struct ctl_pkg *pkg) + + static int tb_async_error(const struct ctl_pkg *pkg) + { +- const struct cfg_error_pkg *error = (const struct cfg_error_pkg *)pkg; ++ const struct cfg_error_pkg *error = pkg->buffer; + + if (pkg->frame.eof != TB_CFG_PKG_ERROR) + return false; +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c +index 4bdc12908146e..f3f582c3dc874 100644 +--- a/drivers/tty/serial/fsl_lpuart.c ++++ b/drivers/tty/serial/fsl_lpuart.c +@@ -1277,9 +1277,9 @@ static int lpuart_config_rs485(struct uart_port *port, + * Note: UART is assumed to be active high. 
+ */ + if (rs485->flags & SER_RS485_RTS_ON_SEND) +- modem &= ~UARTMODEM_TXRTSPOL; +- else if (rs485->flags & SER_RS485_RTS_AFTER_SEND) + modem |= UARTMODEM_TXRTSPOL; ++ else if (rs485->flags & SER_RS485_RTS_AFTER_SEND) ++ modem &= ~UARTMODEM_TXRTSPOL; + } + + /* Store the new configuration */ +@@ -1981,6 +1981,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, + uart_update_timeout(port, termios->c_cflag, baud); + + /* wait transmit engin complete */ ++ lpuart32_write(&sport->port, 0, UARTMODIR); + lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC); + + /* disable transmit and receive */ +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index b4f97df8e0000..e00ebda492198 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -4587,9 +4587,11 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op) + console_lock(); + if (vc->vc_mode != KD_TEXT) + rc = -EINVAL; +- else if (vc->vc_sw->con_font_set) ++ else if (vc->vc_sw->con_font_set) { ++ if (vc_is_sel(vc)) ++ clear_selection(); + rc = vc->vc_sw->con_font_set(vc, &font, op->flags); +- else ++ } else + rc = -ENOSYS; + console_unlock(); + kfree(font.data); +@@ -4616,9 +4618,11 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op) + console_unlock(); + return -EINVAL; + } +- if (vc->vc_sw->con_font_default) ++ if (vc->vc_sw->con_font_default) { ++ if (vc_is_sel(vc)) ++ clear_selection(); + rc = vc->vc_sw->con_font_default(vc, &font, s); +- else ++ } else + rc = -ENOSYS; + console_unlock(); + if (!rc) { +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 5dc8827ede7e8..edb62b49f572e 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1843,6 +1843,9 @@ static const struct usb_device_id acm_ids[] = { + { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */ + .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */ + }, ++ { USB_DEVICE(0x0c26, 0x0020), /* Icom ICF3400 Serie */ ++ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ ++ }, + { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */ + .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ + }, +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 4cf0dc7f330dd..68d860a3fd617 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -5923,6 +5923,11 @@ re_enumerate_no_bos: + * the reset is over (using their post_reset method). + * + * Return: The same as for usb_reset_and_verify_device(). ++ * However, if a reset is already in progress (for instance, if a ++ * driver doesn't have pre_ or post_reset() callbacks, and while ++ * being unbound or re-bound during the ongoing reset its disconnect() ++ * or probe() routine tries to perform a second, nested reset), the ++ * routine returns -EINPROGRESS. + * + * Note: + * The caller must own the device lock. 
For example, it's safe to use +@@ -5956,6 +5961,10 @@ int usb_reset_device(struct usb_device *udev) + return -EISDIR; + } + ++ if (udev->reset_in_progress) ++ return -EINPROGRESS; ++ udev->reset_in_progress = 1; ++ + port_dev = hub->ports[udev->portnum - 1]; + + /* +@@ -6020,6 +6029,7 @@ int usb_reset_device(struct usb_device *udev) + + usb_autosuspend_device(udev); + memalloc_noio_restore(noio_flag); ++ udev->reset_in_progress = 0; + return ret; + } + EXPORT_SYMBOL_GPL(usb_reset_device); +diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c +index 34bb6124f1e2f..4f640c0c51b39 100644 +--- a/drivers/usb/dwc2/platform.c ++++ b/drivers/usb/dwc2/platform.c +@@ -142,9 +142,9 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg) + } else if (hsotg->plat && hsotg->plat->phy_init) { + ret = hsotg->plat->phy_init(pdev, hsotg->plat->phy_type); + } else { +- ret = phy_power_on(hsotg->phy); ++ ret = phy_init(hsotg->phy); + if (ret == 0) +- ret = phy_init(hsotg->phy); ++ ret = phy_power_on(hsotg->phy); + } + + return ret; +@@ -176,9 +176,9 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg) + } else if (hsotg->plat && hsotg->plat->phy_exit) { + ret = hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type); + } else { +- ret = phy_exit(hsotg->phy); ++ ret = phy_power_off(hsotg->phy); + if (ret == 0) +- ret = phy_power_off(hsotg->phy); ++ ret = phy_exit(hsotg->phy); + } + if (ret) + return ret; +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index 8cc81f193e9c1..f4655665a1b5c 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -694,15 +694,16 @@ static void dwc3_core_exit(struct dwc3 *dwc) + { + dwc3_event_buffers_cleanup(dwc); + ++ usb_phy_set_suspend(dwc->usb2_phy, 1); ++ usb_phy_set_suspend(dwc->usb3_phy, 1); ++ phy_power_off(dwc->usb2_generic_phy); ++ phy_power_off(dwc->usb3_generic_phy); ++ + usb_phy_shutdown(dwc->usb2_phy); + usb_phy_shutdown(dwc->usb3_phy); + phy_exit(dwc->usb2_generic_phy); + phy_exit(dwc->usb3_generic_phy); + +- usb_phy_set_suspend(dwc->usb2_phy, 1); +- usb_phy_set_suspend(dwc->usb3_phy, 1); +- phy_power_off(dwc->usb2_generic_phy); +- phy_power_off(dwc->usb3_generic_phy); + clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks); + reset_control_assert(dwc->reset); + } +@@ -1537,16 +1538,16 @@ err5: + dwc3_debugfs_exit(dwc); + dwc3_event_buffers_cleanup(dwc); + +- usb_phy_shutdown(dwc->usb2_phy); +- usb_phy_shutdown(dwc->usb3_phy); +- phy_exit(dwc->usb2_generic_phy); +- phy_exit(dwc->usb3_generic_phy); +- + usb_phy_set_suspend(dwc->usb2_phy, 1); + usb_phy_set_suspend(dwc->usb3_phy, 1); + phy_power_off(dwc->usb2_generic_phy); + phy_power_off(dwc->usb3_generic_phy); + ++ usb_phy_shutdown(dwc->usb2_phy); ++ usb_phy_shutdown(dwc->usb3_phy); ++ phy_exit(dwc->usb2_generic_phy); ++ phy_exit(dwc->usb3_generic_phy); ++ + dwc3_ulpi_exit(dwc); + + err4: +diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c +index 7874b97e33227..aed35276e0e0c 100644 +--- a/drivers/usb/dwc3/dwc3-qcom.c ++++ b/drivers/usb/dwc3/dwc3-qcom.c +@@ -190,6 +190,14 @@ static int dwc3_qcom_register_extcon(struct dwc3_qcom *qcom) + return 0; + } + ++/* Only usable in contexts where the role can not change. 
*/ ++static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom) ++{ ++ struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3); ++ ++ return dwc->xhci; ++} ++ + static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom) + { + if (qcom->hs_phy_irq) { +@@ -297,7 +305,11 @@ static irqreturn_t qcom_dwc3_resume_irq(int irq, void *data) + if (qcom->pm_suspended) + return IRQ_HANDLED; + +- if (dwc->xhci) ++ /* ++ * This is safe as role switching is done from a freezable workqueue ++ * and the wakeup interrupts are disabled as part of resume. ++ */ ++ if (dwc3_qcom_is_host(qcom)) + pm_runtime_resume(&dwc->xhci->dev); + + return IRQ_HANDLED; +diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c +index fa252870c926f..a4eacc5cf58af 100644 +--- a/drivers/usb/dwc3/host.c ++++ b/drivers/usb/dwc3/host.c +@@ -9,8 +9,13 @@ + + #include <linux/platform_device.h> + ++#include "../host/xhci-plat.h" + #include "core.h" + ++static const struct xhci_plat_priv dwc3_xhci_plat_priv = { ++ .quirks = XHCI_SKIP_PHY_INIT, ++}; ++ + static int dwc3_host_get_irq(struct dwc3 *dwc) + { + struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); +@@ -85,6 +90,11 @@ int dwc3_host_init(struct dwc3 *dwc) + goto err; + } + ++ ret = platform_device_add_data(xhci, &dwc3_xhci_plat_priv, ++ sizeof(dwc3_xhci_plat_priv)); ++ if (ret) ++ goto err; ++ + memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props)); + + if (dwc->usb3_lpm_capable) +@@ -128,4 +138,5 @@ err: + void dwc3_host_exit(struct dwc3 *dwc) + { + platform_device_unregister(dwc->xhci); ++ dwc->xhci = NULL; + } +diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c +index f7e6c42558eb7..021984921f919 100644 +--- a/drivers/usb/gadget/function/storage_common.c ++++ b/drivers/usb/gadget/function/storage_common.c +@@ -294,8 +294,10 @@ EXPORT_SYMBOL_GPL(fsg_lun_fsync_sub); + void store_cdrom_address(u8 *dest, int msf, u32 addr) + { + if (msf) { +- /* Convert to Minutes-Seconds-Frames */ +- addr >>= 2; /* Convert to 2048-byte frames */ ++ /* ++ * Convert to Minutes-Seconds-Frames. ++ * Sector size is already set to 2048 bytes. ++ */ + addr += 2*75; /* Lead-in occupies 2 seconds */ + dest[3] = addr % 75; /* Frames */ + addr /= 75; +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c +index 9c066d1c512b1..66cb9f08bff10 100644 +--- a/drivers/usb/host/xhci-hub.c ++++ b/drivers/usb/host/xhci-hub.c +@@ -566,7 +566,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd) + * It will release and re-aquire the lock while calling ACPI + * method. + */ +-void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, ++static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, + u16 index, bool on, unsigned long *flags) + { + struct xhci_hub *rhub; +@@ -1555,6 +1555,17 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) + + status = bus_state->resuming_ports; + ++ /* ++ * SS devices are only visible to roothub after link training completes. ++ * Keep polling roothubs for a grace period after xHC start ++ */ ++ if (xhci->run_graceperiod) { ++ if (time_before(jiffies, xhci->run_graceperiod)) ++ status = 1; ++ else ++ xhci->run_graceperiod = 0; ++ } ++ + mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC; + + /* For each port, did anything change? If so, set that bit in buf. 
*/ +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 9fe35bb67731e..5ce16a259e612 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -149,9 +149,11 @@ int xhci_start(struct xhci_hcd *xhci) + xhci_err(xhci, "Host took too long to start, " + "waited %u microseconds.\n", + XHCI_MAX_HALT_USEC); +- if (!ret) ++ if (!ret) { + /* clear state flags. Including dying, halted or removing */ + xhci->xhc_state = 0; ++ xhci->run_graceperiod = jiffies + msecs_to_jiffies(500); ++ } + + return ret; + } +@@ -775,8 +777,6 @@ static void xhci_stop(struct usb_hcd *hcd) + void xhci_shutdown(struct usb_hcd *hcd) + { + struct xhci_hcd *xhci = hcd_to_xhci(hcd); +- unsigned long flags; +- int i; + + if (xhci->quirks & XHCI_SPURIOUS_REBOOT) + usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev)); +@@ -792,21 +792,12 @@ void xhci_shutdown(struct usb_hcd *hcd) + del_timer_sync(&xhci->shared_hcd->rh_timer); + } + +- spin_lock_irqsave(&xhci->lock, flags); ++ spin_lock_irq(&xhci->lock); + xhci_halt(xhci); +- +- /* Power off USB2 ports*/ +- for (i = 0; i < xhci->usb2_rhub.num_ports; i++) +- xhci_set_port_power(xhci, xhci->main_hcd, i, false, &flags); +- +- /* Power off USB3 ports*/ +- for (i = 0; i < xhci->usb3_rhub.num_ports; i++) +- xhci_set_port_power(xhci, xhci->shared_hcd, i, false, &flags); +- + /* Workaround for spurious wakeups at shutdown with HSW */ + if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) + xhci_reset(xhci, XHCI_RESET_SHORT_USEC); +- spin_unlock_irqrestore(&xhci->lock, flags); ++ spin_unlock_irq(&xhci->lock); + + xhci_cleanup_msix(xhci); + +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index 5a6ad776858e3..0dc448630197c 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1814,7 +1814,7 @@ struct xhci_hcd { + + /* Host controller watchdog timer structures */ + unsigned int xhc_state; +- ++ unsigned long run_graceperiod; + u32 command; + struct s3_save s3; + /* Host controller is dying - not responding to commands. "I'm not dead yet!" +@@ -2155,8 +2155,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, + int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); + int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1); + struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd); +-void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index, +- bool on, unsigned long *flags); + + void xhci_hc_died(struct xhci_hcd *xhci); + +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c +index f06a09e59d8ba..f37bde88eb5d5 100644 +--- a/drivers/usb/serial/ch341.c ++++ b/drivers/usb/serial/ch341.c +@@ -96,7 +96,9 @@ struct ch341_private { + u8 mcr; + u8 msr; + u8 lcr; ++ + unsigned long quirks; ++ u8 version; + }; + + static void ch341_set_termios(struct tty_struct *tty, +@@ -175,13 +177,20 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev, + /* + * CH341A buffers data until a full endpoint-size packet (32 bytes) + * has been received unless bit 7 is set. ++ * ++ * At least one device with version 0x27 appears to have this bit ++ * inverted. 
+ */ +- a |= BIT(7); ++ if (priv->version > 0x27) ++ a |= BIT(7); + + r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, a); + if (r) + return r; + ++ if (priv->version < 0x30) ++ return 0; ++ + r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x2518, lcr); + if (r) + return r; +@@ -233,7 +242,9 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv) + r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size); + if (r < 0) + goto out; +- dev_dbg(&dev->dev, "Chip version: 0x%02x\n", buffer[0]); ++ ++ priv->version = buffer[0]; ++ dev_dbg(&dev->dev, "Chip version: 0x%02x\n", priv->version); + + r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0); + if (r < 0) +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index d5a1832aa48eb..bc4fd79a13dbe 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -131,6 +131,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */ + { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */ + { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ ++ { USB_DEVICE(0x10C4, 0x8414) }, /* Decagon USB Cable Adapter */ + { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ + { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ + { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index d671e096594bb..ed1b26193d7a3 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -1045,6 +1045,8 @@ static const struct usb_device_id id_table_combined[] = { + /* IDS GmbH devices */ + { USB_DEVICE(IDS_VID, IDS_SI31A_PID) }, + { USB_DEVICE(IDS_VID, IDS_CM31A_PID) }, ++ /* Omron devices */ ++ { USB_DEVICE(OMRON_VID, OMRON_CS1W_CIF31_PID) }, + /* U-Blox devices */ + { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) }, + { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) }, +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index 4e92c165c86bf..31c8ccabbbb78 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -661,6 +661,12 @@ + #define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */ + #define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */ + ++/* ++ * Omron corporation (https://www.omron.com) ++ */ ++ #define OMRON_VID 0x0590 ++ #define OMRON_CS1W_CIF31_PID 0x00b2 ++ + /* + * Acton Research Corp. 
+ */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 2317ed357d8ef..cbe8ad3cd61fd 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -253,6 +253,7 @@ static void option_instat_callback(struct urb *urb); + #define QUECTEL_PRODUCT_BG96 0x0296 + #define QUECTEL_PRODUCT_EP06 0x0306 + #define QUECTEL_PRODUCT_EM05G 0x030a ++#define QUECTEL_PRODUCT_EM060K 0x030b + #define QUECTEL_PRODUCT_EM12 0x0512 + #define QUECTEL_PRODUCT_RM500Q 0x0800 + #define QUECTEL_PRODUCT_EC200S_CN 0x6002 +@@ -438,6 +439,8 @@ static void option_instat_callback(struct urb *urb); + #define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9 + #define CINTERION_PRODUCT_MV32_WA 0x00f1 + #define CINTERION_PRODUCT_MV32_WB 0x00f2 ++#define CINTERION_PRODUCT_MV32_WA_RMNET 0x00f3 ++#define CINTERION_PRODUCT_MV32_WB_RMNET 0x00f4 + + /* Olivetti products */ + #define OLIVETTI_VENDOR_ID 0x0b3c +@@ -573,6 +576,10 @@ static void option_instat_callback(struct urb *urb); + #define WETELECOM_PRODUCT_6802 0x6802 + #define WETELECOM_PRODUCT_WMD300 0x6803 + ++/* OPPO products */ ++#define OPPO_VENDOR_ID 0x22d9 ++#define OPPO_PRODUCT_R11 0x276c ++ + + /* Device flags */ + +@@ -1138,6 +1145,9 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff), + .driver_info = RSVD(6) | ZLP }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), + .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, +@@ -1993,8 +2003,12 @@ static const struct usb_device_id option_ids[] = { + .driver_info = RSVD(0)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff), + .driver_info = RSVD(3)}, ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA_RMNET, 0xff), ++ .driver_info = RSVD(0) }, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff), + .driver_info = RSVD(3)}, ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB_RMNET, 0xff), ++ .driver_info = RSVD(0) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), + .driver_info = RSVD(4) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), +@@ -2155,6 +2169,7 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ ++ { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) }, + { } /* Terminating entry */ + }; + MODULE_DEVICE_TABLE(usb, option_ids); +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h +index 66e7f5d123c46..6a59950a63a03 100644 +--- a/drivers/usb/storage/unusual_devs.h ++++ b/drivers/usb/storage/unusual_devs.h +@@ -2294,6 +2294,13 @@ UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000, + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + 
US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ), + ++/* Reported by Witold Lipieta <witold.lipieta@thaumatec.com> */ ++UNUSUAL_DEV( 0x1fc9, 0x0117, 0x0100, 0x0100, ++ "NXP Semiconductors", ++ "PN7462AU", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ US_FL_IGNORE_RESIDUE ), ++ + /* Supplied with some Castlewood ORB removable drives */ + UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999, + "Double-H Technology", +diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c +index 4092248a5936d..0b5cbf5ed1ca6 100644 +--- a/drivers/usb/typec/altmodes/displayport.c ++++ b/drivers/usb/typec/altmodes/displayport.c +@@ -87,8 +87,8 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con) + case DP_STATUS_CON_UFP_D: + case DP_STATUS_CON_BOTH: /* NOTE: First acting as DP source */ + conf |= DP_CONF_UFP_U_AS_UFP_D; +- pin_assign = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo) & +- DP_CAP_UFP_D_PIN_ASSIGN(dp->port->vdo); ++ pin_assign = DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo) & ++ DP_CAP_PIN_ASSIGN_DFP_D(dp->port->vdo); + break; + default: + break; +diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c +index 80fdd3ee0565f..57b1e011d2d34 100644 +--- a/drivers/video/fbdev/chipsfb.c ++++ b/drivers/video/fbdev/chipsfb.c +@@ -430,6 +430,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent) + err_release_fb: + framebuffer_release(p); + err_disable: ++ pci_disable_device(dp); + err_out: + return rc; + } +diff --git a/fs/afs/flock.c b/fs/afs/flock.c +index d5e5a6ddc8478..0fa05998a24d2 100644 +--- a/fs/afs/flock.c ++++ b/fs/afs/flock.c +@@ -75,7 +75,7 @@ void afs_lock_op_done(struct afs_call *call) + if (call->error == 0) { + spin_lock(&vnode->lock); + trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0); +- vnode->locked_at = call->reply_time; ++ vnode->locked_at = call->issue_time; + afs_schedule_lock_extension(vnode); + spin_unlock(&vnode->lock); + } +diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c +index 5c2729fc07e52..254580b1dc74c 100644 +--- a/fs/afs/fsclient.c ++++ b/fs/afs/fsclient.c +@@ -136,7 +136,7 @@ bad: + + static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry) + { +- return ktime_divns(call->reply_time, NSEC_PER_SEC) + expiry; ++ return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry; + } + + static void xdr_decode_AFSCallBack(const __be32 **_bp, +diff --git a/fs/afs/internal.h b/fs/afs/internal.h +index c3ad582f9fd0e..8d6582713fe72 100644 +--- a/fs/afs/internal.h ++++ b/fs/afs/internal.h +@@ -159,7 +159,6 @@ struct afs_call { + bool need_attention; /* T if RxRPC poked us */ + bool async; /* T if asynchronous */ + bool upgrade; /* T to request service upgrade */ +- bool have_reply_time; /* T if have got reply_time */ + bool intr; /* T if interruptible */ + bool unmarshalling_error; /* T if an unmarshalling error occurred */ + u16 service_id; /* Actual service ID (after upgrade) */ +@@ -173,7 +172,7 @@ struct afs_call { + } __attribute__((packed)); + __be64 tmp64; + }; +- ktime_t reply_time; /* Time of first reply packet */ ++ ktime_t issue_time; /* Time of issue of operation */ + }; + + struct afs_call_type { +diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c +index 6adab30a83993..49fcce6529a60 100644 +--- a/fs/afs/rxrpc.c ++++ b/fs/afs/rxrpc.c +@@ -428,6 +428,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) + if (call->max_lifespan) + rxrpc_kernel_set_max_life(call->net->socket, rxcall, + call->max_lifespan); ++ call->issue_time = ktime_get_real(); + + /* send 
the request */ + iov[0].iov_base = call->request; +@@ -532,12 +533,6 @@ static void afs_deliver_to_call(struct afs_call *call) + return; + } + +- if (!call->have_reply_time && +- rxrpc_kernel_get_reply_time(call->net->socket, +- call->rxcall, +- &call->reply_time)) +- call->have_reply_time = true; +- + ret = call->type->deliver(call); + state = READ_ONCE(call->state); + if (ret == 0 && call->unmarshalling_error) +diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c +index 3b19b009452a2..fa85b359f325b 100644 +--- a/fs/afs/yfsclient.c ++++ b/fs/afs/yfsclient.c +@@ -241,8 +241,7 @@ static void xdr_decode_YFSCallBack(const __be32 **_bp, + struct afs_callback *cb = &scb->callback; + ktime_t cb_expiry; + +- cb_expiry = call->reply_time; +- cb_expiry = ktime_add(cb_expiry, xdr_to_u64(x->expiration_time) * 100); ++ cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) * 100); + cb->expires_at = ktime_divns(cb_expiry, NSEC_PER_SEC); + scb->have_cb = true; + *_bp += xdr_size(x); +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index c7706a769de12..548de841cee53 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -713,15 +713,47 @@ static void pending_bios_fn(struct btrfs_work *work) + run_scheduled_bios(device); + } + +-static bool device_path_matched(const char *path, struct btrfs_device *device) ++/* ++ * Check if the device in the path matches the device in the given struct device. ++ * ++ * Returns: ++ * true If it is the same device. ++ * false If it is not the same device or on error. ++ */ ++static bool device_matched(const struct btrfs_device *device, const char *path) + { +- int found; ++ char *device_name; ++ struct block_device *bdev_old; ++ struct block_device *bdev_new; ++ ++ /* ++ * If we are looking for a device with the matching dev_t, then skip ++ * device without a name (a missing device). 
++ */ ++ if (!device->name) ++ return false; ++ ++ device_name = kzalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL); ++ if (!device_name) ++ return false; + + rcu_read_lock(); +- found = strcmp(rcu_str_deref(device->name), path); ++ scnprintf(device_name, BTRFS_PATH_NAME_MAX, "%s", rcu_str_deref(device->name)); + rcu_read_unlock(); + +- return found == 0; ++ bdev_old = lookup_bdev(device_name); ++ kfree(device_name); ++ if (IS_ERR(bdev_old)) ++ return false; ++ ++ bdev_new = lookup_bdev(path); ++ if (IS_ERR(bdev_new)) ++ return false; ++ ++ if (bdev_old == bdev_new) ++ return true; ++ ++ return false; + } + + /* +@@ -754,9 +786,7 @@ static int btrfs_free_stale_devices(const char *path, + &fs_devices->devices, dev_list) { + if (skip_device && skip_device == device) + continue; +- if (path && !device->name) +- continue; +- if (path && !device_path_matched(path, device)) ++ if (path && !device_matched(device, path)) + continue; + if (fs_devices->opened) { + /* for an already deleted device return 0 */ +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index 6ae281cff0d50..6039b0cdfe04e 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -3051,7 +3051,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, + static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, + loff_t offset, loff_t len) + { +- struct inode *inode; ++ struct inode *inode = file_inode(file); + struct cifsFileInfo *cfile = file->private_data; + struct file_zero_data_information fsctl_buf; + long rc; +@@ -3060,14 +3060,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, + + xid = get_xid(); + +- inode = d_inode(cfile->dentry); +- ++ inode_lock(inode); + /* Need to make file sparse, if not already, before freeing range. */ + /* Consider adding equivalent for compressed since it could also work */ + if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) { + rc = -EOPNOTSUPP; +- free_xid(xid); +- return rc; ++ goto out; + } + + /* +@@ -3086,6 +3084,8 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, + true /* is_fctl */, (char *)&fsctl_buf, + sizeof(struct file_zero_data_information), + CIFSMaxBufSize, NULL, NULL); ++out: ++ inode_unlock(inode); + free_xid(xid); + return rc; + } +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c +index e595a29bf46e3..e0f07382ebebc 100644 +--- a/fs/debugfs/inode.c ++++ b/fs/debugfs/inode.c +@@ -742,6 +742,28 @@ void debugfs_remove(struct dentry *dentry) + } + EXPORT_SYMBOL_GPL(debugfs_remove); + ++/** ++ * debugfs_lookup_and_remove - lookup a directory or file and recursively remove it ++ * @name: a pointer to a string containing the name of the item to look up. ++ * @parent: a pointer to the parent dentry of the item. ++ * ++ * This is the equlivant of doing something like ++ * debugfs_remove(debugfs_lookup(..)) but with the proper reference counting ++ * handled for the directory being looked up. ++ */ ++void debugfs_lookup_and_remove(const char *name, struct dentry *parent) ++{ ++ struct dentry *dentry; ++ ++ dentry = debugfs_lookup(name, parent); ++ if (!dentry) ++ return; ++ ++ debugfs_remove(dentry); ++ dput(dentry); ++} ++EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove); ++ + /** + * debugfs_remove_recursive - recursively removes a directory + * @dentry: a pointer to a the dentry of the directory to be removed. 
If this +diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h +index 8fab480a8e4e4..0575ad84cc555 100644 +--- a/include/linux/buffer_head.h ++++ b/include/linux/buffer_head.h +@@ -136,6 +136,17 @@ BUFFER_FNS(Defer_Completion, defer_completion) + + static __always_inline void set_buffer_uptodate(struct buffer_head *bh) + { ++ /* ++ * If somebody else already set this uptodate, they will ++ * have done the memory barrier, and a reader will thus ++ * see *some* valid buffer state. ++ * ++ * Any other serialization (with IO errors or whatever that ++ * might clear the bit) has to come from other state (eg BH_Lock). ++ */ ++ if (test_bit(BH_Uptodate, &bh->b_state)) ++ return; ++ + /* + * make it consistent with folio_mark_uptodate + * pairs with smp_load_acquire in buffer_uptodate +diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h +index 798f0b9b43aee..7e4f156acc2f7 100644 +--- a/include/linux/debugfs.h ++++ b/include/linux/debugfs.h +@@ -85,6 +85,8 @@ struct dentry *debugfs_create_automount(const char *name, + void debugfs_remove(struct dentry *dentry); + void debugfs_remove_recursive(struct dentry *dentry); + ++void debugfs_lookup_and_remove(const char *name, struct dentry *parent); ++ + const struct file_operations *debugfs_real_fops(const struct file *filp); + + int debugfs_file_get(struct dentry *dentry); +@@ -216,6 +218,10 @@ static inline void debugfs_remove(struct dentry *dentry) + static inline void debugfs_remove_recursive(struct dentry *dentry) + { } + ++static inline void debugfs_lookup_and_remove(const char *name, ++ struct dentry *parent) ++{ } ++ + const struct file_operations *debugfs_real_fops(const struct file *filp); + + static inline int debugfs_file_get(struct dentry *dentry) +diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h +index 022bcea9edec5..99a9b09dc839d 100644 +--- a/include/linux/platform_data/x86/pmc_atom.h ++++ b/include/linux/platform_data/x86/pmc_atom.h +@@ -7,6 +7,8 @@ + #ifndef PMC_ATOM_H + #define PMC_ATOM_H + ++#include <linux/bits.h> ++ + /* ValleyView Power Control Unit PCI Device ID */ + #define PCI_DEVICE_ID_VLV_PMC 0x0F1C + /* CherryTrail Power Control Unit PCI Device ID */ +@@ -139,9 +141,9 @@ + #define ACPI_MMIO_REG_LEN 0x100 + + #define PM1_CNT 0x4 +-#define SLEEP_TYPE_MASK 0xFFFFECFF ++#define SLEEP_TYPE_MASK GENMASK(12, 10) + #define SLEEP_TYPE_S5 0x1C00 +-#define SLEEP_ENABLE 0x2000 ++#define SLEEP_ENABLE BIT(13) + + extern int pmc_atom_read(int offset, u32 *value); + extern int pmc_atom_write(int offset, u32 value); +diff --git a/include/linux/usb.h b/include/linux/usb.h +index e656e7b4b1e44..703c7464d8957 100644 +--- a/include/linux/usb.h ++++ b/include/linux/usb.h +@@ -580,6 +580,7 @@ struct usb3_lpm_parameters { + * @devaddr: device address, XHCI: assigned by HW, others: same as devnum + * @can_submit: URBs may be submitted + * @persist_enabled: USB_PERSIST enabled for this device ++ * @reset_in_progress: the device is being reset + * @have_langid: whether string_langid is valid + * @authorized: policy has said we can use it; + * (user space) policy determines if we authorize this device to be +@@ -665,6 +666,7 @@ struct usb_device { + + unsigned can_submit:1; + unsigned persist_enabled:1; ++ unsigned reset_in_progress:1; + unsigned have_langid:1; + unsigned authorized:1; + unsigned authenticated:1; +diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h +index fc4c7edb2e8a4..296909ea04f26 100644 +--- a/include/linux/usb/typec_dp.h ++++ 
b/include/linux/usb/typec_dp.h +@@ -73,6 +73,11 @@ enum { + #define DP_CAP_USB BIT(7) + #define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(15, 8)) >> 8) + #define DP_CAP_UFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(23, 16)) >> 16) ++/* Get pin assignment taking plug & receptacle into consideration */ ++#define DP_CAP_PIN_ASSIGN_UFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \ ++ DP_CAP_UFP_D_PIN_ASSIGN(_cap_) : DP_CAP_DFP_D_PIN_ASSIGN(_cap_)) ++#define DP_CAP_PIN_ASSIGN_DFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \ ++ DP_CAP_DFP_D_PIN_ASSIGN(_cap_) : DP_CAP_UFP_D_PIN_ASSIGN(_cap_)) + + /* DisplayPort Status Update VDO bits */ + #define DP_STATUS_CONNECTION(_status_) ((_status_) & 3) +diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h +index 236f290224aae..8dfb2526b3aa2 100644 +--- a/kernel/cgroup/cgroup-internal.h ++++ b/kernel/cgroup/cgroup-internal.h +@@ -250,9 +250,10 @@ int cgroup_migrate(struct task_struct *leader, bool threadgroup, + + int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader, + bool threadgroup); +-struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup) ++struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup, ++ bool *locked) + __acquires(&cgroup_threadgroup_rwsem); +-void cgroup_procs_write_finish(struct task_struct *task) ++void cgroup_procs_write_finish(struct task_struct *task, bool locked) + __releases(&cgroup_threadgroup_rwsem); + + void cgroup_lock_and_drain_offline(struct cgroup *cgrp); +diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c +index 117d70098cd49..aa7577b189e92 100644 +--- a/kernel/cgroup/cgroup-v1.c ++++ b/kernel/cgroup/cgroup-v1.c +@@ -498,12 +498,13 @@ static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of, + struct task_struct *task; + const struct cred *cred, *tcred; + ssize_t ret; ++ bool locked; + + cgrp = cgroup_kn_lock_live(of->kn, false); + if (!cgrp) + return -ENODEV; + +- task = cgroup_procs_write_start(buf, threadgroup); ++ task = cgroup_procs_write_start(buf, threadgroup, &locked); + ret = PTR_ERR_OR_ZERO(task); + if (ret) + goto out_unlock; +@@ -526,7 +527,7 @@ static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of, + ret = cgroup_attach_task(cgrp, task, threadgroup); + + out_finish: +- cgroup_procs_write_finish(task); ++ cgroup_procs_write_finish(task, locked); + out_unlock: + cgroup_kn_unlock(of->kn); + +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c +index 23f0db2900e4b..d14575c0e4640 100644 +--- a/kernel/cgroup/cgroup.c ++++ b/kernel/cgroup/cgroup.c +@@ -30,6 +30,7 @@ + + #include "cgroup-internal.h" + ++#include <linux/cpu.h> + #include <linux/cred.h> + #include <linux/errno.h> + #include <linux/init_task.h> +@@ -2376,6 +2377,47 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) + } + EXPORT_SYMBOL_GPL(task_cgroup_path); + ++/** ++ * cgroup_attach_lock - Lock for ->attach() ++ * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem ++ * ++ * cgroup migration sometimes needs to stabilize threadgroups against forks and ++ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach() ++ * implementations (e.g. cpuset), also need to disable CPU hotplug. ++ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can ++ * lead to deadlocks. ++ * ++ * Bringing up a CPU may involve creating and destroying tasks which requires ++ * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside ++ * cpus_read_lock(). 
If we call an ->attach() which acquires the cpus lock while ++ * write-locking threadgroup_rwsem, the locking order is reversed and we end up ++ * waiting for an on-going CPU hotplug operation which in turn is waiting for ++ * the threadgroup_rwsem to be released to create new tasks. For more details: ++ * ++ * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu ++ * ++ * Resolve the situation by always acquiring cpus_read_lock() before optionally ++ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that ++ * CPU hotplug is disabled on entry. ++ */ ++static void cgroup_attach_lock(bool lock_threadgroup) ++{ ++ cpus_read_lock(); ++ if (lock_threadgroup) ++ percpu_down_write(&cgroup_threadgroup_rwsem); ++} ++ ++/** ++ * cgroup_attach_unlock - Undo cgroup_attach_lock() ++ * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem ++ */ ++static void cgroup_attach_unlock(bool lock_threadgroup) ++{ ++ if (lock_threadgroup) ++ percpu_up_write(&cgroup_threadgroup_rwsem); ++ cpus_read_unlock(); ++} ++ + /** + * cgroup_migrate_add_task - add a migration target task to a migration context + * @task: target task +@@ -2856,8 +2898,8 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader, + return ret; + } + +-struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup) +- __acquires(&cgroup_threadgroup_rwsem) ++struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup, ++ bool *threadgroup_locked) + { + struct task_struct *tsk; + pid_t pid; +@@ -2865,7 +2907,17 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup) + if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) + return ERR_PTR(-EINVAL); + +- percpu_down_write(&cgroup_threadgroup_rwsem); ++ /* ++ * If we migrate a single thread, we don't care about threadgroup ++ * stability. If the thread is `current`, it won't exit(2) under our ++ * hands or change PID through exec(2). We exclude ++ * cgroup_update_dfl_csses and other cgroup_{proc,thread}s_write ++ * callers by cgroup_mutex. ++ * Therefore, we can skip the global lock. 
++ */ ++ lockdep_assert_held(&cgroup_mutex); ++ *threadgroup_locked = pid || threadgroup; ++ cgroup_attach_lock(*threadgroup_locked); + + rcu_read_lock(); + if (pid) { +@@ -2896,14 +2948,14 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup) + goto out_unlock_rcu; + + out_unlock_threadgroup: +- percpu_up_write(&cgroup_threadgroup_rwsem); ++ cgroup_attach_unlock(*threadgroup_locked); ++ *threadgroup_locked = false; + out_unlock_rcu: + rcu_read_unlock(); + return tsk; + } + +-void cgroup_procs_write_finish(struct task_struct *task) +- __releases(&cgroup_threadgroup_rwsem) ++void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked) + { + struct cgroup_subsys *ss; + int ssid; +@@ -2911,7 +2963,8 @@ void cgroup_procs_write_finish(struct task_struct *task) + /* release reference from cgroup_procs_write_start() */ + put_task_struct(task); + +- percpu_up_write(&cgroup_threadgroup_rwsem); ++ cgroup_attach_unlock(threadgroup_locked); ++ + for_each_subsys(ss, ssid) + if (ss->post_attach) + ss->post_attach(); +@@ -2966,12 +3019,11 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) + struct cgroup_subsys_state *d_css; + struct cgroup *dsct; + struct css_set *src_cset; ++ bool has_tasks; + int ret; + + lockdep_assert_held(&cgroup_mutex); + +- percpu_down_write(&cgroup_threadgroup_rwsem); +- + /* look up all csses currently attached to @cgrp's subtree */ + spin_lock_irq(&css_set_lock); + cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { +@@ -2982,6 +3034,15 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) + } + spin_unlock_irq(&css_set_lock); + ++ /* ++ * We need to write-lock threadgroup_rwsem while migrating tasks. ++ * However, if there are no source csets for @cgrp, changing its ++ * controllers isn't gonna produce any task migrations and the ++ * write-locking can be skipped safely. 
++ */ ++ has_tasks = !list_empty(&mgctx.preloaded_src_csets); ++ cgroup_attach_lock(has_tasks); ++ + /* NULL dst indicates self on default hierarchy */ + ret = cgroup_migrate_prepare_dst(&mgctx); + if (ret) +@@ -3001,7 +3062,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) + ret = cgroup_migrate_execute(&mgctx); + out_finish: + cgroup_migrate_finish(&mgctx); +- percpu_up_write(&cgroup_threadgroup_rwsem); ++ cgroup_attach_unlock(has_tasks); + return ret; + } + +@@ -4830,12 +4891,13 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of, + struct task_struct *task; + const struct cred *saved_cred; + ssize_t ret; ++ bool threadgroup_locked; + + dst_cgrp = cgroup_kn_lock_live(of->kn, false); + if (!dst_cgrp) + return -ENODEV; + +- task = cgroup_procs_write_start(buf, true); ++ task = cgroup_procs_write_start(buf, true, &threadgroup_locked); + ret = PTR_ERR_OR_ZERO(task); + if (ret) + goto out_unlock; +@@ -4861,7 +4923,7 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of, + ret = cgroup_attach_task(dst_cgrp, task, true); + + out_finish: +- cgroup_procs_write_finish(task); ++ cgroup_procs_write_finish(task, threadgroup_locked); + out_unlock: + cgroup_kn_unlock(of->kn); + +@@ -4881,6 +4943,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of, + struct task_struct *task; + const struct cred *saved_cred; + ssize_t ret; ++ bool locked; + + buf = strstrip(buf); + +@@ -4888,7 +4951,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of, + if (!dst_cgrp) + return -ENODEV; + +- task = cgroup_procs_write_start(buf, false); ++ task = cgroup_procs_write_start(buf, false, &locked); + ret = PTR_ERR_OR_ZERO(task); + if (ret) + goto out_unlock; +@@ -4919,7 +4982,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of, + ret = cgroup_attach_task(dst_cgrp, task, false); + + out_finish: +- cgroup_procs_write_finish(task); ++ cgroup_procs_write_finish(task, locked); + out_unlock: + cgroup_kn_unlock(of->kn); + +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c +index b02eca235ba3f..9ba94a9a67aa4 100644 +--- a/kernel/cgroup/cpuset.c ++++ b/kernel/cgroup/cpuset.c +@@ -2204,7 +2204,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) + cgroup_taskset_first(tset, &css); + cs = css_cs(css); + +- cpus_read_lock(); ++ lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */ + percpu_down_write(&cpuset_rwsem); + + /* prepare for attach */ +@@ -2260,7 +2260,6 @@ static void cpuset_attach(struct cgroup_taskset *tset) + wake_up(&cpuset_attach_wq); + + percpu_up_write(&cpuset_rwsem); +- cpus_read_unlock(); + } + + /* The various types of files and directories in a cpuset file system */ +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index 9631ecc8a34c9..6e9f5a10e04ae 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -1596,6 +1596,7 @@ static int check_kprobe_address_safe(struct kprobe *p, + /* Ensure it is not in reserved area nor out of text */ + if (!(core_kernel_text((unsigned long) p->addr) || + is_module_text_address((unsigned long) p->addr)) || ++ in_gate_area_no_mm((unsigned long) p->addr) || + within_kprobe_blacklist((unsigned long) p->addr) || + jump_label_text_reserved(p->addr, p->addr) || + find_bug((unsigned long)p->addr)) { +diff --git a/mm/kmemleak.c b/mm/kmemleak.c +index 3761c79137b17..d8cde7292bf92 100644 +--- a/mm/kmemleak.c ++++ b/mm/kmemleak.c +@@ -1123,7 +1123,7 @@ EXPORT_SYMBOL(kmemleak_no_scan); + void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count, + gfp_t gfp) + { +- if 
(PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) ++ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) + kmemleak_alloc(__va(phys), size, min_count, gfp); + } + EXPORT_SYMBOL(kmemleak_alloc_phys); +@@ -1137,7 +1137,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys); + */ + void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size) + { +- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) ++ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) + kmemleak_free_part(__va(phys), size); + } + EXPORT_SYMBOL(kmemleak_free_part_phys); +@@ -1149,7 +1149,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys); + */ + void __ref kmemleak_not_leak_phys(phys_addr_t phys) + { +- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) ++ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) + kmemleak_not_leak(__va(phys)); + } + EXPORT_SYMBOL(kmemleak_not_leak_phys); +@@ -1161,7 +1161,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys); + */ + void __ref kmemleak_ignore_phys(phys_addr_t phys) + { +- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) ++ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) + kmemleak_ignore(__va(phys)); + } + EXPORT_SYMBOL(kmemleak_ignore_phys); +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c +index 19726d81025d5..01e33724d10c3 100644 +--- a/net/bridge/br_netfilter_hooks.c ++++ b/net/bridge/br_netfilter_hooks.c +@@ -384,6 +384,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_ + /* - Bridged-and-DNAT'ed traffic doesn't + * require ip_forwarding. */ + if (rt->dst.dev == dev) { ++ skb_dst_drop(skb); + skb_dst_set(skb, &rt->dst); + goto bridged_dnat; + } +@@ -413,6 +414,7 @@ bridged_dnat: + kfree_skb(skb); + return 0; + } ++ skb_dst_drop(skb); + skb_dst_set_noref(skb, &rt->dst); + } + +diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c +index e4e0c836c3f51..6b07f30675bb0 100644 +--- a/net/bridge/br_netfilter_ipv6.c ++++ b/net/bridge/br_netfilter_ipv6.c +@@ -197,6 +197,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc + kfree_skb(skb); + return 0; + } ++ skb_dst_drop(skb); + skb_dst_set_noref(skb, &rt->dst); + } + +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c +index ef3e7a3e3a29e..d38c8ca93ba09 100644 +--- a/net/ipv4/fib_frontend.c ++++ b/net/ipv4/fib_frontend.c +@@ -399,7 +399,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, + dev_match = dev_match || (res.type == RTN_LOCAL && + dev == net->loopback_dev); + if (dev_match) { +- ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST; ++ ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK; + return ret; + } + if (no_addr) +@@ -411,7 +411,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, + ret = 0; + if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) { + if (res.type == RTN_UNICAST) +- ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST; ++ ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK; + } + return ret; + +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index f4e00ff909da3..0ebba83dbe220 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -2384,6 +2384,21 @@ static inline bool tcp_may_undo(const struct tcp_sock *tp) + return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); + } + ++static bool tcp_is_non_sack_preventing_reopen(struct sock *sk) ++{ ++ struct tcp_sock *tp = tcp_sk(sk); 
++ ++ if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { ++ /* Hold old state until something *above* high_seq ++ * is ACKed. For Reno it is MUST to prevent false ++ * fast retransmits (RFC2582). SACK TCP is safe. */ ++ if (!tcp_any_retrans_done(sk)) ++ tp->retrans_stamp = 0; ++ return true; ++ } ++ return false; ++} ++ + /* People celebrate: "We love our President!" */ + static bool tcp_try_undo_recovery(struct sock *sk) + { +@@ -2406,14 +2421,8 @@ static bool tcp_try_undo_recovery(struct sock *sk) + } else if (tp->rack.reo_wnd_persist) { + tp->rack.reo_wnd_persist--; + } +- if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { +- /* Hold old state until something *above* high_seq +- * is ACKed. For Reno it is MUST to prevent false +- * fast retransmits (RFC2582). SACK TCP is safe. */ +- if (!tcp_any_retrans_done(sk)) +- tp->retrans_stamp = 0; ++ if (tcp_is_non_sack_preventing_reopen(sk)) + return true; +- } + tcp_set_ca_state(sk, TCP_CA_Open); + tp->is_sack_reneg = 0; + return false; +@@ -2449,6 +2458,8 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) + NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPSPURIOUSRTOS); + inet_csk(sk)->icsk_retransmits = 0; ++ if (tcp_is_non_sack_preventing_reopen(sk)) ++ return true; + if (frto_undo || tcp_is_sack(tp)) { + tcp_set_ca_state(sk, TCP_CA_Open); + tp->is_sack_reneg = 0; +@@ -3484,11 +3495,11 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) + + /* Then check host-wide RFC 5961 rate limit. */ + now = jiffies / HZ; +- if (now != challenge_timestamp) { ++ if (now != READ_ONCE(challenge_timestamp)) { + u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit); + u32 half = (ack_limit + 1) >> 1; + +- challenge_timestamp = now; ++ WRITE_ONCE(challenge_timestamp, now); + WRITE_ONCE(challenge_count, half + prandom_u32_max(ack_limit)); + } + count = READ_ONCE(challenge_count); +diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c +index 75421a472d25a..f5c448c276fef 100644 +--- a/net/ipv6/seg6.c ++++ b/net/ipv6/seg6.c +@@ -125,6 +125,11 @@ static int seg6_genl_sethmac(struct sk_buff *skb, struct genl_info *info) + goto out_unlock; + } + ++ if (slen > nla_len(info->attrs[SEG6_ATTR_SECRET])) { ++ err = -EINVAL; ++ goto out_unlock; ++ } ++ + if (hinfo) { + err = seg6_hmac_info_del(net, hmackeyid); + if (err) +diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c +index ea9e73428ed9c..659a589b1fad1 100644 +--- a/net/kcm/kcmsock.c ++++ b/net/kcm/kcmsock.c +@@ -1413,12 +1413,6 @@ static int kcm_attach(struct socket *sock, struct socket *csock, + psock->sk = csk; + psock->bpf_prog = prog; + +- err = strp_init(&psock->strp, csk, &cb); +- if (err) { +- kmem_cache_free(kcm_psockp, psock); +- goto out; +- } +- + write_lock_bh(&csk->sk_callback_lock); + + /* Check if sk_user_data is aready by KCM or someone else. 
+@@ -1426,13 +1420,18 @@ static int kcm_attach(struct socket *sock, struct socket *csock, + */ + if (csk->sk_user_data) { + write_unlock_bh(&csk->sk_callback_lock); +- strp_stop(&psock->strp); +- strp_done(&psock->strp); + kmem_cache_free(kcm_psockp, psock); + err = -EALREADY; + goto out; + } + ++ err = strp_init(&psock->strp, csk, &cb); ++ if (err) { ++ write_unlock_bh(&csk->sk_callback_lock); ++ kmem_cache_free(kcm_psockp, psock); ++ goto out; ++ } ++ + psock->save_data_ready = csk->sk_data_ready; + psock->save_write_space = csk->sk_write_space; + psock->save_state_change = csk->sk_state_change; +diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c +index 0e26c83b6b412..d5b8568591d4c 100644 +--- a/net/mac80211/ibss.c ++++ b/net/mac80211/ibss.c +@@ -542,6 +542,10 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata) + + sdata_assert_lock(sdata); + ++ /* When not connected/joined, sending CSA doesn't make sense. */ ++ if (ifibss->state != IEEE80211_IBSS_MLME_JOINED) ++ return -ENOLINK; ++ + /* update cfg80211 bss information with the new channel */ + if (!is_zero_ether_addr(ifibss->bssid)) { + cbss = cfg80211_get_bss(sdata->local->hw.wiphy, +diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c +index b8ce84618a55b..c439125ef2b91 100644 +--- a/net/mac802154/rx.c ++++ b/net/mac802154/rx.c +@@ -44,7 +44,7 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata, + + switch (mac_cb(skb)->dest.mode) { + case IEEE802154_ADDR_NONE: +- if (mac_cb(skb)->dest.mode != IEEE802154_ADDR_NONE) ++ if (hdr->source.mode != IEEE802154_ADDR_NONE) + /* FIXME: check if we are PAN coordinator */ + skb->pkt_type = PACKET_OTHERHOST; + else +diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c +index e40988a2f22fb..26245419ef4a9 100644 +--- a/net/netfilter/nf_conntrack_irc.c ++++ b/net/netfilter/nf_conntrack_irc.c +@@ -185,8 +185,9 @@ static int help(struct sk_buff *skb, unsigned int protoff, + + /* dcc_ip can be the internal OR external (NAT'ed) IP */ + tuple = &ct->tuplehash[dir].tuple; +- if (tuple->src.u3.ip != dcc_ip && +- tuple->dst.u3.ip != dcc_ip) { ++ if ((tuple->src.u3.ip != dcc_ip && ++ ct->tuplehash[!dir].tuple.dst.u3.ip != dcc_ip) || ++ dcc_port == 0) { + net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n", + &tuple->src.u3.ip, + &dcc_ip, dcc_port); +diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c +index 52a24d4ef5d8a..2ba61971f6231 100644 +--- a/net/rxrpc/rxkad.c ++++ b/net/rxrpc/rxkad.c +@@ -451,7 +451,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, + * directly into the target buffer. 
+ */ + sg = _sg; +- nsg = skb_shinfo(skb)->nr_frags; ++ nsg = skb_shinfo(skb)->nr_frags + 1; + if (nsg <= 4) { + nsg = 4; + } else { +diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c +index 4074c50ac3d73..3aa6b4dcb1c8e 100644 +--- a/net/sched/sch_sfb.c ++++ b/net/sched/sch_sfb.c +@@ -135,15 +135,15 @@ static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q) + } + } + +-static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) ++static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q) + { + u32 sfbhash; + +- sfbhash = sfb_hash(skb, 0); ++ sfbhash = cb->hashes[0]; + if (sfbhash) + increment_one_qlen(sfbhash, 0, q); + +- sfbhash = sfb_hash(skb, 1); ++ sfbhash = cb->hashes[1]; + if (sfbhash) + increment_one_qlen(sfbhash, 1, q); + } +@@ -281,8 +281,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, + { + + struct sfb_sched_data *q = qdisc_priv(sch); ++ unsigned int len = qdisc_pkt_len(skb); + struct Qdisc *child = q->qdisc; + struct tcf_proto *fl; ++ struct sfb_skb_cb cb; + int i; + u32 p_min = ~0; + u32 minqlen = ~0; +@@ -399,11 +401,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, + } + + enqueue: ++ memcpy(&cb, sfb_skb_cb(skb), sizeof(cb)); + ret = qdisc_enqueue(skb, child, to_free); + if (likely(ret == NET_XMIT_SUCCESS)) { +- qdisc_qstats_backlog_inc(sch, skb); ++ sch->qstats.backlog += len; + sch->q.qlen++; +- increment_qlen(skb, q); ++ increment_qlen(&cb, q); + } else if (net_xmit_drop_count(ret)) { + q->stats.childdrop++; + qdisc_qstats_drop(sch); +diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c +index 5f72f3f916a5a..a7f60bb2dd513 100644 +--- a/net/sched/sch_tbf.c ++++ b/net/sched/sch_tbf.c +@@ -297,6 +297,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt, + struct nlattr *tb[TCA_TBF_MAX + 1]; + struct tc_tbf_qopt *qopt; + struct Qdisc *child = NULL; ++ struct Qdisc *old = NULL; + struct psched_ratecfg rate; + struct psched_ratecfg peak; + u64 max_size; +@@ -388,7 +389,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt, + sch_tree_lock(sch); + if (child) { + qdisc_tree_flush_backlog(q->qdisc); +- qdisc_put(q->qdisc); ++ old = q->qdisc; + q->qdisc = child; + } + q->limit = qopt->limit; +@@ -408,6 +409,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt, + memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg)); + + sch_tree_unlock(sch); ++ qdisc_put(old); + err = 0; + done: + return err; +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c +index 394491692a078..5d696b7fb47e1 100644 +--- a/net/smc/af_smc.c ++++ b/net/smc/af_smc.c +@@ -1093,7 +1093,6 @@ static void smc_listen_out_connected(struct smc_sock *new_smc) + { + struct sock *newsmcsk = &new_smc->sk; + +- sk_refcnt_debug_inc(newsmcsk); + if (newsmcsk->sk_state == SMC_INIT) + newsmcsk->sk_state = SMC_ACTIVE; + +diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c +index e7155a7743001..0b9ad3b5ff18a 100644 +--- a/net/tipc/monitor.c ++++ b/net/tipc/monitor.c +@@ -130,7 +130,7 @@ static void map_set(u64 *up_map, int i, unsigned int v) + + static int map_get(u64 up_map, int i) + { +- return (up_map & (1 << i)) >> i; ++ return (up_map & (1ULL << i)) >> i; + } + + static struct tipc_peer *peer_prev(struct tipc_peer *peer) +diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c +index 76b845f68ac89..d80b06d669593 100644 +--- a/net/wireless/debugfs.c ++++ b/net/wireless/debugfs.c +@@ -65,9 +65,10 @@ static ssize_t ht40allow_map_read(struct file *file, + { + struct wiphy *wiphy = 
file->private_data; + char *buf; +- unsigned int offset = 0, buf_size = PAGE_SIZE, i, r; ++ unsigned int offset = 0, buf_size = PAGE_SIZE, i; + enum nl80211_band band; + struct ieee80211_supported_band *sband; ++ ssize_t r; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) +diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c +index 2ddfe22266517..f73ee0798aeab 100644 +--- a/sound/core/seq/oss/seq_oss_midi.c ++++ b/sound/core/seq/oss/seq_oss_midi.c +@@ -267,7 +267,9 @@ snd_seq_oss_midi_clear_all(void) + void + snd_seq_oss_midi_setup(struct seq_oss_devinfo *dp) + { ++ spin_lock_irq(®ister_lock); + dp->max_mididev = max_midi_devs; ++ spin_unlock_irq(®ister_lock); + } + + /* +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c +index cc93157fa9500..0363670a56e7c 100644 +--- a/sound/core/seq/seq_clientmgr.c ++++ b/sound/core/seq/seq_clientmgr.c +@@ -121,13 +121,13 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid) + spin_unlock_irqrestore(&clients_lock, flags); + #ifdef CONFIG_MODULES + if (!in_interrupt()) { +- static char client_requested[SNDRV_SEQ_GLOBAL_CLIENTS]; +- static char card_requested[SNDRV_CARDS]; ++ static DECLARE_BITMAP(client_requested, SNDRV_SEQ_GLOBAL_CLIENTS); ++ static DECLARE_BITMAP(card_requested, SNDRV_CARDS); ++ + if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) { + int idx; + +- if (!client_requested[clientid]) { +- client_requested[clientid] = 1; ++ if (!test_and_set_bit(clientid, client_requested)) { + for (idx = 0; idx < 15; idx++) { + if (seq_client_load[idx] < 0) + break; +@@ -142,10 +142,8 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid) + int card = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) / + SNDRV_SEQ_CLIENTS_PER_CARD; + if (card < snd_ecards_limit) { +- if (! 
card_requested[card]) { +- card_requested[card] = 1; ++ if (!test_and_set_bit(card, card_requested)) + snd_request_card(card); +- } + snd_seq_device_load_drivers(); + } + } +diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c +index 452b9eaca815b..474347aba50e6 100644 +--- a/sound/drivers/aloop.c ++++ b/sound/drivers/aloop.c +@@ -463,17 +463,18 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable) + cable->streams[SNDRV_PCM_STREAM_PLAYBACK]; + struct loopback_pcm *dpcm_capt = + cable->streams[SNDRV_PCM_STREAM_CAPTURE]; +- unsigned long delta_play = 0, delta_capt = 0; ++ unsigned long delta_play = 0, delta_capt = 0, cur_jiffies; + unsigned int running, count1, count2; + ++ cur_jiffies = jiffies; + running = cable->running ^ cable->pause; + if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) { +- delta_play = jiffies - dpcm_play->last_jiffies; ++ delta_play = cur_jiffies - dpcm_play->last_jiffies; + dpcm_play->last_jiffies += delta_play; + } + + if (running & (1 << SNDRV_PCM_STREAM_CAPTURE)) { +- delta_capt = jiffies - dpcm_capt->last_jiffies; ++ delta_capt = cur_jiffies - dpcm_capt->last_jiffies; + dpcm_capt->last_jiffies += delta_capt; + } + +diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c +index 6530a55fb8780..2cea3d3ee54dc 100644 +--- a/sound/pci/emu10k1/emupcm.c ++++ b/sound/pci/emu10k1/emupcm.c +@@ -123,7 +123,7 @@ static int snd_emu10k1_pcm_channel_alloc(struct snd_emu10k1_pcm * epcm, int voic + epcm->voices[0]->epcm = epcm; + if (voices > 1) { + for (i = 1; i < voices; i++) { +- epcm->voices[i] = &epcm->emu->voices[epcm->voices[0]->number + i]; ++ epcm->voices[i] = &epcm->emu->voices[(epcm->voices[0]->number + i) % NUM_G]; + epcm->voices[i]->epcm = epcm; + } + } +diff --git a/sound/usb/stream.c b/sound/usb/stream.c +index eff1ac1dc9ba3..d35684e5f07f0 100644 +--- a/sound/usb/stream.c ++++ b/sound/usb/stream.c +@@ -1103,7 +1103,7 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip, + * Dallas DS4201 workaround: It presents 5 altsettings, but the last + * one misses syncpipe, and does not produce any sound. + */ +- if (chip->usb_id == USB_ID(0x04fa, 0x4201)) ++ if (chip->usb_id == USB_ID(0x04fa, 0x4201) && num >= 4) + num = 4; + + for (i = 0; i < num; i++) { |