author    | Mike Pagano <mpagano@gentoo.org> | 2020-01-09 06:17:15 -0500
committer | Mike Pagano <mpagano@gentoo.org> | 2020-01-09 06:17:15 -0500
commit    | 8cfb80cdfb8bad49484b322c40f95304bbf996a7 (patch)
tree      | 45af0ceeb5145c232fb772c1d026daf402a47ccf
parent    | Linux patch 5.4.8 (diff)
download  | linux-patches-5.4-10.tar.gz linux-patches-5.4-10.tar.bz2 linux-patches-5.4-10.zip
Linux patches 5.4.9 and 5.4.10 (tag: 5.4-10)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README             |    8
-rw-r--r-- | 1008_linux-5.4.9.patch  | 7489
-rw-r--r-- | 1009_linux-5.4.10.patch |   26
3 files changed, 7523 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 3519610d..f39e13c4 100644 --- a/0000_README +++ b/0000_README @@ -75,6 +75,14 @@ Patch: 1007_linux-5.4.8.patch From: http://www.kernel.org Desc: Linux 5.4.8 +Patch: 1008_linux-5.4.9.patch +From: http://www.kernel.org +Desc: Linux 5.4.9 + +Patch: 1009_linux-5.4.10.patch +From: http://www.kernel.org +Desc: Linux 5.4.10 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1008_linux-5.4.9.patch b/1008_linux-5.4.9.patch new file mode 100644 index 00000000..fcc91284 --- /dev/null +++ b/1008_linux-5.4.9.patch @@ -0,0 +1,7489 @@ +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index f5a551e4332d..5594c8bf1dcd 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -113,7 +113,7 @@ + the GPE dispatcher. + This facility can be used to prevent such uncontrolled + GPE floodings. +- Format: <int> ++ Format: <byte> + + acpi_no_auto_serialize [HW,ACPI] + Disable auto-serialization of AML methods +diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt +index e96e085271c1..83f6c6a7c41c 100644 +--- a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt ++++ b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt +@@ -46,7 +46,7 @@ Required properties: + Example (R-Car H3): + + usb2_clksel: clock-controller@e6590630 { +- compatible = "renesas,r8a77950-rcar-usb2-clock-sel", ++ compatible = "renesas,r8a7795-rcar-usb2-clock-sel", + "renesas,rcar-gen3-usb2-clock-sel"; + reg = <0 0xe6590630 0 0x02>; + clocks = <&cpg CPG_MOD 703>, <&usb_extal>, <&usb_xtal>; +diff --git a/Makefile b/Makefile +index 1adee1b06f3d..3ba15c3528c8 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 8 ++SUBLEVEL = 9 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +index 6039adda12ee..b0b12e389835 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts ++++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +@@ -296,7 +296,7 @@ + }; + + &usb0_phy { +- status = "okay"; ++ status = "disabled"; + phy-supply = <&usb_otg_pwr>; + }; + +@@ -306,7 +306,7 @@ + }; + + &usb0 { +- status = "okay"; ++ status = "disabled"; + }; + + &usb1 { +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts +index 2a5cd303123d..8d6f316a5c7b 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts ++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts +@@ -192,6 +192,9 @@ + bluetooth { + compatible = "brcm,bcm43438-bt"; + shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>; ++ max-speed = <2000000>; ++ clocks = <&wifi32k>; ++ clock-names = "lpo"; + }; + }; + +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts +index f25ddd18a607..4d67eb715b91 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts ++++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts +@@ -409,6 +409,9 @@ + bluetooth { + compatible = "brcm,bcm43438-bt"; + shutdown-gpios = <&gpio GPIOX_17 
GPIO_ACTIVE_HIGH>; ++ max-speed = <2000000>; ++ clocks = <&wifi32k>; ++ clock-names = "lpo"; + }; + }; + +diff --git a/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi b/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi +index 9682d4dd7496..1bae90705746 100644 +--- a/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi +@@ -23,6 +23,43 @@ + }; + }; + ++/* ++ * The laptop FW does not appear to support the retention state as it is ++ * not advertised as enabled in ACPI, and enabling it in DT can cause boot ++ * hangs. ++ */ ++&CPU0 { ++ cpu-idle-states = <&LITTLE_CPU_SLEEP_1>; ++}; ++ ++&CPU1 { ++ cpu-idle-states = <&LITTLE_CPU_SLEEP_1>; ++}; ++ ++&CPU2 { ++ cpu-idle-states = <&LITTLE_CPU_SLEEP_1>; ++}; ++ ++&CPU3 { ++ cpu-idle-states = <&LITTLE_CPU_SLEEP_1>; ++}; ++ ++&CPU4 { ++ cpu-idle-states = <&BIG_CPU_SLEEP_1>; ++}; ++ ++&CPU5 { ++ cpu-idle-states = <&BIG_CPU_SLEEP_1>; ++}; ++ ++&CPU6 { ++ cpu-idle-states = <&BIG_CPU_SLEEP_1>; ++}; ++ ++&CPU7 { ++ cpu-idle-states = <&BIG_CPU_SLEEP_1>; ++}; ++ + &qusb2phy { + status = "okay"; + +diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h +index 8dc6c5cdabe6..baf52baaa2a5 100644 +--- a/arch/arm64/include/asm/pgtable-prot.h ++++ b/arch/arm64/include/asm/pgtable-prot.h +@@ -85,13 +85,12 @@ + #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE) + #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) + #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN) +-#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN) + + #define __P000 PAGE_NONE + #define __P001 PAGE_READONLY + #define __P010 PAGE_READONLY + #define __P011 PAGE_READONLY +-#define __P100 PAGE_EXECONLY ++#define __P100 PAGE_READONLY_EXEC + #define __P101 PAGE_READONLY_EXEC + #define __P110 PAGE_READONLY_EXEC + #define __P111 PAGE_READONLY_EXEC +@@ -100,7 +99,7 @@ + #define __S001 PAGE_READONLY + #define __S010 PAGE_SHARED + #define __S011 PAGE_SHARED +-#define __S100 PAGE_EXECONLY ++#define __S100 PAGE_READONLY_EXEC + #define __S101 PAGE_READONLY_EXEC + #define __S110 PAGE_SHARED_EXEC + #define __S111 PAGE_SHARED_EXEC +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h +index 565aa45ef134..13ebe2bad79f 100644 +--- a/arch/arm64/include/asm/pgtable.h ++++ b/arch/arm64/include/asm/pgtable.h +@@ -96,12 +96,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; + #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte)) + + #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) +-/* +- * Execute-only user mappings do not have the PTE_USER bit set. All valid +- * kernel mappings have the PTE_UXN bit set. +- */ + #define pte_valid_not_user(pte) \ +- ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) ++ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) + #define pte_valid_young(pte) \ + ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF)) + #define pte_valid_user(pte) \ +@@ -117,8 +113,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; + + /* + * p??_access_permitted() is true for valid user mappings (subject to the +- * write permission check) other than user execute-only which do not have the +- * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set. ++ * write permission check). 
PROT_NONE mappings do not have the PTE_VALID bit ++ * set. + */ + #define pte_access_permitted(pte, write) \ + (pte_valid_user(pte) && (!(write) || pte_write(pte))) +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c +index 9fc6db0bcbad..d26e6cd28953 100644 +--- a/arch/arm64/mm/fault.c ++++ b/arch/arm64/mm/fault.c +@@ -454,7 +454,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, + const struct fault_info *inf; + struct mm_struct *mm = current->mm; + vm_fault_t fault, major = 0; +- unsigned long vm_flags = VM_READ | VM_WRITE; ++ unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; + unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + + if (kprobe_page_fault(regs, esr)) +diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c +index 60c929f3683b..d10247fab0fd 100644 +--- a/arch/arm64/mm/mmu.c ++++ b/arch/arm64/mm/mmu.c +@@ -1069,7 +1069,6 @@ void arch_remove_memory(int nid, u64 start, u64 size, + { + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; +- struct zone *zone; + + /* + * FIXME: Cleanup page tables (also in arch_add_memory() in case +@@ -1078,7 +1077,6 @@ void arch_remove_memory(int nid, u64 start, u64 size, + * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be + * unlocked yet. + */ +- zone = page_zone(pfn_to_page(start_pfn)); +- __remove_pages(zone, start_pfn, nr_pages, altmap); ++ __remove_pages(start_pfn, nr_pages, altmap); + } + #endif +diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c +index bf9df2625bc8..a6dd80a2c939 100644 +--- a/arch/ia64/mm/init.c ++++ b/arch/ia64/mm/init.c +@@ -689,9 +689,7 @@ void arch_remove_memory(int nid, u64 start, u64 size, + { + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; +- struct zone *zone; + +- zone = page_zone(pfn_to_page(start_pfn)); +- __remove_pages(zone, start_pfn, nr_pages, altmap); ++ __remove_pages(start_pfn, nr_pages, altmap); + } + #endif +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig +index a0bd9bdb5f83..e5c2d47608fe 100644 +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -46,7 +46,7 @@ config MIPS + select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES + select HAVE_ASM_MODVERSIONS +- select HAVE_EBPF_JIT if (!CPU_MICROMIPS) ++ select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2 + select HAVE_CONTEXT_TRACKING + select HAVE_COPY_THREAD_TLS + select HAVE_C_RECORDMCOUNT +diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h +index 4993db40482c..ee26f9a4575d 100644 +--- a/arch/mips/include/asm/thread_info.h ++++ b/arch/mips/include/asm/thread_info.h +@@ -49,8 +49,26 @@ struct thread_info { + .addr_limit = KERNEL_DS, \ + } + +-/* How to get the thread information struct from C. */ ++/* ++ * A pointer to the struct thread_info for the currently executing thread is ++ * held in register $28/$gp. ++ * ++ * We declare __current_thread_info as a global register variable rather than a ++ * local register variable within current_thread_info() because clang doesn't ++ * support explicit local register variables. ++ * ++ * When building the VDSO we take care not to declare the global register ++ * variable because this causes GCC to not preserve the value of $28/$gp in ++ * functions that change its value (which is common in the PIC VDSO when ++ * accessing the GOT). 
Since the VDSO shouldn't be accessing ++ * __current_thread_info anyway we declare it extern in order to cause a link ++ * failure if it's referenced. ++ */ ++#ifdef __VDSO__ ++extern struct thread_info *__current_thread_info; ++#else + register struct thread_info *__current_thread_info __asm__("$28"); ++#endif + + static inline struct thread_info *current_thread_info(void) + { +diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c +index 46b76751f3a5..a2405d5f7d1e 100644 +--- a/arch/mips/net/ebpf_jit.c ++++ b/arch/mips/net/ebpf_jit.c +@@ -1803,7 +1803,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) + unsigned int image_size; + u8 *image_ptr; + +- if (!prog->jit_requested || MIPS_ISA_REV < 2) ++ if (!prog->jit_requested) + return prog; + + tmp = bpf_jit_blind_constants(prog); +diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c +index be941d382c8d..460afa415434 100644 +--- a/arch/powerpc/mm/mem.c ++++ b/arch/powerpc/mm/mem.c +@@ -104,6 +104,27 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end) + return -ENODEV; + } + ++#define FLUSH_CHUNK_SIZE SZ_1G ++/** ++ * flush_dcache_range_chunked(): Write any modified data cache blocks out to ++ * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE ++ * Does not invalidate the corresponding instruction cache blocks. ++ * ++ * @start: the start address ++ * @stop: the stop address (exclusive) ++ * @chunk: the max size of the chunks ++ */ ++static void flush_dcache_range_chunked(unsigned long start, unsigned long stop, ++ unsigned long chunk) ++{ ++ unsigned long i; ++ ++ for (i = start; i < stop; i += chunk) { ++ flush_dcache_range(i, min(stop, start + chunk)); ++ cond_resched(); ++ } ++} ++ + int __ref arch_add_memory(int nid, u64 start, u64 size, + struct mhp_restrictions *restrictions) + { +@@ -120,7 +141,8 @@ int __ref arch_add_memory(int nid, u64 start, u64 size, + start, start + size, rc); + return -EFAULT; + } +- flush_dcache_range(start, start + size); ++ ++ flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE); + + return __add_pages(nid, start_pfn, nr_pages, restrictions); + } +@@ -130,14 +152,14 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size, + { + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; +- struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap); + int ret; + +- __remove_pages(page_zone(page), start_pfn, nr_pages, altmap); ++ __remove_pages(start_pfn, nr_pages, altmap); + + /* Remove htab bolted mappings for this section of memory */ + start = (unsigned long)__va(start); +- flush_dcache_range(start, start + size); ++ flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE); ++ + ret = remove_section_mapping(start, start + size); + WARN_ON_ONCE(ret); + +diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c +index 42bbcd47cc85..dffe1a45b6ed 100644 +--- a/arch/powerpc/mm/slice.c ++++ b/arch/powerpc/mm/slice.c +@@ -50,7 +50,7 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) { + + #endif + +-static inline bool slice_addr_is_low(unsigned long addr) ++static inline notrace bool slice_addr_is_low(unsigned long addr) + { + u64 tmp = (u64)addr; + +@@ -659,7 +659,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, + mm_ctx_user_psize(¤t->mm->context), 1); + } + +-unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) ++unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr) + { 
+ unsigned char *psizes; + int index, mask_index; +diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c +index b94d8db5ddcc..c40fdcdeb950 100644 +--- a/arch/riscv/kernel/ftrace.c ++++ b/arch/riscv/kernel/ftrace.c +@@ -142,7 +142,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, + */ + old = *parent; + +- if (function_graph_enter(old, self_addr, frame_pointer, parent)) ++ if (!function_graph_enter(old, self_addr, frame_pointer, parent)) + *parent = return_hooker; + } + +diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c +index 7511b71d2931..fdb8083e7870 100644 +--- a/arch/s390/kernel/perf_cpum_sf.c ++++ b/arch/s390/kernel/perf_cpum_sf.c +@@ -1313,18 +1313,28 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all) + */ + if (flush_all && done) + break; +- +- /* If an event overflow happened, discard samples by +- * processing any remaining sample-data-blocks. +- */ +- if (event_overflow) +- flush_all = 1; + } + + /* Account sample overflows in the event hardware structure */ + if (sampl_overflow) + OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) + + sampl_overflow, 1 + num_sdb); ++ ++ /* Perf_event_overflow() and perf_event_account_interrupt() limit ++ * the interrupt rate to an upper limit. Roughly 1000 samples per ++ * task tick. ++ * Hitting this limit results in a large number ++ * of throttled REF_REPORT_THROTTLE entries and the samples ++ * are dropped. ++ * Slightly increase the interval to avoid hitting this limit. ++ */ ++ if (event_overflow) { ++ SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10); ++ debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n", ++ __func__, ++ DIV_ROUND_UP(SAMPL_RATE(hwc), 10)); ++ } ++ + if (sampl_overflow || event_overflow) + debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: " + "overflow stats: sample=%llu event=%llu\n", +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c +index d95c85780e07..06dddd7c4290 100644 +--- a/arch/s390/kernel/smp.c ++++ b/arch/s390/kernel/smp.c +@@ -727,39 +727,67 @@ static void __ref smp_get_core_info(struct sclp_core_info *info, int early) + + static int smp_add_present_cpu(int cpu); + +-static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add) ++static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail, ++ bool configured, bool early) + { + struct pcpu *pcpu; +- cpumask_t avail; +- int cpu, nr, i, j; ++ int cpu, nr, i; + u16 address; + + nr = 0; +- cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); +- cpu = cpumask_first(&avail); +- for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { +- if (sclp.has_core_type && info->core[i].type != boot_core_type) ++ if (sclp.has_core_type && core->type != boot_core_type) ++ return nr; ++ cpu = cpumask_first(avail); ++ address = core->core_id << smp_cpu_mt_shift; ++ for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) { ++ if (pcpu_find_address(cpu_present_mask, address + i)) + continue; +- address = info->core[i].core_id << smp_cpu_mt_shift; +- for (j = 0; j <= smp_cpu_mtid; j++) { +- if (pcpu_find_address(cpu_present_mask, address + j)) +- continue; +- pcpu = pcpu_devices + cpu; +- pcpu->address = address + j; +- pcpu->state = +- (cpu >= info->configured*(smp_cpu_mtid + 1)) ? 
+- CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; +- smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); +- set_cpu_present(cpu, true); +- if (sysfs_add && smp_add_present_cpu(cpu) != 0) +- set_cpu_present(cpu, false); +- else +- nr++; +- cpu = cpumask_next(cpu, &avail); +- if (cpu >= nr_cpu_ids) ++ pcpu = pcpu_devices + cpu; ++ pcpu->address = address + i; ++ if (configured) ++ pcpu->state = CPU_STATE_CONFIGURED; ++ else ++ pcpu->state = CPU_STATE_STANDBY; ++ smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); ++ set_cpu_present(cpu, true); ++ if (!early && smp_add_present_cpu(cpu) != 0) ++ set_cpu_present(cpu, false); ++ else ++ nr++; ++ cpumask_clear_cpu(cpu, avail); ++ cpu = cpumask_next(cpu, avail); ++ } ++ return nr; ++} ++ ++static int __smp_rescan_cpus(struct sclp_core_info *info, bool early) ++{ ++ struct sclp_core_entry *core; ++ cpumask_t avail; ++ bool configured; ++ u16 core_id; ++ int nr, i; ++ ++ nr = 0; ++ cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); ++ /* ++ * Add IPL core first (which got logical CPU number 0) to make sure ++ * that all SMT threads get subsequent logical CPU numbers. ++ */ ++ if (early) { ++ core_id = pcpu_devices[0].address >> smp_cpu_mt_shift; ++ for (i = 0; i < info->configured; i++) { ++ core = &info->core[i]; ++ if (core->core_id == core_id) { ++ nr += smp_add_core(core, &avail, true, early); + break; ++ } + } + } ++ for (i = 0; i < info->combined; i++) { ++ configured = i < info->configured; ++ nr += smp_add_core(&info->core[i], &avail, configured, early); ++ } + return nr; + } + +@@ -808,7 +836,7 @@ void __init smp_detect_cpus(void) + + /* Add CPUs present at boot */ + get_online_cpus(); +- __smp_rescan_cpus(info, 0); ++ __smp_rescan_cpus(info, true); + put_online_cpus(); + memblock_free_early((unsigned long)info, sizeof(*info)); + } +@@ -1153,7 +1181,7 @@ int __ref smp_rescan_cpus(void) + smp_get_core_info(info, 0); + get_online_cpus(); + mutex_lock(&smp_cpu_state_mutex); +- nr = __smp_rescan_cpus(info, 1); ++ nr = __smp_rescan_cpus(info, false); + mutex_unlock(&smp_cpu_state_mutex); + put_online_cpus(); + kfree(info); +diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c +index a124f19f7b3c..c1d96e588152 100644 +--- a/arch/s390/mm/init.c ++++ b/arch/s390/mm/init.c +@@ -291,10 +291,8 @@ void arch_remove_memory(int nid, u64 start, u64 size, + { + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; +- struct zone *zone; + +- zone = page_zone(pfn_to_page(start_pfn)); +- __remove_pages(zone, start_pfn, nr_pages, altmap); ++ __remove_pages(start_pfn, nr_pages, altmap); + vmem_remove_mapping(start, size); + } + #endif /* CONFIG_MEMORY_HOTPLUG */ +diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c +index dfdbaa50946e..d1b1ff2be17a 100644 +--- a/arch/sh/mm/init.c ++++ b/arch/sh/mm/init.c +@@ -434,9 +434,7 @@ void arch_remove_memory(int nid, u64 start, u64 size, + { + unsigned long start_pfn = PFN_DOWN(start); + unsigned long nr_pages = size >> PAGE_SHIFT; +- struct zone *zone; + +- zone = page_zone(pfn_to_page(start_pfn)); +- __remove_pages(zone, start_pfn, nr_pages, altmap); ++ __remove_pages(start_pfn, nr_pages, altmap); + } + #endif /* CONFIG_MEMORY_HOTPLUG */ +diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c +index 5ee3fed881d3..741540d849f3 100644 +--- a/arch/x86/events/intel/bts.c ++++ b/arch/x86/events/intel/bts.c +@@ -63,9 +63,17 @@ struct bts_buffer { + + static struct pmu bts_pmu; + ++static int buf_nr_pages(struct page *page) ++{ ++ if (!PagePrivate(page)) ++ return 1; 
++ ++ return 1 << page_private(page); ++} ++ + static size_t buf_size(struct page *page) + { +- return 1 << (PAGE_SHIFT + page_private(page)); ++ return buf_nr_pages(page) * PAGE_SIZE; + } + + static void * +@@ -83,9 +91,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages, + /* count all the high order buffers */ + for (pg = 0, nbuf = 0; pg < nr_pages;) { + page = virt_to_page(pages[pg]); +- if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1)) +- return NULL; +- pg += 1 << page_private(page); ++ pg += buf_nr_pages(page); + nbuf++; + } + +@@ -109,7 +115,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages, + unsigned int __nr_pages; + + page = virt_to_page(pages[pg]); +- __nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1; ++ __nr_pages = buf_nr_pages(page); + buf->buf[nbuf].page = page; + buf->buf[nbuf].offset = offset; + buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0); +diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c +index 930edeb41ec3..0a74407ef92e 100644 +--- a/arch/x86/mm/init_32.c ++++ b/arch/x86/mm/init_32.c +@@ -865,10 +865,8 @@ void arch_remove_memory(int nid, u64 start, u64 size, + { + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; +- struct zone *zone; + +- zone = page_zone(pfn_to_page(start_pfn)); +- __remove_pages(zone, start_pfn, nr_pages, altmap); ++ __remove_pages(start_pfn, nr_pages, altmap); + } + #endif + +diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c +index a6b5c653727b..b8541d77452c 100644 +--- a/arch/x86/mm/init_64.c ++++ b/arch/x86/mm/init_64.c +@@ -1212,10 +1212,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size, + { + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; +- struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap); +- struct zone *zone = page_zone(page); + +- __remove_pages(zone, start_pfn, nr_pages, altmap); ++ __remove_pages(start_pfn, nr_pages, altmap); + kernel_physical_mapping_remove(start, start + size); + } + #endif /* CONFIG_MEMORY_HOTPLUG */ +diff --git a/block/bio.c b/block/bio.c +index 43df756b68c4..c822ceb7c4de 100644 +--- a/block/bio.c ++++ b/block/bio.c +@@ -535,6 +535,45 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start) + } + EXPORT_SYMBOL(zero_fill_bio_iter); + ++void bio_truncate(struct bio *bio, unsigned new_size) ++{ ++ struct bio_vec bv; ++ struct bvec_iter iter; ++ unsigned int done = 0; ++ bool truncated = false; ++ ++ if (new_size >= bio->bi_iter.bi_size) ++ return; ++ ++ if (bio_data_dir(bio) != READ) ++ goto exit; ++ ++ bio_for_each_segment(bv, bio, iter) { ++ if (done + bv.bv_len > new_size) { ++ unsigned offset; ++ ++ if (!truncated) ++ offset = new_size - done; ++ else ++ offset = 0; ++ zero_user(bv.bv_page, offset, bv.bv_len - offset); ++ truncated = true; ++ } ++ done += bv.bv_len; ++ } ++ ++ exit: ++ /* ++ * Don't touch bvec table here and make it really immutable, since ++ * fs bio user has to retrieve all pages via bio_for_each_segment_all ++ * in its .end_bio() callback. ++ * ++ * It is enough to truncate bio by updating .bi_size since we can make ++ * correct bvec with the updated .bi_size for drivers. 
++ */ ++ bio->bi_iter.bi_size = new_size; ++} ++ + /** + * bio_put - release a reference to a bio + * @bio: bio to release reference to +diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c +index 6ca015f92766..7f053468b50d 100644 +--- a/block/compat_ioctl.c ++++ b/block/compat_ioctl.c +@@ -6,6 +6,7 @@ + #include <linux/compat.h> + #include <linux/elevator.h> + #include <linux/hdreg.h> ++#include <linux/pr.h> + #include <linux/slab.h> + #include <linux/syscalls.h> + #include <linux/types.h> +@@ -354,6 +355,10 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) + * but we call blkdev_ioctl, which gets the lock for us + */ + case BLKRRPART: ++ case BLKREPORTZONE: ++ case BLKRESETZONE: ++ case BLKGETZONESZ: ++ case BLKGETNRZONES: + return blkdev_ioctl(bdev, mode, cmd, + (unsigned long)compat_ptr(arg)); + case BLKBSZSET_32: +@@ -401,6 +406,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) + case BLKTRACETEARDOWN: /* compatible */ + ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg)); + return ret; ++ case IOC_PR_REGISTER: ++ case IOC_PR_RESERVE: ++ case IOC_PR_RELEASE: ++ case IOC_PR_PREEMPT: ++ case IOC_PR_PREEMPT_ABORT: ++ case IOC_PR_CLEAR: ++ return blkdev_ioctl(bdev, mode, cmd, ++ (unsigned long)compat_ptr(arg)); + default: + if (disk->fops->compat_ioctl) + ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg); +diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c +index 75948a3f1a20..c60d2c6d31d6 100644 +--- a/drivers/acpi/sysfs.c ++++ b/drivers/acpi/sysfs.c +@@ -819,14 +819,14 @@ end: + * interface: + * echo unmask > /sys/firmware/acpi/interrupts/gpe00 + */ +-#define ACPI_MASKABLE_GPE_MAX 0xFF ++#define ACPI_MASKABLE_GPE_MAX 0x100 + static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata; + + static int __init acpi_gpe_set_masked_gpes(char *val) + { + u8 gpe; + +- if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX) ++ if (kstrtou8(val, 0, &gpe)) + return -EINVAL; + set_bit(gpe, acpi_masked_gpes_map); + +@@ -838,7 +838,7 @@ void __init acpi_gpe_apply_masked_gpes(void) + { + acpi_handle handle; + acpi_status status; +- u8 gpe; ++ u16 gpe; + + for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) { + status = acpi_get_gpe_device(gpe, &handle); +diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c +index f41744b9b38a..66a570d0da83 100644 +--- a/drivers/ata/ahci_brcm.c ++++ b/drivers/ata/ahci_brcm.c +@@ -76,8 +76,7 @@ enum brcm_ahci_version { + }; + + enum brcm_ahci_quirks { +- BRCM_AHCI_QUIRK_NO_NCQ = BIT(0), +- BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1), ++ BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(0), + }; + + struct brcm_ahci_priv { +@@ -213,19 +212,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv) + brcm_sata_phy_disable(priv, i); + } + +-static u32 brcm_ahci_get_portmask(struct platform_device *pdev, ++static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv, + struct brcm_ahci_priv *priv) + { +- void __iomem *ahci; +- struct resource *res; + u32 impl; + +- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci"); +- ahci = devm_ioremap_resource(&pdev->dev, res); +- if (IS_ERR(ahci)) +- return 0; +- +- impl = readl(ahci + HOST_PORTS_IMPL); ++ impl = readl(hpriv->mmio + HOST_PORTS_IMPL); + + if (fls(impl) > SATA_TOP_MAX_PHYS) + dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n", +@@ -233,9 +225,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev, + else if (!impl) + dev_info(priv->dev, "no ports found\n"); + +- 
devm_iounmap(&pdev->dev, ahci); +- devm_release_mem_region(&pdev->dev, res->start, resource_size(res)); +- + return impl; + } + +@@ -285,6 +274,13 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev, + /* Perform the SATA PHY reset sequence */ + brcm_sata_phy_disable(priv, ap->port_no); + ++ /* Reset the SATA clock */ ++ ahci_platform_disable_clks(hpriv); ++ msleep(10); ++ ++ ahci_platform_enable_clks(hpriv); ++ msleep(10); ++ + /* Bring the PHY back on */ + brcm_sata_phy_enable(priv, ap->port_no); + +@@ -347,11 +343,10 @@ static int brcm_ahci_suspend(struct device *dev) + struct ata_host *host = dev_get_drvdata(dev); + struct ahci_host_priv *hpriv = host->private_data; + struct brcm_ahci_priv *priv = hpriv->plat_data; +- int ret; + +- ret = ahci_platform_suspend(dev); + brcm_sata_phys_disable(priv); +- return ret; ++ ++ return ahci_platform_suspend(dev); + } + + static int brcm_ahci_resume(struct device *dev) +@@ -359,11 +354,44 @@ static int brcm_ahci_resume(struct device *dev) + struct ata_host *host = dev_get_drvdata(dev); + struct ahci_host_priv *hpriv = host->private_data; + struct brcm_ahci_priv *priv = hpriv->plat_data; ++ int ret; ++ ++ /* Make sure clocks are turned on before re-configuration */ ++ ret = ahci_platform_enable_clks(hpriv); ++ if (ret) ++ return ret; + + brcm_sata_init(priv); + brcm_sata_phys_enable(priv); + brcm_sata_alpm_init(hpriv); +- return ahci_platform_resume(dev); ++ ++ /* Since we had to enable clocks earlier on, we cannot use ++ * ahci_platform_resume() as-is since a second call to ++ * ahci_platform_enable_resources() would bump up the resources ++ * (regulators, clocks, PHYs) count artificially so we copy the part ++ * after ahci_platform_enable_resources(). ++ */ ++ ret = ahci_platform_enable_phys(hpriv); ++ if (ret) ++ goto out_disable_phys; ++ ++ ret = ahci_platform_resume_host(dev); ++ if (ret) ++ goto out_disable_platform_phys; ++ ++ /* We resumed so update PM runtime state */ ++ pm_runtime_disable(dev); ++ pm_runtime_set_active(dev); ++ pm_runtime_enable(dev); ++ ++ return 0; ++ ++out_disable_platform_phys: ++ ahci_platform_disable_phys(hpriv); ++out_disable_phys: ++ brcm_sata_phys_disable(priv); ++ ahci_platform_disable_clks(hpriv); ++ return ret; + } + #endif + +@@ -410,44 +438,71 @@ static int brcm_ahci_probe(struct platform_device *pdev) + if (!IS_ERR_OR_NULL(priv->rcdev)) + reset_control_deassert(priv->rcdev); + +- if ((priv->version == BRCM_SATA_BCM7425) || +- (priv->version == BRCM_SATA_NSP)) { +- priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ; ++ hpriv = ahci_platform_get_resources(pdev, 0); ++ if (IS_ERR(hpriv)) { ++ ret = PTR_ERR(hpriv); ++ goto out_reset; ++ } ++ ++ hpriv->plat_data = priv; ++ hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO; ++ ++ switch (priv->version) { ++ case BRCM_SATA_BCM7425: ++ hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE; ++ /* fall through */ ++ case BRCM_SATA_NSP: ++ hpriv->flags |= AHCI_HFLAG_NO_NCQ; + priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE; ++ break; ++ default: ++ break; + } + ++ ret = ahci_platform_enable_clks(hpriv); ++ if (ret) ++ goto out_reset; ++ ++ /* Must be first so as to configure endianness including that ++ * of the standard AHCI register space. 
++ */ + brcm_sata_init(priv); + +- priv->port_mask = brcm_ahci_get_portmask(pdev, priv); +- if (!priv->port_mask) +- return -ENODEV; ++ /* Initializes priv->port_mask which is used below */ ++ priv->port_mask = brcm_ahci_get_portmask(hpriv, priv); ++ if (!priv->port_mask) { ++ ret = -ENODEV; ++ goto out_disable_clks; ++ } + ++ /* Must be done before ahci_platform_enable_phys() */ + brcm_sata_phys_enable(priv); + +- hpriv = ahci_platform_get_resources(pdev, 0); +- if (IS_ERR(hpriv)) +- return PTR_ERR(hpriv); +- hpriv->plat_data = priv; +- hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP; +- + brcm_sata_alpm_init(hpriv); + +- ret = ahci_platform_enable_resources(hpriv); ++ ret = ahci_platform_enable_phys(hpriv); + if (ret) +- return ret; +- +- if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ) +- hpriv->flags |= AHCI_HFLAG_NO_NCQ; +- hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO; ++ goto out_disable_phys; + + ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info, + &ahci_platform_sht); + if (ret) +- return ret; ++ goto out_disable_platform_phys; + + dev_info(dev, "Broadcom AHCI SATA3 registered\n"); + + return 0; ++ ++out_disable_platform_phys: ++ ahci_platform_disable_phys(hpriv); ++out_disable_phys: ++ brcm_sata_phys_disable(priv); ++out_disable_clks: ++ ahci_platform_disable_clks(hpriv); ++out_reset: ++ if (!IS_ERR_OR_NULL(priv->rcdev)) ++ reset_control_assert(priv->rcdev); ++ return ret; + } + + static int brcm_ahci_remove(struct platform_device *pdev) +@@ -457,12 +512,12 @@ static int brcm_ahci_remove(struct platform_device *pdev) + struct brcm_ahci_priv *priv = hpriv->plat_data; + int ret; + ++ brcm_sata_phys_disable(priv); ++ + ret = ata_platform_remove_one(pdev); + if (ret) + return ret; + +- brcm_sata_phys_disable(priv); +- + return 0; + } + +diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c +index 8befce036af8..129556fcf6be 100644 +--- a/drivers/ata/libahci_platform.c ++++ b/drivers/ata/libahci_platform.c +@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops); + * RETURNS: + * 0 on success otherwise a negative error code + */ +-static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv) ++int ahci_platform_enable_phys(struct ahci_host_priv *hpriv) + { + int rc, i; + +@@ -74,6 +74,7 @@ disable_phys: + } + return rc; + } ++EXPORT_SYMBOL_GPL(ahci_platform_enable_phys); + + /** + * ahci_platform_disable_phys - Disable PHYs +@@ -81,7 +82,7 @@ disable_phys: + * + * This function disables all PHYs found in hpriv->phys. 
+ */ +-static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) ++void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) + { + int i; + +@@ -90,6 +91,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) + phy_exit(hpriv->phys[i]); + } + } ++EXPORT_SYMBOL_GPL(ahci_platform_disable_phys); + + /** + * ahci_platform_enable_clks - Enable platform clocks +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index 74c9b3032d46..84b183a6424e 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -5325,6 +5325,30 @@ void ata_qc_complete(struct ata_queued_cmd *qc) + } + } + ++/** ++ * ata_qc_get_active - get bitmask of active qcs ++ * @ap: port in question ++ * ++ * LOCKING: ++ * spin_lock_irqsave(host lock) ++ * ++ * RETURNS: ++ * Bitmask of active qcs ++ */ ++u64 ata_qc_get_active(struct ata_port *ap) ++{ ++ u64 qc_active = ap->qc_active; ++ ++ /* ATA_TAG_INTERNAL is sent to hw as tag 0 */ ++ if (qc_active & (1ULL << ATA_TAG_INTERNAL)) { ++ qc_active |= (1 << 0); ++ qc_active &= ~(1ULL << ATA_TAG_INTERNAL); ++ } ++ ++ return qc_active; ++} ++EXPORT_SYMBOL_GPL(ata_qc_get_active); ++ + /** + * ata_qc_complete_multiple - Complete multiple qcs successfully + * @ap: port in question +diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c +index 8e9cb198fcd1..ca6c706e9c25 100644 +--- a/drivers/ata/sata_fsl.c ++++ b/drivers/ata/sata_fsl.c +@@ -1278,7 +1278,7 @@ static void sata_fsl_host_intr(struct ata_port *ap) + i, ioread32(hcr_base + CC), + ioread32(hcr_base + CA)); + } +- ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); ++ ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); + return; + + } else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) { +diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c +index ad385a113391..bde695a32097 100644 +--- a/drivers/ata/sata_mv.c ++++ b/drivers/ata/sata_mv.c +@@ -2827,7 +2827,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp + } + + if (work_done) { +- ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); ++ ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); + + /* Update the software queue position index in hardware */ + writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | +diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c +index 56946012d113..7510303111fa 100644 +--- a/drivers/ata/sata_nv.c ++++ b/drivers/ata/sata_nv.c +@@ -984,7 +984,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) + check_commands = 0; + check_commands &= ~(1 << pos); + } +- ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); ++ ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); + } + } + +diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c +index fd1e19f1a49f..3666afa639d1 100644 +--- a/drivers/block/xen-blkback/blkback.c ++++ b/drivers/block/xen-blkback/blkback.c +@@ -936,6 +936,8 @@ next: + out_of_memory: + pr_alert("%s: out of memory\n", __func__); + put_free_pages(ring, pages_to_gnt, segs_to_map); ++ for (i = last_map; i < num; i++) ++ pages[i]->handle = BLKBACK_INVALID_HANDLE; + return -ENOMEM; + } + +diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c +index b90dbcd99c03..c4cd68116e7f 100644 +--- a/drivers/block/xen-blkback/xenbus.c ++++ b/drivers/block/xen-blkback/xenbus.c +@@ -171,6 +171,15 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid) + blkif->domid = domid; + atomic_set(&blkif->refcnt, 1); + 
init_completion(&blkif->drain_complete); ++ ++ /* ++ * Because freeing back to the cache may be deferred, it is not ++ * safe to unload the module (and hence destroy the cache) until ++ * this has completed. To prevent premature unloading, take an ++ * extra module reference here and release only when the object ++ * has been freed back to the cache. ++ */ ++ __module_get(THIS_MODULE); + INIT_WORK(&blkif->free_work, xen_blkif_deferred_free); + + return blkif; +@@ -320,6 +329,7 @@ static void xen_blkif_free(struct xen_blkif *blkif) + + /* Make sure everything is drained before shutting down */ + kmem_cache_free(xen_blkif_cachep, blkif); ++ module_put(THIS_MODULE); + } + + int __init xen_blkif_interface_init(void) +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 23e606aaaea4..04cf767d0708 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -1200,7 +1200,7 @@ static int btusb_open(struct hci_dev *hdev) + if (data->setup_on_usb) { + err = data->setup_on_usb(hdev); + if (err < 0) +- return err; ++ goto setup_fail; + } + + data->intf->needs_remote_wakeup = 1; +@@ -1239,6 +1239,7 @@ done: + + failed: + clear_bit(BTUSB_INTR_RUNNING, &data->flags); ++setup_fail: + usb_autopm_put_interface(data->intf); + return err; + } +diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c +index 470c7ef02ea4..4b04ffbe5e7e 100644 +--- a/drivers/clocksource/timer-riscv.c ++++ b/drivers/clocksource/timer-riscv.c +@@ -41,7 +41,7 @@ static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs) + return get_cycles64(); + } + +-static u64 riscv_sched_clock(void) ++static u64 notrace riscv_sched_clock(void) + { + return get_cycles64(); + } +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c +index 3a1484e7a3ae..c64d20fdc187 100644 +--- a/drivers/devfreq/devfreq.c ++++ b/drivers/devfreq/devfreq.c +@@ -551,26 +551,30 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type, + void *devp) + { + struct devfreq *devfreq = container_of(nb, struct devfreq, nb); +- int ret; ++ int err = -EINVAL; + + mutex_lock(&devfreq->lock); + + devfreq->scaling_min_freq = find_available_min_freq(devfreq); +- if (!devfreq->scaling_min_freq) { +- mutex_unlock(&devfreq->lock); +- return -EINVAL; +- } ++ if (!devfreq->scaling_min_freq) ++ goto out; + + devfreq->scaling_max_freq = find_available_max_freq(devfreq); + if (!devfreq->scaling_max_freq) { +- mutex_unlock(&devfreq->lock); +- return -EINVAL; ++ devfreq->scaling_max_freq = ULONG_MAX; ++ goto out; + } + +- ret = update_devfreq(devfreq); ++ err = update_devfreq(devfreq); ++ ++out: + mutex_unlock(&devfreq->lock); ++ if (err) ++ dev_err(devfreq->dev.parent, ++ "failed to update frequency from OPP notifier (%d)\n", ++ err); + +- return ret; ++ return NOTIFY_OK; + } + + /** +@@ -584,11 +588,6 @@ static void devfreq_dev_release(struct device *dev) + struct devfreq *devfreq = to_devfreq(dev); + + mutex_lock(&devfreq_list_lock); +- if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) { +- mutex_unlock(&devfreq_list_lock); +- dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n"); +- return; +- } + list_del(&devfreq->node); + mutex_unlock(&devfreq_list_lock); + +@@ -643,6 +642,7 @@ struct devfreq *devfreq_add_device(struct device *dev, + devfreq->dev.parent = dev; + devfreq->dev.class = devfreq_class; + devfreq->dev.release = devfreq_dev_release; ++ INIT_LIST_HEAD(&devfreq->node); + devfreq->profile = profile; + strncpy(devfreq->governor_name, 
governor_name, DEVFREQ_NAME_LEN); + devfreq->previous_freq = profile->initial_freq; +@@ -1196,7 +1196,7 @@ static ssize_t available_governors_show(struct device *d, + * The devfreq with immutable governor (e.g., passive) shows + * only own governor. + */ +- if (df->governor->immutable) { ++ if (df->governor && df->governor->immutable) { + count = scnprintf(&buf[count], DEVFREQ_NAME_LEN, + "%s ", df->governor_name); + /* +diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c +index cafb1cc065bb..bf95f1d551c5 100644 +--- a/drivers/dma/dma-jz4780.c ++++ b/drivers/dma/dma-jz4780.c +@@ -1004,7 +1004,8 @@ static const struct jz4780_dma_soc_data jz4740_dma_soc_data = { + static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = { + .nb_channels = 6, + .transfer_ord_max = 5, +- .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC, ++ .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC | ++ JZ_SOC_DATA_BREAK_LINKS, + }; + + static const struct jz4780_dma_soc_data jz4770_dma_soc_data = { +diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c +index ec4adf4260a0..256fc662c500 100644 +--- a/drivers/dma/virt-dma.c ++++ b/drivers/dma/virt-dma.c +@@ -104,9 +104,8 @@ static void vchan_complete(unsigned long arg) + dmaengine_desc_get_callback(&vd->tx, &cb); + + list_del(&vd->node); +- vchan_vdesc_fini(vd); +- + dmaengine_desc_callback_invoke(&cb, &vd->tx_result); ++ vchan_vdesc_fini(vd); + } + } + +diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c +index b132ab9ad607..715e491dfbc3 100644 +--- a/drivers/firewire/net.c ++++ b/drivers/firewire/net.c +@@ -250,7 +250,11 @@ static int fwnet_header_cache(const struct neighbour *neigh, + h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h))); + h->h_proto = type; + memcpy(h->h_dest, neigh->ha, net->addr_len); +- hh->hh_len = FWNET_HLEN; ++ ++ /* Pairs with the READ_ONCE() in neigh_resolve_output(), ++ * neigh_hh_output() and neigh_update_hhs(). 
++ */ ++ smp_store_release(&hh->hh_len, FWNET_HLEN); + + return 0; + } +diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c +index 92f843eaf1e0..7a30952b463d 100644 +--- a/drivers/firmware/arm_scmi/bus.c ++++ b/drivers/firmware/arm_scmi/bus.c +@@ -135,8 +135,10 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol) + return NULL; + + id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL); +- if (id < 0) +- goto free_mem; ++ if (id < 0) { ++ kfree(scmi_dev); ++ return NULL; ++ } + + scmi_dev->id = id; + scmi_dev->protocol_id = protocol; +@@ -154,8 +156,6 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol) + put_dev: + put_device(&scmi_dev->dev); + ida_simple_remove(&scmi_bus_id, id); +-free_mem: +- kfree(scmi_dev); + return NULL; + } + +diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c +index 76b0c354a027..de1a9a1f9f14 100644 +--- a/drivers/firmware/efi/rci2-table.c ++++ b/drivers/firmware/efi/rci2-table.c +@@ -81,6 +81,9 @@ static int __init efi_rci2_sysfs_init(void) + struct kobject *tables_kobj; + int ret = -ENOMEM; + ++ if (rci2_table_phys == EFI_INVALID_TABLE_ADDR) ++ return 0; ++ + rci2_base = memremap(rci2_table_phys, + sizeof(struct rci2_table_global_hdr), + MEMREMAP_WB); +diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c +index 43d3fa5f511a..0fb2211f9573 100644 +--- a/drivers/gpio/gpio-xtensa.c ++++ b/drivers/gpio/gpio-xtensa.c +@@ -44,15 +44,14 @@ static inline unsigned long enable_cp(unsigned long *cpenable) + unsigned long flags; + + local_irq_save(flags); +- RSR_CPENABLE(*cpenable); +- WSR_CPENABLE(*cpenable | BIT(XCHAL_CP_ID_XTIOP)); +- ++ *cpenable = xtensa_get_sr(cpenable); ++ xtensa_set_sr(*cpenable | BIT(XCHAL_CP_ID_XTIOP), cpenable); + return flags; + } + + static inline void disable_cp(unsigned long flags, unsigned long cpenable) + { +- WSR_CPENABLE(cpenable); ++ xtensa_set_sr(cpenable, cpenable); + local_irq_restore(flags); + } + +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c +index 104ed299d5ea..99d19f80440e 100644 +--- a/drivers/gpio/gpiolib.c ++++ b/drivers/gpio/gpiolib.c +@@ -220,6 +220,14 @@ int gpiod_get_direction(struct gpio_desc *desc) + chip = gpiod_to_chip(desc); + offset = gpio_chip_hwgpio(desc); + ++ /* ++ * Open drain emulation using input mode may incorrectly report ++ * input here, fix that up. 
++ */ ++ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) && ++ test_bit(FLAG_IS_OUT, &desc->flags)) ++ return 0; ++ + if (!chip->get_direction) + return -ENOTSUPP; + +diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +index 5850c8e34caa..97d11d792351 100644 +--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c ++++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +@@ -261,23 +261,29 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev, + { + u32 tmp; + +- /* Put DF on broadcast mode */ +- adev->df_funcs->enable_broadcast_mode(adev, true); +- +- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) { +- tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); +- tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; +- tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY; +- WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); +- } else { +- tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); +- tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; +- tmp |= DF_V3_6_MGCG_DISABLE; +- WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); +- } ++ if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) { ++ /* Put DF on broadcast mode */ ++ adev->df_funcs->enable_broadcast_mode(adev, true); ++ ++ if (enable) { ++ tmp = RREG32_SOC15(DF, 0, ++ mmDF_PIE_AON0_DfGlobalClkGater); ++ tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; ++ tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY; ++ WREG32_SOC15(DF, 0, ++ mmDF_PIE_AON0_DfGlobalClkGater, tmp); ++ } else { ++ tmp = RREG32_SOC15(DF, 0, ++ mmDF_PIE_AON0_DfGlobalClkGater); ++ tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; ++ tmp |= DF_V3_6_MGCG_DISABLE; ++ WREG32_SOC15(DF, 0, ++ mmDF_PIE_AON0_DfGlobalClkGater, tmp); ++ } + +- /* Exit broadcast mode */ +- adev->df_funcs->enable_broadcast_mode(adev, false); ++ /* Exit broadcast mode */ ++ adev->df_funcs->enable_broadcast_mode(adev, false); ++ } + } + + static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev, +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +index 87dd55e9d72b..cc88ba76a8d4 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +@@ -6184,7 +6184,23 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, + bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; + bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; + +- /* EVENT_WRITE_EOP - flush caches, send int */ ++ /* Workaround for cache flush problems. First send a dummy EOP ++ * event down the pipe with seq one below. 
++ */ ++ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); ++ amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | ++ EOP_TC_ACTION_EN | ++ EOP_TC_WB_ACTION_EN | ++ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | ++ EVENT_INDEX(5))); ++ amdgpu_ring_write(ring, addr & 0xfffffffc); ++ amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | ++ DATA_SEL(1) | INT_SEL(0)); ++ amdgpu_ring_write(ring, lower_32_bits(seq - 1)); ++ amdgpu_ring_write(ring, upper_32_bits(seq - 1)); ++ ++ /* Then send the real EOP event down the pipe: ++ * EVENT_WRITE_EOP - flush caches, send int */ + amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); + amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | + EOP_TC_ACTION_EN | +@@ -6926,7 +6942,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { + 5 + /* COND_EXEC */ + 7 + /* PIPELINE_SYNC */ + VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */ +- 8 + /* FENCE for VM_FLUSH */ ++ 12 + /* FENCE for VM_FLUSH */ + 20 + /* GDS switch */ + 4 + /* double SWITCH_BUFFER, + the first COND_EXEC jump to the place just +@@ -6938,7 +6954,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { + 31 + /* DE_META */ + 3 + /* CNTX_CTRL */ + 5 + /* HDP_INVL */ +- 8 + 8 + /* FENCE x2 */ ++ 12 + 12 + /* FENCE x2 */ + 2, /* SWITCH_BUFFER */ + .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */ + .emit_ib = gfx_v8_0_ring_emit_ib_gfx, +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c +index 067f5579f452..793aa8e8ec9a 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c +@@ -373,7 +373,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link) + + if (GPIO_RESULT_OK != dal_ddc_open( + ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) { +- dal_gpio_destroy_ddc(&ddc); ++ dal_ddc_close(ddc); + + return present; + } +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +index 5a583707d198..0ab890c927ec 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +@@ -3492,7 +3492,14 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) + if (link_enc->funcs->fec_set_enable && + link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { + if (link->fec_state == dc_link_fec_ready && enable) { +- msleep(1); ++ /* Accord to DP spec, FEC enable sequence can first ++ * be transmitted anytime after 1000 LL codes have ++ * been transmitted on the link after link training ++ * completion. Using 1 lane RBR should have the maximum ++ * time for transmitting 1000 LL codes which is 6.173 us. ++ * So use 7 microseconds delay instead. 
++ */ ++ udelay(7); + link_enc->funcs->fec_set_enable(link_enc, true); + link->fec_state = dc_link_fec_enabled; + } else if (link->fec_state == dc_link_fec_enabled && !enable) { +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +index 78b2cc2e122f..3b7769a3e67e 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +@@ -1419,13 +1419,20 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state + + static void acquire_dsc(struct resource_context *res_ctx, + const struct resource_pool *pool, +- struct display_stream_compressor **dsc) ++ struct display_stream_compressor **dsc, ++ int pipe_idx) + { + int i; + + ASSERT(*dsc == NULL); + *dsc = NULL; + ++ if (pool->res_cap->num_dsc == pool->res_cap->num_opp) { ++ *dsc = pool->dscs[pipe_idx]; ++ res_ctx->is_dsc_acquired[pipe_idx] = true; ++ return; ++ } ++ + /* Find first free DSC */ + for (i = 0; i < pool->res_cap->num_dsc; i++) + if (!res_ctx->is_dsc_acquired[i]) { +@@ -1468,7 +1475,7 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc, + if (pipe_ctx->stream != dc_stream) + continue; + +- acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc); ++ acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i); + + /* The number of DSCs can be less than the number of pipes */ + if (!pipe_ctx->stream_res.dsc) { +@@ -1669,7 +1676,7 @@ static bool dcn20_split_stream_for_odm( + next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + if (next_odm_pipe->stream->timing.flags.DSC == 1) { +- acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc); ++ acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx); + ASSERT(next_odm_pipe->stream_res.dsc); + if (next_odm_pipe->stream_res.dsc == NULL) + return false; +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +index 5ab9d6240498..e95025b1d14d 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +@@ -492,15 +492,23 @@ void enc2_stream_encoder_dp_unblank( + DP_VID_N_MUL, n_multiply); + } + +- /* set DIG_START to 0x1 to reset FIFO */ ++ /* make sure stream is disabled before resetting steer fifo */ ++ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false); ++ REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000); + ++ /* set DIG_START to 0x1 to reset FIFO */ + REG_UPDATE(DIG_FE_CNTL, DIG_START, 1); ++ udelay(1); + + /* write 0 to take the FIFO out of reset */ + + REG_UPDATE(DIG_FE_CNTL, DIG_START, 0); + +- /* switch DP encoder to CRTC data */ ++ /* switch DP encoder to CRTC data, but reset it the fifo first. It may happen ++ * that it overflows during mode transition, and sometimes doesn't recover. 
++ */ ++ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1); ++ udelay(10); + + REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0); + +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +index de182185fe1f..b0e5e64df212 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +@@ -258,7 +258,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { + .vmm_page_size_bytes = 4096, + .dram_clock_change_latency_us = 23.84, + .return_bus_width_bytes = 64, +- .dispclk_dppclk_vco_speed_mhz = 3550, ++ .dispclk_dppclk_vco_speed_mhz = 3600, + .xfc_bus_transport_time_us = 4, + .xfc_xbuf_latency_tolerance_us = 4, + .use_urgent_burst_bw = 1, +diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +index 58c091ab67b2..a066e9297777 100644 +--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c ++++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +@@ -844,6 +844,7 @@ static int smu_sw_init(void *handle) + smu->smu_baco.platform_support = false; + + mutex_init(&smu->sensor_lock); ++ mutex_init(&smu->metrics_lock); + + smu->watermarks_bitmap = 0; + smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; +diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +index d493a3f8c07a..08a717a34bd6 100644 +--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c ++++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +@@ -910,18 +910,21 @@ static int arcturus_get_metrics_table(struct smu_context *smu, + struct smu_table_context *smu_table= &smu->smu_table; + int ret = 0; + ++ mutex_lock(&smu->metrics_lock); + if (!smu_table->metrics_time || + time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, + (void *)smu_table->metrics_table, false); + if (ret) { + pr_info("Failed to export SMU metrics table!\n"); ++ mutex_unlock(&smu->metrics_lock); + return ret; + } + smu_table->metrics_time = jiffies; + } + + memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); ++ mutex_unlock(&smu->metrics_lock); + + return ret; + } +@@ -1388,12 +1391,17 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu, + "VR", + "COMPUTE", + "CUSTOM"}; ++ static const char *title[] = { ++ "PROFILE_INDEX(NAME)"}; + uint32_t i, size = 0; + int16_t workload_type = 0; + + if (!smu->pm_enabled || !buf) + return -EINVAL; + ++ size += sprintf(buf + size, "%16s\n", ++ title[0]); ++ + for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { + /* + * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT +diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +index 23171a4d9a31..5ad9a7878f6b 100644 +--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h ++++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +@@ -345,6 +345,7 @@ struct smu_context + const struct pptable_funcs *ppt_funcs; + struct mutex mutex; + struct mutex sensor_lock; ++ struct mutex metrics_lock; + uint64_t pool_size; + + struct smu_table_context smu_table; +diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +index 328e258a6895..7d913a06ebac 100644 +--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c ++++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +@@ -547,17 +547,20 @@ static int navi10_get_metrics_table(struct smu_context *smu, + struct smu_table_context *smu_table= &smu->smu_table; 
+ int ret = 0; + ++ mutex_lock(&smu->metrics_lock); + if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) { + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, + (void *)smu_table->metrics_table, false); + if (ret) { + pr_info("Failed to export SMU metrics table!\n"); ++ mutex_unlock(&smu->metrics_lock); + return ret; + } + smu_table->metrics_time = jiffies; + } + + memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); ++ mutex_unlock(&smu->metrics_lock); + + return ret; + } +diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +index 92c393f613d3..3c3f719971f7 100644 +--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c ++++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +@@ -1691,17 +1691,20 @@ static int vega20_get_metrics_table(struct smu_context *smu, + struct smu_table_context *smu_table= &smu->smu_table; + int ret = 0; + ++ mutex_lock(&smu->metrics_lock); + if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, + (void *)smu_table->metrics_table, false); + if (ret) { + pr_info("Failed to export SMU metrics table!\n"); ++ mutex_unlock(&smu->metrics_lock); + return ret; + } + smu_table->metrics_time = jiffies; + } + + memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); ++ mutex_unlock(&smu->metrics_lock); + + return ret; + } +diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c +index 06a506c29463..d564bfcab6a3 100644 +--- a/drivers/gpu/drm/i915/gt/intel_lrc.c ++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c +@@ -525,7 +525,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) + */ + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &rq->fence.flags)) { +- spin_lock(&rq->lock); ++ spin_lock_nested(&rq->lock, ++ SINGLE_DEPTH_NESTING); + i915_request_cancel_breadcrumb(rq); + spin_unlock(&rq->lock); + } +diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c +index f9c9e32b299c..35bb825d1918 100644 +--- a/drivers/gpu/drm/mcde/mcde_dsi.c ++++ b/drivers/gpu/drm/mcde/mcde_dsi.c +@@ -935,11 +935,13 @@ static int mcde_dsi_bind(struct device *dev, struct device *master, + for_each_available_child_of_node(dev->of_node, child) { + panel = of_drm_find_panel(child); + if (IS_ERR(panel)) { +- dev_err(dev, "failed to find panel try bridge (%lu)\n", ++ dev_err(dev, "failed to find panel try bridge (%ld)\n", + PTR_ERR(panel)); ++ panel = NULL; ++ + bridge = of_drm_find_bridge(child); + if (IS_ERR(bridge)) { +- dev_err(dev, "failed to find bridge (%lu)\n", ++ dev_err(dev, "failed to find bridge (%ld)\n", + PTR_ERR(bridge)); + return PTR_ERR(bridge); + } +diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c +index a052364a5d74..edd45f434ccd 100644 +--- a/drivers/gpu/drm/msm/msm_gpu.c ++++ b/drivers/gpu/drm/msm/msm_gpu.c +@@ -16,6 +16,7 @@ + #include <linux/pm_opp.h> + #include <linux/devfreq.h> + #include <linux/devcoredump.h> ++#include <linux/sched/task.h> + + /* + * Power Management: +diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c +index b5b1a34f896f..d735ea7e2d88 100644 +--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c ++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c +@@ -326,9 +326,9 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder, + * same size as the native one (e.g. 
different + * refresh rate) + */ +- if (adjusted_mode->hdisplay == native_mode->hdisplay && +- adjusted_mode->vdisplay == native_mode->vdisplay && +- adjusted_mode->type & DRM_MODE_TYPE_DRIVER) ++ if (mode->hdisplay == native_mode->hdisplay && ++ mode->vdisplay == native_mode->vdisplay && ++ mode->type & DRM_MODE_TYPE_DRIVER) + break; + mode = native_mode; + asyc->scaler.full = true; +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c +index a442a955f98c..eb31c5b6c8e9 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.c ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c +@@ -245,14 +245,22 @@ nouveau_conn_atomic_duplicate_state(struct drm_connector *connector) + void + nouveau_conn_reset(struct drm_connector *connector) + { ++ struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_conn_atom *asyc; + +- if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL)))) +- return; ++ if (drm_drv_uses_atomic_modeset(connector->dev)) { ++ if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL)))) ++ return; ++ ++ if (connector->state) ++ nouveau_conn_atomic_destroy_state(connector, ++ connector->state); ++ ++ __drm_atomic_helper_connector_reset(connector, &asyc->state); ++ } else { ++ asyc = &nv_connector->properties_state; ++ } + +- if (connector->state) +- nouveau_conn_atomic_destroy_state(connector, connector->state); +- __drm_atomic_helper_connector_reset(connector, &asyc->state); + asyc->dither.mode = DITHERING_MODE_AUTO; + asyc->dither.depth = DITHERING_DEPTH_AUTO; + asyc->scaler.mode = DRM_MODE_SCALE_NONE; +@@ -276,8 +284,14 @@ void + nouveau_conn_attach_properties(struct drm_connector *connector) + { + struct drm_device *dev = connector->dev; +- struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state); + struct nouveau_display *disp = nouveau_display(dev); ++ struct nouveau_connector *nv_connector = nouveau_connector(connector); ++ struct nouveau_conn_atom *armc; ++ ++ if (drm_drv_uses_atomic_modeset(connector->dev)) ++ armc = nouveau_conn_atom(connector->state); ++ else ++ armc = &nv_connector->properties_state; + + /* Init DVI-I specific properties. */ + if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) +@@ -749,9 +763,9 @@ static int + nouveau_connector_set_property(struct drm_connector *connector, + struct drm_property *property, uint64_t value) + { +- struct nouveau_conn_atom *asyc = nouveau_conn_atom(connector->state); + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; ++ struct nouveau_conn_atom *asyc = &nv_connector->properties_state; + struct drm_encoder *encoder = to_drm_encoder(nv_encoder); + int ret; + +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h +index f43a8d63aef8..de84fb4708c7 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.h ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.h +@@ -29,6 +29,7 @@ + + #include <nvif/notify.h> + ++#include <drm/drm_crtc.h> + #include <drm/drm_edid.h> + #include <drm/drm_encoder.h> + #include <drm/drm_dp_helper.h> +@@ -44,6 +45,60 @@ struct dcb_output; + struct nouveau_backlight; + #endif + ++#define nouveau_conn_atom(p) \ ++ container_of((p), struct nouveau_conn_atom, state) ++ ++struct nouveau_conn_atom { ++ struct drm_connector_state state; ++ ++ struct { ++ /* The enum values specifically defined here match nv50/gf119 ++ * hw values, and the code relies on this. 
++ */ ++ enum { ++ DITHERING_MODE_OFF = 0x00, ++ DITHERING_MODE_ON = 0x01, ++ DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON, ++ DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON, ++ DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON, ++ DITHERING_MODE_AUTO ++ } mode; ++ enum { ++ DITHERING_DEPTH_6BPC = 0x00, ++ DITHERING_DEPTH_8BPC = 0x02, ++ DITHERING_DEPTH_AUTO ++ } depth; ++ } dither; ++ ++ struct { ++ int mode; /* DRM_MODE_SCALE_* */ ++ struct { ++ enum { ++ UNDERSCAN_OFF, ++ UNDERSCAN_ON, ++ UNDERSCAN_AUTO, ++ } mode; ++ u32 hborder; ++ u32 vborder; ++ } underscan; ++ bool full; ++ } scaler; ++ ++ struct { ++ int color_vibrance; ++ int vibrant_hue; ++ } procamp; ++ ++ union { ++ struct { ++ bool dither:1; ++ bool scaler:1; ++ bool procamp:1; ++ }; ++ u8 mask; ++ } set; ++}; ++ + struct nouveau_connector { + struct drm_connector base; + enum dcb_connector_type type; +@@ -63,6 +118,12 @@ struct nouveau_connector { + #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT + struct nouveau_backlight *backlight; + #endif ++ /* ++ * Our connector property code expects a nouveau_conn_atom struct ++ * even on pre-nv50 where we do not support atomic. This embedded ++ * version gets used in the non atomic modeset case. ++ */ ++ struct nouveau_conn_atom properties_state; + }; + + static inline struct nouveau_connector *nouveau_connector( +@@ -121,61 +182,6 @@ extern int nouveau_ignorelid; + extern int nouveau_duallink; + extern int nouveau_hdmimhz; + +-#include <drm/drm_crtc.h> +-#define nouveau_conn_atom(p) \ +- container_of((p), struct nouveau_conn_atom, state) +- +-struct nouveau_conn_atom { +- struct drm_connector_state state; +- +- struct { +- /* The enum values specifically defined here match nv50/gf119 +- * hw values, and the code relies on this. +- */ +- enum { +- DITHERING_MODE_OFF = 0x00, +- DITHERING_MODE_ON = 0x01, +- DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON, +- DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON, +- DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON, +- DITHERING_MODE_AUTO +- } mode; +- enum { +- DITHERING_DEPTH_6BPC = 0x00, +- DITHERING_DEPTH_8BPC = 0x02, +- DITHERING_DEPTH_AUTO +- } depth; +- } dither; +- +- struct { +- int mode; /* DRM_MODE_SCALE_* */ +- struct { +- enum { +- UNDERSCAN_OFF, +- UNDERSCAN_ON, +- UNDERSCAN_AUTO, +- } mode; +- u32 hborder; +- u32 vborder; +- } underscan; +- bool full; +- } scaler; +- +- struct { +- int color_vibrance; +- int vibrant_hue; +- } procamp; +- +- union { +- struct { +- bool dither:1; +- bool scaler:1; +- bool procamp:1; +- }; +- u8 mask; +- } set; +-}; +- + void nouveau_conn_attach_properties(struct drm_connector *); + void nouveau_conn_reset(struct drm_connector *); + struct drm_connector_state * +diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +index eb8071a4d6d0..9c3bdfd20337 100644 +--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c ++++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +@@ -683,8 +683,6 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master, + struct sun4i_hdmi *hdmi = dev_get_drvdata(dev); + + cec_unregister_adapter(hdmi->cec_adap); +- drm_connector_cleanup(&hdmi->connector); +- drm_encoder_cleanup(&hdmi->encoder); + i2c_del_adapter(hdmi->i2c); + i2c_put_adapter(hdmi->ddc_i2c); + clk_disable_unprepare(hdmi->mod_clk); +diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c +index 7608ee053114..ac44bf752ff1 100644 +--- a/drivers/hid/i2c-hid/i2c-hid-core.c ++++ b/drivers/hid/i2c-hid/i2c-hid-core.c +@@ -48,6 +48,7 @@ + 
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) + #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) + #define I2C_HID_QUIRK_BOGUS_IRQ BIT(4) ++#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5) + + /* flags */ + #define I2C_HID_STARTED 0 +@@ -174,6 +175,8 @@ static const struct i2c_hid_quirks { + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, + { USB_VENDOR_ID_ELAN, HID_ANY_ID, + I2C_HID_QUIRK_BOGUS_IRQ }, ++ { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID, ++ I2C_HID_QUIRK_RESET_ON_RESUME }, + { 0, 0 } + }; + +@@ -1214,8 +1217,15 @@ static int i2c_hid_resume(struct device *dev) + * solves "incomplete reports" on Raydium devices 2386:3118 and + * 2386:4B33 and fixes various SIS touchscreens no longer sending + * data after a suspend/resume. ++ * ++ * However some ALPS touchpads generate IRQ storm without reset, so ++ * let's still reset them here. + */ +- ret = i2c_hid_set_power(client, I2C_HID_PWR_ON); ++ if (ihid->quirks & I2C_HID_QUIRK_RESET_ON_RESUME) ++ ret = i2c_hid_hwreset(client); ++ else ++ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON); ++ + if (ret) + return ret; + +diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c +index 2e37f8a6d8cf..be661396095c 100644 +--- a/drivers/iio/accel/st_accel_core.c ++++ b/drivers/iio/accel/st_accel_core.c +@@ -993,6 +993,7 @@ static const struct iio_trigger_ops st_accel_trigger_ops = { + #define ST_ACCEL_TRIGGER_OPS NULL + #endif + ++#ifdef CONFIG_ACPI + static const struct iio_mount_matrix * + get_mount_matrix(const struct iio_dev *indio_dev, + const struct iio_chan_spec *chan) +@@ -1013,7 +1014,6 @@ static const struct iio_chan_spec_ext_info mount_matrix_ext_info[] = { + static int apply_acpi_orientation(struct iio_dev *indio_dev, + struct iio_chan_spec *channels) + { +-#ifdef CONFIG_ACPI + struct st_sensor_data *adata = iio_priv(indio_dev); + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + struct acpi_device *adev; +@@ -1141,10 +1141,14 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev, + out: + kfree(buffer.pointer); + return ret; ++} + #else /* !CONFIG_ACPI */ ++static int apply_acpi_orientation(struct iio_dev *indio_dev, ++ struct iio_chan_spec *channels) ++{ + return 0; +-#endif + } ++#endif + + /* + * st_accel_get_settings() - get sensor settings from device name +diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c +index da073d72f649..e480529b3f04 100644 +--- a/drivers/iio/adc/max9611.c ++++ b/drivers/iio/adc/max9611.c +@@ -89,6 +89,12 @@ + #define MAX9611_TEMP_SCALE_NUM 1000000 + #define MAX9611_TEMP_SCALE_DIV 2083 + ++/* ++ * Conversion time is 2 ms (typically) at Ta=25 degreeC ++ * No maximum value is known, so play it safe. ++ */ ++#define MAX9611_CONV_TIME_US_RANGE 3000, 3300 ++ + struct max9611_dev { + struct device *dev; + struct i2c_client *i2c_client; +@@ -236,11 +242,9 @@ static int max9611_read_single(struct max9611_dev *max9611, + return ret; + } + +- /* +- * need a delay here to make register configuration +- * stabilize. 1 msec at least, from empirical testing. +- */ +- usleep_range(1000, 2000); ++ /* need a delay here to make register configuration stabilize. 
*/ ++ ++ usleep_range(MAX9611_CONV_TIME_US_RANGE); + + ret = i2c_smbus_read_word_swapped(max9611->i2c_client, reg_addr); + if (ret < 0) { +@@ -507,7 +511,7 @@ static int max9611_init(struct max9611_dev *max9611) + MAX9611_REG_CTRL2, 0); + return ret; + } +- usleep_range(1000, 2000); ++ usleep_range(MAX9611_CONV_TIME_US_RANGE); + + return 0; + } +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index d78f67623f24..50052e9a1731 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -4736,6 +4736,7 @@ err_ib: + err: + unregister_netdevice_notifier(&cma_nb); + ib_sa_unregister_client(&sa_client); ++ unregister_pernet_subsys(&cma_pernet_operations); + err_wq: + destroy_workqueue(cma_wq); + return ret; +diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c +index 680ad27f497d..023478107f0e 100644 +--- a/drivers/infiniband/core/counters.c ++++ b/drivers/infiniband/core/counters.c +@@ -282,6 +282,9 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port) + struct rdma_counter *counter; + int ret; + ++ if (!qp->res.valid) ++ return 0; ++ + if (!rdma_is_port_valid(dev, port)) + return -EINVAL; + +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c +index 8d2f1e38b891..907d99822bf0 100644 +--- a/drivers/infiniband/hw/mlx4/main.c ++++ b/drivers/infiniband/hw/mlx4/main.c +@@ -3008,16 +3008,17 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) + ibdev->ib_active = false; + flush_workqueue(wq); + +- mlx4_ib_close_sriov(ibdev); +- mlx4_ib_mad_cleanup(ibdev); +- ib_unregister_device(&ibdev->ib_dev); +- mlx4_ib_diag_cleanup(ibdev); + if (ibdev->iboe.nb.notifier_call) { + if (unregister_netdevice_notifier(&ibdev->iboe.nb)) + pr_warn("failure unregistering notifier\n"); + ibdev->iboe.nb.notifier_call = NULL; + } + ++ mlx4_ib_close_sriov(ibdev); ++ mlx4_ib_mad_cleanup(ibdev); ++ ib_unregister_device(&ibdev->ib_dev); ++ mlx4_ib_diag_cleanup(ibdev); ++ + mlx4_qp_release_range(dev, ibdev->steer_qpn_base, + ibdev->steer_qpn_count); + kfree(ibdev->ib_uc_qpns_bitmap); +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c +index 831539419c30..e1cfbedefcbc 100644 +--- a/drivers/infiniband/hw/mlx5/main.c ++++ b/drivers/infiniband/hw/mlx5/main.c +@@ -3548,10 +3548,6 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, + } + + INIT_LIST_HEAD(&handler->list); +- if (dst) { +- memcpy(&dest_arr[0], dst, sizeof(*dst)); +- dest_num++; +- } + + for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { + err = parse_flow_attr(dev->mdev, spec, +@@ -3564,6 +3560,11 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, + ib_flow += ((union ib_flow_spec *)ib_flow)->size; + } + ++ if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) { ++ memcpy(&dest_arr[0], dst, sizeof(*dst)); ++ dest_num++; ++ } ++ + if (!flow_is_multicast_only(flow_attr)) + set_underlay_qp(dev, spec, underlay_qpn); + +@@ -3604,10 +3605,8 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, + } + + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { +- if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) { ++ if (!dest_num) + rule_dst = NULL; +- dest_num = 0; +- } + } else { + if (is_egress) + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; +diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c +index f9a492ed900b..831ad578a7b2 100644 +--- 
a/drivers/infiniband/sw/rxe/rxe_recv.c ++++ b/drivers/infiniband/sw/rxe/rxe_recv.c +@@ -389,7 +389,7 @@ void rxe_rcv(struct sk_buff *skb) + + calc_icrc = rxe_icrc_hdr(pkt, skb); + calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt), +- payload_size(pkt)); ++ payload_size(pkt) + bth_pad(pkt)); + calc_icrc = (__force u32)cpu_to_be32(~calc_icrc); + if (unlikely(calc_icrc != pack_icrc)) { + if (skb->protocol == htons(ETH_P_IPV6)) +diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c +index c5d9b558fa90..e5031172c019 100644 +--- a/drivers/infiniband/sw/rxe/rxe_req.c ++++ b/drivers/infiniband/sw/rxe/rxe_req.c +@@ -500,6 +500,12 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, + if (err) + return err; + } ++ if (bth_pad(pkt)) { ++ u8 *pad = payload_addr(pkt) + paylen; ++ ++ memset(pad, 0, bth_pad(pkt)); ++ crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt)); ++ } + } + p = payload_addr(pkt) + paylen + bth_pad(pkt); + +diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c +index 1cbfbd98eb22..c4a8195bf670 100644 +--- a/drivers/infiniband/sw/rxe/rxe_resp.c ++++ b/drivers/infiniband/sw/rxe/rxe_resp.c +@@ -732,6 +732,13 @@ static enum resp_states read_reply(struct rxe_qp *qp, + if (err) + pr_err("Failed copying memory\n"); + ++ if (bth_pad(&ack_pkt)) { ++ struct rxe_dev *rxe = to_rdev(qp->ibqp.device); ++ u8 *pad = payload_addr(&ack_pkt) + payload; ++ ++ memset(pad, 0, bth_pad(&ack_pkt)); ++ icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt)); ++ } + p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt); + *p = ~icrc; + +diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c +index 9b159132405d..dca88f9fdf29 100644 +--- a/drivers/iommu/intel-svm.c ++++ b/drivers/iommu/intel-svm.c +@@ -104,11 +104,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d + { + struct qi_desc desc; + +- /* +- * Do PASID granu IOTLB invalidation if page selective capability is +- * not available. 
+- */ +- if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) { ++ if (pages == -1) { + desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | + QI_EIOTLB_DID(sdev->did) | + QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index bb29aeefcbd0..c7137f50bd1d 100644 +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -2781,7 +2781,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, + write_targets++; + } + } +- if (bio->bi_end_io) { ++ if (rdev && bio->bi_end_io) { + atomic_inc(&rdev->nr_pending); + bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; + bio_set_dev(bio, rdev->bdev); +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 12a8ce83786e..36cd7c2fbf40 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -5726,7 +5726,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) + do_flush = false; + } + +- if (!sh->batch_head) ++ if (!sh->batch_head || sh == sh->batch_head) + set_bit(STRIPE_HANDLE, &sh->state); + clear_bit(STRIPE_DELAYED, &sh->state); + if ((!sh->batch_head || sh == sh->batch_head) && +diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c +index 5ef7daeb8cbd..b14c09cd9593 100644 +--- a/drivers/media/cec/cec-adap.c ++++ b/drivers/media/cec/cec-adap.c +@@ -378,7 +378,8 @@ static void cec_data_cancel(struct cec_data *data, u8 tx_status) + } else { + list_del_init(&data->list); + if (!(data->msg.tx_status & CEC_TX_STATUS_OK)) +- data->adap->transmit_queue_sz--; ++ if (!WARN_ON(!data->adap->transmit_queue_sz)) ++ data->adap->transmit_queue_sz--; + } + + if (data->msg.tx_status & CEC_TX_STATUS_OK) { +@@ -430,6 +431,14 @@ static void cec_flush(struct cec_adapter *adap) + * need to do anything special in that case. + */ + } ++ /* ++ * If something went wrong and this counter isn't what it should ++ * be, then this will reset it back to 0. Warn if it is not 0, ++ * since it indicates a bug, either in this framework or in a ++ * CEC driver. ++ */ ++ if (WARN_ON(adap->transmit_queue_sz)) ++ adap->transmit_queue_sz = 0; + } + + /* +@@ -454,7 +463,7 @@ int cec_thread_func(void *_adap) + bool timeout = false; + u8 attempts; + +- if (adap->transmitting) { ++ if (adap->transmit_in_progress) { + int err; + + /* +@@ -489,7 +498,7 @@ int cec_thread_func(void *_adap) + goto unlock; + } + +- if (adap->transmitting && timeout) { ++ if (adap->transmit_in_progress && timeout) { + /* + * If we timeout, then log that. Normally this does + * not happen and it is an indication of a faulty CEC +@@ -498,14 +507,18 @@ int cec_thread_func(void *_adap) + * so much traffic on the bus that the adapter was + * unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s). + */ +- pr_warn("cec-%s: message %*ph timed out\n", adap->name, +- adap->transmitting->msg.len, +- adap->transmitting->msg.msg); ++ if (adap->transmitting) { ++ pr_warn("cec-%s: message %*ph timed out\n", adap->name, ++ adap->transmitting->msg.len, ++ adap->transmitting->msg.msg); ++ /* Just give up on this. */ ++ cec_data_cancel(adap->transmitting, ++ CEC_TX_STATUS_TIMEOUT); ++ } else { ++ pr_warn("cec-%s: transmit timed out\n", adap->name); ++ } + adap->transmit_in_progress = false; + adap->tx_timeouts++; +- /* Just give up on this. 
*/ +- cec_data_cancel(adap->transmitting, +- CEC_TX_STATUS_TIMEOUT); + goto unlock; + } + +@@ -520,7 +533,8 @@ int cec_thread_func(void *_adap) + data = list_first_entry(&adap->transmit_queue, + struct cec_data, list); + list_del_init(&data->list); +- adap->transmit_queue_sz--; ++ if (!WARN_ON(!data->adap->transmit_queue_sz)) ++ adap->transmit_queue_sz--; + + /* Make this the current transmitting message */ + adap->transmitting = data; +@@ -1083,11 +1097,11 @@ void cec_received_msg_ts(struct cec_adapter *adap, + valid_la = false; + else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED)) + valid_la = false; +- else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4)) ++ else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST)) + valid_la = false; + else if (cec_msg_is_broadcast(msg) && +- adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 && +- !(dir_fl & BCAST2_0)) ++ adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0 && ++ !(dir_fl & BCAST1_4)) + valid_la = false; + } + if (valid_la && min_len) { +diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c +index d1331f828108..039963a7765b 100644 +--- a/drivers/media/usb/b2c2/flexcop-usb.c ++++ b/drivers/media/usb/b2c2/flexcop-usb.c +@@ -295,7 +295,7 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c, + + mutex_unlock(&fc_usb->data_mutex); + +- return 0; ++ return ret; + } + + /* actual bus specific access functions, +diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c +index 02697d86e8c1..ac93e88d7038 100644 +--- a/drivers/media/usb/dvb-usb/af9005.c ++++ b/drivers/media/usb/dvb-usb/af9005.c +@@ -976,8 +976,9 @@ static int af9005_identify_state(struct usb_device *udev, + else if (reply == 0x02) + *cold = 0; + else +- return -EIO; +- deb_info("Identify state cold = %d\n", *cold); ++ ret = -EIO; ++ if (!ret) ++ deb_info("Identify state cold = %d\n", *cold); + + err: + kfree(buf); +diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c +index ac88ade94cda..59609556d969 100644 +--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c ++++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c +@@ -116,6 +116,7 @@ struct pulse8 { + unsigned int vers; + struct completion cmd_done; + struct work_struct work; ++ u8 work_result; + struct delayed_work ping_eeprom_work; + struct cec_msg rx_msg; + u8 data[DATA_SIZE]; +@@ -137,8 +138,10 @@ static void pulse8_irq_work_handler(struct work_struct *work) + { + struct pulse8 *pulse8 = + container_of(work, struct pulse8, work); ++ u8 result = pulse8->work_result; + +- switch (pulse8->data[0] & 0x3f) { ++ pulse8->work_result = 0; ++ switch (result & 0x3f) { + case MSGCODE_FRAME_DATA: + cec_received_msg(pulse8->adap, &pulse8->rx_msg); + break; +@@ -172,12 +175,12 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data, + pulse8->escape = false; + } else if (data == MSGEND) { + struct cec_msg *msg = &pulse8->rx_msg; ++ u8 msgcode = pulse8->buf[0]; + + if (debug) + dev_info(pulse8->dev, "received: %*ph\n", + pulse8->idx, pulse8->buf); +- pulse8->data[0] = pulse8->buf[0]; +- switch (pulse8->buf[0] & 0x3f) { ++ switch (msgcode & 0x3f) { + case MSGCODE_FRAME_START: + msg->len = 1; + msg->msg[0] = pulse8->buf[1]; +@@ -186,14 +189,20 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data, + if (msg->len == CEC_MAX_MSG_SIZE) + break; + msg->msg[msg->len++] = pulse8->buf[1]; +- if (pulse8->buf[0] & MSGCODE_FRAME_EOM) ++ if (msgcode & MSGCODE_FRAME_EOM) { ++ 
WARN_ON(pulse8->work_result); ++ pulse8->work_result = msgcode; + schedule_work(&pulse8->work); ++ break; ++ } + break; + case MSGCODE_TRANSMIT_SUCCEEDED: + case MSGCODE_TRANSMIT_FAILED_LINE: + case MSGCODE_TRANSMIT_FAILED_ACK: + case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA: + case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE: ++ WARN_ON(pulse8->work_result); ++ pulse8->work_result = msgcode; + schedule_work(&pulse8->work); + break; + case MSGCODE_HIGH_ERROR: +diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +index 4e8e80ac8341..9cec5c216e1f 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +@@ -973,6 +973,8 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, + struct ath_htc_rx_status *rxstatus; + struct ath_rx_status rx_stats; + bool decrypt_error = false; ++ __be16 rs_datalen; ++ bool is_phyerr; + + if (skb->len < HTC_RX_FRAME_HEADER_SIZE) { + ath_err(common, "Corrupted RX frame, dropping (len: %d)\n", +@@ -982,11 +984,24 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, + + rxstatus = (struct ath_htc_rx_status *)skb->data; + +- if (be16_to_cpu(rxstatus->rs_datalen) - +- (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) { ++ rs_datalen = be16_to_cpu(rxstatus->rs_datalen); ++ if (unlikely(rs_datalen - ++ (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0)) { + ath_err(common, + "Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n", +- rxstatus->rs_datalen, skb->len); ++ rs_datalen, skb->len); ++ goto rx_next; ++ } ++ ++ is_phyerr = rxstatus->rs_status & ATH9K_RXERR_PHY; ++ /* ++ * Discard zero-length packets and packets smaller than an ACK ++ * which are not PHY_ERROR (short radar pulses have a length of 3) ++ */ ++ if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) { ++ ath_warn(common, ++ "Short RX data len, dropping (dlen: %d)\n", ++ rs_datalen); + goto rx_next; + } + +@@ -1011,7 +1026,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, + * Process PHY errors and return so that the packet + * can be dropped. + */ +- if (rx_stats.rs_status & ATH9K_RXERR_PHY) { ++ if (unlikely(is_phyerr)) { + /* TODO: Not using DFS processing now. 
*/ + if (ath_cmn_process_fft(&priv->spec_priv, hdr, + &rx_stats, rx_status->mactime)) { +diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c +index 265f89e11d8b..59474bd0c728 100644 +--- a/drivers/nvme/host/fc.c ++++ b/drivers/nvme/host/fc.c +@@ -342,7 +342,8 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, + !template->ls_req || !template->fcp_io || + !template->ls_abort || !template->fcp_abort || + !template->max_hw_queues || !template->max_sgl_segments || +- !template->max_dif_sgl_segments || !template->dma_boundary) { ++ !template->max_dif_sgl_segments || !template->dma_boundary || ++ !template->module) { + ret = -EINVAL; + goto out_reghost_failed; + } +@@ -2015,6 +2016,7 @@ nvme_fc_ctrl_free(struct kref *ref) + { + struct nvme_fc_ctrl *ctrl = + container_of(ref, struct nvme_fc_ctrl, ref); ++ struct nvme_fc_lport *lport = ctrl->lport; + unsigned long flags; + + if (ctrl->ctrl.tagset) { +@@ -2041,6 +2043,7 @@ nvme_fc_ctrl_free(struct kref *ref) + if (ctrl->ctrl.opts) + nvmf_free_options(ctrl->ctrl.opts); + kfree(ctrl); ++ module_put(lport->ops->module); + } + + static void +@@ -2907,10 +2910,22 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) + static void + __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl) + { +- nvme_stop_keep_alive(&ctrl->ctrl); ++ /* ++ * if state is connecting - the error occurred as part of a ++ * reconnect attempt. The create_association error paths will ++ * clean up any outstanding io. ++ * ++ * if it's a different state - ensure all pending io is ++ * terminated. Given this can delay while waiting for the ++ * aborted io to return, we recheck adapter state below ++ * before changing state. ++ */ ++ if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) { ++ nvme_stop_keep_alive(&ctrl->ctrl); + +- /* will block will waiting for io to terminate */ +- nvme_fc_delete_association(ctrl); ++ /* will block will waiting for io to terminate */ ++ nvme_fc_delete_association(ctrl); ++ } + + if (ctrl->ctrl.state != NVME_CTRL_CONNECTING && + !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) +@@ -3056,10 +3071,15 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, + goto out_fail; + } + ++ if (!try_module_get(lport->ops->module)) { ++ ret = -EUNATCH; ++ goto out_free_ctrl; ++ } ++ + idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); + if (idx < 0) { + ret = -ENOSPC; +- goto out_free_ctrl; ++ goto out_mod_put; + } + + ctrl->ctrl.opts = opts; +@@ -3212,6 +3232,8 @@ out_free_queues: + out_free_ida: + put_device(ctrl->dev); + ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); ++out_mod_put: ++ module_put(lport->ops->module); + out_free_ctrl: + kfree(ctrl); + out_fail: +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index 869f462e6b6e..14d513087a14 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -68,14 +68,14 @@ static int io_queue_depth = 1024; + module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644); + MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2"); + +-static int write_queues; +-module_param(write_queues, int, 0644); ++static unsigned int write_queues; ++module_param(write_queues, uint, 0644); + MODULE_PARM_DESC(write_queues, + "Number of queues to use for writes. 
If not set, reads and writes " + "will share a queue set."); + +-static int poll_queues; +-module_param(poll_queues, int, 0644); ++static unsigned int poll_queues; ++module_param(poll_queues, uint, 0644); + MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO."); + + struct nvme_dev; +@@ -2060,7 +2060,6 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) + .priv = dev, + }; + unsigned int irq_queues, this_p_queues; +- unsigned int nr_cpus = num_possible_cpus(); + + /* + * Poll queues don't need interrupts, but we need at least one IO +@@ -2071,10 +2070,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) + this_p_queues = nr_io_queues - 1; + irq_queues = 1; + } else { +- if (nr_cpus < nr_io_queues - this_p_queues) +- irq_queues = nr_cpus + 1; +- else +- irq_queues = nr_io_queues - this_p_queues + 1; ++ irq_queues = nr_io_queues - this_p_queues + 1; + } + dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; + +diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c +index b50b53db3746..1c50af6219f3 100644 +--- a/drivers/nvme/target/fcloop.c ++++ b/drivers/nvme/target/fcloop.c +@@ -850,6 +850,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) + #define FCLOOP_DMABOUND_4G 0xFFFFFFFF + + static struct nvme_fc_port_template fctemplate = { ++ .module = THIS_MODULE, + .localport_delete = fcloop_localport_delete, + .remoteport_delete = fcloop_remoteport_delete, + .create_queue = fcloop_create_queue, +diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c +index c423e94baf0f..9617b7df7c4d 100644 +--- a/drivers/of/overlay.c ++++ b/drivers/of/overlay.c +@@ -305,7 +305,6 @@ static int add_changeset_property(struct overlay_changeset *ovcs, + { + struct property *new_prop = NULL, *prop; + int ret = 0; +- bool check_for_non_overlay_node = false; + + if (target->in_livetree) + if (!of_prop_cmp(overlay_prop->name, "name") || +@@ -318,6 +317,25 @@ static int add_changeset_property(struct overlay_changeset *ovcs, + else + prop = NULL; + ++ if (prop) { ++ if (!of_prop_cmp(prop->name, "#address-cells")) { ++ if (!of_prop_val_eq(prop, overlay_prop)) { ++ pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n", ++ target->np); ++ ret = -EINVAL; ++ } ++ return ret; ++ ++ } else if (!of_prop_cmp(prop->name, "#size-cells")) { ++ if (!of_prop_val_eq(prop, overlay_prop)) { ++ pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n", ++ target->np); ++ ret = -EINVAL; ++ } ++ return ret; ++ } ++ } ++ + if (is_symbols_prop) { + if (prop) + return -EINVAL; +@@ -330,33 +348,18 @@ static int add_changeset_property(struct overlay_changeset *ovcs, + return -ENOMEM; + + if (!prop) { +- check_for_non_overlay_node = true; + if (!target->in_livetree) { + new_prop->next = target->np->deadprops; + target->np->deadprops = new_prop; + } + ret = of_changeset_add_property(&ovcs->cset, target->np, + new_prop); +- } else if (!of_prop_cmp(prop->name, "#address-cells")) { +- if (!of_prop_val_eq(prop, new_prop)) { +- pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n", +- target->np); +- ret = -EINVAL; +- } +- } else if (!of_prop_cmp(prop->name, "#size-cells")) { +- if (!of_prop_val_eq(prop, new_prop)) { +- pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n", +- target->np); +- ret = -EINVAL; +- } + } else { +- check_for_non_overlay_node = true; + ret = of_changeset_update_property(&ovcs->cset, target->np, + new_prop); + } + +- if (check_for_non_overlay_node && +- 
!of_node_check_flag(target->np, OF_OVERLAY)) ++ if (!of_node_check_flag(target->np, OF_OVERLAY)) + pr_err("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n", + target->np, new_prop->name); + +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index a97e2571a527..fcfaadc774ee 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -5854,6 +5854,24 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, + return 0; + } + ++#ifdef CONFIG_ACPI ++bool pci_pr3_present(struct pci_dev *pdev) ++{ ++ struct acpi_device *adev; ++ ++ if (acpi_disabled) ++ return false; ++ ++ adev = ACPI_COMPANION(&pdev->dev); ++ if (!adev) ++ return false; ++ ++ return adev->power.flags.power_resources && ++ acpi_has_method(adev->handle, "_PR3"); ++} ++EXPORT_SYMBOL_GPL(pci_pr3_present); ++#endif ++ + /** + * pci_add_dma_alias - Add a DMA devfn alias for a device + * @dev: the PCI device for which alias is added +diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c +index 6fd1390fd06e..bfb22f868857 100644 +--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c ++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c +@@ -615,7 +615,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) + return PTR_ERR(channel->base); + + /* call request_irq for OTG */ +- irq = platform_get_irq(pdev, 0); ++ irq = platform_get_irq_optional(pdev, 0); + if (irq >= 0) { + INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work); + irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq, +diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c +index 07d1b911e72f..52ef1419b671 100644 +--- a/drivers/platform/x86/pmc_atom.c ++++ b/drivers/platform/x86/pmc_atom.c +@@ -429,6 +429,14 @@ static const struct dmi_system_id critclk_systems[] = { + DMI_MATCH(DMI_PRODUCT_VERSION, "6AV7882-0"), + }, + }, ++ { ++ .ident = "CONNECT X300", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"), ++ DMI_MATCH(DMI_PRODUCT_VERSION, "A5E45074588"), ++ }, ++ }, ++ + { /*sentinel*/ } + }; + +diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c +index efb2f01a9101..f60e1b26c2d2 100644 +--- a/drivers/regulator/ab8500.c ++++ b/drivers/regulator/ab8500.c +@@ -953,23 +953,6 @@ static struct ab8500_regulator_info + .update_val_idle = 0x82, + .update_val_normal = 0x02, + }, +- [AB8505_LDO_USB] = { +- .desc = { +- .name = "LDO-USB", +- .ops = &ab8500_regulator_mode_ops, +- .type = REGULATOR_VOLTAGE, +- .id = AB8505_LDO_USB, +- .owner = THIS_MODULE, +- .n_voltages = 1, +- .volt_table = fixed_3300000_voltage, +- }, +- .update_bank = 0x03, +- .update_reg = 0x82, +- .update_mask = 0x03, +- .update_val = 0x01, +- .update_val_idle = 0x03, +- .update_val_normal = 0x01, +- }, + [AB8505_LDO_AUDIO] = { + .desc = { + .name = "LDO-AUDIO", +diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c +index 989506bd90b1..16f0c8570036 100644 +--- a/drivers/regulator/axp20x-regulator.c ++++ b/drivers/regulator/axp20x-regulator.c +@@ -413,10 +413,13 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp) + int i; + + for (i = 0; i < rate_count; i++) { +- if (ramp <= slew_rates[i]) +- cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i); +- else ++ if (ramp > slew_rates[i]) + break; ++ ++ if (id == AXP20X_DCDC2) ++ cfg = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE(i); ++ else ++ cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i); + } + + if (cfg == 0xff) { +@@ -605,7 +608,7 @@ static const struct regulator_desc axp22x_regulators[] = { + 
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK), + AXP_DESC(AXP22X, ELDO2, "eldo2", "eldoin", 700, 3300, 100, + AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK, +- AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK), ++ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK), + AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100, + AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK, + AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK), +diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c +index 0248a61f1006..6041839ec38c 100644 +--- a/drivers/regulator/bd70528-regulator.c ++++ b/drivers/regulator/bd70528-regulator.c +@@ -101,7 +101,6 @@ static const struct regulator_ops bd70528_ldo_ops = { + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, +- .set_ramp_delay = bd70528_set_ramp_delay, + }; + + static const struct regulator_ops bd70528_led_ops = { +diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c +index f47b4b281b14..d7302c2052f9 100644 +--- a/drivers/scsi/libsas/sas_discover.c ++++ b/drivers/scsi/libsas/sas_discover.c +@@ -81,12 +81,21 @@ static int sas_get_port_device(struct asd_sas_port *port) + else + dev->dev_type = SAS_SATA_DEV; + dev->tproto = SAS_PROTOCOL_SATA; +- } else { ++ } else if (port->oob_mode == SAS_OOB_MODE) { + struct sas_identify_frame *id = + (struct sas_identify_frame *) dev->frame_rcvd; + dev->dev_type = id->dev_type; + dev->iproto = id->initiator_bits; + dev->tproto = id->target_bits; ++ } else { ++ /* If the oob mode is OOB_NOT_CONNECTED, the port is ++ * disconnected due to race with PHY down. We cannot ++ * continue to discover this port ++ */ ++ sas_put_device(dev); ++ pr_warn("Port %016llx is disconnected when discovering\n", ++ SAS_ADDR(port->attached_sas_addr)); ++ return -ENODEV; + } + + sas_init_dev(dev); +diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c +index 39a736b887b1..6c2b03415a2c 100644 +--- a/drivers/scsi/lpfc/lpfc_bsg.c ++++ b/drivers/scsi/lpfc/lpfc_bsg.c +@@ -4489,12 +4489,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, + phba->mbox_ext_buf_ctx.seqNum++; + nemb_tp = phba->mbox_ext_buf_ctx.nembType; + +- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); +- if (!dd_data) { +- rc = -ENOMEM; +- goto job_error; +- } +- + pbuf = (uint8_t *)dmabuf->virt; + size = job->request_payload.payload_len; + sg_copy_to_buffer(job->request_payload.sg_list, +@@ -4531,6 +4525,13 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, + "2968 SLI_CONFIG ext-buffer wr all %d " + "ebuffers received\n", + phba->mbox_ext_buf_ctx.numBuf); ++ ++ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); ++ if (!dd_data) { ++ rc = -ENOMEM; ++ goto job_error; ++ } ++ + /* mailbox command structure for base driver */ + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) { +@@ -4579,6 +4580,8 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, + return SLI_CONFIG_HANDLED; + + job_error: ++ if (pmboxq) ++ mempool_free(pmboxq, phba->mbox_mem_pool); + lpfc_bsg_dma_page_free(phba, dmabuf); + kfree(dd_data); + +diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c +index 1286c658ba34..ee70d14e7a9d 100644 +--- a/drivers/scsi/lpfc/lpfc_hbadisc.c ++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c +@@ -4843,6 +4843,44 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) + } + 
} + ++/* ++ * Sets the mailbox completion handler to be used for the ++ * unreg_rpi command. The handler varies based on the state of ++ * the port and what will be happening to the rpi next. ++ */ ++static void ++lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, ++ struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox) ++{ ++ unsigned long iflags; ++ ++ if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { ++ mbox->ctx_ndlp = ndlp; ++ mbox->mbox_cmpl = lpfc_nlp_logo_unreg; ++ ++ } else if (phba->sli_rev == LPFC_SLI_REV4 && ++ (!(vport->load_flag & FC_UNLOADING)) && ++ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= ++ LPFC_SLI_INTF_IF_TYPE_2) && ++ (kref_read(&ndlp->kref) > 0)) { ++ mbox->ctx_ndlp = lpfc_nlp_get(ndlp); ++ mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr; ++ } else { ++ if (vport->load_flag & FC_UNLOADING) { ++ if (phba->sli_rev == LPFC_SLI_REV4) { ++ spin_lock_irqsave(&vport->phba->ndlp_lock, ++ iflags); ++ ndlp->nlp_flag |= NLP_RELEASE_RPI; ++ spin_unlock_irqrestore(&vport->phba->ndlp_lock, ++ iflags); ++ } ++ lpfc_nlp_get(ndlp); ++ } ++ mbox->ctx_ndlp = ndlp; ++ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; ++ } ++} ++ + /* + * Free rpi associated with LPFC_NODELIST entry. + * This routine is called from lpfc_freenode(), when we are removing +@@ -4893,33 +4931,12 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) + + lpfc_unreg_login(phba, vport->vpi, rpi, mbox); + mbox->vport = vport; +- if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { +- mbox->ctx_ndlp = ndlp; +- mbox->mbox_cmpl = lpfc_nlp_logo_unreg; +- } else { +- if (phba->sli_rev == LPFC_SLI_REV4 && +- (!(vport->load_flag & FC_UNLOADING)) && +- (bf_get(lpfc_sli_intf_if_type, +- &phba->sli4_hba.sli_intf) >= +- LPFC_SLI_INTF_IF_TYPE_2) && +- (kref_read(&ndlp->kref) > 0)) { +- mbox->ctx_ndlp = lpfc_nlp_get(ndlp); +- mbox->mbox_cmpl = +- lpfc_sli4_unreg_rpi_cmpl_clr; +- /* +- * accept PLOGIs after unreg_rpi_cmpl +- */ +- acc_plogi = 0; +- } else if (vport->load_flag & FC_UNLOADING) { +- mbox->ctx_ndlp = NULL; +- mbox->mbox_cmpl = +- lpfc_sli_def_mbox_cmpl; +- } else { +- mbox->ctx_ndlp = ndlp; +- mbox->mbox_cmpl = +- lpfc_sli_def_mbox_cmpl; +- } +- } ++ lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox); ++ if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr) ++ /* ++ * accept PLOGIs after unreg_rpi_cmpl ++ */ ++ acc_plogi = 0; + if (((ndlp->nlp_DID & Fabric_DID_MASK) != + Fabric_DID_MASK) && + (!(vport->fc_flag & FC_OFFLINE_MODE))) +@@ -5060,6 +5077,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mb, *nextmb; + struct lpfc_dmabuf *mp; ++ unsigned long iflags; + + /* Cleanup node for NPort <nlp_DID> */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, +@@ -5141,8 +5159,20 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) + lpfc_cleanup_vports_rrqs(vport, ndlp); + if (phba->sli_rev == LPFC_SLI_REV4) + ndlp->nlp_flag |= NLP_RELEASE_RPI; +- lpfc_unreg_rpi(vport, ndlp); +- ++ if (!lpfc_unreg_rpi(vport, ndlp)) { ++ /* Clean up unregistered and non freed rpis */ ++ if ((ndlp->nlp_flag & NLP_RELEASE_RPI) && ++ !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) { ++ lpfc_sli4_free_rpi(vport->phba, ++ ndlp->nlp_rpi); ++ spin_lock_irqsave(&vport->phba->ndlp_lock, ++ iflags); ++ ndlp->nlp_flag &= ~NLP_RELEASE_RPI; ++ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; ++ spin_unlock_irqrestore(&vport->phba->ndlp_lock, ++ iflags); ++ } ++ } + return 0; + } + +diff --git a/drivers/scsi/lpfc/lpfc_nvme.c 
b/drivers/scsi/lpfc/lpfc_nvme.c +index a227e36cbdc2..8e0f03ef346b 100644 +--- a/drivers/scsi/lpfc/lpfc_nvme.c ++++ b/drivers/scsi/lpfc/lpfc_nvme.c +@@ -1976,6 +1976,8 @@ out_unlock: + + /* Declare and initialization an instance of the FC NVME template. */ + static struct nvme_fc_port_template lpfc_nvme_template = { ++ .module = THIS_MODULE, ++ + /* initiator-based functions */ + .localport_delete = lpfc_nvme_localport_delete, + .remoteport_delete = lpfc_nvme_remoteport_delete, +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c +index 2b0e7b32c2df..8860f41af3ff 100644 +--- a/drivers/scsi/lpfc/lpfc_sli.c ++++ b/drivers/scsi/lpfc/lpfc_sli.c +@@ -2526,6 +2526,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) + } else { + __lpfc_sli_rpi_release(vport, ndlp); + } ++ if (vport->load_flag & FC_UNLOADING) ++ lpfc_nlp_put(ndlp); + pmb->ctx_ndlp = NULL; + } + } +diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h +index d5386edddaf6..1eb3fe281cc3 100644 +--- a/drivers/scsi/qla2xxx/qla_def.h ++++ b/drivers/scsi/qla2xxx/qla_def.h +@@ -2401,6 +2401,7 @@ typedef struct fc_port { + unsigned int id_changed:1; + unsigned int scan_needed:1; + unsigned int n2n_flag:1; ++ unsigned int explicit_logout:1; + + struct completion nvme_del_done; + uint32_t nvme_prli_service_param; +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c +index 5d31e3d52b6b..80f276d67c14 100644 +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -4927,14 +4927,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) + set_bit(RSCN_UPDATE, &flags); + clear_bit(LOCAL_LOOP_UPDATE, &flags); + +- } else if (ha->current_topology == ISP_CFG_N) { +- clear_bit(RSCN_UPDATE, &flags); +- if (qla_tgt_mode_enabled(vha)) { +- /* allow the other side to start the login */ +- clear_bit(LOCAL_LOOP_UPDATE, &flags); +- set_bit(RELOGIN_NEEDED, &vha->dpc_flags); +- } +- } else if (ha->current_topology == ISP_CFG_NL) { ++ } else if (ha->current_topology == ISP_CFG_NL || ++ ha->current_topology == ISP_CFG_N) { + clear_bit(RSCN_UPDATE, &flags); + set_bit(LOCAL_LOOP_UPDATE, &flags); + } else if (!vha->flags.online || +@@ -5051,7 +5045,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) + memcpy(&ha->plogi_els_payld.data, + (void *)ha->init_cb, + sizeof(ha->plogi_els_payld.data)); +- set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + } else { + ql_dbg(ql_dbg_init, vha, 0x00d1, + "PLOGI ELS param read fail.\n"); +diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c +index 518eb954cf42..bdf1994251b9 100644 +--- a/drivers/scsi/qla2xxx/qla_iocb.c ++++ b/drivers/scsi/qla2xxx/qla_iocb.c +@@ -2405,11 +2405,19 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) + static void + qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) + { ++ u16 control_flags = LCF_COMMAND_LOGO; + logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; +- logio->control_flags = +- cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); +- if (!sp->fcport->keep_nport_handle) +- logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT); ++ ++ if (sp->fcport->explicit_logout) { ++ control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT; ++ } else { ++ control_flags |= LCF_IMPL_LOGO; ++ ++ if (!sp->fcport->keep_nport_handle) ++ control_flags |= LCF_FREE_NPORT; ++ } ++ ++ logio->control_flags = cpu_to_le16(control_flags); + logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); + logio->port_id[0] = sp->fcport->d_id.b.al_pa; + logio->port_id[1] = sp->fcport->d_id.b.area; +@@ -2676,7 
+2684,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) + ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, + "PLOGI ELS IOCB:\n"); + ql_dump_buffer(ql_log_info, vha, 0x0109, +- (uint8_t *)els_iocb, 0x70); ++ (uint8_t *)els_iocb, ++ sizeof(*els_iocb)); + } else { + els_iocb->control_flags = 1 << 13; + els_iocb->tx_byte_count = +@@ -2842,7 +2851,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, + + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109, +- (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70); ++ (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, ++ sizeof(*elsio->u.els_plogi.els_plogi_pyld)); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { +diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c +index 9204e8467a4e..b3766b1879e3 100644 +--- a/drivers/scsi/qla2xxx/qla_isr.c ++++ b/drivers/scsi/qla2xxx/qla_isr.c +@@ -1061,8 +1061,6 @@ global_port_update: + ql_dbg(ql_dbg_async, vha, 0x5011, + "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", + mb[1], mb[2], mb[3]); +- +- qlt_async_event(mb[0], vha, mb); + break; + } + +@@ -1079,8 +1077,6 @@ global_port_update: + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(VP_CONFIG_OK, &vha->vp_flags); +- +- qlt_async_event(mb[0], vha, mb); + break; + + case MBA_RSCN_UPDATE: /* State Change Registration */ +diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c +index 4d90cf101f5f..eac76e934cbe 100644 +--- a/drivers/scsi/qla2xxx/qla_mbx.c ++++ b/drivers/scsi/qla2xxx/qla_mbx.c +@@ -3920,6 +3920,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, + vha->d_id.b24 = 0; + vha->d_id.b.al_pa = 1; + ha->flags.n2n_bigger = 1; ++ ha->flags.n2n_ae = 0; + + id.b.al_pa = 2; + ql_dbg(ql_dbg_async, vha, 0x5075, +@@ -3930,6 +3931,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, + "Format 1: Remote login - Waiting for WWPN %8phC.\n", + rptid_entry->u.f1.port_name); + ha->flags.n2n_bigger = 0; ++ ha->flags.n2n_ae = 1; + } + qla24xx_post_newsess_work(vha, &id, + rptid_entry->u.f1.port_name, +@@ -3941,7 +3943,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, + /* if our portname is higher then initiate N2N login */ + + set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); +- ha->flags.n2n_ae = 1; + return; + break; + case TOPO_FL: +diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c +index 941aa53363f5..bfcd02fdf2b8 100644 +--- a/drivers/scsi/qla2xxx/qla_nvme.c ++++ b/drivers/scsi/qla2xxx/qla_nvme.c +@@ -610,6 +610,7 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) + } + + static struct nvme_fc_port_template qla_nvme_fc_transport = { ++ .module = THIS_MODULE, + .localport_delete = qla_nvme_localport_delete, + .remoteport_delete = qla_nvme_remoteport_delete, + .create_queue = qla_nvme_alloc_queue, +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c +index a9bd0f513316..74a378a91b71 100644 +--- a/drivers/scsi/qla2xxx/qla_target.c ++++ b/drivers/scsi/qla2xxx/qla_target.c +@@ -1104,6 +1104,7 @@ void qlt_free_session_done(struct work_struct *work) + } + } + ++ sess->explicit_logout = 0; + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + sess->free_pending = 0; + +@@ -1264,7 +1265,6 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) + "Scheduling sess %p for deletion %8phC\n", + sess, sess->port_name); + +- INIT_WORK(&sess->del_work, 
qla24xx_delete_sess_fn); + WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); + } + +@@ -4803,6 +4803,7 @@ static int qlt_handle_login(struct scsi_qla_host *vha, + + switch (sess->disc_state) { + case DSC_DELETED: ++ case DSC_LOGIN_PEND: + qlt_plogi_ack_unref(vha, pla); + break; + +diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +index bab2073c1f72..abe7f79bb789 100644 +--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c ++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +@@ -350,6 +350,7 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess) + target_sess_cmd_list_set_waiting(se_sess); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + ++ sess->explicit_logout = 1; + tcm_qla2xxx_put_sess(sess); + } + +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c +index 8c674eca09f1..2323432a0edb 100644 +--- a/drivers/scsi/qla4xxx/ql4_os.c ++++ b/drivers/scsi/qla4xxx/ql4_os.c +@@ -4275,7 +4275,6 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) + return QLA_SUCCESS; + + mem_alloc_error_exit: +- qla4xxx_mem_free(ha); + return QLA_ERROR; + } + +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c +index 417b868d8735..ed8d9709b9b9 100644 +--- a/drivers/scsi/scsi_transport_iscsi.c ++++ b/drivers/scsi/scsi_transport_iscsi.c +@@ -24,6 +24,8 @@ + + #define ISCSI_TRANSPORT_VERSION "2.0-870" + ++#define ISCSI_SEND_MAX_ALLOWED 10 ++ + #define CREATE_TRACE_POINTS + #include <trace/events/iscsi.h> + +@@ -3682,6 +3684,7 @@ iscsi_if_rx(struct sk_buff *skb) + struct nlmsghdr *nlh; + struct iscsi_uevent *ev; + uint32_t group; ++ int retries = ISCSI_SEND_MAX_ALLOWED; + + nlh = nlmsg_hdr(skb); + if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) || +@@ -3712,6 +3715,10 @@ iscsi_if_rx(struct sk_buff *skb) + break; + err = iscsi_if_send_reply(portid, nlh->nlmsg_type, + ev, sizeof(*ev)); ++ if (err == -EAGAIN && --retries < 0) { ++ printk(KERN_WARNING "Send reply failed, error %d\n", err); ++ break; ++ } + } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); + skb_pull(skb, rlen); + } +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c +index bec758e978fb..d47bd26577b3 100644 +--- a/drivers/spi/spi-fsl-dspi.c ++++ b/drivers/spi/spi-fsl-dspi.c +@@ -583,21 +583,14 @@ static void dspi_tcfq_write(struct fsl_dspi *dspi) + dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT; + + if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) { +- /* Write two TX FIFO entries first, and then the corresponding +- * CMD FIFO entry. ++ /* Write the CMD FIFO entry first, and then the two ++ * corresponding TX FIFO entries. + */ + u32 data = dspi_pop_tx(dspi); + +- if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE) { +- /* LSB */ +- tx_fifo_write(dspi, data & 0xFFFF); +- tx_fifo_write(dspi, data >> 16); +- } else { +- /* MSB */ +- tx_fifo_write(dspi, data >> 16); +- tx_fifo_write(dspi, data & 0xFFFF); +- } + cmd_fifo_write(dspi); ++ tx_fifo_write(dspi, data & 0xFFFF); ++ tx_fifo_write(dspi, data >> 16); + } else { + /* Write one entry to both TX FIFO and CMD FIFO + * simultaneously. 
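
The spi-fsl-dspi hunk above fixes the XSPI transmit ordering for bits_per_word > 16: the CMD FIFO entry has to be written before the two 16-bit halves of the data word, low half first, which also removes the old LSB/MSB special-casing. In outline, using the helpers already named in the hunk (a sketch of the fixed path, not a drop-in replacement):

	if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
		u32 data = dspi_pop_tx(dspi);

		/* command word first, then both halves of the data word */
		cmd_fifo_write(dspi);
		tx_fifo_write(dspi, data & 0xFFFF);
		tx_fifo_write(dspi, data >> 16);
	}
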
+diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c +index 47cde1864630..ce9b30112e26 100644 +--- a/drivers/spi/spi-uniphier.c ++++ b/drivers/spi/spi-uniphier.c +@@ -290,25 +290,32 @@ static void uniphier_spi_recv(struct uniphier_spi_priv *priv) + } + } + +-static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv) ++static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv, ++ unsigned int threshold) + { +- unsigned int fifo_threshold, fill_bytes; + u32 val; + +- fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, +- bytes_per_word(priv->bits_per_word)); +- fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH); +- +- fill_bytes = fifo_threshold - (priv->rx_bytes - priv->tx_bytes); +- +- /* set fifo threshold */ + val = readl(priv->base + SSI_FC); + val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK); +- val |= FIELD_PREP(SSI_FC_TXFTH_MASK, fifo_threshold); +- val |= FIELD_PREP(SSI_FC_RXFTH_MASK, fifo_threshold); ++ val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold); ++ val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold); + writel(val, priv->base + SSI_FC); ++} ++ ++static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv) ++{ ++ unsigned int fifo_threshold, fill_words; ++ unsigned int bpw = bytes_per_word(priv->bits_per_word); ++ ++ fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw); ++ fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH); ++ ++ uniphier_spi_set_fifo_threshold(priv, fifo_threshold); ++ ++ fill_words = fifo_threshold - ++ DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw); + +- while (fill_bytes--) ++ while (fill_words--) + uniphier_spi_send(priv); + } + +diff --git a/drivers/staging/wlan-ng/Kconfig b/drivers/staging/wlan-ng/Kconfig +index ac136663fa8e..082c16a31616 100644 +--- a/drivers/staging/wlan-ng/Kconfig ++++ b/drivers/staging/wlan-ng/Kconfig +@@ -4,6 +4,7 @@ config PRISM2_USB + depends on WLAN && USB && CFG80211 + select WIRELESS_EXT + select WEXT_PRIV ++ select CRC32 + help + This is the wlan-ng prism 2.5/3 USB driver for a wide range of + old USB wireless devices. 
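
Recapping the spi-uniphier hunk above: the old fill loop mixed units, computing a threshold in words but a fill count in bytes. The rewrite counts everything in words, clamps the threshold to the FIFO depth, programs it through the new uniphier_spi_set_fifo_threshold() helper, and tops up only the words not already in flight. A condensed sketch of the fixed accounting, using the names from the hunk:

	unsigned int bpw = bytes_per_word(priv->bits_per_word);
	unsigned int fifo_threshold, fill_words;

	fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw);
	fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
	uniphier_spi_set_fifo_threshold(priv, fifo_threshold);

	/* top up only what is not already queued, counted in words */
	fill_words = fifo_threshold -
		DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw);
	while (fill_words--)
		uniphier_spi_send(priv);
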
+diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c +index 00964b6e4ac1..e0718ee5d42a 100644 +--- a/drivers/tty/serial/msm_serial.c ++++ b/drivers/tty/serial/msm_serial.c +@@ -1580,6 +1580,7 @@ static void __msm_console_write(struct uart_port *port, const char *s, + int num_newlines = 0; + bool replaced = false; + void __iomem *tf; ++ int locked = 1; + + if (is_uartdm) + tf = port->membase + UARTDM_TF; +@@ -1592,7 +1593,13 @@ static void __msm_console_write(struct uart_port *port, const char *s, + num_newlines++; + count += num_newlines; + +- spin_lock(&port->lock); ++ if (port->sysrq) ++ locked = 0; ++ else if (oops_in_progress) ++ locked = spin_trylock(&port->lock); ++ else ++ spin_lock(&port->lock); ++ + if (is_uartdm) + msm_reset_dm_count(port, count); + +@@ -1628,7 +1635,9 @@ static void __msm_console_write(struct uart_port *port, const char *s, + iowrite32_rep(tf, buf, 1); + i += num_chars; + } +- spin_unlock(&port->lock); ++ ++ if (locked) ++ spin_unlock(&port->lock); + } + + static void msm_console_write(struct console *co, const char *s, +diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c +index 6ce044008cf6..460d5d7c984f 100644 +--- a/drivers/usb/gadget/function/f_ecm.c ++++ b/drivers/usb/gadget/function/f_ecm.c +@@ -621,8 +621,12 @@ static void ecm_disable(struct usb_function *f) + + DBG(cdev, "ecm deactivated\n"); + +- if (ecm->port.in_ep->enabled) ++ if (ecm->port.in_ep->enabled) { + gether_disconnect(&ecm->port); ++ } else { ++ ecm->port.in_ep->desc = NULL; ++ ecm->port.out_ep->desc = NULL; ++ } + + usb_ep_disable(ecm->notify); + ecm->notify->desc = NULL; +diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c +index d48df36622b7..0d8e4a364ca6 100644 +--- a/drivers/usb/gadget/function/f_rndis.c ++++ b/drivers/usb/gadget/function/f_rndis.c +@@ -618,6 +618,7 @@ static void rndis_disable(struct usb_function *f) + gether_disconnect(&rndis->port); + + usb_ep_disable(rndis->notify); ++ rndis->notify->desc = NULL; + } + + /*-------------------------------------------------------------------------*/ +diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig +index 58e7c100b6ad..4c761abc5688 100644 +--- a/drivers/watchdog/Kconfig ++++ b/drivers/watchdog/Kconfig +@@ -1444,6 +1444,7 @@ config SMSC37B787_WDT + config TQMX86_WDT + tristate "TQ-Systems TQMX86 Watchdog Timer" + depends on X86 ++ select WATCHDOG_CORE + help + This is the driver for the hardware watchdog timer in the TQMX86 IO + controller found on some of their ComExpress Modules. 
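
/*
 * A minimal sketch of the console locking rule the msm_serial hunk above
 * adopts, the same shape most serial console drivers use: the console
 * write path may run on a CPU that already holds the port lock (sysrq
 * handling) or while another CPU died holding it (oops in progress), so
 * the lock is taken conditionally and only what was actually taken is
 * released. emit_chars() is a hypothetical placeholder for the register
 * writes the real driver performs.
 */
#include <linux/kernel.h>
#include <linux/serial_core.h>
#include <linux/spinlock.h>

static void emit_chars(struct uart_port *port, const char *s,
		       unsigned int count);

static void console_write_pattern(struct uart_port *port,
				  const char *s, unsigned int count)
{
	int locked = 1;

	if (port->sysrq)
		locked = 0;	/* this CPU may already hold the lock */
	else if (oops_in_progress)
		locked = spin_trylock(&port->lock);	/* never deadlock a panic */
	else
		spin_lock(&port->lock);

	emit_chars(port, s, count);

	if (locked)
		spin_unlock(&port->lock);
}
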
+diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c +index 5bae515c8e25..bed90d612e48 100644 +--- a/drivers/xen/balloon.c ++++ b/drivers/xen/balloon.c +@@ -395,7 +395,8 @@ static struct notifier_block xen_memory_nb = { + #else + static enum bp_state reserve_additional_memory(void) + { +- balloon_stats.target_pages = balloon_stats.current_pages; ++ balloon_stats.target_pages = balloon_stats.current_pages + ++ balloon_stats.target_unpopulated; + return BP_ECANCELED; + } + #endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */ +diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c +index 4150280509ff..7503899c0a1b 100644 +--- a/fs/afs/dynroot.c ++++ b/fs/afs/dynroot.c +@@ -136,6 +136,9 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr + + ASSERTCMP(d_inode(dentry), ==, NULL); + ++ if (flags & LOOKUP_CREATE) ++ return ERR_PTR(-EOPNOTSUPP); ++ + if (dentry->d_name.len >= AFSNAMEMAX) { + _leave(" = -ENAMETOOLONG"); + return ERR_PTR(-ENAMETOOLONG); +diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c +index f532d6d3bd28..79bc5f1338ed 100644 +--- a/fs/afs/mntpt.c ++++ b/fs/afs/mntpt.c +@@ -126,7 +126,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt) + if (src_as->cell) + ctx->cell = afs_get_cell(src_as->cell); + +- if (size > PAGE_SIZE - 1) ++ if (size < 2 || size > PAGE_SIZE - 1) + return -EINVAL; + + page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL); +@@ -140,7 +140,9 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt) + } + + buf = kmap(page); +- ret = vfs_parse_fs_string(fc, "source", buf, size); ++ ret = -EINVAL; ++ if (buf[size - 1] == '.') ++ ret = vfs_parse_fs_string(fc, "source", buf, size - 1); + kunmap(page); + put_page(page); + if (ret < 0) +diff --git a/fs/afs/server.c b/fs/afs/server.c +index 64d440aaabc0..ca8115ba1724 100644 +--- a/fs/afs/server.c ++++ b/fs/afs/server.c +@@ -32,18 +32,11 @@ static void afs_dec_servers_outstanding(struct afs_net *net) + struct afs_server *afs_find_server(struct afs_net *net, + const struct sockaddr_rxrpc *srx) + { +- const struct sockaddr_in6 *a = &srx->transport.sin6, *b; + const struct afs_addr_list *alist; + struct afs_server *server = NULL; + unsigned int i; +- bool ipv6 = true; + int seq = 0, diff; + +- if (srx->transport.sin6.sin6_addr.s6_addr32[0] == 0 || +- srx->transport.sin6.sin6_addr.s6_addr32[1] == 0 || +- srx->transport.sin6.sin6_addr.s6_addr32[2] == htonl(0xffff)) +- ipv6 = false; +- + rcu_read_lock(); + + do { +@@ -52,7 +45,8 @@ struct afs_server *afs_find_server(struct afs_net *net, + server = NULL; + read_seqbegin_or_lock(&net->fs_addr_lock, &seq); + +- if (ipv6) { ++ if (srx->transport.family == AF_INET6) { ++ const struct sockaddr_in6 *a = &srx->transport.sin6, *b; + hlist_for_each_entry_rcu(server, &net->fs_addresses6, addr6_link) { + alist = rcu_dereference(server->addresses); + for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) { +@@ -68,15 +62,16 @@ struct afs_server *afs_find_server(struct afs_net *net, + } + } + } else { ++ const struct sockaddr_in *a = &srx->transport.sin, *b; + hlist_for_each_entry_rcu(server, &net->fs_addresses4, addr4_link) { + alist = rcu_dereference(server->addresses); + for (i = 0; i < alist->nr_ipv4; i++) { +- b = &alist->addrs[i].transport.sin6; +- diff = ((u16 __force)a->sin6_port - +- (u16 __force)b->sin6_port); ++ b = &alist->addrs[i].transport.sin; ++ diff = ((u16 __force)a->sin_port - ++ (u16 __force)b->sin_port); + if (diff == 0) +- diff = ((u32 __force)a->sin6_addr.s6_addr32[3] - +- (u32 
__force)b->sin6_addr.s6_addr32[3]); ++ diff = ((u32 __force)a->sin_addr.s_addr - ++ (u32 __force)b->sin_addr.s_addr); + if (diff == 0) + goto found; + } +diff --git a/fs/afs/super.c b/fs/afs/super.c +index 488641b1a418..d9a6036b70b9 100644 +--- a/fs/afs/super.c ++++ b/fs/afs/super.c +@@ -448,7 +448,6 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx) + /* allocate the root inode and dentry */ + if (as->dyn_root) { + inode = afs_iget_pseudo_dir(sb, true); +- sb->s_flags |= SB_RDONLY; + } else { + sprintf(sb->s_id, "%llu", as->volume->vid); + afs_activate_volume(as->volume); +diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c +index 10a04b99798a..3f3110975f88 100644 +--- a/fs/btrfs/async-thread.c ++++ b/fs/btrfs/async-thread.c +@@ -53,16 +53,6 @@ struct btrfs_workqueue { + struct __btrfs_workqueue *high; + }; + +-static void normal_work_helper(struct btrfs_work *work); +- +-#define BTRFS_WORK_HELPER(name) \ +-noinline_for_stack void btrfs_##name(struct work_struct *arg) \ +-{ \ +- struct btrfs_work *work = container_of(arg, struct btrfs_work, \ +- normal_work); \ +- normal_work_helper(work); \ +-} +- + struct btrfs_fs_info * + btrfs_workqueue_owner(const struct __btrfs_workqueue *wq) + { +@@ -89,29 +79,6 @@ bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq) + return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2; + } + +-BTRFS_WORK_HELPER(worker_helper); +-BTRFS_WORK_HELPER(delalloc_helper); +-BTRFS_WORK_HELPER(flush_delalloc_helper); +-BTRFS_WORK_HELPER(cache_helper); +-BTRFS_WORK_HELPER(submit_helper); +-BTRFS_WORK_HELPER(fixup_helper); +-BTRFS_WORK_HELPER(endio_helper); +-BTRFS_WORK_HELPER(endio_meta_helper); +-BTRFS_WORK_HELPER(endio_meta_write_helper); +-BTRFS_WORK_HELPER(endio_raid56_helper); +-BTRFS_WORK_HELPER(endio_repair_helper); +-BTRFS_WORK_HELPER(rmw_helper); +-BTRFS_WORK_HELPER(endio_write_helper); +-BTRFS_WORK_HELPER(freespace_write_helper); +-BTRFS_WORK_HELPER(delayed_meta_helper); +-BTRFS_WORK_HELPER(readahead_helper); +-BTRFS_WORK_HELPER(qgroup_rescan_helper); +-BTRFS_WORK_HELPER(extent_refs_helper); +-BTRFS_WORK_HELPER(scrub_helper); +-BTRFS_WORK_HELPER(scrubwrc_helper); +-BTRFS_WORK_HELPER(scrubnc_helper); +-BTRFS_WORK_HELPER(scrubparity_helper); +- + static struct __btrfs_workqueue * + __btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name, + unsigned int flags, int limit_active, int thresh) +@@ -302,12 +269,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq, + * original work item cannot depend on the recycled work + * item in that case (see find_worker_executing_work()). + * +- * Note that the work of one Btrfs filesystem may depend +- * on the work of another Btrfs filesystem via, e.g., a +- * loop device. Therefore, we must not allow the current +- * work item to be recycled until we are really done, +- * otherwise we break the above assumption and can +- * deadlock. ++ * Note that different types of Btrfs work can depend on ++ * each other, and one type of work on one Btrfs ++ * filesystem may even depend on the same type of work ++ * on another Btrfs filesystem via, e.g., a loop device. ++ * Therefore, we must not allow the current work item to ++ * be recycled until we are really done, otherwise we ++ * break the above assumption and can deadlock. 
+ */ + free_self = true; + } else { +@@ -331,8 +299,10 @@ static void run_ordered_work(struct __btrfs_workqueue *wq, + } + } + +-static void normal_work_helper(struct btrfs_work *work) ++static void btrfs_work_helper(struct work_struct *normal_work) + { ++ struct btrfs_work *work = container_of(normal_work, struct btrfs_work, ++ normal_work); + struct __btrfs_workqueue *wq; + void *wtag; + int need_order = 0; +@@ -362,15 +332,13 @@ static void normal_work_helper(struct btrfs_work *work) + trace_btrfs_all_work_done(wq->fs_info, wtag); + } + +-void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func, +- btrfs_func_t func, +- btrfs_func_t ordered_func, +- btrfs_func_t ordered_free) ++void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, ++ btrfs_func_t ordered_func, btrfs_func_t ordered_free) + { + work->func = func; + work->ordered_func = ordered_func; + work->ordered_free = ordered_free; +- INIT_WORK(&work->normal_work, uniq_func); ++ INIT_WORK(&work->normal_work, btrfs_work_helper); + INIT_LIST_HEAD(&work->ordered_list); + work->flags = 0; + } +diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h +index 7861c9feba5f..c5bf2b117c05 100644 +--- a/fs/btrfs/async-thread.h ++++ b/fs/btrfs/async-thread.h +@@ -29,42 +29,13 @@ struct btrfs_work { + unsigned long flags; + }; + +-#define BTRFS_WORK_HELPER_PROTO(name) \ +-void btrfs_##name(struct work_struct *arg) +- +-BTRFS_WORK_HELPER_PROTO(worker_helper); +-BTRFS_WORK_HELPER_PROTO(delalloc_helper); +-BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper); +-BTRFS_WORK_HELPER_PROTO(cache_helper); +-BTRFS_WORK_HELPER_PROTO(submit_helper); +-BTRFS_WORK_HELPER_PROTO(fixup_helper); +-BTRFS_WORK_HELPER_PROTO(endio_helper); +-BTRFS_WORK_HELPER_PROTO(endio_meta_helper); +-BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper); +-BTRFS_WORK_HELPER_PROTO(endio_raid56_helper); +-BTRFS_WORK_HELPER_PROTO(endio_repair_helper); +-BTRFS_WORK_HELPER_PROTO(rmw_helper); +-BTRFS_WORK_HELPER_PROTO(endio_write_helper); +-BTRFS_WORK_HELPER_PROTO(freespace_write_helper); +-BTRFS_WORK_HELPER_PROTO(delayed_meta_helper); +-BTRFS_WORK_HELPER_PROTO(readahead_helper); +-BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper); +-BTRFS_WORK_HELPER_PROTO(extent_refs_helper); +-BTRFS_WORK_HELPER_PROTO(scrub_helper); +-BTRFS_WORK_HELPER_PROTO(scrubwrc_helper); +-BTRFS_WORK_HELPER_PROTO(scrubnc_helper); +-BTRFS_WORK_HELPER_PROTO(scrubparity_helper); +- +- + struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, + const char *name, + unsigned int flags, + int limit_active, + int thresh); +-void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper, +- btrfs_func_t func, +- btrfs_func_t ordered_func, +- btrfs_func_t ordered_free); ++void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, ++ btrfs_func_t ordered_func, btrfs_func_t ordered_free); + void btrfs_queue_work(struct btrfs_workqueue *wq, + struct btrfs_work *work); + void btrfs_destroy_workqueue(struct btrfs_workqueue *wq); +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c +index 0d2da2366869..7dcfa7d7632a 100644 +--- a/fs/btrfs/block-group.c ++++ b/fs/btrfs/block-group.c +@@ -695,8 +695,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, + caching_ctl->block_group = cache; + caching_ctl->progress = cache->key.objectid; + refcount_set(&caching_ctl->count, 1); +- btrfs_init_work(&caching_ctl->work, btrfs_cache_helper, +- caching_thread, NULL, NULL); ++ btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL); + + 
spin_lock(&cache->lock); + /* +diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c +index 57a9ad3e8c29..c7a53e79c66d 100644 +--- a/fs/btrfs/delayed-inode.c ++++ b/fs/btrfs/delayed-inode.c +@@ -1367,8 +1367,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root, + return -ENOMEM; + + async_work->delayed_root = delayed_root; +- btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper, +- btrfs_async_run_delayed_root, NULL, NULL); ++ btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL, ++ NULL); + async_work->nr = nr; + + btrfs_queue_work(fs_info->delayed_workers, &async_work->work); +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 3895c21853cc..bae334212ee2 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -706,43 +706,31 @@ static void end_workqueue_bio(struct bio *bio) + struct btrfs_end_io_wq *end_io_wq = bio->bi_private; + struct btrfs_fs_info *fs_info; + struct btrfs_workqueue *wq; +- btrfs_work_func_t func; + + fs_info = end_io_wq->info; + end_io_wq->status = bio->bi_status; + + if (bio_op(bio) == REQ_OP_WRITE) { +- if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { ++ if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) + wq = fs_info->endio_meta_write_workers; +- func = btrfs_endio_meta_write_helper; +- } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) { ++ else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) + wq = fs_info->endio_freespace_worker; +- func = btrfs_freespace_write_helper; +- } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { ++ else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) + wq = fs_info->endio_raid56_workers; +- func = btrfs_endio_raid56_helper; +- } else { ++ else + wq = fs_info->endio_write_workers; +- func = btrfs_endio_write_helper; +- } + } else { +- if (unlikely(end_io_wq->metadata == +- BTRFS_WQ_ENDIO_DIO_REPAIR)) { ++ if (unlikely(end_io_wq->metadata == BTRFS_WQ_ENDIO_DIO_REPAIR)) + wq = fs_info->endio_repair_workers; +- func = btrfs_endio_repair_helper; +- } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { ++ else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) + wq = fs_info->endio_raid56_workers; +- func = btrfs_endio_raid56_helper; +- } else if (end_io_wq->metadata) { ++ else if (end_io_wq->metadata) + wq = fs_info->endio_meta_workers; +- func = btrfs_endio_meta_helper; +- } else { ++ else + wq = fs_info->endio_workers; +- func = btrfs_endio_helper; +- } + } + +- btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL); ++ btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL); + btrfs_queue_work(wq, &end_io_wq->work); + } + +@@ -835,8 +823,8 @@ blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, + async->mirror_num = mirror_num; + async->submit_bio_start = submit_bio_start; + +- btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start, +- run_one_async_done, run_one_async_free); ++ btrfs_init_work(&async->work, run_one_async_start, run_one_async_done, ++ run_one_async_free); + + async->bio_offset = bio_offset; + +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index be9dc78aa727..33c6b191ca59 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -1899,7 +1899,7 @@ static int __process_pages_contig(struct address_space *mapping, + if (page_ops & PAGE_SET_PRIVATE2) + SetPagePrivate2(pages[i]); + +- if (pages[i] == locked_page) { ++ if (locked_page && pages[i] == locked_page) { + put_page(pages[i]); + pages_locked++; + continue; +diff --git 
a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index e5758f62e8d8..0b2758961b1c 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -712,10 +712,12 @@ cleanup_and_bail_uncompressed: + * to our extent and set things up for the async work queue to run + * cow_file_range to do the normal delalloc dance. + */ +- if (page_offset(async_chunk->locked_page) >= start && +- page_offset(async_chunk->locked_page) <= end) ++ if (async_chunk->locked_page && ++ (page_offset(async_chunk->locked_page) >= start && ++ page_offset(async_chunk->locked_page)) <= end) { + __set_page_dirty_nobuffers(async_chunk->locked_page); + /* unlocked later on in the async handlers */ ++ } + + if (redirty) + extent_range_redirty_for_io(inode, start, end); +@@ -795,7 +797,7 @@ retry: + async_extent->start + + async_extent->ram_size - 1, + WB_SYNC_ALL); +- else if (ret) ++ else if (ret && async_chunk->locked_page) + unlock_page(async_chunk->locked_page); + kfree(async_extent); + cond_resched(); +@@ -1264,14 +1266,27 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, + async_chunk[i].inode = inode; + async_chunk[i].start = start; + async_chunk[i].end = cur_end; +- async_chunk[i].locked_page = locked_page; + async_chunk[i].write_flags = write_flags; + INIT_LIST_HEAD(&async_chunk[i].extents); + +- btrfs_init_work(&async_chunk[i].work, +- btrfs_delalloc_helper, +- async_cow_start, async_cow_submit, +- async_cow_free); ++ /* ++ * The locked_page comes all the way from writepage and its ++ * the original page we were actually given. As we spread ++ * this large delalloc region across multiple async_chunk ++ * structs, only the first struct needs a pointer to locked_page ++ * ++ * This way we don't need racey decisions about who is supposed ++ * to unlock it. ++ */ ++ if (locked_page) { ++ async_chunk[i].locked_page = locked_page; ++ locked_page = NULL; ++ } else { ++ async_chunk[i].locked_page = NULL; ++ } ++ ++ btrfs_init_work(&async_chunk[i].work, async_cow_start, ++ async_cow_submit, async_cow_free); + + nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE); + atomic_add(nr_pages, &fs_info->async_delalloc_pages); +@@ -1439,10 +1454,10 @@ next_slot: + disk_num_bytes = + btrfs_file_extent_disk_num_bytes(leaf, fi); + /* +- * If extent we got ends before our range starts, skip +- * to next extent ++ * If the extent we got ends before our current offset, ++ * skip to the next extent. 
+ */ +- if (extent_end <= start) { ++ if (extent_end <= cur_offset) { + path->slots[0]++; + goto next_slot; + } +@@ -2264,8 +2279,7 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end) + + SetPageChecked(page); + get_page(page); +- btrfs_init_work(&fixup->work, btrfs_fixup_helper, +- btrfs_writepage_fixup_worker, NULL, NULL); ++ btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); + fixup->page = page; + btrfs_queue_work(fs_info->fixup_workers, &fixup->work); + return -EBUSY; +@@ -3258,7 +3272,6 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_ordered_extent *ordered_extent = NULL; + struct btrfs_workqueue *wq; +- btrfs_work_func_t func; + + trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); + +@@ -3267,16 +3280,12 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, + end - start + 1, uptodate)) + return; + +- if (btrfs_is_free_space_inode(BTRFS_I(inode))) { ++ if (btrfs_is_free_space_inode(BTRFS_I(inode))) + wq = fs_info->endio_freespace_worker; +- func = btrfs_freespace_write_helper; +- } else { ++ else + wq = fs_info->endio_write_workers; +- func = btrfs_endio_write_helper; +- } + +- btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL, +- NULL); ++ btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL); + btrfs_queue_work(wq, &ordered_extent->work); + } + +@@ -8213,18 +8222,14 @@ static void __endio_write_update_ordered(struct inode *inode, + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_ordered_extent *ordered = NULL; + struct btrfs_workqueue *wq; +- btrfs_work_func_t func; + u64 ordered_offset = offset; + u64 ordered_bytes = bytes; + u64 last_offset; + +- if (btrfs_is_free_space_inode(BTRFS_I(inode))) { ++ if (btrfs_is_free_space_inode(BTRFS_I(inode))) + wq = fs_info->endio_freespace_worker; +- func = btrfs_freespace_write_helper; +- } else { ++ else + wq = fs_info->endio_write_workers; +- func = btrfs_endio_write_helper; +- } + + while (ordered_offset < offset + bytes) { + last_offset = ordered_offset; +@@ -8232,9 +8237,8 @@ static void __endio_write_update_ordered(struct inode *inode, + &ordered_offset, + ordered_bytes, + uptodate)) { +- btrfs_init_work(&ordered->work, func, +- finish_ordered_fn, +- NULL, NULL); ++ btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, ++ NULL); + btrfs_queue_work(wq, &ordered->work); + } + /* +@@ -10119,8 +10123,7 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode + init_completion(&work->completion); + INIT_LIST_HEAD(&work->list); + work->inode = inode; +- btrfs_init_work(&work->work, btrfs_flush_delalloc_helper, +- btrfs_run_delalloc_work, NULL, NULL); ++ btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); + + return work; + } +diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c +index 24b6c72b9a59..6240a5a1f2c0 100644 +--- a/fs/btrfs/ordered-data.c ++++ b/fs/btrfs/ordered-data.c +@@ -547,7 +547,6 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr, + spin_unlock(&root->ordered_extent_lock); + + btrfs_init_work(&ordered->flush_work, +- btrfs_flush_delalloc_helper, + btrfs_run_ordered_extent_work, NULL, NULL); + list_add_tail(&ordered->work_list, &works); + btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work); +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c +index 3ad151655eb8..27a903aaf43b 100644 +--- a/fs/btrfs/qgroup.c ++++ 
b/fs/btrfs/qgroup.c +@@ -3280,7 +3280,6 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, + memset(&fs_info->qgroup_rescan_work, 0, + sizeof(fs_info->qgroup_rescan_work)); + btrfs_init_work(&fs_info->qgroup_rescan_work, +- btrfs_qgroup_rescan_helper, + btrfs_qgroup_rescan_worker, NULL, NULL); + return 0; + } +diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c +index 57a2ac721985..8f47a85944eb 100644 +--- a/fs/btrfs/raid56.c ++++ b/fs/btrfs/raid56.c +@@ -190,7 +190,7 @@ static void scrub_parity_work(struct btrfs_work *work); + + static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func) + { +- btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL); ++ btrfs_init_work(&rbio->work, work_func, NULL, NULL); + btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); + } + +@@ -1743,8 +1743,7 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) + plug = container_of(cb, struct btrfs_plug_cb, cb); + + if (from_schedule) { +- btrfs_init_work(&plug->work, btrfs_rmw_helper, +- unplug_work, NULL, NULL); ++ btrfs_init_work(&plug->work, unplug_work, NULL, NULL); + btrfs_queue_work(plug->info->rmw_workers, + &plug->work); + return; +diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c +index dd4f9c2b7107..1feaeadc8cf5 100644 +--- a/fs/btrfs/reada.c ++++ b/fs/btrfs/reada.c +@@ -819,8 +819,7 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info) + /* FIXME we cannot handle this properly right now */ + BUG(); + } +- btrfs_init_work(&rmw->work, btrfs_readahead_helper, +- reada_start_machine_worker, NULL, NULL); ++ btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL); + rmw->fs_info = fs_info; + + btrfs_queue_work(fs_info->readahead_workers, &rmw->work); +diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c +index a0770a6aee00..a7b043fd7a57 100644 +--- a/fs/btrfs/scrub.c ++++ b/fs/btrfs/scrub.c +@@ -598,8 +598,8 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx( + sbio->index = i; + sbio->sctx = sctx; + sbio->page_count = 0; +- btrfs_init_work(&sbio->work, btrfs_scrub_helper, +- scrub_bio_end_io_worker, NULL, NULL); ++ btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL, ++ NULL); + + if (i != SCRUB_BIOS_PER_SCTX - 1) + sctx->bios[i]->next_free = i + 1; +@@ -1720,8 +1720,7 @@ static void scrub_wr_bio_end_io(struct bio *bio) + sbio->status = bio->bi_status; + sbio->bio = bio; + +- btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, +- scrub_wr_bio_end_io_worker, NULL, NULL); ++ btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL); + btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); + } + +@@ -2203,8 +2202,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock) + raid56_add_scrub_pages(rbio, spage->page, spage->logical); + } + +- btrfs_init_work(&sblock->work, btrfs_scrub_helper, +- scrub_missing_raid56_worker, NULL, NULL); ++ btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL); + scrub_block_get(sblock); + scrub_pending_bio_inc(sctx); + raid56_submit_missing_rbio(rbio); +@@ -2742,8 +2740,8 @@ static void scrub_parity_bio_endio(struct bio *bio) + + bio_put(bio); + +- btrfs_init_work(&sparity->work, btrfs_scrubparity_helper, +- scrub_parity_bio_endio_worker, NULL, NULL); ++ btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL, ++ NULL); + btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work); + } + +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index e04409f85063..d8d7b1ee83ca 100644 
+--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -6676,8 +6676,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, + else + generate_random_uuid(dev->uuid); + +- btrfs_init_work(&dev->work, btrfs_submit_helper, +- pending_bios_fn, NULL, NULL); ++ btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL); + + return dev; + } +diff --git a/fs/buffer.c b/fs/buffer.c +index 86a38b979323..7744488f7bde 100644 +--- a/fs/buffer.c ++++ b/fs/buffer.c +@@ -2994,8 +2994,6 @@ static void end_bio_bh_io_sync(struct bio *bio) + void guard_bio_eod(int op, struct bio *bio) + { + sector_t maxsector; +- struct bio_vec *bvec = bio_last_bvec_all(bio); +- unsigned truncated_bytes; + struct hd_struct *part; + + rcu_read_lock(); +@@ -3021,28 +3019,7 @@ void guard_bio_eod(int op, struct bio *bio) + if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) + return; + +- /* Uhhuh. We've got a bio that straddles the device size! */ +- truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9); +- +- /* +- * The bio contains more than one segment which spans EOD, just return +- * and let IO layer turn it into an EIO +- */ +- if (truncated_bytes > bvec->bv_len) +- return; +- +- /* Truncate the bio.. */ +- bio->bi_iter.bi_size -= truncated_bytes; +- bvec->bv_len -= truncated_bytes; +- +- /* ..and clear the end of the buffer for reads */ +- if (op == REQ_OP_READ) { +- struct bio_vec bv; +- +- mp_bvec_last_segment(bvec, &bv); +- zero_user(bv.bv_page, bv.bv_offset + bv.bv_len, +- truncated_bytes); +- } ++ bio_truncate(bio, maxsector << 9); + } + + static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, +diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c +index 1692c0c6c23a..2faa05860a48 100644 +--- a/fs/cifs/dfs_cache.c ++++ b/fs/cifs/dfs_cache.c +@@ -1317,7 +1317,6 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi, + int rc; + struct dfs_info3_param ref = {0}; + char *mdata = NULL, *devname = NULL; +- bool is_smb3 = tcon->ses->server->vals->header_preamble_size == 0; + struct TCP_Server_Info *server; + struct cifs_ses *ses; + struct smb_vol vol; +@@ -1344,7 +1343,7 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi, + goto out; + } + +- rc = cifs_setup_volume_info(&vol, mdata, devname, is_smb3); ++ rc = cifs_setup_volume_info(&vol, mdata, devname, false); + kfree(devname); + + if (rc) { +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c +index df9377828e2f..ed59e4a8db59 100644 +--- a/fs/cifs/inode.c ++++ b/fs/cifs/inode.c +@@ -163,7 +163,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) + + spin_lock(&inode->i_lock); + /* we do not want atime to be less than mtime, it broke some apps */ +- if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime)) ++ if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime) < 0) + inode->i_atime = fattr->cf_mtime; + else + inode->i_atime = fattr->cf_atime; +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index c985caa2d955..e1d8cec6ba2e 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -252,7 +252,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) + if (tcon == NULL) + return 0; + +- if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL) ++ if (smb2_command == SMB2_TREE_CONNECT) + return 0; + + if (tcon->tidStatus == CifsExiting) { +@@ -426,16 +426,9 @@ fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf, + * SMB information in the SMB header. 
If the return code is zero, this + * function must have filled in request_buf pointer. + */ +-static int +-smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, +- void **request_buf, unsigned int *total_len) ++static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, ++ void **request_buf, unsigned int *total_len) + { +- int rc; +- +- rc = smb2_reconnect(smb2_command, tcon); +- if (rc) +- return rc; +- + /* BB eventually switch this to SMB2 specific small buf size */ + if (smb2_command == SMB2_SET_INFO) + *request_buf = cifs_buf_get(); +@@ -456,7 +449,31 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, + cifs_stats_inc(&tcon->num_smbs_sent); + } + +- return rc; ++ return 0; ++} ++ ++static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, ++ void **request_buf, unsigned int *total_len) ++{ ++ int rc; ++ ++ rc = smb2_reconnect(smb2_command, tcon); ++ if (rc) ++ return rc; ++ ++ return __smb2_plain_req_init(smb2_command, tcon, request_buf, ++ total_len); ++} ++ ++static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon, ++ void **request_buf, unsigned int *total_len) ++{ ++ /* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */ ++ if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) { ++ return __smb2_plain_req_init(SMB2_IOCTL, tcon, request_buf, ++ total_len); ++ } ++ return smb2_plain_req_init(SMB2_IOCTL, tcon, request_buf, total_len); + } + + /* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */ +@@ -2661,7 +2678,7 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, + int rc; + char *in_data_buf; + +- rc = smb2_plain_req_init(SMB2_IOCTL, tcon, (void **) &req, &total_len); ++ rc = smb2_ioctl_req_init(opcode, tcon, (void **) &req, &total_len); + if (rc) + return rc; + +diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c +index a7ec2d3dff92..e0226b2138d6 100644 +--- a/fs/compat_ioctl.c ++++ b/fs/compat_ioctl.c +@@ -1032,10 +1032,11 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, + #endif + + case FICLONE: ++ goto do_ioctl; + case FICLONERANGE: + case FIDEDUPERANGE: + case FS_IOC_FIEMAP: +- goto do_ioctl; ++ goto found_handler; + + case FIBMAP: + case FIGETBSZ: +diff --git a/fs/io_uring.c b/fs/io_uring.c +index 74e786578c77..a60c6315a348 100644 +--- a/fs/io_uring.c ++++ b/fs/io_uring.c +@@ -239,7 +239,7 @@ struct io_ring_ctx { + + struct user_struct *user; + +- struct cred *creds; ++ const struct cred *creds; + + struct completion ctx_done; + +@@ -3876,7 +3876,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p) + ctx->account_mem = account_mem; + ctx->user = user; + +- ctx->creds = prepare_creds(); ++ ctx->creds = get_current_cred(); + if (!ctx->creds) { + ret = -ENOMEM; + goto err; +diff --git a/fs/locks.c b/fs/locks.c +index 6970f55daf54..44b6da032842 100644 +--- a/fs/locks.c ++++ b/fs/locks.c +@@ -2853,7 +2853,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, + } + if (inode) { + /* userspace relies on this representation of dev_t */ +- seq_printf(f, "%d %02x:%02x:%ld ", fl_pid, ++ seq_printf(f, "%d %02x:%02x:%lu ", fl_pid, + MAJOR(inode->i_sb->s_dev), + MINOR(inode->i_sb->s_dev), inode->i_ino); + } else { +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index c65aeaa812d4..08f6eb2b73f8 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -3548,12 +3548,17 @@ static bool replay_matches_cache(struct svc_rqst *rqstp, + (bool)seq->cachethis) + return false; + /* +- * If there's an error 
than the reply can have fewer ops than +- * the call. But if we cached a reply with *more* ops than the +- * call you're sending us now, then this new call is clearly not +- * really a replay of the old one: ++ * If there's an error then the reply can have fewer ops than ++ * the call. + */ +- if (slot->sl_opcnt < argp->opcnt) ++ if (slot->sl_opcnt < argp->opcnt && !slot->sl_status) ++ return false; ++ /* ++ * But if we cached a reply with *more* ops than the call you're ++ * sending us now, then this new call is clearly not really a ++ * replay of the old one: ++ */ ++ if (slot->sl_opcnt > argp->opcnt) + return false; + /* This is the only check explicitly called by spec: */ + if (!same_creds(&rqstp->rq_cred, &slot->sl_cred)) +diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c +index 6e774c5ea13b..8a2e284ccfcd 100644 +--- a/fs/ocfs2/dlmglue.c ++++ b/fs/ocfs2/dlmglue.c +@@ -3282,6 +3282,7 @@ static void ocfs2_dlm_init_debug(struct ocfs2_super *osb) + + debugfs_create_u32("locking_filter", 0600, osb->osb_debug_root, + &dlm_debug->d_filter_secs); ++ ocfs2_get_dlm_debug(dlm_debug); + } + + static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb) +diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c +index 8caff834f002..487ee39b438a 100644 +--- a/fs/pstore/ram.c ++++ b/fs/pstore/ram.c +@@ -407,6 +407,17 @@ static int notrace ramoops_pstore_write(struct pstore_record *record) + + prz = cxt->dprzs[cxt->dump_write_cnt]; + ++ /* ++ * Since this is a new crash dump, we need to reset the buffer in ++ * case it still has an old dump present. Without this, the new dump ++ * will get appended, which would seriously confuse anything trying ++ * to check dump file contents. Specifically, ramoops_read_kmsg_hdr() ++ * expects to find a dump header in the beginning of buffer data, so ++ * we must to reset the buffer values, in order to ensure that the ++ * header will be written to the beginning of the buffer. ++ */ ++ persistent_ram_zap(prz); ++ + /* Build header and append record contents. */ + hlen = ramoops_write_kmsg_hdr(prz, record); + if (!hlen) +@@ -577,6 +588,7 @@ static int ramoops_init_przs(const char *name, + dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n", + name, record_size, + (unsigned long long)*paddr, err); ++ kfree(label); + + while (i > 0) { + i--; +@@ -622,6 +634,7 @@ static int ramoops_init_prz(const char *name, + + dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n", + name, sz, (unsigned long long)*paddr, err); ++ kfree(label); + return err; + } + +diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c +index a384a0f9ff32..234be1c4dc87 100644 +--- a/fs/ubifs/tnc_commit.c ++++ b/fs/ubifs/tnc_commit.c +@@ -212,7 +212,7 @@ static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key, + /** + * layout_leb_in_gaps - layout index nodes using in-the-gaps method. + * @c: UBIFS file-system description object +- * @p: return LEB number here ++ * @p: return LEB number in @c->gap_lebs[p] + * + * This function lays out new index nodes for dirty znodes using in-the-gaps + * method of TNC commit. +@@ -221,7 +221,7 @@ static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key, + * This function returns the number of index nodes written into the gaps, or a + * negative error code on failure. 
+ */ +-static int layout_leb_in_gaps(struct ubifs_info *c, int *p) ++static int layout_leb_in_gaps(struct ubifs_info *c, int p) + { + struct ubifs_scan_leb *sleb; + struct ubifs_scan_node *snod; +@@ -236,7 +236,7 @@ static int layout_leb_in_gaps(struct ubifs_info *c, int *p) + * filled, however we do not check there at present. + */ + return lnum; /* Error code */ +- *p = lnum; ++ c->gap_lebs[p] = lnum; + dbg_gc("LEB %d", lnum); + /* + * Scan the index LEB. We use the generic scan for this even though +@@ -355,7 +355,7 @@ static int get_leb_cnt(struct ubifs_info *c, int cnt) + */ + static int layout_in_gaps(struct ubifs_info *c, int cnt) + { +- int err, leb_needed_cnt, written, *p; ++ int err, leb_needed_cnt, written, p = 0, old_idx_lebs, *gap_lebs; + + dbg_gc("%d znodes to write", cnt); + +@@ -364,9 +364,9 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt) + if (!c->gap_lebs) + return -ENOMEM; + +- p = c->gap_lebs; ++ old_idx_lebs = c->lst.idx_lebs; + do { +- ubifs_assert(c, p < c->gap_lebs + c->lst.idx_lebs); ++ ubifs_assert(c, p < c->lst.idx_lebs); + written = layout_leb_in_gaps(c, p); + if (written < 0) { + err = written; +@@ -392,9 +392,29 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt) + leb_needed_cnt = get_leb_cnt(c, cnt); + dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt, + leb_needed_cnt, c->ileb_cnt); ++ /* ++ * Dynamically change the size of @c->gap_lebs to prevent ++ * oob, because @c->lst.idx_lebs could be increased by ++ * function @get_idx_gc_leb (called by layout_leb_in_gaps-> ++ * ubifs_find_dirty_idx_leb) during loop. Only enlarge ++ * @c->gap_lebs when needed. ++ * ++ */ ++ if (leb_needed_cnt > c->ileb_cnt && p >= old_idx_lebs && ++ old_idx_lebs < c->lst.idx_lebs) { ++ old_idx_lebs = c->lst.idx_lebs; ++ gap_lebs = krealloc(c->gap_lebs, sizeof(int) * ++ (old_idx_lebs + 1), GFP_NOFS); ++ if (!gap_lebs) { ++ kfree(c->gap_lebs); ++ c->gap_lebs = NULL; ++ return -ENOMEM; ++ } ++ c->gap_lebs = gap_lebs; ++ } + } while (leb_needed_cnt > c->ileb_cnt); + +- *p = -1; ++ c->gap_lebs[p] = -1; + return 0; + } + +diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c +index 02469d59c787..3f76da11197c 100644 +--- a/fs/xfs/libxfs/xfs_bmap.c ++++ b/fs/xfs/libxfs/xfs_bmap.c +@@ -5300,7 +5300,7 @@ __xfs_bunmapi( + * Make sure we don't touch multiple AGF headers out of order + * in a single transaction, as that could cause AB-BA deadlocks. + */ +- if (!wasdel) { ++ if (!wasdel && !isrt) { + agno = XFS_FSB_TO_AGNO(mp, del.br_startblock); + if (prev_agno != NULLAGNUMBER && prev_agno > agno) + break; +diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h +index 003a772cd26c..2e50d146105d 100644 +--- a/fs/xfs/scrub/common.h ++++ b/fs/xfs/scrub/common.h +@@ -14,8 +14,15 @@ + static inline bool + xchk_should_terminate( + struct xfs_scrub *sc, +- int *error) ++ int *error) + { ++ /* ++ * If preemption is disabled, we need to yield to the scheduler every ++ * few seconds so that we don't run afoul of the soft lockup watchdog ++ * or RCU stall detector. 
++ */ ++ cond_resched(); ++ + if (fatal_signal_pending(current)) { + if (*error == 0) + *error = -EAGAIN; +diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h +index 6782f0d45ebe..49e5383d4222 100644 +--- a/include/linux/ahci_platform.h ++++ b/include/linux/ahci_platform.h +@@ -19,6 +19,8 @@ struct ahci_host_priv; + struct platform_device; + struct scsi_host_template; + ++int ahci_platform_enable_phys(struct ahci_host_priv *hpriv); ++void ahci_platform_disable_phys(struct ahci_host_priv *hpriv); + int ahci_platform_enable_clks(struct ahci_host_priv *hpriv); + void ahci_platform_disable_clks(struct ahci_host_priv *hpriv); + int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv); +diff --git a/include/linux/bio.h b/include/linux/bio.h +index 3cdb84cdc488..853d92ceee64 100644 +--- a/include/linux/bio.h ++++ b/include/linux/bio.h +@@ -470,6 +470,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *, + gfp_t); + extern int bio_uncopy_user(struct bio *); + void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter); ++void bio_truncate(struct bio *bio, unsigned new_size); + + static inline void zero_fill_bio(struct bio *bio) + { +diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h +index 8fcdee1c0cf9..dad4a68fa009 100644 +--- a/include/linux/dmaengine.h ++++ b/include/linux/dmaengine.h +@@ -1364,8 +1364,11 @@ static inline int dma_get_slave_caps(struct dma_chan *chan, + static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) + { + struct dma_slave_caps caps; ++ int ret; + +- dma_get_slave_caps(tx->chan, &caps); ++ ret = dma_get_slave_caps(tx->chan, &caps); ++ if (ret) ++ return ret; + + if (caps.descriptor_reuse) { + tx->flags |= DMA_CTRL_REUSE; +diff --git a/include/linux/libata.h b/include/linux/libata.h +index 207e7ee764ce..fa0c3dae2094 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -1174,6 +1174,7 @@ extern unsigned int ata_do_dev_read_id(struct ata_device *dev, + struct ata_taskfile *tf, u16 *id); + extern void ata_qc_complete(struct ata_queued_cmd *qc); + extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active); ++extern u64 ata_qc_get_active(struct ata_port *ap); + extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd); + extern int ata_std_bios_param(struct scsi_device *sdev, + struct block_device *bdev, +diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h +index f46ea71b4ffd..451efd4499cc 100644 +--- a/include/linux/memory_hotplug.h ++++ b/include/linux/memory_hotplug.h +@@ -125,8 +125,8 @@ static inline bool movable_node_is_enabled(void) + + extern void arch_remove_memory(int nid, u64 start, u64 size, + struct vmem_altmap *altmap); +-extern void __remove_pages(struct zone *zone, unsigned long start_pfn, +- unsigned long nr_pages, struct vmem_altmap *altmap); ++extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages, ++ struct vmem_altmap *altmap); + + /* reasonably generic interface to expand the physical pages */ + extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, +@@ -345,6 +345,9 @@ extern int add_memory(int nid, u64 start, u64 size); + extern int add_memory_resource(int nid, struct resource *resource); + extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, + unsigned long nr_pages, struct vmem_altmap *altmap); ++extern void remove_pfn_range_from_zone(struct zone *zone, ++ unsigned long start_pfn, ++ unsigned long nr_pages); + extern bool 
is_memblock_offlined(struct memory_block *mem); + extern int sparse_add_section(int nid, unsigned long pfn, + unsigned long nr_pages, struct vmem_altmap *altmap); +diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h +index 10f81629b9ce..6d0d70f3219c 100644 +--- a/include/linux/nvme-fc-driver.h ++++ b/include/linux/nvme-fc-driver.h +@@ -270,6 +270,8 @@ struct nvme_fc_remote_port { + * + * Host/Initiator Transport Entrypoints/Parameters: + * ++ * @module: The LLDD module using the interface ++ * + * @localport_delete: The LLDD initiates deletion of a localport via + * nvme_fc_deregister_localport(). However, the teardown is + * asynchronous. This routine is called upon the completion of the +@@ -383,6 +385,8 @@ struct nvme_fc_remote_port { + * Value is Mandatory. Allowed to be zero. + */ + struct nvme_fc_port_template { ++ struct module *module; ++ + /* initiator-based functions */ + void (*localport_delete)(struct nvme_fc_local_port *); + void (*remoteport_delete)(struct nvme_fc_remote_port *); +diff --git a/include/linux/pci.h b/include/linux/pci.h +index f9088c89a534..be529d311122 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -2310,9 +2310,11 @@ struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus); + + void + pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *)); ++bool pci_pr3_present(struct pci_dev *pdev); + #else + static inline struct irq_domain * + pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; } ++static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; } + #endif + + #ifdef CONFIG_EEH +diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h +index 7cf8f797e13a..505e94a6e3e8 100644 +--- a/include/linux/regulator/ab8500.h ++++ b/include/linux/regulator/ab8500.h +@@ -37,7 +37,6 @@ enum ab8505_regulator_id { + AB8505_LDO_AUX6, + AB8505_LDO_INTCORE, + AB8505_LDO_ADC, +- AB8505_LDO_USB, + AB8505_LDO_AUDIO, + AB8505_LDO_ANAMIC1, + AB8505_LDO_ANAMIC2, +diff --git a/include/net/neighbour.h b/include/net/neighbour.h +index 5e679c8dae0b..8ec77bfdc1a4 100644 +--- a/include/net/neighbour.h ++++ b/include/net/neighbour.h +@@ -467,7 +467,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb + + do { + seq = read_seqbegin(&hh->hh_lock); +- hh_len = hh->hh_len; ++ hh_len = READ_ONCE(hh->hh_len); + if (likely(hh_len <= HH_DATA_MOD)) { + hh_alen = HH_DATA_MOD; + +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h +index 47e61956168d..32e418dba133 100644 +--- a/include/net/sch_generic.h ++++ b/include/net/sch_generic.h +@@ -149,8 +149,8 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q) + static inline bool qdisc_is_empty(const struct Qdisc *qdisc) + { + if (qdisc_is_percpu_stats(qdisc)) +- return qdisc->empty; +- return !qdisc->q.qlen; ++ return READ_ONCE(qdisc->empty); ++ return !READ_ONCE(qdisc->q.qlen); + } + + static inline bool qdisc_run_begin(struct Qdisc *qdisc) +@@ -158,7 +158,7 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc) + if (qdisc->flags & TCQ_F_NOLOCK) { + if (!spin_trylock(&qdisc->seqlock)) + return false; +- qdisc->empty = false; ++ WRITE_ONCE(qdisc->empty, false); + } else if (qdisc_is_running(qdisc)) { + return false; + } +diff --git a/include/net/sock.h b/include/net/sock.h +index e09e2886a836..6c5a3809483e 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -2589,9 +2589,9 @@ static inline int sk_get_rmem0(const struct sock *sk, const struct proto 
*proto) + */ + static inline void sk_pacing_shift_update(struct sock *sk, int val) + { +- if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val) ++ if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val) + return; +- sk->sk_pacing_shift = val; ++ WRITE_ONCE(sk->sk_pacing_shift, val); + } + + /* if a socket is bound to a device, check that the given device +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 9e7cee5307e0..5c51021775af 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -852,7 +852,8 @@ static const int caller_saved[CALLER_SAVED_REGS] = { + BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 + }; + +-static void __mark_reg_not_init(struct bpf_reg_state *reg); ++static void __mark_reg_not_init(const struct bpf_verifier_env *env, ++ struct bpf_reg_state *reg); + + /* Mark the unknown part of a register (variable offset or scalar value) as + * known to have the value @imm. +@@ -890,7 +891,7 @@ static void mark_reg_known_zero(struct bpf_verifier_env *env, + verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); + /* Something bad happened, let's kill all regs */ + for (regno = 0; regno < MAX_BPF_REG; regno++) +- __mark_reg_not_init(regs + regno); ++ __mark_reg_not_init(env, regs + regno); + return; + } + __mark_reg_known_zero(regs + regno); +@@ -999,7 +1000,8 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg) + } + + /* Mark a register as having a completely unknown (scalar) value. */ +-static void __mark_reg_unknown(struct bpf_reg_state *reg) ++static void __mark_reg_unknown(const struct bpf_verifier_env *env, ++ struct bpf_reg_state *reg) + { + /* + * Clear type, id, off, and union(map_ptr, range) and +@@ -1009,6 +1011,8 @@ static void __mark_reg_unknown(struct bpf_reg_state *reg) + reg->type = SCALAR_VALUE; + reg->var_off = tnum_unknown; + reg->frameno = 0; ++ reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ? ++ true : false; + __mark_reg_unbounded(reg); + } + +@@ -1019,19 +1023,16 @@ static void mark_reg_unknown(struct bpf_verifier_env *env, + verbose(env, "mark_reg_unknown(regs, %u)\n", regno); + /* Something bad happened, let's kill all regs except FP */ + for (regno = 0; regno < BPF_REG_FP; regno++) +- __mark_reg_not_init(regs + regno); ++ __mark_reg_not_init(env, regs + regno); + return; + } +- regs += regno; +- __mark_reg_unknown(regs); +- /* constant backtracking is enabled for root without bpf2bpf calls */ +- regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ? 
+- true : false; ++ __mark_reg_unknown(env, regs + regno); + } + +-static void __mark_reg_not_init(struct bpf_reg_state *reg) ++static void __mark_reg_not_init(const struct bpf_verifier_env *env, ++ struct bpf_reg_state *reg) + { +- __mark_reg_unknown(reg); ++ __mark_reg_unknown(env, reg); + reg->type = NOT_INIT; + } + +@@ -1042,10 +1043,10 @@ static void mark_reg_not_init(struct bpf_verifier_env *env, + verbose(env, "mark_reg_not_init(regs, %u)\n", regno); + /* Something bad happened, let's kill all regs except FP */ + for (regno = 0; regno < BPF_REG_FP; regno++) +- __mark_reg_not_init(regs + regno); ++ __mark_reg_not_init(env, regs + regno); + return; + } +- __mark_reg_not_init(regs + regno); ++ __mark_reg_not_init(env, regs + regno); + } + + #define DEF_NOT_SUBREG (0) +@@ -3066,7 +3067,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, + } + if (state->stack[spi].slot_type[0] == STACK_SPILL && + state->stack[spi].spilled_ptr.type == SCALAR_VALUE) { +- __mark_reg_unknown(&state->stack[spi].spilled_ptr); ++ __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); + for (j = 0; j < BPF_REG_SIZE; j++) + state->stack[spi].slot_type[j] = STACK_MISC; + goto mark; +@@ -3706,7 +3707,7 @@ static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, + if (!reg) + continue; + if (reg_is_pkt_pointer_any(reg)) +- __mark_reg_unknown(reg); ++ __mark_reg_unknown(env, reg); + } + } + +@@ -3734,7 +3735,7 @@ static void release_reg_references(struct bpf_verifier_env *env, + if (!reg) + continue; + if (reg->ref_obj_id == ref_obj_id) +- __mark_reg_unknown(reg); ++ __mark_reg_unknown(env, reg); + } + } + +@@ -4357,7 +4358,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, + /* Taint dst register if offset had invalid bounds derived from + * e.g. dead branches. + */ +- __mark_reg_unknown(dst_reg); ++ __mark_reg_unknown(env, dst_reg); + return 0; + } + +@@ -4609,13 +4610,13 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, + /* Taint dst register if offset had invalid bounds derived from + * e.g. dead branches. 
+ */ +- __mark_reg_unknown(dst_reg); ++ __mark_reg_unknown(env, dst_reg); + return 0; + } + + if (!src_known && + opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { +- __mark_reg_unknown(dst_reg); ++ __mark_reg_unknown(env, dst_reg); + return 0; + } + +@@ -6746,7 +6747,7 @@ static void clean_func_state(struct bpf_verifier_env *env, + /* since the register is unused, clear its state + * to make further comparison simpler + */ +- __mark_reg_not_init(&st->regs[i]); ++ __mark_reg_not_init(env, &st->regs[i]); + } + + for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { +@@ -6754,7 +6755,7 @@ static void clean_func_state(struct bpf_verifier_env *env, + /* liveness must not touch this stack slot anymore */ + st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; + if (!(live & REG_LIVE_READ)) { +- __mark_reg_not_init(&st->stack[i].spilled_ptr); ++ __mark_reg_not_init(env, &st->stack[i].spilled_ptr); + for (j = 0; j < BPF_REG_SIZE; j++) + st->stack[i].slot_type[j] = STACK_INVALID; + } +diff --git a/kernel/cred.c b/kernel/cred.c +index c0a4c12d38b2..9ed51b70ed80 100644 +--- a/kernel/cred.c ++++ b/kernel/cred.c +@@ -223,7 +223,7 @@ struct cred *cred_alloc_blank(void) + new->magic = CRED_MAGIC; + #endif + +- if (security_cred_alloc_blank(new, GFP_KERNEL) < 0) ++ if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0) + goto error; + + return new; +@@ -282,7 +282,7 @@ struct cred *prepare_creds(void) + new->security = NULL; + #endif + +- if (security_prepare_creds(new, old, GFP_KERNEL) < 0) ++ if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) + goto error; + validate_creds(new); + return new; +@@ -715,7 +715,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) + #ifdef CONFIG_SECURITY + new->security = NULL; + #endif +- if (security_prepare_creds(new, old, GFP_KERNEL) < 0) ++ if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) + goto error; + + put_cred(old); +diff --git a/kernel/exit.c b/kernel/exit.c +index d351fd09e739..22dfaac9e48c 100644 +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -517,10 +517,6 @@ static struct task_struct *find_child_reaper(struct task_struct *father, + } + + write_unlock_irq(&tasklist_lock); +- if (unlikely(pid_ns == &init_pid_ns)) { +- panic("Attempted to kill init! exitcode=0x%08x\n", +- father->signal->group_exit_code ?: father->exit_code); +- } + + list_for_each_entry_safe(p, n, dead, ptrace_entry) { + list_del_init(&p->ptrace_entry); +@@ -766,6 +762,14 @@ void __noreturn do_exit(long code) + acct_update_integrals(tsk); + group_dead = atomic_dec_and_test(&tsk->signal->live); + if (group_dead) { ++ /* ++ * If the last thread of global init has exited, panic ++ * immediately to get a useable coredump. ++ */ ++ if (unlikely(is_global_init(tsk))) ++ panic("Attempted to kill init! 
exitcode=0x%08x\n", ++ tsk->signal->group_exit_code ?: (int)code); ++ + #ifdef CONFIG_POSIX_TIMERS + hrtimer_cancel(&tsk->signal->real_timer); + exit_itimers(tsk->signal); +diff --git a/kernel/module.c b/kernel/module.c +index ff2d7359a418..cb09a5f37a5f 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -1033,6 +1033,8 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user, + strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module)); + + free_module(mod); ++ /* someone could wait for the module in add_unformed_module() */ ++ wake_up_all(&module_wq); + return 0; + out: + mutex_unlock(&module_mutex); +diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c +index 83105874f255..26b9168321e7 100644 +--- a/kernel/power/snapshot.c ++++ b/kernel/power/snapshot.c +@@ -734,8 +734,15 @@ zone_found: + * We have found the zone. Now walk the radix tree to find the leaf node + * for our PFN. + */ ++ ++ /* ++ * If the zone we wish to scan is the the current zone and the ++ * pfn falls into the current node then we do not need to walk ++ * the tree. ++ */ + node = bm->cur.node; +- if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn) ++ if (zone == bm->cur.zone && ++ ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn) + goto node_found; + + node = zone->rtree; +diff --git a/kernel/seccomp.c b/kernel/seccomp.c +index dba52a7db5e8..614a557a0814 100644 +--- a/kernel/seccomp.c ++++ b/kernel/seccomp.c +@@ -1015,6 +1015,13 @@ static long seccomp_notify_recv(struct seccomp_filter *filter, + struct seccomp_notif unotif; + ssize_t ret; + ++ /* Verify that we're not given garbage to keep struct extensible. */ ++ ret = check_zeroed_user(buf, sizeof(unotif)); ++ if (ret < 0) ++ return ret; ++ if (!ret) ++ return -EINVAL; ++ + memset(&unotif, 0, sizeof(unotif)); + + ret = down_interruptible(&filter->notif->request); +diff --git a/kernel/taskstats.c b/kernel/taskstats.c +index 13a0f2e6ebc2..e2ac0e37c4ae 100644 +--- a/kernel/taskstats.c ++++ b/kernel/taskstats.c +@@ -554,25 +554,33 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) + static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk) + { + struct signal_struct *sig = tsk->signal; +- struct taskstats *stats; ++ struct taskstats *stats_new, *stats; + +- if (sig->stats || thread_group_empty(tsk)) +- goto ret; ++ /* Pairs with smp_store_release() below. */ ++ stats = smp_load_acquire(&sig->stats); ++ if (stats || thread_group_empty(tsk)) ++ return stats; + + /* No problem if kmem_cache_zalloc() fails */ +- stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL); ++ stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL); + + spin_lock_irq(&tsk->sighand->siglock); +- if (!sig->stats) { +- sig->stats = stats; +- stats = NULL; ++ stats = sig->stats; ++ if (!stats) { ++ /* ++ * Pairs with smp_store_release() above and order the ++ * kmem_cache_zalloc(). 
++ */ ++ smp_store_release(&sig->stats, stats_new); ++ stats = stats_new; ++ stats_new = NULL; + } + spin_unlock_irq(&tsk->sighand->siglock); + +- if (stats) +- kmem_cache_free(taskstats_cache, stats); +-ret: +- return sig->stats; ++ if (stats_new) ++ kmem_cache_free(taskstats_cache, stats_new); ++ ++ return stats; + } + + /* Send pid data out on exit */ +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index f296d89be757..0708a41cfe2d 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -524,8 +524,7 @@ static int function_stat_show(struct seq_file *m, void *v) + } + + #ifdef CONFIG_FUNCTION_GRAPH_TRACER +- avg = rec->time; +- do_div(avg, rec->counter); ++ avg = div64_ul(rec->time, rec->counter); + if (tracing_thresh && (avg < tracing_thresh)) + goto out; + #endif +@@ -551,7 +550,8 @@ static int function_stat_show(struct seq_file *m, void *v) + * Divide only 1000 for ns^2 -> us^2 conversion. + * trace_print_graph_duration will divide 1000 again. + */ +- do_div(stddev, rec->counter * (rec->counter - 1) * 1000); ++ stddev = div64_ul(stddev, ++ rec->counter * (rec->counter - 1) * 1000); + } + + trace_seq_init(&s); +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 2fa72419bbd7..d8bd9b1d8bce 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -4590,6 +4590,10 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) + + int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) + { ++ if ((mask == TRACE_ITER_RECORD_TGID) || ++ (mask == TRACE_ITER_RECORD_CMD)) ++ lockdep_assert_held(&event_mutex); ++ + /* do nothing if flag is already set */ + if (!!(tr->trace_flags & mask) == !!enabled) + return 0; +@@ -4657,6 +4661,7 @@ static int trace_set_options(struct trace_array *tr, char *option) + + cmp += len; + ++ mutex_lock(&event_mutex); + mutex_lock(&trace_types_lock); + + ret = match_string(trace_options, -1, cmp); +@@ -4667,6 +4672,7 @@ static int trace_set_options(struct trace_array *tr, char *option) + ret = set_tracer_flag(tr, 1 << ret, !neg); + + mutex_unlock(&trace_types_lock); ++ mutex_unlock(&event_mutex); + + /* + * If the first trailing whitespace is replaced with '\0' by strstrip, +@@ -7972,9 +7978,11 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, + if (val != 0 && val != 1) + return -EINVAL; + ++ mutex_lock(&event_mutex); + mutex_lock(&trace_types_lock); + ret = set_tracer_flag(tr, 1 << index, val); + mutex_unlock(&trace_types_lock); ++ mutex_unlock(&event_mutex); + + if (ret < 0) + return ret; +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index fba87d10f0c1..995061bb2dec 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -320,7 +320,8 @@ void trace_event_enable_cmd_record(bool enable) + struct trace_event_file *file; + struct trace_array *tr; + +- mutex_lock(&event_mutex); ++ lockdep_assert_held(&event_mutex); ++ + do_for_each_event_file(tr, file) { + + if (!(file->flags & EVENT_FILE_FL_ENABLED)) +@@ -334,7 +335,6 @@ void trace_event_enable_cmd_record(bool enable) + clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); + } + } while_for_each_event_file(); +- mutex_unlock(&event_mutex); + } + + void trace_event_enable_tgid_record(bool enable) +@@ -342,7 +342,8 @@ void trace_event_enable_tgid_record(bool enable) + struct trace_event_file *file; + struct trace_array *tr; + +- mutex_lock(&event_mutex); ++ lockdep_assert_held(&event_mutex); ++ + do_for_each_event_file(tr, file) { + if (!(file->flags & 
EVENT_FILE_FL_ENABLED)) + continue; +@@ -356,7 +357,6 @@ void trace_event_enable_tgid_record(bool enable) + &file->flags); + } + } while_for_each_event_file(); +- mutex_unlock(&event_mutex); + } + + static int __ftrace_event_enable_disable(struct trace_event_file *file, +diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c +index c9a74f82b14a..bf44f6bbd0c3 100644 +--- a/kernel/trace/trace_events_filter.c ++++ b/kernel/trace/trace_events_filter.c +@@ -1662,7 +1662,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir, + parse_error(pe, FILT_ERR_BAD_SUBSYS_FILTER, 0); + return -EINVAL; + fail_mem: +- kfree(filter); ++ __free_filter(filter); + /* If any call succeeded, we still need to sync */ + if (!fail) + tracepoint_synchronize_unregister(); +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c +index 7482a1466ebf..c2783915600c 100644 +--- a/kernel/trace/trace_events_hist.c ++++ b/kernel/trace/trace_events_hist.c +@@ -911,7 +911,26 @@ static notrace void trace_event_raw_event_synth(void *__data, + strscpy(str_field, str_val, STR_VAR_LEN_MAX); + n_u64 += STR_VAR_LEN_MAX / sizeof(u64); + } else { +- entry->fields[n_u64] = var_ref_vals[var_ref_idx + i]; ++ struct synth_field *field = event->fields[i]; ++ u64 val = var_ref_vals[var_ref_idx + i]; ++ ++ switch (field->size) { ++ case 1: ++ *(u8 *)&entry->fields[n_u64] = (u8)val; ++ break; ++ ++ case 2: ++ *(u16 *)&entry->fields[n_u64] = (u16)val; ++ break; ++ ++ case 4: ++ *(u32 *)&entry->fields[n_u64] = (u32)val; ++ break; ++ ++ default: ++ entry->fields[n_u64] = val; ++ break; ++ } + n_u64++; + } + } +diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c +index 9a1c22310323..9e31bfc818ff 100644 +--- a/kernel/trace/tracing_map.c ++++ b/kernel/trace/tracing_map.c +@@ -148,8 +148,8 @@ static int tracing_map_cmp_atomic64(void *val_a, void *val_b) + #define DEFINE_TRACING_MAP_CMP_FN(type) \ + static int tracing_map_cmp_##type(void *val_a, void *val_b) \ + { \ +- type a = *(type *)val_a; \ +- type b = *(type *)val_b; \ ++ type a = (type)(*(u64 *)val_a); \ ++ type b = (type)(*(u64 *)val_b); \ + \ + return (a > b) ? 1 : ((a < b) ? 
-1 : 0); \ + } +diff --git a/lib/ubsan.c b/lib/ubsan.c +index 0c4681118fcd..f007a406f89c 100644 +--- a/lib/ubsan.c ++++ b/lib/ubsan.c +@@ -140,25 +140,21 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type, + } + } + +-static DEFINE_SPINLOCK(report_lock); +- +-static void ubsan_prologue(struct source_location *location, +- unsigned long *flags) ++static void ubsan_prologue(struct source_location *location) + { + current->in_ubsan++; +- spin_lock_irqsave(&report_lock, *flags); + + pr_err("========================================" + "========================================\n"); + print_source_location("UBSAN: Undefined behaviour in", location); + } + +-static void ubsan_epilogue(unsigned long *flags) ++static void ubsan_epilogue(void) + { + dump_stack(); + pr_err("========================================" + "========================================\n"); +- spin_unlock_irqrestore(&report_lock, *flags); ++ + current->in_ubsan--; + } + +@@ -167,14 +163,13 @@ static void handle_overflow(struct overflow_data *data, void *lhs, + { + + struct type_descriptor *type = data->type; +- unsigned long flags; + char lhs_val_str[VALUE_LENGTH]; + char rhs_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs); + val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs); +@@ -186,7 +181,7 @@ static void handle_overflow(struct overflow_data *data, void *lhs, + rhs_val_str, + type->type_name); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + + void __ubsan_handle_add_overflow(struct overflow_data *data, +@@ -214,20 +209,19 @@ EXPORT_SYMBOL(__ubsan_handle_mul_overflow); + void __ubsan_handle_negate_overflow(struct overflow_data *data, + void *old_val) + { +- unsigned long flags; + char old_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val); + + pr_err("negation of %s cannot be represented in type %s:\n", + old_val_str, data->type->type_name); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + EXPORT_SYMBOL(__ubsan_handle_negate_overflow); + +@@ -235,13 +229,12 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow); + void __ubsan_handle_divrem_overflow(struct overflow_data *data, + void *lhs, void *rhs) + { +- unsigned long flags; + char rhs_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs); + +@@ -251,58 +244,52 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data, + else + pr_err("division by zero\n"); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); + + static void handle_null_ptr_deref(struct type_mismatch_data_common *data) + { +- unsigned long flags; +- + if (suppress_report(data->location)) + return; + +- ubsan_prologue(data->location, &flags); ++ ubsan_prologue(data->location); + + pr_err("%s null pointer of type %s\n", + type_check_kinds[data->type_check_kind], + data->type->type_name); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + + static void handle_misaligned_access(struct type_mismatch_data_common *data, + unsigned long ptr) + { +- unsigned long flags; +- + if 
(suppress_report(data->location)) + return; + +- ubsan_prologue(data->location, &flags); ++ ubsan_prologue(data->location); + + pr_err("%s misaligned address %p for type %s\n", + type_check_kinds[data->type_check_kind], + (void *)ptr, data->type->type_name); + pr_err("which requires %ld byte alignment\n", data->alignment); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + + static void handle_object_size_mismatch(struct type_mismatch_data_common *data, + unsigned long ptr) + { +- unsigned long flags; +- + if (suppress_report(data->location)) + return; + +- ubsan_prologue(data->location, &flags); ++ ubsan_prologue(data->location); + pr_err("%s address %p with insufficient space\n", + type_check_kinds[data->type_check_kind], + (void *) ptr); + pr_err("for an object of type %s\n", data->type->type_name); +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + + static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data, +@@ -351,25 +338,23 @@ EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1); + + void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index) + { +- unsigned long flags; + char index_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(index_str, sizeof(index_str), data->index_type, index); + pr_err("index %s is out of range for type %s\n", index_str, + data->array_type->type_name); +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + EXPORT_SYMBOL(__ubsan_handle_out_of_bounds); + + void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, + void *lhs, void *rhs) + { +- unsigned long flags; + struct type_descriptor *rhs_type = data->rhs_type; + struct type_descriptor *lhs_type = data->lhs_type; + char rhs_str[VALUE_LENGTH]; +@@ -379,7 +364,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, + if (suppress_report(&data->location)) + goto out; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs); + val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs); +@@ -402,7 +387,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, + lhs_str, rhs_str, + lhs_type->type_name); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + out: + user_access_restore(ua_flags); + } +@@ -411,11 +396,9 @@ EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); + + void __ubsan_handle_builtin_unreachable(struct unreachable_data *data) + { +- unsigned long flags; +- +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + pr_err("calling __builtin_unreachable()\n"); +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + panic("can't return from __builtin_unreachable()"); + } + EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); +@@ -423,19 +406,18 @@ EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); + void __ubsan_handle_load_invalid_value(struct invalid_value_data *data, + void *val) + { +- unsigned long flags; + char val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + +- ubsan_prologue(&data->location, &flags); ++ ubsan_prologue(&data->location); + + val_to_string(val_str, sizeof(val_str), data->type, val); + + pr_err("load of value %s is not a valid value for type %s\n", + val_str, data->type->type_name); + +- ubsan_epilogue(&flags); ++ ubsan_epilogue(); + } + EXPORT_SYMBOL(__ubsan_handle_load_invalid_value); +diff --git a/mm/filemap.c 
b/mm/filemap.c +index 85b7d087eb45..1f5731768222 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -2329,27 +2329,6 @@ EXPORT_SYMBOL(generic_file_read_iter); + + #ifdef CONFIG_MMU + #define MMAP_LOTSAMISS (100) +-static struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf, +- struct file *fpin) +-{ +- int flags = vmf->flags; +- +- if (fpin) +- return fpin; +- +- /* +- * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or +- * anything, so we only pin the file and drop the mmap_sem if only +- * FAULT_FLAG_ALLOW_RETRY is set. +- */ +- if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) == +- FAULT_FLAG_ALLOW_RETRY) { +- fpin = get_file(vmf->vma->vm_file); +- up_read(&vmf->vma->vm_mm->mmap_sem); +- } +- return fpin; +-} +- + /* + * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem + * @vmf - the vm_fault for this fault. +diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c +index 7dd602d7f8db..ad9d5b1c4473 100644 +--- a/mm/gup_benchmark.c ++++ b/mm/gup_benchmark.c +@@ -26,6 +26,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd, + unsigned long i, nr_pages, addr, next; + int nr; + struct page **pages; ++ int ret = 0; + + if (gup->size > ULONG_MAX) + return -EINVAL; +@@ -63,7 +64,9 @@ static int __gup_benchmark_ioctl(unsigned int cmd, + NULL); + break; + default: +- return -1; ++ kvfree(pages); ++ ret = -EINVAL; ++ goto out; + } + + if (nr <= 0) +@@ -85,7 +88,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd, + gup->put_delta_usec = ktime_us_delta(end_time, start_time); + + kvfree(pages); +- return 0; ++out: ++ return ret; + } + + static long gup_benchmark_ioctl(struct file *filep, unsigned int cmd, +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index b45a95363a84..e0afd582ca01 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -27,6 +27,7 @@ + #include <linux/swapops.h> + #include <linux/jhash.h> + #include <linux/numa.h> ++#include <linux/llist.h> + + #include <asm/page.h> + #include <asm/pgtable.h> +@@ -1255,7 +1256,7 @@ static inline void ClearPageHugeTemporary(struct page *page) + page[2].mapping = NULL; + } + +-void free_huge_page(struct page *page) ++static void __free_huge_page(struct page *page) + { + /* + * Can't pass hstate in here because it is called from the +@@ -1318,6 +1319,54 @@ void free_huge_page(struct page *page) + spin_unlock(&hugetlb_lock); + } + ++/* ++ * As free_huge_page() can be called from a non-task context, we have ++ * to defer the actual freeing in a workqueue to prevent potential ++ * hugetlb_lock deadlock. ++ * ++ * free_hpage_workfn() locklessly retrieves the linked list of pages to ++ * be freed and frees them one-by-one. As the page->mapping pointer is ++ * going to be cleared in __free_huge_page() anyway, it is reused as the ++ * llist_node structure of a lockless linked list of huge pages to be freed. ++ */ ++static LLIST_HEAD(hpage_freelist); ++ ++static void free_hpage_workfn(struct work_struct *work) ++{ ++ struct llist_node *node; ++ struct page *page; ++ ++ node = llist_del_all(&hpage_freelist); ++ ++ while (node) { ++ page = container_of((struct address_space **)node, ++ struct page, mapping); ++ node = node->next; ++ __free_huge_page(page); ++ } ++} ++static DECLARE_WORK(free_hpage_work, free_hpage_workfn); ++ ++void free_huge_page(struct page *page) ++{ ++ /* ++ * Defer freeing if in non-task context to avoid hugetlb_lock deadlock. ++ */ ++ if (!in_task()) { ++ /* ++ * Only call schedule_work() if hpage_freelist is previously ++ * empty. 
Otherwise, schedule_work() had been called but the ++ * workfn hasn't retrieved the list yet. ++ */ ++ if (llist_add((struct llist_node *)&page->mapping, ++ &hpage_freelist)) ++ schedule_work(&free_hpage_work); ++ return; ++ } ++ ++ __free_huge_page(page); ++} ++ + static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) + { + INIT_LIST_HEAD(&page->lru); +diff --git a/mm/internal.h b/mm/internal.h +index 0d5f720c75ab..7dd7fbb577a9 100644 +--- a/mm/internal.h ++++ b/mm/internal.h +@@ -362,6 +362,27 @@ vma_address(struct page *page, struct vm_area_struct *vma) + return max(start, vma->vm_start); + } + ++static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf, ++ struct file *fpin) ++{ ++ int flags = vmf->flags; ++ ++ if (fpin) ++ return fpin; ++ ++ /* ++ * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or ++ * anything, so we only pin the file and drop the mmap_sem if only ++ * FAULT_FLAG_ALLOW_RETRY is set. ++ */ ++ if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) == ++ FAULT_FLAG_ALLOW_RETRY) { ++ fpin = get_file(vmf->vma->vm_file); ++ up_read(&vmf->vma->vm_mm->mmap_sem); ++ } ++ return fpin; ++} ++ + #else /* !CONFIG_MMU */ + static inline void clear_page_mlock(struct page *page) { } + static inline void mlock_vma_page(struct page *page) { } +diff --git a/mm/memory.c b/mm/memory.c +index b1ca51a079f2..cb7c940cf800 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -2227,10 +2227,11 @@ static vm_fault_t do_page_mkwrite(struct vm_fault *vmf) + * + * The function expects the page to be locked and unlocks it. + */ +-static void fault_dirty_shared_page(struct vm_area_struct *vma, +- struct page *page) ++static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) + { ++ struct vm_area_struct *vma = vmf->vma; + struct address_space *mapping; ++ struct page *page = vmf->page; + bool dirtied; + bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; + +@@ -2245,16 +2246,30 @@ static void fault_dirty_shared_page(struct vm_area_struct *vma, + mapping = page_rmapping(page); + unlock_page(page); + ++ if (!page_mkwrite) ++ file_update_time(vma->vm_file); ++ ++ /* ++ * Throttle page dirtying rate down to writeback speed. ++ * ++ * mapping may be NULL here because some device drivers do not ++ * set page.mapping but still dirty their pages ++ * ++ * Drop the mmap_sem before waiting on IO, if we can. The file ++ * is pinning the mapping, as per above. 
++ */
+ if ((dirtied || page_mkwrite) && mapping) {
+- /*
+- * Some device drivers do not set page.mapping
+- * but still dirty their pages
+- */
++ struct file *fpin;
++
++ fpin = maybe_unlock_mmap_for_io(vmf, NULL);
+ balance_dirty_pages_ratelimited(mapping);
++ if (fpin) {
++ fput(fpin);
++ return VM_FAULT_RETRY;
++ }
+ }
+
+- if (!page_mkwrite)
+- file_update_time(vma->vm_file);
++ return 0;
+ }
+
+ /*
+@@ -2497,6 +2512,7 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
+ __releases(vmf->ptl)
+ {
+ struct vm_area_struct *vma = vmf->vma;
++ vm_fault_t ret = VM_FAULT_WRITE;
+
+ get_page(vmf->page);
+
+@@ -2520,10 +2536,10 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
+ wp_page_reuse(vmf);
+ lock_page(vmf->page);
+ }
+- fault_dirty_shared_page(vma, vmf->page);
++ ret |= fault_dirty_shared_page(vmf);
+ put_page(vmf->page);
+
+- return VM_FAULT_WRITE;
++ return ret;
+ }
+
+ /*
+@@ -3567,7 +3583,7 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
+ return ret;
+ }
+
+- fault_dirty_shared_page(vma, vmf->page);
++ ret |= fault_dirty_shared_page(vmf);
+ return ret;
+ }
+
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index f307bd82d750..fab540685279 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -465,8 +465,9 @@ static void update_pgdat_span(struct pglist_data *pgdat)
+ pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
+ }
+
+-static void __remove_zone(struct zone *zone, unsigned long start_pfn,
+- unsigned long nr_pages)
++void __ref remove_pfn_range_from_zone(struct zone *zone,
++ unsigned long start_pfn,
++ unsigned long nr_pages)
+ {
+ struct pglist_data *pgdat = zone->zone_pgdat;
+ unsigned long flags;
+@@ -481,28 +482,30 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
+ return;
+ #endif
+
++ clear_zone_contiguous(zone);
++
+ pgdat_resize_lock(zone->zone_pgdat, &flags);
+ shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
+ update_pgdat_span(pgdat);
+ pgdat_resize_unlock(zone->zone_pgdat, &flags);
++
++ set_zone_contiguous(zone);
+ }
+
+-static void __remove_section(struct zone *zone, unsigned long pfn,
+- unsigned long nr_pages, unsigned long map_offset,
+- struct vmem_altmap *altmap)
++static void __remove_section(unsigned long pfn, unsigned long nr_pages,
++ unsigned long map_offset,
++ struct vmem_altmap *altmap)
+ {
+ struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));
+
+ if (WARN_ON_ONCE(!valid_section(ms)))
+ return;
+
+- __remove_zone(zone, pfn, nr_pages);
+ sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
+ }
+
+ /**
+- * __remove_pages() - remove sections of pages from a zone
+- * @zone: zone from which pages need to be removed
++ * __remove_pages() - remove sections of pages
+ * @pfn: starting pageframe (must be aligned to start of a section)
+ * @nr_pages: number of pages to remove (must be multiple of section size)
+ * @altmap: alternative device page map or %NULL if default memmap is used
+@@ -512,16 +515,14 @@ static void __remove_section(struct zone *zone, unsigned long pfn,
+ * sure that pages are marked reserved and zones are adjusted properly by
+ * calling offline_pages().
+ */ +-void __remove_pages(struct zone *zone, unsigned long pfn, +- unsigned long nr_pages, struct vmem_altmap *altmap) ++void __remove_pages(unsigned long pfn, unsigned long nr_pages, ++ struct vmem_altmap *altmap) + { + unsigned long map_offset = 0; + unsigned long nr, start_sec, end_sec; + + map_offset = vmem_altmap_offset(altmap); + +- clear_zone_contiguous(zone); +- + if (check_pfn_span(pfn, nr_pages, "remove")) + return; + +@@ -533,13 +534,11 @@ void __remove_pages(struct zone *zone, unsigned long pfn, + cond_resched(); + pfns = min(nr_pages, PAGES_PER_SECTION + - (pfn & ~PAGE_SECTION_MASK)); +- __remove_section(zone, pfn, pfns, map_offset, altmap); ++ __remove_section(pfn, pfns, map_offset, altmap); + pfn += pfns; + nr_pages -= pfns; + map_offset = 0; + } +- +- set_zone_contiguous(zone); + } + + int set_online_page_callback(online_page_callback_t callback) +@@ -867,6 +866,7 @@ failed_addition: + (unsigned long long) pfn << PAGE_SHIFT, + (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1); + memory_notify(MEM_CANCEL_ONLINE, &arg); ++ remove_pfn_range_from_zone(zone, pfn, nr_pages); + mem_hotplug_done(); + return ret; + } +@@ -1602,6 +1602,7 @@ static int __ref __offline_pages(unsigned long start_pfn, + writeback_set_ratelimit(); + + memory_notify(MEM_OFFLINE, &arg); ++ remove_pfn_range_from_zone(zone, start_pfn, nr_pages); + mem_hotplug_done(); + return 0; + +diff --git a/mm/memremap.c b/mm/memremap.c +index 03ccbdfeb697..c51c6bd2fe34 100644 +--- a/mm/memremap.c ++++ b/mm/memremap.c +@@ -120,7 +120,7 @@ void memunmap_pages(struct dev_pagemap *pgmap) + + mem_hotplug_begin(); + if (pgmap->type == MEMORY_DEVICE_PRIVATE) { +- __remove_pages(page_zone(first_page), PHYS_PFN(res->start), ++ __remove_pages(PHYS_PFN(res->start), + PHYS_PFN(resource_size(res)), NULL); + } else { + arch_remove_memory(nid, res->start, resource_size(res), +diff --git a/mm/migrate.c b/mm/migrate.c +index 4fe45d1428c8..45d3303e0022 100644 +--- a/mm/migrate.c ++++ b/mm/migrate.c +@@ -1516,9 +1516,11 @@ static int do_move_pages_to_node(struct mm_struct *mm, + /* + * Resolves the given address to a struct page, isolates it from the LRU and + * puts it to the given pagelist. 
+- * Returns -errno if the page cannot be found/isolated or 0 when it has been +- * queued or the page doesn't need to be migrated because it is already on +- * the target node ++ * Returns: ++ * errno - if the page cannot be found/isolated ++ * 0 - when it doesn't have to be migrated because it is already on the ++ * target node ++ * 1 - when it has been queued + */ + static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, + int node, struct list_head *pagelist, bool migrate_all) +@@ -1557,7 +1559,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, + if (PageHuge(page)) { + if (PageHead(page)) { + isolate_huge_page(page, pagelist); +- err = 0; ++ err = 1; + } + } else { + struct page *head; +@@ -1567,7 +1569,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, + if (err) + goto out_putpage; + +- err = 0; ++ err = 1; + list_add_tail(&head->lru, pagelist); + mod_node_page_state(page_pgdat(head), + NR_ISOLATED_ANON + page_is_file_cache(head), +@@ -1644,8 +1646,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, + */ + err = add_page_for_migration(mm, addr, current_node, + &pagelist, flags & MPOL_MF_MOVE_ALL); +- if (!err) ++ ++ if (!err) { ++ /* The page is already on the target node */ ++ err = store_status(status, i, current_node, 1); ++ if (err) ++ goto out_flush; + continue; ++ } else if (err > 0) { ++ /* The page is successfully queued for migration */ ++ continue; ++ } + + err = store_status(status, i, err, 1); + if (err) +diff --git a/mm/mmap.c b/mm/mmap.c +index a7d8c84d19b7..4390dbea4aa5 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -90,12 +90,6 @@ static void unmap_region(struct mm_struct *mm, + * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes + * w: (no) no w: (no) no w: (copy) copy w: (no) no + * x: (no) no x: (no) yes x: (no) yes x: (yes) yes +- * +- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and +- * MAP_PRIVATE: +- * r: (no) no +- * w: (no) no +- * x: (yes) yes + */ + pgprot_t protection_map[16] __ro_after_init = { + __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, +diff --git a/mm/oom_kill.c b/mm/oom_kill.c +index 71e3acea7817..d58c481b3df8 100644 +--- a/mm/oom_kill.c ++++ b/mm/oom_kill.c +@@ -890,7 +890,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message) + K(get_mm_counter(mm, MM_FILEPAGES)), + K(get_mm_counter(mm, MM_SHMEMPAGES)), + from_kuid(&init_user_ns, task_uid(victim)), +- mm_pgtables_bytes(mm), victim->signal->oom_score_adj); ++ mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj); + task_unlock(victim); + + /* +diff --git a/mm/shmem.c b/mm/shmem.c +index 7a22e3e03d11..6074714fdbd4 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -2022,16 +2022,14 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf) + shmem_falloc->waitq && + vmf->pgoff >= shmem_falloc->start && + vmf->pgoff < shmem_falloc->next) { ++ struct file *fpin; + wait_queue_head_t *shmem_falloc_waitq; + DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); + + ret = VM_FAULT_NOPAGE; +- if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) && +- !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { +- /* It's polite to up mmap_sem if we can */ +- up_read(&vma->vm_mm->mmap_sem); ++ fpin = maybe_unlock_mmap_for_io(vmf, NULL); ++ if (fpin) + ret = VM_FAULT_RETRY; +- } + + shmem_falloc_waitq = shmem_falloc->waitq; + prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, +@@ -2049,6 +2047,9 @@ static vm_fault_t shmem_fault(struct 
vm_fault *vmf) + spin_lock(&inode->i_lock); + finish_wait(shmem_falloc_waitq, &shmem_fault_wait); + spin_unlock(&inode->i_lock); ++ ++ if (fpin) ++ fput(fpin); + return ret; + } + spin_unlock(&inode->i_lock); +diff --git a/mm/sparse.c b/mm/sparse.c +index f6891c1992b1..c2c01b6330af 100644 +--- a/mm/sparse.c ++++ b/mm/sparse.c +@@ -647,7 +647,7 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) + #endif + + #ifdef CONFIG_SPARSEMEM_VMEMMAP +-static struct page *populate_section_memmap(unsigned long pfn, ++static struct page * __meminit populate_section_memmap(unsigned long pfn, + unsigned long nr_pages, int nid, struct vmem_altmap *altmap) + { + return __populate_section_memmap(pfn, nr_pages, nid, altmap); +@@ -669,7 +669,7 @@ static void free_map_bootmem(struct page *memmap) + vmemmap_free(start, end, NULL); + } + #else +-struct page *populate_section_memmap(unsigned long pfn, ++struct page * __meminit populate_section_memmap(unsigned long pfn, + unsigned long nr_pages, int nid, struct vmem_altmap *altmap) + { + struct page *page, *ret; +diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c +index 2b2b9aae8a3c..22d17ecfe7df 100644 +--- a/mm/zsmalloc.c ++++ b/mm/zsmalloc.c +@@ -2069,6 +2069,11 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage, + zs_pool_dec_isolated(pool); + } + ++ if (page_zone(newpage) != page_zone(page)) { ++ dec_zone_page_state(page, NR_ZSPAGES); ++ inc_zone_page_state(newpage, NR_ZSPAGES); ++ } ++ + reset_page(page); + put_page(page); + page = newpage; +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c +index 7ff92dd4c53c..87691404d0c6 100644 +--- a/net/bluetooth/hci_conn.c ++++ b/net/bluetooth/hci_conn.c +@@ -1176,8 +1176,10 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, + if (!conn) + return ERR_PTR(-ENOMEM); + +- if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) ++ if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) { ++ hci_conn_del(conn); + return ERR_PTR(-EBUSY); ++ } + + conn->state = BT_CONNECT; + set_bit(HCI_CONN_SCANNING, &conn->flags); +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c +index da7fdbdf9c41..a845786258a0 100644 +--- a/net/bluetooth/l2cap_core.c ++++ b/net/bluetooth/l2cap_core.c +@@ -4936,10 +4936,8 @@ void __l2cap_physical_cfm(struct l2cap_chan *chan, int result) + BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d", + chan, result, local_amp_id, remote_amp_id); + +- if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) { +- l2cap_chan_unlock(chan); ++ if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) + return; +- } + + if (chan->state != BT_CONNECTED) { + l2cap_do_create(chan, result, local_amp_id, remote_amp_id); +diff --git a/net/core/dev.c b/net/core/dev.c +index 046307445ece..3e11c6bb4dd6 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3386,7 +3386,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, + qdisc_calculate_pkt_len(skb, q); + + if (q->flags & TCQ_F_NOLOCK) { +- if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty && ++ if ((q->flags & TCQ_F_CAN_BYPASS) && READ_ONCE(q->empty) && + qdisc_run_begin(q)) { + if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, + &q->state))) { +diff --git a/net/core/neighbour.c b/net/core/neighbour.c +index 08ebc3ac5343..f2452496ad9f 100644 +--- a/net/core/neighbour.c ++++ b/net/core/neighbour.c +@@ -1194,7 +1194,7 @@ static void neigh_update_hhs(struct neighbour *neigh) + + if (update) { + hh = &neigh->hh; +- if (hh->hh_len) { ++ 
if (READ_ONCE(hh->hh_len)) { + write_seqlock_bh(&hh->hh_lock); + update(hh, neigh->dev, neigh->ha); + write_sequnlock_bh(&hh->hh_lock); +@@ -1473,7 +1473,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb) + struct net_device *dev = neigh->dev; + unsigned int seq; + +- if (dev->header_ops->cache && !neigh->hh.hh_len) ++ if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len)) + neigh_hh_init(neigh); + + do { +diff --git a/net/core/sock.c b/net/core/sock.c +index ac78a570e43a..b4d1112174c1 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -2918,7 +2918,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) + + sk->sk_max_pacing_rate = ~0UL; + sk->sk_pacing_rate = ~0UL; +- sk->sk_pacing_shift = 10; ++ WRITE_ONCE(sk->sk_pacing_shift, 10); + sk->sk_incoming_cpu = -1; + + sk_rx_queue_clear(sk); +diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c +index eb29e5adc84d..9f9e00ba3ad7 100644 +--- a/net/core/sysctl_net_core.c ++++ b/net/core/sysctl_net_core.c +@@ -288,6 +288,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write, + return ret; + } + ++# ifdef CONFIG_HAVE_EBPF_JIT + static int + proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, +@@ -298,6 +299,7 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write, + + return proc_dointvec_minmax(table, write, buffer, lenp, ppos); + } ++# endif /* CONFIG_HAVE_EBPF_JIT */ + + static int + proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write, +diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c +index 17374afee28f..9040fe55e0f5 100644 +--- a/net/ethernet/eth.c ++++ b/net/ethernet/eth.c +@@ -244,7 +244,12 @@ int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 + eth->h_proto = type; + memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); + memcpy(eth->h_dest, neigh->ha, ETH_ALEN); +- hh->hh_len = ETH_HLEN; ++ ++ /* Pairs with READ_ONCE() in neigh_resolve_output(), ++ * neigh_hh_output() and neigh_update_hhs(). 
++ */
++ smp_store_release(&hh->hh_len, ETH_HLEN);
++
+ return 0;
+ }
+ EXPORT_SYMBOL(eth_header_cache);
+diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c
+index 94447974a3c0..6618a9d8e58e 100644
+--- a/net/hsr/hsr_debugfs.c
++++ b/net/hsr/hsr_debugfs.c
+@@ -64,7 +64,6 @@ hsr_node_table_open(struct inode *inode, struct file *filp)
+ }
+
+ static const struct file_operations hsr_fops = {
+- .owner = THIS_MODULE,
+ .open = hsr_node_table_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+@@ -78,15 +77,14 @@ static const struct file_operations hsr_fops = {
+ * When debugfs is configured this routine sets up the node_table file per
+ * hsr device for dumping the node_table entries
+ */
+-int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
++void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
+ {
+- int rc = -1;
+ struct dentry *de = NULL;
+
+ de = debugfs_create_dir(hsr_dev->name, NULL);
+- if (!de) {
++ if (IS_ERR(de)) {
+ pr_err("Cannot create hsr debugfs root\n");
+- return rc;
++ return;
+ }
+
+ priv->node_tbl_root = de;
+@@ -94,13 +92,13 @@ int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
+ de = debugfs_create_file("node_table", S_IFREG | 0444,
+ priv->node_tbl_root, priv,
+ &hsr_fops);
+- if (!de) {
++ if (IS_ERR(de)) {
+ pr_err("Cannot create hsr node_table directory\n");
+- return rc;
++ debugfs_remove(priv->node_tbl_root);
++ priv->node_tbl_root = NULL;
++ return;
+ }
+ priv->node_tbl_file = de;
+-
+- return 0;
+ }
+
+ /* hsr_debugfs_term - Tear down debugfs infrastructure
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index b01e1bae4ddc..62c03f0d0079 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -368,7 +368,7 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
+ del_timer_sync(&hsr->prune_timer);
+ del_timer_sync(&hsr->announce_timer);
+
+- hsr_del_self_node(&hsr->self_node_db);
++ hsr_del_self_node(hsr);
+ hsr_del_nodes(&hsr->node_db);
+ }
+
+@@ -440,11 +440,12 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ INIT_LIST_HEAD(&hsr->ports);
+ INIT_LIST_HEAD(&hsr->node_db);
+ INIT_LIST_HEAD(&hsr->self_node_db);
++ spin_lock_init(&hsr->list_lock);
+
+ ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr);
+
+ /* Make sure we recognize frames from ourselves in hsr_rcv() */
+- res = hsr_create_self_node(&hsr->self_node_db, hsr_dev->dev_addr,
++ res = hsr_create_self_node(hsr, hsr_dev->dev_addr,
+ slave[1]->dev_addr);
+ if (res < 0)
+ return res;
+@@ -477,31 +478,32 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+
+ res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
+ if (res)
+- goto err_add_port;
++ goto err_add_master;
+
+ res = register_netdevice(hsr_dev);
+ if (res)
+- goto fail;
++ goto err_unregister;
+
+ res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A);
+ if (res)
+- goto fail;
++ goto err_add_slaves;
++
+ res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B);
+ if (res)
+- goto fail;
++ goto err_add_slaves;
+
++ hsr_debugfs_init(hsr, hsr_dev);
+ mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
+- res = hsr_debugfs_init(hsr, hsr_dev);
+- if (res)
+- goto fail;
+
+ return 0;
+
+-fail:
++err_add_slaves:
++ unregister_netdevice(hsr_dev);
++err_unregister:
+ list_for_each_entry_safe(port, tmp, &hsr->ports, port_list)
+ hsr_del_port(port);
+-err_add_port:
+- hsr_del_self_node(&hsr->self_node_db);
++err_add_master:
++ hsr_del_self_node(hsr);
+
+ return res;
+ }
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
+index 292be446007b..27dc65d7de67 100644
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -75,10 +75,11 @@ static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
+ /* Helper for device init; the self_node_db is used in hsr_rcv() to recognize
+ * frames from self that's been looped over the HSR ring.
+ */
+-int hsr_create_self_node(struct list_head *self_node_db,
++int hsr_create_self_node(struct hsr_priv *hsr,
+ unsigned char addr_a[ETH_ALEN],
+ unsigned char addr_b[ETH_ALEN])
+ {
++ struct list_head *self_node_db = &hsr->self_node_db;
+ struct hsr_node *node, *oldnode;
+
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+@@ -88,33 +89,33 @@ int hsr_create_self_node(struct list_head *self_node_db,
+ ether_addr_copy(node->macaddress_A, addr_a);
+ ether_addr_copy(node->macaddress_B, addr_b);
+
+- rcu_read_lock();
++ spin_lock_bh(&hsr->list_lock);
+ oldnode = list_first_or_null_rcu(self_node_db,
+ struct hsr_node, mac_list);
+ if (oldnode) {
+ list_replace_rcu(&oldnode->mac_list, &node->mac_list);
+- rcu_read_unlock();
+- synchronize_rcu();
+- kfree(oldnode);
++ spin_unlock_bh(&hsr->list_lock);
++ kfree_rcu(oldnode, rcu_head);
+ } else {
+- rcu_read_unlock();
+ list_add_tail_rcu(&node->mac_list, self_node_db);
++ spin_unlock_bh(&hsr->list_lock);
+ }
+
+ return 0;
+ }
+
+-void hsr_del_self_node(struct list_head *self_node_db)
++void hsr_del_self_node(struct hsr_priv *hsr)
+ {
++ struct list_head *self_node_db = &hsr->self_node_db;
+ struct hsr_node *node;
+
+- rcu_read_lock();
++ spin_lock_bh(&hsr->list_lock);
+ node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
+- rcu_read_unlock();
+ if (node) {
+ list_del_rcu(&node->mac_list);
+- kfree(node);
++ kfree_rcu(node, rcu_head);
+ }
++ spin_unlock_bh(&hsr->list_lock);
+ }
+
+ void hsr_del_nodes(struct list_head *node_db)
+@@ -130,30 +131,43 @@ void hsr_del_nodes(struct list_head *node_db)
+ * seq_out is used to initialize filtering of outgoing duplicate frames
+ * originating from the newly added node.
+ */
+-struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
+- u16 seq_out)
++static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
++ struct list_head *node_db,
++ unsigned char addr[],
++ u16 seq_out)
+ {
+- struct hsr_node *node;
++ struct hsr_node *new_node, *node;
+ unsigned long now;
+ int i;
+
+- node = kzalloc(sizeof(*node), GFP_ATOMIC);
+- if (!node)
++ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
++ if (!new_node)
+ return NULL;
+
+- ether_addr_copy(node->macaddress_A, addr);
++ ether_addr_copy(new_node->macaddress_A, addr);
+
+ /* We are only interested in time diffs here, so use current jiffies
+ * as initialization. (0 could trigger a spurious ring error warning).
+ */ + now = jiffies; + for (i = 0; i < HSR_PT_PORTS; i++) +- node->time_in[i] = now; ++ new_node->time_in[i] = now; + for (i = 0; i < HSR_PT_PORTS; i++) +- node->seq_out[i] = seq_out; +- +- list_add_tail_rcu(&node->mac_list, node_db); ++ new_node->seq_out[i] = seq_out; + ++ spin_lock_bh(&hsr->list_lock); ++ list_for_each_entry_rcu(node, node_db, mac_list) { ++ if (ether_addr_equal(node->macaddress_A, addr)) ++ goto out; ++ if (ether_addr_equal(node->macaddress_B, addr)) ++ goto out; ++ } ++ list_add_tail_rcu(&new_node->mac_list, node_db); ++ spin_unlock_bh(&hsr->list_lock); ++ return new_node; ++out: ++ spin_unlock_bh(&hsr->list_lock); ++ kfree(new_node); + return node; + } + +@@ -163,6 +177,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, + bool is_sup) + { + struct list_head *node_db = &port->hsr->node_db; ++ struct hsr_priv *hsr = port->hsr; + struct hsr_node *node; + struct ethhdr *ethhdr; + u16 seq_out; +@@ -196,7 +211,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, + seq_out = HSR_SEQNR_START; + } + +- return hsr_add_node(node_db, ethhdr->h_source, seq_out); ++ return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out); + } + + /* Use the Supervision frame's info about an eventual macaddress_B for merging +@@ -206,10 +221,11 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, + void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, + struct hsr_port *port_rcv) + { +- struct ethhdr *ethhdr; +- struct hsr_node *node_real; ++ struct hsr_priv *hsr = port_rcv->hsr; + struct hsr_sup_payload *hsr_sp; ++ struct hsr_node *node_real; + struct list_head *node_db; ++ struct ethhdr *ethhdr; + int i; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); +@@ -231,7 +247,7 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, + node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A); + if (!node_real) + /* No frame received from AddrA of this node yet */ +- node_real = hsr_add_node(node_db, hsr_sp->macaddress_A, ++ node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A, + HSR_SEQNR_START - 1); + if (!node_real) + goto done; /* No mem */ +@@ -252,7 +268,9 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, + } + node_real->addr_B_port = port_rcv->type; + ++ spin_lock_bh(&hsr->list_lock); + list_del_rcu(&node_curr->mac_list); ++ spin_unlock_bh(&hsr->list_lock); + kfree_rcu(node_curr, rcu_head); + + done: +@@ -368,12 +386,13 @@ void hsr_prune_nodes(struct timer_list *t) + { + struct hsr_priv *hsr = from_timer(hsr, t, prune_timer); + struct hsr_node *node; ++ struct hsr_node *tmp; + struct hsr_port *port; + unsigned long timestamp; + unsigned long time_a, time_b; + +- rcu_read_lock(); +- list_for_each_entry_rcu(node, &hsr->node_db, mac_list) { ++ spin_lock_bh(&hsr->list_lock); ++ list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) { + /* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A] + * nor time_in[HSR_PT_SLAVE_B], will ever be updated for + * the master port. 
Thus the master node will be repeatedly +@@ -421,7 +440,7 @@ void hsr_prune_nodes(struct timer_list *t) + kfree_rcu(node, rcu_head); + } + } +- rcu_read_unlock(); ++ spin_unlock_bh(&hsr->list_lock); + + /* Restart timer */ + mod_timer(&hsr->prune_timer, +diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h +index 89a3ce38151d..0f0fa12b4329 100644 +--- a/net/hsr/hsr_framereg.h ++++ b/net/hsr/hsr_framereg.h +@@ -12,10 +12,8 @@ + + struct hsr_node; + +-void hsr_del_self_node(struct list_head *self_node_db); ++void hsr_del_self_node(struct hsr_priv *hsr); + void hsr_del_nodes(struct list_head *node_db); +-struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], +- u16 seq_out); + struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, + bool is_sup); + void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, +@@ -33,7 +31,7 @@ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node, + + void hsr_prune_nodes(struct timer_list *t); + +-int hsr_create_self_node(struct list_head *self_node_db, ++int hsr_create_self_node(struct hsr_priv *hsr, + unsigned char addr_a[ETH_ALEN], + unsigned char addr_b[ETH_ALEN]); + +diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c +index b9988a662ee1..6deb8fa8d5c8 100644 +--- a/net/hsr/hsr_main.c ++++ b/net/hsr/hsr_main.c +@@ -64,7 +64,7 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, + + /* Make sure we recognize frames from ourselves in hsr_rcv() */ + port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B); +- res = hsr_create_self_node(&hsr->self_node_db, ++ res = hsr_create_self_node(hsr, + master->dev->dev_addr, + port ? + port->dev->dev_addr : +diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h +index 96fac696a1e1..9ec38e33b8b1 100644 +--- a/net/hsr/hsr_main.h ++++ b/net/hsr/hsr_main.h +@@ -160,8 +160,9 @@ struct hsr_priv { + int announce_count; + u16 sequence_nr; + u16 sup_sequence_nr; /* For HSRv1 separate seq_nr for supervision */ +- u8 prot_version; /* Indicate if HSRv0 or HSRv1. */ +- spinlock_t seqnr_lock; /* locking for sequence_nr */ ++ u8 prot_version; /* Indicate if HSRv0 or HSRv1. 
*/ ++ spinlock_t seqnr_lock; /* locking for sequence_nr */ ++ spinlock_t list_lock; /* locking for node list */ + unsigned char sup_multicast_addr[ETH_ALEN]; + #ifdef CONFIG_DEBUG_FS + struct dentry *node_tbl_root; +@@ -184,15 +185,12 @@ static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb) + } + + #if IS_ENABLED(CONFIG_DEBUG_FS) +-int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev); ++void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev); + void hsr_debugfs_term(struct hsr_priv *priv); + #else +-static inline int hsr_debugfs_init(struct hsr_priv *priv, +- struct net_device *hsr_dev) +-{ +- return 0; +-} +- ++static inline void hsr_debugfs_init(struct hsr_priv *priv, ++ struct net_device *hsr_dev) ++{} + static inline void hsr_debugfs_term(struct hsr_priv *priv) + {} + #endif +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index d8876f0e9672..e537a4b6531b 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -1958,8 +1958,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, + struct sk_buff *skb, *last; + u32 urg_hole = 0; + struct scm_timestamping_internal tss; +- bool has_tss = false; +- bool has_cmsg; ++ int cmsg_flags; + + if (unlikely(flags & MSG_ERRQUEUE)) + return inet_recv_error(sk, msg, len, addr_len); +@@ -1974,7 +1973,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, + if (sk->sk_state == TCP_LISTEN) + goto out; + +- has_cmsg = tp->recvmsg_inq; ++ cmsg_flags = tp->recvmsg_inq ? 1 : 0; + timeo = sock_rcvtimeo(sk, nonblock); + + /* Urgent data needs to be handled specially. */ +@@ -2157,8 +2156,7 @@ skip_copy: + + if (TCP_SKB_CB(skb)->has_rxtstamp) { + tcp_update_recv_tstamps(skb, &tss); +- has_tss = true; +- has_cmsg = true; ++ cmsg_flags |= 2; + } + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) + goto found_fin_ok; +@@ -2183,10 +2181,10 @@ found_fin_ok: + + release_sock(sk); + +- if (has_cmsg) { +- if (has_tss) ++ if (cmsg_flags) { ++ if (cmsg_flags & 2) + tcp_recv_timestamp(msg, sk, &tss); +- if (tp->recvmsg_inq) { ++ if (cmsg_flags & 1) { + inq = tcp_inq_hint(sk); + put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq); + } +diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c +index 32772d6ded4e..a6545ef0d27b 100644 +--- a/net/ipv4/tcp_bbr.c ++++ b/net/ipv4/tcp_bbr.c +@@ -306,7 +306,8 @@ static u32 bbr_tso_segs_goal(struct sock *sk) + /* Sort of tcp_tso_autosize() but ignoring + * driver provided sk_gso_max_size. 
+ */ +- bytes = min_t(unsigned long, sk->sk_pacing_rate >> sk->sk_pacing_shift, ++ bytes = min_t(unsigned long, ++ sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift), + GSO_MAX_SIZE - 1 - MAX_TCP_HEADER); + segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk)); + +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 0269584e9cf7..e4ba915c4bb5 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -1728,7 +1728,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, + u32 bytes, segs; + + bytes = min_t(unsigned long, +- sk->sk_pacing_rate >> sk->sk_pacing_shift, ++ sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift), + sk->sk_gso_max_size - 1 - MAX_TCP_HEADER); + + /* Goal is to send at least one packet per ms, +@@ -2263,7 +2263,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb, + + limit = max_t(unsigned long, + 2 * skb->truesize, +- sk->sk_pacing_rate >> sk->sk_pacing_shift); ++ sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift)); + if (sk->sk_pacing_status == SK_PACING_NONE) + limit = min_t(unsigned long, limit, + sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes); +diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c +index a2b58de82600..f8f52ff99cfb 100644 +--- a/net/netfilter/nf_queue.c ++++ b/net/netfilter/nf_queue.c +@@ -189,7 +189,7 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state, + goto err; + } + +- if (!skb_dst_force(skb) && state->hook != NF_INET_PRE_ROUTING) { ++ if (skb_dst(skb) && !skb_dst_force(skb)) { + status = -ENETDOWN; + goto err; + } +diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c +index f92a82c73880..95980154ef02 100644 +--- a/net/netfilter/nft_tproxy.c ++++ b/net/netfilter/nft_tproxy.c +@@ -50,7 +50,7 @@ static void nft_tproxy_eval_v4(const struct nft_expr *expr, + taddr = nf_tproxy_laddr4(skb, taddr, iph->daddr); + + if (priv->sreg_port) +- tport = regs->data[priv->sreg_port]; ++ tport = nft_reg_load16(®s->data[priv->sreg_port]); + if (!tport) + tport = hp->dest; + +@@ -117,7 +117,7 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr, + taddr = *nf_tproxy_laddr6(skb, &taddr, &iph->daddr); + + if (priv->sreg_port) +- tport = regs->data[priv->sreg_port]; ++ tport = nft_reg_load16(®s->data[priv->sreg_port]); + if (!tport) + tport = hp->dest; + +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c +index 8769b4b8807d..7c3c5fdb82a9 100644 +--- a/net/sched/sch_generic.c ++++ b/net/sched/sch_generic.c +@@ -657,7 +657,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc) + if (likely(skb)) { + qdisc_update_stats_at_dequeue(qdisc, skb); + } else { +- qdisc->empty = true; ++ WRITE_ONCE(qdisc->empty, true); + } + + return skb; +diff --git a/net/socket.c b/net/socket.c +index d7a106028f0e..ca8de9e1582d 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -955,7 +955,7 @@ static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to) + .msg_iocb = iocb}; + ssize_t res; + +- if (file->f_flags & O_NONBLOCK) ++ if (file->f_flags & O_NONBLOCK || (iocb->ki_flags & IOCB_NOWAIT)) + msg.msg_flags = MSG_DONTWAIT; + + if (iocb->ki_pos != 0) +@@ -980,7 +980,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from) + if (iocb->ki_pos != 0) + return -ESPIPE; + +- if (file->f_flags & O_NONBLOCK) ++ if (file->f_flags & O_NONBLOCK || (iocb->ki_flags & IOCB_NOWAIT)) + msg.msg_flags = MSG_DONTWAIT; + + if (sock->type == SOCK_SEQPACKET) +diff --git a/net/sunrpc/cache.c 
b/net/sunrpc/cache.c +index a349094f6fb7..f740cb51802a 100644 +--- a/net/sunrpc/cache.c ++++ b/net/sunrpc/cache.c +@@ -53,9 +53,6 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail) + h->last_refresh = now; + } + +-static inline int cache_is_valid(struct cache_head *h); +-static void cache_fresh_locked(struct cache_head *head, time_t expiry, +- struct cache_detail *detail); + static void cache_fresh_unlocked(struct cache_head *head, + struct cache_detail *detail); + +@@ -105,9 +102,6 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail, + if (cache_is_expired(detail, tmp)) { + hlist_del_init_rcu(&tmp->cache_list); + detail->entries --; +- if (cache_is_valid(tmp) == -EAGAIN) +- set_bit(CACHE_NEGATIVE, &tmp->flags); +- cache_fresh_locked(tmp, 0, detail); + freeme = tmp; + break; + } +diff --git a/samples/seccomp/user-trap.c b/samples/seccomp/user-trap.c +index 6d0125ca8af7..20291ec6489f 100644 +--- a/samples/seccomp/user-trap.c ++++ b/samples/seccomp/user-trap.c +@@ -298,14 +298,14 @@ int main(void) + req = malloc(sizes.seccomp_notif); + if (!req) + goto out_close; +- memset(req, 0, sizeof(*req)); + + resp = malloc(sizes.seccomp_notif_resp); + if (!resp) + goto out_req; +- memset(resp, 0, sizeof(*resp)); ++ memset(resp, 0, sizes.seccomp_notif_resp); + + while (1) { ++ memset(req, 0, sizes.seccomp_notif); + if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, req)) { + perror("ioctl recv"); + goto out_resp; +diff --git a/samples/trace_printk/trace-printk.c b/samples/trace_printk/trace-printk.c +index 7affc3b50b61..cfc159580263 100644 +--- a/samples/trace_printk/trace-printk.c ++++ b/samples/trace_printk/trace-printk.c +@@ -36,6 +36,7 @@ static int __init trace_printk_init(void) + + /* Kick off printing in irq context */ + irq_work_queue(&irqwork); ++ irq_work_sync(&irqwork); + + trace_printk("This is a %s that will use trace_bprintk()\n", + "static string"); +diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig +index d33de0b9f4f5..e3569543bdac 100644 +--- a/scripts/gcc-plugins/Kconfig ++++ b/scripts/gcc-plugins/Kconfig +@@ -14,8 +14,8 @@ config HAVE_GCC_PLUGINS + An arch should select this symbol if it supports building with + GCC plugins. + +-config GCC_PLUGINS +- bool ++menuconfig GCC_PLUGINS ++ bool "GCC plugins" + depends on HAVE_GCC_PLUGINS + depends on PLUGIN_HOSTCC != "" + default y +@@ -25,8 +25,7 @@ config GCC_PLUGINS + + See Documentation/core-api/gcc-plugins.rst for details. 
+ +-menu "GCC plugins" +- depends on GCC_PLUGINS ++if GCC_PLUGINS + + config GCC_PLUGIN_CYC_COMPLEXITY + bool "Compute the cyclomatic complexity of a function" if EXPERT +@@ -113,4 +112,4 @@ config GCC_PLUGIN_ARM_SSP_PER_TASK + bool + depends on GCC_PLUGINS && ARM + +-endmenu ++endif +diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c +index 45d13b6462aa..90d21675c3ad 100644 +--- a/security/apparmor/apparmorfs.c ++++ b/security/apparmor/apparmorfs.c +@@ -593,7 +593,7 @@ static __poll_t ns_revision_poll(struct file *file, poll_table *pt) + + void __aa_bump_ns_revision(struct aa_ns *ns) + { +- ns->revision++; ++ WRITE_ONCE(ns->revision, ns->revision + 1); + wake_up_interruptible(&ns->wait); + } + +diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c +index 9e0492795267..039ca71872ce 100644 +--- a/security/apparmor/domain.c ++++ b/security/apparmor/domain.c +@@ -317,6 +317,7 @@ static int aa_xattrs_match(const struct linux_binprm *bprm, + + if (!bprm || !profile->xattr_count) + return 0; ++ might_sleep(); + + /* transition from exec match to xattr set */ + state = aa_dfa_null_transition(profile->xmatch, state); +@@ -361,10 +362,11 @@ out: + } + + /** +- * __attach_match_ - find an attachment match ++ * find_attach - do attachment search for unconfined processes + * @bprm - binprm structure of transitioning task +- * @name - to match against (NOT NULL) ++ * @ns: the current namespace (NOT NULL) + * @head - profile list to walk (NOT NULL) ++ * @name - to match against (NOT NULL) + * @info - info message if there was an error (NOT NULL) + * + * Do a linear search on the profiles in the list. There is a matching +@@ -374,12 +376,11 @@ out: + * + * Requires: @head not be shared or have appropriate locks held + * +- * Returns: profile or NULL if no match found ++ * Returns: label or NULL if no match found + */ +-static struct aa_profile *__attach_match(const struct linux_binprm *bprm, +- const char *name, +- struct list_head *head, +- const char **info) ++static struct aa_label *find_attach(const struct linux_binprm *bprm, ++ struct aa_ns *ns, struct list_head *head, ++ const char *name, const char **info) + { + int candidate_len = 0, candidate_xattrs = 0; + bool conflict = false; +@@ -388,6 +389,8 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm, + AA_BUG(!name); + AA_BUG(!head); + ++ rcu_read_lock(); ++restart: + list_for_each_entry_rcu(profile, head, base.list) { + if (profile->label.flags & FLAG_NULL && + &profile->label == ns_unconfined(profile->ns)) +@@ -413,16 +416,32 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm, + perm = dfa_user_allow(profile->xmatch, state); + /* any accepting state means a valid match. 
*/ + if (perm & MAY_EXEC) { +- int ret; ++ int ret = 0; + + if (count < candidate_len) + continue; + +- ret = aa_xattrs_match(bprm, profile, state); +- /* Fail matching if the xattrs don't match */ +- if (ret < 0) +- continue; +- ++ if (bprm && profile->xattr_count) { ++ long rev = READ_ONCE(ns->revision); ++ ++ if (!aa_get_profile_not0(profile)) ++ goto restart; ++ rcu_read_unlock(); ++ ret = aa_xattrs_match(bprm, profile, ++ state); ++ rcu_read_lock(); ++ aa_put_profile(profile); ++ if (rev != ++ READ_ONCE(ns->revision)) ++ /* policy changed */ ++ goto restart; ++ /* ++ * Fail matching if the xattrs don't ++ * match ++ */ ++ if (ret < 0) ++ continue; ++ } + /* + * TODO: allow for more flexible best match + * +@@ -445,43 +464,28 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm, + candidate_xattrs = ret; + conflict = false; + } +- } else if (!strcmp(profile->base.name, name)) ++ } else if (!strcmp(profile->base.name, name)) { + /* + * old exact non-re match, without conditionals such + * as xattrs. no more searching required + */ +- return profile; ++ candidate = profile; ++ goto out; ++ } + } + +- if (conflict) { +- *info = "conflicting profile attachments"; ++ if (!candidate || conflict) { ++ if (conflict) ++ *info = "conflicting profile attachments"; ++ rcu_read_unlock(); + return NULL; + } + +- return candidate; +-} +- +-/** +- * find_attach - do attachment search for unconfined processes +- * @bprm - binprm structure of transitioning task +- * @ns: the current namespace (NOT NULL) +- * @list: list to search (NOT NULL) +- * @name: the executable name to match against (NOT NULL) +- * @info: info message if there was an error +- * +- * Returns: label or NULL if no match found +- */ +-static struct aa_label *find_attach(const struct linux_binprm *bprm, +- struct aa_ns *ns, struct list_head *list, +- const char *name, const char **info) +-{ +- struct aa_profile *profile; +- +- rcu_read_lock(); +- profile = aa_get_profile(__attach_match(bprm, name, list, info)); ++out: ++ candidate = aa_get_newest_profile(candidate); + rcu_read_unlock(); + +- return profile ? 
&profile->label : NULL; ++ return &candidate->label; + } + + static const char *next_name(int xtype, const char *name) +diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c +index ade333074c8e..06355717ee84 100644 +--- a/security/apparmor/policy.c ++++ b/security/apparmor/policy.c +@@ -1124,8 +1124,8 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj, + if (!name) { + /* remove namespace - can only happen if fqname[0] == ':' */ + mutex_lock_nested(&ns->parent->lock, ns->level); +- __aa_remove_ns(ns); + __aa_bump_ns_revision(ns); ++ __aa_remove_ns(ns); + mutex_unlock(&ns->parent->lock); + } else { + /* remove profile */ +@@ -1137,9 +1137,9 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj, + goto fail_ns_lock; + } + name = profile->base.hname; ++ __aa_bump_ns_revision(ns); + __remove_profile(profile); + __aa_labelset_update_subtree(ns); +- __aa_bump_ns_revision(ns); + mutex_unlock(&ns->lock); + } + +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c +index d4280568a41e..5c74ea2bb44b 100644 +--- a/sound/core/pcm_native.c ++++ b/sound/core/pcm_native.c +@@ -3408,7 +3408,8 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream, + #endif /* CONFIG_GENERIC_ALLOCATOR */ + #ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */ + if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page && +- substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV) ++ (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV || ++ substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC)) + return dma_mmap_coherent(substream->dma_buffer.dev.dev, + area, + substream->runtime->dma_area, +diff --git a/sound/firewire/motu/motu-proc.c b/sound/firewire/motu/motu-proc.c +index ea46fb4c1b5a..126a7bd187bb 100644 +--- a/sound/firewire/motu/motu-proc.c ++++ b/sound/firewire/motu/motu-proc.c +@@ -16,7 +16,7 @@ static const char *const clock_names[] = { + [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT] = "S/PDIF on optical interface", + [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT_A] = "S/PDIF on optical interface A", + [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT_B] = "S/PDIF on optical interface B", +- [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX] = "S/PCIF on coaxial interface", ++ [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX] = "S/PDIF on coaxial interface", + [SND_MOTU_CLOCK_SOURCE_AESEBU_ON_XLR] = "AESEBU on XLR interface", + [SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC] = "Word clock on BNC interface", + }; +diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c +index 78dd213589b4..fa3c39cff5f8 100644 +--- a/sound/isa/cs423x/cs4236.c ++++ b/sound/isa/cs423x/cs4236.c +@@ -278,7 +278,8 @@ static int snd_cs423x_pnp_init_mpu(int dev, struct pnp_dev *pdev) + } else { + mpu_port[dev] = pnp_port_start(pdev, 0); + if (mpu_irq[dev] >= 0 && +- pnp_irq_valid(pdev, 0) && pnp_irq(pdev, 0) >= 0) { ++ pnp_irq_valid(pdev, 0) && ++ pnp_irq(pdev, 0) != (resource_size_t)-1) { + mpu_irq[dev] = pnp_irq(pdev, 0); + } else { + mpu_irq[dev] = -1; /* disable interrupt */ +diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c +index 6387c7e90918..76b507058cb4 100644 +--- a/sound/pci/hda/hda_controller.c ++++ b/sound/pci/hda/hda_controller.c +@@ -884,7 +884,7 @@ static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr, + return -EAGAIN; /* give a chance to retry */ + } + +- dev_WARN(chip->card->dev, ++ dev_err(chip->card->dev, + "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n", + bus->last_cmd[addr]); + chip->single_cmd = 1; +diff 
--git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 86a416cdeb29..f6cbb831b86a 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -280,12 +280,13 @@ enum { + + /* quirks for old Intel chipsets */ + #define AZX_DCAPS_INTEL_ICH \ +- (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE) ++ (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE |\ ++ AZX_DCAPS_SYNC_WRITE) + + /* quirks for Intel PCH */ + #define AZX_DCAPS_INTEL_PCH_BASE \ + (AZX_DCAPS_NO_ALIGN_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY |\ +- AZX_DCAPS_SNOOP_TYPE(SCH)) ++ AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE) + + /* PCH up to IVB; no runtime PM; bind with i915 gfx */ + #define AZX_DCAPS_INTEL_PCH_NOPM \ +@@ -300,13 +301,13 @@ enum { + #define AZX_DCAPS_INTEL_HASWELL \ + (/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_COUNT_LPIB_DELAY |\ + AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\ +- AZX_DCAPS_SNOOP_TYPE(SCH)) ++ AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE) + + /* Broadwell HDMI can't use position buffer reliably, force to use LPIB */ + #define AZX_DCAPS_INTEL_BROADWELL \ + (/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_POSFIX_LPIB |\ + AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\ +- AZX_DCAPS_SNOOP_TYPE(SCH)) ++ AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE) + + #define AZX_DCAPS_INTEL_BAYTRAIL \ + (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_I915_COMPONENT) +@@ -1280,11 +1281,17 @@ static void init_vga_switcheroo(struct azx *chip) + { + struct hda_intel *hda = container_of(chip, struct hda_intel, chip); + struct pci_dev *p = get_bound_vga(chip->pci); ++ struct pci_dev *parent; + if (p) { + dev_info(chip->card->dev, + "Handle vga_switcheroo audio client\n"); + hda->use_vga_switcheroo = 1; +- chip->bus.keep_power = 1; /* cleared in either gpu_bound op or codec probe */ ++ ++ /* cleared in either gpu_bound op or codec probe, or when its ++ * upstream port has _PR3 (i.e. dGPU). ++ */ ++ parent = pci_upstream_bridge(p); ++ chip->bus.keep_power = parent ? 
!pci_pr3_present(parent) : 1; + chip->driver_caps |= AZX_DCAPS_PM_RUNTIME; + pci_dev_put(p); + } +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index e1229dbad6b2..252888f426de 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -501,6 +501,7 @@ static void alc_shutup_pins(struct hda_codec *codec) + struct alc_spec *spec = codec->spec; + + switch (codec->core.vendor_id) { ++ case 0x10ec0283: + case 0x10ec0286: + case 0x10ec0288: + case 0x10ec0298: +@@ -5547,6 +5548,16 @@ static void alc295_fixup_disable_dac3(struct hda_codec *codec, + } + } + ++/* force NID 0x17 (Bass Speaker) to DAC1 to share it with the main speaker */ ++static void alc285_fixup_speaker2_to_dac1(struct hda_codec *codec, ++ const struct hda_fixup *fix, int action) ++{ ++ if (action == HDA_FIXUP_ACT_PRE_PROBE) { ++ hda_nid_t conn[1] = { 0x02 }; ++ snd_hda_override_conn_list(codec, 0x17, 1, conn); ++ } ++} ++ + /* Hook to update amp GPIO4 for automute */ + static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec, + struct hda_jack_callback *jack) +@@ -5849,6 +5860,7 @@ enum { + ALC225_FIXUP_DISABLE_MIC_VREF, + ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC295_FIXUP_DISABLE_DAC3, ++ ALC285_FIXUP_SPEAKER2_TO_DAC1, + ALC280_FIXUP_HP_HEADSET_MIC, + ALC221_FIXUP_HP_FRONT_MIC, + ALC292_FIXUP_TPT460, +@@ -5893,9 +5905,12 @@ enum { + ALC256_FIXUP_ASUS_HEADSET_MIC, + ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, + ALC299_FIXUP_PREDATOR_SPK, +- ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC, + ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, +- ALC294_FIXUP_ASUS_INTSPK_GPIO, ++ ALC289_FIXUP_DELL_SPK2, ++ ALC289_FIXUP_DUAL_SPK, ++ ALC294_FIXUP_SPK2_TO_DAC1, ++ ALC294_FIXUP_ASUS_DUAL_SPK, ++ + }; + + static const struct hda_fixup alc269_fixups[] = { +@@ -6649,6 +6664,10 @@ static const struct hda_fixup alc269_fixups[] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc295_fixup_disable_dac3, + }, ++ [ALC285_FIXUP_SPEAKER2_TO_DAC1] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc285_fixup_speaker2_to_dac1, ++ }, + [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { +@@ -6966,33 +6985,45 @@ static const struct hda_fixup alc269_fixups[] = { + { } + } + }, +- [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = { ++ [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { +- { 0x14, 0x411111f0 }, /* disable confusing internal speaker */ +- { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */ ++ { 0x19, 0x04a11040 }, ++ { 0x21, 0x04211020 }, + { } + }, + .chained = true, +- .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC ++ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE + }, +- [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = { ++ [ALC289_FIXUP_DELL_SPK2] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { +- { 0x19, 0x04a11040 }, +- { 0x21, 0x04211020 }, ++ { 0x17, 0x90170130 }, /* bass spk */ + { } + }, + .chained = true, +- .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE ++ .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE + }, +- [ALC294_FIXUP_ASUS_INTSPK_GPIO] = { ++ [ALC289_FIXUP_DUAL_SPK] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc285_fixup_speaker2_to_dac1, ++ .chained = true, ++ .chain_id = ALC289_FIXUP_DELL_SPK2 ++ }, ++ [ALC294_FIXUP_SPK2_TO_DAC1] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc285_fixup_speaker2_to_dac1, ++ .chained = true, ++ .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC ++ }, ++ [ALC294_FIXUP_ASUS_DUAL_SPK] = { + .type = HDA_FIXUP_FUNC, + /* The 
GPIO must be pulled to initialize the AMP */ + .v.func = alc_fixup_gpio4, + .chained = true, +- .chain_id = ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC ++ .chain_id = ALC294_FIXUP_SPK2_TO_DAC1 + }, ++ + }; + + static const struct snd_pci_quirk alc269_fixup_tbl[] = { +@@ -7065,6 +7096,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), ++ SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK), ++ SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK), + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), +@@ -7152,7 +7185,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), + SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), + SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), +- SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_GPIO), ++ SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), + SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), + SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC), +@@ -7224,6 +7257,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), ++ SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1), + SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), + SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), + SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), +@@ -7408,6 +7442,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { + {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"}, + {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"}, + {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"}, ++ {.id = ALC285_FIXUP_SPEAKER2_TO_DAC1, .name = "alc285-speaker2-to-dac1"}, + {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"}, + {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"}, + {.id = ALC298_FIXUP_SPK_VOLUME, .name = "alc298-spk-volume"}, +diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c +index e62c11816683..f360b33a1042 100644 +--- a/sound/pci/ice1712/ice1724.c ++++ b/sound/pci/ice1712/ice1724.c +@@ -647,6 +647,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate, + unsigned long flags; + unsigned char mclk_change; + unsigned int i, old_rate; ++ bool call_set_rate = false; + + if (rate > ice->hw_rates->list[ice->hw_rates->count - 1]) + return -EINVAL; +@@ -670,7 +671,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate, + * setting clock rate for internal clock mode */ + old_rate = 
ice->get_rate(ice); + if (force || (old_rate != rate)) +- ice->set_rate(ice, rate); ++ call_set_rate = true; + else if (rate == ice->cur_rate) { + spin_unlock_irqrestore(&ice->reg_lock, flags); + return 0; +@@ -678,12 +679,14 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate, + } + + ice->cur_rate = rate; ++ spin_unlock_irqrestore(&ice->reg_lock, flags); ++ ++ if (call_set_rate) ++ ice->set_rate(ice, rate); + + /* setting master clock */ + mclk_change = ice->set_mclk(ice, rate); + +- spin_unlock_irqrestore(&ice->reg_lock, flags); +- + if (mclk_change && ice->gpio.i2s_mclk_changed) + ice->gpio.i2s_mclk_changed(ice); + if (ice->gpio.set_pro_rate) +diff --git a/sound/usb/card.h b/sound/usb/card.h +index 2991b9986f66..395403a2d33f 100644 +--- a/sound/usb/card.h ++++ b/sound/usb/card.h +@@ -145,6 +145,7 @@ struct snd_usb_substream { + struct snd_usb_endpoint *sync_endpoint; + unsigned long flags; + bool need_setup_ep; /* (re)configure EP at prepare? */ ++ bool need_setup_fmt; /* (re)configure fmt after resume? */ + unsigned int speed; /* USB_SPEED_XXX */ + + u64 formats; /* format bitmasks (all or'ed) */ +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c +index ff5ab24f3bd1..a04c727dcd19 100644 +--- a/sound/usb/pcm.c ++++ b/sound/usb/pcm.c +@@ -506,15 +506,15 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt) + if (WARN_ON(!iface)) + return -EINVAL; + alts = usb_altnum_to_altsetting(iface, fmt->altsetting); +- altsd = get_iface_desc(alts); +- if (WARN_ON(altsd->bAlternateSetting != fmt->altsetting)) ++ if (WARN_ON(!alts)) + return -EINVAL; ++ altsd = get_iface_desc(alts); + +- if (fmt == subs->cur_audiofmt) ++ if (fmt == subs->cur_audiofmt && !subs->need_setup_fmt) + return 0; + + /* close the old interface */ +- if (subs->interface >= 0 && subs->interface != fmt->iface) { ++ if (subs->interface >= 0 && (subs->interface != fmt->iface || subs->need_setup_fmt)) { + if (!subs->stream->chip->keep_iface) { + err = usb_set_interface(subs->dev, subs->interface, 0); + if (err < 0) { +@@ -528,6 +528,9 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt) + subs->altset_idx = 0; + } + ++ if (subs->need_setup_fmt) ++ subs->need_setup_fmt = false; ++ + /* set interface */ + if (iface->cur_altsetting != alts) { + err = snd_usb_select_mode_quirk(subs, fmt); +@@ -1735,6 +1738,13 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea + subs->data_endpoint->retire_data_urb = retire_playback_urb; + subs->running = 0; + return 0; ++ case SNDRV_PCM_TRIGGER_SUSPEND: ++ if (subs->stream->chip->setup_fmt_after_resume_quirk) { ++ stop_endpoints(subs, true); ++ subs->need_setup_fmt = true; ++ return 0; ++ } ++ break; + } + + return -EINVAL; +@@ -1767,6 +1777,13 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream + subs->data_endpoint->retire_data_urb = retire_capture_urb; + subs->running = 1; + return 0; ++ case SNDRV_PCM_TRIGGER_SUSPEND: ++ if (subs->stream->chip->setup_fmt_after_resume_quirk) { ++ stop_endpoints(subs, true); ++ subs->need_setup_fmt = true; ++ return 0; ++ } ++ break; + } + + return -EINVAL; +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h +index 70c338f3ae24..d187aa6d50db 100644 +--- a/sound/usb/quirks-table.h ++++ b/sound/usb/quirks-table.h +@@ -3466,7 +3466,8 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), + .vendor_name = "Dell", + .product_name = "WD19 Dock", + .profile_name = "Dell-WD15-Dock", +- .ifnum = 
QUIRK_NO_INTERFACE ++ .ifnum = QUIRK_ANY_INTERFACE, ++ .type = QUIRK_SETUP_FMT_AFTER_RESUME + } + }, + /* MOTU Microbook II */ +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c +index 349e1e52996d..a81c2066499f 100644 +--- a/sound/usb/quirks.c ++++ b/sound/usb/quirks.c +@@ -508,6 +508,16 @@ static int create_standard_mixer_quirk(struct snd_usb_audio *chip, + return snd_usb_create_mixer(chip, quirk->ifnum, 0); + } + ++ ++static int setup_fmt_after_resume_quirk(struct snd_usb_audio *chip, ++ struct usb_interface *iface, ++ struct usb_driver *driver, ++ const struct snd_usb_audio_quirk *quirk) ++{ ++ chip->setup_fmt_after_resume_quirk = 1; ++ return 1; /* Continue with creating streams and mixer */ ++} ++ + /* + * audio-interface quirks + * +@@ -546,6 +556,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip, + [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk, + [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk, + [QUIRK_AUDIO_STANDARD_MIXER] = create_standard_mixer_quirk, ++ [QUIRK_SETUP_FMT_AFTER_RESUME] = setup_fmt_after_resume_quirk, + }; + + if (quirk->type < QUIRK_TYPE_COUNT) { +diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h +index feb30f9c1716..e360680f45f3 100644 +--- a/sound/usb/usbaudio.h ++++ b/sound/usb/usbaudio.h +@@ -33,7 +33,7 @@ struct snd_usb_audio { + wait_queue_head_t shutdown_wait; + unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */ + unsigned int tx_length_quirk:1; /* Put length specifier in transfers */ +- ++ unsigned int setup_fmt_after_resume_quirk:1; /* setup the format to interface after resume */ + int num_interfaces; + int num_suspended_intf; + int sample_rate_read_error; +@@ -98,6 +98,7 @@ enum quirk_type { + QUIRK_AUDIO_EDIROL_UAXX, + QUIRK_AUDIO_ALIGN_TRANSFER, + QUIRK_AUDIO_STANDARD_MIXER, ++ QUIRK_SETUP_FMT_AFTER_RESUME, + + QUIRK_TYPE_COUNT + }; +diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c +index 70a9f8716a4b..888814df758d 100644 +--- a/tools/perf/util/machine.c ++++ b/tools/perf/util/machine.c +@@ -2403,7 +2403,7 @@ static int thread__resolve_callchain_sample(struct thread *thread, + } + + check_calls: +- if (callchain_param.order != ORDER_CALLEE) { ++ if (chain && callchain_param.order != ORDER_CALLEE) { + err = find_prev_cpumode(chain, thread, cursor, parent, root_al, + &cpumode, chain->nr - first_call); + if (err) +diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c +index eec2663261f2..e8a657a5f48a 100644 +--- a/tools/testing/selftests/rseq/param_test.c ++++ b/tools/testing/selftests/rseq/param_test.c +@@ -15,7 +15,7 @@ + #include <errno.h> + #include <stddef.h> + +-static inline pid_t gettid(void) ++static inline pid_t rseq_gettid(void) + { + return syscall(__NR_gettid); + } +@@ -373,11 +373,12 @@ void *test_percpu_spinlock_thread(void *arg) + rseq_percpu_unlock(&data->lock, cpu); + #ifndef BENCHMARK + if (i != 0 && !(i % (reps / 10))) +- printf_verbose("tid %d: count %lld\n", (int) gettid(), i); ++ printf_verbose("tid %d: count %lld\n", ++ (int) rseq_gettid(), i); + #endif + } + printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", +- (int) gettid(), nr_abort, signals_delivered); ++ (int) rseq_gettid(), nr_abort, signals_delivered); + if (!opt_disable_rseq && thread_data->reg && + rseq_unregister_current_thread()) + abort(); +@@ -454,11 +455,12 @@ void *test_percpu_inc_thread(void *arg) + } while (rseq_unlikely(ret)); + #ifndef BENCHMARK + if (i != 0 && !(i % (reps / 10))) +- printf_verbose("tid %d: count %lld\n", (int) 
gettid(), i); ++ printf_verbose("tid %d: count %lld\n", ++ (int) rseq_gettid(), i); + #endif + } + printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", +- (int) gettid(), nr_abort, signals_delivered); ++ (int) rseq_gettid(), nr_abort, signals_delivered); + if (!opt_disable_rseq && thread_data->reg && + rseq_unregister_current_thread()) + abort(); +@@ -605,7 +607,7 @@ void *test_percpu_list_thread(void *arg) + } + + printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", +- (int) gettid(), nr_abort, signals_delivered); ++ (int) rseq_gettid(), nr_abort, signals_delivered); + if (!opt_disable_rseq && rseq_unregister_current_thread()) + abort(); + +@@ -796,7 +798,7 @@ void *test_percpu_buffer_thread(void *arg) + } + + printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", +- (int) gettid(), nr_abort, signals_delivered); ++ (int) rseq_gettid(), nr_abort, signals_delivered); + if (!opt_disable_rseq && rseq_unregister_current_thread()) + abort(); + +@@ -1011,7 +1013,7 @@ void *test_percpu_memcpy_buffer_thread(void *arg) + } + + printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", +- (int) gettid(), nr_abort, signals_delivered); ++ (int) rseq_gettid(), nr_abort, signals_delivered); + if (!opt_disable_rseq && rseq_unregister_current_thread()) + abort(); + +diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c +index b505bb062d07..96bbda4f10fc 100644 +--- a/tools/testing/selftests/seccomp/seccomp_bpf.c ++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c +@@ -3147,7 +3147,18 @@ TEST(user_notification_basic) + EXPECT_GT(poll(&pollfd, 1, -1), 0); + EXPECT_EQ(pollfd.revents, POLLIN); + +- EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); ++ /* Test that we can't pass garbage to the kernel. 
*/ ++ memset(&req, 0, sizeof(req)); ++ req.pid = -1; ++ errno = 0; ++ ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ if (ret) { ++ req.pid = 0; ++ EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); ++ } + + pollfd.fd = listener; + pollfd.events = POLLIN | POLLOUT; +@@ -3267,6 +3278,7 @@ TEST(user_notification_signal) + + close(sk_pair[1]); + ++ memset(&req, 0, sizeof(req)); + EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); + + EXPECT_EQ(kill(pid, SIGUSR1), 0); +@@ -3285,6 +3297,7 @@ TEST(user_notification_signal) + EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1); + EXPECT_EQ(errno, ENOENT); + ++ memset(&req, 0, sizeof(req)); + EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); + + resp.id = req.id; +diff --git a/usr/gen_initramfs_list.sh b/usr/gen_initramfs_list.sh +index 0aad760fcd8c..2bbac73e6477 100755 +--- a/usr/gen_initramfs_list.sh ++++ b/usr/gen_initramfs_list.sh +@@ -128,7 +128,7 @@ parse() { + str="${ftype} ${name} ${location} ${str}" + ;; + "nod") +- local dev=`LC_ALL=C ls -l "${location}"` ++ local dev="`LC_ALL=C ls -l "${location}"`" + local maj=`field 5 ${dev}` + local min=`field 6 ${dev}` + maj=${maj%,} diff --git a/1009_linux-5.4.10.patch b/1009_linux-5.4.10.patch new file mode 100644 index 00000000..ef483044 --- /dev/null +++ b/1009_linux-5.4.10.patch @@ -0,0 +1,26 @@ +diff --git a/Makefile b/Makefile +index 3ba15c3528c8..726bb3dacd5b 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 9 ++SUBLEVEL = 10 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c +index 460afa415434..d30a2e6e68b4 100644 +--- a/arch/powerpc/mm/mem.c ++++ b/arch/powerpc/mm/mem.c +@@ -120,7 +120,7 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop, + unsigned long i; + + for (i = start; i < stop; i += chunk) { +- flush_dcache_range(i, min(stop, start + chunk)); ++ flush_dcache_range(i, min(stop, i + chunk)); + cond_resched(); + } + } |
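A note on the single functional change in 5.4.10 above: flush_dcache_range_chunked() walks [start, stop) in fixed-size chunks, but the pre-fix upper bound min(stop, start + chunk) stays pinned to the end of the first chunk, so every later iteration passes an upper bound at or below its own starting address and nothing past the first chunk is actually flushed; min(stop, i + chunk) advances with the loop. Below is a minimal standalone sketch of the corrected loop shape, in plain C with a hypothetical flush_one_range() stub standing in for flush_dcache_range() — an illustration of the pattern, not kernel code:

#include <stdio.h>

/* Stand-in for flush_dcache_range(); just logs the range it is asked
 * to flush so the loop's coverage is visible.
 */
static void flush_one_range(unsigned long lo, unsigned long hi)
{
	printf("flush [%#lx, %#lx)\n", lo, hi);
}

/* Walk [start, stop) in fixed-size chunks. The upper bound must track
 * the loop variable i: with the buggy min(stop, start + chunk), the
 * bound never moves past the first chunk, so only that chunk is flushed.
 */
static void flush_range_chunked(unsigned long start, unsigned long stop,
				unsigned long chunk)
{
	unsigned long i;

	for (i = start; i < stop; i += chunk) {
		unsigned long end = (i + chunk < stop) ? i + chunk : stop;

		flush_one_range(i, end);
	}
}

int main(void)
{
	/* Covers 0x1000-0x3800: two full chunks, one partial tail. */
	flush_range_chunked(0x1000, 0x3800, 0x1000);
	return 0;
}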