author      2018-04-24 07:30:18 -0400
committer   2018-04-24 07:30:18 -0400
commit      4d5402093010704318c47ebf9833c72e4ca54ddc (patch)
tree        22795662b3f9f99cc94ecbbf841403283fff1405
parent      Linux patch 4.9.95 (diff)
Linux patch 4.9.96
-rw-r--r--   0000_README               |    4
-rw-r--r--   1095_linux-4.9.96.patch   | 2918
2 files changed, 2922 insertions, 0 deletions
diff --git a/0000_README b/0000_README index a826f60e..0d1f8898 100644 --- a/0000_README +++ b/0000_README @@ -423,6 +423,10 @@ Patch: 1094_linux-4.9.95.patch From: http://www.kernel.org Desc: Linux 4.9.95 +Patch: 1095_linux-4.9.96.patch +From: http://www.kernel.org +Desc: Linux 4.9.96 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1095_linux-4.9.96.patch b/1095_linux-4.9.96.patch new file mode 100644 index 00000000..26d756a1 --- /dev/null +++ b/1095_linux-4.9.96.patch @@ -0,0 +1,2918 @@ +diff --git a/Makefile b/Makefile +index 1aeec9df709d..50ae573e8951 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 95 ++SUBLEVEL = 96 + EXTRAVERSION = + NAME = Roaring Lionus + +diff --git a/arch/arm/boot/dts/at91sam9g25.dtsi b/arch/arm/boot/dts/at91sam9g25.dtsi +index a7da0dd0c98f..0898213f3bb2 100644 +--- a/arch/arm/boot/dts/at91sam9g25.dtsi ++++ b/arch/arm/boot/dts/at91sam9g25.dtsi +@@ -21,7 +21,7 @@ + atmel,mux-mask = < + /* A B C */ + 0xffffffff 0xffe0399f 0xc000001c /* pioA */ +- 0x0007ffff 0x8000fe3f 0x00000000 /* pioB */ ++ 0x0007ffff 0x00047e3f 0x00000000 /* pioB */ + 0x80000000 0x07c0ffff 0xb83fffff /* pioC */ + 0x003fffff 0x003f8000 0x00000000 /* pioD */ + >; +diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi +index f7357d99b47c..64de33d067c9 100644 +--- a/arch/arm/boot/dts/exynos5250.dtsi ++++ b/arch/arm/boot/dts/exynos5250.dtsi +@@ -640,7 +640,7 @@ + power-domains = <&pd_gsc>; + clocks = <&clock CLK_GSCL0>; + clock-names = "gscl"; +- iommu = <&sysmmu_gsc0>; ++ iommus = <&sysmmu_gsc0>; + }; + + gsc_1: gsc@13e10000 { +@@ -650,7 +650,7 @@ + power-domains = <&pd_gsc>; + clocks = <&clock CLK_GSCL1>; + clock-names = "gscl"; +- iommu = <&sysmmu_gsc1>; ++ iommus = <&sysmmu_gsc1>; + }; + + gsc_2: gsc@13e20000 { +@@ -660,7 +660,7 @@ + power-domains = <&pd_gsc>; + clocks = <&clock CLK_GSCL2>; + clock-names = "gscl"; +- iommu = <&sysmmu_gsc2>; ++ iommus = <&sysmmu_gsc2>; + }; + + gsc_3: gsc@13e30000 { +@@ -670,7 +670,7 @@ + power-domains = <&pd_gsc>; + clocks = <&clock CLK_GSCL3>; + clock-names = "gscl"; +- iommu = <&sysmmu_gsc3>; ++ iommus = <&sysmmu_gsc3>; + }; + + hdmi: hdmi@14530000 { +diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi +index 65e725fb5679..de0e189711f6 100644 +--- a/arch/arm/boot/dts/sama5d4.dtsi ++++ b/arch/arm/boot/dts/sama5d4.dtsi +@@ -1362,7 +1362,7 @@ + pinctrl@fc06a000 { + #address-cells = <1>; + #size-cells = <1>; +- compatible = "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus"; ++ compatible = "atmel,sama5d3-pinctrl", "atmel,at91sam9x5-pinctrl", "simple-bus"; + ranges = <0xfc068000 0xfc068000 0x100 + 0xfc06a000 0xfc06a000 0x4000>; + /* WARNING: revisit as pin spec has changed */ +diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h +index 89fa5c0b1579..c92f4c28db7f 100644 +--- a/arch/mips/include/asm/uaccess.h ++++ b/arch/mips/include/asm/uaccess.h +@@ -1257,6 +1257,13 @@ __clear_user(void __user *addr, __kernel_size_t size) + { + __kernel_size_t res; + ++#ifdef CONFIG_CPU_MICROMIPS ++/* micromips memset / bzero also clobbers t7 & t8 */ ++#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31" ++#else ++#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31" ++#endif /* CONFIG_CPU_MICROMIPS */ ++ + if (eva_kernel_access()) { + __asm__ __volatile__( + "move\t$4, %1\n\t" +@@ -1266,7 
+1273,7 @@ __clear_user(void __user *addr, __kernel_size_t size) + "move\t%0, $6" + : "=r" (res) + : "r" (addr), "r" (size) +- : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); ++ : bzero_clobbers); + } else { + might_fault(); + __asm__ __volatile__( +@@ -1277,7 +1284,7 @@ __clear_user(void __user *addr, __kernel_size_t size) + "move\t%0, $6" + : "=r" (res) + : "r" (addr), "r" (size) +- : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); ++ : bzero_clobbers); + } + + return res; +diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S +index 18a1ccd4d134..2b1bf93b5c80 100644 +--- a/arch/mips/lib/memset.S ++++ b/arch/mips/lib/memset.S +@@ -218,7 +218,7 @@ + 1: PTR_ADDIU a0, 1 /* fill bytewise */ + R10KCBARRIER(0(ra)) + bne t1, a0, 1b +- sb a1, -1(a0) ++ EX(sb, a1, -1(a0), .Lsmall_fixup\@) + + 2: jr ra /* done */ + move a2, zero +@@ -251,13 +251,18 @@ + PTR_L t0, TI_TASK($28) + andi a2, STORMASK + LONG_L t0, THREAD_BUADDR(t0) +- LONG_ADDU a2, t1 ++ LONG_ADDU a2, a0 + jr ra + LONG_SUBU a2, t0 + + .Llast_fixup\@: + jr ra +- andi v1, a2, STORMASK ++ nop ++ ++.Lsmall_fixup\@: ++ PTR_SUBU a2, t1, a0 ++ jr ra ++ PTR_ADDIU a2, 1 + + .endm + +diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h +index c0deafc212b8..798ab37c9930 100644 +--- a/arch/powerpc/include/asm/barrier.h ++++ b/arch/powerpc/include/asm/barrier.h +@@ -34,7 +34,8 @@ + #define rmb() __asm__ __volatile__ ("sync" : : : "memory") + #define wmb() __asm__ __volatile__ ("sync" : : : "memory") + +-#ifdef __SUBARCH_HAS_LWSYNC ++/* The sub-arch has lwsync */ ++#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC) + # define SMPWMB LWSYNC + #else + # define SMPWMB eieio +diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h +index e958b7096f19..9e5e0d910b91 100644 +--- a/arch/powerpc/include/asm/opal.h ++++ b/arch/powerpc/include/asm/opal.h +@@ -21,6 +21,9 @@ + /* We calculate number of sg entries based on PAGE_SIZE */ + #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry)) + ++/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */ ++#define OPAL_BUSY_DELAY_MS 10 ++ + /* /sys/firmware/opal */ + extern struct kobject *opal_kobj; + +diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h +index 78efe8d5d775..30f2d6d4c640 100644 +--- a/arch/powerpc/include/asm/synch.h ++++ b/arch/powerpc/include/asm/synch.h +@@ -5,10 +5,6 @@ + #include <linux/stringify.h> + #include <asm/feature-fixups.h> + +-#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC) +-#define __SUBARCH_HAS_LWSYNC +-#endif +- + #ifndef __ASSEMBLY__ + extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup; + extern void do_lwsync_fixups(unsigned long value, void *fixup_start, +diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c +index de7d091c4c31..1abd8dd77ec1 100644 +--- a/arch/powerpc/kernel/eeh_pe.c ++++ b/arch/powerpc/kernel/eeh_pe.c +@@ -795,7 +795,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev) + eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); + + /* PCI Command: 0x4 */ +- eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]); ++ eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] | ++ PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + + /* Check the PCIe link is ready */ + eeh_bridge_check_link(edev); +diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c +index e86bfa111f3c..46c8338a61bc 100644 +--- a/arch/powerpc/lib/feature-fixups.c ++++ 
b/arch/powerpc/lib/feature-fixups.c +@@ -55,7 +55,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest, + unsigned int *target = (unsigned int *)branch_target(src); + + /* Branch within the section doesn't need translating */ +- if (target < alt_start || target >= alt_end) { ++ if (target < alt_start || target > alt_end) { + instr = translate_branch(dest, src); + if (!instr) + return 1; +diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c +index 9db4398ded5d..1bceb95f422d 100644 +--- a/arch/powerpc/platforms/powernv/opal-nvram.c ++++ b/arch/powerpc/platforms/powernv/opal-nvram.c +@@ -11,6 +11,7 @@ + + #define DEBUG + ++#include <linux/delay.h> + #include <linux/kernel.h> + #include <linux/init.h> + #include <linux/of.h> +@@ -56,9 +57,17 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index) + + while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { + rc = opal_write_nvram(__pa(buf), count, off); +- if (rc == OPAL_BUSY_EVENT) ++ if (rc == OPAL_BUSY_EVENT) { ++ msleep(OPAL_BUSY_DELAY_MS); + opal_poll_events(NULL); ++ } else if (rc == OPAL_BUSY) { ++ msleep(OPAL_BUSY_DELAY_MS); ++ } + } ++ ++ if (rc) ++ return -EIO; ++ + *index += count; + return count; + } +diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c +index 09bccb224d03..2a17123130d3 100644 +--- a/arch/s390/hypfs/inode.c ++++ b/arch/s390/hypfs/inode.c +@@ -318,7 +318,7 @@ static void hypfs_kill_super(struct super_block *sb) + + if (sb->s_root) + hypfs_delete_tree(sb->s_root); +- if (sb_info->update_file) ++ if (sb_info && sb_info->update_file) + hypfs_remove(sb_info->update_file); + kfree(sb->s_fs_info); + sb->s_fs_info = NULL; +diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c +index 2db18cbbb0ea..c0197097c86e 100644 +--- a/arch/um/os-Linux/file.c ++++ b/arch/um/os-Linux/file.c +@@ -12,6 +12,7 @@ + #include <sys/mount.h> + #include <sys/socket.h> + #include <sys/stat.h> ++#include <sys/sysmacros.h> + #include <sys/un.h> + #include <sys/types.h> + #include <os.h> +diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c +index a86d7cc2c2d8..bf0acb8aad8b 100644 +--- a/arch/um/os-Linux/signal.c ++++ b/arch/um/os-Linux/signal.c +@@ -16,6 +16,7 @@ + #include <os.h> + #include <sysdep/mcontext.h> + #include <um_malloc.h> ++#include <sys/ucontext.h> + + void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = { + [SIGTRAP] = relay_signal, +@@ -159,7 +160,7 @@ static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = { + + static void hard_handler(int sig, siginfo_t *si, void *p) + { +- struct ucontext *uc = p; ++ ucontext_t *uc = p; + mcontext_t *mc = &uc->uc_mcontext; + unsigned long pending = 1UL << sig; + +diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c +index 1518d2805ae8..27361cbb7ca9 100644 +--- a/arch/x86/um/stub_segv.c ++++ b/arch/x86/um/stub_segv.c +@@ -6,11 +6,12 @@ + #include <sysdep/stub.h> + #include <sysdep/faultinfo.h> + #include <sysdep/mcontext.h> ++#include <sys/ucontext.h> + + void __attribute__ ((__section__ (".__syscall_stub"))) + stub_segv_handler(int sig, siginfo_t *info, void *p) + { +- struct ucontext *uc = p; ++ ucontext_t *uc = p; + + GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA), + &uc->uc_mcontext); +diff --git a/block/blk-mq.c b/block/blk-mq.c +index 5ca4e4cd8cba..24fc09cf7f17 100644 +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -2019,15 +2019,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, + + 
blk_mq_init_cpu_queues(q, set->nr_hw_queues); + +- mutex_lock(&all_q_mutex); + get_online_cpus(); ++ mutex_lock(&all_q_mutex); + + list_add_tail(&q->all_q_node, &all_q_list); + blk_mq_add_queue_tag_set(set, q); + blk_mq_map_swqueue(q, cpu_online_mask); + +- put_online_cpus(); + mutex_unlock(&all_q_mutex); ++ put_online_cpus(); + + return q; + +diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c +index 37032545c58e..3874eec972cd 100644 +--- a/drivers/acpi/nfit/core.c ++++ b/drivers/acpi/nfit/core.c +@@ -967,8 +967,11 @@ static ssize_t scrub_show(struct device *dev, + if (nd_desc) { + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + ++ mutex_lock(&acpi_desc->init_mutex); + rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, +- (work_busy(&acpi_desc->work)) ? "+\n" : "\n"); ++ work_busy(&acpi_desc->work) ++ && !acpi_desc->cancel ? "+\n" : "\n"); ++ mutex_unlock(&acpi_desc->init_mutex); + } + device_unlock(dev); + return rc; +diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c +index 02ded25c82e4..cdc47375178e 100644 +--- a/drivers/acpi/video_detect.c ++++ b/drivers/acpi/video_detect.c +@@ -213,6 +213,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = { + "3570R/370R/470R/450R/510R/4450RV"), + }, + }, ++ { ++ /* https://bugzilla.redhat.com/show_bug.cgi?id=1557060 */ ++ .callback = video_detect_force_video, ++ .ident = "SAMSUNG 670Z5E", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "670Z5E"), ++ }, ++ }, + { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */ + .callback = video_detect_force_video, +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c +index ae63bb0875ea..a7b0fc7cb468 100644 +--- a/drivers/base/regmap/regmap.c ++++ b/drivers/base/regmap/regmap.c +@@ -1736,7 +1736,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg, + return -EINVAL; + if (val_len % map->format.val_bytes) + return -EINVAL; +- if (map->max_raw_write && map->max_raw_write > val_len) ++ if (map->max_raw_write && map->max_raw_write < val_len) + return -E2BIG; + + map->lock(map->lock_arg); +diff --git a/drivers/char/random.c b/drivers/char/random.c +index 0c23ced255cb..8d08a8062904 100644 +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -434,8 +434,9 @@ struct crng_state primary_crng = { + * its value (from 0->1->2). 
+ */ + static int crng_init = 0; +-#define crng_ready() (likely(crng_init > 0)) ++#define crng_ready() (likely(crng_init > 1)) + static int crng_init_cnt = 0; ++static unsigned long crng_global_init_time = 0; + #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) + static void _extract_crng(struct crng_state *crng, + __u8 out[CHACHA20_BLOCK_SIZE]); +@@ -741,7 +742,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits) + + static int credit_entropy_bits_safe(struct entropy_store *r, int nbits) + { +- const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1)); ++ const int nbits_max = r->poolinfo->poolwords * 32; + + if (nbits < 0) + return -EINVAL; +@@ -800,7 +801,7 @@ static int crng_fast_load(const char *cp, size_t len) + + if (!spin_trylock_irqsave(&primary_crng.lock, flags)) + return 0; +- if (crng_ready()) { ++ if (crng_init != 0) { + spin_unlock_irqrestore(&primary_crng.lock, flags); + return 0; + } +@@ -836,7 +837,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) + _crng_backtrack_protect(&primary_crng, buf.block, + CHACHA20_KEY_SIZE); + } +- spin_lock_irqsave(&primary_crng.lock, flags); ++ spin_lock_irqsave(&crng->lock, flags); + for (i = 0; i < 8; i++) { + unsigned long rv; + if (!arch_get_random_seed_long(&rv) && +@@ -852,7 +853,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) + wake_up_interruptible(&crng_init_wait); + pr_notice("random: crng init done\n"); + } +- spin_unlock_irqrestore(&primary_crng.lock, flags); ++ spin_unlock_irqrestore(&crng->lock, flags); + } + + static inline void maybe_reseed_primary_crng(void) +@@ -872,8 +873,9 @@ static void _extract_crng(struct crng_state *crng, + { + unsigned long v, flags; + +- if (crng_init > 1 && +- time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)) ++ if (crng_ready() && ++ (time_after(crng_global_init_time, crng->init_time) || ++ time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))) + crng_reseed(crng, crng == &primary_crng ? 
&input_pool : NULL); + spin_lock_irqsave(&crng->lock, flags); + if (arch_get_random_long(&v)) +@@ -1153,7 +1155,7 @@ void add_interrupt_randomness(int irq, int irq_flags) + fast_mix(fast_pool); + add_interrupt_bench(cycles); + +- if (!crng_ready()) { ++ if (unlikely(crng_init == 0)) { + if ((fast_pool->count >= 64) && + crng_fast_load((char *) fast_pool->pool, + sizeof(fast_pool->pool))) { +@@ -1668,6 +1670,7 @@ static int rand_initialize(void) + init_std_data(&input_pool); + init_std_data(&blocking_pool); + crng_initialize(&primary_crng); ++ crng_global_init_time = jiffies; + + #ifdef CONFIG_NUMA + pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); +@@ -1854,6 +1857,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) + input_pool.entropy_count = 0; + blocking_pool.entropy_count = 0; + return 0; ++ case RNDRESEEDCRNG: ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ if (crng_init < 2) ++ return -ENODATA; ++ crng_reseed(&primary_crng, NULL); ++ crng_global_init_time = jiffies - 1; ++ return 0; + default: + return -EINVAL; + } +@@ -2148,7 +2159,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, + { + struct entropy_store *poolp = &input_pool; + +- if (!crng_ready()) { ++ if (unlikely(crng_init == 0)) { + crng_fast_load(buffer, count); + return; + } +diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c +index abdc149941e2..73aab6e984cd 100644 +--- a/drivers/clk/bcm/clk-bcm2835.c ++++ b/drivers/clk/bcm/clk-bcm2835.c +@@ -545,9 +545,7 @@ static void bcm2835_pll_off(struct clk_hw *hw) + const struct bcm2835_pll_data *data = pll->data; + + spin_lock(&cprman->regs_lock); +- cprman_write(cprman, data->cm_ctrl_reg, +- cprman_read(cprman, data->cm_ctrl_reg) | +- CM_PLL_ANARST); ++ cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST); + cprman_write(cprman, data->a2w_ctrl_reg, + cprman_read(cprman, data->a2w_ctrl_reg) | + A2W_PLL_CTRL_PWRDN); +@@ -583,6 +581,10 @@ static int bcm2835_pll_on(struct clk_hw *hw) + cpu_relax(); + } + ++ cprman_write(cprman, data->a2w_ctrl_reg, ++ cprman_read(cprman, data->a2w_ctrl_reg) | ++ A2W_PLL_CTRL_PRST_DISABLE); ++ + return 0; + } + +diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c +index 8bccf4ecdab6..9ff4ea63932d 100644 +--- a/drivers/clk/mvebu/armada-38x.c ++++ b/drivers/clk/mvebu/armada-38x.c +@@ -46,10 +46,11 @@ static u32 __init armada_38x_get_tclk_freq(void __iomem *sar) + } + + static const u32 armada_38x_cpu_frequencies[] __initconst = { +- 0, 0, 0, 0, +- 1066 * 1000 * 1000, 0, 0, 0, ++ 666 * 1000 * 1000, 0, 800 * 1000 * 1000, 0, ++ 1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0, + 1332 * 1000 * 1000, 0, 0, 0, +- 1600 * 1000 * 1000, ++ 1600 * 1000 * 1000, 0, 0, 0, ++ 1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000, + }; + + static u32 __init armada_38x_get_cpu_freq(void __iomem *sar) +@@ -75,11 +76,11 @@ static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = { + }; + + static const int armada_38x_cpu_l2_ratios[32][2] __initconst = { +- {0, 1}, {0, 1}, {0, 1}, {0, 1}, +- {1, 2}, {0, 1}, {0, 1}, {0, 1}, ++ {1, 2}, {0, 1}, {1, 2}, {0, 1}, ++ {1, 2}, {0, 1}, {1, 2}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {0, 1}, +- {0, 1}, {0, 1}, {0, 1}, {0, 1}, ++ {1, 2}, {0, 1}, {0, 1}, {1, 2}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +@@ -90,7 +91,7 @@ static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = { + {1, 2}, {0, 1}, {0, 1}, {0, 
1}, + {1, 2}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {0, 1}, +- {0, 1}, {0, 1}, {0, 1}, {0, 1}, ++ {1, 2}, {0, 1}, {0, 1}, {7, 15}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c +index eea38f6ea77e..3892346c4fcc 100644 +--- a/drivers/clk/renesas/clk-sh73a0.c ++++ b/drivers/clk/renesas/clk-sh73a0.c +@@ -46,7 +46,7 @@ struct div4_clk { + unsigned int shift; + }; + +-static struct div4_clk div4_clks[] = { ++static const struct div4_clk div4_clks[] = { + { "zg", "pll0", CPG_FRQCRA, 16 }, + { "m3", "pll1", CPG_FRQCRA, 12 }, + { "b", "pll1", CPG_FRQCRA, 8 }, +@@ -79,7 +79,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg, + { + const struct clk_div_table *table = NULL; + unsigned int shift, reg, width; +- const char *parent_name; ++ const char *parent_name = NULL; + unsigned int mult = 1; + unsigned int div = 1; + +@@ -135,7 +135,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg, + shift = 24; + width = 5; + } else { +- struct div4_clk *c; ++ const struct div4_clk *c; + + for (c = div4_clks; c->name; c++) { + if (!strcmp(name, c->name)) { +diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c +index b7d7f2d443a1..ee7b48d5243c 100644 +--- a/drivers/dma/at_xdmac.c ++++ b/drivers/dma/at_xdmac.c +@@ -1473,10 +1473,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) { + check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; + rmb(); +- initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD); +- rmb(); + cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); + rmb(); ++ initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD); ++ rmb(); + cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; + rmb(); + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +index 0e8f8972a160..0217f5d6ecb9 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +@@ -569,6 +569,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = { + { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, ++ { 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0, 0, 0, 0, 0 }, + }; + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +index c02db01f6583..fe011c7ec70a 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +@@ -201,8 +201,10 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, + for (i = 0; i < list->num_entries; i++) { + unsigned priority = list->array[i].priority; + +- list_add_tail(&list->array[i].tv.head, +- &bucket[priority]); ++ if (!list->array[i].robj->parent) ++ list_add_tail(&list->array[i].tv.head, ++ &bucket[priority]); ++ + list->array[i].user_pages = NULL; + } + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +index cb505f66d3aa..c801624f33bd 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +@@ -519,7 +519,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, + 
INIT_LIST_HEAD(&duplicates); + amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); + +- if (p->uf_entry.robj) ++ if (p->uf_entry.robj && !p->uf_entry.robj->parent) + list_add(&p->uf_entry.tv.head, &p->validated); + + if (need_mmap_lock) +diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c +index 002862be2df6..3fa8320e49c1 100644 +--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c ++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c +@@ -6449,9 +6449,9 @@ static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev, + { + u32 lane_width; + u32 new_lane_width = +- (amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; ++ ((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; + u32 current_lane_width = +- (amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; ++ ((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; + + if (new_lane_width != current_lane_width) { + amdgpu_set_pcie_lanes(adev, new_lane_width); +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c +index 574ab0016a57..b82ef5ed727c 100644 +--- a/drivers/gpu/drm/radeon/si_dpm.c ++++ b/drivers/gpu/drm/radeon/si_dpm.c +@@ -5969,9 +5969,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev, + { + u32 lane_width; + u32 new_lane_width = +- (radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; ++ ((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; + u32 current_lane_width = +- (radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; ++ ((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; + + if (new_lane_width != current_lane_width) { + radeon_set_pcie_lanes(rdev, new_lane_width); +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +index 6e3c4acb16ac..32d87c6035c9 100644 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +@@ -1386,6 +1386,9 @@ static int vop_initial(struct vop *vop) + usleep_range(10, 20); + reset_control_deassert(ahb_rst); + ++ VOP_INTR_SET_TYPE(vop, clear, INTR_MASK, 1); ++ VOP_INTR_SET_TYPE(vop, enable, INTR_MASK, 0); ++ + memcpy(vop->regsbak, vop->regs, vop->len); + + for (i = 0; i < vop_data->table_size; i++) +@@ -1541,17 +1544,9 @@ static int vop_bind(struct device *dev, struct device *master, void *data) + + mutex_init(&vop->vsync_mutex); + +- ret = devm_request_irq(dev, vop->irq, vop_isr, +- IRQF_SHARED, dev_name(dev), vop); +- if (ret) +- return ret; +- +- /* IRQ is initially disabled; it gets enabled in power_on */ +- disable_irq(vop->irq); +- + ret = vop_create_crtc(vop); + if (ret) +- goto err_enable_irq; ++ return ret; + + pm_runtime_enable(&pdev->dev); + +@@ -1561,13 +1556,19 @@ static int vop_bind(struct device *dev, struct device *master, void *data) + goto err_disable_pm_runtime; + } + ++ ret = devm_request_irq(dev, vop->irq, vop_isr, ++ IRQF_SHARED, dev_name(dev), vop); ++ if (ret) ++ goto err_disable_pm_runtime; ++ ++ /* IRQ is initially disabled; it gets enabled in power_on */ ++ disable_irq(vop->irq); ++ + return 0; + + err_disable_pm_runtime: + pm_runtime_disable(&pdev->dev); + vop_destroy_crtc(vop); +-err_enable_irq: +- enable_irq(vop->irq); /* To 
balance out the disable_irq above */ + return ret; + } + +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 49406e106cee..7944a1f589eb 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -1370,7 +1370,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags) + * of implement() working on 8 byte chunks + */ + +- int len = hid_report_len(report) + 7; ++ u32 len = hid_report_len(report) + 7; + + return kmalloc(len, flags); + } +@@ -1435,7 +1435,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report, + { + char *buf; + int ret; +- int len; ++ u32 len; + + buf = hid_alloc_report_buf(report, GFP_KERNEL); + if (!buf) +@@ -1461,14 +1461,14 @@ void __hid_request(struct hid_device *hid, struct hid_report *report, + } + EXPORT_SYMBOL_GPL(__hid_request); + +-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, ++int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, + int interrupt) + { + struct hid_report_enum *report_enum = hid->report_enum + type; + struct hid_report *report; + struct hid_driver *hdrv; + unsigned int a; +- int rsize, csize = size; ++ u32 rsize, csize = size; + u8 *cdata = data; + int ret = 0; + +@@ -1526,7 +1526,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event); + * + * This is data entry for lower layers. + */ +-int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt) ++int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt) + { + struct hid_report_enum *report_enum; + struct hid_driver *hdrv; +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c +index 40233315d5f5..5ff6dd8147b6 100644 +--- a/drivers/hid/hid-input.c ++++ b/drivers/hid/hid-input.c +@@ -1279,7 +1279,8 @@ static void hidinput_led_worker(struct work_struct *work) + led_work); + struct hid_field *field; + struct hid_report *report; +- int len, ret; ++ int ret; ++ u32 len; + __u8 *buf; + + field = hidinput_get_led_field(hid); +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c +index 89e9032ab1e7..fba655d639af 100644 +--- a/drivers/hid/hid-multitouch.c ++++ b/drivers/hid/hid-multitouch.c +@@ -315,7 +315,8 @@ static struct attribute_group mt_attribute_group = { + static void mt_get_feature(struct hid_device *hdev, struct hid_report *report) + { + struct mt_device *td = hid_get_drvdata(hdev); +- int ret, size = hid_report_len(report); ++ int ret; ++ u32 size = hid_report_len(report); + u8 *buf; + + /* +@@ -919,7 +920,7 @@ static void mt_set_input_mode(struct hid_device *hdev) + struct hid_report_enum *re; + struct mt_class *cls = &td->mtclass; + char *buf; +- int report_len; ++ u32 report_len; + + if (td->inputmode < 0) + return; +diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c +index be89bcbf6a71..276d12d4b576 100644 +--- a/drivers/hid/hid-rmi.c ++++ b/drivers/hid/hid-rmi.c +@@ -110,8 +110,8 @@ struct rmi_data { + u8 *writeReport; + u8 *readReport; + +- int input_report_size; +- int output_report_size; ++ u32 input_report_size; ++ u32 output_report_size; + + unsigned long flags; + +diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c +index f0e2757cb909..216f0338a1f7 100644 +--- a/drivers/hid/hidraw.c ++++ b/drivers/hid/hidraw.c +@@ -192,6 +192,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t + int ret = 0, len; + unsigned char report_number; + ++ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { ++ ret = -ENODEV; ++ goto out; ++ } ++ + dev = 
hidraw_table[minor]->hid; + + if (!dev->ll_driver->raw_request) { +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c +index 7d6da9b43dab..2548c5dbdc75 100644 +--- a/drivers/hid/i2c-hid/i2c-hid.c ++++ b/drivers/hid/i2c-hid/i2c-hid.c +@@ -142,10 +142,10 @@ struct i2c_hid { + * register of the HID + * descriptor. */ + unsigned int bufsize; /* i2c buffer size */ +- char *inbuf; /* Input buffer */ +- char *rawbuf; /* Raw Input buffer */ +- char *cmdbuf; /* Command buffer */ +- char *argsbuf; /* Command arguments buffer */ ++ u8 *inbuf; /* Input buffer */ ++ u8 *rawbuf; /* Raw Input buffer */ ++ u8 *cmdbuf; /* Command buffer */ ++ u8 *argsbuf; /* Command arguments buffer */ + + unsigned long flags; /* device flags */ + unsigned long quirks; /* Various quirks */ +@@ -451,7 +451,8 @@ static int i2c_hid_hwreset(struct i2c_client *client) + + static void i2c_hid_get_input(struct i2c_hid *ihid) + { +- int ret, ret_size; ++ int ret; ++ u32 ret_size; + int size = le16_to_cpu(ihid->hdesc.wMaxInputLength); + + if (size > ihid->bufsize) +@@ -476,7 +477,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid) + return; + } + +- if (ret_size > size) { ++ if ((ret_size > size) || (ret_size <= 2)) { + dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", + __func__, size, ret_size); + return; +diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c +index 7a4d39ce51d9..e8b90b534f08 100644 +--- a/drivers/hid/wacom_sys.c ++++ b/drivers/hid/wacom_sys.c +@@ -351,7 +351,7 @@ static int wacom_set_device_mode(struct hid_device *hdev, + u8 *rep_data; + struct hid_report *r; + struct hid_report_enum *re; +- int length; ++ u32 length; + int error = -ENOMEM, limit = 0; + + if (wacom_wac->mode_report < 0) +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c +index 4d732810f6fc..cb79d171d1e4 100644 +--- a/drivers/infiniband/core/ucma.c ++++ b/drivers/infiniband/core/ucma.c +@@ -1231,6 +1231,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx, + if (!optlen) + return -EINVAL; + ++ if (!ctx->cm_id->device) ++ return -EINVAL; ++ + memset(&sa_path, 0, sizeof(sa_path)); + + ib_sa_unpack_path(path_data->path_rec, &sa_path); +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c +index 59f37f412a7f..ced416f5dffb 100644 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.c ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c +@@ -747,9 +747,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr, + memcpy(wqe->dma.sge, ibwr->sg_list, + num_sge * sizeof(struct ib_sge)); + +- wqe->iova = (mask & WR_ATOMIC_MASK) ? +- atomic_wr(ibwr)->remote_addr : +- rdma_wr(ibwr)->remote_addr; ++ wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr : ++ mask & WR_READ_OR_WRITE_MASK ? 
rdma_wr(ibwr)->remote_addr : 0; + wqe->mask = mask; + wqe->dma.length = length; + wqe->dma.resid = length; +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c +index 84f91858b5e6..463ea592a42a 100644 +--- a/drivers/infiniband/ulp/srp/ib_srp.c ++++ b/drivers/infiniband/ulp/srp/ib_srp.c +@@ -2626,9 +2626,11 @@ static int srp_abort(struct scsi_cmnd *scmnd) + ret = FAST_IO_FAIL; + else + ret = FAILED; +- srp_free_req(ch, req, scmnd, 0); +- scmnd->result = DID_ABORT << 16; +- scmnd->scsi_done(scmnd); ++ if (ret == SUCCESS) { ++ srp_free_req(ch, req, scmnd, 0); ++ scmnd->result = DID_ABORT << 16; ++ scmnd->scsi_done(scmnd); ++ } + + return ret; + } +@@ -3395,12 +3397,10 @@ static ssize_t srp_create_target(struct device *dev, + num_online_nodes()); + const int ch_end = ((node_idx + 1) * target->ch_count / + num_online_nodes()); +- const int cv_start = (node_idx * ibdev->num_comp_vectors / +- num_online_nodes() + target->comp_vector) +- % ibdev->num_comp_vectors; +- const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors / +- num_online_nodes() + target->comp_vector) +- % ibdev->num_comp_vectors; ++ const int cv_start = node_idx * ibdev->num_comp_vectors / ++ num_online_nodes(); ++ const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors / ++ num_online_nodes(); + int cpu_idx = 0; + + for_each_online_cpu(cpu) { +diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c +index 3a1c40684213..f846f0140a9d 100644 +--- a/drivers/iommu/intel-svm.c ++++ b/drivers/iommu/intel-svm.c +@@ -389,6 +389,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ + pasid_max - 1, GFP_KERNEL); + if (ret < 0) { + kfree(svm); ++ kfree(sdev); + goto out; + } + svm->pasid = ret; +diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c +index 9ae71804b5dd..1c2ca8d51a70 100644 +--- a/drivers/irqchip/irq-gic-common.c ++++ b/drivers/irqchip/irq-gic-common.c +@@ -21,6 +21,8 @@ + + #include "irq-gic-common.h" + ++static DEFINE_RAW_SPINLOCK(irq_controller_lock); ++ + static const struct gic_kvm_info *gic_kvm_info; + + const struct gic_kvm_info *gic_get_kvm_info(void) +@@ -52,11 +54,13 @@ int gic_configure_irq(unsigned int irq, unsigned int type, + u32 confoff = (irq / 16) * 4; + u32 val, oldval; + int ret = 0; ++ unsigned long flags; + + /* + * Read current configuration register, and insert the config + * for "irq", depending on "type". 
+ */ ++ raw_spin_lock_irqsave(&irq_controller_lock, flags); + val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff); + if (type & IRQ_TYPE_LEVEL_MASK) + val &= ~confmask; +@@ -64,8 +68,10 @@ int gic_configure_irq(unsigned int irq, unsigned int type, + val |= confmask; + + /* If the current configuration is the same, then we are done */ +- if (val == oldval) ++ if (val == oldval) { ++ raw_spin_unlock_irqrestore(&irq_controller_lock, flags); + return 0; ++ } + + /* + * Write back the new configuration, and possibly re-enable +@@ -83,6 +89,7 @@ int gic_configure_irq(unsigned int irq, unsigned int type, + pr_warn("GIC: PPI%d is secure or misconfigured\n", + irq - 16); + } ++ raw_spin_unlock_irqrestore(&irq_controller_lock, flags); + + if (sync_access) + sync_access(); +diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c +index 684087db170b..1752007397f9 100644 +--- a/drivers/mmc/host/jz4740_mmc.c ++++ b/drivers/mmc/host/jz4740_mmc.c +@@ -368,9 +368,9 @@ static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host, + host->irq_mask &= ~irq; + else + host->irq_mask |= irq; +- spin_unlock_irqrestore(&host->lock, flags); + + writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK); ++ spin_unlock_irqrestore(&host->lock, flags); + } + + static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host, +diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c +index 46913ef25bc0..479a5f02d10b 100644 +--- a/drivers/mtd/ubi/block.c ++++ b/drivers/mtd/ubi/block.c +@@ -244,7 +244,7 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode) + * in any case. + */ + if (mode & FMODE_WRITE) { +- ret = -EPERM; ++ ret = -EROFS; + goto out_unlock; + } + +diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c +index 85d54f37e28f..6cb5ca52cb5a 100644 +--- a/drivers/mtd/ubi/build.c ++++ b/drivers/mtd/ubi/build.c +@@ -894,6 +894,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, + return -EINVAL; + } + ++ /* ++ * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes. ++ * MLC NAND is different and needs special care, otherwise UBI or UBIFS ++ * will die soon and you will lose all your data. 
++ */ ++ if (mtd->type == MTD_MLCNANDFLASH) { ++ pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n", ++ mtd->index); ++ return -EINVAL; ++ } ++ + if (ubi_num == UBI_DEV_NUM_AUTO) { + /* Search for an empty slot in the @ubi_devices array */ + for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) +diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c +index 4f0bd6b4422a..69dd21679a30 100644 +--- a/drivers/mtd/ubi/fastmap-wl.c ++++ b/drivers/mtd/ubi/fastmap-wl.c +@@ -362,7 +362,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi) + { + int i; + +- flush_work(&ubi->fm_work); + return_unused_pool_pebs(ubi, &ubi->fm_pool); + return_unused_pool_pebs(ubi, &ubi->fm_wl_pool); + +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c +index b09c81e882b4..1b287861e34f 100644 +--- a/drivers/net/xen-netfront.c ++++ b/drivers/net/xen-netfront.c +@@ -2038,7 +2038,10 @@ static void netback_changed(struct xenbus_device *dev, + case XenbusStateInitialised: + case XenbusStateReconfiguring: + case XenbusStateReconfigured: ++ break; ++ + case XenbusStateUnknown: ++ wake_up_all(&module_unload_q); + break; + + case XenbusStateInitWait: +@@ -2169,7 +2172,9 @@ static int xennet_remove(struct xenbus_device *dev) + xenbus_switch_state(dev, XenbusStateClosing); + wait_event(module_unload_q, + xenbus_read_driver_state(dev->otherend) == +- XenbusStateClosing); ++ XenbusStateClosing || ++ xenbus_read_driver_state(dev->otherend) == ++ XenbusStateUnknown); + + xenbus_switch_state(dev, XenbusStateClosed); + wait_event(module_unload_q, +diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c +index b8fb1ef1fc15..74257ac92490 100644 +--- a/drivers/nvdimm/namespace_devs.c ++++ b/drivers/nvdimm/namespace_devs.c +@@ -1747,7 +1747,7 @@ struct device *create_namespace_pmem(struct nd_region *nd_region, + } + + if (i < nd_region->ndr_mappings) { +- struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]); ++ struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm; + + /* + * Give up if we don't find an instance of a uuid at each +@@ -1755,7 +1755,7 @@ struct device *create_namespace_pmem(struct nd_region *nd_region, + * find a dimm with two instances of the same uuid. + */ + dev_err(&nd_region->dev, "%s missing label for %pUb\n", +- dev_name(ndd->dev), nd_label->uuid); ++ nvdimm_name(nvdimm), nd_label->uuid); + rc = -EINVAL; + goto err; + } +diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c +index a46b585fae31..d44b55879c67 100644 +--- a/drivers/pci/hotplug/acpiphp_glue.c ++++ b/drivers/pci/hotplug/acpiphp_glue.c +@@ -587,6 +587,7 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot) + { + unsigned long long sta = 0; + struct acpiphp_func *func; ++ u32 dvid; + + list_for_each_entry(func, &slot->funcs, sibling) { + if (func->flags & FUNC_HAS_STA) { +@@ -597,19 +598,27 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot) + if (ACPI_SUCCESS(status) && sta) + break; + } else { +- u32 dvid; +- +- pci_bus_read_config_dword(slot->bus, +- PCI_DEVFN(slot->device, +- func->function), +- PCI_VENDOR_ID, &dvid); +- if (dvid != 0xffffffff) { ++ if (pci_bus_read_dev_vendor_id(slot->bus, ++ PCI_DEVFN(slot->device, func->function), ++ &dvid, 0)) { + sta = ACPI_STA_ALL; + break; + } + } + } + ++ if (!sta) { ++ /* ++ * Check for the slot itself since it may be that the ++ * ACPI slot is a device below PCIe upstream port so in ++ * that case it may not even be reachable yet. 
++ */ ++ if (pci_bus_read_dev_vendor_id(slot->bus, ++ PCI_DEVFN(slot->device, 0), &dvid, 0)) { ++ sta = ACPI_STA_ALL; ++ } ++ } ++ + return (unsigned int)sta; + } + +diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c +index 1c85ecc9e7ac..0fcf94ffad32 100644 +--- a/drivers/pwm/pwm-rcar.c ++++ b/drivers/pwm/pwm-rcar.c +@@ -156,8 +156,12 @@ static int rcar_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + if (div < 0) + return div; + +- /* Let the core driver set pwm->period if disabled and duty_ns == 0 */ +- if (!pwm_is_enabled(pwm) && !duty_ns) ++ /* ++ * Let the core driver set pwm->period if disabled and duty_ns == 0. ++ * But, this driver should prevent to set the new duty_ns if current ++ * duty_cycle is not set ++ */ ++ if (!pwm_is_enabled(pwm) && !duty_ns && !pwm->state.duty_cycle) + return 0; + + rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR); +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c +index 6db80635ace8..c2e85e23d538 100644 +--- a/drivers/spi/spi.c ++++ b/drivers/spi/spi.c +@@ -743,8 +743,14 @@ static int spi_map_buf(struct spi_master *master, struct device *dev, + for (i = 0; i < sgs; i++) { + + if (vmalloced_buf || kmap_buf) { +- min = min_t(size_t, +- len, desc_len - offset_in_page(buf)); ++ /* ++ * Next scatterlist entry size is the minimum between ++ * the desc_len and the remaining buffer length that ++ * fits in a page. ++ */ ++ min = min_t(size_t, desc_len, ++ min_t(size_t, len, ++ PAGE_SIZE - offset_in_page(buf))); + if (vmalloced_buf) + vm_page = vmalloc_to_page(buf); + else +diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c +index 06912f0602b7..b7cb49afa056 100644 +--- a/drivers/thermal/imx_thermal.c ++++ b/drivers/thermal/imx_thermal.c +@@ -587,6 +587,9 @@ static int imx_thermal_probe(struct platform_device *pdev) + regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN); + regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP); + ++ data->irq_enabled = true; ++ data->mode = THERMAL_DEVICE_ENABLED; ++ + ret = devm_request_threaded_irq(&pdev->dev, data->irq, + imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread, + 0, "imx_thermal", data); +@@ -598,9 +601,6 @@ static int imx_thermal_probe(struct platform_device *pdev) + return ret; + } + +- data->irq_enabled = true; +- data->mode = THERMAL_DEVICE_ENABLED; +- + return 0; + } + +diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c +index a8c20413dbda..cba6bc6ab9ed 100644 +--- a/drivers/thunderbolt/nhi.c ++++ b/drivers/thunderbolt/nhi.c +@@ -628,6 +628,7 @@ static const struct dev_pm_ops nhi_pm_ops = { + * we just disable hotplug, the + * pci-tunnels stay alive. + */ ++ .thaw_noirq = nhi_resume_noirq, + .restore_noirq = nhi_resume_noirq, + }; + +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c +index faf50df81622..1c70541a1467 100644 +--- a/drivers/tty/n_tty.c ++++ b/drivers/tty/n_tty.c +@@ -2182,6 +2182,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, + } + if (tty_hung_up_p(file)) + break; ++ /* ++ * Abort readers for ttys which never actually ++ * get hung up. See __tty_hangup(). 
++ */ ++ if (test_bit(TTY_HUPPING, &tty->flags)) ++ break; + if (!timeout) + break; + if (file->f_flags & O_NONBLOCK) { +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c +index fb9bada5f1d5..4ee0a9de7556 100644 +--- a/drivers/tty/tty_io.c ++++ b/drivers/tty/tty_io.c +@@ -709,6 +709,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session) + return; + } + ++ /* ++ * Some console devices aren't actually hung up for technical and ++ * historical reasons, which can lead to indefinite interruptible ++ * sleep in n_tty_read(). The following explicitly tells ++ * n_tty_read() to abort readers. ++ */ ++ set_bit(TTY_HUPPING, &tty->flags); ++ + /* inuse_filps is protected by the single tty lock, + this really needs to change if we want to flush the + workqueue with the lock held */ +@@ -763,6 +771,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session) + * from the ldisc side, which is now guaranteed. + */ + set_bit(TTY_HUPPED, &tty->flags); ++ clear_bit(TTY_HUPPING, &tty->flags); + tty_unlock(tty); + + if (f) +diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c +index 358ca8dd784f..a5240b4d7ab9 100644 +--- a/drivers/usb/core/generic.c ++++ b/drivers/usb/core/generic.c +@@ -208,8 +208,13 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg) + if (!udev->parent) + rc = hcd_bus_suspend(udev, msg); + +- /* Non-root devices don't need to do anything for FREEZE or PRETHAW */ +- else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW) ++ /* ++ * Non-root USB2 devices don't need to do anything for FREEZE ++ * or PRETHAW. USB3 devices don't support global suspend and ++ * needs to be selectively suspended. ++ */ ++ else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW) ++ && (udev->speed < USB_SPEED_SUPER)) + rc = 0; + else + rc = usb_port_suspend(udev, msg); +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c +index 427291a19e6d..d6493abcf6bc 100644 +--- a/drivers/usb/dwc3/dwc3-pci.c ++++ b/drivers/usb/dwc3/dwc3-pci.c +@@ -173,7 +173,7 @@ static int dwc3_pci_probe(struct pci_dev *pci, + ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res)); + if (ret) { + dev_err(dev, "couldn't add resources to dwc3 device\n"); +- return ret; ++ goto err; + } + + dwc3->dev.parent = dev; +diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c +index a5719f271bf0..70ac1963b598 100644 +--- a/drivers/usb/gadget/function/f_midi.c ++++ b/drivers/usb/gadget/function/f_midi.c +@@ -389,7 +389,8 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt) + if (err) { + ERROR(midi, "%s: couldn't enqueue request: %d\n", + midi->out_ep->name, err); +- free_ep_req(midi->out_ep, req); ++ if (req->buf != NULL) ++ free_ep_req(midi->out_ep, req); + return err; + } + } +diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h +index 7d53a4773d1a..2f03334c6874 100644 +--- a/drivers/usb/gadget/u_f.h ++++ b/drivers/usb/gadget/u_f.h +@@ -64,7 +64,9 @@ struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len); + /* Frees a usb_request previously allocated by alloc_ep_req() */ + static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req) + { ++ WARN_ON(req->buf == NULL); + kfree(req->buf); ++ req->buf = NULL; + usb_ep_free_request(ep, req); + } + +diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c +index 7d658565b20f..188961780b8a 100644 +--- a/drivers/usb/gadget/udc/core.c ++++ 
b/drivers/usb/gadget/udc/core.c +@@ -248,6 +248,9 @@ EXPORT_SYMBOL_GPL(usb_ep_free_request); + * arranges to poll once per interval, and the gadget driver usually will + * have queued some data to transfer at that time. + * ++ * Note that @req's ->complete() callback must never be called from ++ * within usb_ep_queue() as that can create deadlock situations. ++ * + * Returns zero, or a negative error code. Endpoints that are not enabled + * report errors; errors will also be + * reported when the usb peripheral is disconnected. +diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c +index 844a309fe895..e85b9c2a4910 100644 +--- a/drivers/usb/musb/musb_gadget_ep0.c ++++ b/drivers/usb/musb/musb_gadget_ep0.c +@@ -114,15 +114,19 @@ static int service_tx_status_request( + } + + is_in = epnum & USB_DIR_IN; +- if (is_in) { +- epnum &= 0x0f; ++ epnum &= 0x0f; ++ if (epnum >= MUSB_C_NUM_EPS) { ++ handled = -EINVAL; ++ break; ++ } ++ ++ if (is_in) + ep = &musb->endpoints[epnum].ep_in; +- } else { ++ else + ep = &musb->endpoints[epnum].ep_out; +- } + regs = musb->endpoints[epnum].regs; + +- if (epnum >= MUSB_C_NUM_EPS || !ep->desc) { ++ if (!ep->desc) { + handled = -EINVAL; + break; + } +diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c +index 9f1ec4392209..7b8a957b008d 100644 +--- a/drivers/vfio/pci/vfio_pci_config.c ++++ b/drivers/vfio/pci/vfio_pci_config.c +@@ -810,6 +810,7 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos, + { + __le16 *ctrl = (__le16 *)(vdev->vconfig + pos - + offset + PCI_EXP_DEVCTL); ++ int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ; + + count = vfio_default_config_write(vdev, pos, count, perm, offset, val); + if (count < 0) +@@ -835,6 +836,27 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos, + pci_try_reset_function(vdev->pdev); + } + ++ /* ++ * MPS is virtualized to the user, writes do not change the physical ++ * register since determining a proper MPS value requires a system wide ++ * device view. The MRRS is largely independent of MPS, but since the ++ * user does not have that system-wide view, they might set a safe, but ++ * inefficiently low value. Here we allow writes through to hardware, ++ * but we set the floor to the physical device MPS setting, so that ++ * we can at least use full TLPs, as defined by the MPS value. ++ * ++ * NB, if any devices actually depend on an artificially low MRRS ++ * setting, this will need to be revisited, perhaps with a quirk ++ * though pcie_set_readrq(). ++ */ ++ if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) { ++ readrq = 128 << ++ ((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12); ++ readrq = max(readrq, pcie_get_mps(vdev->pdev)); ++ ++ pcie_set_readrq(vdev->pdev, readrq); ++ } ++ + return count; + } + +@@ -853,11 +875,12 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm) + * Allow writes to device control fields, except devctl_phantom, + * which could confuse IOMMU, MPS, which can break communication + * with other physical devices, and the ARI bit in devctl2, which +- * is set at probe time. FLR gets virtualized via our writefn. ++ * is set at probe time. FLR and MRRS get virtualized via our ++ * writefn. 
+ */ + p_setw(perm, PCI_EXP_DEVCTL, +- PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD, +- ~PCI_EXP_DEVCTL_PHANTOM); ++ PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD | ++ PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM); + p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI); + return 0; + } +diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c +index 8658dba21768..e682bf046e50 100644 +--- a/drivers/watchdog/f71808e_wdt.c ++++ b/drivers/watchdog/f71808e_wdt.c +@@ -496,7 +496,7 @@ static bool watchdog_is_running(void) + + is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0)) + && (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF) +- & F71808FG_FLAG_WD_EN); ++ & BIT(F71808FG_FLAG_WD_EN)); + + superio_exit(watchdog.sioaddr); + +diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c +index a11f73174877..6182d693cf43 100644 +--- a/fs/autofs4/root.c ++++ b/fs/autofs4/root.c +@@ -746,7 +746,7 @@ static int autofs4_dir_mkdir(struct inode *dir, + + autofs4_del_active(dentry); + +- inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555); ++ inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode); + if (!inode) + return -ENOMEM; + d_add(dentry, inode); +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h +index 7b496a4e650e..4ed4736b5bc6 100644 +--- a/fs/cifs/cifsglob.h ++++ b/fs/cifs/cifsglob.h +@@ -1412,6 +1412,7 @@ struct dfs_info3_param { + #define CIFS_FATTR_NEED_REVAL 0x4 + #define CIFS_FATTR_INO_COLLISION 0x8 + #define CIFS_FATTR_UNKNOWN_NLINK 0x10 ++#define CIFS_FATTR_FAKE_ROOT_INO 0x20 + + struct cifs_fattr { + u32 cf_flags; +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c +index 7ab5be7944aa..24c19eb94fa3 100644 +--- a/fs/cifs/inode.c ++++ b/fs/cifs/inode.c +@@ -701,6 +701,18 @@ cifs_get_file_info(struct file *filp) + return rc; + } + ++/* Simple function to return a 64 bit hash of string. Rarely called */ ++static __u64 simple_hashstr(const char *str) ++{ ++ const __u64 hash_mult = 1125899906842597L; /* a big enough prime */ ++ __u64 hash = 0; ++ ++ while (*str) ++ hash = (hash + (__u64) *str++) * hash_mult; ++ ++ return hash; ++} ++ + int + cifs_get_inode_info(struct inode **inode, const char *full_path, + FILE_ALL_INFO *data, struct super_block *sb, int xid, +@@ -810,6 +822,14 @@ cifs_get_inode_info(struct inode **inode, const char *full_path, + tmprc); + fattr.cf_uniqueid = iunique(sb, ROOT_I); + cifs_autodisable_serverino(cifs_sb); ++ } else if ((fattr.cf_uniqueid == 0) && ++ strlen(full_path) == 0) { ++ /* some servers ret bad root ino ie 0 */ ++ cifs_dbg(FYI, "Invalid (0) inodenum\n"); ++ fattr.cf_flags |= ++ CIFS_FATTR_FAKE_ROOT_INO; ++ fattr.cf_uniqueid = ++ simple_hashstr(tcon->treeName); + } + } + } else +@@ -826,6 +846,16 @@ cifs_get_inode_info(struct inode **inode, const char *full_path, + &fattr.cf_uniqueid, data); + if (tmprc) + fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid; ++ else if ((fattr.cf_uniqueid == 0) && ++ strlen(full_path) == 0) { ++ /* ++ * Reuse existing root inode num since ++ * inum zero for root causes ls of . and .. 
to ++ * not be returned ++ */ ++ cifs_dbg(FYI, "Srv ret 0 inode num for root\n"); ++ fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid; ++ } + } else + fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid; + } +@@ -887,6 +917,9 @@ cifs_get_inode_info(struct inode **inode, const char *full_path, + } + + cgii_exit: ++ if ((*inode) && ((*inode)->i_ino == 0)) ++ cifs_dbg(FYI, "inode number of zero returned\n"); ++ + kfree(buf); + cifs_put_tlink(tlink); + return rc; +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c +index e04ec868e37e..176b4b27a27a 100644 +--- a/fs/ext4/balloc.c ++++ b/fs/ext4/balloc.c +@@ -242,8 +242,6 @@ static int ext4_init_block_bitmap(struct super_block *sb, + */ + ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group), + sb->s_blocksize * 8, bh->b_data); +- ext4_block_bitmap_csum_set(sb, block_group, gdp, bh); +- ext4_group_desc_csum_set(sb, block_group, gdp); + return 0; + } + +@@ -447,6 +445,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) + err = ext4_init_block_bitmap(sb, bh, block_group, desc); + set_bitmap_uptodate(bh); + set_buffer_uptodate(bh); ++ set_buffer_verified(bh); + ext4_unlock_group(sb, block_group); + unlock_buffer(bh); + if (err) { +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c +index 2d94e8524839..79a9a1bddafc 100644 +--- a/fs/ext4/ialloc.c ++++ b/fs/ext4/ialloc.c +@@ -63,44 +63,6 @@ void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap) + memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3); + } + +-/* Initializes an uninitialized inode bitmap */ +-static int ext4_init_inode_bitmap(struct super_block *sb, +- struct buffer_head *bh, +- ext4_group_t block_group, +- struct ext4_group_desc *gdp) +-{ +- struct ext4_group_info *grp; +- struct ext4_sb_info *sbi = EXT4_SB(sb); +- J_ASSERT_BH(bh, buffer_locked(bh)); +- +- /* If checksum is bad mark all blocks and inodes use to prevent +- * allocation, essentially implementing a per-group read-only flag. 
*/ +- if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { +- grp = ext4_get_group_info(sb, block_group); +- if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) +- percpu_counter_sub(&sbi->s_freeclusters_counter, +- grp->bb_free); +- set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); +- if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) { +- int count; +- count = ext4_free_inodes_count(sb, gdp); +- percpu_counter_sub(&sbi->s_freeinodes_counter, +- count); +- } +- set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state); +- return -EFSBADCRC; +- } +- +- memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8); +- ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8, +- bh->b_data); +- ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh, +- EXT4_INODES_PER_GROUP(sb) / 8); +- ext4_group_desc_csum_set(sb, block_group, gdp); +- +- return 0; +-} +- + void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate) + { + if (uptodate) { +@@ -184,17 +146,14 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) + + ext4_lock_group(sb, block_group); + if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { +- err = ext4_init_inode_bitmap(sb, bh, block_group, desc); ++ memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8); ++ ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), ++ sb->s_blocksize * 8, bh->b_data); + set_bitmap_uptodate(bh); + set_buffer_uptodate(bh); + set_buffer_verified(bh); + ext4_unlock_group(sb, block_group); + unlock_buffer(bh); +- if (err) { +- ext4_error(sb, "Failed to init inode bitmap for group " +- "%u: %d", block_group, err); +- goto out; +- } + return bh; + } + ext4_unlock_group(sb, block_group); +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 5cccec68a0a5..340428274532 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -3396,7 +3396,6 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter) + { + struct file *file = iocb->ki_filp; + struct inode *inode = file->f_mapping->host; +- struct ext4_inode_info *ei = EXT4_I(inode); + ssize_t ret; + loff_t offset = iocb->ki_pos; + size_t count = iov_iter_count(iter); +@@ -3420,7 +3419,7 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter) + goto out; + } + orphan = 1; +- ei->i_disksize = inode->i_size; ++ ext4_update_i_disksize(inode, inode->i_size); + ext4_journal_stop(handle); + } + +@@ -3548,7 +3547,7 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter) + if (ret > 0) { + loff_t end = offset + ret; + if (end > inode->i_size) { +- ei->i_disksize = end; ++ ext4_update_i_disksize(inode, end); + i_size_write(inode, end); + /* + * We're going to return a positive `ret' +@@ -4494,6 +4493,12 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) + goto bad_inode; + raw_inode = ext4_raw_inode(&iloc); + ++ if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) { ++ EXT4_ERROR_INODE(inode, "root inode unallocated"); ++ ret = -EFSCORRUPTED; ++ goto bad_inode; ++ } ++ + if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { + ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); + if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 1ec4b6e34747..bfb83d76d128 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -2260,6 +2260,8 @@ static int ext4_check_descriptors(struct super_block *sb, + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Block bitmap for group %u overlaps " + "superblock", i); ++ if (!(sb->s_flags & 
MS_RDONLY)) ++ return 0; + } + if (block_bitmap < first_block || block_bitmap > last_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " +@@ -2272,6 +2274,8 @@ static int ext4_check_descriptors(struct super_block *sb, + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Inode bitmap for group %u overlaps " + "superblock", i); ++ if (!(sb->s_flags & MS_RDONLY)) ++ return 0; + } + if (inode_bitmap < first_block || inode_bitmap > last_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " +@@ -2284,6 +2288,8 @@ static int ext4_check_descriptors(struct super_block *sb, + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Inode table for group %u overlaps " + "superblock", i); ++ if (!(sb->s_flags & MS_RDONLY)) ++ return 0; + } + if (inode_table < first_block || + inode_table + sbi->s_itb_per_group - 1 > last_block) { +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c +index 0703a1179847..3d8b35f28a9b 100644 +--- a/fs/fs-writeback.c ++++ b/fs/fs-writeback.c +@@ -745,11 +745,12 @@ int inode_congested(struct inode *inode, int cong_bits) + */ + if (inode && inode_to_wb_is_valid(inode)) { + struct bdi_writeback *wb; +- bool locked, congested; ++ struct wb_lock_cookie lock_cookie = {}; ++ bool congested; + +- wb = unlocked_inode_to_wb_begin(inode, &locked); ++ wb = unlocked_inode_to_wb_begin(inode, &lock_cookie); + congested = wb_congested(wb, cong_bits); +- unlocked_inode_to_wb_end(inode, locked); ++ unlocked_inode_to_wb_end(inode, &lock_cookie); + return congested; + } + +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c +index 047c8ef620fe..542e33d29088 100644 +--- a/fs/jbd2/journal.c ++++ b/fs/jbd2/journal.c +@@ -951,7 +951,7 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block) + } + + /* +- * This is a variaon of __jbd2_update_log_tail which checks for validity of ++ * This is a variation of __jbd2_update_log_tail which checks for validity of + * provided log tail and locks j_checkpoint_mutex. So it is safe against races + * with other threads updating log tail. 
+ */ +@@ -1394,6 +1394,9 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid, + journal_superblock_t *sb = journal->j_superblock; + int ret; + ++ if (is_journal_aborted(journal)) ++ return -EIO; ++ + BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex)); + jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n", + tail_block, tail_tid); +diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c +index 5ef21f4c4c77..59c019a148f6 100644 +--- a/fs/jffs2/super.c ++++ b/fs/jffs2/super.c +@@ -342,7 +342,7 @@ static void jffs2_put_super (struct super_block *sb) + static void jffs2_kill_sb(struct super_block *sb) + { + struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); +- if (!(sb->s_flags & MS_RDONLY)) ++ if (c && !(sb->s_flags & MS_RDONLY)) + jffs2_stop_garbage_collect_thread(c); + kill_mtd_super(sb); + kfree(c); +diff --git a/fs/namespace.c b/fs/namespace.c +index d7360f9897b4..6c873b330a93 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -1033,7 +1033,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, + goto out_free; + } + +- mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED); ++ mnt->mnt.mnt_flags = old->mnt.mnt_flags; ++ mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL); + /* Don't allow unprivileged users to change mount flags */ + if (flag & CL_UNPRIVILEGED) { + mnt->mnt.mnt_flags |= MNT_LOCK_ATIME; +diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c +index e0e5f7c3c99f..8a459b179183 100644 +--- a/fs/notify/fanotify/fanotify.c ++++ b/fs/notify/fanotify/fanotify.c +@@ -92,7 +92,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, + u32 event_mask, + void *data, int data_type) + { +- __u32 marks_mask, marks_ignored_mask; ++ __u32 marks_mask = 0, marks_ignored_mask = 0; + struct path *path = data; + + pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p" +@@ -108,24 +108,20 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, + !d_can_lookup(path->dentry)) + return false; + +- if (inode_mark && vfsmnt_mark) { +- marks_mask = (vfsmnt_mark->mask | inode_mark->mask); +- marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask); +- } else if (inode_mark) { +- /* +- * if the event is for a child and this inode doesn't care about +- * events on the child, don't send it! +- */ +- if ((event_mask & FS_EVENT_ON_CHILD) && +- !(inode_mark->mask & FS_EVENT_ON_CHILD)) +- return false; +- marks_mask = inode_mark->mask; +- marks_ignored_mask = inode_mark->ignored_mask; +- } else if (vfsmnt_mark) { +- marks_mask = vfsmnt_mark->mask; +- marks_ignored_mask = vfsmnt_mark->ignored_mask; +- } else { +- BUG(); ++ /* ++ * if the event is for a child and this inode doesn't care about ++ * events on the child, don't send it! 
++ */ ++ if (inode_mark && ++ (!(event_mask & FS_EVENT_ON_CHILD) || ++ (inode_mark->mask & FS_EVENT_ON_CHILD))) { ++ marks_mask |= inode_mark->mask; ++ marks_ignored_mask |= inode_mark->ignored_mask; ++ } ++ ++ if (vfsmnt_mark) { ++ marks_mask |= vfsmnt_mark->mask; ++ marks_ignored_mask |= vfsmnt_mark->ignored_mask; + } + + if (d_is_dir(path->dentry) && +diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c +index 629d8c917fa6..6e35ef6521b4 100644 +--- a/fs/orangefs/super.c ++++ b/fs/orangefs/super.c +@@ -559,6 +559,11 @@ void orangefs_kill_sb(struct super_block *sb) + /* provided sb cleanup */ + kill_anon_super(sb); + ++ if (!ORANGEFS_SB(sb)) { ++ mutex_lock(&orangefs_request_mutex); ++ mutex_unlock(&orangefs_request_mutex); ++ return; ++ } + /* + * issue the unmount to userspace to tell it to remove the + * dynamic mount info it has for this superblock +diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c +index 76108185854e..2a5c4813c47d 100644 +--- a/fs/reiserfs/journal.c ++++ b/fs/reiserfs/journal.c +@@ -2640,7 +2640,7 @@ static int journal_init_dev(struct super_block *super, + if (IS_ERR(journal->j_dev_bd)) { + result = PTR_ERR(journal->j_dev_bd); + journal->j_dev_bd = NULL; +- reiserfs_warning(super, ++ reiserfs_warning(super, "sh-457", + "journal_init_dev: Cannot open '%s': %i", + jdev_name, result); + return result; +diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c +index 4ec051089186..03dda1cbe485 100644 +--- a/fs/ubifs/super.c ++++ b/fs/ubifs/super.c +@@ -1728,8 +1728,11 @@ static void ubifs_remount_ro(struct ubifs_info *c) + + dbg_save_space_info(c); + +- for (i = 0; i < c->jhead_cnt; i++) +- ubifs_wbuf_sync(&c->jheads[i].wbuf); ++ for (i = 0; i < c->jhead_cnt; i++) { ++ err = ubifs_wbuf_sync(&c->jheads[i].wbuf); ++ if (err) ++ ubifs_ro_mode(c, err); ++ } + + c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY); + c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS); +@@ -1795,8 +1798,11 @@ static void ubifs_put_super(struct super_block *sb) + int err; + + /* Synchronize write-buffers */ +- for (i = 0; i < c->jhead_cnt; i++) +- ubifs_wbuf_sync(&c->jheads[i].wbuf); ++ for (i = 0; i < c->jhead_cnt; i++) { ++ err = ubifs_wbuf_sync(&c->jheads[i].wbuf); ++ if (err) ++ ubifs_ro_mode(c, err); ++ } + + /* + * We are being cleanly unmounted which means the +diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c +index 695389a4fc23..3a3be23689b3 100644 +--- a/fs/udf/unicode.c ++++ b/fs/udf/unicode.c +@@ -28,6 +28,9 @@ + + #include "udf_sb.h" + ++#define SURROGATE_MASK 0xfffff800 ++#define SURROGATE_PAIR 0x0000d800 ++ + static int udf_uni2char_utf8(wchar_t uni, + unsigned char *out, + int boundlen) +@@ -37,6 +40,9 @@ static int udf_uni2char_utf8(wchar_t uni, + if (boundlen <= 0) + return -ENAMETOOLONG; + ++ if ((uni & SURROGATE_MASK) == SURROGATE_PAIR) ++ return -EINVAL; ++ + if (uni < 0x80) { + out[u_len++] = (unsigned char)uni; + } else if (uni < 0x800) { +diff --git a/include/dt-bindings/clock/mt2701-clk.h b/include/dt-bindings/clock/mt2701-clk.h +index 2062c67e2e51..a72db8d23ed6 100644 +--- a/include/dt-bindings/clock/mt2701-clk.h ++++ b/include/dt-bindings/clock/mt2701-clk.h +@@ -176,7 +176,8 @@ + #define CLK_TOP_AUD_EXT1 156 + #define CLK_TOP_AUD_EXT2 157 + #define CLK_TOP_NFI1X_PAD 158 +-#define CLK_TOP_NR 159 ++#define CLK_TOP_AXISEL_D4 159 ++#define CLK_TOP_NR 160 + + /* APMIXEDSYS */ + +diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h +index c357f27d5483..32728ff8095c 100644 +--- a/include/linux/backing-dev-defs.h ++++ 
b/include/linux/backing-dev-defs.h +@@ -191,6 +191,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync) + set_wb_congested(bdi->wb.congested, sync); + } + ++struct wb_lock_cookie { ++ bool locked; ++ unsigned long flags; ++}; ++ + #ifdef CONFIG_CGROUP_WRITEBACK + + /** +diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h +index 43b93a947e61..63f17b106a4a 100644 +--- a/include/linux/backing-dev.h ++++ b/include/linux/backing-dev.h +@@ -366,7 +366,7 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode) + /** + * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction + * @inode: target inode +- * @lockedp: temp bool output param, to be passed to the end function ++ * @cookie: output param, to be passed to the end function + * + * The caller wants to access the wb associated with @inode but isn't + * holding inode->i_lock, mapping->tree_lock or wb->list_lock. This +@@ -374,12 +374,12 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode) + * association doesn't change until the transaction is finished with + * unlocked_inode_to_wb_end(). + * +- * The caller must call unlocked_inode_to_wb_end() with *@lockdep +- * afterwards and can't sleep during transaction. IRQ may or may not be +- * disabled on return. ++ * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and ++ * can't sleep during the transaction. IRQs may or may not be disabled on ++ * return. + */ + static inline struct bdi_writeback * +-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) ++unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) + { + rcu_read_lock(); + +@@ -387,10 +387,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) + * Paired with store_release in inode_switch_wb_work_fn() and + * ensures that we see the new wb if we see cleared I_WB_SWITCH. + */ +- *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; ++ cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; + +- if (unlikely(*lockedp)) +- spin_lock_irq(&inode->i_mapping->tree_lock); ++ if (unlikely(cookie->locked)) ++ spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags); + + /* + * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock. 
+@@ -402,12 +402,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) + /** + * unlocked_inode_to_wb_end - end inode wb access transaction + * @inode: target inode +- * @locked: *@lockedp from unlocked_inode_to_wb_begin() ++ * @cookie: @cookie from unlocked_inode_to_wb_begin() + */ +-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) ++static inline void unlocked_inode_to_wb_end(struct inode *inode, ++ struct wb_lock_cookie *cookie) + { +- if (unlikely(locked)) +- spin_unlock_irq(&inode->i_mapping->tree_lock); ++ if (unlikely(cookie->locked)) ++ spin_unlock_irqrestore(&inode->i_mapping->tree_lock, cookie->flags); + + rcu_read_unlock(); + } +@@ -454,12 +455,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode) + } + + static inline struct bdi_writeback * +-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) ++unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) + { + return inode_to_wb(inode); + } + +-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) ++static inline void unlocked_inode_to_wb_end(struct inode *inode, ++ struct wb_lock_cookie *cookie) + { + } + +diff --git a/include/linux/hid.h b/include/linux/hid.h +index b2ec82712baa..fab65b61d6d4 100644 +--- a/include/linux/hid.h ++++ b/include/linux/hid.h +@@ -801,7 +801,7 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force); + extern void hidinput_disconnect(struct hid_device *); + + int hid_set_field(struct hid_field *, unsigned, __s32); +-int hid_input_report(struct hid_device *, int type, u8 *, int, int); ++int hid_input_report(struct hid_device *, int type, u8 *, u32, int); + int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field); + struct hid_field *hidinput_get_led_field(struct hid_device *hid); + unsigned int hidinput_count_leds(struct hid_device *hid); +@@ -1106,13 +1106,13 @@ static inline void hid_hw_wait(struct hid_device *hdev) + * + * @report: the report we want to know the length + */ +-static inline int hid_report_len(struct hid_report *report) ++static inline u32 hid_report_len(struct hid_report *report) + { + /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ + return ((report->size - 1) >> 3) + 1 + (report->id > 0); + } + +-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, ++int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, + int interrupt); + + /* HID quirks API */ +diff --git a/include/linux/tty.h b/include/linux/tty.h +index a41244fe58d0..6f1ee8528210 100644 +--- a/include/linux/tty.h ++++ b/include/linux/tty.h +@@ -355,6 +355,7 @@ struct tty_file_private { + #define TTY_PTY_LOCK 16 /* pty private */ + #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ + #define TTY_HUPPED 18 /* Post driver->hangup() */ ++#define TTY_HUPPING 19 /* Hangup in progress */ + #define TTY_LDISC_HALTED 22 /* Line discipline is halted */ + + /* Values for tty->flow_change */ +diff --git a/include/sound/pcm_oss.h b/include/sound/pcm_oss.h +index 760c969d885d..12bbf8c81112 100644 +--- a/include/sound/pcm_oss.h ++++ b/include/sound/pcm_oss.h +@@ -57,6 +57,7 @@ struct snd_pcm_oss_runtime { + char *buffer; /* vmallocated period */ + size_t buffer_used; /* used length from period buffer */ + struct mutex params_lock; ++ atomic_t rw_ref; /* concurrent read/write accesses */ + #ifdef CONFIG_SND_PCM_OSS_PLUGINS + struct snd_pcm_plugin *plugin_first; 
+ struct snd_pcm_plugin *plugin_last; +diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h +index 3f93d1695e7f..b455b0d86f26 100644 +--- a/include/uapi/linux/random.h ++++ b/include/uapi/linux/random.h +@@ -34,6 +34,9 @@ + /* Clear the entropy pool and associated counters. (Superuser only.) */ + #define RNDCLEARPOOL _IO( 'R', 0x06 ) + ++/* Reseed CRNG. (Superuser only.) */ ++#define RNDRESEEDCRNG _IO( 'R', 0x07 ) ++ + struct rand_pool_info { + int entropy_count; + int buf_size; +diff --git a/ipc/shm.c b/ipc/shm.c +index de93d01bfce2..b626745e771c 100644 +--- a/ipc/shm.c ++++ b/ipc/shm.c +@@ -198,6 +198,12 @@ static int __shm_open(struct vm_area_struct *vma) + if (IS_ERR(shp)) + return PTR_ERR(shp); + ++ if (shp->shm_file != sfd->file) { ++ /* ID was reused */ ++ shm_unlock(shp); ++ return -EINVAL; ++ } ++ + shp->shm_atim = get_seconds(); + shp->shm_lprid = task_tgid_vnr(current); + shp->shm_nattch++; +@@ -425,8 +431,9 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma) + int ret; + + /* +- * In case of remap_file_pages() emulation, the file can represent +- * removed IPC ID: propogate shm_lock() error to caller. ++ * In case of remap_file_pages() emulation, the file can represent an ++ * IPC ID that was removed, and possibly even reused by another shm ++ * segment already. Propagate this case as an error to caller. + */ + ret =__shm_open(vma); + if (ret) +@@ -450,6 +457,7 @@ static int shm_release(struct inode *ino, struct file *file) + struct shm_file_data *sfd = shm_file_data(file); + + put_ipc_ns(sfd->ns); ++ fput(sfd->file); + shm_file_data(file) = NULL; + kfree(sfd); + return 0; +@@ -1212,7 +1220,16 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, + file->f_mapping = shp->shm_file->f_mapping; + sfd->id = shp->shm_perm.id; + sfd->ns = get_ipc_ns(ns); +- sfd->file = shp->shm_file; ++ /* ++ * We need to take a reference to the real shm file to prevent the ++ * pointer from becoming stale in cases where the lifetime of the outer ++ * file extends beyond that of the shm segment. It's not usually ++ * possible, but it can happen during remap_file_pages() emulation as ++ * that unmaps the memory, then does ->mmap() via file reference only. ++ * We'll deny the ->mmap() if the shm segment was since removed, but to ++ * detect shm ID reuse we need to compare the file pointers. 
++ */ ++ sfd->file = get_file(shp->shm_file); + sfd->vm_ops = NULL; + + err = security_mmap_file(file, prot, flags); +diff --git a/kernel/resource.c b/kernel/resource.c +index 9b5f04404152..7ee3dd1ad2af 100644 +--- a/kernel/resource.c ++++ b/kernel/resource.c +@@ -633,7 +633,8 @@ static int __find_resource(struct resource *root, struct resource *old, + alloc.start = constraint->alignf(constraint->alignf_data, &avail, + size, constraint->align); + alloc.end = alloc.start + size - 1; +- if (resource_contains(&avail, &alloc)) { ++ if (alloc.start <= alloc.end && ++ resource_contains(&avail, &alloc)) { + new->start = alloc.start; + new->end = alloc.end; + return 0; +diff --git a/mm/filemap.c b/mm/filemap.c +index edfb90e3830c..6d2f561d517c 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -616,7 +616,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) + VM_BUG_ON_PAGE(!PageLocked(new), new); + VM_BUG_ON_PAGE(new->mapping, new); + +- error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); ++ error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK); + if (!error) { + struct address_space *mapping = old->mapping; + void (*freepage)(struct page *); +@@ -672,7 +672,7 @@ static int __add_to_page_cache_locked(struct page *page, + return error; + } + +- error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM); ++ error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK); + if (error) { + if (!huge) + mem_cgroup_cancel_charge(page, memcg, false); +@@ -1247,8 +1247,7 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, + if (fgp_flags & FGP_ACCESSED) + __SetPageReferenced(page); + +- err = add_to_page_cache_lru(page, mapping, offset, +- gfp_mask & GFP_RECLAIM_MASK); ++ err = add_to_page_cache_lru(page, mapping, offset, gfp_mask); + if (unlikely(err)) { + put_page(page); + page = NULL; +@@ -1996,7 +1995,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask) + if (!page) + return -ENOMEM; + +- ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL); ++ ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask); + if (ret == 0) + ret = mapping->a_ops->readpage(file, page); + else if (ret == -EEXIST) +diff --git a/mm/page-writeback.c b/mm/page-writeback.c +index 439cc63ad903..807236aed275 100644 +--- a/mm/page-writeback.c ++++ b/mm/page-writeback.c +@@ -2506,13 +2506,13 @@ void account_page_redirty(struct page *page) + if (mapping && mapping_cap_account_dirty(mapping)) { + struct inode *inode = mapping->host; + struct bdi_writeback *wb; +- bool locked; ++ struct wb_lock_cookie cookie = {}; + +- wb = unlocked_inode_to_wb_begin(inode, &locked); ++ wb = unlocked_inode_to_wb_begin(inode, &cookie); + current->nr_dirtied--; + dec_node_page_state(page, NR_DIRTIED); + dec_wb_stat(wb, WB_DIRTIED); +- unlocked_inode_to_wb_end(inode, locked); ++ unlocked_inode_to_wb_end(inode, &cookie); + } + } + EXPORT_SYMBOL(account_page_redirty); +@@ -2618,15 +2618,15 @@ void cancel_dirty_page(struct page *page) + if (mapping_cap_account_dirty(mapping)) { + struct inode *inode = mapping->host; + struct bdi_writeback *wb; +- bool locked; ++ struct wb_lock_cookie cookie = {}; + + lock_page_memcg(page); +- wb = unlocked_inode_to_wb_begin(inode, &locked); ++ wb = unlocked_inode_to_wb_begin(inode, &cookie); + + if (TestClearPageDirty(page)) + account_page_cleaned(page, mapping, wb); + +- unlocked_inode_to_wb_end(inode, locked); ++ unlocked_inode_to_wb_end(inode, &cookie); + unlock_page_memcg(page); + } else { + 
ClearPageDirty(page); +@@ -2658,7 +2658,7 @@ int clear_page_dirty_for_io(struct page *page) + if (mapping && mapping_cap_account_dirty(mapping)) { + struct inode *inode = mapping->host; + struct bdi_writeback *wb; +- bool locked; ++ struct wb_lock_cookie cookie = {}; + + /* + * Yes, Virginia, this is indeed insane. +@@ -2695,7 +2695,7 @@ int clear_page_dirty_for_io(struct page *page) + * always locked coming in here, so we get the desired + * exclusion. + */ +- wb = unlocked_inode_to_wb_begin(inode, &locked); ++ wb = unlocked_inode_to_wb_begin(inode, &cookie); + if (TestClearPageDirty(page)) { + mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY); + dec_node_page_state(page, NR_FILE_DIRTY); +@@ -2703,7 +2703,7 @@ int clear_page_dirty_for_io(struct page *page) + dec_wb_stat(wb, WB_RECLAIMABLE); + ret = 1; + } +- unlocked_inode_to_wb_end(inode, locked); ++ unlocked_inode_to_wb_end(inode, &cookie); + return ret; + } + return TestClearPageDirty(page); +diff --git a/mm/slab.c b/mm/slab.c +index 1f82d16a0518..c59844dbd034 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -4096,7 +4096,8 @@ static void cache_reap(struct work_struct *w) + next_reap_node(); + out: + /* Set up the next iteration */ +- schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC)); ++ schedule_delayed_work_on(smp_processor_id(), work, ++ round_jiffies_relative(REAPTIMEOUT_AC)); + } + + #ifdef CONFIG_SLABINFO +diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c +index 61a504fb1ae2..34f94052c519 100644 +--- a/net/sunrpc/rpc_pipe.c ++++ b/net/sunrpc/rpc_pipe.c +@@ -1375,6 +1375,7 @@ rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry) + struct dentry *clnt_dir = pipe_dentry->d_parent; + struct dentry *gssd_dir = clnt_dir->d_parent; + ++ dget(pipe_dentry); + __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry); + __rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1); + __rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1); +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c +index fa8741afadf5..cfb8f5896787 100644 +--- a/sound/core/oss/pcm_oss.c ++++ b/sound/core/oss/pcm_oss.c +@@ -834,8 +834,25 @@ static int choose_rate(struct snd_pcm_substream *substream, + return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL); + } + +-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream, +- bool trylock) ++/* parameter locking: returns immediately if tried during streaming */ ++static int lock_params(struct snd_pcm_runtime *runtime) ++{ ++ if (mutex_lock_interruptible(&runtime->oss.params_lock)) ++ return -ERESTARTSYS; ++ if (atomic_read(&runtime->oss.rw_ref)) { ++ mutex_unlock(&runtime->oss.params_lock); ++ return -EBUSY; ++ } ++ return 0; ++} ++ ++static void unlock_params(struct snd_pcm_runtime *runtime) ++{ ++ mutex_unlock(&runtime->oss.params_lock); ++} ++ ++/* call with params_lock held */ ++static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) + { + struct snd_pcm_runtime *runtime = substream->runtime; + struct snd_pcm_hw_params *params, *sparams; +@@ -849,11 +866,8 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream, + struct snd_mask sformat_mask; + struct snd_mask mask; + +- if (trylock) { +- if (!(mutex_trylock(&runtime->oss.params_lock))) +- return -EAGAIN; +- } else if (mutex_lock_interruptible(&runtime->oss.params_lock)) +- return -EINTR; ++ if (!runtime->oss.params) ++ return 0; + sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL); + params = kmalloc(sizeof(*params), GFP_KERNEL); + sparams = 
kmalloc(sizeof(*sparams), GFP_KERNEL); +@@ -1079,6 +1093,23 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream, + kfree(sw_params); + kfree(params); + kfree(sparams); ++ return err; ++} ++ ++/* this one takes the lock by itself */ ++static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream, ++ bool trylock) ++{ ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ int err; ++ ++ if (trylock) { ++ if (!(mutex_trylock(&runtime->oss.params_lock))) ++ return -EAGAIN; ++ } else if (mutex_lock_interruptible(&runtime->oss.params_lock)) ++ return -ERESTARTSYS; ++ ++ err = snd_pcm_oss_change_params_locked(substream); + mutex_unlock(&runtime->oss.params_lock); + return err; + } +@@ -1107,6 +1138,10 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil + return 0; + } + ++/* call with params_lock held */ ++/* NOTE: this always call PREPARE unconditionally no matter whether ++ * runtime->oss.prepare is set or not ++ */ + static int snd_pcm_oss_prepare(struct snd_pcm_substream *substream) + { + int err; +@@ -1131,14 +1166,35 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream) + struct snd_pcm_runtime *runtime; + int err; + +- if (substream == NULL) +- return 0; + runtime = substream->runtime; + if (runtime->oss.params) { + err = snd_pcm_oss_change_params(substream, false); + if (err < 0) + return err; + } ++ if (runtime->oss.prepare) { ++ if (mutex_lock_interruptible(&runtime->oss.params_lock)) ++ return -ERESTARTSYS; ++ err = snd_pcm_oss_prepare(substream); ++ mutex_unlock(&runtime->oss.params_lock); ++ if (err < 0) ++ return err; ++ } ++ return 0; ++} ++ ++/* call with params_lock held */ ++static int snd_pcm_oss_make_ready_locked(struct snd_pcm_substream *substream) ++{ ++ struct snd_pcm_runtime *runtime; ++ int err; ++ ++ runtime = substream->runtime; ++ if (runtime->oss.params) { ++ err = snd_pcm_oss_change_params_locked(substream); ++ if (err < 0) ++ return err; ++ } + if (runtime->oss.prepare) { + err = snd_pcm_oss_prepare(substream); + if (err < 0) +@@ -1367,13 +1423,15 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha + if (atomic_read(&substream->mmap_count)) + return -ENXIO; + +- if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) +- return tmp; ++ atomic_inc(&runtime->oss.rw_ref); + while (bytes > 0) { + if (mutex_lock_interruptible(&runtime->oss.params_lock)) { + tmp = -ERESTARTSYS; + break; + } ++ tmp = snd_pcm_oss_make_ready_locked(substream); ++ if (tmp < 0) ++ goto err; + if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { + tmp = bytes; + if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes) +@@ -1429,6 +1487,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha + } + tmp = 0; + } ++ atomic_dec(&runtime->oss.rw_ref); + return xfer > 0 ? 
(snd_pcm_sframes_t)xfer : tmp; + } + +@@ -1474,13 +1533,15 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use + if (atomic_read(&substream->mmap_count)) + return -ENXIO; + +- if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) +- return tmp; ++ atomic_inc(&runtime->oss.rw_ref); + while (bytes > 0) { + if (mutex_lock_interruptible(&runtime->oss.params_lock)) { + tmp = -ERESTARTSYS; + break; + } ++ tmp = snd_pcm_oss_make_ready_locked(substream); ++ if (tmp < 0) ++ goto err; + if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { + if (runtime->oss.buffer_used == 0) { + tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1); +@@ -1521,6 +1582,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use + } + tmp = 0; + } ++ atomic_dec(&runtime->oss.rw_ref); + return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp; + } + +@@ -1536,10 +1598,12 @@ static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file) + continue; + runtime = substream->runtime; + snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); ++ mutex_lock(&runtime->oss.params_lock); + runtime->oss.prepare = 1; + runtime->oss.buffer_used = 0; + runtime->oss.prev_hw_ptr_period = 0; + runtime->oss.period_ptr = 0; ++ mutex_unlock(&runtime->oss.params_lock); + } + return 0; + } +@@ -1625,9 +1689,13 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file) + goto __direct; + if ((err = snd_pcm_oss_make_ready(substream)) < 0) + return err; ++ atomic_inc(&runtime->oss.rw_ref); ++ if (mutex_lock_interruptible(&runtime->oss.params_lock)) { ++ atomic_dec(&runtime->oss.rw_ref); ++ return -ERESTARTSYS; ++ } + format = snd_pcm_oss_format_from(runtime->oss.format); + width = snd_pcm_format_physical_width(format); +- mutex_lock(&runtime->oss.params_lock); + if (runtime->oss.buffer_used > 0) { + #ifdef OSS_DEBUG + pcm_dbg(substream->pcm, "sync: buffer_used\n"); +@@ -1637,10 +1705,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file) + runtime->oss.buffer + runtime->oss.buffer_used, + size); + err = snd_pcm_oss_sync1(substream, runtime->oss.period_bytes); +- if (err < 0) { +- mutex_unlock(&runtime->oss.params_lock); +- return err; +- } ++ if (err < 0) ++ goto unlock; + } else if (runtime->oss.period_ptr > 0) { + #ifdef OSS_DEBUG + pcm_dbg(substream->pcm, "sync: period_ptr\n"); +@@ -1650,10 +1716,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file) + runtime->oss.buffer, + size * 8 / width); + err = snd_pcm_oss_sync1(substream, size); +- if (err < 0) { +- mutex_unlock(&runtime->oss.params_lock); +- return err; +- } ++ if (err < 0) ++ goto unlock; + } + /* + * The ALSA's period might be a bit large than OSS one. 
+@@ -1684,7 +1748,11 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file) + snd_pcm_lib_writev(substream, buffers, size); + } + } ++unlock: + mutex_unlock(&runtime->oss.params_lock); ++ atomic_dec(&runtime->oss.rw_ref); ++ if (err < 0) ++ return err; + /* + * finish sync: drain the buffer + */ +@@ -1695,7 +1763,9 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file) + substream->f_flags = saved_f_flags; + if (err < 0) + return err; ++ mutex_lock(&runtime->oss.params_lock); + runtime->oss.prepare = 1; ++ mutex_unlock(&runtime->oss.params_lock); + } + + substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; +@@ -1706,8 +1776,10 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file) + err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); + if (err < 0) + return err; ++ mutex_lock(&runtime->oss.params_lock); + runtime->oss.buffer_used = 0; + runtime->oss.prepare = 1; ++ mutex_unlock(&runtime->oss.params_lock); + } + return 0; + } +@@ -1719,6 +1791,8 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate) + for (idx = 1; idx >= 0; --idx) { + struct snd_pcm_substream *substream = pcm_oss_file->streams[idx]; + struct snd_pcm_runtime *runtime; ++ int err; ++ + if (substream == NULL) + continue; + runtime = substream->runtime; +@@ -1726,10 +1800,14 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate) + rate = 1000; + else if (rate > 192000) + rate = 192000; ++ err = lock_params(runtime); ++ if (err < 0) ++ return err; + if (runtime->oss.rate != rate) { + runtime->oss.params = 1; + runtime->oss.rate = rate; + } ++ unlock_params(runtime); + } + return snd_pcm_oss_get_rate(pcm_oss_file); + } +@@ -1754,13 +1832,19 @@ static int snd_pcm_oss_set_channels(struct snd_pcm_oss_file *pcm_oss_file, unsig + for (idx = 1; idx >= 0; --idx) { + struct snd_pcm_substream *substream = pcm_oss_file->streams[idx]; + struct snd_pcm_runtime *runtime; ++ int err; ++ + if (substream == NULL) + continue; + runtime = substream->runtime; ++ err = lock_params(runtime); ++ if (err < 0) ++ return err; + if (runtime->oss.channels != channels) { + runtime->oss.params = 1; + runtime->oss.channels = channels; + } ++ unlock_params(runtime); + } + return snd_pcm_oss_get_channels(pcm_oss_file); + } +@@ -1833,6 +1917,7 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file) + static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format) + { + int formats, idx; ++ int err; + + if (format != AFMT_QUERY) { + formats = snd_pcm_oss_get_formats(pcm_oss_file); +@@ -1846,10 +1931,14 @@ static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int for + if (substream == NULL) + continue; + runtime = substream->runtime; ++ err = lock_params(runtime); ++ if (err < 0) ++ return err; + if (runtime->oss.format != format) { + runtime->oss.params = 1; + runtime->oss.format = format; + } ++ unlock_params(runtime); + } + } + return snd_pcm_oss_get_format(pcm_oss_file); +@@ -1869,8 +1958,6 @@ static int snd_pcm_oss_set_subdivide1(struct snd_pcm_substream *substream, int s + { + struct snd_pcm_runtime *runtime; + +- if (substream == NULL) +- return 0; + runtime = substream->runtime; + if (subdivide == 0) { + subdivide = runtime->oss.subdivision; +@@ -1894,9 +1981,17 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int + + for (idx = 1; idx >= 0; --idx) { + struct snd_pcm_substream *substream = pcm_oss_file->streams[idx]; ++ struct 
snd_pcm_runtime *runtime; ++ + if (substream == NULL) + continue; +- if ((err = snd_pcm_oss_set_subdivide1(substream, subdivide)) < 0) ++ runtime = substream->runtime; ++ err = lock_params(runtime); ++ if (err < 0) ++ return err; ++ err = snd_pcm_oss_set_subdivide1(substream, subdivide); ++ unlock_params(runtime); ++ if (err < 0) + return err; + } + return err; +@@ -1906,8 +2001,6 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign + { + struct snd_pcm_runtime *runtime; + +- if (substream == NULL) +- return 0; + runtime = substream->runtime; + if (runtime->oss.subdivision || runtime->oss.fragshift) + return -EINVAL; +@@ -1927,9 +2020,17 @@ static int snd_pcm_oss_set_fragment(struct snd_pcm_oss_file *pcm_oss_file, unsig + + for (idx = 1; idx >= 0; --idx) { + struct snd_pcm_substream *substream = pcm_oss_file->streams[idx]; ++ struct snd_pcm_runtime *runtime; ++ + if (substream == NULL) + continue; +- if ((err = snd_pcm_oss_set_fragment1(substream, val)) < 0) ++ runtime = substream->runtime; ++ err = lock_params(runtime); ++ if (err < 0) ++ return err; ++ err = snd_pcm_oss_set_fragment1(substream, val); ++ unlock_params(runtime); ++ if (err < 0) + return err; + } + return err; +@@ -2013,6 +2114,9 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr + } + if (psubstream) { + runtime = psubstream->runtime; ++ cmd = 0; ++ if (mutex_lock_interruptible(&runtime->oss.params_lock)) ++ return -ERESTARTSYS; + if (trigger & PCM_ENABLE_OUTPUT) { + if (runtime->oss.trigger) + goto _skip1; +@@ -2030,13 +2134,19 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr + cmd = SNDRV_PCM_IOCTL_DROP; + runtime->oss.prepare = 1; + } +- err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL); +- if (err < 0) +- return err; +- } + _skip1: ++ mutex_unlock(&runtime->oss.params_lock); ++ if (cmd) { ++ err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL); ++ if (err < 0) ++ return err; ++ } ++ } + if (csubstream) { + runtime = csubstream->runtime; ++ cmd = 0; ++ if (mutex_lock_interruptible(&runtime->oss.params_lock)) ++ return -ERESTARTSYS; + if (trigger & PCM_ENABLE_INPUT) { + if (runtime->oss.trigger) + goto _skip2; +@@ -2051,11 +2161,14 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr + cmd = SNDRV_PCM_IOCTL_DROP; + runtime->oss.prepare = 1; + } +- err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL); +- if (err < 0) +- return err; +- } + _skip2: ++ mutex_unlock(&runtime->oss.params_lock); ++ if (cmd) { ++ err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL); ++ if (err < 0) ++ return err; ++ } ++ } + return 0; + } + +@@ -2307,6 +2420,7 @@ static void snd_pcm_oss_init_substream(struct snd_pcm_substream *substream, + runtime->oss.maxfrags = 0; + runtime->oss.subdivision = 0; + substream->pcm_release = snd_pcm_oss_release_substream; ++ atomic_set(&runtime->oss.rw_ref, 0); + } + + static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file) +diff --git a/sound/core/pcm.c b/sound/core/pcm.c +index 074363b63cc4..6bda8f6c5f84 100644 +--- a/sound/core/pcm.c ++++ b/sound/core/pcm.c +@@ -28,6 +28,7 @@ + #include <sound/core.h> + #include <sound/minors.h> + #include <sound/pcm.h> ++#include <sound/timer.h> + #include <sound/control.h> + #include <sound/info.h> + +@@ -1025,8 +1026,13 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream) + snd_free_pages((void*)runtime->control, + PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control))); + kfree(runtime->hw_constraints.rules); +- 
kfree(runtime); ++ /* Avoid concurrent access to runtime via PCM timer interface */ ++ if (substream->timer) ++ spin_lock_irq(&substream->timer->lock); + substream->runtime = NULL; ++ if (substream->timer) ++ spin_unlock_irq(&substream->timer->lock); ++ kfree(runtime); + put_pid(substream->pid); + substream->pid = NULL; + substream->pstr->substream_opened--; +diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c +index f69764d7cdd7..e30e30ba6e39 100644 +--- a/sound/core/rawmidi_compat.c ++++ b/sound/core/rawmidi_compat.c +@@ -36,8 +36,6 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile, + struct snd_rawmidi_params params; + unsigned int val; + +- if (rfile->output == NULL) +- return -EINVAL; + if (get_user(params.stream, &src->stream) || + get_user(params.buffer_size, &src->buffer_size) || + get_user(params.avail_min, &src->avail_min) || +@@ -46,8 +44,12 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile, + params.no_active_sensing = val; + switch (params.stream) { + case SNDRV_RAWMIDI_STREAM_OUTPUT: ++ if (!rfile->output) ++ return -EINVAL; + return snd_rawmidi_output_params(rfile->output, ¶ms); + case SNDRV_RAWMIDI_STREAM_INPUT: ++ if (!rfile->input) ++ return -EINVAL; + return snd_rawmidi_input_params(rfile->input, ¶ms); + } + return -EINVAL; +@@ -67,16 +69,18 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile, + int err; + struct snd_rawmidi_status status; + +- if (rfile->output == NULL) +- return -EINVAL; + if (get_user(status.stream, &src->stream)) + return -EFAULT; + + switch (status.stream) { + case SNDRV_RAWMIDI_STREAM_OUTPUT: ++ if (!rfile->output) ++ return -EINVAL; + err = snd_rawmidi_output_status(rfile->output, &status); + break; + case SNDRV_RAWMIDI_STREAM_INPUT: ++ if (!rfile->input) ++ return -EINVAL; + err = snd_rawmidi_input_status(rfile->input, &status); + break; + default: +@@ -112,16 +116,18 @@ static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile, + int err; + struct snd_rawmidi_status status; + +- if (rfile->output == NULL) +- return -EINVAL; + if (get_user(status.stream, &src->stream)) + return -EFAULT; + + switch (status.stream) { + case SNDRV_RAWMIDI_STREAM_OUTPUT: ++ if (!rfile->output) ++ return -EINVAL; + err = snd_rawmidi_output_status(rfile->output, &status); + break; + case SNDRV_RAWMIDI_STREAM_INPUT: ++ if (!rfile->input) ++ return -EINVAL; + err = snd_rawmidi_input_status(rfile->input, &status); + break; + default: +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 733b3423baa2..7d3f88d90eec 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -1514,7 +1514,8 @@ static void azx_check_snoop_available(struct azx *chip) + */ + u8 val; + pci_read_config_byte(chip->pci, 0x42, &val); +- if (!(val & 0x80) && chip->pci->revision == 0x30) ++ if (!(val & 0x80) && (chip->pci->revision == 0x30 || ++ chip->pci->revision == 0x20)) + snoop = false; + } + +diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c +index 993bde29ca1b..7693e63078b1 100644 +--- a/sound/soc/codecs/ssm2602.c ++++ b/sound/soc/codecs/ssm2602.c +@@ -54,10 +54,17 @@ struct ssm2602_priv { + * using 2 wire for device control, so we cache them instead. 
+ * There is no point in caching the reset register + */ +-static const u16 ssm2602_reg[SSM2602_CACHEREGNUM] = { +- 0x0097, 0x0097, 0x0079, 0x0079, +- 0x000a, 0x0008, 0x009f, 0x000a, +- 0x0000, 0x0000 ++static const struct reg_default ssm2602_reg[SSM2602_CACHEREGNUM] = { ++ { .reg = 0x00, .def = 0x0097 }, ++ { .reg = 0x01, .def = 0x0097 }, ++ { .reg = 0x02, .def = 0x0079 }, ++ { .reg = 0x03, .def = 0x0079 }, ++ { .reg = 0x04, .def = 0x000a }, ++ { .reg = 0x05, .def = 0x0008 }, ++ { .reg = 0x06, .def = 0x009f }, ++ { .reg = 0x07, .def = 0x000a }, ++ { .reg = 0x08, .def = 0x0000 }, ++ { .reg = 0x09, .def = 0x0000 } + }; + + +@@ -620,8 +627,8 @@ const struct regmap_config ssm2602_regmap_config = { + .volatile_reg = ssm2602_register_volatile, + + .cache_type = REGCACHE_RBTREE, +- .reg_defaults_raw = ssm2602_reg, +- .num_reg_defaults_raw = ARRAY_SIZE(ssm2602_reg), ++ .reg_defaults = ssm2602_reg, ++ .num_reg_defaults = ARRAY_SIZE(ssm2602_reg), + }; + EXPORT_SYMBOL_GPL(ssm2602_regmap_config); + +diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c +index d0fb2f205bd9..4f4ebe90f1a8 100644 +--- a/sound/usb/line6/midi.c ++++ b/sound/usb/line6/midi.c +@@ -125,7 +125,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data, + } + + usb_fill_int_urb(urb, line6->usbdev, +- usb_sndbulkpipe(line6->usbdev, ++ usb_sndintpipe(line6->usbdev, + line6->properties->ep_ctrl_w), + transfer_buffer, length, midi_sent, line6, + line6->interval); |