author      Mike Pagano <mpagano@gentoo.org>    2016-05-11 20:15:55 -0400
committer   Mike Pagano <mpagano@gentoo.org>    2016-05-11 20:15:55 -0400
commit      4040e317a60e2caff24458b02c1f87a41a84c644 (patch)
tree        2ecc8dd77d7df2bb78815af612b13da5fcf73dab
parent      Linux patch 4.5.3 (diff)
download    linux-patches-4040e317a60e2caff24458b02c1f87a41a84c644.tar.gz
            linux-patches-4040e317a60e2caff24458b02c1f87a41a84c644.tar.bz2
            linux-patches-4040e317a60e2caff24458b02c1f87a41a84c644.zip
Linux patch 4.5.4 (4.5-6)
-rw-r--r--   0000_README                 4
-rw-r--r--   1003_linux-4.5.4.patch      2354
2 files changed, 2358 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 0147ad91..a736c59f 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch: 1002_linux-4.5.3.patch
From: http://www.kernel.org
Desc: Linux 4.5.3
+Patch: 1003_linux-4.5.4.patch
+From: http://www.kernel.org
+Desc: Linux 4.5.4
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1003_linux-4.5.4.patch b/1003_linux-4.5.4.patch
new file mode 100644
index 00000000..a783ee23
--- /dev/null
+++ b/1003_linux-4.5.4.patch
@@ -0,0 +1,2354 @@
+diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
+index c2340eeeb97f..c000832a7fb9 100644
+--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
++++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
+@@ -30,6 +30,10 @@ Optional properties:
+ - target-supply : regulator for SATA target power
+ - phys : reference to the SATA PHY node
+ - phy-names : must be "sata-phy"
++- ports-implemented : Mask that indicates which ports that the HBA supports
++ are available for software to use. Useful if PORTS_IMPL
++ is not programmed by the BIOS, which is true with
++ some embedded SOC's.
+
+ Required properties when using sub-nodes:
+ - #address-cells : number of cells to encode an address
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 5a389bc68e0e..77e4c10b4c06 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -4163,8 +4163,8 @@ F: Documentation/efi-stub.txt
+ F: arch/ia64/kernel/efi.c
+ F: arch/x86/boot/compressed/eboot.[ch]
+ F: arch/x86/include/asm/efi.h
+-F: arch/x86/platform/efi/*
+-F: drivers/firmware/efi/*
++F: arch/x86/platform/efi/
++F: drivers/firmware/efi/
+ F: include/linux/efi*.h
+
+ EFI VARIABLE FILESYSTEM
+diff --git a/Makefile b/Makefile
+index 9b56a6c5e36f..d64eade37241 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 5
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
+index 27b17adea50d..cb69299a492e 100644
+--- a/arch/arc/include/asm/io.h
++++ b/arch/arc/include/asm/io.h
+@@ -13,6 +13,15 @@
+ #include <asm/byteorder.h>
+ #include <asm/page.h>
+
++#ifdef CONFIG_ISA_ARCV2
++#include <asm/barrier.h>
++#define __iormb() rmb()
++#define __iowmb() wmb()
++#else
++#define __iormb() do { } while (0)
++#define __iowmb() do { } while (0)
++#endif
++
+ extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
+ extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+ unsigned long flags);
+@@ -22,6 +31,15 @@ extern void iounmap(const void __iomem *addr);
+ #define ioremap_wc(phy, sz) ioremap(phy, sz)
+ #define ioremap_wt(phy, sz) ioremap(phy, sz)
+
++/*
++ * io{read,write}{16,32}be() macros
++ */
++#define ioread16be(p) ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
++#define ioread32be(p) ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
++
++#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
++#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
++
+ /* Change struct page to physical address */
+ #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+@@ -99,15 +117,6 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+
+ }
+
+-#ifdef CONFIG_ISA_ARCV2
+-#include <asm/barrier.h>
+-#define __iormb() rmb()
+-#define __iowmb() wmb()
+-#else
+-#define __iormb() do { } while (0)
+-#define __iowmb() do { } while (0)
+-#endif
+-
+ /*
+ * MMIO can also get buffered/optimized in micro-arch, so barriers needed
+ * Based on ARM model for the typical use case
+diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
+index ed521e85e208..e8bc7e8bedd2 100644
+--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
+@@ -665,7 +665,7 @@
+ };
+
+ sata0: sata@29000000 {
+- compatible = "generic-ahci";
++ compatible = "qcom,apq8064-ahci", "generic-ahci";
+ status = "disabled";
+ reg = <0x29000000 0x180>;
+ interrupts = <GIC_SPI 209 IRQ_TYPE_NONE>;
+@@ -687,6 +687,7 @@
+
+ phys = <&sata_phy0>;
+ phy-names = "sata-phy";
++ ports-implemented = <0x1>;
+ };
+
+ /* Temporary fixed regulator */
+diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
+index 47905a50e075..318394ed5c7a 100644
+--- a/arch/arm/mach-cns3xxx/pcie.c
++++ b/arch/arm/mach-cns3xxx/pcie.c
+@@ -220,13 +220,13 @@ static void cns3xxx_write_config(struct cns3xxx_pcie *cnspci,
+ u32 mask = (0x1ull << (size * 8)) - 1;
+ int shift = (where % 4) * 8;
+
+- v = readl_relaxed(base + (where & 0xffc));
++ v = readl_relaxed(base);
+
+ v &= ~(mask << shift);
+ v |= (val & mask) << shift;
+
+- writel_relaxed(v, base + (where & 0xffc));
+- readl_relaxed(base + (where & 0xffc));
++ writel_relaxed(v, base);
++ readl_relaxed(base);
+ }
+
+ static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci)
+diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
+index 7c21760f590f..875a2bab64f6 100644
+--- a/arch/arm/mach-exynos/pm_domains.c
++++ b/arch/arm/mach-exynos/pm_domains.c
+@@ -92,7 +92,7 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
+ if (IS_ERR(pd->clk[i]))
+ break;
+
+- if (IS_ERR(pd->clk[i]))
++ if (IS_ERR(pd->pclk[i]))
+ continue; /* Skip on first power up */
+ if (clk_set_parent(pd->clk[i], pd->pclk[i]))
+ pr_err("%s: error setting parent to clock%d\n",
+diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
+index 5d94b7a2fb10..c160fa3007e9 100644
+--- a/arch/arm/mach-socfpga/headsmp.S
++++ b/arch/arm/mach-socfpga/headsmp.S
+@@ -13,6 +13,7 @@
+ #include <asm/assembler.h>
+
+ .arch armv7-a
++ .arm
+
+ ENTRY(secondary_trampoline)
+ /* CPU1 will always fetch from 0x0 when it is brought out of reset.
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index c976ebfe2269..57b4836b7ecd 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -344,7 +344,7 @@ tracesys_next:
+ #endif
+
+ cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
+- comiclr,>>= __NR_Linux_syscalls, %r20, %r0
++ comiclr,>> __NR_Linux_syscalls, %r20, %r0
+ b,n .Ltracesys_nosys
+
+ LDREGX %r20(%r19), %r19
+diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
+index e4396a7d0f7c..4afe66aa1400 100644
+--- a/arch/powerpc/include/asm/word-at-a-time.h
++++ b/arch/powerpc/include/asm/word-at-a-time.h
+@@ -82,7 +82,7 @@ static inline unsigned long create_zero_mask(unsigned long bits)
+ "andc %1,%1,%2\n\t"
+ "popcntd %0,%1"
+ : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
+- : "r" (bits));
++ : "b" (bits));
+
+ return leading_zero_bits;
+ }
+diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+index 2c5aaf8c2e2f..05538582a809 100644
+--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+@@ -385,6 +385,9 @@ static void intel_thermal_interrupt(void)
+ {
+ __u64 msr_val;
+
++ if (static_cpu_has(X86_FEATURE_HWP))
++ wrmsrl_safe(MSR_HWP_STATUS, 0);
++
+ rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
+
+ /* Check for violation of core thermal thresholds*/
+diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
+index b285d4e8c68e..5da924bbf0a0 100644
+--- a/arch/x86/kernel/sysfb_efi.c
++++ b/arch/x86/kernel/sysfb_efi.c
+@@ -106,14 +106,24 @@ static int __init efifb_set_system(const struct dmi_system_id *id)
+ continue;
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ resource_size_t start, end;
++ unsigned long flags;
++
++ flags = pci_resource_flags(dev, i);
++ if (!(flags & IORESOURCE_MEM))
++ continue;
++
++ if (flags & IORESOURCE_UNSET)
++ continue;
++
++ if (pci_resource_len(dev, i) == 0)
++ continue;
+
+ start = pci_resource_start(dev, i);
+- if (start == 0)
+- break;
+ end = pci_resource_end(dev, i);
+ if (screen_info.lfb_base >= start &&
+ screen_info.lfb_base < end) {
+ found_bar = 1;
++ break;
+ }
+ }
+ }
+diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
+index 92ae6acac8a7..6aa0f4d9eea6 100644
+--- a/arch/x86/kernel/tsc_msr.c
++++ b/arch/x86/kernel/tsc_msr.c
+@@ -92,7 +92,7 @@ unsigned long try_msr_calibrate_tsc(void)
+
+ if (freq_desc_tables[cpu_index].msr_plat) {
+ rdmsr(MSR_PLATFORM_INFO, lo, hi);
+- ratio = (lo >> 8) & 0x1f;
++ ratio = (lo >> 8) & 0xff;
+ } else {
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ ratio = (hi >> 8) & 0x1f;
+diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
+index 6979186dbd4b..9f77943653fb 100644
+--- a/drivers/acpi/acpi_processor.c
++++ b/drivers/acpi/acpi_processor.c
+@@ -491,6 +491,58 @@ static void acpi_processor_remove(struct acpi_device *device)
+ }
+ #endif /* CONFIG_ACPI_HOTPLUG_CPU */
+
++#ifdef CONFIG_X86
++static bool acpi_hwp_native_thermal_lvt_set;
++static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
++ u32 lvl,
++ void *context,
++ void **rv)
++{
++ u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
++ u32 capbuf[2];
++ struct acpi_osc_context osc_context = {
++ .uuid_str = sb_uuid_str,
++ .rev = 1,
++ .cap.length = 8,
++ .cap.pointer = capbuf,
++ };
++
++ if (acpi_hwp_native_thermal_lvt_set)
++ return AE_CTRL_TERMINATE;
++
++ capbuf[0] = 0x0000;
++ capbuf[1] = 0x1000; /* set bit 12 */
++
++ if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
++ if (osc_context.ret.pointer && osc_context.ret.length > 1) {
++ u32 *capbuf_ret = osc_context.ret.pointer;
++
++ if (capbuf_ret[1] & 0x1000) {
++ acpi_handle_info(handle,
++ "_OSC native thermal LVT Acked\n");
++ acpi_hwp_native_thermal_lvt_set = true;
++ }
++ }
++ kfree(osc_context.ret.pointer);
++ }
++
++ return AE_OK;
++}
++
++void __init acpi_early_processor_osc(void)
++{
++ if (boot_cpu_has(X86_FEATURE_HWP)) {
++ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
++ ACPI_UINT32_MAX,
++ acpi_hwp_native_thermal_lvt_osc,
++ NULL, NULL, NULL);
++ acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
++ acpi_hwp_native_thermal_lvt_osc,
++ NULL, NULL);
++ }
++}
++#endif
++
+ /*
+ * The following ACPI IDs are known to be suitable for representing as
+ * processor devices.
+diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
+index 6a72047aae1c..c3a052d43317 100644
+--- a/drivers/acpi/acpica/dsmethod.c
++++ b/drivers/acpi/acpica/dsmethod.c
+@@ -428,6 +428,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
+ obj_desc->method.mutex->mutex.
+ original_sync_level =
+ obj_desc->method.mutex->mutex.sync_level;
++
++ obj_desc->method.mutex->mutex.thread_id =
++ acpi_os_get_thread_id();
+ }
+ }
+
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 891c42d1cd65..f9081b791b81 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -1005,6 +1005,9 @@ static int __init acpi_bus_init(void)
+ goto error1;
+ }
+
++ /* Set capability bits for _OSC under processor scope */
++ acpi_early_processor_osc();
++
+ /*
+ * _OSC method may exist in module level code,
+ * so it must be run after ACPI_FULL_INITIALIZATION
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index 1e6833a5cd44..6f41c73f82bb 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -138,6 +138,12 @@ void acpi_early_processor_set_pdc(void);
+ static inline void acpi_early_processor_set_pdc(void) {}
+ #endif
+
++#ifdef CONFIG_X86
++void acpi_early_processor_osc(void);
++#else
++static inline void acpi_early_processor_osc(void) {}
++#endif
++
+ /* --------------------------------------------------------------------------
+ Embedded Controller
+ -------------------------------------------------------------------------- */
+diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
+index 04975b851c23..639adb1f8abd 100644
+--- a/drivers/ata/ahci_platform.c
++++ b/drivers/ata/ahci_platform.c
+@@ -51,6 +51,9 @@ static int ahci_probe(struct platform_device *pdev)
+ if (rc)
+ return rc;
+
++ of_property_read_u32(dev->of_node,
++ "ports-implemented", &hpriv->force_port_map);
++
+ if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
+ hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
+
+diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
+index 8e3f7faf00d3..73b19b277138 100644
+--- a/drivers/ata/ahci_xgene.c
++++ b/drivers/ata/ahci_xgene.c
+@@ -821,9 +821,9 @@ static int xgene_ahci_probe(struct platform_device *pdev)
+ dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
+ __func__);
+ version = XGENE_AHCI_V1;
+- }
+- if (info->valid & ACPI_VALID_CID)
++ } else if (info->valid & ACPI_VALID_CID) {
+ version = XGENE_AHCI_V2;
++ }
+ }
+ }
+ #endif
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 85ea5142a095..bb050ea26101 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -469,6 +469,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
+ dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
+ port_map, hpriv->force_port_map);
+ port_map = hpriv->force_port_map;
++ hpriv->saved_port_map = port_map;
+ }
+
+ if (hpriv->mask_port_map) {
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index e4c5cc107934..c65d41f4007a 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -618,8 +618,8 @@ static void nbd_request_handler(struct request_queue *q)
+ req, req->cmd_type);
+
+ if (unlikely(!nbd->sock)) {
+- dev_err(disk_to_dev(nbd->disk),
+- "Attempted send on closed socket\n");
++ dev_err_ratelimited(disk_to_dev(nbd->disk),
++ "Attempted send on closed socket\n");
+ req->errors++;
+ nbd_end_request(nbd, req);
+ spin_lock_irq(q->queue_lock);
+diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
+index 64a7b5971b57..cab97593ba54 100644
+--- a/drivers/block/null_blk.c
++++ b/drivers/block/null_blk.c
+@@ -742,10 +742,11 @@ static int null_add_dev(void)
+
+ add_disk(disk);
+
++done:
+ mutex_lock(&lock);
+ list_add_tail(&nullb->list, &nullb_list);
+ mutex_unlock(&lock);
+-done:
++
+ return 0;
+
+ out_cleanup_lightnvm:
+diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
+index e4f89e28b5ec..3a177ade6e6c 100644
+--- a/drivers/clk/bcm/clk-bcm2835-aux.c
++++ b/drivers/clk/bcm/clk-bcm2835-aux.c
+@@ -38,8 +38,8 @@ static int bcm2835_aux_clk_probe(struct platform_device *pdev)
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(dev, res);
+- if (!reg)
+- return -ENODEV;
++ if (IS_ERR(reg))
++ return PTR_ERR(reg);
+
+ onecell = devm_kmalloc(dev, sizeof(*onecell), GFP_KERNEL);
+ if (!onecell)
+diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
+index ded3ff4b91b9..aa1dacdaa39d 100644
+--- a/drivers/clk/clk-divider.c
++++ b/drivers/clk/clk-divider.c
+@@ -423,6 +423,12 @@ const struct clk_ops clk_divider_ops = {
+ };
+ EXPORT_SYMBOL_GPL(clk_divider_ops);
+
++const struct clk_ops clk_divider_ro_ops = {
++ .recalc_rate = clk_divider_recalc_rate,
++ .round_rate = clk_divider_round_rate,
++};
++EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
++
+ static struct clk *_register_divider(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+@@ -446,7 +452,10 @@ static struct clk *_register_divider(struct device *dev, const char *name,
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+- init.ops = &clk_divider_ops;
++ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
++ init.ops = &clk_divider_ro_ops;
++ else
++ init.ops = &clk_divider_ops;
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = (parent_name ? &parent_name: NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
+index 10224b01b97c..b134a8b15e2c 100644
+--- a/drivers/clk/clk-xgene.c
++++ b/drivers/clk/clk-xgene.c
+@@ -351,8 +351,8 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ /* Set new divider */
+ data = xgene_clk_read(pclk->param.divider_reg +
+ pclk->param.reg_divider_offset);
+- data &= ~((1 << pclk->param.reg_divider_width) - 1)
+- << pclk->param.reg_divider_shift;
++ data &= ~(((1 << pclk->param.reg_divider_width) - 1)
++ << pclk->param.reg_divider_shift);
+ data |= divider;
+ xgene_clk_write(data, pclk->param.divider_reg +
+ pclk->param.reg_divider_offset);
+diff --git a/drivers/clk/meson/clkc.c b/drivers/clk/meson/clkc.c
+index c83ae1367abc..d920d410b51d 100644
+--- a/drivers/clk/meson/clkc.c
++++ b/drivers/clk/meson/clkc.c
+@@ -198,7 +198,7 @@ meson_clk_register_fixed_rate(const struct clk_conf *clk_conf,
+ }
+
+ void __init meson_clk_register_clks(const struct clk_conf *clk_confs,
+- size_t nr_confs,
++ unsigned int nr_confs,
+ void __iomem *clk_base)
+ {
+ unsigned int i;
+diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
+index 13aabbb3acbe..558da89555af 100644
+--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
++++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
+@@ -222,7 +222,7 @@ static void lpc18xx_ccu_register_branch_gate_div(struct lpc18xx_clk_branch *bran
+ div->width = 1;
+
+ div_hw = &div->hw;
+- div_ops = &clk_divider_ops;
++ div_ops = &clk_divider_ro_ops;
+ }
+
+ branch->gate.reg = branch->offset + reg_base;
+diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
+index 983dd7dc89a7..0a0c1f533249 100644
+--- a/drivers/clk/qcom/gcc-msm8960.c
++++ b/drivers/clk/qcom/gcc-msm8960.c
+@@ -2753,7 +2753,7 @@ static struct clk_rcg ce3_src = {
+ },
+ .freq_tbl = clk_tbl_ce3,
+ .clkr = {
+- .enable_reg = 0x2c08,
++ .enable_reg = 0x36c0,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "ce3_src",
+@@ -2769,7 +2769,7 @@ static struct clk_branch ce3_core_clk = {
+ .halt_reg = 0x2fdc,
+ .halt_bit = 5,
+ .clkr = {
+- .enable_reg = 0x36c4,
++ .enable_reg = 0x36cc,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "ce3_core_clk",
+diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
+index 981a50205339..97f49aab8d42 100644
+--- a/drivers/clk/rockchip/clk-rk3228.c
++++ b/drivers/clk/rockchip/clk-rk3228.c
+@@ -605,13 +605,13 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
+
+ /* PD_MMC */
+ MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RK3228_SDMMC_CON0, 1),
+- MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 1),
++ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 0),
+
+ MMC(SCLK_SDIO_DRV, "sdio_drv", "sclk_sdio", RK3228_SDIO_CON0, 1),
+- MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", RK3228_SDIO_CON1, 1),
++ MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", RK3228_SDIO_CON1, 0),
+
+ MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RK3228_EMMC_CON0, 1),
+- MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3228_EMMC_CON1, 1),
++ MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3228_EMMC_CON1, 0),
+ };
+
+ static const char *const rk3228_critical_clocks[] __initconst = {
+diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
+index d9a0b5d4d47f..226af5720c9e 100644
+--- a/drivers/clk/rockchip/clk.c
++++ b/drivers/clk/rockchip/clk.c
+@@ -70,7 +70,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
+ if (gate_offset >= 0) {
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+- return ERR_PTR(-ENOMEM);
++ goto err_gate;
+
+ gate->flags = gate_flags;
+ gate->reg = base + gate_offset;
+@@ -82,7 +82,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
+ if (div_width > 0) {
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+- return ERR_PTR(-ENOMEM);
++ goto err_div;
+
+ div->flags = div_flags;
+ div->reg = base + muxdiv_offset;
+@@ -90,7 +90,9 @@ static struct clk *rockchip_clk_register_branch(const char *name,
+ div->width = div_width;
+ div->lock = lock;
+ div->table = div_table;
+- div_ops = &clk_divider_ops;
++ div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
++ ? &clk_divider_ro_ops
++ : &clk_divider_ops;
+ }
+
+ clk = clk_register_composite(NULL, name, parent_names, num_parents,
+@@ -100,6 +102,11 @@ static struct clk *rockchip_clk_register_branch(const char *name,
+ flags);
+
+ return clk;
++err_div:
++ kfree(gate);
++err_gate:
++ kfree(mux);
++ return ERR_PTR(-ENOMEM);
+ }
+
+ struct rockchip_clk_frac {
+diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
+index 7ba61103a6f5..2ea61debffc1 100644
+--- a/drivers/clk/sunxi/clk-sun8i-apb0.c
++++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
+@@ -36,7 +36,7 @@ static struct clk *sun8i_a23_apb0_register(struct device_node *node,
+
+ /* The A23 APB0 clock is a standard 2 bit wide divider clock */
+ clk = clk_register_divider(NULL, clk_name, clk_parent, 0, reg,
+- 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
++ 0, 2, 0, NULL);
+ if (IS_ERR(clk))
+ return clk;
+
+diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
+index e78755e0ef78..1fe1e8d970cf 100644
+--- a/drivers/clk/versatile/clk-sp810.c
++++ b/drivers/clk/versatile/clk-sp810.c
+@@ -92,6 +92,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
+ int num = ARRAY_SIZE(parent_names);
+ char name[12];
+ struct clk_init_data init;
++ static int instance;
+ int i;
+ bool deprecated;
+
+@@ -117,7 +118,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
+ deprecated = !of_find_property(node, "assigned-clock-parents", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
+- snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
++ snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
+
+ sp810->timerclken[i].sp810 = sp810;
+ sp810->timerclken[i].channel = i;
+@@ -138,5 +139,6 @@ static void __init clk_sp810_of_setup(struct device_node *node)
+ }
+
+ of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
++ instance++;
+ }
+ CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup);
+diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
+index 2bcecafdeaea..c407c47a3232 100644
+--- a/drivers/clocksource/tango_xtal.c
++++ b/drivers/clocksource/tango_xtal.c
+@@ -42,7 +42,7 @@ static void __init tango_clocksource_init(struct device_node *np)
+
+ ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
+ 32, clocksource_mmio_readl_up);
+- if (!ret) {
++ if (ret) {
+ pr_err("%s: registration failed\n", np->full_name);
+ return;
+ }
+diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
+index a9c659f58974..04042038ec4b 100644
+--- a/drivers/cpufreq/sti-cpufreq.c
++++ b/drivers/cpufreq/sti-cpufreq.c
+@@ -259,6 +259,10 @@ static int sti_cpufreq_init(void)
+ {
+ int ret;
+
++ if ((!of_machine_is_compatible("st,stih407")) &&
++ (!of_machine_is_compatible("st,stih410")))
++ return -ENODEV;
++
+ ddata.cpu = get_cpu_device(0);
+ if (!ddata.cpu) {
+ dev_err(ddata.cpu, "Failed to get device for CPU0\n");
+diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
+index 545069d5fdfb..e342565e8715 100644
+--- a/drivers/cpuidle/cpuidle-arm.c
++++ b/drivers/cpuidle/cpuidle-arm.c
+@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
+ * call the CPU ops suspend protocol with idle index as a
+ * parameter.
+ */
+- arm_cpuidle_suspend(idx);
++ ret = arm_cpuidle_suspend(idx);
+
+ cpu_pm_exit();
+ }
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 540cbc88c7a2..cc4d9bd0839e 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -977,7 +977,7 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
+ lookup = kmalloc(sizeof(*lookup), GFP_KERNEL);
+ if (lookup) {
+ lookup->adev = adev;
+- lookup->con_id = con_id;
++ lookup->con_id = kstrdup(con_id, GFP_KERNEL);
+ list_add_tail(&lookup->node, &acpi_crs_lookup_list);
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index b8fbbd7699e4..73628c7599e7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -540,6 +540,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
+ if (!metadata_size) {
+ if (bo->metadata_size) {
+ kfree(bo->metadata);
++ bo->metadata = NULL;
+ bo->metadata_size = 0;
+ }
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+index 1e0bba29e167..1cd6de575305 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+@@ -298,6 +298,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
+ && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+
++ /* vertical FP must be at least 1 */
++ if (mode->crtc_vsync_start == mode->crtc_vdisplay)
++ adjusted_mode->crtc_vsync_start++;
++
+ /* get the native mode for scaling */
+ if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+ amdgpu_panel_mode_fixup(encoder, adjusted_mode);
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index f357058c74d9..2e832fa07e09 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -797,7 +797,7 @@ static int i915_drm_resume(struct drm_device *dev)
+ static int i915_drm_resume_early(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- int ret = 0;
++ int ret;
+
+ /*
+ * We have a resume ordering issue with the snd-hda driver also
+@@ -808,6 +808,36 @@ static int i915_drm_resume_early(struct drm_device *dev)
+ * FIXME: This should be solved with a special hdmi sink device or
+ * similar so that power domains can be employed.
+ */
++
++ /*
++ * Note that we need to set the power state explicitly, since we
++ * powered off the device during freeze and the PCI core won't power
++ * it back up for us during thaw. Powering off the device during
++ * freeze is not a hard requirement though, and during the
++ * suspend/resume phases the PCI core makes sure we get here with the
++ * device powered on. So in case we change our freeze logic and keep
++ * the device powered we can also remove the following set power state
++ * call.
++ */
++ ret = pci_set_power_state(dev->pdev, PCI_D0);
++ if (ret) {
++ DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
++ goto out;
++ }
++
++ /*
++ * Note that pci_enable_device() first enables any parent bridge
++ * device and only then sets the power state for this device. The
++ * bridge enabling is a nop though, since bridge devices are resumed
++ * first. The order of enabling power and enabling the device is
++ * imposed by the PCI core as described above, so here we preserve the
++ * same order for the freeze/thaw phases.
++ *
++ * TODO: eventually we should remove pci_disable_device() /
++ * pci_enable_enable_device() from suspend/resume. Due to how they
++ * depend on the device enable refcount we can't anyway depend on them
++ * disabling/enabling the device.
++ */
+ if (pci_enable_device(dev->pdev)) {
+ ret = -EIO;
+ goto out;
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 4897728713f6..9b6737c85acb 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2898,7 +2898,14 @@ enum skl_disp_power_wells {
+ #define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
+ #define BXT_RP_STATE_CAP _MMIO(0x138170)
+
+-#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
++/*
++ * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
++ * 8300) freezing up around GPU hangs. Looks as if even
++ * scheduling/timer interrupts start misbehaving if the RPS
++ * EI/thresholds are "bad", leading to a very sluggish or even
++ * frozen machine.
++ */
++#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
+ #define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
+ #define INTERVAL_0_833_US(us) (((us) * 6) / 5)
+ #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
+diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
+index 084d5586585d..33b8e0a2b3fd 100644
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -464,9 +464,17 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
+ } else if (IS_BROADWELL(dev)) {
+ ddi_translations_fdi = bdw_ddi_translations_fdi;
+ ddi_translations_dp = bdw_ddi_translations_dp;
+- ddi_translations_edp = bdw_ddi_translations_edp;
++
++ if (dev_priv->edp_low_vswing) {
++ ddi_translations_edp = bdw_ddi_translations_edp;
++ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
++ } else {
++ ddi_translations_edp = bdw_ddi_translations_dp;
++ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
++ }
++
+ ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+- n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
++
+ n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+ n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+ hdmi_default_entry = 7;
+@@ -3260,12 +3268,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
+ intel_ddi_clock_get(encoder, pipe_config);
+ }
+
+-static void intel_ddi_destroy(struct drm_encoder *encoder)
+-{
+- /* HDMI has nothing special to destroy, so we can go with this. */
+- intel_dp_encoder_destroy(encoder);
+-}
+-
+ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+ {
+@@ -3284,7 +3286,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+ }
+
+ static const struct drm_encoder_funcs intel_ddi_funcs = {
+- .destroy = intel_ddi_destroy,
++ .reset = intel_dp_encoder_reset,
++ .destroy = intel_dp_encoder_destroy,
+ };
+
+ static struct intel_connector *
+@@ -3356,6 +3359,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
+ intel_encoder->post_disable = intel_ddi_post_disable;
+ intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+ intel_encoder->get_config = intel_ddi_get_config;
++ intel_encoder->suspend = intel_dp_encoder_suspend;
+
+ intel_dig_port->port = port;
+ intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index a9c35134f2e2..c023a04c44d0 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -13429,6 +13429,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
+ }
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
++ if (state->legacy_cursor_update)
++ continue;
++
+ ret = intel_crtc_wait_for_pending_flips(crtc);
+ if (ret)
+ return ret;
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index cdc2c15873dc..3cd4996c791c 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -4905,7 +4905,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+ kfree(intel_dig_port);
+ }
+
+-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
++void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+ {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+@@ -4947,7 +4947,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
+ edp_panel_vdd_schedule_off(intel_dp);
+ }
+
+-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
++void intel_dp_encoder_reset(struct drm_encoder *encoder)
+ {
+ struct intel_dp *intel_dp;
+
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index df7f3cb66056..3ce3bee368fe 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -1234,6 +1234,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
+ void intel_dp_start_link_train(struct intel_dp *intel_dp);
+ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
++void intel_dp_encoder_reset(struct drm_encoder *encoder);
++void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
+ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
+ bool intel_dp_compute_config(struct intel_encoder *encoder,
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 616108c4bc3e..43fdae8ff3c3 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -1407,8 +1407,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
+ hdmi_to_dig_port(intel_hdmi));
+ }
+
+- if (!live_status)
+- DRM_DEBUG_KMS("Live status not up!");
++ if (!live_status) {
++ DRM_DEBUG_KMS("HDMI live status down\n");
++ /*
++ * Live status register is not reliable on all intel platforms.
++ * So consider live_status only for certain platforms, for
++ * others, read EDID to determine presence of sink.
++ */
++ if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
++ live_status = true;
++ }
+
+ intel_hdmi_unset_edid(connector);
+
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 6104d7d7449e..9de6503b10d8 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+ && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+
++ /* vertical FP must be at least 1 */
++ if (mode->crtc_vsync_start == mode->crtc_vdisplay)
++ adjusted_mode->crtc_vsync_start++;
++
+ /* get the native mode for scaling */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+ radeon_panel_mode_fixup(encoder, adjusted_mode);
+diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
+index e00db3f510dd..abb98c77bad2 100644
+--- a/drivers/gpu/ipu-v3/ipu-common.c
++++ b/drivers/gpu/ipu-v3/ipu-common.c
+@@ -1068,7 +1068,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
+ goto err_register;
+ }
+
+- pdev->dev.of_node = of_node;
+ pdev->dev.parent = dev;
+
+ ret = platform_device_add_data(pdev, &reg->pdata,
+@@ -1079,6 +1078,12 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
+ platform_device_put(pdev);
+ goto err_register;
+ }
++
++ /*
++ * Set of_node only after calling platform_device_add. Otherwise
++ * the platform:imx-ipuv3-crtc modalias won't be used.
++ */
++ pdev->dev.of_node = of_node;
+ }
+
+ return 0;
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index b6ff6e78ac54..14c14c82795c 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -255,6 +255,7 @@
+ #define USB_DEVICE_ID_CORSAIR_K90 0x1b02
+
+ #define USB_VENDOR_ID_CREATIVELABS 0x041e
++#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c
+ #define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
+
+ #define USB_VENDOR_ID_CVTOUCH 0x1ff7
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 7dd0953cd70f..dc8e6adf95a4 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -70,6 +70,7 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
++ { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 94a8875b38fb..f71187aad0d0 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -3378,6 +3378,10 @@ static const struct wacom_features wacom_features_0x33E =
+ { "Wacom Intuos PT M 2", 21600, 13500, 2047, 63,
+ INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
+ .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
++static const struct wacom_features wacom_features_0x343 =
++ { "Wacom DTK1651", 34616, 19559, 1023, 0,
++ DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
++ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
+
+ static const struct wacom_features wacom_features_HID_ANY_ID =
+ { "Wacom HID", .type = HID_GENERIC };
+@@ -3543,6 +3547,7 @@ const struct hid_device_id wacom_ids[] = {
+ { USB_DEVICE_WACOM(0x33C) },
+ { USB_DEVICE_WACOM(0x33D) },
+ { USB_DEVICE_WACOM(0x33E) },
++ { USB_DEVICE_WACOM(0x343) },
+ { USB_DEVICE_WACOM(0x4001) },
+ { USB_DEVICE_WACOM(0x4004) },
+ { USB_DEVICE_WACOM(0x5000) },
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index b53702ce692f..e35560b955b1 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -103,15 +103,29 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+ * there is room for the producer to send the pending packet.
+ */
+
+-static bool hv_need_to_signal_on_read(u32 prev_write_sz,
+- struct hv_ring_buffer_info *rbi)
++static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
+ {
+ u32 cur_write_sz;
+ u32 r_size;
+- u32 write_loc = rbi->ring_buffer->write_index;
++ u32 write_loc;
+ u32 read_loc = rbi->ring_buffer->read_index;
+- u32 pending_sz = rbi->ring_buffer->pending_send_sz;
++ u32 pending_sz;
+
++ /*
++ * Issue a full memory barrier before making the signaling decision.
++ * Here is the reason for having this barrier:
++ * If the reading of the pend_sz (in this function)
++ * were to be reordered and read before we commit the new read
++ * index (in the calling function) we could
++ * have a problem. If the host were to set the pending_sz after we
++ * have sampled pending_sz and go to sleep before we commit the
++ * read index, we could miss sending the interrupt. Issue a full
++ * memory barrier to address this.
++ */
++ mb();
++
++ pending_sz = rbi->ring_buffer->pending_send_sz;
++ write_loc = rbi->ring_buffer->write_index;
+ /* If the other end is not blocked on write don't bother. */
+ if (pending_sz == 0)
+ return false;
+@@ -120,7 +134,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz,
+ cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
+ read_loc - write_loc;
+
+- if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
++ if (cur_write_sz >= pending_sz)
+ return true;
+
+ return false;
+@@ -458,7 +472,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+ /* Update the read index */
+ hv_set_next_read_location(inring_info, next_read_location);
+
+- *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
++ *signal = hv_need_to_signal_on_read(inring_info);
+
+ out_unlock:
+ spin_unlock_irqrestore(&inring_info->ring_lock, flags);
+diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
+index b13936dacc78..f2a7f72f7aa6 100644
+--- a/drivers/iio/magnetometer/ak8975.c
++++ b/drivers/iio/magnetometer/ak8975.c
+@@ -462,6 +462,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
+ int rc;
+ int irq;
+
++ init_waitqueue_head(&data->data_ready_queue);
++ clear_bit(0, &data->flags);
+ if (client->irq)
+ irq = client->irq;
+ else
+@@ -477,8 +479,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
+ return rc;
+ }
+
+- init_waitqueue_head(&data->data_ready_queue);
+- clear_bit(0, &data->flags);
+ data->eoc_irq = irq;
+
+ return rc;
+@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
+ int eoc_gpio;
+ int err;
+ const char *name = NULL;
+- enum asahi_compass_chipset chipset;
++ enum asahi_compass_chipset chipset = AK_MAX_TYPE;
+
+ /* Grab and set up the supplied GPIO. */
+ if (client->dev.platform_data)
+diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
+index cf21df4a8bf5..4e94cff5ba71 100644
+--- a/drivers/infiniband/hw/cxgb4/cq.c
++++ b/drivers/infiniband/hw/cxgb4/cq.c
+@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
+ cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
+ &cq->bar2_qid,
+ user ? &cq->bar2_pa : NULL);
+- if (user && !cq->bar2_va) {
++ if (user && !cq->bar2_pa) {
+ pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
+ pci_name(rdev->lldi.pdev), cq->cqid);
+ ret = -EINVAL;
+diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
+index e99345eb875a..8ff690bf09d9 100644
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
+
+ if (pbar2_pa)
+ *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
++
++ if (is_t4(rdev->lldi.adapter_type))
++ return NULL;
++
+ return rdev->bar2_kva + bar2_qoffset;
+ }
+
+@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+ /*
+ * User mode must have bar2 access.
+ */
+- if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
++ if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
+ pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
+ pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
+ goto free_dma;
+diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
+index 9bbadaaf6bc3..7b3845aa5983 100644
+--- a/drivers/input/touchscreen/zforce_ts.c
++++ b/drivers/input/touchscreen/zforce_ts.c
+@@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
+ point.coord_x = point.coord_y = 0;
+ }
+
+- point.state = payload[9 * i + 5] & 0x03;
+- point.id = (payload[9 * i + 5] & 0xfc) >> 2;
++ point.state = payload[9 * i + 5] & 0x0f;
++ point.id = (payload[9 * i + 5] & 0xf0) >> 4;
+
+ /* determine touch major, minor and orientation */
+ point.area_major = max(payload[9 * i + 6],
+diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
+index 307db1ea22de..b7ddfb352792 100644
+--- a/drivers/lightnvm/rrpc.c
++++ b/drivers/lightnvm/rrpc.c
+@@ -499,12 +499,21 @@ static void rrpc_gc_queue(struct work_struct *work)
+ struct rrpc *rrpc = gcb->rrpc;
+ struct rrpc_block *rblk = gcb->rblk;
+ struct nvm_lun *lun = rblk->parent->lun;
++ struct nvm_block *blk = rblk->parent;
+ struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
+
+ spin_lock(&rlun->lock);
+ list_add_tail(&rblk->prio, &rlun->prio_list);
+ spin_unlock(&rlun->lock);
+
++ spin_lock(&lun->lock);
++ lun->nr_open_blocks--;
++ lun->nr_closed_blocks++;
++ blk->state &= ~NVM_BLK_ST_OPEN;
++ blk->state |= NVM_BLK_ST_CLOSED;
++ list_move_tail(&rblk->list, &rlun->closed_list);
++ spin_unlock(&lun->lock);
++
+ mempool_free(gcb, rrpc->gcb_pool);
+ pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
+ rblk->parent->id);
+@@ -668,20 +677,8 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
+ lun = rblk->parent->lun;
+
+ cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
+- if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
+- struct nvm_block *blk = rblk->parent;
+- struct rrpc_lun *rlun = rblk->rlun;
+-
+- spin_lock(&lun->lock);
+- lun->nr_open_blocks--;
+- lun->nr_closed_blocks++;
+- blk->state &= ~NVM_BLK_ST_OPEN;
+- blk->state |= NVM_BLK_ST_CLOSED;
+- list_move_tail(&rblk->list, &rlun->closed_list);
+- spin_unlock(&lun->lock);
+-
++ if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
+ rrpc_run_gc(rrpc, rblk);
+- }
+ }
+ }
+
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index e55e6cf9ec17..7551278030d8 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -284,6 +284,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
+ * go away inside make_request
+ */
+ sectors = bio_sectors(bio);
++ /* bio could be mergeable after passing to underlayer */
++ bio->bi_rw &= ~REQ_NOMERGE;
+ mddev->pers->make_request(mddev, bio);
+
+ cpu = part_stat_lock();
+diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
+index 1743788f1595..1bbbe877ba7e 100644
+--- a/drivers/mfd/intel-lpss.c
++++ b/drivers/mfd/intel-lpss.c
+@@ -453,6 +453,7 @@ int intel_lpss_probe(struct device *dev,
+ err_remove_ltr:
+ intel_lpss_debugfs_remove(lpss);
+ intel_lpss_ltr_hide(lpss);
++ intel_lpss_unregister_clock(lpss);
+
+ err_clk_register:
+ ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 1545a944c309..b86fe50d5d93 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -423,6 +423,10 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
++ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
++ HCA_CAP_OPMOD_GET_MAX);
++ if (err)
++ return err;
+ } else {
+ return 0;
+ }
+diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
+index 91afa3ae414c..a192d451dab2 100644
+--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
+@@ -2143,11 +2143,7 @@ EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
+ void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
+ struct sk_buff *skb)
+ {
+- struct ath10k_pktlog_10_4_hdr *hdr =
+- (struct ath10k_pktlog_10_4_hdr *)skb->data;
+-
+- trace_ath10k_htt_pktlog(ar, hdr->payload,
+- sizeof(*hdr) + __le16_to_cpu(hdr->size));
++ trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ }
+ EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
+diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+index 8f8793004b9f..1b271b99c49e 100644
+--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
++++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+@@ -274,6 +274,9 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
+ };
+ static const int inc[4] = { 0, 100, 0, 0 };
+
++ memset(&mask_m, 0, sizeof(int8_t) * 123);
++ memset(&mask_p, 0, sizeof(int8_t) * 123);
++
+ cur_bin = -6000;
+ upper = bin + 100;
+ lower = bin - 100;
+@@ -424,14 +427,9 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
+ int tmp, new;
+ int i;
+
+- int8_t mask_m[123];
+- int8_t mask_p[123];
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+- memset(&mask_m, 0, sizeof(int8_t) * 123);
+- memset(&mask_p, 0, sizeof(int8_t) * 123);
+-
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+ if (AR_NO_SPUR == cur_bb_spur)
+diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+index db6624527d99..53d7445a5d12 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
++++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+@@ -178,14 +178,9 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
+ int i;
+ struct chan_centers centers;
+
+- int8_t mask_m[123];
+- int8_t mask_p[123];
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+- memset(&mask_m, 0, sizeof(int8_t) * 123);
+- memset(&mask_p, 0, sizeof(int8_t) * 123);
+-
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+index 6a4fc5d183cf..d7db6f23e728 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+@@ -314,6 +314,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
+ mwifiex_dbg(adapter, ERROR,
+ "Attempt to reconnect on csa closed chan(%d)\n",
+ bss_desc->channel);
++ ret = -1;
+ goto done;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
+index 28f7010e7108..1aca77719521 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
+@@ -41,7 +41,7 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_sta_info *sta_entry = NULL;
+- u8 wireless_mode = 0;
++ u16 wireless_mode = 0;
+
+ /*
+ *this rate is no use for true rate, firmware
+@@ -99,7 +99,7 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
+ {
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ struct rtl_sta_info *sta_entry = NULL;
+- u8 wireless_mode = 0;
++ u16 wireless_mode = 0;
+ u8 sgi_20 = 0, sgi_40 = 0, sgi_80 = 0;
+
+ if (sta) {
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+index bbb789f8990b..5e5719b26774 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+@@ -3855,7 +3855,7 @@ void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw)
+ {
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+- u8 wireless_mode = mac->mode;
++ u16 wireless_mode = mac->mode;
+ u8 sifs_timer, r2t_sifs;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+index 4544752a2ba8..84397b190cc0 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
++++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+@@ -1323,14 +1323,13 @@ struct rtl_tid_data {
+
+ struct rtl_sta_info {
+ struct list_head list;
+- u8 ratr_index;
+- u8 wireless_mode;
+- u8 mimo_ps;
+- u8 mac_addr[ETH_ALEN];
+ struct rtl_tid_data tids[MAX_TID_COUNT];
+-
+ /* just used for ap adhoc or mesh*/
+ struct rssi_sta rssi_stat;
++ u16 wireless_mode;
++ u8 ratr_index;
++ u8 mimo_ps;
++ u8 mac_addr[ETH_ALEN];
+ } __packed;
+
+ struct rtl_priv;
+diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
+index c96405498bf4..4b59f67724de 100644
+--- a/drivers/net/wireless/ti/wlcore/event.c
++++ b/drivers/net/wireless/ti/wlcore/event.c
+@@ -38,7 +38,7 @@
+
+ int wlcore_event_fw_logger(struct wl1271 *wl)
+ {
+- u32 ret;
++ int ret;
+ struct fw_logger_information fw_log;
+ u8 *buffer;
+ u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
+diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
+index 544b802a594c..02c574f8ccb3 100644
+--- a/drivers/nvdimm/pmem.c
++++ b/drivers/nvdimm/pmem.c
+@@ -314,9 +314,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
+ * implementation will limit the pfns advertised through
+ * ->direct_access() to those that are included in the memmap.
+ */
+- if (nd_pfn->mode == PFN_MODE_PMEM)
+- offset = ALIGN(SZ_8K + 64 * npfns, nd_pfn->align);
+- else if (nd_pfn->mode == PFN_MODE_RAM)
++ if (nd_pfn->mode == PFN_MODE_PMEM) {
++ unsigned long memmap_size;
++
++ /*
++ * vmemmap_populate_hugepages() allocates the memmap array in
++ * HPAGE_SIZE chunks.
++ */
++ memmap_size = ALIGN(64 * npfns, PMD_SIZE);
++ offset = ALIGN(SZ_8K + memmap_size, nd_pfn->align);
++ } else if (nd_pfn->mode == PFN_MODE_RAM)
+ offset = ALIGN(SZ_8K, nd_pfn->align);
+ else
+ goto err;
+diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
+index 8ba19bba3156..2bb3c5799ac4 100644
+--- a/drivers/nvmem/mxs-ocotp.c
++++ b/drivers/nvmem/mxs-ocotp.c
+@@ -94,7 +94,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
+ if (ret)
+ goto close_banks;
+
+- while (val_size) {
++ while (val_size >= reg_size) {
+ if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
+ /* fill up non-data register */
+ *buf = 0;
+@@ -103,7 +103,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
+ }
+
+ buf++;
+- val_size--;
++ val_size -= reg_size;
+ offset += reg_size;
+ }
+
+diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
+index 826634ec0d5c..e0679eb399f6 100644
+--- a/drivers/pwm/pwm-omap-dmtimer.c
++++ b/drivers/pwm/pwm-omap-dmtimer.c
+@@ -31,6 +31,7 @@
+ #include <linux/time.h>
+
+ #define DM_TIMER_LOAD_MIN 0xfffffffe
++#define DM_TIMER_MAX 0xffffffff
+
+ struct pwm_omap_dmtimer_chip {
+ struct pwm_chip chip;
+@@ -46,13 +47,9 @@ to_pwm_omap_dmtimer_chip(struct pwm_chip *chip)
+ return container_of(chip, struct pwm_omap_dmtimer_chip, chip);
+ }
+
+-static int pwm_omap_dmtimer_calc_value(unsigned long clk_rate, int ns)
++static u32 pwm_omap_dmtimer_get_clock_cycles(unsigned long clk_rate, int ns)
+ {
+- u64 c = (u64)clk_rate * ns;
+-
+- do_div(c, NSEC_PER_SEC);
+-
+- return DM_TIMER_LOAD_MIN - c;
++ return DIV_ROUND_CLOSEST_ULL((u64)clk_rate * ns, NSEC_PER_SEC);
+ }
+
+ static void pwm_omap_dmtimer_start(struct pwm_omap_dmtimer_chip *omap)
+@@ -99,7 +96,8 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
+ int duty_ns, int period_ns)
+ {
+ struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
+- int load_value, match_value;
++ u32 period_cycles, duty_cycles;
++ u32 load_value, match_value;
+ struct clk *fclk;
+ unsigned long clk_rate;
+ bool timer_active;
+@@ -117,15 +115,13 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
+ fclk = omap->pdata->get_fclk(omap->dm_timer);
+ if (!fclk) {
+ dev_err(chip->dev, "invalid pmtimer fclk\n");
+- mutex_unlock(&omap->mutex);
+- return -EINVAL;
++ goto err_einval;
+ }
+
+ clk_rate = clk_get_rate(fclk);
+ if (!clk_rate) {
+ dev_err(chip->dev, "invalid pmtimer fclk rate\n");
+- mutex_unlock(&omap->mutex);
+- return -EINVAL;
++ goto err_einval;
+ }
+
+ dev_dbg(chip->dev, "clk rate: %luHz\n", clk_rate);
+@@ -133,11 +129,45 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
+ /*
+ * Calculate the appropriate load and match values based on the
+ * specified period and duty cycle. The load value determines the
+- * cycle time and the match value determines the duty cycle.
++ * period time and the match value determines the duty time.
++ *
++ * The period lasts for (DM_TIMER_MAX-load_value+1) clock cycles.
++ * Similarly, the active time lasts (match_value-load_value+1) cycles.
++ * The non-active time is the remainder: (DM_TIMER_MAX-match_value)
++ * clock cycles.
++ *
++ * NOTE: It is required that: load_value <= match_value < DM_TIMER_MAX
++ *
++ * References:
++ * OMAP4430/60/70 TRM sections 22.2.4.10 and 22.2.4.11
++ * AM335x Sitara TRM sections 20.1.3.5 and 20.1.3.6
+ */
+- load_value = pwm_omap_dmtimer_calc_value(clk_rate, period_ns);
+- match_value = pwm_omap_dmtimer_calc_value(clk_rate,
+- period_ns - duty_ns);
++ period_cycles = pwm_omap_dmtimer_get_clock_cycles(clk_rate, period_ns);
++ duty_cycles = pwm_omap_dmtimer_get_clock_cycles(clk_rate, duty_ns);
++
++ if (period_cycles < 2) {
++ dev_info(chip->dev,
++ "period %d ns too short for clock rate %lu Hz\n",
++ period_ns, clk_rate);
++ goto err_einval;
++ }
++
++ if (duty_cycles < 1) {
++ dev_dbg(chip->dev,
++ "duty cycle %d ns is too short for clock rate %lu Hz\n",
++ duty_ns, clk_rate);
++ dev_dbg(chip->dev, "using minimum of 1 clock cycle\n");
++ duty_cycles = 1;
++ } else if (duty_cycles >= period_cycles) {
++ dev_dbg(chip->dev,
++ "duty cycle %d ns is too long for period %d ns at clock rate %lu Hz\n",
++ duty_ns, period_ns, clk_rate);
++ dev_dbg(chip->dev, "using maximum of 1 clock cycle less than period\n");
++ duty_cycles = period_cycles - 1;
++ }
++
++ load_value = (DM_TIMER_MAX - period_cycles) + 1;
++ match_value = load_value + duty_cycles - 1;
+
+ /*
+ * We MUST stop the associated dual-mode timer before attempting to
+@@ -166,6 +196,11 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
+ mutex_unlock(&omap->mutex);
+
+ return 0;
++
++err_einval:
++ mutex_unlock(&omap->mutex);
++
++ return -EINVAL;
+ }
+
+ static int pwm_omap_dmtimer_set_polarity(struct pwm_chip *chip,
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index a544366a367e..f57d02c3b6cf 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -2860,7 +2860,7 @@ lpfc_online(struct lpfc_hba *phba)
+ }
+
+ vports = lpfc_create_vport_work_array(phba);
+- if (vports != NULL)
++ if (vports != NULL) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ struct Scsi_Host *shost;
+ shost = lpfc_shost_from_vport(vports[i]);
+@@ -2877,7 +2877,8 @@ lpfc_online(struct lpfc_hba *phba)
+ }
+ spin_unlock_irq(shost->host_lock);
+ }
+- lpfc_destroy_vport_work_array(phba, vports);
++ }
++ lpfc_destroy_vport_work_array(phba, vports);
+
+ lpfc_unblock_mgmt_io(phba);
+ return 0;
+diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
+index 534c58937a56..4a65c5bda146 100644
+--- a/drivers/soc/rockchip/pm_domains.c
++++ b/drivers/soc/rockchip/pm_domains.c
+@@ -419,6 +419,7 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
+ if (error) {
+ dev_err(dev, "failed to handle node %s: %d\n",
+ node->name, error);
++ of_node_put(node);
+ goto err_out;
+ }
+ }
+diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
+index 14718a9ffcfb..460c855be0d0 100644
+--- a/drivers/usb/core/port.c
++++ b/drivers/usb/core/port.c
+@@ -249,18 +249,12 @@ static int usb_port_runtime_suspend(struct device *dev)
+
+ return retval;
+ }
+-
+-static int usb_port_prepare(struct device *dev)
+-{
+- return 1;
+-}
+ #endif
+
+ static const struct dev_pm_ops usb_port_pm_ops = {
+ #ifdef CONFIG_PM
+ .runtime_suspend = usb_port_runtime_suspend,
+ .runtime_resume = usb_port_runtime_resume,
+- .prepare = usb_port_prepare,
+ #endif
+ };
+
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index ebb29caa3fe4..77e4c9bc0ab1 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -311,13 +311,7 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+
+ static int usb_dev_prepare(struct device *dev)
+ {
+- struct usb_device *udev = to_usb_device(dev);
+-
+- /* Return 0 if the current wakeup setting is wrong, otherwise 1 */
+- if (udev->do_remote_wakeup != device_may_wakeup(dev))
+- return 0;
+-
+- return 1;
++ return 0; /* Implement eventually? */
+ }
+
+ static void usb_dev_complete(struct device *dev)
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 9bc0e090b881..a543cdc0f88f 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -109,6 +109,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
+ { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
+ { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
++ { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
+ { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
+ { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
+ { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
+@@ -118,6 +119,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
+ { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
++ { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
+ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
+ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+@@ -141,6 +143,8 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
+ { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
+ { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
++ { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
++ { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
+ { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
+ { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+ { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
+index 12eab503efd1..364bc44610c1 100644
+--- a/drivers/xen/balloon.c
++++ b/drivers/xen/balloon.c
+@@ -152,6 +152,8 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
+ static void balloon_process(struct work_struct *work);
+ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
+
++static void release_memory_resource(struct resource *resource);
++
+ /* When ballooning out (allocating memory to return to Xen) we don't really
+ want the kernel to try too hard since that can trigger the oom killer. */
+ #define GFP_BALLOON \
+@@ -268,6 +270,20 @@ static struct resource *additional_memory_resource(phys_addr_t size)
+ return NULL;
+ }
+
++#ifdef CONFIG_SPARSEMEM
++ {
++ unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
++ unsigned long pfn = res->start >> PAGE_SHIFT;
++
++ if (pfn > limit) {
++ pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
++ pfn, limit);
++ release_memory_resource(res);
++ return NULL;
++ }
++ }
++#endif
++
+ return res;
+ }
+
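Editor's note: the added SPARSEMEM guard above rejects a hotplug resource whose starting pfn cannot be represented by the memory model. A small sketch of that limit check; the MAX_PHYSMEM_BITS, PAGE_SHIFT, and resource address below are illustrative assumptions, not values taken from the patch:

#include <stdio.h>

#define MAX_PHYSMEM_BITS 46  /* illustrative, e.g. x86-64 with 4-level paging */
#define PAGE_SHIFT       12

int main(void)
{
	/* Highest pfn SPARSEMEM can address. */
	unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
	unsigned long res_start = 0x500000000000UL;  /* hypothetical resource */
	unsigned long pfn = res_start >> PAGE_SHIFT;

	if (pfn > limit)
		printf("pfn 0x%lx outside addressable RAM (limit 0x%lx): reject\n",
		       pfn, limit);
	else
		printf("pfn 0x%lx is addressable (limit 0x%lx)\n", pfn, limit);
	return 0;
}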
+diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
+index 38272ad24551..f4edd6df3df2 100644
+--- a/drivers/xen/evtchn.c
++++ b/drivers/xen/evtchn.c
+@@ -316,7 +316,6 @@ static int evtchn_resize_ring(struct per_user_data *u)
+ {
+ unsigned int new_size;
+ evtchn_port_t *new_ring, *old_ring;
+- unsigned int p, c;
+
+ /*
+ * Ensure the ring is large enough to capture all possible
+@@ -346,20 +345,17 @@ static int evtchn_resize_ring(struct per_user_data *u)
+ /*
+ * Copy the old ring contents to the new ring.
+ *
+- * If the ring contents crosses the end of the current ring,
+- * it needs to be copied in two chunks.
++ * To take care of wrapping, a full ring, and the new index
++ * pointing into the second half, simply copy the old contents
++ * twice.
+ *
+ * +---------+ +------------------+
+- * |34567 12| -> | 1234567 |
+- * +-----p-c-+ +------------------+
++ * |34567 12| -> |34567 1234567 12|
++ * +-----p-c-+ +-------c------p---+
+ */
+- p = evtchn_ring_offset(u, u->ring_prod);
+- c = evtchn_ring_offset(u, u->ring_cons);
+- if (p < c) {
+- memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
+- memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
+- } else
+- memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
++ memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
++ memcpy(new_ring + u->ring_size, old_ring,
++ u->ring_size * sizeof(*u->ring));
+
+ u->ring = new_ring;
+ u->ring_size = new_size;
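Editor's note: the rewritten copy above works because the ring size doubles: duplicating the old contents guarantees that any index masked with either the old or the new size reads the same slot, so no two-chunk case analysis is needed. A self-contained sketch of that invariant; sizes and contents are illustrative:

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int old_size = 8, new_size = 16;  /* power-of-two ring sizes */
	int old_ring[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
	int new_ring[16];

	/* Copy the old contents twice, as the patched evtchn_resize_ring()
	 * does, instead of splitting the copy around the wrap point. */
	memcpy(new_ring, old_ring, old_size * sizeof(*old_ring));
	memcpy(new_ring + old_size, old_ring, old_size * sizeof(*old_ring));

	/* Every absolute index reads the same value through either mask. */
	for (unsigned int idx = 0; idx < 4 * new_size; idx++)
		assert(new_ring[idx & (new_size - 1)] ==
		       old_ring[idx & (old_size - 1)]);

	printf("copy-twice invariant holds\n");
	return 0;
}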
+diff --git a/fs/pnode.c b/fs/pnode.c
+index c524fdddc7fb..99899705b105 100644
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -198,7 +198,7 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
+
+ /* all accesses are serialized by namespace_sem */
+ static struct user_namespace *user_ns;
+-static struct mount *last_dest, *last_source, *dest_master;
++static struct mount *last_dest, *first_source, *last_source, *dest_master;
+ static struct mountpoint *mp;
+ static struct hlist_head *list;
+
+@@ -221,20 +221,22 @@ static int propagate_one(struct mount *m)
+ type = CL_MAKE_SHARED;
+ } else {
+ struct mount *n, *p;
++ bool done;
+ for (n = m; ; n = p) {
+ p = n->mnt_master;
+- if (p == dest_master || IS_MNT_MARKED(p)) {
+- while (last_dest->mnt_master != p) {
+- last_source = last_source->mnt_master;
+- last_dest = last_source->mnt_parent;
+- }
+- if (!peers(n, last_dest)) {
+- last_source = last_source->mnt_master;
+- last_dest = last_source->mnt_parent;
+- }
++ if (p == dest_master || IS_MNT_MARKED(p))
+ break;
+- }
+ }
++ do {
++ struct mount *parent = last_source->mnt_parent;
++ if (last_source == first_source)
++ break;
++ done = parent->mnt_master == p;
++ if (done && peers(n, parent))
++ break;
++ last_source = last_source->mnt_master;
++ } while (!done);
++
+ type = CL_SLAVE;
+ /* beginning of peer group among the slaves? */
+ if (IS_MNT_SHARED(m))
+@@ -286,6 +288,7 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
+ */
+ user_ns = current->nsproxy->mnt_ns->user_ns;
+ last_dest = dest_mnt;
++ first_source = source_mnt;
+ last_source = source_mnt;
+ mp = dest_mp;
+ list = tree_list;
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 4f764c2ac1a5..45f2162e55b2 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -955,7 +955,8 @@ static ssize_t environ_read(struct file *file, char __user *buf,
+ struct mm_struct *mm = file->private_data;
+ unsigned long env_start, env_end;
+
+- if (!mm)
++ /* Ensure the process spawned far enough to have an environment. */
++ if (!mm || !mm->env_end)
+ return 0;
+
+ page = (char *)__get_free_page(GFP_TEMPORARY);
+diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
+index 1143e38555a4..408a60dca353 100644
+--- a/include/linux/clk-provider.h
++++ b/include/linux/clk-provider.h
+@@ -385,6 +385,7 @@ struct clk_divider {
+ #define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
+
+ extern const struct clk_ops clk_divider_ops;
++extern const struct clk_ops clk_divider_ro_ops;
+
+ unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+ unsigned int val, const struct clk_div_table *table,
+diff --git a/include/linux/hash.h b/include/linux/hash.h
+index 1afde47e1528..79c52fa81cac 100644
+--- a/include/linux/hash.h
++++ b/include/linux/hash.h
+@@ -32,12 +32,28 @@
+ #error Wordsize not 32 or 64
+ #endif
+
++/*
++ * The above primes are actively bad for hashing, since they are
++ * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
++ * real problems. Besides, the "prime" part is pointless for the
++ * multiplicative hash.
++ *
++ * Although a random odd number will do, it turns out that the golden
++ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
++ * properties.
++ *
++ * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
++ * (See Knuth vol 3, section 6.4, exercise 9.)
++ */
++#define GOLDEN_RATIO_32 0x61C88647
++#define GOLDEN_RATIO_64 0x61C8864680B583EBull
++
+ static __always_inline u64 hash_64(u64 val, unsigned int bits)
+ {
+ u64 hash = val;
+
+-#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+- hash = hash * GOLDEN_RATIO_PRIME_64;
++#if BITS_PER_LONG == 64
++ hash = hash * GOLDEN_RATIO_64;
+ #else
+ /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
+ u64 n = hash;
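Editor's note: the new comment motivates replacing the sparse primes with the golden-ratio constant for the multiplicative hash. A user-space sketch of the 64-bit variant; the constant is copied from the hunk, while the final keep-the-top-bits shift follows the usual hash_64() pattern and is an assumption here:

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_64 0x61C8864680B583EBull

/* Multiplicative hash: multiply by a well-mixing odd constant and keep
 * the top 'bits' bits, which depend on all bits of the input. */
static uint64_t hash_64(uint64_t val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_64) >> (64 - bits);
}

int main(void)
{
	for (uint64_t v = 0; v < 4; v++)
		printf("hash_64(%llu, 8) = %llu\n",
		       (unsigned long long)v,
		       (unsigned long long)hash_64(v, 8));
	return 0;
}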
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index d18b65c53dbb..5fa4aa4ddd05 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -533,6 +533,10 @@ static inline swp_entry_t get_swap_page(void)
+ #ifdef CONFIG_MEMCG
+ static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
+ {
++ /* Cgroup2 doesn't have per-cgroup swappiness */
++ if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
++ return vm_swappiness;
++
+ /* root ? */
+ if (mem_cgroup_disabled() || !memcg->css.parent)
+ return vm_swappiness;
+diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
+index 0816c872b689..a6cc576fd467 100644
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -1588,6 +1588,23 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
+ }
+ #endif /* CONFIG_IP_VS_NFCT */
+
++/* Really using conntrack? */
++static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
++ struct sk_buff *skb)
++{
++#ifdef CONFIG_IP_VS_NFCT
++ enum ip_conntrack_info ctinfo;
++ struct nf_conn *ct;
++
++ if (!(cp->flags & IP_VS_CONN_F_NFCT))
++ return false;
++ ct = nf_ct_get(skb, &ctinfo);
++ if (ct && !nf_ct_is_untracked(ct))
++ return true;
++#endif
++ return false;
++}
++
+ static inline int
+ ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
+ {
+diff --git a/include/xen/page.h b/include/xen/page.h
+index 96294ac93755..9dc46cb8a0fd 100644
+--- a/include/xen/page.h
++++ b/include/xen/page.h
+@@ -15,9 +15,9 @@
+ */
+
+ #define xen_pfn_to_page(xen_pfn) \
+- ((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT)))
++ (pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT)))
+ #define page_to_xen_pfn(page) \
+- (((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT)
++ ((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT))
+
+ #define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)
+
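Editor's note: the corrected macros above rely on PAGE_SHIFT being at least XEN_PAGE_SHIFT, so converting between Xen frame numbers and kernel pfns is a single shift by the difference. A quick sketch with illustrative shift values (64 KiB kernel pages over Xen's fixed 4 KiB granules):

#include <stdio.h>

#define PAGE_SHIFT     16  /* illustrative: 64 KiB kernel pages */
#define XEN_PAGE_SHIFT 12  /* Xen always uses 4 KiB frames */

/* One kernel page covers 2^(PAGE_SHIFT - XEN_PAGE_SHIFT) Xen frames. */
static unsigned long xen_pfn_to_pfn(unsigned long xen_pfn)
{
	return xen_pfn >> (PAGE_SHIFT - XEN_PAGE_SHIFT);
}

static unsigned long pfn_to_xen_pfn(unsigned long pfn)
{
	return pfn << (PAGE_SHIFT - XEN_PAGE_SHIFT);
}

int main(void)
{
	unsigned long xen_pfn = 0x12345;
	unsigned long pfn = xen_pfn_to_pfn(xen_pfn);

	printf("xen_pfn 0x%lx -> kernel pfn 0x%lx -> first xen_pfn 0x%lx\n",
	       xen_pfn, pfn, pfn_to_xen_pfn(pfn));
	return 0;
}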
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 05ddc0820771..6f965864cc02 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -2095,8 +2095,13 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
+ trace_create_file("filter", 0644, file->dir, file,
+ &ftrace_event_filter_fops);
+
+- trace_create_file("trigger", 0644, file->dir, file,
+- &event_trigger_fops);
++ /*
++ * Only event directories that can be enabled should have
++ * triggers.
++ */
++ if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
++ trace_create_file("trigger", 0644, file->dir, file,
++ &event_trigger_fops);
+
+ trace_create_file("format", 0444, file->dir, call,
+ &ftrace_event_format_fops);
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 585de54dbe8c..29fb26970fba 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -880,16 +880,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
+ pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
+ ISOLATE_UNEVICTABLE);
+
+- /*
+- * In case of fatal failure, release everything that might
+- * have been isolated in the previous iteration, and signal
+- * the failure back to caller.
+- */
+- if (!pfn) {
+- putback_movable_pages(&cc->migratepages);
+- cc->nr_migratepages = 0;
++ if (!pfn)
+ break;
+- }
+
+ if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
+ break;
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 6fe7d15bd1f7..62bbf350ddf7 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -1909,7 +1909,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
+ if (gdtc->dirty > gdtc->bg_thresh)
+ return true;
+
+- if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
++ if (wb_stat(wb, WB_RECLAIMABLE) >
++ wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
+ return true;
+
+ if (mdtc) {
+@@ -1923,7 +1924,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
+ if (mdtc->dirty > mdtc->bg_thresh)
+ return true;
+
+- if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
++ if (wb_stat(wb, WB_RECLAIMABLE) >
++ wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
+ return true;
+ }
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 9d9044e91ac5..629ce645cffd 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6214,7 +6214,7 @@ int __meminit init_per_zone_wmark_min(void)
+ setup_per_zone_inactive_ratio();
+ return 0;
+ }
+-module_init(init_per_zone_wmark_min)
++core_initcall(init_per_zone_wmark_min)
+
+ /*
+ * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
+diff --git a/mm/zswap.c b/mm/zswap.c
+index bf14508afd64..340261946fda 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -170,6 +170,8 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
+ static LIST_HEAD(zswap_pools);
+ /* protects zswap_pools list modification */
+ static DEFINE_SPINLOCK(zswap_pools_lock);
++/* pool counter to provide unique names to zpool */
++static atomic_t zswap_pools_count = ATOMIC_INIT(0);
+
+ /* used by param callback function */
+ static bool zswap_init_started;
+@@ -565,6 +567,7 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
+ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
+ {
+ struct zswap_pool *pool;
++ char name[38]; /* 'zswap' + 32 char (max) num + \0 */
+ gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+@@ -573,7 +576,10 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
+ return NULL;
+ }
+
+- pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
++ /* unique name for each pool specifically required by zsmalloc */
++ snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
++
++ pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
+ if (!pool->zpool) {
+ pr_err("%s zpool not available\n", type);
+ goto error;
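Editor's note: the zswap change above makes each zpool name unique by appending an incrementing counter, which zsmalloc requires. A user-space sketch of the naming scheme, using a C11 atomic counter in place of the kernel's atomic_t; the buffer size follows the hunk's own comment:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static atomic_int pools_count;  /* stands in for zswap_pools_count */

/* 'zswap' + up to 32 hex digits + NUL, as in the hunk's comment. */
static void make_pool_name(char *name, size_t len)
{
	snprintf(name, len, "zswap%x", atomic_fetch_add(&pools_count, 1) + 1);
}

int main(void)
{
	char a[38], b[38];

	make_pool_name(a, sizeof(a));
	make_pool_name(b, sizeof(b));
	printf("%s %s\n", a, b);  /* e.g. "zswap1 zswap2" */
	return 0;
}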
+diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
+index a49c705fb86b..5f19133c5530 100644
+--- a/net/batman-adv/distributed-arp-table.c
++++ b/net/batman-adv/distributed-arp-table.c
+@@ -553,6 +553,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
+ * be sent to
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ip_dst: ipv4 to look up in the DHT
++ * @vid: VLAN identifier
+ *
+ * An originator O is selected if and only if its DHT_ID value is one of three
+ * closest values (from the LEFT, with wrap around if needed) then the hash
+@@ -561,7 +562,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
+ * Returns the candidate array of size BATADV_DAT_CANDIDATE_NUM.
+ */
+ static struct batadv_dat_candidate *
+-batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
++batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
++ unsigned short vid)
+ {
+ int select;
+ batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
+@@ -577,7 +579,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+ return NULL;
+
+ dat.ip = ip_dst;
+- dat.vid = 0;
++ dat.vid = vid;
+ ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
+ BATADV_DAT_ADDR_MAX);
+
+@@ -597,6 +599,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @ip: the DHT key
++ * @vid: VLAN identifier
+ * @packet_subtype: unicast4addr packet subtype to use
+ *
+ * This function copies the skb with pskb_copy() and is sent as unicast packet
+@@ -607,7 +610,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+ */
+ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, __be32 ip,
+- int packet_subtype)
++ unsigned short vid, int packet_subtype)
+ {
+ int i;
+ bool ret = false;
+@@ -616,7 +619,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
+ struct sk_buff *tmp_skb;
+ struct batadv_dat_candidate *cand;
+
+- cand = batadv_dat_select_candidates(bat_priv, ip);
++ cand = batadv_dat_select_candidates(bat_priv, ip, vid);
+ if (!cand)
+ goto out;
+
+@@ -1004,7 +1007,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
+ ret = true;
+ } else {
+ /* Send the request to the DHT */
+- ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
++ ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
+ BATADV_P_DAT_DHT_GET);
+ }
+ out:
+@@ -1132,8 +1135,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
+ /* Send the ARP reply to the candidates for both the IP addresses that
+ * the node obtained from the ARP reply
+ */
+- batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
+- batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
++ batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
++ batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
+ }
+
+ /**
+diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
+index e4f2646d9246..43d15d6c4b6a 100644
+--- a/net/batman-adv/routing.c
++++ b/net/batman-adv/routing.c
+@@ -104,6 +104,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
+ neigh_node = NULL;
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
++ /* curr_router used earlier may not be the current orig_ifinfo->router
++ * anymore because it was dereferenced outside of the neigh_list_lock
++	 * protected region. After the new best neighbor has replaced the current
++	 * best neighbor, the reference counter needs to decrease. Consequently,
++ * the code needs to ensure the curr_router variable contains a pointer
++ * to the replaced best neighbor.
++ */
++ curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
++
+ rcu_assign_pointer(orig_ifinfo->router, neigh_node);
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+ batadv_orig_ifinfo_free_ref(orig_ifinfo);
+diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
+index 782fa33ec296..45bfdefa15a5 100644
+--- a/net/batman-adv/send.c
++++ b/net/batman-adv/send.c
+@@ -629,6 +629,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+
+ if (pending) {
+ hlist_del(&forw_packet->list);
++ if (!forw_packet->own)
++ atomic_inc(&bat_priv->bcast_queue_left);
++
+ batadv_forw_packet_free(forw_packet);
+ }
+ }
+@@ -656,6 +659,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+
+ if (pending) {
+ hlist_del(&forw_packet->list);
++ if (!forw_packet->own)
++ atomic_inc(&bat_priv->batman_queue_left);
++
+ batadv_forw_packet_free(forw_packet);
+ }
+ }
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index ac4d08de5df4..720f1a5b81ac 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -407,11 +407,17 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ */
+ nf_reset(skb);
+
++ if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
++ goto dropped;
++
+ vid = batadv_get_vid(skb, 0);
+ ethhdr = eth_hdr(skb);
+
+ switch (ntohs(ethhdr->h_proto)) {
+ case ETH_P_8021Q:
++ if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
++ goto dropped;
++
+ vhdr = (struct vlan_ethhdr *)skb->data;
+
+ if (vhdr->h_vlan_encapsulated_proto != ethertype)
+@@ -423,8 +429,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ }
+
+ /* skb->dev & skb->pkt_type are set here */
+- if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+- goto dropped;
+ skb->protocol = eth_type_trans(skb, soft_iface);
+
+ /* should not be necessary anymore as we use skb_pull_rcsum()
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 7a2b7915093b..bcb0a1b64556 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -1750,7 +1750,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+
+ ret = dev_alloc_name(ndev, ndev->name);
+ if (ret < 0) {
+- free_netdev(ndev);
++ ieee80211_if_free(ndev);
+ return ret;
+ }
+
+@@ -1836,7 +1836,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+
+ ret = register_netdevice(ndev);
+ if (ret) {
+- free_netdev(ndev);
++ ieee80211_if_free(ndev);
+ return ret;
+ }
+ }
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index f57b4dcdb233..4da560005b0e 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -1757,15 +1757,34 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
+ cp = pp->conn_in_get(ipvs, af, skb, &iph);
+
+ conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
+- if (conn_reuse_mode && !iph.fragoffs &&
+- is_new_conn(skb, &iph) && cp &&
+- ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
+- unlikely(!atomic_read(&cp->dest->weight))) ||
+- unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) {
+- if (!atomic_read(&cp->n_control))
+- ip_vs_conn_expire_now(cp);
+- __ip_vs_conn_put(cp);
+- cp = NULL;
++ if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
++ bool uses_ct = false, resched = false;
++
++ if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
++ unlikely(!atomic_read(&cp->dest->weight))) {
++ resched = true;
++ uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
++ } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
++ uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
++ if (!atomic_read(&cp->n_control)) {
++ resched = true;
++ } else {
++ /* Do not reschedule controlling connection
++ * that uses conntrack while it is still
++ * referenced by controlled connection(s).
++ */
++ resched = !uses_ct;
++ }
++ }
++
++ if (resched) {
++ if (!atomic_read(&cp->n_control))
++ ip_vs_conn_expire_now(cp);
++ __ip_vs_conn_put(cp);
++ if (uses_ct)
++ return NF_DROP;
++ cp = NULL;
++ }
+ }
+
+ if (unlikely(!cp)) {
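Editor's note: the reworked reuse check above boils down to two flags: whether the existing connection should be expired and rescheduled, and whether it is backed by conntrack, in which case the packet is dropped rather than rescheduled so conntrack state stays consistent. A compact, simplified sketch of that decision (inputs are illustrative booleans; the conditional ip_vs_conn_expire_now() call is not modelled):

#include <stdbool.h>
#include <stdio.h>

enum verdict { KEEP_CONN, EXPIRE_AND_RESCHEDULE, EXPIRE_AND_DROP };

/* Mirrors the control flow of the patched ip_vs_in() reuse check. */
static enum verdict reuse_verdict(bool dest_unavailable, bool expected_new,
				  bool uses_conntrack, bool has_controlled)
{
	bool resched = false, uses_ct = false;

	if (dest_unavailable) {
		resched = true;
		uses_ct = uses_conntrack;
	} else if (expected_new) {
		uses_ct = uses_conntrack;
		/* Do not reschedule a controlling conntrack-backed connection
		 * that is still referenced by controlled connections. */
		resched = has_controlled ? !uses_ct : true;
	}

	if (!resched)
		return KEEP_CONN;
	return uses_ct ? EXPIRE_AND_DROP : EXPIRE_AND_RESCHEDULE;
}

int main(void)
{
	printf("%d\n", reuse_verdict(true, false, true, false));  /* drop */
	printf("%d\n", reuse_verdict(false, true, false, false)); /* resched */
	return 0;
}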
+diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
+index 1b8d594e493a..0a6eb5c0d9e9 100644
+--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
++++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
+@@ -70,10 +70,10 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
+ const char *dptr;
+ int retc;
+
+- ip_vs_fill_iph_skb(p->af, skb, false, &iph);
++ retc = ip_vs_fill_iph_skb(p->af, skb, false, &iph);
+
+ /* Only useful with UDP */
+- if (iph.protocol != IPPROTO_UDP)
++ if (!retc || iph.protocol != IPPROTO_UDP)
+ return -EINVAL;
+ /* todo: IPv6 fragments:
+ * I think this only should be done for the first fragment. /HS
+@@ -88,7 +88,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
+ dptr = skb->data + dataoff;
+ datalen = skb->len - dataoff;
+
+- if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
++ if (get_callid(dptr, 0, datalen, &matchoff, &matchlen))
+ return -EINVAL;
+
+ /* N.B: pe_data is only set on success,
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 161dd0d67da8..a9155077feef 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -371,6 +371,49 @@ static void do_usb_table(void *symval, unsigned long size,
+ do_usb_entry_multi(symval + i, mod);
+ }
+
++static void do_of_entry_multi(void *symval, struct module *mod)
++{
++ char alias[500];
++ int len;
++ char *tmp;
++
++ DEF_FIELD_ADDR(symval, of_device_id, name);
++ DEF_FIELD_ADDR(symval, of_device_id, type);
++ DEF_FIELD_ADDR(symval, of_device_id, compatible);
++
++ len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
++ (*type)[0] ? *type : "*");
++
++ if (compatible[0])
++ sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
++ *compatible);
++
++ /* Replace all whitespace with underscores */
++ for (tmp = alias; tmp && *tmp; tmp++)
++ if (isspace(*tmp))
++ *tmp = '_';
++
++ buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
++ strcat(alias, "C");
++ add_wildcard(alias);
++ buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
++}
++
++static void do_of_table(void *symval, unsigned long size,
++ struct module *mod)
++{
++ unsigned int i;
++ const unsigned long id_size = SIZE_of_device_id;
++
++ device_id_check(mod->name, "of", size, id_size, symval);
++
++ /* Leave last one: it's the terminator. */
++ size -= id_size;
++
++ for (i = 0; i < size; i += id_size)
++ do_of_entry_multi(symval + i, mod);
++}
++
+ /* Looks like: hid:bNvNpN */
+ static int do_hid_entry(const char *filename,
+ void *symval, char *alias)
+@@ -684,30 +727,6 @@ static int do_pcmcia_entry(const char *filename,
+ }
+ ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
+
+-static int do_of_entry (const char *filename, void *symval, char *alias)
+-{
+- int len;
+- char *tmp;
+- DEF_FIELD_ADDR(symval, of_device_id, name);
+- DEF_FIELD_ADDR(symval, of_device_id, type);
+- DEF_FIELD_ADDR(symval, of_device_id, compatible);
+-
+- len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
+- (*type)[0] ? *type : "*");
+-
+- if (compatible[0])
+- sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
+- *compatible);
+-
+- /* Replace all whitespace with underscores */
+- for (tmp = alias; tmp && *tmp; tmp++)
+- if (isspace (*tmp))
+- *tmp = '_';
+-
+- return 1;
+-}
+-ADD_TO_DEVTABLE("of", of_device_id, do_of_entry);
+-
+ static int do_vio_entry(const char *filename, void *symval,
+ char *alias)
+ {
+@@ -1348,6 +1367,8 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
+ /* First handle the "special" cases */
+ if (sym_is(name, namelen, "usb"))
+ do_usb_table(symval, sym->st_size, mod);
++ if (sym_is(name, namelen, "of"))
++ do_of_table(symval, sym->st_size, mod);
+ else if (sym_is(name, namelen, "pnp"))
+ do_pnp_device_entry(symval, sym->st_size, mod);
+ else if (sym_is(name, namelen, "pnp_card"))
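Editor's note: the OF handling above now emits two MODULE_ALIAS lines per of_device_id: the exact alias and a wildcarded 'C*' variant. A user-space sketch of the alias construction; add_wildcard() is approximated by appending '*', and the device strings are illustrative:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Build "of:N<name>T<type>[C<compatible>]" with '*' wildcards for empty
 * fields and whitespace replaced by underscores, as do_of_entry_multi()
 * does in the hunk above. */
static void of_alias(char *alias, size_t size,
		     const char *name, const char *type, const char *compat)
{
	int len = snprintf(alias, size, "of:N%sT%s",
			   name[0] ? name : "*", type[0] ? type : "*");

	if (compat[0])
		snprintf(alias + len, size - len, "%sC%s",
			 type[0] ? "*" : "", compat);

	for (char *p = alias; *p; p++)
		if (isspace((unsigned char)*p))
			*p = '_';
}

int main(void)
{
	char alias[500];

	of_alias(alias, sizeof(alias), "", "", "fsl,imx6q-gpc");
	printf("MODULE_ALIAS(\"%s\");\n", alias);    /* exact match */
	printf("MODULE_ALIAS(\"%sC*\");\n", alias);  /* wildcarded variant */
	return 0;
}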