author     Anthony G. Basile <blueness@gentoo.org>  2017-03-27 10:02:08 -0400
committer  Anthony G. Basile <blueness@gentoo.org>  2017-03-27 10:02:08 -0400
commit     e566389761770017c40d9055be61770fbe8da1a9 (patch)
tree       3f93c563b9c32a8376dfafc13e32ce9045de45d7
parent     grsecurity-3.1-4.9.16-201703180820 (diff)
download   hardened-patchset-e566389761770017c40d9055be61770fbe8da1a9.tar.gz
           hardened-patchset-e566389761770017c40d9055be61770fbe8da1a9.tar.bz2
           hardened-patchset-e566389761770017c40d9055be61770fbe8da1a9.zip
grsecurity-3.1-4.9.18-201703261106 (tag: 20170326)
-rw-r--r--  4.9.16/1015_linux-4.9.16.patch                                                                                  1623
-rw-r--r--  4.9.18/0000_README (renamed from 4.9.16/0000_README)                                                              10
-rw-r--r--  4.9.18/1016_linux-4.9.17.patch                                                                                  6091
-rw-r--r--  4.9.18/1017_linux-4.9.18.patch                                                                                   876
-rw-r--r--  4.9.18/4420_grsecurity-3.1-4.9.18-201703261106.patch (renamed from 4.9.16/4420_grsecurity-3.1-4.9.16-201703180820.patch)  322
-rw-r--r--  4.9.18/4425_grsec_remove_EI_PAX.patch (renamed from 4.9.16/4425_grsec_remove_EI_PAX.patch)                         0
-rw-r--r--  4.9.18/4426_default_XATTR_PAX_FLAGS.patch (renamed from 4.9.16/4426_default_XATTR_PAX_FLAGS.patch)                 0
-rw-r--r--  4.9.18/4427_force_XATTR_PAX_tmpfs.patch (renamed from 4.9.16/4427_force_XATTR_PAX_tmpfs.patch)                     0
-rw-r--r--  4.9.18/4430_grsec-remove-localversion-grsec.patch (renamed from 4.9.16/4430_grsec-remove-localversion-grsec.patch) 0
-rw-r--r--  4.9.18/4435_grsec-mute-warnings.patch (renamed from 4.9.16/4435_grsec-mute-warnings.patch)                         0
-rw-r--r--  4.9.18/4440_grsec-remove-protected-paths.patch (renamed from 4.9.16/4440_grsec-remove-protected-paths.patch)       0
-rw-r--r--  4.9.18/4450_grsec-kconfig-default-gids.patch (renamed from 4.9.16/4450_grsec-kconfig-default-gids.patch)           0
-rw-r--r--  4.9.18/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 4.9.16/4465_selinux-avc_audit-log-curr_ip.patch)     0
-rw-r--r--  4.9.18/4470_disable-compat_vdso.patch (renamed from 4.9.16/4470_disable-compat_vdso.patch)                         0
-rw-r--r--  4.9.18/4475_emutramp_default_on.patch (renamed from 4.9.16/4475_emutramp_default_on.patch)                         0
15 files changed, 7135 insertions, 1787 deletions
diff --git a/4.9.16/1015_linux-4.9.16.patch b/4.9.16/1015_linux-4.9.16.patch
deleted file mode 100644
index 7ac2f77..0000000
--- a/4.9.16/1015_linux-4.9.16.patch
+++ /dev/null
@@ -1,1623 +0,0 @@
-diff --git a/Makefile b/Makefile
-index 03df4fc..4e0f962 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 4
- PATCHLEVEL = 9
--SUBLEVEL = 15
-+SUBLEVEL = 16
- EXTRAVERSION =
- NAME = Roaring Lionus
-
-diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
-index 5d83ff7..ec8e968 100644
---- a/arch/mips/configs/ip22_defconfig
-+++ b/arch/mips/configs/ip22_defconfig
-@@ -67,8 +67,8 @@ CONFIG_NETFILTER_NETLINK_QUEUE=m
- CONFIG_NF_CONNTRACK=m
- CONFIG_NF_CONNTRACK_SECMARK=y
- CONFIG_NF_CONNTRACK_EVENTS=y
--CONFIG_NF_CT_PROTO_DCCP=m
--CONFIG_NF_CT_PROTO_UDPLITE=m
-+CONFIG_NF_CT_PROTO_DCCP=y
-+CONFIG_NF_CT_PROTO_UDPLITE=y
- CONFIG_NF_CONNTRACK_AMANDA=m
- CONFIG_NF_CONNTRACK_FTP=m
- CONFIG_NF_CONNTRACK_H323=m
-diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
-index 2b74aee..e582069 100644
---- a/arch/mips/configs/ip27_defconfig
-+++ b/arch/mips/configs/ip27_defconfig
-@@ -133,7 +133,7 @@ CONFIG_LIBFC=m
- CONFIG_SCSI_QLOGIC_1280=y
- CONFIG_SCSI_PMCRAID=m
- CONFIG_SCSI_BFA_FC=m
--CONFIG_SCSI_DH=m
-+CONFIG_SCSI_DH=y
- CONFIG_SCSI_DH_RDAC=m
- CONFIG_SCSI_DH_HP_SW=m
- CONFIG_SCSI_DH_EMC=m
-@@ -205,7 +205,6 @@ CONFIG_MLX4_EN=m
- # CONFIG_MLX4_DEBUG is not set
- CONFIG_TEHUTI=m
- CONFIG_BNX2X=m
--CONFIG_QLGE=m
- CONFIG_SFC=m
- CONFIG_BE2NET=m
- CONFIG_LIBERTAS_THINFIRM=m
-diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
-index 5da76e0..0cdb431 100644
---- a/arch/mips/configs/lemote2f_defconfig
-+++ b/arch/mips/configs/lemote2f_defconfig
-@@ -39,7 +39,7 @@ CONFIG_HIBERNATION=y
- CONFIG_PM_STD_PARTITION="/dev/hda3"
- CONFIG_CPU_FREQ=y
- CONFIG_CPU_FREQ_DEBUG=y
--CONFIG_CPU_FREQ_STAT=m
-+CONFIG_CPU_FREQ_STAT=y
- CONFIG_CPU_FREQ_STAT_DETAILS=y
- CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
- CONFIG_CPU_FREQ_GOV_POWERSAVE=m
-diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
-index 58d43f3..078ecac 100644
---- a/arch/mips/configs/malta_defconfig
-+++ b/arch/mips/configs/malta_defconfig
-@@ -59,8 +59,8 @@ CONFIG_NETFILTER=y
- CONFIG_NF_CONNTRACK=m
- CONFIG_NF_CONNTRACK_SECMARK=y
- CONFIG_NF_CONNTRACK_EVENTS=y
--CONFIG_NF_CT_PROTO_DCCP=m
--CONFIG_NF_CT_PROTO_UDPLITE=m
-+CONFIG_NF_CT_PROTO_DCCP=y
-+CONFIG_NF_CT_PROTO_UDPLITE=y
- CONFIG_NF_CONNTRACK_AMANDA=m
- CONFIG_NF_CONNTRACK_FTP=m
- CONFIG_NF_CONNTRACK_H323=m
-diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
-index c8f7e28..e233f87 100644
---- a/arch/mips/configs/malta_kvm_defconfig
-+++ b/arch/mips/configs/malta_kvm_defconfig
-@@ -60,8 +60,8 @@ CONFIG_NETFILTER=y
- CONFIG_NF_CONNTRACK=m
- CONFIG_NF_CONNTRACK_SECMARK=y
- CONFIG_NF_CONNTRACK_EVENTS=y
--CONFIG_NF_CT_PROTO_DCCP=m
--CONFIG_NF_CT_PROTO_UDPLITE=m
-+CONFIG_NF_CT_PROTO_DCCP=y
-+CONFIG_NF_CT_PROTO_UDPLITE=y
- CONFIG_NF_CONNTRACK_AMANDA=m
- CONFIG_NF_CONNTRACK_FTP=m
- CONFIG_NF_CONNTRACK_H323=m
-diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
-index d2f54e5..fbe085c 100644
---- a/arch/mips/configs/malta_kvm_guest_defconfig
-+++ b/arch/mips/configs/malta_kvm_guest_defconfig
-@@ -59,8 +59,8 @@ CONFIG_NETFILTER=y
- CONFIG_NF_CONNTRACK=m
- CONFIG_NF_CONNTRACK_SECMARK=y
- CONFIG_NF_CONNTRACK_EVENTS=y
--CONFIG_NF_CT_PROTO_DCCP=m
--CONFIG_NF_CT_PROTO_UDPLITE=m
-+CONFIG_NF_CT_PROTO_DCCP=y
-+CONFIG_NF_CT_PROTO_UDPLITE=y
- CONFIG_NF_CONNTRACK_AMANDA=m
- CONFIG_NF_CONNTRACK_FTP=m
- CONFIG_NF_CONNTRACK_H323=m
-diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
-index 3d0d9cb..2942610 100644
---- a/arch/mips/configs/maltaup_xpa_defconfig
-+++ b/arch/mips/configs/maltaup_xpa_defconfig
-@@ -61,8 +61,8 @@ CONFIG_NETFILTER=y
- CONFIG_NF_CONNTRACK=m
- CONFIG_NF_CONNTRACK_SECMARK=y
- CONFIG_NF_CONNTRACK_EVENTS=y
--CONFIG_NF_CT_PROTO_DCCP=m
--CONFIG_NF_CT_PROTO_UDPLITE=m
-+CONFIG_NF_CT_PROTO_DCCP=y
-+CONFIG_NF_CT_PROTO_UDPLITE=y
- CONFIG_NF_CONNTRACK_AMANDA=m
- CONFIG_NF_CONNTRACK_FTP=m
- CONFIG_NF_CONNTRACK_H323=m
-diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig
-index b496c25..07d0182 100644
---- a/arch/mips/configs/nlm_xlp_defconfig
-+++ b/arch/mips/configs/nlm_xlp_defconfig
-@@ -110,7 +110,7 @@ CONFIG_NETFILTER=y
- CONFIG_NF_CONNTRACK=m
- CONFIG_NF_CONNTRACK_SECMARK=y
- CONFIG_NF_CONNTRACK_EVENTS=y
--CONFIG_NF_CT_PROTO_UDPLITE=m
-+CONFIG_NF_CT_PROTO_UDPLITE=y
- CONFIG_NF_CONNTRACK_AMANDA=m
- CONFIG_NF_CONNTRACK_FTP=m
- CONFIG_NF_CONNTRACK_H323=m
-diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
-index 8e99ad8..f59969a 100644
---- a/arch/mips/configs/nlm_xlr_defconfig
-+++ b/arch/mips/configs/nlm_xlr_defconfig
-@@ -90,7 +90,7 @@ CONFIG_NETFILTER=y
- CONFIG_NF_CONNTRACK=m
- CONFIG_NF_CONNTRACK_SECMARK=y
- CONFIG_NF_CONNTRACK_EVENTS=y
--CONFIG_NF_CT_PROTO_UDPLITE=m
-+CONFIG_NF_CT_PROTO_UDPLITE=y
- CONFIG_NF_CONNTRACK_AMANDA=m
- CONFIG_NF_CONNTRACK_FTP=m
- CONFIG_NF_CONNTRACK_H323=m
-diff --git a/arch/mips/include/asm/mach-ip27/spaces.h b/arch/mips/include/asm/mach-ip27/spaces.h
-index 4775a11..24d5e31 100644
---- a/arch/mips/include/asm/mach-ip27/spaces.h
-+++ b/arch/mips/include/asm/mach-ip27/spaces.h
-@@ -12,14 +12,16 @@
-
- /*
- * IP27 uses the R10000's uncached attribute feature. Attribute 3 selects
-- * uncached memory addressing.
-+ * uncached memory addressing. Hide the definitions on 32-bit compilation
-+ * of the compat-vdso code.
- */
--
-+#ifdef CONFIG_64BIT
- #define HSPEC_BASE 0x9000000000000000
- #define IO_BASE 0x9200000000000000
- #define MSPEC_BASE 0x9400000000000000
- #define UNCAC_BASE 0x9600000000000000
- #define CAC_BASE 0xa800000000000000
-+#endif
-
- #define TO_MSPEC(x) (MSPEC_BASE | ((x) & TO_PHYS_MASK))
- #define TO_HSPEC(x) (HSPEC_BASE | ((x) & TO_PHYS_MASK))
-diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
-index 5a73c5e..23198c9 100644
---- a/arch/mips/ralink/prom.c
-+++ b/arch/mips/ralink/prom.c
-@@ -30,8 +30,10 @@ const char *get_system_type(void)
- return soc_info.sys_type;
- }
-
--static __init void prom_init_cmdline(int argc, char **argv)
-+static __init void prom_init_cmdline(void)
- {
-+ int argc;
-+ char **argv;
- int i;
-
- pr_debug("prom: fw_arg0=%08x fw_arg1=%08x fw_arg2=%08x fw_arg3=%08x\n",
-@@ -60,14 +62,11 @@ static __init void prom_init_cmdline(int argc, char **argv)
-
- void __init prom_init(void)
- {
-- int argc;
-- char **argv;
--
- prom_soc_init(&soc_info);
-
- pr_info("SoC Type: %s\n", get_system_type());
-
-- prom_init_cmdline(argc, argv);
-+ prom_init_cmdline();
- }
-
- void __init prom_free_prom_memory(void)
-diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
-index 285796e..2b76e36 100644
---- a/arch/mips/ralink/rt288x.c
-+++ b/arch/mips/ralink/rt288x.c
-@@ -40,16 +40,6 @@ static struct rt2880_pmx_group rt2880_pinmux_data_act[] = {
- { 0 }
- };
-
--static void rt288x_wdt_reset(void)
--{
-- u32 t;
--
-- /* enable WDT reset output on pin SRAM_CS_N */
-- t = rt_sysc_r32(SYSC_REG_CLKCFG);
-- t |= CLKCFG_SRAM_CS_N_WDT;
-- rt_sysc_w32(t, SYSC_REG_CLKCFG);
--}
--
- void __init ralink_clk_init(void)
- {
- unsigned long cpu_rate, wmac_rate = 40000000;
-diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
-index c8a28c4b..e778e0b 100644
---- a/arch/mips/ralink/rt305x.c
-+++ b/arch/mips/ralink/rt305x.c
-@@ -89,17 +89,6 @@ static struct rt2880_pmx_group rt5350_pinmux_data[] = {
- { 0 }
- };
-
--static void rt305x_wdt_reset(void)
--{
-- u32 t;
--
-- /* enable WDT reset output on pin SRAM_CS_N */
-- t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
-- t |= RT305X_SYSCFG_SRAM_CS0_MODE_WDT <<
-- RT305X_SYSCFG_SRAM_CS0_MODE_SHIFT;
-- rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
--}
--
- static unsigned long rt5350_get_mem_size(void)
- {
- void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
-diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
-index 4cef916..3e0aa09 100644
---- a/arch/mips/ralink/rt3883.c
-+++ b/arch/mips/ralink/rt3883.c
-@@ -63,16 +63,6 @@ static struct rt2880_pmx_group rt3883_pinmux_data[] = {
- { 0 }
- };
-
--static void rt3883_wdt_reset(void)
--{
-- u32 t;
--
-- /* enable WDT reset output on GPIO 2 */
-- t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1);
-- t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT;
-- rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1);
--}
--
- void __init ralink_clk_init(void)
- {
- unsigned long cpu_rate, sys_rate;
-diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
-index 8077ff3..d4469b2 100644
---- a/arch/mips/ralink/timer.c
-+++ b/arch/mips/ralink/timer.c
-@@ -71,11 +71,6 @@ static int rt_timer_request(struct rt_timer *rt)
- return err;
- }
-
--static void rt_timer_free(struct rt_timer *rt)
--{
-- free_irq(rt->irq, rt);
--}
--
- static int rt_timer_config(struct rt_timer *rt, unsigned long divisor)
- {
- if (rt->timer_freq < divisor)
-@@ -101,15 +96,6 @@ static int rt_timer_enable(struct rt_timer *rt)
- return 0;
- }
-
--static void rt_timer_disable(struct rt_timer *rt)
--{
-- u32 t;
--
-- t = rt_timer_r32(rt, TIMER_REG_TMR0CTL);
-- t &= ~TMR0CTL_ENABLE;
-- rt_timer_w32(rt, TIMER_REG_TMR0CTL, t);
--}
--
- static int rt_timer_probe(struct platform_device *pdev)
- {
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-diff --git a/arch/mips/sgi-ip22/Platform b/arch/mips/sgi-ip22/Platform
-index b7a4b7e..e8f6b3a 100644
---- a/arch/mips/sgi-ip22/Platform
-+++ b/arch/mips/sgi-ip22/Platform
-@@ -25,7 +25,7 @@ endif
- # Simplified: what IP22 does at 128MB+ in ksegN, IP28 does at 512MB+ in xkphys
- #
- ifdef CONFIG_SGI_IP28
-- ifeq ($(call cc-option-yn,-mr10k-cache-barrier=store), n)
-+ ifeq ($(call cc-option-yn,-march=r10000 -mr10k-cache-barrier=store), n)
- $(error gcc doesn't support needed option -mr10k-cache-barrier=store)
- endif
- endif
-diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
-index 3362299..6ca3b90 100644
---- a/arch/powerpc/lib/sstep.c
-+++ b/arch/powerpc/lib/sstep.c
-@@ -1807,8 +1807,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
- goto instr_done;
-
- case LARX:
-- if (regs->msr & MSR_LE)
-- return 0;
- if (op.ea & (size - 1))
- break; /* can't handle misaligned */
- err = -EFAULT;
-@@ -1832,8 +1830,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
- goto ldst_done;
-
- case STCX:
-- if (regs->msr & MSR_LE)
-- return 0;
- if (op.ea & (size - 1))
- break; /* can't handle misaligned */
- err = -EFAULT;
-@@ -1859,8 +1855,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
- goto ldst_done;
-
- case LOAD:
-- if (regs->msr & MSR_LE)
-- return 0;
- err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
- if (!err) {
- if (op.type & SIGNEXT)
-@@ -1872,8 +1866,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
-
- #ifdef CONFIG_PPC_FPU
- case LOAD_FP:
-- if (regs->msr & MSR_LE)
-- return 0;
- if (size == 4)
- err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
- else
-@@ -1882,15 +1874,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
- #endif
- #ifdef CONFIG_ALTIVEC
- case LOAD_VMX:
-- if (regs->msr & MSR_LE)
-- return 0;
- err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
- goto ldst_done;
- #endif
- #ifdef CONFIG_VSX
- case LOAD_VSX:
-- if (regs->msr & MSR_LE)
-- return 0;
- err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
- goto ldst_done;
- #endif
-@@ -1913,8 +1901,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
- goto instr_done;
-
- case STORE:
-- if (regs->msr & MSR_LE)
-- return 0;
- if ((op.type & UPDATE) && size == sizeof(long) &&
- op.reg == 1 && op.update_reg == 1 &&
- !(regs->msr & MSR_PR) &&
-@@ -1927,8 +1913,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
-
- #ifdef CONFIG_PPC_FPU
- case STORE_FP:
-- if (regs->msr & MSR_LE)
-- return 0;
- if (size == 4)
- err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
- else
-@@ -1937,15 +1921,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
- #endif
- #ifdef CONFIG_ALTIVEC
- case STORE_VMX:
-- if (regs->msr & MSR_LE)
-- return 0;
- err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
- goto ldst_done;
- #endif
- #ifdef CONFIG_VSX
- case STORE_VSX:
-- if (regs->msr & MSR_LE)
-- return 0;
- err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
- goto ldst_done;
- #endif
-diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
-index c96c0cb..32c46b4 100644
---- a/arch/powerpc/sysdev/xics/icp-opal.c
-+++ b/arch/powerpc/sysdev/xics/icp-opal.c
-@@ -91,6 +91,16 @@ static unsigned int icp_opal_get_irq(void)
-
- static void icp_opal_set_cpu_priority(unsigned char cppr)
- {
-+ /*
-+ * Here be dragons. The caller has asked to allow only IPI's and not
-+ * external interrupts. But OPAL XIVE doesn't support that. So instead
-+ * of allowing no interrupts allow all. That's still not right, but
-+ * currently the only caller who does this is xics_migrate_irqs_away()
-+ * and it works in that case.
-+ */
-+ if (cppr >= DEFAULT_PRIORITY)
-+ cppr = LOWEST_PRIORITY;
-+
- xics_set_base_cppr(cppr);
- opal_int_set_cppr(cppr);
- iosync();
-diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
-index 69d858e..23efe4e 100644
---- a/arch/powerpc/sysdev/xics/xics-common.c
-+++ b/arch/powerpc/sysdev/xics/xics-common.c
-@@ -20,6 +20,7 @@
- #include <linux/of.h>
- #include <linux/slab.h>
- #include <linux/spinlock.h>
-+#include <linux/delay.h>
-
- #include <asm/prom.h>
- #include <asm/io.h>
-@@ -198,9 +199,6 @@ void xics_migrate_irqs_away(void)
- /* Remove ourselves from the global interrupt queue */
- xics_set_cpu_giq(xics_default_distrib_server, 0);
-
-- /* Allow IPIs again... */
-- icp_ops->set_priority(DEFAULT_PRIORITY);
--
- for_each_irq_desc(virq, desc) {
- struct irq_chip *chip;
- long server;
-@@ -255,6 +253,19 @@ void xics_migrate_irqs_away(void)
- unlock:
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-+
-+ /* Allow "sufficient" time to drop any inflight IRQ's */
-+ mdelay(5);
-+
-+ /*
-+ * Allow IPIs again. This is done at the very end, after migrating all
-+ * interrupts, the expectation is that we'll only get woken up by an IPI
-+ * interrupt beyond this point, but leave externals masked just to be
-+ * safe. If we're using icp-opal this may actually allow all
-+ * interrupts anyway, but that should be OK.
-+ */
-+ icp_ops->set_priority(DEFAULT_PRIORITY);
-+
- }
- #endif /* CONFIG_HOTPLUG_CPU */
-
-diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
-index d56ef26..7678f79 100644
---- a/arch/s390/mm/pgtable.c
-+++ b/arch/s390/mm/pgtable.c
-@@ -606,12 +606,29 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
- bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
- {
- spinlock_t *ptl;
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
- pgste_t pgste;
- pte_t *ptep;
- pte_t pte;
- bool dirty;
-
-- ptep = get_locked_pte(mm, addr, &ptl);
-+ pgd = pgd_offset(mm, addr);
-+ pud = pud_alloc(mm, pgd, addr);
-+ if (!pud)
-+ return false;
-+ pmd = pmd_alloc(mm, pud, addr);
-+ if (!pmd)
-+ return false;
-+ /* We can't run guests backed by huge pages, but userspace can
-+ * still set them up and then try to migrate them without any
-+ * migration support.
-+ */
-+ if (pmd_large(*pmd))
-+ return true;
-+
-+ ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
- if (unlikely(!ptep))
- return false;
-
-diff --git a/crypto/Makefile b/crypto/Makefile
-index bd6a029..9e52b3c 100644
---- a/crypto/Makefile
-+++ b/crypto/Makefile
-@@ -71,6 +71,7 @@ obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
- obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
- obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
- obj-$(CONFIG_CRYPTO_WP512) += wp512.o
-+CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
- obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
- obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
- obj-$(CONFIG_CRYPTO_ECB) += ecb.o
-@@ -94,6 +95,7 @@ obj-$(CONFIG_CRYPTO_BLOWFISH_COMMON) += blowfish_common.o
- obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
- obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
- obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
-+CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
- obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
- obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
- obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
-diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
-index 7c75a8d..6bdf39e 100644
---- a/drivers/firmware/efi/arm-runtime.c
-+++ b/drivers/firmware/efi/arm-runtime.c
-@@ -65,6 +65,7 @@ static bool __init efi_virtmap_init(void)
- bool systab_found;
-
- efi_mm.pgd = pgd_alloc(&efi_mm);
-+ mm_init_cpumask(&efi_mm);
- init_new_context(NULL, &efi_mm);
-
- systab_found = false;
-diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
-index 83768e8..2178266 100644
---- a/drivers/i2c/i2c-mux.c
-+++ b/drivers/i2c/i2c-mux.c
-@@ -429,6 +429,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc)
- while (muxc->num_adapters) {
- struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters];
- struct i2c_mux_priv *priv = adap->algo_data;
-+ struct device_node *np = adap->dev.of_node;
-
- muxc->adapter[muxc->num_adapters] = NULL;
-
-@@ -438,6 +439,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc)
-
- sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
- i2c_del_adapter(adap);
-+ of_node_put(np);
- kfree(priv);
- }
- }
-diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
-index 4cab29e..11bfa27 100644
---- a/drivers/infiniband/hw/mlx5/main.c
-+++ b/drivers/infiniband/hw/mlx5/main.c
-@@ -3141,9 +3141,11 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
- if (err)
- goto err_rsrc;
-
-- err = mlx5_ib_alloc_q_counters(dev);
-- if (err)
-- goto err_odp;
-+ if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
-+ err = mlx5_ib_alloc_q_counters(dev);
-+ if (err)
-+ goto err_odp;
-+ }
-
- err = ib_register_device(&dev->ib_dev, NULL);
- if (err)
-@@ -3171,7 +3173,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
- ib_unregister_device(&dev->ib_dev);
-
- err_q_cnt:
-- mlx5_ib_dealloc_q_counters(dev);
-+ if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
-+ mlx5_ib_dealloc_q_counters(dev);
-
- err_odp:
- mlx5_ib_odp_remove_one(dev);
-@@ -3201,7 +3204,8 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
-
- mlx5_remove_roce_notifier(dev);
- ib_unregister_device(&dev->ib_dev);
-- mlx5_ib_dealloc_q_counters(dev);
-+ if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
-+ mlx5_ib_dealloc_q_counters(dev);
- destroy_umrc_res(dev);
- mlx5_ib_odp_remove_one(dev);
- destroy_dev_resources(&dev->devr);
-diff --git a/drivers/md/dm.c b/drivers/md/dm.c
-index ef7bf1d..628ba00 100644
---- a/drivers/md/dm.c
-+++ b/drivers/md/dm.c
-@@ -972,10 +972,61 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
- }
- EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
-
-+/*
-+ * Flush current->bio_list when the target map method blocks.
-+ * This fixes deadlocks in snapshot and possibly in other targets.
-+ */
-+struct dm_offload {
-+ struct blk_plug plug;
-+ struct blk_plug_cb cb;
-+};
-+
-+static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
-+{
-+ struct dm_offload *o = container_of(cb, struct dm_offload, cb);
-+ struct bio_list list;
-+ struct bio *bio;
-+
-+ INIT_LIST_HEAD(&o->cb.list);
-+
-+ if (unlikely(!current->bio_list))
-+ return;
-+
-+ list = *current->bio_list;
-+ bio_list_init(current->bio_list);
-+
-+ while ((bio = bio_list_pop(&list))) {
-+ struct bio_set *bs = bio->bi_pool;
-+ if (unlikely(!bs) || bs == fs_bio_set) {
-+ bio_list_add(current->bio_list, bio);
-+ continue;
-+ }
-+
-+ spin_lock(&bs->rescue_lock);
-+ bio_list_add(&bs->rescue_list, bio);
-+ queue_work(bs->rescue_workqueue, &bs->rescue_work);
-+ spin_unlock(&bs->rescue_lock);
-+ }
-+}
-+
-+static void dm_offload_start(struct dm_offload *o)
-+{
-+ blk_start_plug(&o->plug);
-+ o->cb.callback = flush_current_bio_list;
-+ list_add(&o->cb.list, &current->plug->cb_list);
-+}
-+
-+static void dm_offload_end(struct dm_offload *o)
-+{
-+ list_del(&o->cb.list);
-+ blk_finish_plug(&o->plug);
-+}
-+
- static void __map_bio(struct dm_target_io *tio)
- {
- int r;
- sector_t sector;
-+ struct dm_offload o;
- struct bio *clone = &tio->clone;
- struct dm_target *ti = tio->ti;
-
-@@ -988,7 +1039,11 @@ static void __map_bio(struct dm_target_io *tio)
- */
- atomic_inc(&tio->io->io_count);
- sector = clone->bi_iter.bi_sector;
-+
-+ dm_offload_start(&o);
- r = ti->type->map(ti, clone);
-+ dm_offload_end(&o);
-+
- if (r == DM_MAPIO_REMAPPED) {
- /* the bio has been remapped so dispatch it */
-
-diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
-index d9c1f2f..aba7735 100644
---- a/drivers/media/rc/rc-main.c
-+++ b/drivers/media/rc/rc-main.c
-@@ -1411,6 +1411,7 @@ int rc_register_device(struct rc_dev *dev)
- int attr = 0;
- int minor;
- int rc;
-+ u64 rc_type;
-
- if (!dev || !dev->map_name)
- return -EINVAL;
-@@ -1496,14 +1497,18 @@ int rc_register_device(struct rc_dev *dev)
- goto out_input;
- }
-
-+ rc_type = BIT_ULL(rc_map->rc_type);
-+
- if (dev->change_protocol) {
-- u64 rc_type = (1ll << rc_map->rc_type);
- rc = dev->change_protocol(dev, &rc_type);
- if (rc < 0)
- goto out_raw;
- dev->enabled_protocols = rc_type;
- }
-
-+ if (dev->driver_type == RC_DRIVER_IR_RAW)
-+ ir_raw_load_modules(&rc_type);
-+
- /* Allow the RC sysfs nodes to be accessible */
- atomic_set(&dev->initialized, 1);
-
-diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
-index 2c720cb..c3e6734 100644
---- a/drivers/media/usb/dvb-usb/dw2102.c
-+++ b/drivers/media/usb/dvb-usb/dw2102.c
-@@ -68,6 +68,7 @@
- struct dw2102_state {
- u8 initialized;
- u8 last_lock;
-+ u8 data[MAX_XFER_SIZE + 4];
- struct i2c_client *i2c_client_demod;
- struct i2c_client *i2c_client_tuner;
-
-@@ -662,62 +663,72 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
- int num)
- {
- struct dvb_usb_device *d = i2c_get_adapdata(adap);
-- u8 obuf[0x40], ibuf[0x40];
-+ struct dw2102_state *state;
-
- if (!d)
- return -ENODEV;
-+
-+ state = d->priv;
-+
- if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
- return -EAGAIN;
-+ if (mutex_lock_interruptible(&d->data_mutex) < 0) {
-+ mutex_unlock(&d->i2c_mutex);
-+ return -EAGAIN;
-+ }
-
- switch (num) {
- case 1:
- switch (msg[0].addr) {
- case SU3000_STREAM_CTRL:
-- obuf[0] = msg[0].buf[0] + 0x36;
-- obuf[1] = 3;
-- obuf[2] = 0;
-- if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 0, 0) < 0)
-+ state->data[0] = msg[0].buf[0] + 0x36;
-+ state->data[1] = 3;
-+ state->data[2] = 0;
-+ if (dvb_usb_generic_rw(d, state->data, 3,
-+ state->data, 0, 0) < 0)
- err("i2c transfer failed.");
- break;
- case DW2102_RC_QUERY:
-- obuf[0] = 0x10;
-- if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 2, 0) < 0)
-+ state->data[0] = 0x10;
-+ if (dvb_usb_generic_rw(d, state->data, 1,
-+ state->data, 2, 0) < 0)
- err("i2c transfer failed.");
-- msg[0].buf[1] = ibuf[0];
-- msg[0].buf[0] = ibuf[1];
-+ msg[0].buf[1] = state->data[0];
-+ msg[0].buf[0] = state->data[1];
- break;
- default:
- /* always i2c write*/
-- obuf[0] = 0x08;
-- obuf[1] = msg[0].addr;
-- obuf[2] = msg[0].len;
-+ state->data[0] = 0x08;
-+ state->data[1] = msg[0].addr;
-+ state->data[2] = msg[0].len;
-
-- memcpy(&obuf[3], msg[0].buf, msg[0].len);
-+ memcpy(&state->data[3], msg[0].buf, msg[0].len);
-
-- if (dvb_usb_generic_rw(d, obuf, msg[0].len + 3,
-- ibuf, 1, 0) < 0)
-+ if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3,
-+ state->data, 1, 0) < 0)
- err("i2c transfer failed.");
-
- }
- break;
- case 2:
- /* always i2c read */
-- obuf[0] = 0x09;
-- obuf[1] = msg[0].len;
-- obuf[2] = msg[1].len;
-- obuf[3] = msg[0].addr;
-- memcpy(&obuf[4], msg[0].buf, msg[0].len);
--
-- if (dvb_usb_generic_rw(d, obuf, msg[0].len + 4,
-- ibuf, msg[1].len + 1, 0) < 0)
-+ state->data[0] = 0x09;
-+ state->data[1] = msg[0].len;
-+ state->data[2] = msg[1].len;
-+ state->data[3] = msg[0].addr;
-+ memcpy(&state->data[4], msg[0].buf, msg[0].len);
-+
-+ if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4,
-+ state->data, msg[1].len + 1, 0) < 0)
- err("i2c transfer failed.");
-
-- memcpy(msg[1].buf, &ibuf[1], msg[1].len);
-+ memcpy(msg[1].buf, &state->data[1], msg[1].len);
- break;
- default:
- warn("more than 2 i2c messages at a time is not handled yet.");
- break;
- }
-+ mutex_unlock(&d->data_mutex);
- mutex_unlock(&d->i2c_mutex);
- return num;
- }
-@@ -845,17 +856,23 @@ static int su3000_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
- static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
- {
- struct dw2102_state *state = (struct dw2102_state *)d->priv;
-- u8 obuf[] = {0xde, 0};
-+ int ret = 0;
-
- info("%s: %d, initialized %d", __func__, i, state->initialized);
-
- if (i && !state->initialized) {
-+ mutex_lock(&d->data_mutex);
-+
-+ state->data[0] = 0xde;
-+ state->data[1] = 0;
-+
- state->initialized = 1;
- /* reset board */
-- return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
-+ ret = dvb_usb_generic_rw(d, state->data, 2, NULL, 0, 0);
-+ mutex_unlock(&d->data_mutex);
- }
-
-- return 0;
-+ return ret;
- }
-
- static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
-@@ -1310,49 +1327,57 @@ static int prof_7500_frontend_attach(struct dvb_usb_adapter *d)
- return 0;
- }
-
--static int su3000_frontend_attach(struct dvb_usb_adapter *d)
-+static int su3000_frontend_attach(struct dvb_usb_adapter *adap)
- {
-- u8 obuf[3] = { 0xe, 0x80, 0 };
-- u8 ibuf[] = { 0 };
-+ struct dvb_usb_device *d = adap->dev;
-+ struct dw2102_state *state = d->priv;
-+
-+ mutex_lock(&d->data_mutex);
-+
-+ state->data[0] = 0xe;
-+ state->data[1] = 0x80;
-+ state->data[2] = 0;
-
-- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
-+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- err("command 0x0e transfer failed.");
-
-- obuf[0] = 0xe;
-- obuf[1] = 0x02;
-- obuf[2] = 1;
-+ state->data[0] = 0xe;
-+ state->data[1] = 0x02;
-+ state->data[2] = 1;
-
-- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
-+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- err("command 0x0e transfer failed.");
- msleep(300);
-
-- obuf[0] = 0xe;
-- obuf[1] = 0x83;
-- obuf[2] = 0;
-+ state->data[0] = 0xe;
-+ state->data[1] = 0x83;
-+ state->data[2] = 0;
-
-- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
-+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- err("command 0x0e transfer failed.");
-
-- obuf[0] = 0xe;
-- obuf[1] = 0x83;
-- obuf[2] = 1;
-+ state->data[0] = 0xe;
-+ state->data[1] = 0x83;
-+ state->data[2] = 1;
-
-- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
-+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- err("command 0x0e transfer failed.");
-
-- obuf[0] = 0x51;
-+ state->data[0] = 0x51;
-
-- if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
-+ if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
- err("command 0x51 transfer failed.");
-
-- d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
-- &d->dev->i2c_adap);
-- if (d->fe_adap[0].fe == NULL)
-+ mutex_unlock(&d->data_mutex);
-+
-+ adap->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
-+ &d->i2c_adap);
-+ if (adap->fe_adap[0].fe == NULL)
- return -EIO;
-
-- if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
-+ if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
- &dw2104_ts2020_config,
-- &d->dev->i2c_adap)) {
-+ &d->i2c_adap)) {
- info("Attached DS3000/TS2020!");
- return 0;
- }
-@@ -1361,47 +1386,55 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d)
- return -EIO;
- }
-
--static int t220_frontend_attach(struct dvb_usb_adapter *d)
-+static int t220_frontend_attach(struct dvb_usb_adapter *adap)
- {
-- u8 obuf[3] = { 0xe, 0x87, 0 };
-- u8 ibuf[] = { 0 };
-+ struct dvb_usb_device *d = adap->dev;
-+ struct dw2102_state *state = d->priv;
-+
-+ mutex_lock(&d->data_mutex);
-
-- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
-+ state->data[0] = 0xe;
-+ state->data[1] = 0x87;
-+ state->data[2] = 0x0;
-+
-+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- err("command 0x0e transfer failed.");
-
-- obuf[0] = 0xe;
-- obuf[1] = 0x86;
-- obuf[2] = 1;
-+ state->data[0] = 0xe;
-+ state->data[1] = 0x86;
-+ state->data[2] = 1;
-
-- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
-+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- err("command 0x0e transfer failed.");
-
-- obuf[0] = 0xe;
-- obuf[1] = 0x80;
-- obuf[2] = 0;
-+ state->data[0] = 0xe;
-+ state->data[1] = 0x80;
-+ state->data[2] = 0;
-
-- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
-+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- err("command 0x0e transfer failed.");
-
- msleep(50);
-
-- obuf[0] = 0xe;
-- obuf[1] = 0x80;
-- obuf[2] = 1;
-+ state->data[0] = 0xe;
-+ state->data[1] = 0x80;
-+ state->data[2] = 1;
-
-- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
-+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- err("command 0x0e transfer failed.");
-
-- obuf[0] = 0x51;
-+ state->data[0] = 0x51;
-
-- if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
-+ if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
- err("command 0x51 transfer failed.");
-
-- d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
-- &d->dev->i2c_adap, NULL);
-- if (d->fe_adap[0].fe != NULL) {
-- if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60,
-- &d->dev->i2c_adap, &tda18271_config)) {
-+ mutex_unlock(&d->data_mutex);
-+
-+ adap->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
-+ &d->i2c_adap, NULL);
-+ if (adap->fe_adap[0].fe != NULL) {
-+ if (dvb_attach(tda18271_attach, adap->fe_adap[0].fe, 0x60,
-+ &d->i2c_adap, &tda18271_config)) {
- info("Attached TDA18271HD/CXD2820R!");
- return 0;
- }
-@@ -1411,23 +1444,30 @@ static int t220_frontend_attach(struct dvb_usb_adapter *d)
- return -EIO;
- }
-
--static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
-+static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap)
- {
-- u8 obuf[] = { 0x51 };
-- u8 ibuf[] = { 0 };
-+ struct dvb_usb_device *d = adap->dev;
-+ struct dw2102_state *state = d->priv;
-+
-+ mutex_lock(&d->data_mutex);
-
-- if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
-+ state->data[0] = 0x51;
-+
-+ if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
- err("command 0x51 transfer failed.");
-
-- d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config,
-- &d->dev->i2c_adap);
-+ mutex_unlock(&d->data_mutex);
-
-- if (d->fe_adap[0].fe == NULL)
-+ adap->fe_adap[0].fe = dvb_attach(m88rs2000_attach,
-+ &s421_m88rs2000_config,
-+ &d->i2c_adap);
-+
-+ if (adap->fe_adap[0].fe == NULL)
- return -EIO;
-
-- if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
-+ if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
- &dw2104_ts2020_config,
-- &d->dev->i2c_adap)) {
-+ &d->i2c_adap)) {
- info("Attached RS2000/TS2020!");
- return 0;
- }
-@@ -1440,44 +1480,50 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
- {
- struct dvb_usb_device *d = adap->dev;
- struct dw2102_state *state = d->priv;
-- u8 obuf[3] = { 0xe, 0x80, 0 };
-- u8 ibuf[] = { 0 };
- struct i2c_adapter *i2c_adapter;
- struct i2c_client *client;
- struct i2c_board_info board_info;
- struct m88ds3103_platform_data m88ds3103_pdata = {};
- struct ts2020_config ts2020_config = {};
-
-- if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
-+ mutex_lock(&d->data_mutex);
-+
-+ state->data[0] = 0xe;
-+ state->data[1] = 0x80;
-+ state->data[2] = 0x0;
-+
-+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- err("command 0x0e transfer failed.");
-
-- obuf[0] = 0xe;
-- obuf[1] = 0x02;
-- obuf[2] = 1;
-+ state->data[0] = 0xe;
-+ state->data[1] = 0x02;
-+ state->data[2] = 1;
-
-- if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
-+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- err("command 0x0e transfer failed.");
- msleep(300);
-
-- obuf[0] = 0xe;
-- obuf[1] = 0x83;
-- obuf[2] = 0;
-+ state->data[0] = 0xe;
-+ state->data[1] = 0x83;
-+ state->data[2] = 0;
-
-- if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
-+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- err("command 0x0e transfer failed.");
-
-- obuf[0] = 0xe;
-- obuf[1] = 0x83;
-- obuf[2] = 1;
-+ state->data[0] = 0xe;
-+ state->data[1] = 0x83;
-+ state->data[2] = 1;
-
-- if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
-+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
- err("command 0x0e transfer failed.");
-
-- obuf[0] = 0x51;
-+ state->data[0] = 0x51;
-
-- if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0)
-+ if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
- err("command 0x51 transfer failed.");
-
-+ mutex_unlock(&d->data_mutex);
-+
- /* attach demod */
- m88ds3103_pdata.clk = 27000000;
- m88ds3103_pdata.i2c_wr_max = 33;
-diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
-index f9fa3fa..2051f28 100644
---- a/drivers/mtd/maps/pmcmsp-flash.c
-+++ b/drivers/mtd/maps/pmcmsp-flash.c
-@@ -139,15 +139,13 @@ static int __init init_msp_flash(void)
- }
-
- msp_maps[i].bankwidth = 1;
-- msp_maps[i].name = kmalloc(7, GFP_KERNEL);
-+ msp_maps[i].name = kstrndup(flash_name, 7, GFP_KERNEL);
- if (!msp_maps[i].name) {
- iounmap(msp_maps[i].virt);
- kfree(msp_parts[i]);
- goto cleanup_loop;
- }
-
-- msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7);
--
- for (j = 0; j < pcnt; j++) {
- part_name[5] = '0' + i;
- part_name[7] = '0' + j;
-diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
-index 5370909..08d91ef 100644
---- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
-+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
-@@ -913,6 +913,8 @@ static int bcm_enet_open(struct net_device *dev)
- priv->old_link = 0;
- priv->old_duplex = -1;
- priv->old_pause = -1;
-+ } else {
-+ phydev = NULL;
- }
-
- /* mask all interrupts and request them */
-@@ -1083,7 +1085,7 @@ static int bcm_enet_open(struct net_device *dev)
- enet_dmac_writel(priv, priv->dma_chan_int_mask,
- ENETDMAC_IRMASK, priv->tx_chan);
-
-- if (priv->has_phy)
-+ if (phydev)
- phy_start(phydev);
- else
- bcm_enet_adjust_link(dev);
-@@ -1126,7 +1128,7 @@ static int bcm_enet_open(struct net_device *dev)
- free_irq(dev->irq, dev);
-
- out_phy_disconnect:
-- if (priv->has_phy)
-+ if (phydev)
- phy_disconnect(phydev);
-
- return ret;
-diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
-index 28097be..5127b7e 100644
---- a/drivers/net/ethernet/ti/cpmac.c
-+++ b/drivers/net/ethernet/ti/cpmac.c
-@@ -1211,7 +1211,7 @@ int cpmac_init(void)
- goto fail_alloc;
- }
-
--#warning FIXME: unhardcode gpio&reset bits
-+ /* FIXME: unhardcode gpio&reset bits */
- ar7_gpio_disable(26);
- ar7_gpio_disable(27);
- ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
-diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
-index 3a035e07..087a218 100644
---- a/drivers/pci/quirks.c
-+++ b/drivers/pci/quirks.c
-@@ -2173,6 +2173,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
- DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
- DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
- quirk_blacklist_vpd);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
-
- /*
- * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
-diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
-index f44615f..3e2ef4f 100644
---- a/drivers/tty/serial/samsung.c
-+++ b/drivers/tty/serial/samsung.c
-@@ -1036,8 +1036,10 @@ static int s3c64xx_serial_startup(struct uart_port *port)
- if (ourport->dma) {
- ret = s3c24xx_serial_request_dma(ourport);
- if (ret < 0) {
-- dev_warn(port->dev, "DMA request failed\n");
-- return ret;
-+ dev_warn(port->dev,
-+ "DMA request failed, DMA will not be used\n");
-+ devm_kfree(port->dev, ourport->dma);
-+ ourport->dma = NULL;
- }
- }
-
-diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
-index 29e80cc..5dd1832 100644
---- a/drivers/usb/dwc3/dwc3-omap.c
-+++ b/drivers/usb/dwc3/dwc3-omap.c
-@@ -249,6 +249,7 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
- val = dwc3_omap_read_utmi_ctrl(omap);
- val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG;
- dwc3_omap_write_utmi_ctrl(omap, val);
-+ break;
-
- case OMAP_DWC3_VBUS_OFF:
- val = dwc3_omap_read_utmi_ctrl(omap);
-diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
-index e4a1d97..39459b7 100644
---- a/drivers/usb/dwc3/gadget.h
-+++ b/drivers/usb/dwc3/gadget.h
-@@ -28,23 +28,23 @@ struct dwc3;
- #define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget))
-
- /* DEPCFG parameter 1 */
--#define DWC3_DEPCFG_INT_NUM(n) ((n) << 0)
-+#define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0)
- #define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8)
- #define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9)
- #define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10)
- #define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11)
- #define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13)
--#define DWC3_DEPCFG_BINTERVAL_M1(n) ((n) << 16)
-+#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16)
- #define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24)
--#define DWC3_DEPCFG_EP_NUMBER(n) ((n) << 25)
-+#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25)
- #define DWC3_DEPCFG_BULK_BASED (1 << 30)
- #define DWC3_DEPCFG_FIFO_BASED (1 << 31)
-
- /* DEPCFG parameter 0 */
--#define DWC3_DEPCFG_EP_TYPE(n) ((n) << 1)
--#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) ((n) << 3)
--#define DWC3_DEPCFG_FIFO_NUMBER(n) ((n) << 17)
--#define DWC3_DEPCFG_BURST_SIZE(n) ((n) << 22)
-+#define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1)
-+#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) (((n) & 0x7ff) << 3)
-+#define DWC3_DEPCFG_FIFO_NUMBER(n) (((n) & 0x1f) << 17)
-+#define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22)
- #define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26)
- /* This applies for core versions earlier than 1.94a */
- #define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31)
-diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
-index 8d412d8..89081b8 100644
---- a/drivers/usb/gadget/function/f_fs.c
-+++ b/drivers/usb/gadget/function/f_fs.c
-@@ -1833,11 +1833,14 @@ static int ffs_func_eps_enable(struct ffs_function *func)
- spin_lock_irqsave(&func->ffs->eps_lock, flags);
- do {
- struct usb_endpoint_descriptor *ds;
-+ struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
-+ int needs_comp_desc = false;
- int desc_idx;
-
-- if (ffs->gadget->speed == USB_SPEED_SUPER)
-+ if (ffs->gadget->speed == USB_SPEED_SUPER) {
- desc_idx = 2;
-- else if (ffs->gadget->speed == USB_SPEED_HIGH)
-+ needs_comp_desc = true;
-+ } else if (ffs->gadget->speed == USB_SPEED_HIGH)
- desc_idx = 1;
- else
- desc_idx = 0;
-@@ -1854,6 +1857,14 @@ static int ffs_func_eps_enable(struct ffs_function *func)
-
- ep->ep->driver_data = ep;
- ep->ep->desc = ds;
-+
-+ comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
-+ USB_DT_ENDPOINT_SIZE);
-+ ep->ep->maxburst = comp_desc->bMaxBurst + 1;
-+
-+ if (needs_comp_desc)
-+ ep->ep->comp_desc = comp_desc;
-+
- ret = usb_ep_enable(ep->ep);
- if (likely(!ret)) {
- epfile->ep = ep;
-diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
-index 27ed51b..29b41b5 100644
---- a/drivers/usb/gadget/function/f_uvc.c
-+++ b/drivers/usb/gadget/function/f_uvc.c
-@@ -258,13 +258,6 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
- memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
- v4l2_event_queue(&uvc->vdev, &v4l2_event);
-
-- /* Pass additional setup data to userspace */
-- if (uvc->event_setup_out && uvc->event_length) {
-- uvc->control_req->length = uvc->event_length;
-- return usb_ep_queue(uvc->func.config->cdev->gadget->ep0,
-- uvc->control_req, GFP_ATOMIC);
-- }
--
- return 0;
- }
-
-diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
-index a81d9ab..4fa5de2 100644
---- a/drivers/usb/gadget/udc/dummy_hcd.c
-+++ b/drivers/usb/gadget/udc/dummy_hcd.c
-@@ -1031,6 +1031,8 @@ static int dummy_udc_probe(struct platform_device *pdev)
- int rc;
-
- dum = *((void **)dev_get_platdata(&pdev->dev));
-+ /* Clear usb_gadget region for new registration to udc-core */
-+ memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
- dum->gadget.name = gadget_name;
- dum->gadget.ops = &dummy_ops;
- dum->gadget.max_speed = USB_SPEED_SUPER;
-diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
-index b38a228..af0566d 100644
---- a/drivers/usb/host/ohci-at91.c
-+++ b/drivers/usb/host/ohci-at91.c
-@@ -361,7 +361,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
-
- case USB_PORT_FEAT_SUSPEND:
- dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n");
-- if (valid_port(wIndex)) {
-+ if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
- ohci_at91_port_suspend(ohci_at91->sfr_regmap,
- 1);
- return 0;
-@@ -404,7 +404,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
-
- case USB_PORT_FEAT_SUSPEND:
- dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n");
-- if (valid_port(wIndex)) {
-+ if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
- ohci_at91_port_suspend(ohci_at91->sfr_regmap,
- 0);
- return 0;
-diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
-index 74c42f7..3425154 100644
---- a/drivers/usb/host/xhci-dbg.c
-+++ b/drivers/usb/host/xhci-dbg.c
-@@ -111,7 +111,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
- xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
-
- /* xhci 1.1 controllers have the HCCPARAMS2 register */
-- if (hci_version > 100) {
-+ if (hci_version > 0x100) {
- temp = readl(&xhci->cap_regs->hcc_params2);
- xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp);
- xhci_dbg(xhci, " HC %s Force save context capability",
-diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
-index abe3606..5895e84 100644
---- a/drivers/usb/host/xhci-plat.c
-+++ b/drivers/usb/host/xhci-plat.c
-@@ -274,6 +274,8 @@ static int xhci_plat_remove(struct platform_device *dev)
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- struct clk *clk = xhci->clk;
-
-+ xhci->xhc_state |= XHCI_STATE_REMOVING;
-+
- usb_remove_hcd(xhci->shared_hcd);
- usb_phy_shutdown(hcd->usb_phy);
-
-diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
-index 095778f..37c63cb 100644
---- a/drivers/usb/misc/iowarrior.c
-+++ b/drivers/usb/misc/iowarrior.c
-@@ -781,12 +781,6 @@ static int iowarrior_probe(struct usb_interface *interface,
- iface_desc = interface->cur_altsetting;
- dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
-
-- if (iface_desc->desc.bNumEndpoints < 1) {
-- dev_err(&interface->dev, "Invalid number of endpoints\n");
-- retval = -EINVAL;
-- goto error;
-- }
--
- /* set up the endpoint information */
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
-@@ -797,6 +791,21 @@ static int iowarrior_probe(struct usb_interface *interface,
- /* this one will match for the IOWarrior56 only */
- dev->int_out_endpoint = endpoint;
- }
-+
-+ if (!dev->int_in_endpoint) {
-+ dev_err(&interface->dev, "no interrupt-in endpoint found\n");
-+ retval = -ENODEV;
-+ goto error;
-+ }
-+
-+ if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
-+ if (!dev->int_out_endpoint) {
-+ dev_err(&interface->dev, "no interrupt-out endpoint found\n");
-+ retval = -ENODEV;
-+ goto error;
-+ }
-+ }
-+
- /* we have to check the report_size often, so remember it in the endianness suitable for our machine */
- dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
- if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
-diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
-index 6a1df9e..30bf0f5 100644
---- a/drivers/usb/serial/digi_acceleport.c
-+++ b/drivers/usb/serial/digi_acceleport.c
-@@ -1482,16 +1482,20 @@ static int digi_read_oob_callback(struct urb *urb)
- struct usb_serial *serial = port->serial;
- struct tty_struct *tty;
- struct digi_port *priv = usb_get_serial_port_data(port);
-+ unsigned char *buf = urb->transfer_buffer;
- int opcode, line, status, val;
- int i;
- unsigned int rts;
-
-+ if (urb->actual_length < 4)
-+ return -1;
-+
- /* handle each oob command */
-- for (i = 0; i < urb->actual_length - 3;) {
-- opcode = ((unsigned char *)urb->transfer_buffer)[i++];
-- line = ((unsigned char *)urb->transfer_buffer)[i++];
-- status = ((unsigned char *)urb->transfer_buffer)[i++];
-- val = ((unsigned char *)urb->transfer_buffer)[i++];
-+ for (i = 0; i < urb->actual_length - 3; i += 4) {
-+ opcode = buf[i];
-+ line = buf[i + 1];
-+ status = buf[i + 2];
-+ val = buf[i + 3];
-
- dev_dbg(&port->dev, "digi_read_oob_callback: opcode=%d, line=%d, status=%d, val=%d\n",
- opcode, line, status, val);
-diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
-index c02808a..f1a8fdc 100644
---- a/drivers/usb/serial/io_ti.c
-+++ b/drivers/usb/serial/io_ti.c
-@@ -1674,6 +1674,12 @@ static void edge_interrupt_callback(struct urb *urb)
- function = TIUMP_GET_FUNC_FROM_CODE(data[0]);
- dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__,
- port_number, function, data[1]);
-+
-+ if (port_number >= edge_serial->serial->num_ports) {
-+ dev_err(dev, "bad port number %d\n", port_number);
-+ goto exit;
-+ }
-+
- port = edge_serial->serial->port[port_number];
- edge_port = usb_get_serial_port_data(port);
- if (!edge_port) {
-@@ -1755,7 +1761,7 @@ static void edge_bulk_in_callback(struct urb *urb)
-
- port_number = edge_port->port->port_number;
-
-- if (edge_port->lsr_event) {
-+ if (urb->actual_length > 0 && edge_port->lsr_event) {
- edge_port->lsr_event = 0;
- dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n",
- __func__, port_number, edge_port->lsr_mask, *data);
-diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
-index a180b17..76564b3 100644
---- a/drivers/usb/serial/omninet.c
-+++ b/drivers/usb/serial/omninet.c
-@@ -142,12 +142,6 @@ static int omninet_port_remove(struct usb_serial_port *port)
-
- static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port)
- {
-- struct usb_serial *serial = port->serial;
-- struct usb_serial_port *wport;
--
-- wport = serial->port[1];
-- tty_port_tty_set(&wport->port, tty);
--
- return usb_serial_generic_open(tty, port);
- }
-
-diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
-index 93c6c9b..8a069aa 100644
---- a/drivers/usb/serial/safe_serial.c
-+++ b/drivers/usb/serial/safe_serial.c
-@@ -200,6 +200,11 @@ static void safe_process_read_urb(struct urb *urb)
- if (!safe)
- goto out;
-
-+ if (length < 2) {
-+ dev_err(&port->dev, "malformed packet\n");
-+ return;
-+ }
-+
- fcs = fcs_compute10(data, length, CRC10_INITFCS);
- if (fcs) {
- dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
-diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
-index 1d4f5fa..dc9d64a 100644
---- a/fs/ext4/inode.c
-+++ b/fs/ext4/inode.c
-@@ -3824,6 +3824,10 @@ static int ext4_block_truncate_page(handle_t *handle,
- unsigned blocksize;
- struct inode *inode = mapping->host;
-
-+ /* If we are processing an encrypted inode during orphan list handling */
-+ if (ext4_encrypted_inode(inode) && !fscrypt_has_encryption_key(inode))
-+ return 0;
-+
- blocksize = inode->i_sb->s_blocksize;
- length = blocksize - (offset & (blocksize - 1));
-
-diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
-index eb209d4..dc79773 100644
---- a/include/linux/user_namespace.h
-+++ b/include/linux/user_namespace.h
-@@ -65,7 +65,7 @@ struct ucounts {
- struct hlist_node node;
- struct user_namespace *ns;
- kuid_t uid;
-- atomic_t count;
-+ int count;
- atomic_t ucount[UCOUNT_COUNTS];
- };
-
-diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
-index 14e49c7..b35533b 100644
---- a/include/trace/events/syscalls.h
-+++ b/include/trace/events/syscalls.h
-@@ -1,5 +1,6 @@
- #undef TRACE_SYSTEM
- #define TRACE_SYSTEM raw_syscalls
-+#undef TRACE_INCLUDE_FILE
- #define TRACE_INCLUDE_FILE syscalls
-
- #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
-diff --git a/kernel/ucount.c b/kernel/ucount.c
-index 4bbd38e..f4ac185 100644
---- a/kernel/ucount.c
-+++ b/kernel/ucount.c
-@@ -139,7 +139,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
-
- new->ns = ns;
- new->uid = uid;
-- atomic_set(&new->count, 0);
-+ new->count = 0;
-
- spin_lock_irq(&ucounts_lock);
- ucounts = find_ucounts(ns, uid, hashent);
-@@ -150,8 +150,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
- ucounts = new;
- }
- }
-- if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
-+ if (ucounts->count == INT_MAX)
- ucounts = NULL;
-+ else
-+ ucounts->count += 1;
- spin_unlock_irq(&ucounts_lock);
- return ucounts;
- }
-@@ -160,13 +162,15 @@ static void put_ucounts(struct ucounts *ucounts)
- {
- unsigned long flags;
-
-- if (atomic_dec_and_test(&ucounts->count)) {
-- spin_lock_irqsave(&ucounts_lock, flags);
-+ spin_lock_irqsave(&ucounts_lock, flags);
-+ ucounts->count -= 1;
-+ if (!ucounts->count)
- hlist_del_init(&ucounts->node);
-- spin_unlock_irqrestore(&ucounts_lock, flags);
-+ else
-+ ucounts = NULL;
-+ spin_unlock_irqrestore(&ucounts_lock, flags);
-
-- kfree(ucounts);
-- }
-+ kfree(ucounts);
- }
-
- static inline bool atomic_inc_below(atomic_t *v, int u)
-diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
-index ebe1b9f..85814d1 100644
---- a/virt/kvm/arm/vgic/vgic-mmio.c
-+++ b/virt/kvm/arm/vgic/vgic-mmio.c
-@@ -187,21 +187,37 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
- static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
- bool new_active_state)
- {
-+ struct kvm_vcpu *requester_vcpu;
- spin_lock(&irq->irq_lock);
-+
-+ /*
-+ * The vcpu parameter here can mean multiple things depending on how
-+ * this function is called; when handling a trap from the kernel it
-+ * depends on the GIC version, and these functions are also called as
-+ * part of save/restore from userspace.
-+ *
-+ * Therefore, we have to figure out the requester in a reliable way.
-+ *
-+ * When accessing VGIC state from user space, the requester_vcpu is
-+ * NULL, which is fine, because we guarantee that no VCPUs are running
-+ * when accessing VGIC state from user space so irq->vcpu->cpu is
-+ * always -1.
-+ */
-+ requester_vcpu = kvm_arm_get_running_vcpu();
-+
- /*
- * If this virtual IRQ was written into a list register, we
- * have to make sure the CPU that runs the VCPU thread has
-- * synced back LR state to the struct vgic_irq. We can only
-- * know this for sure, when either this irq is not assigned to
-- * anyone's AP list anymore, or the VCPU thread is not
-- * running on any CPUs.
-+ * synced back the LR state to the struct vgic_irq.
- *
-- * In the opposite case, we know the VCPU thread may be on its
-- * way back from the guest and still has to sync back this
-- * IRQ, so we release and re-acquire the spin_lock to let the
-- * other thread sync back the IRQ.
-+ * As long as the conditions below are true, we know the VCPU thread
-+ * may be on its way back from the guest (we kicked the VCPU thread in
-+ * vgic_change_active_prepare) and still has to sync back this IRQ,
-+ * so we release and re-acquire the spin_lock to let the other thread
-+ * sync back the IRQ.
- */
- while (irq->vcpu && /* IRQ may have state in an LR somewhere */
-+ irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
- irq->vcpu->cpu != -1) /* VCPU thread is running */
- cond_resched_lock(&irq->irq_lock);
-
diff --git a/4.9.16/0000_README b/4.9.18/0000_README
index 5b280f3..8c12f63 100644
--- a/4.9.16/0000_README
+++ b/4.9.18/0000_README
@@ -2,11 +2,15 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 1015_linux-4.9.16.patch
+Patch: 1016_linux-4.9.17.patch
From: http://www.kernel.org
-Desc: Linux 4.9.16
+Desc: Linux 4.9.17
-Patch: 4420_grsecurity-3.1-4.9.16-201703180820.patch
+Patch: 1017_linux-4.9.18.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.18
+
+Patch: 4420_grsecurity-3.1-4.9.18-201703261106.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.9.18/1016_linux-4.9.17.patch b/4.9.18/1016_linux-4.9.17.patch
new file mode 100644
index 0000000..1a83496
--- /dev/null
+++ b/4.9.18/1016_linux-4.9.17.patch
@@ -0,0 +1,6091 @@
+diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
+index 405da11..d11af52 100644
+--- a/Documentation/arm64/silicon-errata.txt
++++ b/Documentation/arm64/silicon-errata.txt
+@@ -42,24 +42,26 @@ file acts as a registry of software workarounds in the Linux Kernel and
+ will be updated when new workarounds are committed and backported to
+ stable kernels.
+
+-| Implementor | Component | Erratum ID | Kconfig |
+-+----------------+-----------------+-----------------+-------------------------+
+-| ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
+-| ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 |
+-| ARM | Cortex-A53 | #824069 | ARM64_ERRATUM_824069 |
+-| ARM | Cortex-A53 | #819472 | ARM64_ERRATUM_819472 |
+-| ARM | Cortex-A53 | #845719 | ARM64_ERRATUM_845719 |
+-| ARM | Cortex-A53 | #843419 | ARM64_ERRATUM_843419 |
+-| ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
+-| ARM | Cortex-A57 | #852523 | N/A |
+-| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
+-| ARM | Cortex-A72 | #853709 | N/A |
+-| ARM | MMU-500 | #841119,#826419 | N/A |
+-| | | | |
+-| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
+-| Cavium | ThunderX ITS | #23144 | CAVIUM_ERRATUM_23144 |
+-| Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
+-| Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 |
+-| Cavium | ThunderX SMMUv2 | #27704 | N/A |
+-| | | | |
+-| Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 |
++| Implementor | Component | Erratum ID | Kconfig |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
++| ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 |
++| ARM | Cortex-A53 | #824069 | ARM64_ERRATUM_824069 |
++| ARM | Cortex-A53 | #819472 | ARM64_ERRATUM_819472 |
++| ARM | Cortex-A53 | #845719 | ARM64_ERRATUM_845719 |
++| ARM | Cortex-A53 | #843419 | ARM64_ERRATUM_843419 |
++| ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
++| ARM | Cortex-A57 | #852523 | N/A |
++| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
++| ARM | Cortex-A72 | #853709 | N/A |
++| ARM | MMU-500 | #841119,#826419 | N/A |
++| | | | |
++| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
++| Cavium | ThunderX ITS | #23144 | CAVIUM_ERRATUM_23144 |
++| Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
++| Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 |
++| Cavium | ThunderX SMMUv2 | #27704 | N/A |
++| | | | |
++| Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 |
++| | | | |
++| Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 |
+diff --git a/Makefile b/Makefile
+index 4e0f962..004f90a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 969ef88..cf57a77 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -474,6 +474,16 @@ config CAVIUM_ERRATUM_27456
+
+ If unsure, say Y.
+
++config QCOM_QDF2400_ERRATUM_0065
++ bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size"
++ default y
++ help
++	  On the Qualcomm Datacenter Technologies QDF2400 SoC, the ITS
++	  hardware reports the ITE size incorrectly. The GITS_TYPER.ITT_Entry_size
++	  field should have been indicated as 16 bytes (0xf), not 8 bytes (0x7).
++
++ If unsure, say Y.
++
+ endmenu
+
+
+diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
+index 88e2f2b..55889d0 100644
+--- a/arch/arm64/kvm/hyp/tlb.c
++++ b/arch/arm64/kvm/hyp/tlb.c
+@@ -17,14 +17,62 @@
+
+ #include <asm/kvm_hyp.h>
+
++static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
++{
++ u64 val;
++
++ /*
++ * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
++ * most TLB operations target EL2/EL0. In order to affect the
++ * guest TLBs (EL1/EL0), we need to change one of these two
++ * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
++ * let's flip TGE before executing the TLB operation.
++ */
++ write_sysreg(kvm->arch.vttbr, vttbr_el2);
++ val = read_sysreg(hcr_el2);
++ val &= ~HCR_TGE;
++ write_sysreg(val, hcr_el2);
++ isb();
++}
++
++static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
++{
++ write_sysreg(kvm->arch.vttbr, vttbr_el2);
++ isb();
++}
++
++static hyp_alternate_select(__tlb_switch_to_guest,
++ __tlb_switch_to_guest_nvhe,
++ __tlb_switch_to_guest_vhe,
++ ARM64_HAS_VIRT_HOST_EXTN);
++
++static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
++{
++ /*
++ * We're done with the TLB operation, let's restore the host's
++ * view of HCR_EL2.
++ */
++ write_sysreg(0, vttbr_el2);
++ write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
++}
++
++static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
++{
++ write_sysreg(0, vttbr_el2);
++}
++
++static hyp_alternate_select(__tlb_switch_to_host,
++ __tlb_switch_to_host_nvhe,
++ __tlb_switch_to_host_vhe,
++ ARM64_HAS_VIRT_HOST_EXTN);
++
+ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+ {
+ dsb(ishst);
+
+ /* Switch to requested VMID */
+ kvm = kern_hyp_va(kvm);
+- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+- isb();
++ __tlb_switch_to_guest()(kvm);
+
+ /*
+ * We could do so much better if we had the VA as well.
+@@ -45,7 +93,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+ dsb(ish);
+ isb();
+
+- write_sysreg(0, vttbr_el2);
++ __tlb_switch_to_host()(kvm);
+ }
+
+ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
+@@ -54,14 +102,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
+
+ /* Switch to requested VMID */
+ kvm = kern_hyp_va(kvm);
+- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+- isb();
++ __tlb_switch_to_guest()(kvm);
+
+ asm volatile("tlbi vmalls12e1is" : : );
+ dsb(ish);
+ isb();
+
+- write_sysreg(0, vttbr_el2);
++ __tlb_switch_to_host()(kvm);
+ }
+
+ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+@@ -69,14 +116,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+ struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+ /* Switch to requested VMID */
+- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+- isb();
++ __tlb_switch_to_guest()(kvm);
+
+ asm volatile("tlbi vmalle1" : : );
+ dsb(nsh);
+ isb();
+
+- write_sysreg(0, vttbr_el2);
++ __tlb_switch_to_host()(kvm);
+ }
+
+ void __hyp_text __kvm_flush_vm_context(void)
+diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+index 9fa046d..4119945 100644
+--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
++++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+@@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
+ {
+ u32 *key = crypto_tfm_ctx(tfm);
+
+- *key = 0;
++ *key = ~0;
+
+ return 0;
+ }
+diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
+index 5c45114..b9e3f0a 100644
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -19,16 +19,18 @@ extern void destroy_context(struct mm_struct *mm);
+ struct mm_iommu_table_group_mem_t;
+
+ extern int isolate_lru_page(struct page *page); /* from internal.h */
+-extern bool mm_iommu_preregistered(void);
+-extern long mm_iommu_get(unsigned long ua, unsigned long entries,
++extern bool mm_iommu_preregistered(struct mm_struct *mm);
++extern long mm_iommu_get(struct mm_struct *mm,
++ unsigned long ua, unsigned long entries,
+ struct mm_iommu_table_group_mem_t **pmem);
+-extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
+-extern void mm_iommu_init(mm_context_t *ctx);
+-extern void mm_iommu_cleanup(mm_context_t *ctx);
+-extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
+- unsigned long size);
+-extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
+- unsigned long entries);
++extern long mm_iommu_put(struct mm_struct *mm,
++ struct mm_iommu_table_group_mem_t *mem);
++extern void mm_iommu_init(struct mm_struct *mm);
++extern void mm_iommu_cleanup(struct mm_struct *mm);
++extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
++ unsigned long ua, unsigned long size);
++extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
++ unsigned long ua, unsigned long entries);
+ extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
+ unsigned long ua, unsigned long *hpa);
+ extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index 270ee30..f516ac5 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -915,7 +915,7 @@ void __init setup_arch(char **cmdline_p)
+ init_mm.context.pte_frag = NULL;
+ #endif
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
+- mm_iommu_init(&init_mm.context);
++ mm_iommu_init(&init_mm);
+ #endif
+ irqstack_early_init();
+ exc_lvl_early_init();
+diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
+index b114f8b..73bf6e1 100644
+--- a/arch/powerpc/mm/mmu_context_book3s64.c
++++ b/arch/powerpc/mm/mmu_context_book3s64.c
+@@ -115,7 +115,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ mm->context.pte_frag = NULL;
+ #endif
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
+- mm_iommu_init(&mm->context);
++ mm_iommu_init(mm);
+ #endif
+ return 0;
+ }
+@@ -156,13 +156,11 @@ static inline void destroy_pagetable_page(struct mm_struct *mm)
+ }
+ #endif
+
+-
+ void destroy_context(struct mm_struct *mm)
+ {
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
+- mm_iommu_cleanup(&mm->context);
++ WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
+ #endif
+-
+ #ifdef CONFIG_PPC_ICSWX
+ drop_cop(mm->context.acop, mm);
+ kfree(mm->context.cop_lockp);
+diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
+index e0f1c33..7de7124 100644
+--- a/arch/powerpc/mm/mmu_context_iommu.c
++++ b/arch/powerpc/mm/mmu_context_iommu.c
+@@ -56,7 +56,7 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
+ }
+
+ pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
+- current->pid,
++ current ? current->pid : 0,
+ incr ? '+' : '-',
+ npages << PAGE_SHIFT,
+ mm->locked_vm << PAGE_SHIFT,
+@@ -66,12 +66,9 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
+ return ret;
+ }
+
+-bool mm_iommu_preregistered(void)
++bool mm_iommu_preregistered(struct mm_struct *mm)
+ {
+- if (!current || !current->mm)
+- return false;
+-
+- return !list_empty(&current->mm->context.iommu_group_mem_list);
++ return !list_empty(&mm->context.iommu_group_mem_list);
+ }
+ EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
+
+@@ -124,19 +121,16 @@ static int mm_iommu_move_page_from_cma(struct page *page)
+ return 0;
+ }
+
+-long mm_iommu_get(unsigned long ua, unsigned long entries,
++long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ struct mm_iommu_table_group_mem_t **pmem)
+ {
+ struct mm_iommu_table_group_mem_t *mem;
+ long i, j, ret = 0, locked_entries = 0;
+ struct page *page = NULL;
+
+- if (!current || !current->mm)
+- return -ESRCH; /* process exited */
+-
+ mutex_lock(&mem_list_mutex);
+
+- list_for_each_entry_rcu(mem, &current->mm->context.iommu_group_mem_list,
++ list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
+ next) {
+ if ((mem->ua == ua) && (mem->entries == entries)) {
+ ++mem->used;
+@@ -154,7 +148,7 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
+
+ }
+
+- ret = mm_iommu_adjust_locked_vm(current->mm, entries, true);
++ ret = mm_iommu_adjust_locked_vm(mm, entries, true);
+ if (ret)
+ goto unlock_exit;
+
+@@ -190,7 +184,7 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
+ * of the CMA zone if possible. NOTE: faulting in + migration
+ * can be expensive. Batching can be considered later
+ */
+- if (get_pageblock_migratetype(page) == MIGRATE_CMA) {
++ if (is_migrate_cma_page(page)) {
+ if (mm_iommu_move_page_from_cma(page))
+ goto populate;
+ if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
+@@ -215,11 +209,11 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
+ mem->entries = entries;
+ *pmem = mem;
+
+- list_add_rcu(&mem->next, &current->mm->context.iommu_group_mem_list);
++ list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
+
+ unlock_exit:
+ if (locked_entries && ret)
+- mm_iommu_adjust_locked_vm(current->mm, locked_entries, false);
++ mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+
+ mutex_unlock(&mem_list_mutex);
+
+@@ -264,17 +258,13 @@ static void mm_iommu_free(struct rcu_head *head)
+ static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
+ {
+ list_del_rcu(&mem->next);
+- mm_iommu_adjust_locked_vm(current->mm, mem->entries, false);
+ call_rcu(&mem->rcu, mm_iommu_free);
+ }
+
+-long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
++long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
+ {
+ long ret = 0;
+
+- if (!current || !current->mm)
+- return -ESRCH; /* process exited */
+-
+ mutex_lock(&mem_list_mutex);
+
+ if (mem->used == 0) {
+@@ -297,6 +287,8 @@ long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
+ /* @mapped became 0 so now mappings are disabled, release the region */
+ mm_iommu_release(mem);
+
++ mm_iommu_adjust_locked_vm(mm, mem->entries, false);
++
+ unlock_exit:
+ mutex_unlock(&mem_list_mutex);
+
+@@ -304,14 +296,12 @@ long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
+ }
+ EXPORT_SYMBOL_GPL(mm_iommu_put);
+
+-struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
+- unsigned long size)
++struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
++ unsigned long ua, unsigned long size)
+ {
+ struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
+
+- list_for_each_entry_rcu(mem,
+- &current->mm->context.iommu_group_mem_list,
+- next) {
++ list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
+ if ((mem->ua <= ua) &&
+ (ua + size <= mem->ua +
+ (mem->entries << PAGE_SHIFT))) {
+@@ -324,14 +314,12 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
+ }
+ EXPORT_SYMBOL_GPL(mm_iommu_lookup);
+
+-struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
+- unsigned long entries)
++struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
++ unsigned long ua, unsigned long entries)
+ {
+ struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
+
+- list_for_each_entry_rcu(mem,
+- &current->mm->context.iommu_group_mem_list,
+- next) {
++ list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
+ if ((mem->ua == ua) && (mem->entries == entries)) {
+ ret = mem;
+ break;
+@@ -373,17 +361,7 @@ void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
+ }
+ EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);
+
+-void mm_iommu_init(mm_context_t *ctx)
++void mm_iommu_init(struct mm_struct *mm)
+ {
+- INIT_LIST_HEAD_RCU(&ctx->iommu_group_mem_list);
+-}
+-
+-void mm_iommu_cleanup(mm_context_t *ctx)
+-{
+- struct mm_iommu_table_group_mem_t *mem, *tmp;
+-
+- list_for_each_entry_safe(mem, tmp, &ctx->iommu_group_mem_list, next) {
+- list_del_rcu(&mem->next);
+- mm_iommu_do_free(mem);
+- }
++ INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
+ }
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 7fe88bb..38623e2 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2096,8 +2096,8 @@ static int x86_pmu_event_init(struct perf_event *event)
+
+ static void refresh_pce(void *ignored)
+ {
+- if (current->mm)
+- load_mm_cr4(current->mm);
++ if (current->active_mm)
++ load_mm_cr4(current->active_mm);
+ }
+
+ static void x86_pmu_event_mapped(struct perf_event *event)
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 8f44c5a..f228f74 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -31,6 +31,7 @@
+ #include <asm/apic.h>
+ #include <asm/timer.h>
+ #include <asm/reboot.h>
++#include <asm/nmi.h>
+
+ struct ms_hyperv_info ms_hyperv;
+ EXPORT_SYMBOL_GPL(ms_hyperv);
+@@ -158,6 +159,26 @@ static unsigned char hv_get_nmi_reason(void)
+ return 0;
+ }
+
++#ifdef CONFIG_X86_LOCAL_APIC
++/*
++ * Prior to WS2016, Debug-VM sends NMIs to all CPUs, which makes
++ * it difficult to process CHANNELMSG_UNLOAD in case of crash. Handle
++ * unknown NMI on the first CPU which gets it.
++ */
++static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
++{
++ static atomic_t nmi_cpu = ATOMIC_INIT(-1);
++
++ if (!unknown_nmi_panic)
++ return NMI_DONE;
++
++ if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
++ return NMI_HANDLED;
++
++ return NMI_DONE;
++}
++#endif
++
+ static void __init ms_hyperv_init_platform(void)
+ {
+ /*
+@@ -183,6 +204,9 @@ static void __init ms_hyperv_init_platform(void)
+ pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
+ lapic_timer_frequency);
+ }
++
++ register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
++ "hv_nmi_unknown");
+ #endif
+
+ if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 54a2372..b5785c1 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -4,6 +4,7 @@
+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ */
+
++#define DISABLE_BRANCH_PROFILING
+ #include <linux/init.h>
+ #include <linux/linkage.h>
+ #include <linux/types.h>
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index 46b2f41..eea88fe 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -1287,6 +1287,8 @@ static int __init init_tsc_clocksource(void)
+ * exporting a reliable TSC.
+ */
+ if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
++ if (boot_cpu_has(X86_FEATURE_ART))
++ art_related_clocksource = &clocksource_tsc;
+ clocksource_register_khz(&clocksource_tsc, tsc_khz);
+ return 0;
+ }
+diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
+index 0493c17..333362f 100644
+--- a/arch/x86/mm/kasan_init_64.c
++++ b/arch/x86/mm/kasan_init_64.c
+@@ -1,3 +1,4 @@
++#define DISABLE_BRANCH_PROFILING
+ #define pr_fmt(fmt) "kasan: " fmt
+ #include <linux/bootmem.h>
+ #include <linux/kasan.h>
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index bedfab9..a00a6c0 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -234,23 +234,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ return 1;
+
+ for_each_pci_msi_entry(msidesc, dev) {
+- __pci_read_msi_msg(msidesc, &msg);
+- pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
+- ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
+- if (msg.data != XEN_PIRQ_MSI_DATA ||
+- xen_irq_from_pirq(pirq) < 0) {
+- pirq = xen_allocate_pirq_msi(dev, msidesc);
+- if (pirq < 0) {
+- irq = -ENODEV;
+- goto error;
+- }
+- xen_msi_compose_msg(dev, pirq, &msg);
+- __pci_write_msi_msg(msidesc, &msg);
+- dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
+- } else {
+- dev_dbg(&dev->dev,
+- "xen: msi already bound to pirq=%d\n", pirq);
++ pirq = xen_allocate_pirq_msi(dev, msidesc);
++ if (pirq < 0) {
++ irq = -ENODEV;
++ goto error;
+ }
++ xen_msi_compose_msg(dev, pirq, &msg);
++ __pci_write_msi_msg(msidesc, &msg);
++ dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
+ irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
+ (type == PCI_CAP_ID_MSI) ? nvec : 1,
+ (type == PCI_CAP_ID_MSIX) ?
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index 0774799..c6fee74 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -182,6 +182,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
+ __set_bit(WRITE_16, filter->write_ok);
+ __set_bit(WRITE_LONG, filter->write_ok);
+ __set_bit(WRITE_LONG_2, filter->write_ok);
++ __set_bit(WRITE_SAME, filter->write_ok);
++ __set_bit(WRITE_SAME_16, filter->write_ok);
++ __set_bit(WRITE_SAME_32, filter->write_ok);
+ __set_bit(ERASE, filter->write_ok);
+ __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
+ __set_bit(MODE_SELECT, filter->write_ok);
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index bdc67ba..4421f7c 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -160,6 +160,34 @@ static struct dmi_system_id acpi_rev_dmi_table[] __initdata = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"),
+ },
+ },
++ {
++ .callback = dmi_enable_rev_override,
++ .ident = "DELL Precision 5520",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 5520"),
++ },
++ },
++ {
++ .callback = dmi_enable_rev_override,
++ .ident = "DELL Precision 3520",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3520"),
++ },
++ },
++ /*
++ * Resolves a quirk with the Dell Latitude 3350 that
++	 * prevents the ethernet adapter from functioning.
++ */
++ {
++ .callback = dmi_enable_rev_override,
++ .ident = "DELL Latitude 3350",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 3350"),
++ },
++ },
+ #endif
+ {}
+ };
+diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
+index 3bbd2a5..2acaa77 100644
+--- a/drivers/clk/bcm/clk-bcm2835.c
++++ b/drivers/clk/bcm/clk-bcm2835.c
+@@ -1598,7 +1598,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
+ .a2w_reg = A2W_PLLH_AUX,
+ .load_mask = CM_PLLH_LOADAUX,
+ .hold_mask = 0,
+- .fixed_divider = 10),
++ .fixed_divider = 1),
+ [BCM2835_PLLH_PIX] = REGISTER_PLL_DIV(
+ .name = "pllh_pix",
+ .source_pll = "pllh",
+diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
+index 015f711..d235fbe 100644
+--- a/drivers/dma/ioat/init.c
++++ b/drivers/dma/ioat/init.c
+@@ -691,7 +691,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
+ /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
+ ioat_chan->completion =
+ dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
+- GFP_KERNEL, &ioat_chan->completion_dma);
++ GFP_NOWAIT, &ioat_chan->completion_dma);
+ if (!ioat_chan->completion)
+ return -ENOMEM;
+
+@@ -701,7 +701,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
+ ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
+ order = IOAT_MAX_ORDER;
+- ring = ioat_alloc_ring(c, order, GFP_KERNEL);
++ ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
+ if (!ring)
+ return -ENOMEM;
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+index 77a52b5..70f0344 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+@@ -95,9 +95,11 @@ nvkm-y += nvkm/engine/disp/cursg84.o
+ nvkm-y += nvkm/engine/disp/cursgt215.o
+ nvkm-y += nvkm/engine/disp/cursgf119.o
+ nvkm-y += nvkm/engine/disp/cursgk104.o
++nvkm-y += nvkm/engine/disp/cursgp102.o
+
+ nvkm-y += nvkm/engine/disp/oimmnv50.o
+ nvkm-y += nvkm/engine/disp/oimmg84.o
+ nvkm-y += nvkm/engine/disp/oimmgt215.o
+ nvkm-y += nvkm/engine/disp/oimmgf119.o
+ nvkm-y += nvkm/engine/disp/oimmgk104.o
++nvkm-y += nvkm/engine/disp/oimmgp102.o
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+index dd2953b..9d90d8b 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+@@ -82,7 +82,7 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
+
+ if (mthd->addr) {
+ snprintf(cname_, sizeof(cname_), "%s %d",
+- mthd->name, chan->chid);
++ mthd->name, chan->chid.user);
+ cname = cname_;
+ }
+
+@@ -139,7 +139,7 @@ nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
+ if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
+ notify->size = sizeof(struct nvif_notify_uevent_rep);
+ notify->types = 1;
+- notify->index = chan->chid;
++ notify->index = chan->chid.user;
+ return 0;
+ }
+
+@@ -159,7 +159,7 @@ nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+ struct nv50_disp_chan *chan = nv50_disp_chan(object);
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+- *data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
++ *data = nvkm_rd32(device, 0x640000 + (chan->chid.user * 0x1000) + addr);
+ return 0;
+ }
+
+@@ -169,7 +169,7 @@ nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
+ struct nv50_disp_chan *chan = nv50_disp_chan(object);
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+- nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
++ nvkm_wr32(device, 0x640000 + (chan->chid.user * 0x1000) + addr, data);
+ return 0;
+ }
+
+@@ -196,7 +196,7 @@ nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ *addr = device->func->resource_addr(device, 0) +
+- 0x640000 + (chan->chid * 0x1000);
++ 0x640000 + (chan->chid.user * 0x1000);
+ *size = 0x001000;
+ return 0;
+ }
+@@ -243,8 +243,8 @@ nv50_disp_chan_dtor(struct nvkm_object *object)
+ {
+ struct nv50_disp_chan *chan = nv50_disp_chan(object);
+ struct nv50_disp *disp = chan->root->disp;
+- if (chan->chid >= 0)
+- disp->chan[chan->chid] = NULL;
++ if (chan->chid.user >= 0)
++ disp->chan[chan->chid.user] = NULL;
+ return chan->func->dtor ? chan->func->dtor(chan) : chan;
+ }
+
+@@ -263,7 +263,7 @@ nv50_disp_chan = {
+ int
+ nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
+ const struct nv50_disp_chan_mthd *mthd,
+- struct nv50_disp_root *root, int chid, int head,
++ struct nv50_disp_root *root, int ctrl, int user, int head,
+ const struct nvkm_oclass *oclass,
+ struct nv50_disp_chan *chan)
+ {
+@@ -273,21 +273,22 @@ nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
+ chan->func = func;
+ chan->mthd = mthd;
+ chan->root = root;
+- chan->chid = chid;
++ chan->chid.ctrl = ctrl;
++ chan->chid.user = user;
+ chan->head = head;
+
+- if (disp->chan[chan->chid]) {
+- chan->chid = -1;
++ if (disp->chan[chan->chid.user]) {
++ chan->chid.user = -1;
+ return -EBUSY;
+ }
+- disp->chan[chan->chid] = chan;
++ disp->chan[chan->chid.user] = chan;
+ return 0;
+ }
+
+ int
+ nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
+ const struct nv50_disp_chan_mthd *mthd,
+- struct nv50_disp_root *root, int chid, int head,
++ struct nv50_disp_root *root, int ctrl, int user, int head,
+ const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+ {
+@@ -297,5 +298,6 @@ nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
+ return -ENOMEM;
+ *pobject = &chan->object;
+
+- return nv50_disp_chan_ctor(func, mthd, root, chid, head, oclass, chan);
++ return nv50_disp_chan_ctor(func, mthd, root, ctrl, user,
++ head, oclass, chan);
+ }
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+index f5f683d..737b38f 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+@@ -7,7 +7,11 @@ struct nv50_disp_chan {
+ const struct nv50_disp_chan_func *func;
+ const struct nv50_disp_chan_mthd *mthd;
+ struct nv50_disp_root *root;
+- int chid;
++
++ struct {
++ int ctrl;
++ int user;
++ } chid;
+ int head;
+
+ struct nvkm_object object;
+@@ -25,11 +29,11 @@ struct nv50_disp_chan_func {
+
+ int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
+ const struct nv50_disp_chan_mthd *,
+- struct nv50_disp_root *, int chid, int head,
++ struct nv50_disp_root *, int ctrl, int user, int head,
+ const struct nvkm_oclass *, struct nv50_disp_chan *);
+ int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
+ const struct nv50_disp_chan_mthd *,
+- struct nv50_disp_root *, int chid, int head,
++ struct nv50_disp_root *, int ctrl, int user, int head,
+ const struct nvkm_oclass *, struct nvkm_object **);
+
+ extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
+@@ -90,13 +94,16 @@ extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd;
+ struct nv50_disp_pioc_oclass {
+ int (*ctor)(const struct nv50_disp_chan_func *,
+ const struct nv50_disp_chan_mthd *,
+- struct nv50_disp_root *, int chid,
++ struct nv50_disp_root *, int ctrl, int user,
+ const struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
+ struct nvkm_sclass base;
+ const struct nv50_disp_chan_func *func;
+ const struct nv50_disp_chan_mthd *mthd;
+- int chid;
++ struct {
++ int ctrl;
++ int user;
++ } chid;
+ };
+
+ extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass;
+@@ -114,15 +121,17 @@ extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass;
+ extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass;
+ extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
+
++extern const struct nv50_disp_pioc_oclass gp102_disp_oimm_oclass;
++extern const struct nv50_disp_pioc_oclass gp102_disp_curs_oclass;
+
+ int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
+ const struct nv50_disp_chan_mthd *,
+- struct nv50_disp_root *, int chid,
++ struct nv50_disp_root *, int ctrl, int user,
+ const struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
+ int nv50_disp_oimm_new(const struct nv50_disp_chan_func *,
+ const struct nv50_disp_chan_mthd *,
+- struct nv50_disp_root *, int chid,
++ struct nv50_disp_root *, int ctrl, int user,
+ const struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
+ #endif
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
+index dd99fc7..fa781b5 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
+@@ -33,5 +33,5 @@ g84_disp_curs_oclass = {
+ .base.maxver = 0,
+ .ctor = nv50_disp_curs_new,
+ .func = &nv50_disp_pioc_func,
+- .chid = 7,
++ .chid = { 7, 7 },
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
+index 2a1574e..2be6fb0 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
+@@ -33,5 +33,5 @@ gf119_disp_curs_oclass = {
+ .base.maxver = 0,
+ .ctor = nv50_disp_curs_new,
+ .func = &gf119_disp_pioc_func,
+- .chid = 13,
++ .chid = { 13, 13 },
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
+index 28e8f06..2a99db4 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
+@@ -33,5 +33,5 @@ gk104_disp_curs_oclass = {
+ .base.maxver = 0,
+ .ctor = nv50_disp_curs_new,
+ .func = &gf119_disp_pioc_func,
+- .chid = 13,
++ .chid = { 13, 13 },
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
+new file mode 100644
+index 0000000..e958210
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
+@@ -0,0 +1,37 @@
++/*
++ * Copyright 2016 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs <bskeggs@redhat.com>
++ */
++#include "channv50.h"
++#include "rootnv50.h"
++
++#include <nvif/class.h>
++
++const struct nv50_disp_pioc_oclass
++gp102_disp_curs_oclass = {
++ .base.oclass = GK104_DISP_CURSOR,
++ .base.minver = 0,
++ .base.maxver = 0,
++ .ctor = nv50_disp_curs_new,
++ .func = &gf119_disp_pioc_func,
++ .chid = { 13, 17 },
++};
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
+index d8a4b9c..00a7f35 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
+@@ -33,5 +33,5 @@ gt215_disp_curs_oclass = {
+ .base.maxver = 0,
+ .ctor = nv50_disp_curs_new,
+ .func = &nv50_disp_pioc_func,
+- .chid = 7,
++ .chid = { 7, 7 },
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
+index 8b13204..82ff82d 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
+@@ -33,7 +33,7 @@
+ int
+ nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
+ const struct nv50_disp_chan_mthd *mthd,
+- struct nv50_disp_root *root, int chid,
++ struct nv50_disp_root *root, int ctrl, int user,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+ {
+@@ -54,7 +54,7 @@ nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
+ } else
+ return ret;
+
+- return nv50_disp_chan_new_(func, mthd, root, chid + head,
++ return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
+ head, oclass, pobject);
+ }
+
+@@ -65,5 +65,5 @@ nv50_disp_curs_oclass = {
+ .base.maxver = 0,
+ .ctor = nv50_disp_curs_new,
+ .func = &nv50_disp_pioc_func,
+- .chid = 7,
++ .chid = { 7, 7 },
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+index a57f7ce..ce7cd74 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+@@ -32,8 +32,8 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
+ struct nvkm_object *object, u32 handle)
+ {
+ return nvkm_ramht_insert(chan->base.root->ramht, object,
+- chan->base.chid, -9, handle,
+- chan->base.chid << 27 | 0x00000001);
++ chan->base.chid.user, -9, handle,
++ chan->base.chid.user << 27 | 0x00000001);
+ }
+
+ void
+@@ -42,22 +42,23 @@ gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->base.chid;
++ int ctrl = chan->base.chid.ctrl;
++ int user = chan->base.chid.user;
+
+ /* deactivate channel */
+- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
+- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
++ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000);
++ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000003, 0x00000000);
+ if (nvkm_msec(device, 2000,
+- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000))
++ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x001e0000))
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d fini: %08x\n", chid,
+- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d fini: %08x\n", user,
++ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
+ }
+
+ /* disable error reporting and completion notification */
+- nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
+- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
++ nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
++ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
+ }
+
+ static int
+@@ -66,26 +67,27 @@ gf119_disp_dmac_init(struct nv50_disp_dmac *chan)
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->base.chid;
++ int ctrl = chan->base.chid.ctrl;
++ int user = chan->base.chid.user;
+
+ /* enable error reporting */
+- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
++ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
+
+ /* initialise channel for dma command submission */
+- nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push);
+- nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000);
+- nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001);
+- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+- nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
++ nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push);
++ nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000);
++ nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001);
++ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
++ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
++ nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
++ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d init: %08x\n", chid,
+- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d init: %08x\n", user,
++ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
+ return -EBUSY;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
+index ad24c2c..d26d3b4 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
+@@ -32,26 +32,27 @@ gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->base.chid;
++ int ctrl = chan->base.chid.ctrl;
++ int user = chan->base.chid.user;
+
+ /* enable error reporting */
+- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
++ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
+
+ /* initialise channel for dma command submission */
+- nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push);
+- nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000);
+- nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001);
+- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+- nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
++ nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push);
++ nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000);
++ nvkm_wr32(device, 0x61149c + (ctrl * 0x0010), 0x00000001);
++ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
++ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
++ nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
++ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d init: %08x\n", chid,
+- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d init: %08x\n", user,
++ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
+ return -EBUSY;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+index 9c6645a..0a1381a 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+@@ -149,7 +149,7 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
+ chan->func = func;
+
+ ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
+- chid, head, oclass, &chan->base);
++ chid, chid, head, oclass, &chan->base);
+ if (ret)
+ return ret;
+
+@@ -179,9 +179,9 @@ nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
+ struct nvkm_object *object, u32 handle)
+ {
+ return nvkm_ramht_insert(chan->base.root->ramht, object,
+- chan->base.chid, -10, handle,
+- chan->base.chid << 28 |
+- chan->base.chid);
++ chan->base.chid.user, -10, handle,
++ chan->base.chid.user << 28 |
++ chan->base.chid.user);
+ }
+
+ static void
+@@ -190,21 +190,22 @@ nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->base.chid;
++ int ctrl = chan->base.chid.ctrl;
++ int user = chan->base.chid.user;
+
+ /* deactivate channel */
+- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
+- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
++ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
++ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000);
+ if (nvkm_msec(device, 2000,
+- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
++ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000))
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
+- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
++ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
+ }
+
+ /* disable error reporting and completion notifications */
+- nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
++ nvkm_mask(device, 0x610028, 0x00010001 << user, 0x00000000 << user);
+ }
+
+ static int
+@@ -213,26 +214,27 @@ nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->base.chid;
++ int ctrl = chan->base.chid.ctrl;
++ int user = chan->base.chid.user;
+
+ /* enable error reporting */
+- nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
++ nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user);
+
+ /* initialise channel for dma command submission */
+- nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push);
+- nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
+- nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
+- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
+- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+- nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);
++ nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
++ nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
++ nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
++ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
++ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
++ nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
++ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000))
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
+- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
++ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
+ return -EBUSY;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
+index 54a4ae8..5ad5d0f 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
+@@ -33,5 +33,5 @@ g84_disp_oimm_oclass = {
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &nv50_disp_pioc_func,
+- .chid = 5,
++ .chid = { 5, 5 },
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
+index c658db5..1f9fd34 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
+@@ -33,5 +33,5 @@ gf119_disp_oimm_oclass = {
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &gf119_disp_pioc_func,
+- .chid = 9,
++ .chid = { 9, 9 },
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
+index b1fde8c..0c09fe8 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
+@@ -33,5 +33,5 @@ gk104_disp_oimm_oclass = {
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &gf119_disp_pioc_func,
+- .chid = 9,
++ .chid = { 9, 9 },
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
+new file mode 100644
+index 0000000..abf8236
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
+@@ -0,0 +1,37 @@
++/*
++ * Copyright 2016 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs <bskeggs@redhat.com>
++ */
++#include "channv50.h"
++#include "rootnv50.h"
++
++#include <nvif/class.h>
++
++const struct nv50_disp_pioc_oclass
++gp102_disp_oimm_oclass = {
++ .base.oclass = GK104_DISP_OVERLAY,
++ .base.minver = 0,
++ .base.maxver = 0,
++ .ctor = nv50_disp_oimm_new,
++ .func = &gf119_disp_pioc_func,
++ .chid = { 9, 13 },
++};
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
+index f4e7eb3..1281db2 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
+@@ -33,5 +33,5 @@ gt215_disp_oimm_oclass = {
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &nv50_disp_pioc_func,
+- .chid = 5,
++ .chid = { 5, 5 },
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
+index 3940b9c..07540f3 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
+@@ -33,7 +33,7 @@
+ int
+ nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
+ const struct nv50_disp_chan_mthd *mthd,
+- struct nv50_disp_root *root, int chid,
++ struct nv50_disp_root *root, int ctrl, int user,
+ const struct nvkm_oclass *oclass, void *data, u32 size,
+ struct nvkm_object **pobject)
+ {
+@@ -54,7 +54,7 @@ nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
+ } else
+ return ret;
+
+- return nv50_disp_chan_new_(func, mthd, root, chid + head,
++ return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
+ head, oclass, pobject);
+ }
+
+@@ -65,5 +65,5 @@ nv50_disp_oimm_oclass = {
+ .base.maxver = 0,
+ .ctor = nv50_disp_oimm_new,
+ .func = &nv50_disp_pioc_func,
+- .chid = 5,
++ .chid = { 5, 5 },
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
+index a625a98..0abaa64 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
+@@ -32,20 +32,21 @@ gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->chid;
++ int ctrl = chan->chid.ctrl;
++ int user = chan->chid.user;
+
+- nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
++ nvkm_mask(device, 0x610490 + (ctrl * 0x10), 0x00000001, 0x00000000);
+ if (nvkm_msec(device, 2000,
+- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000))
++ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x00030000))
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d fini: %08x\n", chid,
+- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d fini: %08x\n", user,
++ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
+ }
+
+ /* disable error reporting and completion notification */
+- nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
+- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
++ nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
++ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
+ }
+
+ static int
+@@ -54,20 +55,21 @@ gf119_disp_pioc_init(struct nv50_disp_chan *chan)
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->chid;
++ int ctrl = chan->chid.ctrl;
++ int user = chan->chid.user;
+
+ /* enable error reporting */
+- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
++ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
+
+ /* activate channel */
+- nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001);
++ nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001);
+ if (nvkm_msec(device, 2000,
+- u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10));
++ u32 tmp = nvkm_rd32(device, 0x610490 + (ctrl * 0x10));
+ if ((tmp & 0x00030000) == 0x00010000)
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d init: %08x\n", chid,
+- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d init: %08x\n", user,
++ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
+ return -EBUSY;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
+index 9d2618d..0211e0e 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
+@@ -32,15 +32,16 @@ nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->chid;
++ int ctrl = chan->chid.ctrl;
++ int user = chan->chid.user;
+
+- nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
++ nvkm_mask(device, 0x610200 + (ctrl * 0x10), 0x00000001, 0x00000000);
+ if (nvkm_msec(device, 2000,
+- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
++ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
+- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d timeout: %08x\n", user,
++ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
+ }
+ }
+
+@@ -50,26 +51,27 @@ nv50_disp_pioc_init(struct nv50_disp_chan *chan)
+ struct nv50_disp *disp = chan->root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+- int chid = chan->chid;
++ int ctrl = chan->chid.ctrl;
++ int user = chan->chid.user;
+
+- nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
++ nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00002000);
+ if (nvkm_msec(device, 2000,
+- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
++ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
+- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d timeout0: %08x\n", user,
++ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
+ return -EBUSY;
+ }
+
+- nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
++ nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00000001);
+ if (nvkm_msec(device, 2000,
+- u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
++ u32 tmp = nvkm_rd32(device, 0x610200 + (ctrl * 0x10));
+ if ((tmp & 0x00030000) == 0x00010000)
+ break;
+ ) < 0) {
+- nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
+- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
++ nvkm_error(subdev, "ch %d timeout1: %08x\n", user,
++ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
+ return -EBUSY;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
+index 8443e04..b053b29 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
+@@ -36,8 +36,8 @@ gp104_disp_root = {
+ &gp104_disp_ovly_oclass,
+ },
+ .pioc = {
+- &gk104_disp_oimm_oclass,
+- &gk104_disp_curs_oclass,
++ &gp102_disp_oimm_oclass,
++ &gp102_disp_curs_oclass,
+ },
+ };
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+index 2f9cecd..05c829a 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+@@ -207,8 +207,8 @@ nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass,
+ {
+ const struct nv50_disp_pioc_oclass *sclass = oclass->priv;
+ struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
+- return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
+- oclass, data, size, pobject);
++ return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid.ctrl,
++ sclass->chid.user, oclass, data, size, pobject);
+ }
+
+ static int
+diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
+index d544ff9..7aadce1 100644
+--- a/drivers/gpu/drm/vc4/vc4_crtc.c
++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
+@@ -83,8 +83,7 @@ struct vc4_crtc_data {
+ /* Which channel of the HVS this pixelvalve sources from. */
+ int hvs_channel;
+
+- enum vc4_encoder_type encoder0_type;
+- enum vc4_encoder_type encoder1_type;
++ enum vc4_encoder_type encoder_types[4];
+ };
+
+ #define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
+@@ -669,6 +668,14 @@ void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id)
+ CRTC_WRITE(PV_INTEN, 0);
+ }
+
++/* Must be called with the event lock held */
++bool vc4_event_pending(struct drm_crtc *crtc)
++{
++ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
++
++ return !!vc4_crtc->event;
++}
++
+ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
+ {
+ struct drm_crtc *crtc = &vc4_crtc->base;
+@@ -859,20 +866,26 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
+
+ static const struct vc4_crtc_data pv0_data = {
+ .hvs_channel = 0,
+- .encoder0_type = VC4_ENCODER_TYPE_DSI0,
+- .encoder1_type = VC4_ENCODER_TYPE_DPI,
++ .encoder_types = {
++ [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0,
++ [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI,
++ },
+ };
+
+ static const struct vc4_crtc_data pv1_data = {
+ .hvs_channel = 2,
+- .encoder0_type = VC4_ENCODER_TYPE_DSI1,
+- .encoder1_type = VC4_ENCODER_TYPE_SMI,
++ .encoder_types = {
++ [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1,
++ [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI,
++ },
+ };
+
+ static const struct vc4_crtc_data pv2_data = {
+ .hvs_channel = 1,
+- .encoder0_type = VC4_ENCODER_TYPE_VEC,
+- .encoder1_type = VC4_ENCODER_TYPE_HDMI,
++ .encoder_types = {
++ [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI,
++ [PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
++ },
+ };
+
+ static const struct of_device_id vc4_crtc_dt_match[] = {
+@@ -886,17 +899,20 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
+ struct drm_crtc *crtc)
+ {
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
++ const struct vc4_crtc_data *crtc_data = vc4_crtc->data;
++ const enum vc4_encoder_type *encoder_types = crtc_data->encoder_types;
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, drm) {
+ struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+-
+- if (vc4_encoder->type == vc4_crtc->data->encoder0_type) {
+- vc4_encoder->clock_select = 0;
+- encoder->possible_crtcs |= drm_crtc_mask(crtc);
+- } else if (vc4_encoder->type == vc4_crtc->data->encoder1_type) {
+- vc4_encoder->clock_select = 1;
+- encoder->possible_crtcs |= drm_crtc_mask(crtc);
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(crtc_data->encoder_types); i++) {
++ if (vc4_encoder->type == encoder_types[i]) {
++ vc4_encoder->clock_select = i;
++ encoder->possible_crtcs |= drm_crtc_mask(crtc);
++ break;
++ }
+ }
+ }
+ }
+diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
+index 7c1e4d9..50a55ef 100644
+--- a/drivers/gpu/drm/vc4/vc4_drv.h
++++ b/drivers/gpu/drm/vc4/vc4_drv.h
+@@ -194,6 +194,7 @@ to_vc4_plane(struct drm_plane *plane)
+ }
+
+ enum vc4_encoder_type {
++ VC4_ENCODER_TYPE_NONE,
+ VC4_ENCODER_TYPE_HDMI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI0,
+@@ -440,6 +441,7 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
+ extern struct platform_driver vc4_crtc_driver;
+ int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
+ void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
++bool vc4_event_pending(struct drm_crtc *crtc);
+ int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
+ int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
+ unsigned int flags, int *vpos, int *hpos,
+diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
+index c1f65c6..67af2af 100644
+--- a/drivers/gpu/drm/vc4/vc4_kms.c
++++ b/drivers/gpu/drm/vc4/vc4_kms.c
+@@ -119,17 +119,34 @@ static int vc4_atomic_commit(struct drm_device *dev,
+
+ /* Make sure that any outstanding modesets have finished. */
+ if (nonblock) {
+- ret = down_trylock(&vc4->async_modeset);
+- if (ret) {
++ struct drm_crtc *crtc;
++ struct drm_crtc_state *crtc_state;
++ unsigned long flags;
++ bool busy = false;
++
++ /*
++ * If there's an undispatched event to send then we're
++ * obviously still busy. If there isn't, then we can
++ * unconditionally wait for the semaphore because it
++ * shouldn't be contended (for long).
++ *
++ * This is to prevent a race where queuing a new flip
++ * from userspace immediately on receipt of an event
++ * beats our clean-up and returns EBUSY.
++ */
++ spin_lock_irqsave(&dev->event_lock, flags);
++ for_each_crtc_in_state(state, crtc, crtc_state, i)
++ busy |= vc4_event_pending(crtc);
++ spin_unlock_irqrestore(&dev->event_lock, flags);
++ if (busy) {
+ kfree(c);
+ return -EBUSY;
+ }
+- } else {
+- ret = down_interruptible(&vc4->async_modeset);
+- if (ret) {
+- kfree(c);
+- return ret;
+- }
++ }
++ ret = down_interruptible(&vc4->async_modeset);
++ if (ret) {
++ kfree(c);
++ return ret;
+ }
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
+index 1aa44c2..39f6886 100644
+--- a/drivers/gpu/drm/vc4/vc4_regs.h
++++ b/drivers/gpu/drm/vc4/vc4_regs.h
+@@ -177,8 +177,9 @@
+ # define PV_CONTROL_WAIT_HSTART BIT(12)
+ # define PV_CONTROL_PIXEL_REP_MASK VC4_MASK(5, 4)
+ # define PV_CONTROL_PIXEL_REP_SHIFT 4
+-# define PV_CONTROL_CLK_SELECT_DSI_VEC 0
++# define PV_CONTROL_CLK_SELECT_DSI 0
+ # define PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI 1
++# define PV_CONTROL_CLK_SELECT_VEC 2
+ # define PV_CONTROL_CLK_SELECT_MASK VC4_MASK(3, 2)
+ # define PV_CONTROL_CLK_SELECT_SHIFT 2
+ # define PV_CONTROL_FIFO_CLR BIT(1)
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index c5dee30..acb9d25 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -1598,6 +1598,14 @@ static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+ its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+ }
+
++static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
++{
++ struct its_node *its = data;
++
++ /* On QDF2400, the size of the ITE is 16Bytes */
++ its->ite_size = 16;
++}
++
+ static const struct gic_quirk its_quirks[] = {
+ #ifdef CONFIG_CAVIUM_ERRATUM_22375
+ {
+@@ -1615,6 +1623,14 @@ static const struct gic_quirk its_quirks[] = {
+ .init = its_enable_quirk_cavium_23144,
+ },
+ #endif
++#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
++ {
++ .desc = "ITS: QDF2400 erratum 0065",
++ .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
++ .mask = 0xffffffff,
++ .init = its_enable_quirk_qdf2400_e0065,
++ },
++#endif
+ {
+ }
+ };
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 302e284..cde43b6 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1595,6 +1595,114 @@ static const char *uvc_print_chain(struct uvc_video_chain *chain)
+ return buffer;
+ }
+
++static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev)
++{
++ struct uvc_video_chain *chain;
++
++ chain = kzalloc(sizeof(*chain), GFP_KERNEL);
++ if (chain == NULL)
++ return NULL;
++
++ INIT_LIST_HEAD(&chain->entities);
++ mutex_init(&chain->ctrl_mutex);
++ chain->dev = dev;
++ v4l2_prio_init(&chain->prio);
++
++ return chain;
++}
++
++/*
++ * Fallback heuristic for devices that don't connect units and terminals in a
++ * valid chain.
++ *
++ * Some devices have invalid baSourceID references, causing uvc_scan_chain()
++ * to fail, but if we just take the entities we can find and put them together
++ * in the most sensible chain we can think of, turns out they do work anyway.
++ * Note: This heuristic assumes there is a single chain.
++ *
++ * At the time of writing, devices known to have such a broken chain are
++ * - Acer Integrated Camera (5986:055a)
++ * - Realtek rtl157a7 (0bda:57a7)
++ */
++static int uvc_scan_fallback(struct uvc_device *dev)
++{
++ struct uvc_video_chain *chain;
++ struct uvc_entity *iterm = NULL;
++ struct uvc_entity *oterm = NULL;
++ struct uvc_entity *entity;
++ struct uvc_entity *prev;
++
++ /*
++ * Start by locating the input and output terminals. We only support
++ * devices with exactly one of each for now.
++ */
++ list_for_each_entry(entity, &dev->entities, list) {
++ if (UVC_ENTITY_IS_ITERM(entity)) {
++ if (iterm)
++ return -EINVAL;
++ iterm = entity;
++ }
++
++ if (UVC_ENTITY_IS_OTERM(entity)) {
++ if (oterm)
++ return -EINVAL;
++ oterm = entity;
++ }
++ }
++
++ if (iterm == NULL || oterm == NULL)
++ return -EINVAL;
++
++ /* Allocate the chain and fill it. */
++ chain = uvc_alloc_chain(dev);
++ if (chain == NULL)
++ return -ENOMEM;
++
++ if (uvc_scan_chain_entity(chain, oterm) < 0)
++ goto error;
++
++ prev = oterm;
++
++ /*
++ * Add all Processing and Extension Units with two pads. The order
++ * doesn't matter much, use reverse list traversal to connect units in
++ * UVC descriptor order as we build the chain from output to input. This
++ * leads to units appearing in the order meant by the manufacturer for
++ * the cameras known to require this heuristic.
++ */
++ list_for_each_entry_reverse(entity, &dev->entities, list) {
++ if (entity->type != UVC_VC_PROCESSING_UNIT &&
++ entity->type != UVC_VC_EXTENSION_UNIT)
++ continue;
++
++ if (entity->num_pads != 2)
++ continue;
++
++ if (uvc_scan_chain_entity(chain, entity) < 0)
++ goto error;
++
++ prev->baSourceID[0] = entity->id;
++ prev = entity;
++ }
++
++ if (uvc_scan_chain_entity(chain, iterm) < 0)
++ goto error;
++
++ prev->baSourceID[0] = iterm->id;
++
++ list_add_tail(&chain->list, &dev->chains);
++
++ uvc_trace(UVC_TRACE_PROBE,
++ "Found a video chain by fallback heuristic (%s).\n",
++ uvc_print_chain(chain));
++
++ return 0;
++
++error:
++ kfree(chain);
++ return -EINVAL;
++}
++
+ /*
+ * Scan the device for video chains and register video devices.
+ *
+@@ -1617,15 +1725,10 @@ static int uvc_scan_device(struct uvc_device *dev)
+ if (term->chain.next || term->chain.prev)
+ continue;
+
+- chain = kzalloc(sizeof(*chain), GFP_KERNEL);
++ chain = uvc_alloc_chain(dev);
+ if (chain == NULL)
+ return -ENOMEM;
+
+- INIT_LIST_HEAD(&chain->entities);
+- mutex_init(&chain->ctrl_mutex);
+- chain->dev = dev;
+- v4l2_prio_init(&chain->prio);
+-
+ term->flags |= UVC_ENTITY_FLAG_DEFAULT;
+
+ if (uvc_scan_chain(chain, term) < 0) {
+@@ -1639,6 +1742,9 @@ static int uvc_scan_device(struct uvc_device *dev)
+ list_add_tail(&chain->list, &dev->chains);
+ }
+
++ if (list_empty(&dev->chains))
++ uvc_scan_fallback(dev);
++
+ if (list_empty(&dev->chains)) {
+ uvc_printk(KERN_INFO, "No valid video chain found.\n");
+ return -1;
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index a36022b..03dca73 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1181,7 +1181,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
+
+ static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
+ {
++ struct tcphdr *tcph;
+ int offset = 0;
++ int hdr_len;
+
+ /* only TCP packets will be aggregated */
+ if (skb->protocol == htons(ETH_P_IP)) {
+@@ -1208,14 +1210,20 @@ static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
+ /* if mss is not set through Large Packet bit/mss in rx buffer,
+ * expect that the mss will be written to the tcp header checksum.
+ */
++ tcph = (struct tcphdr *)(skb->data + offset);
+ if (lrg_pkt) {
+ skb_shinfo(skb)->gso_size = mss;
+ } else if (offset) {
+- struct tcphdr *tcph = (struct tcphdr *)(skb->data + offset);
+-
+ skb_shinfo(skb)->gso_size = ntohs(tcph->check);
+ tcph->check = 0;
+ }
++
++ if (skb_shinfo(skb)->gso_size) {
++ hdr_len = offset + tcph->doff * 4;
++ skb_shinfo(skb)->gso_segs =
++ DIV_ROUND_UP(skb->len - hdr_len,
++ skb_shinfo(skb)->gso_size);
++ }
+ }
+
+ static int ibmveth_poll(struct napi_struct *napi, int budget)
+diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
+index 5b54254..2788a54 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
++++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
+@@ -77,6 +77,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw)
+ s32 ret_val = 0;
+ u16 phy_id;
+
++ /* ensure PHY page selection to fix misconfigured i210 */
++ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
++ phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0);
++
+ ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+ if (ret_val)
+ goto out;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index b3067137..d4fa851 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -81,6 +81,7 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
+ {
+ priv->params.rq_wq_type = rq_type;
++ priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+@@ -92,6 +93,10 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
++
++ /* Extra room needed for build_skb */
++ priv->params.lro_wqe_sz -= MLX5_RX_HEADROOM +
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
+ priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+ BIT(priv->params.log_rq_size));
+@@ -3473,12 +3478,6 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
+ mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
+
+- priv->params.lro_wqe_sz =
+- MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ -
+- /* Extra room needed for build_skb */
+- MLX5_RX_HEADROOM -
+- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+-
+ /* Initialize pflags */
+ MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+@@ -3936,6 +3935,19 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
+ }
+ }
+
++static void mlx5e_unregister_vport_rep(struct mlx5_core_dev *mdev)
++{
++ struct mlx5_eswitch *esw = mdev->priv.eswitch;
++ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
++ int vport;
++
++ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
++ return;
++
++ for (vport = 1; vport < total_vfs; vport++)
++ mlx5_eswitch_unregister_vport_rep(esw, vport);
++}
++
+ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
+ {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+@@ -3983,6 +3995,7 @@ static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
+ return err;
+ }
+
++ mlx5e_register_vport_rep(mdev);
+ return 0;
+ }
+
+@@ -3994,6 +4007,7 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
+ if (!netif_device_present(netdev))
+ return;
+
++ mlx5e_unregister_vport_rep(mdev);
+ mlx5e_detach_netdev(mdev, netdev);
+ mlx5e_destroy_mdev_resources(mdev);
+ }
+@@ -4012,8 +4026,6 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
+ if (err)
+ return NULL;
+
+- mlx5e_register_vport_rep(mdev);
+-
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ ppriv = &esw->offloads.vport_reps[0];
+
+@@ -4065,13 +4077,7 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
+
+ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
+ {
+- struct mlx5_eswitch *esw = mdev->priv.eswitch;
+- int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+ struct mlx5e_priv *priv = vpriv;
+- int vport;
+-
+- for (vport = 1; vport < total_vfs; vport++)
+- mlx5_eswitch_unregister_vport_rep(esw, vport);
+
+ unregister_netdev(priv->netdev);
+ mlx5e_detach(mdev, vpriv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index e7b2158..796bdf0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -92,19 +92,18 @@ static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
+ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq, u32 cqcc)
+ {
+- u16 wqe_cnt_step;
+-
+ cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
+ cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum;
+ cq->title.op_own &= 0xf0;
+ cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz);
+ cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter);
+
+- wqe_cnt_step =
+- rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+- mpwrq_get_cqe_consumed_strides(&cq->title) : 1;
+- cq->decmprs_wqe_counter =
+- (cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1;
++ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
++ cq->decmprs_wqe_counter +=
++ mpwrq_get_cqe_consumed_strides(&cq->title);
++ else
++ cq->decmprs_wqe_counter =
++ (cq->decmprs_wqe_counter + 1) & rq->wq.sz_m1;
+ }
+
+ static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+index e83072d..6905630 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+@@ -500,30 +500,40 @@ static int
+ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
+ struct mlxsw_sp_prefix_usage *req_prefix_usage)
+ {
+- struct mlxsw_sp_lpm_tree *lpm_tree;
++ struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree;
++ struct mlxsw_sp_lpm_tree *new_tree;
++ int err;
+
+- if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
+- &vr->lpm_tree->prefix_usage))
++ if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
+ return 0;
+
+- lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
++ new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
+ vr->proto, false);
+- if (IS_ERR(lpm_tree)) {
++ if (IS_ERR(new_tree)) {
+ /* We failed to get a tree according to the required
+ * prefix usage. However, the current tree might be still good
+ * for us if our requirement is subset of the prefixes used
+ * in the tree.
+ */
+ if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
+- &vr->lpm_tree->prefix_usage))
++ &lpm_tree->prefix_usage))
+ return 0;
+- return PTR_ERR(lpm_tree);
++ return PTR_ERR(new_tree);
+ }
+
+- mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
+- mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
++ /* Prevent packet loss by overwriting existing binding */
++ vr->lpm_tree = new_tree;
++ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
++ if (err)
++ goto err_tree_bind;
++ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
++
++ return 0;
++
++err_tree_bind:
+ vr->lpm_tree = lpm_tree;
+- return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
++ mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
++ return err;
+ }
+
+ static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 8b4822a..3c1f89a 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -1039,16 +1039,22 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct geneve_dev *geneve = netdev_priv(dev);
+ struct ip_tunnel_info *info = NULL;
++ int err;
+
+ if (geneve->collect_md)
+ info = skb_tunnel_info(skb);
+
++ rcu_read_lock();
+ #if IS_ENABLED(CONFIG_IPV6)
+ if ((info && ip_tunnel_info_af(info) == AF_INET6) ||
+ (!info && geneve->remote.sa.sa_family == AF_INET6))
+- return geneve6_xmit_skb(skb, dev, info);
++ err = geneve6_xmit_skb(skb, dev, info);
++ else
+ #endif
+- return geneve_xmit_skb(skb, dev, info);
++ err = geneve_xmit_skb(skb, dev, info);
++ rcu_read_unlock();
++
++ return err;
+ }
+
+ static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index f424b86..201ffa5 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -611,14 +611,18 @@ void phy_start_machine(struct phy_device *phydev)
+ * phy_trigger_machine - trigger the state machine to run
+ *
+ * @phydev: the phy_device struct
++ * @sync: indicate whether we should wait for the workqueue cancelation
+ *
+ * Description: There has been a change in state which requires that the
+ * state machine runs.
+ */
+
+-static void phy_trigger_machine(struct phy_device *phydev)
++static void phy_trigger_machine(struct phy_device *phydev, bool sync)
+ {
+- cancel_delayed_work_sync(&phydev->state_queue);
++ if (sync)
++ cancel_delayed_work_sync(&phydev->state_queue);
++ else
++ cancel_delayed_work(&phydev->state_queue);
+ queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
+ }
+
+@@ -655,7 +659,7 @@ static void phy_error(struct phy_device *phydev)
+ phydev->state = PHY_HALTED;
+ mutex_unlock(&phydev->lock);
+
+- phy_trigger_machine(phydev);
++ phy_trigger_machine(phydev, false);
+ }
+
+ /**
+@@ -817,7 +821,7 @@ void phy_change(struct work_struct *work)
+ }
+
+ /* reschedule state queue work to run as soon as possible */
+- phy_trigger_machine(phydev);
++ phy_trigger_machine(phydev, true);
+ return;
+
+ ignore:
+@@ -907,7 +911,7 @@ void phy_start(struct phy_device *phydev)
+ if (do_resume)
+ phy_resume(phydev);
+
+- phy_trigger_machine(phydev);
++ phy_trigger_machine(phydev, true);
+ }
+ EXPORT_SYMBOL(phy_start);
+
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index b31aca8..a931b73 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -819,7 +819,18 @@ static void tun_net_uninit(struct net_device *dev)
+ /* Net device open. */
+ static int tun_net_open(struct net_device *dev)
+ {
++ struct tun_struct *tun = netdev_priv(dev);
++ int i;
++
+ netif_tx_start_all_queues(dev);
++
++ for (i = 0; i < tun->numqueues; i++) {
++ struct tun_file *tfile;
++
++ tfile = rtnl_dereference(tun->tfiles[i]);
++ tfile->socket.sk->sk_write_space(tfile->socket.sk);
++ }
++
+ return 0;
+ }
+
+@@ -1116,9 +1127,10 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
+ if (!skb_array_empty(&tfile->tx_array))
+ mask |= POLLIN | POLLRDNORM;
+
+- if (sock_writeable(sk) ||
+- (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+- sock_writeable(sk)))
++ if (tun->dev->flags & IFF_UP &&
++ (sock_writeable(sk) ||
++ (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
++ sock_writeable(sk))))
+ mask |= POLLOUT | POLLWRNORM;
+
+ if (tun->dev->reg_state != NETREG_REGISTERED)
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 95cf1d8..bc744ac 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -346,6 +346,7 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
+
+ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
++ int len = skb->len;
+ netdev_tx_t ret = is_ip_tx_frame(skb, dev);
+
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+@@ -353,7 +354,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ u64_stats_update_begin(&dstats->syncp);
+ dstats->tx_pkts++;
+- dstats->tx_bytes += skb->len;
++ dstats->tx_bytes += len;
+ u64_stats_update_end(&dstats->syncp);
+ } else {
+ this_cpu_inc(dev->dstats->tx_drps);
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index d4f495b..3c4c2cf 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1942,7 +1942,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ const struct iphdr *old_iph;
+ union vxlan_addr *dst;
+ union vxlan_addr remote_ip, local_ip;
+- union vxlan_addr *src;
+ struct vxlan_metadata _md;
+ struct vxlan_metadata *md = &_md;
+ __be16 src_port = 0, dst_port;
+@@ -1956,11 +1955,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+
+ info = skb_tunnel_info(skb);
+
++ rcu_read_lock();
+ if (rdst) {
+ dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
+ vni = rdst->remote_vni;
+ dst = &rdst->remote_ip;
+- src = &vxlan->cfg.saddr;
++ local_ip = vxlan->cfg.saddr;
+ dst_cache = &rdst->dst_cache;
+ } else {
+ if (!info) {
+@@ -1979,7 +1979,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
+ }
+ dst = &remote_ip;
+- src = &local_ip;
+ dst_cache = &info->dst_cache;
+ }
+
+@@ -1987,7 +1986,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ if (did_rsc) {
+ /* short-circuited back to local bridge */
+ vxlan_encap_bypass(skb, vxlan, vxlan);
+- return;
++ goto out_unlock;
+ }
+ goto drop;
+ }
+@@ -2028,7 +2027,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ rt = vxlan_get_route(vxlan, skb,
+ rdst ? rdst->remote_ifindex : 0, tos,
+ dst->sin.sin_addr.s_addr,
+- &src->sin.sin_addr.s_addr,
++ &local_ip.sin.sin_addr.s_addr,
+ dst_cache, info);
+ if (IS_ERR(rt)) {
+ netdev_dbg(dev, "no route to %pI4\n",
+@@ -2056,7 +2055,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ if (!dst_vxlan)
+ goto tx_error;
+ vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+- return;
++ goto out_unlock;
+ }
+
+ if (!info)
+@@ -2071,7 +2070,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ if (err < 0)
+ goto xmit_tx_error;
+
+- udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr,
++ udp_tunnel_xmit_skb(rt, sk, skb, local_ip.sin.sin_addr.s_addr,
+ dst->sin.sin_addr.s_addr, tos, ttl, df,
+ src_port, dst_port, xnet, !udp_sum);
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -2087,7 +2086,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ ndst = vxlan6_get_route(vxlan, skb,
+ rdst ? rdst->remote_ifindex : 0, tos,
+ label, &dst->sin6.sin6_addr,
+- &src->sin6.sin6_addr,
++ &local_ip.sin6.sin6_addr,
+ dst_cache, info);
+ if (IS_ERR(ndst)) {
+ netdev_dbg(dev, "no route to %pI6\n",
+@@ -2117,7 +2116,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ if (!dst_vxlan)
+ goto tx_error;
+ vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+- return;
++ goto out_unlock;
+ }
+
+ if (!info)
+@@ -2131,15 +2130,16 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ if (err < 0) {
+ dst_release(ndst);
+ dev->stats.tx_errors++;
+- return;
++ goto out_unlock;
+ }
+ udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
+- &src->sin6.sin6_addr,
++ &local_ip.sin6.sin6_addr,
+ &dst->sin6.sin6_addr, tos, ttl,
+ label, src_port, dst_port, !udp_sum);
+ #endif
+ }
+-
++out_unlock:
++ rcu_read_unlock();
+ return;
+
+ drop:
+@@ -2155,6 +2155,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ dev->stats.tx_errors++;
+ tx_free:
+ dev_kfree_skb(skb);
++ rcu_read_unlock();
+ }
+
+ /* Transmit local packets over Vxlan
+@@ -2637,7 +2638,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
+
+ if (data[IFLA_VXLAN_ID]) {
+ __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
+- if (id >= VXLAN_VID_MASK)
++ if (id >= VXLAN_N_VID)
+ return -ERANGE;
+ }
+
+diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
+index e30f05c..4722782 100644
+--- a/drivers/pci/iov.c
++++ b/drivers/pci/iov.c
+@@ -306,13 +306,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
+ return rc;
+ }
+
+- pci_iov_set_numvfs(dev, nr_virtfn);
+- iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
+- pci_cfg_access_lock(dev);
+- pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+- msleep(100);
+- pci_cfg_access_unlock(dev);
+-
+ iov->initial_VFs = initial;
+ if (nr_virtfn < initial)
+ initial = nr_virtfn;
+@@ -323,6 +316,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
+ goto err_pcibios;
+ }
+
++ pci_iov_set_numvfs(dev, nr_virtfn);
++ iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
++ pci_cfg_access_lock(dev);
++ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
++ msleep(100);
++ pci_cfg_access_unlock(dev);
++
+ for (i = 0; i < initial; i++) {
+ rc = pci_iov_add_virtfn(dev, i, 0);
+ if (rc)
+@@ -554,21 +554,61 @@ void pci_iov_release(struct pci_dev *dev)
+ }
+
+ /**
+- * pci_iov_resource_bar - get position of the SR-IOV BAR
++ * pci_iov_update_resource - update a VF BAR
+ * @dev: the PCI device
+ * @resno: the resource number
+ *
+- * Returns position of the BAR encapsulated in the SR-IOV capability.
++ * Update a VF BAR in the SR-IOV capability of a PF.
+ */
+-int pci_iov_resource_bar(struct pci_dev *dev, int resno)
++void pci_iov_update_resource(struct pci_dev *dev, int resno)
+ {
+- if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
+- return 0;
++ struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
++ struct resource *res = dev->resource + resno;
++ int vf_bar = resno - PCI_IOV_RESOURCES;
++ struct pci_bus_region region;
++ u16 cmd;
++ u32 new;
++ int reg;
++
++ /*
++ * The generic pci_restore_bars() path calls this for all devices,
++ * including VFs and non-SR-IOV devices. If this is not a PF, we
++ * have nothing to do.
++ */
++ if (!iov)
++ return;
++
++ pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
++ if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
++ dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
++ vf_bar, res);
++ return;
++ }
++
++ /*
++ * Ignore unimplemented BARs, unused resource slots for 64-bit
++ * BARs, and non-movable resources, e.g., those described via
++ * Enhanced Allocation.
++ */
++ if (!res->flags)
++ return;
++
++ if (res->flags & IORESOURCE_UNSET)
++ return;
++
++ if (res->flags & IORESOURCE_PCI_FIXED)
++ return;
+
+- BUG_ON(!dev->is_physfn);
++ pcibios_resource_to_bus(dev->bus, &region, res);
++ new = region.start;
++ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
+
+- return dev->sriov->pos + PCI_SRIOV_BAR +
+- 4 * (resno - PCI_IOV_RESOURCES);
++ reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
++ pci_write_config_dword(dev, reg, new);
++ if (res->flags & IORESOURCE_MEM_64) {
++ new = region.start >> 16 >> 16;
++ pci_write_config_dword(dev, reg + 4, new);
++ }
+ }
+
+ resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index eda6a7c..6922964 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -564,10 +564,6 @@ static void pci_restore_bars(struct pci_dev *dev)
+ {
+ int i;
+
+- /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
+- if (dev->is_virtfn)
+- return;
+-
+ for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
+ pci_update_resource(dev, i);
+ }
+@@ -4835,36 +4831,6 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
+ }
+ EXPORT_SYMBOL(pci_select_bars);
+
+-/**
+- * pci_resource_bar - get position of the BAR associated with a resource
+- * @dev: the PCI device
+- * @resno: the resource number
+- * @type: the BAR type to be filled in
+- *
+- * Returns BAR position in config space, or 0 if the BAR is invalid.
+- */
+-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
+-{
+- int reg;
+-
+- if (resno < PCI_ROM_RESOURCE) {
+- *type = pci_bar_unknown;
+- return PCI_BASE_ADDRESS_0 + 4 * resno;
+- } else if (resno == PCI_ROM_RESOURCE) {
+- *type = pci_bar_mem32;
+- return dev->rom_base_reg;
+- } else if (resno < PCI_BRIDGE_RESOURCES) {
+- /* device specific resource */
+- *type = pci_bar_unknown;
+- reg = pci_iov_resource_bar(dev, resno);
+- if (reg)
+- return reg;
+- }
+-
+- dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
+- return 0;
+-}
+-
+ /* Some architectures require additional programming to enable VGA */
+ static arch_set_vga_state_t arch_set_vga_state;
+
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 4518562..a5d37f6 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -245,7 +245,6 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
+ int pci_setup_device(struct pci_dev *dev);
+ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ struct resource *res, unsigned int reg);
+-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
+ void pci_configure_ari(struct pci_dev *dev);
+ void __pci_bus_size_bridges(struct pci_bus *bus,
+ struct list_head *realloc_head);
+@@ -289,7 +288,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
+ #ifdef CONFIG_PCI_IOV
+ int pci_iov_init(struct pci_dev *dev);
+ void pci_iov_release(struct pci_dev *dev);
+-int pci_iov_resource_bar(struct pci_dev *dev, int resno);
++void pci_iov_update_resource(struct pci_dev *dev, int resno);
+ resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
+ void pci_restore_iov_state(struct pci_dev *dev);
+ int pci_iov_bus_range(struct pci_bus *bus);
+@@ -303,10 +302,6 @@ static inline void pci_iov_release(struct pci_dev *dev)
+
+ {
+ }
+-static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno)
+-{
+- return 0;
+-}
+ static inline void pci_restore_iov_state(struct pci_dev *dev)
+ {
+ }
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 300770c..d266d80 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -227,7 +227,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+ }
+ } else {
+- res->flags |= (l & IORESOURCE_ROM_ENABLE);
++ if (l & PCI_ROM_ADDRESS_ENABLE)
++ res->flags |= IORESOURCE_ROM_ENABLE;
+ l64 = l & PCI_ROM_ADDRESS_MASK;
+ sz64 = sz & PCI_ROM_ADDRESS_MASK;
+ mask64 = (u32)PCI_ROM_ADDRESS_MASK;
+diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
+index 06663d3..b6edb18 100644
+--- a/drivers/pci/rom.c
++++ b/drivers/pci/rom.c
+@@ -35,6 +35,11 @@ int pci_enable_rom(struct pci_dev *pdev)
+ if (res->flags & IORESOURCE_ROM_SHADOW)
+ return 0;
+
++ /*
++ * Ideally pci_update_resource() would update the ROM BAR address,
++ * and we would only set the enable bit here. But apparently some
++ * devices have buggy ROM BARs that read as zero when disabled.
++ */
+ pcibios_resource_to_bus(pdev->bus, &region, res);
+ pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
+ rom_addr &= ~PCI_ROM_ADDRESS_MASK;
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index 9526e34..4bc589e 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -25,21 +25,18 @@
+ #include <linux/slab.h>
+ #include "pci.h"
+
+-
+-void pci_update_resource(struct pci_dev *dev, int resno)
++static void pci_std_update_resource(struct pci_dev *dev, int resno)
+ {
+ struct pci_bus_region region;
+ bool disable;
+ u16 cmd;
+ u32 new, check, mask;
+ int reg;
+- enum pci_bar_type type;
+ struct resource *res = dev->resource + resno;
+
+- if (dev->is_virtfn) {
+- dev_warn(&dev->dev, "can't update VF BAR%d\n", resno);
++ /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
++ if (dev->is_virtfn)
+ return;
+- }
+
+ /*
+ * Ignore resources for unimplemented BARs and unused resource slots
+@@ -60,21 +57,34 @@ void pci_update_resource(struct pci_dev *dev, int resno)
+ return;
+
+ pcibios_resource_to_bus(dev->bus, &region, res);
++ new = region.start;
+
+- new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
+- if (res->flags & IORESOURCE_IO)
++ if (res->flags & IORESOURCE_IO) {
+ mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
+- else
++ new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
++ } else if (resno == PCI_ROM_RESOURCE) {
++ mask = (u32)PCI_ROM_ADDRESS_MASK;
++ } else {
+ mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
++ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
++ }
+
+- reg = pci_resource_bar(dev, resno, &type);
+- if (!reg)
+- return;
+- if (type != pci_bar_unknown) {
++ if (resno < PCI_ROM_RESOURCE) {
++ reg = PCI_BASE_ADDRESS_0 + 4 * resno;
++ } else if (resno == PCI_ROM_RESOURCE) {
++
++ /*
++ * Apparently some Matrox devices have ROM BARs that read
++ * as zero when disabled, so don't update ROM BARs unless
++ * they're enabled. See https://lkml.org/lkml/2005/8/30/138.
++ */
+ if (!(res->flags & IORESOURCE_ROM_ENABLE))
+ return;
++
++ reg = dev->rom_base_reg;
+ new |= PCI_ROM_ADDRESS_ENABLE;
+- }
++ } else
++ return;
+
+ /*
+ * We can't update a 64-bit BAR atomically, so when possible,
+@@ -110,6 +120,16 @@ void pci_update_resource(struct pci_dev *dev, int resno)
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+
++void pci_update_resource(struct pci_dev *dev, int resno)
++{
++ if (resno <= PCI_ROM_RESOURCE)
++ pci_std_update_resource(dev, resno);
++#ifdef CONFIG_PCI_IOV
++ else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
++ pci_iov_update_resource(dev, resno);
++#endif
++}
++
+ int pci_claim_resource(struct pci_dev *dev, int resource)
+ {
+ struct resource *res = &dev->resource[resource];
+diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
+index ed92fb0..76b802c 100644
+--- a/drivers/s390/crypto/ap_bus.c
++++ b/drivers/s390/crypto/ap_bus.c
+@@ -1712,6 +1712,9 @@ static void ap_scan_bus(struct work_struct *unused)
+ ap_dev->queue_depth = queue_depth;
+ ap_dev->raw_hwtype = device_type;
+ ap_dev->device_type = device_type;
++ /* CEX6 toleration: map to CEX5 */
++ if (device_type == AP_DEVICE_TYPE_CEX6)
++ ap_dev->device_type = AP_DEVICE_TYPE_CEX5;
+ ap_dev->functions = device_functions;
+ spin_lock_init(&ap_dev->lock);
+ INIT_LIST_HEAD(&ap_dev->pendingq);
+diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
+index d7fdf5c..fd66d2c 100644
+--- a/drivers/s390/crypto/ap_bus.h
++++ b/drivers/s390/crypto/ap_bus.h
+@@ -105,6 +105,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
+ #define AP_DEVICE_TYPE_CEX3C 9
+ #define AP_DEVICE_TYPE_CEX4 10
+ #define AP_DEVICE_TYPE_CEX5 11
++#define AP_DEVICE_TYPE_CEX6 12
+
+ /*
+ * Known function facilities
+diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+index 91dfd58..c4fe95a 100644
+--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
++++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+@@ -22,7 +22,7 @@
+ *
+ ****************************************************************************/
+
+-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -82,7 +82,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
+ }
+ } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ if (se_cmd->data_direction == DMA_TO_DEVICE) {
+- /* residual data from an overflow write */
++ /* residual data from an overflow write */
+ rsp->flags = SRP_RSP_FLAG_DOOVER;
+ rsp->data_out_res_cnt = cpu_to_be32(residual_count);
+ } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+@@ -102,7 +102,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
+ * and the function returns TRUE.
+ *
+ * EXECUTION ENVIRONMENT:
+- * Interrupt or Process environment
++ * Interrupt or Process environment
+ */
+ static bool connection_broken(struct scsi_info *vscsi)
+ {
+@@ -325,7 +325,7 @@ static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
+ }
+
+ /**
+- * ibmvscsis_send_init_message() - send initialize message to the client
++ * ibmvscsis_send_init_message() - send initialize message to the client
+ * @vscsi: Pointer to our adapter structure
+ * @format: Which Init Message format to send
+ *
+@@ -383,13 +383,13 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
+ vscsi->cmd_q.base_addr);
+ if (crq) {
+ *format = (uint)(crq->format);
+- rc = ERROR;
++ rc = ERROR;
+ crq->valid = INVALIDATE_CMD_RESP_EL;
+ dma_rmb();
+ }
+ } else {
+ *format = (uint)(crq->format);
+- rc = ERROR;
++ rc = ERROR;
+ crq->valid = INVALIDATE_CMD_RESP_EL;
+ dma_rmb();
+ }
+@@ -398,166 +398,6 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
+ }
+
+ /**
+- * ibmvscsis_establish_new_q() - Establish new CRQ queue
+- * @vscsi: Pointer to our adapter structure
+- * @new_state: New state being established after resetting the queue
+- *
+- * Must be called with interrupt lock held.
+- */
+-static long ibmvscsis_establish_new_q(struct scsi_info *vscsi, uint new_state)
+-{
+- long rc = ADAPT_SUCCESS;
+- uint format;
+-
+- vscsi->flags &= PRESERVE_FLAG_FIELDS;
+- vscsi->rsp_q_timer.timer_pops = 0;
+- vscsi->debit = 0;
+- vscsi->credit = 0;
+-
+- rc = vio_enable_interrupts(vscsi->dma_dev);
+- if (rc) {
+- pr_warn("reset_queue: failed to enable interrupts, rc %ld\n",
+- rc);
+- return rc;
+- }
+-
+- rc = ibmvscsis_check_init_msg(vscsi, &format);
+- if (rc) {
+- dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n",
+- rc);
+- return rc;
+- }
+-
+- if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) {
+- rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
+- switch (rc) {
+- case H_SUCCESS:
+- case H_DROPPED:
+- case H_CLOSED:
+- rc = ADAPT_SUCCESS;
+- break;
+-
+- case H_PARAMETER:
+- case H_HARDWARE:
+- break;
+-
+- default:
+- vscsi->state = UNDEFINED;
+- rc = H_HARDWARE;
+- break;
+- }
+- }
+-
+- return rc;
+-}
+-
+-/**
+- * ibmvscsis_reset_queue() - Reset CRQ Queue
+- * @vscsi: Pointer to our adapter structure
+- * @new_state: New state to establish after resetting the queue
+- *
+- * This function calls h_free_q and then calls h_reg_q and does all
+- * of the bookkeeping to get us back to where we can communicate.
+- *
+- * Actually, we don't always call h_free_crq. A problem was discovered
+- * where one partition would close and reopen his queue, which would
+- * cause his partner to get a transport event, which would cause him to
+- * close and reopen his queue, which would cause the original partition
+- * to get a transport event, etc., etc. To prevent this, we don't
+- * actually close our queue if the client initiated the reset, (i.e.
+- * either we got a transport event or we have detected that the client's
+- * queue is gone)
+- *
+- * EXECUTION ENVIRONMENT:
+- * Process environment, called with interrupt lock held
+- */
+-static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state)
+-{
+- int bytes;
+- long rc = ADAPT_SUCCESS;
+-
+- pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
+-
+- /* don't reset, the client did it for us */
+- if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
+- vscsi->flags &= PRESERVE_FLAG_FIELDS;
+- vscsi->rsp_q_timer.timer_pops = 0;
+- vscsi->debit = 0;
+- vscsi->credit = 0;
+- vscsi->state = new_state;
+- vio_enable_interrupts(vscsi->dma_dev);
+- } else {
+- rc = ibmvscsis_free_command_q(vscsi);
+- if (rc == ADAPT_SUCCESS) {
+- vscsi->state = new_state;
+-
+- bytes = vscsi->cmd_q.size * PAGE_SIZE;
+- rc = h_reg_crq(vscsi->dds.unit_id,
+- vscsi->cmd_q.crq_token, bytes);
+- if (rc == H_CLOSED || rc == H_SUCCESS) {
+- rc = ibmvscsis_establish_new_q(vscsi,
+- new_state);
+- }
+-
+- if (rc != ADAPT_SUCCESS) {
+- pr_debug("reset_queue: reg_crq rc %ld\n", rc);
+-
+- vscsi->state = ERR_DISCONNECTED;
+- vscsi->flags |= RESPONSE_Q_DOWN;
+- ibmvscsis_free_command_q(vscsi);
+- }
+- } else {
+- vscsi->state = ERR_DISCONNECTED;
+- vscsi->flags |= RESPONSE_Q_DOWN;
+- }
+- }
+-}
+-
+-/**
+- * ibmvscsis_free_cmd_resources() - Free command resources
+- * @vscsi: Pointer to our adapter structure
+- * @cmd: Command which is no longer in use
+- *
+- * Must be called with interrupt lock held.
+- */
+-static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
+- struct ibmvscsis_cmd *cmd)
+-{
+- struct iu_entry *iue = cmd->iue;
+-
+- switch (cmd->type) {
+- case TASK_MANAGEMENT:
+- case SCSI_CDB:
+- /*
+- * When the queue goes down this value is cleared, so it
+- * cannot be cleared in this general purpose function.
+- */
+- if (vscsi->debit)
+- vscsi->debit -= 1;
+- break;
+- case ADAPTER_MAD:
+- vscsi->flags &= ~PROCESSING_MAD;
+- break;
+- case UNSET_TYPE:
+- break;
+- default:
+- dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
+- cmd->type);
+- break;
+- }
+-
+- cmd->iue = NULL;
+- list_add_tail(&cmd->list, &vscsi->free_cmd);
+- srp_iu_put(iue);
+-
+- if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
+- list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
+- vscsi->flags &= ~WAIT_FOR_IDLE;
+- complete(&vscsi->wait_idle);
+- }
+-}
+-
+-/**
+ * ibmvscsis_disconnect() - Helper function to disconnect
+ * @work: Pointer to work_struct, gives access to our adapter structure
+ *
+@@ -576,7 +416,6 @@ static void ibmvscsis_disconnect(struct work_struct *work)
+ proc_work);
+ u16 new_state;
+ bool wait_idle = false;
+- long rc = ADAPT_SUCCESS;
+
+ spin_lock_bh(&vscsi->intr_lock);
+ new_state = vscsi->new_state;
+@@ -590,7 +429,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
+ * should transition to the new state
+ */
+ switch (vscsi->state) {
+- /* Should never be called while in this state. */
++ /* Should never be called while in this state. */
+ case NO_QUEUE:
+ /*
+ * Can never transition from this state;
+@@ -629,30 +468,24 @@ static void ibmvscsis_disconnect(struct work_struct *work)
+ vscsi->state = new_state;
+ break;
+
+- /*
+- * If this is a transition into an error state.
+- * a client is attempting to establish a connection
+- * and has violated the RPA protocol.
+- * There can be nothing pending on the adapter although
+- * there can be requests in the command queue.
+- */
+ case WAIT_ENABLED:
+- case PART_UP_WAIT_ENAB:
+ switch (new_state) {
+- case ERR_DISCONNECT:
+- vscsi->flags |= RESPONSE_Q_DOWN;
++ case UNCONFIGURING:
+ vscsi->state = new_state;
++ vscsi->flags |= RESPONSE_Q_DOWN;
+ vscsi->flags &= ~(SCHEDULE_DISCONNECT |
+ DISCONNECT_SCHEDULED);
+- ibmvscsis_free_command_q(vscsi);
+- break;
+- case ERR_DISCONNECT_RECONNECT:
+- ibmvscsis_reset_queue(vscsi, WAIT_ENABLED);
++ dma_rmb();
++ if (vscsi->flags & CFG_SLEEPING) {
++ vscsi->flags &= ~CFG_SLEEPING;
++ complete(&vscsi->unconfig);
++ }
+ break;
+
+ /* should never happen */
++ case ERR_DISCONNECT:
++ case ERR_DISCONNECT_RECONNECT:
+ case WAIT_IDLE:
+- rc = ERROR;
+ dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
+ vscsi->state);
+ break;
+@@ -661,6 +494,13 @@ static void ibmvscsis_disconnect(struct work_struct *work)
+
+ case WAIT_IDLE:
+ switch (new_state) {
++ case UNCONFIGURING:
++ vscsi->flags |= RESPONSE_Q_DOWN;
++ vscsi->state = new_state;
++ vscsi->flags &= ~(SCHEDULE_DISCONNECT |
++ DISCONNECT_SCHEDULED);
++ ibmvscsis_free_command_q(vscsi);
++ break;
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ vscsi->state = new_state;
+@@ -765,45 +605,348 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
+ else
+ state = vscsi->state;
+
+- switch (state) {
+- case NO_QUEUE:
+- case UNCONFIGURING:
+- break;
++ switch (state) {
++ case NO_QUEUE:
++ case UNCONFIGURING:
++ break;
++
++ case ERR_DISCONNECTED:
++ case ERR_DISCONNECT:
++ case UNDEFINED:
++ if (new_state == UNCONFIGURING)
++ vscsi->new_state = new_state;
++ break;
++
++ case ERR_DISCONNECT_RECONNECT:
++ switch (new_state) {
++ case UNCONFIGURING:
++ case ERR_DISCONNECT:
++ vscsi->new_state = new_state;
++ break;
++ default:
++ break;
++ }
++ break;
++
++ case WAIT_ENABLED:
++ case WAIT_IDLE:
++ case WAIT_CONNECTION:
++ case CONNECTED:
++ case SRP_PROCESSING:
++ vscsi->new_state = new_state;
++ break;
++
++ default:
++ break;
++ }
++ }
++
++ pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
++ vscsi->flags, vscsi->new_state);
++}
++
++/**
++ * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
++ * @vscsi: Pointer to our adapter structure
++ *
++ * Must be called with interrupt lock held.
++ */
++static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
++{
++ long rc = ADAPT_SUCCESS;
++
++ switch (vscsi->state) {
++ case NO_QUEUE:
++ case ERR_DISCONNECT:
++ case ERR_DISCONNECT_RECONNECT:
++ case ERR_DISCONNECTED:
++ case UNCONFIGURING:
++ case UNDEFINED:
++ rc = ERROR;
++ break;
++
++ case WAIT_CONNECTION:
++ vscsi->state = CONNECTED;
++ break;
++
++ case WAIT_IDLE:
++ case SRP_PROCESSING:
++ case CONNECTED:
++ case WAIT_ENABLED:
++ default:
++ rc = ERROR;
++ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
++ vscsi->state);
++ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
++ break;
++ }
++
++ return rc;
++}
++
++/**
++ * ibmvscsis_handle_init_msg() - Respond to an Init Message
++ * @vscsi: Pointer to our adapter structure
++ *
++ * Must be called with interrupt lock held.
++ */
++static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
++{
++ long rc = ADAPT_SUCCESS;
++
++ switch (vscsi->state) {
++ case WAIT_CONNECTION:
++ rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
++ switch (rc) {
++ case H_SUCCESS:
++ vscsi->state = CONNECTED;
++ break;
++
++ case H_PARAMETER:
++ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
++ rc);
++ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
++ break;
++
++ case H_DROPPED:
++ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
++ rc);
++ rc = ERROR;
++ ibmvscsis_post_disconnect(vscsi,
++ ERR_DISCONNECT_RECONNECT, 0);
++ break;
++
++ case H_CLOSED:
++ pr_warn("init_msg: failed to send, rc %ld\n", rc);
++ rc = 0;
++ break;
++ }
++ break;
++
++ case UNDEFINED:
++ rc = ERROR;
++ break;
++
++ case UNCONFIGURING:
++ break;
++
++ case WAIT_ENABLED:
++ case CONNECTED:
++ case SRP_PROCESSING:
++ case WAIT_IDLE:
++ case NO_QUEUE:
++ case ERR_DISCONNECT:
++ case ERR_DISCONNECT_RECONNECT:
++ case ERR_DISCONNECTED:
++ default:
++ rc = ERROR;
++ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
++ vscsi->state);
++ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
++ break;
++ }
++
++ return rc;
++}
++
++/**
++ * ibmvscsis_init_msg() - Respond to an init message
++ * @vscsi: Pointer to our adapter structure
++ * @crq: Pointer to CRQ element containing the Init Message
++ *
++ * EXECUTION ENVIRONMENT:
++ * Interrupt, interrupt lock held
++ */
++static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
++{
++ long rc = ADAPT_SUCCESS;
++
++ pr_debug("init_msg: state 0x%hx\n", vscsi->state);
++
++ rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
++ (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
++ 0);
++ if (rc == H_SUCCESS) {
++ vscsi->client_data.partition_number =
++ be64_to_cpu(*(u64 *)vscsi->map_buf);
++ pr_debug("init_msg, part num %d\n",
++ vscsi->client_data.partition_number);
++ } else {
++ pr_debug("init_msg h_vioctl rc %ld\n", rc);
++ rc = ADAPT_SUCCESS;
++ }
++
++ if (crq->format == INIT_MSG) {
++ rc = ibmvscsis_handle_init_msg(vscsi);
++ } else if (crq->format == INIT_COMPLETE_MSG) {
++ rc = ibmvscsis_handle_init_compl_msg(vscsi);
++ } else {
++ rc = ERROR;
++ dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
++ (uint)crq->format);
++ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
++ }
++
++ return rc;
++}
++
++/**
++ * ibmvscsis_establish_new_q() - Establish new CRQ queue
++ * @vscsi: Pointer to our adapter structure
++ *
++ * Must be called with interrupt lock held.
++ */
++static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
++{
++ long rc = ADAPT_SUCCESS;
++ uint format;
++
++ vscsi->flags &= PRESERVE_FLAG_FIELDS;
++ vscsi->rsp_q_timer.timer_pops = 0;
++ vscsi->debit = 0;
++ vscsi->credit = 0;
++
++ rc = vio_enable_interrupts(vscsi->dma_dev);
++ if (rc) {
++ pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
++ rc);
++ return rc;
++ }
++
++ rc = ibmvscsis_check_init_msg(vscsi, &format);
++ if (rc) {
++ dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",
++ rc);
++ return rc;
++ }
++
++ if (format == UNUSED_FORMAT) {
++ rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
++ switch (rc) {
++ case H_SUCCESS:
++ case H_DROPPED:
++ case H_CLOSED:
++ rc = ADAPT_SUCCESS;
++ break;
++
++ case H_PARAMETER:
++ case H_HARDWARE:
++ break;
++
++ default:
++ vscsi->state = UNDEFINED;
++ rc = H_HARDWARE;
++ break;
++ }
++ } else if (format == INIT_MSG) {
++ rc = ibmvscsis_handle_init_msg(vscsi);
++ }
++
++ return rc;
++}
++
++/**
++ * ibmvscsis_reset_queue() - Reset CRQ Queue
++ * @vscsi: Pointer to our adapter structure
++ *
++ * This function calls h_free_q and then calls h_reg_q and does all
++ * of the bookkeeping to get us back to where we can communicate.
++ *
++ * Actually, we don't always call h_free_crq. A problem was discovered
++ * where one partition would close and reopen his queue, which would
++ * cause his partner to get a transport event, which would cause him to
++ * close and reopen his queue, which would cause the original partition
++ * to get a transport event, etc., etc. To prevent this, we don't
++ * actually close our queue if the client initiated the reset, (i.e.
++ * either we got a transport event or we have detected that the client's
++ * queue is gone)
++ *
++ * EXECUTION ENVIRONMENT:
++ * Process environment, called with interrupt lock held
++ */
++static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
++{
++ int bytes;
++ long rc = ADAPT_SUCCESS;
++
++ pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
++
++ /* don't reset, the client did it for us */
++ if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
++ vscsi->flags &= PRESERVE_FLAG_FIELDS;
++ vscsi->rsp_q_timer.timer_pops = 0;
++ vscsi->debit = 0;
++ vscsi->credit = 0;
++ vscsi->state = WAIT_CONNECTION;
++ vio_enable_interrupts(vscsi->dma_dev);
++ } else {
++ rc = ibmvscsis_free_command_q(vscsi);
++ if (rc == ADAPT_SUCCESS) {
++ vscsi->state = WAIT_CONNECTION;
++
++ bytes = vscsi->cmd_q.size * PAGE_SIZE;
++ rc = h_reg_crq(vscsi->dds.unit_id,
++ vscsi->cmd_q.crq_token, bytes);
++ if (rc == H_CLOSED || rc == H_SUCCESS) {
++ rc = ibmvscsis_establish_new_q(vscsi);
++ }
+
+- case ERR_DISCONNECTED:
+- case ERR_DISCONNECT:
+- case UNDEFINED:
+- if (new_state == UNCONFIGURING)
+- vscsi->new_state = new_state;
+- break;
++ if (rc != ADAPT_SUCCESS) {
++ pr_debug("reset_queue: reg_crq rc %ld\n", rc);
+
+- case ERR_DISCONNECT_RECONNECT:
+- switch (new_state) {
+- case UNCONFIGURING:
+- case ERR_DISCONNECT:
+- vscsi->new_state = new_state;
+- break;
+- default:
+- break;
++ vscsi->state = ERR_DISCONNECTED;
++ vscsi->flags |= RESPONSE_Q_DOWN;
++ ibmvscsis_free_command_q(vscsi);
+ }
+- break;
++ } else {
++ vscsi->state = ERR_DISCONNECTED;
++ vscsi->flags |= RESPONSE_Q_DOWN;
++ }
++ }
++}
+
+- case WAIT_ENABLED:
+- case PART_UP_WAIT_ENAB:
+- case WAIT_IDLE:
+- case WAIT_CONNECTION:
+- case CONNECTED:
+- case SRP_PROCESSING:
+- vscsi->new_state = new_state;
+- break;
++/**
++ * ibmvscsis_free_cmd_resources() - Free command resources
++ * @vscsi: Pointer to our adapter structure
++ * @cmd: Command which is no longer in use
++ *
++ * Must be called with interrupt lock held.
++ */
++static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
++ struct ibmvscsis_cmd *cmd)
++{
++ struct iu_entry *iue = cmd->iue;
+
+- default:
+- break;
+- }
++ switch (cmd->type) {
++ case TASK_MANAGEMENT:
++ case SCSI_CDB:
++ /*
++ * When the queue goes down this value is cleared, so it
++ * cannot be cleared in this general purpose function.
++ */
++ if (vscsi->debit)
++ vscsi->debit -= 1;
++ break;
++ case ADAPTER_MAD:
++ vscsi->flags &= ~PROCESSING_MAD;
++ break;
++ case UNSET_TYPE:
++ break;
++ default:
++ dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
++ cmd->type);
++ break;
+ }
+
+- pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
+- vscsi->flags, vscsi->new_state);
++ cmd->iue = NULL;
++ list_add_tail(&cmd->list, &vscsi->free_cmd);
++ srp_iu_put(iue);
++
++ if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
++ list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
++ vscsi->flags &= ~WAIT_FOR_IDLE;
++ complete(&vscsi->wait_idle);
++ }
+ }
+
+ /**
+@@ -864,10 +1007,6 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
+ TRANS_EVENT));
+ break;
+
+- case PART_UP_WAIT_ENAB:
+- vscsi->state = WAIT_ENABLED;
+- break;
+-
+ case SRP_PROCESSING:
+ if ((vscsi->debit > 0) ||
+ !list_empty(&vscsi->schedule_q) ||
+@@ -896,7 +1035,7 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
+ }
+ }
+
+- rc = vscsi->flags & SCHEDULE_DISCONNECT;
++ rc = vscsi->flags & SCHEDULE_DISCONNECT;
+
+ pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
+ vscsi->flags, vscsi->state, rc);
+@@ -1067,16 +1206,28 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
+ free_qs = true;
+
+ switch (vscsi->state) {
++ case UNCONFIGURING:
++ ibmvscsis_free_command_q(vscsi);
++ dma_rmb();
++ isync();
++ if (vscsi->flags & CFG_SLEEPING) {
++ vscsi->flags &= ~CFG_SLEEPING;
++ complete(&vscsi->unconfig);
++ }
++ break;
+ case ERR_DISCONNECT_RECONNECT:
+- ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION);
++ ibmvscsis_reset_queue(vscsi);
+ pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
+ break;
+
+ case ERR_DISCONNECT:
+ ibmvscsis_free_command_q(vscsi);
+- vscsi->flags &= ~DISCONNECT_SCHEDULED;
++ vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
+ vscsi->flags |= RESPONSE_Q_DOWN;
+- vscsi->state = ERR_DISCONNECTED;
++ if (vscsi->tport.enabled)
++ vscsi->state = ERR_DISCONNECTED;
++ else
++ vscsi->state = WAIT_ENABLED;
+ pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
+ break;
+@@ -1221,7 +1372,7 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
+ * @iue: Information Unit containing the Adapter Info MAD request
+ *
+ * EXECUTION ENVIRONMENT:
+- * Interrupt adpater lock is held
++ * Interrupt adapter lock is held
+ */
+ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
+ struct iu_entry *iue)
+@@ -1621,8 +1772,8 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
+ be64_to_cpu(msg_hi),
+ be64_to_cpu(cmd->rsp.tag));
+
+- pr_debug("send_messages: tag 0x%llx, rc %ld\n",
+- be64_to_cpu(cmd->rsp.tag), rc);
++ pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
++ cmd, be64_to_cpu(cmd->rsp.tag), rc);
+
+ /* if all ok free up the command element resources */
+ if (rc == H_SUCCESS) {
+@@ -1692,7 +1843,7 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
+ * @crq: Pointer to the CRQ entry containing the MAD request
+ *
+ * EXECUTION ENVIRONMENT:
+- * Interrupt called with adapter lock held
++ * Interrupt, called with adapter lock held
+ */
+ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
+ {
+@@ -1746,14 +1897,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
+
+ pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
+
+- if (be16_to_cpu(mad->length) < 0) {
+- dev_err(&vscsi->dev, "mad: length is < 0\n");
+- ibmvscsis_post_disconnect(vscsi,
+- ERR_DISCONNECT_RECONNECT, 0);
+- rc = SRP_VIOLATION;
+- } else {
+- rc = ibmvscsis_process_mad(vscsi, iue);
+- }
++ rc = ibmvscsis_process_mad(vscsi, iue);
+
+ pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
+ rc);
+@@ -1865,7 +2009,7 @@ static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
+ break;
+ case H_PERMISSION:
+ if (connection_broken(vscsi))
+- flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
++ flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
+ dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
+@@ -2090,248 +2234,98 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
+ break;
+
+ case SRP_TSK_MGMT:
+- tsk = &vio_iu(iue)->srp.tsk_mgmt;
+- pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag,
+- tsk->tag);
+- cmd->rsp.tag = tsk->tag;
+- vscsi->debit += 1;
+- cmd->type = TASK_MANAGEMENT;
+- list_add_tail(&cmd->list, &vscsi->schedule_q);
+- queue_work(vscsi->work_q, &cmd->work);
+- break;
+-
+- case SRP_CMD:
+- pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag,
+- srp->tag);
+- cmd->rsp.tag = srp->tag;
+- vscsi->debit += 1;
+- cmd->type = SCSI_CDB;
+- /*
+- * We want to keep track of work waiting for
+- * the workqueue.
+- */
+- list_add_tail(&cmd->list, &vscsi->schedule_q);
+- queue_work(vscsi->work_q, &cmd->work);
+- break;
+-
+- case SRP_I_LOGOUT:
+- rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
+- break;
+-
+- case SRP_CRED_RSP:
+- case SRP_AER_RSP:
+- default:
+- ibmvscsis_free_cmd_resources(vscsi, cmd);
+- dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
+- (uint)srp->opcode);
+- ibmvscsis_post_disconnect(vscsi,
+- ERR_DISCONNECT_RECONNECT, 0);
+- break;
+- }
+- } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
+- rc = ibmvscsis_srp_login(vscsi, cmd, crq);
+- } else {
+- ibmvscsis_free_cmd_resources(vscsi, cmd);
+- dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
+- vscsi->state);
+- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+- }
+-}
+-
+-/**
+- * ibmvscsis_ping_response() - Respond to a ping request
+- * @vscsi: Pointer to our adapter structure
+- *
+- * Let the client know that the server is alive and waiting on
+- * its native I/O stack.
+- * If any type of error occurs from the call to queue a ping
+- * response then the client is either not accepting or receiving
+- * interrupts. Disconnect with an error.
+- *
+- * EXECUTION ENVIRONMENT:
+- * Interrupt, interrupt lock held
+- */
+-static long ibmvscsis_ping_response(struct scsi_info *vscsi)
+-{
+- struct viosrp_crq *crq;
+- u64 buffer[2] = { 0, 0 };
+- long rc;
+-
+- crq = (struct viosrp_crq *)&buffer;
+- crq->valid = VALID_CMD_RESP_EL;
+- crq->format = (u8)MESSAGE_IN_CRQ;
+- crq->status = PING_RESPONSE;
+-
+- rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
+- cpu_to_be64(buffer[MSG_LOW]));
+-
+- switch (rc) {
+- case H_SUCCESS:
+- break;
+- case H_CLOSED:
+- vscsi->flags |= CLIENT_FAILED;
+- case H_DROPPED:
+- vscsi->flags |= RESPONSE_Q_DOWN;
+- case H_REMOTE_PARM:
+- dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
+- rc);
+- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+- break;
+- default:
+- dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
+- rc);
+- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+- break;
+- }
+-
+- return rc;
+-}
+-
+-/**
+- * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
+- * @vscsi: Pointer to our adapter structure
+- *
+- * Must be called with interrupt lock held.
+- */
+-static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
+-{
+- long rc = ADAPT_SUCCESS;
+-
+- switch (vscsi->state) {
+- case NO_QUEUE:
+- case ERR_DISCONNECT:
+- case ERR_DISCONNECT_RECONNECT:
+- case ERR_DISCONNECTED:
+- case UNCONFIGURING:
+- case UNDEFINED:
+- rc = ERROR;
+- break;
+-
+- case WAIT_CONNECTION:
+- vscsi->state = CONNECTED;
+- break;
+-
+- case WAIT_IDLE:
+- case SRP_PROCESSING:
+- case CONNECTED:
+- case WAIT_ENABLED:
+- case PART_UP_WAIT_ENAB:
+- default:
+- rc = ERROR;
+- dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
+- vscsi->state);
+- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+- break;
+- }
+-
+- return rc;
+-}
+-
+-/**
+- * ibmvscsis_handle_init_msg() - Respond to an Init Message
+- * @vscsi: Pointer to our adapter structure
+- *
+- * Must be called with interrupt lock held.
+- */
+-static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
+-{
+- long rc = ADAPT_SUCCESS;
+-
+- switch (vscsi->state) {
+- case WAIT_ENABLED:
+- vscsi->state = PART_UP_WAIT_ENAB;
+- break;
++ tsk = &vio_iu(iue)->srp.tsk_mgmt;
++ pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag,
++ tsk->tag);
++ cmd->rsp.tag = tsk->tag;
++ vscsi->debit += 1;
++ cmd->type = TASK_MANAGEMENT;
++ list_add_tail(&cmd->list, &vscsi->schedule_q);
++ queue_work(vscsi->work_q, &cmd->work);
++ break;
+
+- case WAIT_CONNECTION:
+- rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
+- switch (rc) {
+- case H_SUCCESS:
+- vscsi->state = CONNECTED;
++ case SRP_CMD:
++ pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag,
++ srp->tag);
++ cmd->rsp.tag = srp->tag;
++ vscsi->debit += 1;
++ cmd->type = SCSI_CDB;
++ /*
++ * We want to keep track of work waiting for
++ * the workqueue.
++ */
++ list_add_tail(&cmd->list, &vscsi->schedule_q);
++ queue_work(vscsi->work_q, &cmd->work);
+ break;
+
+- case H_PARAMETER:
+- dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+- rc);
+- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
++ case SRP_I_LOGOUT:
++ rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
+ break;
+
+- case H_DROPPED:
+- dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+- rc);
+- rc = ERROR;
++ case SRP_CRED_RSP:
++ case SRP_AER_RSP:
++ default:
++ ibmvscsis_free_cmd_resources(vscsi, cmd);
++ dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
++ (uint)srp->opcode);
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+ break;
+-
+- case H_CLOSED:
+- pr_warn("init_msg: failed to send, rc %ld\n", rc);
+- rc = 0;
+- break;
+ }
+- break;
+-
+- case UNDEFINED:
+- rc = ERROR;
+- break;
+-
+- case UNCONFIGURING:
+- break;
+-
+- case PART_UP_WAIT_ENAB:
+- case CONNECTED:
+- case SRP_PROCESSING:
+- case WAIT_IDLE:
+- case NO_QUEUE:
+- case ERR_DISCONNECT:
+- case ERR_DISCONNECT_RECONNECT:
+- case ERR_DISCONNECTED:
+- default:
+- rc = ERROR;
+- dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
++ } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
++ rc = ibmvscsis_srp_login(vscsi, cmd, crq);
++ } else {
++ ibmvscsis_free_cmd_resources(vscsi, cmd);
++ dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
+ vscsi->state);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+- break;
+ }
+-
+- return rc;
+ }
+
+ /**
+- * ibmvscsis_init_msg() - Respond to an init message
++ * ibmvscsis_ping_response() - Respond to a ping request
+ * @vscsi: Pointer to our adapter structure
+- * @crq: Pointer to CRQ element containing the Init Message
++ *
++ * Let the client know that the server is alive and waiting on
++ * its native I/O stack.
++ * If any type of error occurs from the call to queue a ping
++ * response then the client is either not accepting or receiving
++ * interrupts. Disconnect with an error.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+-static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
++static long ibmvscsis_ping_response(struct scsi_info *vscsi)
+ {
+- long rc = ADAPT_SUCCESS;
++ struct viosrp_crq *crq;
++ u64 buffer[2] = { 0, 0 };
++ long rc;
+
+- pr_debug("init_msg: state 0x%hx\n", vscsi->state);
++ crq = (struct viosrp_crq *)&buffer;
++ crq->valid = VALID_CMD_RESP_EL;
++ crq->format = (u8)MESSAGE_IN_CRQ;
++ crq->status = PING_RESPONSE;
+
+- rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
+- (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
+- 0);
+- if (rc == H_SUCCESS) {
+- vscsi->client_data.partition_number =
+- be64_to_cpu(*(u64 *)vscsi->map_buf);
+- pr_debug("init_msg, part num %d\n",
+- vscsi->client_data.partition_number);
+- } else {
+- pr_debug("init_msg h_vioctl rc %ld\n", rc);
+- rc = ADAPT_SUCCESS;
+- }
++ rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
++ cpu_to_be64(buffer[MSG_LOW]));
+
+- if (crq->format == INIT_MSG) {
+- rc = ibmvscsis_handle_init_msg(vscsi);
+- } else if (crq->format == INIT_COMPLETE_MSG) {
+- rc = ibmvscsis_handle_init_compl_msg(vscsi);
+- } else {
+- rc = ERROR;
+- dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
+- (uint)crq->format);
++ switch (rc) {
++ case H_SUCCESS:
++ break;
++ case H_CLOSED:
++ vscsi->flags |= CLIENT_FAILED;
++ case H_DROPPED:
++ vscsi->flags |= RESPONSE_Q_DOWN;
++ case H_REMOTE_PARM:
++ dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
++ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
++ break;
++ default:
++ dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
++ rc);
++ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
++ break;
+ }
+
+ return rc;
+@@ -2392,7 +2386,7 @@ static long ibmvscsis_parse_command(struct scsi_info *vscsi,
+ break;
+
+ case VALID_TRANS_EVENT:
+- rc = ibmvscsis_trans_event(vscsi, crq);
++ rc = ibmvscsis_trans_event(vscsi, crq);
+ break;
+
+ case VALID_INIT_MSG:
+@@ -2523,7 +2517,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
+ dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
+ srp->tag);
+ goto fail;
+- return;
+ }
+
+ cmd->rsp.sol_not = srp->sol_not;
+@@ -2560,6 +2553,10 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
+ data_len, attr, dir, 0);
+ if (rc) {
+ dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
++ spin_lock_bh(&vscsi->intr_lock);
++ list_del(&cmd->list);
++ ibmvscsis_free_cmd_resources(vscsi, cmd);
++ spin_unlock_bh(&vscsi->intr_lock);
+ goto fail;
+ }
+ return;
+@@ -2639,6 +2636,9 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi,
+ if (rc) {
+ dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
+ rc);
++ spin_lock_bh(&vscsi->intr_lock);
++ list_del(&cmd->list);
++ spin_unlock_bh(&vscsi->intr_lock);
+ cmd->se_cmd.se_tmr_req->response =
+ TMR_FUNCTION_REJECTED;
+ }
+@@ -2787,36 +2787,6 @@ static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
+ }
+
+ /**
+- * ibmvscsis_check_q() - Helper function to Check Init Message Valid
+- * @vscsi: Pointer to our adapter structure
+- *
+- * Checks if a initialize message was queued by the initiatior
+- * while the timing window was open. This function is called from
+- * probe after the CRQ is created and interrupts are enabled.
+- * It would only be used by adapters who wait for some event before
+- * completing the init handshake with the client. For ibmvscsi, this
+- * event is waiting for the port to be enabled.
+- *
+- * EXECUTION ENVIRONMENT:
+- * Process level only, interrupt lock held
+- */
+-static long ibmvscsis_check_q(struct scsi_info *vscsi)
+-{
+- uint format;
+- long rc;
+-
+- rc = ibmvscsis_check_init_msg(vscsi, &format);
+- if (rc)
+- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+- else if (format == UNUSED_FORMAT)
+- vscsi->state = WAIT_ENABLED;
+- else
+- vscsi->state = PART_UP_WAIT_ENAB;
+-
+- return rc;
+-}
+-
+-/**
+ * ibmvscsis_enable_change_state() - Set new state based on enabled status
+ * @vscsi: Pointer to our adapter structure
+ *
+@@ -2827,77 +2797,19 @@ static long ibmvscsis_check_q(struct scsi_info *vscsi)
+ */
+ static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
+ {
++ int bytes;
+ long rc = ADAPT_SUCCESS;
+
+-handle_state_change:
+- switch (vscsi->state) {
+- case WAIT_ENABLED:
+- rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
+- switch (rc) {
+- case H_SUCCESS:
+- case H_DROPPED:
+- case H_CLOSED:
+- vscsi->state = WAIT_CONNECTION;
+- rc = ADAPT_SUCCESS;
+- break;
+-
+- case H_PARAMETER:
+- break;
+-
+- case H_HARDWARE:
+- break;
+-
+- default:
+- vscsi->state = UNDEFINED;
+- rc = H_HARDWARE;
+- break;
+- }
+- break;
+- case PART_UP_WAIT_ENAB:
+- rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
+- switch (rc) {
+- case H_SUCCESS:
+- vscsi->state = CONNECTED;
+- rc = ADAPT_SUCCESS;
+- break;
+-
+- case H_DROPPED:
+- case H_CLOSED:
+- vscsi->state = WAIT_ENABLED;
+- goto handle_state_change;
+-
+- case H_PARAMETER:
+- break;
+-
+- case H_HARDWARE:
+- break;
+-
+- default:
+- rc = H_HARDWARE;
+- break;
+- }
+- break;
+-
+- case WAIT_CONNECTION:
+- case WAIT_IDLE:
+- case SRP_PROCESSING:
+- case CONNECTED:
+- rc = ADAPT_SUCCESS;
+- break;
+- /* should not be able to get here */
+- case UNCONFIGURING:
+- rc = ERROR;
+- vscsi->state = UNDEFINED;
+- break;
++ bytes = vscsi->cmd_q.size * PAGE_SIZE;
++ rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
++ if (rc == H_CLOSED || rc == H_SUCCESS) {
++ vscsi->state = WAIT_CONNECTION;
++ rc = ibmvscsis_establish_new_q(vscsi);
++ }
+
+- /* driver should never allow this to happen */
+- case ERR_DISCONNECT:
+- case ERR_DISCONNECT_RECONNECT:
+- default:
+- dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n",
+- vscsi->state);
+- rc = ADAPT_SUCCESS;
+- break;
++ if (rc != ADAPT_SUCCESS) {
++ vscsi->state = ERR_DISCONNECTED;
++ vscsi->flags |= RESPONSE_Q_DOWN;
+ }
+
+ return rc;
+@@ -2917,7 +2829,6 @@ static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
+ */
+ static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
+ {
+- long rc = 0;
+ int pages;
+ struct vio_dev *vdev = vscsi->dma_dev;
+
+@@ -2941,22 +2852,7 @@ static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
+ return -ENOMEM;
+ }
+
+- rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE);
+- if (rc) {
+- if (rc == H_CLOSED) {
+- vscsi->state = WAIT_ENABLED;
+- rc = 0;
+- } else {
+- dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token,
+- PAGE_SIZE, DMA_BIDIRECTIONAL);
+- free_page((unsigned long)vscsi->cmd_q.base_addr);
+- rc = -ENODEV;
+- }
+- } else {
+- vscsi->state = WAIT_ENABLED;
+- }
+-
+- return rc;
++ return 0;
+ }
+
+ /**
+@@ -3271,7 +3167,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
+ /*
+ * if we are in a path where we are waiting for all pending commands
+ * to complete because we received a transport event and anything in
+- * the command queue is for a new connection, do nothing
++ * the command queue is for a new connection, do nothing
+ */
+ if (TARGET_STOP(vscsi)) {
+ vio_enable_interrupts(vscsi->dma_dev);
+@@ -3315,7 +3211,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
+ * everything but transport events on the queue
+ *
+ * need to decrement the queue index so we can
+- * look at the elment again
++ * look at the element again
+ */
+ if (vscsi->cmd_q.index)
+ vscsi->cmd_q.index -= 1;
+@@ -3379,7 +3275,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
+ INIT_LIST_HEAD(&vscsi->waiting_rsp);
+ INIT_LIST_HEAD(&vscsi->active_q);
+
+- snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev));
++ snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
++ dev_name(&vdev->dev));
+
+ pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
+
+@@ -3394,6 +3291,9 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
+ strncat(vscsi->eye, vdev->name, MAX_EYE);
+
+ vscsi->dds.unit_id = vdev->unit_address;
++ strncpy(vscsi->dds.partition_name, partition_name,
++ sizeof(vscsi->dds.partition_name));
++ vscsi->dds.partition_num = partition_number;
+
+ spin_lock_bh(&ibmvscsis_dev_lock);
+ list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
+@@ -3470,6 +3370,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
+ (unsigned long)vscsi);
+
+ init_completion(&vscsi->wait_idle);
++ init_completion(&vscsi->unconfig);
+
+ snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
+ vscsi->work_q = create_workqueue(wq_name);
+@@ -3486,31 +3387,12 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
+ goto destroy_WQ;
+ }
+
+- spin_lock_bh(&vscsi->intr_lock);
+- vio_enable_interrupts(vdev);
+- if (rc) {
+- dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc);
+- rc = -ENODEV;
+- spin_unlock_bh(&vscsi->intr_lock);
+- goto free_irq;
+- }
+-
+- if (ibmvscsis_check_q(vscsi)) {
+- rc = ERROR;
+- dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc);
+- spin_unlock_bh(&vscsi->intr_lock);
+- goto disable_interrupt;
+- }
+- spin_unlock_bh(&vscsi->intr_lock);
++ vscsi->state = WAIT_ENABLED;
+
+ dev_set_drvdata(&vdev->dev, vscsi);
+
+ return 0;
+
+-disable_interrupt:
+- vio_disable_interrupts(vdev);
+-free_irq:
+- free_irq(vdev->irq, vscsi);
+ destroy_WQ:
+ destroy_workqueue(vscsi->work_q);
+ unmap_buf:
+@@ -3544,10 +3426,11 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
+
+ pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
+
+- /*
+- * TBD: Need to handle if there are commands on the waiting_rsp q
+- * Actually, can there still be cmds outstanding to tcm?
+- */
++ spin_lock_bh(&vscsi->intr_lock);
++ ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
++ vscsi->flags |= CFG_SLEEPING;
++ spin_unlock_bh(&vscsi->intr_lock);
++ wait_for_completion(&vscsi->unconfig);
+
+ vio_disable_interrupts(vdev);
+ free_irq(vdev->irq, vscsi);
+@@ -3556,7 +3439,6 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
+ DMA_BIDIRECTIONAL);
+ kfree(vscsi->map_buf);
+ tasklet_kill(&vscsi->work_task);
+- ibmvscsis_unregister_command_q(vscsi);
+ ibmvscsis_destroy_command_q(vscsi);
+ ibmvscsis_freetimer(vscsi);
+ ibmvscsis_free_cmds(vscsi);
+@@ -3610,7 +3492,7 @@ static int ibmvscsis_get_system_info(void)
+
+ num = of_get_property(rootdn, "ibm,partition-no", NULL);
+ if (num)
+- partition_number = *num;
++ partition_number = of_read_number(num, 1);
+
+ of_node_put(rootdn);
+
+@@ -3904,18 +3786,22 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
+ }
+
+ if (tmp) {
+- tport->enabled = true;
+ spin_lock_bh(&vscsi->intr_lock);
++ tport->enabled = true;
+ lrc = ibmvscsis_enable_change_state(vscsi);
+ if (lrc)
+ pr_err("enable_change_state failed, rc %ld state %d\n",
+ lrc, vscsi->state);
+ spin_unlock_bh(&vscsi->intr_lock);
+ } else {
++ spin_lock_bh(&vscsi->intr_lock);
+ tport->enabled = false;
++ /* This simulates the server going down */
++ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
++ spin_unlock_bh(&vscsi->intr_lock);
+ }
+
+- pr_debug("tpg_enable_store, state %d\n", vscsi->state);
++ pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
+
+ return count;
+ }
+@@ -3985,10 +3871,10 @@ static struct attribute *ibmvscsis_dev_attrs[] = {
+ ATTRIBUTE_GROUPS(ibmvscsis_dev);
+
+ static struct class ibmvscsis_class = {
+- .name = "ibmvscsis",
+- .dev_release = ibmvscsis_dev_release,
+- .class_attrs = ibmvscsis_class_attrs,
+- .dev_groups = ibmvscsis_dev_groups,
++ .name = "ibmvscsis",
++ .dev_release = ibmvscsis_dev_release,
++ .class_attrs = ibmvscsis_class_attrs,
++ .dev_groups = ibmvscsis_dev_groups,
+ };
+
+ static struct vio_device_id ibmvscsis_device_table[] = {
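
The ibmvscsi_tgt.c hunks above fold two upstream fixes into this backport: the connection state machine drops the PART_UP_WAIT_ENAB intermediate state (CRQ registration now happens when the target port is enabled), and ibmvscsis_remove() no longer leaves outstanding commands to chance but posts UNCONFIGURING, flags itself with CFG_SLEEPING, and sleeps on the new unconfig completion until the disconnect handler signals it. A minimal userspace sketch of that handshake, using pthread stand-ins for the kernel completion API (all names here are illustrative, not the driver's):

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

struct adapter {
	unsigned int flags;
#define CFG_SLEEPING 0x01000
	struct completion unconfig;
};

static void *disconnect_handler(void *arg)
{
	struct adapter *a = arg;

	/* ... quiesce the queue, free commands ... */
	if (a->flags & CFG_SLEEPING)
		complete(&a->unconfig);	/* wake the sleeping remove() */
	return NULL;
}

int main(void)
{
	struct adapter a = {
		.unconfig = { PTHREAD_MUTEX_INITIALIZER,
			      PTHREAD_COND_INITIALIZER, 0 },
	};
	pthread_t t;

	a.flags |= CFG_SLEEPING;	/* remove() marks itself sleeping */
	pthread_create(&t, NULL, disconnect_handler, &a);
	wait_for_completion(&a.unconfig);	/* blocks until handler is done */
	pthread_join(t, NULL);
	puts("unconfigured");
	return 0;
}
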
+diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+index 981a0c9..98b0ca7 100644
+--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
++++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+@@ -204,8 +204,6 @@ struct scsi_info {
+ struct list_head waiting_rsp;
+ #define NO_QUEUE 0x00
+ #define WAIT_ENABLED 0X01
+- /* driver has received an initialize command */
+-#define PART_UP_WAIT_ENAB 0x02
+ #define WAIT_CONNECTION 0x04
+ /* have established a connection */
+ #define CONNECTED 0x08
+@@ -259,6 +257,8 @@ struct scsi_info {
+ #define SCHEDULE_DISCONNECT 0x00400
+ /* disconnect handler is scheduled */
+ #define DISCONNECT_SCHEDULED 0x00800
++ /* remove function is sleeping */
++#define CFG_SLEEPING 0x01000
+ u32 flags;
+ /* adapter lock */
+ spinlock_t intr_lock;
+@@ -287,6 +287,7 @@ struct scsi_info {
+
+ struct workqueue_struct *work_q;
+ struct completion wait_idle;
++ struct completion unconfig;
+ struct device dev;
+ struct vio_dev *dma_dev;
+ struct srp_target target;
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 4d09bd4..6e3e636 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -52,6 +52,7 @@ struct serial_private {
+ struct pci_dev *dev;
+ unsigned int nr;
+ struct pci_serial_quirk *quirk;
++ const struct pciserial_board *board;
+ int line[0];
+ };
+
+@@ -3871,6 +3872,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
+ }
+ }
+ priv->nr = i;
++ priv->board = board;
+ return priv;
+
+ err_deinit:
+@@ -3881,7 +3883,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
+ }
+ EXPORT_SYMBOL_GPL(pciserial_init_ports);
+
+-void pciserial_remove_ports(struct serial_private *priv)
++void pciserial_detach_ports(struct serial_private *priv)
+ {
+ struct pci_serial_quirk *quirk;
+ int i;
+@@ -3895,7 +3897,11 @@ void pciserial_remove_ports(struct serial_private *priv)
+ quirk = find_quirk(priv->dev);
+ if (quirk->exit)
+ quirk->exit(priv->dev);
++}
+
++void pciserial_remove_ports(struct serial_private *priv)
++{
++ pciserial_detach_ports(priv);
+ kfree(priv);
+ }
+ EXPORT_SYMBOL_GPL(pciserial_remove_ports);
+@@ -5590,7 +5596,7 @@ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev,
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (priv)
+- pciserial_suspend_ports(priv);
++ pciserial_detach_ports(priv);
+
+ pci_disable_device(dev);
+
+@@ -5615,9 +5621,18 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
+ static void serial8250_io_resume(struct pci_dev *dev)
+ {
+ struct serial_private *priv = pci_get_drvdata(dev);
++ const struct pciserial_board *board;
+
+- if (priv)
+- pciserial_resume_ports(priv);
++ if (!priv)
++ return;
++
++ board = priv->board;
++ kfree(priv);
++ priv = pciserial_init_ports(dev, board);
++
++ if (!IS_ERR(priv)) {
++ pci_set_drvdata(dev, priv);
++ }
+ }
+
+ static const struct pci_error_handlers serial8250_err_handler = {
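
The 8250_pci.c hunks split pciserial_remove_ports() into a detach step plus a free, and rework AER recovery accordingly: io_error_detected now detaches the ports outright instead of suspending them, and io_resume rebuilds them from the board pointer remembered at init time rather than resuming stale port state. A simplified sketch of that pattern, with toy types standing in for the real driver structures:

#include <stdio.h>
#include <stdlib.h>

struct board { int nr_ports; };

struct serial_private {
	const struct board *board;	/* remembered at init time */
	int nr;
};

static struct serial_private *init_ports(const struct board *b)
{
	struct serial_private *priv = malloc(sizeof(*priv));

	if (!priv)
		return NULL;
	priv->board = b;		/* the new priv->board field */
	priv->nr = b->nr_ports;
	printf("registered %d ports\n", priv->nr);
	return priv;
}

static void detach_ports(struct serial_private *priv)
{
	/* unregister ports, run quirk->exit, keep priv allocated */
	printf("unregistered %d ports\n", priv->nr);
}

static void remove_ports(struct serial_private *priv)
{
	detach_ports(priv);		/* remove = detach + free */
	free(priv);
}

int main(void)
{
	static const struct board b = { .nr_ports = 4 };
	struct serial_private *priv = init_ports(&b);

	/* io_error_detected: fully detach rather than suspend */
	detach_ports(priv);

	/* io_resume: rebuild from the remembered board description */
	const struct board *board = priv->board;
	free(priv);
	priv = init_ports(board);

	remove_ports(priv);
	return 0;
}
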
+diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
+index 45bc997..a95b3e7 100644
+--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
++++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
+@@ -1978,7 +1978,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
+ dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
+ goto err;
+ }
+- ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
++ sprintf(ep->name, "ep%d", ep->index);
++ ep->ep.name = ep->name;
+
+ ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
+ ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
+diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
+index 3e1c9d5..b03b2eb 100644
+--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
++++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
+@@ -280,6 +280,7 @@ struct usba_ep {
+ void __iomem *ep_regs;
+ void __iomem *dma_regs;
+ void __iomem *fifo;
++ char name[8];
+ struct usb_ep ep;
+ struct usba_udc *udc;
+
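
The atmel_usba_udc hunks replace a kasprintf()-allocated endpoint name, whose failure was never checked, with an 8-byte buffer embedded in struct usba_ep. A standalone sketch of the idiom; snprintf is used here defensively where the patch uses sprintf, since "ep%d" with a small index always fits:

#include <stdio.h>

struct usb_ep { const char *name; };

struct usba_ep {
	char name[8];		/* embedded buffer, "ep%d" fits */
	struct usb_ep ep;
	int index;
};

int main(void)
{
	struct usba_ep eps[3];

	for (int i = 0; i < 3; i++) {
		eps[i].index = i;
		snprintf(eps[i].name, sizeof(eps[i].name), "ep%d", i);
		eps[i].ep.name = eps[i].name;	/* no allocation, no leak */
		puts(eps[i].ep.name);
	}
	return 0;
}
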
+diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
+index 80378dd..c882357 100644
+--- a/drivers/vfio/vfio_iommu_spapr_tce.c
++++ b/drivers/vfio/vfio_iommu_spapr_tce.c
+@@ -31,49 +31,49 @@
+ static void tce_iommu_detach_group(void *iommu_data,
+ struct iommu_group *iommu_group);
+
+-static long try_increment_locked_vm(long npages)
++static long try_increment_locked_vm(struct mm_struct *mm, long npages)
+ {
+ long ret = 0, locked, lock_limit;
+
+- if (!current || !current->mm)
+- return -ESRCH; /* process exited */
++ if (WARN_ON_ONCE(!mm))
++ return -EPERM;
+
+ if (!npages)
+ return 0;
+
+- down_write(&current->mm->mmap_sem);
+- locked = current->mm->locked_vm + npages;
++ down_write(&mm->mmap_sem);
++ locked = mm->locked_vm + npages;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+ ret = -ENOMEM;
+ else
+- current->mm->locked_vm += npages;
++ mm->locked_vm += npages;
+
+ pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
+ npages << PAGE_SHIFT,
+- current->mm->locked_vm << PAGE_SHIFT,
++ mm->locked_vm << PAGE_SHIFT,
+ rlimit(RLIMIT_MEMLOCK),
+ ret ? " - exceeded" : "");
+
+- up_write(&current->mm->mmap_sem);
++ up_write(&mm->mmap_sem);
+
+ return ret;
+ }
+
+-static void decrement_locked_vm(long npages)
++static void decrement_locked_vm(struct mm_struct *mm, long npages)
+ {
+- if (!current || !current->mm || !npages)
+- return; /* process exited */
++ if (!mm || !npages)
++ return;
+
+- down_write(&current->mm->mmap_sem);
+- if (WARN_ON_ONCE(npages > current->mm->locked_vm))
+- npages = current->mm->locked_vm;
+- current->mm->locked_vm -= npages;
++ down_write(&mm->mmap_sem);
++ if (WARN_ON_ONCE(npages > mm->locked_vm))
++ npages = mm->locked_vm;
++ mm->locked_vm -= npages;
+ pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
+ npages << PAGE_SHIFT,
+- current->mm->locked_vm << PAGE_SHIFT,
++ mm->locked_vm << PAGE_SHIFT,
+ rlimit(RLIMIT_MEMLOCK));
+- up_write(&current->mm->mmap_sem);
++ up_write(&mm->mmap_sem);
+ }
+
+ /*
+@@ -89,6 +89,15 @@ struct tce_iommu_group {
+ };
+
+ /*
++ * A container needs to remember which preregistered region it has
++ * referenced to do proper cleanup at the userspace process exit.
++ */
++struct tce_iommu_prereg {
++ struct list_head next;
++ struct mm_iommu_table_group_mem_t *mem;
++};
++
++/*
+ * The container descriptor supports only a single group per container.
+ * Required by the API as the container is not supplied with the IOMMU group
+ * at the moment of initialization.
+@@ -97,24 +106,68 @@ struct tce_container {
+ struct mutex lock;
+ bool enabled;
+ bool v2;
++ bool def_window_pending;
+ unsigned long locked_pages;
++ struct mm_struct *mm;
+ struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
+ struct list_head group_list;
++ struct list_head prereg_list;
+ };
+
++static long tce_iommu_mm_set(struct tce_container *container)
++{
++ if (container->mm) {
++ if (container->mm == current->mm)
++ return 0;
++ return -EPERM;
++ }
++ BUG_ON(!current->mm);
++ container->mm = current->mm;
++ atomic_inc(&container->mm->mm_count);
++
++ return 0;
++}
++
++static long tce_iommu_prereg_free(struct tce_container *container,
++ struct tce_iommu_prereg *tcemem)
++{
++ long ret;
++
++ ret = mm_iommu_put(container->mm, tcemem->mem);
++ if (ret)
++ return ret;
++
++ list_del(&tcemem->next);
++ kfree(tcemem);
++
++ return 0;
++}
++
+ static long tce_iommu_unregister_pages(struct tce_container *container,
+ __u64 vaddr, __u64 size)
+ {
+ struct mm_iommu_table_group_mem_t *mem;
++ struct tce_iommu_prereg *tcemem;
++ bool found = false;
+
+ if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
+ return -EINVAL;
+
+- mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT);
++ mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
+ if (!mem)
+ return -ENOENT;
+
+- return mm_iommu_put(mem);
++ list_for_each_entry(tcemem, &container->prereg_list, next) {
++ if (tcemem->mem == mem) {
++ found = true;
++ break;
++ }
++ }
++
++ if (!found)
++ return -ENOENT;
++
++ return tce_iommu_prereg_free(container, tcemem);
+ }
+
+ static long tce_iommu_register_pages(struct tce_container *container,
+@@ -122,22 +175,36 @@ static long tce_iommu_register_pages(struct tce_container *container,
+ {
+ long ret = 0;
+ struct mm_iommu_table_group_mem_t *mem = NULL;
++ struct tce_iommu_prereg *tcemem;
+ unsigned long entries = size >> PAGE_SHIFT;
+
+ if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
+ ((vaddr + size) < vaddr))
+ return -EINVAL;
+
+- ret = mm_iommu_get(vaddr, entries, &mem);
++ mem = mm_iommu_find(container->mm, vaddr, entries);
++ if (mem) {
++ list_for_each_entry(tcemem, &container->prereg_list, next) {
++ if (tcemem->mem == mem)
++ return -EBUSY;
++ }
++ }
++
++ ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
+ if (ret)
+ return ret;
+
++ tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
++ tcemem->mem = mem;
++ list_add(&tcemem->next, &container->prereg_list);
++
+ container->enabled = true;
+
+ return 0;
+ }
+
+-static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
++static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
++ struct mm_struct *mm)
+ {
+ unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
+ tbl->it_size, PAGE_SIZE);
+@@ -146,13 +213,13 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
+
+ BUG_ON(tbl->it_userspace);
+
+- ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
++ ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
+ if (ret)
+ return ret;
+
+ uas = vzalloc(cb);
+ if (!uas) {
+- decrement_locked_vm(cb >> PAGE_SHIFT);
++ decrement_locked_vm(mm, cb >> PAGE_SHIFT);
+ return -ENOMEM;
+ }
+ tbl->it_userspace = uas;
+@@ -160,7 +227,8 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
+ return 0;
+ }
+
+-static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
++static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
++ struct mm_struct *mm)
+ {
+ unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
+ tbl->it_size, PAGE_SIZE);
+@@ -170,7 +238,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
+
+ vfree(tbl->it_userspace);
+ tbl->it_userspace = NULL;
+- decrement_locked_vm(cb >> PAGE_SHIFT);
++ decrement_locked_vm(mm, cb >> PAGE_SHIFT);
+ }
+
+ static bool tce_page_is_contained(struct page *page, unsigned page_shift)
+@@ -230,9 +298,6 @@ static int tce_iommu_enable(struct tce_container *container)
+ struct iommu_table_group *table_group;
+ struct tce_iommu_group *tcegrp;
+
+- if (!current->mm)
+- return -ESRCH; /* process exited */
+-
+ if (container->enabled)
+ return -EBUSY;
+
+@@ -277,8 +342,12 @@ static int tce_iommu_enable(struct tce_container *container)
+ if (!table_group->tce32_size)
+ return -EPERM;
+
++ ret = tce_iommu_mm_set(container);
++ if (ret)
++ return ret;
++
+ locked = table_group->tce32_size >> PAGE_SHIFT;
+- ret = try_increment_locked_vm(locked);
++ ret = try_increment_locked_vm(container->mm, locked);
+ if (ret)
+ return ret;
+
+@@ -296,10 +365,8 @@ static void tce_iommu_disable(struct tce_container *container)
+
+ container->enabled = false;
+
+- if (!current->mm)
+- return;
+-
+- decrement_locked_vm(container->locked_pages);
++ BUG_ON(!container->mm);
++ decrement_locked_vm(container->mm, container->locked_pages);
+ }
+
+ static void *tce_iommu_open(unsigned long arg)
+@@ -317,6 +384,7 @@ static void *tce_iommu_open(unsigned long arg)
+
+ mutex_init(&container->lock);
+ INIT_LIST_HEAD_RCU(&container->group_list);
++ INIT_LIST_HEAD_RCU(&container->prereg_list);
+
+ container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;
+
+@@ -326,7 +394,8 @@ static void *tce_iommu_open(unsigned long arg)
+ static int tce_iommu_clear(struct tce_container *container,
+ struct iommu_table *tbl,
+ unsigned long entry, unsigned long pages);
+-static void tce_iommu_free_table(struct iommu_table *tbl);
++static void tce_iommu_free_table(struct tce_container *container,
++ struct iommu_table *tbl);
+
+ static void tce_iommu_release(void *iommu_data)
+ {
+@@ -351,10 +420,20 @@ static void tce_iommu_release(void *iommu_data)
+ continue;
+
+ tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
+- tce_iommu_free_table(tbl);
++ tce_iommu_free_table(container, tbl);
++ }
++
++ while (!list_empty(&container->prereg_list)) {
++ struct tce_iommu_prereg *tcemem;
++
++ tcemem = list_first_entry(&container->prereg_list,
++ struct tce_iommu_prereg, next);
++ WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem));
+ }
+
+ tce_iommu_disable(container);
++ if (container->mm)
++ mmdrop(container->mm);
+ mutex_destroy(&container->lock);
+
+ kfree(container);
+@@ -369,13 +448,14 @@ static void tce_iommu_unuse_page(struct tce_container *container,
+ put_page(page);
+ }
+
+-static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
++static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
++ unsigned long tce, unsigned long size,
+ unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
+ {
+ long ret = 0;
+ struct mm_iommu_table_group_mem_t *mem;
+
+- mem = mm_iommu_lookup(tce, size);
++ mem = mm_iommu_lookup(container->mm, tce, size);
+ if (!mem)
+ return -EINVAL;
+
+@@ -388,18 +468,18 @@ static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
+ return 0;
+ }
+
+-static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
+- unsigned long entry)
++static void tce_iommu_unuse_page_v2(struct tce_container *container,
++ struct iommu_table *tbl, unsigned long entry)
+ {
+ struct mm_iommu_table_group_mem_t *mem = NULL;
+ int ret;
+ unsigned long hpa = 0;
+ unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+
+- if (!pua || !current || !current->mm)
++ if (!pua)
+ return;
+
+- ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
++ ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
+ &hpa, &mem);
+ if (ret)
+ pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
+@@ -429,7 +509,7 @@ static int tce_iommu_clear(struct tce_container *container,
+ continue;
+
+ if (container->v2) {
+- tce_iommu_unuse_page_v2(tbl, entry);
++ tce_iommu_unuse_page_v2(container, tbl, entry);
+ continue;
+ }
+
+@@ -509,13 +589,19 @@ static long tce_iommu_build_v2(struct tce_container *container,
+ unsigned long hpa;
+ enum dma_data_direction dirtmp;
+
++ if (!tbl->it_userspace) {
++ ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
++ if (ret)
++ return ret;
++ }
++
+ for (i = 0; i < pages; ++i) {
+ struct mm_iommu_table_group_mem_t *mem = NULL;
+ unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
+ entry + i);
+
+- ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
+- &hpa, &mem);
++ ret = tce_iommu_prereg_ua_to_hpa(container,
++ tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
+ if (ret)
+ break;
+
+@@ -536,7 +622,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
+ ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
+ if (ret) {
+ /* dirtmp cannot be DMA_NONE here */
+- tce_iommu_unuse_page_v2(tbl, entry + i);
++ tce_iommu_unuse_page_v2(container, tbl, entry + i);
+ pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
+ __func__, entry << tbl->it_page_shift,
+ tce, ret);
+@@ -544,7 +630,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
+ }
+
+ if (dirtmp != DMA_NONE)
+- tce_iommu_unuse_page_v2(tbl, entry + i);
++ tce_iommu_unuse_page_v2(container, tbl, entry + i);
+
+ *pua = tce;
+
+@@ -572,7 +658,7 @@ static long tce_iommu_create_table(struct tce_container *container,
+ if (!table_size)
+ return -EINVAL;
+
+- ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
++ ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
+ if (ret)
+ return ret;
+
+@@ -582,25 +668,17 @@ static long tce_iommu_create_table(struct tce_container *container,
+ WARN_ON(!ret && !(*ptbl)->it_ops->free);
+ WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));
+
+- if (!ret && container->v2) {
+- ret = tce_iommu_userspace_view_alloc(*ptbl);
+- if (ret)
+- (*ptbl)->it_ops->free(*ptbl);
+- }
+-
+- if (ret)
+- decrement_locked_vm(table_size >> PAGE_SHIFT);
+-
+ return ret;
+ }
+
+-static void tce_iommu_free_table(struct iommu_table *tbl)
++static void tce_iommu_free_table(struct tce_container *container,
++ struct iommu_table *tbl)
+ {
+ unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
+
+- tce_iommu_userspace_view_free(tbl);
++ tce_iommu_userspace_view_free(tbl, container->mm);
+ tbl->it_ops->free(tbl);
+- decrement_locked_vm(pages);
++ decrement_locked_vm(container->mm, pages);
+ }
+
+ static long tce_iommu_create_window(struct tce_container *container,
+@@ -663,7 +741,7 @@ static long tce_iommu_create_window(struct tce_container *container,
+ table_group = iommu_group_get_iommudata(tcegrp->grp);
+ table_group->ops->unset_window(table_group, num);
+ }
+- tce_iommu_free_table(tbl);
++ tce_iommu_free_table(container, tbl);
+
+ return ret;
+ }
+@@ -701,12 +779,41 @@ static long tce_iommu_remove_window(struct tce_container *container,
+
+ /* Free table */
+ tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
+- tce_iommu_free_table(tbl);
++ tce_iommu_free_table(container, tbl);
+ container->tables[num] = NULL;
+
+ return 0;
+ }
+
++static long tce_iommu_create_default_window(struct tce_container *container)
++{
++ long ret;
++ __u64 start_addr = 0;
++ struct tce_iommu_group *tcegrp;
++ struct iommu_table_group *table_group;
++
++ if (!container->def_window_pending)
++ return 0;
++
++ if (!tce_groups_attached(container))
++ return -ENODEV;
++
++ tcegrp = list_first_entry(&container->group_list,
++ struct tce_iommu_group, next);
++ table_group = iommu_group_get_iommudata(tcegrp->grp);
++ if (!table_group)
++ return -ENODEV;
++
++ ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
++ table_group->tce32_size, 1, &start_addr);
++ WARN_ON_ONCE(!ret && start_addr);
++
++ if (!ret)
++ container->def_window_pending = false;
++
++ return ret;
++}
++
+ static long tce_iommu_ioctl(void *iommu_data,
+ unsigned int cmd, unsigned long arg)
+ {
+@@ -727,7 +834,17 @@ static long tce_iommu_ioctl(void *iommu_data,
+ }
+
+ return (ret < 0) ? 0 : ret;
++ }
++
++ /*
++ * Sanity check to prevent one userspace from manipulating
++ * another userspace mm.
++ */
++ BUG_ON(!container);
++ if (container->mm && container->mm != current->mm)
++ return -EPERM;
+
++ switch (cmd) {
+ case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
+ struct vfio_iommu_spapr_tce_info info;
+ struct tce_iommu_group *tcegrp;
+@@ -797,6 +914,10 @@ static long tce_iommu_ioctl(void *iommu_data,
+ VFIO_DMA_MAP_FLAG_WRITE))
+ return -EINVAL;
+
++ ret = tce_iommu_create_default_window(container);
++ if (ret)
++ return ret;
++
+ num = tce_iommu_find_table(container, param.iova, &tbl);
+ if (num < 0)
+ return -ENXIO;
+@@ -860,6 +981,10 @@ static long tce_iommu_ioctl(void *iommu_data,
+ if (param.flags)
+ return -EINVAL;
+
++ ret = tce_iommu_create_default_window(container);
++ if (ret)
++ return ret;
++
+ num = tce_iommu_find_table(container, param.iova, &tbl);
+ if (num < 0)
+ return -ENXIO;
+@@ -888,6 +1013,10 @@ static long tce_iommu_ioctl(void *iommu_data,
+ minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
+ size);
+
++ ret = tce_iommu_mm_set(container);
++ if (ret)
++ return ret;
++
+ if (copy_from_user(&param, (void __user *)arg, minsz))
+ return -EFAULT;
+
+@@ -911,6 +1040,9 @@ static long tce_iommu_ioctl(void *iommu_data,
+ if (!container->v2)
+ break;
+
++ if (!container->mm)
++ return -EPERM;
++
+ minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
+ size);
+
+@@ -969,6 +1101,10 @@ static long tce_iommu_ioctl(void *iommu_data,
+ if (!container->v2)
+ break;
+
++ ret = tce_iommu_mm_set(container);
++ if (ret)
++ return ret;
++
+ if (!tce_groups_attached(container))
+ return -ENXIO;
+
+@@ -986,6 +1122,10 @@ static long tce_iommu_ioctl(void *iommu_data,
+
+ mutex_lock(&container->lock);
+
++ ret = tce_iommu_create_default_window(container);
++ if (ret)
++ return ret;
++
+ ret = tce_iommu_create_window(container, create.page_shift,
+ create.window_size, create.levels,
+ &create.start_addr);
+@@ -1003,6 +1143,10 @@ static long tce_iommu_ioctl(void *iommu_data,
+ if (!container->v2)
+ break;
+
++ ret = tce_iommu_mm_set(container);
++ if (ret)
++ return ret;
++
+ if (!tce_groups_attached(container))
+ return -ENXIO;
+
+@@ -1018,6 +1162,11 @@ static long tce_iommu_ioctl(void *iommu_data,
+ if (remove.flags)
+ return -EINVAL;
+
++ if (container->def_window_pending && !remove.start_addr) {
++ container->def_window_pending = false;
++ return 0;
++ }
++
+ mutex_lock(&container->lock);
+
+ ret = tce_iommu_remove_window(container, remove.start_addr);
+@@ -1043,7 +1192,7 @@ static void tce_iommu_release_ownership(struct tce_container *container,
+ continue;
+
+ tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
+- tce_iommu_userspace_view_free(tbl);
++ tce_iommu_userspace_view_free(tbl, container->mm);
+ if (tbl->it_map)
+ iommu_release_ownership(tbl);
+
+@@ -1062,10 +1211,7 @@ static int tce_iommu_take_ownership(struct tce_container *container,
+ if (!tbl || !tbl->it_map)
+ continue;
+
+- rc = tce_iommu_userspace_view_alloc(tbl);
+- if (!rc)
+- rc = iommu_take_ownership(tbl);
+-
++ rc = iommu_take_ownership(tbl);
+ if (rc) {
+ for (j = 0; j < i; ++j)
+ iommu_release_ownership(
+@@ -1100,9 +1246,6 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container,
+ static long tce_iommu_take_ownership_ddw(struct tce_container *container,
+ struct iommu_table_group *table_group)
+ {
+- long i, ret = 0;
+- struct iommu_table *tbl = NULL;
+-
+ if (!table_group->ops->create_table || !table_group->ops->set_window ||
+ !table_group->ops->release_ownership) {
+ WARN_ON_ONCE(1);
+@@ -1111,47 +1254,7 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container,
+
+ table_group->ops->take_ownership(table_group);
+
+- /*
+- * If it the first group attached, check if there is
+- * a default DMA window and create one if none as
+- * the userspace expects it to exist.
+- */
+- if (!tce_groups_attached(container) && !container->tables[0]) {
+- ret = tce_iommu_create_table(container,
+- table_group,
+- 0, /* window number */
+- IOMMU_PAGE_SHIFT_4K,
+- table_group->tce32_size,
+- 1, /* default levels */
+- &tbl);
+- if (ret)
+- goto release_exit;
+- else
+- container->tables[0] = tbl;
+- }
+-
+- /* Set all windows to the new group */
+- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+- tbl = container->tables[i];
+-
+- if (!tbl)
+- continue;
+-
+- /* Set the default window to a new group */
+- ret = table_group->ops->set_window(table_group, i, tbl);
+- if (ret)
+- goto release_exit;
+- }
+-
+ return 0;
+-
+-release_exit:
+- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
+- table_group->ops->unset_window(table_group, i);
+-
+- table_group->ops->release_ownership(table_group);
+-
+- return ret;
+ }
+
+ static int tce_iommu_attach_group(void *iommu_data,
+@@ -1203,10 +1306,13 @@ static int tce_iommu_attach_group(void *iommu_data,
+ }
+
+ if (!table_group->ops || !table_group->ops->take_ownership ||
+- !table_group->ops->release_ownership)
++ !table_group->ops->release_ownership) {
+ ret = tce_iommu_take_ownership(container, table_group);
+- else
++ } else {
+ ret = tce_iommu_take_ownership_ddw(container, table_group);
++ if (!tce_groups_attached(container) && !container->tables[0])
++ container->def_window_pending = true;
++ }
+
+ if (!ret) {
+ tcegrp->grp = iommu_group;
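
The vfio_iommu_spapr_tce.c hunks are the largest piece of this update: the container now pins the owning mm_struct once via tce_iommu_mm_set(), all locked_vm accounting and preregistered-region lookups run against container->mm rather than current->mm, and the default DMA window is created lazily through def_window_pending instead of at group attach. A userspace analogue of the reworked locked-page accounting, assuming a toy struct in place of mm_struct; only the RLIMIT_MEMLOCK check mirrors the kernel logic:

#include <stdio.h>
#include <sys/resource.h>

struct fake_mm {
	long locked_vm;		/* in pages */
	long page_size;
};

static int try_increment_locked_vm(struct fake_mm *mm, long npages)
{
	struct rlimit rl;
	long locked, lock_limit;

	if (!mm)
		return -1;	/* kernel returns -EPERM with WARN_ON_ONCE */
	if (!npages)
		return 0;

	if (getrlimit(RLIMIT_MEMLOCK, &rl))
		return -1;
	lock_limit = rl.rlim_cur / mm->page_size;

	locked = mm->locked_vm + npages;
	if (locked > lock_limit)
		return -1;	/* -ENOMEM unless CAP_IPC_LOCK */
	mm->locked_vm = locked;
	return 0;
}

int main(void)
{
	struct fake_mm mm = { .locked_vm = 0, .page_size = 4096 };

	printf("inc 4 pages: %d, locked_vm=%ld\n",
	       try_increment_locked_vm(&mm, 4), mm.locked_vm);
	return 0;
}
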
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index 6aaf425..a13b031 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -18,19 +18,12 @@
+
+ struct bpf_reg_state {
+ enum bpf_reg_type type;
+- /*
+- * Used to determine if any memory access using this register will
+- * result in a bad access.
+- */
+- s64 min_value;
+- u64 max_value;
+ union {
+ /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
+ s64 imm;
+
+ /* valid when type == PTR_TO_PACKET* */
+ struct {
+- u32 id;
+ u16 off;
+ u16 range;
+ };
+@@ -40,6 +33,13 @@ struct bpf_reg_state {
+ */
+ struct bpf_map *map_ptr;
+ };
++ u32 id;
++ /* Used to determine if any memory access using this register will
++ * result in a bad access. These two fields must be last.
++ * See states_equal()
++ */
++ s64 min_value;
++ u64 max_value;
+ };
+
+ enum bpf_stack_slot_type {
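
The bpf_verifier.h reordering above is load-bearing: states_equal() in the verifier compares two register states with a single memcmp() over the prefix that ends at the new id field, so min_value/max_value must sit last. A self-contained illustration, with offsetofend() re-derived here since it lives in a kernel header:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct reg_state {
	int type;
	long imm;
	unsigned int id;
	/* excluded from pruning; must stay last (see states_equal) */
	long long min_value;
	unsigned long long max_value;
};

static int states_equal(const struct reg_state *a, const struct reg_state *b)
{
	/* compare everything up to and including 'id', nothing after */
	return memcmp(a, b, offsetofend(struct reg_state, id)) == 0;
}

int main(void)
{
	struct reg_state a, b;

	memset(&a, 0, sizeof(a));	/* zero padding too; memcmp sees it */
	a.type = 1;
	a.imm = 2;
	b = a;
	b.min_value = -99;		/* differs only past 'id' */
	printf("equal: %d\n", states_equal(&a, &b));	/* prints 1 */
	return 0;
}
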
+diff --git a/include/linux/dccp.h b/include/linux/dccp.h
+index 61d042b..6844929 100644
+--- a/include/linux/dccp.h
++++ b/include/linux/dccp.h
+@@ -163,6 +163,7 @@ struct dccp_request_sock {
+ __u64 dreq_isr;
+ __u64 dreq_gsr;
+ __be32 dreq_service;
++ spinlock_t dreq_lock;
+ struct list_head dreq_featneg;
+ __u32 dreq_timestamp_echo;
+ __u32 dreq_timestamp_time;
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
+index 192eef2f..d596a07 100644
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -1548,31 +1548,23 @@ static inline struct vmpacket_descriptor *
+ get_next_pkt_raw(struct vmbus_channel *channel)
+ {
+ struct hv_ring_buffer_info *ring_info = &channel->inbound;
+- u32 read_loc = ring_info->priv_read_index;
++ u32 priv_read_loc = ring_info->priv_read_index;
+ void *ring_buffer = hv_get_ring_buffer(ring_info);
+- struct vmpacket_descriptor *cur_desc;
+- u32 packetlen;
+ u32 dsize = ring_info->ring_datasize;
+- u32 delta = read_loc - ring_info->ring_buffer->read_index;
++ /*
++ * delta is the difference between what is available to read and
++ * what was already consumed in place. We commit read index after
++ * the whole batch is processed.
++ */
++ u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
++ priv_read_loc - ring_info->ring_buffer->read_index :
++ (dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
+ u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
+
+ if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
+ return NULL;
+
+- if ((read_loc + sizeof(*cur_desc)) > dsize)
+- return NULL;
+-
+- cur_desc = ring_buffer + read_loc;
+- packetlen = cur_desc->len8 << 3;
+-
+- /*
+- * If the packet under consideration is wrapping around,
+- * return failure.
+- */
+- if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1))
+- return NULL;
+-
+- return cur_desc;
++ return ring_buffer + priv_read_loc;
+ }
+
+ /*
+@@ -1584,16 +1576,14 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
+ struct vmpacket_descriptor *desc)
+ {
+ struct hv_ring_buffer_info *ring_info = &channel->inbound;
+- u32 read_loc = ring_info->priv_read_index;
+ u32 packetlen = desc->len8 << 3;
+ u32 dsize = ring_info->ring_datasize;
+
+- if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize)
+- BUG();
+ /*
+ * Include the packet trailer.
+ */
+ ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
++ ring_info->priv_read_index %= dsize;
+ }
+
+ /*
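
The hyperv.h hunks make get_next_pkt_raw()/put_pkt_raw() wrap-safe: the private read index may now lap the committed read index, so the consumed delta and the index advance are both computed modulo the ring size instead of rejecting wrapping packets. The arithmetic in isolation:

#include <stdio.h>

static unsigned int ring_delta(unsigned int priv_read, unsigned int read,
			       unsigned int dsize)
{
	/* bytes consumed in place, valid whether or not priv_read wrapped */
	return priv_read >= read ? priv_read - read
				 : (dsize - read) + priv_read;
}

int main(void)
{
	unsigned int dsize = 4096, read = 4000, priv_read = 4000;

	/* consume a 200-byte packet in place: advance wraps at dsize */
	priv_read = (priv_read + 200) % dsize;
	printf("priv_read=%u delta=%u\n",
	       priv_read, ring_delta(priv_read, read, dsize));
	/* prints priv_read=104 delta=200 */
	return 0;
}
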
+diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h
+index d08c63f..0c5d5dd 100644
+--- a/include/uapi/linux/packet_diag.h
++++ b/include/uapi/linux/packet_diag.h
+@@ -64,7 +64,7 @@ struct packet_diag_mclist {
+ __u32 pdmc_count;
+ __u16 pdmc_type;
+ __u16 pdmc_alen;
+- __u8 pdmc_addr[MAX_ADDR_LEN];
++ __u8 pdmc_addr[32]; /* MAX_ADDR_LEN */
+ };
+
+ struct packet_diag_ring {
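
MAX_ADDR_LEN is a kernel-internal define, so the UAPI hunk above hard-codes 32 to keep the header buildable from userspace without changing the structure's layout. A consumer that wants to pin that assumption down can assert it at compile time; the struct below is a hypothetical stand-in for the real header:

#include <assert.h>

struct packet_diag_mclist_example {
	unsigned char pdmc_addr[32];	/* MAX_ADDR_LEN at the time */
};

/* fails to compile if the ABI-visible size ever drifts */
static_assert(sizeof(((struct packet_diag_mclist_example *)0)->pdmc_addr) == 32,
	      "pdmc_addr must stay 32 bytes for ABI compatibility");

int main(void) { return 0; }
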
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 8199821..85d1c94 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -212,9 +212,10 @@ static void print_verifier_state(struct bpf_verifier_state *state)
+ else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
+ t == PTR_TO_MAP_VALUE_OR_NULL ||
+ t == PTR_TO_MAP_VALUE_ADJ)
+- verbose("(ks=%d,vs=%d)",
++ verbose("(ks=%d,vs=%d,id=%u)",
+ reg->map_ptr->key_size,
+- reg->map_ptr->value_size);
++ reg->map_ptr->value_size,
++ reg->id);
+ if (reg->min_value != BPF_REGISTER_MIN_RANGE)
+ verbose(",min_value=%lld",
+ (long long)reg->min_value);
+@@ -443,13 +444,19 @@ static void init_reg_state(struct bpf_reg_state *regs)
+ regs[BPF_REG_1].type = PTR_TO_CTX;
+ }
+
+-static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
++static void __mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
+ {
+- BUG_ON(regno >= MAX_BPF_REG);
+ regs[regno].type = UNKNOWN_VALUE;
++ regs[regno].id = 0;
+ regs[regno].imm = 0;
+ }
+
++static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
++{
++ BUG_ON(regno >= MAX_BPF_REG);
++ __mark_reg_unknown_value(regs, regno);
++}
++
+ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
+ {
+ regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
+@@ -1252,6 +1259,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id)
+ return -EINVAL;
+ }
+ regs[BPF_REG_0].map_ptr = meta.map_ptr;
++ regs[BPF_REG_0].id = ++env->id_gen;
+ } else {
+ verbose("unknown return type %d of func %d\n",
+ fn->ret_type, func_id);
+@@ -1668,8 +1676,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
+ insn->src_reg);
+ return -EACCES;
+ }
+- regs[insn->dst_reg].type = UNKNOWN_VALUE;
+- regs[insn->dst_reg].map_ptr = NULL;
++ mark_reg_unknown_value(regs, insn->dst_reg);
+ }
+ } else {
+ /* case: R = imm
+@@ -1931,6 +1938,43 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
+ check_reg_overflow(true_reg);
+ }
+
++static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
++ enum bpf_reg_type type)
++{
++ struct bpf_reg_state *reg = &regs[regno];
++
++ if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
++ reg->type = type;
++ /* We don't need id from this point onwards anymore, thus we
++ * should better reset it, so that state pruning has chances
++ * to take effect.
++ */
++ reg->id = 0;
++ if (type == UNKNOWN_VALUE)
++ __mark_reg_unknown_value(regs, regno);
++ }
++}
++
++/* The logic is similar to find_good_pkt_pointers(), both could eventually
++ * be folded together at some point.
++ */
++static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
++ enum bpf_reg_type type)
++{
++ struct bpf_reg_state *regs = state->regs;
++ u32 id = regs[regno].id;
++ int i;
++
++ for (i = 0; i < MAX_BPF_REG; i++)
++ mark_map_reg(regs, i, id, type);
++
++ for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
++ if (state->stack_slot_type[i] != STACK_SPILL)
++ continue;
++ mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, type);
++ }
++}
++
+ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ struct bpf_insn *insn, int *insn_idx)
+ {
+@@ -2018,18 +2062,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ if (BPF_SRC(insn->code) == BPF_K &&
+ insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
+ dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
+- if (opcode == BPF_JEQ) {
+- /* next fallthrough insn can access memory via
+- * this register
+- */
+- regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
+- /* branch targer cannot access it, since reg == 0 */
+- mark_reg_unknown_value(other_branch->regs,
+- insn->dst_reg);
+- } else {
+- other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
+- mark_reg_unknown_value(regs, insn->dst_reg);
+- }
++ /* Mark all identical map registers in each branch as either
++ * safe or unknown depending R == 0 or R != 0 conditional.
++ */
++ mark_map_regs(this_branch, insn->dst_reg,
++ opcode == BPF_JEQ ? PTR_TO_MAP_VALUE : UNKNOWN_VALUE);
++ mark_map_regs(other_branch, insn->dst_reg,
++ opcode == BPF_JEQ ? UNKNOWN_VALUE : PTR_TO_MAP_VALUE);
+ } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
+ dst_reg->type == PTR_TO_PACKET &&
+ regs[insn->src_reg].type == PTR_TO_PACKET_END) {
+@@ -2469,7 +2508,7 @@ static bool states_equal(struct bpf_verifier_env *env,
+ * we didn't do a variable access into a map then we are a-ok.
+ */
+ if (!varlen_map_access &&
+- rold->type == rcur->type && rold->imm == rcur->imm)
++ memcmp(rold, rcur, offsetofend(struct bpf_reg_state, id)) == 0)
+ continue;
+
+ /* If we didn't map access then again we don't care about the
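
The verifier.c hunks fix map-value tracking across register copies: each map lookup result gets a fresh id, and when a NULL check is taken, mark_map_regs() retypes every register and spilled stack slot sharing that id, not just the register named in the branch. A simplified model of the propagation step, with toy types and constants:

#include <stdio.h>

enum reg_type { UNKNOWN_VALUE, PTR_TO_MAP_VALUE, PTR_TO_MAP_VALUE_OR_NULL };

struct reg_state {
	enum reg_type type;
	unsigned int id;
};

#define MAX_REG 4

static void mark_map_regs(struct reg_state *regs, unsigned int id,
			  enum reg_type type)
{
	for (int i = 0; i < MAX_REG; i++) {
		if (regs[i].type == PTR_TO_MAP_VALUE_OR_NULL &&
		    regs[i].id == id) {
			regs[i].type = type;
			regs[i].id = 0;	/* reset so state pruning can match */
		}
	}
}

int main(void)
{
	struct reg_state regs[MAX_REG] = {
		[0] = { PTR_TO_MAP_VALUE_OR_NULL, 7 },	/* R0 = lookup() */
		[1] = { PTR_TO_MAP_VALUE_OR_NULL, 7 },	/* R1 = R0 (copy) */
		[2] = { PTR_TO_MAP_VALUE_OR_NULL, 9 },	/* unrelated lookup */
	};

	/* fallthrough edge of "if (R0 != NULL)": both copies become safe */
	mark_map_regs(regs, 7, PTR_TO_MAP_VALUE);

	for (int i = 0; i < MAX_REG; i++)
		printf("R%d type=%d id=%u\n", i, regs[i].type, regs[i].id);
	return 0;
}
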
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 38b68c2..4c6b6e6 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2813,7 +2813,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ {
+ struct hrtimer_sleeper timeout, *to = NULL;
+ struct rt_mutex_waiter rt_waiter;
+- struct rt_mutex *pi_mutex = NULL;
+ struct futex_hash_bucket *hb;
+ union futex_key key2 = FUTEX_KEY_INIT;
+ struct futex_q q = futex_q_init;
+@@ -2897,6 +2896,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ if (q.pi_state && (q.pi_state->owner != current)) {
+ spin_lock(q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current);
++ if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
++ rt_mutex_unlock(&q.pi_state->pi_mutex);
+ /*
+ * Drop the reference to the pi state which
+ * the requeue_pi() code acquired for us.
+@@ -2905,6 +2906,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ spin_unlock(q.lock_ptr);
+ }
+ } else {
++ struct rt_mutex *pi_mutex;
++
+ /*
+ * We have been woken up by futex_unlock_pi(), a timeout, or a
+ * signal. futex_unlock_pi() will not destroy the lock_ptr nor
+@@ -2928,18 +2931,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ if (res)
+ ret = (res < 0) ? res : 0;
+
++ /*
++ * If fixup_pi_state_owner() faulted and was unable to handle
++ * the fault, unlock the rt_mutex and return the fault to
++ * userspace.
++ */
++ if (ret && rt_mutex_owner(pi_mutex) == current)
++ rt_mutex_unlock(pi_mutex);
++
+ /* Unqueue and drop the lock. */
+ unqueue_me_pi(&q);
+ }
+
+- /*
+- * If fixup_pi_state_owner() faulted and was unable to handle the
+- * fault, unlock the rt_mutex and return the fault to userspace.
+- */
+- if (ret == -EFAULT) {
+- if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
+- rt_mutex_unlock(pi_mutex);
+- } else if (ret == -EINTR) {
++ if (ret == -EINTR) {
+ /*
+ * We've already been requeued, but cannot restart by calling
+ * futex_lock_pi() directly. We could restart this syscall, but
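
The futex.c restructuring pulls the unlock-on-fault handling into each branch: if fixup_pi_state_owner() fails while the task still owns the pi_mutex, the lock is released right there, instead of in a single late -EFAULT check that missed the requeue path. The generic shape of the pattern, assuming a pthread mutex as a stand-in for the rt_mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pi_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t owner;

static int fixup_owner(void)
{
	return -1;	/* pretend the fixup faulted */
}

static int locked_path(void)
{
	int ret;

	pthread_mutex_lock(&pi_mutex);
	owner = pthread_self();

	ret = fixup_owner();
	/* the fix: check ownership and unlock on this branch, not later */
	if (ret && pthread_equal(owner, pthread_self()))
		pthread_mutex_unlock(&pi_mutex);
	return ret;
}

int main(void)
{
	printf("ret=%d (lock released on failure)\n", locked_path());
	return 0;
}
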
+diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
+index 1591f6b..2bef4ab 100644
+--- a/kernel/locking/rwsem-spinlock.c
++++ b/kernel/locking/rwsem-spinlock.c
+@@ -216,10 +216,8 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
+ */
+ if (sem->count == 0)
+ break;
+- if (signal_pending_state(state, current)) {
+- ret = -EINTR;
+- goto out;
+- }
++ if (signal_pending_state(state, current))
++ goto out_nolock;
+ set_task_state(tsk, state);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ schedule();
+@@ -227,12 +225,19 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
+ }
+ /* got the lock */
+ sem->count = -1;
+-out:
+ list_del(&waiter.list);
+
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+ return ret;
++
++out_nolock:
++ list_del(&waiter.list);
++ if (!list_empty(&sem->wait_list))
++ __rwsem_do_wake(sem, 1);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
++
++ return -EINTR;
+ }
+
+ void __sched __down_write(struct rw_semaphore *sem)
+diff --git a/mm/slab.c b/mm/slab.c
+index bd878f0..1f82d16 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -2332,7 +2332,7 @@ static int drain_freelist(struct kmem_cache *cache,
+ return nr_freed;
+ }
+
+-int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
++int __kmem_cache_shrink(struct kmem_cache *cachep)
+ {
+ int ret = 0;
+ int node;
+@@ -2352,7 +2352,7 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
+
+ int __kmem_cache_shutdown(struct kmem_cache *cachep)
+ {
+- return __kmem_cache_shrink(cachep, false);
++ return __kmem_cache_shrink(cachep);
+ }
+
+ void __kmem_cache_release(struct kmem_cache *cachep)
+diff --git a/mm/slab.h b/mm/slab.h
+index bc05fdc..ceb7d70 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -146,7 +146,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
+
+ int __kmem_cache_shutdown(struct kmem_cache *);
+ void __kmem_cache_release(struct kmem_cache *);
+-int __kmem_cache_shrink(struct kmem_cache *, bool);
++int __kmem_cache_shrink(struct kmem_cache *);
+ void slab_kmem_cache_release(struct kmem_cache *);
+
+ struct seq_file;
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 329b038..5d2f24f 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -573,6 +573,29 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
+ get_online_cpus();
+ get_online_mems();
+
++#ifdef CONFIG_SLUB
++ /*
++ * In case of SLUB, we need to disable empty slab caching to
++ * avoid pinning the offline memory cgroup by freeable kmem
++ * pages charged to it. SLAB doesn't need this, as it
++ * periodically purges unused slabs.
++ */
++ mutex_lock(&slab_mutex);
++ list_for_each_entry(s, &slab_caches, list) {
++ c = is_root_cache(s) ? cache_from_memcg_idx(s, idx) : NULL;
++ if (c) {
++ c->cpu_partial = 0;
++ c->min_partial = 0;
++ }
++ }
++ mutex_unlock(&slab_mutex);
++ /*
++ * kmem_cache->cpu_partial is checked locklessly (see
++ * put_cpu_partial()). Make sure the change is visible.
++ */
++ synchronize_sched();
++#endif
++
+ mutex_lock(&slab_mutex);
+ list_for_each_entry(s, &slab_caches, list) {
+ if (!is_root_cache(s))
+@@ -584,7 +607,7 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
+ if (!c)
+ continue;
+
+- __kmem_cache_shrink(c, true);
++ __kmem_cache_shrink(c);
+ arr->entries[idx] = NULL;
+ }
+ mutex_unlock(&slab_mutex);
+@@ -755,7 +778,7 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
+ get_online_cpus();
+ get_online_mems();
+ kasan_cache_shrink(cachep);
+- ret = __kmem_cache_shrink(cachep, false);
++ ret = __kmem_cache_shrink(cachep);
+ put_online_mems();
+ put_online_cpus();
+ return ret;
+diff --git a/mm/slob.c b/mm/slob.c
+index 5ec1580..eac04d43 100644
+--- a/mm/slob.c
++++ b/mm/slob.c
+@@ -634,7 +634,7 @@ void __kmem_cache_release(struct kmem_cache *c)
+ {
+ }
+
+-int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
++int __kmem_cache_shrink(struct kmem_cache *d)
+ {
+ return 0;
+ }
+diff --git a/mm/slub.c b/mm/slub.c
+index 7aa0e97..58c7526 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -3887,7 +3887,7 @@ EXPORT_SYMBOL(kfree);
+ * being allocated from last increasing the chance that the last objects
+ * are freed in them.
+ */
+-int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
++int __kmem_cache_shrink(struct kmem_cache *s)
+ {
+ int node;
+ int i;
+@@ -3899,21 +3899,6 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+ unsigned long flags;
+ int ret = 0;
+
+- if (deactivate) {
+- /*
+- * Disable empty slabs caching. Used to avoid pinning offline
+- * memory cgroups by kmem pages that can be freed.
+- */
+- s->cpu_partial = 0;
+- s->min_partial = 0;
+-
+- /*
+- * s->cpu_partial is checked locklessly (see put_cpu_partial),
+- * so we have to make sure the change is visible.
+- */
+- synchronize_sched();
+- }
+-
+ flush_all(s);
+ for_each_kmem_cache_node(s, node, n) {
+ INIT_LIST_HEAD(&discard);
+@@ -3970,7 +3955,7 @@ static int slab_mem_going_offline_callback(void *arg)
+
+ mutex_lock(&slab_mutex);
+ list_for_each_entry(s, &slab_caches, list)
+- __kmem_cache_shrink(s, false);
++ __kmem_cache_shrink(s);
+ mutex_unlock(&slab_mutex);
+
+ return 0;
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
+index 7cb41ae..8498e35 100644
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -186,8 +186,9 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb,
+ /* Do not flood unicast traffic to ports that turn it off */
+ if (pkt_type == BR_PKT_UNICAST && !(p->flags & BR_FLOOD))
+ continue;
++ /* Do not flood if mc off, except for traffic we originate */
+ if (pkt_type == BR_PKT_MULTICAST &&
+- !(p->flags & BR_MCAST_FLOOD))
++ !(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
+ continue;
+
+ /* Do not flood to ports that enable proxy ARP */
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index 855b72f..267b46a 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -29,6 +29,7 @@ EXPORT_SYMBOL(br_should_route_hook);
+ static int
+ br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
++ br_drop_fake_rtable(skb);
+ return netif_receive_skb(skb);
+ }
+
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 7fbdbae..aa1df1a 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -521,21 +521,6 @@ static unsigned int br_nf_pre_routing(void *priv,
+ }
+
+
+-/* PF_BRIDGE/LOCAL_IN ************************************************/
+-/* The packet is locally destined, which requires a real
+- * dst_entry, so detach the fake one. On the way up, the
+- * packet would pass through PRE_ROUTING again (which already
+- * took place when the packet entered the bridge), but we
+- * register an IPv4 PRE_ROUTING 'sabotage' hook that will
+- * prevent this from happening. */
+-static unsigned int br_nf_local_in(void *priv,
+- struct sk_buff *skb,
+- const struct nf_hook_state *state)
+-{
+- br_drop_fake_rtable(skb);
+- return NF_ACCEPT;
+-}
+-
+ /* PF_BRIDGE/FORWARD *************************************************/
+ static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+@@ -906,12 +891,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
+ .priority = NF_BR_PRI_BRNF,
+ },
+ {
+- .hook = br_nf_local_in,
+- .pf = NFPROTO_BRIDGE,
+- .hooknum = NF_BR_LOCAL_IN,
+- .priority = NF_BR_PRI_BRNF,
+- },
+- {
+ .hook = br_nf_forward_ip,
+ .pf = NFPROTO_BRIDGE,
+ .hooknum = NF_BR_FORWARD,
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 60b0a604..2e04fd1 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1697,27 +1697,54 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
+ static struct static_key netstamp_needed __read_mostly;
+ #ifdef HAVE_JUMP_LABEL
+ static atomic_t netstamp_needed_deferred;
++static atomic_t netstamp_wanted;
+ static void netstamp_clear(struct work_struct *work)
+ {
+ int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
++ int wanted;
+
+- while (deferred--)
+- static_key_slow_dec(&netstamp_needed);
++ wanted = atomic_add_return(deferred, &netstamp_wanted);
++ if (wanted > 0)
++ static_key_enable(&netstamp_needed);
++ else
++ static_key_disable(&netstamp_needed);
+ }
+ static DECLARE_WORK(netstamp_work, netstamp_clear);
+ #endif
+
+ void net_enable_timestamp(void)
+ {
++#ifdef HAVE_JUMP_LABEL
++ int wanted;
++
++ while (1) {
++ wanted = atomic_read(&netstamp_wanted);
++ if (wanted <= 0)
++ break;
++ if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
++ return;
++ }
++ atomic_inc(&netstamp_needed_deferred);
++ schedule_work(&netstamp_work);
++#else
+ static_key_slow_inc(&netstamp_needed);
++#endif
+ }
+ EXPORT_SYMBOL(net_enable_timestamp);
+
+ void net_disable_timestamp(void)
+ {
+ #ifdef HAVE_JUMP_LABEL
+- /* net_disable_timestamp() can be called from non process context */
+- atomic_inc(&netstamp_needed_deferred);
++ int wanted;
++
++ while (1) {
++ wanted = atomic_read(&netstamp_wanted);
++ if (wanted <= 1)
++ break;
++ if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
++ return;
++ }
++ atomic_dec(&netstamp_needed_deferred);
+ schedule_work(&netstamp_work);
+ #else
+ static_key_slow_dec(&netstamp_needed);
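
The net/core/dev.c hunks above replace a bare static-key count with a netstamp_wanted counter driven by cmpxchg loops: fast paths only move the count while it is already away from the 0/1 boundary, and boundary crossings are deferred to a workqueue so the expensive key toggle never runs in atomic context. Below is a minimal userspace sketch of that counting scheme using C11 atomics; "wanted" and "deferred" mirror the patch's names, everything else is invented for the demo.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int wanted;    /* logical enable count */
    static atomic_int deferred;  /* pending boundary crossings */

    static void slow_path_toggle(void)  /* stand-in for netstamp_clear() */
    {
        int d = atomic_exchange(&deferred, 0);
        int w = atomic_fetch_add(&wanted, d) + d;

        printf("timestamping %s (wanted=%d)\n", w > 0 ? "on" : "off", w);
    }

    static void enable(void)
    {
        int w = atomic_load(&wanted);

        while (w > 0)                    /* already on: just add a user */
            if (atomic_compare_exchange_weak(&wanted, &w, w + 1))
                return;

        atomic_fetch_add(&deferred, 1);  /* 0 -> 1: defer the heavy toggle */
        slow_path_toggle();
    }

    static void disable(void)
    {
        int w = atomic_load(&wanted);

        while (w > 1)                    /* more users left: just drop one */
            if (atomic_compare_exchange_weak(&wanted, &w, w - 1))
                return;

        atomic_fetch_sub(&deferred, 1);  /* 1 -> 0: defer the heavy toggle */
        slow_path_toggle();
    }

    int main(void)
    {
        enable();   /* toggles on  */
        enable();   /* pure refcount */
        disable();  /* pure refcount */
        disable();  /* toggles off */
        return 0;
    }
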
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 1e3e008..f0f462c 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3814,13 +3814,14 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
+ if (!skb_may_tx_timestamp(sk, false))
+ return;
+
+- /* take a reference to prevent skb_orphan() from freeing the socket */
+- sock_hold(sk);
+-
+- *skb_hwtstamps(skb) = *hwtstamps;
+- __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
+-
+- sock_put(sk);
++ /* Take a reference to prevent skb_orphan() from freeing the socket,
++ * but only if the socket refcount is not zero.
++ */
++ if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
++ *skb_hwtstamps(skb) = *hwtstamps;
++ __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
++ sock_put(sk);
++ }
+ }
+ EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
+
+@@ -3871,7 +3872,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
+ {
+ struct sock *sk = skb->sk;
+ struct sock_exterr_skb *serr;
+- int err;
++ int err = 1;
+
+ skb->wifi_acked_valid = 1;
+ skb->wifi_acked = acked;
+@@ -3881,14 +3882,15 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
+ serr->ee.ee_errno = ENOMSG;
+ serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
+
+- /* take a reference to prevent skb_orphan() from freeing the socket */
+- sock_hold(sk);
+-
+- err = sock_queue_err_skb(sk, skb);
++ /* Take a reference to prevent skb_orphan() from freeing the socket,
++ * but only if the socket refcount is not zero.
++ */
++ if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
++ err = sock_queue_err_skb(sk, skb);
++ sock_put(sk);
++ }
+ if (err)
+ kfree_skb(skb);
+-
+- sock_put(sk);
+ }
+ EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
+
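
Both skbuff.c hunks above swap an unconditional sock_hold() for atomic_inc_not_zero(&sk->sk_refcnt): take a reference only if at least one is still held, so a racing final put keeps its "last reference frees the object" guarantee. A self-contained C11 sketch of the idiom; the obj type and helpers are invented for the example, not kernel API.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refcnt;
        int payload;
    };

    static int obj_get_not_zero(struct obj *o)
    {
        int c = atomic_load(&o->refcnt);

        while (c != 0)               /* never resurrect a dying object */
            if (atomic_compare_exchange_weak(&o->refcnt, &c, c + 1))
                return 1;
        return 0;
    }

    static void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
            free(o);                 /* we dropped the last reference */
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->refcnt, 1);
        o->payload = 42;

        if (obj_get_not_zero(o)) {   /* like the patched timestamp paths */
            printf("payload=%d\n", o->payload);
            obj_put(o);
        }
        obj_put(o);                  /* initial reference; frees o */
        return 0;
    }
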
+diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
+index f053198..5e3a730 100644
+--- a/net/dccp/ccids/ccid2.c
++++ b/net/dccp/ccids/ccid2.c
+@@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk)
+ for (i = 0; i < hc->tx_seqbufc; i++)
+ kfree(hc->tx_seqbuf[i]);
+ hc->tx_seqbufc = 0;
++ dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
+ }
+
+ static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
+diff --git a/net/dccp/input.c b/net/dccp/input.c
+index 8fedc2d..4a05d78 100644
+--- a/net/dccp/input.c
++++ b/net/dccp/input.c
+@@ -577,6 +577,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ struct dccp_sock *dp = dccp_sk(sk);
+ struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
+ const int old_state = sk->sk_state;
++ bool acceptable;
+ int queued = 0;
+
+ /*
+@@ -603,8 +604,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ */
+ if (sk->sk_state == DCCP_LISTEN) {
+ if (dh->dccph_type == DCCP_PKT_REQUEST) {
+- if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
+- skb) < 0)
++ /* It is possible that we process SYN packets from backlog,
++ * so we need to make sure to disable BH right there.
++ */
++ local_bh_disable();
++ acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
++ local_bh_enable();
++ if (!acceptable)
+ return 1;
+ consume_skb(skb);
+ return 0;
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index edbe59d..86b0933 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -289,7 +289,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
+
+ switch (type) {
+ case ICMP_REDIRECT:
+- dccp_do_redirect(skb, sk);
++ if (!sock_owned_by_user(sk))
++ dccp_do_redirect(skb, sk);
+ goto out;
+ case ICMP_SOURCE_QUENCH:
+ /* Just silently ignore these. */
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 7506c03..237d62c 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -122,10 +122,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ np = inet6_sk(sk);
+
+ if (type == NDISC_REDIRECT) {
+- struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
++ if (!sock_owned_by_user(sk)) {
++ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+
+- if (dst)
+- dst->ops->redirect(dst, sk, skb);
++ if (dst)
++ dst->ops->redirect(dst, sk, skb);
++ }
+ goto out;
+ }
+
+diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
+index 53eddf9..39e7e2b 100644
+--- a/net/dccp/minisocks.c
++++ b/net/dccp/minisocks.c
+@@ -122,6 +122,7 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
+ /* It is still raw copy of parent, so invalidate
+ * destructor and make plain sk_free() */
+ newsk->sk_destruct = NULL;
++ bh_unlock_sock(newsk);
+ sk_free(newsk);
+ return NULL;
+ }
+@@ -145,6 +146,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
+ struct dccp_request_sock *dreq = dccp_rsk(req);
+ bool own_req;
+
++ /* TCP/DCCP listeners became lockless.
++	 * DCCP stores complex state in its request_sock, so we need
++	 * to protect it, as this code now runs without being protected
++	 * by the parent (listener) lock.
++ */
++ spin_lock_bh(&dreq->dreq_lock);
++
+ /* Check for retransmitted REQUEST */
+ if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
+
+@@ -159,7 +167,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
+ inet_rtx_syn_ack(sk, req);
+ }
+ /* Network Duplicate, discard packet */
+- return NULL;
++ goto out;
+ }
+
+ DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
+@@ -185,20 +193,20 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
+
+ child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
+ req, &own_req);
+- if (!child)
+- goto listen_overflow;
+-
+- return inet_csk_complete_hashdance(sk, child, req, own_req);
++ if (child) {
++ child = inet_csk_complete_hashdance(sk, child, req, own_req);
++ goto out;
++ }
+
+-listen_overflow:
+- dccp_pr_debug("listen_overflow!\n");
+ DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
+ drop:
+ if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
+ req->rsk_ops->send_reset(sk, skb);
+
+ inet_csk_reqsk_queue_drop(sk, req);
+- return NULL;
++out:
++ spin_unlock_bh(&dreq->dreq_lock);
++ return child;
+ }
+
+ EXPORT_SYMBOL_GPL(dccp_check_req);
+@@ -249,6 +257,7 @@ int dccp_reqsk_init(struct request_sock *req,
+ {
+ struct dccp_request_sock *dreq = dccp_rsk(req);
+
++ spin_lock_init(&dreq->dreq_lock);
+ inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
+ inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport);
+ inet_rsk(req)->acked = 0;
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 21514324..971b947 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1460,8 +1460,10 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
+ int proto = iph->protocol;
+ int err = -ENOSYS;
+
+- if (skb->encapsulation)
++ if (skb->encapsulation) {
++ skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
+ skb_set_inner_network_header(skb, nhoff);
++ }
+
+ csum_replace2(&iph->check, iph->tot_len, newlen);
+ iph->tot_len = newlen;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index d851cae..17e6fbf 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1968,6 +1968,7 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ {
+ int res;
+
++ tos &= IPTOS_RT_MASK;
+ rcu_read_lock();
+
+ /* Multicast recognition logic is moved from route cache to here.
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index c71d49c..ce42ded 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5916,9 +5916,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
+ if (th->syn) {
+ if (th->fin)
+ goto discard;
+- if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
+- return 1;
++ /* It is possible that we process SYN packets from backlog,
++ * so we need to make sure to disable BH right there.
++ */
++ local_bh_disable();
++ acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
++ local_bh_enable();
+
++ if (!acceptable)
++ return 1;
+ consume_skb(skb);
+ return 0;
+ }
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 2259114..6988566 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -269,10 +269,13 @@ EXPORT_SYMBOL(tcp_v4_connect);
+ */
+ void tcp_v4_mtu_reduced(struct sock *sk)
+ {
+- struct dst_entry *dst;
+ struct inet_sock *inet = inet_sk(sk);
+- u32 mtu = tcp_sk(sk)->mtu_info;
++ struct dst_entry *dst;
++ u32 mtu;
+
++ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
++ return;
++ mtu = tcp_sk(sk)->mtu_info;
+ dst = inet_csk_update_pmtu(sk, mtu);
+ if (!dst)
+ return;
+@@ -418,7 +421,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+
+ switch (type) {
+ case ICMP_REDIRECT:
+- do_redirect(icmp_skb, sk);
++ if (!sock_owned_by_user(sk))
++ do_redirect(icmp_skb, sk);
+ goto out;
+ case ICMP_SOURCE_QUENCH:
+ /* Just silently ignore these. */
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 3ea1cf8..b1e65b3 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -249,7 +249,8 @@ void tcp_delack_timer_handler(struct sock *sk)
+
+ sk_mem_reclaim_partial(sk);
+
+- if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
++ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
++ !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+ goto out;
+
+ if (time_after(icsk->icsk_ack.timeout, jiffies)) {
+@@ -552,7 +553,8 @@ void tcp_write_timer_handler(struct sock *sk)
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ int event;
+
+- if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
++ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
++ !icsk->icsk_pending)
+ goto out;
+
+ if (time_after(icsk->icsk_timeout, jiffies)) {
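
Both tcp_timer.c hunks replace an equality test against TCP_CLOSE with a set-membership test: TCPF_CLOSE is literally (1 << TCP_CLOSE), so "(1 << sk->sk_state) & mask" checks several states in one branch. A tiny standalone illustration of the idiom; the enum values are made up, not the kernel's TCP states.

    #include <stdio.h>

    enum state { ST_ESTABLISHED, ST_SYN_SENT, ST_LISTEN, ST_CLOSE, ST_MAX };

    #define STF(s) (1U << (s))       /* like TCPF_* = 1 << TCP_* */

    static int timer_should_skip(enum state s)
    {
        /* one branch covers the whole excluded set, as in the patch */
        return (STF(s) & (STF(ST_CLOSE) | STF(ST_LISTEN))) != 0;
    }

    int main(void)
    {
        for (int s = 0; s < ST_MAX; s++)
            printf("state %d: skip=%d\n", s, timer_should_skip((enum state)s));
        return 0;
    }
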
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index ef54852..8c88a37 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -908,6 +908,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
+ ins = &rt->dst.rt6_next;
+ iter = *ins;
+ while (iter) {
++ if (iter->rt6i_metric > rt->rt6i_metric)
++ break;
+ if (rt6_qualify_for_ecmp(iter)) {
+ *ins = iter->dst.rt6_next;
+ fib6_purge_rt(iter, fn, info->nl_net);
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index fc7b401..33b04ec 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -294,8 +294,10 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
+ struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
+ int err = -ENOSYS;
+
+- if (skb->encapsulation)
++ if (skb->encapsulation) {
++ skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
+ skb_set_inner_network_header(skb, nhoff);
++ }
+
+ iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
+
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 9a87bfb..e27b8fd 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -757,13 +757,14 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ * Fragment the datagram.
+ */
+
+- *prevhdr = NEXTHDR_FRAGMENT;
+ troom = rt->dst.dev->needed_tailroom;
+
+ /*
+ * Keep copying data until we run out.
+ */
+ while (left > 0) {
++ u8 *fragnexthdr_offset;
++
+ len = left;
+ /* IF: it doesn't fit, use 'mtu' - the data space left */
+ if (len > mtu)
+@@ -808,6 +809,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ */
+ skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
+
++ fragnexthdr_offset = skb_network_header(frag);
++ fragnexthdr_offset += prevhdr - skb_network_header(skb);
++ *fragnexthdr_offset = NEXTHDR_FRAGMENT;
++
+ /*
+ * Build fragment header.
+ */
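
The ip6_fragment() fix above stops patching *prevhdr in the shared original header and instead recomputes, inside each fragment, the position prevhdr occupied relative to the original network header, writing NEXTHDR_FRAGMENT there. The underlying pointer-relocation trick reduced to plain C; the buffer contents and offsets are invented for the demo.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char orig[32] = "v6hdr:EXT:x:payload";
        char frag[32];
        char *marker = orig + 10;         /* like prevhdr: points into orig */

        memcpy(frag, orig, sizeof(orig)); /* like copying the header block */

        /* same offset, other buffer: frag's own copy of the byte */
        char *frag_marker = frag + (marker - orig);

        *frag_marker = 'F';  /* like *fragnexthdr_offset = NEXTHDR_FRAGMENT */

        printf("orig: %s\nfrag: %s\n", orig, frag);
        return 0;
    }
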
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index c299c1e..66c2b4b 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -691,6 +691,10 @@ vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p)
+ u->link = p->link;
+ u->i_key = p->i_key;
+ u->o_key = p->o_key;
++ if (u->i_key)
++ u->i_flags |= GRE_KEY;
++ if (u->o_key)
++ u->o_flags |= GRE_KEY;
+ u->proto = p->proto;
+
+ memcpy(u->name, p->name, sizeof(u->name));
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index 9948b5c..986d4ca 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -589,6 +589,7 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
+ hdr = ipv6_hdr(skb);
+ fhdr = (struct frag_hdr *)skb_transport_header(skb);
+
++ skb_orphan(skb);
+ fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
+ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+ if (fq == NULL) {
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 6673965..b2e61a0 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -375,10 +375,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ np = inet6_sk(sk);
+
+ if (type == NDISC_REDIRECT) {
+- struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
++ if (!sock_owned_by_user(sk)) {
++ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+
+- if (dst)
+- dst->ops->redirect(dst, sk, skb);
++ if (dst)
++ dst->ops->redirect(dst, sk, skb);
++ }
+ goto out;
+ }
+
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index c0f0750..ff750bb 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -388,7 +388,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
+ drop:
+ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
+ kfree_skb(skb);
+- return -1;
++ return 0;
+ }
+
+ /* Userspace will call sendmsg() on the tunnel socket to send L2TP
+diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
+index 5b77377..1309e2c 100644
+--- a/net/mpls/af_mpls.c
++++ b/net/mpls/af_mpls.c
+@@ -956,7 +956,8 @@ static void mpls_ifdown(struct net_device *dev, int event)
+ /* fall through */
+ case NETDEV_CHANGE:
+ nh->nh_flags |= RTNH_F_LINKDOWN;
+- ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
++ if (event != NETDEV_UNREGISTER)
++ ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
+ break;
+ }
+ if (event == NETDEV_UNREGISTER)
+@@ -1696,6 +1697,7 @@ static void mpls_net_exit(struct net *net)
+ for (index = 0; index < platform_labels; index++) {
+ struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+ RCU_INIT_POINTER(platform_label[index], NULL);
++ mpls_notify_route(net, index, rt, NULL, NULL);
+ mpls_rt_free(rt);
+ }
+ rtnl_unlock();
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index eab210b..48386bf 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -367,7 +367,6 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
+ } else if (key->eth.type == htons(ETH_P_IPV6)) {
+ enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
+
+- skb_orphan(skb);
+ memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
+ err = nf_ct_frag6_gather(net, skb, user);
+ if (err) {
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 34de326..f2b04a7 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3140,7 +3140,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len)
+ {
+ struct sock *sk = sock->sk;
+- char name[15];
++ char name[sizeof(uaddr->sa_data) + 1];
+
+ /*
+ * Check legality
+@@ -3148,7 +3148,11 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
+
+ if (addr_len != sizeof(struct sockaddr))
+ return -EINVAL;
+- strlcpy(name, uaddr->sa_data, sizeof(name));
++	/* uaddr->sa_data comes from userspace and is not guaranteed to be
++	 * zero-terminated.
++ */
++ memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
++ name[sizeof(uaddr->sa_data)] = 0;
+
+ return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
+ }
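
The packet_bind_spkt() fix sizes the local buffer from the source field and terminates it by hand, because sa_data arriving from userspace need not contain a NUL and strlcpy() would read past the field looking for one. A standalone rendition of the safe pattern; the struct layout is illustrative.

    #include <stdio.h>
    #include <string.h>

    struct fake_sockaddr {
        unsigned short family;
        char sa_data[14];             /* may be completely unterminated */
    };

    int main(void)
    {
        struct fake_sockaddr ua;
        char name[sizeof(ua.sa_data) + 1];

        /* worst case: all 14 bytes used, no NUL anywhere in the field */
        memset(&ua, 0, sizeof(ua));
        memcpy(ua.sa_data, "exactly14bytes", sizeof(ua.sa_data));

        /* bounded copy plus explicit terminator, as in the fix */
        memcpy(name, ua.sa_data, sizeof(ua.sa_data));
        name[sizeof(ua.sa_data)] = '\0';

        printf("bound to '%s'\n", name);
        return 0;
    }
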
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index c6c2a93..c651cfc 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -820,10 +820,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
+ goto out_module_put;
+
+ err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops);
+- if (err < 0)
++ if (err <= 0)
+ goto out_module_put;
+- if (err == 0)
+- goto noflush_out;
+
+ nla_nest_end(skb, nest);
+
+@@ -840,7 +838,6 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
+ out_module_put:
+ module_put(ops->owner);
+ err_out:
+-noflush_out:
+ kfree_skb(skb);
+ return err;
+ }
+diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
+index eae07a2..1191179 100644
+--- a/net/sched/act_connmark.c
++++ b/net/sched/act_connmark.c
+@@ -113,6 +113,9 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
+ if (ret < 0)
+ return ret;
+
++ if (!tb[TCA_CONNMARK_PARMS])
++ return -EINVAL;
++
+ parm = nla_data(tb[TCA_CONNMARK_PARMS]);
+
+ if (!tcf_hash_check(tn, parm->index, a, bind)) {
+diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
+index e7d9638..f85313d 100644
+--- a/net/sched/act_skbmod.c
++++ b/net/sched/act_skbmod.c
+@@ -228,7 +228,6 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
+
+ return skb->len;
+ nla_put_failure:
+- rcu_read_unlock();
+ nlmsg_trim(skb, b);
+ return -1;
+ }
+diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
+index 41adf36..b5c279b 100644
+--- a/net/strparser/strparser.c
++++ b/net/strparser/strparser.c
+@@ -504,6 +504,7 @@ static int __init strp_mod_init(void)
+
+ static void __exit strp_mod_exit(void)
+ {
++ destroy_workqueue(strp_wq);
+ }
+ module_init(strp_mod_init);
+ module_exit(strp_mod_exit);
diff --git a/4.9.18/1017_linux-4.9.18.patch b/4.9.18/1017_linux-4.9.18.patch
new file mode 100644
index 0000000..3f957a2
--- /dev/null
+++ b/4.9.18/1017_linux-4.9.18.patch
@@ -0,0 +1,876 @@
+diff --git a/Makefile b/Makefile
+index 004f90a..c10d0e6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 17
++SUBLEVEL = 18
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
+index 7bd69bd..1d8c24d 100644
+--- a/arch/parisc/include/asm/cacheflush.h
++++ b/arch/parisc/include/asm/cacheflush.h
+@@ -45,28 +45,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
+
+ #define flush_kernel_dcache_range(start,size) \
+ flush_kernel_dcache_range_asm((start), (start)+(size));
+-/* vmap range flushes and invalidates. Architecturally, we don't need
+- * the invalidate, because the CPU should refuse to speculate once an
+- * area has been flushed, so invalidate is left empty */
+-static inline void flush_kernel_vmap_range(void *vaddr, int size)
+-{
+- unsigned long start = (unsigned long)vaddr;
+-
+- flush_kernel_dcache_range_asm(start, start + size);
+-}
+-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+-{
+- unsigned long start = (unsigned long)vaddr;
+- void *cursor = vaddr;
+
+- for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
+- struct page *page = vmalloc_to_page(cursor);
+-
+- if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+- flush_kernel_dcache_page(page);
+- }
+- flush_kernel_dcache_range_asm(start, start + size);
+-}
++void flush_kernel_vmap_range(void *vaddr, int size);
++void invalidate_kernel_vmap_range(void *vaddr, int size);
+
+ #define flush_cache_vmap(start, end) flush_cache_all()
+ #define flush_cache_vunmap(start, end) flush_cache_all()
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index 977f0a4f..53ec75f 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -633,3 +633,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ }
+ }
++
++void flush_kernel_vmap_range(void *vaddr, int size)
++{
++ unsigned long start = (unsigned long)vaddr;
++
++ if ((unsigned long)size > parisc_cache_flush_threshold)
++ flush_data_cache();
++ else
++ flush_kernel_dcache_range_asm(start, start + size);
++}
++EXPORT_SYMBOL(flush_kernel_vmap_range);
++
++void invalidate_kernel_vmap_range(void *vaddr, int size)
++{
++ unsigned long start = (unsigned long)vaddr;
++
++ if ((unsigned long)size > parisc_cache_flush_threshold)
++ flush_data_cache();
++ else
++ flush_kernel_dcache_range_asm(start, start + size);
++}
++EXPORT_SYMBOL(invalidate_kernel_vmap_range);
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index 4063943..e81afc37 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -139,6 +139,8 @@ void machine_power_off(void)
+
+ printk(KERN_EMERG "System shut down completed.\n"
+ "Please power this system off now.");
++
++ for (;;);
+ }
+
+ void (*pm_power_off)(void) = machine_power_off;
+diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
+index 861e721..f080abf 100644
+--- a/arch/powerpc/boot/zImage.lds.S
++++ b/arch/powerpc/boot/zImage.lds.S
+@@ -68,6 +68,7 @@ SECTIONS
+ }
+
+ #ifdef CONFIG_PPC64_BOOT_WRAPPER
++ . = ALIGN(256);
+ .got :
+ {
+ __toc_start = .;
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 6e6c1fb..272608f 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
+ char *buf)
+ {
+ unsigned int cur_freq = __cpufreq_get(policy);
+- if (!cur_freq)
+- return sprintf(buf, "<unknown>");
+- return sprintf(buf, "%u\n", cur_freq);
++
++ if (cur_freq)
++ return sprintf(buf, "%u\n", cur_freq);
++
++ return sprintf(buf, "<unknown>\n");
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+index b447a01..09e6a73 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+@@ -3506,6 +3506,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
++ } else if (adev->asic_type == CHIP_OLAND) {
++ if ((adev->pdev->device == 0x6604) &&
++ (adev->pdev->subsystem_vendor == 0x1028) &&
++ (adev->pdev->subsystem_device == 0x066F)) {
++ max_sclk = 75000;
++ }
+ }
+ /* Apply dpm quirks */
+ while (p && p->chip_device != 0) {
+diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
+index 8703f56..246d1ae 100644
+--- a/drivers/gpu/drm/vc4/vc4_drv.c
++++ b/drivers/gpu/drm/vc4/vc4_drv.c
+@@ -61,21 +61,24 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
+ if (ret < 0)
+ return ret;
+ args->value = V3D_READ(V3D_IDENT0);
+- pm_runtime_put(&vc4->v3d->pdev->dev);
++ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
++ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ break;
+ case DRM_VC4_PARAM_V3D_IDENT1:
+ ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+ if (ret < 0)
+ return ret;
+ args->value = V3D_READ(V3D_IDENT1);
+- pm_runtime_put(&vc4->v3d->pdev->dev);
++ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
++ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ break;
+ case DRM_VC4_PARAM_V3D_IDENT2:
+ ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+ if (ret < 0)
+ return ret;
+ args->value = V3D_READ(V3D_IDENT2);
+- pm_runtime_put(&vc4->v3d->pdev->dev);
++ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
++ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ break;
+ case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
+ args->value = true;
+diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
+index 18e3717..ab30169 100644
+--- a/drivers/gpu/drm/vc4/vc4_gem.c
++++ b/drivers/gpu/drm/vc4/vc4_gem.c
+@@ -711,8 +711,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
+ }
+
+ mutex_lock(&vc4->power_lock);
+- if (--vc4->power_refcount == 0)
+- pm_runtime_put(&vc4->v3d->pdev->dev);
++ if (--vc4->power_refcount == 0) {
++ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
++ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
++ }
+ mutex_unlock(&vc4->power_lock);
+
+ kfree(exec);
+diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
+index e6d3c60..7cc346a 100644
+--- a/drivers/gpu/drm/vc4/vc4_v3d.c
++++ b/drivers/gpu/drm/vc4/vc4_v3d.c
+@@ -222,6 +222,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
+ return ret;
+ }
+
++ pm_runtime_use_autosuspend(dev);
++ pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
+ pm_runtime_enable(dev);
+
+ return 0;
+diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+index 2543cf5..917321c 100644
+--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
++++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+@@ -608,9 +608,7 @@ static bool
+ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
+ {
+ uint32_t max_branch_target = 0;
+- bool found_shader_end = false;
+ int ip;
+- int shader_end_ip = 0;
+ int last_branch = -2;
+
+ for (ip = 0; ip < validation_state->max_ip; ip++) {
+@@ -621,8 +619,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
+ uint32_t branch_target_ip;
+
+ if (sig == QPU_SIG_PROG_END) {
+- shader_end_ip = ip;
+- found_shader_end = true;
++ /* There are two delay slots after program end is
++ * signaled that are still executed, then we're
++ * finished. validation_state->max_ip is the
++ * instruction after the last valid instruction in the
++ * program.
++ */
++ validation_state->max_ip = ip + 3;
+ continue;
+ }
+
+@@ -676,15 +679,9 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
+ }
+ set_bit(after_delay_ip, validation_state->branch_targets);
+ max_branch_target = max(max_branch_target, after_delay_ip);
+-
+- /* There are two delay slots after program end is signaled
+- * that are still executed, then we're finished.
+- */
+- if (found_shader_end && ip == shader_end_ip + 2)
+- break;
+ }
+
+- if (max_branch_target > shader_end_ip) {
++ if (max_branch_target > validation_state->max_ip - 3) {
+ DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
+ return false;
+ }
+diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
+index aecec6d..7f1c625 100644
+--- a/drivers/isdn/gigaset/bas-gigaset.c
++++ b/drivers/isdn/gigaset/bas-gigaset.c
+@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
+ return -ENODEV;
+ }
+
++ if (hostif->desc.bNumEndpoints < 1)
++ return -ENODEV;
++
+ dev_info(&udev->dev,
+ "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
+ __func__, le16_to_cpu(udev->descriptor.idVendor),
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 39fddda..55b5e0e 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1470,7 +1470,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
+ split = bio;
+ }
+
++ /*
++	 * If a bio is split, the first part of the bio will pass the
++	 * barrier, but the bio is queued in current->bio_list (see
++	 * generic_make_request). If there is a raise_barrier() called
++	 * here, the second part of the bio can't pass the barrier. But
++	 * since the first part isn't dispatched to the underlying disks
++	 * yet, the barrier is never released, hence raise_barrier will
++	 * always wait. We have a deadlock.
++	 * Note, this only happens in the read path. For the write path,
++	 * the first part of the bio is dispatched in a schedule() call
++	 * (because of blk plug) or offloaded to raid10d.
++	 * Returning from the function immediately changes the order the
++	 * bios are queued in bio_list and avoids the deadlock.
++ */
+ __make_request(mddev, split);
++ if (split != bio && bio_data_dir(bio) == READ) {
++ generic_make_request(bio);
++ break;
++ }
+ } while (split != bio);
+
+ /* In case raid10d snuck in to freeze_array */
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index f9b6fba..a530f08 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -560,8 +560,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
+ WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
+ task->state = state;
+
+- if (!list_empty(&task->running))
++ spin_lock_bh(&conn->taskqueuelock);
++ if (!list_empty(&task->running)) {
++ pr_debug_once("%s while task on list", __func__);
+ list_del_init(&task->running);
++ }
++ spin_unlock_bh(&conn->taskqueuelock);
+
+ if (conn->task == task)
+ conn->task = NULL;
+@@ -783,7 +787,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ if (session->tt->xmit_task(task))
+ goto free_task;
+ } else {
++ spin_lock_bh(&conn->taskqueuelock);
+ list_add_tail(&task->running, &conn->mgmtqueue);
++ spin_unlock_bh(&conn->taskqueuelock);
+ iscsi_conn_queue_work(conn);
+ }
+
+@@ -1474,8 +1480,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
+ * this may be on the requeue list already if the xmit_task callout
+ * is handling the r2ts while we are adding new ones
+ */
++ spin_lock_bh(&conn->taskqueuelock);
+ if (list_empty(&task->running))
+ list_add_tail(&task->running, &conn->requeue);
++ spin_unlock_bh(&conn->taskqueuelock);
+ iscsi_conn_queue_work(conn);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_requeue_task);
+@@ -1512,22 +1520,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ * only have one nop-out as a ping from us and targets should not
+ * overflow us with nop-ins
+ */
++ spin_lock_bh(&conn->taskqueuelock);
+ check_mgmt:
+ while (!list_empty(&conn->mgmtqueue)) {
+ conn->task = list_entry(conn->mgmtqueue.next,
+ struct iscsi_task, running);
+ list_del_init(&conn->task->running);
++ spin_unlock_bh(&conn->taskqueuelock);
+ if (iscsi_prep_mgmt_task(conn, conn->task)) {
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&conn->session->back_lock);
+ __iscsi_put_task(conn->task);
+ spin_unlock_bh(&conn->session->back_lock);
+ conn->task = NULL;
++ spin_lock_bh(&conn->taskqueuelock);
+ continue;
+ }
+ rc = iscsi_xmit_task(conn);
+ if (rc)
+ goto done;
++ spin_lock_bh(&conn->taskqueuelock);
+ }
+
+ /* process pending command queue */
+@@ -1535,19 +1547,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
+ running);
+ list_del_init(&conn->task->running);
++ spin_unlock_bh(&conn->taskqueuelock);
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+ fail_scsi_task(conn->task, DID_IMM_RETRY);
++ spin_lock_bh(&conn->taskqueuelock);
+ continue;
+ }
+ rc = iscsi_prep_scsi_cmd_pdu(conn->task);
+ if (rc) {
+ if (rc == -ENOMEM || rc == -EACCES) {
++ spin_lock_bh(&conn->taskqueuelock);
+ list_add_tail(&conn->task->running,
+ &conn->cmdqueue);
+ conn->task = NULL;
++ spin_unlock_bh(&conn->taskqueuelock);
+ goto done;
+ } else
+ fail_scsi_task(conn->task, DID_ABORT);
++ spin_lock_bh(&conn->taskqueuelock);
+ continue;
+ }
+ rc = iscsi_xmit_task(conn);
+@@ -1558,6 +1575,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ * we need to check the mgmt queue for nops that need to
+ 		 * be sent to avoid starvation
+ */
++ spin_lock_bh(&conn->taskqueuelock);
+ if (!list_empty(&conn->mgmtqueue))
+ goto check_mgmt;
+ }
+@@ -1577,12 +1595,15 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ conn->task = task;
+ list_del_init(&conn->task->running);
+ conn->task->state = ISCSI_TASK_RUNNING;
++ spin_unlock_bh(&conn->taskqueuelock);
+ rc = iscsi_xmit_task(conn);
+ if (rc)
+ goto done;
++ spin_lock_bh(&conn->taskqueuelock);
+ if (!list_empty(&conn->mgmtqueue))
+ goto check_mgmt;
+ }
++ spin_unlock_bh(&conn->taskqueuelock);
+ spin_unlock_bh(&conn->session->frwd_lock);
+ return -ENODATA;
+
+@@ -1738,7 +1759,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
+ goto prepd_reject;
+ }
+ } else {
++ spin_lock_bh(&conn->taskqueuelock);
+ list_add_tail(&task->running, &conn->cmdqueue);
++ spin_unlock_bh(&conn->taskqueuelock);
+ iscsi_conn_queue_work(conn);
+ }
+
+@@ -2897,6 +2920,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+ INIT_LIST_HEAD(&conn->mgmtqueue);
+ INIT_LIST_HEAD(&conn->cmdqueue);
+ INIT_LIST_HEAD(&conn->requeue);
++ spin_lock_init(&conn->taskqueuelock);
+ INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
+
+ /* allocate login_task used for the login/text sequences */
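
The libiscsi hunks above put every mgmtqueue/cmdqueue/requeue manipulation under the new taskqueuelock and, notably, drop the lock around the expensive per-task work before re-taking it to pop the next entry. A compact pthread sketch of that lock/pop/unlock/process loop; the queue and task types are invented for the example.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct task {
        struct task *next;
        int id;
    };

    static pthread_mutex_t queuelock = PTHREAD_MUTEX_INITIALIZER;
    static struct task *queue_head;       /* protected by queuelock */

    static void queue_push(int id)
    {
        struct task *t = malloc(sizeof(*t));

        t->id = id;
        pthread_mutex_lock(&queuelock);
        t->next = queue_head;
        queue_head = t;
        pthread_mutex_unlock(&queuelock);
    }

    static void drain(void)
    {
        pthread_mutex_lock(&queuelock);
        while (queue_head) {
            struct task *t = queue_head;

            queue_head = t->next;
            pthread_mutex_unlock(&queuelock); /* never do the work under the lock */

            printf("xmit task %d\n", t->id); /* stand-in for iscsi_xmit_task() */
            free(t);

            pthread_mutex_lock(&queuelock);  /* re-take before the next pop */
        }
        pthread_mutex_unlock(&queuelock);
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            queue_push(i);
        drain();
        return 0;
    }
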
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 734a042..f7e3f27 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -11393,6 +11393,7 @@ static struct pci_driver lpfc_driver = {
+ .id_table = lpfc_id_table,
+ .probe = lpfc_pci_probe_one,
+ .remove = lpfc_pci_remove_one,
++ .shutdown = lpfc_pci_remove_one,
+ .suspend = lpfc_pci_suspend_one,
+ .resume = lpfc_pci_resume_one,
+ .err_handler = &lpfc_err_handler,
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index bff9689..feab7ea 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -5375,16 +5375,22 @@ qlt_send_busy(struct scsi_qla_host *vha,
+
+ static int
+ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
+- struct atio_from_isp *atio)
++ struct atio_from_isp *atio, bool ha_locked)
+ {
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t status;
++ unsigned long flags;
+
+ if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
+ return 0;
+
++ if (!ha_locked)
++ spin_lock_irqsave(&ha->hardware_lock, flags);
+ status = temp_sam_status;
+ qlt_send_busy(vha, atio, status);
++ if (!ha_locked)
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
+ return 1;
+ }
+
+@@ -5429,7 +5435,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
+
+
+ if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
+- rc = qlt_chk_qfull_thresh_hold(vha, atio);
++ rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
+ if (rc != 0) {
+ tgt->atio_irq_cmd_count--;
+ return;
+@@ -5552,7 +5558,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
+ break;
+ }
+
+- rc = qlt_chk_qfull_thresh_hold(vha, atio);
++ rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
+ if (rc != 0) {
+ tgt->irq_cmd_count--;
+ return;
+@@ -6794,6 +6800,8 @@ qlt_handle_abts_recv_work(struct work_struct *work)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
++ kfree(op);
+ }
+
+ void
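
qlt_chk_qfull_thresh_hold() now takes an ha_locked flag so a caller already holding the hardware lock doesn't try to acquire it twice, while unlocked callers still get the protection. A small pthread rendition of the caller-states-the-lock pattern; the names, types, and threshold are invented for the sketch.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
    static int pend_cmds = 10;

    static void send_busy(void)          /* must be called with hw_lock held */
    {
        printf("sending BUSY (pend_cmds=%d)\n", pend_cmds);
    }

    static int chk_qfull(int threshold, bool already_locked)
    {
        if (pend_cmds < threshold)
            return 0;

        if (!already_locked)             /* take the lock only if the */
            pthread_mutex_lock(&hw_lock);/* caller doesn't hold it yet */
        send_busy();
        if (!already_locked)
            pthread_mutex_unlock(&hw_lock);
        return 1;
    }

    int main(void)
    {
        chk_qfull(4, false);             /* unlocked caller */

        pthread_mutex_lock(&hw_lock);    /* caller already inside the lock */
        chk_qfull(4, true);
        pthread_mutex_unlock(&hw_lock);
        return 0;
    }
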
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 9125d93..ef1c8c1 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
+
+ buf = kzalloc(12, GFP_KERNEL);
+ if (!buf)
+- return;
++ goto out_free;
+
+ memset(cdb, 0, MAX_COMMAND_SIZE);
+ cdb[0] = MODE_SENSE;
+@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
+ * If MODE_SENSE still returns zero, set the default value to 1024.
+ */
+ sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
++out_free:
+ if (!sdev->sector_size)
+ sdev->sector_size = 1024;
+-out_free:
++
+ kfree(buf);
+ }
+
+@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
+ sd->lun, sd->queue_depth);
+ }
+
+- dev->dev_attrib.hw_block_size = sd->sector_size;
++ dev->dev_attrib.hw_block_size =
++ min_not_zero((int)sd->sector_size, 512);
+ dev->dev_attrib.hw_max_sectors =
+- min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
++ min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
+ dev->dev_attrib.hw_queue_depth = sd->queue_depth;
+
+ /*
+@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
+ /*
+ * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
+ */
+- if (sd->type == TYPE_TAPE)
++ if (sd->type == TYPE_TAPE) {
+ pscsi_tape_read_blocksize(dev, sd);
++ dev->dev_attrib.hw_block_size = sd->sector_size;
++ }
+ return 0;
+ }
+
+@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
+ /*
+ * Called with struct Scsi_Host->host_lock called.
+ */
+-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
++static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
+ __releases(sh->host_lock)
+ {
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+ return 0;
+ }
+
+-/*
+- * Called with struct Scsi_Host->host_lock called.
+- */
+-static int pscsi_create_type_other(struct se_device *dev,
+- struct scsi_device *sd)
+- __releases(sh->host_lock)
+-{
+- struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+- struct Scsi_Host *sh = sd->host;
+- int ret;
+-
+- spin_unlock_irq(sh->host_lock);
+- ret = pscsi_add_device_to_list(dev, sd);
+- if (ret)
+- return ret;
+-
+- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
+- phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+- sd->channel, sd->id, sd->lun);
+- return 0;
+-}
+-
+ static int pscsi_configure_device(struct se_device *dev)
+ {
+ struct se_hba *hba = dev->se_hba;
+@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev)
+ case TYPE_DISK:
+ ret = pscsi_create_type_disk(dev, sd);
+ break;
+- case TYPE_ROM:
+- ret = pscsi_create_type_rom(dev, sd);
+- break;
+ default:
+- ret = pscsi_create_type_other(dev, sd);
++ ret = pscsi_create_type_nondisk(dev, sd);
+ break;
+ }
+
+@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev)
+ else if (pdv->pdv_lld_host)
+ scsi_host_put(pdv->pdv_lld_host);
+
+- if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
+- scsi_device_put(sd);
++ scsi_device_put(sd);
+
+ pdv->pdv_sd = NULL;
+ }
+@@ -1069,7 +1047,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
+ if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
+ return pdv->pdv_bd->bd_part->nr_sects;
+
+- dump_stack();
+ return 0;
+ }
+
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index aabd660..a53fb23 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -1104,9 +1104,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ return ret;
+ break;
+ case VERIFY:
++ case VERIFY_16:
+ size = 0;
+- sectors = transport_get_sectors_10(cdb);
+- cmd->t_task_lba = transport_lba_32(cdb);
++ if (cdb[0] == VERIFY) {
++ sectors = transport_get_sectors_10(cdb);
++ cmd->t_task_lba = transport_lba_32(cdb);
++ } else {
++ sectors = transport_get_sectors_16(cdb);
++ cmd->t_task_lba = transport_lba_64(cdb);
++ }
+ cmd->execute_cmd = sbc_emulate_noop;
+ goto check_lba;
+ case REZERO_UNIT:
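
The sbc_parse_cdb() hunk handles VERIFY_16 alongside VERIFY: the 10-byte CDB carries a 32-bit LBA in bytes 2-5 and a 16-bit length in bytes 7-8, while the 16-byte CDB carries a 64-bit LBA in bytes 2-9 and a 32-bit length in bytes 10-13, so the patch just selects the matching extraction per opcode. A self-contained parser for the two layouts; be_load() is our stand-in for the kernel's get_unaligned_be* accessors, and the opcode values follow SBC (0x2f / 0x8f).

    #include <stdint.h>
    #include <stdio.h>

    #define OP_VERIFY_10 0x2f
    #define OP_VERIFY_16 0x8f

    static uint64_t be_load(const uint8_t *p, int n)  /* big-endian field */
    {
        uint64_t v = 0;

        while (n--)
            v = (v << 8) | *p++;
        return v;
    }

    static int parse_verify(const uint8_t *cdb, uint64_t *lba, uint32_t *sectors)
    {
        switch (cdb[0]) {
        case OP_VERIFY_10:
            *lba     = be_load(cdb + 2, 4);   /* bytes 2..5   */
            *sectors = be_load(cdb + 7, 2);   /* bytes 7..8   */
            return 0;
        case OP_VERIFY_16:
            *lba     = be_load(cdb + 2, 8);   /* bytes 2..9   */
            *sectors = be_load(cdb + 10, 4);  /* bytes 10..13 */
            return 0;
        default:
            return -1;
        }
    }

    int main(void)
    {
        uint8_t v16[16] = { OP_VERIFY_16, 0, 0, 0, 0, 0, 1, 0, 0, 0,
                            0, 0, 0, 8, 0, 0 };
        uint64_t lba;
        uint32_t n;

        if (parse_verify(v16, &lba, &n) == 0)
            printf("VERIFY_16: lba=%llu sectors=%u\n",
                   (unsigned long long)lba, n);
        return 0;
    }
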
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index afe29ba..5fa9ba1 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3830,7 +3830,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
+ EXT4_DESC_PER_BLOCK(sb);
+ if (ext4_has_feature_meta_bg(sb)) {
+- if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
++ if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
+ ext4_msg(sb, KERN_WARNING,
+ "first meta block group too large: %u "
+ "(group descriptor block count %u)",
+diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
+index a6a3389..51519c2 100644
+--- a/fs/gfs2/incore.h
++++ b/fs/gfs2/incore.h
+@@ -207,7 +207,7 @@ struct lm_lockname {
+ struct gfs2_sbd *ln_sbd;
+ u64 ln_number;
+ unsigned int ln_type;
+-};
++} __packed __aligned(sizeof(int));
+
+ #define lm_name_equal(name1, name2) \
+ (((name1)->ln_number == (name2)->ln_number) && \
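
Tagging lm_lockname __packed (with an explicit minimum alignment) removes padding, which matters because the struct is used as a hash-table key: padding bytes carry indeterminate values, so hashing or memcmp()ing the raw bytes is unstable. A quick demonstration of the failure mode; the layout is illustrative and the sizes assume a typical LP64 ABI.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct key_padded {                /* 4 tail-padding bytes on LP64 */
        uint64_t number;
        uint32_t type;
    };

    struct key_packed {
        uint64_t number;
        uint32_t type;
    } __attribute__((packed, aligned(4)));

    int main(void)
    {
        struct key_padded a, b;

        memset(&a, 0xaa, sizeof(a));   /* different garbage in the padding */
        memset(&b, 0x55, sizeof(b));
        a.number = b.number = 7;       /* logically identical keys */
        a.type = b.type = 2;

        /* raw comparison still sees the padding and calls them different */
        printf("padded: sizeof=%zu memcmp=%d\n",
               sizeof(a), memcmp(&a, &b, sizeof(a)));
        printf("packed: sizeof=%zu (no padding to go stale)\n",
               sizeof(struct key_packed));
        return 0;
    }
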
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 609840d..1536aeb 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7426,11 +7426,11 @@ static void nfs4_exchange_id_release(void *data)
+ struct nfs41_exchange_id_data *cdata =
+ (struct nfs41_exchange_id_data *)data;
+
+- nfs_put_client(cdata->args.client);
+ if (cdata->xprt) {
+ xprt_put(cdata->xprt);
+ rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient);
+ }
++ nfs_put_client(cdata->args.client);
+ kfree(cdata->res.impl_id);
+ kfree(cdata->res.server_scope);
+ kfree(cdata->res.server_owner);
+@@ -7537,10 +7537,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+ task_setup_data.callback_data = calldata;
+
+ task = rpc_run_task(&task_setup_data);
+- if (IS_ERR(task)) {
+- status = PTR_ERR(task);
+- goto out_impl_id;
+- }
++ if (IS_ERR(task))
++ return PTR_ERR(task);
+
+ if (!xprt) {
+ status = rpc_wait_for_completion_task(task);
+@@ -7568,6 +7566,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+ kfree(calldata->res.server_owner);
+ out_calldata:
+ kfree(calldata);
++ nfs_put_client(clp);
+ goto out;
+ }
+
+diff --git a/include/linux/log2.h b/include/linux/log2.h
+index fd7ff3d..f38fae2 100644
+--- a/include/linux/log2.h
++++ b/include/linux/log2.h
+@@ -16,12 +16,6 @@
+ #include <linux/bitops.h>
+
+ /*
+- * deal with unrepresentable constant logarithms
+- */
+-extern __attribute__((const, noreturn))
+-int ____ilog2_NaN(void);
+-
+-/*
+ * non-constant log of base 2 calculators
+ * - the arch may override these in asm/bitops.h if they can be implemented
+ * more efficiently than using fls() and fls64()
+@@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+ #define ilog2(n) \
+ ( \
+ __builtin_constant_p(n) ? ( \
+- (n) < 1 ? ____ilog2_NaN() : \
++ (n) < 2 ? 0 : \
+ (n) & (1ULL << 63) ? 63 : \
+ (n) & (1ULL << 62) ? 62 : \
+ (n) & (1ULL << 61) ? 61 : \
+@@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+ (n) & (1ULL << 4) ? 4 : \
+ (n) & (1ULL << 3) ? 3 : \
+ (n) & (1ULL << 2) ? 2 : \
+- (n) & (1ULL << 1) ? 1 : \
+- (n) & (1ULL << 0) ? 0 : \
+- ____ilog2_NaN() \
+- ) : \
++ 1 ) : \
+ (sizeof(n) <= 4) ? \
+ __ilog2_u32(n) : \
+ __ilog2_u64(n) \
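
The log2.h change lets a constant ilog2(1) fold to 0 instead of referencing the old ____ilog2_NaN() link-error trick, since "(n) < 2" now terminates the constant chain. The macro's shape is a __builtin_constant_p() selector over a folded ternary ladder; a trimmed 8-bit version shows it (the real macro walks all 64 bits and falls back to fls()-based helpers at runtime).

    #include <stdio.h>

    static int ilog2_runtime(unsigned long n)
    {
        int r = 0;

        while (n >>= 1)
            r++;
        return r;
    }

    #define ILOG2(n)                                  \
        (__builtin_constant_p(n) ? (                  \
            (n) < 2 ? 0 :                             \
            (n) & (1UL << 7) ? 7 :                    \
            (n) & (1UL << 6) ? 6 :                    \
            (n) & (1UL << 5) ? 5 :                    \
            (n) & (1UL << 4) ? 4 :                    \
            (n) & (1UL << 3) ? 3 :                    \
            (n) & (1UL << 2) ? 2 : 1) :               \
         ilog2_runtime(n))

    int main(void)
    {
        volatile unsigned long x = 100;  /* defeats constant folding */

        /* the first two fold at compile time; ilog2(1) is the fixed case */
        printf("%d %d %d\n", ILOG2(1), ILOG2(64), ILOG2(x));
        return 0;
    }
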
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 4d1c46a..c7b1dc7 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -196,6 +196,7 @@ struct iscsi_conn {
+ struct iscsi_task *task; /* xmit task in progress */
+
+ /* xmit */
++ spinlock_t taskqueuelock; /* protects the next three lists */
+ struct list_head mgmtqueue; /* mgmt (control) xmit queue */
+ struct list_head cmdqueue; /* data-path cmd queue */
+ struct list_head requeue; /* tasks needing another run */
+diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c
+index 2bd6737..a57242e 100644
+--- a/kernel/cgroup_pids.c
++++ b/kernel/cgroup_pids.c
+@@ -229,7 +229,7 @@ static int pids_can_fork(struct task_struct *task)
+ /* Only log the first time events_limit is incremented. */
+ if (atomic64_inc_return(&pids->events_limit) == 1) {
+ pr_info("cgroup: fork rejected by pids controller in ");
+- pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id));
++ pr_cont_cgroup_path(css->cgroup);
+ pr_cont("\n");
+ }
+ cgroup_file_notify(&pids->events_file);
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 4b33231..07c0dc8 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -10333,6 +10333,17 @@ void perf_event_free_task(struct task_struct *task)
+ continue;
+
+ mutex_lock(&ctx->mutex);
++ raw_spin_lock_irq(&ctx->lock);
++ /*
++ * Destroy the task <-> ctx relation and mark the context dead.
++ *
++ * This is important because even though the task hasn't been
++ * exposed yet the context has been (through child_list).
++ */
++ RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
++ WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
++ put_task_struct(task); /* cannot be last */
++ raw_spin_unlock_irq(&ctx->lock);
+ again:
+ list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
+ group_entry)
+@@ -10586,7 +10597,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
+ ret = inherit_task_group(event, parent, parent_ctx,
+ child, ctxn, &inherited_all);
+ if (ret)
+- break;
++ goto out_unlock;
+ }
+
+ /*
+@@ -10602,7 +10613,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
+ ret = inherit_task_group(event, parent, parent_ctx,
+ child, ctxn, &inherited_all);
+ if (ret)
+- break;
++ goto out_unlock;
+ }
+
+ raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+@@ -10630,6 +10641,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
+ }
+
+ raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
++out_unlock:
+ mutex_unlock(&parent_ctx->mutex);
+
+ perf_unpin_context(parent_ctx);
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 2557143..f014ceb 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1010,8 +1010,11 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
+ mutex_unlock(&pcpu_alloc_mutex);
+ }
+
+- if (chunk != pcpu_reserved_chunk)
++ if (chunk != pcpu_reserved_chunk) {
++ spin_lock_irqsave(&pcpu_lock, flags);
+ pcpu_nr_empty_pop_pages -= occ_pages;
++ spin_unlock_irqrestore(&pcpu_lock, flags);
++ }
+
+ if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
+ pcpu_schedule_balance_work();
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index e2c37061..69502fa 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -486,7 +486,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
+ struct ib_cq *sendcq, *recvcq;
+ int rc;
+
+- max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES);
++ max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
++ RPCRDMA_MAX_SEND_SGES);
+ if (max_sge < RPCRDMA_MIN_SEND_SGES) {
+ pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
+ return -ENOMEM;
+diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
+index 4144666..d5677d3 100644
+--- a/tools/include/linux/log2.h
++++ b/tools/include/linux/log2.h
+@@ -13,12 +13,6 @@
+ #define _TOOLS_LINUX_LOG2_H
+
+ /*
+- * deal with unrepresentable constant logarithms
+- */
+-extern __attribute__((const, noreturn))
+-int ____ilog2_NaN(void);
+-
+-/*
+ * non-constant log of base 2 calculators
+ * - the arch may override these in asm/bitops.h if they can be implemented
+ * more efficiently than using fls() and fls64()
+@@ -78,7 +72,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+ #define ilog2(n) \
+ ( \
+ __builtin_constant_p(n) ? ( \
+- (n) < 1 ? ____ilog2_NaN() : \
++ (n) < 2 ? 0 : \
+ (n) & (1ULL << 63) ? 63 : \
+ (n) & (1ULL << 62) ? 62 : \
+ (n) & (1ULL << 61) ? 61 : \
+@@ -141,10 +135,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+ (n) & (1ULL << 4) ? 4 : \
+ (n) & (1ULL << 3) ? 3 : \
+ (n) & (1ULL << 2) ? 2 : \
+- (n) & (1ULL << 1) ? 1 : \
+- (n) & (1ULL << 0) ? 0 : \
+- ____ilog2_NaN() \
+- ) : \
++ 1 ) : \
+ (sizeof(n) <= 4) ? \
+ __ilog2_u32(n) : \
+ __ilog2_u64(n) \
diff --git a/4.9.16/4420_grsecurity-3.1-4.9.16-201703180820.patch b/4.9.18/4420_grsecurity-3.1-4.9.18-201703261106.patch
index 8d585e2..3659b97 100644
--- a/4.9.16/4420_grsecurity-3.1-4.9.16-201703180820.patch
+++ b/4.9.18/4420_grsecurity-3.1-4.9.18-201703261106.patch
@@ -419,7 +419,7 @@ index 3d0ae15..84e5412 100644
cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags)
diff --git a/Makefile b/Makefile
-index 4e0f962..202756a 100644
+index c10d0e6..54799eb2 100644
--- a/Makefile
+++ b/Makefile
@@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -4916,10 +4916,10 @@ index a4ec240..96faf9b 100644
#ifdef CONFIG_THUMB2_KERNEL
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
-index 969ef88..305b856 100644
+index cf57a77..ab33bd2 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -896,6 +896,7 @@ config RELOCATABLE
+@@ -906,6 +906,7 @@ config RELOCATABLE
config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
@@ -21174,7 +21174,7 @@ index b28200d..e93e14d 100644
while (amd_iommu_v2_event_descs[i].attr.attr.name)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
-index 7fe88bb..afd1630 100644
+index 38623e2..7eae820 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1570,7 +1570,7 @@ static void __init pmu_check_apic(void)
@@ -30027,10 +30027,10 @@ index cdc0dea..ada8a20 100644
static void microcode_fini_cpu(int cpu)
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
-index 8f44c5a..ed71f8c 100644
+index f228f74..8f3df2a 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
-@@ -206,7 +206,7 @@ static void __init ms_hyperv_init_platform(void)
+@@ -230,7 +230,7 @@ static void __init ms_hyperv_init_platform(void)
x86_platform.get_nmi_reason = hv_get_nmi_reason;
}
@@ -30928,10 +30928,10 @@ index 8639bb2..aaa97ae 100644
/* ALLOC_TRAMP flags lets us know we created it */
ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
-index 54a2372..46504a4 100644
+index b5785c1..c60cbcf 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
-@@ -62,12 +62,12 @@ int __init early_make_pgtable(unsigned long address)
+@@ -63,12 +63,12 @@ int __init early_make_pgtable(unsigned long address)
pgd = *pgd_p;
/*
@@ -30947,7 +30947,7 @@ index 54a2372..46504a4 100644
else {
if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
reset_early_page_tables();
-@@ -76,13 +76,13 @@ int __init early_make_pgtable(unsigned long address)
+@@ -77,13 +77,13 @@ int __init early_make_pgtable(unsigned long address)
pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
@@ -30963,7 +30963,7 @@ index 54a2372..46504a4 100644
else {
if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
reset_early_page_tables();
-@@ -91,7 +91,7 @@ int __init early_make_pgtable(unsigned long address)
+@@ -92,7 +92,7 @@ int __init early_make_pgtable(unsigned long address)
pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
@@ -30972,7 +30972,7 @@ index 54a2372..46504a4 100644
}
pmd = (physaddr & PMD_MASK) + early_pmd_flags;
pmd_p[pmd_index(address)] = pmd;
-@@ -155,8 +155,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
+@@ -156,8 +156,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
clear_bss();
@@ -35339,7 +35339,7 @@ index bd4e3d4..3e938e3 100644
#endif
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
-index 46b2f41..666b35b 100644
+index eea88fe..443da46 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -24,6 +24,7 @@
@@ -45469,7 +45469,7 @@ index bcd86e5..fe457ef 100644
(u8 *) pte, count) < count) {
kfree(pte);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
-index 0774799..a0012ea 100644
+index c6fee74..49c7f8f 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
@@ -46125,7 +46125,7 @@ index 75f128e..0fbae68 100644
bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
if (!bgrt_kobj)
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
-index bdc67ba..a82756b 100644
+index 4421f7c..aa32b81 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -47,13 +47,13 @@ struct acpi_blacklist_item {
@@ -49501,7 +49501,7 @@ index 5649234..34b55b7 100644
static void resize_console(struct port *port)
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
-index 3bbd2a5..69b87bb 100644
+index 2acaa77..1d0128e 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -1147,8 +1147,9 @@ static const struct clk_ops bcm2835_vpu_clock_clk_ops = {
@@ -49817,7 +49817,7 @@ index 4d3ec92..cf501fc 100644
ret = cpufreq_register_driver(&dt_cpufreq_driver);
if (ret)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
-index 6e6c1fb..ccc5cd2 100644
+index 272608f..5c4a47a 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -528,12 +528,12 @@ EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
@@ -49835,7 +49835,7 @@ index 6e6c1fb..ccc5cd2 100644
const char *buf, size_t count)
{
int ret, enable;
-@@ -2114,7 +2114,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
+@@ -2116,7 +2116,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
mutex_lock(&cpufreq_governor_mutex);
@@ -49844,7 +49844,7 @@ index 6e6c1fb..ccc5cd2 100644
mutex_unlock(&cpufreq_governor_mutex);
return;
}
-@@ -2334,13 +2334,17 @@ int cpufreq_boost_trigger_state(int state)
+@@ -2336,13 +2336,17 @@ int cpufreq_boost_trigger_state(int state)
return 0;
write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -49864,7 +49864,7 @@ index 6e6c1fb..ccc5cd2 100644
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_err("%s: Cannot %s BOOST\n",
-@@ -2381,7 +2385,9 @@ int cpufreq_enable_boost_support(void)
+@@ -2383,7 +2387,9 @@ int cpufreq_enable_boost_support(void)
if (cpufreq_boost_supported())
return 0;
@@ -49875,7 +49875,7 @@ index 6e6c1fb..ccc5cd2 100644
/* This will get removed on driver unregister */
return create_boost_sysfs_file();
-@@ -2439,8 +2445,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+@@ -2441,8 +2447,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -54947,10 +54947,10 @@ index 611b6b9..e0faec1 100644
#endif
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
-index 8703f56..7e8f99c 100644
+index 246d1ae..aa305a2 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
-@@ -180,6 +180,11 @@ static int compare_dev(struct device *dev, void *data)
+@@ -183,6 +183,11 @@ static int compare_dev(struct device *dev, void *data)
return dev == data;
}
@@ -54962,7 +54962,7 @@ index 8703f56..7e8f99c 100644
static void vc4_match_add_drivers(struct device *dev,
struct component_match **match,
struct platform_driver *const *drivers,
-@@ -191,8 +196,7 @@ static void vc4_match_add_drivers(struct device *dev,
+@@ -194,8 +199,7 @@ static void vc4_match_add_drivers(struct device *dev,
struct device_driver *drv = &drivers[i]->driver;
struct device *p = NULL, *d;
@@ -58729,10 +58729,10 @@ index 6a2df32..dc962f1 100644
capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
capimsg_setu16(skb->data, 16, len); /* Data length */
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
-index aecec6d..11e13c5 100644
+index 7f1c625..2da3ff6 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
-@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
+@@ -2568,22 +2568,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
static const struct gigaset_ops gigops = {
@@ -62356,7 +62356,7 @@ index 29e2df5..c367325 100644
"md/raid1:%s: read error corrected "
"(%d sectors at %llu on %s)\n",
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
-index 39fddda..be1dd54 100644
+index 55b5e0e..4969510 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1063,7 +1063,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
@@ -62377,7 +62377,7 @@ index 39fddda..be1dd54 100644
struct bio *split;
-@@ -1829,7 +1829,7 @@ static void end_sync_read(struct bio *bio)
+@@ -1847,7 +1847,7 @@ static void end_sync_read(struct bio *bio)
/* The write handler will notice the lack of
* R10BIO_Uptodate and record any errors etc
*/
@@ -62386,7 +62386,7 @@ index 39fddda..be1dd54 100644
&conf->mirrors[d].rdev->corrected_errors);
/* for reconstruct, we always reschedule after a read.
-@@ -1978,7 +1978,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
+@@ -1996,7 +1996,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
}
if (j == vcnt)
continue;
@@ -62395,7 +62395,7 @@ index 39fddda..be1dd54 100644
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
/* Don't fix anything. */
continue;
-@@ -2177,7 +2177,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
+@@ -2195,7 +2195,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
{
long cur_time_mon;
unsigned long hours_since_last;
@@ -62404,7 +62404,7 @@ index 39fddda..be1dd54 100644
cur_time_mon = ktime_get_seconds();
-@@ -2198,9 +2198,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
+@@ -2216,9 +2216,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
* overflowing the shift of read_errors by hours_since_last.
*/
if (hours_since_last >= 8 * sizeof(read_errors))
@@ -62416,7 +62416,7 @@ index 39fddda..be1dd54 100644
}
static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
-@@ -2254,8 +2254,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+@@ -2272,8 +2272,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
return;
check_decay_read_errors(mddev, rdev);
@@ -62427,7 +62427,7 @@ index 39fddda..be1dd54 100644
char b[BDEVNAME_SIZE];
bdevname(rdev->bdev, b);
-@@ -2263,7 +2263,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+@@ -2281,7 +2281,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
"md/raid10:%s: %s: Raid device exceeded "
"read_error threshold [cur %d:max %d]\n",
mdname(mddev), b,
@@ -62436,7 +62436,7 @@ index 39fddda..be1dd54 100644
printk(KERN_NOTICE
"md/raid10:%s: %s: Failing raid device\n",
mdname(mddev), b);
-@@ -2420,7 +2420,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+@@ -2438,7 +2438,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
sect +
choose_data_offset(r10_bio, rdev)),
bdevname(rdev->bdev, b));
@@ -62445,7 +62445,7 @@ index 39fddda..be1dd54 100644
}
rdev_dec_pending(rdev, mddev);
-@@ -3191,6 +3191,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+@@ -3209,6 +3209,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
} else {
/* resync. Schedule a read for every block at this virt offset */
int count = 0;
@@ -62453,7 +62453,7 @@ index 39fddda..be1dd54 100644
bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0);
-@@ -3216,7 +3217,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+@@ -3234,7 +3235,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->sector = sector_nr;
set_bit(R10BIO_IsSync, &r10_bio->state);
raid10_find_phys(conf, r10_bio);
@@ -63483,10 +63483,10 @@ index 2cc4d2b..3a559c8 100644
if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret;
return pvr2_ioread_set_enabled(fh->rhp,!0);
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
-index 302e284..93781d6 100644
+index cde43b6..8412dfc 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
-@@ -2078,7 +2078,7 @@ static int uvc_reset_resume(struct usb_interface *intf)
+@@ -2184,7 +2184,7 @@ static int uvc_reset_resume(struct usb_interface *intf)
* Module parameters
*/
@@ -63495,7 +63495,7 @@ index 302e284..93781d6 100644
{
if (uvc_clock_param == CLOCK_MONOTONIC)
return sprintf(buffer, "CLOCK_MONOTONIC");
-@@ -2086,7 +2086,7 @@ static int uvc_clock_param_get(char *buffer, struct kernel_param *kp)
+@@ -2192,7 +2192,7 @@ static int uvc_clock_param_get(char *buffer, struct kernel_param *kp)
return sprintf(buffer, "CLOCK_REALTIME");
}
@@ -65417,7 +65417,7 @@ index 22570ea..c462375 100644
Say Y here if you want to support for Freescale FlexCAN.
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
-index 1deb8ff..4e2b0c1 100644
+index 1deb8ff9..4e2b0c1 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -338,7 +338,7 @@ static int bfin_can_get_berr_counter(const struct net_device *dev,
@@ -69309,10 +69309,10 @@ index 93dc10b..6598671 100644
struct net_local *lp = netdev_priv(dev);
struct sk_buff *new_skb;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
-index 8b4822a..e99c1c4 100644
+index 3c1f89a..9b9e82d 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
-@@ -1467,7 +1467,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
+@@ -1473,7 +1473,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
return -EMSGSIZE;
}
@@ -69321,7 +69321,7 @@ index 8b4822a..e99c1c4 100644
.kind = "geneve",
.maxtype = IFLA_GENEVE_MAX,
.policy = geneve_policy,
-@@ -1533,7 +1533,7 @@ static int geneve_netdevice_event(struct notifier_block *unused,
+@@ -1539,7 +1539,7 @@ static int geneve_netdevice_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -69858,10 +69858,10 @@ index a380649..fd8fe79c 100644
};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
-index b31aca8..3853488 100644
+index a931b73..a07f1cb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
-@@ -966,7 +966,7 @@ static void tun_set_headroom(struct net_device *dev, int new_hr)
+@@ -977,7 +977,7 @@ static void tun_set_headroom(struct net_device *dev, int new_hr)
{
struct tun_struct *tun = netdev_priv(dev);
@@ -69870,7 +69870,7 @@ index b31aca8..3853488 100644
new_hr = NET_SKB_PAD;
tun->align = new_hr;
-@@ -1550,7 +1550,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
+@@ -1562,7 +1562,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
return -EINVAL;
}
@@ -69879,7 +69879,7 @@ index b31aca8..3853488 100644
.kind = DRV_NAME,
.priv_size = sizeof(struct tun_struct),
.setup = tun_setup,
-@@ -1979,7 +1979,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
+@@ -1991,7 +1991,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
}
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
@@ -69888,7 +69888,7 @@ index b31aca8..3853488 100644
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun;
-@@ -1993,6 +1993,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+@@ -2005,6 +2005,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
int le;
int ret;
@@ -69898,7 +69898,7 @@ index b31aca8..3853488 100644
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
if (copy_from_user(&ifr, argp, ifreq_len))
return -EFAULT;
-@@ -2508,7 +2511,7 @@ static int tun_device_event(struct notifier_block *unused,
+@@ -2520,7 +2523,7 @@ static int tun_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -70074,10 +70074,10 @@ index 51fc0c3..6cc1baa 100644
#define VIRTNET_DRIVER_VERSION "1.0.0"
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
-index 95cf1d8..b2a00f6 100644
+index bc744ac..2abf77e 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
-@@ -1296,7 +1296,7 @@ static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
+@@ -1297,7 +1297,7 @@ static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};
@@ -70086,7 +70086,7 @@ index 95cf1d8..b2a00f6 100644
.kind = DRV_NAME,
.priv_size = sizeof(struct net_vrf),
-@@ -1333,7 +1333,7 @@ static int vrf_device_event(struct notifier_block *unused,
+@@ -1334,7 +1334,7 @@ static int vrf_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -70096,10 +70096,10 @@ index 95cf1d8..b2a00f6 100644
};
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
-index d4f495b..9b39d92 100644
+index 3c4c2cf..3cbf47b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
-@@ -3195,7 +3195,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
+@@ -3196,7 +3196,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
return vxlan->net;
}
@@ -70108,7 +70108,7 @@ index d4f495b..9b39d92 100644
.kind = "vxlan",
.maxtype = IFLA_VXLAN_MAX,
.policy = vxlan_policy,
-@@ -3279,7 +3279,7 @@ static int vxlan_netdevice_event(struct notifier_block *unused,
+@@ -3280,7 +3280,7 @@ static int vxlan_netdevice_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -75437,7 +75437,7 @@ index bcd10c7..c7c18bc 100644
if (!sysfs_initialized)
return -EACCES;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
-index 45185621..fd0ac76 100644
+index a5d37f6..8c7494b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -116,7 +116,7 @@ struct pci_vpd_ops {
@@ -75449,7 +75449,7 @@ index 45185621..fd0ac76 100644
struct mutex lock;
unsigned int len;
u16 flag;
-@@ -317,7 +317,7 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
+@@ -312,7 +312,7 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
#endif /* CONFIG_PCI_IOV */
@@ -75507,7 +75507,7 @@ index 79327cc..28fde3f 100644
* Boxes that should not use MSI for PCIe PME signaling.
*/
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
-index 300770c..552fc7e 100644
+index d266d80..ada4895 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -180,7 +180,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
@@ -79116,7 +79116,7 @@ index a63542b..80692ee 100644
snprintf(name, sizeof(name), "discovery_trace");
vport->debug_disc_trc =
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
-index 734a042..5f4c380 100644
+index f7e3f27..e77bed0 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -11127,7 +11127,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
@@ -79128,7 +79128,7 @@ index 734a042..5f4c380 100644
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
-@@ -11434,8 +11434,10 @@ lpfc_init(void)
+@@ -11435,8 +11435,10 @@ lpfc_init(void)
printk(KERN_ERR "Could not register lpfcmgmt device, "
"misc_register returned with status %d", error);
@@ -79531,7 +79531,7 @@ index bea819e..fb745e0 100644
scsi_qla_host_t *vha = pci_get_drvdata(pdev);
struct qla_hw_data *ha = vha->hw;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
-index bff9689..8caa187 100644
+index feab7ea..94d8a9c 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -678,7 +678,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
@@ -79554,7 +79554,7 @@ index bff9689..8caa187 100644
struct qla_tgt *tgt = container_of(work, struct qla_tgt,
sess_del_work);
struct scsi_qla_host *vha = tgt->vha;
-@@ -5825,7 +5826,7 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
+@@ -5831,7 +5832,7 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
retry:
global_resets =
@@ -79563,7 +79563,7 @@ index bff9689..8caa187 100644
rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
if (rc != 0) {
-@@ -5864,12 +5865,12 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
+@@ -5870,12 +5871,12 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
}
if (global_resets !=
@@ -79578,7 +79578,7 @@ index bff9689..8caa187 100644
qla_tgt->tgt_global_resets_count));
goto retry;
}
-@@ -6080,8 +6081,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
+@@ -6086,8 +6087,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
init_waitqueue_head(&tgt->waitQ);
INIT_LIST_HEAD(&tgt->sess_list);
INIT_LIST_HEAD(&tgt->del_sess_list);
@@ -79588,7 +79588,7 @@ index bff9689..8caa187 100644
spin_lock_init(&tgt->sess_work_lock);
INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
INIT_LIST_HEAD(&tgt->sess_works_list);
-@@ -6089,7 +6089,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
+@@ -6095,7 +6095,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
INIT_LIST_HEAD(&tgt->srr_ctio_list);
INIT_LIST_HEAD(&tgt->srr_imm_list);
INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
@@ -84940,10 +84940,10 @@ index e8819aa..33d2176 100644
if (share_irqs)
irqflag = IRQF_SHARED;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
-index 4d09bd4..5c7839e 100644
+index 6e3e636..9064253 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
-@@ -5582,7 +5582,7 @@ static struct pci_device_id serial_pci_tbl[] = {
+@@ -5588,7 +5588,7 @@ static struct pci_device_id serial_pci_tbl[] = {
};
static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev,
@@ -107692,7 +107692,7 @@ index cf68100..f96c5c0 100644
err = ext4_handle_dirty_metadata(handle, NULL, bh);
if (unlikely(err))
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index afe29ba..6032d48 100644
+index 5fa9ba1..f4d4551 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -994,10 +994,12 @@ static void init_once(void *foo)
@@ -148202,7 +148202,7 @@ index 4e2f3de..d50672d 100644
list_for_each_entry(task, &cset->tasks, cg_list) {
if (count++ > MAX_TASKS_SHOWN_PER_CSS)
diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c
-index 2bd6737..9b0ddd4 100644
+index a57242e..da67bb2 100644
--- a/kernel/cgroup_pids.c
+++ b/kernel/cgroup_pids.c
@@ -54,7 +54,7 @@ struct pids_cgroup {
@@ -148230,7 +148230,7 @@ index 2bd6737..9b0ddd4 100644
- if (atomic64_inc_return(&pids->events_limit) == 1) {
+ if (atomic64_inc_return_unchecked(&pids->events_limit) == 1) {
pr_info("cgroup: fork rejected by pids controller in ");
- pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id));
+ pr_cont_cgroup_path(css->cgroup);
pr_cont("\n");
@@ -310,7 +310,7 @@ static int pids_events_show(struct seq_file *sf, void *v)
{
@@ -148800,7 +148800,7 @@ index e9fdb52..cfb547d 100644
new_table.data = &new_value;
ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 4b33231..e0edf1b 100644
+index 07c0dc8..26e0271 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -389,8 +389,15 @@ static struct srcu_struct pmus_srcu;
@@ -149740,7 +149740,7 @@ index ba8a015..37d2e1d 100644
int threads = max_threads;
int min = MIN_THREADS;
diff --git a/kernel/futex.c b/kernel/futex.c
-index 38b68c2..1940ab9 100644
+index 4c6b6e6..2f72a22 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -210,7 +210,7 @@ struct futex_pi_state {
@@ -149773,7 +149773,7 @@ index 38b68c2..1940ab9 100644
/*
* The futex address must be "naturally" aligned.
*/
-@@ -3279,6 +3284,7 @@ static void __init futex_detect_cmpxchg(void)
+@@ -3283,6 +3288,7 @@ static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
u32 curval;
@@ -149781,7 +149781,7 @@ index 38b68c2..1940ab9 100644
/*
* This will fail and we want it. Some arch implementations do
-@@ -3290,8 +3296,11 @@ static void __init futex_detect_cmpxchg(void)
+@@ -3294,8 +3300,11 @@ static void __init futex_detect_cmpxchg(void)
* implementation, the non-functional ones will return
* -ENOSYS.
*/
@@ -160056,7 +160056,7 @@ index 1460e6a..154adc1f 100644
#ifdef CONFIG_HIBERNATION
diff --git a/mm/percpu.c b/mm/percpu.c
-index 2557143..19f5eca 100644
+index f014ceb..9b37d31 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -133,7 +133,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
@@ -160321,7 +160321,7 @@ index 9d32e1c..054adce 100644
return -ENOMEM;
diff --git a/mm/slab.c b/mm/slab.c
-index bd878f0..d96f2c6 100644
+index 1f82d16..d9233f3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -116,6 +116,7 @@
@@ -160569,7 +160569,7 @@ index bd878f0..d96f2c6 100644
#endif /* CONFIG_HARDENED_USERCOPY */
diff --git a/mm/slab.h b/mm/slab.h
-index bc05fdc..ffe0dbc 100644
+index ceb7d70..99ab7d7 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -21,8 +21,10 @@ struct kmem_cache {
@@ -160653,7 +160653,7 @@ index bc05fdc..ffe0dbc 100644
if (slab_equal_or_root(cachep, s))
return cachep;
diff --git a/mm/slab_common.c b/mm/slab_common.c
-index 329b038..52e9e91 100644
+index 5d2f24f..cb5d8a4 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -25,11 +25,35 @@
@@ -160839,7 +160839,7 @@ index 329b038..52e9e91 100644
root_cache->ctor, memcg, root_cache);
/*
* If we could not create a memcg cache, do not complain, because
-@@ -718,8 +771,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
+@@ -741,8 +794,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
kasan_cache_destroy(s);
mutex_lock(&slab_mutex);
@@ -160849,7 +160849,7 @@ index 329b038..52e9e91 100644
goto out_unlock;
err = shutdown_memcg_caches(s, &release, &need_rcu_barrier);
-@@ -770,13 +822,15 @@ bool slab_is_available(void)
+@@ -793,13 +845,15 @@ bool slab_is_available(void)
#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
@@ -160866,7 +160866,7 @@ index 329b038..52e9e91 100644
slab_init_memcg_params(s);
-@@ -786,23 +840,29 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
+@@ -809,23 +863,29 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
name, size, err);
@@ -160901,7 +160901,7 @@ index 329b038..52e9e91 100644
struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);
-@@ -811,6 +871,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+@@ -834,6 +894,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif
@@ -160913,7 +160913,7 @@ index 329b038..52e9e91 100644
/*
* Conversion table for small slabs sizes / 8 to the index in the
* kmalloc array. This is necessary for slabs < 192 since we have non power
-@@ -875,6 +940,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
+@@ -898,6 +963,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
return kmalloc_dma_caches[index];
#endif
@@ -160927,7 +160927,7 @@ index 329b038..52e9e91 100644
return kmalloc_caches[index];
}
-@@ -952,8 +1024,8 @@ void __init setup_kmalloc_cache_index_table(void)
+@@ -975,8 +1047,8 @@ void __init setup_kmalloc_cache_index_table(void)
static void __init new_kmalloc_cache(int idx, unsigned long flags)
{
@@ -160938,7 +160938,7 @@ index 329b038..52e9e91 100644
}
/*
-@@ -998,6 +1070,23 @@ void __init create_kmalloc_caches(unsigned long flags)
+@@ -1021,6 +1093,23 @@ void __init create_kmalloc_caches(unsigned long flags)
}
}
#endif
@@ -160962,7 +160962,7 @@ index 329b038..52e9e91 100644
}
#endif /* !CONFIG_SLOB */
-@@ -1013,6 +1102,12 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+@@ -1036,6 +1125,12 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
flags |= __GFP_COMP;
page = alloc_pages(flags, order);
@@ -160975,7 +160975,7 @@ index 329b038..52e9e91 100644
ret = page ? page_address(page) : NULL;
kmemleak_alloc(ret, size, 1, flags);
kasan_kmalloc_large(ret, size, flags);
-@@ -1102,6 +1197,9 @@ static void print_slabinfo_header(struct seq_file *m)
+@@ -1125,6 +1220,9 @@ static void print_slabinfo_header(struct seq_file *m)
#ifdef CONFIG_DEBUG_SLAB
seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
@@ -160985,7 +160985,7 @@ index 329b038..52e9e91 100644
#endif
seq_putc(m, '\n');
}
-@@ -1231,7 +1329,7 @@ static int __init slab_proc_init(void)
+@@ -1254,7 +1352,7 @@ static int __init slab_proc_init(void)
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */
@@ -160995,7 +160995,7 @@ index 329b038..52e9e91 100644
{
void *ret;
diff --git a/mm/slob.c b/mm/slob.c
-index 5ec1580..eea07f2 100644
+index eac04d43..73c02ba 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -67,6 +67,7 @@
@@ -161456,7 +161456,7 @@ index 5ec1580..eea07f2 100644
EXPORT_SYMBOL(kmem_cache_free);
diff --git a/mm/slub.c b/mm/slub.c
-index 7aa0e97..ca3813c 100644
+index 58c7526..5566ff1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -34,6 +34,7 @@
@@ -161663,7 +161663,7 @@ index 7aa0e97..ca3813c 100644
page = virt_to_head_page(x);
if (unlikely(!PageSlab(page))) {
BUG_ON(!PageCompound(page));
-@@ -4135,7 +4201,7 @@ void __init kmem_cache_init(void)
+@@ -4120,7 +4186,7 @@ void __init kmem_cache_init(void)
kmem_cache = &boot_kmem_cache;
create_boot_cache(kmem_cache_node, "kmem_cache_node",
@@ -161672,7 +161672,7 @@ index 7aa0e97..ca3813c 100644
register_hotmemory_notifier(&slab_memory_callback_nb);
-@@ -4145,7 +4211,7 @@ void __init kmem_cache_init(void)
+@@ -4130,7 +4196,7 @@ void __init kmem_cache_init(void)
create_boot_cache(kmem_cache, "kmem_cache",
offsetof(struct kmem_cache, node) +
nr_node_ids * sizeof(struct kmem_cache_node *),
@@ -161681,7 +161681,7 @@ index 7aa0e97..ca3813c 100644
kmem_cache = bootstrap(&boot_kmem_cache);
-@@ -4184,7 +4250,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
+@@ -4169,7 +4235,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
@@ -161690,7 +161690,7 @@ index 7aa0e97..ca3813c 100644
/*
* Adjust the object sizes so that we clear
-@@ -4200,7 +4266,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
+@@ -4185,7 +4251,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
}
if (sysfs_slab_alias(s, name)) {
@@ -161699,7 +161699,7 @@ index 7aa0e97..ca3813c 100644
s = NULL;
}
}
-@@ -4212,6 +4278,8 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
+@@ -4197,6 +4263,8 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
{
int err;
@@ -161708,7 +161708,7 @@ index 7aa0e97..ca3813c 100644
err = kmem_cache_open(s, flags);
if (err)
return err;
-@@ -4280,7 +4348,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
+@@ -4265,7 +4333,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
}
#endif
@@ -161717,7 +161717,7 @@ index 7aa0e97..ca3813c 100644
static int count_inuse(struct page *page)
{
return page->inuse;
-@@ -4561,7 +4629,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
+@@ -4546,7 +4614,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
len += sprintf(buf + len, "%7ld ", l->count);
if (l->addr)
@@ -161729,7 +161729,7 @@ index 7aa0e97..ca3813c 100644
else
len += sprintf(buf + len, "<not-available>");
-@@ -4659,12 +4731,12 @@ static void __init resiliency_test(void)
+@@ -4644,12 +4716,12 @@ static void __init resiliency_test(void)
validate_slab_cache(kmalloc_caches[9]);
}
#else
@@ -161744,7 +161744,7 @@ index 7aa0e97..ca3813c 100644
enum slab_stat_type {
SL_ALL, /* All slabs */
SL_PARTIAL, /* Only partially allocated slabs */
-@@ -4901,13 +4973,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
+@@ -4886,13 +4958,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
if (!s->ctor)
return 0;
@@ -161763,7 +161763,7 @@ index 7aa0e97..ca3813c 100644
}
SLAB_ATTR_RO(aliases);
-@@ -4995,6 +5071,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
+@@ -4980,6 +5056,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
SLAB_ATTR_RO(cache_dma);
#endif
@@ -161786,7 +161786,7 @@ index 7aa0e97..ca3813c 100644
static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
-@@ -5050,7 +5142,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
+@@ -5035,7 +5127,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
* as well as cause other issues like converting a mergeable
* cache into an umergeable one.
*/
@@ -161795,7 +161795,7 @@ index 7aa0e97..ca3813c 100644
return -EINVAL;
s->flags &= ~SLAB_TRACE;
-@@ -5168,7 +5260,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
+@@ -5153,7 +5245,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
size_t length)
{
@@ -161804,7 +161804,7 @@ index 7aa0e97..ca3813c 100644
return -EINVAL;
s->flags &= ~SLAB_FAILSLAB;
-@@ -5300,7 +5392,7 @@ STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
+@@ -5285,7 +5377,7 @@ STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
#endif
@@ -161813,7 +161813,7 @@ index 7aa0e97..ca3813c 100644
&slab_size_attr.attr,
&object_size_attr.attr,
&objs_per_slab_attr.attr,
-@@ -5335,6 +5427,12 @@ static struct attribute *slab_attrs[] = {
+@@ -5320,6 +5412,12 @@ static struct attribute *slab_attrs[] = {
#ifdef CONFIG_ZONE_DMA
&cache_dma_attr.attr,
#endif
@@ -161826,7 +161826,7 @@ index 7aa0e97..ca3813c 100644
#ifdef CONFIG_NUMA
&remote_node_defrag_ratio_attr.attr,
#endif
-@@ -5578,6 +5676,7 @@ static char *create_unique_id(struct kmem_cache *s)
+@@ -5563,6 +5661,7 @@ static char *create_unique_id(struct kmem_cache *s)
return name;
}
@@ -161834,7 +161834,7 @@ index 7aa0e97..ca3813c 100644
static int sysfs_slab_add(struct kmem_cache *s)
{
int err;
-@@ -5649,6 +5748,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
+@@ -5634,6 +5733,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
kobject_del(&s->kobj);
kobject_put(&s->kobj);
}
@@ -161842,7 +161842,7 @@ index 7aa0e97..ca3813c 100644
/*
* Need to buffer aliases during bootup until sysfs becomes
-@@ -5662,6 +5762,7 @@ struct saved_alias {
+@@ -5647,6 +5747,7 @@ struct saved_alias {
static struct saved_alias *alias_list;
@@ -161850,7 +161850,7 @@ index 7aa0e97..ca3813c 100644
static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
struct saved_alias *al;
-@@ -5684,6 +5785,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
+@@ -5669,6 +5770,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
alias_list = al;
return 0;
}
@@ -163534,10 +163534,10 @@ index 2f2cb5e..5b9d8c6 100644
tty_port_close(&dev->port, tty, filp);
}
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
-index 7fbdbae..89ac4a9 100644
+index aa1df1a..0a9f1a9 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
-@@ -980,13 +980,13 @@ static void __net_exit brnf_exit_net(struct net *net)
+@@ -959,13 +959,13 @@ static void __net_exit brnf_exit_net(struct net *net)
brnet->enabled = false;
}
@@ -163912,10 +163912,10 @@ index b7de71f..808387d 100644
return err;
diff --git a/net/core/dev.c b/net/core/dev.c
-index 60b0a604..920cbea 100644
+index 2e04fd1..723a3c6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -2995,7 +2995,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
+@@ -3022,7 +3022,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
out_kfree_skb:
kfree_skb(skb);
out_null:
@@ -163924,7 +163924,7 @@ index 60b0a604..920cbea 100644
return NULL;
}
-@@ -3406,7 +3406,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+@@ -3433,7 +3433,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
rc = -ENETDOWN;
rcu_read_unlock_bh();
@@ -163933,7 +163933,7 @@ index 60b0a604..920cbea 100644
kfree_skb_list(skb);
return rc;
out:
-@@ -3759,7 +3759,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+@@ -3786,7 +3786,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
local_irq_restore(flags);
@@ -163942,7 +163942,7 @@ index 60b0a604..920cbea 100644
kfree_skb(skb);
return NET_RX_DROP;
}
-@@ -3836,7 +3836,7 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3863,7 +3863,7 @@ int netif_rx_ni(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx_ni);
@@ -163951,7 +163951,7 @@ index 60b0a604..920cbea 100644
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-@@ -4203,9 +4203,9 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
+@@ -4230,9 +4230,9 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
} else {
drop:
if (!deliver_exact)
@@ -163963,7 +163963,7 @@ index 60b0a604..920cbea 100644
kfree_skb(skb);
/* Jamal, now you will not able to escape explaining
* me how you were going to use this. :-)
-@@ -5192,7 +5192,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+@@ -5219,7 +5219,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
return work;
}
@@ -163972,7 +163972,7 @@ index 60b0a604..920cbea 100644
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies + 2;
-@@ -7535,9 +7535,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+@@ -7562,9 +7562,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
@@ -163985,7 +163985,7 @@ index 60b0a604..920cbea 100644
return storage;
}
EXPORT_SYMBOL(dev_get_stats);
-@@ -8162,7 +8162,7 @@ static void __net_exit netdev_exit(struct net *net)
+@@ -8189,7 +8189,7 @@ static void __net_exit netdev_exit(struct net *net)
kfree(net->dev_index_head);
}
@@ -163994,7 +163994,7 @@ index 60b0a604..920cbea 100644
.init = netdev_init,
.exit = netdev_exit,
};
-@@ -8262,7 +8262,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
+@@ -8289,7 +8289,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
rtnl_unlock();
}
@@ -164467,7 +164467,7 @@ index 2696aef..dbd5807 100644
if (!err)
err = put_user(SCM_RIGHTS, &cm->cmsg_type);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index 1e3e008..f3e4944 100644
+index f0f462c..e5d59e8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1047,7 +1047,8 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off)
@@ -165120,7 +165120,7 @@ index cb7176c..afd2c62 100644
return NULL;
}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
-index 21514324..fb6543d 100644
+index 971b947..db7beb2 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1447,7 +1447,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
@@ -165132,7 +165132,7 @@ index 21514324..fb6543d 100644
#endif
return -EINVAL;
}
-@@ -1657,7 +1657,7 @@ static __net_exit void ipv4_mib_exit_net(struct net *net)
+@@ -1659,7 +1659,7 @@ static __net_exit void ipv4_mib_exit_net(struct net *net)
free_percpu(net->mib.tcp_statistics);
}
@@ -165141,7 +165141,7 @@ index 21514324..fb6543d 100644
.init = ipv4_mib_init_net,
.exit = ipv4_mib_exit_net,
};
-@@ -1698,7 +1698,7 @@ static __net_exit void inet_exit_net(struct net *net)
+@@ -1700,7 +1700,7 @@ static __net_exit void inet_exit_net(struct net *net)
{
}
@@ -165883,7 +165883,7 @@ index ecbe5a7..8ae8a54 100644
.exit = raw_exit_net,
};
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
-index d851cae..5769b1a 100644
+index 17e6fbf..fdb89dc 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -232,7 +232,7 @@ static const struct seq_operations rt_cache_seq_ops = {
@@ -165931,7 +165931,7 @@ index d851cae..5769b1a 100644
static u32 *ip_tstamps __read_mostly;
/* In order to protect privacy, we add a perturbation to identifiers
-@@ -2777,34 +2777,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
+@@ -2778,34 +2778,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
.maxlen = sizeof(int),
.mode = 0200,
.proc_handler = ipv4_sysctl_rtcache_flush,
@@ -165974,7 +165974,7 @@ index d851cae..5769b1a 100644
err_dup:
return -ENOMEM;
}
-@@ -2819,7 +2819,7 @@ static __net_exit void sysctl_route_net_exit(struct net *net)
+@@ -2820,7 +2820,7 @@ static __net_exit void sysctl_route_net_exit(struct net *net)
kfree(tbl);
}
@@ -165983,7 +165983,7 @@ index d851cae..5769b1a 100644
.init = sysctl_route_net_init,
.exit = sysctl_route_net_exit,
};
-@@ -2827,14 +2827,14 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
+@@ -2828,14 +2828,14 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
static __net_init int rt_genid_init(struct net *net)
{
@@ -166001,7 +166001,7 @@ index d851cae..5769b1a 100644
.init = rt_genid_init,
};
-@@ -2858,7 +2858,7 @@ static void __net_exit ipv4_inetpeer_exit(struct net *net)
+@@ -2859,7 +2859,7 @@ static void __net_exit ipv4_inetpeer_exit(struct net *net)
kfree(bp);
}
@@ -166010,7 +166010,7 @@ index d851cae..5769b1a 100644
.init = ipv4_inetpeer_init,
.exit = ipv4_inetpeer_exit,
};
-@@ -2872,11 +2872,7 @@ int __init ip_rt_init(void)
+@@ -2873,11 +2873,7 @@ int __init ip_rt_init(void)
int rc = 0;
int cpu;
@@ -166119,7 +166119,7 @@ index 80bc36b..d70d622 100644
.exit = ipv4_sysctl_exit_net,
};
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index c71d49c..306109a 100644
+index ce42ded..9c93e33 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -288,11 +288,13 @@ static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
@@ -166177,9 +166177,9 @@ index c71d49c..306109a 100644
- if (th->fin)
+ if (th->fin || th->urg || th->psh)
goto discard;
- if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
- return 1;
-@@ -6235,7 +6239,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+ /* It is possible that we process SYN packets from backlog,
+ * so we need to make sure to disable BH right there.
+@@ -6241,7 +6245,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
#if IS_ENABLED(CONFIG_IPV6)
ireq->pktopts = NULL;
#endif
@@ -166189,7 +166189,7 @@ index c71d49c..306109a 100644
write_pnet(&ireq->ireq_net, sock_net(sk_listener));
ireq->ireq_family = sk_listener->sk_family;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
-index 2259114..2d5e8a0 100644
+index 6988566..2b781b3 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -87,6 +87,10 @@
@@ -166203,7 +166203,7 @@ index 2259114..2d5e8a0 100644
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
__be32 daddr, __be32 saddr, const struct tcphdr *th);
-@@ -1427,6 +1431,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
+@@ -1431,6 +1435,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
reset:
@@ -166213,7 +166213,7 @@ index 2259114..2d5e8a0 100644
tcp_v4_send_reset(rsk, skb);
discard:
kfree_skb(skb);
-@@ -1637,12 +1644,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
+@@ -1641,12 +1648,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
lookup:
sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
th->dest, &refcounted);
@@ -166236,7 +166236,7 @@ index 2259114..2d5e8a0 100644
if (sk->sk_state == TCP_NEW_SYN_RECV) {
struct request_sock *req = inet_reqsk(sk);
-@@ -1732,6 +1746,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
+@@ -1736,6 +1750,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
bad_packet:
__TCP_INC_STATS(net, TCP_MIB_INERRS);
} else {
@@ -166247,7 +166247,7 @@ index 2259114..2d5e8a0 100644
tcp_v4_send_reset(NULL, skb);
}
-@@ -2465,7 +2483,7 @@ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
+@@ -2469,7 +2487,7 @@ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}
@@ -166310,7 +166310,7 @@ index f6c50af..1eb9aa5 100644
cnt += width;
}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
-index 3ea1cf8..9faf8d7 100644
+index b1e65b3..fafad47 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -22,6 +22,10 @@
@@ -166767,7 +166767,7 @@ index 02761c9..530bd3e 100644
{
struct inet_hashinfo *hinfo = death_row->hashinfo;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
-index ef54852..56699bb 100644
+index 8c88a37..2885f6f 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -98,9 +98,9 @@ static int fib6_new_sernum(struct net *net)
@@ -166847,7 +166847,7 @@ index f6ba452..b04707b 100644
.maxtype = IFLA_IPTUN_MAX,
.policy = ip6_tnl_policy,
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
-index c299c1e..b5fd20d 100644
+index 66c2b4b..0610be2 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
@@ -166859,7 +166859,7 @@ index c299c1e..b5fd20d 100644
static int vti6_net_id __read_mostly;
struct vti6_net {
-@@ -1030,7 +1030,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
+@@ -1034,7 +1034,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
[IFLA_VTI_OKEY] = { .type = NLA_U32 },
};
@@ -166868,7 +166868,7 @@ index c299c1e..b5fd20d 100644
.kind = "vti6",
.maxtype = IFLA_VTI_MAX,
.policy = vti6_policy,
-@@ -1161,7 +1161,7 @@ static int vti6_device_event(struct notifier_block *unused,
+@@ -1165,7 +1165,7 @@ static int vti6_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -166951,7 +166951,7 @@ index 55aacea..482ad2e 100644
case IP6T_SO_GET_ENTRIES:
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
-index 9948b5c..95b3e7a 100644
+index 986d4ca..f8a55a5 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -95,12 +95,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
@@ -167249,7 +167249,7 @@ index 69c50e7..ec875fa 100644
struct ctl_table *ipv6_icmp_table;
int err;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
-index 6673965..16ec3de 100644
+index b2e61a0..bf47484 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -101,6 +101,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
@@ -167263,7 +167263,7 @@ index 6673965..16ec3de 100644
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
-@@ -1302,6 +1306,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+@@ -1304,6 +1308,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
reset:
@@ -167273,7 +167273,7 @@ index 6673965..16ec3de 100644
tcp_v6_send_reset(sk, skb);
discard:
if (opt_skb)
-@@ -1406,12 +1413,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+@@ -1408,12 +1415,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
th->source, th->dest, inet6_iif(skb),
&refcounted);
@@ -167296,7 +167296,7 @@ index 6673965..16ec3de 100644
if (sk->sk_state == TCP_NEW_SYN_RECV) {
struct request_sock *req = inet_reqsk(sk);
-@@ -1501,6 +1516,10 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+@@ -1503,6 +1518,10 @@ static int tcp_v6_rcv(struct sk_buff *skb)
bad_packet:
__TCP_INC_STATS(net, TCP_MIB_INERRS);
} else {
@@ -168259,7 +168259,7 @@ index 965f7e3..daa74100 100644
}
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
-index c0f0750..7f2e432 100644
+index ff750bb..6e9865d 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -641,7 +641,7 @@ static struct inet_protosw l2tp_ip_protosw = {
@@ -168695,7 +168695,7 @@ index 06019db..8b752f48 100644
/* defaults per 802.15.4-2011 */
wpan_dev->min_be = 3;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
-index 5b77377..7bd5994 100644
+index 1309e2c..6e543c6 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -873,7 +873,7 @@ static int mpls_dev_sysctl_register(struct net_device *dev,
@@ -168707,16 +168707,16 @@ index 5b77377..7bd5994 100644
int i;
table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
-@@ -956,7 +956,7 @@ static void mpls_ifdown(struct net_device *dev, int event)
- /* fall through */
+@@ -957,7 +957,7 @@ static void mpls_ifdown(struct net_device *dev, int event)
case NETDEV_CHANGE:
nh->nh_flags |= RTNH_F_LINKDOWN;
-- ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
-+ ACCESS_ONCE_RW(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
+ if (event != NETDEV_UNREGISTER)
+- ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
++ ACCESS_ONCE_RW(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
break;
}
if (event == NETDEV_UNREGISTER)
-@@ -994,7 +994,7 @@ static void mpls_ifup(struct net_device *dev, unsigned int nh_flags)
+@@ -995,7 +995,7 @@ static void mpls_ifup(struct net_device *dev, unsigned int nh_flags)
nh->nh_flags &= ~nh_flags;
} endfor_nexthops(rt);
@@ -168725,7 +168725,7 @@ index 5b77377..7bd5994 100644
}
}
-@@ -1621,7 +1621,7 @@ static int mpls_platform_labels(struct ctl_table *table, int write,
+@@ -1622,7 +1622,7 @@ static int mpls_platform_labels(struct ctl_table *table, int write,
struct net *net = table->data;
int platform_labels = net->mpls.platform_labels;
int ret;
@@ -168734,7 +168734,7 @@ index 5b77377..7bd5994 100644
.procname = table->procname,
.data = &platform_labels,
.maxlen = sizeof(int),
-@@ -1651,7 +1651,7 @@ static const struct ctl_table mpls_table[] = {
+@@ -1652,7 +1652,7 @@ static const struct ctl_table mpls_table[] = {
static int mpls_net_init(struct net *net)
{
@@ -169949,7 +169949,7 @@ index 7eb955e..479c9a6 100644
static int __init ovs_vxlan_tnl_init(void)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
-index 34de326..071ac96 100644
+index f2b04a7..44ba4de 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -278,7 +278,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
@@ -170004,7 +170004,7 @@ index 34de326..071ac96 100644
spin_unlock(&sk->sk_receive_queue.lock);
drop_n_restore:
-@@ -3867,7 +3867,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+@@ -3871,7 +3871,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
case PACKET_HDRLEN:
if (len > sizeof(int))
len = sizeof(int);
@@ -170013,7 +170013,7 @@ index 34de326..071ac96 100644
return -EFAULT;
switch (val) {
case TPACKET_V1:
-@@ -3902,9 +3902,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+@@ -3906,9 +3906,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
case PACKET_ROLLOVER_STATS:
if (!po->rollover)
return -EINVAL;
@@ -170026,7 +170026,7 @@ index 34de326..071ac96 100644
data = &rstats;
lv = sizeof(rstats);
break;
-@@ -3922,7 +3922,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+@@ -3926,7 +3926,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
len = lv;
if (put_user(len, optlen))
return -EFAULT;
diff --git a/4.9.16/4425_grsec_remove_EI_PAX.patch b/4.9.18/4425_grsec_remove_EI_PAX.patch
index 594598a..594598a 100644
--- a/4.9.16/4425_grsec_remove_EI_PAX.patch
+++ b/4.9.18/4425_grsec_remove_EI_PAX.patch
diff --git a/4.9.16/4426_default_XATTR_PAX_FLAGS.patch b/4.9.18/4426_default_XATTR_PAX_FLAGS.patch
index f7e97b5..f7e97b5 100644
--- a/4.9.16/4426_default_XATTR_PAX_FLAGS.patch
+++ b/4.9.18/4426_default_XATTR_PAX_FLAGS.patch
diff --git a/4.9.16/4427_force_XATTR_PAX_tmpfs.patch b/4.9.18/4427_force_XATTR_PAX_tmpfs.patch
index 3871139..3871139 100644
--- a/4.9.16/4427_force_XATTR_PAX_tmpfs.patch
+++ b/4.9.18/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/4.9.16/4430_grsec-remove-localversion-grsec.patch b/4.9.18/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/4.9.16/4430_grsec-remove-localversion-grsec.patch
+++ b/4.9.18/4430_grsec-remove-localversion-grsec.patch
diff --git a/4.9.16/4435_grsec-mute-warnings.patch b/4.9.18/4435_grsec-mute-warnings.patch
index 8929222..8929222 100644
--- a/4.9.16/4435_grsec-mute-warnings.patch
+++ b/4.9.18/4435_grsec-mute-warnings.patch
diff --git a/4.9.16/4440_grsec-remove-protected-paths.patch b/4.9.18/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/4.9.16/4440_grsec-remove-protected-paths.patch
+++ b/4.9.18/4440_grsec-remove-protected-paths.patch
diff --git a/4.9.16/4450_grsec-kconfig-default-gids.patch b/4.9.18/4450_grsec-kconfig-default-gids.patch
index cee6e27..cee6e27 100644
--- a/4.9.16/4450_grsec-kconfig-default-gids.patch
+++ b/4.9.18/4450_grsec-kconfig-default-gids.patch
diff --git a/4.9.16/4465_selinux-avc_audit-log-curr_ip.patch b/4.9.18/4465_selinux-avc_audit-log-curr_ip.patch
index 06a5294..06a5294 100644
--- a/4.9.16/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/4.9.18/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/4.9.16/4470_disable-compat_vdso.patch b/4.9.18/4470_disable-compat_vdso.patch
index a1401d8..a1401d8 100644
--- a/4.9.16/4470_disable-compat_vdso.patch
+++ b/4.9.18/4470_disable-compat_vdso.patch
diff --git a/4.9.16/4475_emutramp_default_on.patch b/4.9.18/4475_emutramp_default_on.patch
index feb8c7b..feb8c7b 100644
--- a/4.9.16/4475_emutramp_default_on.patch
+++ b/4.9.18/4475_emutramp_default_on.patch