author     Mike Pagano <mpagano@gentoo.org>  2017-11-05 13:49:52 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2017-11-05 13:49:52 -0500
commit     679479168a68ec71fc95e7a66dac25305220e45f (patch)
tree       3c4aed3ca420360740dd959c1ec657dbeef1bbdb
parent     For 3.10.X, include patch to validate the output buffer length for L2CAP conf... (diff)
Linux patch 3.10.108
-rw-r--r--  0000_README                   4
-rw-r--r--  1107_linux-3.10.108.patch  3467
2 files changed, 3471 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index e949f2e4..e964bd41 100644
--- a/0000_README
+++ b/0000_README
@@ -470,6 +470,10 @@ Patch: 1106_linux-3.10.107.patch
From: http://www.kernel.org
Desc: Linux 3.10.107
+Patch: 1107_linux-3.10.108.patch
+From: http://www.kernel.org
+Desc: Linux 3.10.108
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1107_linux-3.10.108.patch b/1107_linux-3.10.108.patch
new file mode 100644
index 00000000..f8e4e900
--- /dev/null
+++ b/1107_linux-3.10.108.patch
@@ -0,0 +1,3467 @@
+diff --git a/Makefile b/Makefile
+index 752b1c67daa0..924f98a4bc0f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,8 +1,8 @@
+ VERSION = 3
+ PATCHLEVEL = 10
+-SUBLEVEL = 107
++SUBLEVEL = 108
+ EXTRAVERSION =
+-NAME = TOSSUG Baby Fish
++NAME = END-OF-LIFE
+
+ # *DOCUMENTATION*
+ # To see a list of typical targets execute "make help"
+diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
+index e28a3e0eb3cb..582d8b61ce5c 100644
+--- a/arch/mips/include/asm/branch.h
++++ b/arch/mips/include/asm/branch.h
+@@ -44,10 +44,7 @@ static inline int compute_return_epc(struct pt_regs *regs)
+ return __microMIPS_compute_return_epc(regs);
+ if (cpu_has_mips16)
+ return __MIPS16e_compute_return_epc(regs);
+- return regs->cp0_epc;
+- }
+-
+- if (!delay_slot(regs)) {
++ } else if (!delay_slot(regs)) {
+ regs->cp0_epc += 4;
+ return 0;
+ }
+diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
+index 46c2ad0703a0..63b942f613c4 100644
+--- a/arch/mips/kernel/branch.c
++++ b/arch/mips/kernel/branch.c
+@@ -200,7 +200,7 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs)
+ *
+ * @regs: Pointer to pt_regs
+ * @insn: branch instruction to decode
+- * @returns: -EFAULT on error and forces SIGBUS, and on success
++ * @returns: -EFAULT on error and forces SIGILL, and on success
+ * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
+ * evaluating the branch.
+ */
+@@ -297,6 +297,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
+ /*
+ * These are unconditional and in j_format.
+ */
++ case jalx_op:
+ case jal_op:
+ regs->regs[31] = regs->cp0_epc + 8;
+ case j_op:
+@@ -436,8 +437,9 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
+ return ret;
+
+ sigill:
+- printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
+- force_sig(SIGBUS, current);
++ pr_info("%s: DSP branch but not DSP ASE - sending SIGILL.\n",
++ current->comm);
++ force_sig(SIGILL, current);
+ return -EFAULT;
+ }
+ EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
+diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
+index b79d13f95bf0..eb0f4dfb385c 100644
+--- a/arch/mips/kernel/syscall.c
++++ b/arch/mips/kernel/syscall.c
+@@ -140,7 +140,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
+ "1: ll %[old], (%[addr]) \n"
+ " move %[tmp], %[new] \n"
+ "2: sc %[tmp], (%[addr]) \n"
+- " bnez %[tmp], 4f \n"
++ " beqz %[tmp], 4f \n"
+ "3: \n"
+ " .subsection 2 \n"
+ "4: b 1b \n"
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index 3d492a823a55..dbddc9ccf270 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -2002,6 +2002,35 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ return 0;
+ }
+
++/*
++ * Emulate FPU instructions.
++ *
++ * If we use FPU hardware, then we have been typically called to handle
++ * an unimplemented operation, such as where an operand is a NaN or
++ * denormalized. In that case exit the emulation loop after a single
++ * iteration so as to let hardware execute any subsequent instructions.
++ *
++ * If we have no FPU hardware or it has been disabled, then continue
++ * emulating floating-point instructions until one of these conditions
++ * has occurred:
++ *
++ * - a non-FPU instruction has been encountered,
++ *
++ * - an attempt to emulate has ended with a signal,
++ *
++ * - the ISA mode has been switched.
++ *
++ * We need to terminate the emulation loop if we got switched to the
++ * MIPS16 mode, whether supported or not, so that we do not attempt
++ * to emulate a MIPS16 instruction as a regular MIPS FPU instruction.
++ * Similarly if we got switched to the microMIPS mode and only the
++ * regular MIPS mode is supported, so that we do not attempt to emulate
++ * a microMIPS instruction as a regular MIPS FPU instruction. Or if
++ * we got switched to the regular MIPS mode and only the microMIPS mode
++ * is supported, so that we do not attempt to emulate a regular MIPS
++ * instruction that should cause an Address Error exception instead.
++ * For simplicity we always terminate upon an ISA mode switch.
++ */
+ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ int has_fpu, void *__user *fault_addr)
+ {
+@@ -2093,6 +2122,15 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ break;
+ if (sig)
+ break;
++ /*
++ * We have to check for the ISA bit explicitly here,
++ * because `get_isa16_mode' may return 0 if support
++ * for code compression has been globally disabled,
++ * or otherwise we may produce the wrong signal or
++ * even proceed successfully where we must not.
++ */
++ if ((xcp->cp0_epc ^ prevepc) & 0x1)
++ break;
+
+ cond_resched();
+ } while (xcp->cp0_epc > prevepc);
+diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
+index e3b1d41c89be..84bcdfa410ff 100644
+--- a/arch/powerpc/include/asm/atomic.h
++++ b/arch/powerpc/include/asm/atomic.h
+@@ -501,7 +501,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * Atomically increments @v by 1, so long as @v is non-zero.
+ * Returns non-zero if @v was non-zero, and zero otherwise.
+ */
+-static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
++static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
+ {
+ long t1, t2;
+
+@@ -520,7 +520,7 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
+ : "r" (&v->counter)
+ : "cc", "xer", "memory");
+
+- return t1;
++ return t1 != 0;
+ }
+
+ #endif /* __powerpc64__ */
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 469d7715d6aa..954168be7878 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -1136,7 +1136,7 @@
+ " .llong 0\n" \
+ " .llong 0\n" \
+ ".previous" \
+- : "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG)); rval;})
++ : "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG) : "cr0"); rval;})
+ #else
+ #define mftb() ({unsigned long rval; \
+ asm volatile("mftb %0" : "=r" (rval)); rval;})
+diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
+index 11f5b03a0b06..762c10d46d66 100644
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -529,6 +529,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+ regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
+ #endif
+
++ /*
++ * jprobes use jprobe_return() which skips the normal return
++ * path of the function, and this messes up the accounting of the
++ * function graph tracer.
++ *
++ * Pause function graph tracing while performing the jprobe function.
++ */
++ pause_graph_tracing();
++
+ return 1;
+ }
+
+@@ -551,6 +560,8 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+ * saved regs...
+ */
+ memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
++ /* It's OK to start function graph tracing again */
++ unpause_graph_tracing();
+ preempt_enable_no_resched();
+ return 1;
+ }
+diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
+index 08490ecc465e..23da15ff7796 100644
+--- a/arch/powerpc/lib/sstep.c
++++ b/arch/powerpc/lib/sstep.c
+@@ -863,6 +863,19 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
+ goto instr_done;
+ #endif
+ case 19: /* mfcr */
++ if ((instr >> 20) & 1) {
++ imm = 0xf0000000UL;
++ for (sh = 0; sh < 8; ++sh) {
++ if (instr & (0x80000 >> sh)) {
++ regs->gpr[rd] = regs->ccr & imm;
++ break;
++ }
++ imm >>= 4;
++ }
++
++ goto instr_done;
++ }
++
+ regs->gpr[rd] = regs->ccr;
+ regs->gpr[rd] &= 0xffffffffUL;
+ goto instr_done;
+diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
+index d8e8eefbe24c..86ec87dc1f57 100644
+--- a/arch/x86/include/asm/io.h
++++ b/arch/x86/include/asm/io.h
+@@ -296,13 +296,13 @@ static inline unsigned type in##bwl##_p(int port) \
+ static inline void outs##bwl(int port, const void *addr, unsigned long count) \
+ { \
+ asm volatile("rep; outs" #bwl \
+- : "+S"(addr), "+c"(count) : "d"(port)); \
++ : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
+ } \
+ \
+ static inline void ins##bwl(int port, void *addr, unsigned long count) \
+ { \
+ asm volatile("rep; ins" #bwl \
+- : "+D"(addr), "+c"(count) : "d"(port)); \
++ : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
+ }
+
+ BUILDIO(b, b, char)
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 3cd8bfc3c4b6..bc37ddeaa627 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1581,8 +1581,10 @@ void __init enable_IR_x2apic(void)
+ int ret, x2apic_enabled = 0;
+ int hardware_init_ret;
+
++#ifdef CONFIG_X86_IO_APIC
+ if (skip_ioapic_setup)
+ return;
++#endif
+
+ /* Make sure irq_remap_ops are initialized */
+ setup_irq_remapping_ops();
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index c4ff2a916139..c95ece93f359 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -159,8 +159,8 @@ void kvm_async_pf_task_wait(u32 token)
+ */
+ rcu_irq_exit();
+ native_safe_halt();
+- rcu_irq_enter();
+ local_irq_disable();
++ rcu_irq_enter();
+ }
+ }
+ if (!n.halted)
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index d9016e4a80f9..be1389527284 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8014,7 +8014,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ * (KVM doesn't change it)- no reason to call set_cr4_guest_host_mask();
+ */
+ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
+- kvm_set_cr4(vcpu, vmcs12->host_cr4);
++ vmx_set_cr4(vcpu, vmcs12->host_cr4);
+
+ /* shadow page tables on either EPT or shadow page tables */
+ kvm_set_cr3(vcpu, vmcs12->host_cr3);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index b70b67bde90d..3d316cafff91 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4596,6 +4596,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
+
+ if (var.unusable) {
+ memset(desc, 0, sizeof(*desc));
++ if (base3)
++ *base3 = 0;
+ return false;
+ }
+
+diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
+index 73a6d7395bd3..58e7e9d4bbc7 100644
+--- a/arch/x86/mm/numa_32.c
++++ b/arch/x86/mm/numa_32.c
+@@ -100,5 +100,6 @@ void __init initmem_init(void)
+ printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
+ (ulong) pfn_to_kaddr(highstart_pfn));
+
++ __vmalloc_start_set = true;
+ setup_bootmem_allocator();
+ }
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index ea05c531db26..8e2747401d34 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -92,8 +92,10 @@ static int skcipher_alloc_sgl(struct sock *sk)
+ sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
+ sgl->cur = 0;
+
+- if (sg)
++ if (sg) {
+ scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
++ sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
++ }
+
+ list_add_tail(&sgl->list, &ctx->tsgl);
+ }
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index 070b843c37ee..8cff7cae7331 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -988,6 +988,7 @@ static int ghes_remove(struct platform_device *ghes_dev)
+ if (list_empty(&ghes_sci))
+ unregister_acpi_hed_notifier(&ghes_notifier_sci);
+ mutex_unlock(&ghes_list_mutex);
++ synchronize_rcu();
+ break;
+ case ACPI_HEST_NOTIFY_NMI:
+ mutex_lock(&ghes_list_mutex);
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index f3f0801a0e81..aa4e36b3a599 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -2794,10 +2794,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
+ static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
+ {
+ if (!sata_pmp_attached(ap)) {
+- if (likely(devno < ata_link_max_devices(&ap->link)))
++ if (likely(devno >= 0 &&
++ devno < ata_link_max_devices(&ap->link)))
+ return &ap->link.device[devno];
+ } else {
+- if (likely(devno < ap->nr_pmp_links))
++ if (likely(devno >= 0 &&
++ devno < ap->nr_pmp_links))
+ return &ap->pmp_link[devno].device[0];
+ }
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 7072404c8b6d..8d73f999f40f 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1692,7 +1692,7 @@ int pm_genpd_add_subdomain_names(const char *master_name,
+ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *subdomain)
+ {
+- struct gpd_link *link;
++ struct gpd_link *l, *link;
+ int ret = -EINVAL;
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
+@@ -1701,7 +1701,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ start:
+ genpd_acquire_lock(genpd);
+
+- list_for_each_entry(link, &genpd->master_links, master_node) {
++ list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
+ if (link->slave != subdomain)
+ continue;
+
+diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
+index f97cb3d8c5a2..f34e8191665f 100644
+--- a/drivers/cpufreq/cpufreq_conservative.c
++++ b/drivers/cpufreq/cpufreq_conservative.c
+@@ -212,8 +212,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+- /* cannot be lower than 11 otherwise freq will not fall */
+- if (ret != 1 || input < 11 || input > 100 ||
++ /* cannot be lower than 1 otherwise freq will not fall */
++ if (ret != 1 || input < 1 || input > 100 ||
+ input >= cs_tuners->up_threshold)
+ return -EINVAL;
+
+diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
+index 4f1881eee3f1..6da4fbd4eef4 100644
+--- a/drivers/cpufreq/s3c2416-cpufreq.c
++++ b/drivers/cpufreq/s3c2416-cpufreq.c
+@@ -434,7 +434,6 @@ static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
+ rate = clk_get_rate(s3c_freq->hclk);
+ if (rate < 133 * 1000 * 1000) {
+ pr_err("cpufreq: HCLK not at 133MHz\n");
+- clk_put(s3c_freq->hclk);
+ ret = -EINVAL;
+ goto err_armclk;
+ }
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index e9d8b235f68d..34815a74d900 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -476,7 +476,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+ ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+ if (!ret) {
+ /* in progress */
+- wait_for_completion_interruptible(&result.completion);
++ wait_for_completion(&result.completion);
+ ret = result.err;
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ",
+diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
+index 87138d2adb5f..fd6bc0bc56c5 100644
+--- a/drivers/crypto/caam/key_gen.c
++++ b/drivers/crypto/caam/key_gen.c
+@@ -107,7 +107,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+ ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+ if (!ret) {
+ /* in progress */
+- wait_for_completion_interruptible(&result.completion);
++ wait_for_completion(&result.completion);
+ ret = result.err;
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 057d894eee66..6e5ba44dfaac 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -623,7 +623,7 @@ static void talitos_unregister_rng(struct device *dev)
+ * crypto alg
+ */
+ #define TALITOS_CRA_PRIORITY 3000
+-#define TALITOS_MAX_KEY_SIZE 96
++#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
+ #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+
+ #define MD5_BLOCK_SIZE 64
+@@ -1380,6 +1380,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+ {
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
++ if (keylen > TALITOS_MAX_KEY_SIZE) {
++ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++ }
++
+ memcpy(&ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+index 89664933861f..a3a70283bded 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+@@ -368,6 +368,8 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+ return fifo_state->static_buffer;
+ else {
+ fifo_state->dynamic_buffer = vmalloc(bytes);
++ if (!fifo_state->dynamic_buffer)
++ goto out_err;
+ return fifo_state->dynamic_buffer;
+ }
+ }
+diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
+index 5f5f20f42231..c61f3e7aa353 100644
+--- a/drivers/infiniband/hw/qib/qib_iba7322.c
++++ b/drivers/infiniband/hw/qib/qib_iba7322.c
+@@ -6670,7 +6670,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
+ unsigned long flags;
+
+ while (wait) {
+- unsigned long shadow;
++ unsigned long shadow = 0;
+ int cstart, previ = -1;
+
+ /*
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+index 8292554bccb5..7604ae54d7bc 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+@@ -165,11 +165,11 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
+ out:
+ mutex_unlock(&ppriv->vlan_mutex);
+
++ rtnl_unlock();
++
+ if (result)
+ free_netdev(priv->dev);
+
+- rtnl_unlock();
+-
+ return result;
+ }
+
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 0e7cd14bf7bb..88ba9649bd1f 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -3402,6 +3402,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+ mutex_unlock(&domain->api_lock);
+
+ domain_flush_tlb_pde(domain);
++ domain_flush_complete(domain);
+
+ return unmap_size;
+ }
+diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
+index 37470ee7c850..4d874427101d 100644
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -1806,6 +1806,11 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ long pages;
+ struct bitmap_page *new_bp;
+
++ if (bitmap->storage.file && !init) {
++ pr_info("md: cannot resize file-based bitmap\n");
++ return -EINVAL;
++ }
++
+ if (chunksize == 0) {
+ /* If there is enough space, leave the chunk size unchanged,
+ * else increase by factor of two until there is enough space.
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 7c45286e2662..95eb53f68413 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1898,7 +1898,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
+ }
+ sb = page_address(rdev->sb_page);
+ sb->data_size = cpu_to_le64(num_sectors);
+- sb->super_offset = rdev->sb_start;
++ sb->super_offset = cpu_to_le64(rdev->sb_start);
+ sb->sb_csum = calc_sb_1_csum(sb);
+ md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ rdev->sb_page);
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index f53f4f895502..b4de9c3e5ca4 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1569,11 +1569,24 @@ retry_write:
+ mbio->bi_private = r10_bio;
+
+ atomic_inc(&r10_bio->remaining);
++
++ cb = blk_check_plugged(raid10_unplug, mddev,
++ sizeof(*plug));
++ if (cb)
++ plug = container_of(cb, struct raid10_plug_cb,
++ cb);
++ else
++ plug = NULL;
+ spin_lock_irqsave(&conf->device_lock, flags);
+- bio_list_add(&conf->pending_bio_list, mbio);
+- conf->pending_count++;
++ if (plug) {
++ bio_list_add(&plug->pending, mbio);
++ plug->pending_cnt++;
++ } else {
++ bio_list_add(&conf->pending_bio_list, mbio);
++ conf->pending_count++;
++ }
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+- if (!mddev_check_plugged(mddev))
++ if (!plug)
+ md_wakeup_thread(mddev->thread);
+ }
+ }
+diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
+index 93609091cb23..9dad717fb78c 100644
+--- a/drivers/media/platform/davinci/vpfe_capture.c
++++ b/drivers/media/platform/davinci/vpfe_capture.c
+@@ -1706,27 +1706,9 @@ static long vpfe_param_handler(struct file *file, void *priv,
+
+ switch (cmd) {
+ case VPFE_CMD_S_CCDC_RAW_PARAMS:
++ ret = -EINVAL;
+ v4l2_warn(&vpfe_dev->v4l2_dev,
+- "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
+- if (ccdc_dev->hw_ops.set_params) {
+- ret = ccdc_dev->hw_ops.set_params(param);
+- if (ret) {
+- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+- "Error setting parameters in CCDC\n");
+- goto unlock_out;
+- }
+- ret = vpfe_get_ccdc_image_format(vpfe_dev,
+- &vpfe_dev->fmt);
+- if (ret < 0) {
+- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+- "Invalid image format at CCDC\n");
+- goto unlock_out;
+- }
+- } else {
+- ret = -EINVAL;
+- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+- "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
+- }
++ "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
+ break;
+ default:
+ ret = -ENOTTY;
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index 72e3fa652481..257bb7a6ff54 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -1530,7 +1530,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
+ if (kc == KEY_KEYBOARD && !ictx->release_code) {
+ ictx->last_keycode = kc;
+ if (!nomouse) {
+- ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1;
++ ictx->pad_mouse = !ictx->pad_mouse;
+ dev_dbg(dev, "toggling to %s mode\n",
+ ictx->pad_mouse ? "mouse" : "keyboard");
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
+index 9515f3a68f8f..122815e1cb65 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
+@@ -123,15 +123,10 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
+ memset(&tvdata,0,sizeof(tvdata));
+
+ eeprom = pvr2_eeprom_fetch(hdw);
+- if (!eeprom) return -EINVAL;
+-
+- {
+- struct i2c_client fake_client;
+- /* Newer version expects a useless client interface */
+- fake_client.addr = hdw->eeprom_addr;
+- fake_client.adapter = &hdw->i2c_adap;
+- tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom);
+- }
++ if (!eeprom)
++ return -EINVAL;
++
++ tveeprom_hauppauge_analog(NULL, &tvdata, eeprom);
+
+ trace_eeprom("eeprom assumed v4l tveeprom module");
+ trace_eeprom("eeprom direct call results:");
+diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
+index c7576a503e5b..2afadd00d3fd 100644
+--- a/drivers/mfd/omap-usb-tll.c
++++ b/drivers/mfd/omap-usb-tll.c
+@@ -380,8 +380,8 @@ int omap_tll_init(struct usbhs_omap_platform_data *pdata)
+ * and use SDR Mode
+ */
+ reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
+- | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
+ | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
++ reg |= OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF;
+ } else if (pdata->port_mode[i] ==
+ OMAP_EHCI_PORT_MODE_HSIC) {
+ /*
+diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c
+index 5484301d57d9..3dc61ea7dc64 100644
+--- a/drivers/misc/c2port/c2port-duramar2150.c
++++ b/drivers/misc/c2port/c2port-duramar2150.c
+@@ -129,8 +129,8 @@ static int __init duramar2150_c2port_init(void)
+
+ duramar2150_c2port_dev = c2port_device_register("uc",
+ &duramar2150_c2port_ops, NULL);
+- if (!duramar2150_c2port_dev) {
+- ret = -ENODEV;
++ if (IS_ERR(duramar2150_c2port_dev)) {
++ ret = PTR_ERR(duramar2150_c2port_dev);
+ goto free_region;
+ }
+
+diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
+index d5455c760618..503f37850fb3 100644
+--- a/drivers/net/can/usb/esd_usb2.c
++++ b/drivers/net/can/usb/esd_usb2.c
+@@ -335,7 +335,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
+ }
+
+ cf->can_id = id & ESD_IDMASK;
+- cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
++ cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
+
+ if (id & ESD_EXTID)
+ cf->can_id |= CAN_EFF_FLAG;
+diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
+index 5409fe876a44..69bc0a0eb423 100644
+--- a/drivers/net/ethernet/korina.c
++++ b/drivers/net/ethernet/korina.c
+@@ -905,10 +905,10 @@ static void korina_restart_task(struct work_struct *work)
+ DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
+ &lp->rx_dma_regs->dmasm);
+
+- korina_free_ring(dev);
+-
+ napi_disable(&lp->napi);
+
++ korina_free_ring(dev);
++
+ if (korina_init(dev) < 0) {
+ printk(KERN_ERR "%s: cannot restart device\n", dev->name);
+ return;
+@@ -1069,12 +1069,12 @@ static int korina_close(struct net_device *dev)
+ tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
+ writel(tmp, &lp->rx_dma_regs->dmasm);
+
+- korina_free_ring(dev);
+-
+ napi_disable(&lp->napi);
+
+ cancel_work_sync(&lp->restart_task);
+
++ korina_free_ring(dev);
++
+ free_irq(lp->rx_irq, dev);
+ free_irq(lp->tx_irq, dev);
+ free_irq(lp->ovr_irq, dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
+index 31d02649be41..d22482b49744 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
++++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
+@@ -113,8 +113,13 @@ static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
+ if (!buf)
+ return -ENOMEM;
+
++ if (offset_in_page(buf)) {
++ dma_free_coherent(dev, PAGE_SIZE << order,
++ buf, sg_dma_address(mem));
++ return -ENOMEM;
++ }
++
+ sg_set_buf(mem, buf, PAGE_SIZE << order);
+- BUG_ON(mem->offset);
+ sg_dma_len(mem) = PAGE_SIZE << order;
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index 3fb2643d05b4..8c58001aff1d 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -511,8 +511,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
+ return -ENOSYS;
+ }
+
+- mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
+-
+ dev->caps.hca_core_clock = hca_param.hca_core_clock;
+
+ memset(&dev_cap, 0, sizeof(dev_cap));
+diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+index 10093f0c4c0f..00a80587b47d 100644
+--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
++++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+@@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
+ seg_hdr->cookie = MPI_COREDUMP_COOKIE;
+ seg_hdr->segNum = seg_number;
+ seg_hdr->segSize = seg_size;
+- memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
++ strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+ }
+
+ /*
+diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+index b7268b3dae77..5f5f84ad0697 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+@@ -398,7 +398,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
+ *
+ * Return: Total number of bytes received
+ */
+-static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
++static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
+ {
+ void __iomem *addr;
+ u16 length, proto_type;
+@@ -438,7 +438,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
+
+ /* Check if received ethernet frame is a raw ethernet frame
+ * or an IP packet or an ARP packet */
+- if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
++ if (proto_type > ETH_DATA_LEN) {
+
+ if (proto_type == ETH_P_IP) {
+ length = ((ntohl(in_be32(addr +
+@@ -446,6 +446,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
+ XEL_RXBUFF_OFFSET)) >>
+ XEL_HEADER_SHIFT) &
+ XEL_RPLR_LENGTH_MASK);
++ length = min_t(u16, length, ETH_DATA_LEN);
+ length += ETH_HLEN + ETH_FCS_LEN;
+
+ } else if (proto_type == ETH_P_ARP)
+@@ -458,6 +459,9 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
+ /* Use the length in the frame, plus the header and trailer */
+ length = proto_type + ETH_HLEN + ETH_FCS_LEN;
+
++ if (WARN_ON(length > maxlen))
++ length = maxlen;
++
+ /* Read from the EmacLite device */
+ xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
+ data, length);
+@@ -632,7 +636,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
+
+ skb_reserve(skb, 2);
+
+- len = xemaclite_recv_data(lp, (u8 *) skb->data);
++ len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
+
+ if (!len) {
+ dev->stats.rx_errors++;
+diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
+index 202fe1ff1987..b23f36a5b0dd 100644
+--- a/drivers/net/phy/marvell.c
++++ b/drivers/net/phy/marvell.c
+@@ -656,8 +656,6 @@ static int marvell_read_status(struct phy_device *phydev)
+ if (adv < 0)
+ return adv;
+
+- lpa &= adv;
+-
+ if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 5225d4321e7c..0a3ad7ba2bea 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2121,8 +2121,10 @@ start_again:
+
+ hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
+ TEAM_CMD_OPTIONS_GET);
+- if (!hdr)
++ if (!hdr) {
++ nlmsg_free(skb);
+ return -EMSGSIZE;
++ }
+
+ if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
+ goto nla_put_failure;
+@@ -2389,8 +2391,10 @@ start_again:
+
+ hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
+ TEAM_CMD_PORT_LIST_GET);
+- if (!hdr)
++ if (!hdr) {
++ nlmsg_free(skb);
+ return -EMSGSIZE;
++ }
+
+ if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
+ goto nla_put_failure;
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+index 2c524305589f..8afb60925332 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+@@ -4019,6 +4019,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
+ GFP_KERNEL);
+ } else if (ieee80211_is_action(mgmt->frame_control)) {
++ if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
++ brcmf_err("invalid action frame length\n");
++ err = -EINVAL;
++ goto exit;
++ }
+ af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
+ if (af_params == NULL) {
+ brcmf_err("unable to allocate frame\n");
+diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
+index bf13e73ecabc..0f3581b7a2e4 100644
+--- a/drivers/s390/scsi/zfcp_dbf.c
++++ b/drivers/s390/scsi/zfcp_dbf.c
+@@ -556,19 +556,32 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
+
+ if (fsf) {
+ rec->fsf_req_id = fsf->req_id;
++ rec->pl_len = FCP_RESP_WITH_EXT;
+ fcp_rsp = (struct fcp_resp_with_ext *)
+ &(fsf->qtcb->bottom.io.fcp_rsp);
++ /* mandatory parts of FCP_RSP IU in this SCSI record */
+ memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
+ if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
+ fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
+ rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
++ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
+ }
+ if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
+- rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
+- (u16)ZFCP_DBF_PAY_MAX_REC);
+- zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
+- "fcp_sns", fsf->req_id);
++ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
+ }
++ /* complete FCP_RSP IU in associated PAYload record
++ * but only if there are optional parts
++ */
++ if (fcp_rsp->resp.fr_flags != 0)
++ zfcp_dbf_pl_write(
++ dbf, fcp_rsp,
++ /* at least one full PAY record
++ * but not beyond hardware response field
++ */
++ min_t(u16, max_t(u16, rec->pl_len,
++ ZFCP_DBF_PAY_MAX_REC),
++ FSF_FCP_RSP_SIZE),
++ "fcp_riu", fsf->req_id);
+ }
+
+ debug_event(dbf->scsi, level, rec, sizeof(*rec));
+diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
+index a8165f142550..712a8484a7b3 100644
+--- a/drivers/s390/scsi/zfcp_dbf.h
++++ b/drivers/s390/scsi/zfcp_dbf.h
+@@ -323,7 +323,11 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
+ {
+ struct fsf_qtcb *qtcb = req->qtcb;
+
+- if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
++ if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
++ ZFCP_STATUS_FSFREQ_ERROR))) {
++ zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);
++
++ } else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+ (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
+ zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
+
+diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
+index b1d2024ed513..c2e40e10b293 100644
+--- a/drivers/s390/scsi/zfcp_fc.h
++++ b/drivers/s390/scsi/zfcp_fc.h
+@@ -4,7 +4,7 @@
+ * Fibre Channel related definitions and inline functions for the zfcp
+ * device driver
+ *
+- * Copyright IBM Corp. 2009
++ * Copyright IBM Corp. 2009, 2017
+ */
+
+ #ifndef ZFCP_FC_H
+@@ -291,6 +291,10 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
+ !(rsp_flags & FCP_SNS_LEN_VAL) &&
+ fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+ set_host_byte(scsi, DID_ERROR);
++ } else if (unlikely(rsp_flags & FCP_RESID_OVER)) {
++ /* FCP_DL was not sufficient for SCSI data length */
++ if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
++ set_host_byte(scsi, DID_ERROR);
+ }
+ }
+
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index ad5718401eab..d27b49194d68 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -2286,7 +2286,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
+ fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
+ zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
+
+- if (scsi_prot_sg_count(scsi_cmnd)) {
++ if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
++ scsi_prot_sg_count(scsi_cmnd)) {
+ zfcp_qdio_set_data_div(qdio, &req->qdio_req,
+ scsi_prot_sg_count(scsi_cmnd));
+ retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index 66c37e77ac7c..8ec101a4a5eb 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -294,8 +294,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
+
+ zfcp_erp_wait(adapter);
+ ret = fc_block_scsi_eh(scpnt);
+- if (ret)
++ if (ret) {
++ zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags);
+ return ret;
++ }
+
+ if (!(atomic_read(&adapter->status) &
+ ZFCP_STATUS_COMMON_RUNNING)) {
+@@ -303,8 +305,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
+ return SUCCESS;
+ }
+ }
+- if (!fsf_req)
++ if (!fsf_req) {
++ zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags);
+ return FAILED;
++ }
+
+ wait_for_completion(&fsf_req->completion);
+
+diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
+index e1c8be06de9d..f94fcda1285d 100644
+--- a/drivers/scsi/device_handler/scsi_dh_emc.c
++++ b/drivers/scsi/device_handler/scsi_dh_emc.c
+@@ -464,7 +464,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
+ static int clariion_std_inquiry(struct scsi_device *sdev,
+ struct clariion_dh_data *csdev)
+ {
+- int err;
++ int err = SCSI_DH_OK;
+ char *sp_model;
+
+ err = send_inquiry_cmd(sdev, 0, csdev);
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index bf60c631abb5..3b0f02c146d5 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -299,6 +299,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
+ return -EINVAL;
+ if (start > ha->optrom_size)
+ return -EINVAL;
++ if (size > ha->optrom_size - start)
++ size = ha->optrom_size - start;
+
+ switch (val) {
+ case 0:
+@@ -320,8 +322,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
+ return -EINVAL;
+
+ ha->optrom_region_start = start;
+- ha->optrom_region_size = start + size > ha->optrom_size ?
+- ha->optrom_size - start : size;
++ ha->optrom_region_size = start + size;
+
+ ha->optrom_state = QLA_SREADING;
+ ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+@@ -388,8 +389,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
+ }
+
+ ha->optrom_region_start = start;
+- ha->optrom_region_size = start + size > ha->optrom_size ?
+- ha->optrom_size - start : size;
++ ha->optrom_region_size = start + size;
+
+ ha->optrom_state = QLA_SWRITING;
+ ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 40fe8a77236a..c11b82e70956 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -2342,10 +2342,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ if (mem_only) {
+ if (pci_enable_device_mem(pdev))
+- goto probe_out;
++ return ret;
+ } else {
+ if (pci_enable_device(pdev))
+- goto probe_out;
++ return ret;
+ }
+
+ /* This may fail but that's ok */
+@@ -2355,7 +2355,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ if (!ha) {
+ ql_log_pci(ql_log_fatal, pdev, 0x0009,
+ "Unable to allocate memory for ha.\n");
+- goto probe_out;
++ goto disable_device;
+ }
+ ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
+ "Memory allocated for ha=%p.\n", ha);
+@@ -2899,7 +2899,7 @@ iospace_config_failed:
+ kfree(ha);
+ ha = NULL;
+
+-probe_out:
++disable_device:
+ pci_disable_device(pdev);
+ return ret;
+ }
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index 0ae406a47507..2aba2f75fb87 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -2557,15 +2557,13 @@ static int __init comedi_init(void)
+
+ comedi_class->dev_attrs = comedi_dev_attrs;
+
+- /* XXX requires /proc interface */
+- comedi_proc_init();
+-
+ /* create devices files for legacy/manual use */
+ for (i = 0; i < comedi_num_legacy_minors; i++) {
+ struct comedi_device *dev;
+ dev = comedi_alloc_board_minor(NULL);
+ if (IS_ERR(dev)) {
+ comedi_cleanup_board_minors();
++ class_destroy(comedi_class);
+ cdev_del(&comedi_cdev);
+ unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
+ COMEDI_NUM_MINORS);
+@@ -2576,6 +2574,9 @@ static int __init comedi_init(void)
+ }
+ }
+
++ /* XXX requires /proc interface */
++ comedi_proc_init();
++
+ return 0;
+ }
+ module_init(comedi_init);
+diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
+index 0d3356d4b7d2..3c0b16fe172b 100644
+--- a/drivers/staging/iio/resolver/ad2s1210.c
++++ b/drivers/staging/iio/resolver/ad2s1210.c
+@@ -477,7 +477,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
+ long m)
+ {
+ struct ad2s1210_state *st = iio_priv(indio_dev);
+- bool negative;
++ u16 negative;
+ int ret = 0;
+ u16 pos;
+ s16 vel;
+diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
+index 04c775cb3e65..179f7810d398 100644
+--- a/drivers/target/target_core_fabric_configfs.c
++++ b/drivers/target/target_core_fabric_configfs.c
+@@ -84,6 +84,11 @@ static int target_fabric_mappedlun_link(
+ "_tpg does not exist\n");
+ return -EINVAL;
+ }
++ if (lun->lun_shutdown) {
++ pr_err("Unable to create mappedlun symlink because"
++ " lun->lun_shutdown=true\n");
++ return -EINVAL;
++ }
+ se_tpg = lun->lun_sep->sep_tpg;
+
+ nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index 8572207e3d4d..bc3092f032b0 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -839,6 +839,8 @@ static void core_tpg_shutdown_lun(
+ struct se_portal_group *tpg,
+ struct se_lun *lun)
+ {
++ lun->lun_shutdown = true;
++
+ core_clear_lun_from_tpg(lun, tpg);
+ transport_clear_lun_from_sessions(lun);
+ }
+@@ -868,6 +870,7 @@ struct se_lun *core_tpg_pre_dellun(
+ spin_unlock(&tpg->tpg_lun_lock);
+ return ERR_PTR(-ENODEV);
+ }
++ lun->lun_shutdown = false;
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ return lun;
+diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
+index 7d199c8e1a75..c9635f1c7108 100644
+--- a/drivers/tty/serial/efm32-uart.c
++++ b/drivers/tty/serial/efm32-uart.c
+@@ -27,6 +27,7 @@
+ #define UARTn_FRAME 0x04
+ #define UARTn_FRAME_DATABITS__MASK 0x000f
+ #define UARTn_FRAME_DATABITS(n) ((n) - 3)
++#define UARTn_FRAME_PARITY__MASK 0x0300
+ #define UARTn_FRAME_PARITY_NONE 0x0000
+ #define UARTn_FRAME_PARITY_EVEN 0x0200
+ #define UARTn_FRAME_PARITY_ODD 0x0300
+@@ -578,12 +579,16 @@ static void efm32_uart_console_get_options(struct efm32_uart_port *efm_port,
+ 16 * (4 + (clkdiv >> 6)));
+
+ frame = efm32_uart_read32(efm_port, UARTn_FRAME);
+- if (frame & UARTn_FRAME_PARITY_ODD)
++ switch (frame & UARTn_FRAME_PARITY__MASK) {
++ case UARTn_FRAME_PARITY_ODD:
+ *parity = 'o';
+- else if (frame & UARTn_FRAME_PARITY_EVEN)
++ break;
++ case UARTn_FRAME_PARITY_EVEN:
+ *parity = 'e';
+- else
++ break;
++ default:
+ *parity = 'n';
++ }
+
+ *bits = (frame & UARTn_FRAME_DATABITS__MASK) -
+ UARTn_FRAME_DATABITS(4) + 4;
+diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
+index 8b1534c424af..be3dc751dfbb 100644
+--- a/drivers/tty/serial/ifx6x60.c
++++ b/drivers/tty/serial/ifx6x60.c
+@@ -1379,9 +1379,9 @@ static struct spi_driver ifx_spi_driver = {
+ static void __exit ifx_spi_exit(void)
+ {
+ /* unregister */
++ spi_unregister_driver((void *)&ifx_spi_driver);
+ tty_unregister_driver(tty_drv);
+ put_tty_driver(tty_drv);
+- spi_unregister_driver((void *)&ifx_spi_driver);
+ unregister_reboot_notifier(&ifx_modem_reboot_notifier_block);
+ }
+
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 010ec70d59fb..3390a39f5a78 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -2601,13 +2601,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
+ * related to the kernel should not use this.
+ */
+ data = vt_get_shift_state();
+- ret = __put_user(data, p);
++ ret = put_user(data, p);
+ break;
+ case TIOCL_GETMOUSEREPORTING:
+ console_lock(); /* May be overkill */
+ data = mouse_reporting();
+ console_unlock();
+- ret = __put_user(data, p);
++ ret = put_user(data, p);
+ break;
+ case TIOCL_SETVESABLANK:
+ console_lock();
+@@ -2616,7 +2616,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
+ break;
+ case TIOCL_GETKMSGREDIRECT:
+ data = vt_get_kmsg_redirect();
+- ret = __put_user(data, p);
++ ret = put_user(data, p);
+ break;
+ case TIOCL_SETKMSGREDIRECT:
+ if (!capable(CAP_SYS_ADMIN)) {
+diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
+index 36a7063a6cba..5a38ca87f406 100644
+--- a/drivers/usb/chipidea/debug.c
++++ b/drivers/usb/chipidea/debug.c
+@@ -203,7 +203,8 @@ static int ci_role_show(struct seq_file *s, void *data)
+ {
+ struct ci13xxx *ci = s->private;
+
+- seq_printf(s, "%s\n", ci_role(ci)->name);
++ if (ci->role != CI_ROLE_END)
++ seq_printf(s, "%s\n", ci_role(ci)->name);
+
+ return 0;
+ }
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index a9142a46ae82..2cbb26c20082 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -1522,6 +1522,8 @@ static DEVICE_ATTR(suspended, 0444, composite_show_suspended, NULL);
+ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
+ {
+ struct usb_composite_dev *cdev = get_gadget_data(gadget);
++ struct usb_gadget_strings *gstr = cdev->driver->strings[0];
++ struct usb_string *dev_str = gstr->strings;
+
+ /* composite_disconnect() must already have been called
+ * by the underlying peripheral controller driver!
+@@ -1541,6 +1543,9 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
+
+ composite_dev_cleanup(cdev);
+
++ if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer)
++ dev_str[USB_GADGET_MANUFACTURER_IDX].s = "";
++
+ kfree(cdev->def_manufacturer);
+ kfree(cdev);
+ set_gadget_data(gadget, NULL);
+diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
+index 6656dfda5665..0fa139081b16 100644
+--- a/drivers/usb/host/r8a66597-hcd.c
++++ b/drivers/usb/host/r8a66597-hcd.c
+@@ -1270,7 +1270,7 @@ static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
+ time = 30;
+ break;
+ default:
+- time = 300;
++ time = 50;
+ break;
+ }
+
+@@ -1786,6 +1786,7 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
+ pipe = td->pipe;
+ pipe_stop(r8a66597, pipe);
+
++ /* Select a different address or endpoint */
+ new_td = td;
+ do {
+ list_move_tail(&new_td->queue,
+@@ -1795,7 +1796,8 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
+ new_td = td;
+ break;
+ }
+- } while (td != new_td && td->address == new_td->address);
++ } while (td != new_td && td->address == new_td->address &&
++ td->pipe->info.epnum == new_td->pipe->info.epnum);
+
+ start_transfer(r8a66597, new_td);
+
+diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
+index cfd205036aba..a4b027342589 100644
+--- a/drivers/usb/renesas_usbhs/common.c
++++ b/drivers/usb/renesas_usbhs/common.c
+@@ -600,8 +600,10 @@ static int usbhsc_resume(struct device *dev)
+ struct usbhs_priv *priv = dev_get_drvdata(dev);
+ struct platform_device *pdev = usbhs_priv_to_pdev(priv);
+
+- if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
++ if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) {
+ usbhsc_power_ctrl(priv, 1);
++ usbhs_mod_autonomy_mode(priv);
++ }
+
+ usbhs_platform_call(priv, phy_reset, pdev);
+
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index 157a9f9afc2d..0c962ff5eed2 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -261,11 +261,26 @@ static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
+ struct usbhs_fifo *fifo)
+ {
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
++ int ret = 0;
+
+- if (!usbhs_pipe_is_dcp(pipe))
+- usbhsf_fifo_barrier(priv, fifo);
++ if (!usbhs_pipe_is_dcp(pipe)) {
++ /*
++ * This driver checks the pipe condition first to avoid -EBUSY
++ * from usbhsf_fifo_barrier() with about 10 msec delay in
++ * the interrupt handler if the pipe is RX direction and empty.
++ */
++ if (usbhs_pipe_is_dir_in(pipe))
++ ret = usbhs_pipe_is_accessible(pipe);
++ if (!ret)
++ ret = usbhsf_fifo_barrier(priv, fifo);
++ }
+
+- usbhs_write(priv, fifo->ctr, BCLR);
++ /*
++ * if non-DCP pipe, this driver should set BCLR when
++ * usbhsf_fifo_barrier() returns 0.
++ */
++ if (!ret)
++ usbhs_write(priv, fifo->ctr, BCLR);
+ }
+
+ static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
+@@ -545,6 +560,7 @@ static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
+ usbhsf_send_terminator(pipe, fifo);
+
+ usbhsf_tx_irq_ctrl(pipe, !*is_done);
++ usbhs_pipe_running(pipe, !*is_done);
+ usbhs_pipe_enable(pipe);
+
+ dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n",
+@@ -571,12 +587,21 @@ usbhs_fifo_write_busy:
+ * retry in interrupt
+ */
+ usbhsf_tx_irq_ctrl(pipe, 1);
++ usbhs_pipe_running(pipe, 1);
+
+ return ret;
+ }
+
++static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
++{
++ if (usbhs_pipe_is_running(pkt->pipe))
++ return 0;
++
++ return usbhsf_pio_try_push(pkt, is_done);
++}
++
+ struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
+- .prepare = usbhsf_pio_try_push,
++ .prepare = usbhsf_pio_prepare_push,
+ .try_run = usbhsf_pio_try_push,
+ };
+
+@@ -590,6 +615,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
+ if (usbhs_pipe_is_busy(pipe))
+ return 0;
+
++ if (usbhs_pipe_is_running(pipe))
++ return 0;
++
+ /*
+ * pipe enable to prepare packet receive
+ */
+@@ -598,6 +626,7 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
+
+ usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
+ usbhs_pipe_enable(pipe);
++ usbhs_pipe_running(pipe, 1);
+ usbhsf_rx_irq_ctrl(pipe, 1);
+
+ return 0;
+@@ -643,6 +672,7 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
+ (total_len < maxp)) { /* short packet */
+ *is_done = 1;
+ usbhsf_rx_irq_ctrl(pipe, 0);
++ usbhs_pipe_running(pipe, 0);
+ usbhs_pipe_disable(pipe); /* disable pipe first */
+ }
+
+@@ -798,10 +828,11 @@ static void xfer_work(struct work_struct *work)
+ dev_dbg(dev, " %s %d (%d/ %d)\n",
+ fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
+
++ usbhs_pipe_running(pipe, 1);
+ usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
+- usbhs_pipe_enable(pipe);
+- usbhsf_dma_start(pipe, fifo);
+ dma_async_issue_pending(chan);
++ usbhsf_dma_start(pipe, fifo);
++ usbhs_pipe_enable(pipe);
+ }
+
+ /*
+@@ -829,6 +860,10 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
+ if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
+ goto usbhsf_pio_prepare_push;
+
++ /* return at this time if the pipe is running */
++ if (usbhs_pipe_is_running(pipe))
++ return 0;
++
+ /* get enable DMA fifo */
+ fifo = usbhsf_get_dma_fifo(priv, pkt);
+ if (!fifo)
+@@ -866,6 +901,7 @@ static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
+ pkt->actual = pkt->trans;
+
+ *is_done = !pkt->zero; /* send zero packet ? */
++ usbhs_pipe_running(pipe, !*is_done);
+
+ usbhsf_dma_stop(pipe, pipe->fifo);
+ usbhsf_dma_unmap(pkt);
+@@ -966,8 +1002,10 @@ static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
+ if ((pkt->actual == pkt->length) || /* receive all data */
+ (pkt->trans < maxp)) { /* short packet */
+ *is_done = 1;
++ usbhs_pipe_running(pipe, 0);
+ } else {
+ /* re-enable */
++ usbhs_pipe_running(pipe, 0);
+ usbhsf_prepare_pop(pkt, is_done);
+ }
+
+diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
+index 7926e1c700f1..85e30e1d5e82 100644
+--- a/drivers/usb/renesas_usbhs/pipe.c
++++ b/drivers/usb/renesas_usbhs/pipe.c
+@@ -578,6 +578,19 @@ int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe)
+ return usbhsp_flags_has(pipe, IS_DIR_HOST);
+ }
+
++int usbhs_pipe_is_running(struct usbhs_pipe *pipe)
++{
++ return usbhsp_flags_has(pipe, IS_RUNNING);
++}
++
++void usbhs_pipe_running(struct usbhs_pipe *pipe, int running)
++{
++ if (running)
++ usbhsp_flags_set(pipe, IS_RUNNING);
++ else
++ usbhsp_flags_clr(pipe, IS_RUNNING);
++}
++
+ void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int sequence)
+ {
+ u16 mask = (SQCLR | SQSET);
+diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
+index b476fde955bf..b18a794922d3 100644
+--- a/drivers/usb/renesas_usbhs/pipe.h
++++ b/drivers/usb/renesas_usbhs/pipe.h
+@@ -36,6 +36,7 @@ struct usbhs_pipe {
+ #define USBHS_PIPE_FLAGS_IS_USED (1 << 0)
+ #define USBHS_PIPE_FLAGS_IS_DIR_IN (1 << 1)
+ #define USBHS_PIPE_FLAGS_IS_DIR_HOST (1 << 2)
++#define USBHS_PIPE_FLAGS_IS_RUNNING (1 << 3)
+
+ struct usbhs_pkt_handle *handler;
+
+@@ -79,6 +80,9 @@ int usbhs_pipe_probe(struct usbhs_priv *priv);
+ void usbhs_pipe_remove(struct usbhs_priv *priv);
+ int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe);
+ int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe);
++int usbhs_pipe_is_running(struct usbhs_pipe *pipe);
++void usbhs_pipe_running(struct usbhs_pipe *pipe, int running);
++
+ void usbhs_pipe_init(struct usbhs_priv *priv,
+ int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map));
+ int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe);
+diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
+index 5f3bcd31e204..f3bbe210119d 100644
+--- a/drivers/usb/serial/console.c
++++ b/drivers/usb/serial/console.c
+@@ -188,6 +188,7 @@ static int usb_console_setup(struct console *co, char *options)
+ kfree(tty);
+ reset_open_count:
+ port->port.count = 0;
++ info->port = NULL;
+ usb_autopm_put_interface(serial->interface);
+ error_get_interface:
+ usb_serial_put(serial);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 296cc1b49446..7831e6865f16 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2974,6 +2974,10 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
+ ret = PTR_ERR(new_root);
+ goto out;
+ }
++ if (!is_fstree(new_root->objectid)) {
++ ret = -ENOENT;
++ goto out;
++ }
+
+ if (btrfs_root_refs(&new_root->root_item) == 0) {
+ ret = -ENOENT;
+diff --git a/fs/direct-io.c b/fs/direct-io.c
+index 7ab90f5081ee..4007749a478e 100644
+--- a/fs/direct-io.c
++++ b/fs/direct-io.c
+@@ -759,7 +759,8 @@ out:
+ */
+ if (sdio->boundary) {
+ ret = dio_send_cur_page(dio, sdio, map_bh);
+- dio_bio_submit(dio, sdio);
++ if (sdio->bio)
++ dio_bio_submit(dio, sdio);
+ page_cache_release(sdio->cur_page);
+ sdio->cur_page = NULL;
+ }
+@@ -933,6 +934,7 @@ do_holes:
+ i_size_aligned >> blkbits) {
+ /* We hit eof */
+ page_cache_release(page);
++ dio_cleanup(dio, sdio);
+ goto out;
+ }
+ zero_user(page, block_in_page << blkbits,
+diff --git a/fs/exec.c b/fs/exec.c
+index c945a555eb25..e3abc8e3d58f 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -196,8 +196,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+
+ if (write) {
+ unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
++ unsigned long ptr_size;
+ struct rlimit *rlim;
+
++ /*
++ * Since the stack will hold pointers to the strings, we
++ * must account for them as well.
++ *
++ * The size calculation is the entire vma while each arg page is
++ * built, so each time we get here it's calculating how far it
++ * is currently (rather than each call being just the newly
++ * added size from the arg page). As a result, we need to
++ * always add the entire size of the pointers, so that on the
++ * last call to get_arg_page() we'll actually have the entire
++ * correct size.
++ */
++ ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
++ if (ptr_size > ULONG_MAX - size)
++ goto fail;
++ size += ptr_size;
++
+ acct_arg_size(bprm, size / PAGE_SIZE);
+
+ /*
+@@ -215,13 +233,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+ * to work from.
+ */
+ rlim = current->signal->rlim;
+- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
+- put_page(page);
+- return NULL;
+- }
++ if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
++ goto fail;
+ }
+
+ return page;
++
++fail:
++ put_page(page);
++ return NULL;
+ }
+
+ static void put_arg_page(struct page *page)
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index ec9770f42538..ed2badabebf0 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -325,47 +325,27 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
+ num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+ nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
+ (pgoff_t)num);
+- if (nr_pages == 0) {
+- if (whence == SEEK_DATA)
+- break;
+-
+- BUG_ON(whence != SEEK_HOLE);
+- /*
+- * If this is the first time to go into the loop and
+- * offset is not beyond the end offset, it will be a
+- * hole at this offset
+- */
+- if (lastoff == startoff || lastoff < endoff)
+- found = 1;
++ if (nr_pages == 0)
+ break;
+- }
+-
+- /*
+- * If this is the first time to go into the loop and
+- * offset is smaller than the first page offset, it will be a
+- * hole at this offset.
+- */
+- if (lastoff == startoff && whence == SEEK_HOLE &&
+- lastoff < page_offset(pvec.pages[0])) {
+- found = 1;
+- break;
+- }
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+ struct buffer_head *bh, *head;
+
+ /*
+- * If the current offset is not beyond the end of given
+- * range, it will be a hole.
++ * If current offset is smaller than the page offset,
++ * there is a hole at this offset.
+ */
+- if (lastoff < endoff && whence == SEEK_HOLE &&
+- page->index > end) {
++ if (whence == SEEK_HOLE && lastoff < endoff &&
++ lastoff < page_offset(pvec.pages[i])) {
+ found = 1;
+ *offset = lastoff;
+ goto out;
+ }
+
++ if (page->index > end)
++ goto out;
++
+ lock_page(page);
+
+ if (unlikely(page->mapping != inode->i_mapping)) {
+@@ -382,6 +362,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
+ lastoff = page_offset(page);
+ bh = head = page_buffers(page);
+ do {
++ if (lastoff + bh->b_size <= startoff)
++ goto next;
+ if (buffer_uptodate(bh) ||
+ buffer_unwritten(bh)) {
+ if (whence == SEEK_DATA)
+@@ -396,6 +378,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
+ unlock_page(page);
+ goto out;
+ }
++next:
+ lastoff += bh->b_size;
+ bh = bh->b_this_page;
+ } while (bh != head);
+@@ -405,20 +388,18 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
+ unlock_page(page);
+ }
+
+- /*
+- * The no. of pages is less than our desired, that would be a
+- * hole in there.
+- */
+- if (nr_pages < num && whence == SEEK_HOLE) {
+- found = 1;
+- *offset = lastoff;
++ /* The no. of pages is less than our desired, we are done. */
++ if (nr_pages < num)
+ break;
+- }
+
+ index = pvec.pages[i - 1]->index + 1;
+ pagevec_release(&pvec);
+ } while (index <= end);
+
++ if (whence == SEEK_HOLE && lastoff < endoff) {
++ found = 1;
++ *offset = lastoff;
++ }
+ out:
+ pagevec_release(&pvec);
+ return found;
+@@ -440,7 +421,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
+ mutex_lock(&inode->i_mutex);
+
+ isize = i_size_read(inode);
+- if (offset >= isize) {
++ if (offset < 0 || offset >= isize) {
+ mutex_unlock(&inode->i_mutex);
+ return -ENXIO;
+ }
+@@ -523,7 +504,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
+ mutex_lock(&inode->i_mutex);
+
+ isize = i_size_read(inode);
+- if (offset >= isize) {
++ if (offset < 0 || offset >= isize) {
+ mutex_unlock(&inode->i_mutex);
+ return -ENXIO;
+ }
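/*
 * Illustrative sketch, not part of the patch above: the ext4 hunk tightens the
 * SEEK_HOLE/SEEK_DATA bookkeeping and rejects negative offsets with ENXIO. A
 * minimal userspace probe of that interface ("somefile" is just a placeholder
 * path, not anything from the kernel tree):
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("somefile", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* With the fix, a negative offset fails (ENXIO) instead of misbehaving. */
	if (lseek(fd, -1, SEEK_DATA) == (off_t)-1)
		perror("lseek(fd, -1, SEEK_DATA)");

	/* SEEK_HOLE from 0 lands on the first hole, or on EOF if there is none. */
	off_t hole = lseek(fd, 0, SEEK_HOLE);
	printf("first hole at offset %lld\n", (long long)hole);

	close(fd);
	return 0;
}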
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 1095d77c2a9d..26054c19e6cd 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5045,8 +5045,9 @@ static int ext4_expand_extra_isize(struct inode *inode,
+ /* No extended attributes present */
+ if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
+ header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
+- memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
+- new_extra_isize);
++ memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
++ EXT4_I(inode)->i_extra_isize, 0,
++ new_extra_isize - EXT4_I(inode)->i_extra_isize);
+ EXT4_I(inode)->i_extra_isize = new_extra_isize;
+ return 0;
+ }
+@@ -5097,8 +5098,6 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
+ sbi->s_want_extra_isize,
+ iloc, handle);
+ if (ret) {
+- ext4_set_inode_state(inode,
+- EXT4_STATE_NO_EXPAND);
+ if (mnt_count !=
+ le16_to_cpu(sbi->s_es->s_mnt_count)) {
+ ext4_warning(inode->i_sb,
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index cf0a70486618..f6190fdfd8ce 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1911,7 +1911,8 @@ retry:
+ n_desc_blocks = o_desc_blocks +
+ le16_to_cpu(es->s_reserved_gdt_blocks);
+ n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
+- n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
++ n_blocks_count = (ext4_fsblk_t)n_group *
++ EXT4_BLOCKS_PER_GROUP(sb);
+ n_group--; /* set to last group number */
+ }
+
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 92850bab4513..dde00d1e2994 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1266,11 +1266,13 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
+ int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
+
+ down_write(&EXT4_I(inode)->xattr_sem);
++ /*
++ * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty
++ */
++ ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
+ retry:
+- if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) {
+- up_write(&EXT4_I(inode)->xattr_sem);
+- return 0;
+- }
++ if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
++ goto out;
+
+ header = IHDR(inode, raw_inode);
+ entry = IFIRST(header);
+@@ -1295,8 +1297,7 @@ retry:
+ (void *)header, total_ino,
+ inode->i_sb->s_blocksize);
+ EXT4_I(inode)->i_extra_isize = new_extra_isize;
+- error = 0;
+- goto cleanup;
++ goto out;
+ }
+
+ /*
+@@ -1457,6 +1458,8 @@ retry:
+ kfree(bs);
+ }
+ brelse(bh);
++out:
++ ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
+ up_write(&EXT4_I(inode)->xattr_sem);
+ return 0;
+
+@@ -1468,6 +1471,10 @@ cleanup:
+ kfree(is);
+ kfree(bs);
+ brelse(bh);
++ /*
++ * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
++ * size expansion failed.
++ */
+ up_write(&EXT4_I(inode)->xattr_sem);
+ return error;
+ }
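/*
 * Illustrative sketch, not part of the patch above: the xattr hunk raises
 * EXT4_STATE_NO_EXPAND for the whole expansion attempt so that marking the
 * inode dirty from inside it cannot re-enter the expansion path. The shape of
 * such a re-entry guard, reduced to plain C with made-up names:
 */
#include <stdbool.h>
#include <stdio.h>

static bool no_expand;		/* stands in for the per-inode state bit */

static void mark_dirty(void)
{
	if (no_expand)
		return;		/* guard is up: do not expand again */
	/* ...otherwise this path would call back into expand()... */
}

static int expand(void)
{
	no_expand = true;	/* raised before any work, as the patch does */
	mark_dirty();		/* safe to call while the guard is up */
	no_expand = false;	/* cleared only when expansion succeeded */
	return 0;
}

int main(void)
{
	printf("expand() returned %d\n", expand());
	return 0;
}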
+diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
+index f27c89d17885..e7cf8c5f2677 100644
+--- a/fs/fscache/object-list.c
++++ b/fs/fscache/object-list.c
+@@ -338,6 +338,13 @@ static void fscache_objlist_config(struct fscache_objlist_data *data)
+ rcu_read_lock();
+
+ confkey = key->payload.data;
++ if (!confkey) {
++ /* key was revoked */
++ rcu_read_unlock();
++ key_put(key);
++ goto no_config;
++ }
++
+ buf = confkey->data;
+
+ for (len = confkey->datalen - 1; len >= 0; len--) {
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 1dce93041012..ee5c3e9a5983 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -54,7 +54,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
+ {
+ struct fuse_file *ff;
+
+- ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
++ ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL);
+ if (unlikely(!ff))
+ return NULL;
+
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 5c1120a5fa42..0ead8bed774a 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -1237,8 +1237,8 @@ int udf_setsize(struct inode *inode, loff_t newsize)
+ return err;
+ }
+ set_size:
+- truncate_setsize(inode, newsize);
+ up_write(&iinfo->i_data_sem);
++ truncate_setsize(inode, newsize);
+ } else {
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ down_write(&iinfo->i_data_sem);
+@@ -1255,9 +1255,9 @@ set_size:
+ udf_get_block);
+ if (err)
+ return err;
++ truncate_setsize(inode, newsize);
+ down_write(&iinfo->i_data_sem);
+ udf_clear_extent_cache(inode);
+- truncate_setsize(inode, newsize);
+ udf_truncate_extents(inode);
+ up_write(&iinfo->i_data_sem);
+ }
+diff --git a/include/linux/key.h b/include/linux/key.h
+index 4dfde1161c5e..66633b5f2f65 100644
+--- a/include/linux/key.h
++++ b/include/linux/key.h
+@@ -162,6 +162,7 @@ struct key {
+ #define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
+ #define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */
+ #define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */
++#define KEY_FLAG_UID_KEYRING 11 /* set if key is a user or user session keyring */
+
+ /* the description string
+ * - this is used to match a key against search criteria
+@@ -203,6 +204,7 @@ extern struct key *key_alloc(struct key_type *type,
+ #define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
+ #define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
+ #define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
++#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
+
+ extern void key_revoke(struct key *key);
+ extern void key_invalidate(struct key *key);
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index 120dd354849d..4dab847a1d75 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -306,6 +306,7 @@ enum {
+
+ __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
+ __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
++ __WQ_ORDERED_EXPLICIT = 1 << 18, /* internal: alloc_ordered_workqueue() */
+
+ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
+ WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
+@@ -408,7 +409,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+ #define alloc_ordered_workqueue(fmt, flags, args...) \
+- alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
++ alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \
++ __WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
+
+ #define create_workqueue(name) \
+ alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 413e23be60d1..1c96547c2a3f 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -822,6 +822,7 @@ extern int inet6_hash_connect(struct inet_timewait_death_row *death_row,
+ */
+ extern const struct proto_ops inet6_stream_ops;
+ extern const struct proto_ops inet6_dgram_ops;
++extern const struct proto_ops inet6_sockraw_ops;
+
+ struct group_source_req;
+ struct group_filter;
+diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
+index 5d5a6a4732ef..5af07a1ab0c2 100644
+--- a/include/net/iw_handler.h
++++ b/include/net/iw_handler.h
+@@ -551,7 +551,8 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends,
+ memcpy(stream + lcp_len,
+ ((char *) &iwe->u) + IW_EV_POINT_OFF,
+ IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
+- memcpy(stream + point_len, extra, iwe->u.data.length);
++ if (iwe->u.data.length && extra)
++ memcpy(stream + point_len, extra, iwe->u.data.length);
+ stream += event_len;
+ }
+ return stream;
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index 845ab6decc45..ee81c68f24a6 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -555,6 +555,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
+
+ #define _sctp_walk_params(pos, chunk, end, member)\
+ for (pos.v = chunk->member;\
++ (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
++ (void *)chunk + end) &&\
+ pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
+ ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
+ pos.v += WORD_ROUND(ntohs(pos.p->length)))
+@@ -565,6 +567,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
+ #define _sctp_walk_errors(err, chunk_hdr, end)\
+ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
+ sizeof(sctp_chunkhdr_t));\
++ ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
++ (void *)chunk_hdr + end) &&\
+ (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
+ ntohs(err->length) >= sizeof(sctp_errhdr_t); \
+ err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
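/*
 * Illustrative sketch, not part of the patch above: the sctp.h hunk makes the
 * walk macros verify that a parameter's length field itself lies inside the
 * chunk before reading it. The same check on a plain buffer (the struct and
 * function names here are made up for the example):
 */
#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct paramhdr {
	uint16_t type;
	uint16_t length;	/* network byte order, includes this header */
};

/* Returns 1 if a whole header and its claimed body fit inside buf[0..len). */
static int param_fits(const uint8_t *buf, size_t len, size_t off)
{
	struct paramhdr hdr;
	size_t plen;

	if (off + sizeof(hdr) > len)	/* the length field itself is out of bounds */
		return 0;
	memcpy(&hdr, buf + off, sizeof(hdr));
	plen = ntohs(hdr.length);
	return plen >= sizeof(hdr) && off + plen <= len;
}

int main(void)
{
	uint8_t chunk[8] = { 0x00, 0x01, 0x00, 0x08, 0, 0, 0, 0 };

	printf("parameter fits: %d\n", param_fits(chunk, sizeof(chunk), 0));
	return 0;
}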
+diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
+index ca4693b4e09e..00c0e5bf5d3e 100644
+--- a/include/net/sctp/ulpevent.h
++++ b/include/net/sctp/ulpevent.h
+@@ -143,8 +143,12 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
+ static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
+ struct sctp_event_subscribe *mask)
+ {
++ int offset = sn_type - SCTP_SN_TYPE_BASE;
+ char *amask = (char *) mask;
+- return amask[sn_type - SCTP_SN_TYPE_BASE];
++
++ if (offset >= sizeof(struct sctp_event_subscribe))
++ return 0;
++ return amask[offset];
+ }
+
+ /* Given an event subscription, is this event enabled? */
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 79cd118d5994..c4db9acefa9c 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1592,4 +1592,14 @@ struct tcp_request_sock_ops {
+ extern void tcp_v4_init(void);
+ extern void tcp_init(void);
+
++/* At how many jiffies into the future should the RTO fire? */
++static inline s32 tcp_rto_delta(const struct sock *sk)
++{
++ const struct sk_buff *skb = tcp_write_queue_head(sk);
++ const u32 rto = inet_csk(sk)->icsk_rto;
++ const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto;
++
++ return (s32)(rto_time_stamp - tcp_time_stamp);
++}
++
+ #endif /* _TCP_H */
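/*
 * Illustrative sketch, not part of the patch above: tcp_rto_delta() uses the
 * standard "cast the unsigned difference to signed" idiom, which yields the
 * right sign even when the 32-bit timestamp wraps. The idiom in isolation:
 */
#include <stdint.h>
#include <stdio.h>

/* How far in the future is deadline, seen from now? Negative means overdue. */
static int32_t time_delta(uint32_t deadline, uint32_t now)
{
	return (int32_t)(deadline - now);
}

int main(void)
{
	uint32_t now = 0xFFFFFFF0u;		/* just before the counter wraps */
	uint32_t deadline = now + 0x40u;	/* wraps around to a small value */

	printf("delta = %d ticks\n", time_delta(deadline, now));	/* prints 64 */
	return 0;
}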
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 7d99c0b5b789..8e271438f77c 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -729,6 +729,7 @@ struct se_port_stat_grps {
+ struct se_lun {
+ #define SE_LUN_LINK_MAGIC 0xffff7771
+ u32 lun_link_magic;
++ bool lun_shutdown;
+ /* See transport_lun_status_table */
+ enum transport_lun_status_table lun_status;
+ u32 lun_access;
+diff --git a/kernel/extable.c b/kernel/extable.c
+index 67460b93b1a1..5ec4b6f861d1 100644
+--- a/kernel/extable.c
++++ b/kernel/extable.c
+@@ -66,7 +66,7 @@ static inline int init_kernel_text(unsigned long addr)
+ return 0;
+ }
+
+-int core_kernel_text(unsigned long addr)
++int notrace core_kernel_text(unsigned long addr)
+ {
+ if (addr >= (unsigned long)_stext &&
+ addr <= (unsigned long)_etext)
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index edffb6781c0e..359fbd32dc9e 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3061,11 +3061,17 @@ static int tracing_open(struct inode *inode, struct file *file)
+ /* If this file was open for write, then erase contents */
+ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+ int cpu = tracing_get_cpu(inode);
++ struct trace_buffer *trace_buf = &tr->trace_buffer;
++
++#ifdef CONFIG_TRACER_MAX_TRACE
++ if (tr->current_trace->print_max)
++ trace_buf = &tr->max_buffer;
++#endif
+
+ if (cpu == RING_BUFFER_ALL_CPUS)
+- tracing_reset_online_cpus(&tr->trace_buffer);
++ tracing_reset_online_cpus(trace_buf);
+ else
+- tracing_reset(&tr->trace_buffer, cpu);
++ tracing_reset(trace_buf, cpu);
+ }
+
+ if (file->f_mode & FMODE_READ) {
+@@ -4654,7 +4660,7 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
+ tracing_reset_online_cpus(&tr->trace_buffer);
+
+ #ifdef CONFIG_TRACER_MAX_TRACE
+- if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
++ if (tr->max_buffer.buffer)
+ ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
+ tracing_reset_online_cpus(&tr->max_buffer);
+ #endif
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 66972ac0c6c0..f55fbfa7feda 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3399,7 +3399,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
+ * attributes breaks ordering guarantee. Disallow exposing ordered
+ * workqueues.
+ */
+- if (WARN_ON(wq->flags & __WQ_ORDERED))
++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+ return -EINVAL;
+
+ wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
+@@ -3964,8 +3964,12 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
+ return -EINVAL;
+
+ /* creating multiple pwqs breaks ordering guarantee */
+- if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
+- return -EINVAL;
++ if (!list_empty(&wq->pwqs)) {
++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
++ return -EINVAL;
++
++ wq->flags &= ~__WQ_ORDERED;
++ }
+
+ pwq_tbl = kzalloc(wq_numa_tbl_len * sizeof(pwq_tbl[0]), GFP_KERNEL);
+ new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+@@ -4213,6 +4217,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
+ struct workqueue_struct *wq;
+ struct pool_workqueue *pwq;
+
++ /*
++ * Unbound && max_active == 1 used to imply ordered, which is no
++ * longer the case on NUMA machines due to per-node pools. While
++ * alloc_ordered_workqueue() is the right way to create an ordered
++ * workqueue, keep the previous behavior to avoid subtle breakages
++ * on NUMA.
++ */
++ if ((flags & WQ_UNBOUND) && max_active == 1)
++ flags |= __WQ_ORDERED;
++
+ /* allocate wq and format name */
+ if (flags & WQ_UNBOUND)
+ tbl_size = wq_numa_tbl_len * sizeof(wq->numa_pwq_tbl[0]);
+@@ -4401,13 +4415,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
+ struct pool_workqueue *pwq;
+
+ /* disallow meddling with max_active for ordered workqueues */
+- if (WARN_ON(wq->flags & __WQ_ORDERED))
++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+ return;
+
+ max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
+
+ mutex_lock(&wq->mutex);
+
++ wq->flags &= ~__WQ_ORDERED;
+ wq->saved_max_active = max_active;
+
+ for_each_pwq(pwq, wq)
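/*
 * Note on the workqueue hunks above (editorial, not part of the patch): only
 * queues created via alloc_ordered_workqueue() now carry
 * __WQ_ORDERED_EXPLICIT, e.g.
 *
 *	wq = alloc_ordered_workqueue("example_wq", 0);	/* name made up */
 *
 * and those keep rejecting max_active changes and sysfs attribute exposure,
 * while a plain alloc_workqueue("example_wq", WQ_UNBOUND, 1) merely starts
 * out ordered and may drop that property once attributes are applied.
 */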
+diff --git a/lib/cmdline.c b/lib/cmdline.c
+index eb6791188cf5..efc35fbce780 100644
+--- a/lib/cmdline.c
++++ b/lib/cmdline.c
+@@ -22,14 +22,14 @@
+ * the values[M, M+1, ..., N] into the ints array in get_options.
+ */
+
+-static int get_range(char **str, int *pint)
++static int get_range(char **str, int *pint, int n)
+ {
+ int x, inc_counter, upper_range;
+
+ (*str)++;
+ upper_range = simple_strtol((*str), NULL, 0);
+ inc_counter = upper_range - *pint;
+- for (x = *pint; x < upper_range; x++)
++ for (x = *pint; n && x < upper_range; x++, n--)
+ *pint++ = x;
+ return inc_counter;
+ }
+@@ -95,7 +95,7 @@ char *get_options(const char *str, int nints, int *ints)
+ break;
+ if (res == 3) {
+ int range_nums;
+- range_nums = get_range((char **)&str, ints + i);
++ range_nums = get_range((char **)&str, ints + i, nints - i);
+ if (range_nums < 0)
+ break;
+ /*
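/*
 * Illustrative sketch, not part of the patch above: the cmdline.c hunk caps
 * the expansion of a "lo-hi" range at the room left in the destination array.
 * The same bounded expansion in plain C (names made up for the example):
 */
#include <stdio.h>

/* Expand [lo, hi) into out[], writing at most room values; returns the count. */
static int expand_range(int lo, int hi, int *out, int room)
{
	int n = 0;

	for (int x = lo; x < hi && n < room; x++)
		out[n++] = x;
	return n;
}

int main(void)
{
	int buf[4];
	int n = expand_range(1, 100, buf, 4);	/* stops after 4 values, not 99 */

	printf("wrote %d values, last = %d\n", n, buf[n - 1]);
	return 0;
}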
+diff --git a/lib/digsig.c b/lib/digsig.c
+index 2f31e6a45f0a..ae703dfc9731 100644
+--- a/lib/digsig.c
++++ b/lib/digsig.c
+@@ -86,6 +86,12 @@ static int digsig_verify_rsa(struct key *key,
+ down_read(&key->sem);
+ ukp = key->payload.data;
+
++ if (!ukp) {
++ /* key was revoked before we acquired its semaphore */
++ err = -EKEYREVOKED;
++ goto err1;
++ }
++
+ if (ukp->datalen < sizeof(*pkh))
+ goto err1;
+
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 3c4e4d7ae54e..d042e254a163 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2132,7 +2132,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+
+ /* Guard against exceeding limits of the address space. */
+ address &= PAGE_MASK;
+- if (address >= TASK_SIZE)
++ if (address >= (TASK_SIZE & PAGE_MASK))
+ return -ENOMEM;
+ address += PAGE_SIZE;
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 4e8927539299..829ee76d5521 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5177,8 +5177,8 @@ unsigned long free_reserved_area(unsigned long start, unsigned long end,
+ }
+
+ if (pages && s)
+- pr_info("Freeing %s memory: %ldK (%lx - %lx)\n",
+- s, pages << (PAGE_SHIFT - 10), start, end);
++ pr_info("Freeing %s memory: %ldK\n",
++ s, pages << (PAGE_SHIFT - 10));
+
+ return pages;
+ }
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index 86abb2e59aea..82fdb35154fc 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -274,7 +274,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
+ return 0;
+
+ out_free_newdev:
+- free_netdev(new_dev);
++ if (new_dev->reg_state == NETREG_UNINITIALIZED)
++ free_netdev(new_dev);
+ return err;
+ }
+
+diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
+index e430b1abcd2f..e387e6719fa2 100644
+--- a/net/bluetooth/bnep/core.c
++++ b/net/bluetooth/bnep/core.c
+@@ -32,6 +32,7 @@
+ #include <asm/unaligned.h>
+
+ #include <net/bluetooth/bluetooth.h>
++#include <net/bluetooth/l2cap.h>
+ #include <net/bluetooth/hci_core.h>
+
+ #include "bnep.h"
+@@ -539,6 +540,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
+
+ BT_DBG("");
+
++ if (!l2cap_is_socket(sock))
++ return -EBADFD;
++
+ baswap((void *) dst, &bt_sk(sock->sk)->dst);
+ baswap((void *) src, &bt_sk(sock->sk)->src);
+
+diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
+index e0a6ebf2baa6..84460f623fc8 100644
+--- a/net/bluetooth/cmtp/core.c
++++ b/net/bluetooth/cmtp/core.c
+@@ -334,6 +334,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
+
+ BT_DBG("");
+
++ if (!l2cap_is_socket(sock))
++ return -EBADFD;
++
+ session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
+ if (!session)
+ return -ENOMEM;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 682bf5ad63a0..d69d8ec11383 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2342,9 +2342,10 @@ EXPORT_SYMBOL(skb_mac_gso_segment);
+ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
+ {
+ if (tx_path)
+- return skb->ip_summed != CHECKSUM_PARTIAL;
+- else
+- return skb->ip_summed == CHECKSUM_NONE;
++ return skb->ip_summed != CHECKSUM_PARTIAL &&
++ skb->ip_summed != CHECKSUM_NONE;
++
++ return skb->ip_summed == CHECKSUM_NONE;
+ }
+
+ /**
+@@ -2361,11 +2362,12 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
+ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+ netdev_features_t features, bool tx_path)
+ {
++ struct sk_buff *segs;
++
+ if (unlikely(skb_needs_check(skb, tx_path))) {
+ int err;
+
+- skb_warn_bad_offload(skb);
+-
++ /* We're going to init ->check field in TCP or UDP header */
+ if (skb_header_cloned(skb) &&
+ (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+ return ERR_PTR(err);
+@@ -2375,7 +2377,12 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+ skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
+
+- return skb_mac_gso_segment(skb, features);
++ segs = skb_mac_gso_segment(skb, features);
++
++ if (unlikely(skb_needs_check(skb, tx_path)))
++ skb_warn_bad_offload(skb);
++
++ return segs;
+ }
+ EXPORT_SYMBOL(__skb_gso_segment);
+
+@@ -5636,7 +5643,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+ } else {
+ netdev_stats_to_stats64(storage, &dev->stats);
+ }
+- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
++ storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
+ return storage;
+ }
+ EXPORT_SYMBOL(dev_get_stats);
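/*
 * Illustrative sketch, not part of the patch above: dev_get_stats() now casts
 * the signed per-device counter to unsigned long before adding it to the
 * 64-bit statistic, which avoids sign extension when the counter has run past
 * the signed range on a 32-bit build. Modelled with explicit 32-bit types:
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t counter = -16;		/* a 32-bit counter read back as "negative" */
	uint64_t stat = 1000;

	uint64_t without_cast = stat + counter;			/* sign extended */
	uint64_t with_cast    = stat + (uint32_t)counter;	/* cast first */

	printf("without cast: %" PRIu64 "\n", without_cast);
	printf("with cast:    %" PRIu64 "\n", with_cast);
	return 0;
}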
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 96e125919324..104784ee4bbd 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1470,6 +1470,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+
+ sock_copy(newsk, sk);
+
++ newsk->sk_prot_creator = sk->sk_prot;
++
+ /* SANITY */
+ get_net(sock_net(newsk));
+ sk_node_init(&newsk->sk_node);
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 975c369d4e6d..03610ebd8d0b 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1053,7 +1053,7 @@ static struct inet_protosw inetsw_array[] =
+ .type = SOCK_DGRAM,
+ .protocol = IPPROTO_ICMP,
+ .prot = &ping_prot,
+- .ops = &inet_dgram_ops,
++ .ops = &inet_sockraw_ops,
+ .no_check = UDP_CSUM_DEFAULT,
+ .flags = INET_PROTOSW_REUSE,
+ },
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 017b4792cd44..bcd0a05d6002 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1170,13 +1170,14 @@ static struct pernet_operations fib_net_ops = {
+
+ void __init ip_fib_init(void)
+ {
+- rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
+- rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
+- rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
++ fib_trie_init();
+
+ register_pernet_subsys(&fib_net_ops);
++
+ register_netdevice_notifier(&fib_netdev_notifier);
+ register_inetaddr_notifier(&fib_inetaddr_notifier);
+
+- fib_trie_init();
++ rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
++ rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
++ rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
+ }
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 5f077efad29d..40faf48cb10c 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -846,10 +846,12 @@ static int __ip_append_data(struct sock *sk,
+ csummode = CHECKSUM_PARTIAL;
+
+ cork->length += length;
+- if (((length > mtu) || (skb && skb_has_frags(skb))) &&
++ if ((skb && skb_has_frags(skb)) ||
++ ((length > mtu) &&
++ (skb_queue_len(queue) <= 1) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+- (sk->sk_type == SOCK_DGRAM)) {
++ (sk->sk_type == SOCK_DGRAM))) {
+ err = ip_ufo_append_data(sk, queue, getfrag, from, length,
+ hh_len, fragheaderlen, transhdrlen,
+ maxfraglen, flags);
+@@ -1160,6 +1162,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
+
+ cork->length += size;
+ if ((size + skb->len > mtu) &&
++ (skb_queue_len(&sk->sk_write_queue) == 1) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO)) {
+ skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
+diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
+index 5f011cc89cd9..1e82bdb0f07e 100644
+--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
++++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
+@@ -1305,6 +1305,7 @@ static int __init nf_nat_snmp_basic_init(void)
+ static void __exit nf_nat_snmp_basic_fini(void)
+ {
+ RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
++ synchronize_rcu();
+ nf_conntrack_helper_unregister(&snmp_trap_helper);
+ }
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index d1e04221c275..b80b399f2377 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2313,9 +2313,15 @@ int tcp_disconnect(struct sock *sk, int flags)
+ tcp_set_ca_state(sk, TCP_CA_Open);
+ tcp_clear_retrans(tp);
+ inet_csk_delack_init(sk);
++ /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
++ * issue in __tcp_select_window()
++ */
++ icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
+ tcp_init_send_head(sk);
+ memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
+ __sk_dst_reset(sk);
++ dst_release(sk->sk_rx_dst);
++ sk->sk_rx_dst = NULL;
+
+ WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
+
+diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
+index 019c2389a341..2ca6c080a4bc 100644
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -95,6 +95,7 @@ void tcp_init_congestion_control(struct sock *sk)
+ rcu_read_unlock();
+ }
+
++ tcp_sk(sk)->prior_ssthresh = 0;
+ if (icsk->icsk_ca_ops->init)
+ icsk->icsk_ca_ops->init(sk);
+ }
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 0680058fe693..85dd09be1618 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -111,6 +111,7 @@ int sysctl_tcp_early_retrans __read_mostly = 3;
+ #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */
+ #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
+ #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
++#define FLAG_SET_XMIT_TIMER 0x1000 /* Set TLP or RTO timer */
+ #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
+ #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
+
+@@ -2553,8 +2554,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
+- if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
+- (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
++ if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
++ (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
+ tp->snd_cwnd = tp->snd_ssthresh;
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ }
+@@ -2972,14 +2973,11 @@ void tcp_rearm_rto(struct sock *sk)
+ /* Offset the time elapsed after installing regular RTO */
+ if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+ icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
+- struct sk_buff *skb = tcp_write_queue_head(sk);
+- const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto;
+- s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
++ s32 delta = tcp_rto_delta(sk);
+ /* delta may not be positive if the socket is locked
+ * when the retrans timer fires and is rescheduled.
+ */
+- if (delta > 0)
+- rto = delta;
++ rto = max_t(int, delta, 1);
+ }
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
+ TCP_RTO_MAX);
+@@ -3004,6 +3002,13 @@ void tcp_resume_early_retransmit(struct sock *sk)
+ tcp_xmit_retransmit_queue(sk);
+ }
+
++/* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
++static void tcp_set_xmit_timer(struct sock *sk)
++{
++ if (!tcp_schedule_loss_probe(sk))
++ tcp_rearm_rto(sk);
++}
++
+ /* If we get here, the whole TSO packet has not been acked. */
+ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
+ {
+@@ -3134,7 +3139,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
+ }
+
+ tcp_ack_update_rtt(sk, flag, seq_rtt);
+- tcp_rearm_rto(sk);
++ flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */
+
+ if (tcp_is_reno(tp)) {
+ tcp_remove_reno_sacks(sk, pkts_acked);
+@@ -3394,10 +3399,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ if (after(ack, tp->snd_nxt))
+ goto invalid_ack;
+
+- if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+- icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
+- tcp_rearm_rto(sk);
+-
+ if (after(ack, prior_snd_una))
+ flag |= FLAG_SND_UNA_ADVANCED;
+
+@@ -3454,6 +3455,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+
+ pkts_acked = previous_packets_out - tp->packets_out;
+
++ if (tp->tlp_high_seq)
++ tcp_process_tlp_ack(sk, ack, flag);
++ /* If needed, reset TLP/RTO timer; RACK may later override this. */
++ if (flag & FLAG_SET_XMIT_TIMER)
++ tcp_set_xmit_timer(sk);
++
+ if (tcp_ack_is_dubious(sk, flag)) {
+ /* Advance CWND, if state allows this. */
+ if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
+@@ -3466,17 +3473,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ tcp_cong_avoid(sk, ack, prior_in_flight);
+ }
+
+- if (tp->tlp_high_seq)
+- tcp_process_tlp_ack(sk, ack, flag);
+-
+ if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
+ struct dst_entry *dst = __sk_dst_get(sk);
+ if (dst)
+ dst_confirm(dst);
+ }
+
+- if (icsk->icsk_pending == ICSK_TIME_RETRANS)
+- tcp_schedule_loss_probe(sk);
+ if (tp->srtt != prior_rtt || tp->snd_cwnd != prior_cwnd)
+ tcp_update_pacing_rate(sk);
+ return 1;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 8729a934124f..f5d670ccd403 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1945,28 +1945,16 @@ repair:
+
+ bool tcp_schedule_loss_probe(struct sock *sk)
+ {
+- struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+- u32 timeout, tlp_time_stamp, rto_time_stamp;
+ u32 rtt = tp->srtt >> 3;
++ u32 timeout, rto_delta;
+
+- if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
+- return false;
+- /* No consecutive loss probes. */
+- if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
+- tcp_rearm_rto(sk);
+- return false;
+- }
+ /* Don't do any loss probe on a Fast Open connection before 3WHS
+ * finishes.
+ */
+ if (sk->sk_state == TCP_SYN_RECV)
+ return false;
+
+- /* TLP is only scheduled when next timer event is RTO. */
+- if (icsk->icsk_pending != ICSK_TIME_RETRANS)
+- return false;
+-
+ /* Schedule a loss probe in 2*RTT for SACK capable connections
+ * in Open state, that are either limited by cwnd or application.
+ */
+@@ -1987,14 +1975,10 @@ bool tcp_schedule_loss_probe(struct sock *sk)
+ (rtt + (rtt >> 1) + TCP_DELACK_MAX));
+ timeout = max_t(u32, timeout, msecs_to_jiffies(10));
+
+- /* If RTO is shorter, just schedule TLP in its place. */
+- tlp_time_stamp = tcp_time_stamp + timeout;
+- rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
+- if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
+- s32 delta = rto_time_stamp - tcp_time_stamp;
+- if (delta > 0)
+- timeout = delta;
+- }
++ /* If the RTO formula yields an earlier time, then use that time. */
++ rto_delta = tcp_rto_delta(sk); /* How far in future is RTO? */
++ if (rto_delta > 0)
++ timeout = min_t(u32, timeout, rto_delta);
+
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
+ TCP_RTO_MAX);
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 68174e4d88c7..882b23e8e777 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -763,7 +763,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
+ if (is_udplite) /* UDP-Lite */
+ csum = udplite_csum(skb);
+
+- else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */
++ else if (sk->sk_no_check == UDP_CSUM_NOXMIT && !skb_has_frags(skb)) { /* UDP csum off */
+
+ skb->ip_summed = CHECKSUM_NONE;
+ goto send;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 9c4aa2e22448..5ea5f77c0ec1 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2892,6 +2892,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
+ {
+ struct net_device *dev = (struct net_device *) data;
+ struct inet6_dev *idev = __in6_dev_get(dev);
++ struct net *net = dev_net(dev);
+ int run_pending = 0;
+ int err;
+
+@@ -2988,7 +2989,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
+ * IPV6_MIN_MTU stop IPv6 on this interface.
+ */
+ if (dev->mtu < IPV6_MIN_MTU)
+- addrconf_ifdown(dev, 1);
++ addrconf_ifdown(dev, dev != net->loopback_dev);
+ }
+ break;
+
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 46458ee31939..6de0d44a9429 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -167,6 +167,12 @@ static __inline__ void rt6_release(struct rt6_info *rt)
+ dst_free(&rt->dst);
+ }
+
++static void fib6_free_table(struct fib6_table *table)
++{
++ inetpeer_invalidate_tree(&table->tb6_peers);
++ kfree(table);
++}
++
+ static void fib6_link_table(struct net *net, struct fib6_table *tb)
+ {
+ unsigned int h;
+@@ -1738,15 +1744,22 @@ out_timer:
+
+ static void fib6_net_exit(struct net *net)
+ {
++ unsigned int i;
++
+ rt6_ifdown(net, NULL);
+ del_timer_sync(&net->ipv6.ip6_fib_timer);
+
+-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+- inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
+- kfree(net->ipv6.fib6_local_tbl);
+-#endif
+- inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
+- kfree(net->ipv6.fib6_main_tbl);
++ for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
++ struct hlist_head *head = &net->ipv6.fib_table_hash[i];
++ struct hlist_node *tmp;
++ struct fib6_table *tb;
++
++ hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) {
++ hlist_del(&tb->tb6_hlist);
++ fib6_free_table(tb);
++ }
++ }
++
+ kfree(net->ipv6.fib_table_hash);
+ kfree(net->ipv6.rt6_stats);
+ }
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index ae88e17f5c72..529348e6a98b 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -419,7 +419,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ if (code == ICMPV6_HDR_FIELD)
+ teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
+
+- if (teli && teli == info - 2) {
++ if (teli && teli == be32_to_cpu(info) - 2) {
+ tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
+ if (tel->encap_limit == 0) {
+ net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
+@@ -431,7 +431,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ }
+ break;
+ case ICMPV6_PKT_TOOBIG:
+- mtu = info - offset;
++ mtu = be32_to_cpu(info) - offset;
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+ t->dev->mtu = mtu;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 17a88ebcc845..3a65b9a9cb4d 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1288,11 +1288,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+
+ skb = skb_peek_tail(&sk->sk_write_queue);
+ cork->length += length;
+- if (((length > mtu) ||
+- (skb && skb_has_frags(skb))) &&
++ if ((skb && skb_has_frags(skb)) ||
++ (((length + fragheaderlen) > mtu) &&
++ (skb_queue_len(&sk->sk_write_queue) <= 1) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO) &&
+- (sk->sk_type == SOCK_DGRAM)) {
++ (sk->sk_type == SOCK_DGRAM))) {
+ err = ip6_ufo_append_data(sk, getfrag, from, length,
+ hh_len, fragheaderlen,
+ transhdrlen, mtu, flags, rt);
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index c7ce2be09d90..a05e1f1a1a38 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -1319,7 +1319,7 @@ void raw6_proc_exit(void)
+ #endif /* CONFIG_PROC_FS */
+
+ /* Same as inet6_dgram_ops, sans udp_poll. */
+-static const struct proto_ops inet6_sockraw_ops = {
++const struct proto_ops inet6_sockraw_ops = {
+ .family = PF_INET6,
+ .owner = THIS_MODULE,
+ .release = inet6_release,
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 66f51c5a8a3a..3ff567fb90ee 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1135,6 +1135,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
+ goto out;
+ }
+
++ err = -ENOBUFS;
+ key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
+ if (sa->sadb_sa_auth) {
+ int keysize = 0;
+@@ -1146,8 +1147,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
+ if (key)
+ keysize = (key->sadb_key_bits + 7) / 8;
+ x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
+- if (!x->aalg)
++ if (!x->aalg) {
++ err = -ENOMEM;
+ goto out;
++ }
+ strcpy(x->aalg->alg_name, a->name);
+ x->aalg->alg_key_len = 0;
+ if (key) {
+@@ -1166,8 +1169,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
+ goto out;
+ }
+ x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
+- if (!x->calg)
++ if (!x->calg) {
++ err = -ENOMEM;
+ goto out;
++ }
+ strcpy(x->calg->alg_name, a->name);
+ x->props.calgo = sa->sadb_sa_encrypt;
+ } else {
+@@ -1181,8 +1186,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
+ if (key)
+ keysize = (key->sadb_key_bits + 7) / 8;
+ x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
+- if (!x->ealg)
++ if (!x->ealg) {
++ err = -ENOMEM;
+ goto out;
++ }
+ strcpy(x->ealg->alg_name, a->name);
+ x->ealg->alg_key_len = 0;
+ if (key) {
+@@ -1230,8 +1237,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
+ struct xfrm_encap_tmpl *natt;
+
+ x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
+- if (!x->encap)
++ if (!x->encap) {
++ err = -ENOMEM;
+ goto out;
++ }
+
+ natt = x->encap;
+ n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index 1c6a71c41e62..ca66520b8942 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -795,10 +795,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
+ {
+ unsigned int verdict = NF_DROP;
+
+- if (IP_VS_FWD_METHOD(cp) != 0) {
+- pr_err("shouldn't reach here, because the box is on the "
+- "half connection in the tun/dr module.\n");
+- }
++ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
++ goto ignore_cp;
+
+ /* Ensure the checksum is correct */
+ if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
+@@ -832,6 +830,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
+ ip_vs_notrack(skb);
+ else
+ ip_vs_update_conntrack(skb, cp, 0);
++
++ignore_cp:
+ verdict = NF_ACCEPT;
+
+ out:
+@@ -1182,8 +1182,11 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
+ */
+ cp = pp->conn_out_get(af, skb, &iph, 0);
+
+- if (likely(cp))
++ if (likely(cp)) {
++ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
++ goto ignore_cp;
+ return handle_response(af, skb, pd, cp, &iph, hooknum);
++ }
+ if (sysctl_nat_icmp_send(net) &&
+ (pp->protocol == IPPROTO_TCP ||
+ pp->protocol == IPPROTO_UDP ||
+@@ -1225,9 +1228,15 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
+ }
+ }
+ }
++
++out:
+ IP_VS_DBG_PKT(12, af, pp, skb, 0,
+ "ip_vs_out: packet continues traversal as normal");
+ return NF_ACCEPT;
++
++ignore_cp:
++ __ip_vs_conn_put(cp);
++ goto out;
+ }
+
+ /*
+diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
+index 1df176146567..c9f131fc4bf3 100644
+--- a/net/netfilter/nf_conntrack_ecache.c
++++ b/net/netfilter/nf_conntrack_ecache.c
+@@ -116,6 +116,7 @@ void nf_conntrack_unregister_notifier(struct net *net,
+ BUG_ON(notify != new);
+ RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
+ mutex_unlock(&nf_ct_ecache_mutex);
++ /* synchronize_rcu() is called from ctnetlink_exit. */
+ }
+ EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
+
+@@ -152,6 +153,7 @@ void nf_ct_expect_unregister_notifier(struct net *net,
+ BUG_ON(notify != new);
+ RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
+ mutex_unlock(&nf_ct_ecache_mutex);
++ /* synchronize_rcu() is called from ctnetlink_exit. */
+ }
+ EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
+
+diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
+index 1a9545965c0d..531ca55f1af6 100644
+--- a/net/netfilter/nf_conntrack_extend.c
++++ b/net/netfilter/nf_conntrack_extend.c
+@@ -53,7 +53,11 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
+
+ rcu_read_lock();
+ t = rcu_dereference(nf_ct_ext_types[id]);
+- BUG_ON(t == NULL);
++ if (!t) {
++ rcu_read_unlock();
++ return NULL;
++ }
++
+ off = ALIGN(sizeof(struct nf_ct_ext), t->align);
+ len = off + t->len + var_alloc_len;
+ alloc_size = t->alloc_size + var_alloc_len;
+@@ -88,7 +92,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
+
+ rcu_read_lock();
+ t = rcu_dereference(nf_ct_ext_types[id]);
+- BUG_ON(t == NULL);
++ if (!t) {
++ rcu_read_unlock();
++ return NULL;
++ }
+
+ newoff = ALIGN(old->len, t->align);
+ newlen = newoff + t->len + var_alloc_len;
+@@ -186,6 +193,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
+ RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
+ update_alloc_size(type);
+ mutex_unlock(&nf_ct_ext_type_mutex);
+- rcu_barrier(); /* Wait for completion of call_rcu()'s */
++ synchronize_rcu();
+ }
+ EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index ecf065f94032..df65d52ba768 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -3132,6 +3132,7 @@ static void __exit ctnetlink_exit(void)
+ #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+ RCU_INIT_POINTER(nfq_ct_hook, NULL);
+ #endif
++ synchronize_rcu();
+ }
+
+ module_init(ctnetlink_init);
+diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
+index 2bb801e3ee8c..7658d0181050 100644
+--- a/net/netfilter/nf_nat_core.c
++++ b/net/netfilter/nf_nat_core.c
+@@ -853,6 +853,8 @@ static void __exit nf_nat_cleanup(void)
+ #ifdef CONFIG_XFRM
+ RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
+ #endif
++ synchronize_rcu();
++
+ for (i = 0; i < NFPROTO_NUMPROTO; i++)
+ kfree(nf_nat_l4protos[i]);
+ synchronize_net();
+diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
+index 65074dfb9383..10d78dc0d2c6 100644
+--- a/net/netfilter/nfnetlink_cttimeout.c
++++ b/net/netfilter/nfnetlink_cttimeout.c
+@@ -431,6 +431,7 @@ static void __exit cttimeout_exit(void)
+ #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
+ RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
++ synchronize_rcu();
+ #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
+ }
+
+diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
+index 7011c71646f0..c656269c4cf0 100644
+--- a/net/netfilter/xt_TCPMSS.c
++++ b/net/netfilter/xt_TCPMSS.c
+@@ -68,7 +68,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
+ tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+
+ /* Header cannot be larger than the packet */
+- if (tcplen < tcph->doff*4)
++ if (tcplen < tcph->doff*4 || tcph->doff*4 < sizeof(struct tcphdr))
+ return -1;
+
+ if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
+@@ -117,6 +117,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
+ if (tcplen > tcph->doff*4)
+ return 0;
+
++ /* tcph->doff has 4 bits, do not wrap it to 0 */
++ if (tcph->doff >= 15)
++ return 0;
++
+ /*
+ * MSS Option not found ?! add it..
+ */
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 0bbb3470fa78..2f22b0759f2c 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3183,14 +3183,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+
+ if (optlen != sizeof(val))
+ return -EINVAL;
+- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+- return -EBUSY;
+ if (copy_from_user(&val, optval, sizeof(val)))
+ return -EFAULT;
+ if (val > INT_MAX)
+ return -EINVAL;
+- po->tp_reserve = val;
+- return 0;
++ lock_sock(sk);
++ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++ ret = -EBUSY;
++ } else {
++ po->tp_reserve = val;
++ ret = 0;
++ }
++ release_sock(sk);
++ return ret;
+ }
+ case PACKET_LOSS:
+ {
+@@ -3338,6 +3343,8 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+ case PACKET_HDRLEN:
+ if (len > sizeof(int))
+ len = sizeof(int);
++ if (len < sizeof(int))
++ return -EINVAL;
+ if (copy_from_user(&val, optval, len))
+ return -EFAULT;
+ switch (val) {
+diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
+index 7633a752c65e..10e6e5de36e1 100644
+--- a/net/rxrpc/ar-key.c
++++ b/net/rxrpc/ar-key.c
+@@ -213,7 +213,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
+ unsigned int *_toklen)
+ {
+ const __be32 *xdr = *_xdr;
+- unsigned int toklen = *_toklen, n_parts, loop, tmp;
++ unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
+
+ /* there must be at least one name, and at least #names+1 length
+ * words */
+@@ -243,16 +243,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
+ toklen -= 4;
+ if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
+ return -EINVAL;
+- if (tmp > toklen)
++ paddedlen = (tmp + 3) & ~3;
++ if (paddedlen > toklen)
+ return -EINVAL;
+ princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
+ if (!princ->name_parts[loop])
+ return -ENOMEM;
+ memcpy(princ->name_parts[loop], xdr, tmp);
+ princ->name_parts[loop][tmp] = 0;
+- tmp = (tmp + 3) & ~3;
+- toklen -= tmp;
+- xdr += tmp >> 2;
++ toklen -= paddedlen;
++ xdr += paddedlen >> 2;
+ }
+
+ if (toklen < 4)
+@@ -261,16 +261,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
+ toklen -= 4;
+ if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
+ return -EINVAL;
+- if (tmp > toklen)
++ paddedlen = (tmp + 3) & ~3;
++ if (paddedlen > toklen)
+ return -EINVAL;
+ princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
+ if (!princ->realm)
+ return -ENOMEM;
+ memcpy(princ->realm, xdr, tmp);
+ princ->realm[tmp] = 0;
+- tmp = (tmp + 3) & ~3;
+- toklen -= tmp;
+- xdr += tmp >> 2;
++ toklen -= paddedlen;
++ xdr += paddedlen >> 2;
+
+ _debug("%s/...@%s", princ->name_parts[0], princ->realm);
+
+@@ -289,7 +289,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
+ unsigned int *_toklen)
+ {
+ const __be32 *xdr = *_xdr;
+- unsigned int toklen = *_toklen, len;
++ unsigned int toklen = *_toklen, len, paddedlen;
+
+ /* there must be at least one tag and one length word */
+ if (toklen <= 8)
+@@ -303,15 +303,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
+ toklen -= 8;
+ if (len > max_data_size)
+ return -EINVAL;
++ paddedlen = (len + 3) & ~3;
++ if (paddedlen > toklen)
++ return -EINVAL;
+ td->data_len = len;
+
+ if (len > 0) {
+ td->data = kmemdup(xdr, len, GFP_KERNEL);
+ if (!td->data)
+ return -ENOMEM;
+- len = (len + 3) & ~3;
+- toklen -= len;
+- xdr += len >> 2;
++ toklen -= paddedlen;
++ xdr += paddedlen >> 2;
+ }
+
+ _debug("tag %x len %x", td->tag, td->data_len);
+@@ -383,7 +385,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
+ const __be32 **_xdr, unsigned int *_toklen)
+ {
+ const __be32 *xdr = *_xdr;
+- unsigned int toklen = *_toklen, len;
++ unsigned int toklen = *_toklen, len, paddedlen;
+
+ /* there must be at least one length word */
+ if (toklen <= 4)
+@@ -395,6 +397,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
+ toklen -= 4;
+ if (len > AFSTOKEN_K5_TIX_MAX)
+ return -EINVAL;
++ paddedlen = (len + 3) & ~3;
++ if (paddedlen > toklen)
++ return -EINVAL;
+ *_tktlen = len;
+
+ _debug("ticket len %u", len);
+@@ -403,9 +408,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
+ *_ticket = kmemdup(xdr, len, GFP_KERNEL);
+ if (!*_ticket)
+ return -ENOMEM;
+- len = (len + 3) & ~3;
+- toklen -= len;
+- xdr += len >> 2;
++ toklen -= paddedlen;
++ xdr += paddedlen >> 2;
+ }
+
+ *_xdr = xdr;
+@@ -549,7 +553,7 @@ static int rxrpc_instantiate_xdr(struct key *key, const void *data, size_t datal
+ {
+ const __be32 *xdr = data, *token;
+ const char *cp;
+- unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
++ unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
+ int ret;
+
+ _enter(",{%x,%x,%x,%x},%zu",
+@@ -574,22 +578,21 @@ static int rxrpc_instantiate_xdr(struct key *key, const void *data, size_t datal
+ if (len < 1 || len > AFSTOKEN_CELL_MAX)
+ goto not_xdr;
+ datalen -= 4;
+- tmp = (len + 3) & ~3;
+- if (tmp > datalen)
++ paddedlen = (len + 3) & ~3;
++ if (paddedlen > datalen)
+ goto not_xdr;
+
+ cp = (const char *) xdr;
+ for (loop = 0; loop < len; loop++)
+ if (!isprint(cp[loop]))
+ goto not_xdr;
+- if (len < tmp)
+- for (; loop < tmp; loop++)
+- if (cp[loop])
+- goto not_xdr;
++ for (; loop < paddedlen; loop++)
++ if (cp[loop])
++ goto not_xdr;
+ _debug("cellname: [%u/%u] '%*.*s'",
+- len, tmp, len, len, (const char *) xdr);
+- datalen -= tmp;
+- xdr += tmp >> 2;
++ len, paddedlen, len, len, (const char *) xdr);
++ datalen -= paddedlen;
++ xdr += paddedlen >> 2;
+
+ /* get the token count */
+ if (datalen < 12)
+@@ -610,10 +613,11 @@ static int rxrpc_instantiate_xdr(struct key *key, const void *data, size_t datal
+ sec_ix = ntohl(*xdr);
+ datalen -= 4;
+ _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
+- if (toklen < 20 || toklen > datalen)
++ paddedlen = (toklen + 3) & ~3;
++ if (toklen < 20 || toklen > datalen || paddedlen > datalen)
+ goto not_xdr;
+- datalen -= (toklen + 3) & ~3;
+- xdr += (toklen + 3) >> 2;
++ datalen -= paddedlen;
++ xdr += paddedlen >> 2;
+
+ } while (--loop > 0);
+
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 10d3e2874dd1..7c2fea69c832 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -492,7 +492,9 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
+ {
+ addr->sa.sa_family = AF_INET6;
+ addr->v6.sin6_port = port;
++ addr->v6.sin6_flowinfo = 0;
+ addr->v6.sin6_addr = *saddr;
++ addr->v6.sin6_scope_id = 0;
+ }
+
+ /* Compare addresses exactly.
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index dd3dbed89c8f..da79f9b86dfd 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -310,8 +310,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
+ [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
+ [NL80211_ATTR_PID] = { .type = NLA_U32 },
+ [NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
+- [NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
+- .len = WLAN_PMKID_LEN },
++ [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
+ [NL80211_ATTR_DURATION] = { .type = NLA_U32 },
+ [NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
+ [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
+@@ -5044,6 +5043,10 @@ static int validate_scan_freqs(struct nlattr *freqs)
+ struct nlattr *attr1, *attr2;
+ int n_channels = 0, tmp1, tmp2;
+
++ nla_for_each_nested(attr1, freqs, tmp1)
++ if (nla_len(attr1) != sizeof(u32))
++ return 0;
++
+ nla_for_each_nested(attr1, freqs, tmp1) {
+ n_channels++;
+ /*
+@@ -8010,6 +8013,9 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
+ if (err)
+ return err;
+
++ if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
++ !tb[NL80211_REKEY_DATA_KCK])
++ return -EINVAL;
+ if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
+ return -ERANGE;
+ if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index ea970b8002a2..10c556e373b0 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -3201,9 +3201,15 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ struct xfrm_state *x_new[XFRM_MAX_DEPTH];
+ struct xfrm_migrate *mp;
+
++ /* Stage 0 - sanity checks */
+ if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
+ goto out;
+
++ if (dir >= XFRM_POLICY_MAX) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ /* Stage 1 - find policy */
+ if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
+ err = -ENOENT;
+diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
+index c4c8df4b214d..b7d7cffe7349 100644
+--- a/security/keys/encrypted-keys/encrypted.c
++++ b/security/keys/encrypted-keys/encrypted.c
+@@ -315,6 +315,13 @@ static struct key *request_user_key(const char *master_desc, u8 **master_key,
+
+ down_read(&ukey->sem);
+ upayload = ukey->payload.data;
++ if (!upayload) {
++ /* key was revoked before we acquired its semaphore */
++ up_read(&ukey->sem);
++ key_put(ukey);
++ ukey = ERR_PTR(-EKEYREVOKED);
++ goto error;
++ }
+ *master_key = upayload->data;
+ *master_keylen = upayload->datalen;
+ error:
+@@ -428,7 +435,7 @@ static int init_blkcipher_desc(struct blkcipher_desc *desc, const u8 *key,
+ static struct key *request_master_key(struct encrypted_key_payload *epayload,
+ u8 **master_key, size_t *master_keylen)
+ {
+- struct key *mkey = NULL;
++ struct key *mkey = ERR_PTR(-EINVAL);
+
+ if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX,
+ KEY_TRUSTED_PREFIX_LEN)) {
+diff --git a/security/keys/internal.h b/security/keys/internal.h
+index d4f1468b9b50..ce6d4634a840 100644
+--- a/security/keys/internal.h
++++ b/security/keys/internal.h
+@@ -126,7 +126,7 @@ extern key_ref_t search_process_keyrings(struct key_type *type,
+ key_match_func_t match,
+ const struct cred *cred);
+
+-extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
++extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
+
+ extern int install_user_keyrings(void);
+ extern int install_thread_keyring_to_cred(struct cred *);
+diff --git a/security/keys/key.c b/security/keys/key.c
+index 6595b2dd89fe..248c2e731375 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -299,6 +299,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
+
+ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
+ key->flags |= 1 << KEY_FLAG_IN_QUOTA;
++ if (flags & KEY_ALLOC_UID_KEYRING)
++ key->flags |= 1 << KEY_FLAG_UID_KEYRING;
+
+ memset(&key->type_data, 0, sizeof(key->type_data));
+
+@@ -897,6 +899,16 @@ error:
+ */
+ __key_link_end(keyring, ktype, prealloc);
+
++ key = key_ref_to_ptr(key_ref);
++ if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
++ ret = wait_for_key_construction(key, true);
++ if (ret < 0) {
++ key_ref_put(key_ref);
++ key_ref = ERR_PTR(ret);
++ goto error_free_prep;
++ }
++ }
++
+ key_ref = __key_update(key_ref, &prep);
+ goto error_free_prep;
+ }
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 066baa1926bb..7576f49eeb34 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -93,7 +93,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
+ payload = NULL;
+
+ vm = false;
+- if (_payload) {
++ if (plen) {
+ ret = -ENOMEM;
+ payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
+ if (!payload) {
+@@ -327,7 +327,7 @@ long keyctl_update_key(key_serial_t id,
+
+ /* pull the payload in if one was supplied */
+ payload = NULL;
+- if (_payload) {
++ if (plen) {
+ ret = -ENOMEM;
+ payload = kmalloc(plen, GFP_KERNEL);
+ if (!payload)
+diff --git a/security/keys/keyring.c b/security/keys/keyring.c
+index 6ece7f2e5707..b0cabf68c678 100644
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -583,15 +583,15 @@ found:
+ /*
+ * Find a keyring with the specified name.
+ *
+- * All named keyrings in the current user namespace are searched, provided they
+- * grant Search permission directly to the caller (unless this check is
+- * skipped). Keyrings whose usage points have reached zero or who have been
+- * revoked are skipped.
++ * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
++ * user in the current user namespace are considered. If @uid_keyring is %true,
++ * the keyring additionally must have been allocated as a user or user session
++ * keyring; otherwise, it must grant Search permission directly to the caller.
+ *
+ * Returns a pointer to the keyring with the keyring's refcount having being
+ * incremented on success. -ENOKEY is returned if a key could not be found.
+ */
+-struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
++struct key *find_keyring_by_name(const char *name, bool uid_keyring)
+ {
+ struct key *keyring;
+ int bucket;
+@@ -619,10 +619,15 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
+ if (strcmp(keyring->description, name) != 0)
+ continue;
+
+- if (!skip_perm_check &&
+- key_permission(make_key_ref(keyring, 0),
+- KEY_SEARCH) < 0)
+- continue;
++ if (uid_keyring) {
++ if (!test_bit(KEY_FLAG_UID_KEYRING,
++ &keyring->flags))
++ continue;
++ } else {
++ if (key_permission(make_key_ref(keyring, 0),
++ KEY_SEARCH) < 0)
++ continue;
++ }
+
+ /* we've got a match but we might end up racing with
+ * key_cleanup() if the keyring is currently 'dead'
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+index 33384662fc82..f58a5aa05fa4 100644
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -76,7 +76,9 @@ int install_user_keyrings(void)
+ if (IS_ERR(uid_keyring)) {
+ uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
+ cred, user_keyring_perm,
+- KEY_ALLOC_IN_QUOTA, NULL);
++ KEY_ALLOC_UID_KEYRING |
++ KEY_ALLOC_IN_QUOTA,
++ NULL);
+ if (IS_ERR(uid_keyring)) {
+ ret = PTR_ERR(uid_keyring);
+ goto error;
+@@ -92,7 +94,9 @@ int install_user_keyrings(void)
+ session_keyring =
+ keyring_alloc(buf, user->uid, INVALID_GID,
+ cred, user_keyring_perm,
+- KEY_ALLOC_IN_QUOTA, NULL);
++ KEY_ALLOC_UID_KEYRING |
++ KEY_ALLOC_IN_QUOTA,
++ NULL);
+ if (IS_ERR(session_keyring)) {
+ ret = PTR_ERR(session_keyring);
+ goto error_release;
+diff --git a/sound/core/control.c b/sound/core/control.c
+index 251bc575f5c3..c39282611368 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1088,7 +1088,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
+ mutex_lock(&ue->card->user_ctl_lock);
+ change = ue->tlv_data_size != size;
+ if (!change)
+- change = memcmp(ue->tlv_data, new_data, size);
++ change = memcmp(ue->tlv_data, new_data, size) != 0;
+ kfree(ue->tlv_data);
+ ue->tlv_data = new_data;
+ ue->tlv_data_size = size;
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index d449dde1bf50..7b5a7902b7a2 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1248,6 +1248,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client,
+ struct snd_seq_client_port *port;
+ struct snd_seq_port_info info;
+ struct snd_seq_port_callback *callback;
++ int port_idx;
+
+ if (copy_from_user(&info, arg, sizeof(info)))
+ return -EFAULT;
+@@ -1261,7 +1262,9 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client,
+ return -ENOMEM;
+
+ if (client->type == USER_CLIENT && info.kernel) {
+- snd_seq_delete_port(client, port->addr.port);
++ port_idx = port->addr.port;
++ snd_seq_port_unlock(port);
++ snd_seq_delete_port(client, port_idx);
+ return -EINVAL;
+ }
+ if (client->type == KERNEL_CLIENT) {
+@@ -1283,6 +1286,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client,
+
+ snd_seq_set_port_info(port, &info);
+ snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
++ snd_seq_port_unlock(port);
+
+ if (copy_to_user(arg, &info, sizeof(info)))
+ return -EFAULT;
+diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
+index ee0522a8f730..a28d1acad574 100644
+--- a/sound/core/seq/seq_ports.c
++++ b/sound/core/seq/seq_ports.c
+@@ -122,7 +122,9 @@ static void port_subs_info_init(struct snd_seq_port_subs_info *grp)
+ }
+
+
+-/* create a port, port number is returned (-1 on failure) */
++/* create a port, port number is returned (-1 on failure);
++ * the caller needs to unref the port via snd_seq_port_unlock() appropriately
++ */
+ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
+ int port)
+ {
+@@ -153,6 +155,7 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
+ snd_use_lock_init(&new_port->use_lock);
+ port_subs_info_init(&new_port->c_src);
+ port_subs_info_init(&new_port->c_dest);
++ snd_use_lock_use(&new_port->use_lock);
+
+ num = port >= 0 ? port : 0;
+ mutex_lock(&client->ports_mutex);
+@@ -167,9 +170,9 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
+ list_add_tail(&new_port->list, &p->list);
+ client->num_ports++;
+ new_port->addr.port = num; /* store the port number in the port */
++ sprintf(new_port->name, "port-%d", num);
+ write_unlock_irqrestore(&client->ports_lock, flags);
+ mutex_unlock(&client->ports_mutex);
+- sprintf(new_port->name, "port-%d", num);
+
+ return new_port;
+ }
+diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
+index bbc782e364b0..9118fb8cc100 100644
+--- a/tools/perf/ui/browser.c
++++ b/tools/perf/ui/browser.c
+@@ -672,7 +672,7 @@ static void __ui_browser__line_arrow_down(struct ui_browser *browser,
+ ui_browser__gotorc(browser, row, column + 1);
+ SLsmg_draw_hline(2);
+
+- if (row++ == 0)
++ if (++row == 0)
+ goto out;
+ } else
+ row = 0;