author | Mike Pagano <mpagano@gentoo.org> | 2019-06-04 07:11:36 -0400
---|---|---
committer | Mike Pagano <mpagano@gentoo.org> | 2019-06-04 07:11:36 -0400
commit | 1f5bd0eab3b265afe827baee5f0992f7acfb80a7 (patch) |
tree | 2abaeb1ee7171a2fd66715abf54add8c25d13a04 |
parent | Linux patch 4.19.47 (diff) |
download | linux-patches-4.19-49.tar.gz, linux-patches-4.19-49.tar.bz2, linux-patches-4.19-49.zip |
Linux patch 4.19.48 (4.19-49)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4
-rw-r--r-- | 1047_linux-4.19.48.patch | 2086 |
2 files changed, 2090 insertions, 0 deletions
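A large part of 1047_linux-4.19.48.patch below backports the upstream move from the Makefile-probed CC_HAVE_ASM_GOTO define to the Kconfig symbol CONFIG_CC_HAS_ASM_GOTO, so that CONFIG_JUMP_LABEL alone gates the static-key (jump label) machinery; the remainder are driver, networking, and crypto fixes. As a minimal sketch of the API this rework affects (not part of the patch; the key and function names are hypothetical), a static-key consumer looks roughly like this:

```c
#include <linux/jump_label.h>
#include <linux/printk.h>

/* Hypothetical key guarding a rarely taken path. With CONFIG_JUMP_LABEL
 * enabled (which, after this patch, also requires CONFIG_CC_HAS_ASM_GOTO)
 * the branch below is patched in place at runtime; without it, the test
 * degrades to an ordinary atomic counter check. */
static DEFINE_STATIC_KEY_FALSE(example_feature_key);

static void example_hot_path(void)
{
	if (static_branch_unlikely(&example_feature_key))
		pr_info("example feature enabled\n");
}

static void example_feature_enable(void)
{
	static_branch_enable(&example_feature_key);
}
```

Moving the compiler probe into Kconfig lets JUMP_LABEL itself depend on asm-goto support, instead of each user checking CC_HAVE_ASM_GOTO separately, which is why the patch can drop the HAVE_JUMP_LABEL guards throughout arch and core code.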
diff --git a/0000_README b/0000_README index 0c0cd1aa..7740bafa 100644 --- a/0000_README +++ b/0000_README @@ -231,6 +231,10 @@ Patch: 1046_linux-4.19.47.patch From: http://www.kernel.org Desc: Linux 4.19.47 +Patch: 1047_linux-4.19.48.patch +From: http://www.kernel.org +Desc: Linux 4.19.48 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1047_linux-4.19.48.patch b/1047_linux-4.19.48.patch new file mode 100644 index 00000000..ad12e2e6 --- /dev/null +++ b/1047_linux-4.19.48.patch @@ -0,0 +1,2086 @@ +diff --git a/Makefile b/Makefile +index b3ba28ff73d5..42529a87f3b4 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 19 +-SUBLEVEL = 47 ++SUBLEVEL = 48 + EXTRAVERSION = + NAME = "People's Front" + +@@ -508,13 +508,6 @@ export RETPOLINE_VDSO_CFLAGS + KBUILD_CFLAGS += $(call cc-option,-fno-PIE) + KBUILD_AFLAGS += $(call cc-option,-fno-PIE) + +-# check for 'asm goto' +-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) +- CC_HAVE_ASM_GOTO := 1 +- KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO +- KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO +-endif +- + # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included. + # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile. + # CC_VERSION_TEXT is referenced from Kconfig (so it needs export), +diff --git a/arch/Kconfig b/arch/Kconfig +index 6801123932a5..a336548487e6 100644 +--- a/arch/Kconfig ++++ b/arch/Kconfig +@@ -71,6 +71,7 @@ config KPROBES + config JUMP_LABEL + bool "Optimize very unlikely/likely branches" + depends on HAVE_ARCH_JUMP_LABEL ++ depends on CC_HAS_ASM_GOTO + help + This option enables a transparent branch optimization that + makes certain almost-always-true or almost-always-false branch +diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c +index 90bce3d9928e..303b3ab87f7e 100644 +--- a/arch/arm/kernel/jump_label.c ++++ b/arch/arm/kernel/jump_label.c +@@ -4,8 +4,6 @@ + #include <asm/patch.h> + #include <asm/insn.h> + +-#ifdef HAVE_JUMP_LABEL +- + static void __arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type, + bool is_static) +@@ -35,5 +33,3 @@ void arch_jump_label_transform_static(struct jump_entry *entry, + { + __arch_jump_label_transform(entry, type, true); + } +- +-#endif +diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c +index e0756416e567..b90754aebd12 100644 +--- a/arch/arm64/kernel/jump_label.c ++++ b/arch/arm64/kernel/jump_label.c +@@ -20,8 +20,6 @@ + #include <linux/jump_label.h> + #include <asm/insn.h> + +-#ifdef HAVE_JUMP_LABEL +- + void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) + { +@@ -49,5 +47,3 @@ void arch_jump_label_transform_static(struct jump_entry *entry, + * NOP needs to be replaced by a branch. 
+ */ + } +- +-#endif /* HAVE_JUMP_LABEL */ +diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c +index 32e3168316cd..ab943927f97a 100644 +--- a/arch/mips/kernel/jump_label.c ++++ b/arch/mips/kernel/jump_label.c +@@ -16,8 +16,6 @@ + #include <asm/cacheflush.h> + #include <asm/inst.h> + +-#ifdef HAVE_JUMP_LABEL +- + /* + * Define parameters for the standard MIPS and the microMIPS jump + * instruction encoding respectively: +@@ -70,5 +68,3 @@ void arch_jump_label_transform(struct jump_entry *e, + + mutex_unlock(&text_mutex); + } +- +-#endif /* HAVE_JUMP_LABEL */ +diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h +index 1f4691ce4126..e398173ae67d 100644 +--- a/arch/powerpc/include/asm/asm-prototypes.h ++++ b/arch/powerpc/include/asm/asm-prototypes.h +@@ -38,7 +38,7 @@ extern struct static_key hcall_tracepoint_key; + void __trace_hcall_entry(unsigned long opcode, unsigned long *args); + void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf); + /* OPAL tracing */ +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + extern struct static_key opal_tracepoint_key; + #endif + +diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c +index 6472472093d0..0080c5fbd225 100644 +--- a/arch/powerpc/kernel/jump_label.c ++++ b/arch/powerpc/kernel/jump_label.c +@@ -11,7 +11,6 @@ + #include <linux/jump_label.h> + #include <asm/code-patching.h> + +-#ifdef HAVE_JUMP_LABEL + void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) + { +@@ -22,4 +21,3 @@ void arch_jump_label_transform(struct jump_entry *entry, + else + patch_instruction(addr, PPC_INST_NOP); + } +-#endif +diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c +index 1ab7d26c0a2c..f16a43540e30 100644 +--- a/arch/powerpc/platforms/powernv/opal-tracepoints.c ++++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c +@@ -4,7 +4,7 @@ + #include <asm/trace.h> + #include <asm/asm-prototypes.h> + +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + struct static_key opal_tracepoint_key = STATIC_KEY_INIT; + + int opal_tracepoint_regfunc(void) +diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S +index 251528231a9e..f4875fe3f8ff 100644 +--- a/arch/powerpc/platforms/powernv/opal-wrappers.S ++++ b/arch/powerpc/platforms/powernv/opal-wrappers.S +@@ -20,7 +20,7 @@ + .section ".text" + + #ifdef CONFIG_TRACEPOINTS +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + #define OPAL_BRANCH(LABEL) \ + ARCH_STATIC_BRANCH(LABEL, opal_tracepoint_key) + #else +diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S +index d91412c591ef..50dc9426d0be 100644 +--- a/arch/powerpc/platforms/pseries/hvCall.S ++++ b/arch/powerpc/platforms/pseries/hvCall.S +@@ -19,7 +19,7 @@ + + #ifdef CONFIG_TRACEPOINTS + +-#ifndef HAVE_JUMP_LABEL ++#ifndef CONFIG_JUMP_LABEL + .section ".toc","aw" + + .globl hcall_tracepoint_refcount +@@ -79,7 +79,7 @@ hcall_tracepoint_refcount: + mr r5,BUFREG; \ + __HCALL_INST_POSTCALL + +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + #define HCALL_BRANCH(LABEL) \ + ARCH_STATIC_BRANCH(LABEL, hcall_tracepoint_key) + #else +diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c +index d3992ced0782..9e52b686a8fa 100644 +--- a/arch/powerpc/platforms/pseries/lpar.c ++++ b/arch/powerpc/platforms/pseries/lpar.c +@@ -828,7 +828,7 @@ 
EXPORT_SYMBOL(arch_free_page); + #endif /* CONFIG_PPC_BOOK3S_64 */ + + #ifdef CONFIG_TRACEPOINTS +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + struct static_key hcall_tracepoint_key = STATIC_KEY_INIT; + + int hcall_tracepoint_regfunc(void) +diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile +index dbfd1730e631..b205c0ff0b22 100644 +--- a/arch/s390/kernel/Makefile ++++ b/arch/s390/kernel/Makefile +@@ -44,7 +44,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' + obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o + obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o + obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o +-obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o ++obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o + obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o + obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o + obj-y += nospec-branch.o +@@ -68,6 +68,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o + obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o + obj-$(CONFIG_CRASH_DUMP) += crash_dump.o + obj-$(CONFIG_UPROBES) += uprobes.o ++obj-$(CONFIG_JUMP_LABEL) += jump_label.o + + obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o + obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o +diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c +index 43f8430fb67d..68f415e334a5 100644 +--- a/arch/s390/kernel/jump_label.c ++++ b/arch/s390/kernel/jump_label.c +@@ -10,8 +10,6 @@ + #include <linux/jump_label.h> + #include <asm/ipl.h> + +-#ifdef HAVE_JUMP_LABEL +- + struct insn { + u16 opcode; + s32 offset; +@@ -102,5 +100,3 @@ void arch_jump_label_transform_static(struct jump_entry *entry, + { + __jump_label_transform(entry, type, 1); + } +- +-#endif +diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile +index cf8640841b7a..97c0e19263d1 100644 +--- a/arch/sparc/kernel/Makefile ++++ b/arch/sparc/kernel/Makefile +@@ -118,4 +118,4 @@ pc--$(CONFIG_PERF_EVENTS) := perf_event.o + obj-$(CONFIG_SPARC64) += $(pc--y) + + obj-$(CONFIG_UPROBES) += uprobes.o +-obj-$(CONFIG_SPARC64) += jump_label.o ++obj-$(CONFIG_JUMP_LABEL) += jump_label.o +diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c +index 7f8eac51df33..a4cfaeecaf5e 100644 +--- a/arch/sparc/kernel/jump_label.c ++++ b/arch/sparc/kernel/jump_label.c +@@ -9,8 +9,6 @@ + + #include <asm/cacheflush.h> + +-#ifdef HAVE_JUMP_LABEL +- + void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) + { +@@ -47,5 +45,3 @@ void arch_jump_label_transform(struct jump_entry *entry, + flushi(insn); + mutex_unlock(&text_mutex); + } +- +-#endif +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index ab2071e40efe..ce0d0424a53d 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -305,7 +305,7 @@ vdso_install: + + archprepare: checkbin + checkbin: +-ifndef CC_HAVE_ASM_GOTO ++ifndef CONFIG_CC_HAS_ASM_GOTO + @echo Compiler lacks asm-goto support. 
+ @exit 1 + endif +diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h +index 352e70cd33e8..e699b2041665 100644 +--- a/arch/x86/entry/calling.h ++++ b/arch/x86/entry/calling.h +@@ -337,7 +337,7 @@ For 32-bit we have the following conventions - kernel is built with + */ + .macro CALL_enter_from_user_mode + #ifdef CONFIG_CONTEXT_TRACKING +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0 + #endif + call enter_from_user_mode +diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h +index aced6c9290d6..ce95b8cbd229 100644 +--- a/arch/x86/include/asm/cpufeature.h ++++ b/arch/x86/include/asm/cpufeature.h +@@ -140,7 +140,7 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); + + #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit) + +-#if defined(__clang__) && !defined(CC_HAVE_ASM_GOTO) ++#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO) + + /* + * Workaround for the sake of BPF compilation which utilizes kernel +diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h +index 8c0de4282659..7010e1c594c4 100644 +--- a/arch/x86/include/asm/jump_label.h ++++ b/arch/x86/include/asm/jump_label.h +@@ -2,19 +2,6 @@ + #ifndef _ASM_X86_JUMP_LABEL_H + #define _ASM_X86_JUMP_LABEL_H + +-#ifndef HAVE_JUMP_LABEL +-/* +- * For better or for worse, if jump labels (the gcc extension) are missing, +- * then the entire static branch patching infrastructure is compiled out. +- * If that happens, the code in here will malfunction. Raise a compiler +- * error instead. +- * +- * In theory, jump labels and the static branch patching infrastructure +- * could be decoupled to fix this. +- */ +-#error asm/jump_label.h included on a non-jump-label kernel +-#endif +- + #define JUMP_LABEL_NOP_SIZE 5 + + #ifdef CONFIG_X86_64 +diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h +index 4914a3e7c803..033dc7ca49e9 100644 +--- a/arch/x86/include/asm/rmwcc.h ++++ b/arch/x86/include/asm/rmwcc.h +@@ -4,7 +4,7 @@ + + #define __CLOBBERS_MEM(clb...) 
"memory", ## clb + +-#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO) ++#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CONFIG_CC_HAS_ASM_GOTO) + + /* Use asm goto */ + +@@ -21,7 +21,7 @@ cc_label: \ + #define __BINARY_RMWcc_ARG " %1, " + + +-#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ ++#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */ + + /* Use flags output or a set instruction */ + +@@ -36,7 +36,7 @@ do { \ + + #define __BINARY_RMWcc_ARG " %2, " + +-#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ ++#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */ + + #define GEN_UNARY_RMWcc(op, var, arg0, cc) \ + __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM()) +diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile +index 8824d01c0c35..da0b6bc090f3 100644 +--- a/arch/x86/kernel/Makefile ++++ b/arch/x86/kernel/Makefile +@@ -49,7 +49,8 @@ obj-$(CONFIG_COMPAT) += signal_compat.o + obj-y += traps.o idt.o irq.o irq_$(BITS).o dumpstack_$(BITS).o + obj-y += time.o ioport.o dumpstack.o nmi.o + obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o +-obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o ++obj-y += setup.o x86_init.o i8259.o irqinit.o ++obj-$(CONFIG_JUMP_LABEL) += jump_label.o + obj-$(CONFIG_IRQ_WORK) += irq_work.o + obj-y += probe_roms.o + obj-$(CONFIG_X86_64) += sys_x86_64.o +diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c +index eeea935e9bb5..4c3d9a3d45b2 100644 +--- a/arch/x86/kernel/jump_label.c ++++ b/arch/x86/kernel/jump_label.c +@@ -16,8 +16,6 @@ + #include <asm/alternative.h> + #include <asm/text-patching.h> + +-#ifdef HAVE_JUMP_LABEL +- + union jump_code_union { + char code[JUMP_LABEL_NOP_SIZE]; + struct { +@@ -142,5 +140,3 @@ __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, + if (jlstate == JL_STATE_UPDATE) + __jump_label_transform(entry, type, text_poke_early, 1); + } +- +-#endif +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index 860bd271619d..4a688ef9e448 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -456,7 +456,7 @@ FOP_END; + + /* + * XXX: inoutclob user must know where the argument is being expanded. +- * Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault. ++ * Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault. + */ + #define asm_safe(insn, inoutclob...) \ + ({ \ +diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c +index dd8b8716467a..2d1a8cd35509 100644 +--- a/drivers/crypto/vmx/ghash.c ++++ b/drivers/crypto/vmx/ghash.c +@@ -1,22 +1,14 @@ ++// SPDX-License-Identifier: GPL-2.0 + /** + * GHASH routines supporting VMX instructions on the Power 8 + * +- * Copyright (C) 2015 International Business Machines Inc. +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; version 2 only. +- * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. +- * +- * You should have received a copy of the GNU General Public License +- * along with this program; if not, write to the Free Software +- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
++ * Copyright (C) 2015, 2019 International Business Machines Inc. + * + * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> ++ * ++ * Extended by Daniel Axtens <dja@axtens.net> to replace the fallback ++ * mechanism. The new approach is based on arm64 code, which is: ++ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org> + */ + + #include <linux/types.h> +@@ -39,71 +31,25 @@ void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], + const u8 *in, size_t len); + + struct p8_ghash_ctx { ++ /* key used by vector asm */ + u128 htable[16]; +- struct crypto_shash *fallback; ++ /* key used by software fallback */ ++ be128 key; + }; + + struct p8_ghash_desc_ctx { + u64 shash[2]; + u8 buffer[GHASH_DIGEST_SIZE]; + int bytes; +- struct shash_desc fallback_desc; + }; + +-static int p8_ghash_init_tfm(struct crypto_tfm *tfm) +-{ +- const char *alg = "ghash-generic"; +- struct crypto_shash *fallback; +- struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); +- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); +- +- fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK); +- if (IS_ERR(fallback)) { +- printk(KERN_ERR +- "Failed to allocate transformation for '%s': %ld\n", +- alg, PTR_ERR(fallback)); +- return PTR_ERR(fallback); +- } +- +- crypto_shash_set_flags(fallback, +- crypto_shash_get_flags((struct crypto_shash +- *) tfm)); +- +- /* Check if the descsize defined in the algorithm is still enough. */ +- if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx) +- + crypto_shash_descsize(fallback)) { +- printk(KERN_ERR +- "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n", +- alg, +- shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx), +- crypto_shash_descsize(fallback)); +- return -EINVAL; +- } +- ctx->fallback = fallback; +- +- return 0; +-} +- +-static void p8_ghash_exit_tfm(struct crypto_tfm *tfm) +-{ +- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); +- +- if (ctx->fallback) { +- crypto_free_shash(ctx->fallback); +- ctx->fallback = NULL; +- } +-} +- + static int p8_ghash_init(struct shash_desc *desc) + { +- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); + struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); + + dctx->bytes = 0; + memset(dctx->shash, 0, GHASH_DIGEST_SIZE); +- dctx->fallback_desc.tfm = ctx->fallback; +- dctx->fallback_desc.flags = desc->flags; +- return crypto_shash_init(&dctx->fallback_desc); ++ return 0; + } + + static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, +@@ -121,7 +67,51 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); +- return crypto_shash_setkey(ctx->fallback, key, keylen); ++ ++ memcpy(&ctx->key, key, GHASH_BLOCK_SIZE); ++ ++ return 0; ++} ++ ++static inline void __ghash_block(struct p8_ghash_ctx *ctx, ++ struct p8_ghash_desc_ctx *dctx) ++{ ++ if (!IN_INTERRUPT) { ++ preempt_disable(); ++ pagefault_disable(); ++ enable_kernel_vsx(); ++ gcm_ghash_p8(dctx->shash, ctx->htable, ++ dctx->buffer, GHASH_DIGEST_SIZE); ++ disable_kernel_vsx(); ++ pagefault_enable(); ++ preempt_enable(); ++ } else { ++ crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE); ++ gf128mul_lle((be128 *)dctx->shash, &ctx->key); ++ } ++} ++ ++static inline void __ghash_blocks(struct p8_ghash_ctx *ctx, ++ struct p8_ghash_desc_ctx *dctx, ++ const u8 *src, unsigned int srclen) ++{ ++ if (!IN_INTERRUPT) { ++ preempt_disable(); ++ pagefault_disable(); ++ enable_kernel_vsx(); ++ 
gcm_ghash_p8(dctx->shash, ctx->htable, ++ src, srclen); ++ disable_kernel_vsx(); ++ pagefault_enable(); ++ preempt_enable(); ++ } else { ++ while (srclen >= GHASH_BLOCK_SIZE) { ++ crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE); ++ gf128mul_lle((be128 *)dctx->shash, &ctx->key); ++ srclen -= GHASH_BLOCK_SIZE; ++ src += GHASH_BLOCK_SIZE; ++ } ++ } + } + + static int p8_ghash_update(struct shash_desc *desc, +@@ -131,49 +121,33 @@ static int p8_ghash_update(struct shash_desc *desc, + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); + struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); + +- if (IN_INTERRUPT) { +- return crypto_shash_update(&dctx->fallback_desc, src, +- srclen); +- } else { +- if (dctx->bytes) { +- if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { +- memcpy(dctx->buffer + dctx->bytes, src, +- srclen); +- dctx->bytes += srclen; +- return 0; +- } ++ if (dctx->bytes) { ++ if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { + memcpy(dctx->buffer + dctx->bytes, src, +- GHASH_DIGEST_SIZE - dctx->bytes); +- preempt_disable(); +- pagefault_disable(); +- enable_kernel_vsx(); +- gcm_ghash_p8(dctx->shash, ctx->htable, +- dctx->buffer, GHASH_DIGEST_SIZE); +- disable_kernel_vsx(); +- pagefault_enable(); +- preempt_enable(); +- src += GHASH_DIGEST_SIZE - dctx->bytes; +- srclen -= GHASH_DIGEST_SIZE - dctx->bytes; +- dctx->bytes = 0; +- } +- len = srclen & ~(GHASH_DIGEST_SIZE - 1); +- if (len) { +- preempt_disable(); +- pagefault_disable(); +- enable_kernel_vsx(); +- gcm_ghash_p8(dctx->shash, ctx->htable, src, len); +- disable_kernel_vsx(); +- pagefault_enable(); +- preempt_enable(); +- src += len; +- srclen -= len; +- } +- if (srclen) { +- memcpy(dctx->buffer, src, srclen); +- dctx->bytes = srclen; ++ srclen); ++ dctx->bytes += srclen; ++ return 0; + } +- return 0; ++ memcpy(dctx->buffer + dctx->bytes, src, ++ GHASH_DIGEST_SIZE - dctx->bytes); ++ ++ __ghash_block(ctx, dctx); ++ ++ src += GHASH_DIGEST_SIZE - dctx->bytes; ++ srclen -= GHASH_DIGEST_SIZE - dctx->bytes; ++ dctx->bytes = 0; ++ } ++ len = srclen & ~(GHASH_DIGEST_SIZE - 1); ++ if (len) { ++ __ghash_blocks(ctx, dctx, src, len); ++ src += len; ++ srclen -= len; + } ++ if (srclen) { ++ memcpy(dctx->buffer, src, srclen); ++ dctx->bytes = srclen; ++ } ++ return 0; + } + + static int p8_ghash_final(struct shash_desc *desc, u8 *out) +@@ -182,25 +156,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out) + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); + struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); + +- if (IN_INTERRUPT) { +- return crypto_shash_final(&dctx->fallback_desc, out); +- } else { +- if (dctx->bytes) { +- for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) +- dctx->buffer[i] = 0; +- preempt_disable(); +- pagefault_disable(); +- enable_kernel_vsx(); +- gcm_ghash_p8(dctx->shash, ctx->htable, +- dctx->buffer, GHASH_DIGEST_SIZE); +- disable_kernel_vsx(); +- pagefault_enable(); +- preempt_enable(); +- dctx->bytes = 0; +- } +- memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); +- return 0; ++ if (dctx->bytes) { ++ for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) ++ dctx->buffer[i] = 0; ++ __ghash_block(ctx, dctx); ++ dctx->bytes = 0; + } ++ memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); ++ return 0; + } + + struct shash_alg p8_ghash_alg = { +@@ -215,11 +178,8 @@ struct shash_alg p8_ghash_alg = { + .cra_name = "ghash", + .cra_driver_name = "p8_ghash", + .cra_priority = 1000, +- .cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = GHASH_BLOCK_SIZE, + .cra_ctxsize = 
sizeof(struct p8_ghash_ctx), + .cra_module = THIS_MODULE, +- .cra_init = p8_ghash_init_tfm, +- .cra_exit = p8_ghash_exit_tfm, + }, + }; +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index 091b454e83fc..039beb5e0fa2 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -3107,13 +3107,18 @@ static int bond_slave_netdev_event(unsigned long event, + case NETDEV_CHANGE: + /* For 802.3ad mode only: + * Getting invalid Speed/Duplex values here will put slave +- * in weird state. So mark it as link-fail for the time +- * being and let link-monitoring (miimon) set it right when +- * correct speeds/duplex are available. ++ * in weird state. Mark it as link-fail if the link was ++ * previously up or link-down if it hasn't yet come up, and ++ * let link-monitoring (miimon) set it right when correct ++ * speeds/duplex are available. + */ + if (bond_update_speed_duplex(slave) && +- BOND_MODE(bond) == BOND_MODE_8023AD) +- slave->link = BOND_LINK_FAIL; ++ BOND_MODE(bond) == BOND_MODE_8023AD) { ++ if (slave->last_link_up) ++ slave->link = BOND_LINK_FAIL; ++ else ++ slave->link = BOND_LINK_DOWN; ++ } + + if (BOND_MODE(bond) == BOND_MODE_8023AD) + bond_3ad_adapter_speed_duplex_changed(slave); +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c +index 2caa5c0c2bc4..dfaad1c2c2b8 100644 +--- a/drivers/net/dsa/mv88e6xxx/chip.c ++++ b/drivers/net/dsa/mv88e6xxx/chip.c +@@ -877,7 +877,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, + err = mv88e6xxx_port_read(chip, port, s->reg + 1, ®); + if (err) + return U64_MAX; +- high = reg; ++ low |= ((u32)reg) << 16; + } + break; + case STATS_TYPE_BANK1: +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index de46331aefc1..c54a74de7b08 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -1599,6 +1599,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, + skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); + bnxt_reuse_rx_data(rxr, cons, data); + if (!skb) { ++ if (agg_bufs) ++ bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); + rc = -ENOMEM; + goto next_rx; + } +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +index c116f96956fe..f2aba5b160c2 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +@@ -228,6 +228,9 @@ static void cxgb4_process_flow_match(struct net_device *dev, + fs->val.ivlan = vlan_tci; + fs->mask.ivlan = vlan_tci_mask; + ++ fs->val.ivlan_vld = 1; ++ fs->mask.ivlan_vld = 1; ++ + /* Chelsio adapters use ivlan_vld bit to match vlan packets + * as 802.1Q. Also, when vlan tag is present in packets, + * ethtype match is used then to match on ethtype of inner +@@ -238,8 +241,6 @@ static void cxgb4_process_flow_match(struct net_device *dev, + * ethtype value with ethtype of inner header. 
+ */ + if (fs->val.ethtype == ETH_P_8021Q) { +- fs->val.ivlan_vld = 1; +- fs->mask.ivlan_vld = 1; + fs->val.ethtype = 0; + fs->mask.ethtype = 0; + } +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c +index ad41ace0a27a..bf715a367273 100644 +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -3571,7 +3571,7 @@ failed_init: + if (fep->reg_phy) + regulator_disable(fep->reg_phy); + failed_reset: +- pm_runtime_put(&pdev->dev); ++ pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + failed_regulator: + clk_disable_unprepare(fep->clk_ahb); +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c +index 2ba0d89aaf3c..28762314353f 100644 +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -4611,7 +4611,7 @@ static int mvneta_probe(struct platform_device *pdev) + err = register_netdev(dev); + if (err < 0) { + dev_err(&pdev->dev, "failed to register\n"); +- goto err_free_stats; ++ goto err_netdev; + } + + netdev_info(dev, "Using %s mac address %pM\n", mac_from, +@@ -4622,14 +4622,12 @@ static int mvneta_probe(struct platform_device *pdev) + return 0; + + err_netdev: +- unregister_netdev(dev); + if (pp->bm_priv) { + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, + 1 << pp->id); + mvneta_bm_put(pp->bm_priv); + } +-err_free_stats: + free_percpu(pp->stats); + err_free_ports: + free_percpu(pp->ports); +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +index 9b10abb604cb..59212d3d9587 100644 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +@@ -1404,7 +1404,7 @@ static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) + /* Set defaults to the MVPP2 port */ + static void mvpp2_defaults_set(struct mvpp2_port *port) + { +- int tx_port_num, val, queue, ptxq, lrxq; ++ int tx_port_num, val, queue, lrxq; + + if (port->priv->hw_version == MVPP21) { + /* Update TX FIFO MIN Threshold */ +@@ -1422,11 +1422,9 @@ static void mvpp2_defaults_set(struct mvpp2_port *port) + mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); + + /* Close bandwidth for all queues */ +- for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { +- ptxq = mvpp2_txq_phys(port->id, queue); ++ for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) + mvpp2_write(port->priv, +- MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0); +- } ++ MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0); + + /* Set refill period to 1 usec, refill tokens + * and bucket size to maximum +@@ -2271,7 +2269,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, + txq->descs_dma = 0; + + /* Set minimum bandwidth for disabled TXQs */ +- mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); ++ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0); + + /* Set Tx descriptors queue starting address and size */ + cpu = get_cpu(); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index b190c447aeb0..0f1c296c3ce4 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -3734,6 +3734,12 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, + netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n"); + } + ++ if 
(MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { ++ features &= ~NETIF_F_RXHASH; ++ if (netdev->features & NETIF_F_RXHASH) ++ netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n"); ++ } ++ + mutex_unlock(&priv->state_lock); + + return features; +@@ -3860,6 +3866,9 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) + memcpy(&priv->tstamp, &config, sizeof(config)); + mutex_unlock(&priv->state_lock); + ++ /* might need to fix some features */ ++ netdev_update_features(priv->netdev); ++ + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; + } +@@ -4702,6 +4711,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) + if (!priv->channels.params.scatter_fcs_en) + netdev->features &= ~NETIF_F_RXFCS; + ++ /* prefere CQE compression over rxhash */ ++ if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS)) ++ netdev->features &= ~NETIF_F_RXHASH; ++ + #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f) + if (FT_CAP(flow_modify_en) && + FT_CAP(modify_root) && +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index d181645fd968..c079f85593d6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -2220,7 +2220,7 @@ static struct mlx5_flow_root_namespace + cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type); + + /* Create the root namespace */ +- root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL); ++ root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL); + if (!root_ns) + return NULL; + +@@ -2363,6 +2363,7 @@ static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev) + cleanup_root_ns(steering->esw_egress_root_ns[i]); + + kfree(steering->esw_egress_root_ns); ++ steering->esw_egress_root_ns = NULL; + } + + static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev) +@@ -2377,6 +2378,7 @@ static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev) + cleanup_root_ns(steering->esw_ingress_root_ns[i]); + + kfree(steering->esw_ingress_root_ns); ++ steering->esw_ingress_root_ns = NULL; + } + + void mlx5_cleanup_fs(struct mlx5_core_dev *dev) +@@ -2505,6 +2507,7 @@ cleanup_root_ns: + for (i--; i >= 0; i--) + cleanup_root_ns(steering->esw_egress_root_ns[i]); + kfree(steering->esw_egress_root_ns); ++ steering->esw_egress_root_ns = NULL; + return err; + } + +@@ -2532,6 +2535,7 @@ cleanup_root_ns: + for (i--; i >= 0; i--) + cleanup_root_ns(steering->esw_ingress_root_ns[i]); + kfree(steering->esw_ingress_root_ns); ++ steering->esw_ingress_root_ns = NULL; + return err; + } + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 5debe93ea4eb..50c00822b2d8 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -2195,6 +2195,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) + if (priv->plat->axi) + stmmac_axi(priv, priv->ioaddr, priv->plat->axi); + ++ /* DMA CSR Channel configuration */ ++ for (chan = 0; chan < dma_csr_ch; chan++) ++ stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); ++ + /* DMA RX Channel Configuration */ + for (chan = 0; chan < rx_channels_count; chan++) { + rx_q = &priv->rx_queue[chan]; +@@ -2220,10 +2224,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) + tx_q->tx_tail_addr, chan); + } + +- /* DMA CSR Channel configuration */ 
+- for (chan = 0; chan < dma_csr_ch; chan++) +- stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); +- + return ret; + } + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +index bdd351597b55..093a223fe408 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +@@ -267,7 +267,8 @@ int stmmac_mdio_reset(struct mii_bus *bus) + of_property_read_u32_array(np, + "snps,reset-delays-us", data->delays, 3); + +- if (gpio_request(data->reset_gpio, "mdio-reset")) ++ if (devm_gpio_request(priv->device, data->reset_gpio, ++ "mdio-reset")) + return 0; + } + +diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c +index f77a2d9e7f9d..456a1f882b09 100644 +--- a/drivers/net/phy/marvell10g.c ++++ b/drivers/net/phy/marvell10g.c +@@ -27,6 +27,9 @@ + #include <linux/phy.h> + + enum { ++ MV_PMA_BOOT = 0xc050, ++ MV_PMA_BOOT_FATAL = BIT(0), ++ + MV_PCS_BASE_T = 0x0000, + MV_PCS_BASE_R = 0x1000, + MV_PCS_1000BASEX = 0x2000, +@@ -226,6 +229,16 @@ static int mv3310_probe(struct phy_device *phydev) + (phydev->c45_ids.devices_in_package & mmd_mask) != mmd_mask) + return -ENODEV; + ++ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_PMA_BOOT); ++ if (ret < 0) ++ return ret; ++ ++ if (ret & MV_PMA_BOOT_FATAL) { ++ dev_warn(&phydev->mdio.dev, ++ "PHY failed to boot firmware, status=%04x\n", ret); ++ return -ENODEV; ++ } ++ + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; +diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c +index 770aa624147f..10854977c55f 100644 +--- a/drivers/net/usb/usbnet.c ++++ b/drivers/net/usb/usbnet.c +@@ -506,6 +506,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) + + if (netif_running (dev->net) && + netif_device_present (dev->net) && ++ test_bit(EVENT_DEV_OPEN, &dev->flags) && + !test_bit (EVENT_RX_HALT, &dev->flags) && + !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) { + switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) { +@@ -1431,6 +1432,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, + spin_unlock_irqrestore(&dev->txq.lock, flags); + goto drop; + } ++ if (netif_queue_stopped(net)) { ++ usb_autopm_put_interface_async(dev->intf); ++ spin_unlock_irqrestore(&dev->txq.lock, flags); ++ goto drop; ++ } + + #ifdef CONFIG_PM + /* if this triggers the device is still a sleep */ +diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c +index ea4a08b83fa0..787966f44589 100644 +--- a/drivers/xen/xen-pciback/pciback_ops.c ++++ b/drivers/xen/xen-pciback/pciback_ops.c +@@ -127,8 +127,6 @@ void xen_pcibk_reset_device(struct pci_dev *dev) + if (pci_is_enabled(dev)) + pci_disable_device(dev); + +- pci_write_config_word(dev, PCI_COMMAND, 0); +- + dev->is_busmaster = 0; + } else { + pci_read_config_word(dev, PCI_COMMAND, &cmd); +diff --git a/include/linux/compiler.h b/include/linux/compiler.h +index 81c2238b884c..bb22908c79e8 100644 +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -319,29 +319,14 @@ static inline void *offset_to_ptr(const int *off) + #endif + #ifndef __compiletime_error + # define __compiletime_error(message) +-/* +- * Sparse complains of variable sized arrays due to the temporary variable in +- * __compiletime_assert. Unfortunately we can't just expand it out to make +- * sparse see a constant array size without breaking compiletime_assert on old +- * versions of GCC (e.g. 
4.2.4), so hide the array from sparse altogether. +- */ +-# ifndef __CHECKER__ +-# define __compiletime_error_fallback(condition) \ +- do { ((void)sizeof(char[1 - 2 * condition])); } while (0) +-# endif +-#endif +-#ifndef __compiletime_error_fallback +-# define __compiletime_error_fallback(condition) do { } while (0) + #endif + + #ifdef __OPTIMIZE__ + # define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ +- int __cond = !(condition); \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ +- if (__cond) \ ++ if (!(condition)) \ + prefix ## suffix(); \ +- __compiletime_error_fallback(__cond); \ + } while (0) + #else + # define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) +diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h +index db192becfec4..c2ded31a4cec 100644 +--- a/include/linux/compiler_types.h ++++ b/include/linux/compiler_types.h +@@ -151,6 +151,10 @@ struct ftrace_likely_data { + #define __assume_aligned(a, ...) + #endif + ++#ifndef asm_volatile_goto ++#define asm_volatile_goto(x...) asm goto(x) ++#endif ++ + /* Are two types/vars the same type (ignoring qualifiers)? */ + #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) + +diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h +index 2fd8006153c3..b3419da1a776 100644 +--- a/include/linux/dynamic_debug.h ++++ b/include/linux/dynamic_debug.h +@@ -2,7 +2,7 @@ + #ifndef _DYNAMIC_DEBUG_H + #define _DYNAMIC_DEBUG_H + +-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) ++#if defined(CONFIG_JUMP_LABEL) + #include <linux/jump_label.h> + #endif + +@@ -38,7 +38,7 @@ struct _ddebug { + #define _DPRINTK_FLAGS_DEFAULT 0 + #endif + unsigned int flags:8; +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + union { + struct static_key_true dd_key_true; + struct static_key_false dd_key_false; +@@ -83,7 +83,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor, + dd_key_init(key, init) \ + } + +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + + #define dd_key_init(key, init) key = (init) + +diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h +index 1a0b6f17a5d6..4c3e77687d4e 100644 +--- a/include/linux/jump_label.h ++++ b/include/linux/jump_label.h +@@ -71,10 +71,6 @@ + * Additional babbling in: Documentation/static-keys.txt + */ + +-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) +-# define HAVE_JUMP_LABEL +-#endif +- + #ifndef __ASSEMBLY__ + + #include <linux/types.h> +@@ -86,7 +82,7 @@ extern bool static_key_initialized; + "%s(): static key '%pS' used before call to jump_label_init()", \ + __func__, (key)) + +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + + struct static_key { + atomic_t enabled; +@@ -114,10 +110,10 @@ struct static_key { + struct static_key { + atomic_t enabled; + }; +-#endif /* HAVE_JUMP_LABEL */ ++#endif /* CONFIG_JUMP_LABEL */ + #endif /* __ASSEMBLY__ */ + +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + #include <asm/jump_label.h> + #endif + +@@ -130,7 +126,7 @@ enum jump_label_type { + + struct module; + +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + + #define JUMP_TYPE_FALSE 0UL + #define JUMP_TYPE_TRUE 1UL +@@ -184,7 +180,7 @@ extern void static_key_disable_cpuslocked(struct static_key *key); + { .enabled = { 0 }, \ + { .entries = (void *)JUMP_TYPE_FALSE } } + +-#else /* !HAVE_JUMP_LABEL */ ++#else /* !CONFIG_JUMP_LABEL */ + + #include <linux/atomic.h> + #include <linux/bug.h> +@@ -271,7 +267,7 @@ static inline void 
static_key_disable(struct static_key *key) + #define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) } + #define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) } + +-#endif /* HAVE_JUMP_LABEL */ ++#endif /* CONFIG_JUMP_LABEL */ + + #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE + #define jump_label_enabled static_key_enabled +@@ -335,7 +331,7 @@ extern bool ____wrong_branch_error(void); + static_key_count((struct static_key *)x) > 0; \ + }) + +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + + /* + * Combine the right initial value (type) with the right branch order +@@ -417,12 +413,12 @@ extern bool ____wrong_branch_error(void); + unlikely(branch); \ + }) + +-#else /* !HAVE_JUMP_LABEL */ ++#else /* !CONFIG_JUMP_LABEL */ + + #define static_branch_likely(x) likely(static_key_enabled(&(x)->key)) + #define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key)) + +-#endif /* HAVE_JUMP_LABEL */ ++#endif /* CONFIG_JUMP_LABEL */ + + /* + * Advanced usage; refcount, branch is enabled when: count != 0 +diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h +index baa8eabbaa56..a49f2b45b3f0 100644 +--- a/include/linux/jump_label_ratelimit.h ++++ b/include/linux/jump_label_ratelimit.h +@@ -5,21 +5,19 @@ + #include <linux/jump_label.h> + #include <linux/workqueue.h> + +-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) ++#if defined(CONFIG_JUMP_LABEL) + struct static_key_deferred { + struct static_key key; + unsigned long timeout; + struct delayed_work work; + }; +-#endif + +-#ifdef HAVE_JUMP_LABEL + extern void static_key_slow_dec_deferred(struct static_key_deferred *key); + extern void static_key_deferred_flush(struct static_key_deferred *key); + extern void + jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); + +-#else /* !HAVE_JUMP_LABEL */ ++#else /* !CONFIG_JUMP_LABEL */ + struct static_key_deferred { + struct static_key key; + }; +@@ -38,5 +36,5 @@ jump_label_rate_limit(struct static_key_deferred *key, + { + STATIC_KEY_CHECK_USE(key); + } +-#endif /* HAVE_JUMP_LABEL */ ++#endif /* CONFIG_JUMP_LABEL */ + #endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */ +diff --git a/include/linux/module.h b/include/linux/module.h +index 904f94628132..c71044644979 100644 +--- a/include/linux/module.h ++++ b/include/linux/module.h +@@ -433,7 +433,7 @@ struct module { + unsigned int num_tracepoints; + tracepoint_ptr_t *tracepoints_ptrs; + #endif +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + struct jump_entry *jump_entries; + unsigned int num_jump_entries; + #endif +diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h +index bbe99d2b28b4..72cb19c3db6a 100644 +--- a/include/linux/netfilter.h ++++ b/include/linux/netfilter.h +@@ -176,7 +176,7 @@ void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg, + int nf_register_sockopt(struct nf_sockopt_ops *reg); + void nf_unregister_sockopt(struct nf_sockopt_ops *reg); + +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; + #endif + +@@ -198,7 +198,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, + struct nf_hook_entries *hook_head = NULL; + int ret = 1; + +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + if (__builtin_constant_p(pf) && + __builtin_constant_p(hook) && + !static_key_false(&nf_hooks_needed[pf][hook])) +diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h +index 554c920691dd..a13774be2eb5 100644 +--- 
a/include/linux/netfilter_ingress.h ++++ b/include/linux/netfilter_ingress.h +@@ -8,7 +8,7 @@ + #ifdef CONFIG_NETFILTER_INGRESS + static inline bool nf_hook_ingress_active(const struct sk_buff *skb) + { +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS])) + return false; + #endif +diff --git a/include/linux/siphash.h b/include/linux/siphash.h +index fa7a6b9cedbf..bf21591a9e5e 100644 +--- a/include/linux/siphash.h ++++ b/include/linux/siphash.h +@@ -21,6 +21,11 @@ typedef struct { + u64 key[2]; + } siphash_key_t; + ++static inline bool siphash_key_is_zero(const siphash_key_t *key) ++{ ++ return !(key->key[0] | key->key[1]); ++} ++ + u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key); + #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key); +diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h +index e47503b4e4d1..622db6bc2f02 100644 +--- a/include/net/netns/ipv4.h ++++ b/include/net/netns/ipv4.h +@@ -9,6 +9,7 @@ + #include <linux/uidgid.h> + #include <net/inet_frag.h> + #include <linux/rcupdate.h> ++#include <linux/siphash.h> + + struct tcpm_hash_bucket; + struct ctl_table_header; +@@ -214,5 +215,6 @@ struct netns_ipv4 { + unsigned int ipmr_seq; /* protected by rtnl_mutex */ + + atomic_t rt_genid; ++ siphash_key_t ip_id_key; + }; + #endif +diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h +index 4b2c93b1934c..4955e1a9f1bc 100644 +--- a/include/uapi/linux/tipc_config.h ++++ b/include/uapi/linux/tipc_config.h +@@ -307,8 +307,10 @@ static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len) + tlv_ptr = (struct tlv_desc *)tlv; + tlv_ptr->tlv_type = htons(type); + tlv_ptr->tlv_len = htons(tlv_len); +- if (len && data) +- memcpy(TLV_DATA(tlv_ptr), data, tlv_len); ++ if (len && data) { ++ memcpy(TLV_DATA(tlv_ptr), data, len); ++ memset(TLV_DATA(tlv_ptr) + len, 0, TLV_SPACE(len) - tlv_len); ++ } + return TLV_SPACE(len); + } + +@@ -405,8 +407,10 @@ static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags, + tcm_hdr->tcm_len = htonl(msg_len); + tcm_hdr->tcm_type = htons(cmd); + tcm_hdr->tcm_flags = htons(flags); +- if (data_len && data) ++ if (data_len && data) { + memcpy(TCM_DATA(msg), data, data_len); ++ memset(TCM_DATA(msg) + data_len, 0, TCM_SPACE(data_len) - msg_len); ++ } + return TCM_SPACE(data_len); + } + +diff --git a/init/Kconfig b/init/Kconfig +index 864af10bb1b9..47035b5a46f6 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -23,6 +23,9 @@ config CLANG_VERSION + int + default $(shell,$(srctree)/scripts/clang-version.sh $(CC)) + ++config CC_HAS_ASM_GOTO ++ def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC)) ++ + config CONSTRUCTORS + bool + depends on !UML +diff --git a/kernel/jump_label.c b/kernel/jump_label.c +index 2e62503bea0d..7c8262635b29 100644 +--- a/kernel/jump_label.c ++++ b/kernel/jump_label.c +@@ -18,8 +18,6 @@ + #include <linux/cpu.h> + #include <asm/sections.h> + +-#ifdef HAVE_JUMP_LABEL +- + /* mutex to protect coming/going of the the jump_label table */ + static DEFINE_MUTEX(jump_label_mutex); + +@@ -60,13 +58,13 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) + static void jump_label_update(struct static_key *key); + + /* +- * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h. ++ * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h. 
+ * The use of 'atomic_read()' requires atomic.h and its problematic for some + * kernel headers such as kernel.h and others. Since static_key_count() is not +- * used in the branch statements as it is for the !HAVE_JUMP_LABEL case its ok ++ * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case its ok + * to have it be a function here. Similarly, for 'static_key_enable()' and + * 'static_key_disable()', which require bug.h. This should allow jump_label.h +- * to be included from most/all places for HAVE_JUMP_LABEL. ++ * to be included from most/all places for CONFIG_JUMP_LABEL. + */ + int static_key_count(struct static_key *key) + { +@@ -796,5 +794,3 @@ static __init int jump_label_test(void) + } + early_initcall(jump_label_test); + #endif /* STATIC_KEYS_SELFTEST */ +- +-#endif /* HAVE_JUMP_LABEL */ +diff --git a/kernel/module.c b/kernel/module.c +index f797c6ace712..b8f37376856b 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -3100,7 +3100,7 @@ static int find_module_sections(struct module *mod, struct load_info *info) + sizeof(*mod->tracepoints_ptrs), + &mod->num_tracepoints); + #endif +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + mod->jump_entries = section_objs(info, "__jump_table", + sizeof(*mod->jump_entries), + &mod->num_jump_entries); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 6138754e5030..6859ea1d5c04 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -24,7 +24,7 @@ + + DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); + +-#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL) ++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) + /* + * Debugging: various feature bits + * +diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c +index 141ea9ff210e..78fadf0438ea 100644 +--- a/kernel/sched/debug.c ++++ b/kernel/sched/debug.c +@@ -73,7 +73,7 @@ static int sched_feat_show(struct seq_file *m, void *v) + return 0; + } + +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + + #define jump_label_key__true STATIC_KEY_INIT_TRUE + #define jump_label_key__false STATIC_KEY_INIT_FALSE +@@ -99,7 +99,7 @@ static void sched_feat_enable(int i) + #else + static void sched_feat_disable(int i) { }; + static void sched_feat_enable(int i) { }; +-#endif /* HAVE_JUMP_LABEL */ ++#endif /* CONFIG_JUMP_LABEL */ + + static int sched_feat_set(char *cmp) + { +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 7a1e9db617f7..4a433608ba74 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -4209,7 +4209,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) + + #ifdef CONFIG_CFS_BANDWIDTH + +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + static struct static_key __cfs_bandwidth_used; + + static inline bool cfs_bandwidth_used(void) +@@ -4226,7 +4226,7 @@ void cfs_bandwidth_usage_dec(void) + { + static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used); + } +-#else /* HAVE_JUMP_LABEL */ ++#else /* CONFIG_JUMP_LABEL */ + static bool cfs_bandwidth_used(void) + { + return true; +@@ -4234,7 +4234,7 @@ static bool cfs_bandwidth_used(void) + + void cfs_bandwidth_usage_inc(void) {} + void cfs_bandwidth_usage_dec(void) {} +-#endif /* HAVE_JUMP_LABEL */ ++#endif /* CONFIG_JUMP_LABEL */ + + /* + * default period for cfs group bandwidth. 
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index 4c7a837d7c14..9a7c3d08b39f 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -1359,7 +1359,7 @@ enum { + + #undef SCHED_FEAT + +-#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL) ++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) + + /* + * To support run-time toggling of sched features, all the translation units +@@ -1379,7 +1379,7 @@ static __always_inline bool static_branch_##name(struct static_key *key) \ + extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; + #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) + +-#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */ ++#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */ + + /* + * Each translation unit has its own copy of sysctl_sched_features to allow +@@ -1395,7 +1395,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features = + + #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) + +-#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */ ++#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */ + + extern struct static_key_false sched_numa_balancing; + extern struct static_key_false sched_schedstats; +diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c +index c7c96bc7654a..dbf2b457e47e 100644 +--- a/lib/dynamic_debug.c ++++ b/lib/dynamic_debug.c +@@ -188,7 +188,7 @@ static int ddebug_change(const struct ddebug_query *query, + newflags = (dp->flags & mask) | flags; + if (newflags == dp->flags) + continue; +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + if (dp->flags & _DPRINTK_FLAGS_PRINT) { + if (!(flags & _DPRINTK_FLAGS_PRINT)) + static_branch_disable(&dp->key.dd_key_true); +diff --git a/net/core/dev.c b/net/core/dev.c +index 13a82744a00a..138951d28643 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -1821,7 +1821,7 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue); + #endif + + static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + static atomic_t netstamp_needed_deferred; + static atomic_t netstamp_wanted; + static void netstamp_clear(struct work_struct *work) +@@ -1840,7 +1840,7 @@ static DECLARE_WORK(netstamp_work, netstamp_clear); + + void net_enable_timestamp(void) + { +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + int wanted; + + while (1) { +@@ -1860,7 +1860,7 @@ EXPORT_SYMBOL(net_enable_timestamp); + + void net_disable_timestamp(void) + { +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + int wanted; + + while (1) { +@@ -5725,7 +5725,6 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi) + skb_reset_mac_header(skb); + skb_gro_reset_offset(skb); + +- eth = skb_gro_header_fast(skb, 0); + if (unlikely(skb_gro_header_hard(skb, hlen))) { + eth = skb_gro_header_slow(skb, hlen, 0); + if (unlikely(!eth)) { +@@ -5735,6 +5734,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi) + return NULL; + } + } else { ++ eth = (const struct ethhdr *)skb->data; + gro_pull_from_frag0(skb, hlen); + NAPI_GRO_CB(skb)->frag0 += hlen; + NAPI_GRO_CB(skb)->frag0_len -= hlen; +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c +index 4da39446da2d..d187ee8156a1 100644 +--- a/net/ipv4/igmp.c ++++ b/net/ipv4/igmp.c +@@ -190,6 +190,17 @@ static void ip_ma_put(struct ip_mc_list *im) + pmc != NULL; \ + pmc = rtnl_dereference(pmc->next_rcu)) + ++static void ip_sf_list_clear_all(struct ip_sf_list *psf) ++{ ++ struct ip_sf_list *next; ++ ++ while (psf) { ++ next = psf->sf_next; ++ kfree(psf); ++ psf = next; ++ } ++} ++ + 
#ifdef CONFIG_IP_MULTICAST + + /* +@@ -635,6 +646,13 @@ static void igmpv3_clear_zeros(struct ip_sf_list **ppsf) + } + } + ++static void kfree_pmc(struct ip_mc_list *pmc) ++{ ++ ip_sf_list_clear_all(pmc->sources); ++ ip_sf_list_clear_all(pmc->tomb); ++ kfree(pmc); ++} ++ + static void igmpv3_send_cr(struct in_device *in_dev) + { + struct ip_mc_list *pmc, *pmc_prev, *pmc_next; +@@ -671,7 +689,7 @@ static void igmpv3_send_cr(struct in_device *in_dev) + else + in_dev->mc_tomb = pmc_next; + in_dev_put(pmc->interface); +- kfree(pmc); ++ kfree_pmc(pmc); + } else + pmc_prev = pmc; + } +@@ -1201,14 +1219,18 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im) + im->interface = pmc->interface; + if (im->sfmode == MCAST_INCLUDE) { + im->tomb = pmc->tomb; ++ pmc->tomb = NULL; ++ + im->sources = pmc->sources; ++ pmc->sources = NULL; ++ + for (psf = im->sources; psf; psf = psf->sf_next) + psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; + } else { + im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; + } + in_dev_put(pmc->interface); +- kfree(pmc); ++ kfree_pmc(pmc); + } + spin_unlock_bh(&im->lock); + } +@@ -1229,21 +1251,18 @@ static void igmpv3_clear_delrec(struct in_device *in_dev) + nextpmc = pmc->next; + ip_mc_clear_src(pmc); + in_dev_put(pmc->interface); +- kfree(pmc); ++ kfree_pmc(pmc); + } + /* clear dead sources, too */ + rcu_read_lock(); + for_each_pmc_rcu(in_dev, pmc) { +- struct ip_sf_list *psf, *psf_next; ++ struct ip_sf_list *psf; + + spin_lock_bh(&pmc->lock); + psf = pmc->tomb; + pmc->tomb = NULL; + spin_unlock_bh(&pmc->lock); +- for (; psf; psf = psf_next) { +- psf_next = psf->sf_next; +- kfree(psf); +- } ++ ip_sf_list_clear_all(psf); + } + rcu_read_unlock(); + } +@@ -2114,7 +2133,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, + + static void ip_mc_clear_src(struct ip_mc_list *pmc) + { +- struct ip_sf_list *psf, *nextpsf, *tomb, *sources; ++ struct ip_sf_list *tomb, *sources; + + spin_lock_bh(&pmc->lock); + tomb = pmc->tomb; +@@ -2126,14 +2145,8 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc) + pmc->sfcount[MCAST_EXCLUDE] = 1; + spin_unlock_bh(&pmc->lock); + +- for (psf = tomb; psf; psf = nextpsf) { +- nextpsf = psf->sf_next; +- kfree(psf); +- } +- for (psf = sources; psf; psf = nextpsf) { +- nextpsf = psf->sf_next; +- kfree(psf); +- } ++ ip_sf_list_clear_all(tomb); ++ ip_sf_list_clear_all(sources); + } + + /* Join a multicast group +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 8bacbcd2db90..40bf19f7ae1a 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -500,15 +500,17 @@ EXPORT_SYMBOL(ip_idents_reserve); + + void __ip_select_ident(struct net *net, struct iphdr *iph, int segs) + { +- static u32 ip_idents_hashrnd __read_mostly; + u32 hash, id; + +- net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd)); ++ /* Note the following code is not safe, but this is okay. 
*/ ++ if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key))) ++ get_random_bytes(&net->ipv4.ip_id_key, ++ sizeof(net->ipv4.ip_id_key)); + +- hash = jhash_3words((__force u32)iph->daddr, ++ hash = siphash_3u32((__force u32)iph->daddr, + (__force u32)iph->saddr, +- iph->protocol ^ net_hash_mix(net), +- ip_idents_hashrnd); ++ iph->protocol, ++ &net->ipv4.ip_id_key); + id = ip_idents_reserve(hash, segs); + iph->id = htons(id); + } +diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c +index 4fe7c90962dd..868ae23dbae1 100644 +--- a/net/ipv6/output_core.c ++++ b/net/ipv6/output_core.c +@@ -10,15 +10,25 @@ + #include <net/secure_seq.h> + #include <linux/netfilter.h> + +-static u32 __ipv6_select_ident(struct net *net, u32 hashrnd, ++static u32 __ipv6_select_ident(struct net *net, + const struct in6_addr *dst, + const struct in6_addr *src) + { ++ const struct { ++ struct in6_addr dst; ++ struct in6_addr src; ++ } __aligned(SIPHASH_ALIGNMENT) combined = { ++ .dst = *dst, ++ .src = *src, ++ }; + u32 hash, id; + +- hash = __ipv6_addr_jhash(dst, hashrnd); +- hash = __ipv6_addr_jhash(src, hash); +- hash ^= net_hash_mix(net); ++ /* Note the following code is not safe, but this is okay. */ ++ if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key))) ++ get_random_bytes(&net->ipv4.ip_id_key, ++ sizeof(net->ipv4.ip_id_key)); ++ ++ hash = siphash(&combined, sizeof(combined), &net->ipv4.ip_id_key); + + /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve, + * set the hight order instead thus minimizing possible future +@@ -41,7 +51,6 @@ static u32 __ipv6_select_ident(struct net *net, u32 hashrnd, + */ + __be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb) + { +- static u32 ip6_proxy_idents_hashrnd __read_mostly; + struct in6_addr buf[2]; + struct in6_addr *addrs; + u32 id; +@@ -53,11 +62,7 @@ __be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb) + if (!addrs) + return 0; + +- net_get_random_once(&ip6_proxy_idents_hashrnd, +- sizeof(ip6_proxy_idents_hashrnd)); +- +- id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd, +- &addrs[1], &addrs[0]); ++ id = __ipv6_select_ident(net, &addrs[1], &addrs[0]); + return htonl(id); + } + EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); +@@ -66,12 +71,9 @@ __be32 ipv6_select_ident(struct net *net, + const struct in6_addr *daddr, + const struct in6_addr *saddr) + { +- static u32 ip6_idents_hashrnd __read_mostly; + u32 id; + +- net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd)); +- +- id = __ipv6_select_ident(net, ip6_idents_hashrnd, daddr, saddr); ++ id = __ipv6_select_ident(net, daddr, saddr); + return htonl(id); + } + EXPORT_SYMBOL(ipv6_select_ident); +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c +index 5e0efd3954e9..5833d4af7311 100644 +--- a/net/ipv6/raw.c ++++ b/net/ipv6/raw.c +@@ -288,7 +288,9 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) + /* Binding to link-local address requires an interface */ + if (!sk->sk_bound_dev_if) + goto out_unlock; ++ } + ++ if (sk->sk_bound_dev_if) { + err = -ENODEV; + dev = dev_get_by_index_rcu(sock_net(sk), + sk->sk_bound_dev_if); +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index bf0940c42810..24f7b2cf504b 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -2480,6 +2480,12 @@ static struct rt6_info *__ip6_route_redirect(struct net *net, + struct fib6_info *rt; + struct fib6_node *fn; + ++ /* l3mdev_update_flow overrides oif if the device is enslaved; in ++ * this case we must match on the real ingress device, so reset 
it ++ */ ++ if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) ++ fl6->flowi6_oif = skb->dev->ifindex; ++ + /* Get the "current" route for this destination and + * check if the redirect has come from appropriate router. + * +diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c +index 94425e421213..9e4b6bcf6920 100644 +--- a/net/llc/llc_output.c ++++ b/net/llc/llc_output.c +@@ -72,6 +72,8 @@ int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb, + rc = llc_mac_hdr_init(skb, skb->dev->dev_addr, dmac); + if (likely(!rc)) + rc = dev_queue_xmit(skb); ++ else ++ kfree_skb(skb); + return rc; + } + +diff --git a/net/netfilter/core.c b/net/netfilter/core.c +index dc240cb47ddf..93aaec3a54ec 100644 +--- a/net/netfilter/core.c ++++ b/net/netfilter/core.c +@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(nf_ipv6_ops); + DEFINE_PER_CPU(bool, nf_skb_duplicated); + EXPORT_SYMBOL_GPL(nf_skb_duplicated); + +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; + EXPORT_SYMBOL(nf_hooks_needed); + #endif +@@ -347,7 +347,7 @@ static int __nf_register_net_hook(struct net *net, int pf, + if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) + net_inc_ingress_queue(); + #endif +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + static_key_slow_inc(&nf_hooks_needed[pf][reg->hooknum]); + #endif + BUG_ON(p == new_hooks); +@@ -405,7 +405,7 @@ static void __nf_unregister_net_hook(struct net *net, int pf, + if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) + net_dec_ingress_queue(); + #endif +-#ifdef HAVE_JUMP_LABEL ++#ifdef CONFIG_JUMP_LABEL + static_key_slow_dec(&nf_hooks_needed[pf][reg->hooknum]); + #endif + } else { +diff --git a/net/sched/act_api.c b/net/sched/act_api.c +index e12f8ef7baa4..7c4a4b874248 100644 +--- a/net/sched/act_api.c ++++ b/net/sched/act_api.c +@@ -744,7 +744,7 @@ int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], + + for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { + a = actions[i]; +- nest = nla_nest_start(skb, a->order); ++ nest = nla_nest_start(skb, i + 1); + if (nest == NULL) + goto nla_put_failure; + err = tcf_action_dump_1(skb, a, bind, ref); +@@ -1257,7 +1257,6 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, + ret = PTR_ERR(act); + goto err; + } +- act->order = i; + attr_size += tcf_action_fill_size(act); + actions[i - 1] = act; + } +diff --git a/net/tipc/core.c b/net/tipc/core.c +index d7b0688c98dd..3ecca3b88bf8 100644 +--- a/net/tipc/core.c ++++ b/net/tipc/core.c +@@ -66,10 +66,6 @@ static int __net_init tipc_init_net(struct net *net) + INIT_LIST_HEAD(&tn->node_list); + spin_lock_init(&tn->node_list_lock); + +- err = tipc_socket_init(); +- if (err) +- goto out_socket; +- + err = tipc_sk_rht_init(net); + if (err) + goto out_sk_rht; +@@ -79,9 +75,6 @@ static int __net_init tipc_init_net(struct net *net) + goto out_nametbl; + + INIT_LIST_HEAD(&tn->dist_queue); +- err = tipc_topsrv_start(net); +- if (err) +- goto out_subscr; + + err = tipc_bcast_init(net); + if (err) +@@ -90,25 +83,19 @@ static int __net_init tipc_init_net(struct net *net) + return 0; + + out_bclink: +- tipc_bcast_stop(net); +-out_subscr: + tipc_nametbl_stop(net); + out_nametbl: + tipc_sk_rht_destroy(net); + out_sk_rht: +- tipc_socket_stop(); +-out_socket: + return err; + } + + static void __net_exit tipc_exit_net(struct net *net) + { +- tipc_topsrv_stop(net); + tipc_net_stop(net); + tipc_bcast_stop(net); + tipc_nametbl_stop(net); + tipc_sk_rht_destroy(net); +- 
tipc_socket_stop(); + } + + static struct pernet_operations tipc_net_ops = { +@@ -118,6 +105,11 @@ static struct pernet_operations tipc_net_ops = { + .size = sizeof(struct tipc_net), + }; + ++static struct pernet_operations tipc_topsrv_net_ops = { ++ .init = tipc_topsrv_init_net, ++ .exit = tipc_topsrv_exit_net, ++}; ++ + static int __init tipc_init(void) + { + int err; +@@ -144,6 +136,14 @@ static int __init tipc_init(void) + if (err) + goto out_pernet; + ++ err = tipc_socket_init(); ++ if (err) ++ goto out_socket; ++ ++ err = register_pernet_subsys(&tipc_topsrv_net_ops); ++ if (err) ++ goto out_pernet_topsrv; ++ + err = tipc_bearer_setup(); + if (err) + goto out_bearer; +@@ -151,6 +151,10 @@ static int __init tipc_init(void) + pr_info("Started in single node mode\n"); + return 0; + out_bearer: ++ unregister_pernet_subsys(&tipc_topsrv_net_ops); ++out_pernet_topsrv: ++ tipc_socket_stop(); ++out_socket: + unregister_pernet_subsys(&tipc_net_ops); + out_pernet: + tipc_unregister_sysctl(); +@@ -166,6 +170,8 @@ out_netlink: + static void __exit tipc_exit(void) + { + tipc_bearer_cleanup(); ++ unregister_pernet_subsys(&tipc_topsrv_net_ops); ++ tipc_socket_stop(); + unregister_pernet_subsys(&tipc_net_ops); + tipc_netlink_stop(); + tipc_netlink_compat_stop(); +diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h +index d793b4343885..aa015c233898 100644 +--- a/net/tipc/subscr.h ++++ b/net/tipc/subscr.h +@@ -77,8 +77,9 @@ void tipc_sub_report_overlap(struct tipc_subscription *sub, + u32 found_lower, u32 found_upper, + u32 event, u32 port, u32 node, + u32 scope, int must); +-int tipc_topsrv_start(struct net *net); +-void tipc_topsrv_stop(struct net *net); ++ ++int __net_init tipc_topsrv_init_net(struct net *net); ++void __net_exit tipc_topsrv_exit_net(struct net *net); + + void tipc_sub_put(struct tipc_subscription *subscription); + void tipc_sub_get(struct tipc_subscription *subscription); +diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c +index 2301b09df234..35558656fe02 100644 +--- a/net/tipc/topsrv.c ++++ b/net/tipc/topsrv.c +@@ -643,7 +643,7 @@ static void tipc_topsrv_work_stop(struct tipc_topsrv *s) + destroy_workqueue(s->send_wq); + } + +-int tipc_topsrv_start(struct net *net) ++static int tipc_topsrv_start(struct net *net) + { + struct tipc_net *tn = tipc_net(net); + const char name[] = "topology_server"; +@@ -677,7 +677,7 @@ int tipc_topsrv_start(struct net *net) + return ret; + } + +-void tipc_topsrv_stop(struct net *net) ++static void tipc_topsrv_stop(struct net *net) + { + struct tipc_topsrv *srv = tipc_topsrv(net); + struct socket *lsock = srv->listener; +@@ -702,3 +702,13 @@ void tipc_topsrv_stop(struct net *net) + idr_destroy(&srv->conn_idr); + kfree(srv); + } ++ ++int __net_init tipc_topsrv_init_net(struct net *net) ++{ ++ return tipc_topsrv_start(net); ++} ++ ++void __net_exit tipc_topsrv_exit_net(struct net *net) ++{ ++ tipc_topsrv_stop(net); ++} +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c +index fdf22cb0b3e6..8035bf495eb2 100644 +--- a/net/tls/tls_device.c ++++ b/net/tls/tls_device.c +@@ -916,12 +916,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk) + if (!netdev) + goto out; + +- if (!(netdev->features & NETIF_F_HW_TLS_RX)) { +- pr_err_ratelimited("%s: device is missing NETIF_F_HW_TLS_RX cap\n", +- __func__); +- goto out; +- } +- + netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx, + TLS_OFFLOAD_CTX_DIR_RX); + +@@ -980,7 +974,8 @@ static int tls_dev_event(struct notifier_block *this, unsigned long event, + { + struct net_device *dev = 
netdev_notifier_info_to_dev(ptr);
+
+-	if (!(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
++	if (!dev->tlsdev_ops &&
++	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
+ 		return NOTIFY_DONE;
+
+ 	switch (event) {
+diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh
+index 083c526073ef..8b980fb2270a 100755
+--- a/scripts/gcc-goto.sh
++++ b/scripts/gcc-goto.sh
+@@ -3,7 +3,7 @@
+ # Test for gcc 'asm goto' support
+ # Copyright (C) 2010, Jason Baron <jbaron@redhat.com>
+
+-cat << "END" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y"
++cat << "END" | $@ -x c - -fno-PIE -c -o /dev/null
+ int main(void)
+ {
+ #if defined(__arm__) || defined(__aarch64__)
+diff --git a/tools/arch/x86/include/asm/rmwcc.h b/tools/arch/x86/include/asm/rmwcc.h
+index dc90c0c2fae3..fee7983a90b4 100644
+--- a/tools/arch/x86/include/asm/rmwcc.h
++++ b/tools/arch/x86/include/asm/rmwcc.h
+@@ -2,7 +2,7 @@
+ #ifndef _TOOLS_LINUX_ASM_X86_RMWcc
+ #define _TOOLS_LINUX_ASM_X86_RMWcc
+
+-#ifdef CC_HAVE_ASM_GOTO
++#ifdef CONFIG_CC_HAS_ASM_GOTO
+
+ #define __GEN_RMWcc(fullop, var, cc, ...) \
+ do { \
+@@ -20,7 +20,7 @@ cc_label: \
+ #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
+ 	__GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
+
+-#else /* !CC_HAVE_ASM_GOTO */
++#else /* !CONFIG_CC_HAS_ASM_GOTO */
+
+ #define __GEN_RMWcc(fullop, var, cc, ...) \
+ do { \
+@@ -37,6 +37,6 @@ do { \
+ #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
+ 	__GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
+
+-#endif /* CC_HAVE_ASM_GOTO */
++#endif /* CONFIG_CC_HAS_ASM_GOTO */
+
+ #endif /* _TOOLS_LINUX_ASM_X86_RMWcc */