author | Mike Pagano <mpagano@gentoo.org> | 2021-01-17 11:19:20 -0500 |
---|---|---|
committer | Mike Pagano <mpagano@gentoo.org> | 2021-01-17 11:19:20 -0500 |
commit | 5f74453df8e87c968a5d45bdfd6284eea40a29af (patch) | |
tree | 3228cbdaabbcf771cfd83d9ed11a558cfecfdb59 | |
parent | Linux patch 5.4.89 (diff) | |
download | linux-patches-5f74453df8e87c968a5d45bdfd6284eea40a29af.tar.gz linux-patches-5f74453df8e87c968a5d45bdfd6284eea40a29af.tar.bz2 linux-patches-5f74453df8e87c968a5d45bdfd6284eea40a29af.zip |
Linux patch 5.4.90
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4 |
-rw-r--r-- | 1089_linux-5.4.90.patch | 1787 |
2 files changed, 1791 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 3db22cb0..5d28defb 100644 --- a/0000_README +++ b/0000_README @@ -399,6 +399,10 @@ Patch: 1088_linux-5.4.89.patch From: http://www.kernel.org Desc: Linux 5.4.89 +Patch: 1089_linux-5.4.90.patch +From: http://www.kernel.org +Desc: Linux 5.4.90 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1089_linux-5.4.90.patch b/1089_linux-5.4.90.patch new file mode 100644 index 00000000..bc5ebac3 --- /dev/null +++ b/1089_linux-5.4.90.patch @@ -0,0 +1,1787 @@ +diff --git a/Makefile b/Makefile +index 95848875110ef..5c9d680b7ce51 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 89 ++SUBLEVEL = 90 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c +index 3acb4192918df..f85a0fd6aca5c 100644 +--- a/arch/arm/mach-omap2/omap_device.c ++++ b/arch/arm/mach-omap2/omap_device.c +@@ -234,10 +234,12 @@ static int _omap_device_notifier_call(struct notifier_block *nb, + break; + case BUS_NOTIFY_BIND_DRIVER: + od = to_omap_device(pdev); +- if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) && +- pm_runtime_status_suspended(dev)) { ++ if (od) { + od->_driver_status = BUS_NOTIFY_BIND_DRIVER; +- pm_runtime_set_active(dev); ++ if (od->_state == OMAP_DEVICE_STATE_ENABLED && ++ pm_runtime_status_suspended(dev)) { ++ pm_runtime_set_active(dev); ++ } + } + break; + case BUS_NOTIFY_ADD_DEVICE: +diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c +index 6478635ff2142..98a177dd1f89f 100644 +--- a/arch/arm64/kvm/sys_regs.c ++++ b/arch/arm64/kvm/sys_regs.c +@@ -625,6 +625,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) + { + u64 pmcr, val; + ++ /* No PMU available, PMCR_EL0 may UNDEF... */ ++ if (!kvm_arm_support_pmu_v3()) ++ return; ++ + pmcr = read_sysreg(pmcr_el0); + /* + * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN +diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S +index 390edb7638265..bde3e0f85425f 100644 +--- a/arch/x86/entry/entry_32.S ++++ b/arch/x86/entry/entry_32.S +@@ -869,9 +869,10 @@ GLOBAL(__begin_SYSENTER_singlestep_region) + * Xen doesn't set %esp to be precisely what the normal SYSENTER + * entry point expects, so fix it up before using the normal path. 
+ */ +-ENTRY(xen_sysenter_target) ++SYM_CODE_START(xen_sysenter_target) + addl $5*4, %esp /* remove xen-provided frame */ + jmp .Lsysenter_past_esp ++SYM_CODE_END(xen_sysenter_target) + #endif + + /* +diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S +index e95e95960156b..5b076cb79f5fb 100644 +--- a/arch/x86/kernel/acpi/wakeup_32.S ++++ b/arch/x86/kernel/acpi/wakeup_32.S +@@ -9,8 +9,7 @@ + .code32 + ALIGN + +-ENTRY(wakeup_pmode_return) +-wakeup_pmode_return: ++SYM_CODE_START(wakeup_pmode_return) + movw $__KERNEL_DS, %ax + movw %ax, %ss + movw %ax, %fs +@@ -39,6 +38,7 @@ wakeup_pmode_return: + # jump to place where we left off + movl saved_eip, %eax + jmp *%eax ++SYM_CODE_END(wakeup_pmode_return) + + bogus_magic: + jmp bogus_magic +@@ -72,7 +72,7 @@ restore_registers: + popfl + ret + +-ENTRY(do_suspend_lowlevel) ++SYM_CODE_START(do_suspend_lowlevel) + call save_processor_state + call save_registers + pushl $3 +@@ -87,6 +87,7 @@ ret_point: + call restore_registers + call restore_processor_state + ret ++SYM_CODE_END(do_suspend_lowlevel) + + .data + ALIGN +diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c +index 830ccc396e26d..28f786289fce4 100644 +--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c ++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c +@@ -525,85 +525,70 @@ static void rdtgroup_remove(struct rdtgroup *rdtgrp) + kfree(rdtgrp); + } + +-struct task_move_callback { +- struct callback_head work; +- struct rdtgroup *rdtgrp; +-}; +- +-static void move_myself(struct callback_head *head) ++static void _update_task_closid_rmid(void *task) + { +- struct task_move_callback *callback; +- struct rdtgroup *rdtgrp; +- +- callback = container_of(head, struct task_move_callback, work); +- rdtgrp = callback->rdtgrp; +- + /* +- * If resource group was deleted before this task work callback +- * was invoked, then assign the task to root group and free the +- * resource group. ++ * If the task is still current on this CPU, update PQR_ASSOC MSR. ++ * Otherwise, the MSR is updated when the task is scheduled in. + */ +- if (atomic_dec_and_test(&rdtgrp->waitcount) && +- (rdtgrp->flags & RDT_DELETED)) { +- current->closid = 0; +- current->rmid = 0; +- rdtgroup_remove(rdtgrp); +- } +- +- preempt_disable(); +- /* update PQR_ASSOC MSR to make resource group go into effect */ +- resctrl_sched_in(); +- preempt_enable(); ++ if (task == current) ++ resctrl_sched_in(); ++} + +- kfree(callback); ++static void update_task_closid_rmid(struct task_struct *t) ++{ ++ if (IS_ENABLED(CONFIG_SMP) && task_curr(t)) ++ smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1); ++ else ++ _update_task_closid_rmid(t); + } + + static int __rdtgroup_move_task(struct task_struct *tsk, + struct rdtgroup *rdtgrp) + { +- struct task_move_callback *callback; +- int ret; +- +- callback = kzalloc(sizeof(*callback), GFP_KERNEL); +- if (!callback) +- return -ENOMEM; +- callback->work.func = move_myself; +- callback->rdtgrp = rdtgrp; ++ /* If the task is already in rdtgrp, no need to move the task. */ ++ if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid && ++ tsk->rmid == rdtgrp->mon.rmid) || ++ (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid && ++ tsk->closid == rdtgrp->mon.parent->closid)) ++ return 0; + + /* +- * Take a refcount, so rdtgrp cannot be freed before the +- * callback has been invoked. ++ * Set the task's closid/rmid before the PQR_ASSOC MSR can be ++ * updated by them. 
++ * ++ * For ctrl_mon groups, move both closid and rmid. ++ * For monitor groups, can move the tasks only from ++ * their parent CTRL group. + */ +- atomic_inc(&rdtgrp->waitcount); +- ret = task_work_add(tsk, &callback->work, true); +- if (ret) { +- /* +- * Task is exiting. Drop the refcount and free the callback. +- * No need to check the refcount as the group cannot be +- * deleted before the write function unlocks rdtgroup_mutex. +- */ +- atomic_dec(&rdtgrp->waitcount); +- kfree(callback); +- rdt_last_cmd_puts("Task exited\n"); +- } else { +- /* +- * For ctrl_mon groups move both closid and rmid. +- * For monitor groups, can move the tasks only from +- * their parent CTRL group. +- */ +- if (rdtgrp->type == RDTCTRL_GROUP) { +- tsk->closid = rdtgrp->closid; ++ ++ if (rdtgrp->type == RDTCTRL_GROUP) { ++ tsk->closid = rdtgrp->closid; ++ tsk->rmid = rdtgrp->mon.rmid; ++ } else if (rdtgrp->type == RDTMON_GROUP) { ++ if (rdtgrp->mon.parent->closid == tsk->closid) { + tsk->rmid = rdtgrp->mon.rmid; +- } else if (rdtgrp->type == RDTMON_GROUP) { +- if (rdtgrp->mon.parent->closid == tsk->closid) { +- tsk->rmid = rdtgrp->mon.rmid; +- } else { +- rdt_last_cmd_puts("Can't move task to different control group\n"); +- ret = -EINVAL; +- } ++ } else { ++ rdt_last_cmd_puts("Can't move task to different control group\n"); ++ return -EINVAL; + } + } +- return ret; ++ ++ /* ++ * Ensure the task's closid and rmid are written before determining if ++ * the task is current that will decide if it will be interrupted. ++ */ ++ barrier(); ++ ++ /* ++ * By now, the task's closid and rmid are set. If the task is current ++ * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource ++ * group go into effect. If the task is not current, the MSR will be ++ * updated when the task is scheduled in. ++ */ ++ update_task_closid_rmid(tsk); ++ ++ return 0; + } + + /** +diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S +index 073aab525d800..2cc0303522c99 100644 +--- a/arch/x86/kernel/ftrace_32.S ++++ b/arch/x86/kernel/ftrace_32.S +@@ -89,7 +89,7 @@ WEAK(ftrace_stub) + ret + END(ftrace_caller) + +-ENTRY(ftrace_regs_caller) ++SYM_CODE_START(ftrace_regs_caller) + /* + * We're here from an mcount/fentry CALL, and the stack frame looks like: + * +@@ -163,6 +163,7 @@ GLOBAL(ftrace_regs_call) + popl %eax + + jmp .Lftrace_ret ++SYM_CODE_END(ftrace_regs_caller) + + #ifdef CONFIG_FUNCTION_GRAPH_TRACER + ENTRY(ftrace_graph_caller) +diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S +index 2e6a0676c1f43..11a5d5ade52ce 100644 +--- a/arch/x86/kernel/head_32.S ++++ b/arch/x86/kernel/head_32.S +@@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE) + * can. + */ + __HEAD +-ENTRY(startup_32) ++SYM_CODE_START(startup_32) + movl pa(initial_stack),%ecx + + /* test KEEP_SEGMENTS flag to see if the bootloader is asking +@@ -172,6 +172,7 @@ num_subarch_entries = (. 
- subarch_entries) / 4 + #else + jmp .Ldefault_entry + #endif /* CONFIG_PARAVIRT */ ++SYM_CODE_END(startup_32) + + #ifdef CONFIG_HOTPLUG_CPU + /* +diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S +index 6fe383002125f..a19ed3d231853 100644 +--- a/arch/x86/power/hibernate_asm_32.S ++++ b/arch/x86/power/hibernate_asm_32.S +@@ -35,7 +35,7 @@ ENTRY(swsusp_arch_suspend) + ret + ENDPROC(swsusp_arch_suspend) + +-ENTRY(restore_image) ++SYM_CODE_START(restore_image) + /* prepare to jump to the image kernel */ + movl restore_jump_address, %ebx + movl restore_cr3, %ebp +@@ -45,9 +45,10 @@ ENTRY(restore_image) + /* jump to relocated restore code */ + movl relocated_restore_code, %eax + jmpl *%eax ++SYM_CODE_END(restore_image) + + /* code below has been relocated to a safe page */ +-ENTRY(core_restore_code) ++SYM_CODE_START(core_restore_code) + movl temp_pgt, %eax + movl %eax, %cr3 + +@@ -77,6 +78,7 @@ copy_loop: + + done: + jmpl *%ebx ++SYM_CODE_END(core_restore_code) + + /* code below belongs to the image kernel */ + .align PAGE_SIZE +diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S +index 1868b158480d4..3a0ef0d577344 100644 +--- a/arch/x86/realmode/rm/trampoline_32.S ++++ b/arch/x86/realmode/rm/trampoline_32.S +@@ -29,7 +29,7 @@ + .code16 + + .balign PAGE_SIZE +-ENTRY(trampoline_start) ++SYM_CODE_START(trampoline_start) + wbinvd # Needed for NUMA-Q should be harmless for others + + LJMPW_RM(1f) +@@ -54,11 +54,13 @@ ENTRY(trampoline_start) + lmsw %dx # into protected mode + + ljmpl $__BOOT_CS, $pa_startup_32 ++SYM_CODE_END(trampoline_start) + + .section ".text32","ax" + .code32 +-ENTRY(startup_32) # note: also used from wakeup_asm.S ++SYM_CODE_START(startup_32) # note: also used from wakeup_asm.S + jmp *%eax ++SYM_CODE_END(startup_32) + + .bss + .balign 8 +diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S +index cd177772fe4d5..2712e91553063 100644 +--- a/arch/x86/xen/xen-asm_32.S ++++ b/arch/x86/xen/xen-asm_32.S +@@ -56,7 +56,7 @@ + _ASM_EXTABLE(1b,2b) + .endm + +-ENTRY(xen_iret) ++SYM_CODE_START(xen_iret) + /* test eflags for special cases */ + testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp) + jnz hyper_iret +@@ -122,6 +122,7 @@ xen_iret_end_crit: + hyper_iret: + /* put this out of line since its very rarely used */ + jmp hypercall_page + __HYPERVISOR_iret * 32 ++SYM_CODE_END(xen_iret) + + .globl xen_iret_start_crit, xen_iret_end_crit + +@@ -152,7 +153,7 @@ hyper_iret: + * The only caveat is that if the outer eax hasn't been restored yet (i.e. + * it's still on stack), we need to restore its value here. + */ +-ENTRY(xen_iret_crit_fixup) ++SYM_CODE_START(xen_iret_crit_fixup) + /* + * Paranoia: Make sure we're really coming from kernel space. 
+ * One could imagine a case where userspace jumps into the +@@ -179,4 +180,4 @@ ENTRY(xen_iret_crit_fixup) + + 2: + ret +-END(xen_iret_crit_fixup) ++SYM_CODE_END(xen_iret_crit_fixup) +diff --git a/block/genhd.c b/block/genhd.c +index 26b31fcae217f..604f0a2cbc9a0 100644 +--- a/block/genhd.c ++++ b/block/genhd.c +@@ -222,14 +222,17 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter) + part = rcu_dereference(ptbl->part[piter->idx]); + if (!part) + continue; ++ get_device(part_to_dev(part)); ++ piter->part = part; + if (!part_nr_sects_read(part) && + !(piter->flags & DISK_PITER_INCL_EMPTY) && + !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 && +- piter->idx == 0)) ++ piter->idx == 0)) { ++ put_device(part_to_dev(part)); ++ piter->part = NULL; + continue; ++ } + +- get_device(part_to_dev(part)); +- piter->part = part; + piter->idx += inc; + break; + } +diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c +index f58baff2be0af..398991381e9af 100644 +--- a/drivers/base/regmap/regmap-debugfs.c ++++ b/drivers/base/regmap/regmap-debugfs.c +@@ -583,8 +583,12 @@ void regmap_debugfs_init(struct regmap *map, const char *name) + devname = dev_name(map->dev); + + if (name) { +- map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s", ++ if (!map->debugfs_name) { ++ map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s", + devname, name); ++ if (!map->debugfs_name) ++ return; ++ } + name = map->debugfs_name; + } else { + name = devname; +@@ -592,9 +596,10 @@ void regmap_debugfs_init(struct regmap *map, const char *name) + + if (!strcmp(name, "dummy")) { + kfree(map->debugfs_name); +- + map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d", + dummy_index); ++ if (!map->debugfs_name) ++ return; + name = map->debugfs_name; + dummy_index++; + } +diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig +index 1bb8ec5753527..0fc27ac14f29c 100644 +--- a/drivers/block/Kconfig ++++ b/drivers/block/Kconfig +@@ -461,6 +461,7 @@ config BLK_DEV_RBD + config BLK_DEV_RSXX + tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver" + depends on PCI ++ select CRC32 + help + Device driver for IBM's high speed PCIe SSD + storage device: Flash Adapter 900GB Full Height. 
+diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c +index 2db2f1739e092..1b2ec3be59eb7 100644 +--- a/drivers/cpufreq/powernow-k8.c ++++ b/drivers/cpufreq/powernow-k8.c +@@ -878,9 +878,9 @@ static int get_transition_latency(struct powernow_k8_data *data) + + /* Take a frequency, and issue the fid/vid transition command */ + static int transition_frequency_fidvid(struct powernow_k8_data *data, +- unsigned int index) ++ unsigned int index, ++ struct cpufreq_policy *policy) + { +- struct cpufreq_policy *policy; + u32 fid = 0; + u32 vid = 0; + int res; +@@ -912,9 +912,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, + freqs.old = find_khz_freq_from_fid(data->currfid); + freqs.new = find_khz_freq_from_fid(fid); + +- policy = cpufreq_cpu_get(smp_processor_id()); +- cpufreq_cpu_put(policy); +- + cpufreq_freq_transition_begin(policy, &freqs); + res = transition_fid_vid(data, fid, vid); + cpufreq_freq_transition_end(policy, &freqs, res); +@@ -969,7 +966,7 @@ static long powernowk8_target_fn(void *arg) + + powernow_k8_acpi_pst_values(data, newstate); + +- ret = transition_frequency_fidvid(data, newstate); ++ ret = transition_frequency_fidvid(data, newstate, pol); + + if (ret) { + pr_err("transition frequency failed\n"); +diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c +index f81a5e35d8fd1..eddc6d1bdb2d1 100644 +--- a/drivers/crypto/chelsio/chtls/chtls_cm.c ++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c +@@ -577,7 +577,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx) + + while (!skb_queue_empty(&listen_ctx->synq)) { + struct chtls_sock *csk = +- container_of((struct synq *)__skb_dequeue ++ container_of((struct synq *)skb_peek + (&listen_ctx->synq), struct chtls_sock, synq); + struct sock *child = csk->sk; + +@@ -1021,6 +1021,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk, + const struct cpl_pass_accept_req *req, + struct chtls_dev *cdev) + { ++ struct adapter *adap = pci_get_drvdata(cdev->pdev); + struct inet_sock *newinet; + const struct iphdr *iph; + struct tls_context *ctx; +@@ -1030,9 +1031,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk, + struct neighbour *n; + struct tcp_sock *tp; + struct sock *newsk; ++ bool found = false; + u16 port_id; + int rxq_idx; +- int step; ++ int step, i; + + iph = (const struct iphdr *)network_hdr; + newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb); +@@ -1044,7 +1046,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk, + goto free_sk; + + n = dst_neigh_lookup(dst, &iph->saddr); +- if (!n) ++ if (!n || !n->dev) + goto free_sk; + + ndev = n->dev; +@@ -1053,6 +1055,13 @@ static struct sock *chtls_recv_sock(struct sock *lsk, + if (is_vlan_dev(ndev)) + ndev = vlan_dev_real_dev(ndev); + ++ for_each_port(adap, i) ++ if (cdev->ports[i] == ndev) ++ found = true; ++ ++ if (!found) ++ goto free_dst; ++ + port_id = cxgb4_port_idx(ndev); + + csk = chtls_sock_create(cdev); +@@ -1108,6 +1117,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk, + free_csk: + chtls_sock_release(&csk->kref); + free_dst: ++ neigh_release(n); + dst_release(dst); + free_sk: + inet_csk_prepare_forced_close(newsk); +@@ -1443,6 +1453,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb) + sk_wake_async(sk, 0, POLL_OUT); + + data = lookup_stid(cdev->tids, stid); ++ if (!data) { ++ /* listening server close */ ++ kfree_skb(skb); ++ goto unlock; ++ } + lsk = ((struct listen_ctx *)data)->lsk; + + bh_lock_sock(lsk); +@@ -1828,39 
+1843,6 @@ static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb) + kfree_skb(skb); + } + +-static void send_abort_rpl(struct sock *sk, struct sk_buff *skb, +- struct chtls_dev *cdev, int status, int queue) +-{ +- struct cpl_abort_req_rss *req = cplhdr(skb); +- struct sk_buff *reply_skb; +- struct chtls_sock *csk; +- +- csk = rcu_dereference_sk_user_data(sk); +- +- reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl), +- GFP_KERNEL); +- +- if (!reply_skb) { +- req->status = (queue << 1); +- send_defer_abort_rpl(cdev, skb); +- return; +- } +- +- set_abort_rpl_wr(reply_skb, GET_TID(req), status); +- kfree_skb(skb); +- +- set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue); +- if (csk_conn_inline(csk)) { +- struct l2t_entry *e = csk->l2t_entry; +- +- if (e && sk->sk_state != TCP_SYN_RECV) { +- cxgb4_l2t_send(csk->egress_dev, reply_skb, e); +- return; +- } +- } +- cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); +-} +- + /* + * Add an skb to the deferred skb queue for processing from process context. + */ +@@ -1923,9 +1905,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb) + queue = csk->txq_idx; + + skb->sk = NULL; ++ chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev, ++ CPL_ABORT_NO_RST, queue); + do_abort_syn_rcv(child, lsk); +- send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev, +- CPL_ABORT_NO_RST, queue); + } + + static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb) +@@ -1955,8 +1937,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb) + if (!sock_owned_by_user(psk)) { + int queue = csk->txq_idx; + ++ chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue); + do_abort_syn_rcv(sk, psk); +- send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue); + } else { + skb->sk = sk; + BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv; +@@ -1974,9 +1956,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb) + int queue = csk->txq_idx; + + if (is_neg_adv(req->status)) { +- if (sk->sk_state == TCP_SYN_RECV) +- chtls_set_tcb_tflag(sk, 0, 0); +- + kfree_skb(skb); + return; + } +@@ -2002,12 +1981,11 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb) + + if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb)) + return; +- +- chtls_release_resources(sk); +- chtls_conn_done(sk); + } + + chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue); ++ chtls_release_resources(sk); ++ chtls_conn_done(sk); + } + + static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb) +diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c +index 7f9a86c3c58ff..31577316f80bc 100644 +--- a/drivers/dma/dw-edma/dw-edma-core.c ++++ b/drivers/dma/dw-edma/dw-edma-core.c +@@ -85,12 +85,12 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc) + + if (desc->chunk) { + /* Create and add new element into the linked list */ +- desc->chunks_alloc++; +- list_add_tail(&chunk->list, &desc->chunk->list); + if (!dw_edma_alloc_burst(chunk)) { + kfree(chunk); + return NULL; + } ++ desc->chunks_alloc++; ++ list_add_tail(&chunk->list, &desc->chunk->list); + } else { + /* List head */ + chunk->burst = NULL; +diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c +index 4c58da7421432..04d89eec11e74 100644 +--- a/drivers/dma/mediatek/mtk-hsdma.c ++++ b/drivers/dma/mediatek/mtk-hsdma.c +@@ -1007,6 +1007,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev) + return 0; + + err_free: ++ mtk_hsdma_hw_deinit(hsdma); + 
of_dma_controller_free(pdev->dev.of_node); + err_unregister: + dma_async_device_unregister(dd); +diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c +index a6abfe702c5a3..1b5f3e9f43d70 100644 +--- a/drivers/dma/xilinx/xilinx_dma.c ++++ b/drivers/dma/xilinx/xilinx_dma.c +@@ -2431,7 +2431,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, + has_dre = false; + + if (!has_dre) +- xdev->common.copy_align = fls(width - 1); ++ xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1); + + if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || + of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || +@@ -2543,7 +2543,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, + static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, + struct device_node *node) + { +- int ret, i, nr_channels = 1; ++ int ret, i; ++ u32 nr_channels = 1; + + ret = of_property_read_u32(node, "dma-channels", &nr_channels); + if ((ret < 0) && xdev->mcdma) +@@ -2742,7 +2743,11 @@ static int xilinx_dma_probe(struct platform_device *pdev) + } + + /* Register the DMA engine with the core */ +- dma_async_device_register(&xdev->common); ++ err = dma_async_device_register(&xdev->common); ++ if (err) { ++ dev_err(xdev->dev, "failed to register the dma device\n"); ++ goto error; ++ } + + err = of_dma_controller_register(node, of_dma_xilinx_xlate, + xdev); +diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c +index cd71e71339446..9e852b4bbf92b 100644 +--- a/drivers/hid/wacom_sys.c ++++ b/drivers/hid/wacom_sys.c +@@ -1270,6 +1270,37 @@ static int wacom_devm_sysfs_create_group(struct wacom *wacom, + group); + } + ++static void wacom_devm_kfifo_release(struct device *dev, void *res) ++{ ++ struct kfifo_rec_ptr_2 *devres = res; ++ ++ kfifo_free(devres); ++} ++ ++static int wacom_devm_kfifo_alloc(struct wacom *wacom) ++{ ++ struct wacom_wac *wacom_wac = &wacom->wacom_wac; ++ struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo; ++ int error; ++ ++ pen_fifo = devres_alloc(wacom_devm_kfifo_release, ++ sizeof(struct kfifo_rec_ptr_2), ++ GFP_KERNEL); ++ ++ if (!pen_fifo) ++ return -ENOMEM; ++ ++ error = kfifo_alloc(pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL); ++ if (error) { ++ devres_free(pen_fifo); ++ return error; ++ } ++ ++ devres_add(&wacom->hdev->dev, pen_fifo); ++ ++ return 0; ++} ++ + enum led_brightness wacom_leds_brightness_get(struct wacom_led *led) + { + struct wacom *wacom = led->wacom; +@@ -2724,7 +2755,7 @@ static int wacom_probe(struct hid_device *hdev, + if (features->check_for_hid_type && features->hid_type != hdev->type) + return -ENODEV; + +- error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL); ++ error = wacom_devm_kfifo_alloc(wacom); + if (error) + return error; + +@@ -2786,8 +2817,6 @@ static void wacom_remove(struct hid_device *hdev) + + if (wacom->wacom_wac.features.type != REMOTE) + wacom_release_resources(wacom); +- +- kfifo_free(&wacom_wac->pen_fifo); + } + + #ifdef CONFIG_PM +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c +index c40eef4e7a985..2b6a4c1f188f4 100644 +--- a/drivers/i2c/busses/i2c-i801.c ++++ b/drivers/i2c/busses/i2c-i801.c +@@ -1424,7 +1424,7 @@ static int i801_add_mux(struct i801_priv *priv) + + /* Register GPIO descriptor lookup table */ + lookup = devm_kzalloc(dev, +- struct_size(lookup, table, mux_config->n_gpios), ++ struct_size(lookup, table, mux_config->n_gpios + 1), + GFP_KERNEL); + if (!lookup) + return -ENOMEM; +diff --git 
a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c +index b432e7580458d..b2dc802864641 100644 +--- a/drivers/i2c/busses/i2c-sprd.c ++++ b/drivers/i2c/busses/i2c-sprd.c +@@ -72,6 +72,8 @@ + + /* timeout (ms) for pm runtime autosuspend */ + #define SPRD_I2C_PM_TIMEOUT 1000 ++/* timeout (ms) for transfer message */ ++#define I2C_XFER_TIMEOUT 1000 + + /* SPRD i2c data structure */ + struct sprd_i2c { +@@ -244,6 +246,7 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap, + struct i2c_msg *msg, bool is_last_msg) + { + struct sprd_i2c *i2c_dev = i2c_adap->algo_data; ++ unsigned long time_left; + + i2c_dev->msg = msg; + i2c_dev->buf = msg->buf; +@@ -273,7 +276,10 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap, + + sprd_i2c_opt_start(i2c_dev); + +- wait_for_completion(&i2c_dev->complete); ++ time_left = wait_for_completion_timeout(&i2c_dev->complete, ++ msecs_to_jiffies(I2C_XFER_TIMEOUT)); ++ if (!time_left) ++ return -ETIMEDOUT; + + return i2c_dev->err; + } +diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +index b0f3da1976e4f..d1f2109012ed5 100644 +--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c ++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +@@ -664,13 +664,29 @@ static irqreturn_t st_lsm6dsx_handler_irq(int irq, void *private) + static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private) + { + struct st_lsm6dsx_hw *hw = private; +- int count; ++ int fifo_len = 0, len; + +- mutex_lock(&hw->fifo_lock); +- count = hw->settings->fifo_ops.read_fifo(hw); +- mutex_unlock(&hw->fifo_lock); ++ /* ++ * If we are using edge IRQs, new samples can arrive while ++ * processing current interrupt since there are no hw ++ * guarantees the irq line stays "low" long enough to properly ++ * detect the new interrupt. In this case the new sample will ++ * be missed. ++ * Polling FIFO status register allow us to read new ++ * samples even if the interrupt arrives while processing ++ * previous data and the timeslot where the line is "low" is ++ * too short to be properly detected. ++ */ ++ do { ++ mutex_lock(&hw->fifo_lock); ++ len = hw->settings->fifo_ops.read_fifo(hw); ++ mutex_unlock(&hw->fifo_lock); ++ ++ if (len > 0) ++ fifo_len += len; ++ } while (len > 0); + +- return count ? IRQ_HANDLED : IRQ_NONE; ++ return fifo_len ? IRQ_HANDLED : IRQ_NONE; + } + + static int st_lsm6dsx_buffer_preenable(struct iio_dev *iio_dev) +diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c +index f697f3a1d46bc..5dcc81b1df623 100644 +--- a/drivers/iommu/intel_irq_remapping.c ++++ b/drivers/iommu/intel_irq_remapping.c +@@ -1400,6 +1400,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain, + irq_data = irq_domain_get_irq_data(domain, virq + i); + irq_cfg = irqd_cfg(irq_data); + if (!irq_data || !irq_cfg) { ++ if (!i) ++ kfree(data); + ret = -EINVAL; + goto out_free_data; + } +diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig +index 8f39f9ba5c80e..4c2ce210c1237 100644 +--- a/drivers/lightnvm/Kconfig ++++ b/drivers/lightnvm/Kconfig +@@ -19,6 +19,7 @@ if NVM + + config NVM_PBLK + tristate "Physical Block Device Open-Channel SSD target" ++ select CRC32 + help + Allows an open-channel SSD to be exposed as a block device to the + host. 
The target assumes the device exposes raw flash and must be +diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig +index 17c166cc8482d..e4d944770ccaf 100644 +--- a/drivers/net/can/Kconfig ++++ b/drivers/net/can/Kconfig +@@ -123,6 +123,7 @@ config CAN_JANZ_ICAN3 + config CAN_KVASER_PCIEFD + depends on PCI + tristate "Kvaser PCIe FD cards" ++ select CRC32 + help + This is a driver for the Kvaser PCI Express CAN FD family. + +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c +index d2bb9a87eff9a..8a842545e3f69 100644 +--- a/drivers/net/can/m_can/m_can.c ++++ b/drivers/net/can/m_can/m_can.c +@@ -1868,8 +1868,6 @@ void m_can_class_unregister(struct m_can_classdev *m_can_dev) + { + unregister_candev(m_can_dev->net); + +- m_can_clk_stop(m_can_dev); +- + free_candev(m_can_dev->net); + } + EXPORT_SYMBOL_GPL(m_can_class_unregister); +diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c +index 681bb861de05e..1f8710b35c6d7 100644 +--- a/drivers/net/can/m_can/tcan4x5x.c ++++ b/drivers/net/can/m_can/tcan4x5x.c +@@ -126,30 +126,6 @@ struct tcan4x5x_priv { + int reg_offset; + }; + +-static struct can_bittiming_const tcan4x5x_bittiming_const = { +- .name = DEVICE_NAME, +- .tseg1_min = 2, +- .tseg1_max = 31, +- .tseg2_min = 2, +- .tseg2_max = 16, +- .sjw_max = 16, +- .brp_min = 1, +- .brp_max = 32, +- .brp_inc = 1, +-}; +- +-static struct can_bittiming_const tcan4x5x_data_bittiming_const = { +- .name = DEVICE_NAME, +- .tseg1_min = 1, +- .tseg1_max = 32, +- .tseg2_min = 1, +- .tseg2_max = 16, +- .sjw_max = 16, +- .brp_min = 1, +- .brp_max = 32, +- .brp_inc = 1, +-}; +- + static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv) + { + int wake_state = 0; +@@ -449,8 +425,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi) + mcan_class->dev = &spi->dev; + mcan_class->ops = &tcan4x5x_ops; + mcan_class->is_peripheral = true; +- mcan_class->bit_timing = &tcan4x5x_bittiming_const; +- mcan_class->data_timing = &tcan4x5x_data_bittiming_const; + mcan_class->net->irq = spi->irq; + + spi_set_drvdata(spi, priv); +diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c +index 0d9b3fa7bd94e..ee1e67df1e7b4 100644 +--- a/drivers/net/dsa/lantiq_gswip.c ++++ b/drivers/net/dsa/lantiq_gswip.c +@@ -1419,11 +1419,12 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port, + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + +- /* With the exclusion of MII and Reverse MII, we support Gigabit, +- * including Half duplex ++ /* With the exclusion of MII, Reverse MII and Reduced MII, we ++ * support Gigabit, including Half duplex + */ + if (state->interface != PHY_INTERFACE_MODE_MII && +- state->interface != PHY_INTERFACE_MODE_REVMII) { ++ state->interface != PHY_INTERFACE_MODE_REVMII && ++ state->interface != PHY_INTERFACE_MODE_RMII) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + } +diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +index f8a87f8ca9833..148e53812d89c 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h ++++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +@@ -123,7 +123,7 @@ struct hclgevf_mbx_arq_ring { + #define hclge_mbx_ring_ptr_move_crq(crq) \ + (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num) + #define hclge_mbx_tail_ptr_move_arq(arq) \ +- (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE) ++ (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM) + #define 
hclge_mbx_head_ptr_move_arq(arq) \ +- (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE) ++ (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM) + #endif +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +index 6c3d13110993f..6887b7fda6e07 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +@@ -746,7 +746,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) + handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; + handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; + +- if (hdev->hw.mac.phydev) { ++ if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && ++ hdev->hw.mac.phydev->drv->set_loopback) { + count += 1; + handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; + } +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +index 8827ab4b4932e..6988bbf2576f5 100644 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +@@ -4545,7 +4545,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) + struct mvpp2 *priv = port->priv; + struct mvpp2_txq_pcpu *txq_pcpu; + unsigned int thread; +- int queue, err; ++ int queue, err, val; + + /* Checks for hardware constraints */ + if (port->first_rxq + port->nrxqs > +@@ -4559,6 +4559,18 @@ static int mvpp2_port_init(struct mvpp2_port *port) + mvpp2_egress_disable(port); + mvpp2_port_disable(port); + ++ if (mvpp2_is_xlg(port->phy_interface)) { ++ val = readl(port->base + MVPP22_XLG_CTRL0_REG); ++ val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; ++ val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; ++ writel(val, port->base + MVPP22_XLG_CTRL0_REG); ++ } else { ++ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); ++ val &= ~MVPP2_GMAC_FORCE_LINK_PASS; ++ val |= MVPP2_GMAC_FORCE_LINK_DOWN; ++ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); ++ } ++ + port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; + + port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +index 6d55e3d0b7ea2..54e9f6dc24ea0 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +@@ -725,8 +725,10 @@ static int cgx_lmac_init(struct cgx *cgx) + if (!lmac) + return -ENOMEM; + lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL); +- if (!lmac->name) +- return -ENOMEM; ++ if (!lmac->name) { ++ err = -ENOMEM; ++ goto err_lmac_free; ++ } + sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); + lmac->lmac_id = i; + lmac->cgx = cgx; +@@ -737,7 +739,7 @@ static int cgx_lmac_init(struct cgx *cgx) + CGX_LMAC_FWI + i * 9), + cgx_fwi_event_handler, 0, lmac->name, lmac); + if (err) +- return err; ++ goto err_irq; + + /* Enable interrupt */ + cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S, +@@ -748,6 +750,12 @@ static int cgx_lmac_init(struct cgx *cgx) + } + + return cgx_lmac_verify_fwi_version(cgx); ++ ++err_irq: ++ kfree(lmac->name); ++err_lmac_free: ++ kfree(lmac); ++ return err; + } + + static int cgx_lmac_exit(struct cgx *cgx) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +index 8cd529556b214..01089c2283d7f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +@@ 
-976,6 +976,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev, + return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings); + } + ++static int mlx5e_speed_validate(struct net_device *netdev, bool ext, ++ const unsigned long link_modes, u8 autoneg) ++{ ++ /* Extended link-mode has no speed limitations. */ ++ if (ext) ++ return 0; ++ ++ if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) && ++ autoneg != AUTONEG_ENABLE) { ++ netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n", ++ __func__); ++ return -EINVAL; ++ } ++ return 0; ++} ++ + static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) + { + u32 i, ptys_modes = 0; +@@ -1068,13 +1084,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, + link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) : + mlx5e_port_speed2linkmodes(mdev, speed, !ext); + +- if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) && +- autoneg != AUTONEG_ENABLE) { +- netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n", +- __func__); +- err = -EINVAL; ++ err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg); ++ if (err) + goto out; +- } + + link_modes = link_modes & eproto.cap; + if (!link_modes) { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +index 713dc210f710c..c4ac7a9968d16 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +@@ -927,6 +927,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc, + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + kfree(ft->g); ++ ft->g = NULL; + return -ENOMEM; + } + +@@ -1067,6 +1068,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc) + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + kfree(ft->g); ++ ft->g = NULL; + return -ENOMEM; + } + +@@ -1346,6 +1348,7 @@ err_destroy_groups: + ft->g[ft->num_groups] = NULL; + mlx5e_destroy_groups(ft); + kvfree(in); ++ kfree(ft->g); + + return err; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c +index 0fc7de4aa572f..8e0dddc6383f0 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c +@@ -116,7 +116,7 @@ free: + static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev) + { + mlx5_core_roce_gid_set(dev, 0, 0, 0, +- NULL, NULL, false, 0, 0); ++ NULL, NULL, false, 0, 1); + } + + static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid) +diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c +index 0937fc2a928ed..23c9394cd5d22 100644 +--- a/drivers/net/ethernet/natsemi/macsonic.c ++++ b/drivers/net/ethernet/natsemi/macsonic.c +@@ -540,10 +540,14 @@ static int mac_sonic_platform_probe(struct platform_device *pdev) + + err = register_netdev(dev); + if (err) +- goto out; ++ goto undo_probe; + + return 0; + ++undo_probe: ++ dma_free_coherent(lp->device, ++ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), ++ lp->descriptors, lp->descriptors_laddr); + out: + free_netdev(dev); + +@@ -618,12 +622,16 @@ static int mac_sonic_nubus_probe(struct nubus_board *board) + + err = register_netdev(ndev); + if (err) +- goto out; ++ goto undo_probe; + + nubus_set_drvdata(board, ndev); + + return 0; + ++undo_probe: ++ dma_free_coherent(lp->device, ++ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), ++ lp->descriptors, 
lp->descriptors_laddr); + out: + free_netdev(ndev); + return err; +diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c +index e1b886e87a762..44171d7bb434c 100644 +--- a/drivers/net/ethernet/natsemi/xtsonic.c ++++ b/drivers/net/ethernet/natsemi/xtsonic.c +@@ -265,11 +265,14 @@ int xtsonic_probe(struct platform_device *pdev) + sonic_msg_init(dev); + + if ((err = register_netdev(dev))) +- goto out1; ++ goto undo_probe1; + + return 0; + +-out1: ++undo_probe1: ++ dma_free_coherent(lp->device, ++ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), ++ lp->descriptors, lp->descriptors_laddr); + release_region(dev->base_addr, SONIC_MEM_SIZE); + out: + free_netdev(dev); +diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig +index 55a29ec766807..58eac2471d53a 100644 +--- a/drivers/net/ethernet/qlogic/Kconfig ++++ b/drivers/net/ethernet/qlogic/Kconfig +@@ -78,6 +78,7 @@ config QED + depends on PCI + select ZLIB_INFLATE + select CRC8 ++ select CRC32 + select NET_DEVLINK + ---help--- + This enables the support for ... +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +index e9e0867ec139d..c4c9cbdeb601e 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +@@ -64,6 +64,7 @@ struct emac_variant { + * @variant: reference to the current board variant + * @regmap: regmap for using the syscon + * @internal_phy_powered: Does the internal PHY is enabled ++ * @use_internal_phy: Is the internal PHY selected for use + * @mux_handle: Internal pointer used by mdio-mux lib + */ + struct sunxi_priv_data { +@@ -74,6 +75,7 @@ struct sunxi_priv_data { + const struct emac_variant *variant; + struct regmap_field *regmap_field; + bool internal_phy_powered; ++ bool use_internal_phy; + void *mux_handle; + }; + +@@ -523,8 +525,11 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = { + .dma_interrupt = sun8i_dwmac_dma_interrupt, + }; + ++static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv); ++ + static int sun8i_dwmac_init(struct platform_device *pdev, void *priv) + { ++ struct net_device *ndev = platform_get_drvdata(pdev); + struct sunxi_priv_data *gmac = priv; + int ret; + +@@ -538,13 +543,25 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv) + + ret = clk_prepare_enable(gmac->tx_clk); + if (ret) { +- if (gmac->regulator) +- regulator_disable(gmac->regulator); + dev_err(&pdev->dev, "Could not enable AHB clock\n"); +- return ret; ++ goto err_disable_regulator; ++ } ++ ++ if (gmac->use_internal_phy) { ++ ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev)); ++ if (ret) ++ goto err_disable_clk; + } + + return 0; ++ ++err_disable_clk: ++ clk_disable_unprepare(gmac->tx_clk); ++err_disable_regulator: ++ if (gmac->regulator) ++ regulator_disable(gmac->regulator); ++ ++ return ret; + } + + static void sun8i_dwmac_core_init(struct mac_device_info *hw, +@@ -815,7 +832,6 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child, + struct sunxi_priv_data *gmac = priv->plat->bsp_priv; + u32 reg, val; + int ret = 0; +- bool need_power_ephy = false; + + if (current_child ^ desired_child) { + regmap_field_read(gmac->regmap_field, ®); +@@ -823,13 +839,12 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child, + case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID: + dev_info(priv->device, "Switch mux to internal PHY"); + val = (reg & ~H3_EPHY_MUX_MASK) | 
H3_EPHY_SELECT; +- +- need_power_ephy = true; ++ gmac->use_internal_phy = true; + break; + case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID: + dev_info(priv->device, "Switch mux to external PHY"); + val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN; +- need_power_ephy = false; ++ gmac->use_internal_phy = false; + break; + default: + dev_err(priv->device, "Invalid child ID %x\n", +@@ -837,7 +852,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child, + return -EINVAL; + } + regmap_field_write(gmac->regmap_field, val); +- if (need_power_ephy) { ++ if (gmac->use_internal_phy) { + ret = sun8i_dwmac_power_internal_phy(priv); + if (ret) + return ret; +@@ -988,17 +1003,12 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv) + struct sunxi_priv_data *gmac = priv; + + if (gmac->variant->soc_has_internal_phy) { +- /* sun8i_dwmac_exit could be called with mdiomux uninit */ +- if (gmac->mux_handle) +- mdio_mux_uninit(gmac->mux_handle); + if (gmac->internal_phy_powered) + sun8i_dwmac_unpower_internal_phy(gmac); + } + + sun8i_dwmac_unset_syscon(gmac); + +- reset_control_put(gmac->rst_ephy); +- + clk_disable_unprepare(gmac->tx_clk); + + if (gmac->regulator) +@@ -1227,12 +1237,32 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) + + return ret; + dwmac_mux: ++ reset_control_put(gmac->rst_ephy); ++ clk_put(gmac->ephy_clk); + sun8i_dwmac_unset_syscon(gmac); + dwmac_exit: + stmmac_pltfr_remove(pdev); + return ret; + } + ++static int sun8i_dwmac_remove(struct platform_device *pdev) ++{ ++ struct net_device *ndev = platform_get_drvdata(pdev); ++ struct stmmac_priv *priv = netdev_priv(ndev); ++ struct sunxi_priv_data *gmac = priv->plat->bsp_priv; ++ ++ if (gmac->variant->soc_has_internal_phy) { ++ mdio_mux_uninit(gmac->mux_handle); ++ sun8i_dwmac_unpower_internal_phy(gmac); ++ reset_control_put(gmac->rst_ephy); ++ clk_put(gmac->ephy_clk); ++ } ++ ++ stmmac_pltfr_remove(pdev); ++ ++ return 0; ++} ++ + static const struct of_device_id sun8i_dwmac_match[] = { + { .compatible = "allwinner,sun8i-h3-emac", + .data = &emac_variant_h3 }, +@@ -1252,7 +1282,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match); + + static struct platform_driver sun8i_dwmac_driver = { + .probe = sun8i_dwmac_probe, +- .remove = stmmac_pltfr_remove, ++ .remove = sun8i_dwmac_remove, + .driver = { + .name = "dwmac-sun8i", + .pm = &stmmac_pltfr_pm_ops, +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c +index d407489cec904..cbe7f35eac982 100644 +--- a/drivers/net/usb/cdc_ncm.c ++++ b/drivers/net/usb/cdc_ncm.c +@@ -1126,7 +1126,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) + * accordingly. Otherwise, we should check here. 
+ */ + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) +- delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus); ++ delayed_ndp_size = ctx->max_ndp_size + ++ max_t(u32, ++ ctx->tx_ndp_modulus, ++ ctx->tx_modulus + ctx->tx_remainder) - 1; + else + delayed_ndp_size = 0; + +@@ -1307,7 +1310,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) + if (!(dev->driver_info->flags & FLAG_SEND_ZLP) && + skb_out->len > ctx->min_tx_pkt) { + padding_count = ctx->tx_curr_size - skb_out->len; +- skb_put_zero(skb_out, padding_count); ++ if (!WARN_ON(padding_count > ctx->tx_curr_size)) ++ skb_put_zero(skb_out, padding_count); + } else if (skb_out->len < ctx->tx_curr_size && + (skb_out->len % dev->maxpacket) == 0) { + skb_put_u8(skb_out, 0); /* force short packet */ +diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig +index 058d77d2e693d..0d6e1829e0ac9 100644 +--- a/drivers/net/wan/Kconfig ++++ b/drivers/net/wan/Kconfig +@@ -282,6 +282,7 @@ config SLIC_DS26522 + tristate "Slic Maxim ds26522 card support" + depends on SPI + depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST ++ select BITREVERSE + help + This module initializes and configures the slic maxim card + in T1 or E1 mode. +diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig +index 0d1a8dab30ed4..32e1c036f3ac9 100644 +--- a/drivers/net/wireless/ath/wil6210/Kconfig ++++ b/drivers/net/wireless/ath/wil6210/Kconfig +@@ -2,6 +2,7 @@ + config WIL6210 + tristate "Wilocity 60g WiFi card wil6210 support" + select WANT_DEV_COREDUMP ++ select CRC32 + depends on CFG80211 + depends on PCI + default n +diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c +index f11e4bfbc91be..a47f87b8373df 100644 +--- a/drivers/regulator/qcom-rpmh-regulator.c ++++ b/drivers/regulator/qcom-rpmh-regulator.c +@@ -726,7 +726,7 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = { + static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = { + .regulator_type = VRM, + .ops = &rpmh_regulator_vrm_ops, +- .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600), ++ .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000), + .n_voltages = 5, + .pmic_mode_map = pmic_mode_map_pmic5_smps, + .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, +diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c +index a1c23e998f977..8dee16aca421f 100644 +--- a/drivers/s390/net/qeth_l3_main.c ++++ b/drivers/s390/net/qeth_l3_main.c +@@ -2114,7 +2114,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) + { +- if (qeth_get_ip_version(skb) != 4) ++ if (vlan_get_protocol(skb) != htons(ETH_P_IP)) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + return qeth_features_check(skb, dev, features); + } +diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c +index ed20ad2950885..77ddf23b65d65 100644 +--- a/drivers/spi/spi-stm32.c ++++ b/drivers/spi/spi-stm32.c +@@ -494,9 +494,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len) + + /* align packet size with data registers access */ + if (spi->cur_bpw > 8) +- fthlv -= (fthlv % 2); /* multiple of 2 */ ++ fthlv += (fthlv % 2) ? 1 : 0; + else +- fthlv -= (fthlv % 4); /* multiple of 4 */ ++ fthlv += (fthlv % 4) ? 
(4 - (fthlv % 4)) : 0; + + if (!fthlv) + fthlv = 1; +diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c +index 58c7d66060f7e..dd12777b9a788 100644 +--- a/drivers/staging/exfat/exfat_super.c ++++ b/drivers/staging/exfat/exfat_super.c +@@ -59,7 +59,7 @@ static void exfat_write_super(struct super_block *sb); + /* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */ + static void exfat_time_fat2unix(struct timespec64 *ts, struct date_time_t *tp) + { +- ts->tv_sec = mktime64(tp->Year + 1980, tp->Month + 1, tp->Day, ++ ts->tv_sec = mktime64(tp->Year + 1980, tp->Month, tp->Day, + tp->Hour, tp->Minute, tp->Second); + + ts->tv_nsec = tp->MilliSecond * NSEC_PER_MSEC; +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c +index 3b31e83a92155..bc6ba41686fa3 100644 +--- a/drivers/vfio/vfio_iommu_type1.c ++++ b/drivers/vfio/vfio_iommu_type1.c +@@ -2303,6 +2303,24 @@ out_unlock: + return ret; + } + ++static int vfio_iommu_dma_avail_build_caps(struct vfio_iommu *iommu, ++ struct vfio_info_cap *caps) ++{ ++ struct vfio_iommu_type1_info_dma_avail cap_dma_avail; ++ int ret; ++ ++ mutex_lock(&iommu->lock); ++ cap_dma_avail.header.id = VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL; ++ cap_dma_avail.header.version = 1; ++ ++ cap_dma_avail.avail = iommu->dma_avail; ++ ++ ret = vfio_info_add_capability(caps, &cap_dma_avail.header, ++ sizeof(cap_dma_avail)); ++ mutex_unlock(&iommu->lock); ++ return ret; ++} ++ + static long vfio_iommu_type1_ioctl(void *iommu_data, + unsigned int cmd, unsigned long arg) + { +@@ -2349,6 +2367,10 @@ static long vfio_iommu_type1_ioctl(void *iommu_data, + info.iova_pgsizes = vfio_pgsize_bitmap(iommu); + + ret = vfio_iommu_iova_build_caps(iommu, &caps); ++ ++ if (!ret) ++ ret = vfio_iommu_dma_avail_build_caps(iommu, &caps); ++ + if (ret) + return ret; + +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h +index e5e2425875953..130f16cc0b86d 100644 +--- a/include/asm-generic/vmlinux.lds.h ++++ b/include/asm-generic/vmlinux.lds.h +@@ -520,7 +520,10 @@ + */ + #define TEXT_TEXT \ + ALIGN_FUNCTION(); \ +- *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ ++ *(.text.hot .text.hot.*) \ ++ *(TEXT_MAIN .text.fixup) \ ++ *(.text.unlikely .text.unlikely.*) \ ++ *(.text.unknown .text.unknown.*) \ + *(.text..refcount) \ + *(.ref.text) \ + MEM_KEEP(init.text*) \ +diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h +index 9e843a147ead0..cabc93118f9c8 100644 +--- a/include/uapi/linux/vfio.h ++++ b/include/uapi/linux/vfio.h +@@ -748,6 +748,21 @@ struct vfio_iommu_type1_info_cap_iova_range { + struct vfio_iova_range iova_ranges[]; + }; + ++/* ++ * The DMA available capability allows to report the current number of ++ * simultaneously outstanding DMA mappings that are allowed. ++ * ++ * The structure below defines version 1 of this capability. ++ * ++ * avail: specifies the current number of outstanding DMA mappings allowed. 
++ */ ++#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3 ++ ++struct vfio_iommu_type1_info_dma_avail { ++ struct vfio_info_cap_header header; ++ __u32 avail; ++}; ++ + #define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12) + + /** +diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c +index d4bcfd8f95bf6..3f47abf9ef4a6 100644 +--- a/net/8021q/vlan.c ++++ b/net/8021q/vlan.c +@@ -280,7 +280,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) + return 0; + + out_free_newdev: +- if (new_dev->reg_state == NETREG_UNINITIALIZED) ++ if (new_dev->reg_state == NETREG_UNINITIALIZED || ++ new_dev->reg_state == NETREG_UNREGISTERED) + free_netdev(new_dev); + return err; + } +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index a0486dcf5425b..49d923c227a21 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -2017,6 +2017,12 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) + skb->csum = csum_block_sub(skb->csum, + skb_checksum(skb, len, delta, 0), + len); ++ } else if (skb->ip_summed == CHECKSUM_PARTIAL) { ++ int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; ++ int offset = skb_checksum_start_offset(skb) + skb->csum_offset; ++ ++ if (offset + sizeof(__sum16) > hdlen) ++ return -EINVAL; + } + return __pskb_trim(skb, len); + } +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index 079dcf9f0c56d..7a394479dd56c 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -303,7 +303,7 @@ static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff * + if (skb_is_gso(skb)) + return ip_finish_output_gso(net, sk, skb, mtu); + +- if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU)) ++ if (skb->len > mtu || IPCB(skb)->frag_max_size) + return ip_fragment(net, sk, skb, mtu, ip_finish_output2); + + return ip_finish_output2(net, sk, skb); +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c +index f61c5a0b502a8..ca525cf681a4e 100644 +--- a/net/ipv4/ip_tunnel.c ++++ b/net/ipv4/ip_tunnel.c +@@ -765,8 +765,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, + goto tx_error; + } + +- if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph, +- 0, 0, false)) { ++ df = tnl_params->frag_off; ++ if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df) ++ df |= (inner_iph->frag_off & htons(IP_DF)); ++ ++ if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, 0, 0, false)) { + ip_rt_put(rt); + goto tx_error; + } +@@ -794,10 +797,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, + ttl = ip4_dst_hoplimit(&rt->dst); + } + +- df = tnl_params->frag_off; +- if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df) +- df |= (inner_iph->frag_off&htons(IP_DF)); +- + max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) + + rt->dst.header_len + ip_encap_hlen(&tunnel->encap); + if (max_headroom > dev->needed_headroom) +diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c +index ea32b113089d3..c2b7d43d92b0e 100644 +--- a/net/ipv4/nexthop.c ++++ b/net/ipv4/nexthop.c +@@ -1157,8 +1157,10 @@ static struct nexthop *nexthop_create_group(struct net *net, + return nh; + + out_no_nh: +- for (; i >= 0; --i) ++ for (i--; i >= 0; --i) { ++ list_del(&nhg->nh_entries[i].nh_list); + nexthop_put(nhg->nh_entries[i].nh); ++ } + + kfree(nhg->spare); + kfree(nhg); +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c +index 0646fce31b67a..906ac5e6d96cd 100644 +--- a/net/ipv6/ip6_fib.c ++++ b/net/ipv6/ip6_fib.c +@@ -973,6 +973,8 @@ static void fib6_purge_rt(struct fib6_info *rt, struct 
fib6_node *fn, + { + struct fib6_table *table = rt->fib6_table; + ++ /* Flush all cached dst in exception table */ ++ rt6_flush_exceptions(rt); + fib6_drop_pcpu_from(rt, table); + + if (rt->nh && !list_empty(&rt->nh_list)) +@@ -1839,9 +1841,6 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, + net->ipv6.rt6_stats->fib_rt_entries--; + net->ipv6.rt6_stats->fib_discarded_routes++; + +- /* Flush all cached dst in exception table */ +- rt6_flush_exceptions(rt); +- + /* Reset round-robin state, if necessary */ + if (rcu_access_pointer(fn->rr_ptr) == rt) + fn->rr_ptr = NULL; +diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c +index bb311ccc6c487..c6787a1daa481 100644 +--- a/tools/bpf/bpftool/net.c ++++ b/tools/bpf/bpftool/net.c +@@ -9,7 +9,6 @@ + #include <unistd.h> + #include <libbpf.h> + #include <net/if.h> +-#include <linux/if.h> + #include <linux/rtnetlink.h> + #include <linux/tc_act/tc_bpf.h> + #include <sys/socket.h> +diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh +index 71a62e7e35b1c..3429767cadcdd 100755 +--- a/tools/testing/selftests/net/pmtu.sh ++++ b/tools/testing/selftests/net/pmtu.sh +@@ -119,7 +119,15 @@ + # - list_flush_ipv6_exception + # Using the same topology as in pmtu_ipv6, create exceptions, and check + # they are shown when listing exception caches, gone after flushing them +- ++# ++# - pmtu_ipv4_route_change ++# Use the same topology as in pmtu_ipv4, but issue a route replacement ++# command and delete the corresponding device afterward. This tests for ++# proper cleanup of the PMTU exceptions by the route replacement path. ++# Device unregistration should complete successfully ++# ++# - pmtu_ipv6_route_change ++# Same as above but with IPv6 + + # Kselftest framework requirement - SKIP code is 4. 
+ ksft_skip=4 +@@ -161,7 +169,9 @@ tests=" + cleanup_ipv4_exception ipv4: cleanup of cached exceptions 1 + cleanup_ipv6_exception ipv6: cleanup of cached exceptions 1 + list_flush_ipv4_exception ipv4: list and flush cached exceptions 1 +- list_flush_ipv6_exception ipv6: list and flush cached exceptions 1" ++ list_flush_ipv6_exception ipv6: list and flush cached exceptions 1 ++ pmtu_ipv4_route_change ipv4: PMTU exception w/route replace 1 ++ pmtu_ipv6_route_change ipv6: PMTU exception w/route replace 1" + + NS_A="ns-A" + NS_B="ns-B" +@@ -1316,6 +1326,63 @@ test_list_flush_ipv6_exception() { + return ${fail} + } + ++test_pmtu_ipvX_route_change() { ++ family=${1} ++ ++ setup namespaces routing || return 2 ++ trace "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \ ++ "${ns_r1}" veth_R1-B "${ns_b}" veth_B-R1 \ ++ "${ns_a}" veth_A-R2 "${ns_r2}" veth_R2-A \ ++ "${ns_r2}" veth_R2-B "${ns_b}" veth_B-R2 ++ ++ if [ ${family} -eq 4 ]; then ++ ping=ping ++ dst1="${prefix4}.${b_r1}.1" ++ dst2="${prefix4}.${b_r2}.1" ++ gw="${prefix4}.${a_r1}.2" ++ else ++ ping=${ping6} ++ dst1="${prefix6}:${b_r1}::1" ++ dst2="${prefix6}:${b_r2}::1" ++ gw="${prefix6}:${a_r1}::2" ++ fi ++ ++ # Set up initial MTU values ++ mtu "${ns_a}" veth_A-R1 2000 ++ mtu "${ns_r1}" veth_R1-A 2000 ++ mtu "${ns_r1}" veth_R1-B 1400 ++ mtu "${ns_b}" veth_B-R1 1400 ++ ++ mtu "${ns_a}" veth_A-R2 2000 ++ mtu "${ns_r2}" veth_R2-A 2000 ++ mtu "${ns_r2}" veth_R2-B 1500 ++ mtu "${ns_b}" veth_B-R2 1500 ++ ++ # Create route exceptions ++ run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1} ++ run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2} ++ ++ # Check that exceptions have been created with the correct PMTU ++ pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})" ++ check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1 ++ pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})" ++ check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1 ++ ++ # Replace the route from A to R1 ++ run_cmd ${ns_a} ip route change default via ${gw} ++ ++ # Delete the device in A ++ run_cmd ${ns_a} ip link del "veth_A-R1" ++} ++ ++test_pmtu_ipv4_route_change() { ++ test_pmtu_ipvX_route_change 4 ++} ++ ++test_pmtu_ipv6_route_change() { ++ test_pmtu_ipvX_route_change 6 ++} ++ + usage() { + echo + echo "$0 [OPTIONS] [TEST]..." |