author     Mike Pagano <mpagano@gentoo.org>  2019-03-19 13:00:45 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2019-03-19 13:00:45 -0400
commit     9e73079481bd1f7384d57cde9b6d67984fe872cc
tree       33754a2dae19d93155df18deed52c83d288a1fc3
parent     proj/linux-patches: Linux patch 5.0.2
proj/linux-patches: Linux patch 5.0.3 (5.0-4)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README            |    4
-rw-r--r--  1002_linux-5.0.3.patch | 1487
2 files changed, 1491 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 04daf20b..4989a604 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch: 1001_linux-5.0.2.patch
From: http://www.kernel.org
Desc: Linux 5.0.2
+Patch: 1002_linux-5.0.3.patch
+From: http://www.kernel.org
+Desc: Linux 5.0.3
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1002_linux-5.0.3.patch b/1002_linux-5.0.3.patch
new file mode 100644
index 00000000..9019944a
--- /dev/null
+++ b/1002_linux-5.0.3.patch
@@ -0,0 +1,1487 @@
+diff --git a/Makefile b/Makefile
+index bb2f7664594a..fb888787e7d1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 0
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index dadb8f7e5a0d..2480feb07df3 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3398,7 +3398,7 @@ tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ /*
+ * Without TFA we must not use PMC3.
+ */
+- if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
++ if (!allow_tsx_force_abort && test_bit(3, c->idxmsk) && idx >= 0) {
+ c = dyn_constraint(cpuc, c, idx);
+ c->idxmsk64 &= ~(1ULL << 3);
+ c->weight--;
+@@ -4142,7 +4142,7 @@ static struct attribute *intel_pmu_caps_attrs[] = {
+ NULL
+ };
+
+-DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
++static DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
+
+ static struct attribute *intel_pmu_attrs[] = {
+ &dev_attr_freeze_on_smi.attr,
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index a345d079f876..acd72e669c04 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -1032,12 +1032,12 @@ static inline int intel_pmu_init(void)
+ return 0;
+ }
+
+-static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
++static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
+ {
+ return 0;
+ }
+
+-static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
++static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
+ {
+ }
+
+diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
+index ed5e42461094..ad48fd52cb53 100644
+--- a/drivers/connector/cn_proc.c
++++ b/drivers/connector/cn_proc.c
+@@ -250,6 +250,7 @@ void proc_coredump_connector(struct task_struct *task)
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
++ struct task_struct *parent;
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+@@ -262,8 +263,14 @@ void proc_coredump_connector(struct task_struct *task)
+ ev->what = PROC_EVENT_COREDUMP;
+ ev->event_data.coredump.process_pid = task->pid;
+ ev->event_data.coredump.process_tgid = task->tgid;
+- ev->event_data.coredump.parent_pid = task->real_parent->pid;
+- ev->event_data.coredump.parent_tgid = task->real_parent->tgid;
++
++ rcu_read_lock();
++ if (pid_alive(task)) {
++ parent = rcu_dereference(task->real_parent);
++ ev->event_data.coredump.parent_pid = parent->pid;
++ ev->event_data.coredump.parent_tgid = parent->tgid;
++ }
++ rcu_read_unlock();
+
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+@@ -276,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
++ struct task_struct *parent;
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+@@ -290,8 +298,14 @@ void proc_exit_connector(struct task_struct *task)
+ ev->event_data.exit.process_tgid = task->tgid;
+ ev->event_data.exit.exit_code = task->exit_code;
+ ev->event_data.exit.exit_signal = task->exit_signal;
+- ev->event_data.exit.parent_pid = task->real_parent->pid;
+- ev->event_data.exit.parent_tgid = task->real_parent->tgid;
++
++ rcu_read_lock();
++ if (pid_alive(task)) {
++ parent = rcu_dereference(task->real_parent);
++ ev->event_data.exit.parent_pid = parent->pid;
++ ev->event_data.exit.parent_tgid = parent->tgid;
++ }
++ rcu_read_unlock();
+
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
+index f4290f6b0c38..2323ba9310d9 100644
+--- a/drivers/gpu/drm/drm_atomic_helper.c
++++ b/drivers/gpu/drm/drm_atomic_helper.c
+@@ -1611,6 +1611,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
+ if (old_plane_state->fb != new_plane_state->fb)
+ return -EINVAL;
+
++ /*
++ * FIXME: Since prepare_fb and cleanup_fb are always called on
++ * the new_plane_state for async updates we need to block framebuffer
++ * changes. This prevents use of a fb that's been cleaned up and
++ * double cleanups from occuring.
++ */
++ if (old_plane_state->fb != new_plane_state->fb)
++ return -EINVAL;
++
+ funcs = plane->helper_private;
+ if (!funcs->atomic_async_update)
+ return -EINVAL;
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index abb5d382f64d..ecef42bfe19d 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -4670,7 +4670,6 @@ read_more:
+ atomic_inc(&r10_bio->remaining);
+ read_bio->bi_next = NULL;
+ generic_make_request(read_bio);
+- sector_nr += nr_sectors;
+ sectors_done += nr_sectors;
+ if (sector_nr <= last)
+ goto read_more;
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 76cc163b3cf1..4a0ec8e87c7a 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -559,6 +559,9 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
+ goto restore_link;
+ }
+
++ if (speed == SPEED_MAX && chip->info->ops->port_max_speed_mode)
++ mode = chip->info->ops->port_max_speed_mode(port);
++
+ if (chip->info->ops->port_set_pause) {
+ err = chip->info->ops->port_set_pause(chip, port, pause);
+ if (err)
+@@ -3042,6 +3045,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6341_port_set_speed,
++ .port_max_speed_mode = mv88e6341_port_max_speed_mode,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+@@ -3360,6 +3364,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390_port_set_speed,
++ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+@@ -3404,6 +3409,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390x_port_set_speed,
++ .port_max_speed_mode = mv88e6390x_port_max_speed_mode,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+@@ -3448,6 +3454,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390_port_set_speed,
++ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+@@ -3541,6 +3548,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390_port_set_speed,
++ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+@@ -3672,6 +3680,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6341_port_set_speed,
++ .port_max_speed_mode = mv88e6341_port_max_speed_mode,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+@@ -3847,6 +3856,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390_port_set_speed,
++ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+@@ -3895,6 +3905,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390x_port_set_speed,
++ .port_max_speed_mode = mv88e6390x_port_max_speed_mode,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
+index 546651d8c3e1..dfb1af65c205 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.h
++++ b/drivers/net/dsa/mv88e6xxx/chip.h
+@@ -377,6 +377,9 @@ struct mv88e6xxx_ops {
+ */
+ int (*port_set_speed)(struct mv88e6xxx_chip *chip, int port, int speed);
+
++ /* What interface mode should be used for maximum speed? */
++ phy_interface_t (*port_max_speed_mode)(int port);
++
+ int (*port_tag_remap)(struct mv88e6xxx_chip *chip, int port);
+
+ int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port,
+diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
+index 184c2b1b3115..5e921bb6c214 100644
+--- a/drivers/net/dsa/mv88e6xxx/port.c
++++ b/drivers/net/dsa/mv88e6xxx/port.c
+@@ -312,6 +312,14 @@ int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+ return mv88e6xxx_port_set_speed(chip, port, speed, !port, true);
+ }
+
++phy_interface_t mv88e6341_port_max_speed_mode(int port)
++{
++ if (port == 5)
++ return PHY_INTERFACE_MODE_2500BASEX;
++
++ return PHY_INTERFACE_MODE_NA;
++}
++
+ /* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */
+ int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+ {
+@@ -345,6 +353,14 @@ int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+ return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
+ }
+
++phy_interface_t mv88e6390_port_max_speed_mode(int port)
++{
++ if (port == 9 || port == 10)
++ return PHY_INTERFACE_MODE_2500BASEX;
++
++ return PHY_INTERFACE_MODE_NA;
++}
++
+ /* Support 10, 100, 200, 1000, 2500, 10000 Mbps (e.g. 88E6190X) */
+ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+ {
+@@ -360,6 +376,14 @@ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+ return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
+ }
+
++phy_interface_t mv88e6390x_port_max_speed_mode(int port)
++{
++ if (port == 9 || port == 10)
++ return PHY_INTERFACE_MODE_XAUI;
++
++ return PHY_INTERFACE_MODE_NA;
++}
++
+ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+ {
+diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
+index 4aadf321edb7..c7bed263a0f4 100644
+--- a/drivers/net/dsa/mv88e6xxx/port.h
++++ b/drivers/net/dsa/mv88e6xxx/port.h
+@@ -285,6 +285,10 @@ int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
+ int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
+ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
+
++phy_interface_t mv88e6341_port_max_speed_mode(int port);
++phy_interface_t mv88e6390_port_max_speed_mode(int port);
++phy_interface_t mv88e6390x_port_max_speed_mode(int port);
++
+ int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state);
+
+ int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+index 36eab37d8a40..09c774fe8853 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -192,6 +192,7 @@ struct hnae3_ae_dev {
+ const struct hnae3_ae_ops *ops;
+ struct list_head node;
+ u32 flag;
++ u8 override_pci_need_reset; /* fix to stop multiple reset happening */
+ enum hnae3_dev_type dev_type;
+ enum hnae3_reset_type reset_type;
+ void *priv;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 1bf7a5f116a0..d84c50068f66 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -1852,7 +1852,9 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
+
+ /* request the reset */
+ if (ae_dev->ops->reset_event) {
+- ae_dev->ops->reset_event(pdev, NULL);
++ if (!ae_dev->override_pci_need_reset)
++ ae_dev->ops->reset_event(pdev, NULL);
++
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+@@ -2476,6 +2478,8 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
++ /* make sure HW write desc complete */
++ dma_rmb();
+ if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+ return -ENXIO;
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+index d0f654123b9b..efb6c1a25171 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+@@ -1259,8 +1259,10 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
+ hclge_handle_all_ras_errors(hdev);
+ } else {
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+- hdev->pdev->revision < 0x21)
++ hdev->pdev->revision < 0x21) {
++ ae_dev->override_pci_need_reset = 1;
+ return PCI_ERS_RESULT_RECOVERED;
++ }
+ }
+
+ if (status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
+@@ -1269,8 +1271,11 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
+ }
+
+ if (status & HCLGE_RAS_REG_NFE_MASK ||
+- status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
++ status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
++ ae_dev->override_pci_need_reset = 0;
+ return PCI_ERS_RESULT_NEED_RESET;
++ }
++ ae_dev->override_pci_need_reset = 1;
+
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+index e65bc3c95630..857588e2488d 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+@@ -2645,6 +2645,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
+ if (!priv->cmd.context)
+ return -ENOMEM;
+
++ if (mlx4_is_mfunc(dev))
++ mutex_lock(&priv->cmd.slave_cmd_mutex);
+ down_write(&priv->cmd.switch_sem);
+ for (i = 0; i < priv->cmd.max_cmds; ++i) {
+ priv->cmd.context[i].token = i;
+@@ -2670,6 +2672,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
+ down(&priv->cmd.poll_sem);
+ priv->cmd.use_events = 1;
+ up_write(&priv->cmd.switch_sem);
++ if (mlx4_is_mfunc(dev))
++ mutex_unlock(&priv->cmd.slave_cmd_mutex);
+
+ return err;
+ }
+@@ -2682,6 +2686,8 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+
++ if (mlx4_is_mfunc(dev))
++ mutex_lock(&priv->cmd.slave_cmd_mutex);
+ down_write(&priv->cmd.switch_sem);
+ priv->cmd.use_events = 0;
+
+@@ -2689,9 +2695,12 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
+ down(&priv->cmd.event_sem);
+
+ kfree(priv->cmd.context);
++ priv->cmd.context = NULL;
+
+ up(&priv->cmd.poll_sem);
+ up_write(&priv->cmd.switch_sem);
++ if (mlx4_is_mfunc(dev))
++ mutex_unlock(&priv->cmd.slave_cmd_mutex);
+ }
+
+ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
+diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+index eb13d3618162..4356f3a58002 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -2719,13 +2719,13 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
+ int total_pages;
+ int total_mem;
+ int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
++ int tot;
+
+ sq_size = 1 << (log_sq_size + log_sq_sride + 4);
+ rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
+ total_mem = sq_size + rq_size;
+- total_pages =
+- roundup_pow_of_two((total_mem + (page_offset << 6)) >>
+- page_shift);
++ tot = (total_mem + (page_offset << 6)) >> page_shift;
++ total_pages = !tot ? 1 : roundup_pow_of_two(tot);
+
+ return total_pages;
+ }
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index 4d1b4a24907f..13e6bf13ac4d 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -585,8 +585,7 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
+
+ if (adapter->csr.flags &
+ LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
+- flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
+- LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
++ flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
+ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
+ LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
+ LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
+@@ -599,12 +598,6 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
+ /* map TX interrupt to vector */
+ int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
+ lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
+- if (flags &
+- LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
+- int_vec_en_auto_clr |= INT_VEC_EN_(vector);
+- lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
+- int_vec_en_auto_clr);
+- }
+
+ /* Remove TX interrupt from shared mask */
+ intr->vector_list[0].int_mask &= ~int_bit;
+@@ -1902,7 +1895,17 @@ static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
+ return ((++index) % rx->ring_size);
+ }
+
+-static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
++static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
++{
++ int length = 0;
++
++ length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
++ return __netdev_alloc_skb(rx->adapter->netdev,
++ length, GFP_ATOMIC | GFP_DMA);
++}
++
++static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
++ struct sk_buff *skb)
+ {
+ struct lan743x_rx_buffer_info *buffer_info;
+ struct lan743x_rx_descriptor *descriptor;
+@@ -1911,9 +1914,7 @@ static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
+ length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
+ descriptor = &rx->ring_cpu_ptr[index];
+ buffer_info = &rx->buffer_info[index];
+- buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev,
+- length,
+- GFP_ATOMIC | GFP_DMA);
++ buffer_info->skb = skb;
+ if (!(buffer_info->skb))
+ return -ENOMEM;
+ buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
+@@ -2060,8 +2061,19 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
+ /* packet is available */
+ if (first_index == last_index) {
+ /* single buffer packet */
++ struct sk_buff *new_skb = NULL;
+ int packet_length;
+
++ new_skb = lan743x_rx_allocate_skb(rx);
++ if (!new_skb) {
++ /* failed to allocate next skb.
++ * Memory is very low.
++ * Drop this packet and reuse buffer.
++ */
++ lan743x_rx_reuse_ring_element(rx, first_index);
++ goto process_extension;
++ }
++
+ buffer_info = &rx->buffer_info[first_index];
+ skb = buffer_info->skb;
+ descriptor = &rx->ring_cpu_ptr[first_index];
+@@ -2081,7 +2093,7 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
+ skb_put(skb, packet_length - 4);
+ skb->protocol = eth_type_trans(skb,
+ rx->adapter->netdev);
+- lan743x_rx_allocate_ring_element(rx, first_index);
++ lan743x_rx_init_ring_element(rx, first_index, new_skb);
+ } else {
+ int index = first_index;
+
+@@ -2094,26 +2106,23 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
+ if (first_index <= last_index) {
+ while ((index >= first_index) &&
+ (index <= last_index)) {
+- lan743x_rx_release_ring_element(rx,
+- index);
+- lan743x_rx_allocate_ring_element(rx,
+- index);
++ lan743x_rx_reuse_ring_element(rx,
++ index);
+ index = lan743x_rx_next_index(rx,
+ index);
+ }
+ } else {
+ while ((index >= first_index) ||
+ (index <= last_index)) {
+- lan743x_rx_release_ring_element(rx,
+- index);
+- lan743x_rx_allocate_ring_element(rx,
+- index);
++ lan743x_rx_reuse_ring_element(rx,
++ index);
+ index = lan743x_rx_next_index(rx,
+ index);
+ }
+ }
+ }
+
++process_extension:
+ if (extension_index >= 0) {
+ descriptor = &rx->ring_cpu_ptr[extension_index];
+ buffer_info = &rx->buffer_info[extension_index];
+@@ -2290,7 +2299,9 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
+
+ rx->last_head = 0;
+ for (index = 0; index < rx->ring_size; index++) {
+- ret = lan743x_rx_allocate_ring_element(rx, index);
++ struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);
++
++ ret = lan743x_rx_init_ring_element(rx, index, new_skb);
+ if (ret)
+ goto cleanup;
+ }
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index d28c8f9ca55b..8154b38c08f7 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -458,7 +458,7 @@ static int ravb_dmac_init(struct net_device *ndev)
+ RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
+
+ /* Set FIFO size */
+- ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
++ ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
+
+ /* Timestamp enable */
+ ravb_write(ndev, TCCR_TFEN, TCCR);
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 8f09edd811e9..50c60550f295 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -532,6 +532,7 @@ static void pptp_sock_destruct(struct sock *sk)
+ pppox_unbind_sock(sk);
+ }
+ skb_queue_purge(&sk->sk_receive_queue);
++ dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
+ }
+
+ static int pptp_create(struct net *net, struct socket *sock, int kern)
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 2aae11feff0c..d6fb6a89f9b3 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1657,6 +1657,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ goto drop;
+ }
+
++ rcu_read_lock();
++
++ if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
++ rcu_read_unlock();
++ atomic_long_inc(&vxlan->dev->rx_dropped);
++ goto drop;
++ }
++
+ stats = this_cpu_ptr(vxlan->dev->tstats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+@@ -1664,6 +1672,9 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ u64_stats_update_end(&stats->syncp);
+
+ gro_cells_receive(&vxlan->gro_cells, skb);
++
++ rcu_read_unlock();
++
+ return 0;
+
+ drop:
+@@ -2693,6 +2704,8 @@ static void vxlan_uninit(struct net_device *dev)
+ {
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+
++ gro_cells_destroy(&vxlan->gro_cells);
++
+ vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
+
+ free_percpu(dev->tstats);
+@@ -3794,7 +3807,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
+
+ vxlan_flush(vxlan, true);
+
+- gro_cells_destroy(&vxlan->gro_cells);
+ list_del(&vxlan->next);
+ unregister_netdevice_queue(dev, head);
+ }
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index bba56b39dcc5..ae2b45e75847 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1750,10 +1750,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
+
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+
+- if (!get_dirty_pages(inode))
+- goto skip_flush;
+-
+- f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
++ /*
++ * Should wait end_io to count F2FS_WB_CP_DATA correctly by
++ * f2fs_is_atomic_file.
++ */
++ if (get_dirty_pages(inode))
++ f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
+ "Unexpected flush for atomic writes: ino=%lu, npages=%u",
+ inode->i_ino, get_dirty_pages(inode));
+ ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
+@@ -1761,7 +1763,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ goto out;
+ }
+-skip_flush:
++
+ set_inode_flag(inode, FI_ATOMIC_FILE);
+ clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
+index acf45ddbe924..e095fb871d91 100644
+--- a/net/core/gro_cells.c
++++ b/net/core/gro_cells.c
+@@ -13,22 +13,36 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
+ {
+ struct net_device *dev = skb->dev;
+ struct gro_cell *cell;
++ int res;
+
+- if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev))
+- return netif_rx(skb);
++ rcu_read_lock();
++ if (unlikely(!(dev->flags & IFF_UP)))
++ goto drop;
++
++ if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
++ res = netif_rx(skb);
++ goto unlock;
++ }
+
+ cell = this_cpu_ptr(gcells->cells);
+
+ if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
++drop:
+ atomic_long_inc(&dev->rx_dropped);
+ kfree_skb(skb);
+- return NET_RX_DROP;
++ res = NET_RX_DROP;
++ goto unlock;
+ }
+
+ __skb_queue_tail(&cell->napi_skbs, skb);
+ if (skb_queue_len(&cell->napi_skbs) == 1)
+ napi_schedule(&cell->napi);
+- return NET_RX_SUCCESS;
++
++ res = NET_RX_SUCCESS;
++
++unlock:
++ rcu_read_unlock();
++ return res;
+ }
+ EXPORT_SYMBOL(gro_cells_receive);
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index b8cd43c9ed5b..a97bf326b231 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -94,9 +94,8 @@ static void hsr_check_announce(struct net_device *hsr_dev,
+ && (old_operstate != IF_OPER_UP)) {
+ /* Went up */
+ hsr->announce_count = 0;
+- hsr->announce_timer.expires = jiffies +
+- msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+- add_timer(&hsr->announce_timer);
++ mod_timer(&hsr->announce_timer,
++ jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
+ }
+
+ if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
+@@ -332,6 +331,7 @@ static void hsr_announce(struct timer_list *t)
+ {
+ struct hsr_priv *hsr;
+ struct hsr_port *master;
++ unsigned long interval;
+
+ hsr = from_timer(hsr, t, announce_timer);
+
+@@ -343,18 +343,16 @@ static void hsr_announce(struct timer_list *t)
+ hsr->protVersion);
+ hsr->announce_count++;
+
+- hsr->announce_timer.expires = jiffies +
+- msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
++ interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+ } else {
+ send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
+ hsr->protVersion);
+
+- hsr->announce_timer.expires = jiffies +
+- msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
++ interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+ }
+
+ if (is_admin_up(master->dev))
+- add_timer(&hsr->announce_timer);
++ mod_timer(&hsr->announce_timer, jiffies + interval);
+
+ rcu_read_unlock();
+ }
+@@ -486,7 +484,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+
+ res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
+ if (res)
+- return res;
++ goto err_add_port;
+
+ res = register_netdevice(hsr_dev);
+ if (res)
+@@ -506,6 +504,8 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ fail:
+ hsr_for_each_port(hsr, port)
+ hsr_del_port(port);
++err_add_port:
++ hsr_del_node(&hsr->self_node_db);
+
+ return res;
+ }
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
+index 286ceb41ac0c..9af16cb68f76 100644
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -124,6 +124,18 @@ int hsr_create_self_node(struct list_head *self_node_db,
+ return 0;
+ }
+
++void hsr_del_node(struct list_head *self_node_db)
++{
++ struct hsr_node *node;
++
++ rcu_read_lock();
++ node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
++ rcu_read_unlock();
++ if (node) {
++ list_del_rcu(&node->mac_list);
++ kfree(node);
++ }
++}
+
+ /* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
+ * seq_out is used to initialize filtering of outgoing duplicate frames
+diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
+index 370b45998121..531fd3dfcac1 100644
+--- a/net/hsr/hsr_framereg.h
++++ b/net/hsr/hsr_framereg.h
+@@ -16,6 +16,7 @@
+
+ struct hsr_node;
+
++void hsr_del_node(struct list_head *self_node_db);
+ struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
+ u16 seq_out);
+ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
+diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
+index 437070d1ffb1..79e98e21cdd7 100644
+--- a/net/ipv4/fou.c
++++ b/net/ipv4/fou.c
+@@ -1024,7 +1024,7 @@ static int gue_err(struct sk_buff *skb, u32 info)
+ int ret;
+
+ len = sizeof(struct udphdr) + sizeof(struct guehdr);
+- if (!pskb_may_pull(skb, len))
++ if (!pskb_may_pull(skb, transport_offset + len))
+ return -EINVAL;
+
+ guehdr = (struct guehdr *)&udp_hdr(skb)[1];
+@@ -1059,7 +1059,7 @@ static int gue_err(struct sk_buff *skb, u32 info)
+
+ optlen = guehdr->hlen << 2;
+
+- if (!pskb_may_pull(skb, len + optlen))
++ if (!pskb_may_pull(skb, transport_offset + len + optlen))
+ return -EINVAL;
+
+ guehdr = (struct guehdr *)&udp_hdr(skb)[1];
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 7bb9128c8363..e04cdb58a602 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1303,6 +1303,10 @@ static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
+ if (fnhe->fnhe_daddr == daddr) {
+ rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
+ fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
++ /* set fnhe_daddr to 0 to ensure it won't bind with
++ * new dsts in rt_bind_exception().
++ */
++ fnhe->fnhe_daddr = 0;
+ fnhe_flush_routes(fnhe);
+ kfree_rcu(fnhe, rcu);
+ break;
+@@ -2144,12 +2148,13 @@ int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ int our = 0;
+ int err = -EINVAL;
+
+- if (in_dev)
+- our = ip_check_mc_rcu(in_dev, daddr, saddr,
+- ip_hdr(skb)->protocol);
++ if (!in_dev)
++ return err;
++ our = ip_check_mc_rcu(in_dev, daddr, saddr,
++ ip_hdr(skb)->protocol);
+
+ /* check l3 master if no match yet */
+- if ((!in_dev || !our) && netif_is_l3_slave(dev)) {
++ if (!our && netif_is_l3_slave(dev)) {
+ struct in_device *l3_in_dev;
+
+ l3_in_dev = __in_dev_get_rcu(skb->dev);
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 606f868d9f3f..e531344611a0 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -216,7 +216,12 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+ refcount_set(&req->rsk_refcnt, 1);
+ tcp_sk(child)->tsoffset = tsoff;
+ sock_rps_save_rxhash(child, skb);
+- inet_csk_reqsk_queue_add(sk, req, child);
++ if (!inet_csk_reqsk_queue_add(sk, req, child)) {
++ bh_unlock_sock(child);
++ sock_put(child);
++ child = NULL;
++ reqsk_put(req);
++ }
+ } else {
+ reqsk_free(req);
+ }
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index cf3c5095c10e..ce365cbba1d1 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1914,6 +1914,11 @@ static int tcp_inq_hint(struct sock *sk)
+ inq = tp->rcv_nxt - tp->copied_seq;
+ release_sock(sk);
+ }
++ /* After receiving a FIN, tell the user-space to continue reading
++ * by returning a non-zero inq.
++ */
++ if (inq == 0 && sock_flag(sk, SOCK_DONE))
++ inq = 1;
+ return inq;
+ }
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 76858b14ebe9..7b1ef897b398 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -6519,7 +6519,13 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ af_ops->send_synack(fastopen_sk, dst, &fl, req,
+ &foc, TCP_SYNACK_FASTOPEN);
+ /* Add the child socket directly into the accept queue */
+- inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
++ if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
++ reqsk_fastopen_remove(fastopen_sk, req, false);
++ bh_unlock_sock(fastopen_sk);
++ sock_put(fastopen_sk);
++ reqsk_put(req);
++ goto drop;
++ }
+ sk->sk_data_ready(sk);
+ bh_unlock_sock(fastopen_sk);
+ sock_put(fastopen_sk);
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index ec3cea9d6828..1aae9ab57fe9 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1734,15 +1734,8 @@ EXPORT_SYMBOL(tcp_add_backlog);
+ int tcp_filter(struct sock *sk, struct sk_buff *skb)
+ {
+ struct tcphdr *th = (struct tcphdr *)skb->data;
+- unsigned int eaten = skb->len;
+- int err;
+
+- err = sk_filter_trim_cap(sk, skb, th->doff * 4);
+- if (!err) {
+- eaten -= skb->len;
+- TCP_SKB_CB(skb)->end_seq -= eaten;
+- }
+- return err;
++ return sk_filter_trim_cap(sk, skb, th->doff * 4);
+ }
+ EXPORT_SYMBOL(tcp_filter);
+
+diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
+index 867474abe269..ec4e2ed95f36 100644
+--- a/net/ipv6/fou6.c
++++ b/net/ipv6/fou6.c
+@@ -94,7 +94,7 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ int ret;
+
+ len = sizeof(struct udphdr) + sizeof(struct guehdr);
+- if (!pskb_may_pull(skb, len))
++ if (!pskb_may_pull(skb, transport_offset + len))
+ return -EINVAL;
+
+ guehdr = (struct guehdr *)&udp_hdr(skb)[1];
+@@ -129,7 +129,7 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+
+ optlen = guehdr->hlen << 2;
+
+- if (!pskb_may_pull(skb, len + optlen))
++ if (!pskb_may_pull(skb, transport_offset + len + optlen))
+ return -EINVAL;
+
+ guehdr = (struct guehdr *)&udp_hdr(skb)[1];
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 09e440e8dfae..07e21a82ce4c 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -778,8 +778,9 @@ static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
+ pbw0 = tunnel->ip6rd.prefixlen >> 5;
+ pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
+
+- d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
+- tunnel->ip6rd.relay_prefixlen;
++ d = tunnel->ip6rd.relay_prefixlen < 32 ?
++ (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
++ tunnel->ip6rd.relay_prefixlen : 0;
+
+ pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
+ if (pbi1 > 0)
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 0ae6899edac0..37a69df17cab 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -674,9 +674,6 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ if (flags & MSG_OOB)
+ goto out;
+
+- if (addr_len)
+- *addr_len = sizeof(*lsa);
+-
+ if (flags & MSG_ERRQUEUE)
+ return ipv6_recv_error(sk, msg, len, addr_len);
+
+@@ -706,6 +703,7 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ lsa->l2tp_conn_id = 0;
+ if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
+ lsa->l2tp_scope_id = inet6_iif(skb);
++ *addr_len = sizeof(*lsa);
+ }
+
+ if (np->rxopt.all)
+diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
+index b2adfa825363..5cf6d9f4761d 100644
+--- a/net/rxrpc/conn_client.c
++++ b/net/rxrpc/conn_client.c
+@@ -353,7 +353,7 @@ static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
+ * normally have to take channel_lock but we do this before anyone else
+ * can see the connection.
+ */
+- list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);
++ list_add(&call->chan_wait_link, &candidate->waiting_calls);
+
+ if (cp->exclusive) {
+ call->conn = candidate;
+@@ -432,7 +432,7 @@ found_extant_conn:
+ call->conn = conn;
+ call->security_ix = conn->security_ix;
+ call->service_id = conn->service_id;
+- list_add(&call->chan_wait_link, &conn->waiting_calls);
++ list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
+ spin_unlock(&conn->channel_lock);
+ _leave(" = 0 [extant %d]", conn->debug_id);
+ return 0;
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index 12ca9d13db83..bf67ae5ac1c3 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -1327,46 +1327,46 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
+ if (err < 0)
+ goto errout;
+
+- if (!handle) {
+- handle = 1;
+- err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+- INT_MAX, GFP_KERNEL);
+- } else if (!fold) {
+- /* user specifies a handle and it doesn't exist */
+- err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+- handle, GFP_KERNEL);
+- }
+- if (err)
+- goto errout;
+- fnew->handle = handle;
+-
+ if (tb[TCA_FLOWER_FLAGS]) {
+ fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
+
+ if (!tc_flags_valid(fnew->flags)) {
+ err = -EINVAL;
+- goto errout_idr;
++ goto errout;
+ }
+ }
+
+ err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
+ tp->chain->tmplt_priv, extack);
+ if (err)
+- goto errout_idr;
++ goto errout;
+
+ err = fl_check_assign_mask(head, fnew, fold, mask);
+ if (err)
+- goto errout_idr;
++ goto errout;
++
++ if (!handle) {
++ handle = 1;
++ err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
++ INT_MAX, GFP_KERNEL);
++ } else if (!fold) {
++ /* user specifies a handle and it doesn't exist */
++ err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
++ handle, GFP_KERNEL);
++ }
++ if (err)
++ goto errout_mask;
++ fnew->handle = handle;
+
+ if (!fold && __fl_lookup(fnew->mask, &fnew->mkey)) {
+ err = -EEXIST;
+- goto errout_mask;
++ goto errout_idr;
+ }
+
+ err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
+ fnew->mask->filter_ht_params);
+ if (err)
+- goto errout_mask;
++ goto errout_idr;
+
+ if (!tc_skip_hw(fnew->flags)) {
+ err = fl_hw_replace_filter(tp, fnew, extack);
+@@ -1405,12 +1405,13 @@ errout_mask_ht:
+ rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
+ fnew->mask->filter_ht_params);
+
+-errout_mask:
+- fl_mask_put(head, fnew->mask, false);
+-
+ errout_idr:
+ if (!fold)
+ idr_remove(&head->handle_idr, fnew->handle);
++
++errout_mask:
++ fl_mask_put(head, fnew->mask, false);
++
+ errout:
+ tcf_exts_destroy(&fnew->exts);
+ kfree(fnew);
+diff --git a/net/sctp/stream.c b/net/sctp/stream.c
+index 2936ed17bf9e..3b47457862cc 100644
+--- a/net/sctp/stream.c
++++ b/net/sctp/stream.c
+@@ -230,8 +230,6 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
+ for (i = 0; i < stream->outcnt; i++)
+ SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
+
+- sched->init(stream);
+-
+ in:
+ sctp_stream_interleave_init(stream);
+ if (!incnt)
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index 3ae3a33da70b..602715fc9a75 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -662,6 +662,8 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
+ */
+ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
+ {
++ const struct virtio_transport *t;
++ struct virtio_vsock_pkt *reply;
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_RST,
+ .type = le16_to_cpu(pkt->hdr.type),
+@@ -672,15 +674,21 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
+ if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
+ return 0;
+
+- pkt = virtio_transport_alloc_pkt(&info, 0,
+- le64_to_cpu(pkt->hdr.dst_cid),
+- le32_to_cpu(pkt->hdr.dst_port),
+- le64_to_cpu(pkt->hdr.src_cid),
+- le32_to_cpu(pkt->hdr.src_port));
+- if (!pkt)
++ reply = virtio_transport_alloc_pkt(&info, 0,
++ le64_to_cpu(pkt->hdr.dst_cid),
++ le32_to_cpu(pkt->hdr.dst_port),
++ le64_to_cpu(pkt->hdr.src_cid),
++ le32_to_cpu(pkt->hdr.src_port));
++ if (!reply)
+ return -ENOMEM;
+
+- return virtio_transport_get_ops()->send_pkt(pkt);
++ t = virtio_transport_get_ops();
++ if (!t) {
++ virtio_transport_free_pkt(reply);
++ return -ENOTCONN;
++ }
++
++ return t->send_pkt(reply);
+ }
+
+ static void virtio_transport_wait_close(struct sock *sk, long timeout)
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index eff31348e20b..20a511398389 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -820,8 +820,13 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
+ sock->state = SS_CONNECTED;
+ rc = 0;
+ out_put_neigh:
+- if (rc)
++ if (rc) {
++ read_lock_bh(&x25_list_lock);
+ x25_neigh_put(x25->neighbour);
++ x25->neighbour = NULL;
++ read_unlock_bh(&x25_list_lock);
++ x25->state = X25_STATE_0;
++ }
+ out_put_route:
+ x25_route_put(rt);
+ out:
+diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
+index d91874275d2c..5b46e8dcc2dd 100644
+--- a/sound/firewire/bebob/bebob.c
++++ b/sound/firewire/bebob/bebob.c
+@@ -448,7 +448,19 @@ static const struct ieee1394_device_id bebob_id_table[] = {
+ /* Focusrite, SaffirePro 26 I/O */
+ SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000003, &saffirepro_26_spec),
+ /* Focusrite, SaffirePro 10 I/O */
+- SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000006, &saffirepro_10_spec),
++ {
++ // The combination of vendor_id and model_id is the same as the
++ // same as the one of Liquid Saffire 56.
++ .match_flags = IEEE1394_MATCH_VENDOR_ID |
++ IEEE1394_MATCH_MODEL_ID |
++ IEEE1394_MATCH_SPECIFIER_ID |
++ IEEE1394_MATCH_VERSION,
++ .vendor_id = VEN_FOCUSRITE,
++ .model_id = 0x000006,
++ .specifier_id = 0x00a02d,
++ .version = 0x010001,
++ .driver_data = (kernel_ulong_t)&saffirepro_10_spec,
++ },
+ /* Focusrite, Saffire(no label and LE) */
+ SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, MODEL_FOCUSRITE_SAFFIRE_BOTH,
+ &saffire_spec),
+diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c
+index f0555a24d90e..6c9b743ea74b 100644
+--- a/sound/firewire/motu/amdtp-motu.c
++++ b/sound/firewire/motu/amdtp-motu.c
+@@ -136,7 +136,9 @@ static void read_pcm_s32(struct amdtp_stream *s,
+ byte = (u8 *)buffer + p->pcm_byte_offset;
+
+ for (c = 0; c < channels; ++c) {
+- *dst = (byte[0] << 24) | (byte[1] << 16) | byte[2];
++ *dst = (byte[0] << 24) |
++ (byte[1] << 16) |
++ (byte[2] << 8);
+ byte += 3;
+ dst++;
+ }
+diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
+index 617ff1aa818f..27eb0270a711 100644
+--- a/sound/hda/hdac_i915.c
++++ b/sound/hda/hdac_i915.c
+@@ -144,9 +144,9 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
+ return -ENODEV;
+ if (!acomp->ops) {
+ request_module("i915");
+- /* 10s timeout */
++ /* 60s timeout */
+ wait_for_completion_timeout(&bind_complete,
+- msecs_to_jiffies(10 * 1000));
++ msecs_to_jiffies(60 * 1000));
+ }
+ if (!acomp->ops) {
+ dev_info(bus->dev, "couldn't bind with audio component\n");
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index a4ee7656d9ee..fb65ad31e86c 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -936,6 +936,9 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
+ SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
+ SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1ffa36e987b4..3a8568d3928f 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -118,6 +118,7 @@ struct alc_spec {
+ unsigned int has_alc5505_dsp:1;
+ unsigned int no_depop_delay:1;
+ unsigned int done_hp_init:1;
++ unsigned int no_shutup_pins:1;
+
+ /* for PLL fix */
+ hda_nid_t pll_nid;
+@@ -476,6 +477,14 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
+ set_eapd(codec, *p, on);
+ }
+
++static void alc_shutup_pins(struct hda_codec *codec)
++{
++ struct alc_spec *spec = codec->spec;
++
++ if (!spec->no_shutup_pins)
++ snd_hda_shutup_pins(codec);
++}
++
+ /* generic shutup callback;
+ * just turning off EAPD and a little pause for avoiding pop-noise
+ */
+@@ -486,7 +495,7 @@ static void alc_eapd_shutup(struct hda_codec *codec)
+ alc_auto_setup_eapd(codec, false);
+ if (!spec->no_depop_delay)
+ msleep(200);
+- snd_hda_shutup_pins(codec);
++ alc_shutup_pins(codec);
+ }
+
+ /* generic EAPD initialization */
+@@ -814,7 +823,7 @@ static inline void alc_shutup(struct hda_codec *codec)
+ if (spec && spec->shutup)
+ spec->shutup(codec);
+ else
+- snd_hda_shutup_pins(codec);
++ alc_shutup_pins(codec);
+ }
+
+ static void alc_reboot_notify(struct hda_codec *codec)
+@@ -2950,7 +2959,7 @@ static void alc269_shutup(struct hda_codec *codec)
+ (alc_get_coef0(codec) & 0x00ff) == 0x018) {
+ msleep(150);
+ }
+- snd_hda_shutup_pins(codec);
++ alc_shutup_pins(codec);
+ }
+
+ static struct coef_fw alc282_coefs[] = {
+@@ -3053,14 +3062,15 @@ static void alc282_shutup(struct hda_codec *codec)
+ if (hp_pin_sense)
+ msleep(85);
+
+- snd_hda_codec_write(codec, hp_pin, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++ if (!spec->no_shutup_pins)
++ snd_hda_codec_write(codec, hp_pin, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+ if (hp_pin_sense)
+ msleep(100);
+
+ alc_auto_setup_eapd(codec, false);
+- snd_hda_shutup_pins(codec);
++ alc_shutup_pins(codec);
+ alc_write_coef_idx(codec, 0x78, coef78);
+ }
+
+@@ -3166,15 +3176,16 @@ static void alc283_shutup(struct hda_codec *codec)
+ if (hp_pin_sense)
+ msleep(100);
+
+- snd_hda_codec_write(codec, hp_pin, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++ if (!spec->no_shutup_pins)
++ snd_hda_codec_write(codec, hp_pin, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+ alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
+
+ if (hp_pin_sense)
+ msleep(100);
+ alc_auto_setup_eapd(codec, false);
+- snd_hda_shutup_pins(codec);
++ alc_shutup_pins(codec);
+ alc_write_coef_idx(codec, 0x43, 0x9614);
+ }
+
+@@ -3240,14 +3251,15 @@ static void alc256_shutup(struct hda_codec *codec)
+ /* NOTE: call this before clearing the pin, otherwise codec stalls */
+ alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
+
+- snd_hda_codec_write(codec, hp_pin, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++ if (!spec->no_shutup_pins)
++ snd_hda_codec_write(codec, hp_pin, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+ if (hp_pin_sense)
+ msleep(100);
+
+ alc_auto_setup_eapd(codec, false);
+- snd_hda_shutup_pins(codec);
++ alc_shutup_pins(codec);
+ }
+
+ static void alc225_init(struct hda_codec *codec)
+@@ -3334,7 +3346,7 @@ static void alc225_shutup(struct hda_codec *codec)
+ msleep(100);
+
+ alc_auto_setup_eapd(codec, false);
+- snd_hda_shutup_pins(codec);
++ alc_shutup_pins(codec);
+ }
+
+ static void alc_default_init(struct hda_codec *codec)
+@@ -3388,14 +3400,15 @@ static void alc_default_shutup(struct hda_codec *codec)
+ if (hp_pin_sense)
+ msleep(85);
+
+- snd_hda_codec_write(codec, hp_pin, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++ if (!spec->no_shutup_pins)
++ snd_hda_codec_write(codec, hp_pin, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+ if (hp_pin_sense)
+ msleep(100);
+
+ alc_auto_setup_eapd(codec, false);
+- snd_hda_shutup_pins(codec);
++ alc_shutup_pins(codec);
+ }
+
+ static void alc294_hp_init(struct hda_codec *codec)
+@@ -3412,8 +3425,9 @@ static void alc294_hp_init(struct hda_codec *codec)
+
+ msleep(100);
+
+- snd_hda_codec_write(codec, hp_pin, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++ if (!spec->no_shutup_pins)
++ snd_hda_codec_write(codec, hp_pin, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+ alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
+ alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
+@@ -5007,16 +5021,12 @@ static void alc_fixup_auto_mute_via_amp(struct hda_codec *codec,
+ }
+ }
+
+-static void alc_no_shutup(struct hda_codec *codec)
+-{
+-}
+-
+ static void alc_fixup_no_shutup(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+ {
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ struct alc_spec *spec = codec->spec;
+- spec->shutup = alc_no_shutup;
++ spec->no_shutup_pins = 1;
+ }
+ }
+
+@@ -5661,6 +5671,7 @@ enum {
+ ALC225_FIXUP_HEADSET_JACK,
+ ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+ ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
++ ALC255_FIXUP_ACER_HEADSET_MIC,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -6627,6 +6638,16 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
+ },
++ [ALC255_FIXUP_ACER_HEADSET_MIC] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x19, 0x03a11130 },
++ { 0x1a, 0x90a60140 }, /* use as internal mic */
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -6646,6 +6667,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
+ SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
+ SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
+@@ -6677,6 +6699,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
+ SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
++ SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
+ SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
+@@ -6751,11 +6774,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
++ SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
+ SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
+- SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+- SND_PCI_QUIRK(0x103c, 0x82c0, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+@@ -6771,7 +6796,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+- SND_PCI_QUIRK(0x1043, 0x14a1, "ASUS UX533FD", ALC294_FIXUP_ASUS_SPK),
+ SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+ SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+@@ -7388,6 +7412,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x14, 0x90170110},
+ {0x1b, 0x90a70130},
+ {0x21, 0x04211020}),
++ SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
++ {0x12, 0x90a60130},
++ {0x17, 0x90170110},
++ {0x21, 0x03211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
+ {0x12, 0x90a60130},
+ {0x17, 0x90170110},