author    Mike Pagano <mpagano@gentoo.org>    2019-02-23 09:42:05 -0500
committer Mike Pagano <mpagano@gentoo.org>    2019-02-23 09:42:05 -0500
commit    2d45eb6669ad918a2b954c2d34e2dce62a53648d (patch)
tree      ba89493d319c56d5e09eef251beaa388fb43f689
parent    proj/linux-patches: Linux patch 4.9.159 (diff)
download  linux-patches-2d45eb6669ad918a2b954c2d34e2dce62a53648d.tar.gz
          linux-patches-2d45eb6669ad918a2b954c2d34e2dce62a53648d.tar.bz2
          linux-patches-2d45eb6669ad918a2b954c2d34e2dce62a53648d.zip
proj/linux-patches: Linux patch 4.9.160
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              |  14
-rw-r--r--  1159_linux-4.9.160.patch | 599
2 files changed, 608 insertions, 5 deletions
diff --git a/0000_README b/0000_README
index 53c6c44c..203d1e52 100644
--- a/0000_README
+++ b/0000_README
@@ -660,25 +660,29 @@ From: http://www.kernel.org
Desc: Linux 4.9.154
Patch: 1154_linux-4.9.155.patch
-From: http://www.k5rnel.org
+From: http://www.kernel.org
Desc: Linux 4.9.155
Patch: 1155_linux-4.9.156.patch
-From: http://www.k5rnel.org
+From: http://www.kernel.org
Desc: Linux 4.9.156
Patch: 1156_linux-4.9.157.patch
-From: http://www.k5rnel.org
+From: http://www.kernel.org
Desc: Linux 4.9.157
Patch: 1157_linux-4.9.158.patch
-From: http://www.k5rnel.org
+From: http://www.kernel.org
Desc: Linux 4.9.158
Patch: 1158_linux-4.9.159.patch
-From: http://www.k5rnel.org
+From: http://www.kernel.org
Desc: Linux 4.9.159
+Patch: 1159_linux-4.9.160.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.160
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1159_linux-4.9.160.patch b/1159_linux-4.9.160.patch
new file mode 100644
index 00000000..b99296e1
--- /dev/null
+++ b/1159_linux-4.9.160.patch
@@ -0,0 +1,599 @@
+diff --git a/Makefile b/Makefile
+index a452ead13b1e..af70503df3f4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 159
++SUBLEVEL = 160
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
+index cb6606a0470d..be60bd5bab78 100644
+--- a/drivers/hwmon/lm80.c
++++ b/drivers/hwmon/lm80.c
+@@ -393,8 +393,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
+ }
+
+ rv = lm80_read_value(client, LM80_REG_FANDIV);
+- if (rv < 0)
++ if (rv < 0) {
++ mutex_unlock(&data->update_lock);
+ return rv;
++ }
+ reg = (rv & ~(3 << (2 * (nr + 1))))
+ | (data->fan_div[nr] << (2 * (nr + 1)));
+ lm80_write_value(client, LM80_REG_FANDIV, reg);
+diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
+index 9438d7ec3308..8b29e97cf668 100644
+--- a/drivers/isdn/mISDN/timerdev.c
++++ b/drivers/isdn/mISDN/timerdev.c
+@@ -168,8 +168,8 @@ dev_expire_timer(unsigned long data)
+ spin_lock_irqsave(&timer->dev->lock, flags);
+ if (timer->id >= 0)
+ list_move_tail(&timer->list, &timer->dev->expired);
+- spin_unlock_irqrestore(&timer->dev->lock, flags);
+ wake_up_interruptible(&timer->dev->wait);
++ spin_unlock_irqrestore(&timer->dev->lock, flags);
+ }
+
+ static int
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index 93ab0b3ad393..af11781fe5f9 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -5079,7 +5079,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ INIT_WORK(&hw->restart_work, sky2_restart);
+
+ pci_set_drvdata(pdev, hw);
+- pdev->d3_delay = 200;
++ pdev->d3_delay = 300;
+
+ return 0;
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+index a601f8d43b75..f988c7573ba5 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+@@ -237,15 +237,18 @@ static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
+ static int dwmac4_rx_check_timestamp(void *desc)
+ {
+ struct dma_desc *p = (struct dma_desc *)desc;
++ unsigned int rdes0 = le32_to_cpu(p->des0);
++ unsigned int rdes1 = le32_to_cpu(p->des1);
++ unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 own, ctxt;
+ int ret = 1;
+
+- own = p->des3 & RDES3_OWN;
+- ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
++ own = rdes3 & RDES3_OWN;
++ ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
+ >> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
+
+ if (likely(!own && ctxt)) {
+- if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
++ if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
+ /* Corrupted value */
+ ret = -EINVAL;
+ else
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+index c5d0142adda2..3519a8a589dd 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -676,25 +676,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+ {
+ struct stmmac_priv *priv = netdev_priv(dev);
++ int ret;
+
+- priv->eee_enabled = edata->eee_enabled;
+-
+- if (!priv->eee_enabled)
++ if (!edata->eee_enabled) {
+ stmmac_disable_eee_mode(priv);
+- else {
++ } else {
+ /* We are asking for enabling the EEE but it is safe
+ * to verify all by invoking the eee_init function.
+ * In case of failure it will return an error.
+ */
+- priv->eee_enabled = stmmac_eee_init(priv);
+- if (!priv->eee_enabled)
++ edata->eee_enabled = stmmac_eee_init(priv);
++ if (!edata->eee_enabled)
+ return -EOPNOTSUPP;
+-
+- /* Do not change tx_lpi_timer in case of failure */
+- priv->tx_lpi_timer = edata->tx_lpi_timer;
+ }
+
+- return phy_ethtool_set_eee(priv->phydev, edata);
++ ret = phy_ethtool_set_eee(dev->phydev, edata);
++ if (ret)
++ return ret;
++
++ priv->eee_enabled = edata->eee_enabled;
++ priv->tx_lpi_timer = edata->tx_lpi_timer;
++ return 0;
+ }
+
+ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
+diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
+index 7a14e8170e82..aef525467af0 100644
+--- a/drivers/net/phy/xilinx_gmii2rgmii.c
++++ b/drivers/net/phy/xilinx_gmii2rgmii.c
+@@ -42,7 +42,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
+ u16 val = 0;
+ int err;
+
+- err = priv->phy_drv->read_status(phydev);
++ if (priv->phy_drv->read_status)
++ err = priv->phy_drv->read_status(phydev);
++ else
++ err = genphy_read_status(phydev);
+ if (err < 0)
+ return err;
+
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 28afdf22b88f..373713faa1f5 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1911,7 +1911,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
+ struct pcpu_sw_netstats *tx_stats, *rx_stats;
+ union vxlan_addr loopback;
+ union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
+- struct net_device *dev = skb->dev;
++ struct net_device *dev;
+ int len = skb->len;
+
+ tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
+@@ -1931,8 +1931,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
+ #endif
+ }
+
++ rcu_read_lock();
++ dev = skb->dev;
++ if (unlikely(!(dev->flags & IFF_UP))) {
++ kfree_skb(skb);
++ goto drop;
++ }
++
+ if (dst_vxlan->flags & VXLAN_F_LEARN)
+- vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);
++ vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source);
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->tx_packets++;
+@@ -1945,8 +1952,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
+ rx_stats->rx_bytes += len;
+ u64_stats_update_end(&rx_stats->syncp);
+ } else {
++drop:
+ dev->stats.rx_dropped++;
+ }
++ rcu_read_unlock();
+ }
+
+ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index dc387a974325..2383caf88b67 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -1696,7 +1696,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
+
+ ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
+ len, iov, 64, VHOST_ACCESS_WO);
+- if (ret)
++ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ret; i++) {
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 793d4d571d8d..5c5c389d8fed 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4463,29 +4463,25 @@ try_submit_last:
+ }
+
+ /*
+- * Sanity check for fiemap cache
++ * Emit last fiemap cache
+ *
+- * All fiemap cache should be submitted by emit_fiemap_extent()
+- * Iteration should be terminated either by last fiemap extent or
+- * fieinfo->fi_extents_max.
+- * So no cached fiemap should exist.
++ * The last fiemap cache may still be cached in the following case:
++ * 0 4k 8k
++ * |<- Fiemap range ->|
++ * |<------------ First extent ----------->|
++ *
++ * In this case, the first extent range will be cached but not emitted.
++ * So we must emit it before ending extent_fiemap().
+ */
+-static int check_fiemap_cache(struct btrfs_fs_info *fs_info,
+- struct fiemap_extent_info *fieinfo,
+- struct fiemap_cache *cache)
++static int emit_last_fiemap_cache(struct btrfs_fs_info *fs_info,
++ struct fiemap_extent_info *fieinfo,
++ struct fiemap_cache *cache)
+ {
+ int ret;
+
+ if (!cache->cached)
+ return 0;
+
+- /* Small and recoverbale problem, only to info developer */
+-#ifdef CONFIG_BTRFS_DEBUG
+- WARN_ON(1);
+-#endif
+- btrfs_warn(fs_info,
+- "unhandled fiemap cache detected: offset=%llu phys=%llu len=%llu flags=0x%x",
+- cache->offset, cache->phys, cache->len, cache->flags);
+ ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
+ cache->len, cache->flags);
+ cache->cached = false;
+@@ -4701,7 +4697,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ }
+ out_free:
+ if (!ret)
+- ret = check_fiemap_cache(root->fs_info, fieinfo, &cache);
++ ret = emit_last_fiemap_cache(root->fs_info, fieinfo, &cache);
+ free_extent_map(em);
+ out:
+ btrfs_free_path(path);
+diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
+index 9c6c8ef2e9e7..b692edeb0b90 100644
+--- a/include/linux/netdev_features.h
++++ b/include/linux/netdev_features.h
+@@ -11,6 +11,8 @@
+ #define _LINUX_NETDEV_FEATURES_H
+
+ #include <linux/types.h>
++#include <linux/bitops.h>
++#include <asm/byteorder.h>
+
+ typedef u64 netdev_features_t;
+
+@@ -137,8 +139,26 @@ enum {
+ #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
+ #define NETIF_F_HW_TC __NETIF_F(HW_TC)
+
+-#define for_each_netdev_feature(mask_addr, bit) \
+- for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
++/* Finds the next feature with the highest number of the range of start till 0.
++ */
++static inline int find_next_netdev_feature(u64 feature, unsigned long start)
++{
++ /* like BITMAP_LAST_WORD_MASK() for u64
++ * this sets the most significant 64 - start to 0.
++ */
++ feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
++
++ return fls64(feature) - 1;
++}
++
++/* This goes for the MSB to the LSB through the set feature bits,
++ * mask_addr should be a u64 and bit an int
++ */
++#define for_each_netdev_feature(mask_addr, bit) \
++ for ((bit) = find_next_netdev_feature((mask_addr), \
++ NETDEV_FEATURE_COUNT); \
++ (bit) >= 0; \
++ (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
+
+ /* Features valid for ethtool to change */
+ /* = all defined minus driver/device-class-related */
+diff --git a/include/net/ax25.h b/include/net/ax25.h
+index e602f8177ebf..b507ce2b1952 100644
+--- a/include/net/ax25.h
++++ b/include/net/ax25.h
+@@ -199,6 +199,18 @@ static inline void ax25_hold_route(ax25_route *ax25_rt)
+
+ void __ax25_put_route(ax25_route *ax25_rt);
+
++extern rwlock_t ax25_route_lock;
++
++static inline void ax25_route_lock_use(void)
++{
++ read_lock(&ax25_route_lock);
++}
++
++static inline void ax25_route_lock_unuse(void)
++{
++ read_unlock(&ax25_route_lock);
++}
++
+ static inline void ax25_put_route(ax25_route *ax25_rt)
+ {
+ if (atomic_dec_and_test(&ax25_rt->refcount))
+diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
+index 235c7811a86a..408d76f47bd2 100644
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -40,6 +40,7 @@ struct inet_peer {
+
+ u32 metrics[RTAX_MAX];
+ u32 rate_tokens; /* rate limiting for ICMP */
++ u32 n_redirects;
+ unsigned long rate_last;
+ union {
+ struct list_head gc_list;
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index c3f4f6a9e6c3..fed2a78fb8cb 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1526,6 +1526,7 @@ static inline void tcp_write_queue_purge(struct sock *sk)
+ sk_wmem_free_skb(sk, skb);
+ sk_mem_reclaim(sk);
+ tcp_clear_all_retrans_hints(tcp_sk(sk));
++ inet_csk(sk)->icsk_backoff = 0;
+ }
+
+ static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
+diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
+index 2fa3be965101..cd9a24e5b97a 100644
+--- a/net/ax25/ax25_ip.c
++++ b/net/ax25/ax25_ip.c
+@@ -114,6 +114,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
+ dst = (ax25_address *)(bp + 1);
+ src = (ax25_address *)(bp + 8);
+
++ ax25_route_lock_use();
+ route = ax25_get_route(dst, NULL);
+ if (route) {
+ digipeat = route->digipeat;
+@@ -206,9 +207,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
+ ax25_queue_xmit(skb, dev);
+
+ put:
+- if (route)
+- ax25_put_route(route);
+
++ ax25_route_lock_unuse();
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
+index d39097737e38..149f82bd83fd 100644
+--- a/net/ax25/ax25_route.c
++++ b/net/ax25/ax25_route.c
+@@ -40,7 +40,7 @@
+ #include <linux/export.h>
+
+ static ax25_route *ax25_route_list;
+-static DEFINE_RWLOCK(ax25_route_lock);
++DEFINE_RWLOCK(ax25_route_lock);
+
+ void ax25_rt_device_down(struct net_device *dev)
+ {
+@@ -349,6 +349,7 @@ const struct file_operations ax25_route_fops = {
+ * Find AX.25 route
+ *
+ * Only routes with a reference count of zero can be destroyed.
++ * Must be called with ax25_route_lock read locked.
+ */
+ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
+ {
+@@ -356,7 +357,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
+ ax25_route *ax25_def_rt = NULL;
+ ax25_route *ax25_rt;
+
+- read_lock(&ax25_route_lock);
+ /*
+ * Bind to the physical interface we heard them on, or the default
+ * route if none is found;
+@@ -379,11 +379,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
+ if (ax25_spe_rt != NULL)
+ ax25_rt = ax25_spe_rt;
+
+- if (ax25_rt != NULL)
+- ax25_hold_route(ax25_rt);
+-
+- read_unlock(&ax25_route_lock);
+-
+ return ax25_rt;
+ }
+
+@@ -414,9 +409,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
+ ax25_route *ax25_rt;
+ int err = 0;
+
+- if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
++ ax25_route_lock_use();
++ ax25_rt = ax25_get_route(addr, NULL);
++ if (!ax25_rt) {
++ ax25_route_lock_unuse();
+ return -EHOSTUNREACH;
+-
++ }
+ if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
+ err = -EHOSTUNREACH;
+ goto put;
+@@ -451,8 +449,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
+ }
+
+ put:
+- ax25_put_route(ax25_rt);
+-
++ ax25_route_lock_unuse();
+ return err;
+ }
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 071c589f7994..8e187f90c85d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6909,7 +6909,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
+ netdev_features_t feature;
+ int feature_bit;
+
+- for_each_netdev_feature(&upper_disables, feature_bit) {
++ for_each_netdev_feature(upper_disables, feature_bit) {
+ feature = __NETIF_F_BIT(feature_bit);
+ if (!(upper->wanted_features & feature)
+ && (features & feature)) {
+@@ -6929,7 +6929,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
+ netdev_features_t feature;
+ int feature_bit;
+
+- for_each_netdev_feature(&upper_disables, feature_bit) {
++ for_each_netdev_feature(upper_disables, feature_bit) {
+ feature = __NETIF_F_BIT(feature_bit);
+ if (!(features & feature) && (lower->features & feature)) {
+ netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 11501165f0df..4a71d78d0c6a 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -383,6 +383,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+ */
+ void *netdev_alloc_frag(unsigned int fragsz)
+ {
++ fragsz = SKB_DATA_ALIGN(fragsz);
++
+ return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
+ }
+ EXPORT_SYMBOL(netdev_alloc_frag);
+@@ -396,6 +398,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+
+ void *napi_alloc_frag(unsigned int fragsz)
+ {
++ fragsz = SKB_DATA_ALIGN(fragsz);
++
+ return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
+ }
+ EXPORT_SYMBOL(napi_alloc_frag);
+diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
+index 86fa45809540..0c5862914f05 100644
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -448,6 +448,7 @@ relookup:
+ atomic_set(&p->rid, 0);
+ p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+ p->rate_tokens = 0;
++ p->n_redirects = 0;
+ /* 60*HZ is arbitrary, but chosen enough high so that the first
+ * calculation of tokens is at its maximum.
+ */
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 890141d32ab9..d606de65e2d0 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -882,13 +882,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ /* No redirected packets during ip_rt_redirect_silence;
+ * reset the algorithm.
+ */
+- if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
++ if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
+ peer->rate_tokens = 0;
++ peer->n_redirects = 0;
++ }
+
+ /* Too many ignored redirects; do not send anything
+ * set dst.rate_last to the last seen redirected packet.
+ */
+- if (peer->rate_tokens >= ip_rt_redirect_number) {
++ if (peer->n_redirects >= ip_rt_redirect_number) {
+ peer->rate_last = jiffies;
+ goto out_put_peer;
+ }
+@@ -905,6 +907,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
+ peer->rate_last = jiffies;
+ ++peer->rate_tokens;
++ ++peer->n_redirects;
+ #ifdef CONFIG_IP_ROUTE_VERBOSE
+ if (log_martians &&
+ peer->rate_tokens == ip_rt_redirect_number)
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 9de77d946f5a..2ededb32b754 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2292,7 +2292,6 @@ int tcp_disconnect(struct sock *sk, int flags)
+ tp->write_seq += tp->max_window + 2;
+ if (tp->write_seq == 0)
+ tp->write_seq = 1;
+- icsk->icsk_backoff = 0;
+ tp->snd_cwnd = 2;
+ icsk->icsk_probes_out = 0;
+ tp->packets_out = 0;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 1ea0c91ba994..82c1064ff4aa 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -464,14 +464,15 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+ if (sock_owned_by_user(sk))
+ break;
+
++ skb = tcp_write_queue_head(sk);
++ if (WARN_ON_ONCE(!skb))
++ break;
++
+ icsk->icsk_backoff--;
+ icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
+ TCP_TIMEOUT_INIT;
+ icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
+
+- skb = tcp_write_queue_head(sk);
+- BUG_ON(!skb);
+-
+ remaining = icsk->icsk_rto -
+ min(icsk->icsk_rto,
+ tcp_time_stamp - tcp_skb_timestamp(skb));
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 8f79f0414bc3..4ce7f9195151 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1074,7 +1074,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ if (ifa == ifp)
+ continue;
+- if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
++ if (ifa->prefix_len != ifp->prefix_len ||
++ !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
+ ifp->prefix_len))
+ continue;
+ if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
+diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
+index 008f3424dcbc..102bf9194662 100644
+--- a/net/vmw_vsock/vmci_transport.c
++++ b/net/vmw_vsock/vmci_transport.c
+@@ -1656,6 +1656,10 @@ static void vmci_transport_cleanup(struct work_struct *work)
+
+ static void vmci_transport_destruct(struct vsock_sock *vsk)
+ {
++ /* transport can be NULL if we hit a failure at init() time */
++ if (!vmci_trans(vsk))
++ return;
++
+ /* Ensure that the detach callback doesn't use the sk/vsk
+ * we are about to destruct.
+ */
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index 007721632b07..0a7e5d992bba 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb)
+ unsigned int lci = 1;
+ struct sock *sk;
+
+- read_lock_bh(&x25_list_lock);
+-
+- while ((sk = __x25_find_socket(lci, nb)) != NULL) {
++ while ((sk = x25_find_socket(lci, nb)) != NULL) {
+ sock_put(sk);
+ if (++lci == 4096) {
+ lci = 0;
+ break;
+ }
++ cond_resched();
+ }
+
+- read_unlock_bh(&x25_list_lock);
+ return lci;
+ }
+