diff options
author | Alice Ferrazzi <alicef@gentoo.org> | 2021-02-13 23:45:49 +0900 |
---|---|---|
committer | Alice Ferrazzi <alicef@gentoo.org> | 2021-02-13 23:45:58 +0900 |
commit | ad012609f52d2a93cab85e8b0c9800722ac992da (patch) | |
tree | 17737e7380d958c2426f41f8ed9992573a0bec29 | |
parent | Linux patch 5.4.97 (diff) | |
download | linux-patches-ad012609f52d2a93cab85e8b0c9800722ac992da.tar.gz linux-patches-ad012609f52d2a93cab85e8b0c9800722ac992da.tar.bz2 linux-patches-ad012609f52d2a93cab85e8b0c9800722ac992da.zip |
Linux patch 5.4.98 (tag: 5.4-100)
Signed-off-by: Alice Ferrazzi <alicef@gentoo.org>
-rw-r--r-- | 0000_README | 4 | ||||
-rw-r--r-- | 1097_linux-5.4.98.patch | 1203 |
2 files changed, 1207 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 1182cab6..0c2bf6f4 100644 --- a/0000_README +++ b/0000_README @@ -431,6 +431,10 @@ Patch: 1096_linux-5.4.97.patch From: http://www.kernel.org Desc: Linux 5.4.97 +Patch: 1097_linux-5.4.98.patch +From: http://www.kernel.org +Desc: Linux 5.4.98 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1097_linux-5.4.98.patch b/1097_linux-5.4.98.patch new file mode 100644 index 00000000..6e7c8524 --- /dev/null +++ b/1097_linux-5.4.98.patch @@ -0,0 +1,1203 @@ +diff --git a/Makefile b/Makefile +index 032751f6be0c1..4f6bfcf434e80 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 97 ++SUBLEVEL = 98 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 4906e480b5bb6..296b0d7570d06 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -1835,6 +1835,8 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, + struct page **pages; + unsigned long first, last; + ++ lockdep_assert_held(&kvm->lock); ++ + if (ulen == 0 || uaddr + ulen < uaddr) + return NULL; + +@@ -7091,12 +7093,21 @@ static int svm_register_enc_region(struct kvm *kvm, + if (!region) + return -ENOMEM; + ++ mutex_lock(&kvm->lock); + region->pages = sev_pin_memory(kvm, range->addr, range->size, ®ion->npages, 1); + if (!region->pages) { + ret = -ENOMEM; ++ mutex_unlock(&kvm->lock); + goto e_free; + } + ++ region->uaddr = range->addr; ++ region->size = range->size; ++ ++ mutex_lock(&kvm->lock); ++ list_add_tail(®ion->list, &sev->regions_list); ++ mutex_unlock(&kvm->lock); ++ + /* + * The guest may change the memory encryption attribute from C=0 -> C=1 + * or vice versa for this memory range. 
Lets make sure caches are +@@ -7105,13 +7116,6 @@ static int svm_register_enc_region(struct kvm *kvm, + */ + sev_clflush_pages(region->pages, region->npages); + +- region->uaddr = range->addr; +- region->size = range->size; +- +- mutex_lock(&kvm->lock); +- list_add_tail(®ion->list, &sev->regions_list); +- mutex_unlock(&kvm->lock); +- + return ret; + + e_free: +diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c +index 3d34ac02d76ef..cb3d44d200055 100644 +--- a/block/blk-cgroup.c ++++ b/block/blk-cgroup.c +@@ -1089,6 +1089,8 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css) + */ + void blkcg_destroy_blkgs(struct blkcg *blkcg) + { ++ might_sleep(); ++ + spin_lock_irq(&blkcg->lock); + + while (!hlist_empty(&blkcg->blkg_list)) { +@@ -1096,14 +1098,20 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg) + struct blkcg_gq, blkcg_node); + struct request_queue *q = blkg->q; + +- if (spin_trylock(&q->queue_lock)) { +- blkg_destroy(blkg); +- spin_unlock(&q->queue_lock); +- } else { ++ if (need_resched() || !spin_trylock(&q->queue_lock)) { ++ /* ++ * Given that the system can accumulate a huge number ++ * of blkgs in pathological cases, check to see if we ++ * need to rescheduling to avoid softlockup. 
++ */ + spin_unlock_irq(&blkcg->lock); +- cpu_relax(); ++ cond_resched(); + spin_lock_irq(&blkcg->lock); ++ continue; + } ++ ++ blkg_destroy(blkg); ++ spin_unlock(&q->queue_lock); + } + + spin_unlock_irq(&blkcg->lock); +diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c +index eddc6d1bdb2d1..82b76df43ae57 100644 +--- a/drivers/crypto/chelsio/chtls/chtls_cm.c ++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c +@@ -1047,11 +1047,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk, + + n = dst_neigh_lookup(dst, &iph->saddr); + if (!n || !n->dev) +- goto free_sk; ++ goto free_dst; + + ndev = n->dev; +- if (!ndev) +- goto free_dst; + if (is_vlan_dev(ndev)) + ndev = vlan_dev_real_dev(ndev); + +@@ -1117,7 +1115,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk, + free_csk: + chtls_sock_release(&csk->kref); + free_dst: +- neigh_release(n); ++ if (n) ++ neigh_release(n); + dst_release(dst); + free_sk: + inet_csk_prepare_forced_close(newsk); +diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c +index 5a9f0d17f52c8..e1ef0122ef759 100644 +--- a/drivers/i2c/busses/i2c-mt65xx.c ++++ b/drivers/i2c/busses/i2c-mt65xx.c +@@ -1008,7 +1008,8 @@ static int mtk_i2c_probe(struct platform_device *pdev) + mtk_i2c_clock_disable(i2c); + + ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq, +- IRQF_TRIGGER_NONE, I2C_DRV_NAME, i2c); ++ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE, ++ I2C_DRV_NAME, i2c); + if (ret < 0) { + dev_err(&pdev->dev, + "Request I2C IRQ %d fail\n", irq); +@@ -1035,7 +1036,16 @@ static int mtk_i2c_remove(struct platform_device *pdev) + } + + #ifdef CONFIG_PM_SLEEP +-static int mtk_i2c_resume(struct device *dev) ++static int mtk_i2c_suspend_noirq(struct device *dev) ++{ ++ struct mtk_i2c *i2c = dev_get_drvdata(dev); ++ ++ i2c_mark_adapter_suspended(&i2c->adap); ++ ++ return 0; ++} ++ ++static int mtk_i2c_resume_noirq(struct device *dev) + { + int ret; + struct mtk_i2c *i2c = dev_get_drvdata(dev); +@@ 
-1050,12 +1060,15 @@ static int mtk_i2c_resume(struct device *dev) + + mtk_i2c_clock_disable(i2c); + ++ i2c_mark_adapter_resumed(&i2c->adap); ++ + return 0; + } + #endif + + static const struct dev_pm_ops mtk_i2c_pm = { +- SET_SYSTEM_SLEEP_PM_OPS(NULL, mtk_i2c_resume) ++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq, ++ mtk_i2c_resume_noirq) + }; + + static struct platform_driver mtk_i2c_driver = { +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +index f043eefabb4ec..7b1d2dac6ceb8 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +@@ -514,7 +514,10 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file, + const size_t bufsz = sizeof(buf); + int pos = 0; + ++ mutex_lock(&mvm->mutex); + iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os); ++ mutex_unlock(&mvm->mutex); ++ + do_div(curr_os, NSEC_PER_USEC); + diff = curr_os - curr_gp2; + pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff); +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +index daae86cd61140..fc6430edd1107 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +@@ -4169,6 +4169,9 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, + iwl_mvm_binding_remove_vif(mvm, vif); + + out: ++ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) && ++ switching_chanctx) ++ return; + mvmvif->phy_ctxt = NULL; + iwl_mvm_power_update_mac(mvm); + } +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +index b04cc6214bac8..8b0576cde797e 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +@@ -838,6 +838,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct 
iwl_cfg *cfg, + if (!mvm->scan_cmd) + goto out_free; + ++ /* invalidate ids to prevent accidental removal of sta_id 0 */ ++ mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA; ++ mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA; ++ + /* Set EBS as successful as long as not stated otherwise by the FW. */ + mvm->last_ebs_successful = true; + +@@ -1238,6 +1242,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk) + reprobe = container_of(wk, struct iwl_mvm_reprobe, work); + if (device_reprobe(reprobe->dev)) + dev_err(reprobe->dev, "reprobe failed!\n"); ++ put_device(reprobe->dev); + kfree(reprobe); + module_put(THIS_MODULE); + } +@@ -1288,7 +1293,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) + module_put(THIS_MODULE); + return; + } +- reprobe->dev = mvm->trans->dev; ++ reprobe->dev = get_device(mvm->trans->dev); + INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); + schedule_work(&reprobe->work); + } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +index a36aa9e85e0b3..40cafcf40ccf0 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +@@ -2070,6 +2070,9 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) + + lockdep_assert_held(&mvm->mutex); + ++ if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) ++ return -EINVAL; ++ + iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0); + ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); + if (ret) +@@ -2084,6 +2087,9 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) + + lockdep_assert_held(&mvm->mutex); + ++ if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) ++ return -EINVAL; ++ + iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0); + ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); + if (ret) +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c 
b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +index 7a5b024a6d384..eab159205e48b 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +@@ -164,8 +164,10 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, + /* Allocate IML */ + iml_img = dma_alloc_coherent(trans->dev, trans->iml_len, + &trans_pcie->iml_dma_addr, GFP_KERNEL); +- if (!iml_img) +- return -ENOMEM; ++ if (!iml_img) { ++ ret = -ENOMEM; ++ goto err_free_ctxt_info; ++ } + + memcpy(iml_img, trans->iml, trans->iml_len); + +@@ -207,6 +209,11 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, + + return 0; + ++err_free_ctxt_info: ++ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), ++ trans_pcie->ctxt_info_gen3, ++ trans_pcie->ctxt_info_dma_addr); ++ trans_pcie->ctxt_info_gen3 = NULL; + err_free_prph_info: + dma_free_coherent(trans->dev, + sizeof(*prph_info), +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +index d3b58334e13ea..e7dcf8bc99b7c 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +@@ -657,6 +657,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txq[txq_id]; + ++ if (!txq) { ++ IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); ++ return; ++ } ++ + spin_lock_bh(&txq->lock); + while (txq->write_ptr != txq->read_ptr) { + IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index c9b8613e69db2..5b9d570df85cc 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -1772,13 +1772,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + { + struct regulator_dev *r; + struct device *dev = rdev->dev.parent; +- int ret; ++ int 
ret = 0; + + /* No supply to resolve? */ + if (!rdev->supply_name) + return 0; + +- /* Supply already resolved? */ ++ /* Supply already resolved? (fast-path without locking contention) */ + if (rdev->supply) + return 0; + +@@ -1788,7 +1788,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + + /* Did the lookup explicitly defer for us? */ + if (ret == -EPROBE_DEFER) +- return ret; ++ goto out; + + if (have_full_constraints()) { + r = dummy_regulator_rdev; +@@ -1796,15 +1796,18 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + } else { + dev_err(dev, "Failed to resolve %s-supply for %s\n", + rdev->supply_name, rdev->desc->name); +- return -EPROBE_DEFER; ++ ret = -EPROBE_DEFER; ++ goto out; + } + } + + if (r == rdev) { + dev_err(dev, "Supply for %s (%s) resolved to itself\n", + rdev->desc->name, rdev->supply_name); +- if (!have_full_constraints()) +- return -EINVAL; ++ if (!have_full_constraints()) { ++ ret = -EINVAL; ++ goto out; ++ } + r = dummy_regulator_rdev; + get_device(&r->dev); + } +@@ -1818,7 +1821,8 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + if (r->dev.parent && r->dev.parent != rdev->dev.parent) { + if (!device_is_bound(r->dev.parent)) { + put_device(&r->dev); +- return -EPROBE_DEFER; ++ ret = -EPROBE_DEFER; ++ goto out; + } + } + +@@ -1826,15 +1830,32 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + ret = regulator_resolve_supply(r); + if (ret < 0) { + put_device(&r->dev); +- return ret; ++ goto out; ++ } ++ ++ /* ++ * Recheck rdev->supply with rdev->mutex lock held to avoid a race ++ * between rdev->supply null check and setting rdev->supply in ++ * set_supply() from concurrent tasks. ++ */ ++ regulator_lock(rdev); ++ ++ /* Supply just resolved by a concurrent task? 
*/ ++ if (rdev->supply) { ++ regulator_unlock(rdev); ++ put_device(&r->dev); ++ goto out; + } + + ret = set_supply(rdev, r); + if (ret < 0) { ++ regulator_unlock(rdev); + put_device(&r->dev); +- return ret; ++ goto out; + } + ++ regulator_unlock(rdev); ++ + /* + * In set_machine_constraints() we may have turned this regulator on + * but we couldn't propagate to the supply if it hadn't been resolved +@@ -1845,11 +1866,12 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + if (ret < 0) { + _regulator_put(rdev->supply); + rdev->supply = NULL; +- return ret; ++ goto out; + } + } + +- return 0; ++out: ++ return ret; + } + + /* Internal regulator request function */ +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c +index ca1d98f274d12..e3a79e6958124 100644 +--- a/fs/nfs/pnfs.c ++++ b/fs/nfs/pnfs.c +@@ -2369,7 +2369,13 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) + * We got an entirely new state ID. Mark all segments for the + * inode invalid, and retry the layoutget + */ +- pnfs_mark_layout_stateid_invalid(lo, &free_me); ++ struct pnfs_layout_range range = { ++ .iomode = IOMODE_ANY, ++ .length = NFS4_MAX_UINT64, ++ }; ++ pnfs_set_plh_return_info(lo, IOMODE_ANY, 0); ++ pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, ++ &range, 0); + goto out_forget; + } + +diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c +index ae2c87bb0fbec..eb02072d28dd6 100644 +--- a/fs/squashfs/export.c ++++ b/fs/squashfs/export.c +@@ -41,12 +41,17 @@ static long long squashfs_inode_lookup(struct super_block *sb, int ino_num) + struct squashfs_sb_info *msblk = sb->s_fs_info; + int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1); + int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1); +- u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]); ++ u64 start; + __le64 ino; + int err; + + TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num); + ++ if (ino_num == 0 || (ino_num - 1) >= msblk->inodes) ++ return -EINVAL; ++ ++ start = 
le64_to_cpu(msblk->inode_lookup_table[blk]); ++ + err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino)); + if (err < 0) + return err; +@@ -111,7 +116,10 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb, + u64 lookup_table_start, u64 next_table, unsigned int inodes) + { + unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes); ++ unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes); ++ int n; + __le64 *table; ++ u64 start, end; + + TRACE("In read_inode_lookup_table, length %d\n", length); + +@@ -121,20 +129,37 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb, + if (inodes == 0) + return ERR_PTR(-EINVAL); + +- /* length bytes should not extend into the next table - this check +- * also traps instances where lookup_table_start is incorrectly larger +- * than the next table start ++ /* ++ * The computed size of the lookup table (length bytes) should exactly ++ * match the table start and end points + */ +- if (lookup_table_start + length > next_table) ++ if (length != (next_table - lookup_table_start)) + return ERR_PTR(-EINVAL); + + table = squashfs_read_table(sb, lookup_table_start, length); ++ if (IS_ERR(table)) ++ return table; + + /* +- * table[0] points to the first inode lookup table metadata block, +- * this should be less than lookup_table_start ++ * table0], table[1], ... table[indexes - 1] store the locations ++ * of the compressed inode lookup blocks. Each entry should be ++ * less than the next (i.e. table[0] < table[1]), and the difference ++ * between them should be SQUASHFS_METADATA_SIZE or less. 
++ * table[indexes - 1] should be less than lookup_table_start, and ++ * again the difference should be SQUASHFS_METADATA_SIZE or less + */ +- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) { ++ for (n = 0; n < (indexes - 1); n++) { ++ start = le64_to_cpu(table[n]); ++ end = le64_to_cpu(table[n + 1]); ++ ++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } ++ } ++ ++ start = le64_to_cpu(table[indexes - 1]); ++ if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) { + kfree(table); + return ERR_PTR(-EINVAL); + } +diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c +index 6be5afe7287d6..11581bf31af41 100644 +--- a/fs/squashfs/id.c ++++ b/fs/squashfs/id.c +@@ -35,10 +35,15 @@ int squashfs_get_id(struct super_block *sb, unsigned int index, + struct squashfs_sb_info *msblk = sb->s_fs_info; + int block = SQUASHFS_ID_BLOCK(index); + int offset = SQUASHFS_ID_BLOCK_OFFSET(index); +- u64 start_block = le64_to_cpu(msblk->id_table[block]); ++ u64 start_block; + __le32 disk_id; + int err; + ++ if (index >= msblk->ids) ++ return -EINVAL; ++ ++ start_block = le64_to_cpu(msblk->id_table[block]); ++ + err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset, + sizeof(disk_id)); + if (err < 0) +@@ -56,7 +61,10 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb, + u64 id_table_start, u64 next_table, unsigned short no_ids) + { + unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids); ++ unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids); ++ int n; + __le64 *table; ++ u64 start, end; + + TRACE("In read_id_index_table, length %d\n", length); + +@@ -67,20 +75,36 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb, + return ERR_PTR(-EINVAL); + + /* +- * length bytes should not extend into the next table - this check +- * also traps instances where id_table_start is incorrectly larger +- * than the next table start ++ * The computed size of the 
index table (length bytes) should exactly ++ * match the table start and end points + */ +- if (id_table_start + length > next_table) ++ if (length != (next_table - id_table_start)) + return ERR_PTR(-EINVAL); + + table = squashfs_read_table(sb, id_table_start, length); ++ if (IS_ERR(table)) ++ return table; + + /* +- * table[0] points to the first id lookup table metadata block, this +- * should be less than id_table_start ++ * table[0], table[1], ... table[indexes - 1] store the locations ++ * of the compressed id blocks. Each entry should be less than ++ * the next (i.e. table[0] < table[1]), and the difference between them ++ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1] ++ * should be less than id_table_start, and again the difference ++ * should be SQUASHFS_METADATA_SIZE or less + */ +- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) { ++ for (n = 0; n < (indexes - 1); n++) { ++ start = le64_to_cpu(table[n]); ++ end = le64_to_cpu(table[n + 1]); ++ ++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } ++ } ++ ++ start = le64_to_cpu(table[indexes - 1]); ++ if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) { + kfree(table); + return ERR_PTR(-EINVAL); + } +diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h +index 34c21ffb6df37..166e98806265b 100644 +--- a/fs/squashfs/squashfs_fs_sb.h ++++ b/fs/squashfs/squashfs_fs_sb.h +@@ -64,5 +64,6 @@ struct squashfs_sb_info { + unsigned int inodes; + unsigned int fragments; + int xattr_ids; ++ unsigned int ids; + }; + #endif +diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c +index 0cc4ceec05624..2110323b610b9 100644 +--- a/fs/squashfs/super.c ++++ b/fs/squashfs/super.c +@@ -166,6 +166,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) + msblk->directory_table = le64_to_cpu(sblk->directory_table_start); + msblk->inodes = 
le32_to_cpu(sblk->inodes); + msblk->fragments = le32_to_cpu(sblk->fragments); ++ msblk->ids = le16_to_cpu(sblk->no_ids); + flags = le16_to_cpu(sblk->flags); + + TRACE("Found valid superblock on %pg\n", sb->s_bdev); +@@ -177,7 +178,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) + TRACE("Block size %d\n", msblk->block_size); + TRACE("Number of inodes %d\n", msblk->inodes); + TRACE("Number of fragments %d\n", msblk->fragments); +- TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids)); ++ TRACE("Number of ids %d\n", msblk->ids); + TRACE("sblk->inode_table_start %llx\n", msblk->inode_table); + TRACE("sblk->directory_table_start %llx\n", msblk->directory_table); + TRACE("sblk->fragment_table_start %llx\n", +@@ -236,8 +237,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) + allocate_id_index_table: + /* Allocate and read id index table */ + msblk->id_table = squashfs_read_id_index_table(sb, +- le64_to_cpu(sblk->id_table_start), next_table, +- le16_to_cpu(sblk->no_ids)); ++ le64_to_cpu(sblk->id_table_start), next_table, msblk->ids); + if (IS_ERR(msblk->id_table)) { + errorf(fc, "unable to read id index table"); + err = PTR_ERR(msblk->id_table); +diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h +index 184129afd4566..d8a270d3ac4cb 100644 +--- a/fs/squashfs/xattr.h ++++ b/fs/squashfs/xattr.h +@@ -17,8 +17,16 @@ extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *, + static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb, + u64 start, u64 *xattr_table_start, int *xattr_ids) + { ++ struct squashfs_xattr_id_table *id_table; ++ ++ id_table = squashfs_read_table(sb, start, sizeof(*id_table)); ++ if (IS_ERR(id_table)) ++ return (__le64 *) id_table; ++ ++ *xattr_table_start = le64_to_cpu(id_table->xattr_table_start); ++ kfree(id_table); ++ + ERROR("Xattrs in filesystem, these will be ignored\n"); +- *xattr_table_start = start; + return ERR_PTR(-ENOTSUPP); + } + +diff 
--git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c +index d99e08464554f..ead66670b41a5 100644 +--- a/fs/squashfs/xattr_id.c ++++ b/fs/squashfs/xattr_id.c +@@ -31,10 +31,15 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index, + struct squashfs_sb_info *msblk = sb->s_fs_info; + int block = SQUASHFS_XATTR_BLOCK(index); + int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index); +- u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]); ++ u64 start_block; + struct squashfs_xattr_id id; + int err; + ++ if (index >= msblk->xattr_ids) ++ return -EINVAL; ++ ++ start_block = le64_to_cpu(msblk->xattr_id_table[block]); ++ + err = squashfs_read_metadata(sb, &id, &start_block, &offset, + sizeof(id)); + if (err < 0) +@@ -50,13 +55,17 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index, + /* + * Read uncompressed xattr id lookup table indexes from disk into memory + */ +-__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start, ++__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start, + u64 *xattr_table_start, int *xattr_ids) + { +- unsigned int len; ++ struct squashfs_sb_info *msblk = sb->s_fs_info; ++ unsigned int len, indexes; + struct squashfs_xattr_id_table *id_table; ++ __le64 *table; ++ u64 start, end; ++ int n; + +- id_table = squashfs_read_table(sb, start, sizeof(*id_table)); ++ id_table = squashfs_read_table(sb, table_start, sizeof(*id_table)); + if (IS_ERR(id_table)) + return (__le64 *) id_table; + +@@ -70,13 +79,52 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start, + if (*xattr_ids == 0) + return ERR_PTR(-EINVAL); + +- /* xattr_table should be less than start */ +- if (*xattr_table_start >= start) ++ len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids); ++ indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids); ++ ++ /* ++ * The computed size of the index table (len bytes) should exactly ++ * match the table start and end points ++ */ ++ start = table_start + sizeof(*id_table); ++ end = 
msblk->bytes_used; ++ ++ if (len != (end - start)) + return ERR_PTR(-EINVAL); + +- len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids); ++ table = squashfs_read_table(sb, start, len); ++ if (IS_ERR(table)) ++ return table; ++ ++ /* table[0], table[1], ... table[indexes - 1] store the locations ++ * of the compressed xattr id blocks. Each entry should be less than ++ * the next (i.e. table[0] < table[1]), and the difference between them ++ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1] ++ * should be less than table_start, and again the difference ++ * shouls be SQUASHFS_METADATA_SIZE or less. ++ * ++ * Finally xattr_table_start should be less than table[0]. ++ */ ++ for (n = 0; n < (indexes - 1); n++) { ++ start = le64_to_cpu(table[n]); ++ end = le64_to_cpu(table[n + 1]); ++ ++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } ++ } ++ ++ start = le64_to_cpu(table[indexes - 1]); ++ if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } + +- TRACE("In read_xattr_index_table, length %d\n", len); ++ if (*xattr_table_start >= le64_to_cpu(table[0])) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } + +- return squashfs_read_table(sb, start + sizeof(*id_table), len); ++ return table; + } +diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h +index a60488867dd06..a121fd8e7c3a0 100644 +--- a/include/linux/kprobes.h ++++ b/include/linux/kprobes.h +@@ -232,7 +232,7 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p); + extern bool arch_within_kprobe_blacklist(unsigned long addr); + extern int arch_populate_kprobe_blacklist(void); + extern bool arch_kprobe_on_func_entry(unsigned long offset); +-extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); ++extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); + + extern bool 
within_kprobe_blacklist(unsigned long addr); + extern int kprobe_add_ksym_blacklist(unsigned long entry); +diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h +index 9db6097c22c5d..a8d68c5a4ca61 100644 +--- a/include/linux/sunrpc/xdr.h ++++ b/include/linux/sunrpc/xdr.h +@@ -27,8 +27,7 @@ struct rpc_rqst; + #define XDR_QUADLEN(l) (((l) + 3) >> 2) + + /* +- * Generic opaque `network object.' At the kernel level, this type +- * is used only by lockd. ++ * Generic opaque `network object.' + */ + #define XDR_MAX_NETOBJ 1024 + struct xdr_netobj { +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index a67bfa803d983..2c248c4f6419c 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -9002,30 +9002,28 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) + insn->code == (BPF_ALU | BPF_MOD | BPF_X) || + insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { + bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; +- struct bpf_insn mask_and_div[] = { +- BPF_MOV32_REG(insn->src_reg, insn->src_reg), ++ bool isdiv = BPF_OP(insn->code) == BPF_DIV; ++ struct bpf_insn *patchlet; ++ struct bpf_insn chk_and_div[] = { + /* Rx div 0 -> 0 */ +- BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2), ++ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | ++ BPF_JNE | BPF_K, insn->src_reg, ++ 0, 2, 0), + BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + *insn, + }; +- struct bpf_insn mask_and_mod[] = { +- BPF_MOV32_REG(insn->src_reg, insn->src_reg), ++ struct bpf_insn chk_and_mod[] = { + /* Rx mod 0 -> Rx */ +- BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1), ++ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | ++ BPF_JEQ | BPF_K, insn->src_reg, ++ 0, 1, 0), + *insn, + }; +- struct bpf_insn *patchlet; + +- if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || +- insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { +- patchlet = mask_and_div + (is64 ? 1 : 0); +- cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0); +- } else { +- patchlet = mask_and_mod + (is64 ? 
1 : 0); +- cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0); +- } ++ patchlet = isdiv ? chk_and_div : chk_and_mod; ++ cnt = isdiv ? ARRAY_SIZE(chk_and_div) : ++ ARRAY_SIZE(chk_and_mod); + + new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); + if (!new_prog) +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index 26ae92c12fc22..a7812c115e487 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -1948,29 +1948,45 @@ bool __weak arch_kprobe_on_func_entry(unsigned long offset) + return !offset; + } + +-bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) ++/** ++ * kprobe_on_func_entry() -- check whether given address is function entry ++ * @addr: Target address ++ * @sym: Target symbol name ++ * @offset: The offset from the symbol or the address ++ * ++ * This checks whether the given @addr+@offset or @sym+@offset is on the ++ * function entry address or not. ++ * This returns 0 if it is the function entry, or -EINVAL if it is not. ++ * And also it returns -ENOENT if it fails the symbol or address lookup. ++ * Caller must pass @addr or @sym (either one must be NULL), or this ++ * returns -EINVAL. 
++ */ ++int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) + { + kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset); + + if (IS_ERR(kp_addr)) +- return false; ++ return PTR_ERR(kp_addr); + +- if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) || +- !arch_kprobe_on_func_entry(offset)) +- return false; ++ if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset)) ++ return -ENOENT; + +- return true; ++ if (!arch_kprobe_on_func_entry(offset)) ++ return -EINVAL; ++ ++ return 0; + } + + int register_kretprobe(struct kretprobe *rp) + { +- int ret = 0; ++ int ret; + struct kretprobe_instance *inst; + int i; + void *addr; + +- if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset)) +- return -EINVAL; ++ ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset); ++ if (ret) ++ return ret; + + /* If only rp->kp.addr is specified, check reregistering kprobes */ + if (rp->kp.addr && check_kprobe_rereg(&rp->kp)) +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index 1074a69beff3f..233322c77b76c 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -220,9 +220,9 @@ bool trace_kprobe_on_func_entry(struct trace_event_call *call) + { + struct trace_kprobe *tk = trace_kprobe_primary_from_call(call); + +- return tk ? kprobe_on_func_entry(tk->rp.kp.addr, ++ return tk ? (kprobe_on_func_entry(tk->rp.kp.addr, + tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name, +- tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false; ++ tk->rp.kp.addr ? 
0 : tk->rp.kp.offset) == 0) : false; + } + + bool trace_kprobe_error_injectable(struct trace_event_call *call) +@@ -811,9 +811,11 @@ static int trace_kprobe_create(int argc, const char *argv[]) + trace_probe_log_err(0, BAD_PROBE_ADDR); + goto parse_error; + } +- if (kprobe_on_func_entry(NULL, symbol, offset)) ++ ret = kprobe_on_func_entry(NULL, symbol, offset); ++ if (ret == 0) + flags |= TPARG_FL_FENTRY; +- if (offset && is_return && !(flags & TPARG_FL_FENTRY)) { ++ /* Defer the ENOENT case until register kprobe */ ++ if (ret == -EINVAL && is_return) { + trace_probe_log_err(0, BAD_RETPROBE); + goto parse_error; + } +diff --git a/net/key/af_key.c b/net/key/af_key.c +index a915bc86620af..907d04a474597 100644 +--- a/net/key/af_key.c ++++ b/net/key/af_key.c +@@ -2902,7 +2902,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t) + break; + if (!aalg->pfkey_supported) + continue; +- if (aalg_tmpl_set(t, aalg) && aalg->available) ++ if (aalg_tmpl_set(t, aalg)) + sz += sizeof(struct sadb_comb); + } + return sz + sizeof(struct sadb_prop); +@@ -2920,7 +2920,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) + if (!ealg->pfkey_supported) + continue; + +- if (!(ealg_tmpl_set(t, ealg) && ealg->available)) ++ if (!(ealg_tmpl_set(t, ealg))) + continue; + + for (k = 1; ; k++) { +@@ -2931,7 +2931,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) + if (!aalg->pfkey_supported) + continue; + +- if (aalg_tmpl_set(t, aalg) && aalg->available) ++ if (aalg_tmpl_set(t, aalg)) + sz += sizeof(struct sadb_comb); + } + } +diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c +index 5fe2b645912f6..132f8423addaa 100644 +--- a/net/mac80211/spectmgmt.c ++++ b/net/mac80211/spectmgmt.c +@@ -132,16 +132,20 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, + } + + if (wide_bw_chansw_ie) { ++ u8 new_seg1 = wide_bw_chansw_ie->new_center_freq_seg1; + struct ieee80211_vht_operation vht_oper = { + .chan_width = + wide_bw_chansw_ie->new_channel_width, 
+ .center_freq_seg0_idx = + wide_bw_chansw_ie->new_center_freq_seg0, +- .center_freq_seg1_idx = +- wide_bw_chansw_ie->new_center_freq_seg1, ++ .center_freq_seg1_idx = new_seg1, + /* .basic_mcs_set doesn't matter */ + }; +- struct ieee80211_ht_operation ht_oper = {}; ++ struct ieee80211_ht_operation ht_oper = { ++ .operation_mode = ++ cpu_to_le16(new_seg1 << ++ IEEE80211_HT_OP_MODE_CCFS2_SHIFT), ++ }; + + /* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT, + * to the previously parsed chandef +diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c +index 5fc6c028f89c0..b7a71578bd986 100644 +--- a/net/sunrpc/auth_gss/auth_gss.c ++++ b/net/sunrpc/auth_gss/auth_gss.c +@@ -29,6 +29,7 @@ + #include <linux/uaccess.h> + #include <linux/hashtable.h> + ++#include "auth_gss_internal.h" + #include "../netns.h" + + #include <trace/events/rpcgss.h> +@@ -125,35 +126,6 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) + clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); + } + +-static const void * +-simple_get_bytes(const void *p, const void *end, void *res, size_t len) +-{ +- const void *q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return ERR_PTR(-EFAULT); +- memcpy(res, p, len); +- return q; +-} +- +-static inline const void * +-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) +-{ +- const void *q; +- unsigned int len; +- +- p = simple_get_bytes(p, end, &len, sizeof(len)); +- if (IS_ERR(p)) +- return p; +- q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return ERR_PTR(-EFAULT); +- dest->data = kmemdup(p, len, GFP_NOFS); +- if (unlikely(dest->data == NULL)) +- return ERR_PTR(-ENOMEM); +- dest->len = len; +- return q; +-} +- + static struct gss_cl_ctx * + gss_cred_get_ctx(struct rpc_cred *cred) + { +diff --git a/net/sunrpc/auth_gss/auth_gss_internal.h b/net/sunrpc/auth_gss/auth_gss_internal.h +new file mode 100644 +index 
0000000000000..f6d9631bd9d00 +--- /dev/null ++++ b/net/sunrpc/auth_gss/auth_gss_internal.h +@@ -0,0 +1,45 @@ ++// SPDX-License-Identifier: BSD-3-Clause ++/* ++ * linux/net/sunrpc/auth_gss/auth_gss_internal.h ++ * ++ * Internal definitions for RPCSEC_GSS client authentication ++ * ++ * Copyright (c) 2000 The Regents of the University of Michigan. ++ * All rights reserved. ++ * ++ */ ++#include <linux/err.h> ++#include <linux/string.h> ++#include <linux/sunrpc/xdr.h> ++ ++static inline const void * ++simple_get_bytes(const void *p, const void *end, void *res, size_t len) ++{ ++ const void *q = (const void *)((const char *)p + len); ++ if (unlikely(q > end || q < p)) ++ return ERR_PTR(-EFAULT); ++ memcpy(res, p, len); ++ return q; ++} ++ ++static inline const void * ++simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) ++{ ++ const void *q; ++ unsigned int len; ++ ++ p = simple_get_bytes(p, end, &len, sizeof(len)); ++ if (IS_ERR(p)) ++ return p; ++ q = (const void *)((const char *)p + len); ++ if (unlikely(q > end || q < p)) ++ return ERR_PTR(-EFAULT); ++ if (len) { ++ dest->data = kmemdup(p, len, GFP_NOFS); ++ if (unlikely(dest->data == NULL)) ++ return ERR_PTR(-ENOMEM); ++ } else ++ dest->data = NULL; ++ dest->len = len; ++ return q; ++} +diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c +index 6e5d6d2402158..b552dd4f32f80 100644 +--- a/net/sunrpc/auth_gss/gss_krb5_mech.c ++++ b/net/sunrpc/auth_gss/gss_krb5_mech.c +@@ -21,6 +21,8 @@ + #include <linux/sunrpc/xdr.h> + #include <linux/sunrpc/gss_krb5_enctypes.h> + ++#include "auth_gss_internal.h" ++ + #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) + # define RPCDBG_FACILITY RPCDBG_AUTH + #endif +@@ -164,35 +166,6 @@ get_gss_krb5_enctype(int etype) + return NULL; + } + +-static const void * +-simple_get_bytes(const void *p, const void *end, void *res, int len) +-{ +- const void *q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return 
ERR_PTR(-EFAULT); +- memcpy(res, p, len); +- return q; +-} +- +-static const void * +-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) +-{ +- const void *q; +- unsigned int len; +- +- p = simple_get_bytes(p, end, &len, sizeof(len)); +- if (IS_ERR(p)) +- return p; +- q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return ERR_PTR(-EFAULT); +- res->data = kmemdup(p, len, GFP_NOFS); +- if (unlikely(res->data == NULL)) +- return ERR_PTR(-ENOMEM); +- res->len = len; +- return q; +-} +- + static inline const void * + get_key(const void *p, const void *end, + struct krb5_ctx *ctx, struct crypto_sync_skcipher **res) +diff --git a/sound/soc/codecs/ak4458.c b/sound/soc/codecs/ak4458.c +index 71562154c0b1e..217e8ce9a4ba4 100644 +--- a/sound/soc/codecs/ak4458.c ++++ b/sound/soc/codecs/ak4458.c +@@ -523,18 +523,10 @@ static struct snd_soc_dai_driver ak4497_dai = { + .ops = &ak4458_dai_ops, + }; + +-static void ak4458_power_off(struct ak4458_priv *ak4458) ++static void ak4458_reset(struct ak4458_priv *ak4458, bool active) + { + if (ak4458->reset_gpiod) { +- gpiod_set_value_cansleep(ak4458->reset_gpiod, 0); +- usleep_range(1000, 2000); +- } +-} +- +-static void ak4458_power_on(struct ak4458_priv *ak4458) +-{ +- if (ak4458->reset_gpiod) { +- gpiod_set_value_cansleep(ak4458->reset_gpiod, 1); ++ gpiod_set_value_cansleep(ak4458->reset_gpiod, active); + usleep_range(1000, 2000); + } + } +@@ -548,7 +540,7 @@ static int ak4458_init(struct snd_soc_component *component) + if (ak4458->mute_gpiod) + gpiod_set_value_cansleep(ak4458->mute_gpiod, 1); + +- ak4458_power_on(ak4458); ++ ak4458_reset(ak4458, false); + + ret = snd_soc_component_update_bits(component, AK4458_00_CONTROL1, + 0x80, 0x80); /* ACKS bit = 1; 10000000 */ +@@ -571,7 +563,7 @@ static void ak4458_remove(struct snd_soc_component *component) + { + struct ak4458_priv *ak4458 = snd_soc_component_get_drvdata(component); + +- ak4458_power_off(ak4458); ++ ak4458_reset(ak4458, 
true); + } + + #ifdef CONFIG_PM +@@ -581,7 +573,7 @@ static int __maybe_unused ak4458_runtime_suspend(struct device *dev) + + regcache_cache_only(ak4458->regmap, true); + +- ak4458_power_off(ak4458); ++ ak4458_reset(ak4458, true); + + if (ak4458->mute_gpiod) + gpiod_set_value_cansleep(ak4458->mute_gpiod, 0); +@@ -596,8 +588,8 @@ static int __maybe_unused ak4458_runtime_resume(struct device *dev) + if (ak4458->mute_gpiod) + gpiod_set_value_cansleep(ak4458->mute_gpiod, 1); + +- ak4458_power_off(ak4458); +- ak4458_power_on(ak4458); ++ ak4458_reset(ak4458, true); ++ ak4458_reset(ak4458, false); + + regcache_cache_only(ak4458->regmap, false); + regcache_mark_dirty(ak4458->regmap); +diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c +index 2cb719893324a..1940b17f27efa 100644 +--- a/sound/soc/intel/skylake/skl-topology.c ++++ b/sound/soc/intel/skylake/skl-topology.c +@@ -3632,7 +3632,7 @@ static void skl_tplg_complete(struct snd_soc_component *component) + sprintf(chan_text, "c%d", mach->mach_params.dmic_num); + + for (i = 0; i < se->items; i++) { +- struct snd_ctl_elem_value val; ++ struct snd_ctl_elem_value val = {}; + + if (strstr(texts[i], chan_text)) { + val.value.enumerated.item[0] = i; |