 0000_README            |     4 +
 1002_linux-6.7.3.patch | 18749 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 18753 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 4b219a2e..8b508855 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch: 1001_linux-6.7.2.patch
From: https://www.kernel.org
Desc: Linux 6.7.2
+Patch: 1002_linux-6.7.3.patch
+From: https://www.kernel.org
+Desc: Linux 6.7.3
+
Patch: 1510_fs-enable-link-security-restrictions-by-default.patch
From: http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
Desc: Enable link security restrictions by default.
diff --git a/1002_linux-6.7.3.patch b/1002_linux-6.7.3.patch
new file mode 100644
index 00000000..a92db1e5
--- /dev/null
+++ b/1002_linux-6.7.3.patch
@@ -0,0 +1,18749 @@
+diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq
+index 5e6b74f304062..1e7e0bb4c14ec 100644
+--- a/Documentation/ABI/testing/sysfs-class-devfreq
++++ b/Documentation/ABI/testing/sysfs-class-devfreq
+@@ -52,6 +52,9 @@ Description:
+
+ echo 0 > /sys/class/devfreq/.../trans_stat
+
++ If the transition table is bigger than PAGE_SIZE, reading
++ this will return an -EFBIG error.
++
+ What: /sys/class/devfreq/.../available_frequencies
+ Date: October 2012
+ Contact: Nishanth Menon <nm@ti.com>
+diff --git a/Documentation/admin-guide/abi-obsolete.rst b/Documentation/admin-guide/abi-obsolete.rst
+index d095867899c59..594e697aa1b2f 100644
+--- a/Documentation/admin-guide/abi-obsolete.rst
++++ b/Documentation/admin-guide/abi-obsolete.rst
+@@ -7,5 +7,5 @@ marked to be removed at some later point in time.
+ The description of the interface will document the reason why it is
+ obsolete and when it can be expected to be removed.
+
+-.. kernel-abi:: $srctree/Documentation/ABI/obsolete
++.. kernel-abi:: ABI/obsolete
+ :rst:
+diff --git a/Documentation/admin-guide/abi-removed.rst b/Documentation/admin-guide/abi-removed.rst
+index f7e9e43023c13..f9e000c81828e 100644
+--- a/Documentation/admin-guide/abi-removed.rst
++++ b/Documentation/admin-guide/abi-removed.rst
+@@ -1,5 +1,5 @@
+ ABI removed symbols
+ ===================
+
+-.. kernel-abi:: $srctree/Documentation/ABI/removed
++.. kernel-abi:: ABI/removed
+ :rst:
+diff --git a/Documentation/admin-guide/abi-stable.rst b/Documentation/admin-guide/abi-stable.rst
+index 70490736e0d30..fc3361d847b12 100644
+--- a/Documentation/admin-guide/abi-stable.rst
++++ b/Documentation/admin-guide/abi-stable.rst
+@@ -10,5 +10,5 @@ for at least 2 years.
+ Most interfaces (like syscalls) are expected to never change and always
+ be available.
+
+-.. kernel-abi:: $srctree/Documentation/ABI/stable
++.. kernel-abi:: ABI/stable
+ :rst:
+diff --git a/Documentation/admin-guide/abi-testing.rst b/Documentation/admin-guide/abi-testing.rst
+index b205b16a72d08..19767926b3440 100644
+--- a/Documentation/admin-guide/abi-testing.rst
++++ b/Documentation/admin-guide/abi-testing.rst
+@@ -16,5 +16,5 @@ Programs that use these interfaces are strongly encouraged to add their
+ name to the description of these interfaces, so that the kernel
+ developers can easily notify them if any changes occur.
+
+-.. kernel-abi:: $srctree/Documentation/ABI/testing
++.. kernel-abi:: ABI/testing
+ :rst:
+diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
+index f47f63bcf67c9..7acd64c61f50c 100644
+--- a/Documentation/arch/arm64/silicon-errata.rst
++++ b/Documentation/arch/arm64/silicon-errata.rst
+@@ -71,6 +71,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A510 | #2658417 | ARM64_ERRATUM_2658417 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-A510 | #3117295 | ARM64_ERRATUM_3117295 |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A520 | #2966298 | ARM64_ERRATUM_2966298 |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
+diff --git a/Documentation/filesystems/directory-locking.rst b/Documentation/filesystems/directory-locking.rst
+index dccd61c7c5c3b..193c22687851a 100644
+--- a/Documentation/filesystems/directory-locking.rst
++++ b/Documentation/filesystems/directory-locking.rst
+@@ -22,13 +22,16 @@ exclusive.
+ 3) object removal. Locking rules: caller locks parent, finds victim,
+ locks victim and calls the method. Locks are exclusive.
+
+-4) rename() that is _not_ cross-directory. Locking rules: caller locks the
+-parent and finds source and target. We lock both (provided they exist). If we
+-need to lock two inodes of different type (dir vs non-dir), we lock directory
+-first. If we need to lock two inodes of the same type, lock them in inode
+-pointer order. Then call the method. All locks are exclusive.
+-NB: we might get away with locking the source (and target in exchange
+-case) shared.
++4) rename() that is _not_ cross-directory. Locking rules: caller locks
++the parent and finds source and target. Then we decide which of the
++source and target need to be locked. Source needs to be locked if it's a
++non-directory; target - if it's a non-directory or about to be removed.
++Take the locks that need to be taken, in inode pointer order if need
++to take both (that can happen only when both source and target are
++non-directories - the source because it wouldn't be locked otherwise
++and the target because mixing directory and non-directory is allowed
++only with RENAME_EXCHANGE, and that won't be removing the target).
++After the locks had been taken, call the method. All locks are exclusive.
+
+ 5) link creation. Locking rules:
+
+@@ -44,20 +47,17 @@ rules:
+
+ * lock the filesystem
+ * lock parents in "ancestors first" order. If one is not ancestor of
+- the other, lock them in inode pointer order.
++ the other, lock the parent of source first.
+ * find source and target.
+ * if old parent is equal to or is a descendent of target
+ fail with -ENOTEMPTY
+ * if new parent is equal to or is a descendent of source
+ fail with -ELOOP
+- * Lock both the source and the target provided they exist. If we
+- need to lock two inodes of different type (dir vs non-dir), we lock
+- the directory first. If we need to lock two inodes of the same type,
+- lock them in inode pointer order.
++ * Lock subdirectories involved (source before target).
++ * Lock non-directories involved, in inode pointer order.
+ * call the method.
+
+-All ->i_rwsem are taken exclusive. Again, we might get away with locking
+-the source (and target in exchange case) shared.
++All ->i_rwsem are taken exclusive.
+
+ The rules above obviously guarantee that all directories that are going to be
+ read, modified or removed by method will be locked by caller.
+@@ -67,6 +67,7 @@ If no directory is its own ancestor, the scheme above is deadlock-free.
+
+ Proof:
+
++[XXX: will be updated once we are done massaging the lock_rename()]
+ First of all, at any moment we have a linear ordering of the
+ objects - A < B iff (A is an ancestor of B) or (B is not an ancestor
+ of A and ptr(A) < ptr(B)).
+diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
+index 7be2900806c85..bd12f2f850ad3 100644
+--- a/Documentation/filesystems/locking.rst
++++ b/Documentation/filesystems/locking.rst
+@@ -101,7 +101,7 @@ symlink: exclusive
+ mkdir: exclusive
+ unlink: exclusive (both)
+ rmdir: exclusive (both)(see below)
+-rename: exclusive (all) (see below)
++rename: exclusive (both parents, some children) (see below)
+ readlink: no
+ get_link: no
+ setattr: exclusive
+@@ -123,6 +123,9 @@ get_offset_ctx no
+ Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_rwsem
+ exclusive on victim.
+ cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
++ ->unlink() and ->rename() have ->i_rwsem exclusive on all non-directories
++ involved.
++ ->rename() has ->i_rwsem exclusive on any subdirectory that changes parent.
+
+ See Documentation/filesystems/directory-locking.rst for more detailed discussion
+ of the locking scheme for directory operations.
+diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst
+index 0407f361f32a2..b28e5e3c239f6 100644
+--- a/Documentation/filesystems/overlayfs.rst
++++ b/Documentation/filesystems/overlayfs.rst
+@@ -145,7 +145,9 @@ filesystem, an overlay filesystem needs to record in the upper filesystem
+ that files have been removed. This is done using whiteouts and opaque
+ directories (non-directories are always opaque).
+
+-A whiteout is created as a character device with 0/0 device number.
++A whiteout is created as a character device with 0/0 device number or
++as a zero-size regular file with the xattr "trusted.overlay.whiteout".
++
+ When a whiteout is found in the upper level of a merged directory, any
+ matching name in the lower level is ignored, and the whiteout itself
+ is also hidden.
+@@ -154,6 +156,13 @@ A directory is made opaque by setting the xattr "trusted.overlay.opaque"
+ to "y". Where the upper filesystem contains an opaque directory, any
+ directory in the lower filesystem with the same name is ignored.
+
++An opaque directory should not contain any whiteouts, because they do not
++serve any purpose. A merge directory containing regular files with the xattr
++"trusted.overlay.whiteout", should be additionally marked by setting the xattr
++"trusted.overlay.opaque" to "x" on the merge directory itself.
++This is needed to avoid the overhead of checking the "trusted.overlay.whiteout"
++on all entries during readdir in the common case.
++
+ readdir
+ -------
+
+@@ -534,8 +543,9 @@ A lower dir with a regular whiteout will always be handled by the overlayfs
+ mount, so to support storing an effective whiteout file in an overlayfs mount an
+ alternative form of whiteout is supported. This form is a regular, zero-size
+ file with the "overlay.whiteout" xattr set, inside a directory with the
+-"overlay.whiteouts" xattr set. Such whiteouts are never created by overlayfs,
+-but can be used by userspace tools (like containers) that generate lower layers.
++"overlay.opaque" xattr set to "x" (see `whiteouts and opaque directories`_).
++These alternative whiteouts are never created by overlayfs, but can be used by
++userspace tools (like containers) that generate lower layers.
+ These alternative whiteouts can be escaped using the standard xattr escape
+ mechanism in order to properly nest to any depth.
+
+diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
+index 878e72b2f8b76..9100969e7de62 100644
+--- a/Documentation/filesystems/porting.rst
++++ b/Documentation/filesystems/porting.rst
+@@ -1061,3 +1061,21 @@ export_operations ->encode_fh() no longer has a default implementation to
+ encode FILEID_INO32_GEN* file handles.
+ Filesystems that used the default implementation may use the generic helper
+ generic_encode_ino32_fh() explicitly.
++
++---
++
++**mandatory**
++
++If ->rename() update of .. on cross-directory move needs an exclusion with
++directory modifications, do *not* lock the subdirectory in question in your
++->rename() - it's done by the caller now [that item should've been added in
++28eceeda130f "fs: Lock moved directories"].
++
++---
++
++**mandatory**
++
++On same-directory ->rename() the (tautological) update of .. is not protected
++by any locks; just don't do it if the old parent is the same as the new one.
++We really can't lock two subdirectories in same-directory rename - not without
++deadlocks.
+diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
+index 270d320407c7c..a98a7e04e86f3 100644
+--- a/Documentation/gpu/drm-kms.rst
++++ b/Documentation/gpu/drm-kms.rst
+@@ -548,6 +548,8 @@ Plane Composition Properties
+ .. kernel-doc:: drivers/gpu/drm/drm_blend.c
+ :doc: overview
+
++.. _damage_tracking_properties:
++
+ Damage Tracking Properties
+ --------------------------
+
+diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
+index 03fe5d1247be2..85bbe05436098 100644
+--- a/Documentation/gpu/todo.rst
++++ b/Documentation/gpu/todo.rst
+@@ -337,8 +337,8 @@ connector register/unregister fixes
+
+ Level: Intermediate
+
+-Remove load/unload callbacks from all non-DRIVER_LEGACY drivers
+----------------------------------------------------------------
++Remove load/unload callbacks
++----------------------------
+
+ The load/unload callbacks in struct &drm_driver are very much midlayers, plus
+ for historical reasons they get the ordering wrong (and we can't fix that)
+@@ -347,8 +347,7 @@ between setting up the &drm_driver structure and calling drm_dev_register().
+ - Rework drivers to no longer use the load/unload callbacks, directly coding the
+ load/unload sequence into the driver's probe function.
+
+-- Once all non-DRIVER_LEGACY drivers are converted, disallow the load/unload
+- callbacks for all modern drivers.
++- Once all drivers are converted, remove the load/unload callbacks.
+
+ Contact: Daniel Vetter
+
+diff --git a/Documentation/sphinx/kernel_abi.py b/Documentation/sphinx/kernel_abi.py
+index 49797c55479c4..5911bd0d79657 100644
+--- a/Documentation/sphinx/kernel_abi.py
++++ b/Documentation/sphinx/kernel_abi.py
+@@ -39,8 +39,6 @@ import sys
+ import re
+ import kernellog
+
+-from os import path
+-
+ from docutils import nodes, statemachine
+ from docutils.statemachine import ViewList
+ from docutils.parsers.rst import directives, Directive
+@@ -73,60 +71,26 @@ class KernelCmd(Directive):
+ }
+
+ def run(self):
+-
+ doc = self.state.document
+ if not doc.settings.file_insertion_enabled:
+ raise self.warning("docutils: file insertion disabled")
+
+- env = doc.settings.env
+- cwd = path.dirname(doc.current_source)
+- cmd = "get_abi.pl rest --enable-lineno --dir "
+- cmd += self.arguments[0]
+-
+- if 'rst' in self.options:
+- cmd += " --rst-source"
++ srctree = os.path.abspath(os.environ["srctree"])
+
+- srctree = path.abspath(os.environ["srctree"])
++ args = [
++ os.path.join(srctree, 'scripts/get_abi.pl'),
++ 'rest',
++ '--enable-lineno',
++ '--dir', os.path.join(srctree, 'Documentation', self.arguments[0]),
++ ]
+
+- fname = cmd
+-
+- # extend PATH with $(srctree)/scripts
+- path_env = os.pathsep.join([
+- srctree + os.sep + "scripts",
+- os.environ["PATH"]
+- ])
+- shell_env = os.environ.copy()
+- shell_env["PATH"] = path_env
+- shell_env["srctree"] = srctree
++ if 'rst' in self.options:
++ args.append('--rst-source')
+
+- lines = self.runCmd(cmd, shell=True, cwd=cwd, env=shell_env)
++ lines = subprocess.check_output(args, cwd=os.path.dirname(doc.current_source)).decode('utf-8')
+ nodeList = self.nestedParse(lines, self.arguments[0])
+ return nodeList
+
+- def runCmd(self, cmd, **kwargs):
+- u"""Run command ``cmd`` and return its stdout as unicode."""
+-
+- try:
+- proc = subprocess.Popen(
+- cmd
+- , stdout = subprocess.PIPE
+- , stderr = subprocess.PIPE
+- , **kwargs
+- )
+- out, err = proc.communicate()
+-
+- out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')
+-
+- if proc.returncode != 0:
+- raise self.severe(
+- u"command '%s' failed with return code %d"
+- % (cmd, proc.returncode)
+- )
+- except OSError as exc:
+- raise self.severe(u"problems with '%s' directive: %s."
+- % (self.name, ErrorString(exc)))
+- return out
+-
+ def nestedParse(self, lines, fname):
+ env = self.state.document.settings.env
+ content = ViewList()
+diff --git a/Makefile b/Makefile
+index 0564d3d093954..96a08c9f0faa1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 7
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+
+diff --git a/arch/alpha/kernel/rtc.c b/arch/alpha/kernel/rtc.c
+index fb3025396ac96..cfdf90bc8b3f8 100644
+--- a/arch/alpha/kernel/rtc.c
++++ b/arch/alpha/kernel/rtc.c
+@@ -80,7 +80,7 @@ init_rtc_epoch(void)
+ static int
+ alpha_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ {
+- int ret = mc146818_get_time(tm);
++ int ret = mc146818_get_time(tm, 10);
+
+ if (ret < 0) {
+ dev_err_ratelimited(dev, "unable to read current time\n");
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-apalis-ixora-v1.2.dts b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-ixora-v1.2.dts
+index 717decda0cebd..3ac7a45016205 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6q-apalis-ixora-v1.2.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-ixora-v1.2.dts
+@@ -76,6 +76,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enable_can1_power>;
+ regulator-name = "can1_supply";
++ startup-delay-us = <1000>;
+ };
+
+ reg_can2_supply: regulator-can2-supply {
+@@ -85,6 +86,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enable_can2_power>;
+ regulator-name = "can2_supply";
++ startup-delay-us = <1000>;
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
+index a88f186fcf03c..a976370264fcf 100644
+--- a/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
++++ b/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
+@@ -585,10 +585,10 @@
+ <&gcc GCC_USB30_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 51 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 11 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 10 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+@@ -612,7 +612,7 @@
+ compatible = "qcom,sdx55-pdc", "qcom,pdc";
+ reg = <0x0b210000 0x30000>;
+ qcom,pdc-ranges = <0 179 52>;
+- #interrupt-cells = <3>;
++ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+ };
+diff --git a/arch/arm/boot/dts/samsung/exynos4210-i9100.dts b/arch/arm/boot/dts/samsung/exynos4210-i9100.dts
+index a9ec1f6c1dea1..a076a1dfe41f8 100644
+--- a/arch/arm/boot/dts/samsung/exynos4210-i9100.dts
++++ b/arch/arm/boot/dts/samsung/exynos4210-i9100.dts
+@@ -527,6 +527,14 @@
+ regulator-name = "VT_CAM_1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
++
++ /*
++ * Force-enable this regulator; otherwise the
++ * kernel hangs very early in the boot process
++ * for about 12 seconds, without apparent
++ * reason.
++ */
++ regulator-always-on;
+ };
+
+ vcclcd_reg: LDO13 {
+diff --git a/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi b/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
+index d7954ff466b49..e5254e32aa8fc 100644
+--- a/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
++++ b/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
+@@ -434,6 +434,7 @@
+ };
+
+ &fimd {
++ samsung,invert-vclk;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 7b071a00425d2..456e8680e16ea 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1037,8 +1037,12 @@ config ARM64_ERRATUM_2645198
+
+ If unsure, say Y.
+
++config ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
++ bool
++
+ config ARM64_ERRATUM_2966298
+ bool "Cortex-A520: 2966298: workaround for speculatively executed unprivileged load"
++ select ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+ default y
+ help
+ This option adds the workaround for ARM Cortex-A520 erratum 2966298.
+@@ -1050,6 +1054,20 @@ config ARM64_ERRATUM_2966298
+
+ If unsure, say Y.
+
++config ARM64_ERRATUM_3117295
++ bool "Cortex-A510: 3117295: workaround for speculatively executed unprivileged load"
++ select ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
++ default y
++ help
++ This option adds the workaround for ARM Cortex-A510 erratum 3117295.
++
++ On an affected Cortex-A510 core, a speculatively executed unprivileged
++ load might leak data from a privileged level via a cache side channel.
++
++ Work around this problem by executing a TLBI before returning to EL0.
++
++ If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+ bool "Cavium erratum 22375, 24313"
+ default y
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
+index 47d1c5cb13f4e..aad4a2e5e67ec 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
+@@ -93,6 +93,7 @@
+ #size-cells = <0>;
+
+ vcc-supply = <&pm8916_l17>;
++ vio-supply = <&pm8916_l6>;
+
+ led@0 {
+ reg = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts
+index 419f35c1fc92e..241c3a73c82d7 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts
+@@ -118,6 +118,7 @@
+ #size-cells = <0>;
+
+ vcc-supply = <&pm8916_l16>;
++ vio-supply = <&pm8916_l5>;
+
+ led@0 {
+ reg = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 4f799b536a92a..057c1a0b7ab0b 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -2106,6 +2106,7 @@
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
++ qcom,controlled-remotely;
+ };
+
+ blsp_uart1: serial@78af000 {
+diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+index 324b5d26db400..f9b6122e65462 100644
+--- a/arch/arm64/boot/dts/qcom/msm8939.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+@@ -1682,6 +1682,7 @@
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
++ qcom,controlled-remotely;
+ };
+
+ blsp_uart1: serial@78af000 {
+diff --git a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-mido.dts b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-mido.dts
+index ed95d09cedb1e..6b9245cd8b0c3 100644
+--- a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-mido.dts
++++ b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-mido.dts
+@@ -111,6 +111,7 @@
+ reg = <0x45>;
+
+ vcc-supply = <&pm8953_l10>;
++ vio-supply = <&pm8953_l5>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-tissot.dts b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-tissot.dts
+index 61ff629c9bf34..9ac4f507e321a 100644
+--- a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-tissot.dts
++++ b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-tissot.dts
+@@ -104,6 +104,7 @@
+ reg = <0x45>;
+
+ vcc-supply = <&pm8953_l10>;
++ vio-supply = <&pm8953_l5>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
+index 1a1d3f92a5116..b0588f30f8f1a 100644
+--- a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
++++ b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
+@@ -113,6 +113,7 @@
+ reg = <0x45>;
+
+ vcc-supply = <&pm8953_l10>;
++ vio-supply = <&pm8953_l5>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+index c0365832c3152..5b7ffe2081593 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -2966,8 +2966,8 @@
+
+ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+- <&pdc 8 IRQ_TYPE_LEVEL_HIGH>,
+- <&pdc 9 IRQ_TYPE_LEVEL_HIGH>;
++ <&pdc 8 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 9 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index f7f616906f6f3..84de20c88a869 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -3685,9 +3685,9 @@
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+- <&pdc 14 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 14 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 15 IRQ_TYPE_EDGE_BOTH>,
+- <&pdc 17 IRQ_TYPE_EDGE_BOTH>;
++ <&pdc 17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hs_phy_irq",
+ "dp_hs_phy_irq",
+ "dm_hs_phy_irq",
+diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+index b5eb842878703..b389d49d3ec8c 100644
+--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+@@ -2552,10 +2552,10 @@
+ usb_prim: usb@a6f8800 {
+ compatible = "qcom,sc8180x-dwc3", "qcom,dwc3";
+ reg = <0 0x0a6f8800 0 0x400>;
+- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 8 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 9 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq",
+ "ss_phy_irq",
+ "dm_hs_phy_irq",
+@@ -2626,10 +2626,10 @@
+ "xo";
+ resets = <&gcc GCC_USB30_SEC_BCR>;
+ power-domains = <&gcc USB30_SEC_GDSC>;
+- interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 490 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 7 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 10 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 11 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
+index e4861c61a65bd..ffc4406422ae2 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
++++ b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
+@@ -458,6 +458,8 @@
+ };
+
+ &mdss0_dp3_phy {
++ compatible = "qcom,sc8280xp-edp-phy";
++
+ vdda-phy-supply = <&vreg_l6b>;
+ vdda-pll-supply = <&vreg_l3b>;
+
+diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
+index ba2043d67370a..730c8351bcaa3 100644
+--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
+@@ -1295,10 +1295,10 @@
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <150000000>;
+
+- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 8 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 9 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 9648505644ff1..91169e438b463 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -4053,10 +4053,10 @@
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <150000000>;
+
+- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc_intc 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc_intc 8 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc_intc 9 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+@@ -4104,10 +4104,10 @@
+ <&gcc GCC_USB30_SEC_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <150000000>;
+
+- interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 490 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc_intc 7 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc_intc 10 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc_intc 11 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index 0e1aa86758795..e302eb7821af0 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -3565,10 +3565,10 @@
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 8 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 9 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+@@ -3618,10 +3618,10 @@
+ <&gcc GCC_USB30_SEC_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+- interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 490 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 7 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 10 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 11 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
+index 5d7d567283e52..4237f2ee8fee3 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
+@@ -26,9 +26,11 @@
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+
++ motorcomm,auto-sleep-disabled;
+ motorcomm,clk-out-frequency-hz = <125000000>;
+ motorcomm,keep-pll-enabled;
+- motorcomm,auto-sleep-disabled;
++ motorcomm,rx-clk-drv-microamp = <5020>;
++ motorcomm,rx-data-drv-microamp = <5020>;
+
+ pinctrl-0 = <&eth_phy_reset_pin>;
+ pinctrl-names = "default";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
+index 8aa0499f9b032..f9f9749848bd9 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
+@@ -916,6 +916,7 @@
+ reg = <RK3588_PD_USB>;
+ clocks = <&cru PCLK_PHP_ROOT>,
+ <&cru ACLK_USB_ROOT>,
++ <&cru ACLK_USB>,
+ <&cru HCLK_USB_ROOT>,
+ <&cru HCLK_HOST0>,
+ <&cru HCLK_HOST_ARB0>,
+diff --git a/arch/arm64/boot/dts/sprd/ums512.dtsi b/arch/arm64/boot/dts/sprd/ums512.dtsi
+index 024be594c47d1..97ac550af2f11 100644
+--- a/arch/arm64/boot/dts/sprd/ums512.dtsi
++++ b/arch/arm64/boot/dts/sprd/ums512.dtsi
+@@ -96,7 +96,7 @@
+
+ CPU6: cpu@600 {
+ device_type = "cpu";
+- compatible = "arm,cortex-a55";
++ compatible = "arm,cortex-a75";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ cpu-idle-states = <&CORE_PD>;
+@@ -104,7 +104,7 @@
+
+ CPU7: cpu@700 {
+ device_type = "cpu";
+- compatible = "arm,cortex-a55";
++ compatible = "arm,cortex-a75";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ cpu-idle-states = <&CORE_PD>;
+diff --git a/arch/arm64/boot/install.sh b/arch/arm64/boot/install.sh
+index 7399d706967a4..9b7a09808a3dd 100755
+--- a/arch/arm64/boot/install.sh
++++ b/arch/arm64/boot/install.sh
+@@ -17,7 +17,8 @@
+ # $3 - kernel map file
+ # $4 - default install path (blank if root directory)
+
+-if [ "$(basename $2)" = "Image.gz" ]; then
++if [ "$(basename $2)" = "Image.gz" ] || [ "$(basename $2)" = "vmlinuz.efi" ]
++then
+ # Compressed install
+ echo "Installing compressed kernel"
+ base=vmlinuz
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index e29e0fea63fb6..967c7c7a4e7db 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -416,6 +416,19 @@ static struct midr_range broken_aarch32_aes[] = {
+ };
+ #endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */
+
++#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
++static const struct midr_range erratum_spec_unpriv_load_list[] = {
++#ifdef CONFIG_ARM64_ERRATUM_3117295
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
++#endif
++#ifdef CONFIG_ARM64_ERRATUM_2966298
++ /* Cortex-A520 r0p0 to r0p1 */
++ MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
++#endif
++ {},
++};
++#endif
++
+ const struct arm64_cpu_capabilities arm64_errata[] = {
+ #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
+ {
+@@ -713,12 +726,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
+ },
+ #endif
+-#ifdef CONFIG_ARM64_ERRATUM_2966298
++#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+ {
+- .desc = "ARM erratum 2966298",
+- .capability = ARM64_WORKAROUND_2966298,
++ .desc = "ARM errata 2966298, 3117295",
++ .capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
+ /* Cortex-A520 r0p0 - r0p1 */
+- ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
++ ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list),
+ },
+ #endif
+ #ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index a6030913cd58c..7fcbee0f6c0e4 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -428,16 +428,9 @@ alternative_else_nop_endif
+ ldp x28, x29, [sp, #16 * 14]
+
+ .if \el == 0
+-alternative_if ARM64_WORKAROUND_2966298
+- tlbi vale1, xzr
+- dsb nsh
+-alternative_else_nop_endif
+-alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
+- ldr lr, [sp, #S_LR]
+- add sp, sp, #PT_REGS_SIZE // restore sp
+- eret
+-alternative_else_nop_endif
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++ alternative_insn "b .L_skip_tramp_exit_\@", nop, ARM64_UNMAP_KERNEL_AT_EL0
++
+ msr far_el1, x29
+
+ ldr_this_cpu x30, this_cpu_vector, x29
+@@ -446,7 +439,18 @@ alternative_else_nop_endif
+ ldr lr, [sp, #S_LR] // restore x30
+ add sp, sp, #PT_REGS_SIZE // restore sp
+ br x29
++
++.L_skip_tramp_exit_\@:
+ #endif
++ ldr lr, [sp, #S_LR]
++ add sp, sp, #PT_REGS_SIZE // restore sp
++
++ /* This must be after the last explicit memory access */
++alternative_if ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
++ tlbi vale1, xzr
++ dsb nsh
++alternative_else_nop_endif
++ eret
+ .else
+ ldr lr, [sp, #S_LR]
+ add sp, sp, #PT_REGS_SIZE // restore sp
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 1559c706d32d1..7363f2eb98e81 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -1245,8 +1245,10 @@ void fpsimd_release_task(struct task_struct *dead_task)
+ */
+ void sme_alloc(struct task_struct *task, bool flush)
+ {
+- if (task->thread.sme_state && flush) {
+- memset(task->thread.sme_state, 0, sme_state_size(task));
++ if (task->thread.sme_state) {
++ if (flush)
++ memset(task->thread.sme_state, 0,
++ sme_state_size(task));
+ return;
+ }
+
+diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
+index b98c38288a9d3..3781ad1d0b265 100644
+--- a/arch/arm64/tools/cpucaps
++++ b/arch/arm64/tools/cpucaps
+@@ -84,7 +84,6 @@ WORKAROUND_2077057
+ WORKAROUND_2457168
+ WORKAROUND_2645198
+ WORKAROUND_2658417
+-WORKAROUND_2966298
+ WORKAROUND_AMPERE_AC03_CPU_38
+ WORKAROUND_TRBE_OVERWRITE_FILL_MODE
+ WORKAROUND_TSB_FLUSH_FAILURE
+@@ -100,3 +99,4 @@ WORKAROUND_NVIDIA_CARMEL_CNP
+ WORKAROUND_QCOM_FALKOR_E1003
+ WORKAROUND_REPEAT_TLBI
+ WORKAROUND_SPECULATIVE_AT
++WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
+index 5bca12d16e069..2fbf541d7b4f6 100644
+--- a/arch/loongarch/kernel/smp.c
++++ b/arch/loongarch/kernel/smp.c
+@@ -506,7 +506,6 @@ asmlinkage void start_secondary(void)
+ sync_counter();
+ cpu = raw_smp_processor_id();
+ set_my_cpu_offset(per_cpu_offset(cpu));
+- rcutree_report_cpu_starting(cpu);
+
+ cpu_probe();
+ constant_clockevent_init();
+diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
+index 2c0a411f23aa7..0b95d32b30c94 100644
+--- a/arch/loongarch/mm/tlb.c
++++ b/arch/loongarch/mm/tlb.c
+@@ -284,12 +284,16 @@ static void setup_tlb_handler(int cpu)
+ set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
+ set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
+ set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
+- }
++ } else {
++ int vec_sz __maybe_unused;
++ void *addr __maybe_unused;
++ struct page *page __maybe_unused;
++
++ /* Avoid lockdep warning */
++ rcutree_report_cpu_starting(cpu);
++
+ #ifdef CONFIG_NUMA
+- else {
+- void *addr;
+- struct page *page;
+- const int vec_sz = sizeof(exception_handlers);
++ vec_sz = sizeof(exception_handlers);
+
+ if (pcpu_handlers[cpu])
+ return;
+@@ -305,8 +309,8 @@ static void setup_tlb_handler(int cpu)
+ csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
+ csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
+ csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
+- }
+ #endif
++ }
+ }
+
+ void tlb_init(int cpu)
+diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
+index 5582a4ca1e9e3..7aa2c2360ff60 100644
+--- a/arch/mips/kernel/elf.c
++++ b/arch/mips/kernel/elf.c
+@@ -11,6 +11,7 @@
+
+ #include <asm/cpu-features.h>
+ #include <asm/cpu-info.h>
++#include <asm/fpu.h>
+
+ #ifdef CONFIG_MIPS_FP_SUPPORT
+
+@@ -309,6 +310,11 @@ void mips_set_personality_nan(struct arch_elf_state *state)
+ struct cpuinfo_mips *c = &boot_cpu_data;
+ struct task_struct *t = current;
+
++ /* Do this early so t->thread.fpu.fcr31 won't be clobbered in case
++ * we are preempted before the lose_fpu(0) in start_thread.
++ */
++ lose_fpu(0);
++
+ t->thread.fpu.fcr31 = c->fpu_csr31;
+ switch (state->nan_2008) {
+ case 0:
+diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
+index a3cf293658581..0c45767eacf67 100644
+--- a/arch/mips/lantiq/prom.c
++++ b/arch/mips/lantiq/prom.c
+@@ -108,10 +108,9 @@ void __init prom_init(void)
+ prom_init_cmdline();
+
+ #if defined(CONFIG_MIPS_MT_SMP)
+- if (cpu_has_mipsmt) {
+- lantiq_smp_ops = vsmp_smp_ops;
++ lantiq_smp_ops = vsmp_smp_ops;
++ if (cpu_has_mipsmt)
+ lantiq_smp_ops.init_secondary = lantiq_init_secondary;
+- register_smp_ops(&lantiq_smp_ops);
+- }
++ register_smp_ops(&lantiq_smp_ops);
+ #endif
+ }
+diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
+index 5dcb525a89954..6e368a4658b54 100644
+--- a/arch/mips/mm/init.c
++++ b/arch/mips/mm/init.c
+@@ -422,7 +422,12 @@ void __init paging_init(void)
+ (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
+ max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+ }
++
++ max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
++#else
++ max_mapnr = max_low_pfn;
+ #endif
++ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+
+ free_area_init(max_zone_pfns);
+ }
+@@ -458,13 +463,6 @@ void __init mem_init(void)
+ */
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));
+
+-#ifdef CONFIG_HIGHMEM
+- max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
+-#else
+- max_mapnr = max_low_pfn;
+-#endif
+- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+-
+ maar_init();
+ memblock_free_all();
+ setup_zero_pages(); /* Setup zeroed pages. */
+diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
+index 904ca3b9e7a71..c69f6d5946e90 100644
+--- a/arch/parisc/kernel/firmware.c
++++ b/arch/parisc/kernel/firmware.c
+@@ -123,10 +123,10 @@ static unsigned long f_extend(unsigned long address)
+ #ifdef CONFIG_64BIT
+ if(unlikely(parisc_narrow_firmware)) {
+ if((address & 0xff000000) == 0xf0000000)
+- return 0xf0f0f0f000000000UL | (u32)address;
++ return (0xfffffff0UL << 32) | (u32)address;
+
+ if((address & 0xf0000000) == 0xf0000000)
+- return 0xffffffff00000000UL | (u32)address;
++ return (0xffffffffUL << 32) | (u32)address;
+ }
+ #endif
+ return address;
+diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
+index 2b175ddf82f0b..aa8bb0208bcc8 100644
+--- a/arch/powerpc/configs/ps3_defconfig
++++ b/arch/powerpc/configs/ps3_defconfig
+@@ -24,6 +24,7 @@ CONFIG_PS3_VRAM=m
+ CONFIG_PS3_LPM=m
+ # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
+ CONFIG_KEXEC=y
++# CONFIG_PPC64_BIG_ENDIAN_ELF_ABI_V2 is not set
+ CONFIG_PPC_4K_PAGES=y
+ CONFIG_SCHED_SMT=y
+ CONFIG_PM=y
+diff --git a/arch/riscv/boot/dts/sophgo/sg2042.dtsi b/arch/riscv/boot/dts/sophgo/sg2042.dtsi
+index 93256540d0788..ead1cc35d88b2 100644
+--- a/arch/riscv/boot/dts/sophgo/sg2042.dtsi
++++ b/arch/riscv/boot/dts/sophgo/sg2042.dtsi
+@@ -93,144 +93,160 @@
+ <&cpu63_intc 3>;
+ };
+
+- clint_mtimer0: timer@70ac000000 {
++ clint_mtimer0: timer@70ac004000 {
+ compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
+- reg = <0x00000070 0xac000000 0x00000000 0x00007ff8>;
++ reg = <0x00000070 0xac004000 0x00000000 0x0000c000>;
++ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu0_intc 7>,
+ <&cpu1_intc 7>,
+ <&cpu2_intc 7>,
+ <&cpu3_intc 7>;
+ };
+
+- clint_mtimer1: timer@70ac010000 {
++ clint_mtimer1: timer@70ac014000 {
+ compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
+- reg = <0x00000070 0xac010000 0x00000000 0x00007ff8>;
++ reg = <0x00000070 0xac014000 0x00000000 0x0000c000>;
++ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu4_intc 7>,
+ <&cpu5_intc 7>,
+ <&cpu6_intc 7>,
+ <&cpu7_intc 7>;
+ };
+
+- clint_mtimer2: timer@70ac020000 {
++ clint_mtimer2: timer@70ac024000 {
+ compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
+- reg = <0x00000070 0xac020000 0x00000000 0x00007ff8>;
++ reg = <0x00000070 0xac024000 0x00000000 0x0000c000>;
++ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu8_intc 7>,
+ <&cpu9_intc 7>,
+ <&cpu10_intc 7>,
+ <&cpu11_intc 7>;
+ };
+
+- clint_mtimer3: timer@70ac030000 {
++ clint_mtimer3: timer@70ac034000 {
+ compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
+- reg = <0x00000070 0xac030000 0x00000000 0x00007ff8>;
++ reg = <0x00000070 0xac034000 0x00000000 0x0000c000>;
++ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu12_intc 7>,
+ <&cpu13_intc 7>,
+ <&cpu14_intc 7>,
+ <&cpu15_intc 7>;
+ };
+
+- clint_mtimer4: timer@70ac040000 {
++ clint_mtimer4: timer@70ac044000 {
+ compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
+- reg = <0x00000070 0xac040000 0x00000000 0x00007ff8>;
++ reg = <0x00000070 0xac044000 0x00000000 0x0000c000>;
++ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu16_intc 7>,
+ <&cpu17_intc 7>,
+ <&cpu18_intc 7>,
+ <&cpu19_intc 7>;
+ };
+
+- clint_mtimer5: timer@70ac050000 {
++ clint_mtimer5: timer@70ac054000 {
+ compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
+- reg = <0x00000070 0xac050000 0x00000000 0x00007ff8>;
++ reg = <0x00000070 0xac054000 0x00000000 0x0000c000>;
++ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu20_intc 7>,
+ <&cpu21_intc 7>,
+ <&cpu22_intc 7>,
+ <&cpu23_intc 7>;
+ };
+
+- clint_mtimer6: timer@70ac060000 {
++ clint_mtimer6: timer@70ac064000 {
+ compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
+- reg = <0x00000070 0xac060000 0x00000000 0x00007ff8>;
++ reg = <0x00000070 0xac064000 0x00000000 0x0000c000>;
++ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu24_intc 7>,
+ <&cpu25_intc 7>,
+ <&cpu26_intc 7>,
+ <&cpu27_intc 7>;
+ };
+
+- clint_mtimer7: timer@70ac070000 {
++ clint_mtimer7: timer@70ac074000 {
+ compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
+- reg = <0x00000070 0xac070000 0x00000000 0x00007ff8>;
++ reg = <0x00000070 0xac074000 0x00000000 0x0000c000>;
++ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu28_intc 7>,
+ <&cpu29_intc 7>,
+ <&cpu30_intc 7>,
+ <&cpu31_intc 7>;
+ };
+
+- clint_mtimer8: timer@70ac080000 {
++ clint_mtimer8: timer@70ac084000 {
+ compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
+- reg = <0x00000070 0xac080000 0x00000000 0x00007ff8>;
++ reg = <0x00000070 0xac084000 0x00000000 0x0000c000>;
++ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu32_intc 7>,
+ <&cpu33_intc 7>,
+ <&cpu34_intc 7>,
+ <&cpu35_intc 7>;
+ };
+
+- clint_mtimer9: timer@70ac090000 {
++ clint_mtimer9: timer@70ac094000 {
+ compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
+- reg = <0x00000070 0xac090000 0x00000000 0x00007ff8>;
++ reg = <0x00000070 0xac094000 0x00000000 0x0000c000>;
++ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu36_intc 7>,
+ <&cpu37_intc 7>,
+ <&cpu38_intc 7>,
+ <&cpu39_intc 7>;
+ };
+
+- clint_mtimer10: timer@70ac0a0000 {
++ clint_mtimer10: timer@70ac0a4000 {
+ compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
+- reg = <0x00000070 0xac0a0000 0x00000000 0x00007ff8>;
++ reg = <0x00000070 0xac0a4000 0x00000000 0x0000c000>;
++ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu40_intc 7>,
+ <&cpu41_intc 7>,
+ <&cpu42_intc 7>,
+ <&cpu43_intc 7>;
+ };
+
+- clint_mtimer11: timer@70ac0b0000 {
++ clint_mtimer11: timer@70ac0b4000 {
+ compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
+- reg = <0x00000070 0xac0b0000 0x00000000 0x00007ff8>;
++ reg = <0x00000070 0xac0b4000 0x00000000 0x0000c000>;
++ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu44_intc 7>,
+ <&cpu45_intc 7>,
+ <&cpu46_intc 7>,
+ <&cpu47_intc 7>;
+ };
+
+- clint_mtimer12: timer@70ac0c0000 {
++ clint_mtimer12: timer@70ac0c4000 {
+ compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
+- reg = <0x00000070 0xac0c0000 0x00000000 0x00007ff8>;
++ reg = <0x00000070 0xac0c4000 0x00000000 0x0000c000>;
++ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu48_intc 7>,
+ <&cpu49_intc 7>,
+ <&cpu50_intc 7>,
+ <&cpu51_intc 7>;
+ };
+
+- clint_mtimer13: timer@70ac0d0000 {
++ clint_mtimer13: timer@70ac0d4000 {
+ compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
+- reg = <0x00000070 0xac0d0000 0x00000000 0x00007ff8>;
++ reg = <0x00000070 0xac0d4000 0x00000000 0x0000c000>;
++ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu52_intc 7>,
+ <&cpu53_intc 7>,
+ <&cpu54_intc 7>,
+ <&cpu55_intc 7>;
+ };
+
+- clint_mtimer14: timer@70ac0e0000 {
++ clint_mtimer14: timer@70ac0e4000 {
+ compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
+- reg = <0x00000070 0xac0e0000 0x00000000 0x00007ff8>;
++ reg = <0x00000070 0xac0e4000 0x00000000 0x0000c000>;
++ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu56_intc 7>,
+ <&cpu57_intc 7>,
+ <&cpu58_intc 7>,
+ <&cpu59_intc 7>;
+ };
+
+- clint_mtimer15: timer@70ac0f0000 {
++ clint_mtimer15: timer@70ac0f4000 {
+ compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
+- reg = <0x00000070 0xac0f0000 0x00000000 0x00007ff8>;
++ reg = <0x00000070 0xac0f4000 0x00000000 0x0000c000>;
++ reg-names = "mtimecmp";
+ interrupts-extended = <&cpu60_intc 7>,
+ <&cpu61_intc 7>,
+ <&cpu62_intc 7>,
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index ab00235b018f8..74ffb2178f545 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -881,7 +881,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+ #define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
+
+ #ifdef CONFIG_COMPAT
+-#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)
++#define TASK_SIZE_32 (_AC(0x80000000, UL))
+ #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
+ #else
+diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
+index f19f861cda549..e1944ff0757a8 100644
+--- a/arch/riscv/include/asm/processor.h
++++ b/arch/riscv/include/asm/processor.h
+@@ -16,7 +16,7 @@
+
+ #ifdef CONFIG_64BIT
+ #define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1))
+-#define STACK_TOP_MAX TASK_SIZE_64
++#define STACK_TOP_MAX TASK_SIZE
+
+ #define arch_get_mmap_end(addr, len, flags) \
+ ({ \
+diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
+index 862834bb1d643..c9d59a5448b6c 100644
+--- a/arch/riscv/kernel/module.c
++++ b/arch/riscv/kernel/module.c
+@@ -723,8 +723,8 @@ static int add_relocation_to_accumulate(struct module *me, int type,
+
+ if (!bucket) {
+ kfree(entry);
+- kfree(rel_head);
+ kfree(rel_head->rel_entry);
++ kfree(rel_head);
+ return -ENOMEM;
+ }
+
+@@ -747,6 +747,10 @@ initialize_relocation_hashtable(unsigned int num_relocations,
+ {
+ /* Can safely assume that bits is not greater than sizeof(long) */
+ unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
++ /*
++ * When hashtable_size == 1, hashtable_bits == 0.
++ * This is valid because the hashing algorithm returns 0 in this case.
++ */
+ unsigned int hashtable_bits = ilog2(hashtable_size);
+
+ /*
+@@ -760,10 +764,10 @@ initialize_relocation_hashtable(unsigned int num_relocations,
+ hashtable_size <<= should_double_size;
+
+ *relocation_hashtable = kmalloc_array(hashtable_size,
+- sizeof(*relocation_hashtable),
++ sizeof(**relocation_hashtable),
+ GFP_KERNEL);
+ if (!*relocation_hashtable)
+- return -ENOMEM;
++ return 0;
+
+ __hash_init(*relocation_hashtable, hashtable_size);
+
+@@ -789,8 +793,8 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ hashtable_bits = initialize_relocation_hashtable(num_relocations,
+ &relocation_hashtable);
+
+- if (hashtable_bits < 0)
+- return hashtable_bits;
++ if (!relocation_hashtable)
++ return -ENOMEM;
+
+ INIT_LIST_HEAD(&used_buckets_list);
+
+diff --git a/arch/riscv/kernel/pi/cmdline_early.c b/arch/riscv/kernel/pi/cmdline_early.c
+index 68e786c84c949..f6d4dedffb842 100644
+--- a/arch/riscv/kernel/pi/cmdline_early.c
++++ b/arch/riscv/kernel/pi/cmdline_early.c
+@@ -38,8 +38,7 @@ static char *get_early_cmdline(uintptr_t dtb_pa)
+ if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
+ IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
+ fdt_cmdline_size == 0 /* CONFIG_CMDLINE_FALLBACK */) {
+- strncat(early_cmdline, CONFIG_CMDLINE,
+- COMMAND_LINE_SIZE - fdt_cmdline_size);
++ strlcat(early_cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ }
+
+ return early_cmdline;
+diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
+index c773820e4af90..c6fe5405de4a4 100644
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -597,7 +597,9 @@ static int ctr_aes_crypt(struct skcipher_request *req)
+ * final block may be < AES_BLOCK_SIZE, copy only nbytes
+ */
+ if (nbytes) {
+- cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
++ memset(buf, 0, AES_BLOCK_SIZE);
++ memcpy(buf, walk.src.virt.addr, nbytes);
++ cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
+ AES_BLOCK_SIZE, walk.iv);
+ memcpy(walk.dst.virt.addr, buf, nbytes);
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
+index 8b541e44151d4..55ee5567a5ea9 100644
+--- a/arch/s390/crypto/paes_s390.c
++++ b/arch/s390/crypto/paes_s390.c
+@@ -693,9 +693,11 @@ static int ctr_paes_crypt(struct skcipher_request *req)
+ * final block may be < AES_BLOCK_SIZE, copy only nbytes
+ */
+ if (nbytes) {
++ memset(buf, 0, AES_BLOCK_SIZE);
++ memcpy(buf, walk.src.virt.addr, nbytes);
+ while (1) {
+ if (cpacf_kmctr(ctx->fc, &param, buf,
+- walk.src.virt.addr, AES_BLOCK_SIZE,
++ buf, AES_BLOCK_SIZE,
+ walk.iv) == AES_BLOCK_SIZE)
+ break;
+ if (__paes_convert_key(ctx))
+diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
+index 0f279360838a4..30d117f9ad7ee 100644
+--- a/arch/sh/boards/mach-ecovec24/setup.c
++++ b/arch/sh/boards/mach-ecovec24/setup.c
+@@ -1220,7 +1220,7 @@ static int __init arch_setup(void)
+ lcdc_info.ch[0].num_modes = ARRAY_SIZE(ecovec_dvi_modes);
+
+ /* No backlight */
+- gpio_backlight_data.fbdev = NULL;
++ gpio_backlight_data.dev = NULL;
+
+ gpio_set_value(GPIO_PTA2, 1);
+ gpio_set_value(GPIO_PTU1, 1);
+diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
+index 21f9407be5d35..7e88705e907f4 100644
+--- a/arch/x86/include/asm/syscall_wrapper.h
++++ b/arch/x86/include/asm/syscall_wrapper.h
+@@ -58,12 +58,29 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
+ ,,regs->di,,regs->si,,regs->dx \
+ ,,regs->r10,,regs->r8,,regs->r9) \
+
++
++/* SYSCALL_PT_ARGS is Adapted from s390x */
++#define SYSCALL_PT_ARG6(m, t1, t2, t3, t4, t5, t6) \
++ SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5), m(t6, (regs->bp))
++#define SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5) \
++ SYSCALL_PT_ARG4(m, t1, t2, t3, t4), m(t5, (regs->di))
++#define SYSCALL_PT_ARG4(m, t1, t2, t3, t4) \
++ SYSCALL_PT_ARG3(m, t1, t2, t3), m(t4, (regs->si))
++#define SYSCALL_PT_ARG3(m, t1, t2, t3) \
++ SYSCALL_PT_ARG2(m, t1, t2), m(t3, (regs->dx))
++#define SYSCALL_PT_ARG2(m, t1, t2) \
++ SYSCALL_PT_ARG1(m, t1), m(t2, (regs->cx))
++#define SYSCALL_PT_ARG1(m, t1) m(t1, (regs->bx))
++#define SYSCALL_PT_ARGS(x, ...) SYSCALL_PT_ARG##x(__VA_ARGS__)
++
++#define __SC_COMPAT_CAST(t, a) \
++ (__typeof(__builtin_choose_expr(__TYPE_IS_L(t), 0, 0U))) \
++ (unsigned int)a
++
+ /* Mapping of registers to parameters for syscalls on i386 */
+ #define SC_IA32_REGS_TO_ARGS(x, ...) \
+- __MAP(x,__SC_ARGS \
+- ,,(unsigned int)regs->bx,,(unsigned int)regs->cx \
+- ,,(unsigned int)regs->dx,,(unsigned int)regs->si \
+- ,,(unsigned int)regs->di,,(unsigned int)regs->bp)
++ SYSCALL_PT_ARGS(x, __SC_COMPAT_CAST, \
++ __MAP(x, __SC_TYPE, __VA_ARGS__)) \
+
+ #define __SYS_STUB0(abi, name) \
+ long __##abi##_##name(const struct pt_regs *regs); \
+diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
+index 41eecf180b7f4..17adad4cbe78c 100644
+--- a/arch/x86/kernel/hpet.c
++++ b/arch/x86/kernel/hpet.c
+@@ -1438,7 +1438,7 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
+ memset(&curr_time, 0, sizeof(struct rtc_time));
+
+ if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) {
+- if (unlikely(mc146818_get_time(&curr_time) < 0)) {
++ if (unlikely(mc146818_get_time(&curr_time, 10) < 0)) {
+ pr_err_ratelimited("unable to read current time from RTC\n");
+ return IRQ_HANDLED;
+ }
+diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
+index 1309b9b053386..2e7066980f3e8 100644
+--- a/arch/x86/kernel/rtc.c
++++ b/arch/x86/kernel/rtc.c
+@@ -67,7 +67,7 @@ void mach_get_cmos_time(struct timespec64 *now)
+ return;
+ }
+
+- if (mc146818_get_time(&tm)) {
++ if (mc146818_get_time(&tm, 1000)) {
+ pr_err("Unable to read current time from RTC\n");
+ now->tv_sec = now->tv_nsec = 0;
+ return;
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 9c73a763ef883..438f79c564cfc 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -20,8 +20,6 @@ static int blkpg_do_ioctl(struct block_device *bdev,
+ struct blkpg_partition p;
+ sector_t start, length;
+
+- if (disk->flags & GENHD_FL_NO_PART)
+- return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
+diff --git a/block/partitions/core.c b/block/partitions/core.c
+index f47ffcfdfcec2..f14602022c5eb 100644
+--- a/block/partitions/core.c
++++ b/block/partitions/core.c
+@@ -447,6 +447,11 @@ int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
+ goto out;
+ }
+
++ if (disk->flags & GENHD_FL_NO_PART) {
++ ret = -EINVAL;
++ goto out;
++ }
++
+ if (partition_overlaps(disk, start, length, -1)) {
+ ret = -EBUSY;
+ goto out;
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 4fe95c4480473..85bc279b4233f 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -341,6 +341,7 @@ __crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put)
+ }
+
+ if (!strcmp(q->cra_driver_name, alg->cra_name) ||
++ !strcmp(q->cra_driver_name, alg->cra_driver_name) ||
+ !strcmp(q->cra_name, alg->cra_driver_name))
+ goto err;
+ }
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index f85f3515c258f..9c5a5f4dba5a6 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -579,7 +579,7 @@ bool dev_pm_skip_resume(struct device *dev)
+ }
+
+ /**
+- * device_resume_noirq - Execute a "noirq resume" callback for given device.
++ * __device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+@@ -587,7 +587,7 @@ bool dev_pm_skip_resume(struct device *dev)
+ * The driver of @dev will not receive interrupts while this function is being
+ * executed.
+ */
+-static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
++static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+ {
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+@@ -655,7 +655,13 @@ Skip:
+ Out:
+ complete_all(&dev->power.completion);
+ TRACE_RESUME(error);
+- return error;
++
++ if (error) {
++ suspend_stats.failed_resume_noirq++;
++ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
++ dpm_save_failed_dev(dev_name(dev));
++ pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
++ }
+ }
+
+ static bool is_async(struct device *dev)
+@@ -668,11 +674,15 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
+ {
+ reinit_completion(&dev->power.completion);
+
+- if (is_async(dev)) {
+- get_device(dev);
+- async_schedule_dev(func, dev);
++ if (!is_async(dev))
++ return false;
++
++ get_device(dev);
++
++ if (async_schedule_dev_nocall(func, dev))
+ return true;
+- }
++
++ put_device(dev);
+
+ return false;
+ }
+@@ -680,15 +690,19 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
+ static void async_resume_noirq(void *data, async_cookie_t cookie)
+ {
+ struct device *dev = data;
+- int error;
+-
+- error = device_resume_noirq(dev, pm_transition, true);
+- if (error)
+- pm_dev_err(dev, pm_transition, " async", error);
+
++ __device_resume_noirq(dev, pm_transition, true);
+ put_device(dev);
+ }
+
++static void device_resume_noirq(struct device *dev)
++{
++ if (dpm_async_fn(dev, async_resume_noirq))
++ return;
++
++ __device_resume_noirq(dev, pm_transition, false);
++}
++
+ static void dpm_noirq_resume_devices(pm_message_t state)
+ {
+ struct device *dev;
+@@ -698,14 +712,6 @@ static void dpm_noirq_resume_devices(pm_message_t state)
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+
+- /*
+- * Advanced the async threads upfront,
+- * in case the starting of async threads is
+- * delayed by non-async resuming devices.
+- */
+- list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+- dpm_async_fn(dev, async_resume_noirq);
+-
+ while (!list_empty(&dpm_noirq_list)) {
+ dev = to_device(dpm_noirq_list.next);
+ get_device(dev);
+@@ -713,17 +719,7 @@ static void dpm_noirq_resume_devices(pm_message_t state)
+
+ mutex_unlock(&dpm_list_mtx);
+
+- if (!is_async(dev)) {
+- int error;
+-
+- error = device_resume_noirq(dev, state, false);
+- if (error) {
+- suspend_stats.failed_resume_noirq++;
+- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+- dpm_save_failed_dev(dev_name(dev));
+- pm_dev_err(dev, state, " noirq", error);
+- }
+- }
++ device_resume_noirq(dev);
+
+ put_device(dev);
+
+@@ -751,14 +747,14 @@ void dpm_resume_noirq(pm_message_t state)
+ }
+
+ /**
+- * device_resume_early - Execute an "early resume" callback for given device.
++ * __device_resume_early - Execute an "early resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+-static int device_resume_early(struct device *dev, pm_message_t state, bool async)
++static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
+ {
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+@@ -811,21 +807,31 @@ Out:
+
+ pm_runtime_enable(dev);
+ complete_all(&dev->power.completion);
+- return error;
++
++ if (error) {
++ suspend_stats.failed_resume_early++;
++ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
++ dpm_save_failed_dev(dev_name(dev));
++ pm_dev_err(dev, state, async ? " async early" : " early", error);
++ }
+ }
+
+ static void async_resume_early(void *data, async_cookie_t cookie)
+ {
+ struct device *dev = data;
+- int error;
+-
+- error = device_resume_early(dev, pm_transition, true);
+- if (error)
+- pm_dev_err(dev, pm_transition, " async", error);
+
++ __device_resume_early(dev, pm_transition, true);
+ put_device(dev);
+ }
+
++static void device_resume_early(struct device *dev)
++{
++ if (dpm_async_fn(dev, async_resume_early))
++ return;
++
++ __device_resume_early(dev, pm_transition, false);
++}
++
+ /**
+ * dpm_resume_early - Execute "early resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+@@ -839,14 +845,6 @@ void dpm_resume_early(pm_message_t state)
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+
+- /*
+- * Advanced the async threads upfront,
+- * in case the starting of async threads is
+- * delayed by non-async resuming devices.
+- */
+- list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+- dpm_async_fn(dev, async_resume_early);
+-
+ while (!list_empty(&dpm_late_early_list)) {
+ dev = to_device(dpm_late_early_list.next);
+ get_device(dev);
+@@ -854,17 +852,7 @@ void dpm_resume_early(pm_message_t state)
+
+ mutex_unlock(&dpm_list_mtx);
+
+- if (!is_async(dev)) {
+- int error;
+-
+- error = device_resume_early(dev, state, false);
+- if (error) {
+- suspend_stats.failed_resume_early++;
+- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+- dpm_save_failed_dev(dev_name(dev));
+- pm_dev_err(dev, state, " early", error);
+- }
+- }
++ device_resume_early(dev);
+
+ put_device(dev);
+
+@@ -888,12 +876,12 @@ void dpm_resume_start(pm_message_t state)
+ EXPORT_SYMBOL_GPL(dpm_resume_start);
+
+ /**
+- * device_resume - Execute "resume" callbacks for given device.
++ * __device_resume - Execute "resume" callbacks for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ */
+-static int device_resume(struct device *dev, pm_message_t state, bool async)
++static void __device_resume(struct device *dev, pm_message_t state, bool async)
+ {
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+@@ -975,20 +963,30 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
+
+ TRACE_RESUME(error);
+
+- return error;
++ if (error) {
++ suspend_stats.failed_resume++;
++ dpm_save_failed_step(SUSPEND_RESUME);
++ dpm_save_failed_dev(dev_name(dev));
++ pm_dev_err(dev, state, async ? " async" : "", error);
++ }
+ }
+
+ static void async_resume(void *data, async_cookie_t cookie)
+ {
+ struct device *dev = data;
+- int error;
+
+- error = device_resume(dev, pm_transition, true);
+- if (error)
+- pm_dev_err(dev, pm_transition, " async", error);
++ __device_resume(dev, pm_transition, true);
+ put_device(dev);
+ }
+
++static void device_resume(struct device *dev)
++{
++ if (dpm_async_fn(dev, async_resume))
++ return;
++
++ __device_resume(dev, pm_transition, false);
++}
++
+ /**
+ * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
+ * @state: PM transition of the system being carried out.
+@@ -1008,27 +1006,17 @@ void dpm_resume(pm_message_t state)
+ pm_transition = state;
+ async_error = 0;
+
+- list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+- dpm_async_fn(dev, async_resume);
+-
+ while (!list_empty(&dpm_suspended_list)) {
+ dev = to_device(dpm_suspended_list.next);
++
+ get_device(dev);
+- if (!is_async(dev)) {
+- int error;
+
+- mutex_unlock(&dpm_list_mtx);
++ mutex_unlock(&dpm_list_mtx);
++
++ device_resume(dev);
+
+- error = device_resume(dev, state, false);
+- if (error) {
+- suspend_stats.failed_resume++;
+- dpm_save_failed_step(SUSPEND_RESUME);
+- dpm_save_failed_dev(dev_name(dev));
+- pm_dev_err(dev, state, "", error);
+- }
++ mutex_lock(&dpm_list_mtx);
+
+- mutex_lock(&dpm_list_mtx);
+- }
+ if (!list_empty(&dev->power.entry))
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
+
+diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
+index 72b7a92337b18..cd6e559648b21 100644
+--- a/drivers/base/power/trace.c
++++ b/drivers/base/power/trace.c
+@@ -120,7 +120,7 @@ static unsigned int read_magic_time(void)
+ struct rtc_time time;
+ unsigned int val;
+
+- if (mc146818_get_time(&time) < 0) {
++ if (mc146818_get_time(&time, 1000) < 0) {
+ pr_err("Unable to read current time from RTC\n");
+ return 0;
+ }
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index b6414e1e645b7..aa65313aabb8d 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -510,7 +510,7 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
+ struct iov_iter *iter, int msg_flags, int *sent)
+ {
+ int result;
+- struct msghdr msg;
++ struct msghdr msg = {};
+ unsigned int noreclaim_flag;
+
+ if (unlikely(!sock)) {
+@@ -526,10 +526,6 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
+ do {
+ sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
+ sock->sk->sk_use_task_frag = false;
+- msg.msg_name = NULL;
+- msg.msg_namelen = 0;
+- msg.msg_control = NULL;
+- msg.msg_controllen = 0;
+ msg.msg_flags = msg_flags | MSG_NOSIGNAL;
+
+ if (send)
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index a999b698b131f..1e2596c5efd81 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3452,14 +3452,15 @@ static bool rbd_lock_add_request(struct rbd_img_request *img_req)
+ static void rbd_lock_del_request(struct rbd_img_request *img_req)
+ {
+ struct rbd_device *rbd_dev = img_req->rbd_dev;
+- bool need_wakeup;
++ bool need_wakeup = false;
+
+ lockdep_assert_held(&rbd_dev->lock_rwsem);
+ spin_lock(&rbd_dev->lock_lists_lock);
+- rbd_assert(!list_empty(&img_req->lock_item));
+- list_del_init(&img_req->lock_item);
+- need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
+- list_empty(&rbd_dev->running_list));
++ if (!list_empty(&img_req->lock_item)) {
++ list_del_init(&img_req->lock_item);
++ need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
++ list_empty(&rbd_dev->running_list));
++ }
+ spin_unlock(&rbd_dev->lock_lists_lock);
+ if (need_wakeup)
+ complete(&rbd_dev->releasing_wait);
+@@ -3842,14 +3843,19 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
+ return;
+ }
+
+- list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
++ while (!list_empty(&rbd_dev->acquiring_list)) {
++ img_req = list_first_entry(&rbd_dev->acquiring_list,
++ struct rbd_img_request, lock_item);
+ mutex_lock(&img_req->state_mutex);
+ rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
++ if (!result)
++ list_move_tail(&img_req->lock_item,
++ &rbd_dev->running_list);
++ else
++ list_del_init(&img_req->lock_item);
+ rbd_img_schedule(img_req, result);
+ mutex_unlock(&img_req->state_mutex);
+ }
+-
+- list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
+ }
+
+ static bool locker_equal(const struct ceph_locker *lhs,
+diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
+index dcf627b36e829..d6653cbcf94a2 100644
+--- a/drivers/bus/mhi/host/main.c
++++ b/drivers/bus/mhi/host/main.c
+@@ -268,7 +268,8 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
+
+ static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
+ {
+- return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
++ return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len &&
++ !(addr & (sizeof(struct mhi_ring_element) - 1));
+ }
+
+ int mhi_destroy_device(struct device *dev, void *data)
+@@ -642,6 +643,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+ local_rp = tre_ring->rp;
+
++ read_unlock_bh(&mhi_chan->lock);
++
+ /* notify client */
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+@@ -667,6 +670,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+ kfree(buf_info->cb_buf);
+ }
+ }
++
++ read_lock_bh(&mhi_chan->lock);
+ }
+ break;
+ } /* CC_EOT */
+@@ -1122,17 +1127,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
+ return -EIO;
+
+- read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+-
+ ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
+- if (unlikely(ret)) {
+- ret = -EAGAIN;
+- goto exit_unlock;
+- }
++ if (unlikely(ret))
++ return -EAGAIN;
+
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
+ if (unlikely(ret))
+- goto exit_unlock;
++ return ret;
++
++ read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+
+ /* Packet is queued, take a usage ref to exit M3 if necessary
+ * for host->device buffer, balanced put is done on buffer completion
+@@ -1152,7 +1155,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+ if (dir == DMA_FROM_DEVICE)
+ mhi_cntrl->runtime_put(mhi_cntrl);
+
+-exit_unlock:
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+ return ret;
+@@ -1204,6 +1206,9 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ int eot, eob, chain, bei;
+ int ret;
+
++ /* Protect accesses for reading and incrementing WP */
++ write_lock_bh(&mhi_chan->lock);
++
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+@@ -1221,8 +1226,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+
+ if (!info->pre_mapped) {
+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+- if (ret)
++ if (ret) {
++ write_unlock_bh(&mhi_chan->lock);
+ return ret;
++ }
+ }
+
+ eob = !!(flags & MHI_EOB);
+@@ -1239,6 +1246,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ mhi_add_ring_element(mhi_cntrl, tre_ring);
+ mhi_add_ring_element(mhi_cntrl, buf_ring);
+
++ write_unlock_bh(&mhi_chan->lock);
++
+ return 0;
+ }
+
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index 420f155d251fb..a3bbdd6e60fca 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -23,10 +23,13 @@
+ #include <linux/sched.h>
+ #include <linux/sched/signal.h>
+ #include <linux/slab.h>
++#include <linux/string.h>
+ #include <linux/uaccess.h>
+
+ #define RNG_MODULE_NAME "hw_random"
+
++#define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES)
++
+ static struct hwrng *current_rng;
+ /* the current rng has been explicitly chosen by user via sysfs */
+ static int cur_rng_set_by_user;
+@@ -58,7 +61,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+
+ static size_t rng_buffer_size(void)
+ {
+- return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
++ return RNG_BUFFER_SIZE;
+ }
+
+ static void add_early_randomness(struct hwrng *rng)
+@@ -209,6 +212,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ size_t size, loff_t *offp)
+ {
++ u8 buffer[RNG_BUFFER_SIZE];
+ ssize_t ret = 0;
+ int err = 0;
+ int bytes_read, len;
+@@ -236,34 +240,37 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ if (bytes_read < 0) {
+ err = bytes_read;
+ goto out_unlock_reading;
++ } else if (bytes_read == 0 &&
++ (filp->f_flags & O_NONBLOCK)) {
++ err = -EAGAIN;
++ goto out_unlock_reading;
+ }
++
+ data_avail = bytes_read;
+ }
+
+- if (!data_avail) {
+- if (filp->f_flags & O_NONBLOCK) {
+- err = -EAGAIN;
+- goto out_unlock_reading;
+- }
+- } else {
+- len = data_avail;
++ len = data_avail;
++ if (len) {
+ if (len > size)
+ len = size;
+
+ data_avail -= len;
+
+- if (copy_to_user(buf + ret, rng_buffer + data_avail,
+- len)) {
++ memcpy(buffer, rng_buffer + data_avail, len);
++ }
++ mutex_unlock(&reading_mutex);
++ put_rng(rng);
++
++ if (len) {
++ if (copy_to_user(buf + ret, buffer, len)) {
+ err = -EFAULT;
+- goto out_unlock_reading;
++ goto out;
+ }
+
+ size -= len;
+ ret += len;
+ }
+
+- mutex_unlock(&reading_mutex);
+- put_rng(rng);
+
+ if (need_resched())
+ schedule_timeout_interruptible(1);
+@@ -274,6 +281,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ }
+ }
+ out:
++ memzero_explicit(buffer, sizeof(buffer));
+ return ret ? : err;
+
+ out_unlock_reading:
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index 1f6186475715e..1791d37fbc53c 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -1232,14 +1232,13 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
+ max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
+ min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+
++ WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
++ WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
++
+ max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
+ cpudata->max_limit_perf);
+ min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
+ cpudata->max_limit_perf);
+-
+- WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
+- WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
+-
+ value = READ_ONCE(cpudata->cppc_req_cached);
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index a534a1f7f1ee7..f5c69fa230d9b 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -526,6 +526,30 @@ static int intel_pstate_cppc_get_scaling(int cpu)
+ }
+ #endif /* CONFIG_ACPI_CPPC_LIB */
+
++static int intel_pstate_freq_to_hwp_rel(struct cpudata *cpu, int freq,
++ unsigned int relation)
++{
++ if (freq == cpu->pstate.turbo_freq)
++ return cpu->pstate.turbo_pstate;
++
++ if (freq == cpu->pstate.max_freq)
++ return cpu->pstate.max_pstate;
++
++ switch (relation) {
++ case CPUFREQ_RELATION_H:
++ return freq / cpu->pstate.scaling;
++ case CPUFREQ_RELATION_C:
++ return DIV_ROUND_CLOSEST(freq, cpu->pstate.scaling);
++ }
++
++ return DIV_ROUND_UP(freq, cpu->pstate.scaling);
++}
++
++static int intel_pstate_freq_to_hwp(struct cpudata *cpu, int freq)
++{
++ return intel_pstate_freq_to_hwp_rel(cpu, freq, CPUFREQ_RELATION_L);
++}
++
+ /**
+ * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
+ * @cpu: Target CPU.
+@@ -543,6 +567,7 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
+ int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
+ int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
+ int scaling = cpu->pstate.scaling;
++ int freq;
+
+ pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
+ pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
+@@ -556,16 +581,16 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
+ cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
+ perf_ctl_scaling);
+
+- cpu->pstate.max_pstate_physical =
+- DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling,
+- scaling);
++ freq = perf_ctl_max_phys * perf_ctl_scaling;
++ cpu->pstate.max_pstate_physical = intel_pstate_freq_to_hwp(cpu, freq);
+
+- cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
++ freq = cpu->pstate.min_pstate * perf_ctl_scaling;
++ cpu->pstate.min_freq = freq;
+ /*
+ * Cast the min P-state value retrieved via pstate_funcs.get_min() to
+ * the effective range of HWP performance levels.
+ */
+- cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
++ cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
+ }
+
+ static inline void update_turbo_state(void)
+@@ -2524,13 +2549,12 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
+ * abstract values to represent performance rather than pure ratios.
+ */
+ if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
+- int scaling = cpu->pstate.scaling;
+ int freq;
+
+ freq = max_policy_perf * perf_ctl_scaling;
+- max_policy_perf = DIV_ROUND_UP(freq, scaling);
++ max_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
+ freq = min_policy_perf * perf_ctl_scaling;
+- min_policy_perf = DIV_ROUND_UP(freq, scaling);
++ min_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
+ }
+
+ pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
+@@ -2904,18 +2928,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+
+- switch (relation) {
+- case CPUFREQ_RELATION_L:
+- target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
+- break;
+- case CPUFREQ_RELATION_H:
+- target_pstate = freqs.new / cpu->pstate.scaling;
+- break;
+- default:
+- target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
+- break;
+- }
+-
++ target_pstate = intel_pstate_freq_to_hwp_rel(cpu, freqs.new, relation);
+ target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
+
+ freqs.new = target_pstate * cpu->pstate.scaling;
+@@ -2933,7 +2946,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
+
+ update_turbo_state();
+
+- target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
++ target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
+
+ target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
+
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index 76fec47a13a4c..7bb656237fa0c 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -525,7 +525,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_region_params *p = &cxlr->params;
+ struct resource *res;
+- u32 remainder = 0;
++ u64 remainder = 0;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+@@ -545,7 +545,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
+ (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
+ return -ENXIO;
+
+- div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
++ div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
+ if (remainder)
+ return -EINVAL;
+
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index b3a68d5833bd6..907f50ab70ed9 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -1688,7 +1688,7 @@ static ssize_t trans_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+ struct devfreq *df = to_devfreq(dev);
+- ssize_t len;
++ ssize_t len = 0;
+ int i, j;
+ unsigned int max_state;
+
+@@ -1697,7 +1697,7 @@ static ssize_t trans_stat_show(struct device *dev,
+ max_state = df->max_state;
+
+ if (max_state == 0)
+- return sprintf(buf, "Not Supported.\n");
++ return scnprintf(buf, PAGE_SIZE, "Not Supported.\n");
+
+ mutex_lock(&df->lock);
+ if (!df->stop_polling &&
+@@ -1707,31 +1707,52 @@ static ssize_t trans_stat_show(struct device *dev,
+ }
+ mutex_unlock(&df->lock);
+
+- len = sprintf(buf, " From : To\n");
+- len += sprintf(buf + len, " :");
+- for (i = 0; i < max_state; i++)
+- len += sprintf(buf + len, "%10lu",
+- df->freq_table[i]);
++ len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n");
++ len += scnprintf(buf + len, PAGE_SIZE - len, " :");
++ for (i = 0; i < max_state; i++) {
++ if (len >= PAGE_SIZE - 1)
++ break;
++ len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu",
++ df->freq_table[i]);
++ }
++ if (len >= PAGE_SIZE - 1)
++ return PAGE_SIZE - 1;
+
+- len += sprintf(buf + len, " time(ms)\n");
++ len += scnprintf(buf + len, PAGE_SIZE - len, " time(ms)\n");
+
+ for (i = 0; i < max_state; i++) {
++ if (len >= PAGE_SIZE - 1)
++ break;
+ if (df->freq_table[i] == df->previous_freq)
+- len += sprintf(buf + len, "*");
++ len += scnprintf(buf + len, PAGE_SIZE - len, "*");
+ else
+- len += sprintf(buf + len, " ");
++ len += scnprintf(buf + len, PAGE_SIZE - len, " ");
++ if (len >= PAGE_SIZE - 1)
++ break;
++
++ len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu:",
++ df->freq_table[i]);
++ for (j = 0; j < max_state; j++) {
++ if (len >= PAGE_SIZE - 1)
++ break;
++ len += scnprintf(buf + len, PAGE_SIZE - len, "%10u",
++ df->stats.trans_table[(i * max_state) + j]);
++ }
++ if (len >= PAGE_SIZE - 1)
++ break;
++ len += scnprintf(buf + len, PAGE_SIZE - len, "%10llu\n", (u64)
++ jiffies64_to_msecs(df->stats.time_in_state[i]));
++ }
+
+- len += sprintf(buf + len, "%10lu:", df->freq_table[i]);
+- for (j = 0; j < max_state; j++)
+- len += sprintf(buf + len, "%10u",
+- df->stats.trans_table[(i * max_state) + j]);
++ if (len < PAGE_SIZE - 1)
++ len += scnprintf(buf + len, PAGE_SIZE - len, "Total transition : %u\n",
++ df->stats.total_trans);
+
+- len += sprintf(buf + len, "%10llu\n", (u64)
+- jiffies64_to_msecs(df->stats.time_in_state[i]));
++ if (len >= PAGE_SIZE - 1) {
++ pr_warn_once("devfreq transition table exceeds PAGE_SIZE. Disabling\n");
++ return -EFBIG;
+ }
+
+- len += sprintf(buf + len, "Total transition : %u\n",
+- df->stats.total_trans);
+ return len;
+ }
+
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index b7388ae62d7f1..491b222402216 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -1103,6 +1103,9 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
+ static void __dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan)
+ {
++ if (chan->local == NULL)
++ return;
++
+ WARN_ONCE(!device->device_release && chan->client_count,
+ "%s called while %d clients hold a reference\n",
+ __func__, chan->client_count);
+diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
+index 238a69bd0d6f5..75cae7ccae270 100644
+--- a/drivers/dma/fsl-edma-main.c
++++ b/drivers/dma/fsl-edma-main.c
+@@ -24,6 +24,8 @@
+ #define ARGS_RX BIT(0)
+ #define ARGS_REMOTE BIT(1)
+ #define ARGS_MULTI_FIFO BIT(2)
++#define ARGS_EVEN_CH BIT(3)
++#define ARGS_ODD_CH BIT(4)
+
+ static void fsl_edma_synchronize(struct dma_chan *chan)
+ {
+@@ -157,6 +159,12 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
+ fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
+ fsl_chan->is_multi_fifo = dma_spec->args[2] & ARGS_MULTI_FIFO;
+
++ if ((dma_spec->args[2] & ARGS_EVEN_CH) && (i & 0x1))
++ continue;
++
++ if ((dma_spec->args[2] & ARGS_ODD_CH) && !(i & 0x1))
++ continue;
++
+ if (!b_chmux && i == dma_spec->args[0]) {
+ chan = dma_get_slave_channel(chan);
+ chan->device->privatecnt++;
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 8f754f922217d..fa0f880beae64 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -802,6 +802,9 @@ err_bmap:
+
+ static void idxd_device_evl_free(struct idxd_device *idxd)
+ {
++ void *evl_log;
++ unsigned int evl_log_size;
++ dma_addr_t evl_dma;
+ union gencfg_reg gencfg;
+ union genctrl_reg genctrl;
+ struct device *dev = &idxd->pdev->dev;
+@@ -822,11 +825,15 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
+ iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
+ iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
+
+- dma_free_coherent(dev, evl->log_size, evl->log, evl->dma);
+ bitmap_free(evl->bmap);
++ evl_log = evl->log;
++ evl_log_size = evl->log_size;
++ evl_dma = evl->dma;
+ evl->log = NULL;
+ evl->size = IDXD_EVL_SIZE_MIN;
+ spin_unlock(&evl->lock);
++
++ dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
+ }
+
+ static void idxd_group_config_write(struct idxd_group *group)
+diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
+index 84a88029226fd..2c9c72d4b5a20 100644
+--- a/drivers/dma/xilinx/xdma.c
++++ b/drivers/dma/xilinx/xdma.c
+@@ -754,9 +754,9 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
+ if (ret)
+ goto out;
+
+- desc->completed_desc_num += complete_desc_num;
+-
+ if (desc->cyclic) {
++ desc->completed_desc_num = complete_desc_num;
++
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS,
+ &st);
+ if (ret)
+@@ -768,6 +768,8 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
+ goto out;
+ }
+
++ desc->completed_desc_num += complete_desc_num;
++
+ /*
+ * if all data blocks are transferred, remove and complete the request
+ */
+diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
+index 3568149b95620..f8fbf03942888 100644
+--- a/drivers/dpll/dpll_core.c
++++ b/drivers/dpll/dpll_core.c
+@@ -28,8 +28,6 @@ static u32 dpll_xa_id;
+ WARN_ON_ONCE(!xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
+ #define ASSERT_DPLL_NOT_REGISTERED(d) \
+ WARN_ON_ONCE(xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
+-#define ASSERT_PIN_REGISTERED(p) \
+- WARN_ON_ONCE(!xa_get_mark(&dpll_pin_xa, (p)->id, DPLL_REGISTERED))
+
+ struct dpll_device_registration {
+ struct list_head list;
+@@ -424,6 +422,53 @@ void dpll_device_unregister(struct dpll_device *dpll,
+ }
+ EXPORT_SYMBOL_GPL(dpll_device_unregister);
+
++static void dpll_pin_prop_free(struct dpll_pin_properties *prop)
++{
++ kfree(prop->package_label);
++ kfree(prop->panel_label);
++ kfree(prop->board_label);
++ kfree(prop->freq_supported);
++}
++
++static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
++ struct dpll_pin_properties *dst)
++{
++ memcpy(dst, src, sizeof(*dst));
++ if (src->freq_supported && src->freq_supported_num) {
++ size_t freq_size = src->freq_supported_num *
++ sizeof(*src->freq_supported);
++ dst->freq_supported = kmemdup(src->freq_supported,
++ freq_size, GFP_KERNEL);
++ if (!dst->freq_supported)
++ return -ENOMEM;
++ }
++ if (src->board_label) {
++ dst->board_label = kstrdup(src->board_label, GFP_KERNEL);
++ if (!dst->board_label)
++ goto err_board_label;
++ }
++ if (src->panel_label) {
++ dst->panel_label = kstrdup(src->panel_label, GFP_KERNEL);
++ if (!dst->panel_label)
++ goto err_panel_label;
++ }
++ if (src->package_label) {
++ dst->package_label = kstrdup(src->package_label, GFP_KERNEL);
++ if (!dst->package_label)
++ goto err_package_label;
++ }
++
++ return 0;
++
++err_package_label:
++ kfree(dst->panel_label);
++err_panel_label:
++ kfree(dst->board_label);
++err_board_label:
++ kfree(dst->freq_supported);
++ return -ENOMEM;
++}
++
+ static struct dpll_pin *
+ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
+ const struct dpll_pin_properties *prop)
+@@ -440,19 +485,23 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
+ if (WARN_ON(prop->type < DPLL_PIN_TYPE_MUX ||
+ prop->type > DPLL_PIN_TYPE_MAX)) {
+ ret = -EINVAL;
+- goto err;
++ goto err_pin_prop;
+ }
+- pin->prop = prop;
++ ret = dpll_pin_prop_dup(prop, &pin->prop);
++ if (ret)
++ goto err_pin_prop;
+ refcount_set(&pin->refcount, 1);
+ xa_init_flags(&pin->dpll_refs, XA_FLAGS_ALLOC);
+ xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
+ ret = xa_alloc(&dpll_pin_xa, &pin->id, pin, xa_limit_16b, GFP_KERNEL);
+ if (ret)
+- goto err;
++ goto err_xa_alloc;
+ return pin;
+-err:
++err_xa_alloc:
+ xa_destroy(&pin->dpll_refs);
+ xa_destroy(&pin->parent_refs);
++ dpll_pin_prop_free(&pin->prop);
++err_pin_prop:
+ kfree(pin);
+ return ERR_PTR(ret);
+ }
+@@ -512,6 +561,7 @@ void dpll_pin_put(struct dpll_pin *pin)
+ xa_destroy(&pin->dpll_refs);
+ xa_destroy(&pin->parent_refs);
+ xa_erase(&dpll_pin_xa, pin->id);
++ dpll_pin_prop_free(&pin->prop);
+ kfree(pin);
+ }
+ mutex_unlock(&dpll_lock);
+@@ -562,8 +612,6 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
+ WARN_ON(!ops->state_on_dpll_get) ||
+ WARN_ON(!ops->direction_get))
+ return -EINVAL;
+- if (ASSERT_DPLL_REGISTERED(dpll))
+- return -EINVAL;
+
+ mutex_lock(&dpll_lock);
+ if (WARN_ON(!(dpll->module == pin->module &&
+@@ -634,15 +682,13 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
+ unsigned long i, stop;
+ int ret;
+
+- if (WARN_ON(parent->prop->type != DPLL_PIN_TYPE_MUX))
++ if (WARN_ON(parent->prop.type != DPLL_PIN_TYPE_MUX))
+ return -EINVAL;
+
+ if (WARN_ON(!ops) ||
+ WARN_ON(!ops->state_on_pin_get) ||
+ WARN_ON(!ops->direction_get))
+ return -EINVAL;
+- if (ASSERT_PIN_REGISTERED(parent))
+- return -EINVAL;
+
+ mutex_lock(&dpll_lock);
+ ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);
+diff --git a/drivers/dpll/dpll_core.h b/drivers/dpll/dpll_core.h
+index 5585873c5c1b0..717f715015c74 100644
+--- a/drivers/dpll/dpll_core.h
++++ b/drivers/dpll/dpll_core.h
+@@ -44,7 +44,7 @@ struct dpll_device {
+ * @module: module of creator
+ * @dpll_refs: hold referencees to dplls pin was registered with
+ * @parent_refs: hold references to parent pins pin was registered with
+- * @prop: pointer to pin properties given by registerer
++ * @prop: pin properties copied from the registerer
+ * @rclk_dev_name: holds name of device when pin can recover clock from it
+ * @refcount: refcount
+ **/
+@@ -55,7 +55,7 @@ struct dpll_pin {
+ struct module *module;
+ struct xarray dpll_refs;
+ struct xarray parent_refs;
+- const struct dpll_pin_properties *prop;
++ struct dpll_pin_properties prop;
+ refcount_t refcount;
+ };
+
+diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
+index ce7cf736f0208..7cc99d627942e 100644
+--- a/drivers/dpll/dpll_netlink.c
++++ b/drivers/dpll/dpll_netlink.c
+@@ -278,17 +278,17 @@ dpll_msg_add_pin_freq(struct sk_buff *msg, struct dpll_pin *pin,
+ if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY, sizeof(freq), &freq,
+ DPLL_A_PIN_PAD))
+ return -EMSGSIZE;
+- for (fs = 0; fs < pin->prop->freq_supported_num; fs++) {
++ for (fs = 0; fs < pin->prop.freq_supported_num; fs++) {
+ nest = nla_nest_start(msg, DPLL_A_PIN_FREQUENCY_SUPPORTED);
+ if (!nest)
+ return -EMSGSIZE;
+- freq = pin->prop->freq_supported[fs].min;
++ freq = pin->prop.freq_supported[fs].min;
+ if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MIN, sizeof(freq),
+ &freq, DPLL_A_PIN_PAD)) {
+ nla_nest_cancel(msg, nest);
+ return -EMSGSIZE;
+ }
+- freq = pin->prop->freq_supported[fs].max;
++ freq = pin->prop.freq_supported[fs].max;
+ if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MAX, sizeof(freq),
+ &freq, DPLL_A_PIN_PAD)) {
+ nla_nest_cancel(msg, nest);
+@@ -304,9 +304,9 @@ static bool dpll_pin_is_freq_supported(struct dpll_pin *pin, u32 freq)
+ {
+ int fs;
+
+- for (fs = 0; fs < pin->prop->freq_supported_num; fs++)
+- if (freq >= pin->prop->freq_supported[fs].min &&
+- freq <= pin->prop->freq_supported[fs].max)
++ for (fs = 0; fs < pin->prop.freq_supported_num; fs++)
++ if (freq >= pin->prop.freq_supported[fs].min &&
++ freq <= pin->prop.freq_supported[fs].max)
+ return true;
+ return false;
+ }
+@@ -396,7 +396,7 @@ static int
+ dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
+ struct netlink_ext_ack *extack)
+ {
+- const struct dpll_pin_properties *prop = pin->prop;
++ const struct dpll_pin_properties *prop = &pin->prop;
+ struct dpll_pin_ref *ref;
+ int ret;
+
+@@ -525,6 +525,24 @@ __dpll_device_change_ntf(struct dpll_device *dpll)
+ return dpll_device_event_send(DPLL_CMD_DEVICE_CHANGE_NTF, dpll);
+ }
+
++static bool dpll_pin_available(struct dpll_pin *pin)
++{
++ struct dpll_pin_ref *par_ref;
++ unsigned long i;
++
++ if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))
++ return false;
++ xa_for_each(&pin->parent_refs, i, par_ref)
++ if (xa_get_mark(&dpll_pin_xa, par_ref->pin->id,
++ DPLL_REGISTERED))
++ return true;
++ xa_for_each(&pin->dpll_refs, i, par_ref)
++ if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id,
++ DPLL_REGISTERED))
++ return true;
++ return false;
++}
++
+ /**
+ * dpll_device_change_ntf - notify that the dpll device has been changed
+ * @dpll: registered dpll pointer
+@@ -551,7 +569,7 @@ dpll_pin_event_send(enum dpll_cmd event, struct dpll_pin *pin)
+ int ret = -ENOMEM;
+ void *hdr;
+
+- if (WARN_ON(!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED)))
++ if (!dpll_pin_available(pin))
+ return -ENODEV;
+
+ msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+@@ -689,7 +707,7 @@ dpll_pin_on_pin_state_set(struct dpll_pin *pin, u32 parent_idx,
+ int ret;
+
+ if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
+- pin->prop->capabilities)) {
++ pin->prop.capabilities)) {
+ NL_SET_ERR_MSG(extack, "state changing is not allowed");
+ return -EOPNOTSUPP;
+ }
+@@ -725,7 +743,7 @@ dpll_pin_state_set(struct dpll_device *dpll, struct dpll_pin *pin,
+ int ret;
+
+ if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
+- pin->prop->capabilities)) {
++ pin->prop.capabilities)) {
+ NL_SET_ERR_MSG(extack, "state changing is not allowed");
+ return -EOPNOTSUPP;
+ }
+@@ -752,7 +770,7 @@ dpll_pin_prio_set(struct dpll_device *dpll, struct dpll_pin *pin,
+ int ret;
+
+ if (!(DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE &
+- pin->prop->capabilities)) {
++ pin->prop.capabilities)) {
+ NL_SET_ERR_MSG(extack, "prio changing is not allowed");
+ return -EOPNOTSUPP;
+ }
+@@ -780,7 +798,7 @@ dpll_pin_direction_set(struct dpll_pin *pin, struct dpll_device *dpll,
+ int ret;
+
+ if (!(DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE &
+- pin->prop->capabilities)) {
++ pin->prop.capabilities)) {
+ NL_SET_ERR_MSG(extack, "direction changing is not allowed");
+ return -EOPNOTSUPP;
+ }
+@@ -810,8 +828,8 @@ dpll_pin_phase_adj_set(struct dpll_pin *pin, struct nlattr *phase_adj_attr,
+ int ret;
+
+ phase_adj = nla_get_s32(phase_adj_attr);
+- if (phase_adj > pin->prop->phase_range.max ||
+- phase_adj < pin->prop->phase_range.min) {
++ if (phase_adj > pin->prop.phase_range.max ||
++ phase_adj < pin->prop.phase_range.min) {
+ NL_SET_ERR_MSG_ATTR(extack, phase_adj_attr,
+ "phase adjust value not supported");
+ return -EINVAL;
+@@ -995,7 +1013,7 @@ dpll_pin_find(u64 clock_id, struct nlattr *mod_name_attr,
+ unsigned long i;
+
+ xa_for_each_marked(&dpll_pin_xa, i, pin, DPLL_REGISTERED) {
+- prop = pin->prop;
++ prop = &pin->prop;
+ cid_match = clock_id ? pin->clock_id == clock_id : true;
+ mod_match = mod_name_attr && module_name(pin->module) ?
+ !nla_strcmp(mod_name_attr,
+@@ -1102,6 +1120,10 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
+ }
+ pin = dpll_pin_find_from_nlattr(info);
+ if (!IS_ERR(pin)) {
++ if (!dpll_pin_available(pin)) {
++ nlmsg_free(msg);
++ return -ENODEV;
++ }
+ ret = dpll_msg_add_pin_handle(msg, pin);
+ if (ret) {
+ nlmsg_free(msg);
+@@ -1151,6 +1173,8 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+
+ xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED,
+ ctx->idx) {
++ if (!dpll_pin_available(pin))
++ continue;
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ &dpll_nl_family, NLM_F_MULTI,
+@@ -1413,7 +1437,8 @@ int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ }
+ info->user_ptr[0] = xa_load(&dpll_pin_xa,
+ nla_get_u32(info->attrs[DPLL_A_PIN_ID]));
+- if (!info->user_ptr[0]) {
++ if (!info->user_ptr[0] ||
++ !dpll_pin_available(info->user_ptr[0])) {
+ NL_SET_ERR_MSG(info->extack, "pin not found");
+ ret = -ENODEV;
+ goto unlock_dev;
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index 6146b2927d5c5..0ea1dd6e55c40 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -733,6 +733,11 @@ static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
+ void *cb_data;
+
+ partition = xa_load(&drv_info->partition_info, part_id);
++ if (!partition) {
++ pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
++ return;
++ }
++
+ read_lock(&partition->rw_lock);
+ callback = partition->callback;
+ cb_data = partition->cb_data;
+@@ -915,6 +920,11 @@ static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
+ return -EOPNOTSUPP;
+
+ partition = xa_load(&drv_info->partition_info, part_id);
++ if (!partition) {
++ pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
++ return -EINVAL;
++ }
++
+ write_lock(&partition->rw_lock);
+
+ cb_valid = !!partition->callback;
+@@ -1226,6 +1236,7 @@ static void ffa_setup_partitions(void)
+ ffa_device_unregister(ffa_dev);
+ continue;
+ }
++ rwlock_init(&info->rw_lock);
+ xa_store(&drv_info->partition_info, tpbuf->id, info, GFP_KERNEL);
+ }
+ drv_info->partition_count = count;
+@@ -1236,6 +1247,7 @@ static void ffa_setup_partitions(void)
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return;
++ rwlock_init(&info->rw_lock);
+ xa_store(&drv_info->partition_info, drv_info->vm_id, info, GFP_KERNEL);
+ drv_info->partition_count++;
+ }
+diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
+index 42b81c181d687..96b4f0694fed0 100644
+--- a/drivers/firmware/arm_scmi/clock.c
++++ b/drivers/firmware/arm_scmi/clock.c
+@@ -951,8 +951,7 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
+ scmi_clock_describe_rates_get(ph, clkid, clk);
+ }
+
+- if (PROTOCOL_REV_MAJOR(version) >= 0x2 &&
+- PROTOCOL_REV_MINOR(version) >= 0x1) {
++ if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
+ cinfo->clock_config_set = scmi_clock_config_set_v2;
+ cinfo->clock_config_get = scmi_clock_config_get_v2;
+ } else {
+diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
+index c46dc5215af7a..00b165d1f502d 100644
+--- a/drivers/firmware/arm_scmi/common.h
++++ b/drivers/firmware/arm_scmi/common.h
+@@ -314,6 +314,7 @@ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+ void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
+ bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
++bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem);
+
+ /* declarations for message passing transports */
+ struct scmi_msg_payld;
+diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
+index 19246ed1f01ff..b8d470417e8f9 100644
+--- a/drivers/firmware/arm_scmi/mailbox.c
++++ b/drivers/firmware/arm_scmi/mailbox.c
+@@ -45,6 +45,20 @@ static void rx_callback(struct mbox_client *cl, void *m)
+ {
+ struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
+
++ /*
++ * An A2P IRQ is NOT valid when received while the platform still has
++ * the ownership of the channel, because the platform at first releases
++ * the SMT channel and then sends the completion interrupt.
++ *
++ * This addresses a possible race condition in which a spurious IRQ from
++ * a previous timed-out reply which arrived late could be wrongly
++ * associated with the next pending transaction.
++ */
++ if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) {
++ dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n");
++ return;
++ }
++
+ scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
+ }
+
+diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
+index e11555de99ab8..d26eca37dc143 100644
+--- a/drivers/firmware/arm_scmi/perf.c
++++ b/drivers/firmware/arm_scmi/perf.c
+@@ -347,8 +347,8 @@ process_response_opp(struct scmi_opp *opp, unsigned int loop_idx,
+ }
+
+ static inline void
+-process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp,
+- unsigned int loop_idx,
++process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
++ struct scmi_opp *opp, unsigned int loop_idx,
+ const struct scmi_msg_resp_perf_describe_levels_v4 *r)
+ {
+ opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
+@@ -359,10 +359,23 @@ process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp,
+ /* Note that PERF v4 reports always five 32-bit words */
+ opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
+ if (dom->level_indexing_mode) {
++ int ret;
++
+ opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index);
+
+- xa_store(&dom->opps_by_idx, opp->level_index, opp, GFP_KERNEL);
+- xa_store(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
++ ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
++ GFP_KERNEL);
++ if (ret)
++ dev_warn(dev,
++ "Failed to add opps_by_idx at %d - ret:%d\n",
++ opp->level_index, ret);
++
++ ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
++ if (ret)
++ dev_warn(dev,
++ "Failed to add opps_by_lvl at %d - ret:%d\n",
++ opp->perf, ret);
++
+ hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
+ }
+ }
+@@ -379,7 +392,7 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
+ if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
+ process_response_opp(opp, st->loop_idx, response);
+ else
+- process_response_opp_v4(p->perf_dom, opp, st->loop_idx,
++ process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
+ response);
+ p->perf_dom->opp_count++;
+
+diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
+index 0493aa3c12bf5..3505735185033 100644
+--- a/drivers/firmware/arm_scmi/raw_mode.c
++++ b/drivers/firmware/arm_scmi/raw_mode.c
+@@ -1111,7 +1111,6 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
+ int i;
+
+ for (i = 0; i < num_chans; i++) {
+- void *xret;
+ struct scmi_raw_queue *q;
+
+ q = scmi_raw_queue_init(raw);
+@@ -1120,13 +1119,12 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
+ goto err_xa;
+ }
+
+- xret = xa_store(&raw->chans_q, channels[i], q,
++ ret = xa_insert(&raw->chans_q, channels[i], q,
+ GFP_KERNEL);
+- if (xa_err(xret)) {
++ if (ret) {
+ dev_err(dev,
+ "Fail to allocate Raw queue 0x%02X\n",
+ channels[i]);
+- ret = xa_err(xret);
+ goto err_xa;
+ }
+ }
+@@ -1322,6 +1320,12 @@ void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
+ dev = raw->handle->dev;
+ q = scmi_raw_queue_select(raw, idx,
+ SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
++ if (!q) {
++ dev_warn(dev,
++ "RAW[%d] - NO queue for chan 0x%X. Dropping report.\n",
++ idx, chan_id);
++ return;
++ }
+
+ /*
+ * Grab the msg_q_lock upfront to avoid a possible race between
+diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
+index 87b4f4d35f062..517d52fb3bcbb 100644
+--- a/drivers/firmware/arm_scmi/shmem.c
++++ b/drivers/firmware/arm_scmi/shmem.c
+@@ -122,3 +122,9 @@ bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+ }
++
++bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
++{
++ return (ioread32(&shmem->channel_status) &
++ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
++}
+diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
+index 82fcfd29bc4d2..3c197db42c9d9 100644
+--- a/drivers/firmware/sysfb.c
++++ b/drivers/firmware/sysfb.c
+@@ -128,4 +128,4 @@ unlock_mutex:
+ }
+
+ /* must execute after PCI subsystem for EFI quirks */
+-subsys_initcall_sync(sysfb_init);
++device_initcall(sysfb_init);
+diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
+index be7f2fa5aa7b6..806b88d8dfb7b 100644
+--- a/drivers/gpio/gpio-eic-sprd.c
++++ b/drivers/gpio/gpio-eic-sprd.c
+@@ -330,20 +330,27 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
+ switch (flow_type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 1);
++ sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 0);
++ sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_EDGE_BOTH:
+ state = sprd_eic_get(chip, offset);
+- if (state)
++ if (state) {
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_DBNC_IEV, 0);
+- else
++ sprd_eic_update(chip, offset,
++ SPRD_EIC_DBNC_IC, 1);
++ } else {
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_DBNC_IEV, 1);
++ sprd_eic_update(chip, offset,
++ SPRD_EIC_DBNC_IC, 1);
++ }
+ break;
+ default:
+ return -ENOTSUPP;
+@@ -355,20 +362,27 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
+ switch (flow_type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 0);
++ sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTCLR, 1);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 1);
++ sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTCLR, 1);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_EDGE_BOTH:
+ state = sprd_eic_get(chip, offset);
+- if (state)
++ if (state) {
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_LATCH_INTPOL, 0);
+- else
++ sprd_eic_update(chip, offset,
++ SPRD_EIC_LATCH_INTCLR, 1);
++ } else {
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_LATCH_INTPOL, 1);
++ sprd_eic_update(chip, offset,
++ SPRD_EIC_LATCH_INTCLR, 1);
++ }
+ break;
+ default:
+ return -ENOTSUPP;
+@@ -382,29 +396,34 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 1);
++ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 0);
++ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
++ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 1);
++ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_level_irq);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 0);
++ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_level_irq);
+ break;
+ default:
+@@ -417,29 +436,34 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 1);
++ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 0);
++ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
++ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 1);
++ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_level_irq);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 0);
++ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_level_irq);
+ break;
+ default:
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 88066826d8e5b..cd3e9657cc36d 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -1651,6 +1651,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+ .ignore_interrupt = "INT33FC:00@3",
+ },
+ },
++ {
++ /*
++ * Spurious wakeups from TP_ATTN# pin
++ * Found in BIOS 0.35
++ * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
++ */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
++ },
++ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++ .ignore_wake = "PNP0C50:00@8",
++ },
++ },
+ {} /* Terminating entry */
+ };
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 9d92ca1576771..50f57d4dfd8ff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -757,6 +757,7 @@ struct amdgpu_mqd_prop {
+ uint64_t eop_gpu_addr;
+ uint32_t hqd_pipe_priority;
+ uint32_t hqd_queue_priority;
++ bool allow_tunneling;
+ bool hqd_active;
+ };
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 75dc58470393f..3d126f2967a58 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -684,10 +684,8 @@ err:
+ void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
+ {
+ enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
+- /* Temporary workaround to fix issues observed in some
+- * compute applications when GFXOFF is enabled on GFX11.
+- */
+- if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11) {
++ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
++ ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
+ pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
+ amdgpu_gfx_off_ctrl(adev, idle);
+ } else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index 0431eafa86b53..c7d60dd0fb975 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -1963,8 +1963,6 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ break;
+ case IP_VERSION(9, 4, 3):
+- if (!amdgpu_exp_hw_support)
+- return -EINVAL;
+ amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index f4174eebe993c..a7ad77ed09ca4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -115,9 +115,10 @@
+ * 3.54.0 - Add AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS support
+ * - 3.55.0 - Add AMDGPU_INFO_GPUVM_FAULT query
+ * - 3.56.0 - Update IB start address and size alignment for decode and encode
++ * - 3.57.0 - Compute tunneling on GFX10+
+ */
+ #define KMS_DRIVER_MAJOR 3
+-#define KMS_DRIVER_MINOR 56
++#define KMS_DRIVER_MINOR 57
+ #define KMS_DRIVER_PATCHLEVEL 0
+
+ /*
+@@ -2265,8 +2266,6 @@ retry_init:
+
+ pci_wake_from_d3(pdev, TRUE);
+
+- pci_wake_from_d3(pdev, TRUE);
+-
+ /*
+ * For runpm implemented via BACO, PMFW will handle the
+ * timing for BACO in and out:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+index 73b8cca35bab8..c623e23049d1d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+@@ -121,6 +121,7 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
+ struct amdgpu_bo_param bp;
+ dma_addr_t dma_addr;
+ struct page *p;
++ unsigned long x;
+ int ret;
+
+ if (adev->gart.bo != NULL)
+@@ -130,6 +131,10 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
+ if (!p)
+ return -ENOMEM;
+
++ /* assign pages to this device */
++ for (x = 0; x < (1UL << order); x++)
++ p[x].mapping = adev->mman.bdev.dev_mapping;
++
+ /* If the hardware does not support UTCL2 snooping of the CPU caches
+ * then set_memory_wc() could be used as a workaround to mark the pages
+ * as write combine memory.
+@@ -223,6 +228,7 @@ void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
+ unsigned int order = get_order(adev->gart.table_size);
+ struct sg_table *sg = adev->gart.bo->tbo.sg;
+ struct page *p;
++ unsigned long x;
+ int ret;
+
+ ret = amdgpu_bo_reserve(adev->gart.bo, false);
+@@ -234,6 +240,8 @@ void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
+ sg_free_table(sg);
+ kfree(sg);
+ p = virt_to_page(adev->gart.ptr);
++ for (x = 0; x < (1UL << order); x++)
++ p[x].mapping = NULL;
+ __free_pages(p, order);
+
+ adev->gart.ptr = NULL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index 6a80d3ec887e9..45424ebf96814 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -642,6 +642,10 @@ static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
+ struct amdgpu_mqd_prop *prop)
+ {
+ struct amdgpu_device *adev = ring->adev;
++ bool is_high_prio_compute = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
++ amdgpu_gfx_is_high_priority_compute_queue(adev, ring);
++ bool is_high_prio_gfx = ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
++ amdgpu_gfx_is_high_priority_graphics_queue(adev, ring);
+
+ memset(prop, 0, sizeof(*prop));
+
+@@ -659,10 +663,8 @@ static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
+ */
+ prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ;
+
+- if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
+- amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) ||
+- (ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
+- amdgpu_gfx_is_high_priority_graphics_queue(adev, ring))) {
++ prop->allow_tunneling = is_high_prio_compute;
++ if (is_high_prio_compute || is_high_prio_gfx) {
+ prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ prop->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index 08916538a615f..8db880244324f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -221,8 +221,23 @@ static struct attribute *amdgpu_vram_mgr_attributes[] = {
+ NULL
+ };
+
++static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
++ struct attribute *attr, int i)
++{
++ struct device *dev = kobj_to_dev(kobj);
++ struct drm_device *ddev = dev_get_drvdata(dev);
++ struct amdgpu_device *adev = drm_to_adev(ddev);
++
++ if (attr == &dev_attr_mem_info_vram_vendor.attr &&
++ !adev->gmc.vram_vendor)
++ return 0;
++
++ return attr->mode;
++}
++
+ const struct attribute_group amdgpu_vram_mgr_attr_group = {
+- .attrs = amdgpu_vram_mgr_attributes
++ .attrs = amdgpu_vram_mgr_attributes,
++ .is_visible = amdgpu_vram_attrs_is_visible
+ };
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index c8a3bf01743f6..ecb622b7f9709 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -3996,16 +3996,13 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
+
+ if (!amdgpu_sriov_vf(adev)) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
+- err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
+- /* don't check this. There are apparently firmwares in the wild with
+- * incorrect size in the header
+- */
+- if (err == -ENODEV)
+- goto out;
++ err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ if (err)
+- dev_dbg(adev->dev,
+- "gfx10: amdgpu_ucode_request() failed \"%s\"\n",
+- fw_name);
++ goto out;
++
++ /* don't validate this firmware. There are apparently firmwares
++ * in the wild with incorrect size in the header
++ */
+ rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+ version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
+ version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
+@@ -6592,8 +6589,9 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
+ #ifdef __BIG_ENDIAN
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
+ #endif
+- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
++ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
++ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
++ prop->allow_tunneling);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ mqd->cp_hqd_pq_control = tmp;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 8ed4a6fb147a2..806a8cf90487e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -114,7 +114,7 @@ static const struct soc15_reg_golden golden_settings_gc_11_5_0[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL, 0xffffffff, 0xf37fff3f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xfffffffb, 0x00f40188),
+- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x8000b007),
++ SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x80009007),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf1ffffff, 0x00880007),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regPC_CONFIG_CNTL_1, 0xffffffff, 0x00010000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
+@@ -3838,8 +3838,9 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
+ (order_base_2(prop->queue_size / 4) - 1));
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
+ (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
+- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
++ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
++ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
++ prop->allow_tunneling);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ mqd->cp_hqd_pq_control = tmp;
+@@ -6328,6 +6329,9 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
++ bitmap = i * adev->gfx.config.max_sh_per_se + j;
++ if (!((gfx_v11_0_get_sa_active_bitmap(adev) >> bitmap) & 1))
++ continue;
+ mask = 1;
+ counter = 0;
+ gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 2ac5820e9c924..77e625f24cd07 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -1950,7 +1950,8 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
+ static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
+ u32 vram_info;
+
+- if (!amdgpu_sriov_vf(adev)) {
++	/* Only for dGPU, vendor information is reliable */
++ if (!amdgpu_sriov_vf(adev) && !(adev->flags & AMD_IS_APU)) {
+ vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
+ adev->gmc.vram_vendor = vram_info & 0xF;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+index 8b7fed9135269..22cbfa1bdaddb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+@@ -170,6 +170,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |=
+ ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
++ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+ pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+
+ m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+index 15277f1d5cf0a..d722cbd317834 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+@@ -224,6 +224,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |=
+ ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
++ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+ pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+
+ m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index a9bd020b165a1..9dbbaeb8c6cf1 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2656,6 +2656,7 @@ static int dm_suspend(void *handle)
+ hpd_rx_irq_work_suspend(dm);
+
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
++ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
+
+ return 0;
+ }
+@@ -2824,7 +2825,7 @@ static int dm_resume(void *handle)
+ bool need_hotplug = false;
+
+ if (dm->dc->caps.ips_support) {
+- dc_dmub_srv_exit_low_power_state(dm->dc);
++ dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
+ }
+
+ if (amdgpu_in_reset(adev)) {
+@@ -2851,6 +2852,7 @@ static int dm_resume(void *handle)
+ if (r)
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+
++ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+ dc_resume(dm->dc);
+@@ -2901,6 +2903,7 @@ static int dm_resume(void *handle)
+ }
+
+ /* power on hardware */
++ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+ /* program HPD filter */
+@@ -8768,7 +8771,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ if (new_con_state->crtc &&
+ new_con_state->crtc->state->active &&
+ drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) {
+- dc_dmub_srv_exit_low_power_state(dm->dc);
++ dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
+ break;
+ }
+ }
+@@ -10617,7 +10620,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
+ input->cea_total_length = total_length;
+ memcpy(input->payload, data, length);
+
+- res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
++ res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+ if (!res) {
+ DRM_ERROR("EDID CEA parser failed\n");
+ return false;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index 13a177d343762..45c972f2630d8 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -3647,12 +3647,16 @@ static int capabilities_show(struct seq_file *m, void *unused)
+ bool mall_supported = dc->caps.mall_size_total;
+ bool subvp_supported = dc->caps.subvp_fw_processing_delay_us;
+ unsigned int mall_in_use = false;
+- unsigned int subvp_in_use = dc->cap_funcs.get_subvp_en(dc, dc->current_state);
++ unsigned int subvp_in_use = false;
++
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ if (hubbub->funcs->get_mall_en)
+ hubbub->funcs->get_mall_en(hubbub, &mall_in_use);
+
++ if (dc->cap_funcs.get_subvp_en)
++ subvp_in_use = dc->cap_funcs.get_subvp_en(dc, dc->current_state);
++
+ seq_printf(m, "mall supported: %s, enabled: %s\n",
+ mall_supported ? "yes" : "no", mall_in_use ? "yes" : "no");
+ seq_printf(m, "sub-viewport supported: %s, enabled: %s\n",
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index aac98f93545a2..4fad9c478c8aa 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -965,6 +965,11 @@ int dm_helper_dmub_aux_transfer_sync(
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result)
+ {
++ if (!link->hpd_status) {
++ *operation_result = AUX_RET_ERROR_HPD_DISCON;
++ return -1;
++ }
++
+ return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload,
+ operation_result);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+index ab0adabf9dd4c..293a919d605d1 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+@@ -123,7 +123,7 @@ static void encoder_control_dmcub(
+ sizeof(cmd.digx_encoder_control.header);
+ cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig;
+
+- dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ static enum bp_result encoder_control_digx_v1_5(
+@@ -259,7 +259,7 @@ static void transmitter_control_dmcub(
+ sizeof(cmd.dig1_transmitter_control.header);
+ cmd.dig1_transmitter_control.transmitter_control.dig = *dig;
+
+- dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ static enum bp_result transmitter_control_v1_6(
+@@ -321,7 +321,7 @@ static void transmitter_control_dmcub_v1_7(
+ sizeof(cmd.dig1_transmitter_control.header);
+ cmd.dig1_transmitter_control.transmitter_control.dig_v1_7 = *dig;
+
+- dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ static enum bp_result transmitter_control_v1_7(
+@@ -429,7 +429,7 @@ static void set_pixel_clock_dmcub(
+ sizeof(cmd.set_pixel_clock.header);
+ cmd.set_pixel_clock.pixel_clock.clk = *clk;
+
+- dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ static enum bp_result set_pixel_clock_v7(
+@@ -796,7 +796,7 @@ static void enable_disp_power_gating_dmcub(
+ sizeof(cmd.enable_disp_power_gating.header);
+ cmd.enable_disp_power_gating.power_gating.pwr = *pwr;
+
+- dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ static enum bp_result enable_disp_power_gating_v2_1(
+@@ -1006,7 +1006,7 @@ static void enable_lvtma_control_dmcub(
+ pwrseq_instance;
+ cmd.lvtma_control.data.bypass_panel_control_wait =
+ bypass_panel_control_wait;
+- dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ static enum bp_result enable_lvtma_control(
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+index 3db4ef564b997..ce1386e22576e 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+@@ -253,7 +253,7 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
+ cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
+ cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+index 7326b75658461..59c2a3545db34 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+@@ -131,30 +131,27 @@ static int dcn314_get_active_display_cnt_wa(
+ return display_count;
+ }
+
+-static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
++static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
++ bool safe_to_lower, bool disable)
+ {
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++ struct pipe_ctx *pipe = safe_to_lower
++ ? &context->res_ctx.pipe_ctx[i]
++ : &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->top_pipe || pipe->prev_odm_pipe)
+ continue;
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+- struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;
+-
+ if (disable) {
+- if (stream_enc && stream_enc->funcs->disable_fifo)
+- pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc);
++ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
++ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+
+- pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+ reset_sync_context_for_pipe(dc, context, i);
+ } else {
+ pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+-
+- if (stream_enc && stream_enc->funcs->enable_fifo)
+- pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc);
+ }
+ }
+ }
+@@ -252,11 +249,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+- dcn314_disable_otg_wa(clk_mgr_base, context, true);
++ dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+- dcn314_disable_otg_wa(clk_mgr_base, context, false);
++ dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
+
+ update_dispclk = true;
+ }
+@@ -284,7 +281,7 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
+ cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
+ cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+index 8776055bbeaae..644da46373209 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+@@ -232,7 +232,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
+ cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
+ cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+index 09151cc56ce4f..12f3e8aa46d8d 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+@@ -239,7 +239,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
+ cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
+ cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ static void dcn316_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+index d5fde7d23fbf8..45ede6440a79f 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+@@ -349,7 +349,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
+ cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
+ cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 5c11852066459..bc098098345cf 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -519,7 +519,7 @@ dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
+ cmd.secure_display.roi_info.y_end = rect->y + rect->height;
+ }
+
+- dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
++ dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ }
+
+ static inline void
+@@ -3386,7 +3386,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
+
+ update_dirty_rect->panel_inst = panel_inst;
+ update_dirty_rect->pipe_idx = j;
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ }
+ }
+ }
+@@ -3626,7 +3626,7 @@ static void commit_planes_for_stream(struct dc *dc,
+ top_pipe_to_program = resource_get_otg_master_for_stream(
+ &context->res_ctx,
+ stream);
+-
++ ASSERT(top_pipe_to_program != NULL);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+@@ -4457,6 +4457,8 @@ static bool should_commit_minimal_transition_for_windowed_mpo_odm(struct dc *dc,
+
+ cur_pipe = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, stream);
+ new_pipe = resource_get_otg_master_for_stream(&context->res_ctx, stream);
++ if (!cur_pipe || !new_pipe)
++ return false;
+ cur_is_odm_in_use = resource_get_odm_slice_count(cur_pipe) > 1;
+ new_is_odm_in_use = resource_get_odm_slice_count(new_pipe) > 1;
+ if (cur_is_odm_in_use == new_is_odm_in_use)
+@@ -5213,7 +5215,7 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
+ );
+ }
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+ }
+@@ -5267,7 +5269,7 @@ bool dc_process_dmub_set_config_async(struct dc *dc,
+ cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
+ cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
+
+- if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
++ if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
+ /* command is not processed by dmub */
+ notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
+ return is_cmd_complete;
+@@ -5310,7 +5312,7 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
+ cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
+ cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
+
+- if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
++ if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ /* command is not processed by dmub */
+ return DC_ERROR_UNEXPECTED;
+
+@@ -5348,7 +5350,7 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+ cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
+ cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+index fe07160932d69..fc18b9dc946f9 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+@@ -724,7 +724,7 @@ void hwss_send_dmcub_cmd(union block_sequence_params *params)
+ union dmub_rb_cmd *cmd = params->send_dmcub_cmd_params.cmd;
+ enum dm_dmub_wait_type wait_type = params->send_dmcub_cmd_params.wait_type;
+
+- dm_execute_dmub_cmd(ctx, cmd, wait_type);
++ dc_wake_and_execute_dmub_cmd(ctx, cmd, wait_type);
+ }
+
+ void hwss_program_manual_trigger(union block_sequence_params *params)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index e4bb1e25ee3b2..990d775e4cea2 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -2170,6 +2170,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
+ for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
+ otg_master = resource_get_otg_master_for_stream(
+ &state->res_ctx, state->streams[stream_idx]);
++ if (!otg_master || otg_master->stream_res.tg == NULL) {
++ DC_LOG_DC("topology update: otg_master NULL stream_idx %d!\n", stream_idx);
++ return;
++ }
+ slice_count = resource_get_opp_heads_for_otg_master(otg_master,
+ &state->res_ctx, opp_heads);
+ for (slice_idx = 0; slice_idx < slice_count; slice_idx++) {
+@@ -2233,7 +2237,7 @@ static struct pipe_ctx *get_last_dpp_pipe_in_mpcc_combine(
+ }
+
+ static bool update_pipe_params_after_odm_slice_count_change(
+- const struct dc_stream_state *stream,
++ struct pipe_ctx *otg_master,
+ struct dc_state *context,
+ const struct resource_pool *pool)
+ {
+@@ -2243,9 +2247,12 @@ static bool update_pipe_params_after_odm_slice_count_change(
+
+ for (i = 0; i < pool->pipe_count && result; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+- if (pipe->stream == stream && pipe->plane_state)
++ if (pipe->stream == otg_master->stream && pipe->plane_state)
+ result = resource_build_scaling_params(pipe);
+ }
++
++ if (pool->funcs->build_pipe_pix_clk_params)
++ pool->funcs->build_pipe_pix_clk_params(otg_master);
+ return result;
+ }
+
+@@ -2928,7 +2935,7 @@ bool resource_update_pipes_for_stream_with_slice_count(
+ otg_master, new_ctx, pool);
+ if (result)
+ result = update_pipe_params_after_odm_slice_count_change(
+- otg_master->stream, new_ctx, pool);
++ otg_master, new_ctx, pool);
+ return result;
+ }
+
+@@ -2990,7 +2997,8 @@ bool dc_add_plane_to_context(
+
+ otg_master_pipe = resource_get_otg_master_for_stream(
+ &context->res_ctx, stream);
+- added = resource_append_dpp_pipes_for_plane_composition(context,
++ if (otg_master_pipe)
++ added = resource_append_dpp_pipes_for_plane_composition(context,
+ dc->current_state, pool, otg_master_pipe, plane_state);
+
+ if (added) {
+@@ -3766,7 +3774,8 @@ void dc_resource_state_construct(
+ dst_ctx->clk_mgr = dc->clk_mgr;
+
+ /* Initialise DIG link encoder resource tracking variables. */
+- link_enc_cfg_init(dc, dst_ctx);
++ if (dc->res_pool)
++ link_enc_cfg_init(dc, dst_ctx);
+ }
+
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+index 0e07699c1e835..61d1b4eadbee3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+@@ -282,17 +282,11 @@ bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
+ bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
+ unsigned int stream_mask)
+ {
+- struct dmub_srv *dmub;
+- const uint32_t timeout = 30;
+-
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+- dmub = dc_dmub_srv->dmub;
+-
+- return dmub_srv_send_gpint_command(
+- dmub, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
+- stream_mask, timeout) == DMUB_STATUS_OK;
++ return dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
++ stream_mask, NULL, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv)
+@@ -341,7 +335,7 @@ void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal
+ cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
+
+ // Send the command to the DMCUB.
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
+@@ -355,7 +349,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
+ cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
+
+ // Send the command to the DMCUB.
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
+@@ -448,7 +442,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
+ sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);
+
+ // Send the command to the DMCUB.
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+ }
+@@ -469,7 +463,7 @@ void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
+ cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data);
+
+ /* If command was processed, copy feature caps to dmub srv */
+- if (dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
++ if (dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+ cmd.query_feature_caps.header.ret_status == 0) {
+ memcpy(&dc_dmub_srv->dmub->feature_caps,
+ &cmd.query_feature_caps.query_feature_caps_data,
+@@ -494,7 +488,7 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
+ cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;
+
+ // If command was processed, copy feature caps to dmub srv
+- if (dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
++ if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+ cmd.visual_confirm_color.header.ret_status == 0) {
+ memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
+ &cmd.visual_confirm_color.visual_confirm_color_data,
+@@ -856,7 +850,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
+ cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
+ }
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
+@@ -1093,7 +1087,7 @@ void dc_send_update_cursor_info_to_dmu(
+ pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);
+
+ /* Combine 2nd cmds update_curosr_info to DMU */
+- dm_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+ }
+
+@@ -1107,25 +1101,20 @@ bool dc_dmub_check_min_version(struct dmub_srv *srv)
+ void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
+ {
+ struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
+- struct dmub_srv *dmub;
+- enum dmub_status status;
+- static const uint32_t timeout_us = 30;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
+ DC_LOG_ERROR("%s: invalid parameters.", __func__);
+ return;
+ }
+
+- dmub = dc_dmub_srv->dmub;
+-
+- status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1, 0x0010, timeout_us);
+- if (status != DMUB_STATUS_OK) {
++ if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1,
++ 0x0010, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
+ DC_LOG_ERROR("timeout updating trace buffer mask word\n");
+ return;
+ }
+
+- status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK, 0x0000, timeout_us);
+- if (status != DMUB_STATUS_OK) {
++ if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK,
++ 0x0000, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
+ DC_LOG_ERROR("timeout updating trace buffer mask word\n");
+ return;
+ }
+@@ -1143,6 +1132,9 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ enum dmub_status status;
+
++ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
++ return true;
++
+ if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
+ return true;
+
+@@ -1158,7 +1150,7 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
+ return true;
+ }
+
+-void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
++static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
+ {
+ union dmub_rb_cmd cmd = {0};
+
+@@ -1179,10 +1171,11 @@ void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
+ dc->hwss.set_idle_state(dc, true);
+ }
+
++ /* NOTE: This does not use the "wake" interface since this is part of the wake path. */
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+-void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
++static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
+ {
+ const uint32_t max_num_polls = 10000;
+ uint32_t allow_state = 0;
+@@ -1195,6 +1188,9 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
+ if (!dc->idle_optimizations_allowed)
+ return;
+
++ if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
++ return;
++
+ if (dc->hwss.get_idle_state &&
+ dc->hwss.set_idle_state &&
+ dc->clk_mgr->funcs->exit_low_power_state) {
+@@ -1251,3 +1247,131 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
+ ASSERT(0);
+ }
+
++void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState)
++{
++ struct dmub_srv *dmub;
++
++ if (!dc_dmub_srv)
++ return;
++
++ dmub = dc_dmub_srv->dmub;
++
++ if (powerState == DC_ACPI_CM_POWER_STATE_D0)
++ dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0);
++ else
++ dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
++}
++
++void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle)
++{
++ struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
++
++ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
++ return;
++
++ if (dc_dmub_srv->idle_allowed == allow_idle)
++ return;
++
++ /*
++ * Entering a low power state requires a driver notification.
++ * Powering up the hardware requires notifying PMFW and DMCUB.
++ * Clearing the driver idle allow requires a DMCUB command.
++	 * DMCUB commands require the DMCUB to be powered up and restored.
++ *
++ * Exit out early to prevent an infinite loop of DMCUB commands
++ * triggering exit low power - use software state to track this.
++ */
++ dc_dmub_srv->idle_allowed = allow_idle;
++
++ if (!allow_idle)
++ dc_dmub_srv_exit_low_power_state(dc);
++ else
++ dc_dmub_srv_notify_idle(dc, allow_idle);
++}
++
++bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
++ enum dm_dmub_wait_type wait_type)
++{
++ return dc_wake_and_execute_dmub_cmd_list(ctx, 1, cmd, wait_type);
++}
++
++bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
++ union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
++{
++ struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
++ bool result = false, reallow_idle = false;
++
++ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
++ return false;
++
++ if (count == 0)
++ return true;
++
++ if (dc_dmub_srv->idle_allowed) {
++ dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
++ reallow_idle = true;
++ }
++
++ /*
++ * These may have different implementations in DM, so ensure
++ * that we guide it to the expected helper.
++ */
++ if (count > 1)
++ result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type);
++ else
++ result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
++
++ if (result && reallow_idle)
++ dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
++
++ return result;
++}
++
++static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
++ uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
++{
++ struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
++ const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 0 : 30;
++ enum dmub_status status;
++
++ if (response)
++ *response = 0;
++
++ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
++ return false;
++
++ status = dmub_srv_send_gpint_command(dc_dmub_srv->dmub, command_code, param, wait_us);
++ if (status != DMUB_STATUS_OK) {
++ if (status == DMUB_STATUS_TIMEOUT && wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT)
++ return true;
++
++ return false;
++ }
++
++ if (response && wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
++ dmub_srv_get_gpint_response(dc_dmub_srv->dmub, response);
++
++ return true;
++}
++
++bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
++ uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
++{
++ struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
++ bool result = false, reallow_idle = false;
++
++ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
++ return false;
++
++ if (dc_dmub_srv->idle_allowed) {
++ dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
++ reallow_idle = true;
++ }
++
++ result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
++
++ if (result && reallow_idle)
++ dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
++
++ return result;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+index d4a60f53faab1..952bfb368886e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+@@ -50,6 +50,8 @@ struct dc_dmub_srv {
+
+ struct dc_context *ctx;
+ void *dm;
++
++ bool idle_allowed;
+ };
+
+ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
+@@ -100,6 +102,59 @@ void dc_dmub_srv_enable_dpia_trace(const struct dc *dc);
+ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index);
+
+ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait);
+-void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle);
+-void dc_dmub_srv_exit_low_power_state(const struct dc *dc);
++
++void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle);
++
++void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState);
++
++/**
++ * dc_wake_and_execute_dmub_cmd() - Wrapper for DMUB command execution.
++ *
++ * Refer to dc_wake_and_execute_dmub_cmd_list() for usage and limitations.
++ * This function is a convenience wrapper for a single command execution.
++ *
++ * @ctx: DC context
++ * @cmd: The command to send/receive
++ * @wait_type: The wait behavior for the execution
++ *
++ * Return: true on command submission success, false otherwise
++ */
++bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
++ enum dm_dmub_wait_type wait_type);
++
++/**
++ * dc_wake_and_execute_dmub_cmd_list() - Wrapper for DMUB command list execution.
++ *
++ * If the DMCUB hardware was asleep then it wakes the DMUB before
++ * executing the command and attempts to re-enter if the command
++ * submission was successful.
++ *
++ * This should be the preferred command submission interface provided
++ * the DC lock is acquired.
++ *
++ * Entry/exit out of idle power optimizations would need to be
++ * manually performed otherwise through dc_allow_idle_optimizations().
++ *
++ * @ctx: DC context
++ * @count: Number of commands to send/receive
++ * @cmd: Array of commands to send
++ * @wait_type: The wait behavior for the execution
++ *
++ * Return: true on command submission success, false otherwise
++ */
++bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
++ union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
++
++/**
++ * dc_wake_and_execute_gpint()
++ *
++ * @ctx: DC context
++ * @command_code: The command ID to send to DMCUB
++ * @param: The parameter to message DMCUB
++ * @response: Optional response out value - may be NULL.
++ * @wait_type: The wait behavior for the execution
++ */
++bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
++ uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type);
++
+ #endif /* _DMUB_DC_SRV_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
+index cb6eaddab7205..8f9a678256150 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
++++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
+@@ -50,7 +50,7 @@ static inline void submit_dmub_read_modify_write(
+ cmd_buf->header.payload_bytes =
+ sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;
+
+- dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
++ dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
+
+ memset(cmd_buf, 0, sizeof(*cmd_buf));
+
+@@ -67,7 +67,7 @@ static inline void submit_dmub_burst_write(
+ cmd_buf->header.payload_bytes =
+ sizeof(uint32_t) * offload->reg_seq_count;
+
+- dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
++ dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
+
+ memset(cmd_buf, 0, sizeof(*cmd_buf));
+
+@@ -80,7 +80,7 @@ static inline void submit_dmub_reg_wait(
+ {
+ struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
+
+- dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
++ dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
+
+ memset(cmd_buf, 0, sizeof(*cmd_buf));
+ offload->reg_seq_count = 0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+index 42c802afc4681..4cff36351f40a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+@@ -76,7 +76,7 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
+ cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask;
+ cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data);
+
+- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ void dmub_abm_init(struct abm *abm, uint32_t backlight)
+@@ -155,7 +155,7 @@ bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask)
+ cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask;
+ cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data);
+
+- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+ }
+@@ -186,7 +186,7 @@ void dmub_abm_init_config(struct abm *abm,
+
+ cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data);
+
+- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ }
+
+@@ -203,7 +203,7 @@ bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, un
+ cmd.abm_pause.abm_pause_data.panel_mask = panel_mask;
+ cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data);
+
+- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+ }
+@@ -246,7 +246,7 @@ bool dmub_abm_save_restore(
+
+ cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore);
+
+- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ // Copy iramtable data into local structure
+ memcpy((void *)pData, dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes);
+@@ -274,7 +274,7 @@ bool dmub_abm_set_pipe(struct abm *abm,
+ cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
+ cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
+
+- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+ }
+@@ -296,7 +296,7 @@ bool dmub_abm_set_backlight_level(struct abm *abm,
+ cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst);
+ cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
+
+- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+index 2aa0e01a6891b..ba1fec3016d5b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+@@ -47,7 +47,7 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
+ if (!lock)
+ cmd.lock_hw.lock_hw_data.should_release = 1;
+
+- dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
+index d8009b2dc56a0..98a778996e1a9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
+@@ -48,5 +48,5 @@ void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv)
+ sizeof(cmd.outbox1_enable.header);
+ cmd.outbox1_enable.enable = true;
+
+- dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+index 9d4170a356a20..3e243e407bb87 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+@@ -105,23 +105,18 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
+ */
+ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state, uint8_t panel_inst)
+ {
+- struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
+ uint32_t raw_state = 0;
+ uint32_t retry_count = 0;
+- enum dmub_status status;
+
+ do {
+ // Send gpint command and wait for ack
+- status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, panel_inst, 30);
+-
+- if (status == DMUB_STATUS_OK) {
+- // GPINT was executed, get response
+- dmub_srv_get_gpint_response(srv, &raw_state);
++ if (dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__GET_PSR_STATE, panel_inst, &raw_state,
++ DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
+ *state = convert_psr_state(raw_state);
+- } else
++ } else {
+ // Return invalid state when GPINT times out
+ *state = PSR_STATE_INVALID;
+-
++ }
+ } while (++retry_count <= 1000 && *state == PSR_STATE_INVALID);
+
+ // Assert if max retry hit
+@@ -171,7 +166,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
+ cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst;
+ cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
+
+- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+ }
+@@ -199,7 +194,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8
+
+ cmd.psr_enable.header.payload_bytes = 0; // Send header only
+
+- dm_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ /* Below loops 1000 x 500us = 500 ms.
+ * Exit PSR may need to wait 1-2 frames to power up. Timeout after at
+@@ -248,7 +243,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
+ cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
+ cmd.psr_set_level.psr_set_level_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
+ cmd.psr_set_level.psr_set_level_data.panel_inst = panel_inst;
+- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ /*
+@@ -267,7 +262,7 @@ static void dmub_psr_set_sink_vtotal_in_psr_active(struct dmub_psr *dmub,
+ cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_idle = psr_vtotal_idle;
+ cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_su = psr_vtotal_su;
+
+- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ /*
+@@ -286,7 +281,7 @@ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt
+ cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt;
+ cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst;
+
+- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ /*
+@@ -423,7 +418,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
+ copy_settings_data->relock_delay_frame_cnt = 2;
+ copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height;
+
+- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+ }
+@@ -444,7 +439,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst)
+ cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC;
+ cmd.psr_enable.header.payload_bytes = 0;
+
+- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ /*
+@@ -452,13 +447,11 @@ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst)
+ */
+ static void dmub_psr_get_residency(struct dmub_psr *dmub, uint32_t *residency, uint8_t panel_inst)
+ {
+- struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
+ uint16_t param = (uint16_t)(panel_inst << 8);
+
+ /* Send gpint command and wait for ack */
+- dmub_srv_send_gpint_command(srv, DMUB_GPINT__PSR_RESIDENCY, param, 30);
+-
+- dmub_srv_get_gpint_response(srv, residency);
++ dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__PSR_RESIDENCY, param, residency,
++ DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+ }
+
+ static const struct dmub_psr_funcs psr_funcs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index 0a422fbb14bc8..e73e597548375 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -1273,15 +1273,19 @@ static void build_clamping_params(struct dc_stream_state *stream)
+ stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
+ }
+
+-static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
++void dcn20_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
+ {
+-
+ get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
+-
+ pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
+- pipe_ctx->clock_source,
+- &pipe_ctx->stream_res.pix_clk_params,
+- &pipe_ctx->pll_settings);
++ pipe_ctx->clock_source,
++ &pipe_ctx->stream_res.pix_clk_params,
++ &pipe_ctx->pll_settings);
++}
++
++static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
++{
++
++ dcn20_build_pipe_pix_clk_params(pipe_ctx);
+
+ pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+index 37ecaccc5d128..4cee3fa11a7ff 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+@@ -165,6 +165,7 @@ enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx,
+ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);
+ enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
+ enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state);
++void dcn20_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx);
+
+ #endif /* __DC_RESOURCE_DCN20_H__ */
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+index 68cad55c72ab8..e13d69a22c1c7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+@@ -691,7 +691,7 @@ static void dmcub_PLAT_54186_wa(struct hubp *hubp,
+ cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
+
+ PERF_TRACE(); // TODO: remove after performance is stable.
+- dm_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ PERF_TRACE(); // TODO: remove after performance is stable.
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+index 4596f3bac1b4c..26be5fee7411d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+@@ -125,7 +125,7 @@ static bool query_dp_alt_from_dmub(struct link_encoder *enc,
+ cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data);
+ cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
+
+- if (!dm_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
++ if (!dc_wake_and_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return false;
+
+ return true;
+@@ -436,7 +436,7 @@ static bool link_dpia_control(struct dc_context *dc_ctx,
+
+ cmd.dig1_dpia_control.dpia_control = *dpia_control;
+
+- dm_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+index d849b1eaa4a5c..03248422d6ffd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+@@ -52,7 +52,7 @@ static bool dcn31_query_backlight_info(struct panel_cntl *panel_cntl, union dmub
+ cmd->panel_cntl.header.payload_bytes = sizeof(cmd->panel_cntl.data);
+ cmd->panel_cntl.data.pwrseq_inst = dcn31_panel_cntl->base.pwrseq_inst;
+
+- return dm_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
++ return dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+ }
+
+ static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
+@@ -85,7 +85,7 @@ static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
+ panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
+ cmd.panel_cntl.data.bl_pwm_ref_div2 =
+ panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2;
+- if (!dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
++ if (!dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return 0;
+
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL = cmd.panel_cntl.data.bl_pwm_cntl;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+index a2c4db2cebdd6..8234935433254 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+@@ -166,6 +166,16 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
+ {
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
++ REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
++ OPTC_SEG0_SRC_SEL, 0xf,
++ OPTC_SEG1_SRC_SEL, 0xf,
++ OPTC_SEG2_SRC_SEL, 0xf,
++ OPTC_SEG3_SRC_SEL, 0xf,
++ OPTC_NUM_OF_INPUT_SEGMENT, 0);
++
++ REG_UPDATE(OPTC_MEMORY_CONFIG,
++ OPTC_MEM_SEL, 0);
++
+ /* disable otg request until end of the first line
+ * in the vertical blank region
+ */
+@@ -198,6 +208,13 @@ static void optc32_disable_phantom_otg(struct timing_generator *optc)
+ {
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
++ REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
++ OPTC_SEG0_SRC_SEL, 0xf,
++ OPTC_SEG1_SRC_SEL, 0xf,
++ OPTC_SEG2_SRC_SEL, 0xf,
++ OPTC_SEG3_SRC_SEL, 0xf,
++ OPTC_NUM_OF_INPUT_SEGMENT, 0);
++
+ REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+index 89b072447dba9..e940dd0f92b73 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+@@ -2041,6 +2041,7 @@ static struct resource_funcs dcn32_res_pool_funcs = {
+ .retain_phantom_pipes = dcn32_retain_phantom_pipes,
+ .save_mall_state = dcn32_save_mall_state,
+ .restore_mall_state = dcn32_restore_mall_state,
++ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
+ };
+
+ static uint32_t read_pipe_fuses(struct dc_context *ctx)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+index f7de3eca1225a..4156a8cc2bc7e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+@@ -1609,6 +1609,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
+ .retain_phantom_pipes = dcn32_retain_phantom_pipes,
+ .save_mall_state = dcn32_save_mall_state,
+ .restore_mall_state = dcn32_restore_mall_state,
++ .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
+ };
+
+ static uint32_t read_pipe_fuses(struct dc_context *ctx)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c
+index a4a39f1638cf2..5b15475088503 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c
+@@ -138,6 +138,16 @@ static bool optc35_disable_crtc(struct timing_generator *optc)
+ {
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
++ REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
++ OPTC_SEG0_SRC_SEL, 0xf,
++ OPTC_SEG1_SRC_SEL, 0xf,
++ OPTC_SEG2_SRC_SEL, 0xf,
++ OPTC_SEG3_SRC_SEL, 0xf,
++ OPTC_NUM_OF_INPUT_SEGMENT, 0);
++
++ REG_UPDATE(OPTC_MEMORY_CONFIG,
++ OPTC_MEM_SEL, 0);
++
+ /* disable otg request until end of the first line
+ * in the vertical blank region
+ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index b46cde5250669..92e2ddc9ab7e2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -1237,15 +1237,11 @@ static void update_pipes_with_slice_table(struct dc *dc, struct dc_state *contex
+ {
+ int i;
+
+- for (i = 0; i < table->odm_combine_count; i++) {
++ for (i = 0; i < table->odm_combine_count; i++)
+ resource_update_pipes_for_stream_with_slice_count(context,
+ dc->current_state, dc->res_pool,
+ table->odm_combines[i].stream,
+ table->odm_combines[i].slice_count);
+- /* TODO: move this into the function above */
+- dcn20_build_mapped_resource(dc, context,
+- table->odm_combines[i].stream);
+- }
+
+ for (i = 0; i < table->mpc_combine_count; i++)
+ resource_update_pipes_for_plane_with_slice_count(context,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+index b95bf27f2fe2f..62ce95bac8f2b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+@@ -6329,7 +6329,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
+ mode_lib->ms.NoOfDPPThisState,
+ mode_lib->ms.dpte_group_bytes,
+ s->HostVMInefficiencyFactor,
+- mode_lib->ms.soc.hostvm_min_page_size_kbytes,
++ mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
+ mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
+
+ s->NextMaxVStartup = s->MaxVStartupAllPlanes[j];
+@@ -6542,7 +6542,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
+ mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
+ mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
+ mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
+- mode_lib->ms.soc.hostvm_min_page_size_kbytes,
++ mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
+ mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k],
+ mode_lib->ms.MetaRowBytes[j][k],
+ mode_lib->ms.DPTEBytesPerRow[j][k],
+@@ -7687,7 +7687,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
+ CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
+ CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
+ CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
+- CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
++ CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
+ CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
+ CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
+ CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState;
+@@ -7957,7 +7957,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
+ UseMinimumDCFCLK_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
+ UseMinimumDCFCLK_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
+ UseMinimumDCFCLK_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
+- UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
++ UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
+ UseMinimumDCFCLK_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
+ UseMinimumDCFCLK_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
+ UseMinimumDCFCLK_params->ImmediateFlipRequirement = s->ImmediateFlipRequiredFinal;
+@@ -8699,7 +8699,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
+ CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
+ CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
+ CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
+- CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
++ CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
+ CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
+ CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
+ CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = s->dummy_boolean_array[0];
+@@ -8805,7 +8805,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
+ mode_lib->ms.cache_display_cfg.hw.DPPPerSurface,
+ locals->dpte_group_bytes,
+ s->HostVMInefficiencyFactor,
+- mode_lib->ms.soc.hostvm_min_page_size_kbytes,
++ mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
+ mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
+
+ locals->TCalc = 24.0 / locals->DCFCLKDeepSleep;
+@@ -8995,7 +8995,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
+ CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable;
+ CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
+ CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
+- CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
++ CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
+ CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k];
+ CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
+ CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k];
+@@ -9240,7 +9240,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
+ mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
+ mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
+ mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
+- mode_lib->ms.soc.hostvm_min_page_size_kbytes,
++ mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
+ locals->PDEAndMetaPTEBytesFrame[k],
+ locals->MetaRowByte[k],
+ locals->PixelPTEBytesPerRow[k],
+@@ -9446,13 +9446,13 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
+ CalculateWatermarks_params->CompressedBufferSizeInkByte = locals->CompressedBufferSizeInkByte;
+
+ // Output
+- CalculateWatermarks_params->Watermark = &s->dummy_watermark; // Watermarks *Watermark
+- CalculateWatermarks_params->DRAMClockChangeSupport = &mode_lib->ms.support.DRAMClockChangeSupport[0];
+- CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0][0]; // dml_float_t *MaxActiveDRAMClockChangeLatencySupported[]
+- CalculateWatermarks_params->SubViewportLinesNeededInMALL = &mode_lib->ms.SubViewportLinesNeededInMALL[j]; // dml_uint_t SubViewportLinesNeededInMALL[]
+- CalculateWatermarks_params->FCLKChangeSupport = &mode_lib->ms.support.FCLKChangeSupport[0];
+- CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // dml_float_t *MaxActiveFCLKChangeLatencySupported
+- CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport[0];
++ CalculateWatermarks_params->Watermark = &locals->Watermark; // Watermarks *Watermark
++ CalculateWatermarks_params->DRAMClockChangeSupport = &locals->DRAMClockChangeSupport;
++ CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = locals->MaxActiveDRAMClockChangeLatencySupported; // dml_float_t *MaxActiveDRAMClockChangeLatencySupported[]
++ CalculateWatermarks_params->SubViewportLinesNeededInMALL = locals->SubViewportLinesNeededInMALL; // dml_uint_t SubViewportLinesNeededInMALL[]
++ CalculateWatermarks_params->FCLKChangeSupport = &locals->FCLKChangeSupport;
++ CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &locals->MaxActiveFCLKChangeLatencySupported; // dml_float_t *MaxActiveFCLKChangeLatencySupported
++ CalculateWatermarks_params->USRRetrainingSupport = &locals->USRRetrainingSupport;
+
+ CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
+ &mode_lib->scratch,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+index db06a5b749b40..bcc03b184e5ea 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+@@ -624,8 +624,8 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *
+ if (is_dp2p0_output_encoder(pipe))
+ out->OutputEncoder[location] = dml_dp2p0;
+ break;
+- out->OutputEncoder[location] = dml_edp;
+ case SIGNAL_TYPE_EDP:
++ out->OutputEncoder[location] = dml_edp;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+index 08783ad097d21..8e88dcaf88f5b 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+@@ -154,7 +154,7 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst,
+ cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
+ cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
+
+- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+ }
+@@ -173,7 +173,7 @@ static void dmub_abm_set_backlight(struct dc_context *dc, uint32_t backlight_pwm
+ cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst);
+ cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
+
+- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+index d71faf2ecd413..772dc0db916f7 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+@@ -750,7 +750,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ;
+ cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+
+ return true;
+ }
+@@ -872,7 +872,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ cmd.mall.cursor_height = cursor_attr.height;
+ cmd.mall.cursor_pitch = cursor_attr.pitch;
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ /* Use copied cursor, and it's okay to not switch back */
+ cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part;
+@@ -888,7 +888,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ cmd.mall.tmr_scale = tmr_scale;
+ cmd.mall.debug_bits = dc->debug.mall_error_as_fatal;
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+
+ return true;
+ }
+@@ -905,7 +905,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ cmd.mall.header.payload_bytes =
+ sizeof(cmd.mall) - sizeof(cmd.mall.header);
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+index 97798cee876e2..52656691ae484 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+@@ -415,7 +415,7 @@ void dcn31_z10_save_init(struct dc *dc)
+ cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
+ cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ void dcn31_z10_restore(const struct dc *dc)
+@@ -433,7 +433,7 @@ void dcn31_z10_restore(const struct dc *dc)
+ cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
+ cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_RESTORE;
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+
+ void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+index c1a9b746c43fe..5bf9e7c1e052f 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+@@ -277,7 +277,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
+ cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+
+ return true;
+ }
+@@ -311,7 +311,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
+ cmd.cab.cab_alloc_ways = (uint8_t)ways;
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+
+ return true;
+ }
+@@ -327,7 +327,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ cmd.cab.header.payload_bytes =
+ sizeof(cmd.cab) - sizeof(cmd.cab.header);
+
+- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
++ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+index 5a8258287438e..cf26d2ad40083 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+@@ -671,11 +671,7 @@ bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ }
+
+ // TODO: review other cases when idle optimization is allowed
+-
+- if (!enable)
+- dc_dmub_srv_exit_low_power_state(dc);
+- else
+- dc_dmub_srv_notify_idle(dc, enable);
++ dc_dmub_srv_apply_idle_power_optimizations(dc, enable);
+
+ return true;
+ }
+@@ -685,7 +681,7 @@ void dcn35_z10_restore(const struct dc *dc)
+ if (dc->debug.disable_z10)
+ return;
+
+- dc_dmub_srv_exit_low_power_state(dc);
++ dc_dmub_srv_apply_idle_power_optimizations(dc, false);
+
+ dcn31_z10_restore(dc);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index bac1420b1de84..10397d4dfb075 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -205,6 +205,7 @@ struct resource_funcs {
+ void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
+ void (*save_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config);
+ void (*restore_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config);
++ void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx);
+ };
+
+ struct audio_support{
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+index f8e01ca09d964..04d1ecd6e5934 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+@@ -875,11 +875,15 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi
+ {
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+- DC_LOGGER_INIT(dsc->ctx->logger);
+
+- if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
++ if (!pipe_ctx->stream->timing.flags.DSC)
+ return false;
+
++ if (!dsc)
++ return false;
++
++ DC_LOGGER_INIT(dsc->ctx->logger);
++
+ if (enable) {
+ struct dsc_config dsc_cfg;
+ uint8_t dsc_packed_pps[128];
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+index db87aa7b5c90f..2f11eaabbe5f6 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+@@ -1392,7 +1392,7 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
+ cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data);
+ cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx(
+ link->dc, link->link_enc->transmitter);
+- if (dm_execute_dmub_cmd(link->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
++ if (dc_wake_and_execute_dmub_cmd(link->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+ cmd.cable_id.header.ret_status == 1) {
+ cable_id->raw = cmd.cable_id.data.output_raw;
+ DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+index 0bb7491339098..982eda3c46f56 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+@@ -90,7 +90,8 @@ bool dpia_query_hpd_status(struct dc_link *link)
+ cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
+
+ /* Return HPD status reported by DMUB if query successfully executed. */
+- if (dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
++ if (dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
++ cmd.query_hpd.data.status == AUX_RET_SUCCESS)
+ is_hpd_high = cmd.query_hpd.data.result;
+
+ DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n",
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
+index 5c9a30211c109..fc50931c2aecb 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
+@@ -205,7 +205,7 @@ enum dc_status core_link_read_dpcd(
+ uint32_t extended_size;
+ /* size of the remaining partitioned address space */
+ uint32_t size_left_to_read;
+- enum dc_status status;
++ enum dc_status status = DC_ERROR_UNEXPECTED;
+ /* size of the next partition to be read from */
+ uint32_t partition_size;
+ uint32_t data_index = 0;
+@@ -234,7 +234,7 @@ enum dc_status core_link_write_dpcd(
+ {
+ uint32_t partition_size;
+ uint32_t data_index = 0;
+- enum dc_status status;
++ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ while (size) {
+ partition_size = dpcd_get_next_partition_size(address, size);
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+index e5cfaaef70b3f..bdae54c4648bc 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+@@ -927,8 +927,8 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state)
+ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
+ {
+ /* To-do: Setup Replay */
+- struct dc *dc = link->ctx->dc;
+- struct dmub_replay *replay = dc->res_pool->replay;
++ struct dc *dc;
++ struct dmub_replay *replay;
+ int i;
+ unsigned int panel_inst;
+ struct replay_context replay_context = { 0 };
+@@ -944,6 +944,10 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
+ if (!link)
+ return false;
+
++ dc = link->ctx->dc;
++
++ replay = dc->res_pool->replay;
++
+ if (!replay)
+ return false;
+
+@@ -972,8 +976,7 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
+
+ replay_context.line_time_in_ns = lineTimeInNs;
+
+- if (replay)
+- link->replay_settings.replay_feature_enabled =
++ link->replay_settings.replay_feature_enabled =
+ replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
+ if (link->replay_settings.replay_feature_enabled) {
+
+diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+index df63aa8f01e98..d1a4ed6f5916e 100644
+--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
++++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+@@ -150,6 +150,13 @@ enum dmub_memory_access_type {
+ DMUB_MEMORY_ACCESS_DMA
+ };
+
++/* enum dmub_power_state type - to track DC power state in dmub_srv */
++enum dmub_srv_power_state_type {
++ DMUB_POWER_STATE_UNDEFINED = 0,
++ DMUB_POWER_STATE_D0 = 1,
++ DMUB_POWER_STATE_D3 = 8
++};
++
+ /**
+ * struct dmub_region - dmub hw memory region
+ * @base: base address for region, must be 256 byte aligned
+@@ -485,6 +492,8 @@ struct dmub_srv {
+ /* Feature capabilities reported by fw */
+ struct dmub_feature_caps feature_caps;
+ struct dmub_visual_confirm_color visual_confirm_color;
++
++ enum dmub_srv_power_state_type power_state;
+ };
+
+ /**
+@@ -889,6 +898,18 @@ enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub);
+ */
+ void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
+
++/**
++ * dmub_srv_set_power_state() - Track DC power state in dmub_srv
++ * @dmub: The dmub service
++ * @power_state: DC power state setting
++ *
++ * Store DC power state in dmub_srv. If dmub_srv is in D3, then don't send messages to DMUB
++ *
++ * Return:
++ * void
++ */
++void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state);
++
+ #if defined(__cplusplus)
+ }
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+index 38360adc53d97..59d4e64845ca2 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+@@ -713,6 +713,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
+ dmub->hw_funcs.reset_release(dmub);
+
+ dmub->hw_init = true;
++ dmub->power_state = DMUB_POWER_STATE_D0;
+
+ return DMUB_STATUS_OK;
+ }
+@@ -766,6 +767,9 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
++ if (dmub->power_state != DMUB_POWER_STATE_D0)
++ return DMUB_STATUS_INVALID;
++
+ if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
+ dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
+ return DMUB_STATUS_HW_FAILURE;
+@@ -784,6 +788,9 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
++ if (dmub->power_state != DMUB_POWER_STATE_D0)
++ return DMUB_STATUS_INVALID;
++
+ /**
+ * Read back all the queued commands to ensure that they've
+ * been flushed to framebuffer memory. Otherwise DMCUB might
+@@ -1100,3 +1107,11 @@ void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_
+ subvp_index);
+ }
+ }
++
++void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state)
++{
++ if (!dmub || !dmub->hw_init)
++ return;
++
++ dmub->power_state = dmub_srv_power_state;
++}
+diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+index 1675314a3ff20..7806056ca1dd1 100644
+--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
++++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+@@ -841,6 +841,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
+ isPSRSUSupported = false;
+ else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
+ isPSRSUSupported = false;
++ else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
++ isPSRSUSupported = false;
+ else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
+ isPSRSUSupported = true;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index e1a5ee911dbbc..cd252e2b584c6 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -24,6 +24,7 @@
+
+ #include <linux/firmware.h>
+ #include <linux/pci.h>
++#include <linux/power_supply.h>
+ #include <linux/reboot.h>
+
+ #include "amdgpu.h"
+@@ -817,16 +818,8 @@ static int smu_late_init(void *handle)
+ * handle the switch automatically. Driver involvement
+ * is unnecessary.
+ */
+- if (!smu->dc_controlled_by_gpio) {
+- ret = smu_set_power_source(smu,
+- adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
+- SMU_POWER_SOURCE_DC);
+- if (ret) {
+- dev_err(adev->dev, "Failed to switch to %s mode!\n",
+- adev->pm.ac_power ? "AC" : "DC");
+- return ret;
+- }
+- }
++ adev->pm.ac_power = power_supply_is_system_supplied() > 0;
++ smu_set_ac_dc(smu);
+
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
+@@ -2502,6 +2495,7 @@ int smu_get_power_limit(void *handle,
+ case SMU_PPT_LIMIT_CURRENT:
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 2):
++ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+index 5a314d0316c1c..c7bfa68bf00f4 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+@@ -1442,10 +1442,12 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
+ case 0x3:
+ dev_dbg(adev->dev, "Switched to AC mode!\n");
+ schedule_work(&smu->interrupt_work);
++ adev->pm.ac_power = true;
+ break;
+ case 0x4:
+ dev_dbg(adev->dev, "Switched to DC mode!\n");
+ schedule_work(&smu->interrupt_work);
++ adev->pm.ac_power = false;
+ break;
+ case 0x7:
+ /*
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index cf1b84060bc3d..68981086b5a24 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -1379,10 +1379,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
+ case 0x3:
+ dev_dbg(adev->dev, "Switched to AC mode!\n");
+ smu_v13_0_ack_ac_dc_interrupt(smu);
++ adev->pm.ac_power = true;
+ break;
+ case 0x4:
+ dev_dbg(adev->dev, "Switched to DC mode!\n");
+ smu_v13_0_ack_ac_dc_interrupt(smu);
++ adev->pm.ac_power = false;
+ break;
+ case 0x7:
+ /*
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index 82c4e1f1c6f07..2a76064d909fb 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -2352,6 +2352,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ uint32_t power_limit, od_percent_upper, od_percent_lower;
++ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+
+ if (smu_v13_0_get_current_power_limit(smu, &power_limit))
+ power_limit = smu->adev->pm.ac_power ?
+@@ -2375,7 +2376,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
+ od_percent_upper, od_percent_lower, power_limit);
+
+ if (max_power_limit) {
+- *max_power_limit = power_limit * (100 + od_percent_upper);
++ *max_power_limit = msg_limit * (100 + od_percent_upper);
+ *max_power_limit /= 100;
+ }
+
+@@ -2970,6 +2971,55 @@ static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu,
+ return ret;
+ }
+
++static int smu_v13_0_0_set_power_limit(struct smu_context *smu,
++ enum smu_ppt_limit_type limit_type,
++ uint32_t limit)
++{
++ PPTable_t *pptable = smu->smu_table.driver_pptable;
++ SkuTable_t *skutable = &pptable->SkuTable;
++ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
++ struct smu_table_context *table_context = &smu->smu_table;
++ OverDriveTableExternal_t *od_table =
++ (OverDriveTableExternal_t *)table_context->overdrive_table;
++ int ret = 0;
++
++ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
++ return -EINVAL;
++
++ if (limit <= msg_limit) {
++ if (smu->current_power_limit > msg_limit) {
++ od_table->OverDriveTable.Ppt = 0;
++ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
++
++ ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
++ if (ret) {
++ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
++ return ret;
++ }
++ }
++ return smu_v13_0_set_power_limit(smu, limit_type, limit);
++ } else if (smu->od_enabled) {
++ ret = smu_v13_0_set_power_limit(smu, limit_type, msg_limit);
++ if (ret)
++ return ret;
++
++ od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
++ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
++
++ ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
++ if (ret) {
++ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
++ return ret;
++ }
++
++ smu->current_power_limit = limit;
++ } else {
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
+ .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
+ .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
+@@ -3024,7 +3074,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
+ .set_fan_control_mode = smu_v13_0_set_fan_control_mode,
+ .enable_mgpu_fan_boost = smu_v13_0_0_enable_mgpu_fan_boost,
+ .get_power_limit = smu_v13_0_0_get_power_limit,
+- .set_power_limit = smu_v13_0_set_power_limit,
++ .set_power_limit = smu_v13_0_0_set_power_limit,
+ .set_power_source = smu_v13_0_set_power_source,
+ .get_power_profile_mode = smu_v13_0_0_get_power_profile_mode,
+ .set_power_profile_mode = smu_v13_0_0_set_power_profile_mode,
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+index b64e07b759374..8cd2b8cc3d58a 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+@@ -924,7 +924,9 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
+ if (i < (clocks.num_levels - 1))
+ clk2 = clocks.data[i + 1].clocks_in_khz / 1000;
+
+- if (curr_clk >= clk1 && curr_clk < clk2) {
++ if (curr_clk == clk1) {
++ level = i;
++ } else if (curr_clk >= clk1 && curr_clk < clk2) {
+ level = (curr_clk - clk1) <= (clk2 - curr_clk) ?
+ i :
+ i + 1;
+@@ -2190,17 +2192,18 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
+ continue;
+ }
+
+- if (ret) {
+- dev_err(adev->dev,
+- "failed to send mode2 message \tparam: 0x%08x error code %d\n",
+- SMU_RESET_MODE_2, ret);
++ if (ret)
+ goto out;
+- }
++
+ } while (ret == -ETIME && timeout);
+
+ out:
+ mutex_unlock(&smu->message_lock);
+
++ if (ret)
++ dev_err(adev->dev, "failed to send mode2 reset, error code %d",
++ ret);
++
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index 81eafed76045e..d380a53e8f772 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -2316,6 +2316,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
+ PPTable_t *pptable = table_context->driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ uint32_t power_limit, od_percent_upper, od_percent_lower;
++ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+
+ if (smu_v13_0_get_current_power_limit(smu, &power_limit))
+ power_limit = smu->adev->pm.ac_power ?
+@@ -2339,7 +2340,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
+ od_percent_upper, od_percent_lower, power_limit);
+
+ if (max_power_limit) {
+- *max_power_limit = power_limit * (100 + od_percent_upper);
++ *max_power_limit = msg_limit * (100 + od_percent_upper);
+ *max_power_limit /= 100;
+ }
+
+@@ -2567,6 +2568,55 @@ static int smu_v13_0_7_set_df_cstate(struct smu_context *smu,
+ NULL);
+ }
+
++static int smu_v13_0_7_set_power_limit(struct smu_context *smu,
++ enum smu_ppt_limit_type limit_type,
++ uint32_t limit)
++{
++ PPTable_t *pptable = smu->smu_table.driver_pptable;
++ SkuTable_t *skutable = &pptable->SkuTable;
++ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
++ struct smu_table_context *table_context = &smu->smu_table;
++ OverDriveTableExternal_t *od_table =
++ (OverDriveTableExternal_t *)table_context->overdrive_table;
++ int ret = 0;
++
++ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
++ return -EINVAL;
++
++ if (limit <= msg_limit) {
++ if (smu->current_power_limit > msg_limit) {
++ od_table->OverDriveTable.Ppt = 0;
++ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
++
++ ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
++ if (ret) {
++ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
++ return ret;
++ }
++ }
++ return smu_v13_0_set_power_limit(smu, limit_type, limit);
++ } else if (smu->od_enabled) {
++ ret = smu_v13_0_set_power_limit(smu, limit_type, msg_limit);
++ if (ret)
++ return ret;
++
++ od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
++ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
++
++ ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
++ if (ret) {
++ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
++ return ret;
++ }
++
++ smu->current_power_limit = limit;
++ } else {
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
+ .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
+ .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
+@@ -2618,7 +2668,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
+ .set_fan_control_mode = smu_v13_0_set_fan_control_mode,
+ .enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
+ .get_power_limit = smu_v13_0_7_get_power_limit,
+- .set_power_limit = smu_v13_0_set_power_limit,
++ .set_power_limit = smu_v13_0_7_set_power_limit,
+ .set_power_source = smu_v13_0_set_power_source,
+ .get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
+ .set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
+index 51abe42c639e5..5168628f11cff 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
+@@ -1741,6 +1741,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
+ u8 request = msg->request & ~DP_AUX_I2C_MOT;
+ int ret = 0;
+
++ mutex_lock(&ctx->aux_lock);
+ pm_runtime_get_sync(dev);
+ msg->reply = 0;
+ switch (request) {
+@@ -1757,6 +1758,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
+ msg->size, msg->buffer);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
++ mutex_unlock(&ctx->aux_lock);
+
+ return ret;
+ }
+@@ -2453,7 +2455,9 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
+ ctx->connector = NULL;
+ anx7625_dp_stop(ctx);
+
+- pm_runtime_put_sync(dev);
++ mutex_lock(&ctx->aux_lock);
++ pm_runtime_put_sync_suspend(dev);
++ mutex_unlock(&ctx->aux_lock);
+ }
+
+ static enum drm_connector_status
+@@ -2647,6 +2651,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
+
+ mutex_init(&platform->lock);
+ mutex_init(&platform->hdcp_wq_lock);
++ mutex_init(&platform->aux_lock);
+
+ INIT_DELAYED_WORK(&platform->hdcp_work, hdcp_check_work_func);
+ platform->hdcp_workqueue = create_workqueue("hdcp workqueue");
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
+index 5af819611ebce..80d3fb4e985f1 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
+@@ -471,6 +471,8 @@ struct anx7625_data {
+ struct workqueue_struct *hdcp_workqueue;
+ /* Lock for hdcp work queue */
+ struct mutex hdcp_wq_lock;
++ /* Lock for aux transfer and disable */
++ struct mutex aux_lock;
+ char edid_block;
+ struct display_timing dt;
+ u8 display_timing_valid;
+diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
+index d81920227a8ae..7c0076e499533 100644
+--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
++++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
+@@ -54,13 +54,13 @@ static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr,
+ int ret;
+
+ ret = i2c_master_send(ptn_bridge->client, &addr, 1);
+- if (ret <= 0) {
++ if (ret < 0) {
+ DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = i2c_master_recv(ptn_bridge->client, buf, len);
+- if (ret <= 0) {
++ if (ret < 0) {
+ DRM_ERROR("Failed to recv i2c data, ret=%d\n", ret);
+ return ret;
+ }
+@@ -78,7 +78,7 @@ static int ptn3460_write_byte(struct ptn3460_bridge *ptn_bridge, char addr,
+ buf[1] = val;
+
+ ret = i2c_master_send(ptn_bridge->client, buf, ARRAY_SIZE(buf));
+- if (ret <= 0) {
++ if (ret < 0) {
+ DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
+index 541e4f5afc4c8..14d4dcf239da8 100644
+--- a/drivers/gpu/drm/bridge/parade-ps8640.c
++++ b/drivers/gpu/drm/bridge/parade-ps8640.c
+@@ -107,6 +107,7 @@ struct ps8640 {
+ struct device_link *link;
+ bool pre_enabled;
+ bool need_post_hpd_delay;
++ struct mutex aux_lock;
+ };
+
+ static const struct regmap_config ps8640_regmap_config[] = {
+@@ -345,11 +346,20 @@ static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
+ struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
+ int ret;
+
++ mutex_lock(&ps_bridge->aux_lock);
+ pm_runtime_get_sync(dev);
++ ret = _ps8640_wait_hpd_asserted(ps_bridge, 200 * 1000);
++ if (ret) {
++ pm_runtime_put_sync_suspend(dev);
++ goto exit;
++ }
+ ret = ps8640_aux_transfer_msg(aux, msg);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
++exit:
++ mutex_unlock(&ps_bridge->aux_lock);
++
+ return ret;
+ }
+
+@@ -470,7 +480,18 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
+ ps_bridge->pre_enabled = false;
+
+ ps8640_bridge_vdo_control(ps_bridge, DISABLE);
++
++ /*
++ * The bridge seems to expect everything to be power cycled at the
++ * disable process, so grab a lock here to make sure
++ * ps8640_aux_transfer() is not holding a runtime PM reference and
++ * preventing the bridge from suspend.
++ */
++ mutex_lock(&ps_bridge->aux_lock);
++
+ pm_runtime_put_sync_suspend(&ps_bridge->page[PAGE0_DP_CNTL]->dev);
++
++ mutex_unlock(&ps_bridge->aux_lock);
+ }
+
+ static int ps8640_bridge_attach(struct drm_bridge *bridge,
+@@ -619,6 +640,8 @@ static int ps8640_probe(struct i2c_client *client)
+ if (!ps_bridge)
+ return -ENOMEM;
+
++ mutex_init(&ps_bridge->aux_lock);
++
+ ps_bridge->supplies[0].supply = "vdd12";
+ ps_bridge->supplies[1].supply = "vdd33";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
+diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
+index be5914caa17d5..63a1a0c88be4d 100644
+--- a/drivers/gpu/drm/bridge/samsung-dsim.c
++++ b/drivers/gpu/drm/bridge/samsung-dsim.c
+@@ -969,10 +969,6 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
+ reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
+ reg &= ~DSIM_STOP_STATE_CNT_MASK;
+ reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);
+-
+- if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
+- reg |= DSIM_FORCE_STOP_STATE;
+-
+ samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
+
+ reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
+@@ -1431,18 +1427,6 @@ static void samsung_dsim_disable_irq(struct samsung_dsim *dsi)
+ disable_irq(dsi->irq);
+ }
+
+-static void samsung_dsim_set_stop_state(struct samsung_dsim *dsi, bool enable)
+-{
+- u32 reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
+-
+- if (enable)
+- reg |= DSIM_FORCE_STOP_STATE;
+- else
+- reg &= ~DSIM_FORCE_STOP_STATE;
+-
+- samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
+-}
+-
+ static int samsung_dsim_init(struct samsung_dsim *dsi)
+ {
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+@@ -1492,9 +1476,6 @@ static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
+ ret = samsung_dsim_init(dsi);
+ if (ret)
+ return;
+-
+- samsung_dsim_set_display_mode(dsi);
+- samsung_dsim_set_display_enable(dsi, true);
+ }
+ }
+
+@@ -1503,12 +1484,8 @@ static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
+ {
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+- if (samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
+- samsung_dsim_set_display_mode(dsi);
+- samsung_dsim_set_display_enable(dsi, true);
+- } else {
+- samsung_dsim_set_stop_state(dsi, false);
+- }
++ samsung_dsim_set_display_mode(dsi);
++ samsung_dsim_set_display_enable(dsi, true);
+
+ dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
+ }
+@@ -1521,9 +1498,6 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
+ if (!(dsi->state & DSIM_STATE_ENABLED))
+ return;
+
+- if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
+- samsung_dsim_set_stop_state(dsi, true);
+-
+ dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
+ }
+
+@@ -1828,8 +1802,6 @@ static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host,
+ if (ret)
+ return ret;
+
+- samsung_dsim_set_stop_state(dsi, false);
+-
+ ret = mipi_dsi_create_packet(&xfer.packet, msg);
+ if (ret < 0)
+ return ret;
+diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
+index 2bdc5b439bebd..4560ae9cbce15 100644
+--- a/drivers/gpu/drm/bridge/sii902x.c
++++ b/drivers/gpu/drm/bridge/sii902x.c
+@@ -1080,6 +1080,26 @@ static int sii902x_init(struct sii902x *sii902x)
+ return ret;
+ }
+
++ ret = sii902x_audio_codec_init(sii902x, dev);
++ if (ret)
++ return ret;
++
++ i2c_set_clientdata(sii902x->i2c, sii902x);
++
++ sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
++ 1, 0, I2C_MUX_GATE,
++ sii902x_i2c_bypass_select,
++ sii902x_i2c_bypass_deselect);
++ if (!sii902x->i2cmux) {
++ ret = -ENOMEM;
++ goto err_unreg_audio;
++ }
++
++ sii902x->i2cmux->priv = sii902x;
++ ret = i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
++ if (ret)
++ goto err_unreg_audio;
++
+ sii902x->bridge.funcs = &sii902x_bridge_funcs;
+ sii902x->bridge.of_node = dev->of_node;
+ sii902x->bridge.timings = &default_sii902x_timings;
+@@ -1090,19 +1110,13 @@ static int sii902x_init(struct sii902x *sii902x)
+
+ drm_bridge_add(&sii902x->bridge);
+
+- sii902x_audio_codec_init(sii902x, dev);
+-
+- i2c_set_clientdata(sii902x->i2c, sii902x);
++ return 0;
+
+- sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
+- 1, 0, I2C_MUX_GATE,
+- sii902x_i2c_bypass_select,
+- sii902x_i2c_bypass_deselect);
+- if (!sii902x->i2cmux)
+- return -ENOMEM;
++err_unreg_audio:
++ if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
++ platform_device_unregister(sii902x->audio.pdev);
+
+- sii902x->i2cmux->priv = sii902x;
+- return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
++ return ret;
+ }
+
+ static int sii902x_probe(struct i2c_client *client)
+@@ -1170,12 +1184,14 @@ static int sii902x_probe(struct i2c_client *client)
+ }
+
+ static void sii902x_remove(struct i2c_client *client)
+-
+ {
+ struct sii902x *sii902x = i2c_get_clientdata(client);
+
+- i2c_mux_del_adapters(sii902x->i2cmux);
+ drm_bridge_remove(&sii902x->bridge);
++ i2c_mux_del_adapters(sii902x->i2cmux);
++
++ if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
++ platform_device_unregister(sii902x->audio.pdev);
+ }
+
+ static const struct of_device_id sii902x_dt_ids[] = {
+diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
+index d8b2955e88fd0..afb02aae707b4 100644
+--- a/drivers/gpu/drm/drm_damage_helper.c
++++ b/drivers/gpu/drm/drm_damage_helper.c
+@@ -241,7 +241,8 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
+ iter->plane_src.x2 = (src.x2 >> 16) + !!(src.x2 & 0xFFFF);
+ iter->plane_src.y2 = (src.y2 >> 16) + !!(src.y2 & 0xFFFF);
+
+- if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
++ if (!iter->clips || state->ignore_damage_clips ||
++ !drm_rect_equals(&state->src, &old_state->src)) {
+ iter->clips = NULL;
+ iter->num_clips = 0;
+ iter->full_update = true;
+diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
+index 24e7998d17313..311e179904a2a 100644
+--- a/drivers/gpu/drm/drm_plane.c
++++ b/drivers/gpu/drm/drm_plane.c
+@@ -678,6 +678,19 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
+ !file_priv->universal_planes)
+ continue;
+
++ /*
++ * If we're running on a virtualized driver then,
++ * unless userspace advertizes support for the
++ * virtualized cursor plane, disable cursor planes
++ * because they'll be broken due to missing cursor
++ * hotspot info.
++ */
++ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
++ drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) &&
++ file_priv->atomic &&
++ !file_priv->supports_virtualized_cursor_plane)
++ continue;
++
+ if (drm_lease_held(file_priv, plane->base.id)) {
+ if (count < plane_resp->count_planes &&
+ put_user(plane->base.id, plane_ptr + count))
+@@ -1387,6 +1400,7 @@ retry:
+ out:
+ if (fb)
+ drm_framebuffer_put(fb);
++ fb = NULL;
+ if (plane->old_fb)
+ drm_framebuffer_put(plane->old_fb);
+ plane->old_fb = NULL;
+diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+index 4d986077738b9..bce027552474a 100644
+--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
++++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+@@ -319,9 +319,9 @@ static void decon_win_set_bldmod(struct decon_context *ctx, unsigned int win,
+ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
+ struct drm_framebuffer *fb)
+ {
+- struct exynos_drm_plane plane = ctx->planes[win];
++ struct exynos_drm_plane *plane = &ctx->planes[win];
+ struct exynos_drm_plane_state *state =
+- to_exynos_plane_state(plane.base.state);
++ to_exynos_plane_state(plane->base.state);
+ unsigned int alpha = state->base.alpha;
+ unsigned int pixel_alpha;
+ unsigned long val;
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+index 8dde7b1e9b35d..5bdc246f5fad0 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+@@ -661,9 +661,9 @@ static void fimd_win_set_bldmod(struct fimd_context *ctx, unsigned int win,
+ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
+ struct drm_framebuffer *fb, int width)
+ {
+- struct exynos_drm_plane plane = ctx->planes[win];
++ struct exynos_drm_plane *plane = &ctx->planes[win];
+ struct exynos_drm_plane_state *state =
+- to_exynos_plane_state(plane.base.state);
++ to_exynos_plane_state(plane->base.state);
+ uint32_t pixel_format = fb->format->format;
+ unsigned int alpha = state->base.alpha;
+ u32 val = WINCONx_ENWIN;
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+index 34cdabc30b4f5..5302bebbe38c9 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+@@ -1342,7 +1342,7 @@ static int __maybe_unused gsc_runtime_resume(struct device *dev)
+ for (i = 0; i < ctx->num_clocks; i++) {
+ ret = clk_prepare_enable(ctx->clocks[i]);
+ if (ret) {
+- while (--i > 0)
++ while (--i >= 0)
+ clk_disable_unprepare(ctx->clocks[i]);
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
+index 67143a0f51893..bf0dbd7d0697d 100644
+--- a/drivers/gpu/drm/i915/display/icl_dsi.c
++++ b/drivers/gpu/drm/i915/display/icl_dsi.c
+@@ -1155,6 +1155,7 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
+ }
+
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
++ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
+ /* ensure all panel commands dispatched before enabling transcoder */
+ wait_for_cmds_dispatched_to_panel(encoder);
+@@ -1255,8 +1256,6 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
+ /* step6d: enable dsi transcoder */
+ gen11_dsi_enable_transcoder(encoder);
+
+- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+-
+ /* step7: enable backlight */
+ intel_backlight_enable(crtc_state, conn_state);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index 4f1f31fc9529d..614b4534ef96b 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -1401,8 +1401,18 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
+ * can rely on frontbuffer tracking.
+ */
+ mask = EDP_PSR_DEBUG_MASK_MEMUP |
+- EDP_PSR_DEBUG_MASK_HPD |
+- EDP_PSR_DEBUG_MASK_LPSP;
++ EDP_PSR_DEBUG_MASK_HPD;
++
++ /*
++ * For some unknown reason on HSW non-ULT (or at least on
++ * Dell Latitude E6540) external displays start to flicker
++ * when PSR is enabled on the eDP. SR/PC6 residency is much
++ * higher than should be possible with an external display.
++ * As a workaround leave LPSP unmasked to prevent PSR entry
++ * when external displays are active.
++ */
++ if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
++ mask |= EDP_PSR_DEBUG_MASK_LPSP;
+
+ if (DISPLAY_VER(dev_priv) < 20)
+ mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
+index 5057d976fa578..ca762ea554136 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
+@@ -62,7 +62,7 @@ nouveau_fence_signal(struct nouveau_fence *fence)
+ if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
+ struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+
+- if (atomic_dec_and_test(&fctx->notify_ref))
++ if (!--fctx->notify_ref)
+ drop = 1;
+ }
+
+@@ -103,7 +103,6 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
+ void
+ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
+ {
+- cancel_work_sync(&fctx->allow_block_work);
+ nouveau_fence_context_kill(fctx, 0);
+ nvif_event_dtor(&fctx->event);
+ fctx->dead = 1;
+@@ -168,18 +167,6 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc
+ return ret;
+ }
+
+-static void
+-nouveau_fence_work_allow_block(struct work_struct *work)
+-{
+- struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
+- allow_block_work);
+-
+- if (atomic_read(&fctx->notify_ref) == 0)
+- nvif_event_block(&fctx->event);
+- else
+- nvif_event_allow(&fctx->event);
+-}
+-
+ void
+ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
+ {
+@@ -191,7 +178,6 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
+ } args;
+ int ret;
+
+- INIT_WORK(&fctx->allow_block_work, nouveau_fence_work_allow_block);
+ INIT_LIST_HEAD(&fctx->flip);
+ INIT_LIST_HEAD(&fctx->pending);
+ spin_lock_init(&fctx->lock);
+@@ -535,19 +521,15 @@ static bool nouveau_fence_enable_signaling(struct dma_fence *f)
+ struct nouveau_fence *fence = from_fence(f);
+ struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+ bool ret;
+- bool do_work;
+
+- if (atomic_inc_return(&fctx->notify_ref) == 0)
+- do_work = true;
++ if (!fctx->notify_ref++)
++ nvif_event_allow(&fctx->event);
+
+ ret = nouveau_fence_no_signaling(f);
+ if (ret)
+ set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
+- else if (atomic_dec_and_test(&fctx->notify_ref))
+- do_work = true;
+-
+- if (do_work)
+- schedule_work(&fctx->allow_block_work);
++ else if (!--fctx->notify_ref)
++ nvif_event_block(&fctx->event);
+
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
+index 28f5cf013b898..64d33ae7f3561 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
++++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
+@@ -3,7 +3,6 @@
+ #define __NOUVEAU_FENCE_H__
+
+ #include <linux/dma-fence.h>
+-#include <linux/workqueue.h>
+ #include <nvif/event.h>
+
+ struct nouveau_drm;
+@@ -46,9 +45,7 @@ struct nouveau_fence_chan {
+ char name[32];
+
+ struct nvif_event event;
+- struct work_struct allow_block_work;
+- atomic_t notify_ref;
+- int dead, killed;
++ int notify_ref, dead, killed;
+ };
+
+ struct nouveau_fence_priv {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
+index a6602c0126715..3dda885df5b22 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
+@@ -108,6 +108,9 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
+ } else {
+ ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
+ mem->mem.size, &tmp);
++ if (ret)
++ goto done;
++
+ vma->addr = tmp.addr;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
+index c8ce7ff187135..e74493a4569ed 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
+@@ -550,6 +550,10 @@ ga100_fifo_nonstall_ctor(struct nvkm_fifo *fifo)
+ struct nvkm_engn *engn = list_first_entry(&runl->engns, typeof(*engn), head);
+
+ runl->nonstall.vector = engn->func->nonstall(engn);
++
++ /* if no nonstall vector just keep going */
++ if (runl->nonstall.vector == -1)
++ continue;
+ if (runl->nonstall.vector < 0) {
+ RUNL_ERROR(runl, "nonstall %d", runl->nonstall.vector);
+ return runl->nonstall.vector;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
+index b903785056b5d..3454c7d295029 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
+@@ -351,7 +351,7 @@ r535_engn_nonstall(struct nvkm_engn *engn)
+ int ret;
+
+ ret = nvkm_gsp_intr_nonstall(subdev->device->gsp, subdev->type, subdev->inst);
+- WARN_ON(ret < 0);
++ WARN_ON(ret == -ENOENT);
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
+index 04bceaa28a197..da1bebb896f7f 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
+@@ -25,12 +25,8 @@ int
+ nvkm_gsp_intr_nonstall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst)
+ {
+ for (int i = 0; i < gsp->intr_nr; i++) {
+- if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) {
+- if (gsp->intr[i].nonstall != ~0)
+- return gsp->intr[i].nonstall;
+-
+- return -EINVAL;
+- }
++ if (gsp->intr[i].type == type && gsp->intr[i].inst == inst)
++ return gsp->intr[i].nonstall;
+ }
+
+ return -ENOENT;
+diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
+index 99e14dc212ecb..a4ac4b47777fe 100644
+--- a/drivers/gpu/drm/panel/Kconfig
++++ b/drivers/gpu/drm/panel/Kconfig
+@@ -530,6 +530,8 @@ config DRM_PANEL_RAYDIUM_RM692E5
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
++ select DRM_DISPLAY_DP_HELPER
++ select DRM_DISPLAY_HELPER
+ help
+ Say Y here if you want to enable support for Raydium RM692E5-based
+ display panels, such as the one found in the Fairphone 5 smartphone.
+diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
+index 95c8472d878a9..7dc6fb7308ce3 100644
+--- a/drivers/gpu/drm/panel/panel-edp.c
++++ b/drivers/gpu/drm/panel/panel-edp.c
+@@ -973,6 +973,8 @@ static const struct panel_desc auo_b116xak01 = {
+ },
+ .delay = {
+ .hpd_absent = 200,
++ .unprepare = 500,
++ .enable = 50,
+ },
+ };
+
+@@ -1840,7 +1842,8 @@ static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x145c, &delay_200_500_e50, "B116XAB01.4"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
+- EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"),
++ EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02.3"),
++ EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
+@@ -1848,8 +1851,10 @@ static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"),
++ EDP_PANEL_ENTRY('B', 'O', 'E', 0x09c3, &delay_200_500_e50, "NT116WHM-N21,836X2"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x095f, &delay_200_500_e50, "NE135FBM-N41 v8.1"),
++ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0979, &delay_200_500_e50, "NV116WHM-N49 V8.0"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
+diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
+index ea5a857793827..f23d8832a1ad0 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
++++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
+@@ -309,7 +309,7 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = {
+ .off_func = s6d7aa0_lsl080al02_off,
+ .drm_mode = &s6d7aa0_lsl080al02_mode,
+ .mode_flags = MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_NO_HFP,
+- .bus_flags = DRM_BUS_FLAG_DE_HIGH,
++ .bus_flags = 0,
+
+ .has_backlight = false,
+ .use_passwd3 = false,
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 9367a4572dcf6..1ba633fd56961 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -3861,6 +3861,7 @@ static const struct panel_desc tianma_tm070jdhg30 = {
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
++ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ };
+
+ static const struct panel_desc tianma_tm070jvhg33 = {
+@@ -3873,6 +3874,7 @@ static const struct panel_desc tianma_tm070jvhg33 = {
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
++ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ };
+
+ static const struct display_timing tianma_tm070rvhg71_timing = {
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
+index 46de4f171970b..beee5563031aa 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.c
++++ b/drivers/gpu/drm/qxl/qxl_drv.c
+@@ -285,7 +285,7 @@ static const struct drm_ioctl_desc qxl_ioctls[] = {
+ };
+
+ static struct drm_driver qxl_driver = {
+- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
++ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_CURSOR_HOTSPOT,
+
+ .dumb_create = qxl_mode_dumb_create,
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset,
+diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
+index 5e5e466f35d10..7c78c074e3a2e 100644
+--- a/drivers/gpu/drm/tidss/tidss_crtc.c
++++ b/drivers/gpu/drm/tidss/tidss_crtc.c
+@@ -169,13 +169,13 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct tidss_device *tidss = to_tidss(ddev);
+ unsigned long flags;
+
+- dev_dbg(ddev->dev,
+- "%s: %s enabled %d, needs modeset %d, event %p\n", __func__,
+- crtc->name, drm_atomic_crtc_needs_modeset(crtc->state),
+- crtc->state->enable, crtc->state->event);
++ dev_dbg(ddev->dev, "%s: %s is %sactive, %s modeset, event %p\n",
++ __func__, crtc->name, crtc->state->active ? "" : "not ",
++ drm_atomic_crtc_needs_modeset(crtc->state) ? "needs" : "doesn't need",
++ crtc->state->event);
+
+ /* There is nothing to do if CRTC is not going to be enabled. */
+- if (!crtc->state->enable)
++ if (!crtc->state->active)
+ return;
+
+ /*
+diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
+index 047b958123341..cd9e66a06596a 100644
+--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
++++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
+@@ -182,7 +182,7 @@ DEFINE_DRM_GEM_FOPS(vbox_fops);
+
+ static const struct drm_driver driver = {
+ .driver_features =
+- DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
++ DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_CURSOR_HOTSPOT,
+
+ .fops = &vbox_fops,
+ .name = DRIVER_NAME,
+diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
+index 4334c76084084..f8e9abe647b92 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
++++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
+@@ -177,7 +177,7 @@ static const struct drm_driver driver = {
+ * out via drm_device::driver_features:
+ */
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC |
+- DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
++ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE | DRIVER_CURSOR_HOTSPOT,
+ .open = virtio_gpu_driver_open,
+ .postclose = virtio_gpu_driver_postclose,
+
+diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
+index a2e045f3a0004..a1ef657eba077 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
++++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
+@@ -79,6 +79,8 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
+ {
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
++ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
++ plane);
+ bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+@@ -86,6 +88,14 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
+ if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
+ return 0;
+
++ /*
++ * Ignore damage clips if the framebuffer attached to the plane's state
++ * has changed since the last plane update (page-flip). In this case, a
++ * full plane update should happen because uploads are done per-buffer.
++ */
++ if (old_plane_state->fb != new_plane_state->fb)
++ new_plane_state->ignore_damage_clips = true;
++
+ crtc_state = drm_atomic_get_crtc_state(state,
+ new_plane_state->crtc);
+ if (IS_ERR(crtc_state))
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 8b24ecf60e3ec..d3e308fdfd5be 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -1611,7 +1611,7 @@ static const struct file_operations vmwgfx_driver_fops = {
+
+ static const struct drm_driver driver = {
+ .driver_features =
+- DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM,
++ DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM | DRIVER_CURSOR_HOTSPOT,
+ .ioctls = vmw_ioctls,
+ .num_ioctls = ARRAY_SIZE(vmw_ioctls),
+ .master_set = vmw_master_set,
+diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c
+index 0e5d3d2e9c985..76002b91c86a4 100644
+--- a/drivers/iio/adc/ad7091r-base.c
++++ b/drivers/iio/adc/ad7091r-base.c
+@@ -6,6 +6,7 @@
+ */
+
+ #include <linux/bitops.h>
++#include <linux/bitfield.h>
+ #include <linux/iio/events.h>
+ #include <linux/iio/iio.h>
+ #include <linux/interrupt.h>
+@@ -28,6 +29,7 @@
+ #define AD7091R_REG_RESULT_CONV_RESULT(x) ((x) & 0xfff)
+
+ /* AD7091R_REG_CONF */
++#define AD7091R_REG_CONF_ALERT_EN BIT(4)
+ #define AD7091R_REG_CONF_AUTO BIT(8)
+ #define AD7091R_REG_CONF_CMD BIT(10)
+
+@@ -49,6 +51,27 @@ struct ad7091r_state {
+ struct mutex lock; /*lock to prevent concurent reads */
+ };
+
++const struct iio_event_spec ad7091r_events[] = {
++ {
++ .type = IIO_EV_TYPE_THRESH,
++ .dir = IIO_EV_DIR_RISING,
++ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
++ BIT(IIO_EV_INFO_ENABLE),
++ },
++ {
++ .type = IIO_EV_TYPE_THRESH,
++ .dir = IIO_EV_DIR_FALLING,
++ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
++ BIT(IIO_EV_INFO_ENABLE),
++ },
++ {
++ .type = IIO_EV_TYPE_THRESH,
++ .dir = IIO_EV_DIR_EITHER,
++ .mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
++ },
++};
++EXPORT_SYMBOL_NS_GPL(ad7091r_events, IIO_AD7091R);
++
+ static int ad7091r_set_mode(struct ad7091r_state *st, enum ad7091r_mode mode)
+ {
+ int ret, conf;
+@@ -168,8 +191,142 @@ unlock:
+ return ret;
+ }
+
++static int ad7091r_read_event_config(struct iio_dev *indio_dev,
++ const struct iio_chan_spec *chan,
++ enum iio_event_type type,
++ enum iio_event_direction dir)
++{
++ struct ad7091r_state *st = iio_priv(indio_dev);
++ int val, ret;
++
++ switch (dir) {
++ case IIO_EV_DIR_RISING:
++ ret = regmap_read(st->map,
++ AD7091R_REG_CH_HIGH_LIMIT(chan->channel),
++ &val);
++ if (ret)
++ return ret;
++ return val != AD7091R_HIGH_LIMIT;
++ case IIO_EV_DIR_FALLING:
++ ret = regmap_read(st->map,
++ AD7091R_REG_CH_LOW_LIMIT(chan->channel),
++ &val);
++ if (ret)
++ return ret;
++ return val != AD7091R_LOW_LIMIT;
++ default:
++ return -EINVAL;
++ }
++}
++
++static int ad7091r_write_event_config(struct iio_dev *indio_dev,
++ const struct iio_chan_spec *chan,
++ enum iio_event_type type,
++ enum iio_event_direction dir, int state)
++{
++ struct ad7091r_state *st = iio_priv(indio_dev);
++
++ if (state) {
++ return regmap_set_bits(st->map, AD7091R_REG_CONF,
++ AD7091R_REG_CONF_ALERT_EN);
++ } else {
++ /*
++ * Set thresholds either to 0 or to 2^12 - 1 as appropriate to
++ * prevent alerts and thus disable event generation.
++ */
++ switch (dir) {
++ case IIO_EV_DIR_RISING:
++ return regmap_write(st->map,
++ AD7091R_REG_CH_HIGH_LIMIT(chan->channel),
++ AD7091R_HIGH_LIMIT);
++ case IIO_EV_DIR_FALLING:
++ return regmap_write(st->map,
++ AD7091R_REG_CH_LOW_LIMIT(chan->channel),
++ AD7091R_LOW_LIMIT);
++ default:
++ return -EINVAL;
++ }
++ }
++}
++
++static int ad7091r_read_event_value(struct iio_dev *indio_dev,
++ const struct iio_chan_spec *chan,
++ enum iio_event_type type,
++ enum iio_event_direction dir,
++ enum iio_event_info info, int *val, int *val2)
++{
++ struct ad7091r_state *st = iio_priv(indio_dev);
++ int ret;
++
++ switch (info) {
++ case IIO_EV_INFO_VALUE:
++ switch (dir) {
++ case IIO_EV_DIR_RISING:
++ ret = regmap_read(st->map,
++ AD7091R_REG_CH_HIGH_LIMIT(chan->channel),
++ val);
++ if (ret)
++ return ret;
++ return IIO_VAL_INT;
++ case IIO_EV_DIR_FALLING:
++ ret = regmap_read(st->map,
++ AD7091R_REG_CH_LOW_LIMIT(chan->channel),
++ val);
++ if (ret)
++ return ret;
++ return IIO_VAL_INT;
++ default:
++ return -EINVAL;
++ }
++ case IIO_EV_INFO_HYSTERESIS:
++ ret = regmap_read(st->map,
++ AD7091R_REG_CH_HYSTERESIS(chan->channel),
++ val);
++ if (ret)
++ return ret;
++ return IIO_VAL_INT;
++ default:
++ return -EINVAL;
++ }
++}
++
++static int ad7091r_write_event_value(struct iio_dev *indio_dev,
++ const struct iio_chan_spec *chan,
++ enum iio_event_type type,
++ enum iio_event_direction dir,
++ enum iio_event_info info, int val, int val2)
++{
++ struct ad7091r_state *st = iio_priv(indio_dev);
++
++ switch (info) {
++ case IIO_EV_INFO_VALUE:
++ switch (dir) {
++ case IIO_EV_DIR_RISING:
++ return regmap_write(st->map,
++ AD7091R_REG_CH_HIGH_LIMIT(chan->channel),
++ val);
++ case IIO_EV_DIR_FALLING:
++ return regmap_write(st->map,
++ AD7091R_REG_CH_LOW_LIMIT(chan->channel),
++ val);
++ default:
++ return -EINVAL;
++ }
++ case IIO_EV_INFO_HYSTERESIS:
++ return regmap_write(st->map,
++ AD7091R_REG_CH_HYSTERESIS(chan->channel),
++ val);
++ default:
++ return -EINVAL;
++ }
++}
++
+ static const struct iio_info ad7091r_info = {
+ .read_raw = ad7091r_read_raw,
++ .read_event_config = &ad7091r_read_event_config,
++ .write_event_config = &ad7091r_write_event_config,
++ .read_event_value = &ad7091r_read_event_value,
++ .write_event_value = &ad7091r_write_event_value,
+ };
+
+ static irqreturn_t ad7091r_event_handler(int irq, void *private)
+@@ -232,6 +389,11 @@ int ad7091r_probe(struct device *dev, const char *name,
+ iio_dev->channels = chip_info->channels;
+
+ if (irq) {
++ ret = regmap_update_bits(st->map, AD7091R_REG_CONF,
++ AD7091R_REG_CONF_ALERT_EN, BIT(4));
++ if (ret)
++ return ret;
++
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ ad7091r_event_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, iio_dev);
+@@ -243,7 +405,14 @@ int ad7091r_probe(struct device *dev, const char *name,
+ if (IS_ERR(st->vref)) {
+ if (PTR_ERR(st->vref) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
++
+ st->vref = NULL;
++ /* Enable internal vref */
++ ret = regmap_set_bits(st->map, AD7091R_REG_CONF,
++ AD7091R_REG_CONF_INT_VREF);
++ if (ret)
++ return dev_err_probe(st->dev, ret,
++ "Error on enable internal reference\n");
+ } else {
+ ret = regulator_enable(st->vref);
+ if (ret)
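[Editor's note] The new ad7091r event hooks above disable a threshold event not by clearing a per-channel enable bit but by programming the limit register to its extreme value (0xFFF for the high limit, 0x0 for the low limit) so the comparator can never trip; ad7091r_read_event_config() then reports "enabled" whenever the limit is not at that reset value, while the global ALERT_EN bit stays set. A minimal userspace sketch of that idea, with the register map stubbed as an array; nothing here is real driver API.

/* Userspace sketch: "disable by writing the limit to its extreme value". */
#include <stdio.h>
#include <stdbool.h>

#define HIGH_LIMIT_RESET 0xFFF	/* 2^12 - 1: a high threshold that can never be exceeded */

static unsigned int high_limit[4] = { HIGH_LIMIT_RESET, HIGH_LIMIT_RESET,
				      HIGH_LIMIT_RESET, HIGH_LIMIT_RESET };

static void set_rising_event(int ch, bool enable, unsigned int threshold)
{
	/* enabling programs a real threshold, disabling restores the extreme */
	high_limit[ch] = enable ? threshold : HIGH_LIMIT_RESET;
}

static bool rising_event_enabled(int ch)
{
	/* enabled iff the limit no longer sits at the reset value */
	return high_limit[ch] != HIGH_LIMIT_RESET;
}

int main(void)
{
	set_rising_event(0, true, 0x800);
	printf("ch0 enabled: %d\n", rising_event_enabled(0));   /* prints 1 */
	set_rising_event(0, false, 0);
	printf("ch0 enabled: %d\n", rising_event_enabled(0));   /* prints 0 */
	return 0;
}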
+diff --git a/drivers/iio/adc/ad7091r-base.h b/drivers/iio/adc/ad7091r-base.h
+index 509748aef9b19..b9e1c8bf3440a 100644
+--- a/drivers/iio/adc/ad7091r-base.h
++++ b/drivers/iio/adc/ad7091r-base.h
+@@ -8,6 +8,12 @@
+ #ifndef __DRIVERS_IIO_ADC_AD7091R_BASE_H__
+ #define __DRIVERS_IIO_ADC_AD7091R_BASE_H__
+
++#define AD7091R_REG_CONF_INT_VREF BIT(0)
++
++/* AD7091R_REG_CH_LIMIT */
++#define AD7091R_HIGH_LIMIT 0xFFF
++#define AD7091R_LOW_LIMIT 0x0
++
+ struct device;
+ struct ad7091r_state;
+
+@@ -17,6 +23,8 @@ struct ad7091r_chip_info {
+ unsigned int vref_mV;
+ };
+
++extern const struct iio_event_spec ad7091r_events[3];
++
+ extern const struct regmap_config ad7091r_regmap_config;
+
+ int ad7091r_probe(struct device *dev, const char *name,
+diff --git a/drivers/iio/adc/ad7091r5.c b/drivers/iio/adc/ad7091r5.c
+index 2f048527b7b78..dae98c95ebb87 100644
+--- a/drivers/iio/adc/ad7091r5.c
++++ b/drivers/iio/adc/ad7091r5.c
+@@ -12,26 +12,6 @@
+
+ #include "ad7091r-base.h"
+
+-static const struct iio_event_spec ad7091r5_events[] = {
+- {
+- .type = IIO_EV_TYPE_THRESH,
+- .dir = IIO_EV_DIR_RISING,
+- .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+- BIT(IIO_EV_INFO_ENABLE),
+- },
+- {
+- .type = IIO_EV_TYPE_THRESH,
+- .dir = IIO_EV_DIR_FALLING,
+- .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+- BIT(IIO_EV_INFO_ENABLE),
+- },
+- {
+- .type = IIO_EV_TYPE_THRESH,
+- .dir = IIO_EV_DIR_EITHER,
+- .mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
+- },
+-};
+-
+ #define AD7091R_CHANNEL(idx, bits, ev, num_ev) { \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+@@ -44,10 +24,10 @@ static const struct iio_event_spec ad7091r5_events[] = {
+ .scan_type.realbits = bits, \
+ }
+ static const struct iio_chan_spec ad7091r5_channels_irq[] = {
+- AD7091R_CHANNEL(0, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+- AD7091R_CHANNEL(1, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+- AD7091R_CHANNEL(2, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+- AD7091R_CHANNEL(3, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
++ AD7091R_CHANNEL(0, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
++ AD7091R_CHANNEL(1, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
++ AD7091R_CHANNEL(2, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
++ AD7091R_CHANNEL(3, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+ };
+
+ static const struct iio_chan_spec ad7091r5_channels_noirq[] = {
+diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+index 28f3fdfe23a29..6975a71d740f6 100644
+--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
++++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+@@ -487,9 +487,15 @@ vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+ static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf,
+ struct iosys_map *map)
+ {
+- struct vb2_dma_sg_buf *buf = dbuf->priv;
++ struct vb2_dma_sg_buf *buf;
++ void *vaddr;
++
++ buf = dbuf->priv;
++ vaddr = vb2_dma_sg_vaddr(buf->vb, buf);
++ if (!vaddr)
++ return -EINVAL;
+
+- iosys_map_set_vaddr(map, buf->vaddr);
++ iosys_map_set_vaddr(map, vaddr);
+
+ return 0;
+ }
+diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
+index 29098612813cb..c6fea5837a19f 100644
+--- a/drivers/media/i2c/imx290.c
++++ b/drivers/media/i2c/imx290.c
+@@ -41,18 +41,18 @@
+ #define IMX290_WINMODE_720P (1 << 4)
+ #define IMX290_WINMODE_CROP (4 << 4)
+ #define IMX290_FR_FDG_SEL CCI_REG8(0x3009)
+-#define IMX290_BLKLEVEL CCI_REG16(0x300a)
++#define IMX290_BLKLEVEL CCI_REG16_LE(0x300a)
+ #define IMX290_GAIN CCI_REG8(0x3014)
+-#define IMX290_VMAX CCI_REG24(0x3018)
++#define IMX290_VMAX CCI_REG24_LE(0x3018)
+ #define IMX290_VMAX_MAX 0x3ffff
+-#define IMX290_HMAX CCI_REG16(0x301c)
++#define IMX290_HMAX CCI_REG16_LE(0x301c)
+ #define IMX290_HMAX_MAX 0xffff
+-#define IMX290_SHS1 CCI_REG24(0x3020)
++#define IMX290_SHS1 CCI_REG24_LE(0x3020)
+ #define IMX290_WINWV_OB CCI_REG8(0x303a)
+-#define IMX290_WINPV CCI_REG16(0x303c)
+-#define IMX290_WINWV CCI_REG16(0x303e)
+-#define IMX290_WINPH CCI_REG16(0x3040)
+-#define IMX290_WINWH CCI_REG16(0x3042)
++#define IMX290_WINPV CCI_REG16_LE(0x303c)
++#define IMX290_WINWV CCI_REG16_LE(0x303e)
++#define IMX290_WINPH CCI_REG16_LE(0x3040)
++#define IMX290_WINWH CCI_REG16_LE(0x3042)
+ #define IMX290_OUT_CTRL CCI_REG8(0x3046)
+ #define IMX290_ODBIT_10BIT (0 << 0)
+ #define IMX290_ODBIT_12BIT (1 << 0)
+@@ -78,28 +78,28 @@
+ #define IMX290_ADBIT2 CCI_REG8(0x317c)
+ #define IMX290_ADBIT2_10BIT 0x12
+ #define IMX290_ADBIT2_12BIT 0x00
+-#define IMX290_CHIP_ID CCI_REG16(0x319a)
++#define IMX290_CHIP_ID CCI_REG16_LE(0x319a)
+ #define IMX290_ADBIT3 CCI_REG8(0x31ec)
+ #define IMX290_ADBIT3_10BIT 0x37
+ #define IMX290_ADBIT3_12BIT 0x0e
+ #define IMX290_REPETITION CCI_REG8(0x3405)
+ #define IMX290_PHY_LANE_NUM CCI_REG8(0x3407)
+ #define IMX290_OPB_SIZE_V CCI_REG8(0x3414)
+-#define IMX290_Y_OUT_SIZE CCI_REG16(0x3418)
+-#define IMX290_CSI_DT_FMT CCI_REG16(0x3441)
++#define IMX290_Y_OUT_SIZE CCI_REG16_LE(0x3418)
++#define IMX290_CSI_DT_FMT CCI_REG16_LE(0x3441)
+ #define IMX290_CSI_DT_FMT_RAW10 0x0a0a
+ #define IMX290_CSI_DT_FMT_RAW12 0x0c0c
+ #define IMX290_CSI_LANE_MODE CCI_REG8(0x3443)
+-#define IMX290_EXTCK_FREQ CCI_REG16(0x3444)
+-#define IMX290_TCLKPOST CCI_REG16(0x3446)
+-#define IMX290_THSZERO CCI_REG16(0x3448)
+-#define IMX290_THSPREPARE CCI_REG16(0x344a)
+-#define IMX290_TCLKTRAIL CCI_REG16(0x344c)
+-#define IMX290_THSTRAIL CCI_REG16(0x344e)
+-#define IMX290_TCLKZERO CCI_REG16(0x3450)
+-#define IMX290_TCLKPREPARE CCI_REG16(0x3452)
+-#define IMX290_TLPX CCI_REG16(0x3454)
+-#define IMX290_X_OUT_SIZE CCI_REG16(0x3472)
++#define IMX290_EXTCK_FREQ CCI_REG16_LE(0x3444)
++#define IMX290_TCLKPOST CCI_REG16_LE(0x3446)
++#define IMX290_THSZERO CCI_REG16_LE(0x3448)
++#define IMX290_THSPREPARE CCI_REG16_LE(0x344a)
++#define IMX290_TCLKTRAIL CCI_REG16_LE(0x344c)
++#define IMX290_THSTRAIL CCI_REG16_LE(0x344e)
++#define IMX290_TCLKZERO CCI_REG16_LE(0x3450)
++#define IMX290_TCLKPREPARE CCI_REG16_LE(0x3452)
++#define IMX290_TLPX CCI_REG16_LE(0x3454)
++#define IMX290_X_OUT_SIZE CCI_REG16_LE(0x3472)
+ #define IMX290_INCKSEL7 CCI_REG8(0x3480)
+
+ #define IMX290_PGCTRL_REGEN BIT(0)
+diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
+index 9c58c1a80cba3..81bb61407bdce 100644
+--- a/drivers/media/i2c/imx355.c
++++ b/drivers/media/i2c/imx355.c
+@@ -1748,10 +1748,6 @@ static int imx355_probe(struct i2c_client *client)
+ goto error_handler_free;
+ }
+
+- ret = v4l2_async_register_subdev_sensor(&imx355->sd);
+- if (ret < 0)
+- goto error_media_entity;
+-
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+@@ -1760,9 +1756,15 @@ static int imx355_probe(struct i2c_client *client)
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
++ ret = v4l2_async_register_subdev_sensor(&imx355->sd);
++ if (ret < 0)
++ goto error_media_entity_runtime_pm;
++
+ return 0;
+
+-error_media_entity:
++error_media_entity_runtime_pm:
++ pm_runtime_disable(&client->dev);
++ pm_runtime_set_suspended(&client->dev);
+ media_entity_cleanup(&imx355->sd.entity);
+
+ error_handler_free:
+diff --git a/drivers/media/i2c/ov01a10.c b/drivers/media/i2c/ov01a10.c
+index bbd5740d2280b..02de09edc3f96 100644
+--- a/drivers/media/i2c/ov01a10.c
++++ b/drivers/media/i2c/ov01a10.c
+@@ -859,6 +859,7 @@ static void ov01a10_remove(struct i2c_client *client)
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+
+ pm_runtime_disable(&client->dev);
++ pm_runtime_set_suspended(&client->dev);
+ }
+
+ static int ov01a10_probe(struct i2c_client *client)
+@@ -905,17 +906,26 @@ static int ov01a10_probe(struct i2c_client *client)
+ goto err_media_entity_cleanup;
+ }
+
++ /*
++ * Device is already turned on by i2c-core with ACPI domain PM.
++ * Enable runtime PM and turn off the device.
++ */
++ pm_runtime_set_active(&client->dev);
++ pm_runtime_enable(dev);
++ pm_runtime_idle(dev);
++
+ ret = v4l2_async_register_subdev_sensor(&ov01a10->sd);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register subdev: %d\n", ret);
+- goto err_media_entity_cleanup;
++ goto err_pm_disable;
+ }
+
+- pm_runtime_enable(dev);
+- pm_runtime_idle(dev);
+-
+ return 0;
+
++err_pm_disable:
++ pm_runtime_disable(dev);
++ pm_runtime_set_suspended(&client->dev);
++
+ err_media_entity_cleanup:
+ media_entity_cleanup(&ov01a10->sd.entity);
+
+diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
+index 970d2caeb3d62..2793e4675c6da 100644
+--- a/drivers/media/i2c/ov13b10.c
++++ b/drivers/media/i2c/ov13b10.c
+@@ -1556,24 +1556,27 @@ static int ov13b10_probe(struct i2c_client *client)
+ goto error_handler_free;
+ }
+
+- ret = v4l2_async_register_subdev_sensor(&ov13b->sd);
+- if (ret < 0)
+- goto error_media_entity;
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+-
+ /* Set the device's state to active if it's in D0 state. */
+ if (full_power)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
++ ret = v4l2_async_register_subdev_sensor(&ov13b->sd);
++ if (ret < 0)
++ goto error_media_entity_runtime_pm;
++
+ return 0;
+
+-error_media_entity:
++error_media_entity_runtime_pm:
++ pm_runtime_disable(&client->dev);
++ if (full_power)
++ pm_runtime_set_suspended(&client->dev);
+ media_entity_cleanup(&ov13b->sd.entity);
+
+ error_handler_free:
+@@ -1596,6 +1599,7 @@ static void ov13b10_remove(struct i2c_client *client)
+ ov13b10_free_controls(ov13b);
+
+ pm_runtime_disable(&client->dev);
++ pm_runtime_set_suspended(&client->dev);
+ }
+
+ static DEFINE_RUNTIME_DEV_PM_OPS(ov13b10_pm_ops, ov13b10_suspend,
+diff --git a/drivers/media/i2c/ov9734.c b/drivers/media/i2c/ov9734.c
+index ee33152996055..f1377c305bcef 100644
+--- a/drivers/media/i2c/ov9734.c
++++ b/drivers/media/i2c/ov9734.c
+@@ -894,6 +894,7 @@ static void ov9734_remove(struct i2c_client *client)
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
++ pm_runtime_set_suspended(&client->dev);
+ mutex_destroy(&ov9734->mutex);
+ }
+
+@@ -939,13 +940,6 @@ static int ov9734_probe(struct i2c_client *client)
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+- ret = v4l2_async_register_subdev_sensor(&ov9734->sd);
+- if (ret < 0) {
+- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+- ret);
+- goto probe_error_media_entity_cleanup;
+- }
+-
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+@@ -954,9 +948,18 @@ static int ov9734_probe(struct i2c_client *client)
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
++ ret = v4l2_async_register_subdev_sensor(&ov9734->sd);
++ if (ret < 0) {
++ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
++ ret);
++ goto probe_error_media_entity_cleanup_pm;
++ }
++
+ return 0;
+
+-probe_error_media_entity_cleanup:
++probe_error_media_entity_cleanup_pm:
++ pm_runtime_disable(&client->dev);
++ pm_runtime_set_suspended(&client->dev);
+ media_entity_cleanup(&ov9734->sd.entity);
+
+ probe_error_v4l2_ctrl_handler_free:
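[Editor's note] The imx355, ov01a10, ov13b10 and ov9734 hunks all reorder probe the same way: runtime PM is set up before v4l2_async_register_subdev_sensor(), and the renamed error label unwinds it with pm_runtime_disable()/pm_runtime_set_suspended() so a late registration failure cannot leave the PM state dangling. A stripped-down userspace sketch of that unwind ordering; the demo_* functions are stand-ins for the kernel calls used in the patch.

/* Userspace sketch of the reordered probe/error-unwind sequence above. */
#include <stdio.h>

static void demo_pm_runtime_enable(void)        { puts("runtime PM enabled"); }
static void demo_pm_runtime_disable(void)       { puts("runtime PM disabled"); }
static void demo_pm_runtime_set_suspended(void) { puts("runtime PM state: suspended"); }
static void demo_media_entity_cleanup(void)     { puts("media entity cleaned up"); }
static int  demo_register_subdev(void)          { return -1; /* force the error path */ }

static int demo_probe(void)
{
	int ret;

	/* 1. enable runtime PM first ... */
	demo_pm_runtime_enable();

	/* 2. ... then register; a failure must undo the PM setup */
	ret = demo_register_subdev();
	if (ret < 0)
		goto err_pm_disable;

	return 0;

err_pm_disable:
	demo_pm_runtime_disable();
	demo_pm_runtime_set_suspended();
	demo_media_entity_cleanup();
	return ret;
}

int main(void)
{
	return demo_probe() ? 1 : 0;
}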
+diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
+index fa27638edc072..dab14787116b6 100644
+--- a/drivers/media/i2c/st-mipid02.c
++++ b/drivers/media/i2c/st-mipid02.c
+@@ -770,6 +770,7 @@ static void mipid02_set_fmt_sink(struct v4l2_subdev *sd,
+ struct v4l2_subdev_format *format)
+ {
+ struct mipid02_dev *bridge = to_mipid02_dev(sd);
++ struct v4l2_subdev_format source_fmt;
+ struct v4l2_mbus_framefmt *fmt;
+
+ format->format.code = get_fmt_code(format->format.code);
+@@ -781,8 +782,12 @@ static void mipid02_set_fmt_sink(struct v4l2_subdev *sd,
+
+ *fmt = format->format;
+
+- /* Propagate the format change to the source pad */
+- mipid02_set_fmt_source(sd, sd_state, format);
++ /*
++ * Propagate the format change to the source pad, taking
++ * care not to update the format pointer given back to user
++ */
++ source_fmt = *format;
++ mipid02_set_fmt_source(sd, sd_state, &source_fmt);
+ }
+
+ static int mipid02_set_fmt(struct v4l2_subdev *sd,
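[Editor's note] The st-mipid02 hunk propagates the sink format to the source pad through a local copy of the v4l2_subdev_format, so the source-side adjustments do not leak back into the structure handed back to the caller. A tiny userspace sketch of "propagate via a copy"; the types and helpers are illustrative only.

/* Userspace sketch: propagate a format to a second consumer through a copy
 * so the caller's struct is returned unmodified. */
#include <stdio.h>

struct demo_fmt {
	unsigned int code;
};

static void demo_set_fmt_source(struct demo_fmt *fmt)
{
	fmt->code = 0x2006;	/* the source pad may pick a different code */
}

static void demo_set_fmt_sink(struct demo_fmt *fmt)
{
	struct demo_fmt source_fmt = *fmt;	/* copy, as in the hunk above */

	demo_set_fmt_source(&source_fmt);	/* only the copy is adjusted */
}

int main(void)
{
	struct demo_fmt user_fmt = { .code = 0x3001 };

	demo_set_fmt_sink(&user_fmt);
	printf("code returned to caller: 0x%x\n", user_fmt.code);	/* still 0x3001 */
	return 0;
}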
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+index 60425c99a2b8b..c3456c700c07e 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+@@ -1021,13 +1021,13 @@ static void mtk_jpeg_dec_device_run(void *priv)
+ if (ret < 0)
+ goto dec_end;
+
+- schedule_delayed_work(&jpeg->job_timeout_work,
+- msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
+-
+ mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs);
+ if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, &dst_buf->vb2_buf, &fb))
+ goto dec_end;
+
++ schedule_delayed_work(&jpeg->job_timeout_work,
++ msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
++
+ spin_lock_irqsave(&jpeg->hw_lock, flags);
+ mtk_jpeg_dec_reset(jpeg->reg_base);
+ mtk_jpeg_dec_set_config(jpeg->reg_base,
+@@ -1749,9 +1749,6 @@ retry_select:
+ v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+- schedule_delayed_work(&comp_jpeg[hw_id]->job_timeout_work,
+- msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
+-
+ mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs);
+ if (mtk_jpeg_set_dec_dst(ctx,
+ &jpeg_src_buf->dec_param,
+@@ -1761,6 +1758,9 @@ retry_select:
+ goto setdst_end;
+ }
+
++ schedule_delayed_work(&comp_jpeg[hw_id]->job_timeout_work,
++ msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
++
+ spin_lock_irqsave(&comp_jpeg[hw_id]->hw_lock, flags);
+ ctx->total_frame_num++;
+ mtk_jpeg_dec_reset(comp_jpeg[hw_id]->reg_base);
+diff --git a/drivers/media/v4l2-core/v4l2-cci.c b/drivers/media/v4l2-core/v4l2-cci.c
+index bc2dbec019b04..10005c80f43b5 100644
+--- a/drivers/media/v4l2-core/v4l2-cci.c
++++ b/drivers/media/v4l2-core/v4l2-cci.c
+@@ -18,6 +18,7 @@
+
+ int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
+ {
++ bool little_endian;
+ unsigned int len;
+ u8 buf[8];
+ int ret;
+@@ -25,8 +26,9 @@ int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
+ if (err && *err)
+ return *err;
+
+- len = FIELD_GET(CCI_REG_WIDTH_MASK, reg);
+- reg = FIELD_GET(CCI_REG_ADDR_MASK, reg);
++ little_endian = reg & CCI_REG_LE;
++ len = CCI_REG_WIDTH_BYTES(reg);
++ reg = CCI_REG_ADDR(reg);
+
+ ret = regmap_bulk_read(map, reg, buf, len);
+ if (ret) {
+@@ -40,16 +42,28 @@ int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
+ *val = buf[0];
+ break;
+ case 2:
+- *val = get_unaligned_be16(buf);
++ if (little_endian)
++ *val = get_unaligned_le16(buf);
++ else
++ *val = get_unaligned_be16(buf);
+ break;
+ case 3:
+- *val = get_unaligned_be24(buf);
++ if (little_endian)
++ *val = get_unaligned_le24(buf);
++ else
++ *val = get_unaligned_be24(buf);
+ break;
+ case 4:
+- *val = get_unaligned_be32(buf);
++ if (little_endian)
++ *val = get_unaligned_le32(buf);
++ else
++ *val = get_unaligned_be32(buf);
+ break;
+ case 8:
+- *val = get_unaligned_be64(buf);
++ if (little_endian)
++ *val = get_unaligned_le64(buf);
++ else
++ *val = get_unaligned_be64(buf);
+ break;
+ default:
+ dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n",
+@@ -68,6 +82,7 @@ EXPORT_SYMBOL_GPL(cci_read);
+
+ int cci_write(struct regmap *map, u32 reg, u64 val, int *err)
+ {
++ bool little_endian;
+ unsigned int len;
+ u8 buf[8];
+ int ret;
+@@ -75,24 +90,37 @@ int cci_write(struct regmap *map, u32 reg, u64 val, int *err)
+ if (err && *err)
+ return *err;
+
+- len = FIELD_GET(CCI_REG_WIDTH_MASK, reg);
+- reg = FIELD_GET(CCI_REG_ADDR_MASK, reg);
++ little_endian = reg & CCI_REG_LE;
++ len = CCI_REG_WIDTH_BYTES(reg);
++ reg = CCI_REG_ADDR(reg);
+
+ switch (len) {
+ case 1:
+ buf[0] = val;
+ break;
+ case 2:
+- put_unaligned_be16(val, buf);
++ if (little_endian)
++ put_unaligned_le16(val, buf);
++ else
++ put_unaligned_be16(val, buf);
+ break;
+ case 3:
+- put_unaligned_be24(val, buf);
++ if (little_endian)
++ put_unaligned_le24(val, buf);
++ else
++ put_unaligned_be24(val, buf);
+ break;
+ case 4:
+- put_unaligned_be32(val, buf);
++ if (little_endian)
++ put_unaligned_le32(val, buf);
++ else
++ put_unaligned_be32(val, buf);
+ break;
+ case 8:
+- put_unaligned_be64(val, buf);
++ if (little_endian)
++ put_unaligned_le64(val, buf);
++ else
++ put_unaligned_be64(val, buf);
+ break;
+ default:
+ dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n",
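[Editor's note] cci_read()/cci_write() now check a CCI_REG_LE flag encoded in the register definition and pack or unpack multi-byte values little-endian instead of the previous unconditional big-endian handling, which is what the CCI_REG16_LE/CCI_REG24_LE definitions in the imx290 hunk earlier rely on. A small runnable sketch of the per-width byte packing that the get/put_unaligned_le*/be* helpers perform; this is plain C, not the kernel helpers themselves.

/* Userspace sketch of the width- and endianness-dependent packing
 * selected via the CCI_REG_LE flag in cci_write() above. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static void pack(uint64_t val, uint8_t *buf, unsigned int len, bool little_endian)
{
	for (unsigned int i = 0; i < len; i++) {
		unsigned int shift = little_endian ? 8 * i : 8 * (len - 1 - i);

		buf[i] = (val >> shift) & 0xff;
	}
}

int main(void)
{
	uint8_t buf[4];

	pack(0x1234, buf, 2, false);	/* big-endian:    12 34 */
	printf("BE16: %02x %02x\n", buf[0], buf[1]);

	pack(0x1234, buf, 2, true);	/* little-endian: 34 12 */
	printf("LE16: %02x %02x\n", buf[0], buf[1]);

	pack(0xa0b0c0, buf, 3, true);	/* 24-bit LE:     c0 b0 a0 */
	printf("LE24: %02x %02x %02x\n", buf[0], buf[1], buf[2]);
	return 0;
}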
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 134c36edb6cf7..32d49100dff51 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -400,6 +400,10 @@ struct mmc_blk_ioc_data {
+ struct mmc_ioc_cmd ic;
+ unsigned char *buf;
+ u64 buf_bytes;
++ unsigned int flags;
++#define MMC_BLK_IOC_DROP BIT(0) /* drop this mrq */
++#define MMC_BLK_IOC_SBC BIT(1) /* use mrq.sbc */
++
+ struct mmc_rpmb_data *rpmb;
+ };
+
+@@ -465,7 +469,7 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
+ }
+
+ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+- struct mmc_blk_ioc_data *idata)
++ struct mmc_blk_ioc_data **idatas, int i)
+ {
+ struct mmc_command cmd = {}, sbc = {};
+ struct mmc_data data = {};
+@@ -475,10 +479,18 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ unsigned int busy_timeout_ms;
+ int err;
+ unsigned int target_part;
++ struct mmc_blk_ioc_data *idata = idatas[i];
++ struct mmc_blk_ioc_data *prev_idata = NULL;
+
+ if (!card || !md || !idata)
+ return -EINVAL;
+
++ if (idata->flags & MMC_BLK_IOC_DROP)
++ return 0;
++
++ if (idata->flags & MMC_BLK_IOC_SBC)
++ prev_idata = idatas[i - 1];
++
+ /*
+ * The RPMB accesses comes in from the character device, so we
+ * need to target these explicitly. Else we just target the
+@@ -532,7 +544,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ return err;
+ }
+
+- if (idata->rpmb) {
++ if (idata->rpmb || prev_idata) {
+ sbc.opcode = MMC_SET_BLOCK_COUNT;
+ /*
+ * We don't do any blockcount validation because the max size
+@@ -540,6 +552,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ * 'Reliable Write' bit here.
+ */
+ sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
++ if (prev_idata)
++ sbc.arg = prev_idata->ic.arg;
+ sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ mrq.sbc = &sbc;
+ }
+@@ -557,6 +571,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ mmc_wait_for_req(card->host, &mrq);
+ memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
+
++ if (prev_idata) {
++ memcpy(&prev_idata->ic.response, sbc.resp, sizeof(sbc.resp));
++ if (sbc.error) {
++ dev_err(mmc_dev(card->host), "%s: sbc error %d\n",
++ __func__, sbc.error);
++ return sbc.error;
++ }
++ }
++
+ if (cmd.error) {
+ dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+ __func__, cmd.error);
+@@ -1034,6 +1057,20 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+ md->reset_done &= ~type;
+ }
+
++static void mmc_blk_check_sbc(struct mmc_queue_req *mq_rq)
++{
++ struct mmc_blk_ioc_data **idata = mq_rq->drv_op_data;
++ int i;
++
++ for (i = 1; i < mq_rq->ioc_count; i++) {
++ if (idata[i - 1]->ic.opcode == MMC_SET_BLOCK_COUNT &&
++ mmc_op_multi(idata[i]->ic.opcode)) {
++ idata[i - 1]->flags |= MMC_BLK_IOC_DROP;
++ idata[i]->flags |= MMC_BLK_IOC_SBC;
++ }
++ }
++}
++
+ /*
+ * The non-block commands come back from the block layer after it queued it and
+ * processed it with all other requests and then they get issued in this
+@@ -1061,11 +1098,14 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
+ if (ret)
+ break;
+ }
++
++ mmc_blk_check_sbc(mq_rq);
++
+ fallthrough;
+ case MMC_DRV_OP_IOCTL_RPMB:
+ idata = mq_rq->drv_op_data;
+ for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
+- ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
++ ret = __mmc_blk_ioctl_cmd(card, md, idata, i);
+ if (ret)
+ break;
+ }
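[Editor's note] mmc_blk_check_sbc() walks the queued ioctl array once before execution: whenever a user-supplied MMC_SET_BLOCK_COUNT (CMD23) is immediately followed by a multi-block read/write, the CMD23 entry is flagged MMC_BLK_IOC_DROP (it is no longer issued on its own) and the data command is flagged MMC_BLK_IOC_SBC, so __mmc_blk_ioctl_cmd() sends the pair as one request with mrq.sbc carrying the user's CMD23 argument. A small runnable sketch of that pairing scan; the opcode values and flag meanings mirror the patch, the structs are illustrative.

/* Userspace sketch of the CMD23 pairing scan done by mmc_blk_check_sbc(). */
#include <stdio.h>

#define CMD_SET_BLOCK_COUNT	23	/* MMC_SET_BLOCK_COUNT */
#define CMD_READ_MULTIPLE	18	/* MMC_READ_MULTIPLE_BLOCK */
#define CMD_WRITE_MULTIPLE	25	/* MMC_WRITE_MULTIPLE_BLOCK */

#define IOC_DROP	(1u << 0)	/* drop this entry, it becomes mrq.sbc */
#define IOC_SBC		(1u << 1)	/* previous entry supplies the SBC argument */

struct ioc { unsigned int opcode, flags; };

static int is_multi_block(unsigned int op)
{
	return op == CMD_READ_MULTIPLE || op == CMD_WRITE_MULTIPLE;
}

static void check_sbc(struct ioc *ioc, int count)
{
	for (int i = 1; i < count; i++) {
		if (ioc[i - 1].opcode == CMD_SET_BLOCK_COUNT &&
		    is_multi_block(ioc[i].opcode)) {
			ioc[i - 1].flags |= IOC_DROP;
			ioc[i].flags |= IOC_SBC;
		}
	}
}

int main(void)
{
	struct ioc seq[] = {
		{ CMD_SET_BLOCK_COUNT, 0 },
		{ CMD_READ_MULTIPLE, 0 },
	};

	check_sbc(seq, 2);
	printf("CMD23 flags: %#x, CMD18 flags: %#x\n", seq[0].flags, seq[1].flags);
	return 0;
}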
+diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
+index cc333ad67cac8..2a99ffb61f8c0 100644
+--- a/drivers/mmc/host/mmc_spi.c
++++ b/drivers/mmc/host/mmc_spi.c
+@@ -15,7 +15,7 @@
+ #include <linux/slab.h>
+ #include <linux/module.h>
+ #include <linux/bio.h>
+-#include <linux/dma-mapping.h>
++#include <linux/dma-direction.h>
+ #include <linux/crc7.h>
+ #include <linux/crc-itu-t.h>
+ #include <linux/scatterlist.h>
+@@ -119,19 +119,14 @@ struct mmc_spi_host {
+ struct spi_transfer status;
+ struct spi_message readback;
+
+- /* underlying DMA-aware controller, or null */
+- struct device *dma_dev;
+-
+ /* buffer used for commands and for message "overhead" */
+ struct scratch *data;
+- dma_addr_t data_dma;
+
+ /* Specs say to write ones most of the time, even when the card
+ * has no need to read its input data; and many cards won't care.
+ * This is our source of those ones.
+ */
+ void *ones;
+- dma_addr_t ones_dma;
+ };
+
+
+@@ -147,11 +142,8 @@ static inline int mmc_cs_off(struct mmc_spi_host *host)
+ return spi_setup(host->spi);
+ }
+
+-static int
+-mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
++static int mmc_spi_readbytes(struct mmc_spi_host *host, unsigned int len)
+ {
+- int status;
+-
+ if (len > sizeof(*host->data)) {
+ WARN_ON(1);
+ return -EIO;
+@@ -159,19 +151,7 @@ mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
+
+ host->status.len = len;
+
+- if (host->dma_dev)
+- dma_sync_single_for_device(host->dma_dev,
+- host->data_dma, sizeof(*host->data),
+- DMA_FROM_DEVICE);
+-
+- status = spi_sync_locked(host->spi, &host->readback);
+-
+- if (host->dma_dev)
+- dma_sync_single_for_cpu(host->dma_dev,
+- host->data_dma, sizeof(*host->data),
+- DMA_FROM_DEVICE);
+-
+- return status;
++ return spi_sync_locked(host->spi, &host->readback);
+ }
+
+ static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
+@@ -506,23 +486,11 @@ mmc_spi_command_send(struct mmc_spi_host *host,
+ t = &host->t;
+ memset(t, 0, sizeof(*t));
+ t->tx_buf = t->rx_buf = data->status;
+- t->tx_dma = t->rx_dma = host->data_dma;
+ t->len = cp - data->status;
+ t->cs_change = 1;
+ spi_message_add_tail(t, &host->m);
+
+- if (host->dma_dev) {
+- host->m.is_dma_mapped = 1;
+- dma_sync_single_for_device(host->dma_dev,
+- host->data_dma, sizeof(*host->data),
+- DMA_BIDIRECTIONAL);
+- }
+ status = spi_sync_locked(host->spi, &host->m);
+-
+- if (host->dma_dev)
+- dma_sync_single_for_cpu(host->dma_dev,
+- host->data_dma, sizeof(*host->data),
+- DMA_BIDIRECTIONAL);
+ if (status < 0) {
+ dev_dbg(&host->spi->dev, " ... write returned %d\n", status);
+ cmd->error = status;
+@@ -540,9 +508,6 @@ mmc_spi_command_send(struct mmc_spi_host *host,
+ * We always provide TX data for data and CRC. The MMC/SD protocol
+ * requires us to write ones; but Linux defaults to writing zeroes;
+ * so we explicitly initialize it to all ones on RX paths.
+- *
+- * We also handle DMA mapping, so the underlying SPI controller does
+- * not need to (re)do it for each message.
+ */
+ static void
+ mmc_spi_setup_data_message(
+@@ -552,11 +517,8 @@ mmc_spi_setup_data_message(
+ {
+ struct spi_transfer *t;
+ struct scratch *scratch = host->data;
+- dma_addr_t dma = host->data_dma;
+
+ spi_message_init(&host->m);
+- if (dma)
+- host->m.is_dma_mapped = 1;
+
+ /* for reads, readblock() skips 0xff bytes before finding
+ * the token; for writes, this transfer issues that token.
+@@ -570,8 +532,6 @@ mmc_spi_setup_data_message(
+ else
+ scratch->data_token = SPI_TOKEN_SINGLE;
+ t->tx_buf = &scratch->data_token;
+- if (dma)
+- t->tx_dma = dma + offsetof(struct scratch, data_token);
+ spi_message_add_tail(t, &host->m);
+ }
+
+@@ -581,7 +541,6 @@ mmc_spi_setup_data_message(
+ t = &host->t;
+ memset(t, 0, sizeof(*t));
+ t->tx_buf = host->ones;
+- t->tx_dma = host->ones_dma;
+ /* length and actual buffer info are written later */
+ spi_message_add_tail(t, &host->m);
+
+@@ -591,14 +550,9 @@ mmc_spi_setup_data_message(
+ if (direction == DMA_TO_DEVICE) {
+ /* the actual CRC may get written later */
+ t->tx_buf = &scratch->crc_val;
+- if (dma)
+- t->tx_dma = dma + offsetof(struct scratch, crc_val);
+ } else {
+ t->tx_buf = host->ones;
+- t->tx_dma = host->ones_dma;
+ t->rx_buf = &scratch->crc_val;
+- if (dma)
+- t->rx_dma = dma + offsetof(struct scratch, crc_val);
+ }
+ spi_message_add_tail(t, &host->m);
+
+@@ -621,10 +575,7 @@ mmc_spi_setup_data_message(
+ memset(t, 0, sizeof(*t));
+ t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
+ t->tx_buf = host->ones;
+- t->tx_dma = host->ones_dma;
+ t->rx_buf = scratch->status;
+- if (dma)
+- t->rx_dma = dma + offsetof(struct scratch, status);
+ t->cs_change = 1;
+ spi_message_add_tail(t, &host->m);
+ }
+@@ -653,23 +604,13 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
+
+ if (host->mmc->use_spi_crc)
+ scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));
+- if (host->dma_dev)
+- dma_sync_single_for_device(host->dma_dev,
+- host->data_dma, sizeof(*scratch),
+- DMA_BIDIRECTIONAL);
+
+ status = spi_sync_locked(spi, &host->m);
+-
+ if (status != 0) {
+ dev_dbg(&spi->dev, "write error (%d)\n", status);
+ return status;
+ }
+
+- if (host->dma_dev)
+- dma_sync_single_for_cpu(host->dma_dev,
+- host->data_dma, sizeof(*scratch),
+- DMA_BIDIRECTIONAL);
+-
+ /*
+ * Get the transmission data-response reply. It must follow
+ * immediately after the data block we transferred. This reply
+@@ -718,8 +659,6 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
+ }
+
+ t->tx_buf += t->len;
+- if (host->dma_dev)
+- t->tx_dma += t->len;
+
+ /* Return when not busy. If we didn't collect that status yet,
+ * we'll need some more I/O.
+@@ -783,30 +722,12 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
+ }
+ leftover = status << 1;
+
+- if (host->dma_dev) {
+- dma_sync_single_for_device(host->dma_dev,
+- host->data_dma, sizeof(*scratch),
+- DMA_BIDIRECTIONAL);
+- dma_sync_single_for_device(host->dma_dev,
+- t->rx_dma, t->len,
+- DMA_FROM_DEVICE);
+- }
+-
+ status = spi_sync_locked(spi, &host->m);
+ if (status < 0) {
+ dev_dbg(&spi->dev, "read error %d\n", status);
+ return status;
+ }
+
+- if (host->dma_dev) {
+- dma_sync_single_for_cpu(host->dma_dev,
+- host->data_dma, sizeof(*scratch),
+- DMA_BIDIRECTIONAL);
+- dma_sync_single_for_cpu(host->dma_dev,
+- t->rx_dma, t->len,
+- DMA_FROM_DEVICE);
+- }
+-
+ if (bitshift) {
+ /* Walk through the data and the crc and do
+ * all the magic to get byte-aligned data.
+@@ -841,8 +762,6 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
+ }
+
+ t->rx_buf += t->len;
+- if (host->dma_dev)
+- t->rx_dma += t->len;
+
+ return 0;
+ }
+@@ -857,7 +776,6 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
+ struct mmc_data *data, u32 blk_size)
+ {
+ struct spi_device *spi = host->spi;
+- struct device *dma_dev = host->dma_dev;
+ struct spi_transfer *t;
+ enum dma_data_direction direction = mmc_get_dma_dir(data);
+ struct scatterlist *sg;
+@@ -884,31 +802,8 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
+ */
+ for_each_sg(data->sg, sg, data->sg_len, n_sg) {
+ int status = 0;
+- dma_addr_t dma_addr = 0;
+ void *kmap_addr;
+ unsigned length = sg->length;
+- enum dma_data_direction dir = direction;
+-
+- /* set up dma mapping for controller drivers that might
+- * use DMA ... though they may fall back to PIO
+- */
+- if (dma_dev) {
+- /* never invalidate whole *shared* pages ... */
+- if ((sg->offset != 0 || length != PAGE_SIZE)
+- && dir == DMA_FROM_DEVICE)
+- dir = DMA_BIDIRECTIONAL;
+-
+- dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
+- PAGE_SIZE, dir);
+- if (dma_mapping_error(dma_dev, dma_addr)) {
+- data->error = -EFAULT;
+- break;
+- }
+- if (direction == DMA_TO_DEVICE)
+- t->tx_dma = dma_addr + sg->offset;
+- else
+- t->rx_dma = dma_addr + sg->offset;
+- }
+
+ /* allow pio too; we don't allow highmem */
+ kmap_addr = kmap(sg_page(sg));
+@@ -941,8 +836,6 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
+ if (direction == DMA_FROM_DEVICE)
+ flush_dcache_page(sg_page(sg));
+ kunmap(sg_page(sg));
+- if (dma_dev)
+- dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
+
+ if (status < 0) {
+ data->error = status;
+@@ -977,21 +870,9 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
+ scratch->status[0] = SPI_TOKEN_STOP_TRAN;
+
+ host->early_status.tx_buf = host->early_status.rx_buf;
+- host->early_status.tx_dma = host->early_status.rx_dma;
+ host->early_status.len = statlen;
+
+- if (host->dma_dev)
+- dma_sync_single_for_device(host->dma_dev,
+- host->data_dma, sizeof(*scratch),
+- DMA_BIDIRECTIONAL);
+-
+ tmp = spi_sync_locked(spi, &host->m);
+-
+- if (host->dma_dev)
+- dma_sync_single_for_cpu(host->dma_dev,
+- host->data_dma, sizeof(*scratch),
+- DMA_BIDIRECTIONAL);
+-
+ if (tmp < 0) {
+ if (!data->error)
+ data->error = tmp;
+@@ -1265,52 +1146,6 @@ mmc_spi_detect_irq(int irq, void *mmc)
+ return IRQ_HANDLED;
+ }
+
+-#ifdef CONFIG_HAS_DMA
+-static int mmc_spi_dma_alloc(struct mmc_spi_host *host)
+-{
+- struct spi_device *spi = host->spi;
+- struct device *dev;
+-
+- if (!spi->master->dev.parent->dma_mask)
+- return 0;
+-
+- dev = spi->master->dev.parent;
+-
+- host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(dev, host->ones_dma))
+- return -ENOMEM;
+-
+- host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data),
+- DMA_BIDIRECTIONAL);
+- if (dma_mapping_error(dev, host->data_dma)) {
+- dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
+- DMA_TO_DEVICE);
+- return -ENOMEM;
+- }
+-
+- dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data),
+- DMA_BIDIRECTIONAL);
+-
+- host->dma_dev = dev;
+- return 0;
+-}
+-
+-static void mmc_spi_dma_free(struct mmc_spi_host *host)
+-{
+- if (!host->dma_dev)
+- return;
+-
+- dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
+- DMA_TO_DEVICE);
+- dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data),
+- DMA_BIDIRECTIONAL);
+-}
+-#else
+-static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; }
+-static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {}
+-#endif
+-
+ static int mmc_spi_probe(struct spi_device *spi)
+ {
+ void *ones;
+@@ -1402,24 +1237,17 @@ static int mmc_spi_probe(struct spi_device *spi)
+ host->powerup_msecs = 250;
+ }
+
+- /* preallocate dma buffers */
++ /* Preallocate buffers */
+ host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
+ if (!host->data)
+ goto fail_nobuf1;
+
+- status = mmc_spi_dma_alloc(host);
+- if (status)
+- goto fail_dma;
+-
+ /* setup message for status/busy readback */
+ spi_message_init(&host->readback);
+- host->readback.is_dma_mapped = (host->dma_dev != NULL);
+
+ spi_message_add_tail(&host->status, &host->readback);
+ host->status.tx_buf = host->ones;
+- host->status.tx_dma = host->ones_dma;
+ host->status.rx_buf = &host->data->status;
+- host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
+ host->status.cs_change = 1;
+
+ /* register card detect irq */
+@@ -1464,9 +1292,8 @@ static int mmc_spi_probe(struct spi_device *spi)
+ if (!status)
+ has_ro = true;
+
+- dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
++ dev_info(&spi->dev, "SD/MMC host %s%s%s%s\n",
+ dev_name(&mmc->class_dev),
+- host->dma_dev ? "" : ", no DMA",
+ has_ro ? "" : ", no WP",
+ (host->pdata && host->pdata->setpower)
+ ? "" : ", no poweroff",
+@@ -1477,8 +1304,6 @@ static int mmc_spi_probe(struct spi_device *spi)
+ fail_gpiod_request:
+ mmc_remove_host(mmc);
+ fail_glue_init:
+- mmc_spi_dma_free(host);
+-fail_dma:
+ kfree(host->data);
+ fail_nobuf1:
+ mmc_spi_put_pdata(spi);
+@@ -1500,7 +1325,6 @@ static void mmc_spi_remove(struct spi_device *spi)
+
+ mmc_remove_host(mmc);
+
+- mmc_spi_dma_free(host);
+ kfree(host->data);
+ kfree(host->ones);
+
+diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
+index a7ec947a3ebb1..53019d313db71 100644
+--- a/drivers/mtd/maps/vmu-flash.c
++++ b/drivers/mtd/maps/vmu-flash.c
+@@ -719,7 +719,7 @@ static int vmu_can_unload(struct maple_device *mdev)
+ card = maple_get_drvdata(mdev);
+ for (x = 0; x < card->partitions; x++) {
+ mtd = &((card->mtd)[x]);
+- if (mtd->usecount > 0)
++ if (kref_read(&mtd->refcnt))
+ return 0;
+ }
+ return 1;
+diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
+index 9e24bedffd89a..bbdcfbe643f3f 100644
+--- a/drivers/mtd/nand/raw/nand_base.c
++++ b/drivers/mtd/nand/raw/nand_base.c
+@@ -1207,6 +1207,23 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ return nand_exec_op(chip, &op);
+ }
+
++static void rawnand_cap_cont_reads(struct nand_chip *chip)
++{
++ struct nand_memory_organization *memorg;
++ unsigned int pages_per_lun, first_lun, last_lun;
++
++ memorg = nanddev_get_memorg(&chip->base);
++ pages_per_lun = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
++ first_lun = chip->cont_read.first_page / pages_per_lun;
++ last_lun = chip->cont_read.last_page / pages_per_lun;
++
++ /* Prevent sequential cache reads across LUN boundaries */
++ if (first_lun != last_lun)
++ chip->cont_read.pause_page = first_lun * pages_per_lun + pages_per_lun - 1;
++ else
++ chip->cont_read.pause_page = chip->cont_read.last_page;
++}
++
+ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool check_only)
+@@ -1225,7 +1242,7 @@ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int p
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_op_instr cont_instrs[] = {
+- NAND_OP_CMD(page == chip->cont_read.last_page ?
++ NAND_OP_CMD(page == chip->cont_read.pause_page ?
+ NAND_CMD_READCACHEEND : NAND_CMD_READCACHESEQ,
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+@@ -1262,16 +1279,29 @@ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int p
+ }
+
+ if (page == chip->cont_read.first_page)
+- return nand_exec_op(chip, &start_op);
++ ret = nand_exec_op(chip, &start_op);
+ else
+- return nand_exec_op(chip, &cont_op);
++ ret = nand_exec_op(chip, &cont_op);
++ if (ret)
++ return ret;
++
++ if (!chip->cont_read.ongoing)
++ return 0;
++
++ if (page == chip->cont_read.pause_page &&
++ page != chip->cont_read.last_page) {
++ chip->cont_read.first_page = chip->cont_read.pause_page + 1;
++ rawnand_cap_cont_reads(chip);
++ } else if (page == chip->cont_read.last_page) {
++ chip->cont_read.ongoing = false;
++ }
++
++ return 0;
+ }
+
+ static bool rawnand_cont_read_ongoing(struct nand_chip *chip, unsigned int page)
+ {
+- return chip->cont_read.ongoing &&
+- page >= chip->cont_read.first_page &&
+- page <= chip->cont_read.last_page;
++ return chip->cont_read.ongoing && page >= chip->cont_read.first_page;
+ }
+
+ /**
+@@ -3430,21 +3460,42 @@ static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page,
+ u32 readlen, int col)
+ {
+ struct mtd_info *mtd = nand_to_mtd(chip);
++ unsigned int end_page, end_col;
++
++ chip->cont_read.ongoing = false;
+
+ if (!chip->controller->supported_op.cont_read)
+ return;
+
+- if ((col && col + readlen < (3 * mtd->writesize)) ||
+- (!col && readlen < (2 * mtd->writesize))) {
+- chip->cont_read.ongoing = false;
++ end_page = DIV_ROUND_UP(col + readlen, mtd->writesize);
++ end_col = (col + readlen) % mtd->writesize;
++
++ if (col)
++ page++;
++
++ if (end_col && end_page)
++ end_page--;
++
++ if (page + 1 > end_page)
+ return;
+- }
+
+- chip->cont_read.ongoing = true;
+ chip->cont_read.first_page = page;
+- if (col)
++ chip->cont_read.last_page = end_page;
++ chip->cont_read.ongoing = true;
++
++ rawnand_cap_cont_reads(chip);
++}
++
++static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned int page)
++{
++ if (!chip->cont_read.ongoing || page != chip->cont_read.first_page)
++ return;
++
++ chip->cont_read.first_page++;
++ if (chip->cont_read.first_page == chip->cont_read.pause_page)
+ chip->cont_read.first_page++;
+- chip->cont_read.last_page = page + ((readlen >> chip->page_shift) & chip->pagemask);
++ if (chip->cont_read.first_page >= chip->cont_read.last_page)
++ chip->cont_read.ongoing = false;
+ }
+
+ /**
+@@ -3621,6 +3672,8 @@ read_retry:
+ buf += bytes;
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ chip->pagecache.bitflips);
++
++ rawnand_cont_read_skip_first_page(chip, page);
+ }
+
+ readlen -= bytes;
+@@ -5125,6 +5178,14 @@ static void rawnand_late_check_supported_ops(struct nand_chip *chip)
+ /* The supported_op fields should not be set by individual drivers */
+ WARN_ON_ONCE(chip->controller->supported_op.cont_read);
+
++ /*
++ * Too many devices do not support sequential cached reads with on-die
++ * ECC correction enabled, so in this case refuse to perform the
++ * automation.
++ */
++ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE)
++ return;
++
+ if (!nand_has_exec_op(chip))
+ return;
+
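[Editor's note] rawnand_cap_cont_reads() keeps a sequential cached read from crossing a LUN boundary: if first_page and last_page fall in different LUNs, the read is paused at the last page of the first LUN (pause_page) and resumed from the next LUN once that point is reached. A small runnable sketch of the pause_page arithmetic, with made-up geometry values.

/* Userspace sketch of the pause_page computation in rawnand_cap_cont_reads(). */
#include <stdio.h>

struct cont_read { unsigned int first_page, last_page, pause_page; };

static void cap_cont_reads(struct cont_read *cr, unsigned int pages_per_lun)
{
	unsigned int first_lun = cr->first_page / pages_per_lun;
	unsigned int last_lun = cr->last_page / pages_per_lun;

	/* never let a cached read run across a LUN boundary */
	if (first_lun != last_lun)
		cr->pause_page = first_lun * pages_per_lun + pages_per_lun - 1;
	else
		cr->pause_page = cr->last_page;
}

int main(void)
{
	/* e.g. 64 pages per block, 1024 blocks per LUN -> 65536 pages per LUN */
	unsigned int pages_per_lun = 64 * 1024;
	struct cont_read cr = { .first_page = 65000, .last_page = 66000, .pause_page = 0 };

	cap_cont_reads(&cr, pages_per_lun);
	printf("pause at page %u (last page of LUN 0 is %u)\n",
	       cr.pause_page, pages_per_lun - 1);
	return 0;
}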
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index e1f1e646cf480..22c8bfb5ed9dd 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -10627,10 +10627,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
+ netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
+ goto half_open_err;
+ }
++ bnxt_init_napi(bp);
+ set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+ rc = bnxt_init_nic(bp, true);
+ if (rc) {
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
++ bnxt_del_napi(bp);
+ netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
+ goto half_open_err;
+ }
+@@ -10649,6 +10651,7 @@ half_open_err:
+ void bnxt_half_close_nic(struct bnxt *bp)
+ {
+ bnxt_hwrm_resource_free(bp, false, true);
++ bnxt_del_napi(bp);
+ bnxt_free_skbs(bp);
+ bnxt_free_mem(bp, true);
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+@@ -12298,6 +12301,11 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
+
+ bp->fw_cap = 0;
+ rc = bnxt_hwrm_ver_get(bp);
++ /* FW may be unresponsive after FLR. FLR must complete within 100 msec
++ * so wait before continuing with recovery.
++ */
++ if (rc)
++ msleep(100);
+ bnxt_try_map_fw_health_reg(bp);
+ if (rc) {
+ rc = bnxt_try_recover_fw(bp);
+diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
+index df40c720e7b23..9aeff2b37a612 100644
+--- a/drivers/net/ethernet/engleder/tsnep_main.c
++++ b/drivers/net/ethernet/engleder/tsnep_main.c
+@@ -1485,7 +1485,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
+
+ xdp_prepare_buff(&xdp, page_address(entry->page),
+ XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
+- length, false);
++ length - ETH_FCS_LEN, false);
+
+ consume = tsnep_xdp_run_prog(rx, prog, &xdp,
+ &xdp_status, tx_nq, tx);
+@@ -1568,7 +1568,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
+ prefetch(entry->xdp->data);
+ length = __le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_LENGTH_MASK;
+- xsk_buff_set_size(entry->xdp, length);
++ xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
+ xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
+
+ /* RX metadata with timestamps is in front of actual data,
+@@ -1762,6 +1762,19 @@ static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
+ allocated--;
+ }
+ }
++
++ /* set need wakeup flag immediately if ring is not filled completely,
++ * first polling would be too late as need wakeup signalisation would
++ * be delayed for an indefinite time
++ */
++ if (xsk_uses_need_wakeup(rx->xsk_pool)) {
++ int desc_available = tsnep_rx_desc_available(rx);
++
++ if (desc_available)
++ xsk_set_rx_need_wakeup(rx->xsk_pool);
++ else
++ xsk_clear_rx_need_wakeup(rx->xsk_pool);
++ }
+ }
+
+ static bool tsnep_pending(struct tsnep_queue *queue)
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index e08c7b572497d..c107680985e48 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2036,6 +2036,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
+
+ /* if any of the above changed restart the FEC */
+ if (status_change) {
++ netif_stop_queue(ndev);
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(ndev);
+ fec_restart(ndev);
+@@ -2045,6 +2046,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
+ }
+ } else {
+ if (fep->link) {
++ netif_stop_queue(ndev);
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(ndev);
+ fec_stop(ndev);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index d5519af346577..2bd7b29fb2516 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3588,40 +3588,55 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ struct i40e_hmc_obj_rxq rx_ctx;
+ int err = 0;
+ bool ok;
+- int ret;
+
+ bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
+
+ /* clear the context structure first */
+ memset(&rx_ctx, 0, sizeof(rx_ctx));
+
+- if (ring->vsi->type == I40E_VSI_MAIN)
+- xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
++ ring->rx_buf_len = vsi->rx_buf_len;
++
++ /* XDP RX-queue info only needed for RX rings exposed to XDP */
++ if (ring->vsi->type != I40E_VSI_MAIN)
++ goto skip;
++
++ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
++ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
++ ring->queue_index,
++ ring->q_vector->napi.napi_id,
++ ring->rx_buf_len);
++ if (err)
++ return err;
++ }
+
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
+- ring->rx_buf_len =
+- xsk_pool_get_rx_frame_size(ring->xsk_pool);
+- ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
++ xdp_rxq_info_unreg(&ring->xdp_rxq);
++ ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
++ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
++ ring->queue_index,
++ ring->q_vector->napi.napi_id,
++ ring->rx_buf_len);
++ if (err)
++ return err;
++ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+- if (ret)
+- return ret;
++ if (err)
++ return err;
+ dev_info(&vsi->back->pdev->dev,
+ "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
+ ring->queue_index);
+
+ } else {
+- ring->rx_buf_len = vsi->rx_buf_len;
+- if (ring->vsi->type == I40E_VSI_MAIN) {
+- ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+- MEM_TYPE_PAGE_SHARED,
+- NULL);
+- if (ret)
+- return ret;
+- }
++ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
++ MEM_TYPE_PAGE_SHARED,
++ NULL);
++ if (err)
++ return err;
+ }
+
++skip:
+ xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
+
+ rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index dd410b15000f7..071ef309a3a42 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -1555,7 +1555,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
+ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
+ {
+ struct device *dev = rx_ring->dev;
+- int err;
+
+ u64_stats_init(&rx_ring->syncp);
+
+@@ -1576,14 +1575,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
+ rx_ring->next_to_process = 0;
+ rx_ring->next_to_use = 0;
+
+- /* XDP RX-queue info only needed for RX rings exposed to XDP */
+- if (rx_ring->vsi->type == I40E_VSI_MAIN) {
+- err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+- rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
+- if (err < 0)
+- return err;
+- }
+-
+ rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
+
+ rx_ring->rx_bi =
+@@ -2099,7 +2090,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
+ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
+ struct xdp_buff *xdp)
+ {
+- u32 next = rx_ring->next_to_clean;
++ u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
++ u32 next = rx_ring->next_to_clean, i = 0;
+ struct i40e_rx_buffer *rx_buffer;
+
+ xdp->flags = 0;
+@@ -2112,10 +2104,10 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
+ if (!rx_buffer->page)
+ continue;
+
+- if (xdp_res == I40E_XDP_CONSUMED)
+- rx_buffer->pagecnt_bias++;
+- else
++ if (xdp_res != I40E_XDP_CONSUMED)
+ i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
++ else if (i++ <= nr_frags)
++ rx_buffer->pagecnt_bias++;
+
+ /* EOP buffer will be put in i40e_clean_rx_irq() */
+ if (next == rx_ring->next_to_process)
+@@ -2129,20 +2121,20 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
+ * i40e_construct_skb - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @xdp: xdp_buff pointing to the data
+- * @nr_frags: number of buffers for the packet
+ *
+ * This function allocates an skb. It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
+ */
+ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+- struct xdp_buff *xdp,
+- u32 nr_frags)
++ struct xdp_buff *xdp)
+ {
+ unsigned int size = xdp->data_end - xdp->data;
+ struct i40e_rx_buffer *rx_buffer;
++ struct skb_shared_info *sinfo;
+ unsigned int headlen;
+ struct sk_buff *skb;
++ u32 nr_frags = 0;
+
+ /* prefetch first cache line of first page */
+ net_prefetch(xdp->data);
+@@ -2180,6 +2172,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+ memcpy(__skb_put(skb, headlen), xdp->data,
+ ALIGN(headlen, sizeof(long)));
+
++ if (unlikely(xdp_buff_has_frags(xdp))) {
++ sinfo = xdp_get_shared_info_from_buff(xdp);
++ nr_frags = sinfo->nr_frags;
++ }
+ rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ /* update all of the pointers */
+ size -= headlen;
+@@ -2199,9 +2195,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+ }
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+- struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb);
++ struct skb_shared_info *skinfo = skb_shinfo(skb);
+
+- sinfo = xdp_get_shared_info_from_buff(xdp);
+ memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
+ sizeof(skb_frag_t) * nr_frags);
+
+@@ -2224,17 +2219,17 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+ * i40e_build_skb - Build skb around an existing buffer
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @xdp: xdp_buff pointing to the data
+- * @nr_frags: number of buffers for the packet
+ *
+ * This function builds an skb around an existing Rx buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead.
+ */
+ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
+- struct xdp_buff *xdp,
+- u32 nr_frags)
++ struct xdp_buff *xdp)
+ {
+ unsigned int metasize = xdp->data - xdp->data_meta;
++ struct skb_shared_info *sinfo;
+ struct sk_buff *skb;
++ u32 nr_frags;
+
+ /* Prefetch first cache line of first page. If xdp->data_meta
+ * is unused, this points exactly as xdp->data, otherwise we
+@@ -2243,6 +2238,11 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
+ */
+ net_prefetch(xdp->data_meta);
+
++ if (unlikely(xdp_buff_has_frags(xdp))) {
++ sinfo = xdp_get_shared_info_from_buff(xdp);
++ nr_frags = sinfo->nr_frags;
++ }
++
+ /* build an skb around the page buffer */
+ skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
+ if (unlikely(!skb))
+@@ -2255,9 +2255,6 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
+ skb_metadata_set(skb, metasize);
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+- struct skb_shared_info *sinfo;
+-
+- sinfo = xdp_get_shared_info_from_buff(xdp);
+ xdp_update_skb_shared_info(skb, nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+@@ -2602,9 +2599,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
+ total_rx_bytes += size;
+ } else {
+ if (ring_uses_build_skb(rx_ring))
+- skb = i40e_build_skb(rx_ring, xdp, nfrags);
++ skb = i40e_build_skb(rx_ring, xdp);
+ else
+- skb = i40e_construct_skb(rx_ring, xdp, nfrags);
++ skb = i40e_construct_skb(rx_ring, xdp);
+
+ /* drop if we failed to retrieve a buffer */
+ if (!skb) {
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+index e99fa854d17f1..65f38a57b3dfe 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+@@ -414,7 +414,8 @@ i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
+ }
+
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
+- virt_to_page(xdp->data_hard_start), 0, size);
++ virt_to_page(xdp->data_hard_start),
++ XDP_PACKET_HEADROOM, size);
+ sinfo->xdp_frags_size += size;
+ xsk_buff_add_frag(xdp);
+
+@@ -499,7 +500,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
+ xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
+ i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
+ &rx_bytes, xdp_res, &failure);
+- first->flags = 0;
+ next_to_clean = next_to_process;
+ if (failure)
+ break;
+diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
+index 7fa43827a3f06..4f3e65b47cdc3 100644
+--- a/drivers/net/ethernet/intel/ice/ice_base.c
++++ b/drivers/net/ethernet/intel/ice/ice_base.c
+@@ -534,19 +534,27 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+ ring->rx_buf_len = ring->vsi->rx_buf_len;
+
+ if (ring->vsi->type == ICE_VSI_PF) {
+- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+- /* coverity[check_return] */
+- __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+- ring->q_index,
+- ring->q_vector->napi.napi_id,
+- ring->vsi->rx_buf_len);
++ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
++ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
++ ring->q_index,
++ ring->q_vector->napi.napi_id,
++ ring->rx_buf_len);
++ if (err)
++ return err;
++ }
+
+ ring->xsk_pool = ice_xsk_pool(ring);
+ if (ring->xsk_pool) {
+- xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
++ xdp_rxq_info_unreg(&ring->xdp_rxq);
+
+ ring->rx_buf_len =
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
++ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
++ ring->q_index,
++ ring->q_vector->napi.napi_id,
++ ring->rx_buf_len);
++ if (err)
++ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+@@ -557,13 +565,14 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+ dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
+ ring->q_index);
+ } else {
+- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+- /* coverity[check_return] */
+- __xdp_rxq_info_reg(&ring->xdp_rxq,
+- ring->netdev,
+- ring->q_index,
+- ring->q_vector->napi.napi_id,
+- ring->vsi->rx_buf_len);
++ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
++ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
++ ring->q_index,
++ ring->q_vector->napi.napi_id,
++ ring->rx_buf_len);
++ if (err)
++ return err;
++ }
+
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index 9e97ea8630686..9170a3e8f0884 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -513,11 +513,6 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
+ if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+ WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
+
+- if (rx_ring->vsi->type == ICE_VSI_PF &&
+- !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+- if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+- rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
+- goto err;
+ return 0;
+
+ err:
+@@ -600,9 +595,7 @@ out_failure:
+ ret = ICE_XDP_CONSUMED;
+ }
+ exit:
+- rx_buf->act = ret;
+- if (unlikely(xdp_buff_has_frags(xdp)))
+- ice_set_rx_bufs_act(xdp, rx_ring, ret);
++ ice_set_rx_bufs_act(xdp, rx_ring, ret);
+ }
+
+ /**
+@@ -890,14 +883,17 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ }
+
+ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
+- if (unlikely(xdp_buff_has_frags(xdp)))
+- ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
++ ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+ return -ENOMEM;
+ }
+
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
+ rx_buf->page_offset, size);
+ sinfo->xdp_frags_size += size;
++ /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
++ * can pop off frags but driver has to handle it on its own
++ */
++ rx_ring->nr_frags = sinfo->nr_frags;
+
+ if (page_is_pfmemalloc(rx_buf->page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+@@ -1249,6 +1245,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
++ rx_ring->nr_frags = 0;
+ continue;
+ construct_skb:
+ if (likely(ice_ring_uses_build_skb(rx_ring)))
+@@ -1264,10 +1261,12 @@ construct_skb:
+ ICE_XDP_CONSUMED);
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
++ rx_ring->nr_frags = 0;
+ break;
+ }
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
++ rx_ring->nr_frags = 0;
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
+ if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
+index daf7b9dbb1435..b28b9826bbcd3 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
+@@ -333,6 +333,7 @@ struct ice_rx_ring {
+ struct ice_channel *ch;
+ struct ice_tx_ring *xdp_ring;
+ struct xsk_buff_pool *xsk_pool;
++ u32 nr_frags;
+ dma_addr_t dma; /* physical address of ring */
+ u64 cached_phctime;
+ u16 rx_buf_len;
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+index 115969ecdf7b9..b0e56675f98b2 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+@@ -12,26 +12,39 @@
+ * act: action to store onto Rx buffers related to XDP buffer parts
+ *
+ * Set action that should be taken before putting Rx buffer from first frag
+- * to one before last. Last one is handled by caller of this function as it
+- * is the EOP frag that is currently being processed. This function is
+- * supposed to be called only when XDP buffer contains frags.
++ * to the last.
+ */
+ static inline void
+ ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
+ const unsigned int act)
+ {
+- const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+- u32 first = rx_ring->first_desc;
+- u32 nr_frags = sinfo->nr_frags;
++ u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
++ u32 nr_frags = rx_ring->nr_frags + 1;
++ u32 idx = rx_ring->first_desc;
+ u32 cnt = rx_ring->count;
+ struct ice_rx_buf *buf;
+
+ for (int i = 0; i < nr_frags; i++) {
+- buf = &rx_ring->rx_buf[first];
++ buf = &rx_ring->rx_buf[idx];
+ buf->act = act;
+
+- if (++first == cnt)
+- first = 0;
++ if (++idx == cnt)
++ idx = 0;
++ }
++
++ /* adjust pagecnt_bias on frags freed by XDP prog */
++ if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
++ u32 delta = rx_ring->nr_frags - sinfo_frags;
++
++ while (delta) {
++ if (idx == 0)
++ idx = cnt - 1;
++ else
++ idx--;
++ buf = &rx_ring->rx_buf[idx];
++ buf->pagecnt_bias--;
++ delta--;
++ }
+ }
+ }
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 99954508184f9..f3663b3f6390e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -820,7 +820,8 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
+ }
+
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
+- virt_to_page(xdp->data_hard_start), 0, size);
++ virt_to_page(xdp->data_hard_start),
++ XDP_PACKET_HEADROOM, size);
+ sinfo->xdp_frags_size += size;
+ xsk_buff_add_frag(xdp);
+
+@@ -891,7 +892,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
+
+ if (!first) {
+ first = xdp;
+- xdp_buff_clear_frags_flag(first);
+ } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
+ break;
+ }
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+index 19809b0ddcd90..0241e498cc20f 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+@@ -783,6 +783,8 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
+ /* setup watchdog timeout value to be 5 second */
+ netdev->watchdog_timeo = 5 * HZ;
+
++ netdev->dev_port = idx;
++
+ /* configure default MTU size */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = vport->max_mtu;
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 93137606869e2..065f07392c964 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -614,12 +614,38 @@ static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
+ mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
+ }
+
++/* Cleanup pool before actual initialization in the OS */
++static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
++{
++ unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
++ u32 val;
++ int i;
++
++ /* Drain the BM from all possible residues left by firmware */
++ for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
++ mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));
++
++ put_cpu();
++
++ /* Stop the BM pool */
++ val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
++ val |= MVPP2_BM_STOP_MASK;
++ mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
++}
++
+ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
+ {
+ enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
+ int i, err, poolnum = MVPP2_BM_POOLS_NUM;
+ struct mvpp2_port *port;
+
++ if (priv->percpu_pools)
++ poolnum = mvpp2_get_nrxqs(priv) * 2;
++
++ /* Clean up the pool state in case it contains stale state */
++ for (i = 0; i < poolnum; i++)
++ mvpp2_bm_pool_cleanup(priv, i);
++
+ if (priv->percpu_pools) {
+ for (i = 0; i < priv->port_count; i++) {
+ port = priv->port_list[i];
+@@ -629,7 +655,6 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
+ }
+ }
+
+- poolnum = mvpp2_get_nrxqs(priv) * 2;
+ for (i = 0; i < poolnum; i++) {
+ /* the pool in use */
+ int pn = i / (poolnum / 2);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index a7b1f9686c09a..4957412ff1f65 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -1923,6 +1923,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
+ {
+ const char *namep = mlx5_command_str(opcode);
+ struct mlx5_cmd_stats *stats;
++ unsigned long flags;
+
+ if (!err || !(strcmp(namep, "unknown command opcode")))
+ return;
+@@ -1930,7 +1931,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
+ stats = xa_load(&dev->cmd.stats, opcode);
+ if (!stats)
+ return;
+- spin_lock_irq(&stats->lock);
++ spin_lock_irqsave(&stats->lock, flags);
+ stats->failed++;
+ if (err < 0)
+ stats->last_failed_errno = -err;
+@@ -1939,7 +1940,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
+ stats->last_failed_mbox_status = status;
+ stats->last_failed_syndrome = syndrome;
+ }
+- spin_unlock_irq(&stats->lock);
++ spin_unlock_irqrestore(&stats->lock, flags);
+ }
+
+ /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+index e1283531e0b81..671adbad0a40f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+@@ -436,6 +436,7 @@ static int fs_any_create_groups(struct mlx5e_flow_table *ft)
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in || !ft->g) {
+ kfree(ft->g);
++ ft->g = NULL;
+ kvfree(in);
+ return -ENOMEM;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+index e097f336e1c4a..30507b7c2fb17 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+@@ -1062,8 +1062,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ bool allow_swp;
+
+- allow_swp =
+- mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
++ allow_swp = mlx5_geneve_tx_allowed(mdev) ||
++ (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
+ mlx5e_build_sq_param_common(mdev, param);
+ MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
+ MLX5_SET(sqc, sqc, allow_swp, allow_swp);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+index af3928eddafd1..803035d4e5976 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+@@ -213,7 +213,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
+ mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
+ out:
+ napi_consume_skb(skb, budget);
+- md_buff[*md_buff_sz++] = metadata_id;
++ md_buff[(*md_buff_sz)++] = metadata_id;
+ if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
+ !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+ queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+index 161c5190c236a..05612d9c6080c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+@@ -336,12 +336,17 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
+ /* iv len */
+ aes_gcm->icv_len = x->aead->alg_icv_len;
+
++ attrs->dir = x->xso.dir;
++
+ /* esn */
+ if (x->props.flags & XFRM_STATE_ESN) {
+ attrs->replay_esn.trigger = true;
+ attrs->replay_esn.esn = sa_entry->esn_state.esn;
+ attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
+ attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
++ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
++ goto skip_replay_window;
++
+ switch (x->replay_esn->replay_window) {
+ case 32:
+ attrs->replay_esn.replay_window =
+@@ -365,7 +370,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
+ }
+ }
+
+- attrs->dir = x->xso.dir;
++skip_replay_window:
+ /* spi */
+ attrs->spi = be32_to_cpu(x->id.spi);
+
+@@ -501,7 +506,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
+ return -EINVAL;
+ }
+
+- if (x->replay_esn && x->replay_esn->replay_window != 32 &&
++ if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
++ x->replay_esn->replay_window != 32 &&
+ x->replay_esn->replay_window != 64 &&
+ x->replay_esn->replay_window != 128 &&
+ x->replay_esn->replay_window != 256) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+index bb7f86c993e55..e66f486faafe1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+@@ -254,11 +254,13 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
+
+ ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
+ sizeof(*ft->g), GFP_KERNEL);
+- in = kvzalloc(inlen, GFP_KERNEL);
+- if (!in || !ft->g) {
+- kfree(ft->g);
+- kvfree(in);
++ if (!ft->g)
+ return -ENOMEM;
++
++ in = kvzalloc(inlen, GFP_KERNEL);
++ if (!in) {
++ err = -ENOMEM;
++ goto err_free_g;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+@@ -278,7 +280,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
+ break;
+ default:
+ err = -EINVAL;
+- goto out;
++ goto err_free_in;
+ }
+
+ switch (type) {
+@@ -300,7 +302,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
+ break;
+ default:
+ err = -EINVAL;
+- goto out;
++ goto err_free_in;
+ }
+
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+@@ -309,7 +311,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+- goto err;
++ goto err_clean_group;
+ ft->num_groups++;
+
+ memset(in, 0, inlen);
+@@ -318,18 +320,20 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+- goto err;
++ goto err_clean_group;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+-err:
++err_clean_group:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+-out:
++err_free_in:
+ kvfree(in);
+-
++err_free_g:
++ kfree(ft->g);
++ ft->g = NULL;
+ return err;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 96af9e2ab1d87..404dd1d9b28bf 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -761,7 +761,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
+
+ err = mlx5e_rss_params_indir_init(&indir, mdev,
+ mlx5e_rqt_size(mdev, hp->num_channels),
+- mlx5e_rqt_size(mdev, priv->max_nch));
++ mlx5e_rqt_size(mdev, hp->num_channels));
+ if (err)
+ return err;
+
+@@ -2014,9 +2014,10 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
+ list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
+ if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
+ continue;
++
++ list_del(&peer_flow->peer_flows);
+ if (refcount_dec_and_test(&peer_flow->refcnt)) {
+ mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
+- list_del(&peer_flow->peer_flows);
+ kfree(peer_flow);
+ }
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
+index a7ed87e9d8426..22dd30cf8033f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
+@@ -83,6 +83,7 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md
+ i++;
+ }
+
++ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
+ rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
+ ether_addr_copy(dmac_v, entry->key.addr);
+@@ -587,6 +588,7 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
++ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
+ rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+@@ -662,6 +664,7 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
+ dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ dest.vport.vhca_id = port->esw_owner_vhca_id;
+ }
++ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
+ handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);
+
+ kvfree(rule_spec);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+index a4b9253316618..b29299c49ab3d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+@@ -566,6 +566,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
+ fte->flow_context.flow_tag);
+ MLX5_SET(flow_context, in_flow_context, flow_source,
+ fte->flow_context.flow_source);
++ MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
++ !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
+
+ MLX5_SET(flow_context, in_flow_context, extended_destination,
+ extended_dest);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+index 40c7be1240416..58bd749b5e4de 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+@@ -98,7 +98,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+- MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
++ MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+index e3ec559369fa0..d2b65a0ce47b6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+@@ -788,6 +788,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
+ switch (action_type) {
+ case DR_ACTION_TYP_DROP:
+ attr.final_icm_addr = nic_dmn->drop_icm_addr;
++ attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
+ break;
+ case DR_ACTION_TYP_FT:
+ dest_action = action;
+@@ -873,11 +874,17 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
+ action->sampler->tx_icm_addr;
+ break;
+ case DR_ACTION_TYP_VPORT:
+- attr.hit_gvmi = action->vport->caps->vhca_gvmi;
+- dest_action = action;
+- attr.final_icm_addr = rx_rule ?
+- action->vport->caps->icm_address_rx :
+- action->vport->caps->icm_address_tx;
++ if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) {
++ /* can't go to uplink on RX rule - dropping instead */
++ attr.final_icm_addr = nic_dmn->drop_icm_addr;
++ attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
++ } else {
++ attr.hit_gvmi = action->vport->caps->vhca_gvmi;
++ dest_action = action;
++ attr.final_icm_addr = rx_rule ?
++ action->vport->caps->icm_address_rx :
++ action->vport->caps->icm_address_tx;
++ }
+ break;
+ case DR_ACTION_TYP_POP_VLAN:
+ if (!rx_rule && !(dmn->ste_ctx->actions_caps &
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 49b81daf7411d..d094c3c1e2ee7 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -7467,6 +7467,9 @@ int stmmac_dvr_probe(struct device *device,
+ dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
+ ERR_PTR(ret));
+
++ /* Wait a bit for the reset to take effect */
++ udelay(10);
++
+ /* Init MAC and get the capabilities */
+ ret = stmmac_hw_init(priv);
+ if (ret)
+diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
+index 704e949484d0c..b9b5554ea8620 100644
+--- a/drivers/net/fjes/fjes_hw.c
++++ b/drivers/net/fjes/fjes_hw.c
+@@ -221,21 +221,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
+
+ mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
+ hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
+- if (!(hw->hw_info.req_buf))
+- return -ENOMEM;
++ if (!(hw->hw_info.req_buf)) {
++ result = -ENOMEM;
++ goto free_ep_info;
++ }
+
+ hw->hw_info.req_buf_size = mem_size;
+
+ mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
+ hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
+- if (!(hw->hw_info.res_buf))
+- return -ENOMEM;
++ if (!(hw->hw_info.res_buf)) {
++ result = -ENOMEM;
++ goto free_req_buf;
++ }
+
+ hw->hw_info.res_buf_size = mem_size;
+
+ result = fjes_hw_alloc_shared_status_region(hw);
+ if (result)
+- return result;
++ goto free_res_buf;
+
+ hw->hw_info.buffer_share_bit = 0;
+ hw->hw_info.buffer_unshare_reserve_bit = 0;
+@@ -246,11 +250,11 @@ static int fjes_hw_setup(struct fjes_hw *hw)
+
+ result = fjes_hw_alloc_epbuf(&buf_pair->tx);
+ if (result)
+- return result;
++ goto free_epbuf;
+
+ result = fjes_hw_alloc_epbuf(&buf_pair->rx);
+ if (result)
+- return result;
++ goto free_epbuf;
+
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
+ fjes_hw_setup_epbuf(&buf_pair->tx, mac,
+@@ -273,6 +277,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
+ fjes_hw_init_command_registers(hw, &param);
+
+ return 0;
++
++free_epbuf:
++ for (epidx = 0; epidx < hw->max_epid ; epidx++) {
++ if (epidx == hw->my_epid)
++ continue;
++ fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
++ fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
++ }
++ fjes_hw_free_shared_status_region(hw);
++free_res_buf:
++ kfree(hw->hw_info.res_buf);
++ hw->hw_info.res_buf = NULL;
++free_req_buf:
++ kfree(hw->hw_info.req_buf);
++ hw->hw_info.req_buf = NULL;
++free_ep_info:
++ kfree(hw->ep_shm_info);
++ hw->ep_shm_info = NULL;
++ return result;
+ }
+
+ static void fjes_hw_cleanup(struct fjes_hw *hw)
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 706ea5263e879..cd15d7b380ab5 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -44,7 +44,7 @@
+
+ static unsigned int ring_size __ro_after_init = 128;
+ module_param(ring_size, uint, 0444);
+-MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
++MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)");
+ unsigned int netvsc_ring_bytes __ro_after_init;
+
+ static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+@@ -2805,7 +2805,7 @@ static int __init netvsc_drv_init(void)
+ pr_info("Increased ring_size to %u (min allowed)\n",
+ ring_size);
+ }
+- netvsc_ring_bytes = ring_size * PAGE_SIZE;
++ netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096);
+
+ register_netdevice_notifier(&netvsc_netdev_notifier);
+
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index ce5ad4a824813..858175ca58cd8 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -120,6 +120,11 @@
+ */
+ #define LAN8814_1PPM_FORMAT 17179
+
++#define PTP_RX_VERSION 0x0248
++#define PTP_TX_VERSION 0x0288
++#define PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8)
++#define PTP_MIN_VERSION(x) ((x) & GENMASK(7, 0))
++
+ #define PTP_RX_MOD 0x024F
+ #define PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3)
+ #define PTP_RX_TIMESTAMP_EN 0x024D
+@@ -3147,6 +3152,12 @@ static void lan8814_ptp_init(struct phy_device *phydev)
+ lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
+
++ /* Disable checking for minorVersionPTP field */
++ lanphy_write_page_reg(phydev, 5, PTP_RX_VERSION,
++ PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
++ lanphy_write_page_reg(phydev, 5, PTP_TX_VERSION,
++ PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
++
+ skb_queue_head_init(&ptp_priv->tx_queue);
+ skb_queue_head_init(&ptp_priv->rx_queue);
+ INIT_LIST_HEAD(&ptp_priv->rx_ts_list);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index afa5497f7c35c..4a4f8c8e79fa1 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1630,13 +1630,19 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
+ switch (act) {
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
+- if (err)
++ if (err) {
++ dev_core_stats_rx_dropped_inc(tun->dev);
+ return err;
++ }
++ dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
+ break;
+ case XDP_TX:
+ err = tun_xdp_tx(tun->dev, xdp);
+- if (err < 0)
++ if (err < 0) {
++ dev_core_stats_rx_dropped_inc(tun->dev);
+ return err;
++ }
++ dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
+ break;
+ case XDP_PASS:
+ break;
+diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
+index f12b606e2d2e5..667d55e261560 100644
+--- a/drivers/net/wireless/ath/ath11k/core.h
++++ b/drivers/net/wireless/ath/ath11k/core.h
+@@ -368,10 +368,6 @@ struct ath11k_vif {
+ struct ieee80211_chanctx_conf chanctx;
+ struct ath11k_arp_ns_offload arp_ns_offload;
+ struct ath11k_rekey_data rekey_data;
+-
+-#ifdef CONFIG_ATH11K_DEBUGFS
+- struct dentry *debugfs_twt;
+-#endif /* CONFIG_ATH11K_DEBUGFS */
+ };
+
+ struct ath11k_vif_iter {
+diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
+index be76e7d1c4366..0796f4d92b477 100644
+--- a/drivers/net/wireless/ath/ath11k/debugfs.c
++++ b/drivers/net/wireless/ath/ath11k/debugfs.c
+@@ -1893,35 +1893,30 @@ static const struct file_operations ath11k_fops_twt_resume_dialog = {
+ .open = simple_open
+ };
+
+-void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
++void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif)
+ {
++ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_base *ab = arvif->ar->ab;
++ struct dentry *debugfs_twt;
+
+ if (arvif->vif->type != NL80211_IFTYPE_AP &&
+ !(arvif->vif->type == NL80211_IFTYPE_STATION &&
+ test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map)))
+ return;
+
+- arvif->debugfs_twt = debugfs_create_dir("twt",
+- arvif->vif->debugfs_dir);
+- debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
++ debugfs_twt = debugfs_create_dir("twt",
++ arvif->vif->debugfs_dir);
++ debugfs_create_file("add_dialog", 0200, debugfs_twt,
+ arvif, &ath11k_fops_twt_add_dialog);
+
+- debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
++ debugfs_create_file("del_dialog", 0200, debugfs_twt,
+ arvif, &ath11k_fops_twt_del_dialog);
+
+- debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
++ debugfs_create_file("pause_dialog", 0200, debugfs_twt,
+ arvif, &ath11k_fops_twt_pause_dialog);
+
+- debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
++ debugfs_create_file("resume_dialog", 0200, debugfs_twt,
+ arvif, &ath11k_fops_twt_resume_dialog);
+ }
+
+-void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
+-{
+- if (!arvif->debugfs_twt)
+- return;
+-
+- debugfs_remove_recursive(arvif->debugfs_twt);
+- arvif->debugfs_twt = NULL;
+-}
+diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
+index 3af0169f6cf21..6f630b42e95c7 100644
+--- a/drivers/net/wireless/ath/ath11k/debugfs.h
++++ b/drivers/net/wireless/ath/ath11k/debugfs.h
+@@ -306,8 +306,8 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
+ return ar->debug.rx_filter;
+ }
+
+-void ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
+-void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
++void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif);
+ void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+ enum ath11k_dbg_dbr_event event,
+@@ -386,14 +386,6 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
+ return 0;
+ }
+
+-static inline void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+-{
+-}
+-
+-static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
+-{
+-}
+-
+ static inline void
+ ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 7f7b398177737..71c6dab1aedba 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -6750,13 +6750,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
+ goto err;
+ }
+
+- /* In the case of hardware recovery, debugfs files are
+- * not deleted since ieee80211_ops.remove_interface() is
+- * not invoked. In such cases, try to delete the files.
+- * These will be re-created later.
+- */
+- ath11k_debugfs_remove_interface(arvif);
+-
+ memset(arvif, 0, sizeof(*arvif));
+
+ arvif->ar = ar;
+@@ -6933,8 +6926,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
+
+ ath11k_dp_vdev_tx_attach(ar, arvif);
+
+- ath11k_debugfs_add_interface(arvif);
+-
+ if (vif->type != NL80211_IFTYPE_MONITOR &&
+ test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_vdev_create(ar);
+@@ -7050,8 +7041,6 @@ err_vdev_del:
+ /* Recalc txpower for remaining vdev */
+ ath11k_mac_txpower_recalc(ar);
+
+- ath11k_debugfs_remove_interface(arvif);
+-
+ /* TODO: recal traffic pause state based on the available vdevs */
+
+ mutex_unlock(&ar->conf_mutex);
+@@ -9149,6 +9138,7 @@ static const struct ieee80211_ops ath11k_ops = {
+ #endif
+
+ #ifdef CONFIG_ATH11K_DEBUGFS
++ .vif_add_debugfs = ath11k_debugfs_op_vif_add,
+ .sta_add_debugfs = ath11k_debugfs_sta_op_add,
+ #endif
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+index b658cf228fbe2..9160d81a871ee 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2018-2023 Intel Corporation
++ * Copyright (C) 2018-2024 Intel Corporation
+ */
+ #include <linux/firmware.h>
+ #include "iwl-drv.h"
+@@ -1096,7 +1096,7 @@ static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
+ node_trig = (void *)node_tlv->data;
+ }
+
+- memcpy(node_trig->data + offset, trig->data, trig_data_len);
++ memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len);
+ node_tlv->length = cpu_to_le32(size);
+
+ if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 84f345c69ea56..b2971dd953358 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -1378,12 +1378,12 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+ * value of the frequency. In such a case, do not abort but
+ * configure the hardware to the desired frequency forcefully.
+ */
+- forced = opp_table->rate_clk_single != target_freq;
++ forced = opp_table->rate_clk_single != freq;
+ }
+
+- ret = _set_opp(dev, opp_table, opp, &target_freq, forced);
++ ret = _set_opp(dev, opp_table, opp, &freq, forced);
+
+- if (target_freq)
++ if (freq)
+ dev_pm_opp_put(opp);
+
+ put_opp_table:
+diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c
+index bb0d92461b08b..7a6a3e7f2825b 100644
+--- a/drivers/parisc/power.c
++++ b/drivers/parisc/power.c
+@@ -213,7 +213,7 @@ static int __init power_init(void)
+ if (running_on_qemu && soft_power_reg)
+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_DEFAULT,
+ qemu_power_off, (void *)soft_power_reg);
+- else
++ if (!running_on_qemu || soft_power_reg)
+ power_task = kthread_run(kpowerswd, (void*)soft_power_reg,
+ KTHREAD_NAME);
+ if (IS_ERR(power_task)) {
+diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
+index 1dd84c7a79de9..b1995ac268d77 100644
+--- a/drivers/platform/mellanox/mlxbf-pmc.c
++++ b/drivers/platform/mellanox/mlxbf-pmc.c
+@@ -1170,7 +1170,7 @@ static int mlxbf_pmc_program_crspace_counter(int blk_num, uint32_t cnt_num,
+ int ret;
+
+ addr = pmc->block[blk_num].mmio_base +
+- (rounddown(cnt_num, 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
++ ((cnt_num / 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
+ ret = mlxbf_pmc_readl(addr, &word);
+ if (ret)
+ return ret;
+@@ -1413,7 +1413,7 @@ static int mlxbf_pmc_read_crspace_event(int blk_num, uint32_t cnt_num,
+ int ret;
+
+ addr = pmc->block[blk_num].mmio_base +
+- (rounddown(cnt_num, 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
++ ((cnt_num / 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
+ ret = mlxbf_pmc_readl(addr, &word);
+ if (ret)
+ return ret;
+diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
+index a1ee1a74fc3c4..2cf3b4a8813f9 100644
+--- a/drivers/platform/x86/intel/ifs/load.c
++++ b/drivers/platform/x86/intel/ifs/load.c
+@@ -399,7 +399,8 @@ int ifs_load_firmware(struct device *dev)
+ if (fw->size != expected_size) {
+ dev_err(dev, "File size mismatch (expected %u, actual %zu). Corrupted IFS image.\n",
+ expected_size, fw->size);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto release;
+ }
+
+ ret = image_sanity_check(dev, (struct microcode_header_intel *)fw->data);
+diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+index 33ab207493e3e..33bb58dc3f78c 100644
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+@@ -23,23 +23,23 @@ static int (*uncore_read)(struct uncore_data *data, unsigned int *min, unsigned
+ static int (*uncore_write)(struct uncore_data *data, unsigned int input, unsigned int min_max);
+ static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq);
+
+-static ssize_t show_domain_id(struct device *dev, struct device_attribute *attr, char *buf)
++static ssize_t show_domain_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+ {
+- struct uncore_data *data = container_of(attr, struct uncore_data, domain_id_dev_attr);
++ struct uncore_data *data = container_of(attr, struct uncore_data, domain_id_kobj_attr);
+
+ return sprintf(buf, "%u\n", data->domain_id);
+ }
+
+-static ssize_t show_fabric_cluster_id(struct device *dev, struct device_attribute *attr, char *buf)
++static ssize_t show_fabric_cluster_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+ {
+- struct uncore_data *data = container_of(attr, struct uncore_data, fabric_cluster_id_dev_attr);
++ struct uncore_data *data = container_of(attr, struct uncore_data, fabric_cluster_id_kobj_attr);
+
+ return sprintf(buf, "%u\n", data->cluster_id);
+ }
+
+-static ssize_t show_package_id(struct device *dev, struct device_attribute *attr, char *buf)
++static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+ {
+- struct uncore_data *data = container_of(attr, struct uncore_data, package_id_dev_attr);
++ struct uncore_data *data = container_of(attr, struct uncore_data, package_id_kobj_attr);
+
+ return sprintf(buf, "%u\n", data->package_id);
+ }
+@@ -97,30 +97,30 @@ static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf)
+ }
+
+ #define store_uncore_min_max(name, min_max) \
+- static ssize_t store_##name(struct device *dev, \
+- struct device_attribute *attr, \
++ static ssize_t store_##name(struct kobject *kobj, \
++ struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+- struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
++ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
+ \
+ return store_min_max_freq_khz(data, buf, count, \
+ min_max); \
+ }
+
+ #define show_uncore_min_max(name, min_max) \
+- static ssize_t show_##name(struct device *dev, \
+- struct device_attribute *attr, char *buf)\
++ static ssize_t show_##name(struct kobject *kobj, \
++ struct kobj_attribute *attr, char *buf)\
+ { \
+- struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
++ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
+ \
+ return show_min_max_freq_khz(data, buf, min_max); \
+ }
+
+ #define show_uncore_perf_status(name) \
+- static ssize_t show_##name(struct device *dev, \
+- struct device_attribute *attr, char *buf)\
++ static ssize_t show_##name(struct kobject *kobj, \
++ struct kobj_attribute *attr, char *buf)\
+ { \
+- struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
++ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
+ \
+ return show_perf_status_freq_khz(data, buf); \
+ }
+@@ -134,11 +134,11 @@ show_uncore_min_max(max_freq_khz, 1);
+ show_uncore_perf_status(current_freq_khz);
+
+ #define show_uncore_data(member_name) \
+- static ssize_t show_##member_name(struct device *dev, \
+- struct device_attribute *attr, char *buf)\
++ static ssize_t show_##member_name(struct kobject *kobj, \
++ struct kobj_attribute *attr, char *buf)\
+ { \
+ struct uncore_data *data = container_of(attr, struct uncore_data,\
+- member_name##_dev_attr);\
++ member_name##_kobj_attr);\
+ \
+ return sysfs_emit(buf, "%u\n", \
+ data->member_name); \
+@@ -149,29 +149,29 @@ show_uncore_data(initial_max_freq_khz);
+
+ #define init_attribute_rw(_name) \
+ do { \
+- sysfs_attr_init(&data->_name##_dev_attr.attr); \
+- data->_name##_dev_attr.show = show_##_name; \
+- data->_name##_dev_attr.store = store_##_name; \
+- data->_name##_dev_attr.attr.name = #_name; \
+- data->_name##_dev_attr.attr.mode = 0644; \
++ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
++ data->_name##_kobj_attr.show = show_##_name; \
++ data->_name##_kobj_attr.store = store_##_name; \
++ data->_name##_kobj_attr.attr.name = #_name; \
++ data->_name##_kobj_attr.attr.mode = 0644; \
+ } while (0)
+
+ #define init_attribute_ro(_name) \
+ do { \
+- sysfs_attr_init(&data->_name##_dev_attr.attr); \
+- data->_name##_dev_attr.show = show_##_name; \
+- data->_name##_dev_attr.store = NULL; \
+- data->_name##_dev_attr.attr.name = #_name; \
+- data->_name##_dev_attr.attr.mode = 0444; \
++ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
++ data->_name##_kobj_attr.show = show_##_name; \
++ data->_name##_kobj_attr.store = NULL; \
++ data->_name##_kobj_attr.attr.name = #_name; \
++ data->_name##_kobj_attr.attr.mode = 0444; \
+ } while (0)
+
+ #define init_attribute_root_ro(_name) \
+ do { \
+- sysfs_attr_init(&data->_name##_dev_attr.attr); \
+- data->_name##_dev_attr.show = show_##_name; \
+- data->_name##_dev_attr.store = NULL; \
+- data->_name##_dev_attr.attr.name = #_name; \
+- data->_name##_dev_attr.attr.mode = 0400; \
++ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
++ data->_name##_kobj_attr.show = show_##_name; \
++ data->_name##_kobj_attr.store = NULL; \
++ data->_name##_kobj_attr.attr.name = #_name; \
++ data->_name##_kobj_attr.attr.mode = 0400; \
+ } while (0)
+
+ static int create_attr_group(struct uncore_data *data, char *name)
+@@ -186,21 +186,21 @@ static int create_attr_group(struct uncore_data *data, char *name)
+
+ if (data->domain_id != UNCORE_DOMAIN_ID_INVALID) {
+ init_attribute_root_ro(domain_id);
+- data->uncore_attrs[index++] = &data->domain_id_dev_attr.attr;
++ data->uncore_attrs[index++] = &data->domain_id_kobj_attr.attr;
+ init_attribute_root_ro(fabric_cluster_id);
+- data->uncore_attrs[index++] = &data->fabric_cluster_id_dev_attr.attr;
++ data->uncore_attrs[index++] = &data->fabric_cluster_id_kobj_attr.attr;
+ init_attribute_root_ro(package_id);
+- data->uncore_attrs[index++] = &data->package_id_dev_attr.attr;
++ data->uncore_attrs[index++] = &data->package_id_kobj_attr.attr;
+ }
+
+- data->uncore_attrs[index++] = &data->max_freq_khz_dev_attr.attr;
+- data->uncore_attrs[index++] = &data->min_freq_khz_dev_attr.attr;
+- data->uncore_attrs[index++] = &data->initial_min_freq_khz_dev_attr.attr;
+- data->uncore_attrs[index++] = &data->initial_max_freq_khz_dev_attr.attr;
++ data->uncore_attrs[index++] = &data->max_freq_khz_kobj_attr.attr;
++ data->uncore_attrs[index++] = &data->min_freq_khz_kobj_attr.attr;
++ data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr;
++ data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr;
+
+ ret = uncore_read_freq(data, &freq);
+ if (!ret)
+- data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
++ data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr;
+
+ data->uncore_attrs[index] = NULL;
+
+diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
+index 7afb69977c7e8..0e5bf507e5552 100644
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
+@@ -26,14 +26,14 @@
+ * @instance_id: Unique instance id to append to directory name
+ * @name: Sysfs entry name for this instance
+ * @uncore_attr_group: Attribute group storage
+- * @max_freq_khz_dev_attr: Storage for device attribute max_freq_khz
+- * @mix_freq_khz_dev_attr: Storage for device attribute min_freq_khz
+- * @initial_max_freq_khz_dev_attr: Storage for device attribute initial_max_freq_khz
+- * @initial_min_freq_khz_dev_attr: Storage for device attribute initial_min_freq_khz
+- * @current_freq_khz_dev_attr: Storage for device attribute current_freq_khz
+- * @domain_id_dev_attr: Storage for device attribute domain_id
+- * @fabric_cluster_id_dev_attr: Storage for device attribute fabric_cluster_id
+- * @package_id_dev_attr: Storage for device attribute package_id
++ * @max_freq_khz_kobj_attr: Storage for kobject attribute max_freq_khz
++ * @mix_freq_khz_kobj_attr: Storage for kobject attribute min_freq_khz
++ * @initial_max_freq_khz_kobj_attr: Storage for kobject attribute initial_max_freq_khz
++ * @initial_min_freq_khz_kobj_attr: Storage for kobject attribute initial_min_freq_khz
++ * @current_freq_khz_kobj_attr: Storage for kobject attribute current_freq_khz
++ * @domain_id_kobj_attr: Storage for kobject attribute domain_id
++ * @fabric_cluster_id_kobj_attr: Storage for kobject attribute fabric_cluster_id
++ * @package_id_kobj_attr: Storage for kobject attribute package_id
+ * @uncore_attrs: Attribute storage for group creation
+ *
+ * This structure is used to encapsulate all data related to uncore sysfs
+@@ -53,14 +53,14 @@ struct uncore_data {
+ char name[32];
+
+ struct attribute_group uncore_attr_group;
+- struct device_attribute max_freq_khz_dev_attr;
+- struct device_attribute min_freq_khz_dev_attr;
+- struct device_attribute initial_max_freq_khz_dev_attr;
+- struct device_attribute initial_min_freq_khz_dev_attr;
+- struct device_attribute current_freq_khz_dev_attr;
+- struct device_attribute domain_id_dev_attr;
+- struct device_attribute fabric_cluster_id_dev_attr;
+- struct device_attribute package_id_dev_attr;
++ struct kobj_attribute max_freq_khz_kobj_attr;
++ struct kobj_attribute min_freq_khz_kobj_attr;
++ struct kobj_attribute initial_max_freq_khz_kobj_attr;
++ struct kobj_attribute initial_min_freq_khz_kobj_attr;
++ struct kobj_attribute current_freq_khz_kobj_attr;
++ struct kobj_attribute domain_id_kobj_attr;
++ struct kobj_attribute fabric_cluster_id_kobj_attr;
++ struct kobj_attribute package_id_kobj_attr;
+ struct attribute *uncore_attrs[9];
+ };
+
+diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
+index 1cf2471d54dde..17cc4b45e0239 100644
+--- a/drivers/platform/x86/p2sb.c
++++ b/drivers/platform/x86/p2sb.c
+@@ -26,6 +26,21 @@ static const struct x86_cpu_id p2sb_cpu_ids[] = {
+ {}
+ };
+
++/*
++ * Cache BAR0 of P2SB device functions 0 to 7.
++ * TODO: The constant 8 is the number of functions that PCI specification
++ * defines. Same definitions exist tree-wide. Unify this definition and
++ * the other definitions then move to include/uapi/linux/pci.h.
++ */
++#define NR_P2SB_RES_CACHE 8
++
++struct p2sb_res_cache {
++ u32 bus_dev_id;
++ struct resource res;
++};
++
++static struct p2sb_res_cache p2sb_resources[NR_P2SB_RES_CACHE];
++
+ static int p2sb_get_devfn(unsigned int *devfn)
+ {
+ unsigned int fn = P2SB_DEVFN_DEFAULT;
+@@ -39,8 +54,16 @@ static int p2sb_get_devfn(unsigned int *devfn)
+ return 0;
+ }
+
++static bool p2sb_valid_resource(struct resource *res)
++{
++ if (res->flags)
++ return true;
++
++ return false;
++}
++
+ /* Copy resource from the first BAR of the device in question */
+-static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
++static void p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+ {
+ struct resource *bar0 = &pdev->resource[0];
+
+@@ -56,49 +79,66 @@ static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+ mem->end = bar0->end;
+ mem->flags = bar0->flags;
+ mem->desc = bar0->desc;
+-
+- return 0;
+ }
+
+-static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
++static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
+ {
++ struct p2sb_res_cache *cache = &p2sb_resources[PCI_FUNC(devfn)];
+ struct pci_dev *pdev;
+- int ret;
+
+ pdev = pci_scan_single_device(bus, devfn);
+ if (!pdev)
+- return -ENODEV;
++ return;
+
+- ret = p2sb_read_bar0(pdev, mem);
++ p2sb_read_bar0(pdev, &cache->res);
++ cache->bus_dev_id = bus->dev.id;
+
+ pci_stop_and_remove_bus_device(pdev);
+- return ret;
+ }
+
+-/**
+- * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
+- * @bus: PCI bus to communicate with
+- * @devfn: PCI slot and function to communicate with
+- * @mem: memory resource to be filled in
+- *
+- * The BIOS prevents the P2SB device from being enumerated by the PCI
+- * subsystem, so we need to unhide and hide it back to lookup the BAR.
+- *
+- * if @bus is NULL, the bus 0 in domain 0 will be used.
+- * If @devfn is 0, it will be replaced by devfn of the P2SB device.
+- *
+- * Caller must provide a valid pointer to @mem.
+- *
+- * Locking is handled by pci_rescan_remove_lock mutex.
+- *
+- * Return:
+- * 0 on success or appropriate errno value on error.
+- */
+-int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
++static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
++{
++ unsigned int slot, fn;
++
++ if (PCI_FUNC(devfn) == 0) {
++ /*
++ * When function number of the P2SB device is zero, scan it and
++ * other function numbers, and if devices are available, cache
++ * their BAR0s.
++ */
++ slot = PCI_SLOT(devfn);
++ for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++)
++ p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn));
++ } else {
++ /* Scan the P2SB device and cache its BAR0 */
++ p2sb_scan_and_cache_devfn(bus, devfn);
++ }
++
++ if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
++ return -ENOENT;
++
++ return 0;
++}
++
++static struct pci_bus *p2sb_get_bus(struct pci_bus *bus)
++{
++ static struct pci_bus *p2sb_bus;
++
++ bus = bus ?: p2sb_bus;
++ if (bus)
++ return bus;
++
++ /* Assume P2SB is on the bus 0 in domain 0 */
++ p2sb_bus = pci_find_bus(0, 0);
++ return p2sb_bus;
++}
++
++static int p2sb_cache_resources(void)
+ {
+- struct pci_dev *pdev_p2sb;
+ unsigned int devfn_p2sb;
+ u32 value = P2SBC_HIDE;
++ struct pci_bus *bus;
++ u16 class;
+ int ret;
+
+ /* Get devfn for P2SB device itself */
+@@ -106,8 +146,17 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+ if (ret)
+ return ret;
+
+- /* if @bus is NULL, use bus 0 in domain 0 */
+- bus = bus ?: pci_find_bus(0, 0);
++ bus = p2sb_get_bus(NULL);
++ if (!bus)
++ return -ENODEV;
++
++ /*
++ * When a device with same devfn exists and its device class is not
++ * PCI_CLASS_MEMORY_OTHER for P2SB, do not touch it.
++ */
++ pci_bus_read_config_word(bus, devfn_p2sb, PCI_CLASS_DEVICE, &class);
++ if (!PCI_POSSIBLE_ERROR(class) && class != PCI_CLASS_MEMORY_OTHER)
++ return -ENODEV;
+
+ /*
+ * Prevent concurrent PCI bus scan from seeing the P2SB device and
+@@ -115,17 +164,16 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+ */
+ pci_lock_rescan_remove();
+
+- /* Unhide the P2SB device, if needed */
++ /*
++ * The BIOS prevents the P2SB device from being enumerated by the PCI
++ * subsystem, so we need to unhide and hide it back to lookup the BAR.
++ * Unhide the P2SB device here, if needed.
++ */
+ pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
+ if (value & P2SBC_HIDE)
+ pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
+
+- pdev_p2sb = pci_scan_single_device(bus, devfn_p2sb);
+- if (devfn)
+- ret = p2sb_scan_and_read(bus, devfn, mem);
+- else
+- ret = p2sb_read_bar0(pdev_p2sb, mem);
+- pci_stop_and_remove_bus_device(pdev_p2sb);
++ ret = p2sb_scan_and_cache(bus, devfn_p2sb);
+
+ /* Hide the P2SB device, if it was hidden */
+ if (value & P2SBC_HIDE)
+@@ -133,12 +181,62 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+
+ pci_unlock_rescan_remove();
+
+- if (ret)
+- return ret;
++ return ret;
++}
++
++/**
++ * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
++ * @bus: PCI bus to communicate with
++ * @devfn: PCI slot and function to communicate with
++ * @mem: memory resource to be filled in
++ *
++ * If @bus is NULL, the bus 0 in domain 0 will be used.
++ * If @devfn is 0, it will be replaced by devfn of the P2SB device.
++ *
++ * Caller must provide a valid pointer to @mem.
++ *
++ * Return:
++ * 0 on success or appropriate errno value on error.
++ */
++int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
++{
++ struct p2sb_res_cache *cache;
++ int ret;
++
++ bus = p2sb_get_bus(bus);
++ if (!bus)
++ return -ENODEV;
++
++ if (!devfn) {
++ ret = p2sb_get_devfn(&devfn);
++ if (ret)
++ return ret;
++ }
+
+- if (mem->flags == 0)
++ cache = &p2sb_resources[PCI_FUNC(devfn)];
++ if (cache->bus_dev_id != bus->dev.id)
+ return -ENODEV;
+
++ if (!p2sb_valid_resource(&cache->res))
++ return -ENOENT;
++
++ memcpy(mem, &cache->res, sizeof(*mem));
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(p2sb_bar);
++
++static int __init p2sb_fs_init(void)
++{
++ p2sb_cache_resources();
++ return 0;
++}
++
++/*
++ * pci_rescan_remove_lock to avoid access to unhidden P2SB devices can
++ * not be locked in sysfs pci bus rescan path because of deadlock. To
++ * avoid the deadlock, access to P2SB devices with the lock at an early
++ * step in kernel initialization and cache required resources. This
++ * should happen after subsys_initcall which initializes PCI subsystem
++ * and before device_initcall which requires P2SB resources.
++ */
++fs_initcall(p2sb_fs_init);
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index 5dd22258cb3bc..bd017478e61b6 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -686,9 +686,10 @@ acpi_status wmi_install_notify_handler(const char *guid,
+ block->handler_data = data;
+
+ wmi_status = wmi_method_enable(block, true);
+- if ((wmi_status != AE_OK) ||
+- ((wmi_status == AE_OK) && (status == AE_NOT_EXIST)))
+- status = wmi_status;
++ if (ACPI_FAILURE(wmi_status))
++ dev_warn(&block->dev.dev, "Failed to enable device\n");
++
++ status = AE_OK;
+ }
+ }
+
+@@ -729,12 +730,13 @@ acpi_status wmi_remove_notify_handler(const char *guid)
+ status = AE_OK;
+ } else {
+ wmi_status = wmi_method_enable(block, false);
++ if (ACPI_FAILURE(wmi_status))
++ dev_warn(&block->dev.dev, "Failed to disable device\n");
++
+ block->handler = NULL;
+ block->handler_data = NULL;
+- if ((wmi_status != AE_OK) ||
+- ((wmi_status == AE_OK) &&
+- (status == AE_NOT_EXIST)))
+- status = wmi_status;
++
++ status = AE_OK;
+ }
+ }
+ }
+diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
+index dc87965f81641..1062939c32645 100644
+--- a/drivers/rpmsg/virtio_rpmsg_bus.c
++++ b/drivers/rpmsg/virtio_rpmsg_bus.c
+@@ -378,6 +378,7 @@ static void virtio_rpmsg_release_device(struct device *dev)
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
+
++ kfree(rpdev->driver_override);
+ kfree(vch);
+ }
+
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index 228fb2d11c709..7d99cd2c37a0b 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -231,7 +231,7 @@ static int cmos_read_time(struct device *dev, struct rtc_time *t)
+ if (!pm_trace_rtc_valid())
+ return -EIO;
+
+- ret = mc146818_get_time(t);
++ ret = mc146818_get_time(t, 1000);
+ if (ret < 0) {
+ dev_err_ratelimited(dev, "unable to read current time\n");
+ return ret;
+@@ -292,7 +292,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+
+ /* This not only a rtc_op, but also called directly */
+ if (!is_valid_irq(cmos->irq))
+- return -EIO;
++ return -ETIMEDOUT;
+
+ /* Basic alarms only support hour, minute, and seconds fields.
+ * Some also support day and month, for alarms up to a year in
+@@ -307,7 +307,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+ *
+ * Use the mc146818_avoid_UIP() function to avoid this.
+ */
+- if (!mc146818_avoid_UIP(cmos_read_alarm_callback, &p))
++ if (!mc146818_avoid_UIP(cmos_read_alarm_callback, 10, &p))
+ return -EIO;
+
+ if (!(p.rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+@@ -556,8 +556,8 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+ *
+ * Use mc146818_avoid_UIP() to avoid this.
+ */
+- if (!mc146818_avoid_UIP(cmos_set_alarm_callback, &p))
+- return -EIO;
++ if (!mc146818_avoid_UIP(cmos_set_alarm_callback, 10, &p))
++ return -ETIMEDOUT;
+
+ cmos->alarm_expires = rtc_tm_to_time64(&t->time);
+
+@@ -818,18 +818,24 @@ static void rtc_wake_off(struct device *dev)
+ }
+
+ #ifdef CONFIG_X86
+-/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */
+ static void use_acpi_alarm_quirks(void)
+ {
+- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
++ switch (boot_cpu_data.x86_vendor) {
++ case X86_VENDOR_INTEL:
++ if (dmi_get_bios_year() < 2015)
++ return;
++ break;
++ case X86_VENDOR_AMD:
++ case X86_VENDOR_HYGON:
++ if (dmi_get_bios_year() < 2021)
++ return;
++ break;
++ default:
+ return;
+-
++ }
+ if (!is_hpet_enabled())
+ return;
+
+- if (dmi_get_bios_year() < 2015)
+- return;
+-
+ use_acpi_alarm = true;
+ }
+ #else
+diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
+index f1c09f1db044c..651bf3c279c74 100644
+--- a/drivers/rtc/rtc-mc146818-lib.c
++++ b/drivers/rtc/rtc-mc146818-lib.c
+@@ -8,26 +8,31 @@
+ #include <linux/acpi.h>
+ #endif
+
++#define UIP_RECHECK_DELAY 100 /* usec */
++#define UIP_RECHECK_DELAY_MS (USEC_PER_MSEC / UIP_RECHECK_DELAY)
++#define UIP_RECHECK_LOOPS_MS(x) (x / UIP_RECHECK_DELAY_MS)
++
+ /*
+ * Execute a function while the UIP (Update-in-progress) bit of the RTC is
+- * unset.
++ * unset. The timeout is configurable by the caller in ms.
+ *
+ * Warning: callback may be executed more then once.
+ */
+ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
++ int timeout,
+ void *param)
+ {
+ int i;
+ unsigned long flags;
+ unsigned char seconds;
+
+- for (i = 0; i < 100; i++) {
++ for (i = 0; UIP_RECHECK_LOOPS_MS(i) < timeout; i++) {
+ spin_lock_irqsave(&rtc_lock, flags);
+
+ /*
+ * Check whether there is an update in progress during which the
+ * readout is unspecified. The maximum update time is ~2ms. Poll
+- * every 100 usec for completion.
++ * for completion.
+ *
+ * Store the second value before checking UIP so a long lasting
+ * NMI which happens to hit after the UIP check cannot make
+@@ -37,7 +42,7 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+
+ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+- udelay(100);
++ udelay(UIP_RECHECK_DELAY);
+ continue;
+ }
+
+@@ -56,7 +61,7 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+ */
+ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+- udelay(100);
++ udelay(UIP_RECHECK_DELAY);
+ continue;
+ }
+
+@@ -72,6 +77,10 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+ }
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
++ if (UIP_RECHECK_LOOPS_MS(i) >= 100)
++ pr_warn("Reading current time from RTC took around %li ms\n",
++ UIP_RECHECK_LOOPS_MS(i));
++
+ return true;
+ }
+ return false;
+@@ -84,7 +93,7 @@ EXPORT_SYMBOL_GPL(mc146818_avoid_UIP);
+ */
+ bool mc146818_does_rtc_work(void)
+ {
+- return mc146818_avoid_UIP(NULL, NULL);
++ return mc146818_avoid_UIP(NULL, 1000, NULL);
+ }
+ EXPORT_SYMBOL_GPL(mc146818_does_rtc_work);
+
+@@ -130,15 +139,27 @@ static void mc146818_get_time_callback(unsigned char seconds, void *param_in)
+ p->ctrl = CMOS_READ(RTC_CONTROL);
+ }
+
+-int mc146818_get_time(struct rtc_time *time)
++/**
++ * mc146818_get_time - Get the current time from the RTC
++ * @time: pointer to struct rtc_time to store the current time
++ * @timeout: timeout value in ms
++ *
++ * This function reads the current time from the RTC and stores it in the
++ * provided struct rtc_time. The timeout parameter specifies the maximum
++ * time to wait for the RTC to become ready.
++ *
++ * Return: 0 on success, -ETIMEDOUT if the RTC did not become ready within
++ * the specified timeout, or another error code if an error occurred.
++ */
++int mc146818_get_time(struct rtc_time *time, int timeout)
+ {
+ struct mc146818_get_time_callback_param p = {
+ .time = time
+ };
+
+- if (!mc146818_avoid_UIP(mc146818_get_time_callback, &p)) {
++ if (!mc146818_avoid_UIP(mc146818_get_time_callback, timeout, &p)) {
+ memset(time, 0, sizeof(*time));
+- return -EIO;
++ return -ETIMEDOUT;
+ }
+
+ if (!(p.ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
+index 4db538a551925..88f41f95cc94d 100644
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -32,7 +32,8 @@
+
+ #define AP_RESET_INTERVAL 20 /* Reset sleep interval (20ms) */
+
+-static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable);
++static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
++static int vfio_ap_mdev_reset_qlist(struct list_head *qlist);
+ static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
+ static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
+ static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);
+@@ -457,6 +458,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
+ VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
+ __func__, nisc, isc, q->apqn);
+
++ vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
+ status.response_code = AP_RESPONSE_INVALID_GISA;
+ return status;
+ }
+@@ -661,17 +663,23 @@ static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
+ * device driver.
+ *
+ * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
++ * @apm_filtered: a 256-bit bitmap for storing the APIDs filtered from the
++ * guest's AP configuration that are still in the host's AP
++ * configuration.
+ *
+ * Note: If an APQN referencing a queue device that is not bound to the vfio_ap
+ * driver, its APID will be filtered from the guest's APCB. The matrix
+ * structure precludes filtering an individual APQN, so its APID will be
+- * filtered.
++ * filtered. Consequently, all queues associated with the adapter that
++ * are in the host's AP configuration must be reset. If queues are
++ * subsequently made available again to the guest, they should re-appear
++ * in a reset state
+ *
+ * Return: a boolean value indicating whether the KVM guest's APCB was changed
+ * by the filtering or not.
+ */
+-static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
+- struct ap_matrix_mdev *matrix_mdev)
++static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
++ unsigned long *apm_filtered)
+ {
+ unsigned long apid, apqi, apqn;
+ DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
+@@ -681,6 +689,7 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
+ bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
+ bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
++ bitmap_clear(apm_filtered, 0, AP_DEVICES);
+
+ /*
+ * Copy the adapters, domains and control domains to the shadow_apcb
+@@ -692,8 +701,9 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
+ bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
+ (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);
+
+- for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+- for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
++ for_each_set_bit_inv(apid, matrix_mdev->shadow_apcb.apm, AP_DEVICES) {
++ for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm,
++ AP_DOMAINS) {
+ /*
+ * If the APQN is not bound to the vfio_ap device
+ * driver, then we can't assign it to the guest's
+@@ -705,8 +715,16 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
+ apqn = AP_MKQID(apid, apqi);
+ q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
+ if (!q || q->reset_status.response_code) {
+- clear_bit_inv(apid,
+- matrix_mdev->shadow_apcb.apm);
++ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
++
++ /*
++ * If the adapter was previously plugged into
++ * the guest, let's let the caller know that
++ * the APID was filtered.
++ */
++ if (test_bit_inv(apid, prev_shadow_apm))
++ set_bit_inv(apid, apm_filtered);
++
+ break;
+ }
+ }
+@@ -808,7 +826,7 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev)
+
+ mutex_lock(&matrix_dev->guests_lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+- vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
++ vfio_ap_mdev_reset_queues(matrix_mdev);
+ vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
+ list_del(&matrix_mdev->node);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+@@ -918,6 +936,47 @@ static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
+ AP_MKQID(apid, apqi));
+ }
+
++static void collect_queues_to_reset(struct ap_matrix_mdev *matrix_mdev,
++ unsigned long apid,
++ struct list_head *qlist)
++{
++ struct vfio_ap_queue *q;
++ unsigned long apqi;
++
++ for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS) {
++ q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
++ if (q)
++ list_add_tail(&q->reset_qnode, qlist);
++ }
++}
++
++static void reset_queues_for_apid(struct ap_matrix_mdev *matrix_mdev,
++ unsigned long apid)
++{
++ struct list_head qlist;
++
++ INIT_LIST_HEAD(&qlist);
++ collect_queues_to_reset(matrix_mdev, apid, &qlist);
++ vfio_ap_mdev_reset_qlist(&qlist);
++}
++
++static int reset_queues_for_apids(struct ap_matrix_mdev *matrix_mdev,
++ unsigned long *apm_reset)
++{
++ struct list_head qlist;
++ unsigned long apid;
++
++ if (bitmap_empty(apm_reset, AP_DEVICES))
++ return 0;
++
++ INIT_LIST_HEAD(&qlist);
++
++ for_each_set_bit_inv(apid, apm_reset, AP_DEVICES)
++ collect_queues_to_reset(matrix_mdev, apid, &qlist);
++
++ return vfio_ap_mdev_reset_qlist(&qlist);
++}
++
+ /**
+ * assign_adapter_store - parses the APID from @buf and sets the
+ * corresponding bit in the mediated matrix device's APM
+@@ -958,7 +1017,7 @@ static ssize_t assign_adapter_store(struct device *dev,
+ {
+ int ret;
+ unsigned long apid;
+- DECLARE_BITMAP(apm_delta, AP_DEVICES);
++ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&ap_perms_mutex);
+@@ -987,12 +1046,11 @@ static ssize_t assign_adapter_store(struct device *dev,
+ }
+
+ vfio_ap_mdev_link_adapter(matrix_mdev, apid);
+- memset(apm_delta, 0, sizeof(apm_delta));
+- set_bit_inv(apid, apm_delta);
+
+- if (vfio_ap_mdev_filter_matrix(apm_delta,
+- matrix_mdev->matrix.aqm, matrix_mdev))
++ if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
++ reset_queues_for_apids(matrix_mdev, apm_filtered);
++ }
+
+ ret = count;
+ done:
+@@ -1023,11 +1081,12 @@ static struct vfio_ap_queue
+ * adapter was assigned.
+ * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
+ * @apid: the APID of the unassigned adapter.
+- * @qtable: table for storing queues associated with unassigned adapter.
++ * @qlist: list for storing queues associated with unassigned adapter that
++ * need to be reset.
+ */
+ static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid,
+- struct ap_queue_table *qtable)
++ struct list_head *qlist)
+ {
+ unsigned long apqi;
+ struct vfio_ap_queue *q;
+@@ -1035,11 +1094,10 @@ static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
+ q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
+
+- if (q && qtable) {
++ if (q && qlist) {
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+- hash_add(qtable->queues, &q->mdev_qnode,
+- q->apqn);
++ list_add_tail(&q->reset_qnode, qlist);
+ }
+ }
+ }
+@@ -1047,26 +1105,23 @@ static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
+ static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+ {
+- int loop_cursor;
+- struct vfio_ap_queue *q;
+- struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
++ struct vfio_ap_queue *q, *tmpq;
++ struct list_head qlist;
+
+- hash_init(qtable->queues);
+- vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, qtable);
++ INIT_LIST_HEAD(&qlist);
++ vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, &qlist);
+
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
+- vfio_ap_mdev_reset_queues(qtable);
++ vfio_ap_mdev_reset_qlist(&qlist);
+
+- hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
++ list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
+ vfio_ap_unlink_mdev_fr_queue(q);
+- hash_del(&q->mdev_qnode);
++ list_del(&q->reset_qnode);
+ }
+-
+- kfree(qtable);
+ }
+
+ /**
+@@ -1167,7 +1222,7 @@ static ssize_t assign_domain_store(struct device *dev,
+ {
+ int ret;
+ unsigned long apqi;
+- DECLARE_BITMAP(aqm_delta, AP_DOMAINS);
++ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&ap_perms_mutex);
+@@ -1196,12 +1251,11 @@ static ssize_t assign_domain_store(struct device *dev,
+ }
+
+ vfio_ap_mdev_link_domain(matrix_mdev, apqi);
+- memset(aqm_delta, 0, sizeof(aqm_delta));
+- set_bit_inv(apqi, aqm_delta);
+
+- if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, aqm_delta,
+- matrix_mdev))
++ if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
++ reset_queues_for_apids(matrix_mdev, apm_filtered);
++ }
+
+ ret = count;
+ done:
+@@ -1214,7 +1268,7 @@ static DEVICE_ATTR_WO(assign_domain);
+
+ static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi,
+- struct ap_queue_table *qtable)
++ struct list_head *qlist)
+ {
+ unsigned long apid;
+ struct vfio_ap_queue *q;
+@@ -1222,11 +1276,10 @@ static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
+ q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
+
+- if (q && qtable) {
++ if (q && qlist) {
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+- hash_add(qtable->queues, &q->mdev_qnode,
+- q->apqn);
++ list_add_tail(&q->reset_qnode, qlist);
+ }
+ }
+ }
+@@ -1234,26 +1287,23 @@ static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
+ static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
+ {
+- int loop_cursor;
+- struct vfio_ap_queue *q;
+- struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
++ struct vfio_ap_queue *q, *tmpq;
++ struct list_head qlist;
+
+- hash_init(qtable->queues);
+- vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, qtable);
++ INIT_LIST_HEAD(&qlist);
++ vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, &qlist);
+
+ if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+ clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
+- vfio_ap_mdev_reset_queues(qtable);
++ vfio_ap_mdev_reset_qlist(&qlist);
+
+- hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
++ list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
+ vfio_ap_unlink_mdev_fr_queue(q);
+- hash_del(&q->mdev_qnode);
++ list_del(&q->reset_qnode);
+ }
+-
+- kfree(qtable);
+ }
+
+ /**
+@@ -1608,7 +1658,7 @@ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
+ get_update_locks_for_kvm(kvm);
+
+ kvm_arch_crypto_clear_masks(kvm);
+- vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
++ vfio_ap_mdev_reset_queues(matrix_mdev);
+ kvm_put_kvm(kvm);
+ matrix_mdev->kvm = NULL;
+
+@@ -1744,15 +1794,33 @@ static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
+ }
+ }
+
+-static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable)
++static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
+ {
+ int ret = 0, loop_cursor;
+ struct vfio_ap_queue *q;
+
+- hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode)
++ hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode)
+ vfio_ap_mdev_reset_queue(q);
+
+- hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
++ hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) {
++ flush_work(&q->reset_work);
++
++ if (q->reset_status.response_code)
++ ret = -EIO;
++ }
++
++ return ret;
++}
++
++static int vfio_ap_mdev_reset_qlist(struct list_head *qlist)
++{
++ int ret = 0;
++ struct vfio_ap_queue *q;
++
++ list_for_each_entry(q, qlist, reset_qnode)
++ vfio_ap_mdev_reset_queue(q);
++
++ list_for_each_entry(q, qlist, reset_qnode) {
+ flush_work(&q->reset_work);
+
+ if (q->reset_status.response_code)
+@@ -1938,7 +2006,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
+ ret = vfio_ap_mdev_get_device_info(arg);
+ break;
+ case VFIO_DEVICE_RESET:
+- ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
++ ret = vfio_ap_mdev_reset_queues(matrix_mdev);
+ break;
+ case VFIO_DEVICE_GET_IRQ_INFO:
+ ret = vfio_ap_get_irq_info(arg);
+@@ -2070,6 +2138,7 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
+ {
+ int ret;
+ struct vfio_ap_queue *q;
++ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+ struct ap_matrix_mdev *matrix_mdev;
+
+ ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
+@@ -2091,15 +2160,28 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
+ if (matrix_mdev) {
+ vfio_ap_mdev_link_queue(matrix_mdev, q);
+
+- if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm,
+- matrix_mdev->matrix.aqm,
+- matrix_mdev))
++ /*
++ * If we're in the process of handling the adding of adapters or
++ * domains to the host's AP configuration, then let the
++ * vfio_ap device driver's on_scan_complete callback filter the
++ * matrix and update the guest's AP configuration after all of
++ * the new queue devices are probed.
++ */
++ if (!bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) ||
++ !bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS))
++ goto done;
++
++ if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
++ reset_queues_for_apids(matrix_mdev, apm_filtered);
++ }
+ }
++
++done:
+ dev_set_drvdata(&apdev->device, q);
+ release_update_locks_for_mdev(matrix_mdev);
+
+- return 0;
++ return ret;
+
+ err_remove_group:
+ sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
+@@ -2116,26 +2198,40 @@ void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
+ q = dev_get_drvdata(&apdev->device);
+ get_update_locks_for_queue(q);
+ matrix_mdev = q->matrix_mdev;
++ apid = AP_QID_CARD(q->apqn);
++ apqi = AP_QID_QUEUE(q->apqn);
+
+ if (matrix_mdev) {
+- vfio_ap_unlink_queue_fr_mdev(q);
+-
+- apid = AP_QID_CARD(q->apqn);
+- apqi = AP_QID_QUEUE(q->apqn);
+-
+- /*
+- * If the queue is assigned to the guest's APCB, then remove
+- * the adapter's APID from the APCB and hot it into the guest.
+- */
++ /* If the queue is assigned to the guest's AP configuration */
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
++ /*
++ * Since the queues are defined via a matrix of adapters
++ * and domains, it is not possible to hot unplug a
++ * single queue; so, let's unplug the adapter.
++ */
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
++ reset_queues_for_apid(matrix_mdev, apid);
++ goto done;
+ }
+ }
+
+- vfio_ap_mdev_reset_queue(q);
+- flush_work(&q->reset_work);
++ /*
++ * If the queue is not in the host's AP configuration, then resetting
++ * it will fail with response code 01, (APQN not valid); so, let's make
++ * sure it is in the host's config.
++ */
++ if (test_bit_inv(apid, (unsigned long *)matrix_dev->info.apm) &&
++ test_bit_inv(apqi, (unsigned long *)matrix_dev->info.aqm)) {
++ vfio_ap_mdev_reset_queue(q);
++ flush_work(&q->reset_work);
++ }
++
++done:
++ if (matrix_mdev)
++ vfio_ap_unlink_queue_fr_mdev(q);
++
+ dev_set_drvdata(&apdev->device, NULL);
+ kfree(q);
+ release_update_locks_for_mdev(matrix_mdev);
+@@ -2443,39 +2539,30 @@ void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info,
+
+ static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
+ {
+- bool do_hotplug = false;
+- int filter_domains = 0;
+- int filter_adapters = 0;
+- DECLARE_BITMAP(apm, AP_DEVICES);
+- DECLARE_BITMAP(aqm, AP_DOMAINS);
++ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
++ bool filter_domains, filter_adapters, filter_cdoms, do_hotplug = false;
+
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+- filter_adapters = bitmap_and(apm, matrix_mdev->matrix.apm,
+- matrix_mdev->apm_add, AP_DEVICES);
+- filter_domains = bitmap_and(aqm, matrix_mdev->matrix.aqm,
+- matrix_mdev->aqm_add, AP_DOMAINS);
+-
+- if (filter_adapters && filter_domains)
+- do_hotplug |= vfio_ap_mdev_filter_matrix(apm, aqm, matrix_mdev);
+- else if (filter_adapters)
+- do_hotplug |=
+- vfio_ap_mdev_filter_matrix(apm,
+- matrix_mdev->shadow_apcb.aqm,
+- matrix_mdev);
+- else
+- do_hotplug |=
+- vfio_ap_mdev_filter_matrix(matrix_mdev->shadow_apcb.apm,
+- aqm, matrix_mdev);
++ filter_adapters = bitmap_intersects(matrix_mdev->matrix.apm,
++ matrix_mdev->apm_add, AP_DEVICES);
++ filter_domains = bitmap_intersects(matrix_mdev->matrix.aqm,
++ matrix_mdev->aqm_add, AP_DOMAINS);
++ filter_cdoms = bitmap_intersects(matrix_mdev->matrix.adm,
++ matrix_mdev->adm_add, AP_DOMAINS);
+
+- if (bitmap_intersects(matrix_mdev->matrix.adm, matrix_mdev->adm_add,
+- AP_DOMAINS))
++ if (filter_adapters || filter_domains)
++ do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered);
++
++ if (filter_cdoms)
+ do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);
+
+ if (do_hotplug)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
++ reset_queues_for_apids(matrix_mdev, apm_filtered);
++
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_mdev->kvm->lock);
+ }
+diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
+index 88aff8b81f2fc..98d37aa27044a 100644
+--- a/drivers/s390/crypto/vfio_ap_private.h
++++ b/drivers/s390/crypto/vfio_ap_private.h
+@@ -133,6 +133,8 @@ struct ap_matrix_mdev {
+ * @apqn: the APQN of the AP queue device
+ * @saved_isc: the guest ISC registered with the GIB interface
+ * @mdev_qnode: allows the vfio_ap_queue struct to be added to a hashtable
++ * @reset_qnode: allows the vfio_ap_queue struct to be added to a list of queues
++ * that need to be reset
+ * @reset_status: the status from the last reset of the queue
+ * @reset_work: work to wait for queue reset to complete
+ */
+@@ -143,6 +145,7 @@ struct vfio_ap_queue {
+ #define VFIO_AP_ISC_INVALID 0xff
+ unsigned char saved_isc;
+ struct hlist_node mdev_qnode;
++ struct list_head reset_qnode;
+ struct ap_queue_status reset_status;
+ struct work_struct reset_work;
+ };
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 1223d34c04da3..d983f4a0e9f14 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -2196,15 +2196,18 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
+ struct scsi_cmnd *scmd, *next;
+
+ list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
++ struct scsi_device *sdev = scmd->device;
++
+ list_del_init(&scmd->eh_entry);
+- if (scsi_device_online(scmd->device) &&
+- !scsi_noretry_cmd(scmd) && scsi_cmd_retry_allowed(scmd) &&
+- scsi_eh_should_retry_cmd(scmd)) {
++ if (scsi_device_online(sdev) && !scsi_noretry_cmd(scmd) &&
++ scsi_cmd_retry_allowed(scmd) &&
++ scsi_eh_should_retry_cmd(scmd)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "%s: flush retry cmd\n",
+ current->comm));
+ scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
++ blk_mq_kick_requeue_list(sdev->request_queue);
+ } else {
+ /*
+ * If just we got sense for the device (called
+diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
+index 92ec76c03965b..2312152a44b3e 100644
+--- a/drivers/soc/fsl/qe/qmc.c
++++ b/drivers/soc/fsl/qe/qmc.c
+@@ -175,7 +175,7 @@ struct qmc_chan {
+ struct list_head list;
+ unsigned int id;
+ struct qmc *qmc;
+- void *__iomem s_param;
++ void __iomem *s_param;
+ enum qmc_mode mode;
+ u64 tx_ts_mask;
+ u64 rx_ts_mask;
+@@ -203,9 +203,9 @@ struct qmc_chan {
+ struct qmc {
+ struct device *dev;
+ struct tsa_serial *tsa_serial;
+- void *__iomem scc_regs;
+- void *__iomem scc_pram;
+- void *__iomem dpram;
++ void __iomem *scc_regs;
++ void __iomem *scc_pram;
++ void __iomem *dpram;
+ u16 scc_pram_offset;
+ cbd_t __iomem *bd_table;
+ dma_addr_t bd_dma_addr;
+@@ -218,37 +218,37 @@ struct qmc {
+ struct qmc_chan *chans[64];
+ };
+
+-static inline void qmc_write16(void *__iomem addr, u16 val)
++static inline void qmc_write16(void __iomem *addr, u16 val)
+ {
+ iowrite16be(val, addr);
+ }
+
+-static inline u16 qmc_read16(void *__iomem addr)
++static inline u16 qmc_read16(void __iomem *addr)
+ {
+ return ioread16be(addr);
+ }
+
+-static inline void qmc_setbits16(void *__iomem addr, u16 set)
++static inline void qmc_setbits16(void __iomem *addr, u16 set)
+ {
+ qmc_write16(addr, qmc_read16(addr) | set);
+ }
+
+-static inline void qmc_clrbits16(void *__iomem addr, u16 clr)
++static inline void qmc_clrbits16(void __iomem *addr, u16 clr)
+ {
+ qmc_write16(addr, qmc_read16(addr) & ~clr);
+ }
+
+-static inline void qmc_write32(void *__iomem addr, u32 val)
++static inline void qmc_write32(void __iomem *addr, u32 val)
+ {
+ iowrite32be(val, addr);
+ }
+
+-static inline u32 qmc_read32(void *__iomem addr)
++static inline u32 qmc_read32(void __iomem *addr)
+ {
+ return ioread32be(addr);
+ }
+
+-static inline void qmc_setbits32(void *__iomem addr, u32 set)
++static inline void qmc_setbits32(void __iomem *addr, u32 set)
+ {
+ qmc_write32(addr, qmc_read32(addr) | set);
+ }
+@@ -318,7 +318,7 @@ int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
+ {
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ u16 ctrl;
+ int ret;
+
+@@ -374,7 +374,7 @@ static void qmc_chan_write_done(struct qmc_chan *chan)
+ void (*complete)(void *context);
+ unsigned long flags;
+ void *context;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ u16 ctrl;
+
+ /*
+@@ -425,7 +425,7 @@ int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
+ {
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ u16 ctrl;
+ int ret;
+
+@@ -488,7 +488,7 @@ static void qmc_chan_read_done(struct qmc_chan *chan)
+ void (*complete)(void *context, size_t size);
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ void *context;
+ u16 datalen;
+ u16 ctrl;
+@@ -663,7 +663,7 @@ static void qmc_chan_reset_rx(struct qmc_chan *chan)
+ {
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ u16 ctrl;
+
+ spin_lock_irqsave(&chan->rx_lock, flags);
+@@ -685,7 +685,6 @@ static void qmc_chan_reset_rx(struct qmc_chan *chan)
+ qmc_read16(chan->s_param + QMC_SPE_RBASE));
+
+ chan->rx_pending = 0;
+- chan->is_rx_stopped = false;
+
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+ }
+@@ -694,7 +693,7 @@ static void qmc_chan_reset_tx(struct qmc_chan *chan)
+ {
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ u16 ctrl;
+
+ spin_lock_irqsave(&chan->tx_lock, flags);
+diff --git a/drivers/soc/fsl/qe/tsa.c b/drivers/soc/fsl/qe/tsa.c
+index 3f99813355900..6c5741cf5e9d2 100644
+--- a/drivers/soc/fsl/qe/tsa.c
++++ b/drivers/soc/fsl/qe/tsa.c
+@@ -98,9 +98,9 @@
+ #define TSA_SIRP 0x10
+
+ struct tsa_entries_area {
+- void *__iomem entries_start;
+- void *__iomem entries_next;
+- void *__iomem last_entry;
++ void __iomem *entries_start;
++ void __iomem *entries_next;
++ void __iomem *last_entry;
+ };
+
+ struct tsa_tdm {
+@@ -117,8 +117,8 @@ struct tsa_tdm {
+
+ struct tsa {
+ struct device *dev;
+- void *__iomem si_regs;
+- void *__iomem si_ram;
++ void __iomem *si_regs;
++ void __iomem *si_ram;
+ resource_size_t si_ram_sz;
+ spinlock_t lock;
+ int tdms; /* TSA_TDMx ORed */
+@@ -135,27 +135,27 @@ static inline struct tsa *tsa_serial_get_tsa(struct tsa_serial *tsa_serial)
+ return container_of(tsa_serial, struct tsa, serials[tsa_serial->id]);
+ }
+
+-static inline void tsa_write32(void *__iomem addr, u32 val)
++static inline void tsa_write32(void __iomem *addr, u32 val)
+ {
+ iowrite32be(val, addr);
+ }
+
+-static inline void tsa_write8(void *__iomem addr, u32 val)
++static inline void tsa_write8(void __iomem *addr, u32 val)
+ {
+ iowrite8(val, addr);
+ }
+
+-static inline u32 tsa_read32(void *__iomem addr)
++static inline u32 tsa_read32(void __iomem *addr)
+ {
+ return ioread32be(addr);
+ }
+
+-static inline void tsa_clrbits32(void *__iomem addr, u32 clr)
++static inline void tsa_clrbits32(void __iomem *addr, u32 clr)
+ {
+ tsa_write32(addr, tsa_read32(addr) & ~clr);
+ }
+
+-static inline void tsa_clrsetbits32(void *__iomem addr, u32 clr, u32 set)
++static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set)
+ {
+ tsa_write32(addr, (tsa_read32(addr) & ~clr) | set);
+ }
+@@ -313,7 +313,7 @@ static u32 tsa_serial_id2csel(struct tsa *tsa, u32 serial_id)
+ static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 count, u32 serial_id)
+ {
+- void *__iomem addr;
++ void __iomem *addr;
+ u32 left;
+ u32 val;
+ u32 cnt;
+diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
+index b78279e2f54cf..7ee52cf2570fa 100644
+--- a/drivers/soc/qcom/pmic_glink_altmode.c
++++ b/drivers/soc/qcom/pmic_glink_altmode.c
+@@ -285,7 +285,7 @@ static void pmic_glink_altmode_sc8180xp_notify(struct pmic_glink_altmode *altmod
+
+ svid = mux == 2 ? USB_TYPEC_DP_SID : 0;
+
+- if (!altmode->ports[port].altmode) {
++ if (port >= ARRAY_SIZE(altmode->ports) || !altmode->ports[port].altmode) {
+ dev_dbg(altmode->dev, "notification on undefined port %d\n", port);
+ return;
+ }
+@@ -328,7 +328,7 @@ static void pmic_glink_altmode_sc8280xp_notify(struct pmic_glink_altmode *altmod
+ hpd_state = FIELD_GET(SC8280XP_HPD_STATE_MASK, notify->payload[8]);
+ hpd_irq = FIELD_GET(SC8280XP_HPD_IRQ_MASK, notify->payload[8]);
+
+- if (!altmode->ports[port].altmode) {
++ if (port >= ARRAY_SIZE(altmode->ports) || !altmode->ports[port].altmode) {
+ dev_dbg(altmode->dev, "notification on undefined port %d\n", port);
+ return;
+ }
+diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
+index 3a99f6dcdfafa..a3b1f4e6f0f90 100644
+--- a/drivers/soundwire/amd_manager.c
++++ b/drivers/soundwire/amd_manager.c
+@@ -927,6 +927,14 @@ static int amd_sdw_manager_probe(struct platform_device *pdev)
+ amd_manager->bus.clk_stop_timeout = 200;
+ amd_manager->bus.link_id = amd_manager->instance;
+
++ /*
++ * Due to BIOS compatibility, the two links are exposed within
++ * the scope of a single controller. If this changes, the
++ * controller_id will have to be updated with drv_data
++ * information.
++ */
++ amd_manager->bus.controller_id = 0;
++
+ switch (amd_manager->instance) {
+ case ACP_SDW0:
+ amd_manager->num_dout_ports = AMD_SDW0_MAX_TX_PORTS;
+diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
+index 41b0d9adf68ef..f3fec15c31122 100644
+--- a/drivers/soundwire/bus.c
++++ b/drivers/soundwire/bus.c
+@@ -22,6 +22,10 @@ static int sdw_get_id(struct sdw_bus *bus)
+ return rc;
+
+ bus->id = rc;
++
++ if (bus->controller_id == -1)
++ bus->controller_id = rc;
++
+ return 0;
+ }
+
+diff --git a/drivers/soundwire/debugfs.c b/drivers/soundwire/debugfs.c
+index d1553cb771874..67abd7e52f092 100644
+--- a/drivers/soundwire/debugfs.c
++++ b/drivers/soundwire/debugfs.c
+@@ -20,7 +20,7 @@ void sdw_bus_debugfs_init(struct sdw_bus *bus)
+ return;
+
+ /* create the debugfs master-N */
+- snprintf(name, sizeof(name), "master-%d-%d", bus->id, bus->link_id);
++ snprintf(name, sizeof(name), "master-%d-%d", bus->controller_id, bus->link_id);
+ bus->debugfs = debugfs_create_dir(name, sdw_debugfs_root);
+ }
+
+diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c
+index 7f15e3549e539..93698532deac4 100644
+--- a/drivers/soundwire/intel_auxdevice.c
++++ b/drivers/soundwire/intel_auxdevice.c
+@@ -234,6 +234,9 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
+ cdns->instance = sdw->instance;
+ cdns->msg_count = 0;
+
++ /* single controller for all SoundWire links */
++ bus->controller_id = 0;
++
+ bus->link_id = auxdev->id;
+ bus->clk_stop_timeout = 1;
+
+diff --git a/drivers/soundwire/master.c b/drivers/soundwire/master.c
+index 9b05c9e25ebe4..51abedbbaa663 100644
+--- a/drivers/soundwire/master.c
++++ b/drivers/soundwire/master.c
+@@ -145,7 +145,7 @@ int sdw_master_device_add(struct sdw_bus *bus, struct device *parent,
+ md->dev.fwnode = fwnode;
+ md->dev.dma_mask = parent->dma_mask;
+
+- dev_set_name(&md->dev, "sdw-master-%d", bus->id);
++ dev_set_name(&md->dev, "sdw-master-%d-%d", bus->controller_id, bus->link_id);
+
+ ret = device_register(&md->dev);
+ if (ret) {
+diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
+index a1e2d6c981862..8e027eee8b733 100644
+--- a/drivers/soundwire/qcom.c
++++ b/drivers/soundwire/qcom.c
+@@ -1624,6 +1624,9 @@ static int qcom_swrm_probe(struct platform_device *pdev)
+ }
+ }
+
++ /* FIXME: is there a DT-defined value to use ? */
++ ctrl->bus.controller_id = -1;
++
+ ret = sdw_bus_master_add(&ctrl->bus, dev, dev->fwnode);
+ if (ret) {
+ dev_err(dev, "Failed to register Soundwire controller (%d)\n",
+diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
+index c1c1a2ac293af..060c2982e26b0 100644
+--- a/drivers/soundwire/slave.c
++++ b/drivers/soundwire/slave.c
+@@ -39,14 +39,14 @@ int sdw_slave_add(struct sdw_bus *bus,
+ slave->dev.fwnode = fwnode;
+
+ if (id->unique_id == SDW_IGNORED_UNIQUE_ID) {
+- /* name shall be sdw:link:mfg:part:class */
+- dev_set_name(&slave->dev, "sdw:%01x:%04x:%04x:%02x",
+- bus->link_id, id->mfg_id, id->part_id,
++ /* name shall be sdw:ctrl:link:mfg:part:class */
++ dev_set_name(&slave->dev, "sdw:%01x:%01x:%04x:%04x:%02x",
++ bus->controller_id, bus->link_id, id->mfg_id, id->part_id,
+ id->class_id);
+ } else {
+- /* name shall be sdw:link:mfg:part:class:unique */
+- dev_set_name(&slave->dev, "sdw:%01x:%04x:%04x:%02x:%01x",
+- bus->link_id, id->mfg_id, id->part_id,
++ /* name shall be sdw:ctrl:link:mfg:part:class:unique */
++ dev_set_name(&slave->dev, "sdw:%01x:%01x:%04x:%04x:%02x:%01x",
++ bus->controller_id, bus->link_id, id->mfg_id, id->part_id,
+ id->class_id, id->unique_id);
+ }
+
+diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
+index ef08fcac2f6da..0407b91183caa 100644
+--- a/drivers/spi/spi-bcm-qspi.c
++++ b/drivers/spi/spi-bcm-qspi.c
+@@ -19,7 +19,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+ #include <linux/spi/spi.h>
+-#include <linux/spi/spi-mem.h>
++#include <linux/mtd/spi-nor.h>
+ #include <linux/sysfs.h>
+ #include <linux/types.h>
+ #include "spi-bcm-qspi.h"
+@@ -1221,7 +1221,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
+
+ /* non-aligned and very short transfers are handled by MSPI */
+ if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
+- len < 4)
++ len < 4 || op->cmd.opcode == SPINOR_OP_RDSFDP)
+ mspi_read = true;
+
+ if (!has_bspi(qspi) || mspi_read)
+diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
+index a50eb4db79de8..e5140532071d2 100644
+--- a/drivers/spi/spi-cadence.c
++++ b/drivers/spi/spi-cadence.c
+@@ -317,6 +317,15 @@ static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
+ xspi->rx_bytes -= nrx;
+
+ while (ntx || nrx) {
++ if (nrx) {
++ u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
++
++ if (xspi->rxbuf)
++ *xspi->rxbuf++ = data;
++
++ nrx--;
++ }
++
+ if (ntx) {
+ if (xspi->txbuf)
+ cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
+@@ -326,14 +335,6 @@ static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
+ ntx--;
+ }
+
+- if (nrx) {
+- u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
+-
+- if (xspi->rxbuf)
+- *xspi->rxbuf++ = data;
+-
+- nrx--;
+- }
+ }
+ }
+
+diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
+index 57d767a68e7b2..b9918dcc38027 100644
+--- a/drivers/spi/spi-intel-pci.c
++++ b/drivers/spi/spi-intel-pci.c
+@@ -84,7 +84,6 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0xa2a4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info },
+- { PCI_VDEVICE(INTEL, 0xae23), (unsigned long)&cnl_info },
+ { },
+ };
+ MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 8ead7acb99f34..4adc56dabf55c 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1624,6 +1624,10 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
+ pm_runtime_put_noidle(ctlr->dev.parent);
+ dev_err(&ctlr->dev, "Failed to power device: %d\n",
+ ret);
++
++ msg->status = ret;
++ spi_finalize_current_message(ctlr);
++
+ return ret;
+ }
+ }
+diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
+index 83d4f451b1a97..931cd88425e49 100644
+--- a/drivers/thermal/gov_power_allocator.c
++++ b/drivers/thermal/gov_power_allocator.c
+@@ -693,7 +693,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz,
+
+ trip = params->trip_switch_on;
+ if (trip && tz->temperature < trip->temperature) {
+- update = tz->last_temperature >= trip->temperature;
++ update = tz->passive;
+ tz->passive = 0;
+ reset_pid_controller(params);
+ allow_maximum_power(tz, update);
+diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
+index c69db6c90869c..1c5a429b2e3e9 100644
+--- a/drivers/thermal/intel/intel_hfi.c
++++ b/drivers/thermal/intel/intel_hfi.c
+@@ -24,6 +24,7 @@
+ #include <linux/bitops.h>
+ #include <linux/cpufeature.h>
+ #include <linux/cpumask.h>
++#include <linux/delay.h>
+ #include <linux/gfp.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+@@ -34,7 +35,9 @@
+ #include <linux/processor.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
++#include <linux/suspend.h>
+ #include <linux/string.h>
++#include <linux/syscore_ops.h>
+ #include <linux/topology.h>
+ #include <linux/workqueue.h>
+
+@@ -347,6 +350,52 @@ static void init_hfi_instance(struct hfi_instance *hfi_instance)
+ hfi_instance->data = hfi_instance->hdr + hfi_features.hdr_size;
+ }
+
++/* Caller must hold hfi_instance_lock. */
++static void hfi_enable(void)
++{
++ u64 msr_val;
++
++ rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++ msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
++ wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++}
++
++static void hfi_set_hw_table(struct hfi_instance *hfi_instance)
++{
++ phys_addr_t hw_table_pa;
++ u64 msr_val;
++
++ hw_table_pa = virt_to_phys(hfi_instance->hw_table);
++ msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
++ wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
++}
++
++/* Caller must hold hfi_instance_lock. */
++static void hfi_disable(void)
++{
++ u64 msr_val;
++ int i;
++
++ rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++ msr_val &= ~HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
++ wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++
++ /*
++ * Wait for hardware to acknowledge the disabling of HFI. Some
++ * processors may not do it. Wait for ~2ms. This is a reasonable
++ * time for hardware to complete any pending actions on the HFI
++ * memory.
++ */
++ for (i = 0; i < 2000; i++) {
++ rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
++ if (msr_val & PACKAGE_THERM_STATUS_HFI_UPDATED)
++ break;
++
++ udelay(1);
++ cpu_relax();
++ }
++}
++
+ /**
+ * intel_hfi_online() - Enable HFI on @cpu
+ * @cpu: CPU in which the HFI will be enabled
+@@ -364,8 +413,6 @@ void intel_hfi_online(unsigned int cpu)
+ {
+ struct hfi_instance *hfi_instance;
+ struct hfi_cpu_info *info;
+- phys_addr_t hw_table_pa;
+- u64 msr_val;
+ u16 die_id;
+
+ /* Nothing to do if hfi_instances are missing. */
+@@ -403,14 +450,16 @@ void intel_hfi_online(unsigned int cpu)
+ /*
+ * Hardware is programmed with the physical address of the first page
+ * frame of the table. Hence, the allocated memory must be page-aligned.
++ *
++ * Some processors do not forget the initial address of the HFI table
++ * even after having been reprogrammed. Keep using the same pages. Do
++ * not free them.
+ */
+ hfi_instance->hw_table = alloc_pages_exact(hfi_features.nr_table_pages,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!hfi_instance->hw_table)
+ goto unlock;
+
+- hw_table_pa = virt_to_phys(hfi_instance->hw_table);
+-
+ /*
+ * Allocate memory to keep a local copy of the table that
+ * hardware generates.
+@@ -420,16 +469,6 @@ void intel_hfi_online(unsigned int cpu)
+ if (!hfi_instance->local_table)
+ goto free_hw_table;
+
+- /*
+- * Program the address of the feedback table of this die/package. On
+- * some processors, hardware remembers the old address of the HFI table
+- * even after having been reprogrammed and re-enabled. Thus, do not free
+- * the pages allocated for the table or reprogram the hardware with a
+- * new base address. Namely, program the hardware only once.
+- */
+- msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
+- wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
+-
+ init_hfi_instance(hfi_instance);
+
+ INIT_DELAYED_WORK(&hfi_instance->update_work, hfi_update_work_fn);
+@@ -438,13 +477,8 @@ void intel_hfi_online(unsigned int cpu)
+
+ cpumask_set_cpu(cpu, hfi_instance->cpus);
+
+- /*
+- * Enable the hardware feedback interface and never disable it. See
+- * comment on programming the address of the table.
+- */
+- rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+- msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
+- wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++ hfi_set_hw_table(hfi_instance);
++ hfi_enable();
+
+ unlock:
+ mutex_unlock(&hfi_instance_lock);
+@@ -484,6 +518,10 @@ void intel_hfi_offline(unsigned int cpu)
+
+ mutex_lock(&hfi_instance_lock);
+ cpumask_clear_cpu(cpu, hfi_instance->cpus);
++
++ if (!cpumask_weight(hfi_instance->cpus))
++ hfi_disable();
++
+ mutex_unlock(&hfi_instance_lock);
+ }
+
+@@ -532,6 +570,30 @@ static __init int hfi_parse_features(void)
+ return 0;
+ }
+
++static void hfi_do_enable(void)
++{
++ /* This code runs only on the boot CPU. */
++ struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, 0);
++ struct hfi_instance *hfi_instance = info->hfi_instance;
++
++ /* No locking needed. There is no concurrency with CPU online. */
++ hfi_set_hw_table(hfi_instance);
++ hfi_enable();
++}
++
++static int hfi_do_disable(void)
++{
++ /* No locking needed. There is no concurrency with CPU offline. */
++ hfi_disable();
++
++ return 0;
++}
++
++static struct syscore_ops hfi_pm_ops = {
++ .resume = hfi_do_enable,
++ .suspend = hfi_do_disable,
++};
++
+ void __init intel_hfi_init(void)
+ {
+ struct hfi_instance *hfi_instance;
+@@ -563,6 +625,8 @@ void __init intel_hfi_init(void)
+ if (!hfi_updates_wq)
+ goto err_nomem;
+
++ register_syscore_ops(&hfi_pm_ops);
++
+ return;
+
+ err_nomem:
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index d5bb0da95376f..dba25e70903c3 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -301,8 +301,8 @@
+
+
+ /* Misc definitions */
++#define SC16IS7XX_SPI_READ_BIT BIT(7)
+ #define SC16IS7XX_FIFO_SIZE (64)
+-#define SC16IS7XX_REG_SHIFT 2
+ #define SC16IS7XX_GPIOS_PER_BANK 4
+
+ struct sc16is7xx_devtype {
+@@ -323,7 +323,8 @@ struct sc16is7xx_one_config {
+
+ struct sc16is7xx_one {
+ struct uart_port port;
+- u8 line;
++ struct regmap *regmap;
++ struct mutex efr_lock; /* EFR registers access */
+ struct kthread_work tx_work;
+ struct kthread_work reg_work;
+ struct kthread_delayed_work ms_work;
+@@ -334,7 +335,6 @@ struct sc16is7xx_one {
+
+ struct sc16is7xx_port {
+ const struct sc16is7xx_devtype *devtype;
+- struct regmap *regmap;
+ struct clk *clk;
+ #ifdef CONFIG_GPIOLIB
+ struct gpio_chip gpio;
+@@ -344,7 +344,6 @@ struct sc16is7xx_port {
+ unsigned char buf[SC16IS7XX_FIFO_SIZE];
+ struct kthread_worker kworker;
+ struct task_struct *kworker_task;
+- struct mutex efr_lock;
+ struct sc16is7xx_one p[];
+ };
+
+@@ -361,48 +360,35 @@ static void sc16is7xx_stop_tx(struct uart_port *port);
+
+ #define to_sc16is7xx_one(p,e) ((container_of((p), struct sc16is7xx_one, e)))
+
+-static int sc16is7xx_line(struct uart_port *port)
+-{
+- struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+-
+- return one->line;
+-}
+-
+ static u8 sc16is7xx_port_read(struct uart_port *port, u8 reg)
+ {
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ unsigned int val = 0;
+- const u8 line = sc16is7xx_line(port);
+
+- regmap_read(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line, &val);
++ regmap_read(one->regmap, reg, &val);
+
+ return val;
+ }
+
+ static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val)
+ {
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+- const u8 line = sc16is7xx_line(port);
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+- regmap_write(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line, val);
++ regmap_write(one->regmap, reg, val);
+ }
+
+ static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen)
+ {
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+- const u8 line = sc16is7xx_line(port);
+- u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | line;
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+- regcache_cache_bypass(s->regmap, true);
+- regmap_raw_read(s->regmap, addr, s->buf, rxlen);
+- regcache_cache_bypass(s->regmap, false);
++ regmap_noinc_read(one->regmap, SC16IS7XX_RHR_REG, s->buf, rxlen);
+ }
+
+ static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
+ {
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+- const u8 line = sc16is7xx_line(port);
+- u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | line;
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+ /*
+ * Don't send zero-length data, at least on SPI it confuses the chip
+@@ -411,32 +397,15 @@ static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
+ if (unlikely(!to_send))
+ return;
+
+- regcache_cache_bypass(s->regmap, true);
+- regmap_raw_write(s->regmap, addr, s->buf, to_send);
+- regcache_cache_bypass(s->regmap, false);
++ regmap_noinc_write(one->regmap, SC16IS7XX_THR_REG, s->buf, to_send);
+ }
+
+ static void sc16is7xx_port_update(struct uart_port *port, u8 reg,
+ u8 mask, u8 val)
+ {
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+- const u8 line = sc16is7xx_line(port);
+-
+- regmap_update_bits(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line,
+- mask, val);
+-}
+-
+-static int sc16is7xx_alloc_line(void)
+-{
+- int i;
+-
+- BUILD_BUG_ON(SC16IS7XX_MAX_DEVS > BITS_PER_LONG);
+-
+- for (i = 0; i < SC16IS7XX_MAX_DEVS; i++)
+- if (!test_and_set_bit(i, &sc16is7xx_lines))
+- break;
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+- return i;
++ regmap_update_bits(one->regmap, reg, mask, val);
+ }
+
+ static void sc16is7xx_power(struct uart_port *port, int on)
+@@ -478,7 +447,7 @@ static const struct sc16is7xx_devtype sc16is762_devtype = {
+
+ static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
+ {
+- switch (reg >> SC16IS7XX_REG_SHIFT) {
++ switch (reg) {
+ case SC16IS7XX_RHR_REG:
+ case SC16IS7XX_IIR_REG:
+ case SC16IS7XX_LSR_REG:
+@@ -497,7 +466,7 @@ static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
+
+ static bool sc16is7xx_regmap_precious(struct device *dev, unsigned int reg)
+ {
+- switch (reg >> SC16IS7XX_REG_SHIFT) {
++ switch (reg) {
+ case SC16IS7XX_RHR_REG:
+ return true;
+ default:
+@@ -507,9 +476,14 @@ static bool sc16is7xx_regmap_precious(struct device *dev, unsigned int reg)
+ return false;
+ }
+
++static bool sc16is7xx_regmap_noinc(struct device *dev, unsigned int reg)
++{
++ return reg == SC16IS7XX_RHR_REG;
++}
++
+ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ {
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ u8 lcr;
+ u8 prescaler = 0;
+ unsigned long clk = port->uartclk, div = clk / 16 / baud;
+@@ -532,7 +506,7 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ * because the bulk of the interrupt processing is run as a workqueue
+ * job in thread context.
+ */
+- mutex_lock(&s->efr_lock);
++ mutex_lock(&one->efr_lock);
+
+ lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
+
+@@ -541,17 +515,17 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ SC16IS7XX_LCR_CONF_MODE_B);
+
+ /* Enable enhanced features */
+- regcache_cache_bypass(s->regmap, true);
++ regcache_cache_bypass(one->regmap, true);
+ sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
+ SC16IS7XX_EFR_ENABLE_BIT,
+ SC16IS7XX_EFR_ENABLE_BIT);
+
+- regcache_cache_bypass(s->regmap, false);
++ regcache_cache_bypass(one->regmap, false);
+
+ /* Put LCR back to the normal mode */
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+
+- mutex_unlock(&s->efr_lock);
++ mutex_unlock(&one->efr_lock);
+
+ sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
+ SC16IS7XX_MCR_CLKSEL_BIT,
+@@ -562,10 +536,10 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ SC16IS7XX_LCR_CONF_MODE_A);
+
+ /* Write the new divisor */
+- regcache_cache_bypass(s->regmap, true);
++ regcache_cache_bypass(one->regmap, true);
+ sc16is7xx_port_write(port, SC16IS7XX_DLH_REG, div / 256);
+ sc16is7xx_port_write(port, SC16IS7XX_DLL_REG, div % 256);
+- regcache_cache_bypass(s->regmap, false);
++ regcache_cache_bypass(one->regmap, false);
+
+ /* Put LCR back to the normal mode */
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+@@ -701,6 +675,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
+
+ if (uart_circ_empty(xmit))
+ sc16is7xx_stop_tx(port);
++ else
++ sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
+ uart_port_unlock_irqrestore(port, flags);
+ }
+
+@@ -719,11 +695,10 @@ static unsigned int sc16is7xx_get_hwmctrl(struct uart_port *port)
+ static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
+ {
+ struct uart_port *port = &one->port;
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ unsigned long flags;
+ unsigned int status, changed;
+
+- lockdep_assert_held_once(&s->efr_lock);
++ lockdep_assert_held_once(&one->efr_lock);
+
+ status = sc16is7xx_get_hwmctrl(port);
+ changed = status ^ one->old_mctrl;
+@@ -749,74 +724,77 @@ static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
+
+ static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
+ {
++ bool rc = true;
++ unsigned int iir, rxlen;
+ struct uart_port *port = &s->p[portno].port;
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+- do {
+- unsigned int iir, rxlen;
+- struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+-
+- iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG);
+- if (iir & SC16IS7XX_IIR_NO_INT_BIT)
+- return false;
+-
+- iir &= SC16IS7XX_IIR_ID_MASK;
+-
+- switch (iir) {
+- case SC16IS7XX_IIR_RDI_SRC:
+- case SC16IS7XX_IIR_RLSE_SRC:
+- case SC16IS7XX_IIR_RTOI_SRC:
+- case SC16IS7XX_IIR_XOFFI_SRC:
+- rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG);
+-
+- /*
+- * There is a silicon bug that makes the chip report a
+- * time-out interrupt but no data in the FIFO. This is
+- * described in errata section 18.1.4.
+- *
+- * When this happens, read one byte from the FIFO to
+- * clear the interrupt.
+- */
+- if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen)
+- rxlen = 1;
+-
+- if (rxlen)
+- sc16is7xx_handle_rx(port, rxlen, iir);
+- break;
++ mutex_lock(&one->efr_lock);
++
++ iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG);
++ if (iir & SC16IS7XX_IIR_NO_INT_BIT) {
++ rc = false;
++ goto out_port_irq;
++ }
++
++ iir &= SC16IS7XX_IIR_ID_MASK;
++
++ switch (iir) {
++ case SC16IS7XX_IIR_RDI_SRC:
++ case SC16IS7XX_IIR_RLSE_SRC:
++ case SC16IS7XX_IIR_RTOI_SRC:
++ case SC16IS7XX_IIR_XOFFI_SRC:
++ rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG);
++
++ /*
++ * There is a silicon bug that makes the chip report a
++ * time-out interrupt but no data in the FIFO. This is
++ * described in errata section 18.1.4.
++ *
++ * When this happens, read one byte from the FIFO to
++ * clear the interrupt.
++ */
++ if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen)
++ rxlen = 1;
++
++ if (rxlen)
++ sc16is7xx_handle_rx(port, rxlen, iir);
++ break;
+ /* CTSRTS interrupt comes only when CTS goes inactive */
+- case SC16IS7XX_IIR_CTSRTS_SRC:
+- case SC16IS7XX_IIR_MSI_SRC:
+- sc16is7xx_update_mlines(one);
+- break;
+- case SC16IS7XX_IIR_THRI_SRC:
+- sc16is7xx_handle_tx(port);
+- break;
+- default:
+- dev_err_ratelimited(port->dev,
+- "ttySC%i: Unexpected interrupt: %x",
+- port->line, iir);
+- break;
+- }
+- } while (0);
+- return true;
++ case SC16IS7XX_IIR_CTSRTS_SRC:
++ case SC16IS7XX_IIR_MSI_SRC:
++ sc16is7xx_update_mlines(one);
++ break;
++ case SC16IS7XX_IIR_THRI_SRC:
++ sc16is7xx_handle_tx(port);
++ break;
++ default:
++ dev_err_ratelimited(port->dev,
++ "ttySC%i: Unexpected interrupt: %x",
++ port->line, iir);
++ break;
++ }
++
++out_port_irq:
++ mutex_unlock(&one->efr_lock);
++
++ return rc;
+ }
+
+ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
+ {
+- struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
++ bool keep_polling;
+
+- mutex_lock(&s->efr_lock);
++ struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
+
+- while (1) {
+- bool keep_polling = false;
++ do {
+ int i;
+
++ keep_polling = false;
++
+ for (i = 0; i < s->devtype->nr_uart; ++i)
+ keep_polling |= sc16is7xx_port_irq(s, i);
+- if (!keep_polling)
+- break;
+- }
+-
+- mutex_unlock(&s->efr_lock);
++ } while (keep_polling);
+
+ return IRQ_HANDLED;
+ }
+@@ -824,20 +802,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
+ static void sc16is7xx_tx_proc(struct kthread_work *ws)
+ {
+ struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+- unsigned long flags;
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+ if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ (port->rs485.delay_rts_before_send > 0))
+ msleep(port->rs485.delay_rts_before_send);
+
+- mutex_lock(&s->efr_lock);
++ mutex_lock(&one->efr_lock);
+ sc16is7xx_handle_tx(port);
+- mutex_unlock(&s->efr_lock);
+-
+- uart_port_lock_irqsave(port, &flags);
+- sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
+- uart_port_unlock_irqrestore(port, flags);
++ mutex_unlock(&one->efr_lock);
+ }
+
+ static void sc16is7xx_reconf_rs485(struct uart_port *port)
+@@ -940,9 +913,9 @@ static void sc16is7xx_ms_proc(struct kthread_work *ws)
+ struct sc16is7xx_port *s = dev_get_drvdata(one->port.dev);
+
+ if (one->port.state) {
+- mutex_lock(&s->efr_lock);
++ mutex_lock(&one->efr_lock);
+ sc16is7xx_update_mlines(one);
+- mutex_unlock(&s->efr_lock);
++ mutex_unlock(&one->efr_lock);
+
+ kthread_queue_delayed_work(&s->kworker, &one->ms_work, HZ);
+ }
+@@ -1026,7 +999,6 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ const struct ktermios *old)
+ {
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ unsigned int lcr, flow = 0;
+ int baud;
+@@ -1085,13 +1057,13 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ port->ignore_status_mask |= SC16IS7XX_LSR_BRK_ERROR_MASK;
+
+ /* As above, claim the mutex while accessing the EFR. */
+- mutex_lock(&s->efr_lock);
++ mutex_lock(&one->efr_lock);
+
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
+ SC16IS7XX_LCR_CONF_MODE_B);
+
+ /* Configure flow control */
+- regcache_cache_bypass(s->regmap, true);
++ regcache_cache_bypass(one->regmap, true);
+ sc16is7xx_port_write(port, SC16IS7XX_XON1_REG, termios->c_cc[VSTART]);
+ sc16is7xx_port_write(port, SC16IS7XX_XOFF1_REG, termios->c_cc[VSTOP]);
+
+@@ -1110,12 +1082,12 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ SC16IS7XX_EFR_REG,
+ SC16IS7XX_EFR_FLOWCTRL_BITS,
+ flow);
+- regcache_cache_bypass(s->regmap, false);
++ regcache_cache_bypass(one->regmap, false);
+
+ /* Update LCR register */
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+
+- mutex_unlock(&s->efr_lock);
++ mutex_unlock(&one->efr_lock);
+
+ /* Get baud rate generator configuration */
+ baud = uart_get_baud_rate(port, termios, old,
+@@ -1161,7 +1133,6 @@ static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termi
+ static int sc16is7xx_startup(struct uart_port *port)
+ {
+ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ unsigned int val;
+ unsigned long flags;
+
+@@ -1178,7 +1149,7 @@ static int sc16is7xx_startup(struct uart_port *port)
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
+ SC16IS7XX_LCR_CONF_MODE_B);
+
+- regcache_cache_bypass(s->regmap, true);
++ regcache_cache_bypass(one->regmap, true);
+
+ /* Enable write access to enhanced features and internal clock div */
+ sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
+@@ -1196,7 +1167,7 @@ static int sc16is7xx_startup(struct uart_port *port)
+ SC16IS7XX_TCR_RX_RESUME(24) |
+ SC16IS7XX_TCR_RX_HALT(48));
+
+- regcache_cache_bypass(s->regmap, false);
++ regcache_cache_bypass(one->regmap, false);
+
+ /* Now, initialize the UART */
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_WORD_LEN_8);
+@@ -1447,7 +1418,8 @@ static void sc16is7xx_setup_irda_ports(struct sc16is7xx_port *s)
+ /*
+ * Configure ports designated to operate as modem control lines.
+ */
+-static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s)
++static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s,
++ struct regmap *regmap)
+ {
+ int i;
+ int ret;
+@@ -1476,8 +1448,8 @@ static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s)
+
+ if (s->mctrl_mask)
+ regmap_update_bits(
+- s->regmap,
+- SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
++ regmap,
++ SC16IS7XX_IOCONTROL_REG,
+ SC16IS7XX_IOCONTROL_MODEM_A_BIT |
+ SC16IS7XX_IOCONTROL_MODEM_B_BIT, s->mctrl_mask);
+
+@@ -1492,7 +1464,7 @@ static const struct serial_rs485 sc16is7xx_rs485_supported = {
+
+ static int sc16is7xx_probe(struct device *dev,
+ const struct sc16is7xx_devtype *devtype,
+- struct regmap *regmap, int irq)
++ struct regmap *regmaps[], int irq)
+ {
+ unsigned long freq = 0, *pfreq = dev_get_platdata(dev);
+ unsigned int val;
+@@ -1500,16 +1472,20 @@ static int sc16is7xx_probe(struct device *dev,
+ int i, ret;
+ struct sc16is7xx_port *s;
+
+- if (IS_ERR(regmap))
+- return PTR_ERR(regmap);
++ for (i = 0; i < devtype->nr_uart; i++)
++ if (IS_ERR(regmaps[i]))
++ return PTR_ERR(regmaps[i]);
+
+ /*
+ * This device does not have an identification register that would
+ * tell us if we are really connected to the correct device.
+ * The best we can do is to check if communication is at all possible.
++ *
++ * Note: regmap[0] is used in the probe function to access registers
++ * common to all channels/ports, as it is guaranteed to be present on
++ * all variants.
+ */
+- ret = regmap_read(regmap,
+- SC16IS7XX_LSR_REG << SC16IS7XX_REG_SHIFT, &val);
++ ret = regmap_read(regmaps[0], SC16IS7XX_LSR_REG, &val);
+ if (ret < 0)
+ return -EPROBE_DEFER;
+
+@@ -1543,10 +1519,8 @@ static int sc16is7xx_probe(struct device *dev,
+ return -EINVAL;
+ }
+
+- s->regmap = regmap;
+ s->devtype = devtype;
+ dev_set_drvdata(dev, s);
+- mutex_init(&s->efr_lock);
+
+ kthread_init_worker(&s->kworker);
+ s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
+@@ -1558,11 +1532,17 @@ static int sc16is7xx_probe(struct device *dev,
+ sched_set_fifo(s->kworker_task);
+
+ /* reset device, purging any pending irq / data */
+- regmap_write(s->regmap, SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
+- SC16IS7XX_IOCONTROL_SRESET_BIT);
++ regmap_write(regmaps[0], SC16IS7XX_IOCONTROL_REG,
++ SC16IS7XX_IOCONTROL_SRESET_BIT);
+
+ for (i = 0; i < devtype->nr_uart; ++i) {
+- s->p[i].line = i;
++ s->p[i].port.line = find_first_zero_bit(&sc16is7xx_lines,
++ SC16IS7XX_MAX_DEVS);
++ if (s->p[i].port.line >= SC16IS7XX_MAX_DEVS) {
++ ret = -ERANGE;
++ goto out_ports;
++ }
++
+ /* Initialize port data */
+ s->p[i].port.dev = dev;
+ s->p[i].port.irq = irq;
+@@ -1582,12 +1562,9 @@ static int sc16is7xx_probe(struct device *dev,
+ s->p[i].port.rs485_supported = sc16is7xx_rs485_supported;
+ s->p[i].port.ops = &sc16is7xx_ops;
+ s->p[i].old_mctrl = 0;
+- s->p[i].port.line = sc16is7xx_alloc_line();
++ s->p[i].regmap = regmaps[i];
+
+- if (s->p[i].port.line >= SC16IS7XX_MAX_DEVS) {
+- ret = -ENOMEM;
+- goto out_ports;
+- }
++ mutex_init(&s->p[i].efr_lock);
+
+ ret = uart_get_rs485_mode(&s->p[i].port);
+ if (ret)
+@@ -1604,20 +1581,25 @@ static int sc16is7xx_probe(struct device *dev,
+ kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
+ kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
+ kthread_init_delayed_work(&s->p[i].ms_work, sc16is7xx_ms_proc);
++
+ /* Register port */
+- uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
++ ret = uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
++ if (ret)
++ goto out_ports;
++
++ set_bit(s->p[i].port.line, &sc16is7xx_lines);
+
+ /* Enable EFR */
+ sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG,
+ SC16IS7XX_LCR_CONF_MODE_B);
+
+- regcache_cache_bypass(s->regmap, true);
++ regcache_cache_bypass(regmaps[i], true);
+
+ /* Enable write access to enhanced features */
+ sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_EFR_REG,
+ SC16IS7XX_EFR_ENABLE_BIT);
+
+- regcache_cache_bypass(s->regmap, false);
++ regcache_cache_bypass(regmaps[i], false);
+
+ /* Restore access to general registers */
+ sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG, 0x00);
+@@ -1628,7 +1610,7 @@ static int sc16is7xx_probe(struct device *dev,
+
+ sc16is7xx_setup_irda_ports(s);
+
+- ret = sc16is7xx_setup_mctrl_ports(s);
++ ret = sc16is7xx_setup_mctrl_ports(s, regmaps[0]);
+ if (ret)
+ goto out_ports;
+
+@@ -1663,10 +1645,9 @@ static int sc16is7xx_probe(struct device *dev,
+ #endif
+
+ out_ports:
+- for (i--; i >= 0; i--) {
+- uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+- clear_bit(s->p[i].port.line, &sc16is7xx_lines);
+- }
++ for (i = 0; i < devtype->nr_uart; i++)
++ if (test_and_clear_bit(s->p[i].port.line, &sc16is7xx_lines))
++ uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+
+ kthread_stop(s->kworker_task);
+
+@@ -1688,8 +1669,8 @@ static void sc16is7xx_remove(struct device *dev)
+
+ for (i = 0; i < s->devtype->nr_uart; i++) {
+ kthread_cancel_delayed_work_sync(&s->p[i].ms_work);
+- uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+- clear_bit(s->p[i].port.line, &sc16is7xx_lines);
++ if (test_and_clear_bit(s->p[i].port.line, &sc16is7xx_lines))
++ uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+ sc16is7xx_power(&s->p[i].port, 0);
+ }
+
+@@ -1711,19 +1692,42 @@ static const struct of_device_id __maybe_unused sc16is7xx_dt_ids[] = {
+ MODULE_DEVICE_TABLE(of, sc16is7xx_dt_ids);
+
+ static struct regmap_config regcfg = {
+- .reg_bits = 7,
+- .pad_bits = 1,
++ .reg_bits = 5,
++ .pad_bits = 3,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = sc16is7xx_regmap_volatile,
+ .precious_reg = sc16is7xx_regmap_precious,
++ .writeable_noinc_reg = sc16is7xx_regmap_noinc,
++ .readable_noinc_reg = sc16is7xx_regmap_noinc,
++ .max_raw_read = SC16IS7XX_FIFO_SIZE,
++ .max_raw_write = SC16IS7XX_FIFO_SIZE,
++ .max_register = SC16IS7XX_EFCR_REG,
+ };
+
++static const char *sc16is7xx_regmap_name(u8 port_id)
++{
++ switch (port_id) {
++ case 0: return "port0";
++ case 1: return "port1";
++ default:
++ WARN_ON(true);
++ return NULL;
++ }
++}
++
++static unsigned int sc16is7xx_regmap_port_mask(unsigned int port_id)
++{
++ /* CH1,CH0 are at bits 2:1. */
++ return port_id << 1;
++}
++
+ #ifdef CONFIG_SERIAL_SC16IS7XX_SPI
+ static int sc16is7xx_spi_probe(struct spi_device *spi)
+ {
+ const struct sc16is7xx_devtype *devtype;
+- struct regmap *regmap;
++ struct regmap *regmaps[2];
++ unsigned int i;
+ int ret;
+
+ /* Setup SPI bus */
+@@ -1748,11 +1752,20 @@ static int sc16is7xx_spi_probe(struct spi_device *spi)
+ devtype = (struct sc16is7xx_devtype *)id_entry->driver_data;
+ }
+
+- regcfg.max_register = (0xf << SC16IS7XX_REG_SHIFT) |
+- (devtype->nr_uart - 1);
+- regmap = devm_regmap_init_spi(spi, &regcfg);
++ for (i = 0; i < devtype->nr_uart; i++) {
++ regcfg.name = sc16is7xx_regmap_name(i);
++ /*
++ * If read_flag_mask is 0, the regmap code sets it to a default
++ * of 0x80. Since we specify our own mask, we must add the READ
++ * bit ourselves:
++ */
++ regcfg.read_flag_mask = sc16is7xx_regmap_port_mask(i) |
++ SC16IS7XX_SPI_READ_BIT;
++ regcfg.write_flag_mask = sc16is7xx_regmap_port_mask(i);
++ regmaps[i] = devm_regmap_init_spi(spi, &regcfg);
++ }
+
+- return sc16is7xx_probe(&spi->dev, devtype, regmap, spi->irq);
++ return sc16is7xx_probe(&spi->dev, devtype, regmaps, spi->irq);
+ }
+
+ static void sc16is7xx_spi_remove(struct spi_device *spi)
+@@ -1791,7 +1804,8 @@ static int sc16is7xx_i2c_probe(struct i2c_client *i2c)
+ {
+ const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
+ const struct sc16is7xx_devtype *devtype;
+- struct regmap *regmap;
++ struct regmap *regmaps[2];
++ unsigned int i;
+
+ if (i2c->dev.of_node) {
+ devtype = device_get_match_data(&i2c->dev);
+@@ -1801,11 +1815,14 @@ static int sc16is7xx_i2c_probe(struct i2c_client *i2c)
+ devtype = (struct sc16is7xx_devtype *)id->driver_data;
+ }
+
+- regcfg.max_register = (0xf << SC16IS7XX_REG_SHIFT) |
+- (devtype->nr_uart - 1);
+- regmap = devm_regmap_init_i2c(i2c, &regcfg);
++ for (i = 0; i < devtype->nr_uart; i++) {
++ regcfg.name = sc16is7xx_regmap_name(i);
++ regcfg.read_flag_mask = sc16is7xx_regmap_port_mask(i);
++ regcfg.write_flag_mask = sc16is7xx_regmap_port_mask(i);
++ regmaps[i] = devm_regmap_init_i2c(i2c, &regcfg);
++ }
+
+- return sc16is7xx_probe(&i2c->dev, devtype, regmap, i2c->irq);
++ return sc16is7xx_probe(&i2c->dev, devtype, regmaps, i2c->irq);
+ }
+
+ static void sc16is7xx_i2c_remove(struct i2c_client *client)
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index ce2f76769fb0b..1f8d86b9c4fa0 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -8916,12 +8916,9 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
+
+ out:
+ pm_runtime_put_sync(hba->dev);
+- /*
+- * If we failed to initialize the device or the device is not
+- * present, turn off the power/clocks etc.
+- */
++
+ if (ret)
+- ufshcd_hba_exit(hba);
++ dev_err(hba->dev, "%s failed: %d\n", __func__, ret);
+ }
+
+ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
+diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c
+index de1ae0bead3ba..f4837c3b8ae2b 100644
+--- a/fs/afs/addr_list.c
++++ b/fs/afs/addr_list.c
+@@ -13,26 +13,33 @@
+ #include "internal.h"
+ #include "afs_fs.h"
+
++static void afs_free_addrlist(struct rcu_head *rcu)
++{
++ struct afs_addr_list *alist = container_of(rcu, struct afs_addr_list, rcu);
++ unsigned int i;
++
++ for (i = 0; i < alist->nr_addrs; i++)
++ rxrpc_kernel_put_peer(alist->addrs[i].peer);
++}
++
+ /*
+ * Release an address list.
+ */
+ void afs_put_addrlist(struct afs_addr_list *alist)
+ {
+ if (alist && refcount_dec_and_test(&alist->usage))
+- kfree_rcu(alist, rcu);
++ call_rcu(&alist->rcu, afs_free_addrlist);
+ }
+
+ /*
+ * Allocate an address list.
+ */
+-struct afs_addr_list *afs_alloc_addrlist(unsigned int nr,
+- unsigned short service,
+- unsigned short port)
++struct afs_addr_list *afs_alloc_addrlist(unsigned int nr, u16 service_id)
+ {
+ struct afs_addr_list *alist;
+ unsigned int i;
+
+- _enter("%u,%u,%u", nr, service, port);
++ _enter("%u,%u", nr, service_id);
+
+ if (nr > AFS_MAX_ADDRESSES)
+ nr = AFS_MAX_ADDRESSES;
+@@ -44,16 +51,8 @@ struct afs_addr_list *afs_alloc_addrlist(unsigned int nr,
+ refcount_set(&alist->usage, 1);
+ alist->max_addrs = nr;
+
+- for (i = 0; i < nr; i++) {
+- struct sockaddr_rxrpc *srx = &alist->addrs[i];
+- srx->srx_family = AF_RXRPC;
+- srx->srx_service = service;
+- srx->transport_type = SOCK_DGRAM;
+- srx->transport_len = sizeof(srx->transport.sin6);
+- srx->transport.sin6.sin6_family = AF_INET6;
+- srx->transport.sin6.sin6_port = htons(port);
+- }
+-
++ for (i = 0; i < nr; i++)
++ alist->addrs[i].service_id = service_id;
+ return alist;
+ }
+
+@@ -126,7 +125,7 @@ struct afs_vlserver_list *afs_parse_text_addrs(struct afs_net *net,
+ if (!vllist->servers[0].server)
+ goto error_vl;
+
+- alist = afs_alloc_addrlist(nr, service, AFS_VL_PORT);
++ alist = afs_alloc_addrlist(nr, service);
+ if (!alist)
+ goto error;
+
+@@ -197,9 +196,11 @@ struct afs_vlserver_list *afs_parse_text_addrs(struct afs_net *net,
+ }
+
+ if (family == AF_INET)
+- afs_merge_fs_addr4(alist, x[0], xport);
++ ret = afs_merge_fs_addr4(net, alist, x[0], xport);
+ else
+- afs_merge_fs_addr6(alist, x, xport);
++ ret = afs_merge_fs_addr6(net, alist, x, xport);
++ if (ret < 0)
++ goto error;
+
+ } while (p < end);
+
+@@ -271,25 +272,33 @@ struct afs_vlserver_list *afs_dns_query(struct afs_cell *cell, time64_t *_expiry
+ /*
+ * Merge an IPv4 entry into a fileserver address list.
+ */
+-void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port)
++int afs_merge_fs_addr4(struct afs_net *net, struct afs_addr_list *alist,
++ __be32 xdr, u16 port)
+ {
+- struct sockaddr_rxrpc *srx;
+- u32 addr = ntohl(xdr);
++ struct sockaddr_rxrpc srx;
++ struct rxrpc_peer *peer;
+ int i;
+
+ if (alist->nr_addrs >= alist->max_addrs)
+- return;
++ return 0;
+
+- for (i = 0; i < alist->nr_ipv4; i++) {
+- struct sockaddr_in *a = &alist->addrs[i].transport.sin;
+- u32 a_addr = ntohl(a->sin_addr.s_addr);
+- u16 a_port = ntohs(a->sin_port);
++ srx.srx_family = AF_RXRPC;
++ srx.transport_type = SOCK_DGRAM;
++ srx.transport_len = sizeof(srx.transport.sin);
++ srx.transport.sin.sin_family = AF_INET;
++ srx.transport.sin.sin_port = htons(port);
++ srx.transport.sin.sin_addr.s_addr = xdr;
+
+- if (addr == a_addr && port == a_port)
+- return;
+- if (addr == a_addr && port < a_port)
+- break;
+- if (addr < a_addr)
++ peer = rxrpc_kernel_lookup_peer(net->socket, &srx, GFP_KERNEL);
++ if (!peer)
++ return -ENOMEM;
++
++ for (i = 0; i < alist->nr_ipv4; i++) {
++ if (peer == alist->addrs[i].peer) {
++ rxrpc_kernel_put_peer(peer);
++ return 0;
++ }
++ if (peer <= alist->addrs[i].peer)
+ break;
+ }
+
+@@ -298,38 +307,42 @@ void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port)
+ alist->addrs + i,
+ sizeof(alist->addrs[0]) * (alist->nr_addrs - i));
+
+- srx = &alist->addrs[i];
+- srx->srx_family = AF_RXRPC;
+- srx->transport_type = SOCK_DGRAM;
+- srx->transport_len = sizeof(srx->transport.sin);
+- srx->transport.sin.sin_family = AF_INET;
+- srx->transport.sin.sin_port = htons(port);
+- srx->transport.sin.sin_addr.s_addr = xdr;
++ alist->addrs[i].peer = peer;
+ alist->nr_ipv4++;
+ alist->nr_addrs++;
++ return 0;
+ }
+
+ /*
+ * Merge an IPv6 entry into a fileserver address list.
+ */
+-void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port)
++int afs_merge_fs_addr6(struct afs_net *net, struct afs_addr_list *alist,
++ __be32 *xdr, u16 port)
+ {
+- struct sockaddr_rxrpc *srx;
+- int i, diff;
++ struct sockaddr_rxrpc srx;
++ struct rxrpc_peer *peer;
++ int i;
+
+ if (alist->nr_addrs >= alist->max_addrs)
+- return;
++ return 0;
+
+- for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
+- struct sockaddr_in6 *a = &alist->addrs[i].transport.sin6;
+- u16 a_port = ntohs(a->sin6_port);
++ srx.srx_family = AF_RXRPC;
++ srx.transport_type = SOCK_DGRAM;
++ srx.transport_len = sizeof(srx.transport.sin6);
++ srx.transport.sin6.sin6_family = AF_INET6;
++ srx.transport.sin6.sin6_port = htons(port);
++ memcpy(&srx.transport.sin6.sin6_addr, xdr, 16);
+
+- diff = memcmp(xdr, &a->sin6_addr, 16);
+- if (diff == 0 && port == a_port)
+- return;
+- if (diff == 0 && port < a_port)
+- break;
+- if (diff < 0)
++ peer = rxrpc_kernel_lookup_peer(net->socket, &srx, GFP_KERNEL);
++ if (!peer)
++ return -ENOMEM;
++
++ for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
++ if (peer == alist->addrs[i].peer) {
++ rxrpc_kernel_put_peer(peer);
++ return 0;
++ }
++ if (peer <= alist->addrs[i].peer)
+ break;
+ }
+
+@@ -337,15 +350,9 @@ void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port)
+ memmove(alist->addrs + i + 1,
+ alist->addrs + i,
+ sizeof(alist->addrs[0]) * (alist->nr_addrs - i));
+-
+- srx = &alist->addrs[i];
+- srx->srx_family = AF_RXRPC;
+- srx->transport_type = SOCK_DGRAM;
+- srx->transport_len = sizeof(srx->transport.sin6);
+- srx->transport.sin6.sin6_family = AF_INET6;
+- srx->transport.sin6.sin6_port = htons(port);
+- memcpy(&srx->transport.sin6.sin6_addr, xdr, 16);
++ alist->addrs[i].peer = peer;
+ alist->nr_addrs++;
++ return 0;
+ }
+
+ /*
+@@ -379,26 +386,24 @@ bool afs_iterate_addresses(struct afs_addr_cursor *ac)
+ selected:
+ ac->index = index;
+ set_bit(index, &ac->tried);
+- ac->responded = false;
++ ac->call_responded = false;
+ return true;
+ }
+
+ /*
+ * Release an address list cursor.
+ */
+-int afs_end_cursor(struct afs_addr_cursor *ac)
++void afs_end_cursor(struct afs_addr_cursor *ac)
+ {
+ struct afs_addr_list *alist;
+
+ alist = ac->alist;
+ if (alist) {
+- if (ac->responded &&
++ if (ac->call_responded &&
+ ac->index != alist->preferred &&
+ test_bit(ac->alist->preferred, &ac->tried))
+ WRITE_ONCE(alist->preferred, ac->index);
+ afs_put_addrlist(alist);
+ ac->alist = NULL;
+ }
+-
+- return ac->error;
+ }
+diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
+index d4ddb20d67320..99a3f20bc7869 100644
+--- a/fs/afs/cmservice.c
++++ b/fs/afs/cmservice.c
+@@ -146,10 +146,11 @@ static int afs_find_cm_server_by_peer(struct afs_call *call)
+ {
+ struct sockaddr_rxrpc srx;
+ struct afs_server *server;
++ struct rxrpc_peer *peer;
+
+- rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);
++ peer = rxrpc_kernel_get_call_peer(call->net->socket, call->rxcall);
+
+- server = afs_find_server(call->net, &srx);
++ server = afs_find_server(call->net, peer);
+ if (!server) {
+ trace_afs_cm_no_server(call, &srx);
+ return 0;
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 5219182e52e1a..9140780be5a43 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -474,6 +474,14 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
+ continue;
+ }
+
++ /* Don't expose silly rename entries to userspace. */
++ if (nlen > 6 &&
++ dire->u.name[0] == '.' &&
++ ctx->actor != afs_lookup_filldir &&
++ ctx->actor != afs_lookup_one_filldir &&
++ memcmp(dire->u.name, ".__afs", 6) == 0)
++ continue;
++
+ /* found the next entry */
+ if (!dir_emit(ctx, dire->u.name, nlen,
+ ntohl(dire->u.vnode),
+@@ -693,8 +701,9 @@ static void afs_do_lookup_success(struct afs_operation *op)
+ vp = &op->file[0];
+ abort_code = vp->scb.status.abort_code;
+ if (abort_code != 0) {
+- op->ac.abort_code = abort_code;
+- op->error = afs_abort_to_error(abort_code);
++ op->call_abort_code = abort_code;
++ afs_op_set_error(op, afs_abort_to_error(abort_code));
++ op->cumul_error.abort_code = abort_code;
+ }
+ break;
+
+@@ -707,6 +716,8 @@ static void afs_do_lookup_success(struct afs_operation *op)
+ break;
+ }
+
++ if (vp->scb.status.abort_code)
++ trace_afs_bulkstat_error(op, &vp->fid, i, vp->scb.status.abort_code);
+ if (!vp->scb.have_status && !vp->scb.have_error)
+ continue;
+
+@@ -846,13 +857,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
+ _debug("nr_files %u", op->nr_files);
+
+ /* Need space for examining all the selected files */
+- op->error = -ENOMEM;
+ if (op->nr_files > 2) {
+ op->more_files = kvcalloc(op->nr_files - 2,
+ sizeof(struct afs_vnode_param),
+ GFP_KERNEL);
+- if (!op->more_files)
++ if (!op->more_files) {
++ afs_op_nomem(op);
+ goto out_op;
++ }
+
+ for (i = 2; i < op->nr_files; i++) {
+ vp = &op->more_files[i - 2];
+@@ -878,14 +890,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
+ * lookups contained therein are stored in the reply without aborting
+ * the whole operation.
+ */
+- op->error = -ENOTSUPP;
++ afs_op_set_error(op, -ENOTSUPP);
+ if (!cookie->one_only) {
+ op->ops = &afs_inline_bulk_status_operation;
+ afs_begin_vnode_operation(op);
+ afs_wait_for_operation(op);
+ }
+
+- if (op->error == -ENOTSUPP) {
++ if (afs_op_error(op) == -ENOTSUPP) {
+ /* We could try FS.BulkStatus next, but this aborts the entire
+ * op if any of the lookups fails - so, for the moment, revert
+ * to FS.FetchStatus for op->file[1].
+@@ -895,12 +907,16 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
+ afs_begin_vnode_operation(op);
+ afs_wait_for_operation(op);
+ }
+- inode = ERR_PTR(op->error);
+
+ out_op:
+- if (op->error == 0) {
+- inode = &op->file[1].vnode->netfs.inode;
+- op->file[1].vnode = NULL;
++ if (!afs_op_error(op)) {
++ if (op->file[1].scb.status.abort_code) {
++ afs_op_accumulate_error(op, -ECONNABORTED,
++ op->file[1].scb.status.abort_code);
++ } else {
++ inode = &op->file[1].vnode->netfs.inode;
++ op->file[1].vnode = NULL;
++ }
+ }
+
+ if (op->file[0].scb.have_status)
+@@ -1255,7 +1271,7 @@ void afs_check_for_remote_deletion(struct afs_operation *op)
+ {
+ struct afs_vnode *vnode = op->file[0].vnode;
+
+- switch (op->ac.abort_code) {
++ switch (afs_op_abort_code(op)) {
+ case VNOVNODE:
+ set_bit(AFS_VNODE_DELETED, &vnode->flags);
+ afs_break_callback(vnode, afs_cb_break_for_deleted);
+@@ -1273,20 +1289,20 @@ static void afs_vnode_new_inode(struct afs_operation *op)
+
+ _enter("");
+
+- ASSERTCMP(op->error, ==, 0);
++ ASSERTCMP(afs_op_error(op), ==, 0);
+
+ inode = afs_iget(op, vp);
+ if (IS_ERR(inode)) {
+ /* ENOMEM or EINTR at a really inconvenient time - just abandon
+ * the new directory on the server.
+ */
+- op->error = PTR_ERR(inode);
++ afs_op_accumulate_error(op, PTR_ERR(inode), 0);
+ return;
+ }
+
+ vnode = AFS_FS_I(inode);
+ set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
+- if (!op->error)
++ if (!afs_op_error(op))
+ afs_cache_permit(vnode, op->key, vnode->cb_break, &vp->scb);
+ d_instantiate(op->dentry, inode);
+ }
+@@ -1320,7 +1336,7 @@ static void afs_create_put(struct afs_operation *op)
+ {
+ _enter("op=%08x", op->debug_id);
+
+- if (op->error)
++ if (afs_op_error(op))
+ d_drop(op->dentry);
+ }
+
+@@ -1480,7 +1496,7 @@ static void afs_dir_remove_link(struct afs_operation *op)
+ struct dentry *dentry = op->dentry;
+ int ret;
+
+- if (op->error != 0 ||
++ if (afs_op_error(op) ||
+ (op->file[1].scb.have_status && op->file[1].scb.have_error))
+ return;
+ if (d_really_is_positive(dentry))
+@@ -1504,10 +1520,10 @@ static void afs_dir_remove_link(struct afs_operation *op)
+
+ ret = afs_validate(vnode, op->key);
+ if (ret != -ESTALE)
+- op->error = ret;
++ afs_op_set_error(op, ret);
+ }
+
+- _debug("nlink %d [val %d]", vnode->netfs.inode.i_nlink, op->error);
++ _debug("nlink %d [val %d]", vnode->netfs.inode.i_nlink, afs_op_error(op));
+ }
+
+ static void afs_unlink_success(struct afs_operation *op)
+@@ -1538,7 +1554,7 @@ static void afs_unlink_edit_dir(struct afs_operation *op)
+ static void afs_unlink_put(struct afs_operation *op)
+ {
+ _enter("op=%08x", op->debug_id);
+- if (op->unlink.need_rehash && op->error < 0 && op->error != -ENOENT)
++ if (op->unlink.need_rehash && afs_op_error(op) < 0 && afs_op_error(op) != -ENOENT)
+ d_rehash(op->dentry);
+ }
+
+@@ -1579,7 +1595,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
+ /* Try to make sure we have a callback promise on the victim. */
+ ret = afs_validate(vnode, op->key);
+ if (ret < 0) {
+- op->error = ret;
++ afs_op_set_error(op, ret);
+ goto error;
+ }
+
+@@ -1588,7 +1604,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
+ spin_unlock(&dentry->d_lock);
+ /* Start asynchronous writeout of the inode */
+ write_inode_now(d_inode(dentry), 0);
+- op->error = afs_sillyrename(dvnode, vnode, dentry, op->key);
++ afs_op_set_error(op, afs_sillyrename(dvnode, vnode, dentry, op->key));
+ goto error;
+ }
+ if (!d_unhashed(dentry)) {
+@@ -1609,7 +1625,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
+ /* If there was a conflict with a third party, check the status of the
+ * unlinked vnode.
+ */
+- if (op->error == 0 && (op->flags & AFS_OPERATION_DIR_CONFLICT)) {
++ if (afs_op_error(op) == 0 && (op->flags & AFS_OPERATION_DIR_CONFLICT)) {
+ op->file[1].update_ctime = false;
+ op->fetch_status.which = 1;
+ op->ops = &afs_fetch_status_operation;
+@@ -1691,7 +1707,7 @@ static void afs_link_success(struct afs_operation *op)
+ static void afs_link_put(struct afs_operation *op)
+ {
+ _enter("op=%08x", op->debug_id);
+- if (op->error)
++ if (afs_op_error(op))
+ d_drop(op->dentry);
+ }
+
+@@ -1889,7 +1905,7 @@ static void afs_rename_put(struct afs_operation *op)
+ if (op->rename.rehash)
+ d_rehash(op->rename.rehash);
+ dput(op->rename.tmp);
+- if (op->error)
++ if (afs_op_error(op))
+ d_rehash(op->dentry);
+ }
+
+@@ -1934,7 +1950,7 @@ static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ return PTR_ERR(op);
+
+ ret = afs_validate(vnode, op->key);
+- op->error = ret;
++ afs_op_set_error(op, ret);
+ if (ret < 0)
+ goto error;
+
+@@ -1971,7 +1987,7 @@ static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ op->rename.tmp = d_alloc(new_dentry->d_parent,
+ &new_dentry->d_name);
+ if (!op->rename.tmp) {
+- op->error = -ENOMEM;
++ afs_op_nomem(op);
+ goto error;
+ }
+
+@@ -1979,7 +1995,7 @@ static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ AFS_FS_I(d_inode(new_dentry)),
+ new_dentry, op->key);
+ if (ret) {
+- op->error = ret;
++ afs_op_set_error(op, ret);
+ goto error;
+ }
+
+diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
+index bb5807e87fa4c..a1e581946b930 100644
+--- a/fs/afs/dir_silly.c
++++ b/fs/afs/dir_silly.c
+@@ -218,7 +218,7 @@ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode
+ /* If there was a conflict with a third party, check the status of the
+ * unlinked vnode.
+ */
+- if (op->error == 0 && (op->flags & AFS_OPERATION_DIR_CONFLICT)) {
++ if (op->cumul_error.error == 0 && (op->flags & AFS_OPERATION_DIR_CONFLICT)) {
+ op->file[1].update_ctime = false;
+ op->fetch_status.which = 1;
+ op->ops = &afs_fetch_status_operation;
+diff --git a/fs/afs/file.c b/fs/afs/file.c
+index d37dd201752ba..8f9b424275698 100644
+--- a/fs/afs/file.c
++++ b/fs/afs/file.c
+@@ -243,12 +243,9 @@ static void afs_fetch_data_notify(struct afs_operation *op)
+ {
+ struct afs_read *req = op->fetch.req;
+ struct netfs_io_subrequest *subreq = req->subreq;
+- int error = op->error;
++ int error = afs_op_error(op);
+
+- if (error == -ECONNABORTED)
+- error = afs_abort_to_error(op->ac.abort_code);
+ req->error = error;
+-
+ if (subreq) {
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ netfs_subreq_terminated(subreq, error ?: req->actual_len, false);
+@@ -271,7 +268,7 @@ static void afs_fetch_data_success(struct afs_operation *op)
+
+ static void afs_fetch_data_put(struct afs_operation *op)
+ {
+- op->fetch.req->error = op->error;
++ op->fetch.req->error = afs_op_error(op);
+ afs_put_read(op->fetch.req);
+ }
+
+diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
+index 7a3803ce3a229..cebe4fad81922 100644
+--- a/fs/afs/fs_operation.c
++++ b/fs/afs/fs_operation.c
+@@ -40,8 +40,8 @@ struct afs_operation *afs_alloc_operation(struct key *key, struct afs_volume *vo
+ op->net = volume->cell->net;
+ op->cb_v_break = volume->cb_v_break;
+ op->debug_id = atomic_inc_return(&afs_operation_debug_counter);
+- op->error = -EDESTADDRREQ;
+- op->ac.error = SHRT_MAX;
++ op->nr_iterations = -1;
++ afs_op_set_error(op, -EDESTADDRREQ);
+
+ _leave(" = [op=%08x]", op->debug_id);
+ return op;
+@@ -71,7 +71,7 @@ static bool afs_get_io_locks(struct afs_operation *op)
+ swap(vnode, vnode2);
+
+ if (mutex_lock_interruptible(&vnode->io_lock) < 0) {
+- op->error = -ERESTARTSYS;
++ afs_op_set_error(op, -ERESTARTSYS);
+ op->flags |= AFS_OPERATION_STOP;
+ _leave(" = f [I 0]");
+ return false;
+@@ -80,7 +80,7 @@ static bool afs_get_io_locks(struct afs_operation *op)
+
+ if (vnode2) {
+ if (mutex_lock_interruptible_nested(&vnode2->io_lock, 1) < 0) {
+- op->error = -ERESTARTSYS;
++ afs_op_set_error(op, -ERESTARTSYS);
+ op->flags |= AFS_OPERATION_STOP;
+ mutex_unlock(&vnode->io_lock);
+ op->flags &= ~AFS_OPERATION_LOCK_0;
+@@ -159,16 +159,16 @@ static void afs_end_vnode_operation(struct afs_operation *op)
+ {
+ _enter("");
+
+- if (op->error == -EDESTADDRREQ ||
+- op->error == -EADDRNOTAVAIL ||
+- op->error == -ENETUNREACH ||
+- op->error == -EHOSTUNREACH)
++ switch (afs_op_error(op)) {
++ case -EDESTADDRREQ:
++ case -EADDRNOTAVAIL:
++ case -ENETUNREACH:
++ case -EHOSTUNREACH:
+ afs_dump_edestaddrreq(op);
++ break;
++ }
+
+ afs_drop_io_locks(op);
+-
+- if (op->error == -ECONNABORTED)
+- op->error = afs_abort_to_error(op->ac.abort_code);
+ }
+
+ /*
+@@ -179,6 +179,8 @@ void afs_wait_for_operation(struct afs_operation *op)
+ _enter("");
+
+ while (afs_select_fileserver(op)) {
++ op->call_error = 0;
++ op->call_abort_code = 0;
+ op->cb_s_break = op->server->cb_s_break;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags) &&
+ op->ops->issue_yfs_rpc)
+@@ -186,30 +188,34 @@ void afs_wait_for_operation(struct afs_operation *op)
+ else if (op->ops->issue_afs_rpc)
+ op->ops->issue_afs_rpc(op);
+ else
+- op->ac.error = -ENOTSUPP;
+-
+- if (op->call)
+- op->error = afs_wait_for_call_to_complete(op->call, &op->ac);
++ op->call_error = -ENOTSUPP;
++
++ if (op->call) {
++ afs_wait_for_call_to_complete(op->call, &op->ac);
++ op->call_abort_code = op->call->abort_code;
++ op->call_error = op->call->error;
++ op->call_responded = op->call->responded;
++ op->ac.call_responded = true;
++ WRITE_ONCE(op->ac.alist->addrs[op->ac.index].last_error,
++ op->call_error);
++ afs_put_call(op->call);
++ }
+ }
+
+- switch (op->error) {
+- case 0:
++ if (!afs_op_error(op)) {
+ _debug("success");
+ op->ops->success(op);
+- break;
+- case -ECONNABORTED:
++ } else if (op->cumul_error.aborted) {
+ if (op->ops->aborted)
+ op->ops->aborted(op);
+- fallthrough;
+- default:
++ } else {
+ if (op->ops->failed)
+ op->ops->failed(op);
+- break;
+ }
+
+ afs_end_vnode_operation(op);
+
+- if (op->error == 0 && op->ops->edit_dir) {
++ if (!afs_op_error(op) && op->ops->edit_dir) {
+ _debug("edit_dir");
+ op->ops->edit_dir(op);
+ }
+@@ -221,7 +227,7 @@ void afs_wait_for_operation(struct afs_operation *op)
+ */
+ int afs_put_operation(struct afs_operation *op)
+ {
+- int i, ret = op->error;
++ int i, ret = afs_op_error(op);
+
+ _enter("op=%08x,%d", op->debug_id, ret);
+
+diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
+index daaf3810cc925..58d28b82571e3 100644
+--- a/fs/afs/fs_probe.c
++++ b/fs/afs/fs_probe.c
+@@ -101,6 +101,7 @@ static void afs_fs_probe_not_done(struct afs_net *net,
+ void afs_fileserver_probe_result(struct afs_call *call)
+ {
+ struct afs_addr_list *alist = call->alist;
++ struct afs_address *addr = &alist->addrs[call->addr_ix];
+ struct afs_server *server = call->server;
+ unsigned int index = call->addr_ix;
+ unsigned int rtt_us = 0, cap0;
+@@ -153,12 +154,12 @@ responded:
+ if (call->service_id == YFS_FS_SERVICE) {
+ server->probe.is_yfs = true;
+ set_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
+- alist->addrs[index].srx_service = call->service_id;
++ addr->service_id = call->service_id;
+ } else {
+ server->probe.not_yfs = true;
+ if (!server->probe.is_yfs) {
+ clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
+- alist->addrs[index].srx_service = call->service_id;
++ addr->service_id = call->service_id;
+ }
+ cap0 = ntohl(call->tmp);
+ if (cap0 & AFS3_VICED_CAPABILITY_64BITFILES)
+@@ -167,7 +168,7 @@ responded:
+ clear_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
+ }
+
+- rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us);
++ rtt_us = rxrpc_kernel_get_srtt(addr->peer);
+ if (rtt_us < server->probe.rtt) {
+ server->probe.rtt = rtt_us;
+ server->rtt = rtt_us;
+@@ -181,8 +182,8 @@ responded:
+ out:
+ spin_unlock(&server->probe_lock);
+
+- _debug("probe %pU [%u] %pISpc rtt=%u ret=%d",
+- &server->uuid, index, &alist->addrs[index].transport,
++ _debug("probe %pU [%u] %pISpc rtt=%d ret=%d",
++ &server->uuid, index, rxrpc_kernel_remote_addr(alist->addrs[index].peer),
+ rtt_us, ret);
+
+ return afs_done_one_fs_probe(call->net, server);
+diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
+index 7d37f63ef0f09..2a56dea225190 100644
+--- a/fs/afs/fsclient.c
++++ b/fs/afs/fsclient.c
+@@ -1612,6 +1612,7 @@ int afs_fs_give_up_all_callbacks(struct afs_net *net,
+ {
+ struct afs_call *call;
+ __be32 *bp;
++ int ret;
+
+ _enter("");
+
+@@ -1627,7 +1628,10 @@ int afs_fs_give_up_all_callbacks(struct afs_net *net,
+
+ call->server = afs_use_server(server, afs_server_trace_give_up_cb);
+ afs_make_call(ac, call, GFP_NOFS);
+- return afs_wait_for_call_to_complete(call, ac);
++ afs_wait_for_call_to_complete(call, ac);
++ ret = call->error;
++ afs_put_call(call);
++ return ret;
+ }
+
+ /*
+@@ -1899,7 +1903,7 @@ void afs_fs_inline_bulk_status(struct afs_operation *op)
+ int i;
+
+ if (test_bit(AFS_SERVER_FL_NO_IBULK, &op->server->flags)) {
+- op->error = -ENOTSUPP;
++ afs_op_set_error(op, -ENOTSUPP);
+ return;
+ }
+
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index 78efc97193493..d6eed332507f0 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -331,7 +331,7 @@ static void afs_fetch_status_success(struct afs_operation *op)
+
+ if (vnode->netfs.inode.i_state & I_NEW) {
+ ret = afs_inode_init_from_status(op, vp, vnode);
+- op->error = ret;
++ afs_op_set_error(op, ret);
+ if (ret == 0)
+ afs_cache_permit(vnode, op->key, vp->cb_break_before, &vp->scb);
+ } else {
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 7385d62c8cf50..5f6db0ac06ac6 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -72,6 +72,12 @@ enum afs_call_state {
+ AFS_CALL_COMPLETE, /* Completed or failed */
+ };
+
++struct afs_address {
++ struct rxrpc_peer *peer;
++ u16 service_id;
++ short last_error; /* Last error from this address */
++};
++
+ /*
+ * List of server addresses.
+ */
+@@ -87,7 +93,7 @@ struct afs_addr_list {
+ enum dns_lookup_status status:8;
+ unsigned long failed; /* Mask of addrs that failed locally/ICMP */
+ unsigned long responded; /* Mask of addrs that responded */
+- struct sockaddr_rxrpc addrs[] __counted_by(max_addrs);
++ struct afs_address addrs[] __counted_by(max_addrs);
+ #define AFS_MAX_ADDRESSES ((unsigned int)(sizeof(unsigned long) * 8))
+ };
+
+@@ -116,7 +122,6 @@ struct afs_call {
+ };
+ void *buffer; /* reply receive buffer */
+ union {
+- long ret0; /* Value to reply with instead of 0 */
+ struct afs_addr_list *ret_alist;
+ struct afs_vldb_entry *ret_vldb;
+ char *ret_str;
+@@ -140,6 +145,7 @@ struct afs_call {
+ bool upgrade; /* T to request service upgrade */
+ bool intr; /* T if interruptible */
+ bool unmarshalling_error; /* T if an unmarshalling error occurred */
++ bool responded; /* Got a response from the call (may be abort) */
+ u16 service_id; /* Actual service ID (after upgrade) */
+ unsigned int debug_id; /* Trace ID */
+ u32 operation_ID; /* operation ID for an incoming call */
+@@ -418,7 +424,7 @@ struct afs_vlserver {
+ atomic_t probe_outstanding;
+ spinlock_t probe_lock;
+ struct {
+- unsigned int rtt; /* RTT in uS */
++ unsigned int rtt; /* Best RTT in uS (or UINT_MAX) */
+ u32 abort_code;
+ short error;
+ unsigned short flags;
+@@ -535,7 +541,7 @@ struct afs_server {
+ atomic_t probe_outstanding;
+ spinlock_t probe_lock;
+ struct {
+- unsigned int rtt; /* RTT in uS */
++ unsigned int rtt; /* Best RTT in uS (or UINT_MAX) */
+ u32 abort_code;
+ short error;
+ bool responded:1;
+@@ -714,8 +720,10 @@ struct afs_permits {
+ * Error prioritisation and accumulation.
+ */
+ struct afs_error {
+- short error; /* Accumulated error */
++ s32 abort_code; /* Cumulative abort code */
++ short error; /* Cumulative error */
+ bool responded; /* T if server responded */
++ bool aborted; /* T if ->error is from an abort */
+ };
+
+ /*
+@@ -725,10 +733,8 @@ struct afs_addr_cursor {
+ struct afs_addr_list *alist; /* Current address list (pins ref) */
+ unsigned long tried; /* Tried addresses */
+ signed char index; /* Current address */
+- bool responded; /* T if the current address responded */
+ unsigned short nr_iterations; /* Number of address iterations */
+- short error;
+- u32 abort_code;
++ bool call_responded;
+ };
+
+ /*
+@@ -741,13 +747,16 @@ struct afs_vl_cursor {
+ struct afs_vlserver *server; /* Server on which this resides */
+ struct key *key; /* Key for the server */
+ unsigned long untried; /* Bitmask of untried servers */
++ struct afs_error cumul_error; /* Cumulative error */
++ s32 call_abort_code;
+ short index; /* Current server */
+- short error;
++ short call_error; /* Error from single call */
+ unsigned short flags;
+ #define AFS_VL_CURSOR_STOP 0x0001 /* Set to cease iteration */
+ #define AFS_VL_CURSOR_RETRY 0x0002 /* Set to do a retry */
+ #define AFS_VL_CURSOR_RETRIED 0x0004 /* Set if started a retry */
+- unsigned short nr_iterations; /* Number of server iterations */
++ short nr_iterations; /* Number of server iterations */
++ bool call_responded; /* T if the current address responded */
+ };
+
+ /*
+@@ -798,8 +807,10 @@ struct afs_operation {
+ struct dentry *dentry_2; /* Second dentry to be altered */
+ struct timespec64 mtime; /* Modification time to record */
+ struct timespec64 ctime; /* Change time to set */
++ struct afs_error cumul_error; /* Cumulative error */
+ short nr_files; /* Number of entries in file[], more_files */
+- short error;
++ short call_error; /* Error from single call */
++ s32 call_abort_code; /* Abort code from single call */
+ unsigned int debug_id;
+
+ unsigned int cb_v_break; /* Volume break counter before op */
+@@ -854,7 +865,9 @@ struct afs_operation {
+ struct afs_call *call;
+ unsigned long untried; /* Bitmask of untried servers */
+ short index; /* Current server */
+- unsigned short nr_iterations; /* Number of server iterations */
++ short nr_iterations; /* Number of server iterations */
++ bool call_responded; /* T if the current address responded */
++
+
+ unsigned int flags;
+ #define AFS_OPERATION_STOP 0x0001 /* Set to cease iteration */
+@@ -962,19 +975,21 @@ static inline struct afs_addr_list *afs_get_addrlist(struct afs_addr_list *alist
+ refcount_inc(&alist->usage);
+ return alist;
+ }
+-extern struct afs_addr_list *afs_alloc_addrlist(unsigned int,
+- unsigned short,
+- unsigned short);
++extern struct afs_addr_list *afs_alloc_addrlist(unsigned int nr, u16 service_id);
+ extern void afs_put_addrlist(struct afs_addr_list *);
+ extern struct afs_vlserver_list *afs_parse_text_addrs(struct afs_net *,
+ const char *, size_t, char,
+ unsigned short, unsigned short);
++bool afs_addr_list_same(const struct afs_addr_list *a,
++ const struct afs_addr_list *b);
+ extern struct afs_vlserver_list *afs_dns_query(struct afs_cell *, time64_t *);
+ extern bool afs_iterate_addresses(struct afs_addr_cursor *);
+-extern int afs_end_cursor(struct afs_addr_cursor *);
++extern void afs_end_cursor(struct afs_addr_cursor *ac);
+
+-extern void afs_merge_fs_addr4(struct afs_addr_list *, __be32, u16);
+-extern void afs_merge_fs_addr6(struct afs_addr_list *, __be32 *, u16);
++extern int afs_merge_fs_addr4(struct afs_net *net, struct afs_addr_list *addr,
++ __be32 xdr, u16 port);
++extern int afs_merge_fs_addr6(struct afs_net *net, struct afs_addr_list *addr,
++ __be32 *xdr, u16 port);
+
+ /*
+ * callback.c
+@@ -1133,11 +1148,6 @@ extern bool afs_begin_vnode_operation(struct afs_operation *);
+ extern void afs_wait_for_operation(struct afs_operation *);
+ extern int afs_do_sync_operation(struct afs_operation *);
+
+-static inline void afs_op_nomem(struct afs_operation *op)
+-{
+- op->error = -ENOMEM;
+-}
+-
+ static inline void afs_op_set_vnode(struct afs_operation *op, unsigned int n,
+ struct afs_vnode *vnode)
+ {
+@@ -1231,6 +1241,31 @@ static inline void __afs_stat(atomic_t *s)
+ extern int afs_abort_to_error(u32);
+ extern void afs_prioritise_error(struct afs_error *, int, u32);
+
++static inline void afs_op_nomem(struct afs_operation *op)
++{
++ op->cumul_error.error = -ENOMEM;
++}
++
++static inline int afs_op_error(const struct afs_operation *op)
++{
++ return op->cumul_error.error;
++}
++
++static inline s32 afs_op_abort_code(const struct afs_operation *op)
++{
++ return op->cumul_error.abort_code;
++}
++
++static inline int afs_op_set_error(struct afs_operation *op, int error)
++{
++ return op->cumul_error.error = error;
++}
++
++static inline void afs_op_accumulate_error(struct afs_operation *op, int error, s32 abort_code)
++{
++ afs_prioritise_error(&op->cumul_error, error, abort_code);
++}
++
+ /*
+ * mntpt.c
+ */
+@@ -1274,7 +1309,7 @@ extern void __net_exit afs_close_socket(struct afs_net *);
+ extern void afs_charge_preallocation(struct work_struct *);
+ extern void afs_put_call(struct afs_call *);
+ extern void afs_make_call(struct afs_addr_cursor *, struct afs_call *, gfp_t);
+-extern long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *);
++void afs_wait_for_call_to_complete(struct afs_call *call, struct afs_addr_cursor *ac);
+ extern struct afs_call *afs_alloc_flat_call(struct afs_net *,
+ const struct afs_call_type *,
+ size_t, size_t);
+@@ -1401,8 +1436,7 @@ extern void __exit afs_clean_up_permit_cache(void);
+ */
+ extern spinlock_t afs_server_peer_lock;
+
+-extern struct afs_server *afs_find_server(struct afs_net *,
+- const struct sockaddr_rxrpc *);
++extern struct afs_server *afs_find_server(struct afs_net *, const struct rxrpc_peer *);
+ extern struct afs_server *afs_find_server_by_uuid(struct afs_net *, const uuid_t *);
+ extern struct afs_server *afs_lookup_server(struct afs_cell *, struct key *, const uuid_t *, u32);
+ extern struct afs_server *afs_get_server(struct afs_server *, enum afs_server_trace);
+@@ -1603,7 +1637,7 @@ static inline void afs_update_dentry_version(struct afs_operation *op,
+ struct afs_vnode_param *dir_vp,
+ struct dentry *dentry)
+ {
+- if (!op->error)
++ if (!op->cumul_error.error)
+ dentry->d_fsdata =
+ (void *)(unsigned long)dir_vp->scb.status.data_version;
+ }
+diff --git a/fs/afs/misc.c b/fs/afs/misc.c
+index 805328ca54284..b8180bf2281f7 100644
+--- a/fs/afs/misc.c
++++ b/fs/afs/misc.c
+@@ -116,6 +116,8 @@ void afs_prioritise_error(struct afs_error *e, int error, u32 abort_code)
+ {
+ switch (error) {
+ case 0:
++ e->aborted = false;
++ e->error = 0;
+ return;
+ default:
+ if (e->error == -ETIMEDOUT ||
+@@ -161,12 +163,16 @@ void afs_prioritise_error(struct afs_error *e, int error, u32 abort_code)
+ if (e->responded)
+ return;
+ e->error = error;
++ e->aborted = false;
+ return;
+
+ case -ECONNABORTED:
+- error = afs_abort_to_error(abort_code);
+- fallthrough;
++ e->error = afs_abort_to_error(abort_code);
++ e->aborted = true;
++ e->responded = true;
++ return;
+ case -ENETRESET: /* Responded, but we seem to have changed address */
++ e->aborted = false;
+ e->responded = true;
+ e->error = error;
+ return;
+diff --git a/fs/afs/proc.c b/fs/afs/proc.c
+index 2a0c83d715659..8a65a06908d21 100644
+--- a/fs/afs/proc.c
++++ b/fs/afs/proc.c
+@@ -307,7 +307,7 @@ static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v)
+ for (i = 0; i < alist->nr_addrs; i++)
+ seq_printf(m, " %c %pISpc\n",
+ alist->preferred == i ? '>' : '-',
+- &alist->addrs[i].transport);
++ rxrpc_kernel_remote_addr(alist->addrs[i].peer));
+ }
+ seq_printf(m, " info: fl=%lx rtt=%d\n", vlserver->flags, vlserver->rtt);
+ seq_printf(m, " probe: fl=%x e=%d ac=%d out=%d\n",
+@@ -398,9 +398,10 @@ static int afs_proc_servers_show(struct seq_file *m, void *v)
+ seq_printf(m, " - ALIST v=%u rsp=%lx f=%lx\n",
+ alist->version, alist->responded, alist->failed);
+ for (i = 0; i < alist->nr_addrs; i++)
+- seq_printf(m, " [%x] %pISpc%s\n",
+- i, &alist->addrs[i].transport,
+- alist->preferred == i ? "*" : "");
++ seq_printf(m, " [%x] %pISpc%s rtt=%d\n",
++ i, rxrpc_kernel_remote_addr(alist->addrs[i].peer),
++ alist->preferred == i ? "*" : "",
++ rxrpc_kernel_get_srtt(alist->addrs[i].peer));
+ return 0;
+ }
+
+diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
+index a840c3588ebbb..68c88e3a0916b 100644
+--- a/fs/afs/rotate.c
++++ b/fs/afs/rotate.c
+@@ -13,6 +13,7 @@
+ #include <linux/sched/signal.h>
+ #include "internal.h"
+ #include "afs_fs.h"
++#include "protocol_uae.h"
+
+ /*
+ * Begin iteration through a server list, starting with the vnode's last used
+@@ -50,7 +51,7 @@ static bool afs_start_fs_iteration(struct afs_operation *op,
+ * and have to return an error.
+ */
+ if (op->flags & AFS_OPERATION_CUR_ONLY) {
+- op->error = -ESTALE;
++ afs_op_set_error(op, -ESTALE);
+ return false;
+ }
+
+@@ -92,7 +93,7 @@ static bool afs_sleep_and_retry(struct afs_operation *op)
+ if (!(op->flags & AFS_OPERATION_UNINTR)) {
+ msleep_interruptible(1000);
+ if (signal_pending(current)) {
+- op->error = -ERESTARTSYS;
++ afs_op_set_error(op, -ERESTARTSYS);
+ return false;
+ }
+ } else {
+@@ -111,31 +112,34 @@ bool afs_select_fileserver(struct afs_operation *op)
+ struct afs_addr_list *alist;
+ struct afs_server *server;
+ struct afs_vnode *vnode = op->file[0].vnode;
+- struct afs_error e;
+- u32 rtt;
+- int error = op->ac.error, i;
++ unsigned int rtt;
++ s32 abort_code = op->call_abort_code;
++ int error = op->call_error, i;
+
+- _enter("%lx[%d],%lx[%d],%d,%d",
++ op->nr_iterations++;
++
++ _enter("OP=%x+%x,%llx,%lx[%d],%lx[%d],%d,%d",
++ op->debug_id, op->nr_iterations, op->volume->vid,
+ op->untried, op->index,
+ op->ac.tried, op->ac.index,
+- error, op->ac.abort_code);
++ error, abort_code);
+
+ if (op->flags & AFS_OPERATION_STOP) {
+ _leave(" = f [stopped]");
+ return false;
+ }
+
+- op->nr_iterations++;
+-
+- /* Evaluate the result of the previous operation, if there was one. */
+- switch (error) {
+- case SHRT_MAX:
++ if (op->nr_iterations == 0)
+ goto start;
+
++ /* Evaluate the result of the previous operation, if there was one. */
++ switch (op->call_error) {
+ case 0:
++ op->cumul_error.responded = true;
++ fallthrough;
+ default:
+ /* Success or local failure. Stop. */
+- op->error = error;
++ afs_op_set_error(op, error);
+ op->flags |= AFS_OPERATION_STOP;
+ _leave(" = f [okay/local %d]", error);
+ return false;
+@@ -143,16 +147,27 @@ bool afs_select_fileserver(struct afs_operation *op)
+ case -ECONNABORTED:
+ /* The far side rejected the operation on some grounds. This
+ * might involve the server being busy or the volume having been moved.
++ *
++ * Note that various V* errors should not be sent to a cache manager
++ * by a fileserver as they should be translated to more modern UAE*
++ * errors instead. IBM AFS and OpenAFS fileservers, however, do leak
++ * these abort codes.
+ */
+- switch (op->ac.abort_code) {
++ op->cumul_error.responded = true;
++ switch (abort_code) {
+ case VNOVOL:
+ /* This fileserver doesn't know about the volume.
+ * - May indicate that the VL is wrong - retry once and compare
+ * the results.
+ * - May indicate that the fileserver couldn't attach to the vol.
++ * - The volume might have been temporarily removed so that it can
++ * be replaced by a volume restore. "vos" might have ended one
++ * transaction and has yet to create the next.
++ * - The volume might not be blessed or might not be in-service
++ * (administrative action).
+ */
+ if (op->flags & AFS_OPERATION_VNOVOL) {
+- op->error = -EREMOTEIO;
++ afs_op_accumulate_error(op, -EREMOTEIO, abort_code);
+ goto next_server;
+ }
+
+@@ -162,11 +177,13 @@ bool afs_select_fileserver(struct afs_operation *op)
+
+ set_bit(AFS_VOLUME_NEEDS_UPDATE, &op->volume->flags);
+ error = afs_check_volume_status(op->volume, op);
+- if (error < 0)
+- goto failed_set_error;
++ if (error < 0) {
++ afs_op_set_error(op, error);
++ goto failed;
++ }
+
+ if (test_bit(AFS_VOLUME_DELETED, &op->volume->flags)) {
+- op->error = -ENOMEDIUM;
++ afs_op_set_error(op, -ENOMEDIUM);
+ goto failed;
+ }
+
+@@ -174,7 +191,7 @@ bool afs_select_fileserver(struct afs_operation *op)
+ * it's the fileserver having trouble.
+ */
+ if (rcu_access_pointer(op->volume->servers) == op->server_list) {
+- op->error = -EREMOTEIO;
++ afs_op_accumulate_error(op, -EREMOTEIO, abort_code);
+ goto next_server;
+ }
+
+@@ -183,42 +200,91 @@ bool afs_select_fileserver(struct afs_operation *op)
+ _leave(" = t [vnovol]");
+ return true;
+
+- case VSALVAGE: /* TODO: Should this return an error or iterate? */
+ case VVOLEXISTS:
+- case VNOSERVICE:
+ case VONLINE:
+- case VDISKFULL:
+- case VOVERQUOTA:
+- op->error = afs_abort_to_error(op->ac.abort_code);
++ /* These should not be returned from the fileserver. */
++ pr_warn("Fileserver returned unexpected abort %d\n",
++ abort_code);
++ afs_op_accumulate_error(op, -EREMOTEIO, abort_code);
++ goto next_server;
++
++ case VNOSERVICE:
++ /* Prior to AFS 3.2 VNOSERVICE was returned from the fileserver
++ * if the volume was neither in-service nor administratively
++ * blessed. All usage was replaced by VNOVOL because AFS 3.1 and
++ * earlier cache managers did not handle VNOSERVICE and assumed
++ * it was the client OSes errno 105.
++ *
++ * Starting with OpenAFS 1.4.8 VNOSERVICE was repurposed as the
++ * fileserver idle dead time error which was sent in place of
++ * RX_CALL_TIMEOUT (-3). The error was intended to be sent if the
++ * fileserver took too long to send a reply to the client.
++ * RX_CALL_TIMEOUT would have caused the cache manager to mark the
++ * server down whereas VNOSERVICE since AFS 3.2 would cause cache
++ * manager to temporarily (up to 15 minutes) mark the volume
++ * instance as unusable.
++ *
++ * The idle dead logic resulted in cache inconsistency since a
++ * state changing call that the cache manager assumed was dead
++ * could still be processed to completion by the fileserver. This
++ * logic was removed in OpenAFS 1.8.0 and VNOSERVICE is no longer
++ * returned. However, many 1.4.8 through 1.6.24 fileservers are
++ * still in existence.
++ *
++ * AuriStorFS fileservers have never returned VNOSERVICE.
++ *
++ * VNOSERVICE should be treated as an alias for RX_CALL_TIMEOUT.
++ */
++ case RX_CALL_TIMEOUT:
++ afs_op_accumulate_error(op, -ETIMEDOUT, abort_code);
+ goto next_server;
+
++ case VSALVAGING: /* This error should not be leaked to cache managers
++ * but is from OpenAFS demand attach fileservers.
++ * It should be treated as an alias for VOFFLINE.
++ */
++ case VSALVAGE: /* VSALVAGE should be treated as a synonym of VOFFLINE */
+ case VOFFLINE:
++ /* The volume is in use by the volserver or another volume utility
++ * for an operation that might alter the contents. The volume is
++ * expected to come back but it might take a long time (could be
++ * days).
++ */
+ if (!test_and_set_bit(AFS_VOLUME_OFFLINE, &op->volume->flags)) {
+- afs_busy(op->volume, op->ac.abort_code);
++ afs_busy(op->volume, abort_code);
+ clear_bit(AFS_VOLUME_BUSY, &op->volume->flags);
+ }
+ if (op->flags & AFS_OPERATION_NO_VSLEEP) {
+- op->error = -EADV;
++ afs_op_set_error(op, -EADV);
+ goto failed;
+ }
+ if (op->flags & AFS_OPERATION_CUR_ONLY) {
+- op->error = -ESTALE;
++ afs_op_set_error(op, -ESTALE);
+ goto failed;
+ }
+ goto busy;
+
+- case VSALVAGING:
+- case VRESTARTING:
++ case VRESTARTING: /* The fileserver is either shutting down or starting up. */
+ case VBUSY:
+- /* Retry after going round all the servers unless we
+- * have a file lock we need to maintain.
++ /* The volume is in use by the volserver or another volume
++ * utility for an operation that is not expected to alter the
++ * contents of the volume. VBUSY does not need to be returned
++ * for a ROVOL or BACKVOL bound to an ITBusy volserver
++ * transaction. The fileserver is permitted to continue serving
++ * content from ROVOLs and BACKVOLs during an ITBusy transaction
++ * because the content will not change. However, many fileserver
++ * releases do return VBUSY for ROVOL and BACKVOL instances under
++ * many circumstances.
++ *
++ * Retry after going round all the servers unless we have a file
++ * lock we need to maintain.
+ */
+ if (op->flags & AFS_OPERATION_NO_VSLEEP) {
+- op->error = -EBUSY;
++ afs_op_set_error(op, -EBUSY);
+ goto failed;
+ }
+ if (!test_and_set_bit(AFS_VOLUME_BUSY, &op->volume->flags)) {
+- afs_busy(op->volume, op->ac.abort_code);
++ afs_busy(op->volume, abort_code);
+ clear_bit(AFS_VOLUME_OFFLINE, &op->volume->flags);
+ }
+ busy:
+@@ -226,7 +292,7 @@ bool afs_select_fileserver(struct afs_operation *op)
+ if (!afs_sleep_and_retry(op))
+ goto failed;
+
+- /* Retry with same server & address */
++ /* Retry with same server & address */
+ _leave(" = t [vbusy]");
+ return true;
+ }
+@@ -243,7 +309,7 @@ bool afs_select_fileserver(struct afs_operation *op)
+ * honour, just in case someone sets up a loop.
+ */
+ if (op->flags & AFS_OPERATION_VMOVED) {
+- op->error = -EREMOTEIO;
++ afs_op_set_error(op, -EREMOTEIO);
+ goto failed;
+ }
+ op->flags |= AFS_OPERATION_VMOVED;
+@@ -251,8 +317,10 @@ bool afs_select_fileserver(struct afs_operation *op)
+ set_bit(AFS_VOLUME_WAIT, &op->volume->flags);
+ set_bit(AFS_VOLUME_NEEDS_UPDATE, &op->volume->flags);
+ error = afs_check_volume_status(op->volume, op);
+- if (error < 0)
+- goto failed_set_error;
++ if (error < 0) {
++ afs_op_set_error(op, error);
++ goto failed;
++ }
+
+ /* If the server list didn't change, then the VLDB is
+ * out of sync with the fileservers. This is hopefully
+@@ -264,22 +332,48 @@ bool afs_select_fileserver(struct afs_operation *op)
+ * TODO: Retry a few times with sleeps.
+ */
+ if (rcu_access_pointer(op->volume->servers) == op->server_list) {
+- op->error = -ENOMEDIUM;
++ afs_op_accumulate_error(op, -ENOMEDIUM, abort_code);
+ goto failed;
+ }
+
+ goto restart_from_beginning;
+
++ case UAEIO:
++ case VIO:
++ afs_op_accumulate_error(op, -EREMOTEIO, abort_code);
++ if (op->volume->type != AFSVL_RWVOL)
++ goto next_server;
++ goto failed;
++
++ case VDISKFULL:
++ case UAENOSPC:
++ /* The partition is full. Only applies to RWVOLs.
++ * Translate locally and return ENOSPC.
++ * No replicas to failover to.
++ */
++ afs_op_set_error(op, -ENOSPC);
++ goto failed_but_online;
++
++ case VOVERQUOTA:
++ case UAEDQUOT:
++ /* Volume is full. Only applies to RWVOLs.
++ * Translate locally and return EDQUOT.
++ * No replicas to failover to.
++ */
++ afs_op_set_error(op, -EDQUOT);
++ goto failed_but_online;
++
+ default:
++ afs_op_accumulate_error(op, error, abort_code);
++ failed_but_online:
+ clear_bit(AFS_VOLUME_OFFLINE, &op->volume->flags);
+ clear_bit(AFS_VOLUME_BUSY, &op->volume->flags);
+- op->error = afs_abort_to_error(op->ac.abort_code);
+ goto failed;
+ }
+
+ case -ETIMEDOUT:
+ case -ETIME:
+- if (op->error != -EDESTADDRREQ)
++ if (afs_op_error(op) != -EDESTADDRREQ)
+ goto iterate_address;
+ fallthrough;
+ case -ERFKILL:
+@@ -289,7 +383,7 @@ bool afs_select_fileserver(struct afs_operation *op)
+ case -EHOSTDOWN:
+ case -ECONNREFUSED:
+ _debug("no conn");
+- op->error = error;
++ afs_op_accumulate_error(op, error, 0);
+ goto iterate_address;
+
+ case -ENETRESET:
+@@ -298,7 +392,7 @@ bool afs_select_fileserver(struct afs_operation *op)
+ fallthrough;
+ case -ECONNRESET:
+ _debug("call reset");
+- op->error = error;
++ afs_op_set_error(op, error);
+ goto failed;
+ }
+
+@@ -314,8 +408,10 @@ start:
+ * volume may have moved or even have been deleted.
+ */
+ error = afs_check_volume_status(op->volume, op);
+- if (error < 0)
+- goto failed_set_error;
++ if (error < 0) {
++ afs_op_set_error(op, error);
++ goto failed;
++ }
+
+ if (!afs_start_fs_iteration(op, vnode))
+ goto failed;
+@@ -326,8 +422,10 @@ pick_server:
+ _debug("pick [%lx]", op->untried);
+
+ error = afs_wait_for_fs_probes(op->server_list, op->untried);
+- if (error < 0)
+- goto failed_set_error;
++ if (error < 0) {
++ afs_op_set_error(op, error);
++ goto failed;
++ }
+
+ /* Pick the untried server with the lowest RTT. If we have outstanding
+ * callbacks, we stick with the server we're already using if we can.
+@@ -341,7 +439,7 @@ pick_server:
+ }
+
+ op->index = -1;
+- rtt = U32_MAX;
++ rtt = UINT_MAX;
+ for (i = 0; i < op->server_list->nr_servers; i++) {
+ struct afs_server *s = op->server_list->servers[i].server;
+
+@@ -409,8 +507,9 @@ iterate_address:
+
+ _debug("address [%u] %u/%u %pISp",
+ op->index, op->ac.index, op->ac.alist->nr_addrs,
+- &op->ac.alist->addrs[op->ac.index].transport);
++ rxrpc_kernel_remote_addr(op->ac.alist->addrs[op->ac.index].peer));
+
++ op->call_responded = false;
+ _leave(" = t");
+ return true;
+
+@@ -428,7 +527,8 @@ out_of_addresses:
+ op->flags &= ~AFS_OPERATION_RETRY_SERVER;
+ goto retry_server;
+ case -ERESTARTSYS:
+- goto failed_set_error;
++ afs_op_set_error(op, error);
++ goto failed;
+ case -ETIME:
+ case -EDESTADDRREQ:
+ goto next_server;
+@@ -447,23 +547,18 @@ no_more_servers:
+ if (op->flags & AFS_OPERATION_VBUSY)
+ goto restart_from_beginning;
+
+- e.error = -EDESTADDRREQ;
+- e.responded = false;
+ for (i = 0; i < op->server_list->nr_servers; i++) {
+ struct afs_server *s = op->server_list->servers[i].server;
+
+- afs_prioritise_error(&e, READ_ONCE(s->probe.error),
+- s->probe.abort_code);
++ error = READ_ONCE(s->probe.error);
++ if (error < 0)
++ afs_op_accumulate_error(op, error, s->probe.abort_code);
+ }
+
+- error = e.error;
+-
+-failed_set_error:
+- op->error = error;
+ failed:
+ op->flags |= AFS_OPERATION_STOP;
+ afs_end_cursor(&op->ac);
+- _leave(" = f [failed %d]", op->error);
++ _leave(" = f [failed %d]", afs_op_error(op));
+ return false;
+ }
+
+@@ -482,11 +577,13 @@ void afs_dump_edestaddrreq(const struct afs_operation *op)
+ rcu_read_lock();
+
+ pr_notice("EDESTADDR occurred\n");
+- pr_notice("FC: cbb=%x cbb2=%x fl=%x err=%hd\n",
++ pr_notice("OP: cbb=%x cbb2=%x fl=%x err=%hd\n",
+ op->file[0].cb_break_before,
+- op->file[1].cb_break_before, op->flags, op->error);
+- pr_notice("FC: ut=%lx ix=%d ni=%u\n",
++ op->file[1].cb_break_before, op->flags, op->cumul_error.error);
++ pr_notice("OP: ut=%lx ix=%d ni=%u\n",
+ op->untried, op->index, op->nr_iterations);
++ pr_notice("OP: call er=%d ac=%d r=%u\n",
++ op->call_error, op->call_abort_code, op->call_responded);
+
+ if (op->server_list) {
+ const struct afs_server_list *sl = op->server_list;
+@@ -511,8 +608,7 @@ void afs_dump_edestaddrreq(const struct afs_operation *op)
+ }
+ }
+
+- pr_notice("AC: t=%lx ax=%u ac=%d er=%d r=%u ni=%u\n",
+- op->ac.tried, op->ac.index, op->ac.abort_code, op->ac.error,
+- op->ac.responded, op->ac.nr_iterations);
++ pr_notice("AC: t=%lx ax=%u ni=%u\n",
++ op->ac.tried, op->ac.index, op->ac.nr_iterations);
+ rcu_read_unlock();
+ }
+diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
+index d642d06a453be..0b3e2f20b0e08 100644
+--- a/fs/afs/rxrpc.c
++++ b/fs/afs/rxrpc.c
+@@ -296,7 +296,8 @@ static void afs_notify_end_request_tx(struct sock *sock,
+ */
+ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
+ {
+- struct sockaddr_rxrpc *srx = &ac->alist->addrs[ac->index];
++ struct afs_address *addr = &ac->alist->addrs[ac->index];
++ struct rxrpc_peer *peer = addr->peer;
+ struct rxrpc_call *rxcall;
+ struct msghdr msg;
+ struct kvec iov[1];
+@@ -304,7 +305,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
+ s64 tx_total_len;
+ int ret;
+
+- _enter(",{%pISp},", &srx->transport);
++ _enter(",{%pISp},", rxrpc_kernel_remote_addr(addr->peer));
+
+ ASSERT(call->type != NULL);
+ ASSERT(call->type->name != NULL);
+@@ -333,7 +334,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
+ }
+
+ /* create a call */
+- rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
++ rxcall = rxrpc_kernel_begin_call(call->net->socket, peer, call->key,
+ (unsigned long)call,
+ tx_total_len,
+ call->max_lifespan,
+@@ -341,6 +342,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
+ (call->async ?
+ afs_wake_up_async_call :
+ afs_wake_up_call_waiter),
++ addr->service_id,
+ call->upgrade,
+ (call->intr ? RXRPC_PREINTERRUPTIBLE :
+ RXRPC_UNINTERRUPTIBLE),
+@@ -406,8 +408,7 @@ error_do_abort:
+ rxrpc_kernel_recv_data(call->net->socket, rxcall,
+ &msg.msg_iter, &len, false,
+ &call->abort_code, &call->service_id);
+- ac->abort_code = call->abort_code;
+- ac->responded = true;
++ call->responded = true;
+ }
+ call->error = ret;
+ trace_afs_call_done(call);
+@@ -427,7 +428,7 @@ error_kill_call:
+ afs_set_call_complete(call, ret, 0);
+ }
+
+- ac->error = ret;
++ call->error = ret;
+ call->state = AFS_CALL_COMPLETE;
+ _leave(" = %d", ret);
+ }
+@@ -461,7 +462,7 @@ static void afs_log_error(struct afs_call *call, s32 remote_abort)
+ max = m + 1;
+ pr_notice("kAFS: Peer reported %s failure on %s [%pISp]\n",
+ msg, call->type->name,
+- &call->alist->addrs[call->addr_ix].transport);
++ rxrpc_kernel_remote_addr(call->alist->addrs[call->addr_ix].peer));
+ }
+ }
+
+@@ -508,6 +509,7 @@ static void afs_deliver_to_call(struct afs_call *call)
+ ret = -EBADMSG;
+ switch (ret) {
+ case 0:
++ call->responded = true;
+ afs_queue_call_work(call);
+ if (state == AFS_CALL_CL_PROC_REPLY) {
+ if (call->op)
+@@ -522,9 +524,11 @@ static void afs_deliver_to_call(struct afs_call *call)
+ goto out;
+ case -ECONNABORTED:
+ ASSERTCMP(state, ==, AFS_CALL_COMPLETE);
++ call->responded = true;
+ afs_log_error(call, call->abort_code);
+ goto done;
+ case -ENOTSUPP:
++ call->responded = true;
+ abort_code = RXGEN_OPCODE;
+ rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+ abort_code, ret,
+@@ -571,50 +575,46 @@ call_complete:
+ }
+
+ /*
+- * Wait synchronously for a call to complete and clean up the call struct.
++ * Wait synchronously for a call to complete.
+ */
+-long afs_wait_for_call_to_complete(struct afs_call *call,
+- struct afs_addr_cursor *ac)
++void afs_wait_for_call_to_complete(struct afs_call *call, struct afs_addr_cursor *ac)
+ {
+- long ret;
+ bool rxrpc_complete = false;
+
+- DECLARE_WAITQUEUE(myself, current);
+-
+ _enter("");
+
+- ret = call->error;
+- if (ret < 0)
+- goto out;
++ if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) {
++ DECLARE_WAITQUEUE(myself, current);
++
++ add_wait_queue(&call->waitq, &myself);
++ for (;;) {
++ set_current_state(TASK_UNINTERRUPTIBLE);
++
++ /* deliver any messages that are in the queue */
++ if (!afs_check_call_state(call, AFS_CALL_COMPLETE) &&
++ call->need_attention) {
++ call->need_attention = false;
++ __set_current_state(TASK_RUNNING);
++ afs_deliver_to_call(call);
++ continue;
++ }
+
+- add_wait_queue(&call->waitq, &myself);
+- for (;;) {
+- set_current_state(TASK_UNINTERRUPTIBLE);
+-
+- /* deliver any messages that are in the queue */
+- if (!afs_check_call_state(call, AFS_CALL_COMPLETE) &&
+- call->need_attention) {
+- call->need_attention = false;
+- __set_current_state(TASK_RUNNING);
+- afs_deliver_to_call(call);
+- continue;
+- }
++ if (afs_check_call_state(call, AFS_CALL_COMPLETE))
++ break;
+
+- if (afs_check_call_state(call, AFS_CALL_COMPLETE))
+- break;
++ if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall)) {
++ /* rxrpc terminated the call. */
++ rxrpc_complete = true;
++ break;
++ }
+
+- if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall)) {
+- /* rxrpc terminated the call. */
+- rxrpc_complete = true;
+- break;
++ schedule();
+ }
+
+- schedule();
++ remove_wait_queue(&call->waitq, &myself);
++ __set_current_state(TASK_RUNNING);
+ }
+
+- remove_wait_queue(&call->waitq, &myself);
+- __set_current_state(TASK_RUNNING);
+-
+ if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) {
+ if (rxrpc_complete) {
+ afs_set_call_complete(call, call->error, call->abort_code);
+@@ -628,28 +628,8 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
+ }
+ }
+
+- spin_lock_bh(&call->state_lock);
+- ac->abort_code = call->abort_code;
+- ac->error = call->error;
+- spin_unlock_bh(&call->state_lock);
+-
+- ret = ac->error;
+- switch (ret) {
+- case 0:
+- ret = call->ret0;
+- call->ret0 = 0;
+-
+- fallthrough;
+- case -ECONNABORTED:
+- ac->responded = true;
+- break;
+- }
+-
+-out:
+- _debug("call complete");
+- afs_put_call(call);
+- _leave(" = %p", (void *)ret);
+- return ret;
++ if (call->error == 0 || call->error == -ECONNABORTED)
++ call->responded = true;
+ }
+
+ /*
+diff --git a/fs/afs/server.c b/fs/afs/server.c
+index b5237206eac3e..f7791ef13618a 100644
+--- a/fs/afs/server.c
++++ b/fs/afs/server.c
+@@ -21,13 +21,12 @@ static void __afs_put_server(struct afs_net *, struct afs_server *);
+ /*
+ * Find a server by one of its addresses.
+ */
+-struct afs_server *afs_find_server(struct afs_net *net,
+- const struct sockaddr_rxrpc *srx)
++struct afs_server *afs_find_server(struct afs_net *net, const struct rxrpc_peer *peer)
+ {
+ const struct afs_addr_list *alist;
+ struct afs_server *server = NULL;
+ unsigned int i;
+- int seq = 0, diff;
++ int seq = 1;
+
+ rcu_read_lock();
+
+@@ -35,39 +34,14 @@ struct afs_server *afs_find_server(struct afs_net *net,
+ if (server)
+ afs_unuse_server_notime(net, server, afs_server_trace_put_find_rsq);
+ server = NULL;
++ seq++; /* 2 on the 1st/lockless path, otherwise odd */
+ read_seqbegin_or_lock(&net->fs_addr_lock, &seq);
+
+- if (srx->transport.family == AF_INET6) {
+- const struct sockaddr_in6 *a = &srx->transport.sin6, *b;
+- hlist_for_each_entry_rcu(server, &net->fs_addresses6, addr6_link) {
+- alist = rcu_dereference(server->addresses);
+- for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
+- b = &alist->addrs[i].transport.sin6;
+- diff = ((u16 __force)a->sin6_port -
+- (u16 __force)b->sin6_port);
+- if (diff == 0)
+- diff = memcmp(&a->sin6_addr,
+- &b->sin6_addr,
+- sizeof(struct in6_addr));
+- if (diff == 0)
+- goto found;
+- }
+- }
+- } else {
+- const struct sockaddr_in *a = &srx->transport.sin, *b;
+- hlist_for_each_entry_rcu(server, &net->fs_addresses4, addr4_link) {
+- alist = rcu_dereference(server->addresses);
+- for (i = 0; i < alist->nr_ipv4; i++) {
+- b = &alist->addrs[i].transport.sin;
+- diff = ((u16 __force)a->sin_port -
+- (u16 __force)b->sin_port);
+- if (diff == 0)
+- diff = ((u32 __force)a->sin_addr.s_addr -
+- (u32 __force)b->sin_addr.s_addr);
+- if (diff == 0)
+- goto found;
+- }
+- }
++ hlist_for_each_entry_rcu(server, &net->fs_addresses6, addr6_link) {
++ alist = rcu_dereference(server->addresses);
++ for (i = 0; i < alist->nr_addrs; i++)
++ if (alist->addrs[i].peer == peer)
++ goto found;
+ }
+
+ server = NULL;
+@@ -90,7 +64,7 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu
+ {
+ struct afs_server *server = NULL;
+ struct rb_node *p;
+- int diff, seq = 0;
++ int diff, seq = 1;
+
+ _enter("%pU", uuid);
+
+@@ -102,7 +76,7 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu
+ if (server)
+ afs_unuse_server(net, server, afs_server_trace_put_uuid_rsq);
+ server = NULL;
+-
++ seq++; /* 2 on the 1st/lockless path, otherwise odd */
+ read_seqbegin_or_lock(&net->fs_lock, &seq);
+
+ p = net->fs_servers.rb_node;
+@@ -463,7 +437,6 @@ static void afs_give_up_callbacks(struct afs_net *net, struct afs_server *server
+ struct afs_addr_cursor ac = {
+ .alist = alist,
+ .index = alist->preferred,
+- .error = 0,
+ };
+
+ afs_fs_give_up_all_callbacks(net, server, &ac, NULL);
+@@ -655,8 +628,8 @@ static noinline bool afs_update_server_record(struct afs_operation *op,
+ _leave(" = t [intr]");
+ return true;
+ }
+- op->error = PTR_ERR(alist);
+- _leave(" = f [%d]", op->error);
++ afs_op_set_error(op, PTR_ERR(alist));
++ _leave(" = f [%d]", afs_op_error(op));
+ return false;
+ }
+
+@@ -710,7 +683,7 @@ wait:
+ (op->flags & AFS_OPERATION_UNINTR) ?
+ TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
+ if (ret == -ERESTARTSYS) {
+- op->error = ret;
++ afs_op_set_error(op, ret);
+ _leave(" = f [intr]");
+ return false;
+ }
+diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c
+index f04a80e4f5c3f..89cadd9a69e12 100644
+--- a/fs/afs/vl_alias.c
++++ b/fs/afs/vl_alias.c
+@@ -32,55 +32,6 @@ static struct afs_volume *afs_sample_volume(struct afs_cell *cell, struct key *k
+ return volume;
+ }
+
+-/*
+- * Compare two addresses.
+- */
+-static int afs_compare_addrs(const struct sockaddr_rxrpc *srx_a,
+- const struct sockaddr_rxrpc *srx_b)
+-{
+- short port_a, port_b;
+- int addr_a, addr_b, diff;
+-
+- diff = (short)srx_a->transport_type - (short)srx_b->transport_type;
+- if (diff)
+- goto out;
+-
+- switch (srx_a->transport_type) {
+- case AF_INET: {
+- const struct sockaddr_in *a = &srx_a->transport.sin;
+- const struct sockaddr_in *b = &srx_b->transport.sin;
+- addr_a = ntohl(a->sin_addr.s_addr);
+- addr_b = ntohl(b->sin_addr.s_addr);
+- diff = addr_a - addr_b;
+- if (diff == 0) {
+- port_a = ntohs(a->sin_port);
+- port_b = ntohs(b->sin_port);
+- diff = port_a - port_b;
+- }
+- break;
+- }
+-
+- case AF_INET6: {
+- const struct sockaddr_in6 *a = &srx_a->transport.sin6;
+- const struct sockaddr_in6 *b = &srx_b->transport.sin6;
+- diff = memcmp(&a->sin6_addr, &b->sin6_addr, 16);
+- if (diff == 0) {
+- port_a = ntohs(a->sin6_port);
+- port_b = ntohs(b->sin6_port);
+- diff = port_a - port_b;
+- }
+- break;
+- }
+-
+- default:
+- WARN_ON(1);
+- diff = 1;
+- }
+-
+-out:
+- return diff;
+-}
+-
+ /*
+ * Compare the address lists of a pair of fileservers.
+ */
+@@ -94,9 +45,9 @@ static int afs_compare_fs_alists(const struct afs_server *server_a,
+ lb = rcu_dereference(server_b->addresses);
+
+ while (a < la->nr_addrs && b < lb->nr_addrs) {
+- const struct sockaddr_rxrpc *srx_a = &la->addrs[a];
+- const struct sockaddr_rxrpc *srx_b = &lb->addrs[b];
+- int diff = afs_compare_addrs(srx_a, srx_b);
++ unsigned long pa = (unsigned long)la->addrs[a].peer;
++ unsigned long pb = (unsigned long)lb->addrs[b].peer;
++ long diff = pa - pb;
+
+ if (diff < 0) {
+ a++;
+@@ -285,7 +236,7 @@ static char *afs_vl_get_cell_name(struct afs_cell *cell, struct key *key)
+
+ while (afs_select_vlserver(&vc)) {
+ if (!test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags)) {
+- vc.ac.error = -EOPNOTSUPP;
++ vc.call_error = -EOPNOTSUPP;
+ skipped = true;
+ continue;
+ }
+diff --git a/fs/afs/vl_list.c b/fs/afs/vl_list.c
+index acc48216136ae..ba89140eee9e9 100644
+--- a/fs/afs/vl_list.c
++++ b/fs/afs/vl_list.c
+@@ -83,14 +83,15 @@ static u16 afs_extract_le16(const u8 **_b)
+ /*
+ * Build a VL server address list from a DNS queried server list.
+ */
+-static struct afs_addr_list *afs_extract_vl_addrs(const u8 **_b, const u8 *end,
++static struct afs_addr_list *afs_extract_vl_addrs(struct afs_net *net,
++ const u8 **_b, const u8 *end,
+ u8 nr_addrs, u16 port)
+ {
+ struct afs_addr_list *alist;
+ const u8 *b = *_b;
+ int ret = -EINVAL;
+
+- alist = afs_alloc_addrlist(nr_addrs, VL_SERVICE, port);
++ alist = afs_alloc_addrlist(nr_addrs, VL_SERVICE);
+ if (!alist)
+ return ERR_PTR(-ENOMEM);
+ if (nr_addrs == 0)
+@@ -109,7 +110,9 @@ static struct afs_addr_list *afs_extract_vl_addrs(const u8 **_b, const u8 *end,
+ goto error;
+ }
+ memcpy(x, b, 4);
+- afs_merge_fs_addr4(alist, x[0], port);
++ ret = afs_merge_fs_addr4(net, alist, x[0], port);
++ if (ret < 0)
++ goto error;
+ b += 4;
+ break;
+
+@@ -119,7 +122,9 @@ static struct afs_addr_list *afs_extract_vl_addrs(const u8 **_b, const u8 *end,
+ goto error;
+ }
+ memcpy(x, b, 16);
+- afs_merge_fs_addr6(alist, x, port);
++ ret = afs_merge_fs_addr6(net, alist, x, port);
++ if (ret < 0)
++ goto error;
+ b += 16;
+ break;
+
+@@ -247,7 +252,7 @@ struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *cell,
+ /* Extract the addresses - note that we can't skip this as we
+ * have to advance the payload pointer.
+ */
+- addrs = afs_extract_vl_addrs(&b, end, bs.nr_addrs, bs.port);
++ addrs = afs_extract_vl_addrs(cell->net, &b, end, bs.nr_addrs, bs.port);
+ if (IS_ERR(addrs)) {
+ ret = PTR_ERR(addrs);
+ goto error_2;
+diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c
+index 58452b86e6727..2f8a13c2bf0c8 100644
+--- a/fs/afs/vl_probe.c
++++ b/fs/afs/vl_probe.c
+@@ -48,6 +48,7 @@ void afs_vlserver_probe_result(struct afs_call *call)
+ {
+ struct afs_addr_list *alist = call->alist;
+ struct afs_vlserver *server = call->vlserver;
++ struct afs_address *addr = &alist->addrs[call->addr_ix];
+ unsigned int server_index = call->server_index;
+ unsigned int rtt_us = 0;
+ unsigned int index = call->addr_ix;
+@@ -106,16 +107,16 @@ responded:
+ if (call->service_id == YFS_VL_SERVICE) {
+ server->probe.flags |= AFS_VLSERVER_PROBE_IS_YFS;
+ set_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);
+- alist->addrs[index].srx_service = call->service_id;
++ addr->service_id = call->service_id;
+ } else {
+ server->probe.flags |= AFS_VLSERVER_PROBE_NOT_YFS;
+ if (!(server->probe.flags & AFS_VLSERVER_PROBE_IS_YFS)) {
+ clear_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);
+- alist->addrs[index].srx_service = call->service_id;
++ addr->service_id = call->service_id;
+ }
+ }
+
+- rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us);
++ rtt_us = rxrpc_kernel_get_srtt(addr->peer);
+ if (rtt_us < server->probe.rtt) {
+ server->probe.rtt = rtt_us;
+ server->rtt = rtt_us;
+@@ -130,8 +131,9 @@ responded:
+ out:
+ spin_unlock(&server->probe_lock);
+
+- _debug("probe [%u][%u] %pISpc rtt=%u ret=%d",
+- server_index, index, &alist->addrs[index].transport, rtt_us, ret);
++ _debug("probe [%u][%u] %pISpc rtt=%d ret=%d",
++ server_index, index, rxrpc_kernel_remote_addr(addr->peer),
++ rtt_us, ret);
+
+ afs_done_one_vl_probe(server, have_result);
+ }
+@@ -167,10 +169,11 @@ static bool afs_do_probe_vlserver(struct afs_net *net,
+ call = afs_vl_get_capabilities(net, &ac, key, server,
+ server_index);
+ if (!IS_ERR(call)) {
++ afs_prioritise_error(_e, call->error, call->abort_code);
+ afs_put_call(call);
+ in_progress = true;
+ } else {
+- afs_prioritise_error(_e, PTR_ERR(call), ac.abort_code);
++ afs_prioritise_error(_e, PTR_ERR(call), 0);
+ afs_done_one_vl_probe(server, false);
+ }
+ }
+@@ -185,12 +188,10 @@ int afs_send_vl_probes(struct afs_net *net, struct key *key,
+ struct afs_vlserver_list *vllist)
+ {
+ struct afs_vlserver *server;
+- struct afs_error e;
++ struct afs_error e = {};
+ bool in_progress = false;
+ int i;
+
+- e.error = 0;
+- e.responded = false;
+ for (i = 0; i < vllist->nr_servers; i++) {
+ server = vllist->servers[i].server;
+ if (test_bit(AFS_VLSERVER_FL_PROBED, &server->flags))
+diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
+index eb415ce563600..e2dc54082a05d 100644
+--- a/fs/afs/vl_rotate.c
++++ b/fs/afs/vl_rotate.c
+@@ -20,11 +20,11 @@ bool afs_begin_vlserver_operation(struct afs_vl_cursor *vc, struct afs_cell *cel
+ memset(vc, 0, sizeof(*vc));
+ vc->cell = cell;
+ vc->key = key;
+- vc->error = -EDESTADDRREQ;
+- vc->ac.error = SHRT_MAX;
++ vc->cumul_error.error = -EDESTADDRREQ;
++ vc->nr_iterations = -1;
+
+ if (signal_pending(current)) {
+- vc->error = -EINTR;
++ vc->cumul_error.error = -EINTR;
+ vc->flags |= AFS_VL_CURSOR_STOP;
+ return false;
+ }
+@@ -52,7 +52,7 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
+ &cell->dns_lookup_count,
+ smp_load_acquire(&cell->dns_lookup_count)
+ != dns_lookup_count) < 0) {
+- vc->error = -ERESTARTSYS;
++ vc->cumul_error.error = -ERESTARTSYS;
+ return false;
+ }
+ }
+@@ -60,12 +60,12 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
+ /* Status load is ordered after lookup counter load */
+ if (cell->dns_status == DNS_LOOKUP_GOT_NOT_FOUND) {
+ pr_warn("No record of cell %s\n", cell->name);
+- vc->error = -ENOENT;
++ vc->cumul_error.error = -ENOENT;
+ return false;
+ }
+
+ if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
+- vc->error = -EDESTADDRREQ;
++ vc->cumul_error.error = -EDESTADDRREQ;
+ return false;
+ }
+ }
+@@ -91,52 +91,52 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
+ {
+ struct afs_addr_list *alist;
+ struct afs_vlserver *vlserver;
+- struct afs_error e;
+- u32 rtt;
+- int error = vc->ac.error, i;
++ unsigned int rtt;
++ s32 abort_code = vc->call_abort_code;
++ int error = vc->call_error, i;
++
++ vc->nr_iterations++;
+
+ _enter("%lx[%d],%lx[%d],%d,%d",
+ vc->untried, vc->index,
+ vc->ac.tried, vc->ac.index,
+- error, vc->ac.abort_code);
++ error, abort_code);
+
+ if (vc->flags & AFS_VL_CURSOR_STOP) {
+ _leave(" = f [stopped]");
+ return false;
+ }
+
+- vc->nr_iterations++;
++ if (vc->nr_iterations == 0)
++ goto start;
+
+ /* Evaluate the result of the previous operation, if there was one. */
+ switch (error) {
+- case SHRT_MAX:
+- goto start;
+-
+ default:
+ case 0:
+ /* Success or local failure. Stop. */
+- vc->error = error;
++ vc->cumul_error.error = error;
+ vc->flags |= AFS_VL_CURSOR_STOP;
+- _leave(" = f [okay/local %d]", vc->ac.error);
++ _leave(" = f [okay/local %d]", vc->cumul_error.error);
+ return false;
+
+ case -ECONNABORTED:
+ /* The far side rejected the operation on some grounds. This
+ * might involve the server being busy or the volume having been moved.
+ */
+- switch (vc->ac.abort_code) {
++ switch (abort_code) {
+ case AFSVL_IO:
+ case AFSVL_BADVOLOPER:
+ case AFSVL_NOMEM:
+ /* The server went weird. */
+- vc->error = -EREMOTEIO;
++ afs_prioritise_error(&vc->cumul_error, -EREMOTEIO, abort_code);
+ //write_lock(&vc->cell->vl_servers_lock);
+ //vc->server_list->weird_mask |= 1 << vc->index;
+ //write_unlock(&vc->cell->vl_servers_lock);
+ goto next_server;
+
+ default:
+- vc->error = afs_abort_to_error(vc->ac.abort_code);
++ afs_prioritise_error(&vc->cumul_error, error, abort_code);
+ goto failed;
+ }
+
+@@ -149,12 +149,12 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
+ case -ETIMEDOUT:
+ case -ETIME:
+ _debug("no conn %d", error);
+- vc->error = error;
++ afs_prioritise_error(&vc->cumul_error, error, 0);
+ goto iterate_address;
+
+ case -ECONNRESET:
+ _debug("call reset");
+- vc->error = error;
++ afs_prioritise_error(&vc->cumul_error, error, 0);
+ vc->flags |= AFS_VL_CURSOR_RETRY;
+ goto next_server;
+
+@@ -178,15 +178,19 @@ start:
+ goto failed;
+
+ error = afs_send_vl_probes(vc->cell->net, vc->key, vc->server_list);
+- if (error < 0)
+- goto failed_set_error;
++ if (error < 0) {
++ afs_prioritise_error(&vc->cumul_error, error, 0);
++ goto failed;
++ }
+
+ pick_server:
+ _debug("pick [%lx]", vc->untried);
+
+ error = afs_wait_for_vl_probes(vc->server_list, vc->untried);
+- if (error < 0)
+- goto failed_set_error;
++ if (error < 0) {
++ afs_prioritise_error(&vc->cumul_error, error, 0);
++ goto failed;
++ }
+
+ /* Pick the untried server with the lowest RTT. */
+ vc->index = vc->server_list->preferred;
+@@ -194,7 +198,7 @@ pick_server:
+ goto selected_server;
+
+ vc->index = -1;
+- rtt = U32_MAX;
++ rtt = UINT_MAX;
+ for (i = 0; i < vc->server_list->nr_servers; i++) {
+ struct afs_vlserver *s = vc->server_list->servers[i].server;
+
+@@ -249,7 +253,8 @@ iterate_address:
+
+ _debug("VL address %d/%d", vc->ac.index, vc->ac.alist->nr_addrs);
+
+- _leave(" = t %pISpc", &vc->ac.alist->addrs[vc->ac.index].transport);
++ vc->call_responded = false;
++ _leave(" = t %pISpc", rxrpc_kernel_remote_addr(vc->ac.alist->addrs[vc->ac.index].peer));
+ return true;
+
+ next_server:
+@@ -264,25 +269,19 @@ no_more_servers:
+ if (vc->flags & AFS_VL_CURSOR_RETRY)
+ goto restart_from_beginning;
+
+- e.error = -EDESTADDRREQ;
+- e.responded = false;
+ for (i = 0; i < vc->server_list->nr_servers; i++) {
+ struct afs_vlserver *s = vc->server_list->servers[i].server;
+
+ if (test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags))
+- e.responded = true;
+- afs_prioritise_error(&e, READ_ONCE(s->probe.error),
++ vc->cumul_error.responded = true;
++ afs_prioritise_error(&vc->cumul_error, READ_ONCE(s->probe.error),
+ s->probe.abort_code);
+ }
+
+- error = e.error;
+-
+-failed_set_error:
+- vc->error = error;
+ failed:
+ vc->flags |= AFS_VL_CURSOR_STOP;
+ afs_end_cursor(&vc->ac);
+- _leave(" = f [failed %d]", vc->error);
++ _leave(" = f [failed %d]", vc->cumul_error.error);
+ return false;
+ }
+
+@@ -305,7 +304,10 @@ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
+ pr_notice("DNS: src=%u st=%u lc=%x\n",
+ cell->dns_source, cell->dns_status, cell->dns_lookup_count);
+ pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n",
+- vc->untried, vc->index, vc->nr_iterations, vc->flags, vc->error);
++ vc->untried, vc->index, vc->nr_iterations, vc->flags,
++ vc->cumul_error.error);
++ pr_notice("VC: call er=%d ac=%d r=%u\n",
++ vc->call_error, vc->call_abort_code, vc->call_responded);
+
+ if (vc->server_list) {
+ const struct afs_vlserver_list *sl = vc->server_list;
+@@ -329,9 +331,8 @@ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
+ }
+ }
+
+- pr_notice("AC: t=%lx ax=%u ac=%d er=%d r=%u ni=%u\n",
+- vc->ac.tried, vc->ac.index, vc->ac.abort_code, vc->ac.error,
+- vc->ac.responded, vc->ac.nr_iterations);
++ pr_notice("AC: t=%lx ax=%u ni=%u\n",
++ vc->ac.tried, vc->ac.index, vc->ac.nr_iterations);
+ rcu_read_unlock();
+ }
+
+@@ -342,17 +343,16 @@ int afs_end_vlserver_operation(struct afs_vl_cursor *vc)
+ {
+ struct afs_net *net = vc->cell->net;
+
+- if (vc->error == -EDESTADDRREQ ||
+- vc->error == -EADDRNOTAVAIL ||
+- vc->error == -ENETUNREACH ||
+- vc->error == -EHOSTUNREACH)
++ switch (vc->cumul_error.error) {
++ case -EDESTADDRREQ:
++ case -EADDRNOTAVAIL:
++ case -ENETUNREACH:
++ case -EHOSTUNREACH:
+ afs_vl_dump_edestaddrreq(vc);
++ break;
++ }
+
+ afs_end_cursor(&vc->ac);
+ afs_put_vlserverlist(net, vc->server_list);
+-
+- if (vc->error == -ECONNABORTED)
+- vc->error = afs_abort_to_error(vc->ac.abort_code);
+-
+- return vc->error;
++ return vc->cumul_error.error;
+ }
+diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
+index 00fca3c66ba61..db7e94584e874 100644
+--- a/fs/afs/vlclient.c
++++ b/fs/afs/vlclient.c
+@@ -106,12 +106,6 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
+ return 0;
+ }
+
+-static void afs_destroy_vl_get_entry_by_name_u(struct afs_call *call)
+-{
+- kfree(call->ret_vldb);
+- afs_flat_call_destructor(call);
+-}
+-
+ /*
+ * VL.GetEntryByNameU operation type.
+ */
+@@ -119,7 +113,7 @@ static const struct afs_call_type afs_RXVLGetEntryByNameU = {
+ .name = "VL.GetEntryByNameU",
+ .op = afs_VL_GetEntryByNameU,
+ .deliver = afs_deliver_vl_get_entry_by_name_u,
+- .destructor = afs_destroy_vl_get_entry_by_name_u,
++ .destructor = afs_flat_call_destructor,
+ };
+
+ /*
+@@ -166,7 +160,16 @@ struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_vl_cursor *vc,
+
+ trace_afs_make_vl_call(call);
+ afs_make_call(&vc->ac, call, GFP_KERNEL);
+- return (struct afs_vldb_entry *)afs_wait_for_call_to_complete(call, &vc->ac);
++ afs_wait_for_call_to_complete(call, &vc->ac);
++ vc->call_abort_code = call->abort_code;
++ vc->call_error = call->error;
++ vc->call_responded = call->responded;
++ afs_put_call(call);
++ if (vc->call_error) {
++ kfree(entry);
++ return ERR_PTR(vc->call_error);
++ }
++ return entry;
+ }
+
+ /*
+@@ -208,7 +211,7 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call)
+ count = ntohl(*bp);
+
+ nentries = min(nentries, count);
+- alist = afs_alloc_addrlist(nentries, FS_SERVICE, AFS_FS_PORT);
++ alist = afs_alloc_addrlist(nentries, FS_SERVICE);
+ if (!alist)
+ return -ENOMEM;
+ alist->version = uniquifier;
+@@ -230,9 +233,13 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call)
+ alist = call->ret_alist;
+ bp = call->buffer;
+ count = min(call->count, 4U);
+- for (i = 0; i < count; i++)
+- if (alist->nr_addrs < call->count2)
+- afs_merge_fs_addr4(alist, *bp++, AFS_FS_PORT);
++ for (i = 0; i < count; i++) {
++ if (alist->nr_addrs < call->count2) {
++ ret = afs_merge_fs_addr4(call->net, alist, *bp++, AFS_FS_PORT);
++ if (ret < 0)
++ return ret;
++ }
++ }
+
+ call->count -= count;
+ if (call->count > 0)
+@@ -245,12 +252,6 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call)
+ return 0;
+ }
+
+-static void afs_vl_get_addrs_u_destructor(struct afs_call *call)
+-{
+- afs_put_addrlist(call->ret_alist);
+- return afs_flat_call_destructor(call);
+-}
+-
+ /*
+ * VL.GetAddrsU operation type.
+ */
+@@ -258,7 +259,7 @@ static const struct afs_call_type afs_RXVLGetAddrsU = {
+ .name = "VL.GetAddrsU",
+ .op = afs_VL_GetAddrsU,
+ .deliver = afs_deliver_vl_get_addrs_u,
+- .destructor = afs_vl_get_addrs_u_destructor,
++ .destructor = afs_flat_call_destructor,
+ };
+
+ /*
+@@ -269,6 +270,7 @@ struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *vc,
+ const uuid_t *uuid)
+ {
+ struct afs_ListAddrByAttributes__xdr *r;
++ struct afs_addr_list *alist;
+ const struct afs_uuid *u = (const struct afs_uuid *)uuid;
+ struct afs_call *call;
+ struct afs_net *net = vc->cell->net;
+@@ -305,7 +307,17 @@ struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *vc,
+
+ trace_afs_make_vl_call(call);
+ afs_make_call(&vc->ac, call, GFP_KERNEL);
+- return (struct afs_addr_list *)afs_wait_for_call_to_complete(call, &vc->ac);
++ afs_wait_for_call_to_complete(call, &vc->ac);
++ vc->call_abort_code = call->abort_code;
++ vc->call_error = call->error;
++ vc->call_responded = call->responded;
++ alist = call->ret_alist;
++ afs_put_call(call);
++ if (vc->call_error) {
++ afs_put_addrlist(alist);
++ return ERR_PTR(vc->call_error);
++ }
++ return alist;
+ }
+
+ /*
+@@ -450,7 +462,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
+ if (call->count > YFS_MAXENDPOINTS)
+ return afs_protocol_error(call, afs_eproto_yvl_fsendpt_num);
+
+- alist = afs_alloc_addrlist(call->count, FS_SERVICE, AFS_FS_PORT);
++ alist = afs_alloc_addrlist(call->count, FS_SERVICE);
+ if (!alist)
+ return -ENOMEM;
+ alist->version = uniquifier;
+@@ -488,14 +500,18 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
+ if (ntohl(bp[0]) != sizeof(__be32) * 2)
+ return afs_protocol_error(
+ call, afs_eproto_yvl_fsendpt4_len);
+- afs_merge_fs_addr4(alist, bp[1], ntohl(bp[2]));
++ ret = afs_merge_fs_addr4(call->net, alist, bp[1], ntohl(bp[2]));
++ if (ret < 0)
++ return ret;
+ bp += 3;
+ break;
+ case YFS_ENDPOINT_IPV6:
+ if (ntohl(bp[0]) != sizeof(__be32) * 5)
+ return afs_protocol_error(
+ call, afs_eproto_yvl_fsendpt6_len);
+- afs_merge_fs_addr6(alist, bp + 1, ntohl(bp[5]));
++ ret = afs_merge_fs_addr6(call->net, alist, bp + 1, ntohl(bp[5]));
++ if (ret < 0)
++ return ret;
+ bp += 6;
+ break;
+ default:
+@@ -610,7 +626,7 @@ static const struct afs_call_type afs_YFSVLGetEndpoints = {
+ .name = "YFSVL.GetEndpoints",
+ .op = afs_YFSVL_GetEndpoints,
+ .deliver = afs_deliver_yfsvl_get_endpoints,
+- .destructor = afs_vl_get_addrs_u_destructor,
++ .destructor = afs_flat_call_destructor,
+ };
+
+ /*
+@@ -620,6 +636,7 @@ static const struct afs_call_type afs_YFSVLGetEndpoints = {
+ struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *vc,
+ const uuid_t *uuid)
+ {
++ struct afs_addr_list *alist;
+ struct afs_call *call;
+ struct afs_net *net = vc->cell->net;
+ __be32 *bp;
+@@ -644,7 +661,17 @@ struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *vc,
+
+ trace_afs_make_vl_call(call);
+ afs_make_call(&vc->ac, call, GFP_KERNEL);
+- return (struct afs_addr_list *)afs_wait_for_call_to_complete(call, &vc->ac);
++ afs_wait_for_call_to_complete(call, &vc->ac);
++ vc->call_abort_code = call->abort_code;
++ vc->call_error = call->error;
++ vc->call_responded = call->responded;
++ alist = call->ret_alist;
++ afs_put_call(call);
++ if (vc->call_error) {
++ afs_put_addrlist(alist);
++ return ERR_PTR(vc->call_error);
++ }
++ return alist;
+ }
+
+ /*
+@@ -709,12 +736,6 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call)
+ return 0;
+ }
+
+-static void afs_destroy_yfsvl_get_cell_name(struct afs_call *call)
+-{
+- kfree(call->ret_str);
+- afs_flat_call_destructor(call);
+-}
+-
+ /*
+ * VL.GetCapabilities operation type
+ */
+@@ -722,7 +743,7 @@ static const struct afs_call_type afs_YFSVLGetCellName = {
+ .name = "YFSVL.GetCellName",
+ .op = afs_YFSVL_GetCellName,
+ .deliver = afs_deliver_yfsvl_get_cell_name,
+- .destructor = afs_destroy_yfsvl_get_cell_name,
++ .destructor = afs_flat_call_destructor,
+ };
+
+ /*
+@@ -737,6 +758,7 @@ char *afs_yfsvl_get_cell_name(struct afs_vl_cursor *vc)
+ struct afs_call *call;
+ struct afs_net *net = vc->cell->net;
+ __be32 *bp;
++ char *cellname;
+
+ _enter("");
+
+@@ -755,5 +777,15 @@ char *afs_yfsvl_get_cell_name(struct afs_vl_cursor *vc)
+ /* Can't take a ref on server */
+ trace_afs_make_vl_call(call);
+ afs_make_call(&vc->ac, call, GFP_KERNEL);
+- return (char *)afs_wait_for_call_to_complete(call, &vc->ac);
++ afs_wait_for_call_to_complete(call, &vc->ac);
++ vc->call_abort_code = call->abort_code;
++ vc->call_error = call->error;
++ vc->call_responded = call->responded;
++ cellname = call->ret_str;
++ afs_put_call(call);
++ if (vc->call_error) {
++ kfree(cellname);
++ return ERR_PTR(vc->call_error);
++ }
++ return cellname;
+ }
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index 4a168781936b5..9f90d8970ce9e 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -366,7 +366,7 @@ static void afs_store_data_success(struct afs_operation *op)
+
+ op->ctime = op->file[0].scb.status.mtime_client;
+ afs_vnode_commit_status(op, &op->file[0]);
+- if (op->error == 0) {
++ if (!afs_op_error(op)) {
+ if (!op->store.laundering)
+ afs_pages_written_back(vnode, op->store.pos, op->store.size);
+ afs_stat_v(vnode, n_stores);
+@@ -428,7 +428,7 @@ try_next_key:
+
+ afs_wait_for_operation(op);
+
+- switch (op->error) {
++ switch (afs_op_error(op)) {
+ case -EACCES:
+ case -EPERM:
+ case -ENOKEY:
+@@ -447,7 +447,7 @@ try_next_key:
+ }
+
+ afs_put_wb_key(wbk);
+- _leave(" = %d", op->error);
++ _leave(" = %d", afs_op_error(op));
+ return afs_put_operation(op);
+ }
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 01423670bc8a2..31d64812bb60a 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -1260,7 +1260,8 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
+ u64 bytes_left, end;
+ u64 aligned_start = ALIGN(start, 1 << SECTOR_SHIFT);
+
+- if (WARN_ON(start != aligned_start)) {
++ /* Adjust the range to be aligned to 512B sectors if necessary. */
++ if (start != aligned_start) {
+ len -= aligned_start - start;
+ len = round_down(len, 1 << SECTOR_SHIFT);
+ start = aligned_start;
+@@ -4300,6 +4301,42 @@ static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
+ return 0;
+ }
+
++static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
++ struct find_free_extent_ctl *ffe_ctl)
++{
++ if (ffe_ctl->for_treelog) {
++ spin_lock(&fs_info->treelog_bg_lock);
++ if (fs_info->treelog_bg)
++ ffe_ctl->hint_byte = fs_info->treelog_bg;
++ spin_unlock(&fs_info->treelog_bg_lock);
++ } else if (ffe_ctl->for_data_reloc) {
++ spin_lock(&fs_info->relocation_bg_lock);
++ if (fs_info->data_reloc_bg)
++ ffe_ctl->hint_byte = fs_info->data_reloc_bg;
++ spin_unlock(&fs_info->relocation_bg_lock);
++ } else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
++ struct btrfs_block_group *block_group;
++
++ spin_lock(&fs_info->zone_active_bgs_lock);
++ list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
++ /*
++			 * No lock is OK here because avail is monotonically
++ * decreasing, and this is just a hint.
++ */
++ u64 avail = block_group->zone_capacity - block_group->alloc_offset;
++
++ if (block_group_bits(block_group, ffe_ctl->flags) &&
++ avail >= ffe_ctl->num_bytes) {
++ ffe_ctl->hint_byte = block_group->start;
++ break;
++ }
++ }
++ spin_unlock(&fs_info->zone_active_bgs_lock);
++ }
++
++ return 0;
++}
++
+ static int prepare_allocation(struct btrfs_fs_info *fs_info,
+ struct find_free_extent_ctl *ffe_ctl,
+ struct btrfs_space_info *space_info,
+@@ -4310,19 +4347,7 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
+ return prepare_allocation_clustered(fs_info, ffe_ctl,
+ space_info, ins);
+ case BTRFS_EXTENT_ALLOC_ZONED:
+- if (ffe_ctl->for_treelog) {
+- spin_lock(&fs_info->treelog_bg_lock);
+- if (fs_info->treelog_bg)
+- ffe_ctl->hint_byte = fs_info->treelog_bg;
+- spin_unlock(&fs_info->treelog_bg_lock);
+- }
+- if (ffe_ctl->for_data_reloc) {
+- spin_lock(&fs_info->relocation_bg_lock);
+- if (fs_info->data_reloc_bg)
+- ffe_ctl->hint_byte = fs_info->data_reloc_bg;
+- spin_unlock(&fs_info->relocation_bg_lock);
+- }
+- return 0;
++ return prepare_allocation_zoned(fs_info, ffe_ctl);
+ default:
+ BUG();
+ }
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index fb3c3f43c3fa4..97cd57e710cb2 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4449,6 +4449,8 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
+ u64 root_flags;
+ int ret;
+
++ down_write(&fs_info->subvol_sem);
++
+ /*
+ * Don't allow to delete a subvolume with send in progress. This is
+ * inside the inode lock so the error handling that has to drop the bit
+@@ -4460,25 +4462,25 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
+ btrfs_warn(fs_info,
+ "attempt to delete subvolume %llu during send",
+ dest->root_key.objectid);
+- return -EPERM;
++ ret = -EPERM;
++ goto out_up_write;
+ }
+ if (atomic_read(&dest->nr_swapfiles)) {
+ spin_unlock(&dest->root_item_lock);
+ btrfs_warn(fs_info,
+ "attempt to delete subvolume %llu with active swapfile",
+ root->root_key.objectid);
+- return -EPERM;
++ ret = -EPERM;
++ goto out_up_write;
+ }
+ root_flags = btrfs_root_flags(&dest->root_item);
+ btrfs_set_root_flags(&dest->root_item,
+ root_flags | BTRFS_ROOT_SUBVOL_DEAD);
+ spin_unlock(&dest->root_item_lock);
+
+- down_write(&fs_info->subvol_sem);
+-
+ ret = may_destroy_subvol(dest);
+ if (ret)
+- goto out_up_write;
++ goto out_undead;
+
+ btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
+ /*
+@@ -4488,7 +4490,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
+ */
+ ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
+ if (ret)
+- goto out_up_write;
++ goto out_undead;
+
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+@@ -4554,15 +4556,17 @@ out_end_trans:
+ inode->i_flags |= S_DEAD;
+ out_release:
+ btrfs_subvolume_release_metadata(root, &block_rsv);
+-out_up_write:
+- up_write(&fs_info->subvol_sem);
++out_undead:
+ if (ret) {
+ spin_lock(&dest->root_item_lock);
+ root_flags = btrfs_root_flags(&dest->root_item);
+ btrfs_set_root_flags(&dest->root_item,
+ root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
+ spin_unlock(&dest->root_item_lock);
+- } else {
++ }
++out_up_write:
++ up_write(&fs_info->subvol_sem);
++ if (!ret) {
+ d_invalidate(dentry);
+ btrfs_prune_dentries(dest);
+ ASSERT(dest->send_in_progress == 0);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index a1743904202b7..69e91341fd637 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -790,6 +790,9 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+ return -EOPNOTSUPP;
+ }
+
++ if (btrfs_root_refs(&root->root_item) == 0)
++ return -ENOENT;
++
+ if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
+ return -EINVAL;
+
+@@ -2608,6 +2611,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
+ ret = -EFAULT;
+ goto out;
+ }
++ if (range.flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP) {
++ ret = -EOPNOTSUPP;
++ goto out;
++ }
+ /* compression requires us to start the IO */
+ if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
+ range.flags |= BTRFS_DEFRAG_RANGE_START_IO;
+diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
+index 6486f0d7e9931..8c4fc98ca9ce7 100644
+--- a/fs/btrfs/ref-verify.c
++++ b/fs/btrfs/ref-verify.c
+@@ -889,8 +889,10 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+ out_unlock:
+ spin_unlock(&fs_info->ref_verify_lock);
+ out:
+- if (ret)
++ if (ret) {
++ btrfs_free_ref_cache(fs_info);
+ btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
++ }
+ return ret;
+ }
+
+@@ -1021,8 +1023,8 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
+ }
+ }
+ if (ret) {
+- btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+ btrfs_free_ref_cache(fs_info);
++ btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+ }
+ btrfs_free_path(path);
+ return ret;
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index f62a408671cbc..443d2519f0a9d 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -1099,12 +1099,22 @@ out:
+ static void scrub_read_endio(struct btrfs_bio *bbio)
+ {
+ struct scrub_stripe *stripe = bbio->private;
++ struct bio_vec *bvec;
++ int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
++ int num_sectors;
++ u32 bio_size = 0;
++ int i;
++
++ ASSERT(sector_nr < stripe->nr_sectors);
++ bio_for_each_bvec_all(bvec, &bbio->bio, i)
++ bio_size += bvec->bv_len;
++ num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;
+
+ if (bbio->bio.bi_status) {
+- bitmap_set(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
+- bitmap_set(&stripe->error_bitmap, 0, stripe->nr_sectors);
++ bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
++ bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
+ } else {
+- bitmap_clear(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
++ bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
+ }
+ bio_put(&bbio->bio);
+ if (atomic_dec_and_test(&stripe->pending_io)) {
+@@ -1705,6 +1715,9 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
+ {
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ struct btrfs_bio *bbio;
++ unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
++ stripe->bg->length - stripe->logical) >>
++ fs_info->sectorsize_bits;
+ int mirror = stripe->mirror_num;
+
+ ASSERT(stripe->bg);
+@@ -1719,14 +1732,16 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
+ bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
+ scrub_read_endio, stripe);
+
+- /* Read the whole stripe. */
+ bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
+- for (int i = 0; i < BTRFS_STRIPE_LEN >> PAGE_SHIFT; i++) {
++ /* Read the whole range inside the chunk boundary. */
++ for (unsigned int cur = 0; cur < nr_sectors; cur++) {
++ struct page *page = scrub_stripe_get_page(stripe, cur);
++ unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
+ int ret;
+
+- ret = bio_add_page(&bbio->bio, stripe->pages[i], PAGE_SIZE, 0);
++ ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
+ /* We should have allocated enough bio vectors. */
+- ASSERT(ret == PAGE_SIZE);
++ ASSERT(ret == fs_info->sectorsize);
+ }
+ atomic_inc(&stripe->pending_io);
+
+diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
+index e6b51fb3ddc1e..84c05246ffd8a 100644
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -1783,6 +1783,10 @@ static ssize_t btrfs_devinfo_scrub_speed_max_store(struct kobject *kobj,
+ unsigned long long limit;
+
+ limit = memparse(buf, &endptr);
++ /* There could be trailing '\n', also catch any typos after the value. */
++ endptr = skip_spaces(endptr);
++ if (*endptr != 0)
++ return -EINVAL;
+ WRITE_ONCE(device->scrub_speed_max, limit);
+ return len;
+ }
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 50fdc69fdddf9..6eccf8496486c 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1436,7 +1436,7 @@ static int check_extent_item(struct extent_buffer *leaf,
+ if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) {
+ extent_err(leaf, slot,
+ "inline ref item overflows extent item, ptr %lu iref size %u end %lu",
+- ptr, inline_type, end);
++ ptr, btrfs_extent_inline_ref_size(inline_type), end);
+ return -EUCLEAN;
+ }
+
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 188378ca19c7f..3779e76a15d64 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -2094,6 +2094,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+
+ map = block_group->physical_map;
+
++ spin_lock(&fs_info->zone_active_bgs_lock);
+ spin_lock(&block_group->lock);
+ if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
+ ret = true;
+@@ -2106,7 +2107,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+ goto out_unlock;
+ }
+
+- spin_lock(&fs_info->zone_active_bgs_lock);
+ for (i = 0; i < map->num_stripes; i++) {
+ struct btrfs_zoned_device_info *zinfo;
+ int reserved = 0;
+@@ -2126,20 +2126,17 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+ */
+ if (atomic_read(&zinfo->active_zones_left) <= reserved) {
+ ret = false;
+- spin_unlock(&fs_info->zone_active_bgs_lock);
+ goto out_unlock;
+ }
+
+ if (!btrfs_dev_set_active_zone(device, physical)) {
+ /* Cannot activate the zone */
+ ret = false;
+- spin_unlock(&fs_info->zone_active_bgs_lock);
+ goto out_unlock;
+ }
+ if (!is_data)
+ zinfo->reserved_active_zones--;
+ }
+- spin_unlock(&fs_info->zone_active_bgs_lock);
+
+ /* Successfully activated all the zones */
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
+@@ -2147,8 +2144,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+
+ /* For the active block group list */
+ btrfs_get_block_group(block_group);
+-
+- spin_lock(&fs_info->zone_active_bgs_lock);
+ list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
+ spin_unlock(&fs_info->zone_active_bgs_lock);
+
+@@ -2156,6 +2151,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+
+ out_unlock:
+ spin_unlock(&block_group->lock);
++ spin_unlock(&fs_info->zone_active_bgs_lock);
+ return ret;
+ }
+
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index 67f8dd8a05ef2..6296c62c10fa9 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -1817,8 +1817,8 @@ static int dlm_tcp_bind(struct socket *sock)
+ memcpy(&src_addr, &dlm_local_addr[0], sizeof(src_addr));
+ make_sockaddr(&src_addr, 0, &addr_len);
+
+- result = sock->ops->bind(sock, (struct sockaddr *)&src_addr,
+- addr_len);
++ result = kernel_bind(sock, (struct sockaddr *)&src_addr,
++ addr_len);
+ if (result < 0) {
+ /* This *may* not indicate a critical error */
+ log_print("could not bind for connect: %d", result);
+@@ -1830,7 +1830,7 @@ static int dlm_tcp_bind(struct socket *sock)
+ static int dlm_tcp_connect(struct connection *con, struct socket *sock,
+ struct sockaddr *addr, int addr_len)
+ {
+- return sock->ops->connect(sock, addr, addr_len, O_NONBLOCK);
++ return kernel_connect(sock, addr, addr_len, O_NONBLOCK);
+ }
+
+ static int dlm_tcp_listen_validate(void)
+@@ -1862,8 +1862,8 @@ static int dlm_tcp_listen_bind(struct socket *sock)
+
+ /* Bind to our port */
+ make_sockaddr(&dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
+- return sock->ops->bind(sock, (struct sockaddr *)&dlm_local_addr[0],
+- addr_len);
++ return kernel_bind(sock, (struct sockaddr *)&dlm_local_addr[0],
++ addr_len);
+ }
+
+ static const struct dlm_proto_ops dlm_tcp_ops = {
+@@ -1888,12 +1888,12 @@ static int dlm_sctp_connect(struct connection *con, struct socket *sock,
+ int ret;
+
+ /*
+- * Make sock->ops->connect() function return in specified time,
++ * Make kernel_connect() function return in specified time,
+ * since O_NONBLOCK argument in connect() function does not work here,
+ * then, we should restore the default value of this attribute.
+ */
+ sock_set_sndtimeo(sock->sk, 5);
+- ret = sock->ops->connect(sock, addr, addr_len, 0);
++ ret = kernel_connect(sock, addr, addr_len, 0);
+ sock_set_sndtimeo(sock->sk, 0);
+ return ret;
+ }
+diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
+index af98e88908eea..066ddc03b7b4d 100644
+--- a/fs/erofs/decompressor.c
++++ b/fs/erofs/decompressor.c
+@@ -121,11 +121,11 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
+ }
+
+ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
+- void *inpage, unsigned int *inputmargin, int *maptype,
+- bool may_inplace)
++ void *inpage, void *out, unsigned int *inputmargin,
++ int *maptype, bool may_inplace)
+ {
+ struct z_erofs_decompress_req *rq = ctx->rq;
+- unsigned int omargin, total, i, j;
++ unsigned int omargin, total, i;
+ struct page **in;
+ void *src, *tmp;
+
+@@ -135,12 +135,13 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
+ omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
+ goto docopy;
+
+- for (i = 0; i < ctx->inpages; ++i) {
+- DBG_BUGON(rq->in[i] == NULL);
+- for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j)
+- if (rq->out[j] == rq->in[i])
+- goto docopy;
+- }
++ for (i = 0; i < ctx->inpages; ++i)
++ if (rq->out[ctx->outpages - ctx->inpages + i] !=
++ rq->in[i])
++ goto docopy;
++ kunmap_local(inpage);
++ *maptype = 3;
++ return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT);
+ }
+
+ if (ctx->inpages <= 1) {
+@@ -148,7 +149,6 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
+ return inpage;
+ }
+ kunmap_local(inpage);
+- might_sleep();
+ src = erofs_vm_map_ram(rq->in, ctx->inpages);
+ if (!src)
+ return ERR_PTR(-ENOMEM);
+@@ -204,12 +204,12 @@ int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
+ }
+
+ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+- u8 *out)
++ u8 *dst)
+ {
+ struct z_erofs_decompress_req *rq = ctx->rq;
+ bool support_0padding = false, may_inplace = false;
+ unsigned int inputmargin;
+- u8 *headpage, *src;
++ u8 *out, *headpage, *src;
+ int ret, maptype;
+
+ DBG_BUGON(*rq->in == NULL);
+@@ -230,11 +230,12 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+ }
+
+ inputmargin = rq->pageofs_in;
+- src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
++ src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin,
+ &maptype, may_inplace);
+ if (IS_ERR(src))
+ return PTR_ERR(src);
+
++ out = dst + rq->pageofs_out;
+ /* legacy format could compress extra data in a pcluster. */
+ if (rq->partial_decoding || !support_0padding)
+ ret = LZ4_decompress_safe_partial(src + inputmargin, out,
+@@ -265,7 +266,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+ vm_unmap_ram(src, ctx->inpages);
+ } else if (maptype == 2) {
+ erofs_put_pcpubuf(src);
+- } else {
++ } else if (maptype != 3) {
+ DBG_BUGON(1);
+ return -EFAULT;
+ }
+@@ -308,7 +309,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
+ }
+
+ dstmap_out:
+- ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
++ ret = z_erofs_lz4_decompress_mem(&ctx, dst);
+ if (!dst_maptype)
+ kunmap_local(dst);
+ else if (dst_maptype == 2)
+diff --git a/fs/exec.c b/fs/exec.c
+index 4aa19b24f2810..6d9ed2d765efe 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1408,6 +1408,9 @@ int begin_new_exec(struct linux_binprm * bprm)
+
+ out_unlock:
+ up_write(&me->signal->exec_update_lock);
++ if (!bprm->cred)
++ mutex_unlock(&me->signal->cred_guard_mutex);
++
+ out:
+ return retval;
+ }
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index d72b5e3c92ec4..ab023d709f72b 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -6735,11 +6735,16 @@ __acquires(bitlock)
+ static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
+ ext4_group_t grp)
+ {
+- if (grp < ext4_get_groups_count(sb))
+- return EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+- return (ext4_blocks_count(EXT4_SB(sb)->s_es) -
+- ext4_group_first_block_no(sb, grp) - 1) >>
+- EXT4_CLUSTER_BITS(sb);
++ unsigned long nr_clusters_in_group;
++
++ if (grp < (ext4_get_groups_count(sb) - 1))
++ nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
++ else
++ nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
++ ext4_group_first_block_no(sb, grp))
++ >> EXT4_CLUSTER_BITS(sb);
++
++ return nr_clusters_in_group - 1;
+ }
+
+ static bool ext4_trim_interrupted(void)
+diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
+index d645f8b302a27..9397ed39b0b4e 100644
+--- a/fs/fscache/cache.c
++++ b/fs/fscache/cache.c
+@@ -179,13 +179,14 @@ EXPORT_SYMBOL(fscache_acquire_cache);
+ void fscache_put_cache(struct fscache_cache *cache,
+ enum fscache_cache_trace where)
+ {
+- unsigned int debug_id = cache->debug_id;
++ unsigned int debug_id;
+ bool zero;
+ int ref;
+
+ if (IS_ERR_OR_NULL(cache))
+ return;
+
++ debug_id = cache->debug_id;
+ zero = __refcount_dec_and_test(&cache->ref, &ref);
+ trace_fscache_cache(debug_id, ref - 1, where);
+
+diff --git a/fs/ioctl.c b/fs/ioctl.c
+index f5fd99d6b0d4e..76cf22ac97d76 100644
+--- a/fs/ioctl.c
++++ b/fs/ioctl.c
+@@ -920,8 +920,7 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
+ if (!f.file)
+ return -EBADF;
+
+- /* RED-PEN how should LSM module know it's handling 32bit? */
+- error = security_file_ioctl(f.file, cmd, arg);
++ error = security_file_ioctl_compat(f.file, cmd, arg);
+ if (error)
+ goto out;
+
+diff --git a/fs/namei.c b/fs/namei.c
+index 71c13b2990b44..29bafbdb44ca8 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3021,20 +3021,14 @@ static struct dentry *lock_two_directories(struct dentry *p1, struct dentry *p2)
+ p = d_ancestor(p2, p1);
+ if (p) {
+ inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
+- inode_lock_nested(p1->d_inode, I_MUTEX_CHILD);
++ inode_lock_nested(p1->d_inode, I_MUTEX_PARENT2);
+ return p;
+ }
+
+ p = d_ancestor(p1, p2);
+- if (p) {
+- inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
+- inode_lock_nested(p2->d_inode, I_MUTEX_CHILD);
+- return p;
+- }
+-
+- lock_two_inodes(p1->d_inode, p2->d_inode,
+- I_MUTEX_PARENT, I_MUTEX_PARENT2);
+- return NULL;
++ inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
++ inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2);
++ return p;
+ }
+
+ /*
+@@ -4716,11 +4710,12 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname
+ *
+ * a) we can get into loop creation.
+ * b) race potential - two innocent renames can create a loop together.
+- * That's where 4.4 screws up. Current fix: serialization on
++ * That's where 4.4BSD screws up. Current fix: serialization on
+ * sb->s_vfs_rename_mutex. We might be more accurate, but that's another
+ * story.
+- * c) we have to lock _four_ objects - parents and victim (if it exists),
+- * and source.
++ * c) we may have to lock up to _four_ objects - parents and victim (if it exists),
++ * and source (if it's a non-directory or a subdirectory that moves to
++ * a different parent).
+ * And that - after we got ->i_mutex on parents (until then we don't know
+ * whether the target exists). Solution: try to be smart with locking
+ * order for inodes. We rely on the fact that tree topology may change
+@@ -4752,6 +4747,7 @@ int vfs_rename(struct renamedata *rd)
+ bool new_is_dir = false;
+ unsigned max_links = new_dir->i_sb->s_max_links;
+ struct name_snapshot old_name;
++ bool lock_old_subdir, lock_new_subdir;
+
+ if (source == target)
+ return 0;
+@@ -4805,15 +4801,32 @@ int vfs_rename(struct renamedata *rd)
+ take_dentry_name_snapshot(&old_name, old_dentry);
+ dget(new_dentry);
+ /*
+- * Lock all moved children. Moved directories may need to change parent
+- * pointer so they need the lock to prevent against concurrent
+- * directory changes moving parent pointer. For regular files we've
+- * historically always done this. The lockdep locking subclasses are
+- * somewhat arbitrary but RENAME_EXCHANGE in particular can swap
+- * regular files and directories so it's difficult to tell which
+- * subclasses to use.
++ * Lock children.
++ * The source subdirectory needs to be locked on cross-directory
++ * rename or cross-directory exchange since its parent changes.
++ * The target subdirectory needs to be locked on cross-directory
++ * exchange due to parent change and on any rename due to becoming
++ * a victim.
++ * Non-directories need locking in all cases (for NFS reasons);
++ * they get locked after any subdirectories (in inode address order).
++ *
++ * NOTE: WE ONLY LOCK UNRELATED DIRECTORIES IN CROSS-DIRECTORY CASE.
++ * NEVER, EVER DO THAT WITHOUT ->s_vfs_rename_mutex.
+ */
+- lock_two_inodes(source, target, I_MUTEX_NORMAL, I_MUTEX_NONDIR2);
++ lock_old_subdir = new_dir != old_dir;
++ lock_new_subdir = new_dir != old_dir || !(flags & RENAME_EXCHANGE);
++ if (is_dir) {
++ if (lock_old_subdir)
++ inode_lock_nested(source, I_MUTEX_CHILD);
++ if (target && (!new_is_dir || lock_new_subdir))
++ inode_lock(target);
++ } else if (new_is_dir) {
++ if (lock_new_subdir)
++ inode_lock_nested(target, I_MUTEX_CHILD);
++ inode_lock(source);
++ } else {
++ lock_two_nondirectories(source, target);
++ }
+
+ error = -EPERM;
+ if (IS_SWAPFILE(source) || (target && IS_SWAPFILE(target)))
+@@ -4861,8 +4874,9 @@ int vfs_rename(struct renamedata *rd)
+ d_exchange(old_dentry, new_dentry);
+ }
+ out:
+- inode_unlock(source);
+- if (target)
++ if (!is_dir || lock_old_subdir)
++ inode_unlock(source);
++ if (target && (!new_is_dir || lock_new_subdir))
+ inode_unlock(target);
+ dput(new_dentry);
+ if (!error) {
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 3edbfa0233e68..21180fa953806 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -7911,14 +7911,16 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ {
+ struct file_lock *fl;
+ int status = false;
+- struct nfsd_file *nf = find_any_file(fp);
++ struct nfsd_file *nf;
+ struct inode *inode;
+ struct file_lock_context *flctx;
+
++ spin_lock(&fp->fi_lock);
++ nf = find_any_file_locked(fp);
+ if (!nf) {
+ /* Any valid lock stateid should have some sort of access */
+ WARN_ON_ONCE(1);
+- return status;
++ goto out;
+ }
+
+ inode = file_inode(nf->nf_file);
+@@ -7934,7 +7936,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ }
+ spin_unlock(&flctx->flc_lock);
+ }
+- nfsd_file_put(nf);
++out:
++ spin_unlock(&fp->fi_lock);
+ return status;
+ }
+
+@@ -7944,10 +7947,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ * @cstate: NFSv4 COMPOUND state
+ * @u: RELEASE_LOCKOWNER arguments
+ *
+- * The lockowner's so_count is bumped when a lock record is added
+- * or when copying a conflicting lock. The latter case is brief,
+- * but can lead to fleeting false positives when looking for
+- * locks-in-use.
++ * Check if there are any locks still held and if not, free the lockowner
++ * and any lock state that is owned.
+ *
+ * Return values:
+ * %nfs_ok: lockowner released or not found
+@@ -7983,10 +7984,13 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
+ spin_unlock(&clp->cl_lock);
+ return nfs_ok;
+ }
+- if (atomic_read(&lo->lo_owner.so_count) != 2) {
+- spin_unlock(&clp->cl_lock);
+- nfs4_put_stateowner(&lo->lo_owner);
+- return nfserr_locks_held;
++
++ list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
++ if (check_for_locks(stp->st_stid.sc_file, lo)) {
++ spin_unlock(&clp->cl_lock);
++ nfs4_put_stateowner(&lo->lo_owner);
++ return nfserr_locks_held;
++ }
+ }
+ unhash_lockowner_locked(lo);
+ while (!list_empty(&lo->lo_owner.so_stateids)) {
+diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
+index 03bc8d5dfa318..22c29e540e3ce 100644
+--- a/fs/overlayfs/namei.c
++++ b/fs/overlayfs/namei.c
+@@ -18,10 +18,11 @@
+
+ struct ovl_lookup_data {
+ struct super_block *sb;
+- struct vfsmount *mnt;
++ const struct ovl_layer *layer;
+ struct qstr name;
+ bool is_dir;
+ bool opaque;
++ bool xwhiteouts;
+ bool stop;
+ bool last;
+ char *redirect;
+@@ -201,17 +202,13 @@ struct dentry *ovl_decode_real_fh(struct ovl_fs *ofs, struct ovl_fh *fh,
+ return real;
+ }
+
+-static bool ovl_is_opaquedir(struct ovl_fs *ofs, const struct path *path)
+-{
+- return ovl_path_check_dir_xattr(ofs, path, OVL_XATTR_OPAQUE);
+-}
+-
+ static struct dentry *ovl_lookup_positive_unlocked(struct ovl_lookup_data *d,
+ const char *name,
+ struct dentry *base, int len,
+ bool drop_negative)
+ {
+- struct dentry *ret = lookup_one_unlocked(mnt_idmap(d->mnt), name, base, len);
++ struct dentry *ret = lookup_one_unlocked(mnt_idmap(d->layer->mnt), name,
++ base, len);
+
+ if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
+ if (drop_negative && ret->d_lockref.count == 1) {
+@@ -232,10 +229,13 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
+ size_t prelen, const char *post,
+ struct dentry **ret, bool drop_negative)
+ {
++ struct ovl_fs *ofs = OVL_FS(d->sb);
+ struct dentry *this;
+ struct path path;
+ int err;
+ bool last_element = !post[0];
++ bool is_upper = d->layer->idx == 0;
++ char val;
+
+ this = ovl_lookup_positive_unlocked(d, name, base, namelen, drop_negative);
+ if (IS_ERR(this)) {
+@@ -253,8 +253,8 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
+ }
+
+ path.dentry = this;
+- path.mnt = d->mnt;
+- if (ovl_path_is_whiteout(OVL_FS(d->sb), &path)) {
++ path.mnt = d->layer->mnt;
++ if (ovl_path_is_whiteout(ofs, &path)) {
+ d->stop = d->opaque = true;
+ goto put_and_out;
+ }
+@@ -272,7 +272,7 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
+ d->stop = true;
+ goto put_and_out;
+ }
+- err = ovl_check_metacopy_xattr(OVL_FS(d->sb), &path, NULL);
++ err = ovl_check_metacopy_xattr(ofs, &path, NULL);
+ if (err < 0)
+ goto out_err;
+
+@@ -292,7 +292,12 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
+ if (d->last)
+ goto out;
+
+- if (ovl_is_opaquedir(OVL_FS(d->sb), &path)) {
++ /* overlay.opaque=x means xwhiteouts directory */
++ val = ovl_get_opaquedir_val(ofs, &path);
++ if (last_element && !is_upper && val == 'x') {
++ d->xwhiteouts = true;
++ ovl_layer_set_xwhiteouts(ofs, d->layer);
++ } else if (val == 'y') {
+ d->stop = true;
+ if (last_element)
+ d->opaque = true;
+@@ -863,7 +868,8 @@ fail:
+ * Returns next layer in stack starting from top.
+ * Returns -1 if this is the last layer.
+ */
+-int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
++int ovl_path_next(int idx, struct dentry *dentry, struct path *path,
++ const struct ovl_layer **layer)
+ {
+ struct ovl_entry *oe = OVL_E(dentry);
+ struct ovl_path *lowerstack = ovl_lowerstack(oe);
+@@ -871,13 +877,16 @@ int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
+ BUG_ON(idx < 0);
+ if (idx == 0) {
+ ovl_path_upper(dentry, path);
+- if (path->dentry)
++ if (path->dentry) {
++ *layer = &OVL_FS(dentry->d_sb)->layers[0];
+ return ovl_numlower(oe) ? 1 : -1;
++ }
+ idx++;
+ }
+ BUG_ON(idx > ovl_numlower(oe));
+ path->dentry = lowerstack[idx - 1].dentry;
+- path->mnt = lowerstack[idx - 1].layer->mnt;
++ *layer = lowerstack[idx - 1].layer;
++ path->mnt = (*layer)->mnt;
+
+ return (idx < ovl_numlower(oe)) ? idx + 1 : -1;
+ }
+@@ -1055,7 +1064,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
+ old_cred = ovl_override_creds(dentry->d_sb);
+ upperdir = ovl_dentry_upper(dentry->d_parent);
+ if (upperdir) {
+- d.mnt = ovl_upper_mnt(ofs);
++ d.layer = &ofs->layers[0];
+ err = ovl_lookup_layer(upperdir, &d, &upperdentry, true);
+ if (err)
+ goto out;
+@@ -1111,7 +1120,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
+ else if (d.is_dir || !ofs->numdatalayer)
+ d.last = lower.layer->idx == ovl_numlower(roe);
+
+- d.mnt = lower.layer->mnt;
++ d.layer = lower.layer;
+ err = ovl_lookup_layer(lower.dentry, &d, &this, false);
+ if (err)
+ goto out_put;
+@@ -1278,6 +1287,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
+
+ if (upperopaque)
+ ovl_dentry_set_opaque(dentry);
++ if (d.xwhiteouts)
++ ovl_dentry_set_xwhiteouts(dentry);
+
+ if (upperdentry)
+ ovl_dentry_set_upper_alias(dentry);
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index 05c3dd597fa8d..18132ab04ead4 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -50,7 +50,6 @@ enum ovl_xattr {
+ OVL_XATTR_METACOPY,
+ OVL_XATTR_PROTATTR,
+ OVL_XATTR_XWHITEOUT,
+- OVL_XATTR_XWHITEOUTS,
+ };
+
+ enum ovl_inode_flag {
+@@ -70,6 +69,8 @@ enum ovl_entry_flag {
+ OVL_E_UPPER_ALIAS,
+ OVL_E_OPAQUE,
+ OVL_E_CONNECTED,
++ /* Lower stack may contain xwhiteout entries */
++ OVL_E_XWHITEOUTS,
+ };
+
+ enum {
+@@ -471,6 +472,10 @@ bool ovl_dentry_test_flag(unsigned long flag, struct dentry *dentry);
+ bool ovl_dentry_is_opaque(struct dentry *dentry);
+ bool ovl_dentry_is_whiteout(struct dentry *dentry);
+ void ovl_dentry_set_opaque(struct dentry *dentry);
++bool ovl_dentry_has_xwhiteouts(struct dentry *dentry);
++void ovl_dentry_set_xwhiteouts(struct dentry *dentry);
++void ovl_layer_set_xwhiteouts(struct ovl_fs *ofs,
++ const struct ovl_layer *layer);
+ bool ovl_dentry_has_upper_alias(struct dentry *dentry);
+ void ovl_dentry_set_upper_alias(struct dentry *dentry);
+ bool ovl_dentry_needs_data_copy_up(struct dentry *dentry, int flags);
+@@ -488,11 +493,10 @@ struct file *ovl_path_open(const struct path *path, int flags);
+ int ovl_copy_up_start(struct dentry *dentry, int flags);
+ void ovl_copy_up_end(struct dentry *dentry);
+ bool ovl_already_copied_up(struct dentry *dentry, int flags);
+-bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path,
+- enum ovl_xattr ox);
++char ovl_get_dir_xattr_val(struct ovl_fs *ofs, const struct path *path,
++ enum ovl_xattr ox);
+ bool ovl_path_check_origin_xattr(struct ovl_fs *ofs, const struct path *path);
+ bool ovl_path_check_xwhiteout_xattr(struct ovl_fs *ofs, const struct path *path);
+-bool ovl_path_check_xwhiteouts_xattr(struct ovl_fs *ofs, const struct path *path);
+ bool ovl_init_uuid_xattr(struct super_block *sb, struct ovl_fs *ofs,
+ const struct path *upperpath);
+
+@@ -567,7 +571,13 @@ static inline bool ovl_is_impuredir(struct super_block *sb,
+ .mnt = ovl_upper_mnt(ofs),
+ };
+
+- return ovl_path_check_dir_xattr(ofs, &upperpath, OVL_XATTR_IMPURE);
++ return ovl_get_dir_xattr_val(ofs, &upperpath, OVL_XATTR_IMPURE) == 'y';
++}
++
++static inline char ovl_get_opaquedir_val(struct ovl_fs *ofs,
++ const struct path *path)
++{
++ return ovl_get_dir_xattr_val(ofs, path, OVL_XATTR_OPAQUE);
+ }
+
+ static inline bool ovl_redirect_follow(struct ovl_fs *ofs)
+@@ -674,7 +684,8 @@ int ovl_get_index_name(struct ovl_fs *ofs, struct dentry *origin,
+ struct dentry *ovl_get_index_fh(struct ovl_fs *ofs, struct ovl_fh *fh);
+ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
+ struct dentry *origin, bool verify);
+-int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
++int ovl_path_next(int idx, struct dentry *dentry, struct path *path,
++ const struct ovl_layer **layer);
+ int ovl_verify_lowerdata(struct dentry *dentry);
+ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags);
+diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
+index d82d2a043da2c..f02c4bf231825 100644
+--- a/fs/overlayfs/ovl_entry.h
++++ b/fs/overlayfs/ovl_entry.h
+@@ -40,6 +40,8 @@ struct ovl_layer {
+ int idx;
+ /* One fsid per unique underlying sb (upper fsid == 0) */
+ int fsid;
++ /* xwhiteouts were found on this layer */
++ bool has_xwhiteouts;
+ };
+
+ struct ovl_path {
+@@ -59,7 +61,7 @@ struct ovl_fs {
+ unsigned int numfs;
+ /* Number of data-only lower layers */
+ unsigned int numdatalayer;
+- const struct ovl_layer *layers;
++ struct ovl_layer *layers;
+ struct ovl_sb *fs;
+ /* workbasedir is the path at workdir= mount option */
+ struct dentry *workbasedir;
+diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
+index a490fc47c3e7e..8e8545bc273f6 100644
+--- a/fs/overlayfs/readdir.c
++++ b/fs/overlayfs/readdir.c
+@@ -305,8 +305,6 @@ static inline int ovl_dir_read(const struct path *realpath,
+ if (IS_ERR(realfile))
+ return PTR_ERR(realfile);
+
+- rdd->in_xwhiteouts_dir = rdd->dentry &&
+- ovl_path_check_xwhiteouts_xattr(OVL_FS(rdd->dentry->d_sb), realpath);
+ rdd->first_maybe_whiteout = NULL;
+ rdd->ctx.pos = 0;
+ do {
+@@ -359,10 +357,13 @@ static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list,
+ .is_lowest = false,
+ };
+ int idx, next;
++ const struct ovl_layer *layer;
+
+ for (idx = 0; idx != -1; idx = next) {
+- next = ovl_path_next(idx, dentry, &realpath);
++ next = ovl_path_next(idx, dentry, &realpath, &layer);
+ rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry;
++ rdd.in_xwhiteouts_dir = layer->has_xwhiteouts &&
++ ovl_dentry_has_xwhiteouts(dentry);
+
+ if (next != -1) {
+ err = ovl_dir_read(&realpath, &rdd);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index a0967bb250036..36543e9ffd744 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -1250,6 +1250,7 @@ static struct dentry *ovl_get_root(struct super_block *sb,
+ struct ovl_entry *oe)
+ {
+ struct dentry *root;
++ struct ovl_fs *ofs = OVL_FS(sb);
+ struct ovl_path *lowerpath = ovl_lowerstack(oe);
+ unsigned long ino = d_inode(lowerpath->dentry)->i_ino;
+ int fsid = lowerpath->layer->fsid;
+@@ -1271,6 +1272,20 @@ static struct dentry *ovl_get_root(struct super_block *sb,
+ ovl_set_flag(OVL_IMPURE, d_inode(root));
+ }
+
++ /* Look for xwhiteouts marker except in the lowermost layer */
++ for (int i = 0; i < ovl_numlower(oe) - 1; i++, lowerpath++) {
++ struct path path = {
++ .mnt = lowerpath->layer->mnt,
++ .dentry = lowerpath->dentry,
++ };
++
++ /* overlay.opaque=x means xwhiteouts directory */
++ if (ovl_get_opaquedir_val(ofs, &path) == 'x') {
++ ovl_layer_set_xwhiteouts(ofs, lowerpath->layer);
++ ovl_dentry_set_xwhiteouts(root);
++ }
++ }
++
+ /* Root is always merge -> can have whiteouts */
+ ovl_set_flag(OVL_WHITEOUTS, d_inode(root));
+ ovl_dentry_set_flag(OVL_E_CONNECTED, root);
+diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
+index c3f020ca13a8c..cd0722309e5d1 100644
+--- a/fs/overlayfs/util.c
++++ b/fs/overlayfs/util.c
+@@ -461,6 +461,33 @@ void ovl_dentry_set_opaque(struct dentry *dentry)
+ ovl_dentry_set_flag(OVL_E_OPAQUE, dentry);
+ }
+
++bool ovl_dentry_has_xwhiteouts(struct dentry *dentry)
++{
++ return ovl_dentry_test_flag(OVL_E_XWHITEOUTS, dentry);
++}
++
++void ovl_dentry_set_xwhiteouts(struct dentry *dentry)
++{
++ ovl_dentry_set_flag(OVL_E_XWHITEOUTS, dentry);
++}
++
++/*
++ * ovl_layer_set_xwhiteouts() is called before adding the overlay dir
++ * dentry to dcache, while readdir of that same directory happens after
++ * the overlay dir dentry is in dcache, so if some cpu observes that
++ * ovl_dentry_has_xwhiteouts(), it will also observe layer->has_xwhiteouts
++ * for the layers where xwhiteouts marker was found in that merge dir.
++ */
++void ovl_layer_set_xwhiteouts(struct ovl_fs *ofs,
++ const struct ovl_layer *layer)
++{
++ if (layer->has_xwhiteouts)
++ return;
++
++ /* Write once to read-mostly layer properties */
++ ofs->layers[layer->idx].has_xwhiteouts = true;
++}
++
+ /*
+ * For hard links and decoded file handles, it's possible for ovl_dentry_upper()
+ * to return positive, while there's no actual upper alias for the inode.
+@@ -739,19 +766,6 @@ bool ovl_path_check_xwhiteout_xattr(struct ovl_fs *ofs, const struct path *path)
+ return res >= 0;
+ }
+
+-bool ovl_path_check_xwhiteouts_xattr(struct ovl_fs *ofs, const struct path *path)
+-{
+- struct dentry *dentry = path->dentry;
+- int res;
+-
+- /* xattr.whiteouts must be a directory */
+- if (!d_is_dir(dentry))
+- return false;
+-
+- res = ovl_path_getxattr(ofs, path, OVL_XATTR_XWHITEOUTS, NULL, 0);
+- return res >= 0;
+-}
+-
+ /*
+ * Load persistent uuid from xattr into s_uuid if found, or store a new
+ * random generated value in s_uuid and in xattr.
+@@ -811,20 +825,17 @@ fail:
+ return false;
+ }
+
+-bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path,
+- enum ovl_xattr ox)
++char ovl_get_dir_xattr_val(struct ovl_fs *ofs, const struct path *path,
++ enum ovl_xattr ox)
+ {
+ int res;
+ char val;
+
+ if (!d_is_dir(path->dentry))
+- return false;
++ return 0;
+
+ res = ovl_path_getxattr(ofs, path, ox, &val, 1);
+- if (res == 1 && val == 'y')
+- return true;
+-
+- return false;
++ return res == 1 ? val : 0;
+ }
+
+ #define OVL_XATTR_OPAQUE_POSTFIX "opaque"
+@@ -837,7 +848,6 @@ bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path,
+ #define OVL_XATTR_METACOPY_POSTFIX "metacopy"
+ #define OVL_XATTR_PROTATTR_POSTFIX "protattr"
+ #define OVL_XATTR_XWHITEOUT_POSTFIX "whiteout"
+-#define OVL_XATTR_XWHITEOUTS_POSTFIX "whiteouts"
+
+ #define OVL_XATTR_TAB_ENTRY(x) \
+ [x] = { [false] = OVL_XATTR_TRUSTED_PREFIX x ## _POSTFIX, \
+@@ -854,7 +864,6 @@ const char *const ovl_xattr_table[][2] = {
+ OVL_XATTR_TAB_ENTRY(OVL_XATTR_METACOPY),
+ OVL_XATTR_TAB_ENTRY(OVL_XATTR_PROTATTR),
+ OVL_XATTR_TAB_ENTRY(OVL_XATTR_XWHITEOUT),
+- OVL_XATTR_TAB_ENTRY(OVL_XATTR_XWHITEOUTS),
+ };
+
+ int ovl_check_setxattr(struct ovl_fs *ofs, struct dentry *upperdentry,
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 226e7f66b5903..8d9286a1f2e85 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -1324,6 +1324,11 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
+ pipe->tail = tail;
+ pipe->head = head;
+
++ if (!pipe_has_watch_queue(pipe)) {
++ pipe->max_usage = nr_slots;
++ pipe->nr_accounted = nr_slots;
++ }
++
+ spin_unlock_irq(&pipe->rd_wait.lock);
+
+ /* This might have made more room for writers */
+@@ -1375,8 +1380,6 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
+ if (ret < 0)
+ goto out_revert_acct;
+
+- pipe->max_usage = nr_slots;
+- pipe->nr_accounted = nr_slots;
+ return pipe->max_usage * PAGE_SIZE;
+
+ out_revert_acct:
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 435b61054b5b9..4905420d339f4 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -2415,7 +2415,6 @@ static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p)
+
+ static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
+ {
+- struct mmu_notifier_range range;
+ struct pagemap_scan_private p = {0};
+ unsigned long walk_start;
+ size_t n_ranges_out = 0;
+@@ -2431,15 +2430,9 @@ static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
+ if (ret)
+ return ret;
+
+- /* Protection change for the range is going to happen. */
+- if (p.arg.flags & PM_SCAN_WP_MATCHING) {
+- mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
+- mm, p.arg.start, p.arg.end);
+- mmu_notifier_invalidate_range_start(&range);
+- }
+-
+ for (walk_start = p.arg.start; walk_start < p.arg.end;
+ walk_start = p.arg.walk_end) {
++ struct mmu_notifier_range range;
+ long n_out;
+
+ if (fatal_signal_pending(current)) {
+@@ -2450,8 +2443,20 @@ static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
+ ret = mmap_read_lock_killable(mm);
+ if (ret)
+ break;
++
++ /* Protection change for the range is going to happen. */
++ if (p.arg.flags & PM_SCAN_WP_MATCHING) {
++ mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
++ mm, walk_start, p.arg.end);
++ mmu_notifier_invalidate_range_start(&range);
++ }
++
+ ret = walk_page_range(mm, walk_start, p.arg.end,
+ &pagemap_scan_ops, &p);
++
++ if (p.arg.flags & PM_SCAN_WP_MATCHING)
++ mmu_notifier_invalidate_range_end(&range);
++
+ mmap_read_unlock(mm);
+
+ n_out = pagemap_scan_flush_buffer(&p);
+@@ -2477,9 +2482,6 @@ static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
+ if (pagemap_scan_writeback_args(&p.arg, uarg))
+ ret = -EFAULT;
+
+- if (p.arg.flags & PM_SCAN_WP_MATCHING)
+- mmu_notifier_invalidate_range_end(&range);
+-
+ kfree(p.vec_buf);
+ return ret;
+ }
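
The loop restructuring above follows the usual notifier bracketing shape: take mmap_read_lock(), announce the range with invalidate_range_start(), do the walk, call invalidate_range_end(), then unlock. A hedged kernel-context sketch of that shape; walk_one_range() is a hypothetical stand-in for walk_page_range():

    #include <linux/mm.h>
    #include <linux/mmu_notifier.h>

    long walk_one_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end); /* hypothetical stand-in */

    static long scan_and_maybe_wp(struct mm_struct *mm, unsigned long start,
                                  unsigned long end, bool write_protect)
    {
            struct mmu_notifier_range range;
            long ret;

            ret = mmap_read_lock_killable(mm);
            if (ret)
                    return ret;

            if (write_protect) {
                    /* announce the upcoming protection change for [start, end) */
                    mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
                                            mm, start, end);
                    mmu_notifier_invalidate_range_start(&range);
            }

            ret = walk_one_range(mm, start, end);

            if (write_protect)
                    mmu_notifier_invalidate_range_end(&range);

            mmap_read_unlock(mm);
            return ret;
    }
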
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 14bc745de199b..beb81fa00cff4 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -614,7 +614,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ "multichannel not available\n"
+ "Empty network interface list returned by server %s\n",
+ ses->server->hostname);
+- rc = -EINVAL;
++ rc = -EOPNOTSUPP;
++ ses->iface_last_update = jiffies;
+ goto out;
+ }
+
+@@ -712,7 +713,6 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+
+ ses->iface_count++;
+ spin_unlock(&ses->iface_lock);
+- ses->iface_last_update = jiffies;
+ next_iface:
+ nb_iface++;
+ next = le32_to_cpu(p->Next);
+@@ -734,11 +734,7 @@ next_iface:
+ if ((bytes_left > 8) || p->Next)
+ cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
+
+-
+- if (!ses->iface_count) {
+- rc = -EINVAL;
+- goto out;
+- }
++ ses->iface_last_update = jiffies;
+
+ out:
+ /*
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 4f971c1061f0a..f5006aa97f5b3 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -156,6 +156,56 @@ out:
+ return;
+ }
+
++/* helper function for code reuse */
++static int
++cifs_chan_skip_or_disable(struct cifs_ses *ses,
++ struct TCP_Server_Info *server,
++ bool from_reconnect)
++{
++ struct TCP_Server_Info *pserver;
++ unsigned int chan_index;
++
++ if (SERVER_IS_CHAN(server)) {
++ cifs_dbg(VFS,
++ "server %s does not support multichannel anymore. Skip secondary channel\n",
++ ses->server->hostname);
++
++ spin_lock(&ses->chan_lock);
++ chan_index = cifs_ses_get_chan_index(ses, server);
++ if (chan_index == CIFS_INVAL_CHAN_INDEX) {
++ spin_unlock(&ses->chan_lock);
++ goto skip_terminate;
++ }
++
++ ses->chans[chan_index].server = NULL;
++ spin_unlock(&ses->chan_lock);
++
++ /*
++ * the above reference of server by channel
++ * needs to be dropped without holding chan_lock
++ * as cifs_put_tcp_session takes a higher lock
++ * i.e. cifs_tcp_ses_lock
++ */
++ cifs_put_tcp_session(server, from_reconnect);
++
++ server->terminate = true;
++ cifs_signal_cifsd_for_reconnect(server, false);
++
++ /* mark primary server as needing reconnect */
++ pserver = server->primary_server;
++ cifs_signal_cifsd_for_reconnect(pserver, false);
++skip_terminate:
++ return -EHOSTDOWN;
++ }
++
++ cifs_server_dbg(VFS,
++ "server does not support multichannel anymore. Disable all other channels\n");
++ cifs_disable_secondary_channels(ses);
++
++
++ return 0;
++}
++
+ static int
+ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server, bool from_reconnect)
+@@ -164,8 +214,6 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ struct nls_table *nls_codepage = NULL;
+ struct cifs_ses *ses;
+ int xid;
+- struct TCP_Server_Info *pserver;
+- unsigned int chan_index;
+
+ /*
+ * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
+@@ -310,44 +358,11 @@ again:
+ */
+ if (ses->chan_count > 1 &&
+ !(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+- if (SERVER_IS_CHAN(server)) {
+- cifs_dbg(VFS, "server %s does not support " \
+- "multichannel anymore. skipping secondary channel\n",
+- ses->server->hostname);
+-
+- spin_lock(&ses->chan_lock);
+- chan_index = cifs_ses_get_chan_index(ses, server);
+- if (chan_index == CIFS_INVAL_CHAN_INDEX) {
+- spin_unlock(&ses->chan_lock);
+- goto skip_terminate;
+- }
+-
+- ses->chans[chan_index].server = NULL;
+- spin_unlock(&ses->chan_lock);
+-
+- /*
+- * the above reference of server by channel
+- * needs to be dropped without holding chan_lock
+- * as cifs_put_tcp_session takes a higher lock
+- * i.e. cifs_tcp_ses_lock
+- */
+- cifs_put_tcp_session(server, from_reconnect);
+-
+- server->terminate = true;
+- cifs_signal_cifsd_for_reconnect(server, false);
+-
+- /* mark primary server as needing reconnect */
+- pserver = server->primary_server;
+- cifs_signal_cifsd_for_reconnect(pserver, false);
+-
+-skip_terminate:
++ rc = cifs_chan_skip_or_disable(ses, server,
++ from_reconnect);
++ if (rc) {
+ mutex_unlock(&ses->session_mutex);
+- rc = -EHOSTDOWN;
+ goto out;
+- } else {
+- cifs_server_dbg(VFS, "does not support " \
+- "multichannel anymore. disabling all other channels\n");
+- cifs_disable_secondary_channels(ses);
+ }
+ }
+
+@@ -395,11 +410,23 @@ skip_sess_setup:
+ rc = SMB3_request_interfaces(xid, tcon, false);
+ free_xid(xid);
+
+- if (rc)
++ if (rc == -EOPNOTSUPP) {
++ /*
++ * Some servers, like the Azure SMB server, do not advertise
++ * that multichannel has been disabled with server
++ * capabilities; instead they return STATUS_NOT_IMPLEMENTED.
++ * Treat this as the server not supporting multichannel.
++ */
++
++ rc = cifs_chan_skip_or_disable(ses, server,
++ from_reconnect);
++ goto skip_add_channels;
++ } else if (rc)
+ cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
+ __func__, rc);
+
+ if (ses->chan_max > ses->chan_count &&
++ ses->iface_count &&
+ !SERVER_IS_CHAN(server)) {
+ if (ses->chan_count == 1)
+ cifs_server_dbg(VFS, "supports multichannel now\n");
+@@ -409,6 +436,7 @@ skip_sess_setup:
+ } else {
+ mutex_unlock(&ses->session_mutex);
+ }
++skip_add_channels:
+
+ if (smb2_command != SMB2_INTERNAL_CMD)
+ mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
+@@ -2279,7 +2307,7 @@ int smb2_parse_contexts(struct TCP_Server_Info *server,
+
+ noff = le16_to_cpu(cc->NameOffset);
+ nlen = le16_to_cpu(cc->NameLength);
+- if (noff + nlen >= doff)
++ if (noff + nlen > doff)
+ return -EINVAL;
+
+ name = (char *)cc + noff;
+diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
+index 7977827c65410..09e1e7771592f 100644
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -284,6 +284,7 @@ int ksmbd_conn_handler_loop(void *p)
+ goto out;
+
+ conn->last_active = jiffies;
++ set_freezable();
+ while (ksmbd_conn_alive(conn)) {
+ if (try_to_freeze())
+ continue;
+diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h
+index b7521e41402e0..0ebf91ffa2361 100644
+--- a/fs/smb/server/ksmbd_netlink.h
++++ b/fs/smb/server/ksmbd_netlink.h
+@@ -304,7 +304,8 @@ enum ksmbd_event {
+ KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST,
+ KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE = 15,
+
+- KSMBD_EVENT_MAX
++ __KSMBD_EVENT_MAX,
++ KSMBD_EVENT_MAX = __KSMBD_EVENT_MAX - 1
+ };
+
+ /*
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index e0eb7cb2a5258..53dfaac425c68 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -105,7 +105,7 @@ static int alloc_lease(struct oplock_info *opinfo, struct lease_ctx_info *lctx)
+ lease->is_dir = lctx->is_dir;
+ memcpy(lease->parent_lease_key, lctx->parent_lease_key, SMB2_LEASE_KEY_SIZE);
+ lease->version = lctx->version;
+- lease->epoch = le16_to_cpu(lctx->epoch);
++ lease->epoch = le16_to_cpu(lctx->epoch) + 1;
+ INIT_LIST_HEAD(&opinfo->lease_entry);
+ opinfo->o_lease = lease;
+
+@@ -546,6 +546,7 @@ static struct oplock_info *same_client_has_lease(struct ksmbd_inode *ci,
+ atomic_read(&ci->sop_count)) == 1) {
+ if (lease->state != SMB2_LEASE_NONE_LE &&
+ lease->state == (lctx->req_state & lease->state)) {
++ lease->epoch++;
+ lease->state |= lctx->req_state;
+ if (lctx->req_state &
+ SMB2_LEASE_WRITE_CACHING_LE)
+@@ -556,13 +557,17 @@ static struct oplock_info *same_client_has_lease(struct ksmbd_inode *ci,
+ atomic_read(&ci->sop_count)) > 1) {
+ if (lctx->req_state ==
+ (SMB2_LEASE_READ_CACHING_LE |
+- SMB2_LEASE_HANDLE_CACHING_LE))
++ SMB2_LEASE_HANDLE_CACHING_LE)) {
++ lease->epoch++;
+ lease->state = lctx->req_state;
++ }
+ }
+
+ if (lctx->req_state && lease->state ==
+- SMB2_LEASE_NONE_LE)
++ SMB2_LEASE_NONE_LE) {
++ lease->epoch++;
+ lease_none_upgrade(opinfo, lctx->req_state);
++ }
+ }
+ read_lock(&ci->m_lock);
+ }
+@@ -1035,7 +1040,8 @@ static void copy_lease(struct oplock_info *op1, struct oplock_info *op2)
+ SMB2_LEASE_KEY_SIZE);
+ lease2->duration = lease1->duration;
+ lease2->flags = lease1->flags;
+- lease2->epoch = lease1->epoch++;
++ lease2->epoch = lease1->epoch;
++ lease2->version = lease1->version;
+ }
+
+ static int add_lease_global_list(struct oplock_info *opinfo)
+@@ -1453,7 +1459,7 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
+ memcpy(buf->lcontext.LeaseKey, lease->lease_key,
+ SMB2_LEASE_KEY_SIZE);
+ buf->lcontext.LeaseFlags = lease->flags;
+- buf->lcontext.Epoch = cpu_to_le16(++lease->epoch);
++ buf->lcontext.Epoch = cpu_to_le16(lease->epoch);
+ buf->lcontext.LeaseState = lease->state;
+ memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key,
+ SMB2_LEASE_KEY_SIZE);
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 948665f8370f4..ba7a72a6a4f45 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -2323,11 +2323,12 @@ out:
+ * @eabuf: set info command buffer
+ * @buf_len: set info command buffer length
+ * @path: dentry path for get ea
++ * @get_write: get write access to a mount
+ *
+ * Return: 0 on success, otherwise error
+ */
+ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+- const struct path *path)
++ const struct path *path, bool get_write)
+ {
+ struct mnt_idmap *idmap = mnt_idmap(path->mnt);
+ char *attr_name = NULL, *value;
+@@ -3015,7 +3016,7 @@ int smb2_open(struct ksmbd_work *work)
+
+ rc = smb2_set_ea(&ea_buf->ea,
+ le32_to_cpu(ea_buf->ccontext.DataLength),
+- &path);
++ &path, false);
+ if (rc == -EOPNOTSUPP)
+ rc = 0;
+ else if (rc)
+@@ -5580,6 +5581,7 @@ static int smb2_rename(struct ksmbd_work *work,
+ if (!file_info->ReplaceIfExists)
+ flags = RENAME_NOREPLACE;
+
++ smb_break_all_levII_oplock(work, fp, 0);
+ rc = ksmbd_vfs_rename(work, &fp->filp->f_path, new_name, flags);
+ out:
+ kfree(new_name);
+@@ -5992,7 +5994,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+ return -EINVAL;
+
+ return smb2_set_ea((struct smb2_ea_info *)req->Buffer,
+- buf_len, &fp->filp->f_path);
++ buf_len, &fp->filp->f_path, true);
+ }
+ case FILE_POSITION_INFORMATION:
+ {
+diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
+index b49d47bdafc94..f29bb03f0dc47 100644
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -74,7 +74,7 @@ static int handle_unsupported_event(struct sk_buff *skb, struct genl_info *info)
+ static int handle_generic_event(struct sk_buff *skb, struct genl_info *info);
+ static int ksmbd_ipc_heartbeat_request(void);
+
+-static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX] = {
++static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX + 1] = {
+ [KSMBD_EVENT_UNSPEC] = {
+ .len = 0,
+ },
+@@ -403,7 +403,7 @@ static int handle_generic_event(struct sk_buff *skb, struct genl_info *info)
+ return -EPERM;
+ #endif
+
+- if (type >= KSMBD_EVENT_MAX) {
++ if (type > KSMBD_EVENT_MAX) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
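
The two ksmbd hunks above pair a __KSMBD_EVENT_MAX counting sentinel with a policy array sized KSMBD_EVENT_MAX + 1, so the highest event keeps a policy slot and the bounds check becomes type > KSMBD_EVENT_MAX. A small self-contained illustration of the same pattern; all names here are made up:

    #include <linux/errno.h>

    enum example_event {
            EXAMPLE_EVENT_UNSPEC,
            EXAMPLE_EVENT_PING,
            EXAMPLE_EVENT_PONG,
            __EXAMPLE_EVENT_MAX,                          /* counts the entries  */
            EXAMPLE_EVENT_MAX = __EXAMPLE_EVENT_MAX - 1   /* highest valid value */
    };

    /* sized MAX + 1 so EXAMPLE_EVENT_PONG still has a slot */
    static const unsigned int example_payload_len[EXAMPLE_EVENT_MAX + 1] = {
            [EXAMPLE_EVENT_PING] = 4,
            [EXAMPLE_EVENT_PONG] = 4,
    };

    static int example_validate(unsigned int type)
    {
            return type > EXAMPLE_EVENT_MAX ? -EINVAL : 0; /* mirrors the new check */
    }
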
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
+index 3b13c648d4900..e413a9cf8ee38 100644
+--- a/fs/ubifs/dir.c
++++ b/fs/ubifs/dir.c
+@@ -1234,6 +1234,8 @@ out_cancel:
+ dir_ui->ui_size = dir->i_size;
+ mutex_unlock(&dir_ui->ui_mutex);
+ out_inode:
++ /* Free inode->i_link before inode is marked as bad. */
++ fscrypt_free_inode(inode);
+ make_bad_inode(inode);
+ iput(inode);
+ out_fname:
+diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
+index 764304595e8b0..3f8e6233fff7c 100644
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -1510,6 +1510,18 @@ xfs_fs_fill_super(
+
+ mp->m_super = sb;
+
++ /*
++ * Copy VFS mount flags from the context now that all parameter parsing
++ * is guaranteed to have been completed by either the old mount API or
++ * the newer fsopen/fsconfig API.
++ */
++ if (fc->sb_flags & SB_RDONLY)
++ set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
++ if (fc->sb_flags & SB_DIRSYNC)
++ mp->m_features |= XFS_FEAT_DIRSYNC;
++ if (fc->sb_flags & SB_SYNCHRONOUS)
++ mp->m_features |= XFS_FEAT_WSYNC;
++
+ error = xfs_fs_validate_params(mp);
+ if (error)
+ return error;
+@@ -1979,6 +1991,11 @@ static const struct fs_context_operations xfs_context_ops = {
+ .free = xfs_fs_free,
+ };
+
++/*
++ * WARNING: do not initialise any parameters in this function that depend on
++ * mount option parsing having already been performed as this can be called from
++ * fsopen() before any parameters have been set.
++ */
+ static int xfs_init_fs_context(
+ struct fs_context *fc)
+ {
+@@ -2010,16 +2027,6 @@ static int xfs_init_fs_context(
+ mp->m_logbsize = -1;
+ mp->m_allocsize_log = 16; /* 64k */
+
+- /*
+- * Copy binary VFS mount flags we are interested in.
+- */
+- if (fc->sb_flags & SB_RDONLY)
+- set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
+- if (fc->sb_flags & SB_DIRSYNC)
+- mp->m_features |= XFS_FEAT_DIRSYNC;
+- if (fc->sb_flags & SB_SYNCHRONOUS)
+- mp->m_features |= XFS_FEAT_WSYNC;
+-
+ fc->s_fs_info = mp;
+ fc->ops = &xfs_context_ops;
+
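
The move matters with the new mount API: flags such as "ro" only reach fc->sb_flags through fsconfig() calls made after fsopen() has already run ->init_fs_context(), and they are only final by the time FSCONFIG_CMD_CREATE invokes ->fill_super(). A hedged userspace sketch of that ordering (device path hypothetical, error handling omitted, assumes headers that define the fsopen/fsconfig syscall numbers):

    #include <linux/mount.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int mount_xfs_readonly(void)
    {
            int fsfd = syscall(SYS_fsopen, "xfs", 0);  /* ->init_fs_context() runs here */

            syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source", "/dev/sdX1", 0);
            syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_FLAG, "ro", NULL, 0);   /* SB_RDONLY set only now */
            syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0); /* ->fill_super() runs here */
            return syscall(SYS_fsmount, fsfd, 0, 0);
    }
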
+diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
+index e2640dc64e081..ea36aa79dca25 100644
+--- a/include/drm/drm_drv.h
++++ b/include/drm/drm_drv.h
+@@ -110,6 +110,15 @@ enum drm_driver_feature {
+ * Driver supports user defined GPU VA bindings for GEM objects.
+ */
+ DRIVER_GEM_GPUVA = BIT(8),
++ /**
++ * @DRIVER_CURSOR_HOTSPOT:
++ *
++ * Driver supports and requires cursor hotspot information in the
++ * cursor plane (e.g. cursor plane has to actually track the mouse
++ * cursor and the clients are required to set hotspot in order for
++ * the cursor planes to work correctly).
++ */
++ DRIVER_CURSOR_HOTSPOT = BIT(9),
+
+ /* IMPORTANT: Below are all the legacy flags, add new ones above. */
+
+diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
+index e1b5b4282f75d..8f35dcea82d3b 100644
+--- a/include/drm/drm_file.h
++++ b/include/drm/drm_file.h
+@@ -226,6 +226,18 @@ struct drm_file {
+ */
+ bool is_master;
+
++ /**
++ * @supports_virtualized_cursor_plane:
++ *
++ * This client is capable of handling the cursor plane with the
++ * restrictions imposed on it by the virtualized drivers.
++ *
++ * This implies that the cursor plane has to behave like a cursor
++ * i.e. track cursor movement. It also requires setting of the
++ * hotspot properties by the client on the cursor plane.
++ */
++ bool supports_virtualized_cursor_plane;
++
+ /**
+ * @master:
+ *
+diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
+index 79d62856defbf..fef775200a81f 100644
+--- a/include/drm/drm_plane.h
++++ b/include/drm/drm_plane.h
+@@ -190,6 +190,16 @@ struct drm_plane_state {
+ */
+ struct drm_property_blob *fb_damage_clips;
+
++ /**
++ * @ignore_damage_clips:
++ *
++ * Set by drivers to indicate the drm_atomic_helper_damage_iter_init()
++ * helper that the @fb_damage_clips blob property should be ignored.
++ *
++ * See :ref:`damage_tracking_properties` for more information.
++ */
++ bool ignore_damage_clips;
++
+ /**
+ * @src:
+ *
+diff --git a/include/linux/async.h b/include/linux/async.h
+index cce4ad31e8fcf..33c9ff4afb492 100644
+--- a/include/linux/async.h
++++ b/include/linux/async.h
+@@ -90,6 +90,8 @@ async_schedule_dev(async_func_t func, struct device *dev)
+ return async_schedule_node(func, dev, dev_to_node(dev));
+ }
+
++bool async_schedule_dev_nocall(async_func_t func, struct device *dev);
++
+ /**
+ * async_schedule_dev_domain - A device specific version of async_schedule_domain
+ * @func: function to execute asynchronously
+diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
+index ff217a5ce5521..472cb16458b0b 100644
+--- a/include/linux/lsm_hook_defs.h
++++ b/include/linux/lsm_hook_defs.h
+@@ -171,6 +171,8 @@ LSM_HOOK(int, 0, file_alloc_security, struct file *file)
+ LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file)
+ LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd,
+ unsigned long arg)
++LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd,
++ unsigned long arg)
+ LSM_HOOK(int, 0, mmap_addr, unsigned long addr)
+ LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
+diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
+index b0da04fe087bb..34dfcc77f505a 100644
+--- a/include/linux/mc146818rtc.h
++++ b/include/linux/mc146818rtc.h
+@@ -126,10 +126,11 @@ struct cmos_rtc_board_info {
+ #endif /* ARCH_RTC_LOCATION */
+
+ bool mc146818_does_rtc_work(void);
+-int mc146818_get_time(struct rtc_time *time);
++int mc146818_get_time(struct rtc_time *time, int timeout);
+ int mc146818_set_time(struct rtc_time *time);
+
+ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
++ int timeout,
+ void *param);
+
+ #endif /* _MC146818RTC_H */
+diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
+index 6f7725238abc2..3fb428ce7d1c7 100644
+--- a/include/linux/mlx5/fs.h
++++ b/include/linux/mlx5/fs.h
+@@ -132,6 +132,7 @@ struct mlx5_flow_handle;
+
+ enum {
+ FLOW_CONTEXT_HAS_TAG = BIT(0),
++ FLOW_CONTEXT_UPLINK_HAIRPIN_EN = BIT(1),
+ };
+
+ struct mlx5_flow_context {
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 3f7b664d625b9..fb8d26a15df47 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -3557,7 +3557,7 @@ struct mlx5_ifc_flow_context_bits {
+ u8 action[0x10];
+
+ u8 extended_destination[0x1];
+- u8 reserved_at_81[0x1];
++ u8 uplink_hairpin_en[0x1];
+ u8 flow_source[0x2];
+ u8 encrypt_decrypt_type[0x4];
+ u8 destination_list_size[0x18];
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 9db36e1977125..a7c32324c2639 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -1793,6 +1793,7 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
+ #define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
+
+ struct mem_section_usage {
++ struct rcu_head rcu;
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+ DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
+ #endif
+@@ -1986,7 +1987,7 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+ {
+ int idx = subsection_map_index(pfn);
+
+- return test_bit(idx, ms->usage->subsection_map);
++ return test_bit(idx, READ_ONCE(ms->usage)->subsection_map);
+ }
+ #else
+ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+@@ -2010,6 +2011,7 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+ static inline int pfn_valid(unsigned long pfn)
+ {
+ struct mem_section *ms;
++ int ret;
+
+ /*
+ * Ensure the upper PAGE_SHIFT bits are clear in the
+@@ -2023,13 +2025,19 @@ static inline int pfn_valid(unsigned long pfn)
+ if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+ return 0;
+ ms = __pfn_to_section(pfn);
+- if (!valid_section(ms))
++ rcu_read_lock();
++ if (!valid_section(ms)) {
++ rcu_read_unlock();
+ return 0;
++ }
+ /*
+ * Traditionally early sections always returned pfn_valid() for
+ * the entire section-sized span.
+ */
+- return early_section(ms) || pfn_section_valid(ms, pfn);
++ ret = early_section(ms) || pfn_section_valid(ms, pfn);
++ rcu_read_unlock();
++
++ return ret;
+ }
+ #endif
+
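
pfn_valid() now dereferences the usage structure only inside an RCU read-side critical section, paired with READ_ONCE(), so a concurrent hot-remove can retire that structure after a grace period without readers tripping over it. A generic sketch of the reader side of that pattern; the types here are illustrative, not the mem_section definitions:

    #include <linux/bitops.h>
    #include <linux/rcupdate.h>
    #include <linux/types.h>

    struct usage_map {
            unsigned long subsection_map;
    };

    struct section_info {
            struct usage_map __rcu *usage;
    };

    static bool subsection_bit_set(struct section_info *si, int idx)
    {
            struct usage_map *u;
            bool ret = false;

            rcu_read_lock();
            u = rcu_dereference(si->usage);
            if (u)
                    ret = test_bit(idx, &u->subsection_map);
            rcu_read_unlock();

            return ret;
    }
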
+diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
+index c29ace15a053a..9d0fc5109af66 100644
+--- a/include/linux/mtd/rawnand.h
++++ b/include/linux/mtd/rawnand.h
+@@ -1265,6 +1265,7 @@ struct nand_secure_region {
+ * @cont_read: Sequential page read internals
+ * @cont_read.ongoing: Whether a continuous read is ongoing or not
+ * @cont_read.first_page: Start of the continuous read operation
++ * @cont_read.pause_page: End of the current sequential cache read operation
+ * @cont_read.last_page: End of the continuous read operation
+ * @controller: The hardware controller structure which is shared among multiple
+ * independent devices
+@@ -1321,6 +1322,7 @@ struct nand_chip {
+ struct {
+ bool ongoing;
+ unsigned int first_page;
++ unsigned int pause_page;
+ unsigned int last_page;
+ } cont_read;
+
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index b26fe858fd444..3c2fc291b071d 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -261,8 +261,8 @@ static inline int page_try_dup_anon_rmap(struct page *page, bool compound,
+ * guarantee the pinned page won't be randomly replaced in the
+ * future on write faults.
+ */
+- if (likely(!is_device_private_page(page) &&
+- unlikely(page_needs_cow_for_dma(vma, page))))
++ if (likely(!is_device_private_page(page)) &&
++ unlikely(page_needs_cow_for_dma(vma, page)))
+ return -EBUSY;
+
+ ClearPageAnonExclusive(page);
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 1d1df326c881c..9d3138c6364c3 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -389,6 +389,8 @@ int security_file_permission(struct file *file, int mask);
+ int security_file_alloc(struct file *file);
+ void security_file_free(struct file *file);
+ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
++int security_file_ioctl_compat(struct file *file, unsigned int cmd,
++ unsigned long arg);
+ int security_mmap_file(struct file *file, unsigned long prot,
+ unsigned long flags);
+ int security_mmap_addr(unsigned long addr);
+@@ -987,6 +989,13 @@ static inline int security_file_ioctl(struct file *file, unsigned int cmd,
+ return 0;
+ }
+
++static inline int security_file_ioctl_compat(struct file *file,
++ unsigned int cmd,
++ unsigned long arg)
++{
++ return 0;
++}
++
+ static inline int security_mmap_file(struct file *file, unsigned long prot,
+ unsigned long flags)
+ {
+diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
+index 5fb1f12c33f90..c44f4b47b9453 100644
+--- a/include/linux/seq_buf.h
++++ b/include/linux/seq_buf.h
+@@ -22,9 +22,8 @@ struct seq_buf {
+ };
+
+ #define DECLARE_SEQ_BUF(NAME, SIZE) \
+- char __ ## NAME ## _buffer[SIZE] = ""; \
+ struct seq_buf NAME = { \
+- .buffer = &__ ## NAME ## _buffer, \
++ .buffer = (char[SIZE]) { 0 }, \
+ .size = SIZE, \
+ }
+
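
The new DECLARE_SEQ_BUF() backs the buffer with an anonymous C99 compound literal instead of a second named array, so the macro no longer introduces a __NAME_buffer identifier. A standalone illustration of the same construct; struct and macro names here are made up:

    #include <stdio.h>

    struct strbuf {
            char *buffer;
            unsigned int size;
    };

    /* the storage is an unnamed compound literal: static storage at file
     * scope, automatic storage inside a function */
    #define DECLARE_STRBUF(name, sz) \
            struct strbuf name = { .buffer = (char[sz]) { 0 }, .size = (sz) }

    int main(void)
    {
            DECLARE_STRBUF(sb, 32);

            snprintf(sb.buffer, sb.size, "hello");
            printf("%s (%u bytes of storage)\n", sb.buffer, sb.size);
            return 0;
    }
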
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index c953b8c0d2f43..bd4418377bacf 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -500,12 +500,6 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
+ return !!psock->saved_data_ready;
+ }
+
+-static inline bool sk_is_udp(const struct sock *sk)
+-{
+- return sk->sk_type == SOCK_DGRAM &&
+- sk->sk_protocol == IPPROTO_UDP;
+-}
+-
+ #if IS_ENABLED(CONFIG_NET_SOCK_MSG)
+
+ #define BPF_F_STRPARSER (1UL << 1)
+diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
+index 4f3d14bb15385..c383579a008ba 100644
+--- a/include/linux/soundwire/sdw.h
++++ b/include/linux/soundwire/sdw.h
+@@ -886,7 +886,8 @@ struct sdw_master_ops {
+ * struct sdw_bus - SoundWire bus
+ * @dev: Shortcut to &bus->md->dev to avoid changing the entire code.
+ * @md: Master device
+- * @link_id: Link id number, can be 0 to N, unique for each Master
++ * @controller_id: system-unique controller ID. If set to -1, the bus @id will be used.
++ * @link_id: Link id number, can be 0 to N, unique for each Controller
+ * @id: bus system-wide unique id
+ * @slaves: list of Slaves on this bus
+ * @assigned: Bitmap for Slave device numbers.
+@@ -918,6 +919,7 @@ struct sdw_master_ops {
+ struct sdw_bus {
+ struct device *dev;
+ struct sdw_master_device *md;
++ int controller_id;
+ unsigned int link_id;
+ int id;
+ struct list_head slaves;
+diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
+index fd9d12de7e929..59fdd40740558 100644
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -125,6 +125,7 @@ struct cachestat;
+ #define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL))
+ #define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
+ #define __SC_CAST(t, a) (__force t) a
++#define __SC_TYPE(t, a) t
+ #define __SC_ARGS(t, a) a
+ #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
+
+diff --git a/include/media/v4l2-cci.h b/include/media/v4l2-cci.h
+index 0f6803e4b17e9..8b0b361b464cc 100644
+--- a/include/media/v4l2-cci.h
++++ b/include/media/v4l2-cci.h
+@@ -7,6 +7,8 @@
+ #ifndef _V4L2_CCI_H
+ #define _V4L2_CCI_H
+
++#include <linux/bitfield.h>
++#include <linux/bits.h>
+ #include <linux/types.h>
+
+ struct i2c_client;
+@@ -33,11 +35,20 @@ struct cci_reg_sequence {
+ #define CCI_REG_WIDTH_SHIFT 16
+ #define CCI_REG_WIDTH_MASK GENMASK(19, 16)
+
++#define CCI_REG_WIDTH_BYTES(x) FIELD_GET(CCI_REG_WIDTH_MASK, x)
++#define CCI_REG_WIDTH(x) (CCI_REG_WIDTH_BYTES(x) << 3)
++#define CCI_REG_ADDR(x) FIELD_GET(CCI_REG_ADDR_MASK, x)
++#define CCI_REG_LE BIT(20)
++
+ #define CCI_REG8(x) ((1 << CCI_REG_WIDTH_SHIFT) | (x))
+ #define CCI_REG16(x) ((2 << CCI_REG_WIDTH_SHIFT) | (x))
+ #define CCI_REG24(x) ((3 << CCI_REG_WIDTH_SHIFT) | (x))
+ #define CCI_REG32(x) ((4 << CCI_REG_WIDTH_SHIFT) | (x))
+ #define CCI_REG64(x) ((8 << CCI_REG_WIDTH_SHIFT) | (x))
++#define CCI_REG16_LE(x) (CCI_REG_LE | (2U << CCI_REG_WIDTH_SHIFT) | (x))
++#define CCI_REG24_LE(x) (CCI_REG_LE | (3U << CCI_REG_WIDTH_SHIFT) | (x))
++#define CCI_REG32_LE(x) (CCI_REG_LE | (4U << CCI_REG_WIDTH_SHIFT) | (x))
++#define CCI_REG64_LE(x) (CCI_REG_LE | (8U << CCI_REG_WIDTH_SHIFT) | (x))
+
+ /**
+ * cci_read() - Read a value from a single CCI register
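
The new helpers simply unpack what the existing CCI_REGxx() macros encode: bits 19:16 carry the width in bytes, bit 20 marks a little-endian register, and the low bits carry the address. A standalone worked example with the masks written out; the mask values are assumed to mirror the header's GENMASK() definitions:

    #include <stdio.h>

    #define WIDTH_SHIFT 16
    #define WIDTH_MASK  (0xfUL << WIDTH_SHIFT) /* assumed GENMASK(19, 16)  */
    #define ADDR_MASK   0xffffUL               /* assumed low address bits */
    #define LE_FLAG     (1UL << 20)            /* CCI_REG_LE               */

    int main(void)
    {
            /* equivalent of CCI_REG16_LE(0x3000) */
            unsigned long reg = LE_FLAG | (2UL << WIDTH_SHIFT) | 0x3000;

            printf("bytes=%lu bits=%lu addr=0x%lx le=%d\n",
                   (reg & WIDTH_MASK) >> WIDTH_SHIFT,       /* 2      */
                   ((reg & WIDTH_MASK) >> WIDTH_SHIFT) * 8,  /* 16     */
                   reg & ADDR_MASK,                          /* 0x3000 */
                   !!(reg & LE_FLAG));                       /* 1      */
            return 0;
    }
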
+diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
+index 5531dd08061e5..0754c463224a4 100644
+--- a/include/net/af_rxrpc.h
++++ b/include/net/af_rxrpc.h
+@@ -15,6 +15,7 @@ struct key;
+ struct sock;
+ struct socket;
+ struct rxrpc_call;
++struct rxrpc_peer;
+ enum rxrpc_abort_reason;
+
+ enum rxrpc_interruptibility {
+@@ -41,13 +42,14 @@ void rxrpc_kernel_new_call_notification(struct socket *,
+ rxrpc_notify_new_call_t,
+ rxrpc_discard_new_call_t);
+ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
+- struct sockaddr_rxrpc *srx,
++ struct rxrpc_peer *peer,
+ struct key *key,
+ unsigned long user_call_ID,
+ s64 tx_total_len,
+ u32 hard_timeout,
+ gfp_t gfp,
+ rxrpc_notify_rx_t notify_rx,
++ u16 service_id,
+ bool upgrade,
+ enum rxrpc_interruptibility interruptibility,
+ unsigned int debug_id);
+@@ -60,9 +62,14 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
+ u32, int, enum rxrpc_abort_reason);
+ void rxrpc_kernel_shutdown_call(struct socket *sock, struct rxrpc_call *call);
+ void rxrpc_kernel_put_call(struct socket *sock, struct rxrpc_call *call);
+-void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
+- struct sockaddr_rxrpc *);
+-bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *);
++struct rxrpc_peer *rxrpc_kernel_lookup_peer(struct socket *sock,
++ struct sockaddr_rxrpc *srx, gfp_t gfp);
++void rxrpc_kernel_put_peer(struct rxrpc_peer *peer);
++struct rxrpc_peer *rxrpc_kernel_get_peer(struct rxrpc_peer *peer);
++struct rxrpc_peer *rxrpc_kernel_get_call_peer(struct socket *sock, struct rxrpc_call *call);
++const struct sockaddr_rxrpc *rxrpc_kernel_remote_srx(const struct rxrpc_peer *peer);
++const struct sockaddr *rxrpc_kernel_remote_addr(const struct rxrpc_peer *peer);
++unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *);
+ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
+ rxrpc_user_attach_call_t, unsigned long, gfp_t,
+ unsigned int);
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index d0a2f827d5f20..9ab4bf704e864 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -357,4 +357,12 @@ static inline bool inet_csk_has_ulp(const struct sock *sk)
+ return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops;
+ }
+
++static inline void inet_init_csk_locks(struct sock *sk)
++{
++ struct inet_connection_sock *icsk = inet_csk(sk);
++
++ spin_lock_init(&icsk->icsk_accept_queue.rskq_lock);
++ spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock);
++}
++
+ #endif /* _INET_CONNECTION_SOCK_H */
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
+index 74db6d97cae10..8d5fe15b0f6f2 100644
+--- a/include/net/inet_sock.h
++++ b/include/net/inet_sock.h
+@@ -310,11 +310,6 @@ static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet)
+ #define inet_assign_bit(nr, sk, val) \
+ assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
+
+-static inline bool sk_is_inet(struct sock *sk)
+-{
+- return sk->sk_family == AF_INET || sk->sk_family == AF_INET6;
+-}
+-
+ /**
+ * sk_to_full_sk - Access to a full socket
+ * @sk: pointer to a socket
+diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
+index 7e73f8e5e4970..1d55ba7c45be1 100644
+--- a/include/net/llc_pdu.h
++++ b/include/net/llc_pdu.h
+@@ -262,8 +262,7 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
+ */
+ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
+ {
+- if (skb->protocol == htons(ETH_P_802_2))
+- memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
++ memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
+ }
+
+ /**
+@@ -275,8 +274,7 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
+ */
+ static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
+ {
+- if (skb->protocol == htons(ETH_P_802_2))
+- memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
++ memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
+ }
+
+ /**
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index dcb9160e64671..959a7725c27bb 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -375,6 +375,10 @@ struct tcf_proto_ops {
+ struct nlattr **tca,
+ struct netlink_ext_ack *extack);
+ void (*tmplt_destroy)(void *tmplt_priv);
++ void (*tmplt_reoffload)(struct tcf_chain *chain,
++ bool add,
++ flow_setup_cb_t *cb,
++ void *cb_priv);
+ struct tcf_exts * (*get_exts)(const struct tcf_proto *tp,
+ u32 handle);
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 0201136b0b9ca..f9a9f61fa1222 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2794,9 +2794,25 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
+ &skb_shinfo(skb)->tskey);
+ }
+
++static inline bool sk_is_inet(const struct sock *sk)
++{
++ int family = READ_ONCE(sk->sk_family);
++
++ return family == AF_INET || family == AF_INET6;
++}
++
+ static inline bool sk_is_tcp(const struct sock *sk)
+ {
+- return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
++ return sk_is_inet(sk) &&
++ sk->sk_type == SOCK_STREAM &&
++ sk->sk_protocol == IPPROTO_TCP;
++}
++
++static inline bool sk_is_udp(const struct sock *sk)
++{
++ return sk_is_inet(sk) &&
++ sk->sk_type == SOCK_DGRAM &&
++ sk->sk_protocol == IPPROTO_UDP;
+ }
+
+ static inline bool sk_is_stream_unix(const struct sock *sk)
+diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
+index 1f6fc8c7a84c6..5425f7ad5ebde 100644
+--- a/include/net/xdp_sock_drv.h
++++ b/include/net/xdp_sock_drv.h
+@@ -147,11 +147,29 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+ return ret;
+ }
+
++static inline void xsk_buff_del_tail(struct xdp_buff *tail)
++{
++ struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
++
++ list_del(&xskb->xskb_list_node);
++}
++
++static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
++{
++ struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
++ struct xdp_buff_xsk *frag;
++
++ frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
++ xskb_list_node);
++ return &frag->xdp;
++}
++
+ static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+ {
+ xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
+ xdp->data_meta = xdp->data;
+ xdp->data_end = xdp->data + size;
++ xdp->flags = 0;
+ }
+
+ static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
+@@ -309,6 +327,15 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+ return NULL;
+ }
+
++static inline void xsk_buff_del_tail(struct xdp_buff *tail)
++{
++}
++
++static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
++{
++ return NULL;
++}
++
+ static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+ {
+ }
+diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
+index e9d412d19dbbb..caec276515dcc 100644
+--- a/include/trace/events/afs.h
++++ b/include/trace/events/afs.h
+@@ -1216,6 +1216,31 @@ TRACE_EVENT(afs_file_error,
+ __print_symbolic(__entry->where, afs_file_errors))
+ );
+
++TRACE_EVENT(afs_bulkstat_error,
++ TP_PROTO(struct afs_operation *op, struct afs_fid *fid, unsigned int index, s32 abort),
++
++ TP_ARGS(op, fid, index, abort),
++
++ TP_STRUCT__entry(
++ __field_struct(struct afs_fid, fid)
++ __field(unsigned int, op)
++ __field(unsigned int, index)
++ __field(s32, abort)
++ ),
++
++ TP_fast_assign(
++ __entry->op = op->debug_id;
++ __entry->fid = *fid;
++ __entry->index = index;
++ __entry->abort = abort;
++ ),
++
++ TP_printk("OP=%08x[%02x] %llx:%llx:%x a=%d",
++ __entry->op, __entry->index,
++ __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
++ __entry->abort)
++ );
++
+ TRACE_EVENT(afs_cm_no_server,
+ TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx),
+
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index f7e537f64db45..4c1ef7b3705c2 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -178,7 +178,9 @@
+ #define rxrpc_peer_traces \
+ EM(rxrpc_peer_free, "FREE ") \
+ EM(rxrpc_peer_get_accept, "GET accept ") \
++ EM(rxrpc_peer_get_application, "GET app ") \
+ EM(rxrpc_peer_get_bundle, "GET bundle ") \
++ EM(rxrpc_peer_get_call, "GET call ") \
+ EM(rxrpc_peer_get_client_conn, "GET cln-conn") \
+ EM(rxrpc_peer_get_input, "GET input ") \
+ EM(rxrpc_peer_get_input_error, "GET inpt-err") \
+@@ -187,6 +189,7 @@
+ EM(rxrpc_peer_get_service_conn, "GET srv-conn") \
+ EM(rxrpc_peer_new_client, "NEW client ") \
+ EM(rxrpc_peer_new_prealloc, "NEW prealloc") \
++ EM(rxrpc_peer_put_application, "PUT app ") \
+ EM(rxrpc_peer_put_bundle, "PUT bundle ") \
+ EM(rxrpc_peer_put_call, "PUT call ") \
+ EM(rxrpc_peer_put_conn, "PUT conn ") \
+diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
+index 7c29d82db9ee0..f8bc34a6bcfa2 100644
+--- a/include/uapi/linux/btrfs.h
++++ b/include/uapi/linux/btrfs.h
+@@ -614,6 +614,9 @@ struct btrfs_ioctl_clone_range_args {
+ */
+ #define BTRFS_DEFRAG_RANGE_COMPRESS 1
+ #define BTRFS_DEFRAG_RANGE_START_IO 2
++#define BTRFS_DEFRAG_RANGE_FLAGS_SUPP (BTRFS_DEFRAG_RANGE_COMPRESS | \
++ BTRFS_DEFRAG_RANGE_START_IO)
++
+ struct btrfs_ioctl_defrag_range_args {
+ /* start of the defrag operation */
+ __u64 start;
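
A supported-flags mask like the one added above is typically used to reject ioctl arguments that carry bits the kernel does not understand. A hedged sketch of such a check; the caller is hypothetical and this is not the btrfs ioctl code itself:

    /* kernel-context sketch; assumes the uapi definitions above are in scope */
    static int check_defrag_range_flags(__u64 flags)
    {
            /* refuse requests with bits outside the supported set */
            if (flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP)
                    return -EOPNOTSUPP;

            return 0;
    }
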
+diff --git a/kernel/async.c b/kernel/async.c
+index b2c4ba5686ee4..673bba6bdf3a0 100644
+--- a/kernel/async.c
++++ b/kernel/async.c
+@@ -145,6 +145,39 @@ static void async_run_entry_fn(struct work_struct *work)
+ wake_up(&async_done);
+ }
+
++static async_cookie_t __async_schedule_node_domain(async_func_t func,
++ void *data, int node,
++ struct async_domain *domain,
++ struct async_entry *entry)
++{
++ async_cookie_t newcookie;
++ unsigned long flags;
++
++ INIT_LIST_HEAD(&entry->domain_list);
++ INIT_LIST_HEAD(&entry->global_list);
++ INIT_WORK(&entry->work, async_run_entry_fn);
++ entry->func = func;
++ entry->data = data;
++ entry->domain = domain;
++
++ spin_lock_irqsave(&async_lock, flags);
++
++ /* allocate cookie and queue */
++ newcookie = entry->cookie = next_cookie++;
++
++ list_add_tail(&entry->domain_list, &domain->pending);
++ if (domain->registered)
++ list_add_tail(&entry->global_list, &async_global_pending);
++
++ atomic_inc(&entry_count);
++ spin_unlock_irqrestore(&async_lock, flags);
++
++ /* schedule for execution */
++ queue_work_node(node, system_unbound_wq, &entry->work);
++
++ return newcookie;
++}
++
+ /**
+ * async_schedule_node_domain - NUMA specific version of async_schedule_domain
+ * @func: function to execute asynchronously
+@@ -186,29 +219,8 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
+ func(data, newcookie);
+ return newcookie;
+ }
+- INIT_LIST_HEAD(&entry->domain_list);
+- INIT_LIST_HEAD(&entry->global_list);
+- INIT_WORK(&entry->work, async_run_entry_fn);
+- entry->func = func;
+- entry->data = data;
+- entry->domain = domain;
+-
+- spin_lock_irqsave(&async_lock, flags);
+-
+- /* allocate cookie and queue */
+- newcookie = entry->cookie = next_cookie++;
+-
+- list_add_tail(&entry->domain_list, &domain->pending);
+- if (domain->registered)
+- list_add_tail(&entry->global_list, &async_global_pending);
+-
+- atomic_inc(&entry_count);
+- spin_unlock_irqrestore(&async_lock, flags);
+-
+- /* schedule for execution */
+- queue_work_node(node, system_unbound_wq, &entry->work);
+
+- return newcookie;
++ return __async_schedule_node_domain(func, data, node, domain, entry);
+ }
+ EXPORT_SYMBOL_GPL(async_schedule_node_domain);
+
+@@ -231,6 +243,35 @@ async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
+ }
+ EXPORT_SYMBOL_GPL(async_schedule_node);
+
++/**
++ * async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
++ * @func: function to execute asynchronously
++ * @dev: device argument to be passed to function
++ *
++ * @dev is used as both the argument for the function and to provide NUMA
++ * context for where to run the function.
++ *
++ * If the asynchronous execution of @func is scheduled successfully, return
++ * true. Otherwise, do nothing and return false, unlike async_schedule_dev()
++ * that will run the function synchronously then.
++ */
++bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
++{
++ struct async_entry *entry;
++
++ entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);
++
++ /* Give up if there is no memory or too much work. */
++ if (!entry || atomic_read(&entry_count) > MAX_WORK) {
++ kfree(entry);
++ return false;
++ }
++
++ __async_schedule_node_domain(func, dev, dev_to_node(dev),
++ &async_dfl_domain, entry);
++ return true;
++}
++
+ /**
+ * async_synchronize_full - synchronize all asynchronous function calls
+ *
+diff --git a/kernel/crash_core.c b/kernel/crash_core.c
+index d4313b53837e3..755d8d4ef5b08 100644
+--- a/kernel/crash_core.c
++++ b/kernel/crash_core.c
+@@ -377,7 +377,6 @@ static int __init reserve_crashkernel_low(unsigned long long low_size)
+
+ crashk_low_res.start = low_base;
+ crashk_low_res.end = low_base + low_size - 1;
+- insert_resource(&iomem_resource, &crashk_low_res);
+ #endif
+ return 0;
+ }
+@@ -459,8 +458,19 @@ retry:
+
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+- insert_resource(&iomem_resource, &crashk_res);
+ }
++
++static __init int insert_crashkernel_resources(void)
++{
++ if (crashk_res.start < crashk_res.end)
++ insert_resource(&iomem_resource, &crashk_res);
++
++ if (crashk_low_res.start < crashk_low_res.end)
++ insert_resource(&iomem_resource, &crashk_low_res);
++
++ return 0;
++}
++early_initcall(insert_crashkernel_resources);
+ #endif
+
+ int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
+diff --git a/kernel/futex/core.c b/kernel/futex/core.c
+index dad981a865b84..52d0bf67e715f 100644
+--- a/kernel/futex/core.c
++++ b/kernel/futex/core.c
+@@ -626,12 +626,21 @@ retry:
+ }
+
+ /*
+- * PI futexes can not be requeued and must remove themselves from the
+- * hash bucket. The hash bucket lock (i.e. lock_ptr) is held.
++ * PI futexes can not be requeued and must remove themselves from the hash
++ * bucket. The hash bucket lock (i.e. lock_ptr) is held.
+ */
+ void futex_unqueue_pi(struct futex_q *q)
+ {
+- __futex_unqueue(q);
++ /*
++ * If the lock was not acquired (due to timeout or signal) then the
++ * rt_waiter is removed before futex_q is. If this is observed by
++ * an unlocker after dropping the rtmutex wait lock and before
++ * acquiring the hash bucket lock, then the unlocker dequeues the
++ * futex_q from the hash bucket list to guarantee consistent state
++ * vs. userspace. Therefore the dequeue here must be conditional.
++ */
++ if (!plist_node_empty(&q->list))
++ __futex_unqueue(q);
+
+ BUG_ON(!q->pi_state);
+ put_pi_state(q->pi_state);
+diff --git a/kernel/futex/pi.c b/kernel/futex/pi.c
+index 90e5197f4e569..5722467f27379 100644
+--- a/kernel/futex/pi.c
++++ b/kernel/futex/pi.c
+@@ -1135,6 +1135,7 @@ retry:
+
+ hb = futex_hash(&key);
+ spin_lock(&hb->lock);
++retry_hb:
+
+ /*
+ * Check waiters first. We do not trust user space values at
+@@ -1177,12 +1178,17 @@ retry:
+ /*
+ * Futex vs rt_mutex waiter state -- if there are no rt_mutex
+ * waiters even though futex thinks there are, then the waiter
+- * is leaving and the uncontended path is safe to take.
++ * is leaving. The entry needs to be removed from the list so a
++ * new futex_lock_pi() is not using this stale PI-state while
++ * the futex is available in user space again.
++ * There can be more than one task on its way out so it needs
++ * to retry.
+ */
+ rt_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
+ if (!rt_waiter) {
++ __futex_unqueue(top_waiter);
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+- goto do_uncontended;
++ goto retry_hb;
+ }
+
+ get_pi_state(pi_state);
+@@ -1217,7 +1223,6 @@ retry:
+ return ret;
+ }
+
+-do_uncontended:
+ /*
+ * We have no kernel internal state, i.e. no waiters in the
+ * kernel. Waiters which are about to queue themselves are stuck
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index 27ca1c866f298..371eb1711d346 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -600,7 +600,7 @@ int __init early_irq_init(void)
+ mutex_init(&desc[i].request_mutex);
+ init_waitqueue_head(&desc[i].wait_for_threads);
+ desc_set_defaults(i, &desc[i], node, NULL, NULL);
+- irq_resend_init(desc);
++ irq_resend_init(&desc[i]);
+ }
+ return arch_early_irq_init();
+ }
+diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
+index be5642a4ec490..b926c4db8a91d 100644
+--- a/kernel/kexec_core.c
++++ b/kernel/kexec_core.c
+@@ -1254,6 +1254,7 @@ int kernel_kexec(void)
+ kexec_in_progress = true;
+ kernel_restart_prepare("kexec reboot");
+ migrate_to_reboot_cpu();
++ syscore_shutdown();
+
+ /*
+ * migrate_to_reboot_cpu() disables CPU hotplug assuming that
+diff --git a/kernel/power/swap.c b/kernel/power/swap.c
+index a2cb0babb5ec9..d44f5937f1e55 100644
+--- a/kernel/power/swap.c
++++ b/kernel/power/swap.c
+@@ -606,11 +606,11 @@ static int crc32_threadfn(void *data)
+ unsigned i;
+
+ while (1) {
+- wait_event(d->go, atomic_read(&d->ready) ||
++ wait_event(d->go, atomic_read_acquire(&d->ready) ||
+ kthread_should_stop());
+ if (kthread_should_stop()) {
+ d->thr = NULL;
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ break;
+ }
+@@ -619,7 +619,7 @@ static int crc32_threadfn(void *data)
+ for (i = 0; i < d->run_threads; i++)
+ *d->crc32 = crc32_le(*d->crc32,
+ d->unc[i], *d->unc_len[i]);
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ }
+ return 0;
+@@ -649,12 +649,12 @@ static int lzo_compress_threadfn(void *data)
+ struct cmp_data *d = data;
+
+ while (1) {
+- wait_event(d->go, atomic_read(&d->ready) ||
++ wait_event(d->go, atomic_read_acquire(&d->ready) ||
+ kthread_should_stop());
+ if (kthread_should_stop()) {
+ d->thr = NULL;
+ d->ret = -1;
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ break;
+ }
+@@ -663,7 +663,7 @@ static int lzo_compress_threadfn(void *data)
+ d->ret = lzo1x_1_compress(d->unc, d->unc_len,
+ d->cmp + LZO_HEADER, &d->cmp_len,
+ d->wrk);
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ }
+ return 0;
+@@ -798,7 +798,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
+
+ data[thr].unc_len = off;
+
+- atomic_set(&data[thr].ready, 1);
++ atomic_set_release(&data[thr].ready, 1);
+ wake_up(&data[thr].go);
+ }
+
+@@ -806,12 +806,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ break;
+
+ crc->run_threads = thr;
+- atomic_set(&crc->ready, 1);
++ atomic_set_release(&crc->ready, 1);
+ wake_up(&crc->go);
+
+ for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
+ wait_event(data[thr].done,
+- atomic_read(&data[thr].stop));
++ atomic_read_acquire(&data[thr].stop));
+ atomic_set(&data[thr].stop, 0);
+
+ ret = data[thr].ret;
+@@ -850,7 +850,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ }
+ }
+
+- wait_event(crc->done, atomic_read(&crc->stop));
++ wait_event(crc->done, atomic_read_acquire(&crc->stop));
+ atomic_set(&crc->stop, 0);
+ }
+
+@@ -1132,12 +1132,12 @@ static int lzo_decompress_threadfn(void *data)
+ struct dec_data *d = data;
+
+ while (1) {
+- wait_event(d->go, atomic_read(&d->ready) ||
++ wait_event(d->go, atomic_read_acquire(&d->ready) ||
+ kthread_should_stop());
+ if (kthread_should_stop()) {
+ d->thr = NULL;
+ d->ret = -1;
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ break;
+ }
+@@ -1150,7 +1150,7 @@ static int lzo_decompress_threadfn(void *data)
+ flush_icache_range((unsigned long)d->unc,
+ (unsigned long)d->unc + d->unc_len);
+
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ }
+ return 0;
+@@ -1335,7 +1335,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ }
+
+ if (crc->run_threads) {
+- wait_event(crc->done, atomic_read(&crc->stop));
++ wait_event(crc->done, atomic_read_acquire(&crc->stop));
+ atomic_set(&crc->stop, 0);
+ crc->run_threads = 0;
+ }
+@@ -1371,7 +1371,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ pg = 0;
+ }
+
+- atomic_set(&data[thr].ready, 1);
++ atomic_set_release(&data[thr].ready, 1);
+ wake_up(&data[thr].go);
+ }
+
+@@ -1390,7 +1390,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+
+ for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
+ wait_event(data[thr].done,
+- atomic_read(&data[thr].stop));
++ atomic_read_acquire(&data[thr].stop));
+ atomic_set(&data[thr].stop, 0);
+
+ ret = data[thr].ret;
+@@ -1421,7 +1421,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ ret = snapshot_write_next(snapshot);
+ if (ret <= 0) {
+ crc->run_threads = thr + 1;
+- atomic_set(&crc->ready, 1);
++ atomic_set_release(&crc->ready, 1);
+ wake_up(&crc->go);
+ goto out_finish;
+ }
+@@ -1429,13 +1429,13 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ }
+
+ crc->run_threads = thr;
+- atomic_set(&crc->ready, 1);
++ atomic_set_release(&crc->ready, 1);
+ wake_up(&crc->go);
+ }
+
+ out_finish:
+ if (crc->run_threads) {
+- wait_event(crc->done, atomic_read(&crc->stop));
++ wait_event(crc->done, atomic_read_acquire(&crc->stop));
+ atomic_set(&crc->stop, 0);
+ }
+ stop = ktime_get();
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 3ac3c846105fb..157f3ca2a9b56 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -1013,6 +1013,38 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
+ return needmore;
+ }
+
++static void swake_up_one_online_ipi(void *arg)
++{
++ struct swait_queue_head *wqh = arg;
++
++ swake_up_one(wqh);
++}
++
++static void swake_up_one_online(struct swait_queue_head *wqh)
++{
++ int cpu = get_cpu();
++
++ /*
++ * If called from rcutree_report_cpu_starting(), wake up
++ * is dangerous that late in the CPU-down hotplug process. The
++ * scheduler might queue an ignored hrtimer. Defer the wake up
++ * to an online CPU instead.
++ */
++ if (unlikely(cpu_is_offline(cpu))) {
++ int target;
++
++ target = cpumask_any_and(housekeeping_cpumask(HK_TYPE_RCU),
++ cpu_online_mask);
++
++ smp_call_function_single(target, swake_up_one_online_ipi,
++ wqh, 0);
++ put_cpu();
++ } else {
++ put_cpu();
++ swake_up_one(wqh);
++ }
++}
++
+ /*
+ * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
+ * interrupt or softirq handler, in which case we just might immediately
+@@ -1037,7 +1069,7 @@ static void rcu_gp_kthread_wake(void)
+ return;
+ WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
+ WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
+- swake_up_one(&rcu_state.gp_wq);
++ swake_up_one_online(&rcu_state.gp_wq);
+ }
+
+ /*
+diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
+index 6d7cea5d591f9..2ac440bc7e10b 100644
+--- a/kernel/rcu/tree_exp.h
++++ b/kernel/rcu/tree_exp.h
+@@ -173,7 +173,6 @@ static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
+ return ret;
+ }
+
+-
+ /*
+ * Report the exit from RCU read-side critical section for the last task
+ * that queued itself during or before the current expedited preemptible-RCU
+@@ -201,7 +200,7 @@ static void __rcu_report_exp_rnp(struct rcu_node *rnp,
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ if (wake) {
+ smp_mb(); /* EGP done before wake_up(). */
+- swake_up_one(&rcu_state.expedited_wq);
++ swake_up_one_online(&rcu_state.expedited_wq);
+ }
+ break;
+ }
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index c108ed8a9804a..3052b1f1168e2 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -99,6 +99,7 @@ static u64 suspend_start;
+ * Interval: 0.5sec.
+ */
+ #define WATCHDOG_INTERVAL (HZ >> 1)
++#define WATCHDOG_INTERVAL_MAX_NS ((2 * WATCHDOG_INTERVAL) * (NSEC_PER_SEC / HZ))
+
+ /*
+ * Threshold: 0.0312s, when doubled: 0.0625s.
+@@ -134,6 +135,7 @@ static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
+ static DEFINE_SPINLOCK(watchdog_lock);
+ static int watchdog_running;
+ static atomic_t watchdog_reset_pending;
++static int64_t watchdog_max_interval;
+
+ static inline void clocksource_watchdog_lock(unsigned long *flags)
+ {
+@@ -399,8 +401,8 @@ static inline void clocksource_reset_watchdog(void)
+ static void clocksource_watchdog(struct timer_list *unused)
+ {
+ u64 csnow, wdnow, cslast, wdlast, delta;
++ int64_t wd_nsec, cs_nsec, interval;
+ int next_cpu, reset_pending;
+- int64_t wd_nsec, cs_nsec;
+ struct clocksource *cs;
+ enum wd_read_status read_ret;
+ unsigned long extra_wait = 0;
+@@ -470,6 +472,27 @@ static void clocksource_watchdog(struct timer_list *unused)
+ if (atomic_read(&watchdog_reset_pending))
+ continue;
+
++ /*
++ * The processing of timer softirqs can get delayed (usually
++ * on account of ksoftirqd not getting to run in a timely
++ * manner), which causes the watchdog interval to stretch.
++ * Skew detection may fail for longer watchdog intervals
++ * on account of fixed margins being used.
++ * Some clocksources, e.g. acpi_pm, cannot tolerate
++ * watchdog intervals longer than a few seconds.
++ */
++ interval = max(cs_nsec, wd_nsec);
++ if (unlikely(interval > WATCHDOG_INTERVAL_MAX_NS)) {
++ if (system_state > SYSTEM_SCHEDULING &&
++ interval > 2 * watchdog_max_interval) {
++ watchdog_max_interval = interval;
++ pr_warn("Long readout interval, skipping watchdog check: cs_nsec: %lld wd_nsec: %lld\n",
++ cs_nsec, wd_nsec);
++ }
++ watchdog_timer.expires = jiffies;
++ continue;
++ }
++
+ /* Check the deviation from the watchdog clocksource. */
+ md = cs->uncertainty_margin + watchdog->uncertainty_margin;
+ if (abs(cs_nsec - wd_nsec) > md) {
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 2305366a818a7..ca2d59579fce9 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -1574,6 +1574,7 @@ void tick_cancel_sched_timer(int cpu)
+ {
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ ktime_t idle_sleeptime, iowait_sleeptime;
++ unsigned long idle_calls, idle_sleeps;
+
+ # ifdef CONFIG_HIGH_RES_TIMERS
+ if (ts->sched_timer.base)
+@@ -1582,9 +1583,13 @@ void tick_cancel_sched_timer(int cpu)
+
+ idle_sleeptime = ts->idle_sleeptime;
+ iowait_sleeptime = ts->iowait_sleeptime;
++ idle_calls = ts->idle_calls;
++ idle_sleeps = ts->idle_sleeps;
+ memset(ts, 0, sizeof(*ts));
+ ts->idle_sleeptime = idle_sleeptime;
+ ts->iowait_sleeptime = iowait_sleeptime;
++ ts->idle_calls = idle_calls;
++ ts->idle_sleeps = idle_sleeps;
+ }
+ #endif
+
+diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
+index c774e560f2f95..a4dcf0f243521 100644
+--- a/kernel/trace/tracing_map.c
++++ b/kernel/trace/tracing_map.c
+@@ -574,7 +574,12 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
+ }
+
+ memcpy(elt->key, key, map->key_size);
+- entry->val = elt;
++ /*
++ * Ensure the initialization is visible and
++ * publish the elt.
++ */
++ smp_wmb();
++ WRITE_ONCE(entry->val, elt);
+ atomic64_inc(&map->hits);
+
+ return entry->val;
+diff --git a/lib/crypto/mpi/ec.c b/lib/crypto/mpi/ec.c
+index 40f5908e57a4f..e16dca1e23d52 100644
+--- a/lib/crypto/mpi/ec.c
++++ b/lib/crypto/mpi/ec.c
+@@ -584,6 +584,9 @@ void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
+ ctx->a = mpi_copy(a);
+ ctx->b = mpi_copy(b);
+
++ ctx->d = NULL;
++ ctx->t.two_inv_p = NULL;
++
+ ctx->t.p_barrett = use_barrett > 0 ? mpi_barrett_init(ctx->p, 0) : NULL;
+
+ mpi_ec_get_reset(ctx);
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 5a88d6d24d793..4823ad979b72f 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -2141,6 +2141,9 @@ static void __init memmap_init_reserved_pages(void)
+ start = region->base;
+ end = start + region->size;
+
++ if (nid == NUMA_NO_NODE || nid >= MAX_NUMNODES)
++ nid = early_pfn_to_nid(PFN_DOWN(start));
++
+ reserve_bootmem_region(start, end, nid);
+ }
+ }
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 397f2a6e34cbd..bad3039d165e6 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1025,38 +1025,31 @@ out:
+ }
+
+ /*
+- * To record some information during migration, we use some unused
+- * fields (mapping and private) of struct folio of the newly allocated
+- * destination folio. This is safe because nobody is using them
+- * except us.
++ * To record some information during migration, we use unused private
++ * field of struct folio of the newly allocated destination folio.
++ * This is safe because nobody is using it except us.
+ */
+-union migration_ptr {
+- struct anon_vma *anon_vma;
+- struct address_space *mapping;
+-};
+-
+ enum {
+ PAGE_WAS_MAPPED = BIT(0),
+ PAGE_WAS_MLOCKED = BIT(1),
++ PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
+ };
+
+ static void __migrate_folio_record(struct folio *dst,
+- unsigned long old_page_state,
++ int old_page_state,
+ struct anon_vma *anon_vma)
+ {
+- union migration_ptr ptr = { .anon_vma = anon_vma };
+- dst->mapping = ptr.mapping;
+- dst->private = (void *)old_page_state;
++ dst->private = (void *)anon_vma + old_page_state;
+ }
+
+ static void __migrate_folio_extract(struct folio *dst,
+ int *old_page_state,
+ struct anon_vma **anon_vmap)
+ {
+- union migration_ptr ptr = { .mapping = dst->mapping };
+- *anon_vmap = ptr.anon_vma;
+- *old_page_state = (unsigned long)dst->private;
+- dst->mapping = NULL;
++ unsigned long private = (unsigned long)dst->private;
++
++ *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
++ *old_page_state = private & PAGE_OLD_STATES;
+ dst->private = NULL;
+ }
+
+diff --git a/mm/mm_init.c b/mm/mm_init.c
+index 077bfe393b5e2..513bad6727089 100644
+--- a/mm/mm_init.c
++++ b/mm/mm_init.c
+@@ -26,6 +26,7 @@
+ #include <linux/pgtable.h>
+ #include <linux/swap.h>
+ #include <linux/cma.h>
++#include <linux/crash_dump.h>
+ #include "internal.h"
+ #include "slab.h"
+ #include "shuffle.h"
+@@ -381,6 +382,11 @@ static void __init find_zone_movable_pfns_for_nodes(void)
+ goto out;
+ }
+
++ if (is_kdump_kernel()) {
++ pr_warn("The system is under kdump, ignore kernelcore=mirror.\n");
++ goto out;
++ }
++
+ for_each_mem_region(r) {
+ if (memblock_is_mirror(r))
+ continue;
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 733732e7e0ba7..6d2a74138f456 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -3951,14 +3951,9 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
+ else
+ (*no_progress_loops)++;
+
+- /*
+- * Make sure we converge to OOM if we cannot make any progress
+- * several times in the row.
+- */
+- if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
+- /* Before OOM, exhaust highatomic_reserve */
+- return unreserve_highatomic_pageblock(ac, true);
+- }
++ if (*no_progress_loops > MAX_RECLAIM_RETRIES)
++ goto out;
++
+
+ /*
+ * Keep reclaiming pages while there is a chance this will lead
+@@ -4001,6 +3996,11 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
+ schedule_timeout_uninterruptible(1);
+ else
+ cond_resched();
++out:
++ /* Before OOM, exhaust highatomic_reserve */
++ if (!ret)
++ return unreserve_highatomic_pageblock(ac, true);
++
+ return ret;
+ }
+
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 77d91e565045c..338cf946dee8d 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -791,6 +791,13 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
+ if (empty) {
+ unsigned long section_nr = pfn_to_section_nr(pfn);
+
++ /*
++ * Mark the section invalid so that valid_section()
++ * return false. This prevents code from dereferencing
++ * ms->usage array.
++ */
++ ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
++
+ /*
+ * When removing an early section, the usage map is kept (as the
+ * usage maps of other sections fall into the same page). It
+@@ -799,16 +806,10 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
+ * was allocated during boot.
+ */
+ if (!PageReserved(virt_to_page(ms->usage))) {
+- kfree(ms->usage);
+- ms->usage = NULL;
++ kfree_rcu(ms->usage, rcu);
++ WRITE_ONCE(ms->usage, NULL);
+ }
+ memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+- /*
+- * Mark the section invalid so that valid_section()
+- * return false. This prevents code from dereferencing
+- * ms->usage array.
+- */
+- ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
+ }
+
+ /*
+diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
+index 214532173536b..a3b68243fd4b1 100644
+--- a/net/8021q/vlan_netlink.c
++++ b/net/8021q/vlan_netlink.c
+@@ -118,12 +118,16 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
+ }
+ if (data[IFLA_VLAN_INGRESS_QOS]) {
+ nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
++ if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
++ continue;
+ m = nla_data(attr);
+ vlan_dev_set_ingress_priority(dev, m->to, m->from);
+ }
+ }
+ if (data[IFLA_VLAN_EGRESS_QOS]) {
+ nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
++ if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
++ continue;
+ m = nla_data(attr);
+ err = vlan_dev_set_egress_priority(dev, m->from, m->to);
+ if (err)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index ad20bebe153fc..add22ca0dff95 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -11509,6 +11509,7 @@ static struct pernet_operations __net_initdata netdev_net_ops = {
+
+ static void __net_exit default_device_exit_net(struct net *net)
+ {
++ struct netdev_name_node *name_node, *tmp;
+ struct net_device *dev, *aux;
+ /*
+ * Push all migratable network devices back to the
+@@ -11531,6 +11532,14 @@ static void __net_exit default_device_exit_net(struct net *net)
+ snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
+ if (netdev_name_in_use(&init_net, fb_name))
+ snprintf(fb_name, IFNAMSIZ, "dev%%d");
++
++ netdev_for_each_altname_safe(dev, name_node, tmp)
++ if (netdev_name_in_use(&init_net, name_node->name)) {
++ netdev_name_node_del(name_node);
++ synchronize_rcu();
++ __netdev_name_node_alt_destroy(name_node);
++ }
++
+ err = dev_change_net_namespace(dev, &init_net, fb_name);
+ if (err) {
+ pr_emerg("%s: failed to move %s to init_net: %d\n",
+diff --git a/net/core/dev.h b/net/core/dev.h
+index 5aa45f0fd4ae5..3f5eb92396b68 100644
+--- a/net/core/dev.h
++++ b/net/core/dev.h
+@@ -64,6 +64,9 @@ int dev_change_name(struct net_device *dev, const char *newname);
+
+ #define netdev_for_each_altname(dev, namenode) \
+ list_for_each_entry((namenode), &(dev)->name_node->list, list)
++#define netdev_for_each_altname_safe(dev, namenode, next) \
++ list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
++ list)
+
+ int netdev_name_node_alt_create(struct net_device *dev, const char *name);
+ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 1737884be52f8..cee53838310ff 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -83,6 +83,7 @@
+ #include <net/netfilter/nf_conntrack_bpf.h>
+ #include <net/netkit.h>
+ #include <linux/un.h>
++#include <net/xdp_sock_drv.h>
+
+ #include "dev.h"
+
+@@ -4090,10 +4091,46 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
+ memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset);
+ skb_frag_size_add(frag, offset);
+ sinfo->xdp_frags_size += offset;
++ if (rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
++ xsk_buff_get_tail(xdp)->data_end += offset;
+
+ return 0;
+ }
+
++static void bpf_xdp_shrink_data_zc(struct xdp_buff *xdp, int shrink,
++ struct xdp_mem_info *mem_info, bool release)
++{
++ struct xdp_buff *zc_frag = xsk_buff_get_tail(xdp);
++
++ if (release) {
++ xsk_buff_del_tail(zc_frag);
++ __xdp_return(NULL, mem_info, false, zc_frag);
++ } else {
++ zc_frag->data_end -= shrink;
++ }
++}
++
++static bool bpf_xdp_shrink_data(struct xdp_buff *xdp, skb_frag_t *frag,
++ int shrink)
++{
++ struct xdp_mem_info *mem_info = &xdp->rxq->mem;
++ bool release = skb_frag_size(frag) == shrink;
++
++ if (mem_info->type == MEM_TYPE_XSK_BUFF_POOL) {
++ bpf_xdp_shrink_data_zc(xdp, shrink, mem_info, release);
++ goto out;
++ }
++
++ if (release) {
++ struct page *page = skb_frag_page(frag);
++
++ __xdp_return(page_address(page), mem_info, false, NULL);
++ }
++
++out:
++ return release;
++}
++
+ static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
+ {
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+@@ -4108,12 +4145,7 @@ static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
+
+ len_free += shrink;
+ offset -= shrink;
+-
+- if (skb_frag_size(frag) == shrink) {
+- struct page *page = skb_frag_page(frag);
+-
+- __xdp_return(page_address(page), &xdp->rxq->mem,
+- false, NULL);
++ if (bpf_xdp_shrink_data(xdp, frag, shrink)) {
+ n_frags_free++;
+ } else {
+ skb_frag_size_sub(frag, shrink);
+diff --git a/net/core/request_sock.c b/net/core/request_sock.c
+index f35c2e9984062..63de5c635842b 100644
+--- a/net/core/request_sock.c
++++ b/net/core/request_sock.c
+@@ -33,9 +33,6 @@
+
+ void reqsk_queue_alloc(struct request_sock_queue *queue)
+ {
+- spin_lock_init(&queue->rskq_lock);
+-
+- spin_lock_init(&queue->fastopenq.lock);
+ queue->fastopenq.rskq_rst_head = NULL;
+ queue->fastopenq.rskq_rst_tail = NULL;
+ queue->fastopenq.qlen = 0;
+diff --git a/net/core/sock.c b/net/core/sock.c
+index d02534c77413f..e5d43a068f8ed 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -107,6 +107,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/poll.h>
+ #include <linux/tcp.h>
++#include <linux/udp.h>
+ #include <linux/init.h>
+ #include <linux/highmem.h>
+ #include <linux/user_namespace.h>
+@@ -4148,8 +4149,14 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
+ {
+ struct sock *sk = p;
+
+- return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+- sk_busy_loop_timeout(sk, start_time);
++ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
++ return true;
++
++ if (sk_is_udp(sk) &&
++ !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
++ return true;
++
++ return sk_busy_loop_timeout(sk, start_time);
+ }
+ EXPORT_SYMBOL(sk_busy_loop_end);
+ #endif /* CONFIG_NET_RX_BUSY_POLL */
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index ea0b0334a0fb0..1c58bd72e1245 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -330,6 +330,9 @@ lookup_protocol:
+ if (INET_PROTOSW_REUSE & answer_flags)
+ sk->sk_reuse = SK_CAN_REUSE;
+
++ if (INET_PROTOSW_ICSK & answer_flags)
++ inet_init_csk_locks(sk);
++
+ inet = inet_sk(sk);
+ inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
+
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 394a498c28232..762817d6c8d70 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -730,6 +730,10 @@ out:
+ }
+ if (req)
+ reqsk_put(req);
++
++ if (newsk)
++ inet_init_csk_locks(newsk);
++
+ return newsk;
+ out_err:
+ newsk = NULL;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index ff6838ca2e580..7bce79beca2b9 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -722,6 +722,7 @@ void tcp_push(struct sock *sk, int flags, int mss_now,
+ if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
+ set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
++ smp_mb__after_atomic();
+ }
+ /* It is possible TX completion already happened
+ * before we set TSQ_THROTTLED.
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 13a1833a4df52..959bfd9f6344f 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -199,6 +199,9 @@ lookup_protocol:
+ if (INET_PROTOSW_REUSE & answer_flags)
+ sk->sk_reuse = SK_CAN_REUSE;
+
++ if (INET_PROTOSW_ICSK & answer_flags)
++ inet_init_csk_locks(sk);
++
+ inet = inet_sk(sk);
+ inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
+
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 9b06c380866b5..20551cfb7da6d 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -928,14 +928,15 @@ copy_uaddr:
+ */
+ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ {
++ DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
+ struct sock *sk = sock->sk;
+ struct llc_sock *llc = llc_sk(sk);
+- DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
+ int flags = msg->msg_flags;
+ int noblock = flags & MSG_DONTWAIT;
++ int rc = -EINVAL, copied = 0, hdrlen, hh_len;
+ struct sk_buff *skb = NULL;
++ struct net_device *dev;
+ size_t size = 0;
+- int rc = -EINVAL, copied = 0, hdrlen;
+
+ dprintk("%s: sending from %02X to %02X\n", __func__,
+ llc->laddr.lsap, llc->daddr.lsap);
+@@ -955,22 +956,29 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ if (rc)
+ goto out;
+ }
+- hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
++ dev = llc->dev;
++ hh_len = LL_RESERVED_SPACE(dev);
++ hdrlen = llc_ui_header_len(sk, addr);
+ size = hdrlen + len;
+- if (size > llc->dev->mtu)
+- size = llc->dev->mtu;
++ size = min_t(size_t, size, READ_ONCE(dev->mtu));
+ copied = size - hdrlen;
+ rc = -EINVAL;
+ if (copied < 0)
+ goto out;
+ release_sock(sk);
+- skb = sock_alloc_send_skb(sk, size, noblock, &rc);
++ skb = sock_alloc_send_skb(sk, hh_len + size, noblock, &rc);
+ lock_sock(sk);
+ if (!skb)
+ goto out;
+- skb->dev = llc->dev;
++ if (sock_flag(sk, SOCK_ZAPPED) ||
++ llc->dev != dev ||
++ hdrlen != llc_ui_header_len(sk, addr) ||
++ hh_len != LL_RESERVED_SPACE(dev) ||
++ size > READ_ONCE(dev->mtu))
++ goto out;
++ skb->dev = dev;
+ skb->protocol = llc_proto_type(addr->sllc_arphrd);
+- skb_reserve(skb, hdrlen);
++ skb_reserve(skb, hh_len + hdrlen);
+ rc = memcpy_from_msg(skb_put(skb, copied), msg, copied);
+ if (rc)
+ goto out;
+diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
+index 6e387aadffcec..4f16d9c88350b 100644
+--- a/net/llc/llc_core.c
++++ b/net/llc/llc_core.c
+@@ -135,22 +135,15 @@ static struct packet_type llc_packet_type __read_mostly = {
+ .func = llc_rcv,
+ };
+
+-static struct packet_type llc_tr_packet_type __read_mostly = {
+- .type = cpu_to_be16(ETH_P_TR_802_2),
+- .func = llc_rcv,
+-};
+-
+ static int __init llc_init(void)
+ {
+ dev_add_pack(&llc_packet_type);
+- dev_add_pack(&llc_tr_packet_type);
+ return 0;
+ }
+
+ static void __exit llc_exit(void)
+ {
+ dev_remove_pack(&llc_packet_type);
+- dev_remove_pack(&llc_tr_packet_type);
+ }
+
+ module_init(llc_init);
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 0ba613dd1cc47..c33decbb97f2d 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -404,7 +404,10 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
+- if (!(sta->sta.valid_links & BIT(i)))
++ struct link_sta_info *link_sta;
++
++ link_sta = rcu_access_pointer(sta->link[i]);
++ if (!link_sta)
+ continue;
+
+ sta_remove_link(sta, i, false);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index f032c29f1da64..6a987b36d0bb0 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -24,6 +24,7 @@
+ #include <net/sock.h>
+
+ #define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-"))
++#define NFT_SET_MAX_ANONLEN 16
+
+ unsigned int nf_tables_net_id __read_mostly;
+
+@@ -4411,6 +4412,9 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
+ if (p[1] != 'd' || strchr(p + 2, '%'))
+ return -EINVAL;
+
++ if (strnlen(name, NFT_SET_MAX_ANONLEN) >= NFT_SET_MAX_ANONLEN)
++ return -EINVAL;
++
+ inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+ if (inuse == NULL)
+ return -ENOMEM;
+@@ -10905,16 +10909,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+ data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
+
+ switch (data->verdict.code) {
+- default:
+- switch (data->verdict.code & NF_VERDICT_MASK) {
+- case NF_ACCEPT:
+- case NF_DROP:
+- case NF_QUEUE:
+- break;
+- default:
+- return -EINVAL;
+- }
+- fallthrough;
++ case NF_ACCEPT:
++ case NF_DROP:
++ case NF_QUEUE:
++ break;
+ case NFT_CONTINUE:
+ case NFT_BREAK:
+ case NFT_RETURN:
+@@ -10949,6 +10947,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+
+ data->verdict.chain = chain;
+ break;
++ default:
++ return -EINVAL;
+ }
+
+ desc->len = sizeof(data->verdict);
+diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
+index 680fe557686e4..274b6f7e6bb57 100644
+--- a/net/netfilter/nft_chain_filter.c
++++ b/net/netfilter/nft_chain_filter.c
+@@ -357,9 +357,10 @@ static int nf_tables_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+ {
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++ struct nft_base_chain *basechain;
+ struct nftables_pernet *nft_net;
+- struct nft_table *table;
+ struct nft_chain *chain, *nr;
++ struct nft_table *table;
+ struct nft_ctx ctx = {
+ .net = dev_net(dev),
+ };
+@@ -371,7 +372,8 @@ static int nf_tables_netdev_event(struct notifier_block *this,
+ nft_net = nft_pernet(ctx.net);
+ mutex_lock(&nft_net->commit_mutex);
+ list_for_each_entry(table, &nft_net->tables, list) {
+- if (table->family != NFPROTO_NETDEV)
++ if (table->family != NFPROTO_NETDEV &&
++ table->family != NFPROTO_INET)
+ continue;
+
+ ctx.family = table->family;
+@@ -380,6 +382,11 @@ static int nf_tables_netdev_event(struct notifier_block *this,
+ if (!nft_is_base_chain(chain))
+ continue;
+
++ basechain = nft_base_chain(chain);
++ if (table->family == NFPROTO_INET &&
++ basechain->ops.hooknum != NF_INET_INGRESS)
++ continue;
++
+ ctx.chain = chain;
+ nft_netdev_event(event, dev, &ctx);
+ }
+diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
+index 5284cd2ad5327..f0eeda97bfcd9 100644
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -350,6 +350,12 @@ static int nft_target_validate(const struct nft_ctx *ctx,
+ unsigned int hook_mask = 0;
+ int ret;
+
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_BRIDGE &&
++ ctx->family != NFPROTO_ARP)
++ return -EOPNOTSUPP;
++
+ if (nft_is_base_chain(ctx->chain)) {
+ const struct nft_base_chain *basechain =
+ nft_base_chain(ctx->chain);
+@@ -595,6 +601,12 @@ static int nft_match_validate(const struct nft_ctx *ctx,
+ unsigned int hook_mask = 0;
+ int ret;
+
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_BRIDGE &&
++ ctx->family != NFPROTO_ARP)
++ return -EOPNOTSUPP;
++
+ if (nft_is_base_chain(ctx->chain)) {
+ const struct nft_base_chain *basechain =
+ nft_base_chain(ctx->chain);
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index ab3362c483b4a..397351fa4d5f8 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -384,6 +384,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
+ {
+ unsigned int hook_mask = (1 << NF_INET_FORWARD);
+
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_INET)
++ return -EOPNOTSUPP;
++
+ return nft_chain_validate_hooks(ctx->chain, hook_mask);
+ }
+
+diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
+index 79039afde34ec..cefa25e0dbb0a 100644
+--- a/net/netfilter/nft_limit.c
++++ b/net/netfilter/nft_limit.c
+@@ -58,17 +58,19 @@ static inline bool nft_limit_eval(struct nft_limit_priv *priv, u64 cost)
+ static int nft_limit_init(struct nft_limit_priv *priv,
+ const struct nlattr * const tb[], bool pkts)
+ {
++ u64 unit, tokens, rate_with_burst;
+ bool invert = false;
+- u64 unit, tokens;
+
+ if (tb[NFTA_LIMIT_RATE] == NULL ||
+ tb[NFTA_LIMIT_UNIT] == NULL)
+ return -EINVAL;
+
+ priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
++ if (priv->rate == 0)
++ return -EINVAL;
++
+ unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
+- priv->nsecs = unit * NSEC_PER_SEC;
+- if (priv->rate == 0 || priv->nsecs < unit)
++ if (check_mul_overflow(unit, NSEC_PER_SEC, &priv->nsecs))
+ return -EOVERFLOW;
+
+ if (tb[NFTA_LIMIT_BURST])
+@@ -77,18 +79,25 @@ static int nft_limit_init(struct nft_limit_priv *priv,
+ if (pkts && priv->burst == 0)
+ priv->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
+
+- if (priv->rate + priv->burst < priv->rate)
++ if (check_add_overflow(priv->rate, priv->burst, &rate_with_burst))
+ return -EOVERFLOW;
+
+ if (pkts) {
+- tokens = div64_u64(priv->nsecs, priv->rate) * priv->burst;
++ u64 tmp = div64_u64(priv->nsecs, priv->rate);
++
++ if (check_mul_overflow(tmp, priv->burst, &tokens))
++ return -EOVERFLOW;
+ } else {
++ u64 tmp;
++
+ /* The token bucket size limits the number of tokens can be
+ * accumulated. tokens_max specifies the bucket size.
+ * tokens_max = unit * (rate + burst) / rate.
+ */
+- tokens = div64_u64(priv->nsecs * (priv->rate + priv->burst),
+- priv->rate);
++ if (check_mul_overflow(priv->nsecs, rate_with_burst, &tmp))
++ return -EOVERFLOW;
++
++ tokens = div64_u64(tmp, priv->rate);
+ }
+
+ if (tb[NFTA_LIMIT_FLAGS]) {
+diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
+index 583885ce72328..808f5802c2704 100644
+--- a/net/netfilter/nft_nat.c
++++ b/net/netfilter/nft_nat.c
+@@ -143,6 +143,11 @@ static int nft_nat_validate(const struct nft_ctx *ctx,
+ struct nft_nat *priv = nft_expr_priv(expr);
+ int err;
+
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_INET)
++ return -EOPNOTSUPP;
++
+ err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+ if (err < 0)
+ return err;
+diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
+index 35a2c28caa60b..24d9771385729 100644
+--- a/net/netfilter/nft_rt.c
++++ b/net/netfilter/nft_rt.c
+@@ -166,6 +166,11 @@ static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *exp
+ const struct nft_rt *priv = nft_expr_priv(expr);
+ unsigned int hooks;
+
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_INET)
++ return -EOPNOTSUPP;
++
+ switch (priv->key) {
+ case NFT_RT_NEXTHOP4:
+ case NFT_RT_NEXTHOP6:
+diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
+index 9ed85be79452d..f30163e2ca620 100644
+--- a/net/netfilter/nft_socket.c
++++ b/net/netfilter/nft_socket.c
+@@ -242,6 +242,11 @@ static int nft_socket_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+ {
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_INET)
++ return -EOPNOTSUPP;
++
+ return nft_chain_validate_hooks(ctx->chain,
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
+index 13da882669a4e..1d737f89dfc18 100644
+--- a/net/netfilter/nft_synproxy.c
++++ b/net/netfilter/nft_synproxy.c
+@@ -186,7 +186,6 @@ static int nft_synproxy_do_init(const struct nft_ctx *ctx,
+ break;
+ #endif
+ case NFPROTO_INET:
+- case NFPROTO_BRIDGE:
+ err = nf_synproxy_ipv4_init(snet, ctx->net);
+ if (err)
+ goto nf_ct_failure;
+@@ -219,7 +218,6 @@ static void nft_synproxy_do_destroy(const struct nft_ctx *ctx)
+ break;
+ #endif
+ case NFPROTO_INET:
+- case NFPROTO_BRIDGE:
+ nf_synproxy_ipv4_fini(snet, ctx->net);
+ nf_synproxy_ipv6_fini(snet, ctx->net);
+ break;
+@@ -253,6 +251,11 @@ static int nft_synproxy_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+ {
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_INET)
++ return -EOPNOTSUPP;
++
+ return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD));
+ }
+diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
+index ae15cd693f0ec..71412adb73d41 100644
+--- a/net/netfilter/nft_tproxy.c
++++ b/net/netfilter/nft_tproxy.c
+@@ -316,6 +316,11 @@ static int nft_tproxy_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+ {
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_INET)
++ return -EOPNOTSUPP;
++
+ return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING);
+ }
+
+diff --git a/net/netfilter/nft_xfrm.c b/net/netfilter/nft_xfrm.c
+index 452f8587addad..1c866757db552 100644
+--- a/net/netfilter/nft_xfrm.c
++++ b/net/netfilter/nft_xfrm.c
+@@ -235,6 +235,11 @@ static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *e
+ const struct nft_xfrm *priv = nft_expr_priv(expr);
+ unsigned int hooks;
+
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_INET)
++ return -EOPNOTSUPP;
++
+ switch (priv->dir) {
+ case XFRM_POLICY_IN:
+ hooks = (1 << NF_INET_FORWARD) |
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index eb086b06d60da..d9107b545d360 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -374,7 +374,7 @@ static void netlink_skb_destructor(struct sk_buff *skb)
+ if (is_vmalloc_addr(skb->head)) {
+ if (!skb->cloned ||
+ !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
+- vfree(skb->head);
++ vfree_atomic(skb->head);
+
+ skb->head = NULL;
+ }
+diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
+index 01c4cdfef45df..8435a20968ef5 100644
+--- a/net/rds/af_rds.c
++++ b/net/rds/af_rds.c
+@@ -419,7 +419,7 @@ static int rds_recv_track_latency(struct rds_sock *rs, sockptr_t optval,
+
+ rs->rs_rx_traces = trace.rx_traces;
+ for (i = 0; i < rs->rs_rx_traces; i++) {
+- if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) {
++ if (trace.rx_trace_pos[i] >= RDS_MSG_RX_DGRAM_TRACE_MAX) {
+ rs->rs_rx_traces = 0;
+ return -EFAULT;
+ }
+diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
+index fa8aec78f63d7..465bfe5eb0617 100644
+--- a/net/rxrpc/af_rxrpc.c
++++ b/net/rxrpc/af_rxrpc.c
+@@ -258,16 +258,62 @@ static int rxrpc_listen(struct socket *sock, int backlog)
+ return ret;
+ }
+
++/**
++ * rxrpc_kernel_lookup_peer - Obtain remote transport endpoint for an address
++ * @sock: The socket through which it will be accessed
++ * @srx: The network address
++ * @gfp: Allocation flags
++ *
++ * Lookup or create a remote transport endpoint record for the specified
++ * address and return it with a ref held.
++ */
++struct rxrpc_peer *rxrpc_kernel_lookup_peer(struct socket *sock,
++ struct sockaddr_rxrpc *srx, gfp_t gfp)
++{
++ struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
++ int ret;
++
++ ret = rxrpc_validate_address(rx, srx, sizeof(*srx));
++ if (ret < 0)
++ return ERR_PTR(ret);
++
++ return rxrpc_lookup_peer(rx->local, srx, gfp);
++}
++EXPORT_SYMBOL(rxrpc_kernel_lookup_peer);
++
++/**
++ * rxrpc_kernel_get_peer - Get a reference on a peer
++ * @peer: The peer to get a reference on.
++ *
++ * Get a record for the remote peer in a call.
++ */
++struct rxrpc_peer *rxrpc_kernel_get_peer(struct rxrpc_peer *peer)
++{
++ return peer ? rxrpc_get_peer(peer, rxrpc_peer_get_application) : NULL;
++}
++EXPORT_SYMBOL(rxrpc_kernel_get_peer);
++
++/**
++ * rxrpc_kernel_put_peer - Allow a kernel app to drop a peer reference
++ * @peer: The peer to drop a ref on
++ */
++void rxrpc_kernel_put_peer(struct rxrpc_peer *peer)
++{
++ rxrpc_put_peer(peer, rxrpc_peer_put_application);
++}
++EXPORT_SYMBOL(rxrpc_kernel_put_peer);
++
+ /**
+ * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
+ * @sock: The socket on which to make the call
+- * @srx: The address of the peer to contact
++ * @peer: The peer to contact
+ * @key: The security context to use (defaults to socket setting)
+ * @user_call_ID: The ID to use
+ * @tx_total_len: Total length of data to transmit during the call (or -1)
+ * @hard_timeout: The maximum lifespan of the call in sec
+ * @gfp: The allocation constraints
+ * @notify_rx: Where to send notifications instead of socket queue
++ * @service_id: The ID of the service to contact
+ * @upgrade: Request service upgrade for call
+ * @interruptibility: The call is interruptible, or can be canceled.
+ * @debug_id: The debug ID for tracing to be assigned to the call
+@@ -280,13 +326,14 @@ static int rxrpc_listen(struct socket *sock, int backlog)
+ * supplying @srx and @key.
+ */
+ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
+- struct sockaddr_rxrpc *srx,
++ struct rxrpc_peer *peer,
+ struct key *key,
+ unsigned long user_call_ID,
+ s64 tx_total_len,
+ u32 hard_timeout,
+ gfp_t gfp,
+ rxrpc_notify_rx_t notify_rx,
++ u16 service_id,
+ bool upgrade,
+ enum rxrpc_interruptibility interruptibility,
+ unsigned int debug_id)
+@@ -295,13 +342,11 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
+ struct rxrpc_call_params p;
+ struct rxrpc_call *call;
+ struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
+- int ret;
+
+ _enter(",,%x,%lx", key_serial(key), user_call_ID);
+
+- ret = rxrpc_validate_address(rx, srx, sizeof(*srx));
+- if (ret < 0)
+- return ERR_PTR(ret);
++ if (WARN_ON_ONCE(peer->local != rx->local))
++ return ERR_PTR(-EIO);
+
+ lock_sock(&rx->sk);
+
+@@ -319,12 +364,13 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
+
+ memset(&cp, 0, sizeof(cp));
+ cp.local = rx->local;
++ cp.peer = peer;
+ cp.key = key;
+ cp.security_level = rx->min_sec_level;
+ cp.exclusive = false;
+ cp.upgrade = upgrade;
+- cp.service_id = srx->srx_service;
+- call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp, debug_id);
++ cp.service_id = service_id;
++ call = rxrpc_new_client_call(rx, &cp, &p, gfp, debug_id);
+ /* The socket has been unlocked. */
+ if (!IS_ERR(call)) {
+ call->notify_rx = notify_rx;
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index e8b43408136ab..5d5b19f20d1eb 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -364,6 +364,7 @@ struct rxrpc_conn_proto {
+
+ struct rxrpc_conn_parameters {
+ struct rxrpc_local *local; /* Representation of local endpoint */
++ struct rxrpc_peer *peer; /* Representation of remote endpoint */
+ struct key *key; /* Security details */
+ bool exclusive; /* T if conn is exclusive */
+ bool upgrade; /* T if service ID can be upgraded */
+@@ -867,7 +868,6 @@ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long
+ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t, unsigned int);
+ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
+ struct rxrpc_conn_parameters *,
+- struct sockaddr_rxrpc *,
+ struct rxrpc_call_params *, gfp_t,
+ unsigned int);
+ void rxrpc_start_call_timer(struct rxrpc_call *call);
+diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
+index f10b37c147721..0943e54370ba0 100644
+--- a/net/rxrpc/call_object.c
++++ b/net/rxrpc/call_object.c
+@@ -193,7 +193,6 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
+ * Allocate a new client call.
+ */
+ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
+- struct sockaddr_rxrpc *srx,
+ struct rxrpc_conn_parameters *cp,
+ struct rxrpc_call_params *p,
+ gfp_t gfp,
+@@ -211,10 +210,12 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
+ now = ktime_get_real();
+ call->acks_latest_ts = now;
+ call->cong_tstamp = now;
+- call->dest_srx = *srx;
++ call->dest_srx = cp->peer->srx;
++ call->dest_srx.srx_service = cp->service_id;
+ call->interruptibility = p->interruptibility;
+ call->tx_total_len = p->tx_total_len;
+ call->key = key_get(cp->key);
++ call->peer = rxrpc_get_peer(cp->peer, rxrpc_peer_get_call);
+ call->local = rxrpc_get_local(cp->local, rxrpc_local_get_call);
+ call->security_level = cp->security_level;
+ if (p->kernel)
+@@ -306,10 +307,6 @@ static int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp)
+
+ _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
+
+- call->peer = rxrpc_lookup_peer(local, &call->dest_srx, gfp);
+- if (!call->peer)
+- goto error;
+-
+ ret = rxrpc_look_up_bundle(call, gfp);
+ if (ret < 0)
+ goto error;
+@@ -334,7 +331,6 @@ error:
+ */
+ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
+ struct rxrpc_conn_parameters *cp,
+- struct sockaddr_rxrpc *srx,
+ struct rxrpc_call_params *p,
+ gfp_t gfp,
+ unsigned int debug_id)
+@@ -349,13 +345,18 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
+
+ _enter("%p,%lx", rx, p->user_call_ID);
+
++ if (WARN_ON_ONCE(!cp->peer)) {
++ release_sock(&rx->sk);
++ return ERR_PTR(-EIO);
++ }
++
+ limiter = rxrpc_get_call_slot(p, gfp);
+ if (!limiter) {
+ release_sock(&rx->sk);
+ return ERR_PTR(-ERESTARTSYS);
+ }
+
+- call = rxrpc_alloc_client_call(rx, srx, cp, p, gfp, debug_id);
++ call = rxrpc_alloc_client_call(rx, cp, p, gfp, debug_id);
+ if (IS_ERR(call)) {
+ release_sock(&rx->sk);
+ up(limiter);
+diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
+index 8d7a715a0bb1c..49dcda67a0d59 100644
+--- a/net/rxrpc/peer_object.c
++++ b/net/rxrpc/peer_object.c
+@@ -22,6 +22,8 @@
+ #include <net/ip6_route.h>
+ #include "ar-internal.h"
+
++static const struct sockaddr_rxrpc rxrpc_null_addr;
++
+ /*
+ * Hash a peer key.
+ */
+@@ -457,39 +459,53 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
+ }
+
+ /**
+- * rxrpc_kernel_get_peer - Get the peer address of a call
++ * rxrpc_kernel_get_call_peer - Get the peer address of a call
+ * @sock: The socket on which the call is in progress.
+ * @call: The call to query
+- * @_srx: Where to place the result
+ *
+- * Get the address of the remote peer in a call.
++ * Get a record for the remote peer in a call.
+ */
+-void rxrpc_kernel_get_peer(struct socket *sock, struct rxrpc_call *call,
+- struct sockaddr_rxrpc *_srx)
++struct rxrpc_peer *rxrpc_kernel_get_call_peer(struct socket *sock, struct rxrpc_call *call)
+ {
+- *_srx = call->peer->srx;
++ return call->peer;
+ }
+-EXPORT_SYMBOL(rxrpc_kernel_get_peer);
++EXPORT_SYMBOL(rxrpc_kernel_get_call_peer);
+
+ /**
+ * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT
+- * @sock: The socket on which the call is in progress.
+- * @call: The call to query
+- * @_srtt: Where to store the SRTT value.
++ * @peer: The peer to query
+ *
+- * Get the call's peer smoothed RTT in uS.
++ * Get the call's peer smoothed RTT in uS or UINT_MAX if we have no samples.
+ */
+-bool rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call,
+- u32 *_srtt)
++unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *peer)
+ {
+- struct rxrpc_peer *peer = call->peer;
++ return peer->rtt_count > 0 ? peer->srtt_us >> 3 : UINT_MAX;
++}
++EXPORT_SYMBOL(rxrpc_kernel_get_srtt);
+
+- if (peer->rtt_count == 0) {
+- *_srtt = 1000000; /* 1S */
+- return false;
+- }
++/**
++ * rxrpc_kernel_remote_srx - Get the address of a peer
++ * @peer: The peer to query
++ *
++ * Get a pointer to the address from a peer record. The caller is responsible
++ * for making sure that the address is not deallocated.
++ */
++const struct sockaddr_rxrpc *rxrpc_kernel_remote_srx(const struct rxrpc_peer *peer)
++{
++ return peer ? &peer->srx : &rxrpc_null_addr;
++}
++EXPORT_SYMBOL(rxrpc_kernel_remote_srx);
+
+- *_srtt = call->peer->srtt_us >> 3;
+- return true;
++/**
++ * rxrpc_kernel_remote_addr - Get the peer transport address of a call
++ * @peer: The peer to query
++ *
++ * Get a pointer to the transport address from a peer record. The caller is
++ * responsible for making sure that the address is not deallocated.
++ */
++const struct sockaddr *rxrpc_kernel_remote_addr(const struct rxrpc_peer *peer)
++{
++ return (const struct sockaddr *)
++ (peer ? &peer->srx.transport : &rxrpc_null_addr.transport);
+ }
+-EXPORT_SYMBOL(rxrpc_kernel_get_srtt);
++EXPORT_SYMBOL(rxrpc_kernel_remote_addr);
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 8e0b94714e849..5677d5690a022 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -572,6 +572,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
+ __acquires(&call->user_mutex)
+ {
+ struct rxrpc_conn_parameters cp;
++ struct rxrpc_peer *peer;
+ struct rxrpc_call *call;
+ struct key *key;
+
+@@ -584,21 +585,29 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
+ return ERR_PTR(-EDESTADDRREQ);
+ }
+
++ peer = rxrpc_lookup_peer(rx->local, srx, GFP_KERNEL);
++ if (!peer) {
++ release_sock(&rx->sk);
++ return ERR_PTR(-ENOMEM);
++ }
++
+ key = rx->key;
+ if (key && !rx->key->payload.data[0])
+ key = NULL;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.local = rx->local;
++ cp.peer = peer;
+ cp.key = rx->key;
+ cp.security_level = rx->min_sec_level;
+ cp.exclusive = rx->exclusive | p->exclusive;
+ cp.upgrade = p->upgrade;
+ cp.service_id = srx->srx_service;
+- call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL,
++ call = rxrpc_new_client_call(rx, &cp, &p->call, GFP_KERNEL,
+ atomic_inc_return(&rxrpc_debug_id));
+ /* The socket is now unlocked */
+
++ rxrpc_put_peer(peer, rxrpc_peer_put_application);
+ _leave(" = %p\n", call);
+ return call;
+ }
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 1976bd1639863..02c594baa1d93 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -1536,6 +1536,9 @@ tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
+ chain_prev = chain,
+ chain = __tcf_get_next_chain(block, chain),
+ tcf_chain_put(chain_prev)) {
++ if (chain->tmplt_ops && add)
++ chain->tmplt_ops->tmplt_reoffload(chain, true, cb,
++ cb_priv);
+ for (tp = __tcf_get_next_proto(chain, NULL); tp;
+ tp_prev = tp,
+ tp = __tcf_get_next_proto(chain, tp),
+@@ -1551,6 +1554,9 @@ tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
+ goto err_playback_remove;
+ }
+ }
++ if (chain->tmplt_ops && !add)
++ chain->tmplt_ops->tmplt_reoffload(chain, false, cb,
++ cb_priv);
+ }
+
+ return 0;
+@@ -2971,7 +2977,8 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
+ ops = tcf_proto_lookup_ops(name, true, extack);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+- if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
++ if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump ||
++ !ops->tmplt_reoffload) {
+ NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
+ module_put(ops->owner);
+ return -EOPNOTSUPP;
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index e5314a31f75ae..efb9d2811b73d 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -2721,6 +2721,28 @@ static void fl_tmplt_destroy(void *tmplt_priv)
+ kfree(tmplt);
+ }
+
++static void fl_tmplt_reoffload(struct tcf_chain *chain, bool add,
++ flow_setup_cb_t *cb, void *cb_priv)
++{
++ struct fl_flow_tmplt *tmplt = chain->tmplt_priv;
++ struct flow_cls_offload cls_flower = {};
++
++ cls_flower.rule = flow_rule_alloc(0);
++ if (!cls_flower.rule)
++ return;
++
++ cls_flower.common.chain_index = chain->index;
++ cls_flower.command = add ? FLOW_CLS_TMPLT_CREATE :
++ FLOW_CLS_TMPLT_DESTROY;
++ cls_flower.cookie = (unsigned long) tmplt;
++ cls_flower.rule->match.dissector = &tmplt->dissector;
++ cls_flower.rule->match.mask = &tmplt->mask;
++ cls_flower.rule->match.key = &tmplt->dummy_key;
++
++ cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
++ kfree(cls_flower.rule);
++}
++
+ static int fl_dump_key_val(struct sk_buff *skb,
+ void *val, int val_type,
+ void *mask, int mask_type, int len)
+@@ -3628,6 +3650,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
+ .bind_class = fl_bind_class,
+ .tmplt_create = fl_tmplt_create,
+ .tmplt_destroy = fl_tmplt_destroy,
++ .tmplt_reoffload = fl_tmplt_reoffload,
+ .tmplt_dump = fl_tmplt_dump,
+ .get_exts = fl_get_exts,
+ .owner = THIS_MODULE,
+diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
+index 5cc376834c575..fb9e5cc1285ec 100644
+--- a/net/smc/smc_diag.c
++++ b/net/smc/smc_diag.c
+@@ -163,7 +163,7 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
+ }
+ if (smc_conn_lgr_valid(&smc->conn) && smc->conn.lgr->is_smcd &&
+ (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
+- !list_empty(&smc->conn.lgr->list)) {
++ !list_empty(&smc->conn.lgr->list) && smc->conn.rmb_desc) {
+ struct smc_connection *conn = &smc->conn;
+ struct smcd_diag_dmbinfo dinfo;
+ struct smcd_dev *smcd = conn->lgr->smcd;
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 998687421fa6a..e0ce4276274be 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -717,12 +717,12 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
+ ARRAY_SIZE(rqstp->rq_bvec), xdr);
+
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+- count, 0);
++ count, rqstp->rq_res.len);
+ err = sock_sendmsg(svsk->sk_sock, &msg);
+ if (err == -ECONNREFUSED) {
+ /* ICMP error on earlier request. */
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+- count, 0);
++ count, rqstp->rq_res.len);
+ err = sock_sendmsg(svsk->sk_sock, &msg);
+ }
+
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 3da0b52f308d4..688e641cd2784 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -167,8 +167,10 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ contd = XDP_PKT_CONTD;
+
+ err = __xsk_rcv_zc(xs, xskb, len, contd);
+- if (err || likely(!frags))
+- goto out;
++ if (err)
++ goto err;
++ if (likely(!frags))
++ return 0;
+
+ xskb_list = &xskb->pool->xskb_list;
+ list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
+@@ -177,11 +179,13 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ len = pos->xdp.data_end - pos->xdp.data;
+ err = __xsk_rcv_zc(xs, pos, len, contd);
+ if (err)
+- return err;
++ goto err;
+ list_del(&pos->xskb_list_node);
+ }
+
+-out:
++ return 0;
++err:
++ xsk_buff_free(xdp);
+ return err;
+ }
+
+diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
+index 49cb9f9a09bee..b0a611677865d 100644
+--- a/net/xdp/xsk_buff_pool.c
++++ b/net/xdp/xsk_buff_pool.c
+@@ -541,6 +541,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
+
+ xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
+ xskb->xdp.data_meta = xskb->xdp.data;
++ xskb->xdp.flags = 0;
+
+ if (pool->dma_need_sync) {
+ dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
+diff --git a/scripts/get_abi.pl b/scripts/get_abi.pl
+index 0ffd5531242aa..408bfd0216da0 100755
+--- a/scripts/get_abi.pl
++++ b/scripts/get_abi.pl
+@@ -98,7 +98,7 @@ sub parse_abi {
+ $name =~ s,.*/,,;
+
+ my $fn = $file;
+- $fn =~ s,Documentation/ABI/,,;
++ $fn =~ s,.*Documentation/ABI/,,;
+
+ my $nametag = "File $fn";
+ $data{$nametag}->{what} = "File $name";
+diff --git a/security/security.c b/security/security.c
+index dcb3e7014f9bd..266cec94369b2 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -2648,6 +2648,24 @@ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ }
+ EXPORT_SYMBOL_GPL(security_file_ioctl);
+
++/**
++ * security_file_ioctl_compat() - Check if an ioctl is allowed in compat mode
++ * @file: associated file
++ * @cmd: ioctl cmd
++ * @arg: ioctl arguments
++ *
++ * Compat version of security_file_ioctl() that correctly handles 32-bit
++ * processes running on 64-bit kernels.
++ *
++ * Return: Returns 0 if permission is granted.
++ */
++int security_file_ioctl_compat(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ return call_int_hook(file_ioctl_compat, 0, file, cmd, arg);
++}
++EXPORT_SYMBOL_GPL(security_file_ioctl_compat);
++
+ static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
+ {
+ /*
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 0fb890bd72cb6..102a1f0c09e0c 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -3725,6 +3725,33 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
+ return error;
+ }
+
++static int selinux_file_ioctl_compat(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ /*
++ * If we are in a 64-bit kernel running 32-bit userspace, we need to
++ * make sure we don't compare 32-bit flags to 64-bit flags.
++ */
++ switch (cmd) {
++ case FS_IOC32_GETFLAGS:
++ cmd = FS_IOC_GETFLAGS;
++ break;
++ case FS_IOC32_SETFLAGS:
++ cmd = FS_IOC_SETFLAGS;
++ break;
++ case FS_IOC32_GETVERSION:
++ cmd = FS_IOC_GETVERSION;
++ break;
++ case FS_IOC32_SETVERSION:
++ cmd = FS_IOC_SETVERSION;
++ break;
++ default:
++ break;
++ }
++
++ return selinux_file_ioctl(file, cmd, arg);
++}
++
+ static int default_noexec __ro_after_init;
+
+ static int file_map_prot_check(struct file *file, unsigned long prot, int shared)
+@@ -7037,6 +7064,7 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = {
+ LSM_HOOK_INIT(file_permission, selinux_file_permission),
+ LSM_HOOK_INIT(file_alloc_security, selinux_file_alloc_security),
+ LSM_HOOK_INIT(file_ioctl, selinux_file_ioctl),
++ LSM_HOOK_INIT(file_ioctl_compat, selinux_file_ioctl_compat),
+ LSM_HOOK_INIT(mmap_file, selinux_mmap_file),
+ LSM_HOOK_INIT(mmap_addr, selinux_mmap_addr),
+ LSM_HOOK_INIT(file_mprotect, selinux_file_mprotect),
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 65130a791f573..1f1ea8529421f 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -4973,6 +4973,7 @@ static struct security_hook_list smack_hooks[] __ro_after_init = {
+
+ LSM_HOOK_INIT(file_alloc_security, smack_file_alloc_security),
+ LSM_HOOK_INIT(file_ioctl, smack_file_ioctl),
++ LSM_HOOK_INIT(file_ioctl_compat, smack_file_ioctl),
+ LSM_HOOK_INIT(file_lock, smack_file_lock),
+ LSM_HOOK_INIT(file_fcntl, smack_file_fcntl),
+ LSM_HOOK_INIT(mmap_file, smack_mmap_file),
+diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
+index 255f1b4702955..1bf4b3da9cbc5 100644
+--- a/security/tomoyo/tomoyo.c
++++ b/security/tomoyo/tomoyo.c
+@@ -568,6 +568,7 @@ static struct security_hook_list tomoyo_hooks[] __ro_after_init = {
+ LSM_HOOK_INIT(path_rename, tomoyo_path_rename),
+ LSM_HOOK_INIT(inode_getattr, tomoyo_inode_getattr),
+ LSM_HOOK_INIT(file_ioctl, tomoyo_file_ioctl),
++ LSM_HOOK_INIT(file_ioctl_compat, tomoyo_file_ioctl),
+ LSM_HOOK_INIT(path_chmod, tomoyo_path_chmod),
+ LSM_HOOK_INIT(path_chown, tomoyo_path_chown),
+ LSM_HOOK_INIT(path_chroot, tomoyo_path_chroot),
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 4e42847297732..1e788859c863b 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -1232,11 +1232,11 @@ static int fill_sdw_codec_dlc(struct device *dev,
+ else if (is_unique_device(adr_link, sdw_version, mfg_id, part_id,
+ class_id, adr_index))
+ codec->name = devm_kasprintf(dev, GFP_KERNEL,
+- "sdw:%01x:%04x:%04x:%02x", link_id,
++ "sdw:0:%01x:%04x:%04x:%02x", link_id,
+ mfg_id, part_id, class_id);
+ else
+ codec->name = devm_kasprintf(dev, GFP_KERNEL,
+- "sdw:%01x:%04x:%04x:%02x:%01x", link_id,
++ "sdw:0:%01x:%04x:%04x:%02x:%01x", link_id,
+ mfg_id, part_id, class_id, unique_id);
+
+ if (!codec->name)
+diff --git a/tools/testing/selftests/drivers/net/bonding/bond_options.sh b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
+index c54d1697f439a..d508486cc0bdc 100755
+--- a/tools/testing/selftests/drivers/net/bonding/bond_options.sh
++++ b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
+@@ -162,7 +162,7 @@ prio_arp()
+ local mode=$1
+
+ for primary_reselect in 0 1 2; do
+- prio_test "mode active-backup arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect"
++ prio_test "mode $mode arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect"
+ log_test "prio" "$mode arp_ip_target primary_reselect $primary_reselect"
+ done
+ }
+@@ -178,7 +178,7 @@ prio_ns()
+ fi
+
+ for primary_reselect in 0 1 2; do
+- prio_test "mode active-backup arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect"
++ prio_test "mode $mode arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect"
+ log_test "prio" "$mode ns_ip6_target primary_reselect $primary_reselect"
+ done
+ }
+@@ -194,9 +194,9 @@ prio()
+
+ for mode in $modes; do
+ prio_miimon $mode
+- prio_arp $mode
+- prio_ns $mode
+ done
++ prio_arp "active-backup"
++ prio_ns "active-backup"
+ }
+
+ arp_validate_test()
+diff --git a/tools/testing/selftests/drivers/net/bonding/settings b/tools/testing/selftests/drivers/net/bonding/settings
+index 6091b45d226ba..79b65bdf05db6 100644
+--- a/tools/testing/selftests/drivers/net/bonding/settings
++++ b/tools/testing/selftests/drivers/net/bonding/settings
+@@ -1 +1 @@
+-timeout=120
++timeout=1200
+diff --git a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
+index 1b08e042cf942..185b02d2d4cd1 100755
+--- a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
++++ b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
+@@ -269,6 +269,7 @@ for port in 0 1; do
+ echo 1 > $NSIM_DEV_SYS/new_port
+ fi
+ NSIM_NETDEV=`get_netdev_name old_netdevs`
++ ifconfig $NSIM_NETDEV up
+
+ msg="new NIC device created"
+ exp0=( 0 0 0 0 )
+@@ -430,6 +431,7 @@ for port in 0 1; do
+ fi
+
+ echo $port > $NSIM_DEV_SYS/new_port
++ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ifconfig $NSIM_NETDEV up
+
+ overflow_table0 "overflow NIC table"
+@@ -487,6 +489,7 @@ for port in 0 1; do
+ fi
+
+ echo $port > $NSIM_DEV_SYS/new_port
++ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ifconfig $NSIM_NETDEV up
+
+ overflow_table0 "overflow NIC table"
+@@ -543,6 +546,7 @@ for port in 0 1; do
+ fi
+
+ echo $port > $NSIM_DEV_SYS/new_port
++ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ifconfig $NSIM_NETDEV up
+
+ overflow_table0 "destroy NIC"
+@@ -572,6 +576,7 @@ for port in 0 1; do
+ fi
+
+ echo $port > $NSIM_DEV_SYS/new_port
++ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ifconfig $NSIM_NETDEV up
+
+ msg="create VxLANs v6"
+@@ -632,6 +637,7 @@ for port in 0 1; do
+ fi
+
+ echo $port > $NSIM_DEV_SYS/new_port
++ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ifconfig $NSIM_NETDEV up
+
+ echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error
+@@ -687,6 +693,7 @@ for port in 0 1; do
+ fi
+
+ echo $port > $NSIM_DEV_SYS/new_port
++ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ifconfig $NSIM_NETDEV up
+
+ msg="create VxLANs v6"
+@@ -746,6 +753,7 @@ for port in 0 1; do
+ fi
+
+ echo $port > $NSIM_DEV_SYS/new_port
++ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ifconfig $NSIM_NETDEV up
+
+ msg="create VxLANs v6"
+@@ -876,6 +884,7 @@ msg="re-add a port"
+
+ echo 2 > $NSIM_DEV_SYS/del_port
+ echo 2 > $NSIM_DEV_SYS/new_port
++NSIM_NETDEV=`get_netdev_name old_netdevs`
+ check_tables
+
+ msg="replace VxLAN in overflow table"
+diff --git a/tools/testing/selftests/mm/hugepage-vmemmap.c b/tools/testing/selftests/mm/hugepage-vmemmap.c
+index 5b354c209e936..894d28c3dd478 100644
+--- a/tools/testing/selftests/mm/hugepage-vmemmap.c
++++ b/tools/testing/selftests/mm/hugepage-vmemmap.c
+@@ -10,10 +10,7 @@
+ #include <unistd.h>
+ #include <sys/mman.h>
+ #include <fcntl.h>
+-
+-#define MAP_LENGTH (2UL * 1024 * 1024)
+-
+-#define PAGE_SIZE 4096
++#include "vm_util.h"
+
+ #define PAGE_COMPOUND_HEAD (1UL << 15)
+ #define PAGE_COMPOUND_TAIL (1UL << 16)
+@@ -39,6 +36,9 @@
+ #define MAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB)
+ #endif
+
++static size_t pagesize;
++static size_t maplength;
++
+ static void write_bytes(char *addr, size_t length)
+ {
+ unsigned long i;
+@@ -56,7 +56,7 @@ static unsigned long virt_to_pfn(void *addr)
+ if (fd < 0)
+ return -1UL;
+
+- lseek(fd, (unsigned long)addr / PAGE_SIZE * sizeof(pagemap), SEEK_SET);
++ lseek(fd, (unsigned long)addr / pagesize * sizeof(pagemap), SEEK_SET);
+ read(fd, &pagemap, sizeof(pagemap));
+ close(fd);
+
+@@ -86,7 +86,7 @@ static int check_page_flags(unsigned long pfn)
+ * this also verifies kernel has correctly set the fake page_head to tail
+ * while hugetlb_free_vmemmap is enabled.
+ */
+- for (i = 1; i < MAP_LENGTH / PAGE_SIZE; i++) {
++ for (i = 1; i < maplength / pagesize; i++) {
+ read(fd, &pageflags, sizeof(pageflags));
+ if ((pageflags & TAIL_PAGE_FLAGS) != TAIL_PAGE_FLAGS ||
+ (pageflags & HEAD_PAGE_FLAGS) == HEAD_PAGE_FLAGS) {
+@@ -106,18 +106,25 @@ int main(int argc, char **argv)
+ void *addr;
+ unsigned long pfn;
+
+- addr = mmap(MAP_ADDR, MAP_LENGTH, PROT_READ | PROT_WRITE, MAP_FLAGS, -1, 0);
++ pagesize = psize();
++ maplength = default_huge_page_size();
++ if (!maplength) {
++ printf("Unable to determine huge page size\n");
++ exit(1);
++ }
++
++ addr = mmap(MAP_ADDR, maplength, PROT_READ | PROT_WRITE, MAP_FLAGS, -1, 0);
+ if (addr == MAP_FAILED) {
+ perror("mmap");
+ exit(1);
+ }
+
+ /* Trigger allocation of HugeTLB page. */
+- write_bytes(addr, MAP_LENGTH);
++ write_bytes(addr, maplength);
+
+ pfn = virt_to_pfn(addr);
+ if (pfn == -1UL) {
+- munmap(addr, MAP_LENGTH);
++ munmap(addr, maplength);
+ perror("virt_to_pfn");
+ exit(1);
+ }
+@@ -125,13 +132,13 @@ int main(int argc, char **argv)
+ printf("Returned address is %p whose pfn is %lx\n", addr, pfn);
+
+ if (check_page_flags(pfn) < 0) {
+- munmap(addr, MAP_LENGTH);
++ munmap(addr, maplength);
+ perror("check_page_flags");
+ exit(1);
+ }
+
+ /* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
+- if (munmap(addr, MAP_LENGTH)) {
++ if (munmap(addr, maplength)) {
+ perror("munmap");
+ exit(1);
+ }
+diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
+index 8da562a9ae87e..19ff750516609 100644
+--- a/tools/testing/selftests/net/config
++++ b/tools/testing/selftests/net/config
+@@ -1,5 +1,6 @@
+ CONFIG_USER_NS=y
+ CONFIG_NET_NS=y
++CONFIG_BONDING=m
+ CONFIG_BPF_SYSCALL=y
+ CONFIG_TEST_BPF=m
+ CONFIG_NUMA=y
+@@ -14,9 +15,13 @@ CONFIG_VETH=y
+ CONFIG_NET_IPVTI=y
+ CONFIG_IPV6_VTI=y
+ CONFIG_DUMMY=y
++CONFIG_BRIDGE_VLAN_FILTERING=y
+ CONFIG_BRIDGE=y
++CONFIG_CRYPTO_CHACHA20POLY1305=m
+ CONFIG_VLAN_8021Q=y
+ CONFIG_IFB=y
++CONFIG_INET_DIAG=y
++CONFIG_IP_GRE=m
+ CONFIG_NETFILTER=y
+ CONFIG_NETFILTER_ADVANCED=y
+ CONFIG_NF_CONNTRACK=m
+@@ -25,15 +30,36 @@ CONFIG_IP6_NF_IPTABLES=m
+ CONFIG_IP_NF_IPTABLES=m
+ CONFIG_IP6_NF_NAT=m
+ CONFIG_IP_NF_NAT=m
++CONFIG_IPV6_GRE=m
++CONFIG_IPV6_SEG6_LWTUNNEL=y
++CONFIG_L2TP_ETH=m
++CONFIG_L2TP_IP=m
++CONFIG_L2TP=m
++CONFIG_L2TP_V3=y
++CONFIG_MACSEC=m
++CONFIG_MACVLAN=y
++CONFIG_MACVTAP=y
++CONFIG_MPLS=y
++CONFIG_MPTCP=y
+ CONFIG_NF_TABLES=m
+ CONFIG_NF_TABLES_IPV6=y
+ CONFIG_NF_TABLES_IPV4=y
+ CONFIG_NFT_NAT=m
++CONFIG_NET_ACT_GACT=m
++CONFIG_NET_CLS_BASIC=m
++CONFIG_NET_CLS_U32=m
++CONFIG_NET_IPGRE_DEMUX=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_SCH_FQ_CODEL=m
++CONFIG_NET_SCH_HTB=m
+ CONFIG_NET_SCH_FQ=m
+ CONFIG_NET_SCH_ETF=m
+ CONFIG_NET_SCH_NETEM=y
++CONFIG_PSAMPLE=m
++CONFIG_TCP_MD5SIG=y
+ CONFIG_TEST_BLACKHOLE_DEV=m
+ CONFIG_KALLSYMS=y
++CONFIG_TLS=m
+ CONFIG_TRACEPOINTS=y
+ CONFIG_NET_DROP_MONITOR=m
+ CONFIG_NETDEVSIM=m
+@@ -48,7 +74,9 @@ CONFIG_BAREUDP=m
+ CONFIG_IPV6_IOAM6_LWTUNNEL=y
+ CONFIG_CRYPTO_SM4_GENERIC=y
+ CONFIG_AMT=m
++CONFIG_TUN=y
+ CONFIG_VXLAN=m
+ CONFIG_IP_SCTP=m
+ CONFIG_NETFILTER_XT_MATCH_POLICY=m
+ CONFIG_CRYPTO_ARIA=y
++CONFIG_XFRM_INTERFACE=m
+diff --git a/tools/testing/selftests/net/rps_default_mask.sh b/tools/testing/selftests/net/rps_default_mask.sh
+index a26c5624429fb..4287a85298907 100755
+--- a/tools/testing/selftests/net/rps_default_mask.sh
++++ b/tools/testing/selftests/net/rps_default_mask.sh
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+
+ readonly ksft_skip=4
+@@ -33,6 +33,10 @@ chk_rps() {
+
+ rps_mask=$($cmd /sys/class/net/$dev_name/queues/rx-0/rps_cpus)
+ printf "%-60s" "$msg"
++
++ # In case there is more than 32 CPUs we need to remove commas from masks
++ rps_mask=${rps_mask//,}
++ expected_rps_mask=${expected_rps_mask//,}
+ if [ $rps_mask -eq $expected_rps_mask ]; then
+ echo "[ ok ]"
+ else
+diff --git a/tools/testing/selftests/net/so_incoming_cpu.c b/tools/testing/selftests/net/so_incoming_cpu.c
+index a148181641026..e9fa14e107322 100644
+--- a/tools/testing/selftests/net/so_incoming_cpu.c
++++ b/tools/testing/selftests/net/so_incoming_cpu.c
+@@ -3,19 +3,16 @@
+ #define _GNU_SOURCE
+ #include <sched.h>
+
++#include <fcntl.h>
++
+ #include <netinet/in.h>
+ #include <sys/socket.h>
+ #include <sys/sysinfo.h>
+
+ #include "../kselftest_harness.h"
+
+-#define CLIENT_PER_SERVER 32 /* More sockets, more reliable */
+-#define NR_SERVER self->nproc
+-#define NR_CLIENT (CLIENT_PER_SERVER * NR_SERVER)
+-
+ FIXTURE(so_incoming_cpu)
+ {
+- int nproc;
+ int *servers;
+ union {
+ struct sockaddr addr;
+@@ -56,12 +53,47 @@ FIXTURE_VARIANT_ADD(so_incoming_cpu, after_all_listen)
+ .when_to_set = AFTER_ALL_LISTEN,
+ };
+
++static void write_sysctl(struct __test_metadata *_metadata,
++ char *filename, char *string)
++{
++ int fd, len, ret;
++
++ fd = open(filename, O_WRONLY);
++ ASSERT_NE(fd, -1);
++
++ len = strlen(string);
++ ret = write(fd, string, len);
++ ASSERT_EQ(ret, len);
++}
++
++static void setup_netns(struct __test_metadata *_metadata)
++{
++ ASSERT_EQ(unshare(CLONE_NEWNET), 0);
++ ASSERT_EQ(system("ip link set lo up"), 0);
++
++ write_sysctl(_metadata, "/proc/sys/net/ipv4/ip_local_port_range", "10000 60001");
++ write_sysctl(_metadata, "/proc/sys/net/ipv4/tcp_tw_reuse", "0");
++}
++
++#define NR_PORT (60001 - 10000 - 1)
++#define NR_CLIENT_PER_SERVER_DEFAULT 32
++static int nr_client_per_server, nr_server, nr_client;
++
+ FIXTURE_SETUP(so_incoming_cpu)
+ {
+- self->nproc = get_nprocs();
+- ASSERT_LE(2, self->nproc);
++ setup_netns(_metadata);
++
++ nr_server = get_nprocs();
++ ASSERT_LE(2, nr_server);
++
++ if (NR_CLIENT_PER_SERVER_DEFAULT * nr_server < NR_PORT)
++ nr_client_per_server = NR_CLIENT_PER_SERVER_DEFAULT;
++ else
++ nr_client_per_server = NR_PORT / nr_server;
++
++ nr_client = nr_client_per_server * nr_server;
+
+- self->servers = malloc(sizeof(int) * NR_SERVER);
++ self->servers = malloc(sizeof(int) * nr_server);
+ ASSERT_NE(self->servers, NULL);
+
+ self->in_addr.sin_family = AF_INET;
+@@ -74,7 +106,7 @@ FIXTURE_TEARDOWN(so_incoming_cpu)
+ {
+ int i;
+
+- for (i = 0; i < NR_SERVER; i++)
++ for (i = 0; i < nr_server; i++)
+ close(self->servers[i]);
+
+ free(self->servers);
+@@ -110,10 +142,10 @@ int create_server(struct __test_metadata *_metadata,
+ if (variant->when_to_set == BEFORE_LISTEN)
+ set_so_incoming_cpu(_metadata, fd, cpu);
+
+- /* We don't use CLIENT_PER_SERVER here not to block
++ /* We don't use nr_client_per_server here not to block
+ * this test at connect() if SO_INCOMING_CPU is broken.
+ */
+- ret = listen(fd, NR_CLIENT);
++ ret = listen(fd, nr_client);
+ ASSERT_EQ(ret, 0);
+
+ if (variant->when_to_set == AFTER_LISTEN)
+@@ -128,7 +160,7 @@ void create_servers(struct __test_metadata *_metadata,
+ {
+ int i, ret;
+
+- for (i = 0; i < NR_SERVER; i++) {
++ for (i = 0; i < nr_server; i++) {
+ self->servers[i] = create_server(_metadata, self, variant, i);
+
+ if (i == 0) {
+@@ -138,7 +170,7 @@ void create_servers(struct __test_metadata *_metadata,
+ }
+
+ if (variant->when_to_set == AFTER_ALL_LISTEN) {
+- for (i = 0; i < NR_SERVER; i++)
++ for (i = 0; i < nr_server; i++)
+ set_so_incoming_cpu(_metadata, self->servers[i], i);
+ }
+ }
+@@ -149,7 +181,7 @@ void create_clients(struct __test_metadata *_metadata,
+ cpu_set_t cpu_set;
+ int i, j, fd, ret;
+
+- for (i = 0; i < NR_SERVER; i++) {
++ for (i = 0; i < nr_server; i++) {
+ CPU_ZERO(&cpu_set);
+
+ CPU_SET(i, &cpu_set);
+@@ -162,7 +194,7 @@ void create_clients(struct __test_metadata *_metadata,
+ ret = sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
+ ASSERT_EQ(ret, 0);
+
+- for (j = 0; j < CLIENT_PER_SERVER; j++) {
++ for (j = 0; j < nr_client_per_server; j++) {
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ ASSERT_NE(fd, -1);
+
+@@ -180,8 +212,8 @@ void verify_incoming_cpu(struct __test_metadata *_metadata,
+ int i, j, fd, cpu, ret, total = 0;
+ socklen_t len = sizeof(int);
+
+- for (i = 0; i < NR_SERVER; i++) {
+- for (j = 0; j < CLIENT_PER_SERVER; j++) {
++ for (i = 0; i < nr_server; i++) {
++ for (j = 0; j < nr_client_per_server; j++) {
+ /* If we see -EAGAIN here, SO_INCOMING_CPU is broken */
+ fd = accept(self->servers[i], &self->addr, &self->addrlen);
+ ASSERT_NE(fd, -1);
+@@ -195,7 +227,7 @@ void verify_incoming_cpu(struct __test_metadata *_metadata,
+ }
+ }
+
+- ASSERT_EQ(total, NR_CLIENT);
++ ASSERT_EQ(total, nr_client);
+ TH_LOG("SO_INCOMING_CPU is very likely to be "
+ "working correctly with %d sockets.", total);
+ }
+diff --git a/tools/testing/selftests/riscv/hwprobe/cbo.c b/tools/testing/selftests/riscv/hwprobe/cbo.c
+index 50a2cc8aef387..c000942c80483 100644
+--- a/tools/testing/selftests/riscv/hwprobe/cbo.c
++++ b/tools/testing/selftests/riscv/hwprobe/cbo.c
+@@ -36,16 +36,14 @@ static void sigill_handler(int sig, siginfo_t *info, void *context)
+ regs[0] += 4;
+ }
+
+-static void cbo_insn(char *base, int fn)
+-{
+- uint32_t insn = MK_CBO(fn);
+-
+- asm volatile(
+- "mv a0, %0\n"
+- "li a1, %1\n"
+- ".4byte %2\n"
+- : : "r" (base), "i" (fn), "i" (insn) : "a0", "a1", "memory");
+-}
++#define cbo_insn(base, fn) \
++({ \
++ asm volatile( \
++ "mv a0, %0\n" \
++ "li a1, %1\n" \
++ ".4byte %2\n" \
++ : : "r" (base), "i" (fn), "i" (MK_CBO(fn)) : "a0", "a1", "memory"); \
++})
+
+ static void cbo_inval(char *base) { cbo_insn(base, 0); }
+ static void cbo_clean(char *base) { cbo_insn(base, 1); }