author     Mike Pagano <mpagano@gentoo.org>   2023-07-11 14:38:03 -0400
committer  Mike Pagano <mpagano@gentoo.org>   2023-07-11 14:38:03 -0400
commit     eab0d513c7679ac29d6350f6a8e086e9b47d9a27 (patch)
tree       daf6490a382ffb337df5646a745fce72971dba92
parent     Remove redundant patch (diff)
download   linux-patches-6.3.tar.gz
           linux-patches-6.3.tar.bz2
           linux-patches-6.3.zip
Linux patch 6.3.13 (6.3-18, 6.3-17, 6.3)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README             |     4
-rw-r--r--  1012_linux-6.3.13.patch | 18968
2 files changed, 18972 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index db990ac3..a55d4916 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch: 1011_linux-6.3.12.patch
From: https://www.kernel.org
Desc: Linux 6.3.12
+Patch: 1012_linux-6.3.13.patch
+From: https://www.kernel.org
+Desc: Linux 6.3.13
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1012_linux-6.3.13.patch b/1012_linux-6.3.13.patch
new file mode 100644
index 00000000..c71951c0
--- /dev/null
+++ b/1012_linux-6.3.13.patch
@@ -0,0 +1,18968 @@
+diff --git a/Documentation/devicetree/bindings/sound/mediatek,mt8188-afe.yaml b/Documentation/devicetree/bindings/sound/mediatek,mt8188-afe.yaml
+index 82ccb32f08f27..9e877f0d19fbb 100644
+--- a/Documentation/devicetree/bindings/sound/mediatek,mt8188-afe.yaml
++++ b/Documentation/devicetree/bindings/sound/mediatek,mt8188-afe.yaml
+@@ -63,15 +63,15 @@ properties:
+ - const: apll12_div2
+ - const: apll12_div3
+ - const: apll12_div9
+- - const: a1sys_hp_sel
+- - const: aud_intbus_sel
+- - const: audio_h_sel
+- - const: audio_local_bus_sel
+- - const: dptx_m_sel
+- - const: i2so1_m_sel
+- - const: i2so2_m_sel
+- - const: i2si1_m_sel
+- - const: i2si2_m_sel
++ - const: top_a1sys_hp
++ - const: top_aud_intbus
++ - const: top_audio_h
++ - const: top_audio_local_bus
++ - const: top_dptx
++ - const: top_i2so1
++ - const: top_i2so2
++ - const: top_i2si1
++ - const: top_i2si2
+ - const: adsp_audio_26m
+
+ mediatek,etdm-in1-cowork-source:
+@@ -193,15 +193,15 @@ examples:
+ "apll12_div2",
+ "apll12_div3",
+ "apll12_div9",
+- "a1sys_hp_sel",
+- "aud_intbus_sel",
+- "audio_h_sel",
+- "audio_local_bus_sel",
+- "dptx_m_sel",
+- "i2so1_m_sel",
+- "i2so2_m_sel",
+- "i2si1_m_sel",
+- "i2si2_m_sel",
++ "top_a1sys_hp",
++ "top_aud_intbus",
++ "top_audio_h",
++ "top_audio_local_bus",
++ "top_dptx",
++ "top_i2so1",
++ "top_i2so2",
++ "top_i2si1",
++ "top_i2si2",
+ "adsp_audio_26m";
+ };
+
+diff --git a/Makefile b/Makefile
+index 7b6c66b7b0041..df591a8efd8a6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 3
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+
+diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
+index c9434ff3aa4ce..8a3fb71e9cfad 100644
+--- a/arch/arc/include/asm/linkage.h
++++ b/arch/arc/include/asm/linkage.h
+@@ -8,6 +8,10 @@
+
+ #include <asm/dwarf.h>
+
++#define ASM_NL ` /* use '`' to mark new line in macro */
++#define __ALIGN .align 4
++#define __ALIGN_STR __stringify(__ALIGN)
++
+ #ifdef __ASSEMBLY__
+
+ .macro ST2 e, o, off
+@@ -28,10 +32,6 @@
+ #endif
+ .endm
+
+-#define ASM_NL ` /* use '`' to mark new line in macro */
+-#define __ALIGN .align 4
+-#define __ALIGN_STR __stringify(__ALIGN)
+-
+ /* annotation for data we want in DCCM - if enabled in .config */
+ .macro ARCFP_DATA nm
+ #ifdef CONFIG_ARC_HAS_DCCM
+diff --git a/arch/arm/boot/dts/bcm53015-meraki-mr26.dts b/arch/arm/boot/dts/bcm53015-meraki-mr26.dts
+index 14f58033efeb9..ca2266b936ee2 100644
+--- a/arch/arm/boot/dts/bcm53015-meraki-mr26.dts
++++ b/arch/arm/boot/dts/bcm53015-meraki-mr26.dts
+@@ -128,7 +128,7 @@
+
+ fixed-link {
+ speed = <1000>;
+- duplex-full;
++ full-duplex;
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/bcm53016-meraki-mr32.dts b/arch/arm/boot/dts/bcm53016-meraki-mr32.dts
+index 46c2c93b01d88..a34e1746a6c59 100644
+--- a/arch/arm/boot/dts/bcm53016-meraki-mr32.dts
++++ b/arch/arm/boot/dts/bcm53016-meraki-mr32.dts
+@@ -187,7 +187,7 @@
+
+ fixed-link {
+ speed = <1000>;
+- duplex-full;
++ full-duplex;
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
+index 5fc1b847f4aa5..787a0dd8216b7 100644
+--- a/arch/arm/boot/dts/bcm5301x.dtsi
++++ b/arch/arm/boot/dts/bcm5301x.dtsi
+@@ -542,7 +542,6 @@
+ "spi_lr_session_done",
+ "spi_lr_overread";
+ clocks = <&iprocmed>;
+- clock-names = "iprocmed";
+ num-cs = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm/boot/dts/iwg20d-q7-common.dtsi b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
+index 03caea6fc6ffa..4351c5a02fa59 100644
+--- a/arch/arm/boot/dts/iwg20d-q7-common.dtsi
++++ b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
+@@ -49,7 +49,7 @@
+ lcd_backlight: backlight {
+ compatible = "pwm-backlight";
+
+- pwms = <&pwm3 0 5000000 0>;
++ pwms = <&pwm3 0 5000000>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <7>;
+ enable-gpios = <&gpio5 14 GPIO_ACTIVE_HIGH>;
+diff --git a/arch/arm/boot/dts/lan966x-kontron-kswitch-d10-mmt.dtsi b/arch/arm/boot/dts/lan966x-kontron-kswitch-d10-mmt.dtsi
+index 0097e72e3fb22..f4df4cc1dfa5e 100644
+--- a/arch/arm/boot/dts/lan966x-kontron-kswitch-d10-mmt.dtsi
++++ b/arch/arm/boot/dts/lan966x-kontron-kswitch-d10-mmt.dtsi
+@@ -18,6 +18,8 @@
+
+ gpio-restart {
+ compatible = "gpio-restart";
++ pinctrl-0 = <&reset_pins>;
++ pinctrl-names = "default";
+ gpios = <&gpio 56 GPIO_ACTIVE_LOW>;
+ priority = <200>;
+ };
+@@ -39,7 +41,7 @@
+ status = "okay";
+
+ spi3: spi@400 {
+- pinctrl-0 = <&fc3_b_pins>;
++ pinctrl-0 = <&fc3_b_pins>, <&spi3_cs_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ cs-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+@@ -59,6 +61,12 @@
+ function = "miim_c";
+ };
+
++ reset_pins: reset-pins {
++ /* SYS_RST# */
++ pins = "GPIO_56";
++ function = "gpio";
++ };
++
+ sgpio_a_pins: sgpio-a-pins {
+ /* SCK, D0, D1 */
+ pins = "GPIO_32", "GPIO_33", "GPIO_34";
+@@ -71,6 +79,12 @@
+ function = "sgpio_b";
+ };
+
++ spi3_cs_pins: spi3-cs-pins {
++ /* CS# */
++ pins = "GPIO_46";
++ function = "gpio";
++ };
++
+ usart0_pins: usart0-pins {
+ /* RXD, TXD */
+ pins = "GPIO_25", "GPIO_26";
+diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
+index 21eb59041a7d9..8432efd48f610 100644
+--- a/arch/arm/boot/dts/meson8.dtsi
++++ b/arch/arm/boot/dts/meson8.dtsi
+@@ -752,13 +752,13 @@
+
+ &uart_B {
+ compatible = "amlogic,meson8-uart";
+- clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
++ clocks = <&xtal>, <&clkc CLKID_UART1>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
+ };
+
+ &uart_C {
+ compatible = "amlogic,meson8-uart";
+- clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
++ clocks = <&xtal>, <&clkc CLKID_UART2>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
+ };
+
+diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
+index d5a3fe21e8e7e..25f7c985f9ea1 100644
+--- a/arch/arm/boot/dts/meson8b.dtsi
++++ b/arch/arm/boot/dts/meson8b.dtsi
+@@ -740,13 +740,13 @@
+
+ &uart_B {
+ compatible = "amlogic,meson8b-uart";
+- clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
++ clocks = <&xtal>, <&clkc CLKID_UART1>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
+ };
+
+ &uart_C {
+ compatible = "amlogic,meson8b-uart";
+- clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
++ clocks = <&xtal>, <&clkc CLKID_UART2>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
+ };
+
+diff --git a/arch/arm/boot/dts/omap3-gta04a5one.dts b/arch/arm/boot/dts/omap3-gta04a5one.dts
+index 9db9fe67cd63b..95df45cc70c09 100644
+--- a/arch/arm/boot/dts/omap3-gta04a5one.dts
++++ b/arch/arm/boot/dts/omap3-gta04a5one.dts
+@@ -5,9 +5,11 @@
+
+ #include "omap3-gta04a5.dts"
+
+-&omap3_pmx_core {
++/ {
+ model = "Goldelico GTA04A5/Letux 2804 with OneNAND";
++};
+
++&omap3_pmx_core {
+ gpmc_pins: pinmux_gpmc_pins {
+ pinctrl-single,pins = <
+
+diff --git a/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
+index 1345df7cbd002..6b047c6793707 100644
+--- a/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
++++ b/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
+@@ -23,6 +23,10 @@
+ status = "okay";
+ };
+
++&blsp2_dma {
++ qcom,controlled-remotely;
++};
++
+ &blsp2_i2c5 {
+ status = "okay";
+ clock-frequency = <200000>;
+diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
+index 834ad95515b17..1c3d36701b8e5 100644
+--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
++++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
+@@ -300,7 +300,7 @@
+ qcom,ipc = <&apcs 8 0>;
+ qcom,smd-edge = <15>;
+
+- rpm_requests: rpm_requests {
++ rpm_requests: rpm-requests {
+ compatible = "qcom,rpm-msm8974";
+ qcom,smd-channels = "rpm_requests";
+
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
+index 4709677151aac..46b87a27d8b37 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
+@@ -137,10 +137,13 @@
+
+ sound {
+ compatible = "audio-graph-card";
+- routing =
+- "MIC_IN", "Capture",
+- "Capture", "Mic Bias",
+- "Playback", "HP_OUT";
++ widgets = "Headphone", "Headphone Jack",
++ "Line", "Line In Jack",
++ "Microphone", "Microphone Jack";
++ routing = "Headphone Jack", "HP_OUT",
++ "LINE_IN", "Line In Jack",
++ "MIC_IN", "Microphone Jack",
++ "Microphone Jack", "Mic Bias";
+ dais = <&sai2a_port &sai2b_port>;
+ status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+index 50af4a27d6be4..7d5d6d4360385 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+@@ -87,7 +87,7 @@
+
+ sound {
+ compatible = "audio-graph-card";
+- label = "STM32MP1-AV96-HDMI";
++ label = "STM32-AV96-HDMI";
+ dais = <&sai2a_port>;
+ status = "okay";
+ };
+@@ -321,6 +321,12 @@
+ };
+ };
+ };
++
++ dh_mac_eeprom: eeprom@53 {
++ compatible = "atmel,24c02";
++ reg = <0x53>;
++ pagesize = <16>;
++ };
+ };
+
+ &ltdc {
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-drc-compact.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-drc-compact.dtsi
+index c32c160f97f20..39af79dc654cc 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-drc-compact.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-drc-compact.dtsi
+@@ -192,6 +192,12 @@
+ reg = <0x50>;
+ pagesize = <16>;
+ };
++
++ dh_mac_eeprom: eeprom@53 {
++ compatible = "atmel,24c02";
++ reg = <0x53>;
++ pagesize = <16>;
++ };
+ };
+
+ &sdmmc1 { /* MicroSD */
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
+index bb40fb46da81d..bba19f21e5277 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
+@@ -213,12 +213,6 @@
+ status = "disabled";
+ };
+ };
+-
+- eeprom@53 {
+- compatible = "atmel,24c02";
+- reg = <0x53>;
+- pagesize = <16>;
+- };
+ };
+
+ &ipcc {
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-testbench.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-testbench.dtsi
+index 5fdb74b652aca..faed31b6d84a1 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-testbench.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-testbench.dtsi
+@@ -90,6 +90,14 @@
+ };
+ };
+
++&i2c4 {
++ dh_mac_eeprom: eeprom@53 {
++ compatible = "atmel,24c02";
++ reg = <0x53>;
++ pagesize = <16>;
++ };
++};
++
+ &sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_b>;
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
+index 11370ae0d868b..030b7ace63f1e 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
+@@ -438,7 +438,7 @@
+ i2s2_port: port {
+ i2s2_endpoint: endpoint {
+ remote-endpoint = <&sii9022_tx_endpoint>;
+- format = "i2s";
++ dai-format = "i2s";
+ mclk-fs = <256>;
+ };
+ };
+diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
+index 505a306e0271a..aebe2c8f6a686 100644
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -394,6 +394,23 @@ ALT_UP_B(.L0_\@)
+ #endif
+ .endm
+
++/*
++ * Raw SMP data memory barrier
++ */
++ .macro __smp_dmb mode
++#if __LINUX_ARM_ARCH__ >= 7
++ .ifeqs "\mode","arm"
++ dmb ish
++ .else
++ W(dmb) ish
++ .endif
++#elif __LINUX_ARM_ARCH__ == 6
++ mcr p15, 0, r0, c7, c10, 5 @ dmb
++#else
++ .error "Incompatible SMP platform"
++#endif
++ .endm
++
+ #if defined(CONFIG_CPU_V7M)
+ /*
+ * setmode is used to assert to be in svc mode during boot. For v7-M
+diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
+index 6f5d627c44a3c..f46b3c570f92e 100644
+--- a/arch/arm/include/asm/sync_bitops.h
++++ b/arch/arm/include/asm/sync_bitops.h
+@@ -14,14 +14,35 @@
+ * ops which are SMP safe even on a UP kernel.
+ */
+
++/*
++ * Unordered
++ */
++
+ #define sync_set_bit(nr, p) _set_bit(nr, p)
+ #define sync_clear_bit(nr, p) _clear_bit(nr, p)
+ #define sync_change_bit(nr, p) _change_bit(nr, p)
+-#define sync_test_and_set_bit(nr, p) _test_and_set_bit(nr, p)
+-#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p)
+-#define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p)
+ #define sync_test_bit(nr, addr) test_bit(nr, addr)
+-#define arch_sync_cmpxchg arch_cmpxchg
+
++/*
++ * Fully ordered
++ */
++
++int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
++#define sync_test_and_set_bit(nr, p) _sync_test_and_set_bit(nr, p)
++
++int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
++#define sync_test_and_clear_bit(nr, p) _sync_test_and_clear_bit(nr, p)
++
++int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
++#define sync_test_and_change_bit(nr, p) _sync_test_and_change_bit(nr, p)
++
++#define arch_sync_cmpxchg(ptr, old, new) \
++({ \
++ __typeof__(*(ptr)) __ret; \
++ __smp_mb__before_atomic(); \
++ __ret = arch_cmpxchg_relaxed((ptr), (old), (new)); \
++ __smp_mb__after_atomic(); \
++ __ret; \
++})
+
+ #endif
+diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
+index 95bd359912889..f069d1b2318e6 100644
+--- a/arch/arm/lib/bitops.h
++++ b/arch/arm/lib/bitops.h
+@@ -28,7 +28,7 @@ UNWIND( .fnend )
+ ENDPROC(\name )
+ .endm
+
+- .macro testop, name, instr, store
++ .macro __testop, name, instr, store, barrier
+ ENTRY( \name )
+ UNWIND( .fnstart )
+ ands ip, r1, #3
+@@ -38,7 +38,7 @@ UNWIND( .fnstart )
+ mov r0, r0, lsr #5
+ add r1, r1, r0, lsl #2 @ Get word offset
+ mov r3, r2, lsl r3 @ create mask
+- smp_dmb
++ \barrier
+ #if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
+ .arch_extension mp
+ ALT_SMP(W(pldw) [r1])
+@@ -50,13 +50,21 @@ UNWIND( .fnstart )
+ strex ip, r2, [r1]
+ cmp ip, #0
+ bne 1b
+- smp_dmb
++ \barrier
+ cmp r0, #0
+ movne r0, #1
+ 2: bx lr
+ UNWIND( .fnend )
+ ENDPROC(\name )
+ .endm
++
++ .macro testop, name, instr, store
++ __testop \name, \instr, \store, smp_dmb
++ .endm
++
++ .macro sync_testop, name, instr, store
++ __testop \name, \instr, \store, __smp_dmb
++ .endm
+ #else
+ .macro bitop, name, instr
+ ENTRY( \name )
+diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S
+index 4ebecc67e6e04..f13fe9bc2399a 100644
+--- a/arch/arm/lib/testchangebit.S
++++ b/arch/arm/lib/testchangebit.S
+@@ -10,3 +10,7 @@
+ .text
+
+ testop _test_and_change_bit, eor, str
++
++#if __LINUX_ARM_ARCH__ >= 6
++sync_testop _sync_test_and_change_bit, eor, str
++#endif
+diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S
+index 009afa0f5b4a7..4d2c5ca620ebf 100644
+--- a/arch/arm/lib/testclearbit.S
++++ b/arch/arm/lib/testclearbit.S
+@@ -10,3 +10,7 @@
+ .text
+
+ testop _test_and_clear_bit, bicne, strne
++
++#if __LINUX_ARM_ARCH__ >= 6
++sync_testop _sync_test_and_clear_bit, bicne, strne
++#endif
+diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S
+index f3192e55acc87..649dbab65d8d0 100644
+--- a/arch/arm/lib/testsetbit.S
++++ b/arch/arm/lib/testsetbit.S
+@@ -10,3 +10,7 @@
+ .text
+
+ testop _test_and_set_bit, orreq, streq
++
++#if __LINUX_ARM_ARCH__ >= 6
++sync_testop _sync_test_and_set_bit, orreq, streq
++#endif
+diff --git a/arch/arm/mach-ep93xx/timer-ep93xx.c b/arch/arm/mach-ep93xx/timer-ep93xx.c
+index dd4b164d18317..a9efa7bc2fa12 100644
+--- a/arch/arm/mach-ep93xx/timer-ep93xx.c
++++ b/arch/arm/mach-ep93xx/timer-ep93xx.c
+@@ -9,6 +9,7 @@
+ #include <linux/io.h>
+ #include <asm/mach/time.h>
+ #include "soc.h"
++#include "platform.h"
+
+ /*************************************************************************
+ * Timer handling for EP93xx
+@@ -60,7 +61,7 @@ static u64 notrace ep93xx_read_sched_clock(void)
+ return ret;
+ }
+
+-u64 ep93xx_clocksource_read(struct clocksource *c)
++static u64 ep93xx_clocksource_read(struct clocksource *c)
+ {
+ u64 ret;
+
+diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
+index df758c1f92373..a8ca8d427182d 100644
+--- a/arch/arm/mach-omap1/board-osk.c
++++ b/arch/arm/mach-omap1/board-osk.c
+@@ -25,7 +25,8 @@
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+-#include <linux/gpio.h>
++#include <linux/gpio/consumer.h>
++#include <linux/gpio/driver.h>
+ #include <linux/gpio/machine.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+@@ -64,13 +65,12 @@
+ /* TPS65010 has four GPIOs. nPG and LED2 can be treated like GPIOs with
+ * alternate pin configurations for hardware-controlled blinking.
+ */
+-#define OSK_TPS_GPIO_BASE (OMAP_MAX_GPIO_LINES + 16 /* MPUIO */)
+-# define OSK_TPS_GPIO_USB_PWR_EN (OSK_TPS_GPIO_BASE + 0)
+-# define OSK_TPS_GPIO_LED_D3 (OSK_TPS_GPIO_BASE + 1)
+-# define OSK_TPS_GPIO_LAN_RESET (OSK_TPS_GPIO_BASE + 2)
+-# define OSK_TPS_GPIO_DSP_PWR_EN (OSK_TPS_GPIO_BASE + 3)
+-# define OSK_TPS_GPIO_LED_D9 (OSK_TPS_GPIO_BASE + 4)
+-# define OSK_TPS_GPIO_LED_D2 (OSK_TPS_GPIO_BASE + 5)
++#define OSK_TPS_GPIO_USB_PWR_EN 0
++#define OSK_TPS_GPIO_LED_D3 1
++#define OSK_TPS_GPIO_LAN_RESET 2
++#define OSK_TPS_GPIO_DSP_PWR_EN 3
++#define OSK_TPS_GPIO_LED_D9 4
++#define OSK_TPS_GPIO_LED_D2 5
+
+ static struct mtd_partition osk_partitions[] = {
+ /* bootloader (U-Boot, etc) in first sector */
+@@ -174,11 +174,20 @@ static const struct gpio_led tps_leds[] = {
+ /* NOTE: D9 and D2 have hardware blink support.
+ * Also, D9 requires non-battery power.
+ */
+- { .gpio = OSK_TPS_GPIO_LED_D9, .name = "d9",
+- .default_trigger = "disk-activity", },
+- { .gpio = OSK_TPS_GPIO_LED_D2, .name = "d2", },
+- { .gpio = OSK_TPS_GPIO_LED_D3, .name = "d3", .active_low = 1,
+- .default_trigger = "heartbeat", },
++ { .name = "d9", .default_trigger = "disk-activity", },
++ { .name = "d2", },
++ { .name = "d3", .default_trigger = "heartbeat", },
++};
++
++static struct gpiod_lookup_table tps_leds_gpio_table = {
++ .dev_id = "leds-gpio",
++ .table = {
++ /* Use local offsets on TPS65010 */
++ GPIO_LOOKUP_IDX("tps65010", OSK_TPS_GPIO_LED_D9, NULL, 0, GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP_IDX("tps65010", OSK_TPS_GPIO_LED_D2, NULL, 1, GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP_IDX("tps65010", OSK_TPS_GPIO_LED_D3, NULL, 2, GPIO_ACTIVE_LOW),
++ { }
++ },
+ };
+
+ static struct gpio_led_platform_data tps_leds_data = {
+@@ -192,29 +201,34 @@ static struct platform_device osk5912_tps_leds = {
+ .dev.platform_data = &tps_leds_data,
+ };
+
+-static int osk_tps_setup(struct i2c_client *client, void *context)
++/* The board just hold these GPIOs hogged from setup to teardown */
++static struct gpio_desc *eth_reset;
++static struct gpio_desc *vdd_dsp;
++
++static int osk_tps_setup(struct i2c_client *client, struct gpio_chip *gc)
+ {
++ struct gpio_desc *d;
+ if (!IS_BUILTIN(CONFIG_TPS65010))
+ return -ENOSYS;
+
+ /* Set GPIO 1 HIGH to disable VBUS power supply;
+ * OHCI driver powers it up/down as needed.
+ */
+- gpio_request(OSK_TPS_GPIO_USB_PWR_EN, "n_vbus_en");
+- gpio_direction_output(OSK_TPS_GPIO_USB_PWR_EN, 1);
++ d = gpiochip_request_own_desc(gc, OSK_TPS_GPIO_USB_PWR_EN, "n_vbus_en",
++ GPIO_ACTIVE_HIGH, GPIOD_OUT_HIGH);
+ /* Free the GPIO again as the driver will request it */
+- gpio_free(OSK_TPS_GPIO_USB_PWR_EN);
++ gpiochip_free_own_desc(d);
+
+ /* Set GPIO 2 high so LED D3 is off by default */
+ tps65010_set_gpio_out_value(GPIO2, HIGH);
+
+ /* Set GPIO 3 low to take ethernet out of reset */
+- gpio_request(OSK_TPS_GPIO_LAN_RESET, "smc_reset");
+- gpio_direction_output(OSK_TPS_GPIO_LAN_RESET, 0);
++ eth_reset = gpiochip_request_own_desc(gc, OSK_TPS_GPIO_LAN_RESET, "smc_reset",
++ GPIO_ACTIVE_HIGH, GPIOD_OUT_LOW);
+
+ /* GPIO4 is VDD_DSP */
+- gpio_request(OSK_TPS_GPIO_DSP_PWR_EN, "dsp_power");
+- gpio_direction_output(OSK_TPS_GPIO_DSP_PWR_EN, 1);
++ vdd_dsp = gpiochip_request_own_desc(gc, OSK_TPS_GPIO_DSP_PWR_EN, "dsp_power",
++ GPIO_ACTIVE_HIGH, GPIOD_OUT_HIGH);
+ /* REVISIT if DSP support isn't configured, power it off ... */
+
+ /* Let LED1 (D9) blink; leds-gpio may override it */
+@@ -232,15 +246,22 @@ static int osk_tps_setup(struct i2c_client *client, void *context)
+
+ /* register these three LEDs */
+ osk5912_tps_leds.dev.parent = &client->dev;
++ gpiod_add_lookup_table(&tps_leds_gpio_table);
+ platform_device_register(&osk5912_tps_leds);
+
+ return 0;
+ }
+
++static void osk_tps_teardown(struct i2c_client *client, struct gpio_chip *gc)
++{
++ gpiochip_free_own_desc(eth_reset);
++ gpiochip_free_own_desc(vdd_dsp);
++}
++
+ static struct tps65010_board tps_board = {
+- .base = OSK_TPS_GPIO_BASE,
+ .outmask = 0x0f,
+ .setup = osk_tps_setup,
++ .teardown = osk_tps_teardown,
+ };
+
+ static struct i2c_board_info __initdata osk_i2c_board_info[] = {
+@@ -263,11 +284,6 @@ static void __init osk_init_smc91x(void)
+ {
+ u32 l;
+
+- if ((gpio_request(0, "smc_irq")) < 0) {
+- printk("Error requesting gpio 0 for smc91x irq\n");
+- return;
+- }
+-
+ /* Check EMIFS wait states to fix errors with SMC_GET_PKT_HDR */
+ l = omap_readl(EMIFS_CCS(1));
+ l |= 0x3;
+@@ -279,10 +295,6 @@ static void __init osk_init_cf(int seg)
+ struct resource *res = &osk5912_cf_resources[1];
+
+ omap_cfg_reg(M7_1610_GPIO62);
+- if ((gpio_request(62, "cf_irq")) < 0) {
+- printk("Error requesting gpio 62 for CF irq\n");
+- return;
+- }
+
+ switch (seg) {
+ /* NOTE: CS0 could be configured too ... */
+@@ -308,18 +320,17 @@ static void __init osk_init_cf(int seg)
+ seg, omap_readl(EMIFS_CCS(seg)), omap_readl(EMIFS_ACS(seg)));
+ omap_writel(0x0004a1b3, EMIFS_CCS(seg)); /* synch mode 4 etc */
+ omap_writel(0x00000000, EMIFS_ACS(seg)); /* OE hold/setup */
+-
+- /* the CF I/O IRQ is really active-low */
+- irq_set_irq_type(gpio_to_irq(62), IRQ_TYPE_EDGE_FALLING);
+ }
+
+ static struct gpiod_lookup_table osk_usb_gpio_table = {
+ .dev_id = "ohci",
+ .table = {
+ /* Power GPIO on the I2C-attached TPS65010 */
+- GPIO_LOOKUP("tps65010", 0, "power", GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("tps65010", OSK_TPS_GPIO_USB_PWR_EN, "power",
++ GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP(OMAP_GPIO_LABEL, 9, "overcurrent",
+ GPIO_ACTIVE_HIGH),
++ { }
+ },
+ };
+
+@@ -341,8 +352,25 @@ static struct omap_usb_config osk_usb_config __initdata = {
+
+ #define EMIFS_CS3_VAL (0x88013141)
+
++static struct gpiod_lookup_table osk_irq_gpio_table = {
++ .dev_id = NULL,
++ .table = {
++ /* GPIO used for SMC91x IRQ */
++ GPIO_LOOKUP(OMAP_GPIO_LABEL, 0, "smc_irq",
++ GPIO_ACTIVE_HIGH),
++ /* GPIO used for CF IRQ */
++ GPIO_LOOKUP("gpio-48-63", 14, "cf_irq",
++ GPIO_ACTIVE_HIGH),
++ /* GPIO used by the TPS65010 chip */
++ GPIO_LOOKUP("mpuio", 1, "tps65010",
++ GPIO_ACTIVE_HIGH),
++ { }
++ },
++};
++
+ static void __init osk_init(void)
+ {
++ struct gpio_desc *d;
+ u32 l;
+
+ osk_init_smc91x();
+@@ -359,10 +387,31 @@ static void __init osk_init(void)
+
+ osk_flash_resource.end = osk_flash_resource.start = omap_cs3_phys();
+ osk_flash_resource.end += SZ_32M - 1;
+- osk5912_smc91x_resources[1].start = gpio_to_irq(0);
+- osk5912_smc91x_resources[1].end = gpio_to_irq(0);
+- osk5912_cf_resources[0].start = gpio_to_irq(62);
+- osk5912_cf_resources[0].end = gpio_to_irq(62);
++
++ /*
++ * Add the GPIOs to be used as IRQs and immediately look them up
++ * to be passed as an IRQ resource. This is ugly but should work
++ * until the day we convert to device tree.
++ */
++ gpiod_add_lookup_table(&osk_irq_gpio_table);
++
++ d = gpiod_get(NULL, "smc_irq", GPIOD_IN);
++ if (IS_ERR(d)) {
++ pr_err("Unable to get SMC IRQ GPIO descriptor\n");
++ } else {
++ irq_set_irq_type(gpiod_to_irq(d), IRQ_TYPE_EDGE_RISING);
++ osk5912_smc91x_resources[1] = DEFINE_RES_IRQ(gpiod_to_irq(d));
++ }
++
++ d = gpiod_get(NULL, "cf_irq", GPIOD_IN);
++ if (IS_ERR(d)) {
++ pr_err("Unable to get CF IRQ GPIO descriptor\n");
++ } else {
++ /* the CF I/O IRQ is really active-low */
++ irq_set_irq_type(gpiod_to_irq(d), IRQ_TYPE_EDGE_FALLING);
++ osk5912_cf_resources[0] = DEFINE_RES_IRQ(gpiod_to_irq(d));
++ }
++
+ platform_add_devices(osk5912_devices, ARRAY_SIZE(osk5912_devices));
+
+ l = omap_readl(USB_TRANSCEIVER_CTRL);
+@@ -372,13 +421,15 @@ static void __init osk_init(void)
+ gpiod_add_lookup_table(&osk_usb_gpio_table);
+ omap1_usb_init(&osk_usb_config);
+
++ omap_serial_init();
++
+ /* irq for tps65010 chip */
+ /* bootloader effectively does: omap_cfg_reg(U19_1610_MPUIO1); */
+- if (gpio_request(OMAP_MPUIO(1), "tps65010") == 0)
+- gpio_direction_input(OMAP_MPUIO(1));
+-
+- omap_serial_init();
+- osk_i2c_board_info[0].irq = gpio_to_irq(OMAP_MPUIO(1));
++ d = gpiod_get(NULL, "tps65010", GPIOD_IN);
++ if (IS_ERR(d))
++ pr_err("Unable to get TPS65010 IRQ GPIO descriptor\n");
++ else
++ osk_i2c_board_info[0].irq = gpiod_to_irq(d);
+ omap_register_i2c_bus(1, 400, osk_i2c_board_info,
+ ARRAY_SIZE(osk_i2c_board_info));
+ }
+diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
+index 1610c567a6a3a..10d2f078e4a8e 100644
+--- a/arch/arm/mach-omap2/board-generic.c
++++ b/arch/arm/mach-omap2/board-generic.c
+@@ -13,6 +13,7 @@
+ #include <linux/of_platform.h>
+ #include <linux/irqdomain.h>
+ #include <linux/clocksource.h>
++#include <linux/clockchips.h>
+
+ #include <asm/setup.h>
+ #include <asm/mach/arch.h>
+diff --git a/arch/arm/probes/kprobes/checkers-common.c b/arch/arm/probes/kprobes/checkers-common.c
+index 4d720990cf2a3..eba7ac4725c02 100644
+--- a/arch/arm/probes/kprobes/checkers-common.c
++++ b/arch/arm/probes/kprobes/checkers-common.c
+@@ -40,7 +40,7 @@ enum probes_insn checker_stack_use_imm_0xx(probes_opcode_t insn,
+ * Different from other insn uses imm8, the real addressing offset of
+ * STRD in T32 encoding should be imm8 * 4. See ARMARM description.
+ */
+-enum probes_insn checker_stack_use_t32strd(probes_opcode_t insn,
++static enum probes_insn checker_stack_use_t32strd(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+ {
+diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
+index 9090c3a74dcce..d8238da095df7 100644
+--- a/arch/arm/probes/kprobes/core.c
++++ b/arch/arm/probes/kprobes/core.c
+@@ -233,7 +233,7 @@ singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+ * kprobe, and that level is reserved for user kprobe handlers, so we can't
+ * risk encountering a new kprobe in an interrupt handler.
+ */
+-void __kprobes kprobe_handler(struct pt_regs *regs)
++static void __kprobes kprobe_handler(struct pt_regs *regs)
+ {
+ struct kprobe *p, *cur;
+ struct kprobe_ctlblk *kcb;
+diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
+index dbef34ed933f2..7f65048380ca5 100644
+--- a/arch/arm/probes/kprobes/opt-arm.c
++++ b/arch/arm/probes/kprobes/opt-arm.c
+@@ -145,8 +145,6 @@ __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
+ }
+ }
+
+-extern void kprobe_handler(struct pt_regs *regs);
+-
+ static void
+ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
+ {
+diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
+index c562832b86272..171c7076b89f4 100644
+--- a/arch/arm/probes/kprobes/test-core.c
++++ b/arch/arm/probes/kprobes/test-core.c
+@@ -720,7 +720,7 @@ static const char coverage_register_lookup[16] = {
+ [REG_TYPE_NOSPPCX] = COVERAGE_ANY_REG | COVERAGE_SP,
+ };
+
+-unsigned coverage_start_registers(const struct decode_header *h)
++static unsigned coverage_start_registers(const struct decode_header *h)
+ {
+ unsigned regs = 0;
+ int i;
+diff --git a/arch/arm/probes/kprobes/test-core.h b/arch/arm/probes/kprobes/test-core.h
+index 56ad3c0aaeeac..c7297037c1623 100644
+--- a/arch/arm/probes/kprobes/test-core.h
++++ b/arch/arm/probes/kprobes/test-core.h
+@@ -454,3 +454,7 @@ void kprobe_thumb32_test_cases(void);
+ #else
+ void kprobe_arm_test_cases(void);
+ #endif
++
++void __kprobes_test_case_start(void);
++void __kprobes_test_case_end_16(void);
++void __kprobes_test_case_end_32(void);
+diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-nand.dtso b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-nand.dtso
+index 15ee8c568f3c3..543c13385d6e3 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-nand.dtso
++++ b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3-nand.dtso
+@@ -29,13 +29,13 @@
+
+ partition@0 {
+ label = "bl2";
+- reg = <0x0 0x80000>;
++ reg = <0x0 0x100000>;
+ read-only;
+ };
+
+- partition@80000 {
++ partition@100000 {
+ label = "reserved";
+- reg = <0x80000 0x300000>;
++ reg = <0x100000 0x280000>;
+ };
+
+ partition@380000 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+index fbe14b13051a6..4836ad55fd4ae 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+@@ -292,6 +292,10 @@
+ };
+ };
+
++&gic {
++ mediatek,broken-save-restore-fw;
++};
++
+ &gpu {
+ mali-supply = <&mt6358_vgpu_reg>;
+ sram-supply = <&mt6358_vsram_gpu_reg>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+index 87b91c8feaf96..a3a2b7de54a7c 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+@@ -70,7 +70,8 @@
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_0>;
+- capacity-dmips-mhz = <530>;
++ performance-domains = <&performance 0>;
++ capacity-dmips-mhz = <427>;
+ };
+
+ cpu1: cpu@100 {
+@@ -87,7 +88,8 @@
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_0>;
+- capacity-dmips-mhz = <530>;
++ performance-domains = <&performance 0>;
++ capacity-dmips-mhz = <427>;
+ };
+
+ cpu2: cpu@200 {
+@@ -104,7 +106,8 @@
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_0>;
+- capacity-dmips-mhz = <530>;
++ performance-domains = <&performance 0>;
++ capacity-dmips-mhz = <427>;
+ };
+
+ cpu3: cpu@300 {
+@@ -121,7 +124,8 @@
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&l2_0>;
+- capacity-dmips-mhz = <530>;
++ performance-domains = <&performance 0>;
++ capacity-dmips-mhz = <427>;
+ };
+
+ cpu4: cpu@400 {
+@@ -138,6 +142,7 @@
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_1>;
++ performance-domains = <&performance 1>;
+ capacity-dmips-mhz = <1024>;
+ };
+
+@@ -155,6 +160,7 @@
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_1>;
++ performance-domains = <&performance 1>;
+ capacity-dmips-mhz = <1024>;
+ };
+
+@@ -172,6 +178,7 @@
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_1>;
++ performance-domains = <&performance 1>;
+ capacity-dmips-mhz = <1024>;
+ };
+
+@@ -189,6 +196,7 @@
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_1>;
++ performance-domains = <&performance 1>;
+ capacity-dmips-mhz = <1024>;
+ };
+
+@@ -318,6 +326,12 @@
+ compatible = "simple-bus";
+ ranges;
+
++ performance: performance-controller@11bc10 {
++ compatible = "mediatek,cpufreq-hw";
++ reg = <0 0x0011bc10 0 0x120>, <0 0x0011bd30 0 0x120>;
++ #performance-domain-cells = <1>;
++ };
++
+ gic: interrupt-controller@c000000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <4>;
+diff --git a/arch/arm64/boot/dts/microchip/sparx5.dtsi b/arch/arm64/boot/dts/microchip/sparx5.dtsi
+index 0367a00a269b3..5eae6e7fd248e 100644
+--- a/arch/arm64/boot/dts/microchip/sparx5.dtsi
++++ b/arch/arm64/boot/dts/microchip/sparx5.dtsi
+@@ -61,7 +61,7 @@
+ interrupt-affinity = <&cpu0>, <&cpu1>;
+ };
+
+- psci {
++ psci: psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+diff --git a/arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi b/arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi
+index 9d1a082de3e29..32bb76b3202a0 100644
+--- a/arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi
++++ b/arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi
+@@ -6,6 +6,18 @@
+ /dts-v1/;
+ #include "sparx5.dtsi"
+
++&psci {
++ status = "disabled";
++};
++
++&cpu0 {
++ enable-method = "spin-table";
++};
++
++&cpu1 {
++ enable-method = "spin-table";
++};
++
+ &uart0 {
+ status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+index c52d79a55d80c..dbdb8077857ef 100644
+--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
++++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+@@ -325,12 +325,6 @@
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+
+-&pronto {
+- status = "okay";
+-
+- firmware-name = "qcom/apq8016/wcnss.mbn";
+-};
+-
+ &sdhc_1 {
+ status = "okay";
+
+@@ -411,10 +405,19 @@
+ qcom,mbhc-vthreshold-high = <75 150 237 450 500>;
+ };
+
++&wcnss {
++ status = "okay";
++ firmware-name = "qcom/apq8016/wcnss.mbn";
++};
++
+ &wcnss_ctrl {
+ firmware-name = "qcom/apq8016/WCNSS_qcom_wlan_nv_sbc.bin";
+ };
+
++&wcnss_iris {
++ compatible = "qcom,wcn3620";
++};
++
+ /* Enable CoreSight */
+ &cti0 { status = "okay"; };
+ &cti1 { status = "okay"; };
+@@ -444,21 +447,21 @@
+ vdd_l7-supply = <&pm8916_s4>;
+
+ s3 {
+- regulator-min-microvolt = <375000>;
+- regulator-max-microvolt = <1562000>;
++ regulator-min-microvolt = <1250000>;
++ regulator-max-microvolt = <1350000>;
+ };
+
+ s4 {
+- regulator-min-microvolt = <1800000>;
+- regulator-max-microvolt = <1800000>;
++ regulator-min-microvolt = <1850000>;
++ regulator-max-microvolt = <2150000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ l1 {
+- regulator-min-microvolt = <375000>;
+- regulator-max-microvolt = <1525000>;
++ regulator-min-microvolt = <1225000>;
++ regulator-max-microvolt = <1225000>;
+ };
+
+ l2 {
+@@ -467,13 +470,13 @@
+ };
+
+ l4 {
+- regulator-min-microvolt = <1750000>;
+- regulator-max-microvolt = <3337000>;
++ regulator-min-microvolt = <2050000>;
++ regulator-max-microvolt = <2050000>;
+ };
+
+ l5 {
+- regulator-min-microvolt = <1750000>;
+- regulator-max-microvolt = <3337000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
+ };
+
+ l6 {
+@@ -482,60 +485,68 @@
+ };
+
+ l7 {
+- regulator-min-microvolt = <1750000>;
+- regulator-max-microvolt = <3337000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
+ };
+
+ l8 {
+- regulator-min-microvolt = <1750000>;
+- regulator-max-microvolt = <3337000>;
++ regulator-min-microvolt = <2900000>;
++ regulator-max-microvolt = <2900000>;
+ };
+
+ l9 {
+- regulator-min-microvolt = <1750000>;
+- regulator-max-microvolt = <3337000>;
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
+ };
+
+ l10 {
+- regulator-min-microvolt = <1750000>;
+- regulator-max-microvolt = <3337000>;
++ regulator-min-microvolt = <2800000>;
++ regulator-max-microvolt = <2800000>;
+ };
+
+ l11 {
+- regulator-min-microvolt = <1750000>;
+- regulator-max-microvolt = <3337000>;
++ regulator-min-microvolt = <2950000>;
++ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ regulator-system-load = <200000>;
+ };
+
+ l12 {
+- regulator-min-microvolt = <1750000>;
+- regulator-max-microvolt = <3337000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <2950000>;
+ };
+
+ l13 {
+- regulator-min-microvolt = <1750000>;
+- regulator-max-microvolt = <3337000>;
++ regulator-min-microvolt = <3075000>;
++ regulator-max-microvolt = <3075000>;
+ };
+
+ l14 {
+- regulator-min-microvolt = <1750000>;
+- regulator-max-microvolt = <3337000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
+ };
+
+- /**
+- * 1.8v required on LS expansion
+- * for mezzanine boards
++ /*
++ * The 96Boards specification expects a 1.8V power rail on the low-speed
++ * expansion connector that is able to provide at least 0.18W / 100 mA.
++ * L15/L16 are connected in parallel to provide 55 mA each. A minimum load
++ * must be specified to ensure the regulators are not put in LPM where they
++ * would only provide 5 mA.
+ */
+ l15 {
+- regulator-min-microvolt = <1750000>;
+- regulator-max-microvolt = <3337000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-system-load = <50000>;
++ regulator-allow-set-load;
+ regulator-always-on;
+ };
+
+ l16 {
+- regulator-min-microvolt = <1750000>;
+- regulator-max-microvolt = <3337000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-system-load = <50000>;
++ regulator-allow-set-load;
++ regulator-always-on;
+ };
+
+ l17 {
+@@ -544,8 +555,8 @@
+ };
+
+ l18 {
+- regulator-min-microvolt = <1750000>;
+- regulator-max-microvolt = <3337000>;
++ regulator-min-microvolt = <2700000>;
++ regulator-max-microvolt = <2700000>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts b/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts
+index 71e0a500599c8..ed2e2f6c6775a 100644
+--- a/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts
++++ b/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts
+@@ -26,7 +26,7 @@
+
+ v1p05: v1p05-regulator {
+ compatible = "regulator-fixed";
+- reglator-name = "v1p05";
++ regulator-name = "v1p05";
+ regulator-always-on;
+ regulator-boot-on;
+
+@@ -38,7 +38,7 @@
+
+ v12_poe: v12-poe-regulator {
+ compatible = "regulator-fixed";
+- reglator-name = "v12_poe";
++ regulator-name = "v12_poe";
+ regulator-always-on;
+ regulator-boot-on;
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+index 9ff4e9d45065b..8ec9e282b412c 100644
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+@@ -301,7 +301,7 @@
+ status = "disabled";
+ };
+
+- prng: qrng@e1000 {
++ prng: qrng@e3000 {
+ compatible = "qcom,prng-ee";
+ reg = <0x0 0x000e3000 0x0 0x1000>;
+ clocks = <&gcc GCC_PRNG_AHB_CLK>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-acer-a1-724.dts b/arch/arm64/boot/dts/qcom/msm8916-acer-a1-724.dts
+index ed3fa7b3575b7..13cd9ad167df7 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-acer-a1-724.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-acer-a1-724.dts
+@@ -118,10 +118,6 @@
+ status = "okay";
+ };
+
+-&pronto {
+- status = "okay";
+-};
+-
+ &sdhc_1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+@@ -149,6 +145,14 @@
+ extcon = <&usb_id>;
+ };
+
++&wcnss {
++ status = "okay";
++};
++
++&wcnss_iris {
++ compatible = "qcom,wcn3620";
++};
++
+ &smd_rpm_regulators {
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts b/arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts
+index 701a5585d77e4..fecb69944cfa3 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts
+@@ -160,10 +160,6 @@
+ status = "okay";
+ };
+
+-&pronto {
+- status = "okay";
+-};
+-
+ &sdhc_1 {
+ status = "okay";
+
+@@ -191,6 +187,14 @@
+ extcon = <&usb_id>;
+ };
+
++&wcnss {
++ status = "okay";
++};
++
++&wcnss_iris {
++ compatible = "qcom,wcn3620";
++};
++
+ &smd_rpm_regulators {
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-asus-z00l.dts b/arch/arm64/boot/dts/qcom/msm8916-asus-z00l.dts
+index 3618704a53309..91284a1d0966f 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-asus-z00l.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-asus-z00l.dts
+@@ -128,10 +128,6 @@
+ status = "okay";
+ };
+
+-&pronto {
+- status = "okay";
+-};
+-
+ &sdhc_1 {
+ status = "okay";
+
+@@ -159,6 +155,14 @@
+ extcon = <&usb_id>;
+ };
+
++&wcnss {
++ status = "okay";
++};
++
++&wcnss_iris {
++ compatible = "qcom,wcn3620";
++};
++
+ &smd_rpm_regulators {
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-gplus-fl8005a.dts b/arch/arm64/boot/dts/qcom/msm8916-gplus-fl8005a.dts
+index a0e520edde029..525ec76efeeb7 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-gplus-fl8005a.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-gplus-fl8005a.dts
+@@ -118,10 +118,6 @@
+ status = "okay";
+ };
+
+-&pronto {
+- status = "okay";
+-};
+-
+ &sdhc_1 {
+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
+@@ -149,6 +145,14 @@
+ extcon = <&usb_id>;
+ };
+
++&wcnss {
++ status = "okay";
++};
++
++&wcnss_iris {
++ compatible = "qcom,wcn3620";
++};
++
+ &smd_rpm_regulators {
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-huawei-g7.dts b/arch/arm64/boot/dts/qcom/msm8916-huawei-g7.dts
+index 8c07eca900d3f..5b1bac8f51220 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-huawei-g7.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-huawei-g7.dts
+@@ -227,10 +227,6 @@
+ status = "okay";
+ };
+
+-&pronto {
+- status = "okay";
+-};
+-
+ &sdhc_1 {
+ status = "okay";
+
+@@ -312,6 +308,14 @@
+ qcom,hphl-jack-type-normally-open;
+ };
+
++&wcnss {
++ status = "okay";
++};
++
++&wcnss_iris {
++ compatible = "qcom,wcn3620";
++};
++
+ &smd_rpm_regulators {
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
+index d1e8cf2f50c0d..f1dd625e18227 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
+@@ -231,10 +231,6 @@
+ status = "okay";
+ };
+
+-&pronto {
+- status = "okay";
+-};
+-
+ &sdhc_1 {
+ status = "okay";
+
+@@ -263,6 +259,14 @@
+ extcon = <&pm8916_usbin>;
+ };
+
++&wcnss {
++ status = "okay";
++};
++
++&wcnss_iris {
++ compatible = "qcom,wcn3620";
++};
++
+ &smd_rpm_regulators {
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts
+index 3899e11b9843b..b79e80913af9f 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts
+@@ -99,10 +99,6 @@
+ status = "okay";
+ };
+
+-&pronto {
+- status = "okay";
+-};
+-
+ &sdhc_1 {
+ status = "okay";
+
+@@ -130,6 +126,14 @@
+ extcon = <&usb_id>;
+ };
+
++&wcnss {
++ status = "okay";
++};
++
++&wcnss_iris {
++ compatible = "qcom,wcn3620";
++};
++
+ &smd_rpm_regulators {
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-pm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pm8916.dtsi
+index 8cac23b5240c6..6eb5e0a395100 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-pm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916-pm8916.dtsi
+@@ -20,17 +20,6 @@
+ pll-supply = <&pm8916_l7>;
+ };
+
+-&pronto {
+- vddpx-supply = <&pm8916_l7>;
+-
+- iris {
+- vddxo-supply = <&pm8916_l7>;
+- vddrfa-supply = <&pm8916_s3>;
+- vddpa-supply = <&pm8916_l9>;
+- vdddig-supply = <&pm8916_l5>;
+- };
+-};
+-
+ &sdhc_1 {
+ vmmc-supply = <&pm8916_l8>;
+ vqmmc-supply = <&pm8916_l5>;
+@@ -46,6 +35,17 @@
+ v3p3-supply = <&pm8916_l13>;
+ };
+
++&wcnss {
++ vddpx-supply = <&pm8916_l7>;
++};
++
++&wcnss_iris {
++ vddxo-supply = <&pm8916_l7>;
++ vddrfa-supply = <&pm8916_s3>;
++ vddpa-supply = <&pm8916_l9>;
++ vdddig-supply = <&pm8916_l5>;
++};
++
+ &rpm_requests {
+ smd_rpm_regulators: regulators {
+ compatible = "qcom,rpm-pm8916-regulators";
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
+index a2ed7bdbf528f..16d67749960e0 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
+@@ -252,10 +252,6 @@
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+
+-&pronto {
+- status = "okay";
+-};
+-
+ &sdhc_1 {
+ status = "okay";
+
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts
+index c691cca2eb459..a1ca4d8834201 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts
+@@ -112,6 +112,14 @@
+ status = "okay";
+ };
+
++&wcnss {
++ status = "okay";
++};
++
++&wcnss_iris {
++ compatible = "qcom,wcn3620";
++};
++
+ &msmgpio {
+ panel_vdd3_default: panel-vdd3-default-state {
+ pins = "gpio9";
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
+index 3dd819458785d..4e10b8a5e9f9c 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
+@@ -54,12 +54,6 @@
+ status = "okay";
+ };
+
+-&pronto {
+- iris {
+- compatible = "qcom,wcn3660b";
+- };
+-};
+-
+ &touchkey {
+ vcc-supply = <&reg_touch_key>;
+ vdd-supply = <&reg_touch_key>;
+@@ -69,6 +63,14 @@
+ status = "okay";
+ };
+
++&wcnss {
++ status = "okay";
++};
++
++&wcnss_iris {
++ compatible = "qcom,wcn3660b";
++};
++
+ &msmgpio {
+ tkey_en_default: tkey-en-default-state {
+ pins = "gpio97";
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-e2015-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-e2015-common.dtsi
+index c95f0b4bc61f3..f6c4a011fdfd2 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-e2015-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-e2015-common.dtsi
+@@ -58,6 +58,14 @@
+ vdd-supply = <&reg_touch_key>;
+ };
+
++&wcnss {
++ status = "okay";
++};
++
++&wcnss_iris {
++ compatible = "qcom,wcn3620";
++};
++
+ &msmgpio {
+ tkey_en_default: tkey-en-default-state {
+ pins = "gpio97";
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-gt5-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-gt5-common.dtsi
+index d920b7247d823..74ffd04db8d84 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-gt5-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-gt5-common.dtsi
+@@ -125,14 +125,6 @@
+ status = "okay";
+ };
+
+-&pronto {
+- status = "okay";
+-
+- iris {
+- compatible = "qcom,wcn3660b";
+- };
+-};
+-
+ &sdhc_1 {
+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
+@@ -162,6 +154,14 @@
+ extcon = <&pm8916_usbin>;
+ };
+
++&wcnss {
++ status = "okay";
++};
++
++&wcnss_iris {
++ compatible = "qcom,wcn3660b";
++};
++
+ &smd_rpm_regulators {
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-j5-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-j5-common.dtsi
+index f3b81b6f0a2f1..adeee0830e768 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-j5-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-j5-common.dtsi
+@@ -93,10 +93,6 @@
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+
+-&pronto {
+- status = "okay";
+-};
+-
+ &sdhc_1 {
+ status = "okay";
+
+@@ -124,6 +120,14 @@
+ extcon = <&muic>;
+ };
+
++&wcnss {
++ status = "okay";
++};
++
++&wcnss_iris {
++ compatible = "qcom,wcn3620";
++};
++
+ &smd_rpm_regulators {
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts
+index d4984b3af8023..1a41a4db874da 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-serranove.dts
+@@ -272,14 +272,6 @@
+ status = "okay";
+ };
+
+-&pronto {
+- status = "okay";
+-
+- iris {
+- compatible = "qcom,wcn3660b";
+- };
+-};
+-
+ &sdhc_1 {
+ status = "okay";
+
+@@ -320,6 +312,14 @@
+ extcon = <&muic>;
+ };
+
++&wcnss {
++ status = "okay";
++};
++
++&wcnss_iris {
++ compatible = "qcom,wcn3660b";
++};
++
+ &smd_rpm_regulators {
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-ufi.dtsi b/arch/arm64/boot/dts/qcom/msm8916-ufi.dtsi
+index cdf34b74fa8fa..50bae6f214f1f 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-ufi.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916-ufi.dtsi
+@@ -99,10 +99,6 @@
+ status = "okay";
+ };
+
+-&pronto {
+- status = "okay";
+-};
+-
+ &sdhc_1 {
+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
+@@ -122,6 +118,14 @@
+ extcon = <&pm8916_usbin>;
+ };
+
++&wcnss {
++ status = "okay";
++};
++
++&wcnss_iris {
++ compatible = "qcom,wcn3620";
++};
++
+ &smd_rpm_regulators {
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts
+index a87be1d95b14b..ac56c7595f78a 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts
+@@ -153,10 +153,6 @@
+ status = "okay";
+ };
+
+-&pronto {
+- status = "okay";
+-};
+-
+ &sdhc_1 {
+ status = "okay";
+
+@@ -184,6 +180,14 @@
+ extcon = <&usb_id>;
+ };
+
++&wcnss {
++ status = "okay";
++};
++
++&wcnss_iris {
++ compatible = "qcom,wcn3620";
++};
++
+ &smd_rpm_regulators {
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 0d5283805f42c..7cc3d0d92cb9e 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -1161,7 +1161,7 @@
+ };
+ };
+
+- camss: camss@1b00000 {
++ camss: camss@1b0ac00 {
+ compatible = "qcom,msm8916-camss";
+ reg = <0x01b0ac00 0x200>,
+ <0x01b00030 0x4>,
+@@ -1553,7 +1553,7 @@
+ #sound-dai-cells = <1>;
+ };
+
+- sdhc_1: mmc@7824000 {
++ sdhc_1: mmc@7824900 {
+ compatible = "qcom,msm8916-sdhci", "qcom,sdhci-msm-v4";
+ reg = <0x07824900 0x11c>, <0x07824000 0x800>;
+ reg-names = "hc", "core";
+@@ -1571,7 +1571,7 @@
+ status = "disabled";
+ };
+
+- sdhc_2: mmc@7864000 {
++ sdhc_2: mmc@7864900 {
+ compatible = "qcom,msm8916-sdhci", "qcom,sdhci-msm-v4";
+ reg = <0x07864900 0x11c>, <0x07864000 0x800>;
+ reg-names = "hc", "core";
+@@ -1870,7 +1870,7 @@
+ };
+ };
+
+- pronto: remoteproc@a21b000 {
++ wcnss: remoteproc@a204000 {
+ compatible = "qcom,pronto-v2-pil", "qcom,pronto";
+ reg = <0x0a204000 0x2000>, <0x0a202000 0x1000>, <0x0a21b000 0x3000>;
+ reg-names = "ccu", "dxe", "pmu";
+@@ -1896,9 +1896,8 @@
+
+ status = "disabled";
+
+- iris {
+- compatible = "qcom,wcn3620";
+-
++ wcnss_iris: iris {
++ /* Separate chip, compatible is board-specific */
+ clocks = <&rpmcc RPM_SMD_RF_CLK2>;
+ clock-names = "xo";
+ };
+@@ -1916,13 +1915,13 @@
+ compatible = "qcom,wcnss";
+ qcom,smd-channels = "WCNSS_CTRL";
+
+- qcom,mmio = <&pronto>;
++ qcom,mmio = <&wcnss>;
+
+- bluetooth {
++ wcnss_bt: bluetooth {
+ compatible = "qcom,wcnss-bt";
+ };
+
+- wifi {
++ wcnss_wifi: wifi {
+ compatible = "qcom,wcnss-wlan";
+
+ interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
+index 610f3e3fc0c22..7001f6b0b9f9a 100644
+--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
+@@ -878,7 +878,7 @@
+ };
+ };
+
+- apps_iommu: iommu@1e00000 {
++ apps_iommu: iommu@1e20000 {
+ compatible = "qcom,msm8953-iommu", "qcom,msm-iommu-v1";
+ ranges = <0 0x1e20000 0x20000>;
+
+diff --git a/arch/arm64/boot/dts/qcom/msm8976.dtsi b/arch/arm64/boot/dts/qcom/msm8976.dtsi
+index e55baafd9efd0..e1617b9a73df2 100644
+--- a/arch/arm64/boot/dts/qcom/msm8976.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8976.dtsi
+@@ -821,7 +821,7 @@
+ cell-index = <0>;
+ };
+
+- sdhc_1: mmc@7824000 {
++ sdhc_1: mmc@7824900 {
+ compatible = "qcom,msm8976-sdhci", "qcom,sdhci-msm-v4";
+ reg = <0x07824900 0x500>, <0x07824000 0x800>;
+ reg-names = "hc", "core";
+@@ -837,7 +837,7 @@
+ status = "disabled";
+ };
+
+- sdhc_2: mmc@7864000 {
++ sdhc_2: mmc@7864900 {
+ compatible = "qcom,msm8976-sdhci", "qcom,sdhci-msm-v4";
+ reg = <0x07864900 0x11c>, <0x07864000 0x800>;
+ reg-names = "hc", "core";
+@@ -956,7 +956,7 @@
+ #reset-cells = <1>;
+ };
+
+- sdhc_3: mmc@7a24000 {
++ sdhc_3: mmc@7a24900 {
+ compatible = "qcom,msm8976-sdhci", "qcom,sdhci-msm-v4";
+ reg = <0x07a24900 0x11c>, <0x07a24000 0x800>;
+ reg-names = "hc", "core";
+diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
+index 24c3fced8df71..5ce23306fd844 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
+@@ -745,7 +745,7 @@
+ reg = <0xfc4ab000 0x4>;
+ };
+
+- spmi_bus: spmi@fc4c0000 {
++ spmi_bus: spmi@fc4cf000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0xfc4cf000 0x1000>,
+ <0xfc4cb000 0x1000>,
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 73da1a4d52462..771db923b2e33 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -2069,7 +2069,7 @@
+ };
+ };
+
+- camss: camss@a00000 {
++ camss: camss@a34000 {
+ compatible = "qcom,msm8996-camss";
+ reg = <0x00a34000 0x1000>,
+ <0x00a00030 0x4>,
+diff --git a/arch/arm64/boot/dts/qcom/pm7250b.dtsi b/arch/arm64/boot/dts/qcom/pm7250b.dtsi
+index d709d955a2f5a..daa6f1d30efa0 100644
+--- a/arch/arm64/boot/dts/qcom/pm7250b.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm7250b.dtsi
+@@ -3,6 +3,7 @@
+ * Copyright (C) 2022 Luca Weiss <luca.weiss@fairphone.com>
+ */
+
++#include <dt-bindings/iio/qcom,spmi-vadc.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/spmi/spmi.h>
+
+diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi
+index adbba9f4089ab..13925ac44669d 100644
+--- a/arch/arm64/boot/dts/qcom/pm8998.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi
+@@ -55,7 +55,7 @@
+
+ pm8998_resin: resin {
+ compatible = "qcom,pm8941-resin";
+- interrupts = <GIC_SPI 0x8 1 IRQ_TYPE_EDGE_BOTH>;
++ interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ bias-pull-up;
+ status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/qdu1000.dtsi b/arch/arm64/boot/dts/qcom/qdu1000.dtsi
+index c72a51c32a300..eeb4e51b31cfc 100644
+--- a/arch/arm64/boot/dts/qcom/qdu1000.dtsi
++++ b/arch/arm64/boot/dts/qcom/qdu1000.dtsi
+@@ -1238,6 +1238,7 @@
+ qcom,tcs-config = <ACTIVE_TCS 2>, <SLEEP_TCS 3>,
+ <WAKE_TCS 3>, <CONTROL_TCS 0>;
+ label = "apps_rsc";
++ power-domains = <&CLUSTER_PD>;
+
+ apps_bcm_voter: bcm-voter {
+ compatible = "qcom,bcm-voter";
+diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+index 5827cda270a0e..07c720c5721d4 100644
+--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+@@ -1893,7 +1893,7 @@
+ };
+ };
+
+- camss: camss@ca00000 {
++ camss: camss@ca00020 {
+ compatible = "qcom,sdm660-camss";
+ reg = <0x0ca00020 0x10>,
+ <0x0ca30000 0x100>,
+diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
+index 02f14692dd9da..50dd050eb132d 100644
+--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
+@@ -1155,6 +1155,7 @@
+ <SLEEP_TCS 3>,
+ <WAKE_TCS 3>,
+ <CONTROL_TCS 1>;
++ power-domains = <&CLUSTER_PD>;
+
+ apps_bcm_voter: bcm-voter {
+ compatible = "qcom,bcm-voter";
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
+index 56f2d855df78d..406f0224581a7 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
+@@ -483,6 +483,7 @@
+ };
+
+ rmi4-f12@12 {
++ reg = <0x12>;
+ syna,rezero-wait-ms = <0xc8>;
+ syna,clip-x-high = <0x438>;
+ syna,clip-y-high = <0x870>;
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index c5e92851a4f08..a818a81408dfb 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -4158,7 +4158,7 @@
+ #reset-cells = <1>;
+ };
+
+- camss: camss@a00000 {
++ camss: camss@acb3000 {
+ compatible = "qcom,sdm845-camss";
+
+ reg = <0 0x0acb3000 0 0x1000>,
+@@ -5058,6 +5058,7 @@
+ <SLEEP_TCS 3>,
+ <WAKE_TCS 3>,
+ <CONTROL_TCS 1>;
++ power-domains = <&CLUSTER_PD>;
+
+ apps_bcm_voter: bcm-voter {
+ compatible = "qcom,bcm-voter";
+diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi
+index fbd67d2c8d781..62b1c1674a68d 100644
+--- a/arch/arm64/boot/dts/qcom/sm6115.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi
+@@ -693,7 +693,7 @@
+ #interrupt-cells = <4>;
+ };
+
+- tsens0: thermal-sensor@4410000 {
++ tsens0: thermal-sensor@4411000 {
+ compatible = "qcom,sm6115-tsens", "qcom,tsens-v2";
+ reg = <0x0 0x04411000 0x0 0x1ff>, /* TM */
+ <0x0 0x04410000 0x0 0x8>; /* SROT */
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+index b9c982a059dfb..c0f22a3bea5ce 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+@@ -26,9 +26,10 @@
+ framebuffer: framebuffer@9c000000 {
+ compatible = "simple-framebuffer";
+ reg = <0 0x9c000000 0 0x2300000>;
+- width = <1644>;
+- height = <3840>;
+- stride = <(1644 * 4)>;
++ /* pdx203 BL initializes in 2.5k mode, not 4k */
++ width = <1096>;
++ height = <2560>;
++ stride = <(1096 * 4)>;
+ format = "a8r8g8b8";
+ /*
+ * That's a lot of clocks, but it's necessary due
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index 9cb52d7efdd8d..1a258a5461acf 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -892,7 +892,7 @@
+ };
+ };
+
+- gpi_dma0: dma-controller@900000 {
++ gpi_dma0: dma-controller@9800000 {
+ compatible = "qcom,sm8350-gpi-dma", "qcom,sm6350-gpi-dma";
+ reg = <0 0x09800000 0 0x60000>;
+ interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+@@ -1625,7 +1625,7 @@
+ status = "disabled";
+ };
+
+- pcie1_phy: phy@1c0f000 {
++ pcie1_phy: phy@1c0e000 {
+ compatible = "qcom,sm8350-qmp-gen3x2-pcie-phy";
+ reg = <0 0x01c0e000 0 0x2000>;
+ clocks = <&gcc GCC_PCIE_1_AUX_CLK>,
+diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
+index 90e3fb11e6e70..75bd5f2ae681f 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
+@@ -1843,8 +1843,8 @@
+ <&apps_smmu 0x481 0x0>;
+ };
+
+- crypto: crypto@1de0000 {
+- compatible = "qcom,sm8550-qce";
++ crypto: crypto@1dfa000 {
++ compatible = "qcom,sm8550-qce", "qcom,sm8150-qce", "qcom,qce";
+ reg = <0x0 0x01dfa000 0x0 0x6000>;
+ dmas = <&cryptobam 4>, <&cryptobam 5>;
+ dma-names = "rx", "tx";
+@@ -2441,6 +2441,10 @@
+
+ resets = <&gcc GCC_USB30_PRIM_BCR>;
+
++ interconnects = <&aggre1_noc MASTER_USB3_0 0 &mc_virt SLAVE_EBI1 0>,
++ <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_0 0>;
++ interconnect-names = "usb-ddr", "apps-usb";
++
+ status = "disabled";
+
+ usb_1_dwc3: usb@a600000 {
+@@ -2536,7 +2540,7 @@
+ #interrupt-cells = <4>;
+ };
+
+- tlmm: pinctrl@f000000 {
++ tlmm: pinctrl@f100000 {
+ compatible = "qcom,sm8550-tlmm";
+ reg = <0 0x0f100000 0 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+@@ -3250,6 +3254,7 @@
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <ACTIVE_TCS 3>, <SLEEP_TCS 2>,
+ <WAKE_TCS 2>, <CONTROL_TCS 0>;
++ power-domains = <&CLUSTER_PD>;
+
+ apps_bcm_voter: bcm-voter {
+ compatible = "qcom,bcm-voter";
+diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+index efc80960380f4..c78b7a5c2e2aa 100644
+--- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
++++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+@@ -367,7 +367,7 @@
+ };
+
+ scif1_pins: scif1 {
+- groups = "scif1_data_b", "scif1_ctrl";
++ groups = "scif1_data_b";
+ function = "scif1";
+ };
+
+@@ -397,7 +397,6 @@
+ &scif1 {
+ pinctrl-0 = <&scif1_pins>;
+ pinctrl-names = "default";
+- uart-has-rtscts;
+
+ status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/ti/k3-am69-sk.dts b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
+index bc49ba534790e..f364b7803115d 100644
+--- a/arch/arm64/boot/dts/ti/k3-am69-sk.dts
++++ b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
+@@ -23,7 +23,7 @@
+ aliases {
+ serial2 = &main_uart8;
+ mmc1 = &main_sdhci1;
+- i2c0 = &main_i2c0;
++ i2c3 = &main_i2c0;
+ };
+
+ memory@80000000 {
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+index 0d39d6b8cc0ca..63633e4f6c59f 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
++++ b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+@@ -83,25 +83,25 @@
+ &wkup_pmx2 {
+ mcu_cpsw_pins_default: mcu-cpsw-pins-default {
+ pinctrl-single,pins = <
+- J721E_WKUP_IOPAD(0x0068, PIN_OUTPUT, 0) /* MCU_RGMII1_TX_CTL */
+- J721E_WKUP_IOPAD(0x006c, PIN_INPUT, 0) /* MCU_RGMII1_RX_CTL */
+- J721E_WKUP_IOPAD(0x0070, PIN_OUTPUT, 0) /* MCU_RGMII1_TD3 */
+- J721E_WKUP_IOPAD(0x0074, PIN_OUTPUT, 0) /* MCU_RGMII1_TD2 */
+- J721E_WKUP_IOPAD(0x0078, PIN_OUTPUT, 0) /* MCU_RGMII1_TD1 */
+- J721E_WKUP_IOPAD(0x007c, PIN_OUTPUT, 0) /* MCU_RGMII1_TD0 */
+- J721E_WKUP_IOPAD(0x0088, PIN_INPUT, 0) /* MCU_RGMII1_RD3 */
+- J721E_WKUP_IOPAD(0x008c, PIN_INPUT, 0) /* MCU_RGMII1_RD2 */
+- J721E_WKUP_IOPAD(0x0090, PIN_INPUT, 0) /* MCU_RGMII1_RD1 */
+- J721E_WKUP_IOPAD(0x0094, PIN_INPUT, 0) /* MCU_RGMII1_RD0 */
+- J721E_WKUP_IOPAD(0x0080, PIN_OUTPUT, 0) /* MCU_RGMII1_TXC */
+- J721E_WKUP_IOPAD(0x0084, PIN_INPUT, 0) /* MCU_RGMII1_RXC */
++ J721E_WKUP_IOPAD(0x0000, PIN_OUTPUT, 0) /* MCU_RGMII1_TX_CTL */
++ J721E_WKUP_IOPAD(0x0004, PIN_INPUT, 0) /* MCU_RGMII1_RX_CTL */
++ J721E_WKUP_IOPAD(0x0008, PIN_OUTPUT, 0) /* MCU_RGMII1_TD3 */
++ J721E_WKUP_IOPAD(0x000c, PIN_OUTPUT, 0) /* MCU_RGMII1_TD2 */
++ J721E_WKUP_IOPAD(0x0010, PIN_OUTPUT, 0) /* MCU_RGMII1_TD1 */
++ J721E_WKUP_IOPAD(0x0014, PIN_OUTPUT, 0) /* MCU_RGMII1_TD0 */
++ J721E_WKUP_IOPAD(0x0020, PIN_INPUT, 0) /* MCU_RGMII1_RD3 */
++ J721E_WKUP_IOPAD(0x0024, PIN_INPUT, 0) /* MCU_RGMII1_RD2 */
++ J721E_WKUP_IOPAD(0x0028, PIN_INPUT, 0) /* MCU_RGMII1_RD1 */
++ J721E_WKUP_IOPAD(0x002c, PIN_INPUT, 0) /* MCU_RGMII1_RD0 */
++ J721E_WKUP_IOPAD(0x0018, PIN_OUTPUT, 0) /* MCU_RGMII1_TXC */
++ J721E_WKUP_IOPAD(0x001c, PIN_INPUT, 0) /* MCU_RGMII1_RXC */
+ >;
+ };
+
+ mcu_mdio_pins_default: mcu-mdio1-pins-default {
+ pinctrl-single,pins = <
+- J721E_WKUP_IOPAD(0x009c, PIN_OUTPUT, 0) /* (L1) MCU_MDIO0_MDC */
+- J721E_WKUP_IOPAD(0x0098, PIN_INPUT, 0) /* (L4) MCU_MDIO0_MDIO */
++ J721E_WKUP_IOPAD(0x0034, PIN_OUTPUT, 0) /* (L1) MCU_MDIO0_MDC */
++ J721E_WKUP_IOPAD(0x0030, PIN_INPUT, 0) /* (L4) MCU_MDIO0_MDIO */
+ >;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts b/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts
+index 37c24b077b6aa..8a62ac263b89a 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts
++++ b/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts
+@@ -936,6 +936,7 @@
+ };
+
+ &mailbox0_cluster0 {
++ status = "okay";
+ interrupts = <436>;
+
+ mbox_mcu_r5fss0_core0: mbox-mcu-r5fss0-core0 {
+@@ -950,6 +951,7 @@
+ };
+
+ &mailbox0_cluster1 {
++ status = "okay";
+ interrupts = <432>;
+
+ mbox_main_r5fss0_core0: mbox-main-r5fss0-core0 {
+@@ -964,6 +966,7 @@
+ };
+
+ &mailbox0_cluster2 {
++ status = "okay";
+ interrupts = <428>;
+
+ mbox_main_r5fss1_core0: mbox-main-r5fss1-core0 {
+@@ -978,6 +981,7 @@
+ };
+
+ &mailbox0_cluster3 {
++ status = "okay";
+ interrupts = <424>;
+
+ mbox_c66_0: mbox-c66-0 {
+@@ -992,6 +996,7 @@
+ };
+
+ &mailbox0_cluster4 {
++ status = "okay";
+ interrupts = <420>;
+
+ mbox_c71_0: mbox-c71-0 {
+diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
+index 8cd4a7ecc121e..ed575f17935b6 100644
+--- a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
++++ b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
+@@ -22,7 +22,7 @@
+ aliases {
+ serial2 = &main_uart8;
+ mmc1 = &main_sdhci1;
+- i2c0 = &main_i2c0;
++ i2c3 = &main_i2c0;
+ };
+
+ memory@80000000 {
+@@ -140,6 +140,32 @@
+ };
+ };
+
++&wkup_pmx2 {
++ mcu_cpsw_pins_default: mcu-cpsw-pins-default {
++ pinctrl-single,pins = <
++ J784S4_WKUP_IOPAD(0x02c, PIN_INPUT, 0) /* (A35) MCU_RGMII1_RD0 */
++ J784S4_WKUP_IOPAD(0x028, PIN_INPUT, 0) /* (B36) MCU_RGMII1_RD1 */
++ J784S4_WKUP_IOPAD(0x024, PIN_INPUT, 0) /* (C36) MCU_RGMII1_RD2 */
++ J784S4_WKUP_IOPAD(0x020, PIN_INPUT, 0) /* (D36) MCU_RGMII1_RD3 */
++ J784S4_WKUP_IOPAD(0x01c, PIN_INPUT, 0) /* (B37) MCU_RGMII1_RXC */
++ J784S4_WKUP_IOPAD(0x004, PIN_INPUT, 0) /* (C37) MCU_RGMII1_RX_CTL */
++ J784S4_WKUP_IOPAD(0x014, PIN_OUTPUT, 0) /* (D37) MCU_RGMII1_TD0 */
++ J784S4_WKUP_IOPAD(0x010, PIN_OUTPUT, 0) /* (D38) MCU_RGMII1_TD1 */
++ J784S4_WKUP_IOPAD(0x00c, PIN_OUTPUT, 0) /* (E37) MCU_RGMII1_TD2 */
++ J784S4_WKUP_IOPAD(0x008, PIN_OUTPUT, 0) /* (E38) MCU_RGMII1_TD3 */
++ J784S4_WKUP_IOPAD(0x018, PIN_OUTPUT, 0) /* (E36) MCU_RGMII1_TXC */
++ J784S4_WKUP_IOPAD(0x000, PIN_OUTPUT, 0) /* (C38) MCU_RGMII1_TX_CTL */
++ >;
++ };
++
++ mcu_mdio_pins_default: mcu-mdio-pins-default {
++ pinctrl-single,pins = <
++ J784S4_WKUP_IOPAD(0x034, PIN_OUTPUT, 0) /* (A36) MCU_MDIO0_MDC */
++ J784S4_WKUP_IOPAD(0x030, PIN_INPUT, 0) /* (B35) MCU_MDIO0_MDIO */
++ >;
++ };
++};
++
+ &main_uart8 {
+ status = "okay";
+ pinctrl-names = "default";
+@@ -194,3 +220,27 @@
+ &main_gpio0 {
+ status = "okay";
+ };
++
++&mcu_cpsw {
++ status = "okay";
++ pinctrl-names = "default";
++ pinctrl-0 = <&mcu_cpsw_pins_default>;
++};
++
++&davinci_mdio {
++ pinctrl-names = "default";
++ pinctrl-0 = <&mcu_mdio_pins_default>;
++
++ mcu_phy0: ethernet-phy@0 {
++ reg = <0>;
++ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
++ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
++ ti,min-output-impedance;
++ };
++};
++
++&mcu_cpsw_port1 {
++ status = "okay";
++ phy-mode = "rgmii-rxid";
++ phy-handle = <&mcu_phy0>;
++};
+diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
+index 64bd3dee14aa6..8a2350f2c82d0 100644
+--- a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
+@@ -50,7 +50,34 @@
+ wkup_pmx0: pinctrl@4301c000 {
+ compatible = "pinctrl-single";
+ /* Proxy 0 addressing */
+- reg = <0x00 0x4301c000 0x00 0x178>;
++ reg = <0x00 0x4301c000 0x00 0x034>;
++ #pinctrl-cells = <1>;
++ pinctrl-single,register-width = <32>;
++ pinctrl-single,function-mask = <0xffffffff>;
++ };
++
++ wkup_pmx1: pinctrl@4301c038 {
++ compatible = "pinctrl-single";
++ /* Proxy 0 addressing */
++ reg = <0x00 0x4301c038 0x00 0x02c>;
++ #pinctrl-cells = <1>;
++ pinctrl-single,register-width = <32>;
++ pinctrl-single,function-mask = <0xffffffff>;
++ };
++
++ wkup_pmx2: pinctrl@4301c068 {
++ compatible = "pinctrl-single";
++ /* Proxy 0 addressing */
++ reg = <0x00 0x4301c068 0x00 0x120>;
++ #pinctrl-cells = <1>;
++ pinctrl-single,register-width = <32>;
++ pinctrl-single,function-mask = <0xffffffff>;
++ };
++
++ wkup_pmx3: pinctrl@4301c190 {
++ compatible = "pinctrl-single";
++ /* Proxy 0 addressing */
++ reg = <0x00 0x4301c190 0x00 0x004>;
+ #pinctrl-cells = <1>;
+ pinctrl-single,register-width = <32>;
+ pinctrl-single,function-mask = <0xffffffff>;
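
The k3-j784s4-mcu-wakeup.dtsi hunk above splits the single 0x178-byte wkup_pmx0 window into four smaller pinctrl-single regions; the MCU_CPSW and MDIO pads added to the EVM earlier in this series then land in wkup_pmx2 at offsets 0x000 through 0x034. As a quick stand-alone check of the numbers taken from the hunk (the reading that the gaps are registers the pin controller should not expose is an assumption), the absolute windows and the holes between them work out as follows:

#include <stdio.h>

int main(void)
{
	/* base/size pairs copied from the new wkup_pmx0..3 nodes */
	struct { const char *name; unsigned long base, size; } pmx[] = {
		{ "wkup_pmx0", 0x4301c000UL, 0x034 },
		{ "wkup_pmx1", 0x4301c038UL, 0x02c },
		{ "wkup_pmx2", 0x4301c068UL, 0x120 },
		{ "wkup_pmx3", 0x4301c190UL, 0x004 },
	};
	unsigned long prev_end = 0;

	for (unsigned int i = 0; i < sizeof(pmx) / sizeof(pmx[0]); i++) {
		unsigned long end = pmx[i].base + pmx[i].size;

		if (prev_end && pmx[i].base > prev_end)
			printf("  hole: 0x%lx-0x%lx\n", prev_end, pmx[i].base - 1);
		printf("%s: 0x%lx-0x%lx\n", pmx[i].name, pmx[i].base, end - 1);
		prev_end = end;
	}
	return 0;
}

The output shows small holes at 0x4301c034, 0x4301c064 and 0x4301c188, which the old single window covered blindly.
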
+diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
+index cd03819a3b686..cdf6a35e39944 100644
+--- a/arch/arm64/include/asm/fpsimdmacros.h
++++ b/arch/arm64/include/asm/fpsimdmacros.h
+@@ -316,12 +316,12 @@
+ _for n, 0, 15, _sve_str_p \n, \nxbase, \n - 16
+ cbz \save_ffr, 921f
+ _sve_rdffr 0
+- _sve_str_p 0, \nxbase
+- _sve_ldr_p 0, \nxbase, -16
+ b 922f
+ 921:
+- str xzr, [x\nxbase] // Zero out FFR
++ _sve_pfalse 0 // Zero out FFR
+ 922:
++ _sve_str_p 0, \nxbase
++ _sve_ldr_p 0, \nxbase, -16
+ mrs x\nxtmp, fpsr
+ str w\nxtmp, [\xpfpsr]
+ mrs x\nxtmp, fpcr
+diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
+index 239c4537484d0..2b1284c6c64a6 100644
+--- a/arch/mips/boot/dts/ingenic/ci20.dts
++++ b/arch/mips/boot/dts/ingenic/ci20.dts
+@@ -237,59 +237,49 @@
+ act8600: act8600@5a {
+ compatible = "active-semi,act8600";
+ reg = <0x5a>;
+- status = "okay";
+
+ regulators {
+- vddcore: SUDCDC1 {
+- regulator-name = "DCDC_REG1";
++ vddcore: DCDC1 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ };
+- vddmem: SUDCDC2 {
+- regulator-name = "DCDC_REG2";
++ vddmem: DCDC2 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ };
+- vcc_33: SUDCDC3 {
+- regulator-name = "DCDC_REG3";
++ vcc_33: DCDC3 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+- vcc_50: SUDCDC4 {
+- regulator-name = "SUDCDC_REG4";
++ vcc_50: SUDCDC_REG4 {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+- vcc_25: LDO_REG5 {
+- regulator-name = "LDO_REG5";
++ vcc_25: LDO5 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+- wifi_io: LDO_REG6 {
+- regulator-name = "LDO_REG6";
++ wifi_io: LDO6 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+- vcc_28: LDO_REG7 {
+- regulator-name = "LDO_REG7";
++ cim_io_28: LDO7 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ };
+- vcc_15: LDO_REG8 {
+- regulator-name = "LDO_REG8";
++ cim_io_15: LDO8 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ };
+ vrtc_18: LDO_REG9 {
+- regulator-name = "LDO_REG9";
+ /* Despite the datasheet stating 3.3V
+ * for REG9 and the driver expecting that,
+ * REG9 outputs 1.8V.
+@@ -303,7 +293,6 @@
+ regulator-always-on;
+ };
+ vcc_11: LDO_REG10 {
+- regulator-name = "LDO_REG10";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
+index 0ec1581619db5..cf770d86c03c6 100644
+--- a/arch/powerpc/kernel/interrupt.c
++++ b/arch/powerpc/kernel/interrupt.c
+@@ -368,7 +368,6 @@ void preempt_schedule_irq(void);
+
+ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
+ {
+- unsigned long flags;
+ unsigned long ret = 0;
+ unsigned long kuap;
+ bool stack_store = read_thread_flags() & _TIF_EMULATE_STACK_STORE;
+@@ -392,7 +391,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
+
+ kuap = kuap_get_and_assert_locked();
+
+- local_irq_save(flags);
++ local_irq_disable();
+
+ if (!arch_irq_disabled_regs(regs)) {
+ /* Returning to a kernel context with local irqs enabled. */
+diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S
+index 49813f9824681..a9b9c32d0c1ff 100644
+--- a/arch/powerpc/kernel/ppc_save_regs.S
++++ b/arch/powerpc/kernel/ppc_save_regs.S
+@@ -31,10 +31,10 @@ _GLOBAL(ppc_save_regs)
+ lbz r0,PACAIRQSOFTMASK(r13)
+ PPC_STL r0,SOFTE(r3)
+ #endif
+- /* go up one stack frame for SP */
+- PPC_LL r4,0(r1)
+- PPC_STL r4,GPR1(r3)
++ /* store current SP */
++ PPC_STL r1,GPR1(r3)
+ /* get caller's LR */
++ PPC_LL r4,0(r1)
+ PPC_LL r0,LRSAVE(r4)
+ PPC_STL r0,_LINK(r3)
+ mflr r0
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index c114c7f25645c..7a718ed32b277 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -264,8 +264,9 @@ static void prepare_save_user_regs(int ctx_has_vsx_region)
+ #endif
+ }
+
+-static int __unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
+- struct mcontext __user *tm_frame, int ctx_has_vsx_region)
++static __always_inline int
++__unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
++ struct mcontext __user *tm_frame, int ctx_has_vsx_region)
+ {
+ unsigned long msr = regs->msr;
+
+@@ -364,8 +365,9 @@ static void prepare_save_tm_user_regs(void)
+ current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
+ }
+
+-static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
+- struct mcontext __user *tm_frame, unsigned long msr)
++static __always_inline int
++save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
++ struct mcontext __user *tm_frame, unsigned long msr)
+ {
+ /* Save both sets of general registers */
+ unsafe_save_general_regs(&current->thread.ckpt_regs, frame, failed);
+@@ -444,8 +446,9 @@ failed:
+ #else
+ static void prepare_save_tm_user_regs(void) { }
+
+-static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
+- struct mcontext __user *tm_frame, unsigned long msr)
++static __always_inline int
++save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
++ struct mcontext __user *tm_frame, unsigned long msr)
+ {
+ return 0;
+ }
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index 2297aa764ecdb..e8db8c8efe359 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -745,9 +745,9 @@ static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
+ }
+
+ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
+- unsigned long end)
++ unsigned long end, bool direct)
+ {
+- unsigned long next;
++ unsigned long next, pages = 0;
+ pte_t *pte;
+
+ pte = pte_start + pte_index(addr);
+@@ -769,13 +769,16 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
+ }
+
+ pte_clear(&init_mm, addr, pte);
++ pages++;
+ }
++ if (direct)
++ update_page_count(mmu_virtual_psize, -pages);
+ }
+
+ static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
+- unsigned long end)
++ unsigned long end, bool direct)
+ {
+- unsigned long next;
++ unsigned long next, pages = 0;
+ pte_t *pte_base;
+ pmd_t *pmd;
+
+@@ -793,19 +796,22 @@ static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
+ continue;
+ }
+ pte_clear(&init_mm, addr, (pte_t *)pmd);
++ pages++;
+ continue;
+ }
+
+ pte_base = (pte_t *)pmd_page_vaddr(*pmd);
+- remove_pte_table(pte_base, addr, next);
++ remove_pte_table(pte_base, addr, next, direct);
+ free_pte_table(pte_base, pmd);
+ }
++ if (direct)
++ update_page_count(MMU_PAGE_2M, -pages);
+ }
+
+ static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
+- unsigned long end)
++ unsigned long end, bool direct)
+ {
+- unsigned long next;
++ unsigned long next, pages = 0;
+ pmd_t *pmd_base;
+ pud_t *pud;
+
+@@ -823,16 +829,20 @@ static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
+ continue;
+ }
+ pte_clear(&init_mm, addr, (pte_t *)pud);
++ pages++;
+ continue;
+ }
+
+ pmd_base = pud_pgtable(*pud);
+- remove_pmd_table(pmd_base, addr, next);
++ remove_pmd_table(pmd_base, addr, next, direct);
+ free_pmd_table(pmd_base, pud);
+ }
++ if (direct)
++ update_page_count(MMU_PAGE_1G, -pages);
+ }
+
+-static void __meminit remove_pagetable(unsigned long start, unsigned long end)
++static void __meminit remove_pagetable(unsigned long start, unsigned long end,
++ bool direct)
+ {
+ unsigned long addr, next;
+ pud_t *pud_base;
+@@ -861,7 +871,7 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
+ }
+
+ pud_base = p4d_pgtable(*p4d);
+- remove_pud_table(pud_base, addr, next);
++ remove_pud_table(pud_base, addr, next, direct);
+ free_pud_table(pud_base, p4d);
+ }
+
+@@ -884,7 +894,7 @@ int __meminit radix__create_section_mapping(unsigned long start,
+
+ int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
+ {
+- remove_pagetable(start, end);
++ remove_pagetable(start, end, true);
+ return 0;
+ }
+ #endif /* CONFIG_MEMORY_HOTPLUG */
+@@ -920,7 +930,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
+ #ifdef CONFIG_MEMORY_HOTPLUG
+ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
+ {
+- remove_pagetable(start, start + page_size);
++ remove_pagetable(start, start + page_size, false);
+ }
+ #endif
+ #endif
+diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
+index 05b0d584e50b8..fe1b83020e0df 100644
+--- a/arch/powerpc/mm/init_64.c
++++ b/arch/powerpc/mm/init_64.c
+@@ -189,7 +189,7 @@ static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long star
+ unsigned long nr_pfn = page_size / sizeof(struct page);
+ unsigned long start_pfn = page_to_pfn((struct page *)start);
+
+- if ((start_pfn + nr_pfn) > altmap->end_pfn)
++ if ((start_pfn + nr_pfn - 1) > altmap->end_pfn)
+ return true;
+
+ if (start_pfn < altmap->base_pfn)
+diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c
+index 7195133b26bb9..59882da3e7425 100644
+--- a/arch/powerpc/platforms/powernv/pci-sriov.c
++++ b/arch/powerpc/platforms/powernv/pci-sriov.c
+@@ -594,12 +594,12 @@ static void pnv_pci_sriov_disable(struct pci_dev *pdev)
+ struct pnv_iov_data *iov;
+
+ iov = pnv_iov_get(pdev);
+- num_vfs = iov->num_vfs;
+- base_pe = iov->vf_pe_arr[0].pe_number;
+-
+ if (WARN_ON(!iov))
+ return;
+
++ num_vfs = iov->num_vfs;
++ base_pe = iov->vf_pe_arr[0].pe_number;
++
+ /* Release VF PEs */
+ pnv_ioda_release_vf_PE(pdev);
+
+diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
+index 0072682531d80..b664838008c12 100644
+--- a/arch/powerpc/platforms/powernv/vas-window.c
++++ b/arch/powerpc/platforms/powernv/vas-window.c
+@@ -1310,8 +1310,8 @@ int vas_win_close(struct vas_window *vwin)
+ /* if send window, drop reference to matching receive window */
+ if (window->tx_win) {
+ if (window->user_win) {
+- put_vas_user_win_ref(&vwin->task_ref);
+ mm_context_remove_vas_window(vwin->task_ref.mm);
++ put_vas_user_win_ref(&vwin->task_ref);
+ }
+ put_rx_win(window->rxwin);
+ }
+diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
+index 513180467562b..9a44a98ba3420 100644
+--- a/arch/powerpc/platforms/pseries/vas.c
++++ b/arch/powerpc/platforms/pseries/vas.c
+@@ -507,8 +507,8 @@ static int vas_deallocate_window(struct vas_window *vwin)
+ vascaps[win->win_type].nr_open_windows--;
+ mutex_unlock(&vas_pseries_mutex);
+
+- put_vas_user_win_ref(&vwin->task_ref);
+ mm_context_remove_vas_window(vwin->task_ref.mm);
++ put_vas_user_win_ref(&vwin->task_ref);
+
+ kfree(win);
+ return 0;
+diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c
+index c976a21cd4bd5..194f166b2cc40 100644
+--- a/arch/riscv/kernel/probes/uprobes.c
++++ b/arch/riscv/kernel/probes/uprobes.c
+@@ -67,6 +67,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+ struct uprobe_task *utask = current->utask;
+
+ WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR);
++ current->thread.bad_cause = utask->autask.saved_cause;
+
+ instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);
+
+@@ -102,6 +103,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+ {
+ struct uprobe_task *utask = current->utask;
+
++ current->thread.bad_cause = utask->autask.saved_cause;
+ /*
+ * Task has received a fatal signal, so reset back to probbed
+ * address.
+diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
+index 055300e08fb38..3d191ec036fb7 100644
+--- a/arch/x86/coco/tdx/tdx.c
++++ b/arch/x86/coco/tdx/tdx.c
+@@ -840,6 +840,30 @@ static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
+ return true;
+ }
+
++static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
++ bool enc)
++{
++ /*
++ * Only handle shared->private conversion here.
++ * See the comment in tdx_early_init().
++ */
++ if (enc)
++ return tdx_enc_status_changed(vaddr, numpages, enc);
++ return true;
++}
++
++static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
++ bool enc)
++{
++ /*
++ * Only handle private->shared conversion here.
++ * See the comment in tdx_early_init().
++ */
++ if (!enc)
++ return tdx_enc_status_changed(vaddr, numpages, enc);
++ return true;
++}
++
+ void __init tdx_early_init(void)
+ {
+ u64 cc_mask;
+@@ -867,9 +891,30 @@ void __init tdx_early_init(void)
+ */
+ physical_mask &= cc_mask - 1;
+
+- x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
+- x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required;
+- x86_platform.guest.enc_status_change_finish = tdx_enc_status_changed;
++ /*
++ * The kernel mapping should match the TDX metadata for the page.
++ * load_unaligned_zeropad() can touch memory *adjacent* to that which is
++ * owned by the caller and can catch even _momentary_ mismatches. Bad
++ * things happen on mismatch:
++ *
++ * - Private mapping => Shared Page == Guest shutdown
++ * - Shared mapping => Private Page == Recoverable #VE
++ *
++ * guest.enc_status_change_prepare() converts the page from
++ * shared=>private before the mapping becomes private.
++ *
++ * guest.enc_status_change_finish() converts the page from
++ * private=>shared after the mapping becomes private.
++ *
++ * In both cases there is a temporary shared mapping to a private page,
++ * which can result in a #VE. But, there is never a private mapping to
++ * a shared page.
++ */
++ x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
++ x86_platform.guest.enc_status_change_finish = tdx_enc_status_change_finish;
++
++ x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
++ x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required;
+
+ pr_info("Guest detected\n");
+ }
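
The tdx.c hunk above splits the page-conversion callback so that the shared=>private transition happens in enc_status_change_prepare(), before the kernel mapping changes, and private=>shared happens in enc_status_change_finish(), after it changes. That ordering is what keeps a private mapping from ever pointing at a shared page. A stand-alone toy model of the ordering (not kernel code, names invented) makes the invariant explicit:

#include <assert.h>
#include <stdio.h>

enum state { SHARED, PRIVATE };

static enum state page_state    = SHARED;	/* what the TDX module tracks   */
static enum state mapping_state = SHARED;	/* how the kernel maps the page */

/* The invariant from the comment: never a private mapping to a shared page. */
static void check_invariant(void)
{
	assert(!(mapping_state == PRIVATE && page_state == SHARED));
}

/* shared -> private: convert the page *before* the mapping flips. */
static void set_memory_private_toy(void)
{
	page_state = PRIVATE;		/* enc_status_change_prepare()     */
	check_invariant();
	mapping_state = PRIVATE;	/* the page-table attribute change */
	check_invariant();
}

/* private -> shared: flip the mapping first, convert the page afterwards. */
static void set_memory_shared_toy(void)
{
	mapping_state = SHARED;		/* the page-table attribute change */
	check_invariant();
	page_state = SHARED;		/* enc_status_change_finish()      */
	check_invariant();
}

int main(void)
{
	set_memory_private_toy();
	set_memory_shared_toy();
	puts("ordering preserved the invariant");
	return 0;
}

Flipping either ordering makes the assert fire, which corresponds to the "Private mapping => Shared Page == Guest shutdown" case listed in the comment; the worst the chosen ordering allows is a momentary shared mapping to a private page, a recoverable #VE.
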
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index bccea57dee81e..abadd5f234254 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -374,7 +374,7 @@ static int amd_pmu_hw_config(struct perf_event *event)
+
+ /* pass precise event sampling to ibs: */
+ if (event->attr.precise_ip && get_ibs_caps())
+- return -ENOENT;
++ return forward_event_to_ibs(event);
+
+ if (has_branch_stack(event) && !x86_pmu.lbr_nr)
+ return -EOPNOTSUPP;
+diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
+index 64582954b5f67..3710148021916 100644
+--- a/arch/x86/events/amd/ibs.c
++++ b/arch/x86/events/amd/ibs.c
+@@ -190,7 +190,7 @@ static struct perf_ibs *get_ibs_pmu(int type)
+ }
+
+ /*
+- * Use IBS for precise event sampling:
++ * core pmu config -> IBS config
+ *
+ * perf record -a -e cpu-cycles:p ... # use ibs op counting cycle count
+ * perf record -a -e r076:p ... # same as -e cpu-cycles:p
+@@ -199,25 +199,9 @@ static struct perf_ibs *get_ibs_pmu(int type)
+ * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
+ * MSRC001_1033) is used to select either cycle or micro-ops counting
+ * mode.
+- *
+- * The rip of IBS samples has skid 0. Thus, IBS supports precise
+- * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
+- * rip is invalid when IBS was not able to record the rip correctly.
+- * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
+- *
+ */
+-static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
++static int core_pmu_ibs_config(struct perf_event *event, u64 *config)
+ {
+- switch (event->attr.precise_ip) {
+- case 0:
+- return -ENOENT;
+- case 1:
+- case 2:
+- break;
+- default:
+- return -EOPNOTSUPP;
+- }
+-
+ switch (event->attr.type) {
+ case PERF_TYPE_HARDWARE:
+ switch (event->attr.config) {
+@@ -243,22 +227,37 @@ static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
+ return -EOPNOTSUPP;
+ }
+
++/*
++ * The rip of IBS samples has skid 0. Thus, IBS supports precise
++ * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
++ * rip is invalid when IBS was not able to record the rip correctly.
++ * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
++ */
++int forward_event_to_ibs(struct perf_event *event)
++{
++ u64 config = 0;
++
++ if (!event->attr.precise_ip || event->attr.precise_ip > 2)
++ return -EOPNOTSUPP;
++
++ if (!core_pmu_ibs_config(event, &config)) {
++ event->attr.type = perf_ibs_op.pmu.type;
++ event->attr.config = config;
++ }
++ return -ENOENT;
++}
++
+ static int perf_ibs_init(struct perf_event *event)
+ {
+ struct hw_perf_event *hwc = &event->hw;
+ struct perf_ibs *perf_ibs;
+ u64 max_cnt, config;
+- int ret;
+
+ perf_ibs = get_ibs_pmu(event->attr.type);
+- if (perf_ibs) {
+- config = event->attr.config;
+- } else {
+- perf_ibs = &perf_ibs_op;
+- ret = perf_ibs_precise_event(event, &config);
+- if (ret)
+- return ret;
+- }
++ if (!perf_ibs)
++ return -ENOENT;
++
++ config = event->attr.config;
+
+ if (event->pmu != &perf_ibs->pmu)
+ return -ENOENT;
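
With the two hunks above, a precise core-PMU event such as "perf record -a -e cpu-cycles:p" is no longer rejected outright: amd_pmu_hw_config() calls forward_event_to_ibs(), which rewrites the event's type and config to point at the IBS op PMU and still returns -ENOENT, so the perf core retries the event against that PMU. A rough stand-alone sketch of this rewrite-and-decline idea (names, error numbers and the PMU type value are invented for illustration):

#include <stdint.h>
#include <stdio.h>

struct fake_attr { int type; uint64_t config; int precise_ip; };

/* type 0 / config 0 stand in for PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES */
static int forward_to_ibs(struct fake_attr *attr, int ibs_op_pmu_type)
{
	if (!attr->precise_ip || attr->precise_ip > 2)
		return -95;			/* like -EOPNOTSUPP            */

	if (attr->type == 0 && attr->config == 0) {
		attr->type = ibs_op_pmu_type;	/* retry against the IBS op PMU */
		attr->config = 0;		/* cycle-counting mode          */
	}
	return -2;				/* like -ENOENT: core PMU declines */
}

int main(void)
{
	struct fake_attr a = { .type = 0, .config = 0, .precise_ip = 1 };

	forward_to_ibs(&a, 8 /* made-up dynamic PMU type id */);
	printf("event retried on PMU type %d, config %#llx\n",
	       a.type, (unsigned long long)a.config);
	return 0;
}
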
+diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
+index f0eeaf6e5f5f7..1bae790a553a5 100644
+--- a/arch/x86/include/asm/mtrr.h
++++ b/arch/x86/include/asm/mtrr.h
+@@ -23,14 +23,43 @@
+ #ifndef _ASM_X86_MTRR_H
+ #define _ASM_X86_MTRR_H
+
++#include <linux/bits.h>
+ #include <uapi/asm/mtrr.h>
+
++/* Defines for hardware MTRR registers. */
++#define MTRR_CAP_VCNT GENMASK(7, 0)
++#define MTRR_CAP_FIX BIT_MASK(8)
++#define MTRR_CAP_WC BIT_MASK(10)
++
++#define MTRR_DEF_TYPE_TYPE GENMASK(7, 0)
++#define MTRR_DEF_TYPE_FE BIT_MASK(10)
++#define MTRR_DEF_TYPE_E BIT_MASK(11)
++
++#define MTRR_DEF_TYPE_ENABLE (MTRR_DEF_TYPE_FE | MTRR_DEF_TYPE_E)
++#define MTRR_DEF_TYPE_DISABLE ~(MTRR_DEF_TYPE_TYPE | MTRR_DEF_TYPE_ENABLE)
++
++#define MTRR_PHYSBASE_TYPE GENMASK(7, 0)
++#define MTRR_PHYSBASE_RSVD GENMASK(11, 8)
++
++#define MTRR_PHYSMASK_RSVD GENMASK(10, 0)
++#define MTRR_PHYSMASK_V BIT_MASK(11)
++
++struct mtrr_state_type {
++ struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES];
++ mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES];
++ unsigned char enabled;
++ bool have_fixed;
++ mtrr_type def_type;
++};
++
+ /*
+ * The following functions are for use by other drivers that cannot use
+ * arch_phys_wc_add and arch_phys_wc_del.
+ */
+ # ifdef CONFIG_MTRR
+ void mtrr_bp_init(void);
++void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var,
++ mtrr_type def_type);
+ extern u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform);
+ extern void mtrr_save_fixed_ranges(void *);
+ extern void mtrr_save_state(void);
+@@ -48,6 +77,12 @@ void mtrr_disable(void);
+ void mtrr_enable(void);
+ void mtrr_generic_set_state(void);
+ # else
++static inline void mtrr_overwrite_state(struct mtrr_var_range *var,
++ unsigned int num_var,
++ mtrr_type def_type)
++{
++}
++
+ static inline u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform)
+ {
+ /*
+@@ -121,7 +156,8 @@ struct mtrr_gentry32 {
+ #endif /* CONFIG_COMPAT */
+
+ /* Bit fields for enabled in struct mtrr_state_type */
+-#define MTRR_STATE_MTRR_FIXED_ENABLED 0x01
+-#define MTRR_STATE_MTRR_ENABLED 0x02
++#define MTRR_STATE_SHIFT 10
++#define MTRR_STATE_MTRR_FIXED_ENABLED (MTRR_DEF_TYPE_FE >> MTRR_STATE_SHIFT)
++#define MTRR_STATE_MTRR_ENABLED (MTRR_DEF_TYPE_E >> MTRR_STATE_SHIFT)
+
+ #endif /* _ASM_X86_MTRR_H */
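
The asm/mtrr.h hunk above replaces the scattered magic numbers used elsewhere in this series (0xff, 0x800, 0xc00, (1 << 10) and friends) with named bitfields for the MTRRcap, MTRRdefType and MTRRphysBase/Mask MSRs. A small user-space sketch shows how the new names decode raw register values; the GENMASK/BIT_MASK helpers here are simplified stand-ins for the <linux/bits.h> versions, and the MSR values are made up:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define BIT_MASK(n)	(1ULL << (n))

#define MTRR_CAP_VCNT		GENMASK(7, 0)
#define MTRR_CAP_FIX		BIT_MASK(8)
#define MTRR_CAP_WC		BIT_MASK(10)
#define MTRR_DEF_TYPE_TYPE	GENMASK(7, 0)
#define MTRR_DEF_TYPE_E		BIT_MASK(11)

int main(void)
{
	uint64_t mtrr_cap = 0x508;	/* example MSR_MTRRcap value     */
	uint64_t def_type = 0xc06;	/* example MSR_MTRRdefType value */

	printf("variable ranges: %llu\n",
	       (unsigned long long)(mtrr_cap & MTRR_CAP_VCNT));
	printf("fixed ranges:    %s\n", (mtrr_cap & MTRR_CAP_FIX) ? "yes" : "no");
	printf("write-combining: %s\n", (mtrr_cap & MTRR_CAP_WC) ? "yes" : "no");
	printf("default type:    %llu, MTRRs %s\n",
	       (unsigned long long)(def_type & MTRR_DEF_TYPE_TYPE),
	       (def_type & MTRR_DEF_TYPE_E) ? "enabled" : "disabled");
	return 0;
}
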
+diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
+index abf09882f58b6..f1a46500a2753 100644
+--- a/arch/x86/include/asm/perf_event.h
++++ b/arch/x86/include/asm/perf_event.h
+@@ -478,8 +478,10 @@ struct pebs_xmm {
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+ extern u32 get_ibs_caps(void);
++extern int forward_event_to_ibs(struct perf_event *event);
+ #else
+ static inline u32 get_ibs_caps(void) { return 0; }
++static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; }
+ #endif
+
+ #ifdef CONFIG_PERF_EVENTS
+diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
+index 7929327abe009..a629b1b9f65a6 100644
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -237,8 +237,8 @@ static inline void native_pgd_clear(pgd_t *pgd)
+
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
+ #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val((pmd)) })
+-#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
+-#define __swp_entry_to_pmd(x) ((pmd_t) { .pmd = (x).val })
++#define __swp_entry_to_pte(x) (__pte((x).val))
++#define __swp_entry_to_pmd(x) (__pmd((x).val))
+
+ extern void cleanup_highmap(void);
+
+diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
+index ebc271bb6d8ed..a0a58c4122ec3 100644
+--- a/arch/x86/include/asm/sev.h
++++ b/arch/x86/include/asm/sev.h
+@@ -187,12 +187,12 @@ static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
+ }
+ void setup_ghcb(void);
+ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+- unsigned int npages);
++ unsigned long npages);
+ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
+- unsigned int npages);
++ unsigned long npages);
+ void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op);
+-void snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
+-void snp_set_memory_private(unsigned long vaddr, unsigned int npages);
++void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
++void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
+ void snp_set_wakeup_secondary_cpu(void);
+ bool snp_init(struct boot_params *bp);
+ void __init __noreturn snp_abort(void);
+@@ -207,12 +207,12 @@ static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
+ static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs) { return 0; }
+ static inline void setup_ghcb(void) { }
+ static inline void __init
+-early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned int npages) { }
++early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
+ static inline void __init
+-early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned int npages) { }
++early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
+ static inline void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op) { }
+-static inline void snp_set_memory_shared(unsigned long vaddr, unsigned int npages) { }
+-static inline void snp_set_memory_private(unsigned long vaddr, unsigned int npages) { }
++static inline void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { }
++static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { }
+ static inline void snp_set_wakeup_secondary_cpu(void) { }
+ static inline bool snp_init(struct boot_params *bp) { return false; }
+ static inline void snp_abort(void) { }
+diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
+index c1c8c581759d6..034e62838b284 100644
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -150,7 +150,7 @@ struct x86_init_acpi {
+ * @enc_cache_flush_required Returns true if a cache flush is needed before changing page encryption status
+ */
+ struct x86_guest {
+- void (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
++ bool (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
+ bool (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
+ bool (*enc_tlb_flush_required)(bool enc);
+ bool (*enc_cache_flush_required)(void);
+diff --git a/arch/x86/include/uapi/asm/mtrr.h b/arch/x86/include/uapi/asm/mtrr.h
+index 376563f2bac1f..ab194c8316259 100644
+--- a/arch/x86/include/uapi/asm/mtrr.h
++++ b/arch/x86/include/uapi/asm/mtrr.h
+@@ -81,14 +81,6 @@ typedef __u8 mtrr_type;
+ #define MTRR_NUM_FIXED_RANGES 88
+ #define MTRR_MAX_VAR_RANGES 256
+
+-struct mtrr_state_type {
+- struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES];
+- mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES];
+- unsigned char enabled;
+- unsigned char have_fixed;
+- mtrr_type def_type;
+-};
+-
+ #define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
+ #define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
+
+diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
+index b5f43049fa5f7..ca2d567e729e2 100644
+--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
++++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
+@@ -173,7 +173,7 @@ early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup);
+
+ static void __init
+ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
+- unsigned char type, unsigned int address_bits)
++ unsigned char type)
+ {
+ u32 base_lo, base_hi, mask_lo, mask_hi;
+ u64 base, mask;
+@@ -183,7 +183,7 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
+ return;
+ }
+
+- mask = (1ULL << address_bits) - 1;
++ mask = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
+ mask &= ~((((u64)sizek) << 10) - 1);
+
+ base = ((u64)basek) << 10;
+@@ -209,7 +209,7 @@ save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
+ range_state[reg].type = type;
+ }
+
+-static void __init set_var_mtrr_all(unsigned int address_bits)
++static void __init set_var_mtrr_all(void)
+ {
+ unsigned long basek, sizek;
+ unsigned char type;
+@@ -220,7 +220,7 @@ static void __init set_var_mtrr_all(unsigned int address_bits)
+ sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10);
+ type = range_state[reg].type;
+
+- set_var_mtrr(reg, basek, sizek, type, address_bits);
++ set_var_mtrr(reg, basek, sizek, type);
+ }
+ }
+
+@@ -680,7 +680,7 @@ static int __init mtrr_search_optimal_index(void)
+ return index_good;
+ }
+
+-int __init mtrr_cleanup(unsigned address_bits)
++int __init mtrr_cleanup(void)
+ {
+ unsigned long x_remove_base, x_remove_size;
+ unsigned long base, size, def, dummy;
+@@ -742,7 +742,7 @@ int __init mtrr_cleanup(unsigned address_bits)
+ mtrr_print_out_one_result(i);
+
+ if (!result[i].bad) {
+- set_var_mtrr_all(address_bits);
++ set_var_mtrr_all();
+ pr_debug("New variable MTRRs\n");
+ print_out_mtrr_range_state();
+ return 1;
+@@ -786,7 +786,7 @@ int __init mtrr_cleanup(unsigned address_bits)
+ gran_size = result[i].gran_sizek;
+ gran_size <<= 10;
+ x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
+- set_var_mtrr_all(address_bits);
++ set_var_mtrr_all();
+ pr_debug("New variable MTRRs\n");
+ print_out_mtrr_range_state();
+ return 1;
+@@ -802,7 +802,7 @@ int __init mtrr_cleanup(unsigned address_bits)
+ return 0;
+ }
+ #else
+-int __init mtrr_cleanup(unsigned address_bits)
++int __init mtrr_cleanup(void)
+ {
+ return 0;
+ }
+@@ -890,7 +890,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
+ return 0;
+
+ rdmsr(MSR_MTRRdefType, def, dummy);
+- def &= 0xff;
++ def &= MTRR_DEF_TYPE_TYPE;
+ if (def != MTRR_TYPE_UNCACHABLE)
+ return 0;
+
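
In the first cleanup.c hunk above, set_var_mtrr() now derives its address mask from boot_cpu_data.x86_phys_bits instead of a caller-supplied address_bits. A quick worked example of that mask computation; the 39-bit physical width and the 256 MiB range are arbitrary illustration values:

#include <stdio.h>

int main(void)
{
	unsigned int phys_bits = 39;			/* example CPU width        */
	unsigned long long sizek = 256 * 1024;		/* 256 MiB range, in KiB    */
	unsigned long long mask;

	mask = (1ULL << phys_bits) - 1;			/* 0x7fffffffff             */
	mask &= ~((sizek << 10) - 1);			/* clear bits inside range  */

	printf("PhysMask covers %#llx\n", mask);	/* prints 0x7ff0000000      */
	return 0;
}
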
+diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
+index ee09d359e08f0..e81d832475a1f 100644
+--- a/arch/x86/kernel/cpu/mtrr/generic.c
++++ b/arch/x86/kernel/cpu/mtrr/generic.c
+@@ -8,10 +8,12 @@
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/mm.h>
+-
++#include <linux/cc_platform.h>
+ #include <asm/processor-flags.h>
+ #include <asm/cacheinfo.h>
+ #include <asm/cpufeature.h>
++#include <asm/hypervisor.h>
++#include <asm/mshyperv.h>
+ #include <asm/tlbflush.h>
+ #include <asm/mtrr.h>
+ #include <asm/msr.h>
+@@ -38,6 +40,9 @@ u64 mtrr_tom2;
+ struct mtrr_state_type mtrr_state;
+ EXPORT_SYMBOL_GPL(mtrr_state);
+
++/* Reserved bits in the high portion of the MTRRphysBaseN MSR. */
++u32 phys_hi_rsvd;
++
+ /*
+ * BIOS is expected to clear MtrrFixDramModEn bit, see for example
+ * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
+@@ -69,10 +74,9 @@ static u64 get_mtrr_size(u64 mask)
+ {
+ u64 size;
+
+- mask >>= PAGE_SHIFT;
+- mask |= size_or_mask;
++ mask |= (u64)phys_hi_rsvd << 32;
+ size = -mask;
+- size <<= PAGE_SHIFT;
++
+ return size;
+ }
+
+@@ -171,7 +175,7 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
+ for (i = 0; i < num_var_ranges; ++i) {
+ unsigned short start_state, end_state, inclusive;
+
+- if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
++ if (!(mtrr_state.var_ranges[i].mask_lo & MTRR_PHYSMASK_V))
+ continue;
+
+ base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
+@@ -223,7 +227,7 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
+ if ((start & mask) != (base & mask))
+ continue;
+
+- curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
++ curr_match = mtrr_state.var_ranges[i].base_lo & MTRR_PHYSBASE_TYPE;
+ if (prev_match == MTRR_TYPE_INVALID) {
+ prev_match = curr_match;
+ continue;
+@@ -240,6 +244,62 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
+ return mtrr_state.def_type;
+ }
+
++/**
++ * mtrr_overwrite_state - set static MTRR state
++ *
++ * Used to set MTRR state via different means (e.g. with data obtained from
++ * a hypervisor).
++ * Is allowed only for special cases when running virtualized. Must be called
++ * from the x86_init.hyper.init_platform() hook. It can be called only once.
++ * The MTRR state can't be changed afterwards. To ensure that, X86_FEATURE_MTRR
++ * is cleared.
++ */
++void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var,
++ mtrr_type def_type)
++{
++ unsigned int i;
++
++ /* Only allowed to be called once before mtrr_bp_init(). */
++ if (WARN_ON_ONCE(mtrr_state_set))
++ return;
++
++ /* Only allowed when running virtualized. */
++ if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
++ return;
++
++ /*
++ * Only allowed for special virtualization cases:
++ * - when running as Hyper-V, SEV-SNP guest using vTOM
++ * - when running as Xen PV guest
++ * - when running as SEV-SNP or TDX guest to avoid unnecessary
++ * VMM communication/Virtualization exceptions (#VC, #VE)
++ */
++ if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP) &&
++ !hv_is_isolation_supported() &&
++ !cpu_feature_enabled(X86_FEATURE_XENPV) &&
++ !cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
++ return;
++
++ /* Disable MTRR in order to disable MTRR modifications. */
++ setup_clear_cpu_cap(X86_FEATURE_MTRR);
++
++ if (var) {
++ if (num_var > MTRR_MAX_VAR_RANGES) {
++ pr_warn("Trying to overwrite MTRR state with %u variable entries\n",
++ num_var);
++ num_var = MTRR_MAX_VAR_RANGES;
++ }
++ for (i = 0; i < num_var; i++)
++ mtrr_state.var_ranges[i] = var[i];
++ num_var_ranges = num_var;
++ }
++
++ mtrr_state.def_type = def_type;
++ mtrr_state.enabled |= MTRR_STATE_MTRR_ENABLED;
++
++ mtrr_state_set = 1;
++}
++
+ /**
+ * mtrr_type_lookup - look up memory type in MTRR
+ *
+@@ -422,10 +482,10 @@ static void __init print_mtrr_state(void)
+ }
+ pr_debug("MTRR variable ranges %sabled:\n",
+ mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
+- high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;
++ high_width = (boot_cpu_data.x86_phys_bits - (32 - PAGE_SHIFT) + 3) / 4;
+
+ for (i = 0; i < num_var_ranges; ++i) {
+- if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
++ if (mtrr_state.var_ranges[i].mask_lo & MTRR_PHYSMASK_V)
+ pr_debug(" %u base %0*X%05X000 mask %0*X%05X000 %s\n",
+ i,
+ high_width,
+@@ -434,7 +494,8 @@ static void __init print_mtrr_state(void)
+ high_width,
+ mtrr_state.var_ranges[i].mask_hi,
+ mtrr_state.var_ranges[i].mask_lo >> 12,
+- mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
++ mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo &
++ MTRR_PHYSBASE_TYPE));
+ else
+ pr_debug(" %u disabled\n", i);
+ }
+@@ -452,7 +513,7 @@ bool __init get_mtrr_state(void)
+ vrs = mtrr_state.var_ranges;
+
+ rdmsr(MSR_MTRRcap, lo, dummy);
+- mtrr_state.have_fixed = (lo >> 8) & 1;
++ mtrr_state.have_fixed = lo & MTRR_CAP_FIX;
+
+ for (i = 0; i < num_var_ranges; i++)
+ get_mtrr_var_range(i, &vrs[i]);
+@@ -460,8 +521,8 @@ bool __init get_mtrr_state(void)
+ get_fixed_ranges(mtrr_state.fixed_ranges);
+
+ rdmsr(MSR_MTRRdefType, lo, dummy);
+- mtrr_state.def_type = (lo & 0xff);
+- mtrr_state.enabled = (lo & 0xc00) >> 10;
++ mtrr_state.def_type = lo & MTRR_DEF_TYPE_TYPE;
++ mtrr_state.enabled = (lo & MTRR_DEF_TYPE_ENABLE) >> MTRR_STATE_SHIFT;
+
+ if (amd_special_default_mtrr()) {
+ unsigned low, high;
+@@ -574,7 +635,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
+
+ rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
+
+- if ((mask_lo & 0x800) == 0) {
++ if (!(mask_lo & MTRR_PHYSMASK_V)) {
+ /* Invalid (i.e. free) range */
+ *base = 0;
+ *size = 0;
+@@ -585,8 +646,8 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
+ rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
+
+ /* Work out the shifted address mask: */
+- tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
+- mask = size_or_mask | tmp;
++ tmp = (u64)mask_hi << 32 | (mask_lo & PAGE_MASK);
++ mask = (u64)phys_hi_rsvd << 32 | tmp;
+
+ /* Expand tmp with high bits to all 1s: */
+ hi = fls64(tmp);
+@@ -604,9 +665,9 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
+ * This works correctly if size is a power of two, i.e. a
+ * contiguous range:
+ */
+- *size = -mask;
++ *size = -mask >> PAGE_SHIFT;
+ *base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
+- *type = base_lo & 0xff;
++ *type = base_lo & MTRR_PHYSBASE_TYPE;
+
+ out_put_cpu:
+ put_cpu();
+@@ -644,9 +705,8 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
+ bool changed = false;
+
+ rdmsr(MTRRphysBase_MSR(index), lo, hi);
+- if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
+- || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
+- (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
++ if ((vr->base_lo & ~MTRR_PHYSBASE_RSVD) != (lo & ~MTRR_PHYSBASE_RSVD)
++ || (vr->base_hi & ~phys_hi_rsvd) != (hi & ~phys_hi_rsvd)) {
+
+ mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
+ changed = true;
+@@ -654,9 +714,8 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
+
+ rdmsr(MTRRphysMask_MSR(index), lo, hi);
+
+- if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
+- || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
+- (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
++ if ((vr->mask_lo & ~MTRR_PHYSMASK_RSVD) != (lo & ~MTRR_PHYSMASK_RSVD)
++ || (vr->mask_hi & ~phys_hi_rsvd) != (hi & ~phys_hi_rsvd)) {
+ mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
+ changed = true;
+ }
+@@ -691,11 +750,12 @@ static unsigned long set_mtrr_state(void)
+ * Set_mtrr_restore restores the old value of MTRRdefType,
+ * so to set it we fiddle with the saved value:
+ */
+- if ((deftype_lo & 0xff) != mtrr_state.def_type
+- || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
++ if ((deftype_lo & MTRR_DEF_TYPE_TYPE) != mtrr_state.def_type ||
++ ((deftype_lo & MTRR_DEF_TYPE_ENABLE) >> MTRR_STATE_SHIFT) != mtrr_state.enabled) {
+
+- deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
+- (mtrr_state.enabled << 10);
++ deftype_lo = (deftype_lo & MTRR_DEF_TYPE_DISABLE) |
++ mtrr_state.def_type |
++ (mtrr_state.enabled << MTRR_STATE_SHIFT);
+ change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
+ }
+
+@@ -708,7 +768,7 @@ void mtrr_disable(void)
+ rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
+
+ /* Disable MTRRs, and set the default type to uncached */
+- mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
++ mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & MTRR_DEF_TYPE_DISABLE, deftype_hi);
+ }
+
+ void mtrr_enable(void)
+@@ -762,9 +822,9 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
+ memset(vr, 0, sizeof(struct mtrr_var_range));
+ } else {
+ vr->base_lo = base << PAGE_SHIFT | type;
+- vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
+- vr->mask_lo = -size << PAGE_SHIFT | 0x800;
+- vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
++ vr->base_hi = (base >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd;
++ vr->mask_lo = -size << PAGE_SHIFT | MTRR_PHYSMASK_V;
++ vr->mask_hi = (-size >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd;
+
+ mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
+ mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
+@@ -817,7 +877,7 @@ static int generic_have_wrcomb(void)
+ {
+ unsigned long config, dummy;
+ rdmsr(MSR_MTRRcap, config, dummy);
+- return config & (1 << 10);
++ return config & MTRR_CAP_WC;
+ }
+
+ int positive_have_wrcomb(void)
+diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
+index 783f3210d5827..be35a0b09604d 100644
+--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
++++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
+@@ -67,8 +67,6 @@ static bool mtrr_enabled(void)
+ unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
+ static DEFINE_MUTEX(mtrr_mutex);
+
+-u64 size_or_mask, size_and_mask;
+-
+ const struct mtrr_ops *mtrr_if;
+
+ /* Returns non-zero if we have the write-combining memory type */
+@@ -117,7 +115,7 @@ static void __init set_num_var_ranges(bool use_generic)
+ else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
+ config = 8;
+
+- num_var_ranges = config & 0xff;
++ num_var_ranges = config & MTRR_CAP_VCNT;
+ }
+
+ static void __init init_table(void)
+@@ -619,77 +617,46 @@ static struct syscore_ops mtrr_syscore_ops = {
+
+ int __initdata changed_by_mtrr_cleanup;
+
+-#define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1))
+ /**
+- * mtrr_bp_init - initialize mtrrs on the boot CPU
++ * mtrr_bp_init - initialize MTRRs on the boot CPU
+ *
+ * This needs to be called early; before any of the other CPUs are
+ * initialized (i.e. before smp_init()).
+- *
+ */
+ void __init mtrr_bp_init(void)
+ {
++ bool generic_mtrrs = cpu_feature_enabled(X86_FEATURE_MTRR);
+ const char *why = "(not available)";
+- u32 phys_addr;
+
+- phys_addr = 32;
+-
+- if (boot_cpu_has(X86_FEATURE_MTRR)) {
+- mtrr_if = &generic_mtrr_ops;
+- size_or_mask = SIZE_OR_MASK_BITS(36);
+- size_and_mask = 0x00f00000;
+- phys_addr = 36;
++ phys_hi_rsvd = GENMASK(31, boot_cpu_data.x86_phys_bits - 32);
+
++ if (!generic_mtrrs && mtrr_state.enabled) {
+ /*
+- * This is an AMD specific MSR, but we assume(hope?) that
+- * Intel will implement it too when they extend the address
+- * bus of the Xeon.
++ * Software overwrite of MTRR state, only for generic case.
++ * Note that X86_FEATURE_MTRR has been reset in this case.
+ */
+- if (cpuid_eax(0x80000000) >= 0x80000008) {
+- phys_addr = cpuid_eax(0x80000008) & 0xff;
+- /* CPUID workaround for Intel 0F33/0F34 CPU */
+- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+- boot_cpu_data.x86 == 0xF &&
+- boot_cpu_data.x86_model == 0x3 &&
+- (boot_cpu_data.x86_stepping == 0x3 ||
+- boot_cpu_data.x86_stepping == 0x4))
+- phys_addr = 36;
+-
+- size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
+- size_and_mask = ~size_or_mask & 0xfffff00000ULL;
+- } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
+- boot_cpu_data.x86 == 6) {
+- /*
+- * VIA C* family have Intel style MTRRs,
+- * but don't support PAE
+- */
+- size_or_mask = SIZE_OR_MASK_BITS(32);
+- size_and_mask = 0;
+- phys_addr = 32;
+- }
++ init_table();
++ pr_info("MTRRs set to read-only\n");
++
++ return;
++ }
++
++ if (generic_mtrrs) {
++ mtrr_if = &generic_mtrr_ops;
+ } else {
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_AMD:
+- if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
+- /* Pre-Athlon (K6) AMD CPU MTRRs */
++ /* Pre-Athlon (K6) AMD CPU MTRRs */
++ if (cpu_feature_enabled(X86_FEATURE_K6_MTRR))
+ mtrr_if = &amd_mtrr_ops;
+- size_or_mask = SIZE_OR_MASK_BITS(32);
+- size_and_mask = 0;
+- }
+ break;
+ case X86_VENDOR_CENTAUR:
+- if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
++ if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR))
+ mtrr_if = &centaur_mtrr_ops;
+- size_or_mask = SIZE_OR_MASK_BITS(32);
+- size_and_mask = 0;
+- }
+ break;
+ case X86_VENDOR_CYRIX:
+- if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
++ if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR))
+ mtrr_if = &cyrix_mtrr_ops;
+- size_or_mask = SIZE_OR_MASK_BITS(32);
+- size_and_mask = 0;
+- }
+ break;
+ default:
+ break;
+@@ -703,7 +670,7 @@ void __init mtrr_bp_init(void)
+ /* BIOS may override */
+ if (get_mtrr_state()) {
+ memory_caching_control |= CACHE_MTRR;
+- changed_by_mtrr_cleanup = mtrr_cleanup(phys_addr);
++ changed_by_mtrr_cleanup = mtrr_cleanup();
+ } else {
+ mtrr_if = NULL;
+ why = "by BIOS";
+diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
+index 02eb5871492d0..59e8fb26bf9dd 100644
+--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
++++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
+@@ -51,7 +51,6 @@ void fill_mtrr_var_range(unsigned int index,
+ u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
+ bool get_mtrr_state(void);
+
+-extern u64 size_or_mask, size_and_mask;
+ extern const struct mtrr_ops *mtrr_if;
+
+ #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
+@@ -59,6 +58,7 @@ extern const struct mtrr_ops *mtrr_if;
+ extern unsigned int num_var_ranges;
+ extern u64 mtrr_tom2;
+ extern struct mtrr_state_type mtrr_state;
++extern u32 phys_hi_rsvd;
+
+ void mtrr_state_warn(void);
+ const char *mtrr_attrib_to_str(int x);
+@@ -70,4 +70,4 @@ extern const struct mtrr_ops cyrix_mtrr_ops;
+ extern const struct mtrr_ops centaur_mtrr_ops;
+
+ extern int changed_by_mtrr_cleanup;
+-extern int mtrr_cleanup(unsigned address_bits);
++extern int mtrr_cleanup(void);
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index 6ad33f355861f..61cdd9b1bb6d8 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -726,11 +726,15 @@ unlock:
+ static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
+ {
+ struct task_struct *p, *t;
++ pid_t pid;
+
+ rcu_read_lock();
+ for_each_process_thread(p, t) {
+- if (is_closid_match(t, r) || is_rmid_match(t, r))
+- seq_printf(s, "%d\n", t->pid);
++ if (is_closid_match(t, r) || is_rmid_match(t, r)) {
++ pid = task_pid_vnr(t);
++ if (pid)
++ seq_printf(s, "%d\n", pid);
++ }
+ }
+ rcu_read_unlock();
+ }
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 16babff771bdf..0cccfeb67c3ad 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -1037,6 +1037,8 @@ void __init setup_arch(char **cmdline_p)
+ /*
+ * VMware detection requires dmi to be available, so this
+ * needs to be done after dmi_setup(), for the boot CPU.
++ * For some guest types (Xen PV, SEV-SNP, TDX) it is required to be
++ * called before cache_bp_init() for setting up MTRR state.
+ */
+ init_hypervisor_platform();
+
+diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
+index 3f664ab277c49..45ef3926381f8 100644
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -643,7 +643,7 @@ static u64 __init get_jump_table_addr(void)
+ return ret;
+ }
+
+-static void pvalidate_pages(unsigned long vaddr, unsigned int npages, bool validate)
++static void pvalidate_pages(unsigned long vaddr, unsigned long npages, bool validate)
+ {
+ unsigned long vaddr_end;
+ int rc;
+@@ -660,7 +660,7 @@ static void pvalidate_pages(unsigned long vaddr, unsigned int npages, bool valid
+ }
+ }
+
+-static void __init early_set_pages_state(unsigned long paddr, unsigned int npages, enum psc_op op)
++static void __init early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
+ {
+ unsigned long paddr_end;
+ u64 val;
+@@ -699,7 +699,7 @@ e_term:
+ }
+
+ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+- unsigned int npages)
++ unsigned long npages)
+ {
+ /*
+ * This can be invoked in early boot while running identity mapped, so
+@@ -721,7 +721,7 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
+ }
+
+ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
+- unsigned int npages)
++ unsigned long npages)
+ {
+ /*
+ * This can be invoked in early boot while running identity mapped, so
+@@ -877,7 +877,7 @@ static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
+ }
+
+-static void set_pages_state(unsigned long vaddr, unsigned int npages, int op)
++static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
+ {
+ unsigned long vaddr_end, next_vaddr;
+ struct snp_psc_desc *desc;
+@@ -902,7 +902,7 @@ static void set_pages_state(unsigned long vaddr, unsigned int npages, int op)
+ kfree(desc);
+ }
+
+-void snp_set_memory_shared(unsigned long vaddr, unsigned int npages)
++void snp_set_memory_shared(unsigned long vaddr, unsigned long npages)
+ {
+ if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ return;
+@@ -912,7 +912,7 @@ void snp_set_memory_shared(unsigned long vaddr, unsigned int npages)
+ set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
+ }
+
+-void snp_set_memory_private(unsigned long vaddr, unsigned int npages)
++void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
+ {
+ if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ return;
+diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
+index 10622cf2b30f4..41e5b4cb898c3 100644
+--- a/arch/x86/kernel/x86_init.c
++++ b/arch/x86/kernel/x86_init.c
+@@ -130,7 +130,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
+
+ static void default_nmi_init(void) { };
+
+-static void enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { }
++static bool enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { return true; }
+ static bool enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return false; }
+ static bool enc_tlb_flush_required_noop(bool enc) { return false; }
+ static bool enc_cache_flush_required_noop(void) { return false; }
+diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
+index 9c4d8dbcb1296..ff6c0462beee7 100644
+--- a/arch/x86/mm/mem_encrypt_amd.c
++++ b/arch/x86/mm/mem_encrypt_amd.c
+@@ -319,7 +319,7 @@ static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
+ #endif
+ }
+
+-static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
++static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
+ {
+ /*
+ * To maintain the security guarantees of SEV-SNP guests, make sure
+@@ -327,6 +327,8 @@ static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool
+ */
+ if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
+ snp_set_memory_shared(vaddr, npages);
++
++ return true;
+ }
+
+ /* Return true unconditionally: return value doesn't matter for the SEV side */
+diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
+index 356758b7d4b47..6a167290a1fd1 100644
+--- a/arch/x86/mm/pat/set_memory.c
++++ b/arch/x86/mm/pat/set_memory.c
+@@ -2151,7 +2151,8 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
+ cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());
+
+ /* Notify hypervisor that we are about to set/clr encryption attribute. */
+- x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);
++ if (!x86_platform.guest.enc_status_change_prepare(addr, numpages, enc))
++ return -EIO;
+
+ ret = __change_page_attr_set_clr(&cpa, 1);
+
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index 232acf418cfbe..77f7ac3668cb4 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -853,9 +853,9 @@ efi_set_virtual_address_map(unsigned long memory_map_size,
+
+ /* Disable interrupts around EFI calls: */
+ local_irq_save(flags);
+- status = efi_call(efi.runtime->set_virtual_address_map,
+- memory_map_size, descriptor_size,
+- descriptor_version, virtual_map);
++ status = arch_efi_call_virt(efi.runtime, set_virtual_address_map,
++ memory_map_size, descriptor_size,
++ descriptor_version, virtual_map);
+ local_irq_restore(flags);
+
+ efi_fpu_end();
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 093b78c8bbec0..8732b85d56505 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -68,6 +68,7 @@
+ #include <asm/reboot.h>
+ #include <asm/hypervisor.h>
+ #include <asm/mach_traps.h>
++#include <asm/mtrr.h>
+ #include <asm/mwait.h>
+ #include <asm/pci_x86.h>
+ #include <asm/cpu.h>
+@@ -119,6 +120,54 @@ static int __init parse_xen_msr_safe(char *str)
+ }
+ early_param("xen_msr_safe", parse_xen_msr_safe);
+
++/* Get MTRR settings from Xen and put them into mtrr_state. */
++static void __init xen_set_mtrr_data(void)
++{
++#ifdef CONFIG_MTRR
++ struct xen_platform_op op = {
++ .cmd = XENPF_read_memtype,
++ .interface_version = XENPF_INTERFACE_VERSION,
++ };
++ unsigned int reg;
++ unsigned long mask;
++ uint32_t eax, width;
++ static struct mtrr_var_range var[MTRR_MAX_VAR_RANGES] __initdata;
++
++ /* Get physical address width (only 64-bit cpus supported). */
++ width = 36;
++ eax = cpuid_eax(0x80000000);
++ if ((eax >> 16) == 0x8000 && eax >= 0x80000008) {
++ eax = cpuid_eax(0x80000008);
++ width = eax & 0xff;
++ }
++
++ for (reg = 0; reg < MTRR_MAX_VAR_RANGES; reg++) {
++ op.u.read_memtype.reg = reg;
++ if (HYPERVISOR_platform_op(&op))
++ break;
++
++ /*
++ * Only called in dom0, which has all RAM PFNs mapped at
++ * RAM MFNs, and all PCI space etc. is identity mapped.
++ * This means we can treat MFN == PFN regarding MTRR settings.
++ */
++ var[reg].base_lo = op.u.read_memtype.type;
++ var[reg].base_lo |= op.u.read_memtype.mfn << PAGE_SHIFT;
++ var[reg].base_hi = op.u.read_memtype.mfn >> (32 - PAGE_SHIFT);
++ mask = ~((op.u.read_memtype.nr_mfns << PAGE_SHIFT) - 1);
++ mask &= (1UL << width) - 1;
++ if (mask)
++ mask |= MTRR_PHYSMASK_V;
++ var[reg].mask_lo = mask;
++ var[reg].mask_hi = mask >> 32;
++ }
++
++ /* Only overwrite MTRR state if any MTRR could be got from Xen. */
++ if (reg)
++ mtrr_overwrite_state(var, reg, MTRR_TYPE_UNCACHABLE);
++#endif
++}
++
+ static void __init xen_pv_init_platform(void)
+ {
+ /* PV guests can't operate virtio devices without grants. */
+@@ -135,6 +184,9 @@ static void __init xen_pv_init_platform(void)
+
+ /* pvclock is in shared info area */
+ xen_init_time_ops();
++
++ if (xen_initial_domain())
++ xen_set_mtrr_data();
+ }
+
+ static void __init xen_pv_guest_late_init(void)
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 581aa08e34a8e..757c10cf26dda 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -585,8 +585,13 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
+ struct blkg_iostat_set *bis =
+ per_cpu_ptr(blkg->iostat_cpu, cpu);
+ memset(bis, 0, sizeof(*bis));
++
++ /* Re-initialize the cleared blkg_iostat_set */
++ u64_stats_init(&bis->sync);
++ bis->blkg = blkg;
+ }
+ memset(&blkg->iostat, 0, sizeof(blkg->iostat));
++ u64_stats_init(&blkg->iostat.sync);
+
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index 4442c7a851125..b29429090d3f4 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -2455,6 +2455,7 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
+ u32 hwi, adj_step;
+ s64 margin;
+ u64 cost, new_inuse;
++ unsigned long flags;
+
+ current_hweight(iocg, NULL, &hwi);
+ old_hwi = hwi;
+@@ -2473,11 +2474,11 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
+ iocg->inuse == iocg->active)
+ return cost;
+
+- spin_lock_irq(&ioc->lock);
++ spin_lock_irqsave(&ioc->lock, flags);
+
+ /* we own inuse only when @iocg is in the normal active state */
+ if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
+- spin_unlock_irq(&ioc->lock);
++ spin_unlock_irqrestore(&ioc->lock, flags);
+ return cost;
+ }
+
+@@ -2498,7 +2499,7 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
+ } while (time_after64(vtime + cost, now->vnow) &&
+ iocg->inuse != iocg->active);
+
+- spin_unlock_irq(&ioc->lock);
++ spin_unlock_irqrestore(&ioc->lock, flags);
+
+ TRACE_IOCG_PATH(inuse_adjust, iocg, now,
+ old_inuse, iocg->inuse, old_hwi, hwi);
+diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
+index b01818f8e216e..c152276736832 100644
+--- a/block/blk-mq-debugfs.c
++++ b/block/blk-mq-debugfs.c
+@@ -427,7 +427,7 @@ static void blk_mq_debugfs_tags_show(struct seq_file *m,
+ seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
+ seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
+ seq_printf(m, "active_queues=%d\n",
+- atomic_read(&tags->active_queues));
++ READ_ONCE(tags->active_queues));
+
+ seq_puts(m, "\nbitmap_tags:\n");
+ sbitmap_queue_show(&tags->bitmap_tags, m);
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index a80d7c62bdfe6..100889c276c3f 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -40,6 +40,7 @@ static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
+ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+ {
+ unsigned int users;
++ struct blk_mq_tags *tags = hctx->tags;
+
+ /*
+ * calling test_bit() prior to test_and_set_bit() is intentional,
+@@ -57,9 +58,11 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+ return;
+ }
+
+- users = atomic_inc_return(&hctx->tags->active_queues);
+-
+- blk_mq_update_wake_batch(hctx->tags, users);
++ spin_lock_irq(&tags->lock);
++ users = tags->active_queues + 1;
++ WRITE_ONCE(tags->active_queues, users);
++ blk_mq_update_wake_batch(tags, users);
++ spin_unlock_irq(&tags->lock);
+ }
+
+ /*
+@@ -92,9 +95,11 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
+ return;
+ }
+
+- users = atomic_dec_return(&tags->active_queues);
+-
++ spin_lock_irq(&tags->lock);
++ users = tags->active_queues - 1;
++ WRITE_ONCE(tags->active_queues, users);
+ blk_mq_update_wake_batch(tags, users);
++ spin_unlock_irq(&tags->lock);
+
+ blk_mq_tag_wakeup_all(tags, false);
+ }
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index a7482d2cc82e7..4542308c8e62f 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -362,8 +362,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
+ return true;
+ }
+
+- users = atomic_read(&hctx->tags->active_queues);
+-
++ users = READ_ONCE(hctx->tags->active_queues);
+ if (!users)
+ return true;
+
+diff --git a/block/genhd.c b/block/genhd.c
+index 7f874737af682..c5a35e1b462fa 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -25,8 +25,9 @@
+ #include <linux/pm_runtime.h>
+ #include <linux/badblocks.h>
+ #include <linux/part_stat.h>
+-#include "blk-throttle.h"
++#include <linux/blktrace_api.h>
+
++#include "blk-throttle.h"
+ #include "blk.h"
+ #include "blk-mq-sched.h"
+ #include "blk-rq-qos.h"
+@@ -1183,6 +1184,8 @@ static void disk_release(struct device *dev)
+ might_sleep();
+ WARN_ON_ONCE(disk_live(disk));
+
++ blk_trace_remove(disk->queue);
++
+ /*
+ * To undo the all initialization from blk_mq_init_allocated_queue in
+ * case of a probe failure where add_disk is never called we have to
+diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
+index 22f48bf4c6f57..227cedfa4f0ae 100644
+--- a/crypto/jitterentropy.c
++++ b/crypto/jitterentropy.c
+@@ -117,7 +117,6 @@ struct rand_data {
+ * zero). */
+ #define JENT_ESTUCK 8 /* Too many stuck results during init. */
+ #define JENT_EHEALTH 9 /* Health test failed during initialization */
+-#define JENT_ERCT 10 /* RCT failed during initialization */
+
+ /*
+ * The output n bits can receive more than n bits of min entropy, of course,
+@@ -762,14 +761,12 @@ int jent_entropy_init(void)
+ if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) {
+ jent_apt_reset(&ec,
+ delta & JENT_APT_WORD_MASK);
+- if (jent_health_failure(&ec))
+- return JENT_EHEALTH;
+ }
+ }
+
+- /* Validate RCT */
+- if (jent_rct_failure(&ec))
+- return JENT_ERCT;
++ /* Validate health test result */
++ if (jent_health_failure(&ec))
++ return JENT_EHEALTH;
+
+ /* test whether we have an increasing timer */
+ if (!(time2 > time))
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index 34ad071a64e96..4382fe13ee3e4 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -1544,6 +1544,8 @@ struct list_head *ghes_get_devices(void)
+
+ pr_warn_once("Force-loading ghes_edac on an unsupported platform. You're on your own!\n");
+ }
++ } else if (list_empty(&ghes_devs)) {
++ return NULL;
+ }
+
+ return &ghes_devs;
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 32084e38b73d0..5cb2023581d4d 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1632,9 +1632,6 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+- if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+- return -EINVAL;
+-
+ gpd_data = genpd_alloc_dev_data(dev, gd);
+ if (IS_ERR(gpd_data))
+ return PTR_ERR(gpd_data);
+@@ -1676,6 +1673,9 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+ {
+ int ret;
+
++ if (!genpd || !dev)
++ return -EINVAL;
++
+ mutex_lock(&gpd_list_lock);
+ ret = genpd_add_device(genpd, dev, dev);
+ mutex_unlock(&gpd_list_lock);
+@@ -2523,6 +2523,9 @@ int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
+ struct generic_pm_domain *genpd;
+ int ret;
+
++ if (!dev)
++ return -EINVAL;
++
+ mutex_lock(&gpd_list_lock);
+
+ genpd = genpd_get_from_provider(genpdspec);
+@@ -2939,10 +2942,10 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
+
+ err = of_property_read_u32(state_node, "min-residency-us", &residency);
+ if (!err)
+- genpd_state->residency_ns = 1000 * residency;
++ genpd_state->residency_ns = 1000LL * residency;
+
+- genpd_state->power_on_latency_ns = 1000 * exit_latency;
+- genpd_state->power_off_latency_ns = 1000 * entry_latency;
++ genpd_state->power_on_latency_ns = 1000LL * exit_latency;
++ genpd_state->power_off_latency_ns = 1000LL * entry_latency;
+ genpd_state->fwnode = &state_node->fwnode;
+
+ return 0;
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 6afae98978434..b5ceec2b2d84f 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -1814,7 +1814,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
+ if (!ddata->module_va)
+ return -EIO;
+
+- /* DISP_CONTROL */
++ /* DISP_CONTROL, shut down lcd and digit on disable if enabled */
+ val = sysc_read(ddata, dispc_offset + 0x40);
+ lcd_en = val & lcd_en_mask;
+ digit_en = val & digit_en_mask;
+@@ -1826,7 +1826,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
+ else
+ irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */
+ }
+- if (disable & (lcd_en | digit_en))
++ if (disable && (lcd_en || digit_en))
+ sysc_write(ddata, dispc_offset + 0x40,
+ val & ~(lcd_en_mask | digit_en_mask));
+
+diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c
+index 15ba1e6fae4d2..6e9dfac9fc9f4 100644
+--- a/drivers/char/hw_random/st-rng.c
++++ b/drivers/char/hw_random/st-rng.c
+@@ -42,7 +42,6 @@
+
+ struct st_rng_data {
+ void __iomem *base;
+- struct clk *clk;
+ struct hwrng ops;
+ };
+
+@@ -85,26 +84,18 @@ static int st_rng_probe(struct platform_device *pdev)
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+- clk = devm_clk_get(&pdev->dev, NULL);
++ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+- ret = clk_prepare_enable(clk);
+- if (ret)
+- return ret;
+-
+ ddata->ops.priv = (unsigned long)ddata;
+ ddata->ops.read = st_rng_read;
+ ddata->ops.name = pdev->name;
+ ddata->base = base;
+- ddata->clk = clk;
+-
+- dev_set_drvdata(&pdev->dev, ddata);
+
+ ret = devm_hwrng_register(&pdev->dev, &ddata->ops);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register HW RNG\n");
+- clk_disable_unprepare(clk);
+ return ret;
+ }
+
+@@ -113,15 +104,6 @@ static int st_rng_probe(struct platform_device *pdev)
+ return 0;
+ }
+
+-static int st_rng_remove(struct platform_device *pdev)
+-{
+- struct st_rng_data *ddata = dev_get_drvdata(&pdev->dev);
+-
+- clk_disable_unprepare(ddata->clk);
+-
+- return 0;
+-}
+-
+ static const struct of_device_id st_rng_match[] __maybe_unused = {
+ { .compatible = "st,rng" },
+ {},
+@@ -134,7 +116,6 @@ static struct platform_driver st_rng_driver = {
+ .of_match_table = of_match_ptr(st_rng_match),
+ },
+ .probe = st_rng_probe,
+- .remove = st_rng_remove
+ };
+
+ module_platform_driver(st_rng_driver);
+diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
+index f7690e0f92ede..e41a84e6b4b56 100644
+--- a/drivers/char/hw_random/virtio-rng.c
++++ b/drivers/char/hw_random/virtio-rng.c
+@@ -4,6 +4,7 @@
+ * Copyright (C) 2007, 2008 Rusty Russell IBM Corporation
+ */
+
++#include <asm/barrier.h>
+ #include <linux/err.h>
+ #include <linux/hw_random.h>
+ #include <linux/scatterlist.h>
+@@ -37,13 +38,13 @@ struct virtrng_info {
+ static void random_recv_done(struct virtqueue *vq)
+ {
+ struct virtrng_info *vi = vq->vdev->priv;
++ unsigned int len;
+
+ /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
+- if (!virtqueue_get_buf(vi->vq, &vi->data_avail))
++ if (!virtqueue_get_buf(vi->vq, &len))
+ return;
+
+- vi->data_idx = 0;
+-
++ smp_store_release(&vi->data_avail, len);
+ complete(&vi->have_data);
+ }
+
+@@ -52,7 +53,6 @@ static void request_entropy(struct virtrng_info *vi)
+ struct scatterlist sg;
+
+ reinit_completion(&vi->have_data);
+- vi->data_avail = 0;
+ vi->data_idx = 0;
+
+ sg_init_one(&sg, vi->data, sizeof(vi->data));
+@@ -88,7 +88,7 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
+ read = 0;
+
+ /* copy available data */
+- if (vi->data_avail) {
++ if (smp_load_acquire(&vi->data_avail)) {
+ chunk = copy_data(vi, buf, size);
+ size -= chunk;
+ read += chunk;
+diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
+index ce2f934797369..5df19f571a474 100644
+--- a/drivers/clk/bcm/clk-raspberrypi.c
++++ b/drivers/clk/bcm/clk-raspberrypi.c
+@@ -356,9 +356,9 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
+ while (clks->id) {
+ struct raspberrypi_clk_variant *variant;
+
+- if (clks->id > RPI_FIRMWARE_NUM_CLK_ID) {
++ if (clks->id >= RPI_FIRMWARE_NUM_CLK_ID) {
+ dev_err(rpi->dev, "Unknown clock id: %u (max: %u)\n",
+- clks->id, RPI_FIRMWARE_NUM_CLK_ID);
++ clks->id, RPI_FIRMWARE_NUM_CLK_ID - 1);
+ return -EINVAL;
+ }
+
+diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
+index 6350682f7e6d2..87890669297d8 100644
+--- a/drivers/clk/clk-cdce925.c
++++ b/drivers/clk/clk-cdce925.c
+@@ -701,6 +701,10 @@ static int cdce925_probe(struct i2c_client *client)
+ for (i = 0; i < data->chip_info->num_plls; ++i) {
+ pll_clk_name[i] = kasprintf(GFP_KERNEL, "%pOFn.pll%d",
+ client->dev.of_node, i);
++ if (!pll_clk_name[i]) {
++ err = -ENOMEM;
++ goto error;
++ }
+ init.name = pll_clk_name[i];
+ data->pll[i].chip = data;
+ data->pll[i].hw.init = &init;
+@@ -742,6 +746,10 @@ static int cdce925_probe(struct i2c_client *client)
+ init.num_parents = 1;
+ init.parent_names = &parent_name; /* Mux Y1 to input */
+ init.name = kasprintf(GFP_KERNEL, "%pOFn.Y1", client->dev.of_node);
++ if (!init.name) {
++ err = -ENOMEM;
++ goto error;
++ }
+ data->clk[0].chip = data;
+ data->clk[0].hw.init = &init;
+ data->clk[0].index = 0;
+@@ -760,6 +768,10 @@ static int cdce925_probe(struct i2c_client *client)
+ for (i = 1; i < data->chip_info->num_outputs; ++i) {
+ init.name = kasprintf(GFP_KERNEL, "%pOFn.Y%d",
+ client->dev.of_node, i+1);
++ if (!init.name) {
++ err = -ENOMEM;
++ goto error;
++ }
+ data->clk[i].chip = data;
+ data->clk[i].hw.init = &init;
+ data->clk[i].index = i;
+diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c
+index ff3a52d484790..6060cafe1aa22 100644
+--- a/drivers/clk/clk-renesas-pcie.c
++++ b/drivers/clk/clk-renesas-pcie.c
+@@ -6,6 +6,7 @@
+ * - 9FGV/9DBV/9DMV/9FGL/9DML/9QXL/9SQ
+ * Currently supported:
+ * - 9FGV0241
++ * - 9FGV0441
+ *
+ * Copyright (C) 2022 Marek Vasut <marex@denx.de>
+ */
+@@ -18,7 +19,6 @@
+ #include <linux/regmap.h>
+
+ #define RS9_REG_OE 0x0
+-#define RS9_REG_OE_DIF_OE(n) BIT((n) + 1)
+ #define RS9_REG_SS 0x1
+ #define RS9_REG_SS_AMP_0V6 0x0
+ #define RS9_REG_SS_AMP_0V7 0x1
+@@ -31,9 +31,6 @@
+ #define RS9_REG_SS_SSC_MASK (3 << 3)
+ #define RS9_REG_SS_SSC_LOCK BIT(5)
+ #define RS9_REG_SR 0x2
+-#define RS9_REG_SR_2V0_DIF(n) 0
+-#define RS9_REG_SR_3V0_DIF(n) BIT((n) + 1)
+-#define RS9_REG_SR_DIF_MASK(n) BIT((n) + 1)
+ #define RS9_REG_REF 0x3
+ #define RS9_REG_REF_OE BIT(4)
+ #define RS9_REG_REF_OD BIT(5)
+@@ -45,22 +42,31 @@
+ #define RS9_REG_DID 0x6
+ #define RS9_REG_BCP 0x7
+
++#define RS9_REG_VID_IDT 0x01
++
++#define RS9_REG_DID_TYPE_FGV (0x0 << RS9_REG_DID_TYPE_SHIFT)
++#define RS9_REG_DID_TYPE_DBV (0x1 << RS9_REG_DID_TYPE_SHIFT)
++#define RS9_REG_DID_TYPE_DMV (0x2 << RS9_REG_DID_TYPE_SHIFT)
++#define RS9_REG_DID_TYPE_SHIFT 0x6
++
+ /* Supported Renesas 9-series models. */
+ enum rs9_model {
+ RENESAS_9FGV0241,
++ RENESAS_9FGV0441,
+ };
+
+ /* Structure to describe features of a particular 9-series model */
+ struct rs9_chip_info {
+ const enum rs9_model model;
+ unsigned int num_clks;
++ u8 did;
+ };
+
+ struct rs9_driver_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ const struct rs9_chip_info *chip_info;
+- struct clk_hw *clk_dif[2];
++ struct clk_hw *clk_dif[4];
+ u8 pll_amplitude;
+ u8 pll_ssc;
+ u8 clk_dif_sr;
+@@ -152,17 +158,29 @@ static const struct regmap_config rs9_regmap_config = {
+ .reg_read = rs9_regmap_i2c_read,
+ };
+
++static u8 rs9_calc_dif(const struct rs9_driver_data *rs9, int idx)
++{
++ enum rs9_model model = rs9->chip_info->model;
++
++ if (model == RENESAS_9FGV0241)
++ return BIT(idx) + 1;
++ else if (model == RENESAS_9FGV0441)
++ return BIT(idx);
++
++ return 0;
++}
++
+ static int rs9_get_output_config(struct rs9_driver_data *rs9, int idx)
+ {
+ struct i2c_client *client = rs9->client;
++ u8 dif = rs9_calc_dif(rs9, idx);
+ unsigned char name[5] = "DIF0";
+ struct device_node *np;
+ int ret;
+ u32 sr;
+
+ /* Set defaults */
+- rs9->clk_dif_sr &= ~RS9_REG_SR_DIF_MASK(idx);
+- rs9->clk_dif_sr |= RS9_REG_SR_3V0_DIF(idx);
++ rs9->clk_dif_sr |= dif;
+
+ snprintf(name, 5, "DIF%d", idx);
+ np = of_get_child_by_name(client->dev.of_node, name);
+@@ -174,11 +192,9 @@ static int rs9_get_output_config(struct rs9_driver_data *rs9, int idx)
+ of_node_put(np);
+ if (!ret) {
+ if (sr == 2000000) { /* 2V/ns */
+- rs9->clk_dif_sr &= ~RS9_REG_SR_DIF_MASK(idx);
+- rs9->clk_dif_sr |= RS9_REG_SR_2V0_DIF(idx);
++ rs9->clk_dif_sr &= ~dif;
+ } else if (sr == 3000000) { /* 3V/ns (default) */
+- rs9->clk_dif_sr &= ~RS9_REG_SR_DIF_MASK(idx);
+- rs9->clk_dif_sr |= RS9_REG_SR_3V0_DIF(idx);
++ rs9->clk_dif_sr |= dif;
+ } else
+ ret = dev_err_probe(&client->dev, -EINVAL,
+ "Invalid renesas,slew-rate value\n");
+@@ -249,11 +265,13 @@ static void rs9_update_config(struct rs9_driver_data *rs9)
+ }
+
+ for (i = 0; i < rs9->chip_info->num_clks; i++) {
+- if (rs9->clk_dif_sr & RS9_REG_SR_3V0_DIF(i))
++ u8 dif = rs9_calc_dif(rs9, i);
++
++ if (rs9->clk_dif_sr & dif)
+ continue;
+
+- regmap_update_bits(rs9->regmap, RS9_REG_SR, RS9_REG_SR_3V0_DIF(i),
+- rs9->clk_dif_sr & RS9_REG_SR_3V0_DIF(i));
++ regmap_update_bits(rs9->regmap, RS9_REG_SR, dif,
++ rs9->clk_dif_sr & dif);
+ }
+ }
+
+@@ -270,6 +288,7 @@ static int rs9_probe(struct i2c_client *client)
+ {
+ unsigned char name[5] = "DIF0";
+ struct rs9_driver_data *rs9;
++ unsigned int vid, did;
+ struct clk_hw *hw;
+ int i, ret;
+
+@@ -306,6 +325,20 @@ static int rs9_probe(struct i2c_client *client)
+ if (ret < 0)
+ return ret;
+
++ ret = regmap_read(rs9->regmap, RS9_REG_VID, &vid);
++ if (ret < 0)
++ return ret;
++
++ ret = regmap_read(rs9->regmap, RS9_REG_DID, &did);
++ if (ret < 0)
++ return ret;
++
++ if (vid != RS9_REG_VID_IDT || did != rs9->chip_info->did)
++ return dev_err_probe(&client->dev, -ENODEV,
++ "Incorrect VID/DID: %#02x, %#02x. Expected %#02x, %#02x\n",
++ vid, did, RS9_REG_VID_IDT,
++ rs9->chip_info->did);
++
+ /* Register clock */
+ for (i = 0; i < rs9->chip_info->num_clks; i++) {
+ snprintf(name, 5, "DIF%d", i);
+@@ -349,16 +382,25 @@ static int __maybe_unused rs9_resume(struct device *dev)
+ static const struct rs9_chip_info renesas_9fgv0241_info = {
+ .model = RENESAS_9FGV0241,
+ .num_clks = 2,
++ .did = RS9_REG_DID_TYPE_FGV | 0x02,
++};
++
++static const struct rs9_chip_info renesas_9fgv0441_info = {
++ .model = RENESAS_9FGV0441,
++ .num_clks = 4,
++ .did = RS9_REG_DID_TYPE_FGV | 0x04,
+ };
+
+ static const struct i2c_device_id rs9_id[] = {
+- { "9fgv0241", .driver_data = RENESAS_9FGV0241 },
++ { "9fgv0241", .driver_data = (kernel_ulong_t)&renesas_9fgv0241_info },
++ { "9fgv0441", .driver_data = (kernel_ulong_t)&renesas_9fgv0441_info },
+ { }
+ };
+ MODULE_DEVICE_TABLE(i2c, rs9_id);
+
+ static const struct of_device_id clk_rs9_of_match[] = {
+ { .compatible = "renesas,9fgv0241", .data = &renesas_9fgv0241_info },
++ { .compatible = "renesas,9fgv0441", .data = &renesas_9fgv0441_info },
+ { }
+ };
+ MODULE_DEVICE_TABLE(of, clk_rs9_of_match);
+diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
+index 0e528d7ba656e..c7d8cbd22bacc 100644
+--- a/drivers/clk/clk-si5341.c
++++ b/drivers/clk/clk-si5341.c
+@@ -1553,7 +1553,7 @@ static int si5341_probe(struct i2c_client *client)
+ struct clk_init_data init;
+ struct clk *input;
+ const char *root_clock_name;
+- const char *synth_clock_names[SI5341_NUM_SYNTH];
++ const char *synth_clock_names[SI5341_NUM_SYNTH] = { NULL };
+ int err;
+ unsigned int i;
+ struct clk_si5341_output_config config[SI5341_MAX_NUM_OUTPUTS];
+@@ -1697,6 +1697,10 @@ static int si5341_probe(struct i2c_client *client)
+ for (i = 0; i < data->num_synth; ++i) {
+ synth_clock_names[i] = devm_kasprintf(&client->dev, GFP_KERNEL,
+ "%s.N%u", client->dev.of_node->name, i);
++ if (!synth_clock_names[i]) {
++ err = -ENOMEM;
++ goto free_clk_names;
++ }
+ init.name = synth_clock_names[i];
+ data->synth[i].index = i;
+ data->synth[i].data = data;
+@@ -1705,6 +1709,7 @@ static int si5341_probe(struct i2c_client *client)
+ if (err) {
+ dev_err(&client->dev,
+ "synth N%u registration failed\n", i);
++ goto free_clk_names;
+ }
+ }
+
+@@ -1714,6 +1719,10 @@ static int si5341_probe(struct i2c_client *client)
+ for (i = 0; i < data->num_outputs; ++i) {
+ init.name = kasprintf(GFP_KERNEL, "%s.%d",
+ client->dev.of_node->name, i);
++ if (!init.name) {
++ err = -ENOMEM;
++ goto free_clk_names;
++ }
+ init.flags = config[i].synth_master ? CLK_SET_RATE_PARENT : 0;
+ data->clk[i].index = i;
+ data->clk[i].data = data;
+@@ -1735,7 +1744,7 @@ static int si5341_probe(struct i2c_client *client)
+ if (err) {
+ dev_err(&client->dev,
+ "output %u registration failed\n", i);
+- goto cleanup;
++ goto free_clk_names;
+ }
+ if (config[i].always_on)
+ clk_prepare(data->clk[i].hw.clk);
+@@ -1745,7 +1754,7 @@ static int si5341_probe(struct i2c_client *client)
+ data);
+ if (err) {
+ dev_err(&client->dev, "unable to add clk provider\n");
+- goto cleanup;
++ goto free_clk_names;
+ }
+
+ if (initialization_required) {
+@@ -1753,11 +1762,11 @@ static int si5341_probe(struct i2c_client *client)
+ regcache_cache_only(data->regmap, false);
+ err = regcache_sync(data->regmap);
+ if (err < 0)
+- goto cleanup;
++ goto free_clk_names;
+
+ err = si5341_finalize_defaults(data);
+ if (err < 0)
+- goto cleanup;
++ goto free_clk_names;
+ }
+
+ /* wait for device to report input clock present and PLL lock */
+@@ -1766,32 +1775,31 @@ static int si5341_probe(struct i2c_client *client)
+ 10000, 250000);
+ if (err) {
+ dev_err(&client->dev, "Error waiting for input clock or PLL lock\n");
+- goto cleanup;
++ goto free_clk_names;
+ }
+
+ /* clear sticky alarm bits from initialization */
+ err = regmap_write(data->regmap, SI5341_STATUS_STICKY, 0);
+ if (err) {
+ dev_err(&client->dev, "unable to clear sticky status\n");
+- goto cleanup;
++ goto free_clk_names;
+ }
+
+ err = sysfs_create_files(&client->dev.kobj, si5341_attributes);
+- if (err) {
++ if (err)
+ dev_err(&client->dev, "unable to create sysfs files\n");
+- goto cleanup;
+- }
+
++free_clk_names:
+ /* Free the names, clk framework makes copies */
+ for (i = 0; i < data->num_synth; ++i)
+ devm_kfree(&client->dev, (void *)synth_clock_names[i]);
+
+- return 0;
+-
+ cleanup:
+- for (i = 0; i < SI5341_MAX_NUM_OUTPUTS; ++i) {
+- if (data->clk[i].vddo_reg)
+- regulator_disable(data->clk[i].vddo_reg);
++ if (err) {
++ for (i = 0; i < SI5341_MAX_NUM_OUTPUTS; ++i) {
++ if (data->clk[i].vddo_reg)
++ regulator_disable(data->clk[i].vddo_reg);
++ }
+ }
+ return err;
+ }
+diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
+index fa71a57875ce8..e9a7f3c91ae0e 100644
+--- a/drivers/clk/clk-versaclock5.c
++++ b/drivers/clk/clk-versaclock5.c
+@@ -1028,6 +1028,11 @@ static int vc5_probe(struct i2c_client *client)
+ }
+
+ init.name = kasprintf(GFP_KERNEL, "%pOFn.mux", client->dev.of_node);
++ if (!init.name) {
++ ret = -ENOMEM;
++ goto err_clk;
++ }
++
+ init.ops = &vc5_mux_ops;
+ init.flags = 0;
+ init.parent_names = parent_names;
+@@ -1042,6 +1047,10 @@ static int vc5_probe(struct i2c_client *client)
+ memset(&init, 0, sizeof(init));
+ init.name = kasprintf(GFP_KERNEL, "%pOFn.dbl",
+ client->dev.of_node);
++ if (!init.name) {
++ ret = -ENOMEM;
++ goto err_clk;
++ }
+ init.ops = &vc5_dbl_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = parent_names;
+@@ -1057,6 +1066,10 @@ static int vc5_probe(struct i2c_client *client)
+ /* Register PFD */
+ memset(&init, 0, sizeof(init));
+ init.name = kasprintf(GFP_KERNEL, "%pOFn.pfd", client->dev.of_node);
++ if (!init.name) {
++ ret = -ENOMEM;
++ goto err_clk;
++ }
+ init.ops = &vc5_pfd_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = parent_names;
+@@ -1074,6 +1087,10 @@ static int vc5_probe(struct i2c_client *client)
+ /* Register PLL */
+ memset(&init, 0, sizeof(init));
+ init.name = kasprintf(GFP_KERNEL, "%pOFn.pll", client->dev.of_node);
++ if (!init.name) {
++ ret = -ENOMEM;
++ goto err_clk;
++ }
+ init.ops = &vc5_pll_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = parent_names;
+@@ -1093,6 +1110,10 @@ static int vc5_probe(struct i2c_client *client)
+ memset(&init, 0, sizeof(init));
+ init.name = kasprintf(GFP_KERNEL, "%pOFn.fod%d",
+ client->dev.of_node, idx);
++ if (!init.name) {
++ ret = -ENOMEM;
++ goto err_clk;
++ }
+ init.ops = &vc5_fod_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = parent_names;
+@@ -1111,6 +1132,10 @@ static int vc5_probe(struct i2c_client *client)
+ memset(&init, 0, sizeof(init));
+ init.name = kasprintf(GFP_KERNEL, "%pOFn.out0_sel_i2cb",
+ client->dev.of_node);
++ if (!init.name) {
++ ret = -ENOMEM;
++ goto err_clk;
++ }
+ init.ops = &vc5_clk_out_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = parent_names;
+@@ -1137,6 +1162,10 @@ static int vc5_probe(struct i2c_client *client)
+ memset(&init, 0, sizeof(init));
+ init.name = kasprintf(GFP_KERNEL, "%pOFn.out%d",
+ client->dev.of_node, idx + 1);
++ if (!init.name) {
++ ret = -ENOMEM;
++ goto err_clk;
++ }
+ init.ops = &vc5_clk_out_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = parent_names;
+@@ -1271,14 +1300,14 @@ static const struct vc5_chip_info idt_5p49v6975_info = {
+ };
+
+ static const struct i2c_device_id vc5_id[] = {
+- { "5p49v5923", .driver_data = IDT_VC5_5P49V5923 },
+- { "5p49v5925", .driver_data = IDT_VC5_5P49V5925 },
+- { "5p49v5933", .driver_data = IDT_VC5_5P49V5933 },
+- { "5p49v5935", .driver_data = IDT_VC5_5P49V5935 },
+- { "5p49v60", .driver_data = IDT_VC6_5P49V60 },
+- { "5p49v6901", .driver_data = IDT_VC6_5P49V6901 },
+- { "5p49v6965", .driver_data = IDT_VC6_5P49V6965 },
+- { "5p49v6975", .driver_data = IDT_VC6_5P49V6975 },
++ { "5p49v5923", .driver_data = (kernel_ulong_t)&idt_5p49v5923_info },
++ { "5p49v5925", .driver_data = (kernel_ulong_t)&idt_5p49v5925_info },
++ { "5p49v5933", .driver_data = (kernel_ulong_t)&idt_5p49v5933_info },
++ { "5p49v5935", .driver_data = (kernel_ulong_t)&idt_5p49v5935_info },
++ { "5p49v60", .driver_data = (kernel_ulong_t)&idt_5p49v60_info },
++ { "5p49v6901", .driver_data = (kernel_ulong_t)&idt_5p49v6901_info },
++ { "5p49v6965", .driver_data = (kernel_ulong_t)&idt_5p49v6965_info },
++ { "5p49v6975", .driver_data = (kernel_ulong_t)&idt_5p49v6975_info },
+ { }
+ };
+ MODULE_DEVICE_TABLE(i2c, vc5_id);
+diff --git a/drivers/clk/clk-versaclock7.c b/drivers/clk/clk-versaclock7.c
+index 8e4f86e852aa0..0ae191f50b4b2 100644
+--- a/drivers/clk/clk-versaclock7.c
++++ b/drivers/clk/clk-versaclock7.c
+@@ -1282,7 +1282,7 @@ static const struct regmap_config vc7_regmap_config = {
+ };
+
+ static const struct i2c_device_id vc7_i2c_id[] = {
+- { "rc21008a", VC7_RC21008A },
++ { "rc21008a", .driver_data = (kernel_ulong_t)&vc7_rc21008a_info },
+ {}
+ };
+ MODULE_DEVICE_TABLE(i2c, vc7_i2c_id);
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index ae07685c7588b..15a405a5582bb 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -1547,6 +1547,7 @@ void clk_hw_forward_rate_request(const struct clk_hw *hw,
+ parent->core, req,
+ parent_rate);
+ }
++EXPORT_SYMBOL_GPL(clk_hw_forward_rate_request);
+
+ static bool clk_core_can_round(struct clk_core * const core)
+ {
+@@ -4693,6 +4694,7 @@ int devm_clk_notifier_register(struct device *dev, struct clk *clk,
+ if (!ret) {
+ devres->clk = clk;
+ devres->nb = nb;
++ devres_add(dev, devres);
+ } else {
+ devres_free(devres);
+ }
+diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
+index a042ed3a9d6c2..569b2abf40525 100644
+--- a/drivers/clk/imx/clk-imx8mn.c
++++ b/drivers/clk/imx/clk-imx8mn.c
+@@ -323,7 +323,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
+ void __iomem *base;
+ int ret;
+
+- clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
++ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws,
+ IMX8MN_CLK_END), GFP_KERNEL);
+ if (WARN_ON(!clk_hw_data))
+ return -ENOMEM;
+@@ -340,10 +340,10 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
+ hws[IMX8MN_CLK_EXT4] = imx_get_clk_hw_by_name(np, "clk_ext4");
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mn-anatop");
+- base = of_iomap(np, 0);
++ base = devm_of_iomap(dev, np, 0, NULL);
+ of_node_put(np);
+- if (WARN_ON(!base)) {
+- ret = -ENOMEM;
++ if (WARN_ON(IS_ERR(base))) {
++ ret = PTR_ERR(base);
+ goto unregister_hws;
+ }
+
+diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
+index 3253589851ffb..de7d2d2176bea 100644
+--- a/drivers/clk/imx/clk-imx8mp.c
++++ b/drivers/clk/imx/clk-imx8mp.c
+@@ -414,25 +414,22 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ void __iomem *anatop_base, *ccm_base;
++ int err;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
+- anatop_base = of_iomap(np, 0);
++ anatop_base = devm_of_iomap(dev, np, 0, NULL);
+ of_node_put(np);
+- if (WARN_ON(!anatop_base))
+- return -ENOMEM;
++ if (WARN_ON(IS_ERR(anatop_base)))
++ return PTR_ERR(anatop_base);
+
+ np = dev->of_node;
+ ccm_base = devm_platform_ioremap_resource(pdev, 0);
+- if (WARN_ON(IS_ERR(ccm_base))) {
+- iounmap(anatop_base);
++ if (WARN_ON(IS_ERR(ccm_base)))
+ return PTR_ERR(ccm_base);
+- }
+
+- clk_hw_data = kzalloc(struct_size(clk_hw_data, hws, IMX8MP_CLK_END), GFP_KERNEL);
+- if (WARN_ON(!clk_hw_data)) {
+- iounmap(anatop_base);
++ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, IMX8MP_CLK_END), GFP_KERNEL);
++ if (WARN_ON(!clk_hw_data))
+ return -ENOMEM;
+- }
+
+ clk_hw_data->num = IMX8MP_CLK_END;
+ hws = clk_hw_data->hws;
+@@ -721,7 +718,12 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
+
+ imx_check_clk_hws(hws, IMX8MP_CLK_END);
+
+- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
++ err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
++ if (err < 0) {
++ dev_err(dev, "failed to register hws for i.MX8MP\n");
++ imx_unregister_hw_clocks(hws, IMX8MP_CLK_END);
++ return err;
++ }
+
+ imx_register_uart_clocks();
+
+diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
+index 8d0974db6bfd8..face30012b7dd 100644
+--- a/drivers/clk/imx/clk-imx93.c
++++ b/drivers/clk/imx/clk-imx93.c
+@@ -262,7 +262,7 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+ void __iomem *base, *anatop_base;
+ int i, ret;
+
+- clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
++ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws,
+ IMX93_CLK_END), GFP_KERNEL);
+ if (!clk_hw_data)
+ return -ENOMEM;
+@@ -286,10 +286,12 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+ "sys_pll_pfd2", 1, 2);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx93-anatop");
+- anatop_base = of_iomap(np, 0);
++ anatop_base = devm_of_iomap(dev, np, 0, NULL);
+ of_node_put(np);
+- if (WARN_ON(!anatop_base))
+- return -ENOMEM;
++ if (WARN_ON(IS_ERR(anatop_base))) {
++ ret = PTR_ERR(base);
++ goto unregister_hws;
++ }
+
+ clks[IMX93_CLK_AUDIO_PLL] = imx_clk_fracn_gppll("audio_pll", "osc_24m", anatop_base + 0x1200,
+ &imx_fracn_gppll);
+@@ -299,8 +301,8 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+ np = dev->of_node;
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base))) {
+- iounmap(anatop_base);
+- return PTR_ERR(base);
++ ret = PTR_ERR(base);
++ goto unregister_hws;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(root_array); i++) {
+@@ -332,7 +334,6 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+
+ unregister_hws:
+ imx_unregister_hw_clocks(clks, IMX93_CLK_END);
+- iounmap(anatop_base);
+
+ return ret;
+ }
+diff --git a/drivers/clk/imx/clk-imxrt1050.c b/drivers/clk/imx/clk-imxrt1050.c
+index fd5c51fc92c0e..08d155feb035a 100644
+--- a/drivers/clk/imx/clk-imxrt1050.c
++++ b/drivers/clk/imx/clk-imxrt1050.c
+@@ -42,7 +42,7 @@ static int imxrt1050_clocks_probe(struct platform_device *pdev)
+ struct device_node *anp;
+ int ret;
+
+- clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
++ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws,
+ IMXRT1050_CLK_END), GFP_KERNEL);
+ if (WARN_ON(!clk_hw_data))
+ return -ENOMEM;
+@@ -53,10 +53,12 @@ static int imxrt1050_clocks_probe(struct platform_device *pdev)
+ hws[IMXRT1050_CLK_OSC] = imx_get_clk_hw_by_name(np, "osc");
+
+ anp = of_find_compatible_node(NULL, NULL, "fsl,imxrt-anatop");
+- pll_base = of_iomap(anp, 0);
++ pll_base = devm_of_iomap(dev, anp, 0, NULL);
+ of_node_put(anp);
+- if (WARN_ON(!pll_base))
+- return -ENOMEM;
++ if (WARN_ON(IS_ERR(pll_base))) {
++ ret = PTR_ERR(pll_base);
++ goto unregister_hws;
++ }
+
+ /* Anatop clocks */
+ hws[IMXRT1050_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0UL);
+@@ -104,8 +106,10 @@ static int imxrt1050_clocks_probe(struct platform_device *pdev)
+
+ /* CCM clocks */
+ ccm_base = devm_platform_ioremap_resource(pdev, 0);
+- if (WARN_ON(IS_ERR(ccm_base)))
+- return PTR_ERR(ccm_base);
++ if (WARN_ON(IS_ERR(ccm_base))) {
++ ret = PTR_ERR(ccm_base);
++ goto unregister_hws;
++ }
+
+ hws[IMXRT1050_CLK_ARM_PODF] = imx_clk_hw_divider("arm_podf", "pll1_arm", ccm_base + 0x10, 0, 3);
+ hws[IMXRT1050_CLK_PRE_PERIPH_SEL] = imx_clk_hw_mux("pre_periph_sel", ccm_base + 0x18, 18, 2,
+@@ -149,8 +153,12 @@ static int imxrt1050_clocks_probe(struct platform_device *pdev)
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register clks for i.MXRT1050.\n");
+- imx_unregister_hw_clocks(hws, IMXRT1050_CLK_END);
++ goto unregister_hws;
+ }
++ return 0;
++
++unregister_hws:
++ imx_unregister_hw_clocks(hws, IMXRT1050_CLK_END);
+ return ret;
+ }
+ static const struct of_device_id imxrt1050_clk_of_match[] = {
+diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
+index 1e6870f3671f6..db307890e4c16 100644
+--- a/drivers/clk/imx/clk-scu.c
++++ b/drivers/clk/imx/clk-scu.c
+@@ -707,11 +707,11 @@ struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
+
+ void imx_clk_scu_unregister(void)
+ {
+- struct imx_scu_clk_node *clk;
++ struct imx_scu_clk_node *clk, *n;
+ int i;
+
+ for (i = 0; i < IMX_SC_R_LAST; i++) {
+- list_for_each_entry(clk, &imx_scu_clks[i], node) {
++ list_for_each_entry_safe(clk, n, &imx_scu_clks[i], node) {
+ clk_hw_unregister(clk->hw);
+ kfree(clk);
+ }
+diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
+index d4b4e74e22da6..254f2cf24be21 100644
+--- a/drivers/clk/keystone/sci-clk.c
++++ b/drivers/clk/keystone/sci-clk.c
+@@ -294,6 +294,8 @@ static int _sci_clk_build(struct sci_clk_provider *provider,
+
+ name = kasprintf(GFP_KERNEL, "clk:%d:%d", sci_clk->dev_id,
+ sci_clk->clk_id);
++ if (!name)
++ return -ENOMEM;
+
+ init.name = name;
+
+diff --git a/drivers/clk/mediatek/clk-mt8173-apmixedsys.c b/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
+index a56c5845d07a5..0b95d14c18042 100644
+--- a/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
++++ b/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
+@@ -92,11 +92,13 @@ static int clk_mt8173_apmixed_probe(struct platform_device *pdev)
+
+ base = of_iomap(node, 0);
+ if (!base)
+- return PTR_ERR(base);
++ return -ENOMEM;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+- if (IS_ERR_OR_NULL(clk_data))
++ if (IS_ERR_OR_NULL(clk_data)) {
++ iounmap(base);
+ return -ENOMEM;
++ }
+
+ r = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (r)
+@@ -127,6 +129,7 @@ unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+ free_clk_data:
+ mtk_free_clk_data(clk_data);
++ iounmap(base);
+ return r;
+ }
+
+diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
+index 14e8b64a32a3c..b93fb1d80878c 100644
+--- a/drivers/clk/mediatek/clk-mtk.c
++++ b/drivers/clk/mediatek/clk-mtk.c
+@@ -492,8 +492,10 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
+ num_clks += mcd->num_mux_clks;
+
+ clk_data = mtk_alloc_clk_data(num_clks);
+- if (!clk_data)
+- return -ENOMEM;
++ if (!clk_data) {
++ r = -ENOMEM;
++ goto free_base;
++ }
+
+ if (mcd->fixed_clks) {
+ r = mtk_clk_register_fixed_clks(mcd->fixed_clks,
+@@ -578,6 +580,7 @@ unregister_fixed_clks:
+ mcd->num_fixed_clks, clk_data);
+ free_data:
+ mtk_free_clk_data(clk_data);
++free_base:
+ if (mcd->shared_io && base)
+ iounmap(base);
+ return r;
+diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
+index 4bf40f6ccd1d1..22ed543fe6b06 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.c
++++ b/drivers/clk/renesas/rzg2l-cpg.c
+@@ -603,10 +603,8 @@ static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
+ }
+
+ /* Output clock setting 1 */
+- writel(CPG_SIPLL5_CLK1_POSTDIV1_WEN | CPG_SIPLL5_CLK1_POSTDIV2_WEN |
+- CPG_SIPLL5_CLK1_REFDIV_WEN | (params.pl5_postdiv1 << 0) |
+- (params.pl5_postdiv2 << 4) | (params.pl5_refdiv << 8),
+- priv->base + CPG_SIPLL5_CLK1);
++ writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
++ (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);
+
+ /* Output clock setting, SSCG modulation value setting 3 */
+ writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);
+diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
+index eee780276a9e2..6cee9e56acc72 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.h
++++ b/drivers/clk/renesas/rzg2l-cpg.h
+@@ -32,9 +32,6 @@
+ #define CPG_SIPLL5_STBY_RESETB_WEN BIT(16)
+ #define CPG_SIPLL5_STBY_SSCG_EN_WEN BIT(18)
+ #define CPG_SIPLL5_STBY_DOWNSPREAD_WEN BIT(20)
+-#define CPG_SIPLL5_CLK1_POSTDIV1_WEN BIT(16)
+-#define CPG_SIPLL5_CLK1_POSTDIV2_WEN BIT(20)
+-#define CPG_SIPLL5_CLK1_REFDIV_WEN BIT(24)
+ #define CPG_SIPLL5_CLK4_RESV_LSB (0xFF)
+ #define CPG_SIPLL5_MON_PLL5_LOCK BIT(4)
+
+diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c
+index 219c80653dbdb..2a6db04342815 100644
+--- a/drivers/clk/tegra/clk-tegra124-emc.c
++++ b/drivers/clk/tegra/clk-tegra124-emc.c
+@@ -464,6 +464,7 @@ static int load_timings_from_dt(struct tegra_clk_emc *tegra,
+ err = load_one_timing_from_dt(tegra, timing, child);
+ if (err) {
+ of_node_put(child);
++ kfree(tegra->timings);
+ return err;
+ }
+
+@@ -515,6 +516,7 @@ struct clk *tegra124_clk_register_emc(void __iomem *base, struct device_node *np
+ err = load_timings_from_dt(tegra, node, node_ram_code);
+ if (err) {
+ of_node_put(node);
++ kfree(tegra);
+ return ERR_PTR(err);
+ }
+ }
+diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
+index f73f402ff7de9..87e5624789ef6 100644
+--- a/drivers/clk/ti/clkctrl.c
++++ b/drivers/clk/ti/clkctrl.c
+@@ -258,6 +258,9 @@ static const char * __init clkctrl_get_clock_name(struct device_node *np,
+ if (clkctrl_name && !legacy_naming) {
+ clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
+ clkctrl_name, offset, index);
++ if (!clock_name)
++ return NULL;
++
+ strreplace(clock_name, '_', '-');
+
+ return clock_name;
+@@ -586,6 +589,10 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
+ if (clkctrl_name) {
+ provider->clkdm_name = kasprintf(GFP_KERNEL,
+ "%s_clkdm", clkctrl_name);
++ if (!provider->clkdm_name) {
++ kfree(provider);
++ return;
++ }
+ goto clkdm_found;
+ }
+
+diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+index eb1dfe7ecc1b4..4a23583933bcc 100644
+--- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
++++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+@@ -354,7 +354,7 @@ static struct clk *clk_wzrd_register_divider(struct device *dev,
+ hw = &div->hw;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+- hw = ERR_PTR(ret);
++ return ERR_PTR(ret);
+
+ return hw->clk;
+ }
+diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c
+index 4efd0cf3b602d..0d52e28fea4de 100644
+--- a/drivers/clocksource/timer-cadence-ttc.c
++++ b/drivers/clocksource/timer-cadence-ttc.c
+@@ -486,10 +486,10 @@ static int __init ttc_timer_probe(struct platform_device *pdev)
+ * and use it. Note that the event timer uses the interrupt and it's the
+ * 2nd TTC hence the irq_of_parse_and_map(,1)
+ */
+- timer_baseaddr = of_iomap(timer, 0);
+- if (!timer_baseaddr) {
++ timer_baseaddr = devm_of_iomap(&pdev->dev, timer, 0, NULL);
++ if (IS_ERR(timer_baseaddr)) {
+ pr_err("ERROR: invalid timer base address\n");
+- return -ENXIO;
++ return PTR_ERR(timer_baseaddr);
+ }
+
+ irq = irq_of_parse_and_map(timer, 1);
+@@ -513,20 +513,27 @@ static int __init ttc_timer_probe(struct platform_device *pdev)
+ clk_ce = of_clk_get(timer, clksel);
+ if (IS_ERR(clk_ce)) {
+ pr_err("ERROR: timer input clock not found\n");
+- return PTR_ERR(clk_ce);
++ ret = PTR_ERR(clk_ce);
++ goto put_clk_cs;
+ }
+
+ ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
+ if (ret)
+- return ret;
++ goto put_clk_ce;
+
+ ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
+ if (ret)
+- return ret;
++ goto put_clk_ce;
+
+ pr_info("%pOFn #0 at %p, irq=%d\n", timer, timer_baseaddr, irq);
+
+ return 0;
++
++put_clk_ce:
++ clk_put(clk_ce);
++put_clk_cs:
++ clk_put(clk_cs);
++ return ret;
+ }
+
+ static const struct of_device_id ttc_timer_of_match[] = {
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 48a4613cef1e1..ee9e96a1893c6 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -824,6 +824,8 @@ static ssize_t store_energy_performance_preference(
+ err = cpufreq_start_governor(policy);
+ if (!ret)
+ ret = err;
++ } else {
++ ret = 0;
+ }
+ }
+
+diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
+index c6f2fa753b7c0..0f37dfd42d850 100644
+--- a/drivers/crypto/marvell/cesa/cipher.c
++++ b/drivers/crypto/marvell/cesa/cipher.c
+@@ -297,7 +297,7 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
+ static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int len)
+ {
+- struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
++ struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
+ int err;
+
+ err = verify_skcipher_des3_key(cipher, key);
+diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
+index d00181a26dd65..483cef62acee8 100644
+--- a/drivers/crypto/nx/Makefile
++++ b/drivers/crypto/nx/Makefile
+@@ -1,7 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o
+ nx-crypto-objs := nx.o \
+- nx_debugfs.o \
+ nx-aes-cbc.o \
+ nx-aes-ecb.o \
+ nx-aes-gcm.o \
+@@ -11,6 +10,7 @@ nx-crypto-objs := nx.o \
+ nx-sha256.o \
+ nx-sha512.o
+
++nx-crypto-$(CONFIG_DEBUG_FS) += nx_debugfs.o
+ obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
+ obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
+ nx-compress-objs := nx-842.o
+diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
+index c6233173c612e..2697baebb6a35 100644
+--- a/drivers/crypto/nx/nx.h
++++ b/drivers/crypto/nx/nx.h
+@@ -170,8 +170,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
+ void nx_debugfs_init(struct nx_crypto_driver *);
+ void nx_debugfs_fini(struct nx_crypto_driver *);
+ #else
+-#define NX_DEBUGFS_INIT(drv) (0)
+-#define NX_DEBUGFS_FINI(drv) (0)
++#define NX_DEBUGFS_INIT(drv) do {} while (0)
++#define NX_DEBUGFS_FINI(drv) do {} while (0)
+ #endif
+
+ #define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL)
+diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+index 935a7e012946e..4128200a90329 100644
+--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+@@ -170,15 +170,14 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
+ }
+
+ areq->dst_len = req->ctx.dh->p_size;
++ dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
++ DMA_FROM_DEVICE);
+ if (req->dst_align) {
+ scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+ areq->dst_len, 1);
+ kfree_sensitive(req->dst_align);
+ }
+
+- dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+- DMA_FROM_DEVICE);
+-
+ dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, req->phy_out,
+@@ -521,12 +520,14 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+
+ err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+- kfree_sensitive(req->src_align);
+-
+ dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+ DMA_TO_DEVICE);
+
++ kfree_sensitive(req->src_align);
++
+ areq->dst_len = req->ctx.rsa->key_sz;
++ dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
++ DMA_FROM_DEVICE);
+ if (req->dst_align) {
+ scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+ areq->dst_len, 1);
+@@ -534,9 +535,6 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+ kfree_sensitive(req->dst_align);
+ }
+
+- dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+- DMA_FROM_DEVICE);
+-
+ dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, req->phy_out,
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index b2fd67fcebfb5..1997bc1bf64aa 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -125,10 +125,38 @@ static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
+ return xa_load(&port->regions, (unsigned long)cxlr);
+ }
+
++static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
++{
++ if (!cpu_cache_has_invalidate_memregion()) {
++ if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
++ dev_warn_once(
++ &cxlr->dev,
++ "Bypassing cpu_cache_invalidate_memregion() for testing!\n");
++ return 0;
++ } else {
++ dev_err(&cxlr->dev,
++ "Failed to synchronize CPU cache state\n");
++ return -ENXIO;
++ }
++ }
++
++ cpu_cache_invalidate_memregion(IORES_DESC_CXL);
++ return 0;
++}
++
+ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
+ {
+ struct cxl_region_params *p = &cxlr->params;
+- int i;
++ int i, rc = 0;
++
++ /*
++ * Before region teardown attempt to flush, and if the flush
++ * fails cancel the region teardown for data consistency
++ * concerns
++ */
++ rc = cxl_region_invalidate_memregion(cxlr);
++ if (rc)
++ return rc;
+
+ for (i = count - 1; i >= 0; i--) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+@@ -136,7 +164,6 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
+ struct cxl_port *iter = cxled_to_port(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_ep *ep;
+- int rc = 0;
+
+ if (cxlds->rcd)
+ goto endpoint_reset;
+@@ -155,14 +182,19 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
+ rc = cxld->reset(cxld);
+ if (rc)
+ return rc;
++ set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
+ }
+
+ endpoint_reset:
+ rc = cxled->cxld.reset(&cxled->cxld);
+ if (rc)
+ return rc;
++ set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
+ }
+
++ /* all decoders associated with this region have been torn down */
++ clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
++
+ return 0;
+ }
+
+@@ -256,9 +288,19 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
+ goto out;
+ }
+
+- if (commit)
++ /*
++ * Invalidate caches before region setup to drop any speculative
++ * consumption of this address space
++ */
++ rc = cxl_region_invalidate_memregion(cxlr);
++ if (rc)
++ return rc;
++
++ if (commit) {
+ rc = cxl_region_decode_commit(cxlr);
+- else {
++ if (rc == 0)
++ p->state = CXL_CONFIG_COMMIT;
++ } else {
+ p->state = CXL_CONFIG_RESET_PENDING;
+ up_write(&cxl_region_rwsem);
+ device_release_driver(&cxlr->dev);
+@@ -268,18 +310,20 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
+ * The lock was dropped, so need to revalidate that the reset is
+ * still pending.
+ */
+- if (p->state == CXL_CONFIG_RESET_PENDING)
++ if (p->state == CXL_CONFIG_RESET_PENDING) {
+ rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
++ /*
++ * Revert to committed since there may still be active
++ * decoders associated with this region, or move forward
++ * to active to mark the reset successful
++ */
++ if (rc)
++ p->state = CXL_CONFIG_COMMIT;
++ else
++ p->state = CXL_CONFIG_ACTIVE;
++ }
+ }
+
+- if (rc)
+- goto out;
+-
+- if (commit)
+- p->state = CXL_CONFIG_COMMIT;
+- else if (p->state == CXL_CONFIG_RESET_PENDING)
+- p->state = CXL_CONFIG_ACTIVE;
+-
+ out:
+ up_write(&cxl_region_rwsem);
+
+@@ -1674,7 +1718,6 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ if (rc)
+ goto err_decrement;
+ p->state = CXL_CONFIG_ACTIVE;
+- set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
+ }
+
+ cxled->cxld.interleave_ways = p->interleave_ways;
+@@ -2679,30 +2722,6 @@ out:
+ }
+ EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);
+
+-static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
+-{
+- if (!test_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags))
+- return 0;
+-
+- if (!cpu_cache_has_invalidate_memregion()) {
+- if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
+- dev_warn_once(
+- &cxlr->dev,
+- "Bypassing cpu_cache_invalidate_memregion() for testing!\n");
+- clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
+- return 0;
+- } else {
+- dev_err(&cxlr->dev,
+- "Failed to synchronize CPU cache state\n");
+- return -ENXIO;
+- }
+- }
+-
+- cpu_cache_invalidate_memregion(IORES_DESC_CXL);
+- clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
+- return 0;
+-}
+-
+ static int is_system_ram(struct resource *res, void *arg)
+ {
+ struct cxl_region *cxlr = arg;
+@@ -2730,7 +2749,12 @@ static int cxl_region_probe(struct device *dev)
+ goto out;
+ }
+
+- rc = cxl_region_invalidate_memregion(cxlr);
++ if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
++ dev_err(&cxlr->dev,
++ "failed to activate, re-commit region and retry\n");
++ rc = -ENXIO;
++ goto out;
++ }
+
+ /*
+ * From this point on any path that changes the region's state away from
+diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
+index 044a92d9813e2..dcebe48bb5bb5 100644
+--- a/drivers/cxl/cxl.h
++++ b/drivers/cxl/cxl.h
+@@ -462,18 +462,20 @@ struct cxl_region_params {
+ int nr_targets;
+ };
+
+-/*
+- * Flag whether this region needs to have its HPA span synchronized with
+- * CPU cache state at region activation time.
+- */
+-#define CXL_REGION_F_INCOHERENT 0
+-
+ /*
+ * Indicate whether this region has been assembled by autodetection or
+ * userspace assembly. Prevent endpoint decoders outside of automatic
+ * detection from being added to the region.
+ */
+-#define CXL_REGION_F_AUTO 1
++#define CXL_REGION_F_AUTO 0
++
++/*
++ * Require that a committed region successfully complete a teardown once
++ * any of its associated decoders have been torn down. This maintains
++ * the commit state for the region since there are committed decoders,
++ * but blocks cxl_region_probe().
++ */
++#define CXL_REGION_F_NEEDS_RESET 1
+
+ /**
+ * struct cxl_region - CXL region
+diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
+index 227800053309f..e7c61358564e1 100644
+--- a/drivers/dax/bus.c
++++ b/drivers/dax/bus.c
+@@ -446,18 +446,34 @@ static void unregister_dev_dax(void *dev)
+ put_device(dev);
+ }
+
++static void dax_region_free(struct kref *kref)
++{
++ struct dax_region *dax_region;
++
++ dax_region = container_of(kref, struct dax_region, kref);
++ kfree(dax_region);
++}
++
++void dax_region_put(struct dax_region *dax_region)
++{
++ kref_put(&dax_region->kref, dax_region_free);
++}
++EXPORT_SYMBOL_GPL(dax_region_put);
++
+ /* a return value >= 0 indicates this invocation invalidated the id */
+ static int __free_dev_dax_id(struct dev_dax *dev_dax)
+ {
+- struct dax_region *dax_region = dev_dax->region;
+ struct device *dev = &dev_dax->dev;
++ struct dax_region *dax_region;
+ int rc = dev_dax->id;
+
+ device_lock_assert(dev);
+
+- if (is_static(dax_region) || dev_dax->id < 0)
++ if (!dev_dax->dyn_id || dev_dax->id < 0)
+ return -1;
++ dax_region = dev_dax->region;
+ ida_free(&dax_region->ida, dev_dax->id);
++ dax_region_put(dax_region);
+ dev_dax->id = -1;
+ return rc;
+ }
+@@ -473,6 +489,20 @@ static int free_dev_dax_id(struct dev_dax *dev_dax)
+ return rc;
+ }
+
++static int alloc_dev_dax_id(struct dev_dax *dev_dax)
++{
++ struct dax_region *dax_region = dev_dax->region;
++ int id;
++
++ id = ida_alloc(&dax_region->ida, GFP_KERNEL);
++ if (id < 0)
++ return id;
++ kref_get(&dax_region->kref);
++ dev_dax->dyn_id = true;
++ dev_dax->id = id;
++ return id;
++}
++
+ static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+ {
+@@ -560,20 +590,6 @@ static const struct attribute_group *dax_region_attribute_groups[] = {
+ NULL,
+ };
+
+-static void dax_region_free(struct kref *kref)
+-{
+- struct dax_region *dax_region;
+-
+- dax_region = container_of(kref, struct dax_region, kref);
+- kfree(dax_region);
+-}
+-
+-void dax_region_put(struct dax_region *dax_region)
+-{
+- kref_put(&dax_region->kref, dax_region_free);
+-}
+-EXPORT_SYMBOL_GPL(dax_region_put);
+-
+ static void dax_region_unregister(void *region)
+ {
+ struct dax_region *dax_region = region;
+@@ -635,10 +651,12 @@ EXPORT_SYMBOL_GPL(alloc_dax_region);
+ static void dax_mapping_release(struct device *dev)
+ {
+ struct dax_mapping *mapping = to_dax_mapping(dev);
+- struct dev_dax *dev_dax = to_dev_dax(dev->parent);
++ struct device *parent = dev->parent;
++ struct dev_dax *dev_dax = to_dev_dax(parent);
+
+ ida_free(&dev_dax->ida, mapping->id);
+ kfree(mapping);
++ put_device(parent);
+ }
+
+ static void unregister_dax_mapping(void *data)
+@@ -778,6 +796,7 @@ static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
+ dev = &mapping->dev;
+ device_initialize(dev);
+ dev->parent = &dev_dax->dev;
++ get_device(dev->parent);
+ dev->type = &dax_mapping_type;
+ dev_set_name(dev, "mapping%d", mapping->id);
+ rc = device_add(dev);
+@@ -1295,12 +1314,10 @@ static const struct attribute_group *dax_attribute_groups[] = {
+ static void dev_dax_release(struct device *dev)
+ {
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+- struct dax_region *dax_region = dev_dax->region;
+ struct dax_device *dax_dev = dev_dax->dax_dev;
+
+ put_dax(dax_dev);
+ free_dev_dax_id(dev_dax);
+- dax_region_put(dax_region);
+ kfree(dev_dax->pgmap);
+ kfree(dev_dax);
+ }
+@@ -1324,6 +1341,7 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
+ if (!dev_dax)
+ return ERR_PTR(-ENOMEM);
+
++ dev_dax->region = dax_region;
+ if (is_static(dax_region)) {
+ if (dev_WARN_ONCE(parent, data->id < 0,
+ "dynamic id specified to static region\n")) {
+@@ -1339,13 +1357,11 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
+ goto err_id;
+ }
+
+- rc = ida_alloc(&dax_region->ida, GFP_KERNEL);
++ rc = alloc_dev_dax_id(dev_dax);
+ if (rc < 0)
+ goto err_id;
+- dev_dax->id = rc;
+ }
+
+- dev_dax->region = dax_region;
+ dev = &dev_dax->dev;
+ device_initialize(dev);
+ dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
+@@ -1386,7 +1402,6 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
+ dev_dax->target_node = dax_region->target_node;
+ dev_dax->align = dax_region->align;
+ ida_init(&dev_dax->ida);
+- kref_get(&dax_region->kref);
+
+ inode = dax_inode(dax_dev);
+ dev->devt = inode->i_rdev;
+diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
+index 1c974b7caae6e..afcada6fd2eda 100644
+--- a/drivers/dax/dax-private.h
++++ b/drivers/dax/dax-private.h
+@@ -52,7 +52,8 @@ struct dax_mapping {
+ * @region - parent region
+ * @dax_dev - core dax functionality
+ * @target_node: effective numa node if dev_dax memory range is onlined
+- * @id: ida allocated id
++ * @dyn_id: is this a dynamic or statically created instance
++ * @id: ida allocated id when the dax_region is not static
+ * @ida: mapping id allocator
+ * @dev - device core
+ * @pgmap - pgmap for memmap setup / lifetime (driver owned)
+@@ -64,6 +65,7 @@ struct dev_dax {
+ struct dax_device *dax_dev;
+ unsigned int align;
+ int target_node;
++ bool dyn_id;
+ int id;
+ struct ida ida;
+ struct device dev;
+diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
+index 7b36db6f1cbdc..898ca95057547 100644
+--- a/drivers/dax/kmem.c
++++ b/drivers/dax/kmem.c
+@@ -99,7 +99,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
+ if (!data->res_name)
+ goto err_res_name;
+
+- rc = memory_group_register_static(numa_node, total_len);
++ rc = memory_group_register_static(numa_node, PFN_UP(total_len));
+ if (rc < 0)
+ goto err_reg_mgid;
+ data->mgid = rc;
+diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
+index 1e0203d74691f..732984295295f 100644
+--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
++++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
+@@ -378,6 +378,9 @@ efi_status_t efi_exit_boot_services(void *handle, void *priv,
+ struct efi_boot_memmap *map;
+ efi_status_t status;
+
++ if (efi_disable_pci_dma)
++ efi_pci_disable_bridge_busmaster();
++
+ status = efi_get_memory_map(&map, true);
+ if (status != EFI_SUCCESS)
+ return status;
+@@ -388,9 +391,6 @@ efi_status_t efi_exit_boot_services(void *handle, void *priv,
+ return status;
+ }
+
+- if (efi_disable_pci_dma)
+- efi_pci_disable_bridge_busmaster();
+-
+ status = efi_bs_call(exit_boot_services, handle, map->map_key);
+
+ if (status == EFI_INVALID_PARAMETER) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index 63dfcc98152d5..b3daca6372a90 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -170,8 +170,7 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
+
+ memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
+ err_data.err_addr = &err_rec;
+- amdgpu_umc_fill_error_record(&err_data, address,
+- (address >> AMDGPU_GPU_PAGE_SHIFT), 0, 0);
++ amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
+
+ if (amdgpu_bad_page_threshold != 0) {
+ amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 587879f3ac2e6..30c0c49b37105 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1436,14 +1436,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ uint64_t eaddr;
+
+ /* validate the parameters */
+- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+- size == 0 || size & ~PAGE_MASK)
++ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
++ return -EINVAL;
++ if (saddr + size <= saddr || offset + size <= offset)
+ return -EINVAL;
+
+ /* make sure object fit at this offset */
+ eaddr = saddr + size - 1;
+- if (saddr >= eaddr ||
+- (bo && offset + size > amdgpu_bo_size(bo)) ||
++ if ((bo && offset + size > amdgpu_bo_size(bo)) ||
+ (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
+ return -EINVAL;
+
+@@ -1502,14 +1502,14 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
+ int r;
+
+ /* validate the parameters */
+- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+- size == 0 || size & ~PAGE_MASK)
++ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
++ return -EINVAL;
++ if (saddr + size <= saddr || offset + size <= offset)
+ return -EINVAL;
+
+ /* make sure object fit at this offset */
+ eaddr = saddr + size - 1;
+- if (saddr >= eaddr ||
+- (bo && offset + size > amdgpu_bo_size(bo)) ||
++ if ((bo && offset + size > amdgpu_bo_size(bo)) ||
+ (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
+ return -EINVAL;
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index 0778e587a2d68..eaf084acb706f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -115,18 +115,19 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
+ &(mqd_mem_obj->gtt_mem),
+ &(mqd_mem_obj->gpu_addr),
+ (void *)&(mqd_mem_obj->cpu_ptr), true);
++
++ if (retval) {
++ kfree(mqd_mem_obj);
++ return NULL;
++ }
+ } else {
+ retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd),
+ &mqd_mem_obj);
+- }
+-
+- if (retval) {
+- kfree(mqd_mem_obj);
+- return NULL;
++ if (retval)
++ return NULL;
+ }
+
+ return mqd_mem_obj;
+-
+ }
+
+ static void init_mqd(struct mqd_manager *mm, void **mqd,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 2cbd6949804f5..261dbd417c2f8 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -9276,6 +9276,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+
+ /* Now check if we should set freesync video mode */
+ if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
++ dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
++ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ is_timing_unchanged_for_freesync(new_crtc_state,
+ old_crtc_state)) {
+ new_crtc_state->mode_changed = false;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 3da519957f6c8..0096614f2a8be 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -48,8 +48,7 @@
+ #endif
+
+ #include "dc/dcn20/dcn20_resource.h"
+-bool is_timing_changed(struct dc_stream_state *cur_stream,
+- struct dc_stream_state *new_stream);
++
+ #define PEAK_FACTOR_X1000 1006
+
+ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+@@ -1426,7 +1425,7 @@ int pre_validate_dsc(struct drm_atomic_state *state,
+ struct dc_stream_state *stream = dm_state->context->streams[i];
+
+ if (local_dc_state->streams[i] &&
+- is_timing_changed(stream, local_dc_state->streams[i])) {
++ dc_is_timing_changed(stream, local_dc_state->streams[i])) {
+ DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i);
+ } else {
+ int ind = find_crtc_index_in_state_by_stream(state, stream);
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
+index 1fbf1c105dc12..bdbf183066981 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
+@@ -312,6 +312,9 @@ void dcn30_smu_set_display_refresh_from_mall(struct clk_mgr_internal *clk_mgr, b
+ /* bits 8:7 for cache timer scale, bits 6:1 for cache timer delay, bit 0 = 1 for enable, = 0 for disable */
+ uint32_t param = (cache_timer_scale << 7) | (cache_timer_delay << 1) | (enable ? 1 : 0);
+
++ smu_print("SMU Set display refresh from mall: enable = %d, cache_timer_delay = %d, cache_timer_scale = %d\n",
++ enable, cache_timer_delay, cache_timer_scale);
++
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetDisplayRefreshFromMall, param, NULL);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 9ec0a343efadb..e6e26fe1be0f8 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2540,9 +2540,6 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
+ enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+ union surface_update_flags *update_flags = &u->surface->update_flags;
+
+- if (u->flip_addr)
+- update_flags->bits.addr_update = 1;
+-
+ if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
+ update_flags->raw = 0xFFFFFFFF;
+ return UPDATE_TYPE_FULL;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 986de684b078e..7b0fd0dc31b34 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1878,7 +1878,7 @@ bool dc_add_all_planes_for_stream(
+ return add_all_planes_for_stream(dc, stream, &set, 1, context);
+ }
+
+-bool is_timing_changed(struct dc_stream_state *cur_stream,
++bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
+ struct dc_stream_state *new_stream)
+ {
+ if (cur_stream == NULL)
+@@ -1903,7 +1903,7 @@ static bool are_stream_backends_same(
+ if (stream_a == NULL || stream_b == NULL)
+ return false;
+
+- if (is_timing_changed(stream_a, stream_b))
++ if (dc_is_timing_changed(stream_a, stream_b))
+ return false;
+
+ if (stream_a->signal != stream_b->signal)
+@@ -3527,7 +3527,7 @@ bool pipe_need_reprogram(
+ if (pipe_ctx_old->stream_res.stream_enc != pipe_ctx->stream_res.stream_enc)
+ return true;
+
+- if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
++ if (dc_is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
+ return true;
+
+ if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off)
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 3fb868f2f6f5b..9307442dc2258 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -2223,4 +2223,7 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+ /* Disable acc mode Interfaces */
+ void dc_disable_accelerated_mode(struct dc *dc);
+
++bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
++ struct dc_stream_state *new_stream);
++
+ #endif /* DC_INTERFACE_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+index b7c2844d0cbee..f294f2f8c75bc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+@@ -810,7 +810,7 @@ static bool CalculatePrefetchSchedule(
+ *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockWidth256BytesC) + myPipe->BlockWidth256BytesC;
+ } else {
+ *swath_width_luma_ub = dml_ceil(SwathWidthY - 1, myPipe->BlockHeight256BytesY) + myPipe->BlockHeight256BytesY;
+- if (myPipe->BlockWidth256BytesC > 0)
++ if (myPipe->BlockHeight256BytesC > 0)
+ *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockHeight256BytesC) + myPipe->BlockHeight256BytesC;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
+index 395ae8761980f..9ba6cb67655f4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
+@@ -116,7 +116,7 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
+ else
+ rq_regs->rq_regs_l.min_meta_chunk_size = dml_log2(min_meta_chunk_bytes) - 6 + 1;
+
+- if (min_meta_chunk_bytes == 0)
++ if (p1_min_meta_chunk_bytes == 0)
+ rq_regs->rq_regs_c.min_meta_chunk_size = 0;
+ else
+ rq_regs->rq_regs_c.min_meta_chunk_size = dml_log2(p1_min_meta_chunk_bytes) - 6 + 1;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 85d53597eb07a..f7ed3e655e397 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -431,7 +431,13 @@ static int sienna_cichlid_append_powerplay_table(struct smu_context *smu)
+ {
+ struct atom_smc_dpm_info_v4_9 *smc_dpm_table;
+ int index, ret;
+- I2cControllerConfig_t *table_member;
++ PPTable_beige_goby_t *ppt_beige_goby;
++ PPTable_t *ppt;
++
++ if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
++ ppt_beige_goby = smu->smu_table.driver_pptable;
++ else
++ ppt = smu->smu_table.driver_pptable;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ smc_dpm_info);
+@@ -440,9 +446,13 @@ static int sienna_cichlid_append_powerplay_table(struct smu_context *smu)
+ (uint8_t **)&smc_dpm_table);
+ if (ret)
+ return ret;
+- GET_PPTABLE_MEMBER(I2cControllers, &table_member);
+- memcpy(table_member, smc_dpm_table->I2cControllers,
+- sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
++
++ if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
++ smu_memcpy_trailing(ppt_beige_goby, I2cControllers, BoardReserved,
++ smc_dpm_table, I2cControllers);
++ else
++ smu_memcpy_trailing(ppt, I2cControllers, BoardReserved,
++ smc_dpm_table, I2cControllers);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
+index 6846199a2ee14..9e387c3e9b696 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
+@@ -1687,6 +1687,14 @@ static int anx7625_parse_dt(struct device *dev,
+ if (of_property_read_bool(np, "analogix,audio-enable"))
+ pdata->audio_en = 1;
+
++ return 0;
++}
++
++static int anx7625_parse_dt_panel(struct device *dev,
++ struct anx7625_platform_data *pdata)
++{
++ struct device_node *np = dev->of_node;
++
+ pdata->panel_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
+ if (IS_ERR(pdata->panel_bridge)) {
+ if (PTR_ERR(pdata->panel_bridge) == -ENODEV) {
+@@ -2032,7 +2040,7 @@ static int anx7625_register_audio(struct device *dev, struct anx7625_data *ctx)
+ return 0;
+ }
+
+-static int anx7625_attach_dsi(struct anx7625_data *ctx)
++static int anx7625_setup_dsi_device(struct anx7625_data *ctx)
+ {
+ struct mipi_dsi_device *dsi;
+ struct device *dev = &ctx->client->dev;
+@@ -2042,9 +2050,6 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx)
+ .channel = 0,
+ .node = NULL,
+ };
+- int ret;
+-
+- DRM_DEV_DEBUG_DRIVER(dev, "attach dsi\n");
+
+ host = of_find_mipi_dsi_host_by_node(ctx->pdata.mipi_host_node);
+ if (!host) {
+@@ -2065,14 +2070,24 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx)
+ MIPI_DSI_MODE_VIDEO_HSE |
+ MIPI_DSI_HS_PKT_END_ALIGNED;
+
+- ret = devm_mipi_dsi_attach(dev, dsi);
++ ctx->dsi = dsi;
++
++ return 0;
++}
++
++static int anx7625_attach_dsi(struct anx7625_data *ctx)
++{
++ struct device *dev = &ctx->client->dev;
++ int ret;
++
++ DRM_DEV_DEBUG_DRIVER(dev, "attach dsi\n");
++
++ ret = devm_mipi_dsi_attach(dev, ctx->dsi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "fail to attach dsi to host.\n");
+ return ret;
+ }
+
+- ctx->dsi = dsi;
+-
+ DRM_DEV_DEBUG_DRIVER(dev, "attach dsi succeeded.\n");
+
+ return 0;
+@@ -2560,6 +2575,40 @@ static void anx7625_runtime_disable(void *data)
+ pm_runtime_disable(data);
+ }
+
++static int anx7625_link_bridge(struct drm_dp_aux *aux)
++{
++ struct anx7625_data *platform = container_of(aux, struct anx7625_data, aux);
++ struct device *dev = aux->dev;
++ int ret;
++
++ ret = anx7625_parse_dt_panel(dev, &platform->pdata);
++ if (ret) {
++ DRM_DEV_ERROR(dev, "fail to parse DT for panel : %d\n", ret);
++ return ret;
++ }
++
++ platform->bridge.funcs = &anx7625_bridge_funcs;
++ platform->bridge.of_node = dev->of_node;
++ if (!anx7625_of_panel_on_aux_bus(dev))
++ platform->bridge.ops |= DRM_BRIDGE_OP_EDID;
++ if (!platform->pdata.panel_bridge)
++ platform->bridge.ops |= DRM_BRIDGE_OP_HPD |
++ DRM_BRIDGE_OP_DETECT;
++ platform->bridge.type = platform->pdata.panel_bridge ?
++ DRM_MODE_CONNECTOR_eDP :
++ DRM_MODE_CONNECTOR_DisplayPort;
++
++ drm_bridge_add(&platform->bridge);
++
++ if (!platform->pdata.is_dpi) {
++ ret = anx7625_attach_dsi(platform);
++ if (ret)
++ drm_bridge_remove(&platform->bridge);
++ }
++
++ return ret;
++}
++
+ static int anx7625_i2c_probe(struct i2c_client *client)
+ {
+ struct anx7625_data *platform;
+@@ -2634,6 +2683,24 @@ static int anx7625_i2c_probe(struct i2c_client *client)
+ platform->aux.wait_hpd_asserted = anx7625_wait_hpd_asserted;
+ drm_dp_aux_init(&platform->aux);
+
++ ret = anx7625_parse_dt(dev, pdata);
++ if (ret) {
++ if (ret != -EPROBE_DEFER)
++ DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret);
++ goto free_wq;
++ }
++
++ if (!platform->pdata.is_dpi) {
++ ret = anx7625_setup_dsi_device(platform);
++ if (ret < 0)
++ goto free_wq;
++ }
++
++ /*
++ * Registering the i2c devices will retrigger deferred probe, so it
++ * needs to be done after calls that might return EPROBE_DEFER,
++ * otherwise we can get an infinite loop.
++ */
+ if (anx7625_register_i2c_dummy_clients(platform, client) != 0) {
+ ret = -ENOMEM;
+ DRM_DEV_ERROR(dev, "fail to reserve I2C bus.\n");
+@@ -2648,13 +2715,21 @@ static int anx7625_i2c_probe(struct i2c_client *client)
+ if (ret)
+ goto free_wq;
+
+- devm_of_dp_aux_populate_ep_devices(&platform->aux);
+-
+- ret = anx7625_parse_dt(dev, pdata);
++ /*
++ * Populating the aux bus will retrigger deferred probe, so it needs to
++ * be done after calls that might return EPROBE_DEFER, otherwise we can
++ * get an infinite loop.
++ */
++ ret = devm_of_dp_aux_populate_bus(&platform->aux, anx7625_link_bridge);
+ if (ret) {
+- if (ret != -EPROBE_DEFER)
+- DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret);
+- goto free_wq;
++ if (ret != -ENODEV) {
++ DRM_DEV_ERROR(dev, "failed to populate aux bus : %d\n", ret);
++ goto free_wq;
++ }
++
++ ret = anx7625_link_bridge(&platform->aux);
++ if (ret)
++ goto free_wq;
+ }
+
+ if (!platform->pdata.low_power_mode) {
+@@ -2667,27 +2742,6 @@ static int anx7625_i2c_probe(struct i2c_client *client)
+ if (platform->pdata.intp_irq)
+ queue_work(platform->workqueue, &platform->work);
+
+- platform->bridge.funcs = &anx7625_bridge_funcs;
+- platform->bridge.of_node = client->dev.of_node;
+- if (!anx7625_of_panel_on_aux_bus(&client->dev))
+- platform->bridge.ops |= DRM_BRIDGE_OP_EDID;
+- if (!platform->pdata.panel_bridge)
+- platform->bridge.ops |= DRM_BRIDGE_OP_HPD |
+- DRM_BRIDGE_OP_DETECT;
+- platform->bridge.type = platform->pdata.panel_bridge ?
+- DRM_MODE_CONNECTOR_eDP :
+- DRM_MODE_CONNECTOR_DisplayPort;
+-
+- drm_bridge_add(&platform->bridge);
+-
+- if (!platform->pdata.is_dpi) {
+- ret = anx7625_attach_dsi(platform);
+- if (ret) {
+- DRM_DEV_ERROR(dev, "Fail to attach to dsi : %d\n", ret);
+- goto unregister_bridge;
+- }
+- }
+-
+ if (platform->pdata.audio_en)
+ anx7625_register_audio(dev, platform);
+
+@@ -2695,12 +2749,6 @@ static int anx7625_i2c_probe(struct i2c_client *client)
+
+ return 0;
+
+-unregister_bridge:
+- drm_bridge_remove(&platform->bridge);
+-
+- if (!platform->pdata.low_power_mode)
+- pm_runtime_put_sync_suspend(&client->dev);
+-
+ free_wq:
+ if (platform->workqueue)
+ destroy_workqueue(platform->workqueue);
+diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
+index bc451b2a77c28..32ea61b79965e 100644
+--- a/drivers/gpu/drm/bridge/ite-it6505.c
++++ b/drivers/gpu/drm/bridge/ite-it6505.c
+@@ -3195,7 +3195,7 @@ static ssize_t receive_timing_debugfs_show(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+ {
+ struct it6505 *it6505 = file->private_data;
+- struct drm_display_mode *vid = &it6505->video_info;
++ struct drm_display_mode *vid;
+ u8 read_buf[READ_BUFFER_SIZE];
+ u8 *str = read_buf, *end = read_buf + READ_BUFFER_SIZE;
+ ssize_t ret, count;
+@@ -3204,6 +3204,7 @@ static ssize_t receive_timing_debugfs_show(struct file *file, char __user *buf,
+ return -ENODEV;
+
+ it6505_calc_video_info(it6505);
++ vid = &it6505->video_info;
+ str += scnprintf(str, end - str, "---video timing---\n");
+ str += scnprintf(str, end - str, "PCLK:%d.%03dMHz\n",
+ vid->clock / 1000, vid->clock % 1000);
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index 6d16ec45ea614..232e23a1bfcc0 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -1890,7 +1890,7 @@ static int tc_mipi_dsi_host_attach(struct tc_data *tc)
+ if (dsi_lanes < 0)
+ return dsi_lanes;
+
+- dsi = mipi_dsi_device_register_full(host, &info);
++ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ if (IS_ERR(dsi))
+ return dev_err_probe(dev, PTR_ERR(dsi),
+ "failed to create dsi device\n");
+@@ -1901,7 +1901,7 @@ static int tc_mipi_dsi_host_attach(struct tc_data *tc)
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+
+- ret = mipi_dsi_attach(dsi);
++ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to attach dsi to host: %d\n", ret);
+ return ret;
+diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
+index 7c0cbe84611b9..966a25cb0b108 100644
+--- a/drivers/gpu/drm/bridge/tc358768.c
++++ b/drivers/gpu/drm/bridge/tc358768.c
+@@ -9,6 +9,8 @@
+ #include <linux/gpio/consumer.h>
+ #include <linux/i2c.h>
+ #include <linux/kernel.h>
++#include <linux/media-bus-format.h>
++#include <linux/minmax.h>
+ #include <linux/module.h>
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
+@@ -146,6 +148,7 @@ struct tc358768_priv {
+
+ u32 pd_lines; /* number of Parallel Port Input Data Lines */
+ u32 dsi_lanes; /* number of DSI Lanes */
++ u32 dsi_bpp; /* number of Bits Per Pixel over DSI */
+
+ /* Parameters for PLL programming */
+ u32 fbd; /* PLL feedback divider */
+@@ -284,12 +287,12 @@ static void tc358768_hw_disable(struct tc358768_priv *priv)
+
+ static u32 tc358768_pll_to_pclk(struct tc358768_priv *priv, u32 pll_clk)
+ {
+- return (u32)div_u64((u64)pll_clk * priv->dsi_lanes, priv->pd_lines);
++ return (u32)div_u64((u64)pll_clk * priv->dsi_lanes, priv->dsi_bpp);
+ }
+
+ static u32 tc358768_pclk_to_pll(struct tc358768_priv *priv, u32 pclk)
+ {
+- return (u32)div_u64((u64)pclk * priv->pd_lines, priv->dsi_lanes);
++ return (u32)div_u64((u64)pclk * priv->dsi_bpp, priv->dsi_lanes);
+ }
+
+ static int tc358768_calc_pll(struct tc358768_priv *priv,
+@@ -334,13 +337,17 @@ static int tc358768_calc_pll(struct tc358768_priv *priv,
+ u32 fbd;
+
+ for (fbd = 0; fbd < 512; ++fbd) {
+- u32 pll, diff;
++ u32 pll, diff, pll_in;
+
+ pll = (u32)div_u64((u64)refclk * (fbd + 1), divisor);
+
+ if (pll >= max_pll || pll < min_pll)
+ continue;
+
++ pll_in = (u32)div_u64((u64)refclk, prd + 1);
++ if (pll_in < 4000000)
++ continue;
++
+ diff = max(pll, target_pll) - min(pll, target_pll);
+
+ if (diff < best_diff) {
+@@ -422,6 +429,7 @@ static int tc358768_dsi_host_attach(struct mipi_dsi_host *host,
+ priv->output.panel = panel;
+
+ priv->dsi_lanes = dev->lanes;
++ priv->dsi_bpp = mipi_dsi_pixel_format_to_bpp(dev->format);
+
+ /* get input ep (port0/endpoint0) */
+ ret = -EINVAL;
+@@ -433,7 +441,7 @@ static int tc358768_dsi_host_attach(struct mipi_dsi_host *host,
+ }
+
+ if (ret)
+- priv->pd_lines = mipi_dsi_pixel_format_to_bpp(dev->format);
++ priv->pd_lines = priv->dsi_bpp;
+
+ drm_bridge_add(&priv->bridge);
+
+@@ -632,6 +640,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ struct mipi_dsi_device *dsi_dev = priv->output.dev;
+ unsigned long mode_flags = dsi_dev->mode_flags;
+ u32 val, val2, lptxcnt, hact, data_type;
++ s32 raw_val;
+ const struct drm_display_mode *mode;
+ u32 dsibclk_nsk, dsiclk_nsk, ui_nsk, phy_delay_nsk;
+ u32 dsiclk, dsibclk, video_start;
+@@ -736,25 +745,26 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+
+ /* 38ns < TCLK_PREPARE < 95ns */
+ val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1;
+- /* TCLK_PREPARE > 300ns */
+- val2 = tc358768_ns_to_cnt(300 + tc358768_to_ns(3 * ui_nsk),
+- dsibclk_nsk);
+- val |= (val2 - tc358768_to_ns(phy_delay_nsk - dsibclk_nsk)) << 8;
++ /* TCLK_PREPARE + TCLK_ZERO > 300ns */
++ val2 = tc358768_ns_to_cnt(300 - tc358768_to_ns(2 * ui_nsk),
++ dsibclk_nsk) - 2;
++ val |= val2 << 8;
+ dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_HEADERCNT, val);
+
+- /* TCLK_TRAIL > 60ns + 3*UI */
+- val = 60 + tc358768_to_ns(3 * ui_nsk);
+- val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 5;
++ /* TCLK_TRAIL > 60ns AND TEOT <= 105 ns + 12*UI */
++ raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(2 * ui_nsk), dsibclk_nsk) - 5;
++ val = clamp(raw_val, 0, 127);
+ dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_TRAILCNT, val);
+
+ /* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */
+ val = 50 + tc358768_to_ns(4 * ui_nsk);
+ val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
+- /* THS_ZERO > 145ns + 10*UI */
+- val2 = tc358768_ns_to_cnt(145 - tc358768_to_ns(ui_nsk), dsibclk_nsk);
+- val |= (val2 - tc358768_to_ns(phy_delay_nsk)) << 8;
++ /* THS_PREPARE + THS_ZERO > 145ns + 10*UI */
++ raw_val = tc358768_ns_to_cnt(145 - tc358768_to_ns(3 * ui_nsk), dsibclk_nsk) - 10;
++ val2 = clamp(raw_val, 0, 127);
++ val |= val2 << 8;
+ dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_THS_HEADERCNT, val);
+
+@@ -770,9 +780,10 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_POSTCNT, val);
+
+- /* 60ns + 4*UI < THS_PREPARE < 105ns + 12*UI */
+- val = tc358768_ns_to_cnt(60 + tc358768_to_ns(15 * ui_nsk),
+- dsibclk_nsk) - 5;
++ /* max(60ns + 4*UI, 8*UI) < THS_TRAILCNT < 105ns + 12*UI */
++ raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(18 * ui_nsk),
++ dsibclk_nsk) - 4;
++ val = clamp(raw_val, 0, 15);
+ dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_THS_TRAILCNT, val);
+
+@@ -786,7 +797,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+
+ /* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
+ val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4);
+- val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
++ val = tc358768_ns_to_cnt(val, dsibclk_nsk) / 4 - 1;
+ val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
+ dsibclk_nsk) - 2;
+ val = val << 16 | val2;
+@@ -866,8 +877,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ val = TC358768_DSI_CONFW_MODE_SET | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+ val |= (dsi_dev->lanes - 1) << 1;
+
+- if (!(dsi_dev->mode_flags & MIPI_DSI_MODE_LPM))
+- val |= TC358768_DSI_CONTROL_TXMD;
++ val |= TC358768_DSI_CONTROL_TXMD;
+
+ if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ val |= TC358768_DSI_CONTROL_HSCKMD;
+@@ -913,6 +923,44 @@ static void tc358768_bridge_enable(struct drm_bridge *bridge)
+ }
+ }
+
++#define MAX_INPUT_SEL_FORMATS 1
++
++static u32 *
++tc358768_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
++ struct drm_bridge_state *bridge_state,
++ struct drm_crtc_state *crtc_state,
++ struct drm_connector_state *conn_state,
++ u32 output_fmt,
++ unsigned int *num_input_fmts)
++{
++ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
++ u32 *input_fmts;
++
++ *num_input_fmts = 0;
++
++ input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
++ GFP_KERNEL);
++ if (!input_fmts)
++ return NULL;
++
++ switch (priv->pd_lines) {
++ case 16:
++ input_fmts[0] = MEDIA_BUS_FMT_RGB565_1X16;
++ break;
++ case 18:
++ input_fmts[0] = MEDIA_BUS_FMT_RGB666_1X18;
++ break;
++ default:
++ case 24:
++ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
++ break;
++ };
++
++ *num_input_fmts = MAX_INPUT_SEL_FORMATS;
++
++ return input_fmts;
++}
++
+ static const struct drm_bridge_funcs tc358768_bridge_funcs = {
+ .attach = tc358768_bridge_attach,
+ .mode_valid = tc358768_bridge_mode_valid,
+@@ -920,6 +968,11 @@ static const struct drm_bridge_funcs tc358768_bridge_funcs = {
+ .enable = tc358768_bridge_enable,
+ .disable = tc358768_bridge_disable,
+ .post_disable = tc358768_bridge_post_disable,
++
++ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
++ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
++ .atomic_reset = drm_atomic_helper_bridge_reset,
++ .atomic_get_input_bus_fmts = tc358768_atomic_get_input_bus_fmts,
+ };
+
+ static const struct drm_bridge_timings default_tc358768_timings = {
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+index 91ecfbe45bf90..e4ee2904d0893 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+@@ -321,8 +321,8 @@ static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
+ return dsi_div - 1;
+ }
+
+-static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
+- struct drm_bridge_state *old_bridge_state)
++static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
++ struct drm_bridge_state *old_bridge_state)
+ {
+ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+ struct drm_atomic_state *state = old_bridge_state->base.state;
+@@ -478,17 +478,29 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
+ dev_err(ctx->dev, "failed to lock PLL, ret=%i\n", ret);
+ /* On failure, disable PLL again and exit. */
+ regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
++ regulator_disable(ctx->vcc);
+ return;
+ }
+
+ /* Trigger reset after CSR register update. */
+ regmap_write(ctx->regmap, REG_RC_RESET, REG_RC_RESET_SOFT_RESET);
+
++ /* Wait for 10ms after soft reset as specified in datasheet */
++ usleep_range(10000, 12000);
++}
++
++static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
++ struct drm_bridge_state *old_bridge_state)
++{
++ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
++ unsigned int pval;
++
+ /* Clear all errors that got asserted during initialization. */
+ regmap_read(ctx->regmap, REG_IRQ_STAT, &pval);
+ regmap_write(ctx->regmap, REG_IRQ_STAT, pval);
+
+- usleep_range(10000, 12000);
++ /* Wait for 1ms and check for errors in status register */
++ usleep_range(1000, 1100);
+ regmap_read(ctx->regmap, REG_IRQ_STAT, &pval);
+ if (pval)
+ dev_err(ctx->dev, "Unexpected link status 0x%02x\n", pval);
+@@ -555,6 +567,7 @@ static const struct drm_bridge_funcs sn65dsi83_funcs = {
+ .attach = sn65dsi83_attach,
+ .detach = sn65dsi83_detach,
+ .atomic_enable = sn65dsi83_atomic_enable,
++ .atomic_pre_enable = sn65dsi83_atomic_pre_enable,
+ .atomic_disable = sn65dsi83_atomic_disable,
+ .mode_valid = sn65dsi83_mode_valid,
+
+@@ -695,6 +708,7 @@ static int sn65dsi83_probe(struct i2c_client *client)
+
+ ctx->bridge.funcs = &sn65dsi83_funcs;
+ ctx->bridge.of_node = dev->of_node;
++ ctx->bridge.pre_enable_prev_first = true;
+ drm_bridge_add(&ctx->bridge);
+
+ ret = sn65dsi83_host_attach(ctx);
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index 38dab76ae69ea..e2e21ce79510e 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -3404,7 +3404,7 @@ int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+
+ /* Skip failed payloads */
+ if (payload->vc_start_slot == -1) {
+- drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
++ drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
+ payload->port->connector->name);
+ return -EIO;
+ }
+diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
+index d40b3edb52d07..f1539d4448c69 100644
+--- a/drivers/gpu/drm/drm_gem_vram_helper.c
++++ b/drivers/gpu/drm/drm_gem_vram_helper.c
+@@ -45,7 +45,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
+ * the frame's scanout buffer or the cursor image. If there's no more space
+ * left in VRAM, inactive GEM objects can be moved to system memory.
+ *
+- * To initialize the VRAM helper library call drmm_vram_helper_alloc_mm().
++ * To initialize the VRAM helper library call drmm_vram_helper_init().
+ * The function allocates and initializes an instance of &struct drm_vram_mm
+ * in &struct drm_device.vram_mm . Use &DRM_GEM_VRAM_DRIVER to initialize
+ * &struct drm_driver and &DRM_VRAM_MM_FILE_OPERATIONS to initialize
+@@ -73,7 +73,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
+ * // setup device, vram base and size
+ * // ...
+ *
+- * ret = drmm_vram_helper_alloc_mm(dev, vram_base, vram_size);
++ * ret = drmm_vram_helper_init(dev, vram_base, vram_size);
+ * if (ret)
+ * return ret;
+ * return 0;
+@@ -86,7 +86,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
+ * to userspace.
+ *
+ * You don't have to clean up the instance of VRAM MM.
+- * drmm_vram_helper_alloc_mm() is a managed interface that installs a
++ * drmm_vram_helper_init() is a managed interface that installs a
+ * clean-up handler to run during the DRM device's release.
+ *
+ * For drawing or scanout operations, rsp. buffer objects have to be pinned
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+index 28f27091cd3b7..ee2b44f896a27 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+@@ -451,6 +451,33 @@ static ssize_t punit_req_freq_mhz_show(struct kobject *kobj,
+ return sysfs_emit(buff, "%u\n", preq);
+ }
+
++static ssize_t slpc_ignore_eff_freq_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buff)
++{
++ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
++ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
++
++ return sysfs_emit(buff, "%u\n", slpc->ignore_eff_freq);
++}
++
++static ssize_t slpc_ignore_eff_freq_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buff, size_t count)
++{
++ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
++ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
++ int err;
++ u32 val;
++
++ err = kstrtou32(buff, 0, &val);
++ if (err)
++ return err;
++
++ err = intel_guc_slpc_set_ignore_eff_freq(slpc, val);
++ return err ?: count;
++}
++
+ struct intel_gt_bool_throttle_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
+@@ -663,6 +690,8 @@ static struct kobj_attribute attr_media_freq_factor_scale =
+ INTEL_GT_ATTR_RO(media_RP0_freq_mhz);
+ INTEL_GT_ATTR_RO(media_RPn_freq_mhz);
+
++INTEL_GT_ATTR_RW(slpc_ignore_eff_freq);
++
+ static const struct attribute *media_perf_power_attrs[] = {
+ &attr_media_freq_factor.attr,
+ &attr_media_freq_factor_scale.attr,
+@@ -744,6 +773,12 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
+ if (ret)
+ gt_warn(gt, "failed to create punit_req_freq_mhz sysfs (%pe)", ERR_PTR(ret));
+
++ if (intel_uc_uses_guc_slpc(&gt->uc)) {
++ ret = sysfs_create_file(kobj, &attr_slpc_ignore_eff_freq.attr);
++ if (ret)
++ gt_warn(gt, "failed to create ignore_eff_freq sysfs (%pe)", ERR_PTR(ret));
++ }
++
+ if (i915_mmio_reg_valid(intel_gt_perf_limit_reasons_reg(gt))) {
+ ret = sysfs_create_files(kobj, throttle_reason_attrs);
+ if (ret)
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
+index 8f8dd05835c5a..1adec6de223c7 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
+@@ -6,6 +6,7 @@
+ #include <linux/string_helpers.h>
+
+ #include "intel_guc_rc.h"
++#include "intel_guc_print.h"
+ #include "gt/intel_gt.h"
+ #include "i915_drv.h"
+
+@@ -59,13 +60,12 @@ static int __guc_rc_control(struct intel_guc *guc, bool enable)
+
+ ret = guc_action_control_gucrc(guc, enable);
+ if (ret) {
+- i915_probe_error(guc_to_gt(guc)->i915, "Failed to %s GuC RC (%pe)\n",
+- str_enable_disable(enable), ERR_PTR(ret));
++ guc_probe_error(guc, "Failed to %s RC (%pe)\n",
++ str_enable_disable(enable), ERR_PTR(ret));
+ return ret;
+ }
+
+- drm_info(&gt->i915->drm, "GuC RC: %s\n",
+- str_enabled_disabled(enable));
++ guc_info(guc, "RC %s\n", str_enabled_disabled(enable));
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+index 63464933cbceb..56dbba1ef6684 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+@@ -9,6 +9,7 @@
+ #include "i915_drv.h"
+ #include "i915_reg.h"
+ #include "intel_guc_slpc.h"
++#include "intel_guc_print.h"
+ #include "intel_mchbar_regs.h"
+ #include "gt/intel_gt.h"
+ #include "gt/intel_gt_regs.h"
+@@ -171,14 +172,12 @@ static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
+ static int slpc_query_task_state(struct intel_guc_slpc *slpc)
+ {
+ struct intel_guc *guc = slpc_to_guc(slpc);
+- struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
+ int ret;
+
+ ret = guc_action_slpc_query(guc, offset);
+ if (unlikely(ret))
+- i915_probe_error(i915, "Failed to query task state (%pe)\n",
+- ERR_PTR(ret));
++ guc_probe_error(guc, "Failed to query task state: %pe\n", ERR_PTR(ret));
+
+ drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);
+
+@@ -188,15 +187,14 @@ static int slpc_query_task_state(struct intel_guc_slpc *slpc)
+ static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
+ {
+ struct intel_guc *guc = slpc_to_guc(slpc);
+- struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ int ret;
+
+ GEM_BUG_ON(id >= SLPC_MAX_PARAM);
+
+ ret = guc_action_slpc_set_param(guc, id, value);
+ if (ret)
+- i915_probe_error(i915, "Failed to set param %d to %u (%pe)\n",
+- id, value, ERR_PTR(ret));
++ guc_probe_error(guc, "Failed to set param %d to %u: %pe\n",
++ id, value, ERR_PTR(ret));
+
+ return ret;
+ }
+@@ -212,8 +210,8 @@ static int slpc_unset_param(struct intel_guc_slpc *slpc, u8 id)
+
+ static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
+ {
+- struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ struct intel_guc *guc = slpc_to_guc(slpc);
++ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+@@ -236,9 +234,8 @@ static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+ freq);
+ if (ret)
+- drm_notice(&i915->drm,
+- "Failed to send set_param for min freq(%d): (%d)\n",
+- freq, ret);
++ guc_notice(guc, "Failed to send set_param for min freq(%d): %pe\n",
++ freq, ERR_PTR(ret));
+ }
+
+ return ret;
+@@ -267,7 +264,6 @@ static void slpc_boost_work(struct work_struct *work)
+ int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
+ {
+ struct intel_guc *guc = slpc_to_guc(slpc);
+- struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
+ int err;
+
+@@ -275,14 +271,13 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
+
+ err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
+ if (unlikely(err)) {
+- i915_probe_error(i915,
+- "Failed to allocate SLPC struct (err=%pe)\n",
+- ERR_PTR(err));
++ guc_probe_error(guc, "Failed to allocate SLPC struct: %pe\n", ERR_PTR(err));
+ return err;
+ }
+
+ slpc->max_freq_softlimit = 0;
+ slpc->min_freq_softlimit = 0;
++ slpc->ignore_eff_freq = false;
+ slpc->min_is_rpmax = false;
+
+ slpc->boost_freq = 0;
+@@ -338,7 +333,6 @@ static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
+
+ static int slpc_reset(struct intel_guc_slpc *slpc)
+ {
+- struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ struct intel_guc *guc = slpc_to_guc(slpc);
+ u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
+ int ret;
+@@ -346,15 +340,14 @@ static int slpc_reset(struct intel_guc_slpc *slpc)
+ ret = guc_action_slpc_reset(guc, offset);
+
+ if (unlikely(ret < 0)) {
+- i915_probe_error(i915, "SLPC reset action failed (%pe)\n",
+- ERR_PTR(ret));
++ guc_probe_error(guc, "SLPC reset action failed: %pe\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ if (!ret) {
+ if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
+- i915_probe_error(i915, "SLPC not enabled! State = %s\n",
+- slpc_get_state_string(slpc));
++ guc_probe_error(guc, "SLPC not enabled! State = %s\n",
++ slpc_get_state_string(slpc));
+ return -EIO;
+ }
+ }
+@@ -465,6 +458,29 @@ int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
+ return ret;
+ }
+
++int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val)
++{
++ struct drm_i915_private *i915 = slpc_to_i915(slpc);
++ intel_wakeref_t wakeref;
++ int ret;
++
++ mutex_lock(&slpc->lock);
++ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
++
++ ret = slpc_set_param(slpc,
++ SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
++ val);
++ if (ret)
++ guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n",
++ val, ERR_PTR(ret));
++ else
++ slpc->ignore_eff_freq = val;
++
++ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
++ mutex_unlock(&slpc->lock);
++ return ret;
++}
++
+ /**
+ * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
+ * @slpc: pointer to intel_guc_slpc.
+@@ -490,16 +506,6 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
+ mutex_lock(&slpc->lock);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+- /* Ignore efficient freq if lower min freq is requested */
+- ret = slpc_set_param(slpc,
+- SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
+- val < slpc->rp1_freq);
+- if (ret) {
+- i915_probe_error(i915, "Failed to toggle efficient freq (%pe)\n",
+- ERR_PTR(ret));
+- goto out;
+- }
+-
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+ val);
+@@ -507,7 +513,6 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
+ if (!ret)
+ slpc->min_freq_softlimit = val;
+
+-out:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&slpc->lock);
+
+@@ -611,15 +616,12 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
+
+ static bool is_slpc_min_freq_rpmax(struct intel_guc_slpc *slpc)
+ {
+- struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ int slpc_min_freq;
+ int ret;
+
+ ret = intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq);
+ if (ret) {
+- drm_err(&i915->drm,
+- "Failed to get min freq: (%d)\n",
+- ret);
++ guc_err(slpc_to_guc(slpc), "Failed to get min freq: %pe\n", ERR_PTR(ret));
+ return false;
+ }
+
+@@ -685,9 +687,8 @@ int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ ret = slpc_set_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
+ if (ret)
+- drm_err(&i915->drm,
+- "Override gucrc mode %d failed %d\n",
+- mode, ret);
++ guc_err(slpc_to_guc(slpc), "Override RC mode %d failed: %pe\n",
++ mode, ERR_PTR(ret));
+ }
+
+ return ret;
+@@ -702,9 +703,7 @@ int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ ret = slpc_unset_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE);
+ if (ret)
+- drm_err(&i915->drm,
+- "Unsetting gucrc mode failed %d\n",
+- ret);
++ guc_err(slpc_to_guc(slpc), "Unsetting RC mode failed: %pe\n", ERR_PTR(ret));
+ }
+
+ return ret;
+@@ -725,7 +724,7 @@ int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
+ */
+ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
+ {
+- struct drm_i915_private *i915 = slpc_to_i915(slpc);
++ struct intel_guc *guc = slpc_to_guc(slpc);
+ int ret;
+
+ GEM_BUG_ON(!slpc->vma);
+@@ -734,8 +733,7 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
+
+ ret = slpc_reset(slpc);
+ if (unlikely(ret < 0)) {
+- i915_probe_error(i915, "SLPC Reset event returned (%pe)\n",
+- ERR_PTR(ret));
++ guc_probe_error(guc, "SLPC Reset event returned: %pe\n", ERR_PTR(ret));
+ return ret;
+ }
+
+@@ -743,7 +741,7 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
+ if (unlikely(ret < 0))
+ return ret;
+
+- intel_guc_pm_intrmsk_enable(to_gt(i915));
++ intel_guc_pm_intrmsk_enable(slpc_to_gt(slpc));
+
+ slpc_get_rp_values(slpc);
+
+@@ -753,22 +751,23 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
+ /* Set SLPC max limit to RP0 */
+ ret = slpc_use_fused_rp0(slpc);
+ if (unlikely(ret)) {
+- i915_probe_error(i915, "Failed to set SLPC max to RP0 (%pe)\n",
+- ERR_PTR(ret));
++ guc_probe_error(guc, "Failed to set SLPC max to RP0: %pe\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ /* Revert SLPC min/max to softlimits if necessary */
+ ret = slpc_set_softlimits(slpc);
+ if (unlikely(ret)) {
+- i915_probe_error(i915, "Failed to set SLPC softlimits (%pe)\n",
+- ERR_PTR(ret));
++ guc_probe_error(guc, "Failed to set SLPC softlimits: %pe\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ /* Set cached media freq ratio mode */
+ intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
+
++ /* Set cached value of ignore efficient freq */
++ intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+index 17ed515f6a852..597eb5413ddf2 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+@@ -46,5 +46,6 @@ void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
+ void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
+ int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc);
+ int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode);
++int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val);
+
+ #endif
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
+index a6ef53b04e047..a886513314977 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
+@@ -31,6 +31,7 @@ struct intel_guc_slpc {
+ /* frequency softlimits */
+ u32 min_freq_softlimit;
+ u32 max_freq_softlimit;
++ bool ignore_eff_freq;
+
+ /* cached media ratio mode */
+ u32 media_ratio_mode;
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index 0372f89082022..660c830c68764 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -1740,6 +1740,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+ {
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
++ struct adreno_platform_config *config = pdev->dev.platform_data;
+ struct a5xx_gpu *a5xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+@@ -1766,7 +1767,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+
+ nr_rings = 4;
+
+- if (adreno_is_a510(adreno_gpu))
++ if (adreno_cmp_rev(ADRENO_REV(5, 1, 0, ANY_ID), config->rev))
+ nr_rings = 1;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index 2942d2548ce69..f74495dcbd966 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1793,7 +1793,8 @@ a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+ * This allows GPU to set the bus attributes required to use system
+ * cache on behalf of the iommu page table walker.
+ */
+- if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
++ if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice) &&
++ !device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY))
+ quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
+
+ return adreno_iommu_create_address_space(gpu, pdev, quirks);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+index f29a339a37050..ce188452cd56a 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+@@ -1575,6 +1575,8 @@ static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
+ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
+ struct drm_plane *cursor)
+ {
++ struct msm_drm_private *priv = dev->dev_private;
++ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_crtc *crtc = NULL;
+ struct dpu_crtc *dpu_crtc = NULL;
+ int i;
+@@ -1606,7 +1608,8 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
+
+ drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
+
+- drm_crtc_enable_color_mgmt(crtc, 0, true, 0);
++ if (dpu_kms->catalog->dspp_count)
++ drm_crtc_enable_color_mgmt(crtc, 0, true, 0);
+
+ /* save user friendly CRTC name for later */
+ snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+index f7214c4401e19..c2462d58b67d6 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+@@ -94,9 +94,13 @@
+
+ #define INTF_SDM845_MASK (0)
+
+-#define INTF_SC7180_MASK BIT(DPU_INTF_INPUT_CTRL) | BIT(DPU_INTF_TE)
++#define INTF_SC7180_MASK \
++ (BIT(DPU_INTF_INPUT_CTRL) | \
++ BIT(DPU_INTF_TE) | \
++ BIT(DPU_INTF_STATUS_SUPPORTED) | \
++ BIT(DPU_DATA_HCTL_EN))
+
+-#define INTF_SC7280_MASK INTF_SC7180_MASK | BIT(DPU_DATA_HCTL_EN)
++#define INTF_SC7280_MASK (INTF_SC7180_MASK)
+
+ #define IRQ_SDM845_MASK (BIT(MDP_SSPP_TOP0_INTR) | \
+ BIT(MDP_SSPP_TOP0_INTR2) | \
+@@ -1562,7 +1566,7 @@ static struct dpu_pingpong_cfg qcm2290_pp[] = {
+ #define MERGE_3D_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+- .base = _base, .len = 0x100, \
++ .base = _base, .len = 0x8, \
+ .features = MERGE_3D_SM8150_MASK, \
+ .sblk = NULL \
+ }
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+index 5f96dd8def092..d7d45e1e7b310 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+@@ -214,17 +214,19 @@ enum {
+
+ /**
+ * INTF sub-blocks
+- * @DPU_INTF_INPUT_CTRL Supports the setting of pp block from which
+- * pixel data arrives to this INTF
+- * @DPU_INTF_TE INTF block has TE configuration support
+- * @DPU_DATA_HCTL_EN Allows data to be transferred at different rate
+- than video timing
++ * @DPU_INTF_INPUT_CTRL Supports the setting of pp block from which
++ * pixel data arrives to this INTF
++ * @DPU_INTF_TE INTF block has TE configuration support
++ * @DPU_DATA_HCTL_EN Allows data to be transferred at different rate
++ * than video timing
++ * @DPU_INTF_STATUS_SUPPORTED INTF block has INTF_STATUS register
+ * @DPU_INTF_MAX
+ */
+ enum {
+ DPU_INTF_INPUT_CTRL = 0x1,
+ DPU_INTF_TE,
+ DPU_DATA_HCTL_EN,
++ DPU_INTF_STATUS_SUPPORTED,
+ DPU_INTF_MAX
+ };
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+index 6c53ea560ffaa..4072638c37918 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+@@ -115,6 +115,9 @@ static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
+ trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
+ ctx->pending_flush_mask = 0x0;
++ ctx->pending_intf_flush_mask = 0;
++ ctx->pending_wb_flush_mask = 0;
++ ctx->pending_merge_3d_flush_mask = 0;
+ }
+
+ static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
+@@ -505,7 +508,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
+ DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
+ BIT(cfg->merge_3d - MERGE_3D_0));
+ if (cfg->dsc) {
+- DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, DSC_IDX);
++ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, BIT(DSC_IDX));
+ DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
+ }
+ }
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+index 619926da1441e..68035745b7069 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+@@ -54,9 +54,10 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ if (is_cmd_mode)
+ initial_lines += 1;
+
+- slice_last_group_size = 3 - (dsc->slice_width % 3);
++ slice_last_group_size = (dsc->slice_width + 2) % 3;
++
+ data = (initial_lines << 20);
+- data |= ((slice_last_group_size - 1) << 18);
++ data |= (slice_last_group_size << 18);
+ /* bpp is 6.4 format, 4 LSBs bits are for fractional part */
+ data |= (dsc->bits_per_pixel << 8);
+ data |= (dsc->block_pred_enable << 7);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+index b2a94b9a3e987..b9dddf576c029 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+@@ -57,6 +57,7 @@
+ #define INTF_PROG_FETCH_START 0x170
+ #define INTF_PROG_ROT_START 0x174
+ #define INTF_MUX 0x25C
++#define INTF_STATUS 0x26C
+
+ #define INTF_CFG_ACTIVE_H_EN BIT(29)
+ #define INTF_CFG_ACTIVE_V_EN BIT(30)
+@@ -292,8 +293,13 @@ static void dpu_hw_intf_get_status(
+ struct intf_status *s)
+ {
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
++ unsigned long cap = intf->cap->features;
++
++ if (cap & BIT(DPU_INTF_STATUS_SUPPORTED))
++ s->is_en = DPU_REG_READ(c, INTF_STATUS) & BIT(0);
++ else
++ s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
+
+- s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
+ s->is_prog_fetch_en = !!(DPU_REG_READ(c, INTF_CONFIG) & BIT(31));
+ if (s->is_en) {
+ s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index 3f9a18410c0bb..22967cf6a79d3 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -325,6 +325,8 @@ static void dp_display_unbind(struct device *dev, struct device *master,
+
+ kthread_stop(dp->ev_tsk);
+
++ of_dp_aux_depopulate_bus(dp->aux);
++
+ dp_power_client_deinit(dp->power);
+ dp_unregister_audio_driver(dev, dp->audio);
+ dp_aux_unregister(dp->aux);
+@@ -1352,9 +1354,9 @@ static int dp_display_remove(struct platform_device *pdev)
+ {
+ struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev);
+
++ component_del(&pdev->dev, &dp_display_comp_ops);
+ dp_display_deinit_sub_modules(dp);
+
+- component_del(&pdev->dev, &dp_display_comp_ops);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+@@ -1538,11 +1540,6 @@ void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
+ }
+ }
+
+-static void of_dp_aux_depopulate_bus_void(void *data)
+-{
+- of_dp_aux_depopulate_bus(data);
+-}
+-
+ static int dp_display_get_next_bridge(struct msm_dp *dp)
+ {
+ int rc;
+@@ -1571,12 +1568,6 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
+ of_node_put(aux_bus);
+ if (rc)
+ goto error;
+-
+- rc = devm_add_action_or_reset(dp->drm_dev->dev,
+- of_dp_aux_depopulate_bus_void,
+- dp_priv->aux);
+- if (rc)
+- goto error;
+ } else if (dp->is_edp) {
+ DRM_ERROR("eDP aux_bus not found\n");
+ return -ENODEV;
+@@ -1601,6 +1592,7 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
+ error:
+ if (dp->is_edp) {
+ disable_irq(dp_priv->irq);
++ of_dp_aux_depopulate_bus(dp_priv->aux);
+ dp_display_host_phy_exit(dp_priv);
+ dp_display_host_deinit(dp_priv);
+ }
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index 18fa30e1e8583..3ee770dddc2fd 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -854,18 +854,17 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
+ */
+ slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->slice_width);
+
+- /*
+- * If slice_count is greater than slice_per_intf
+- * then default to 1. This can happen during partial
+- * update.
+- */
+- if (dsc->slice_count > slice_per_intf)
+- dsc->slice_count = 1;
+-
+ total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
+
+ eol_byte_num = total_bytes_per_intf % 3;
+- pkt_per_line = slice_per_intf / dsc->slice_count;
++
++ /*
++ * Typically, pkt_per_line = slice_per_intf * slice_per_pkt.
++ *
++ * Since the current driver only supports slice_per_pkt = 1,
++ * pkt_per_line will be equal to slice per intf for now.
++ */
++ pkt_per_line = slice_per_intf;
+
+ if (is_cmd_mode) /* packet data type */
+ reg = DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(MIPI_DSI_DCS_LONG_WRITE);
+@@ -989,7 +988,14 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+ if (!msm_host->dsc)
+ wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+ else
+- wc = msm_host->dsc->slice_chunk_size * msm_host->dsc->slice_count + 1;
++ /*
++ * When DSC is enabled, WC = slice_chunk_size * slice_per_pkt + 1.
++ * Currently, the driver only supports default value of slice_per_pkt = 1
++ *
++ * TODO: Expand mipi_dsi_device struct to hold slice_per_pkt info
++ * and adjust DSC math to account for slice_per_pkt.
++ */
++ wc = msm_host->dsc->slice_chunk_size + 1;
+
+ dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_CTRL,
+ DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(wc) |
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+index 9f488adea7f54..3ce45b023e637 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+@@ -539,6 +539,9 @@ static int dsi_pll_14nm_vco_prepare(struct clk_hw *hw)
+ if (unlikely(pll_14nm->phy->pll_on))
+ return 0;
+
++ if (dsi_pll_14nm_vco_recalc_rate(hw, VCO_REF_CLK_RATE) == 0)
++ dsi_pll_14nm_vco_set_rate(hw, pll_14nm->phy->cfg->min_pll_rate, VCO_REF_CLK_RATE);
++
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);
+
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index 5bb777ff13130..9b6824f6b9e4b 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -64,6 +64,7 @@
+ #include "nouveau_connector.h"
+ #include "nouveau_encoder.h"
+ #include "nouveau_fence.h"
++#include "nv50_display.h"
+
+ #include <subdev/bios/dp.h>
+
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
+index fbd3b15583bc8..60f77766766e9 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.h
++++ b/drivers/gpu/drm/nouveau/nv50_display.h
+@@ -31,7 +31,5 @@
+ #include "nouveau_reg.h"
+
+ int nv50_display_create(struct drm_device *);
+-void nv50_display_destroy(struct drm_device *);
+-int nv50_display_init(struct drm_device *);
+-void nv50_display_fini(struct drm_device *);
++
+ #endif /* __NV50_DISPLAY_H__ */
+diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+index d1ec80a3e3c72..ef148504cf24a 100644
+--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
++++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+@@ -192,15 +192,15 @@ static int sharp_nt_panel_enable(struct drm_panel *panel)
+ }
+
+ static const struct drm_display_mode default_mode = {
+- .clock = 41118,
++ .clock = (540 + 48 + 32 + 80) * (960 + 3 + 10 + 15) * 60 / 1000,
+ .hdisplay = 540,
+ .hsync_start = 540 + 48,
+- .hsync_end = 540 + 48 + 80,
+- .htotal = 540 + 48 + 80 + 32,
++ .hsync_end = 540 + 48 + 32,
++ .htotal = 540 + 48 + 32 + 80,
+ .vdisplay = 960,
+ .vsync_start = 960 + 3,
+- .vsync_end = 960 + 3 + 15,
+- .vtotal = 960 + 3 + 15 + 1,
++ .vsync_end = 960 + 3 + 10,
++ .vtotal = 960 + 3 + 10 + 15,
+ };
+
+ static int sharp_nt_panel_get_modes(struct drm_panel *panel,
+@@ -280,6 +280,7 @@ static int sharp_nt_panel_probe(struct mipi_dsi_device *dsi)
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
++ MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_VIDEO_HSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_NO_EOT_PACKET;
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 065f378bba9d2..d8efbcee9bc12 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -759,8 +759,8 @@ static const struct panel_desc ampire_am_480272h3tmqw_t01h = {
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+- .width = 105,
+- .height = 67,
++ .width = 99,
++ .height = 58,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ };
+diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
+index 8ef25ab305ae7..b8f4dac68d850 100644
+--- a/drivers/gpu/drm/radeon/ci_dpm.c
++++ b/drivers/gpu/drm/radeon/ci_dpm.c
+@@ -5517,6 +5517,7 @@ static int ci_parse_power_table(struct radeon_device *rdev)
+ u8 frev, crev;
+ u8 *power_state_offset;
+ struct ci_ps *ps;
++ int ret;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+@@ -5546,11 +5547,15 @@ static int ci_parse_power_table(struct radeon_device *rdev)
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+- if (!rdev->pm.power_state[i].clock_info)
+- return -EINVAL;
++ if (!rdev->pm.power_state[i].clock_info) {
++ ret = -EINVAL;
++ goto err_free_ps;
++ }
+ ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
+- if (ps == NULL)
+- return -ENOMEM;
++ if (ps == NULL) {
++ ret = -ENOMEM;
++ goto err_free_ps;
++ }
+ rdev->pm.dpm.ps[i].ps_priv = ps;
+ ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+ non_clock_info,
+@@ -5590,6 +5595,12 @@ static int ci_parse_power_table(struct radeon_device *rdev)
+ }
+
+ return 0;
++
++err_free_ps:
++ for (i = 0; i < rdev->pm.dpm.num_ps; i++)
++ kfree(rdev->pm.dpm.ps[i].ps_priv);
++ kfree(rdev->pm.dpm.ps);
++ return ret;
+ }
+
+ static int ci_get_vbios_boot_values(struct radeon_device *rdev,
+@@ -5678,25 +5689,26 @@ int ci_dpm_init(struct radeon_device *rdev)
+
+ ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
+ if (ret) {
+- ci_dpm_fini(rdev);
++ kfree(rdev->pm.dpm.priv);
+ return ret;
+ }
+
+ ret = r600_get_platform_caps(rdev);
+ if (ret) {
+- ci_dpm_fini(rdev);
++ kfree(rdev->pm.dpm.priv);
+ return ret;
+ }
+
+ ret = r600_parse_extended_power_table(rdev);
+ if (ret) {
+- ci_dpm_fini(rdev);
++ kfree(rdev->pm.dpm.priv);
+ return ret;
+ }
+
+ ret = ci_parse_power_table(rdev);
+ if (ret) {
+- ci_dpm_fini(rdev);
++ kfree(rdev->pm.dpm.priv);
++ r600_free_extended_power_table(rdev);
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
+index fdddbbaecbb74..72a0768df00f7 100644
+--- a/drivers/gpu/drm/radeon/cypress_dpm.c
++++ b/drivers/gpu/drm/radeon/cypress_dpm.c
+@@ -557,8 +557,12 @@ static int cypress_populate_mclk_value(struct radeon_device *rdev,
+ ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
+ u32 reference_clock = rdev->clock.mpll.reference_freq;
+ u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
+- u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
+- u32 clk_v = ss.percentage *
++ u32 clk_s, clk_v;
++
++ if (!decoded_ref)
++ return -EINVAL;
++ clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
++ clk_v = ss.percentage *
+ (0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);
+
+ mpll_ss1 &= ~CLKV_MASK;
+diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
+index 672d2239293e0..3e1c1a392fb7b 100644
+--- a/drivers/gpu/drm/radeon/ni_dpm.c
++++ b/drivers/gpu/drm/radeon/ni_dpm.c
+@@ -2241,8 +2241,12 @@ static int ni_populate_mclk_value(struct radeon_device *rdev,
+ ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
+ u32 reference_clock = rdev->clock.mpll.reference_freq;
+ u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
+- u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
+- u32 clk_v = ss.percentage *
++ u32 clk_s, clk_v;
++
++ if (!decoded_ref)
++ return -EINVAL;
++ clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
++ clk_v = ss.percentage *
+ (0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);
+
+ mpll_ss1 &= ~CLKV_MASK;
+diff --git a/drivers/gpu/drm/radeon/rv740_dpm.c b/drivers/gpu/drm/radeon/rv740_dpm.c
+index d57a3e1df8d63..4464fd21a3029 100644
+--- a/drivers/gpu/drm/radeon/rv740_dpm.c
++++ b/drivers/gpu/drm/radeon/rv740_dpm.c
+@@ -249,8 +249,12 @@ int rv740_populate_mclk_value(struct radeon_device *rdev,
+ ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
+ u32 reference_clock = rdev->clock.mpll.reference_freq;
+ u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
+- u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
+- u32 clk_v = 0x40000 * ss.percentage *
++ u32 clk_s, clk_v;
++
++ if (!decoded_ref)
++ return -EINVAL;
++ clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
++ clk_v = 0x40000 * ss.percentage *
+ (dividers.whole_fb_div + (dividers.frac_fb_div / 8)) / (clk_s * 10000);
+
+ mpll_ss1 &= ~CLKV_MASK;
+diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+index 523a6d7879210..936796851ffd3 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+@@ -778,21 +778,19 @@ static irqreturn_t sun4i_tcon_handler(int irq, void *private)
+ static int sun4i_tcon_init_clocks(struct device *dev,
+ struct sun4i_tcon *tcon)
+ {
+- tcon->clk = devm_clk_get(dev, "ahb");
++ tcon->clk = devm_clk_get_enabled(dev, "ahb");
+ if (IS_ERR(tcon->clk)) {
+ dev_err(dev, "Couldn't get the TCON bus clock\n");
+ return PTR_ERR(tcon->clk);
+ }
+- clk_prepare_enable(tcon->clk);
+
+ if (tcon->quirks->has_channel_0) {
+- tcon->sclk0 = devm_clk_get(dev, "tcon-ch0");
++ tcon->sclk0 = devm_clk_get_enabled(dev, "tcon-ch0");
+ if (IS_ERR(tcon->sclk0)) {
+ dev_err(dev, "Couldn't get the TCON channel 0 clock\n");
+ return PTR_ERR(tcon->sclk0);
+ }
+ }
+- clk_prepare_enable(tcon->sclk0);
+
+ if (tcon->quirks->has_channel_1) {
+ tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
+@@ -805,12 +803,6 @@ static int sun4i_tcon_init_clocks(struct device *dev,
+ return 0;
+ }
+
+-static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
+-{
+- clk_disable_unprepare(tcon->sclk0);
+- clk_disable_unprepare(tcon->clk);
+-}
+-
+ static int sun4i_tcon_init_irq(struct device *dev,
+ struct sun4i_tcon *tcon)
+ {
+@@ -1223,14 +1215,14 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
+ ret = sun4i_tcon_init_regmap(dev, tcon);
+ if (ret) {
+ dev_err(dev, "Couldn't init our TCON regmap\n");
+- goto err_free_clocks;
++ goto err_assert_reset;
+ }
+
+ if (tcon->quirks->has_channel_0) {
+ ret = sun4i_dclk_create(dev, tcon);
+ if (ret) {
+ dev_err(dev, "Couldn't create our TCON dot clock\n");
+- goto err_free_clocks;
++ goto err_assert_reset;
+ }
+ }
+
+@@ -1293,8 +1285,6 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
+ err_free_dotclock:
+ if (tcon->quirks->has_channel_0)
+ sun4i_dclk_free(tcon);
+-err_free_clocks:
+- sun4i_tcon_free_clocks(tcon);
+ err_assert_reset:
+ reset_control_assert(tcon->lcd_rst);
+ return ret;
+@@ -1308,7 +1298,6 @@ static void sun4i_tcon_unbind(struct device *dev, struct device *master,
+ list_del(&tcon->list);
+ if (tcon->quirks->has_channel_0)
+ sun4i_dclk_free(tcon);
+- sun4i_tcon_free_clocks(tcon);
+ }
+
+ static const struct component_ops sun4i_tcon_ops = {
+diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
+index 8e53fa80742b2..80164e79af006 100644
+--- a/drivers/gpu/drm/vkms/vkms_composer.c
++++ b/drivers/gpu/drm/vkms/vkms_composer.c
+@@ -99,7 +99,7 @@ static void blend(struct vkms_writeback_job *wb,
+ if (!check_y_limit(plane[i]->frame_info, y))
+ continue;
+
+- plane[i]->plane_read(stage_buffer, plane[i]->frame_info, y);
++ vkms_compose_row(stage_buffer, plane[i], y);
+ pre_mul_alpha_blend(plane[i]->frame_info, stage_buffer,
+ output_buffer);
+ }
+@@ -118,7 +118,7 @@ static int check_format_funcs(struct vkms_crtc_state *crtc_state,
+ u32 n_active_planes = crtc_state->num_active_planes;
+
+ for (size_t i = 0; i < n_active_planes; i++)
+- if (!planes[i]->plane_read)
++ if (!planes[i]->pixel_read)
+ return -1;
+
+ if (active_wb && !active_wb->wb_write)
+diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
+index 4a248567efb26..f152d54baf769 100644
+--- a/drivers/gpu/drm/vkms/vkms_drv.h
++++ b/drivers/gpu/drm/vkms/vkms_drv.h
+@@ -56,8 +56,7 @@ struct vkms_writeback_job {
+ struct vkms_plane_state {
+ struct drm_shadow_plane_state base;
+ struct vkms_frame_info *frame_info;
+- void (*plane_read)(struct line_buffer *buffer,
+- const struct vkms_frame_info *frame_info, int y);
++ void (*pixel_read)(u8 *src_buffer, struct pixel_argb_u16 *out_pixel);
+ };
+
+ struct vkms_plane {
+@@ -155,6 +154,7 @@ int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
+ /* Composer Support */
+ void vkms_composer_worker(struct work_struct *work);
+ void vkms_set_composer(struct vkms_output *out, bool enabled);
++void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y);
+
+ /* Writeback */
+ int vkms_enable_writeback_connector(struct vkms_device *vkmsdev);
+diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
+index d4950688b3f17..b11342026485f 100644
+--- a/drivers/gpu/drm/vkms/vkms_formats.c
++++ b/drivers/gpu/drm/vkms/vkms_formats.c
+@@ -42,100 +42,75 @@ static void *get_packed_src_addr(const struct vkms_frame_info *frame_info, int y
+ return packed_pixels_addr(frame_info, x_src, y_src);
+ }
+
+-static void ARGB8888_to_argb_u16(struct line_buffer *stage_buffer,
+- const struct vkms_frame_info *frame_info, int y)
++static void ARGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+ {
+- struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+- u8 *src_pixels = get_packed_src_addr(frame_info, y);
+- int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+- stage_buffer->n_pixels);
+-
+- for (size_t x = 0; x < x_limit; x++, src_pixels += 4) {
+- /*
+- * The 257 is the "conversion ratio". This number is obtained by the
+- * (2^16 - 1) / (2^8 - 1) division. Which, in this case, tries to get
+- * the best color value in a pixel format with more possibilities.
+- * A similar idea applies to others RGB color conversions.
+- */
+- out_pixels[x].a = (u16)src_pixels[3] * 257;
+- out_pixels[x].r = (u16)src_pixels[2] * 257;
+- out_pixels[x].g = (u16)src_pixels[1] * 257;
+- out_pixels[x].b = (u16)src_pixels[0] * 257;
+- }
++ /*
++ * The 257 is the "conversion ratio". This number is obtained by the
++ * (2^16 - 1) / (2^8 - 1) division. Which, in this case, tries to get
++ * the best color value in a pixel format with more possibilities.
++ * A similar idea applies to others RGB color conversions.
++ */
++ out_pixel->a = (u16)src_pixels[3] * 257;
++ out_pixel->r = (u16)src_pixels[2] * 257;
++ out_pixel->g = (u16)src_pixels[1] * 257;
++ out_pixel->b = (u16)src_pixels[0] * 257;
+ }
+
+-static void XRGB8888_to_argb_u16(struct line_buffer *stage_buffer,
+- const struct vkms_frame_info *frame_info, int y)
++static void XRGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+ {
+- struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+- u8 *src_pixels = get_packed_src_addr(frame_info, y);
+- int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+- stage_buffer->n_pixels);
+-
+- for (size_t x = 0; x < x_limit; x++, src_pixels += 4) {
+- out_pixels[x].a = (u16)0xffff;
+- out_pixels[x].r = (u16)src_pixels[2] * 257;
+- out_pixels[x].g = (u16)src_pixels[1] * 257;
+- out_pixels[x].b = (u16)src_pixels[0] * 257;
+- }
++ out_pixel->a = (u16)0xffff;
++ out_pixel->r = (u16)src_pixels[2] * 257;
++ out_pixel->g = (u16)src_pixels[1] * 257;
++ out_pixel->b = (u16)src_pixels[0] * 257;
+ }
+
+-static void ARGB16161616_to_argb_u16(struct line_buffer *stage_buffer,
+- const struct vkms_frame_info *frame_info,
+- int y)
++static void ARGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+ {
+- struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+- u16 *src_pixels = get_packed_src_addr(frame_info, y);
+- int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+- stage_buffer->n_pixels);
++ u16 *pixels = (u16 *)src_pixels;
+
+- for (size_t x = 0; x < x_limit; x++, src_pixels += 4) {
+- out_pixels[x].a = le16_to_cpu(src_pixels[3]);
+- out_pixels[x].r = le16_to_cpu(src_pixels[2]);
+- out_pixels[x].g = le16_to_cpu(src_pixels[1]);
+- out_pixels[x].b = le16_to_cpu(src_pixels[0]);
+- }
++ out_pixel->a = le16_to_cpu(pixels[3]);
++ out_pixel->r = le16_to_cpu(pixels[2]);
++ out_pixel->g = le16_to_cpu(pixels[1]);
++ out_pixel->b = le16_to_cpu(pixels[0]);
+ }
+
+-static void XRGB16161616_to_argb_u16(struct line_buffer *stage_buffer,
+- const struct vkms_frame_info *frame_info,
+- int y)
++static void XRGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+ {
+- struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+- u16 *src_pixels = get_packed_src_addr(frame_info, y);
+- int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+- stage_buffer->n_pixels);
++ u16 *pixels = (u16 *)src_pixels;
+
+- for (size_t x = 0; x < x_limit; x++, src_pixels += 4) {
+- out_pixels[x].a = (u16)0xffff;
+- out_pixels[x].r = le16_to_cpu(src_pixels[2]);
+- out_pixels[x].g = le16_to_cpu(src_pixels[1]);
+- out_pixels[x].b = le16_to_cpu(src_pixels[0]);
+- }
++ out_pixel->a = (u16)0xffff;
++ out_pixel->r = le16_to_cpu(pixels[2]);
++ out_pixel->g = le16_to_cpu(pixels[1]);
++ out_pixel->b = le16_to_cpu(pixels[0]);
+ }
+
+-static void RGB565_to_argb_u16(struct line_buffer *stage_buffer,
+- const struct vkms_frame_info *frame_info, int y)
++static void RGB565_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+ {
+- struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+- u16 *src_pixels = get_packed_src_addr(frame_info, y);
+- int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+- stage_buffer->n_pixels);
++ u16 *pixels = (u16 *)src_pixels;
+
+ s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
+ s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
+
+- for (size_t x = 0; x < x_limit; x++, src_pixels++) {
+- u16 rgb_565 = le16_to_cpu(*src_pixels);
+- s64 fp_r = drm_int2fixp((rgb_565 >> 11) & 0x1f);
+- s64 fp_g = drm_int2fixp((rgb_565 >> 5) & 0x3f);
+- s64 fp_b = drm_int2fixp(rgb_565 & 0x1f);
++ u16 rgb_565 = le16_to_cpu(*pixels);
++ s64 fp_r = drm_int2fixp((rgb_565 >> 11) & 0x1f);
++ s64 fp_g = drm_int2fixp((rgb_565 >> 5) & 0x3f);
++ s64 fp_b = drm_int2fixp(rgb_565 & 0x1f);
+
+- out_pixels[x].a = (u16)0xffff;
+- out_pixels[x].r = drm_fixp2int(drm_fixp_mul(fp_r, fp_rb_ratio));
+- out_pixels[x].g = drm_fixp2int(drm_fixp_mul(fp_g, fp_g_ratio));
+- out_pixels[x].b = drm_fixp2int(drm_fixp_mul(fp_b, fp_rb_ratio));
+- }
++ out_pixel->a = (u16)0xffff;
++ out_pixel->r = drm_fixp2int_round(drm_fixp_mul(fp_r, fp_rb_ratio));
++ out_pixel->g = drm_fixp2int_round(drm_fixp_mul(fp_g, fp_g_ratio));
++ out_pixel->b = drm_fixp2int_round(drm_fixp_mul(fp_b, fp_rb_ratio));
++}
++
++void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y)
++{
++ struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
++ struct vkms_frame_info *frame_info = plane->frame_info;
++ u8 *src_pixels = get_packed_src_addr(frame_info, y);
++ int limit = min_t(size_t, drm_rect_width(&frame_info->dst), stage_buffer->n_pixels);
++
++ for (size_t x = 0; x < limit; x++, src_pixels += frame_info->cpp)
++ plane->pixel_read(src_pixels, &out_pixels[x]);
+ }
+
+ /*
+@@ -241,15 +216,15 @@ static void argb_u16_to_RGB565(struct vkms_frame_info *frame_info,
+ s64 fp_g = drm_int2fixp(in_pixels[x].g);
+ s64 fp_b = drm_int2fixp(in_pixels[x].b);
+
+- u16 r = drm_fixp2int(drm_fixp_div(fp_r, fp_rb_ratio));
+- u16 g = drm_fixp2int(drm_fixp_div(fp_g, fp_g_ratio));
+- u16 b = drm_fixp2int(drm_fixp_div(fp_b, fp_rb_ratio));
++ u16 r = drm_fixp2int_round(drm_fixp_div(fp_r, fp_rb_ratio));
++ u16 g = drm_fixp2int_round(drm_fixp_div(fp_g, fp_g_ratio));
++ u16 b = drm_fixp2int_round(drm_fixp_div(fp_b, fp_rb_ratio));
+
+ *dst_pixels = cpu_to_le16(r << 11 | g << 5 | b);
+ }
+ }
+
+-void *get_frame_to_line_function(u32 format)
++void *get_pixel_conversion_function(u32 format)
+ {
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h
+index 43b7c19790181..c5b113495d0c0 100644
+--- a/drivers/gpu/drm/vkms/vkms_formats.h
++++ b/drivers/gpu/drm/vkms/vkms_formats.h
+@@ -5,7 +5,7 @@
+
+ #include "vkms_drv.h"
+
+-void *get_frame_to_line_function(u32 format);
++void *get_pixel_conversion_function(u32 format);
+
+ void *get_line_to_frame_function(u32 format);
+
+diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
+index b3f8a115cc234..eaee51358a49b 100644
+--- a/drivers/gpu/drm/vkms/vkms_plane.c
++++ b/drivers/gpu/drm/vkms/vkms_plane.c
+@@ -123,7 +123,7 @@ static void vkms_plane_atomic_update(struct drm_plane *plane,
+ frame_info->offset = fb->offsets[0];
+ frame_info->pitch = fb->pitches[0];
+ frame_info->cpp = fb->format->cpp[0];
+- vkms_plane_state->plane_read = get_frame_to_line_function(fmt);
++ vkms_plane_state->pixel_read = get_pixel_conversion_function(fmt);
+ }
+
+ static int vkms_plane_atomic_check(struct drm_plane *plane,
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 4ce012f83253e..b977450cac752 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -1285,7 +1285,7 @@ config HID_MCP2221
+
+ config HID_KUNIT_TEST
+ tristate "KUnit tests for HID" if !KUNIT_ALL_TESTS
+- depends on KUNIT=y
++ depends on KUNIT
+ depends on HID_BATTERY_STRENGTH
+ depends on HID_UCLOGIC
+ default KUNIT_ALL_TESTS
+diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
+index 70121482a6173..27207ec6f7feb 100644
+--- a/drivers/hwmon/f71882fg.c
++++ b/drivers/hwmon/f71882fg.c
+@@ -1096,8 +1096,11 @@ static ssize_t show_pwm(struct device *dev,
+ val = data->pwm[nr];
+ else {
+ /* RPM mode */
+- val = 255 * fan_from_reg(data->fan_target[nr])
+- / fan_from_reg(data->fan_full_speed[nr]);
++ if (fan_from_reg(data->fan_full_speed[nr]))
++ val = 255 * fan_from_reg(data->fan_target[nr])
++ / fan_from_reg(data->fan_full_speed[nr]);
++ else
++ val = 0;
+ }
+ mutex_unlock(&data->update_lock);
+ return sprintf(buf, "%d\n", val);
+diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
+index 73e5d92b200b0..1501ceb551e79 100644
+--- a/drivers/hwmon/gsc-hwmon.c
++++ b/drivers/hwmon/gsc-hwmon.c
+@@ -82,8 +82,8 @@ static ssize_t pwm_auto_point_temp_store(struct device *dev,
+ if (kstrtol(buf, 10, &temp))
+ return -EINVAL;
+
+- temp = clamp_val(temp, 0, 10000);
+- temp = DIV_ROUND_CLOSEST(temp, 10);
++ temp = clamp_val(temp, 0, 100000);
++ temp = DIV_ROUND_CLOSEST(temp, 100);
+
+ regs[0] = temp & 0xff;
+ regs[1] = (temp >> 8) & 0xff;
+@@ -100,7 +100,7 @@ static ssize_t pwm_auto_point_pwm_show(struct device *dev,
+ {
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+- return sprintf(buf, "%d\n", 255 * (50 + (attr->index * 10)) / 100);
++ return sprintf(buf, "%d\n", 255 * (50 + (attr->index * 10)));
+ }
+
+ static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point1_pwm, pwm_auto_point_pwm, 0);
+diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
+index 3b07bfb43e937..b8543c06d022a 100644
+--- a/drivers/hwmon/pmbus/adm1275.c
++++ b/drivers/hwmon/pmbus/adm1275.c
+@@ -37,10 +37,13 @@ enum chips { adm1075, adm1272, adm1275, adm1276, adm1278, adm1293, adm1294 };
+
+ #define ADM1272_IRANGE BIT(0)
+
++#define ADM1278_TSFILT BIT(15)
+ #define ADM1278_TEMP1_EN BIT(3)
+ #define ADM1278_VIN_EN BIT(2)
+ #define ADM1278_VOUT_EN BIT(1)
+
++#define ADM1278_PMON_DEFCONFIG (ADM1278_VOUT_EN | ADM1278_TEMP1_EN | ADM1278_TSFILT)
++
+ #define ADM1293_IRANGE_25 0
+ #define ADM1293_IRANGE_50 BIT(6)
+ #define ADM1293_IRANGE_100 BIT(7)
+@@ -462,6 +465,22 @@ static const struct i2c_device_id adm1275_id[] = {
+ };
+ MODULE_DEVICE_TABLE(i2c, adm1275_id);
+
++/* Enable VOUT & TEMP1 if not enabled (disabled by default) */
++static int adm1275_enable_vout_temp(struct i2c_client *client, int config)
++{
++ int ret;
++
++ if ((config & ADM1278_PMON_DEFCONFIG) != ADM1278_PMON_DEFCONFIG) {
++ config |= ADM1278_PMON_DEFCONFIG;
++ ret = i2c_smbus_write_word_data(client, ADM1275_PMON_CONFIG, config);
++ if (ret < 0) {
++ dev_err(&client->dev, "Failed to enable VOUT/TEMP1 monitoring\n");
++ return ret;
++ }
++ }
++ return 0;
++}
++
+ static int adm1275_probe(struct i2c_client *client)
+ {
+ s32 (*config_read_fn)(const struct i2c_client *client, u8 reg);
+@@ -615,19 +634,10 @@ static int adm1275_probe(struct i2c_client *client)
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+
+- /* Enable VOUT & TEMP1 if not enabled (disabled by default) */
+- if ((config & (ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) !=
+- (ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) {
+- config |= ADM1278_VOUT_EN | ADM1278_TEMP1_EN;
+- ret = i2c_smbus_write_byte_data(client,
+- ADM1275_PMON_CONFIG,
+- config);
+- if (ret < 0) {
+- dev_err(&client->dev,
+- "Failed to enable VOUT monitoring\n");
+- return -ENODEV;
+- }
+- }
++ ret = adm1275_enable_vout_temp(client, config);
++ if (ret)
++ return ret;
++
+ if (config & ADM1278_VIN_EN)
+ info->func[0] |= PMBUS_HAVE_VIN;
+ break;
+@@ -684,19 +694,9 @@ static int adm1275_probe(struct i2c_client *client)
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+
+- /* Enable VOUT & TEMP1 if not enabled (disabled by default) */
+- if ((config & (ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) !=
+- (ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) {
+- config |= ADM1278_VOUT_EN | ADM1278_TEMP1_EN;
+- ret = i2c_smbus_write_word_data(client,
+- ADM1275_PMON_CONFIG,
+- config);
+- if (ret < 0) {
+- dev_err(&client->dev,
+- "Failed to enable VOUT monitoring\n");
+- return -ENODEV;
+- }
+- }
++ ret = adm1275_enable_vout_temp(client, config);
++ if (ret)
++ return ret;
+
+ if (config & ADM1278_VIN_EN)
+ info->func[0] |= PMBUS_HAVE_VIN;
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 85e36c9f8e797..b4edcf12d0d19 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -283,15 +283,21 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
+ for (indx = 0; indx < rdev->num_msix; indx++)
+ rdev->en_dev->msix_entries[indx].vector = ent[indx].vector;
+
+- bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
+- false);
++ rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
++ false);
++ if (rc) {
++ ibdev_warn(&rdev->ibdev, "Failed to reinit CREQ\n");
++ return;
++ }
+ for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
+ nq = &rdev->nq[indx - 1];
+ rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
+ msix_ent[indx].vector, false);
+- if (rc)
++ if (rc) {
+ ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
+ indx - 1);
++ return;
++ }
+ }
+ }
+
+@@ -1004,12 +1010,6 @@ static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
+ if (!ib_device_try_get(&rdev->ibdev))
+ return 0;
+
+- if (!sgid_tbl) {
+- ibdev_err(&rdev->ibdev, "QPLIB: SGID table not allocated");
+- rc = -EINVAL;
+- goto out;
+- }
+-
+ for (index = 0; index < sgid_tbl->active; index++) {
+ gid_idx = sgid_tbl->hw_id[index];
+
+@@ -1027,7 +1027,7 @@ static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
+ rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
+ rdev->qplib_res.netdev->dev_addr);
+ }
+-out:
++
+ ib_device_put(&rdev->ibdev);
+ return rc;
+ }
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index ab2cc1c67f70b..74d56900387a1 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -405,6 +405,9 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
+
+ void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
+ {
++ if (!nq->requested)
++ return;
++
+ tasklet_disable(&nq->nq_tasklet);
+ /* Mask h/w interrupt */
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
+@@ -412,11 +415,12 @@ void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
+ synchronize_irq(nq->msix_vec);
+ if (kill)
+ tasklet_kill(&nq->nq_tasklet);
+- if (nq->requested) {
+- irq_set_affinity_hint(nq->msix_vec, NULL);
+- free_irq(nq->msix_vec, nq);
+- nq->requested = false;
+- }
++
++ irq_set_affinity_hint(nq->msix_vec, NULL);
++ free_irq(nq->msix_vec, nq);
++ kfree(nq->name);
++ nq->name = NULL;
++ nq->requested = false;
+ }
+
+ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
+@@ -442,6 +446,7 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
+ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+ int msix_vector, bool need_init)
+ {
++ struct bnxt_qplib_res *res = nq->res;
+ int rc;
+
+ if (nq->requested)
+@@ -453,10 +458,17 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+ else
+ tasklet_enable(&nq->nq_tasklet);
+
+- snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
++ nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
++ nq_indx, pci_name(res->pdev));
++ if (!nq->name)
++ return -ENOMEM;
+ rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
+- if (rc)
++ if (rc) {
++ kfree(nq->name);
++ nq->name = NULL;
++ tasklet_disable(&nq->nq_tasklet);
+ return rc;
++ }
+
+ cpumask_clear(&nq->mask);
+ cpumask_set_cpu(nq_indx, &nq->mask);
+@@ -467,7 +479,7 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+ nq->msix_vec, nq_indx);
+ }
+ nq->requested = true;
+- bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
++ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
+
+ return rc;
+ }
+@@ -1601,7 +1613,7 @@ static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
+ il_src = (void *)wqe->sg_list[indx].addr;
+ t_len += len;
+ if (t_len > qp->max_inline_data)
+- goto bad;
++ return -ENOMEM;
+ while (len) {
+ if (pull_dst) {
+ pull_dst = false;
+@@ -1625,8 +1637,6 @@ static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
+ }
+
+ return t_len;
+-bad:
+- return -ENOMEM;
+ }
+
+ static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
+@@ -2056,7 +2066,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
+ hwq_attr.sginfo = &cq->sg_info;
+ rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
+ if (rc)
+- goto exit;
++ return rc;
+
+ RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
+
+@@ -2097,7 +2107,6 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
+
+ fail:
+ bnxt_qplib_free_hwq(res, &cq->hwq);
+-exit:
+ return rc;
+ }
+
+@@ -2725,11 +2734,8 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
+
+ qp = (struct bnxt_qplib_qp *)((unsigned long)
+ le64_to_cpu(hwcqe->qp_handle));
+- if (!qp) {
+- dev_err(&cq->hwq.pdev->dev,
+- "FP: CQ Process terminal qp is NULL\n");
++ if (!qp)
+ return -EINVAL;
+- }
+
+ /* Must block new posting of SQ and RQ */
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index 0375019525431..f859710f9a7f4 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -471,7 +471,7 @@ typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_nq {
+ struct pci_dev *pdev;
+ struct bnxt_qplib_res *res;
+- char name[32];
++ char *name;
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_nq_db nq_db;
+ u16 ring_id;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+index 061b2895dd9b5..75e0c42f6f424 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+@@ -181,7 +181,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
+ } while (size > 0);
+ cmdq->seq_num++;
+
+- cmdq_prod = hwq->prod;
++ cmdq_prod = hwq->prod & 0xFFFF;
+ if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) {
+ /* The very first doorbell write
+ * is required to set this flag
+@@ -299,7 +299,8 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
+ }
+
+ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
+- struct creq_qp_event *qp_event)
++ struct creq_qp_event *qp_event,
++ u32 *num_wait)
+ {
+ struct creq_qp_error_notification *err_event;
+ struct bnxt_qplib_hwq *hwq = &rcfw->cmdq.hwq;
+@@ -308,6 +309,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
+ u16 cbit, blocked = 0;
+ struct pci_dev *pdev;
+ unsigned long flags;
++ u32 wait_cmds = 0;
+ __le16 mcookie;
+ u16 cookie;
+ int rc = 0;
+@@ -367,9 +369,10 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
+ crsqe->req_size = 0;
+
+ if (!blocked)
+- wake_up(&rcfw->cmdq.waitq);
++ wait_cmds++;
+ spin_unlock_irqrestore(&hwq->lock, flags);
+ }
++ *num_wait += wait_cmds;
+ return rc;
+ }
+
+@@ -383,6 +386,7 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
+ struct creq_base *creqe;
+ u32 sw_cons, raw_cons;
+ unsigned long flags;
++ u32 num_wakeup = 0;
+
+ /* Service the CREQ until budget is over */
+ spin_lock_irqsave(&hwq->lock, flags);
+@@ -401,7 +405,8 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
+ switch (type) {
+ case CREQ_BASE_TYPE_QP_EVENT:
+ bnxt_qplib_process_qp_event
+- (rcfw, (struct creq_qp_event *)creqe);
++ (rcfw, (struct creq_qp_event *)creqe,
++ &num_wakeup);
+ creq->stats.creq_qp_event_processed++;
+ break;
+ case CREQ_BASE_TYPE_FUNC_EVENT:
+@@ -429,6 +434,8 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
+ rcfw->res->cctx, true);
+ }
+ spin_unlock_irqrestore(&hwq->lock, flags);
++ if (num_wakeup)
++ wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
+ }
+
+ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
+@@ -598,7 +605,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
+ rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_8192;
+
+ sginfo.pgsize = bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth);
+- hwq_attr.depth = rcfw->cmdq_depth;
++ hwq_attr.depth = rcfw->cmdq_depth & 0x7FFFFFFF;
+ hwq_attr.stride = BNXT_QPLIB_CMDQE_UNITS;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ if (bnxt_qplib_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) {
+@@ -635,6 +642,10 @@ void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
+ struct bnxt_qplib_creq_ctx *creq;
+
+ creq = &rcfw->creq;
++
++ if (!creq->requested)
++ return;
++
+ tasklet_disable(&creq->creq_tasklet);
+ /* Mask h/w interrupts */
+ bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false);
+@@ -643,10 +654,10 @@ void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
+ if (kill)
+ tasklet_kill(&creq->creq_tasklet);
+
+- if (creq->requested) {
+- free_irq(creq->msix_vec, rcfw);
+- creq->requested = false;
+- }
++ free_irq(creq->msix_vec, rcfw);
++ kfree(creq->irq_name);
++ creq->irq_name = NULL;
++ creq->requested = false;
+ }
+
+ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+@@ -678,9 +689,11 @@ int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+ bool need_init)
+ {
+ struct bnxt_qplib_creq_ctx *creq;
++ struct bnxt_qplib_res *res;
+ int rc;
+
+ creq = &rcfw->creq;
++ res = rcfw->res;
+
+ if (creq->requested)
+ return -EFAULT;
+@@ -690,13 +703,22 @@ int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+ tasklet_setup(&creq->creq_tasklet, bnxt_qplib_service_creq);
+ else
+ tasklet_enable(&creq->creq_tasklet);
++
++ creq->irq_name = kasprintf(GFP_KERNEL, "bnxt_re-creq@pci:%s",
++ pci_name(res->pdev));
++ if (!creq->irq_name)
++ return -ENOMEM;
+ rc = request_irq(creq->msix_vec, bnxt_qplib_creq_irq, 0,
+- "bnxt_qplib_creq", rcfw);
+- if (rc)
++ creq->irq_name, rcfw);
++ if (rc) {
++ kfree(creq->irq_name);
++ creq->irq_name = NULL;
++ tasklet_disable(&creq->creq_tasklet);
+ return rc;
++ }
+ creq->requested = true;
+
+- bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, true);
++ bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true);
+
+ return 0;
+ }
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+index 0a3d8e7da3d42..b887e7fbad9ef 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+@@ -174,6 +174,7 @@ struct bnxt_qplib_creq_ctx {
+ u16 ring_id;
+ int msix_vec;
+ bool requested; /*irq handler installed */
++ char *irq_name;
+ };
+
+ /* RCFW Communication Channels */
+diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
+index 8973a081d641e..e7d831330278d 100644
+--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
++++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
+@@ -215,11 +215,11 @@ static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ ret = sdma_txadd_page(dd,
+- NULL,
+ txreq,
+ skb_frag_page(frag),
+ frag->bv_offset,
+- skb_frag_size(frag));
++ skb_frag_size(frag),
++ NULL, NULL, NULL);
+ if (unlikely(ret))
+ break;
+ }
+diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
+index 71b9ac0188875..94f1701667301 100644
+--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
++++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
+@@ -19,8 +19,7 @@ static int mmu_notifier_range_start(struct mmu_notifier *,
+ const struct mmu_notifier_range *);
+ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
+ unsigned long, unsigned long);
+-static void do_remove(struct mmu_rb_handler *handler,
+- struct list_head *del_list);
++static void release_immediate(struct kref *refcount);
+ static void handle_remove(struct work_struct *work);
+
+ static const struct mmu_notifier_ops mn_opts = {
+@@ -103,7 +102,11 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
+ }
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+- do_remove(handler, &del_list);
++ while (!list_empty(&del_list)) {
++ rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
++ list_del(&rbnode->list);
++ kref_put(&rbnode->refcount, release_immediate);
++ }
+
+ /* Now the mm may be freed. */
+ mmdrop(handler->mn.mm);
+@@ -131,12 +134,6 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
+ }
+ __mmu_int_rb_insert(mnode, &handler->root);
+ list_add_tail(&mnode->list, &handler->lru_list);
+-
+- ret = handler->ops->insert(handler->ops_arg, mnode);
+- if (ret) {
+- __mmu_int_rb_remove(mnode, &handler->root);
+- list_del(&mnode->list); /* remove from LRU list */
+- }
+ mnode->handler = handler;
+ unlock:
+ spin_unlock_irqrestore(&handler->lock, flags);
+@@ -180,6 +177,48 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
+ return node;
+ }
+
++/*
++ * Must NOT call while holding mnode->handler->lock.
++ * mnode->handler->ops->remove() may sleep and mnode->handler->lock is a
++ * spinlock.
++ */
++static void release_immediate(struct kref *refcount)
++{
++ struct mmu_rb_node *mnode =
++ container_of(refcount, struct mmu_rb_node, refcount);
++ mnode->handler->ops->remove(mnode->handler->ops_arg, mnode);
++}
++
++/* Caller must hold mnode->handler->lock */
++static void release_nolock(struct kref *refcount)
++{
++ struct mmu_rb_node *mnode =
++ container_of(refcount, struct mmu_rb_node, refcount);
++ list_move(&mnode->list, &mnode->handler->del_list);
++ queue_work(mnode->handler->wq, &mnode->handler->del_work);
++}
++
++/*
++ * struct mmu_rb_node->refcount kref_put() callback.
++ * Adds mmu_rb_node to mmu_rb_node->handler->del_list and queues
++ * handler->del_work on handler->wq.
++ * Does not remove mmu_rb_node from handler->lru_list or handler->rb_root.
++ * Acquires mmu_rb_node->handler->lock; do not call while already holding
++ * handler->lock.
++ */
++void hfi1_mmu_rb_release(struct kref *refcount)
++{
++ struct mmu_rb_node *mnode =
++ container_of(refcount, struct mmu_rb_node, refcount);
++ struct mmu_rb_handler *handler = mnode->handler;
++ unsigned long flags;
++
++ spin_lock_irqsave(&handler->lock, flags);
++ list_move(&mnode->list, &mnode->handler->del_list);
++ spin_unlock_irqrestore(&handler->lock, flags);
++ queue_work(handler->wq, &handler->del_work);
++}
++
+ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
+ {
+ struct mmu_rb_node *rbnode, *ptr;
+@@ -194,6 +233,10 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
+
+ spin_lock_irqsave(&handler->lock, flags);
+ list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
++ /* refcount == 1 implies mmu_rb_handler has only rbnode ref */
++ if (kref_read(&rbnode->refcount) > 1)
++ continue;
++
+ if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
+ &stop)) {
+ __mmu_int_rb_remove(rbnode, &handler->root);
+@@ -206,7 +249,7 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+ list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
+- handler->ops->remove(handler->ops_arg, rbnode);
++ kref_put(&rbnode->refcount, release_immediate);
+ }
+ }
+
+@@ -218,7 +261,6 @@ static int mmu_notifier_range_start(struct mmu_notifier *mn,
+ struct rb_root_cached *root = &handler->root;
+ struct mmu_rb_node *node, *ptr = NULL;
+ unsigned long flags;
+- bool added = false;
+
+ spin_lock_irqsave(&handler->lock, flags);
+ for (node = __mmu_int_rb_iter_first(root, range->start, range->end-1);
+@@ -227,38 +269,16 @@ static int mmu_notifier_range_start(struct mmu_notifier *mn,
+ ptr = __mmu_int_rb_iter_next(node, range->start,
+ range->end - 1);
+ trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
+- if (handler->ops->invalidate(handler->ops_arg, node)) {
+- __mmu_int_rb_remove(node, root);
+- /* move from LRU list to delete list */
+- list_move(&node->list, &handler->del_list);
+- added = true;
+- }
++ /* Remove from rb tree and lru_list. */
++ __mmu_int_rb_remove(node, root);
++ list_del_init(&node->list);
++ kref_put(&node->refcount, release_nolock);
+ }
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+- if (added)
+- queue_work(handler->wq, &handler->del_work);
+-
+ return 0;
+ }
+
+-/*
+- * Call the remove function for the given handler and the list. This
+- * is expected to be called with a delete list extracted from handler.
+- * The caller should not be holding the handler lock.
+- */
+-static void do_remove(struct mmu_rb_handler *handler,
+- struct list_head *del_list)
+-{
+- struct mmu_rb_node *node;
+-
+- while (!list_empty(del_list)) {
+- node = list_first_entry(del_list, struct mmu_rb_node, list);
+- list_del(&node->list);
+- handler->ops->remove(handler->ops_arg, node);
+- }
+-}
+-
+ /*
+ * Work queue function to remove all nodes that have been queued up to
+ * be removed. The key feature is that mm->mmap_lock is not being held
+@@ -271,11 +291,16 @@ static void handle_remove(struct work_struct *work)
+ del_work);
+ struct list_head del_list;
+ unsigned long flags;
++ struct mmu_rb_node *node;
+
+ /* remove anything that is queued to get removed */
+ spin_lock_irqsave(&handler->lock, flags);
+ list_replace_init(&handler->del_list, &del_list);
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+- do_remove(handler, &del_list);
++ while (!list_empty(&del_list)) {
++ node = list_first_entry(&del_list, struct mmu_rb_node, list);
++ list_del(&node->list);
++ handler->ops->remove(handler->ops_arg, node);
++ }
+ }
+diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
+index ed75acdb7b839..dd2c4a0ae95b1 100644
+--- a/drivers/infiniband/hw/hfi1/mmu_rb.h
++++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
+@@ -16,6 +16,7 @@ struct mmu_rb_node {
+ struct rb_node node;
+ struct mmu_rb_handler *handler;
+ struct list_head list;
++ struct kref refcount;
+ };
+
+ /*
+@@ -51,6 +52,8 @@ int hfi1_mmu_rb_register(void *ops_arg,
+ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
+ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
+ struct mmu_rb_node *mnode);
++void hfi1_mmu_rb_release(struct kref *refcount);
++
+ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
+ struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
+ unsigned long addr,
+diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
+index bb2552dd29c1e..26c62162759ba 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -1593,7 +1593,20 @@ static inline void sdma_unmap_desc(
+ struct hfi1_devdata *dd,
+ struct sdma_desc *descp)
+ {
+- system_descriptor_complete(dd, descp);
++ switch (sdma_mapping_type(descp)) {
++ case SDMA_MAP_SINGLE:
++ dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp),
++ sdma_mapping_len(descp), DMA_TO_DEVICE);
++ break;
++ case SDMA_MAP_PAGE:
++ dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp),
++ sdma_mapping_len(descp), DMA_TO_DEVICE);
++ break;
++ }
++
++ if (descp->pinning_ctx && descp->ctx_put)
++ descp->ctx_put(descp->pinning_ctx);
++ descp->pinning_ctx = NULL;
+ }
+
+ /*
+@@ -3113,8 +3126,8 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
+
+ /* Add descriptor for coalesce buffer */
+ tx->desc_limit = MAX_DESC;
+- return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx,
+- addr, tx->tlen);
++ return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
++ addr, tx->tlen, NULL, NULL, NULL);
+ }
+
+ return 1;
+@@ -3157,9 +3170,9 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+ make_tx_sdma_desc(
+ tx,
+ SDMA_MAP_NONE,
+- NULL,
+ dd->sdma_pad_phys,
+- sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
++ sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)),
++ NULL, NULL, NULL);
+ tx->num_desc++;
+ _sdma_close_tx(dd, tx);
+ return rval;
+diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
+index 95aaec14c6c28..7fdebab202c4f 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.h
++++ b/drivers/infiniband/hw/hfi1/sdma.h
+@@ -594,9 +594,11 @@ static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
+ static inline void make_tx_sdma_desc(
+ struct sdma_txreq *tx,
+ int type,
+- void *pinning_ctx,
+ dma_addr_t addr,
+- size_t len)
++ size_t len,
++ void *pinning_ctx,
++ void (*ctx_get)(void *),
++ void (*ctx_put)(void *))
+ {
+ struct sdma_desc *desc = &tx->descp[tx->num_desc];
+
+@@ -613,7 +615,11 @@ static inline void make_tx_sdma_desc(
+ << SDMA_DESC0_PHY_ADDR_SHIFT) |
+ (((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
+ << SDMA_DESC0_BYTE_COUNT_SHIFT);
++
+ desc->pinning_ctx = pinning_ctx;
++ desc->ctx_put = ctx_put;
++ if (pinning_ctx && ctx_get)
++ ctx_get(pinning_ctx);
+ }
+
+ /* helper to extend txreq */
+@@ -645,18 +651,20 @@ static inline void _sdma_close_tx(struct hfi1_devdata *dd,
+ static inline int _sdma_txadd_daddr(
+ struct hfi1_devdata *dd,
+ int type,
+- void *pinning_ctx,
+ struct sdma_txreq *tx,
+ dma_addr_t addr,
+- u16 len)
++ u16 len,
++ void *pinning_ctx,
++ void (*ctx_get)(void *),
++ void (*ctx_put)(void *))
+ {
+ int rval = 0;
+
+ make_tx_sdma_desc(
+ tx,
+ type,
+- pinning_ctx,
+- addr, len);
++ addr, len,
++ pinning_ctx, ctx_get, ctx_put);
+ WARN_ON(len > tx->tlen);
+ tx->num_desc++;
+ tx->tlen -= len;
+@@ -676,11 +684,18 @@ static inline int _sdma_txadd_daddr(
+ /**
+ * sdma_txadd_page() - add a page to the sdma_txreq
+ * @dd: the device to use for mapping
+- * @pinning_ctx: context to be released at descriptor retirement
+ * @tx: tx request to which the page is added
+ * @page: page to map
+ * @offset: offset within the page
+ * @len: length in bytes
++ * @pinning_ctx: context to be stored on struct sdma_desc .pinning_ctx. Not
++ * added if coalesce buffer is used. E.g. pointer to pinned-page
++ * cache entry for the sdma_desc.
++ * @ctx_get: optional function to take reference to @pinning_ctx. Not called if
++ * @pinning_ctx is NULL.
++ * @ctx_put: optional function to release reference to @pinning_ctx after
++ * sdma_desc completes. May be called in interrupt context so must
++ * not sleep. Not called if @pinning_ctx is NULL.
+ *
+ * This is used to add a page/offset/length descriptor.
+ *
+@@ -692,11 +707,13 @@ static inline int _sdma_txadd_daddr(
+ */
+ static inline int sdma_txadd_page(
+ struct hfi1_devdata *dd,
+- void *pinning_ctx,
+ struct sdma_txreq *tx,
+ struct page *page,
+ unsigned long offset,
+- u16 len)
++ u16 len,
++ void *pinning_ctx,
++ void (*ctx_get)(void *),
++ void (*ctx_put)(void *))
+ {
+ dma_addr_t addr;
+ int rval;
+@@ -720,7 +737,8 @@ static inline int sdma_txadd_page(
+ return -ENOSPC;
+ }
+
+- return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, pinning_ctx, tx, addr, len);
++ return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, tx, addr, len,
++ pinning_ctx, ctx_get, ctx_put);
+ }
+
+ /**
+@@ -754,8 +772,8 @@ static inline int sdma_txadd_daddr(
+ return rval;
+ }
+
+- return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, NULL, tx,
+- addr, len);
++ return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len,
++ NULL, NULL, NULL);
+ }
+
+ /**
+@@ -801,7 +819,8 @@ static inline int sdma_txadd_kvaddr(
+ return -ENOSPC;
+ }
+
+- return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx, addr, len);
++ return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, addr, len,
++ NULL, NULL, NULL);
+ }
+
+ struct iowait_work;
+@@ -1034,6 +1053,4 @@ u16 sdma_get_descq_cnt(void);
+ extern uint mod_num_sdma;
+
+ void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);
+-
+-void system_descriptor_complete(struct hfi1_devdata *dd, struct sdma_desc *descp);
+ #endif
+diff --git a/drivers/infiniband/hw/hfi1/sdma_txreq.h b/drivers/infiniband/hw/hfi1/sdma_txreq.h
+index fad946cb5e0d8..85ae7293c2741 100644
+--- a/drivers/infiniband/hw/hfi1/sdma_txreq.h
++++ b/drivers/infiniband/hw/hfi1/sdma_txreq.h
+@@ -20,6 +20,8 @@ struct sdma_desc {
+ /* private: don't use directly */
+ u64 qw[2];
+ void *pinning_ctx;
++ /* Release reference to @pinning_ctx. May be called in interrupt context. Must not sleep. */
++ void (*ctx_put)(void *ctx);
+ };
+
+ /**
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
+index ae58b48afe074..02bd62b857b75 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -62,18 +62,14 @@ static int defer_packet_queue(
+ static void activate_packet_queue(struct iowait *wait, int reason);
+ static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
+ unsigned long len);
+-static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode);
+ static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
+ void *arg2, bool *stop);
+ static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
+-static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
+
+ static struct mmu_rb_ops sdma_rb_ops = {
+ .filter = sdma_rb_filter,
+- .insert = sdma_rb_insert,
+ .evict = sdma_rb_evict,
+ .remove = sdma_rb_remove,
+- .invalidate = sdma_rb_invalidate
+ };
+
+ static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
+@@ -247,14 +243,14 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
+ spin_unlock(&fd->pq_rcu_lock);
+ synchronize_srcu(&fd->pq_srcu);
+ /* at this point there can be no more new requests */
+- if (pq->handler)
+- hfi1_mmu_rb_unregister(pq->handler);
+ iowait_sdma_drain(&pq->busy);
+ /* Wait until all requests have been freed. */
+ wait_event_interruptible(
+ pq->wait,
+ !atomic_read(&pq->n_reqs));
+ kfree(pq->reqs);
++ if (pq->handler)
++ hfi1_mmu_rb_unregister(pq->handler);
+ bitmap_free(pq->req_in_use);
+ kmem_cache_destroy(pq->txreq_cache);
+ flush_pq_iowait(pq);
+@@ -1275,25 +1271,17 @@ static void free_system_node(struct sdma_mmu_node *node)
+ kfree(node);
+ }
+
+-static inline void acquire_node(struct sdma_mmu_node *node)
+-{
+- atomic_inc(&node->refcount);
+- WARN_ON(atomic_read(&node->refcount) < 0);
+-}
+-
+-static inline void release_node(struct mmu_rb_handler *handler,
+- struct sdma_mmu_node *node)
+-{
+- atomic_dec(&node->refcount);
+- WARN_ON(atomic_read(&node->refcount) < 0);
+-}
+-
++/*
++ * kref_get()'s an additional kref on the returned rb_node to prevent rb_node
++ * from being released until after rb_node is assigned to an SDMA descriptor
++ * (struct sdma_desc) under add_system_iovec_to_sdma_packet(), even if the
++ * virtual address range for rb_node is invalidated between now and then.
++ */
+ static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
+ unsigned long start,
+ unsigned long end)
+ {
+ struct mmu_rb_node *rb_node;
+- struct sdma_mmu_node *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&handler->lock, flags);
+@@ -1302,11 +1290,12 @@ static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
+ spin_unlock_irqrestore(&handler->lock, flags);
+ return NULL;
+ }
+- node = container_of(rb_node, struct sdma_mmu_node, rb);
+- acquire_node(node);
++
++ /* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
++ kref_get(&rb_node->refcount);
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+- return node;
++ return container_of(rb_node, struct sdma_mmu_node, rb);
+ }
+
+ static int pin_system_pages(struct user_sdma_request *req,
+@@ -1355,6 +1344,13 @@ retry:
+ return 0;
+ }
+
++/*
++ * kref refcount on *node_p will be 2 on successful addition: one kref from
++ * kref_init() for mmu_rb_handler and one kref to prevent *node_p from being
++ * released until after *node_p is assigned to an SDMA descriptor (struct
++ * sdma_desc) under add_system_iovec_to_sdma_packet(), even if the virtual
++ * address range for *node_p is invalidated between now and then.
++ */
+ static int add_system_pinning(struct user_sdma_request *req,
+ struct sdma_mmu_node **node_p,
+ unsigned long start, unsigned long len)
+@@ -1368,6 +1364,12 @@ static int add_system_pinning(struct user_sdma_request *req,
+ if (!node)
+ return -ENOMEM;
+
++ /* First kref "moves" to mmu_rb_handler */
++ kref_init(&node->rb.refcount);
++
++ /* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
++ kref_get(&node->rb.refcount);
++
+ node->pq = pq;
+ ret = pin_system_pages(req, start, len, node, PFN_DOWN(len));
+ if (ret == 0) {
+@@ -1431,15 +1433,15 @@ static int get_system_cache_entry(struct user_sdma_request *req,
+ return 0;
+ }
+
+- SDMA_DBG(req, "prepend: node->rb.addr %lx, node->refcount %d",
+- node->rb.addr, atomic_read(&node->refcount));
++ SDMA_DBG(req, "prepend: node->rb.addr %lx, node->rb.refcount %d",
++ node->rb.addr, kref_read(&node->rb.refcount));
+ prepend_len = node->rb.addr - start;
+
+ /*
+ * This node will not be returned, instead a new node
+ * will be. So release the reference.
+ */
+- release_node(handler, node);
++ kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
+
+ /* Prepend a node to cover the beginning of the allocation */
+ ret = add_system_pinning(req, node_p, start, prepend_len);
+@@ -1451,6 +1453,20 @@ static int get_system_cache_entry(struct user_sdma_request *req,
+ }
+ }
+
++static void sdma_mmu_rb_node_get(void *ctx)
++{
++ struct mmu_rb_node *node = ctx;
++
++ kref_get(&node->refcount);
++}
++
++static void sdma_mmu_rb_node_put(void *ctx)
++{
++ struct sdma_mmu_node *node = ctx;
++
++ kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
++}
++
+ static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct sdma_mmu_node *cache_entry,
+@@ -1494,9 +1510,12 @@ static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
+ ctx = cache_entry;
+ }
+
+- ret = sdma_txadd_page(pq->dd, ctx, &tx->txreq,
++ ret = sdma_txadd_page(pq->dd, &tx->txreq,
+ cache_entry->pages[page_index],
+- page_offset, from_this_page);
++ page_offset, from_this_page,
++ ctx,
++ sdma_mmu_rb_node_get,
++ sdma_mmu_rb_node_put);
+ if (ret) {
+ /*
+ * When there's a failure, the entire request is freed by
+@@ -1518,8 +1537,6 @@ static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_iovec *iovec,
+ size_t from_this_iovec)
+ {
+- struct mmu_rb_handler *handler = req->pq->handler;
+-
+ while (from_this_iovec > 0) {
+ struct sdma_mmu_node *cache_entry;
+ size_t from_this_cache_entry;
+@@ -1540,15 +1557,15 @@ static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
+
+ ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start,
+ from_this_cache_entry);
++
++ /*
++ * Done adding cache_entry to zero or more sdma_desc. Can
++ * kref_put() the "safety" kref taken under
++ * get_system_cache_entry().
++ */
++ kref_put(&cache_entry->rb.refcount, hfi1_mmu_rb_release);
++
+ if (ret) {
+- /*
+- * We're guaranteed that there will be no descriptor
+- * completion callback that releases this node
+- * because only the last descriptor referencing it
+- * has a context attached, and a failure means the
+- * last descriptor was never added.
+- */
+- release_node(handler, cache_entry);
+ SDMA_DBG(req, "add system segment failed %d", ret);
+ return ret;
+ }
+@@ -1599,42 +1616,12 @@ static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
+ return 0;
+ }
+
+-void system_descriptor_complete(struct hfi1_devdata *dd,
+- struct sdma_desc *descp)
+-{
+- switch (sdma_mapping_type(descp)) {
+- case SDMA_MAP_SINGLE:
+- dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp),
+- sdma_mapping_len(descp), DMA_TO_DEVICE);
+- break;
+- case SDMA_MAP_PAGE:
+- dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp),
+- sdma_mapping_len(descp), DMA_TO_DEVICE);
+- break;
+- }
+-
+- if (descp->pinning_ctx) {
+- struct sdma_mmu_node *node = descp->pinning_ctx;
+-
+- release_node(node->rb.handler, node);
+- }
+-}
+-
+ static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
+ unsigned long len)
+ {
+ return (bool)(node->addr == addr);
+ }
+
+-static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode)
+-{
+- struct sdma_mmu_node *node =
+- container_of(mnode, struct sdma_mmu_node, rb);
+-
+- atomic_inc(&node->refcount);
+- return 0;
+-}
+-
+ /*
+ * Return 1 to remove the node from the rb tree and call the remove op.
+ *
+@@ -1647,10 +1634,6 @@ static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
+ container_of(mnode, struct sdma_mmu_node, rb);
+ struct evict_data *evict_data = evict_arg;
+
+- /* is this node still being used? */
+- if (atomic_read(&node->refcount))
+- return 0; /* keep this node */
+-
+ /* this node will be evicted, add its pages to our count */
+ evict_data->cleared += node->npages;
+
+@@ -1668,13 +1651,3 @@ static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
+
+ free_system_node(node);
+ }
+-
+-static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
+-{
+- struct sdma_mmu_node *node =
+- container_of(mnode, struct sdma_mmu_node, rb);
+-
+- if (!atomic_read(&node->refcount))
+- return 1;
+- return 0;
+-}
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
+index a241836371dc1..548347d4c5bc2 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.h
++++ b/drivers/infiniband/hw/hfi1/user_sdma.h
+@@ -104,7 +104,6 @@ struct hfi1_user_sdma_comp_q {
+ struct sdma_mmu_node {
+ struct mmu_rb_node rb;
+ struct hfi1_user_sdma_pkt_q *pq;
+- atomic_t refcount;
+ struct page **pages;
+ unsigned int npages;
+ };
+diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c
+index 727eedfba332a..cc6324d2d1ddc 100644
+--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
++++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
+@@ -64,11 +64,11 @@ static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
+
+ /* combine physically continuous fragments later? */
+ ret = sdma_txadd_page(sde->dd,
+- NULL,
+ &tx->txreq,
+ skb_frag_page(frag),
+ skb_frag_off(frag),
+- skb_frag_size(frag));
++ skb_frag_size(frag),
++ NULL, NULL, NULL);
+ if (unlikely(ret))
+ goto bail_txadd;
+ }
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
+index aa8a08d1c0145..f30274986c0da 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -595,11 +595,12 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
+ }
+
+ /* Set HEM base address(128K/page, pa) to Hardware */
+- if (hr_dev->hw->set_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT)) {
++ ret = hr_dev->hw->set_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT);
++ if (ret) {
+ hns_roce_free_hem(hr_dev, table->hem[i]);
+ table->hem[i] = NULL;
+- ret = -ENODEV;
+- dev_err(dev, "set HEM base address to HW failed.\n");
++ dev_err(dev, "set HEM base address to HW failed, ret = %d.\n",
++ ret);
+ goto out;
+ }
+
+diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
+index 16183e894da77..dd428d915c175 100644
+--- a/drivers/infiniband/hw/irdma/uk.c
++++ b/drivers/infiniband/hw/irdma/uk.c
+@@ -93,16 +93,18 @@ static int irdma_nop_1(struct irdma_qp_uk *qp)
+ */
+ void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
+ {
+- __le64 *wqe;
++ struct irdma_qp_quanta *sq;
+ u32 wqe_idx;
+
+ if (!(qp_wqe_idx & 0x7F)) {
+ wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
+- wqe = qp->sq_base[wqe_idx].elem;
++ sq = qp->sq_base + wqe_idx;
+ if (wqe_idx)
+- memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
++ memset(sq, qp->swqe_polarity ? 0 : 0xFF,
++ 128 * sizeof(*sq));
+ else
+- memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
++ memset(sq, qp->swqe_polarity ? 0xFF : 0,
++ 128 * sizeof(*sq));
+ }
+ }
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
+index afa5ce1a71166..a7ec57ab8fadd 100644
+--- a/drivers/infiniband/sw/rxe/rxe_mw.c
++++ b/drivers/infiniband/sw/rxe/rxe_mw.c
+@@ -48,7 +48,7 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
+ }
+
+ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+- struct rxe_mw *mw, struct rxe_mr *mr)
++ struct rxe_mw *mw, struct rxe_mr *mr, int access)
+ {
+ if (mw->ibmw.type == IB_MW_TYPE_1) {
+ if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
+@@ -58,7 +58,7 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ }
+
+ /* o10-36.2.2 */
+- if (unlikely((mw->access & IB_ZERO_BASED))) {
++ if (unlikely((access & IB_ZERO_BASED))) {
+ rxe_dbg_mw(mw, "attempt to bind a zero based type 1 MW\n");
+ return -EINVAL;
+ }
+@@ -104,7 +104,7 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ }
+
+ /* C10-74 */
+- if (unlikely((mw->access &
++ if (unlikely((access &
+ (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
+ !(mr->access & IB_ACCESS_LOCAL_WRITE))) {
+ rxe_dbg_mw(mw,
+@@ -113,7 +113,7 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ }
+
+ /* C10-75 */
+- if (mw->access & IB_ZERO_BASED) {
++ if (access & IB_ZERO_BASED) {
+ if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
+ rxe_dbg_mw(mw,
+ "attempt to bind a ZB MW outside of the MR\n");
+@@ -133,12 +133,12 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ }
+
+ static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+- struct rxe_mw *mw, struct rxe_mr *mr)
++ struct rxe_mw *mw, struct rxe_mr *mr, int access)
+ {
+ u32 key = wqe->wr.wr.mw.rkey & 0xff;
+
+ mw->rkey = (mw->rkey & ~0xff) | key;
+- mw->access = wqe->wr.wr.mw.access;
++ mw->access = access;
+ mw->state = RXE_MW_STATE_VALID;
+ mw->addr = wqe->wr.wr.mw.addr;
+ mw->length = wqe->wr.wr.mw.length;
+@@ -169,6 +169,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
+ u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;
++ int access = wqe->wr.wr.mw.access;
+
+ mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
+ if (unlikely(!mw)) {
+@@ -198,11 +199,11 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+
+ spin_lock_bh(&mw->lock);
+
+- ret = rxe_check_bind_mw(qp, wqe, mw, mr);
++ ret = rxe_check_bind_mw(qp, wqe, mw, mr, access);
+ if (ret)
+ goto err_unlock;
+
+- rxe_do_bind_mw(qp, wqe, mw, mr);
++ rxe_do_bind_mw(qp, wqe, mw, mr, access);
+ err_unlock:
+ spin_unlock_bh(&mw->lock);
+ err_drop_mr:
+diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
+index eecca671b5884..a3f45e0ee0c75 100644
+--- a/drivers/input/misc/adxl34x.c
++++ b/drivers/input/misc/adxl34x.c
+@@ -817,8 +817,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
+ AC_WRITE(ac, POWER_CTL, 0);
+
+ err = request_threaded_irq(ac->irq, NULL, adxl34x_irq,
+- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+- dev_name(dev), ac);
++ IRQF_ONESHOT, dev_name(dev), ac);
+ if (err) {
+ dev_err(dev, "irq %d busy?\n", ac->irq);
+ goto err_free_mem;
+diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
+index 8a9ebfc04a2d9..85371fa1a03ed 100644
+--- a/drivers/input/misc/drv260x.c
++++ b/drivers/input/misc/drv260x.c
+@@ -435,6 +435,7 @@ static int drv260x_init(struct drv260x_data *haptics)
+ }
+
+ do {
++ usleep_range(15000, 15500);
+ error = regmap_read(haptics->regmap, DRV260X_GO, &cal_buf);
+ if (error) {
+ dev_err(&haptics->client->dev,
+diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
+index b6a27ebae977b..74d77d8aaeff2 100644
+--- a/drivers/input/misc/pm8941-pwrkey.c
++++ b/drivers/input/misc/pm8941-pwrkey.c
+@@ -50,7 +50,10 @@
+ #define PON_RESIN_PULL_UP BIT(0)
+
+ #define PON_DBC_CTL 0x71
+-#define PON_DBC_DELAY_MASK 0x7
++#define PON_DBC_DELAY_MASK_GEN1 0x7
++#define PON_DBC_DELAY_MASK_GEN2 0xf
++#define PON_DBC_SHIFT_GEN1 6
++#define PON_DBC_SHIFT_GEN2 14
+
+ struct pm8941_data {
+ unsigned int pull_up_bit;
+@@ -247,7 +250,7 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
+ struct device *parent;
+ struct device_node *regmap_node;
+ const __be32 *addr;
+- u32 req_delay;
++ u32 req_delay, mask, delay_shift;
+ int error;
+
+ if (of_property_read_u32(pdev->dev.of_node, "debounce", &req_delay))
+@@ -336,12 +339,20 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
+ pwrkey->input->phys = pwrkey->data->phys;
+
+ if (pwrkey->data->supports_debounce_config) {
+- req_delay = (req_delay << 6) / USEC_PER_SEC;
++ if (pwrkey->subtype >= PON_SUBTYPE_GEN2_PRIMARY) {
++ mask = PON_DBC_DELAY_MASK_GEN2;
++ delay_shift = PON_DBC_SHIFT_GEN2;
++ } else {
++ mask = PON_DBC_DELAY_MASK_GEN1;
++ delay_shift = PON_DBC_SHIFT_GEN1;
++ }
++
++ req_delay = (req_delay << delay_shift) / USEC_PER_SEC;
+ req_delay = ilog2(req_delay);
+
+ error = regmap_update_bits(pwrkey->regmap,
+ pwrkey->baseaddr + PON_DBC_CTL,
+- PON_DBC_DELAY_MASK,
++ mask,
+ req_delay);
+ if (error) {
+ dev_err(&pdev->dev, "failed to set debounce: %d\n",
+diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
+index 0cd6f626adec5..7cb26929dc732 100644
+--- a/drivers/input/touchscreen/cyttsp4_core.c
++++ b/drivers/input/touchscreen/cyttsp4_core.c
+@@ -1263,9 +1263,8 @@ static void cyttsp4_stop_wd_timer(struct cyttsp4 *cd)
+ * Ensure we wait until the watchdog timer
+ * running on a different CPU finishes
+ */
+- del_timer_sync(&cd->watchdog_timer);
++ timer_shutdown_sync(&cd->watchdog_timer);
+ cancel_work_sync(&cd->watchdog_work);
+- del_timer_sync(&cd->watchdog_timer);
+ }
+
+ static void cyttsp4_watchdog_timer(struct timer_list *t)
+diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
+index a0c66f47a65ad..532e12ea23efe 100644
+--- a/drivers/iommu/iommufd/device.c
++++ b/drivers/iommu/iommufd/device.c
+@@ -560,8 +560,8 @@ void iommufd_access_unpin_pages(struct iommufd_access *access,
+ iopt_area_iova_to_index(
+ area,
+ min(last_iova, iopt_area_last_iova(area))));
+- up_read(&iopt->iova_rwsem);
+ WARN_ON(!iopt_area_contig_done(&iter));
++ up_read(&iopt->iova_rwsem);
+ }
+ EXPORT_SYMBOL_NS_GPL(iommufd_access_unpin_pages, IOMMUFD);
+
+diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
+index e0ae72b9e67f8..724c4c5742417 100644
+--- a/drivers/iommu/iommufd/io_pagetable.c
++++ b/drivers/iommu/iommufd/io_pagetable.c
+@@ -458,6 +458,7 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
+ {
+ struct iopt_area *area;
+ unsigned long unmapped_bytes = 0;
++ unsigned int tries = 0;
+ int rc = -ENOENT;
+
+ /*
+@@ -484,19 +485,26 @@ again:
+ goto out_unlock_iova;
+ }
+
++ if (area_first != start)
++ tries = 0;
++
+ /*
+ * num_accesses writers must hold the iova_rwsem too, so we can
+ * safely read it under the write side of the iovam_rwsem
+ * without the pages->mutex.
+ */
+ if (area->num_accesses) {
++ size_t length = iopt_area_length(area);
++
+ start = area_first;
+ area->prevent_access = true;
+ up_write(&iopt->iova_rwsem);
+ up_read(&iopt->domains_rwsem);
+- iommufd_access_notify_unmap(iopt, area_first,
+- iopt_area_length(area));
+- if (WARN_ON(READ_ONCE(area->num_accesses)))
++
++ iommufd_access_notify_unmap(iopt, area_first, length);
++ /* Something is not responding to unmap requests. */
++ tries++;
++ if (WARN_ON(tries > 100))
+ return -EDEADLOCK;
+ goto again;
+ }
+diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
+index 5b8fe9bfa9a5b..3551ed057774e 100644
+--- a/drivers/iommu/virtio-iommu.c
++++ b/drivers/iommu/virtio-iommu.c
+@@ -788,6 +788,29 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+ return 0;
+ }
+
++static void viommu_detach_dev(struct viommu_endpoint *vdev)
++{
++ int i;
++ struct virtio_iommu_req_detach req;
++ struct viommu_domain *vdomain = vdev->vdomain;
++ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(vdev->dev);
++
++ if (!vdomain)
++ return;
++
++ req = (struct virtio_iommu_req_detach) {
++ .head.type = VIRTIO_IOMMU_T_DETACH,
++ .domain = cpu_to_le32(vdomain->id),
++ };
++
++ for (i = 0; i < fwspec->num_ids; i++) {
++ req.endpoint = cpu_to_le32(fwspec->ids[i]);
++ WARN_ON(viommu_send_req_sync(vdev->viommu, &req, sizeof(req)));
++ }
++ vdomain->nr_endpoints--;
++ vdev->vdomain = NULL;
++}
++
+ static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
+@@ -810,25 +833,26 @@ static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
+ if (ret)
+ return ret;
+
+- map = (struct virtio_iommu_req_map) {
+- .head.type = VIRTIO_IOMMU_T_MAP,
+- .domain = cpu_to_le32(vdomain->id),
+- .virt_start = cpu_to_le64(iova),
+- .phys_start = cpu_to_le64(paddr),
+- .virt_end = cpu_to_le64(end),
+- .flags = cpu_to_le32(flags),
+- };
+-
+- if (!vdomain->nr_endpoints)
+- return 0;
++ if (vdomain->nr_endpoints) {
++ map = (struct virtio_iommu_req_map) {
++ .head.type = VIRTIO_IOMMU_T_MAP,
++ .domain = cpu_to_le32(vdomain->id),
++ .virt_start = cpu_to_le64(iova),
++ .phys_start = cpu_to_le64(paddr),
++ .virt_end = cpu_to_le64(end),
++ .flags = cpu_to_le32(flags),
++ };
+
+- ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
+- if (ret)
+- viommu_del_mappings(vdomain, iova, end);
+- else if (mapped)
++ ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
++ if (ret) {
++ viommu_del_mappings(vdomain, iova, end);
++ return ret;
++ }
++ }
++ if (mapped)
+ *mapped = size;
+
+- return ret;
++ return 0;
+ }
+
+ static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
+@@ -990,6 +1014,7 @@ static void viommu_release_device(struct device *dev)
+ {
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+
++ viommu_detach_dev(vdev);
+ iommu_put_resv_regions(dev, &vdev->resv_regions);
+ kfree(vdev);
+ }
+diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
+index 5f47d8ee4ae39..b9dcc8e78c750 100644
+--- a/drivers/irqchip/irq-jcore-aic.c
++++ b/drivers/irqchip/irq-jcore-aic.c
+@@ -68,6 +68,7 @@ static int __init aic_irq_of_init(struct device_node *node,
+ unsigned min_irq = JCORE_AIC2_MIN_HWIRQ;
+ unsigned dom_sz = JCORE_AIC_MAX_HWIRQ+1;
+ struct irq_domain *domain;
++ int ret;
+
+ pr_info("Initializing J-Core AIC\n");
+
+@@ -100,6 +101,12 @@ static int __init aic_irq_of_init(struct device_node *node,
+ jcore_aic.irq_unmask = noop;
+ jcore_aic.name = "AIC";
+
++ ret = irq_alloc_descs(-1, min_irq, dom_sz - min_irq,
++ of_node_to_nid(node));
++
++ if (ret < 0)
++ return ret;
++
+ domain = irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq,
+ &jcore_aic_irqdomain_ops,
+ &jcore_aic);
+diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
+index 90181c42840b4..873a326ed6cbc 100644
+--- a/drivers/irqchip/irq-loongson-eiointc.c
++++ b/drivers/irqchip/irq-loongson-eiointc.c
+@@ -317,7 +317,7 @@ static void eiointc_resume(void)
+ desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j);
+ if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) {
+ raw_spin_lock(&desc->lock);
+- irq_data = &desc->irq_data;
++ irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc));
+ eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0);
+ raw_spin_unlock(&desc->lock);
+ }
+diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
+index 6a3f7498ea8ea..8bbb2b114636c 100644
+--- a/drivers/irqchip/irq-stm32-exti.c
++++ b/drivers/irqchip/irq-stm32-exti.c
+@@ -173,6 +173,16 @@ static struct irq_chip stm32_exti_h_chip_direct;
+ #define EXTI_INVALID_IRQ U8_MAX
+ #define STM32MP1_DESC_IRQ_SIZE (ARRAY_SIZE(stm32mp1_exti_banks) * IRQS_PER_BANK)
+
++/*
++ * Use some intentionally tricky logic here to initialize the whole array to
++ * EXTI_INVALID_IRQ, but then override certain fields, requiring us to indicate
++ * that we "know" that there are overrides in this structure, and we'll need to
++ * disable that warning from W=1 builds.
++ */
++__diag_push();
++__diag_ignore_all("-Woverride-init",
++ "logic to initialize all and then override some is OK");
++
+ static const u8 stm32mp1_desc_irq[] = {
+ /* default value */
+ [0 ... (STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ,
+@@ -266,6 +276,8 @@ static const u8 stm32mp13_desc_irq[] = {
+ [70] = 98,
+ };
+
++__diag_pop();
++
+ static const struct stm32_exti_drv_data stm32mp1_drv_data = {
+ .exti_banks = stm32mp1_exti_banks,
+ .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index e7cc6ba1b657f..8bbeeec70905c 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -54,14 +54,7 @@ __acquires(bitmap->lock)
+ {
+ unsigned char *mappage;
+
+- if (page >= bitmap->pages) {
+- /* This can happen if bitmap_start_sync goes beyond
+- * End-of-device while looking for a whole page.
+- * It is harmless.
+- */
+- return -EINVAL;
+- }
+-
++ WARN_ON_ONCE(page >= bitmap->pages);
+ if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
+ return 0;
+
+@@ -1000,7 +993,6 @@ static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
+ return set;
+ }
+
+-
+ /* this gets called when the md device is ready to unplug its underlying
+ * (slave) device queues -- before we let any writes go down, we need to
+ * sync the dirty pages of the bitmap file to disk */
+@@ -1010,8 +1002,7 @@ void md_bitmap_unplug(struct bitmap *bitmap)
+ int dirty, need_write;
+ int writing = 0;
+
+- if (!bitmap || !bitmap->storage.filemap ||
+- test_bit(BITMAP_STALE, &bitmap->flags))
++ if (!md_bitmap_enabled(bitmap))
+ return;
+
+ /* look at each page to see if there are any set bits that need to be
+@@ -1364,6 +1355,14 @@ __acquires(bitmap->lock)
+ sector_t csize;
+ int err;
+
++ if (page >= bitmap->pages) {
++ /*
++ * This can happen if bitmap_start_sync goes beyond
++ * End-of-device while looking for a whole page or
++ * user set a huge number to sysfs bitmap_set_bits.
++ */
++ return NULL;
++ }
+ err = md_bitmap_checkpage(bitmap, page, create, 0);
+
+ if (bitmap->bp[page].hijacked ||
+diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
+index cfd7395de8fd3..3a4750952b3a7 100644
+--- a/drivers/md/md-bitmap.h
++++ b/drivers/md/md-bitmap.h
+@@ -273,6 +273,13 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
+ sector_t *lo, sector_t *hi, bool clear_bits);
+ void md_bitmap_free(struct bitmap *bitmap);
+ void md_bitmap_wait_behind_writes(struct mddev *mddev);
++
++static inline bool md_bitmap_enabled(struct bitmap *bitmap)
++{
++ return bitmap && bitmap->storage.filemap &&
++ !test_bit(BITMAP_STALE, &bitmap->flags);
++}
++
+ #endif
+
+ #endif
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index d479e1656ef33..2674cb1d699c0 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -3814,8 +3814,9 @@ int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
+ static ssize_t
+ safe_delay_show(struct mddev *mddev, char *page)
+ {
+- int msec = (mddev->safemode_delay*1000)/HZ;
+- return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
++ unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ;
++
++ return sprintf(page, "%u.%03u\n", msec/1000, msec%1000);
+ }
+ static ssize_t
+ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
+@@ -3827,7 +3828,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
+ return -EINVAL;
+ }
+
+- if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
++ if (strict_strtoul_scaled(cbuf, &msec, 3) < 0 || msec > UINT_MAX / HZ)
+ return -EINVAL;
+ if (msec == 0)
+ mddev->safemode_delay = 0;
+@@ -4497,6 +4498,8 @@ max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len
+ rv = kstrtouint(buf, 10, &n);
+ if (rv < 0)
+ return rv;
++ if (n > INT_MAX)
++ return -EINVAL;
+ atomic_set(&mddev->max_corr_read_errors, n);
+ return len;
+ }
+diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
+index e61f6cad4e08e..e0c8ac8146331 100644
+--- a/drivers/md/raid1-10.c
++++ b/drivers/md/raid1-10.c
+@@ -109,3 +109,45 @@ static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
+ size -= len;
+ } while (idx++ < RESYNC_PAGES && size > 0);
+ }
++
++
++static inline void raid1_submit_write(struct bio *bio)
++{
++ struct md_rdev *rdev = (void *)bio->bi_bdev;
++
++ bio->bi_next = NULL;
++ bio_set_dev(bio, rdev->bdev);
++ if (test_bit(Faulty, &rdev->flags))
++ bio_io_error(bio);
++ else if (unlikely(bio_op(bio) == REQ_OP_DISCARD &&
++ !bdev_max_discard_sectors(bio->bi_bdev)))
++ /* Just ignore it */
++ bio_endio(bio);
++ else
++ submit_bio_noacct(bio);
++}
++
++static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio,
++ blk_plug_cb_fn unplug)
++{
++ struct raid1_plug_cb *plug = NULL;
++ struct blk_plug_cb *cb;
++
++ /*
++ * If bitmap is not enabled, it's safe to submit the io directly, and
++ * this can get optimal performance.
++ */
++ if (!md_bitmap_enabled(mddev->bitmap)) {
++ raid1_submit_write(bio);
++ return true;
++ }
++
++ cb = blk_check_plugged(unplug, mddev, sizeof(*plug));
++ if (!cb)
++ return false;
++
++ plug = container_of(cb, struct raid1_plug_cb, cb);
++ bio_list_add(&plug->pending, bio);
++
++ return true;
++}
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 68a9e2d9985b2..e51b77a3a8397 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -799,17 +799,8 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
+
+ while (bio) { /* submit pending writes */
+ struct bio *next = bio->bi_next;
+- struct md_rdev *rdev = (void *)bio->bi_bdev;
+- bio->bi_next = NULL;
+- bio_set_dev(bio, rdev->bdev);
+- if (test_bit(Faulty, &rdev->flags)) {
+- bio_io_error(bio);
+- } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+- !bdev_max_discard_sectors(bio->bi_bdev)))
+- /* Just ignore it */
+- bio_endio(bio);
+- else
+- submit_bio_noacct(bio);
++
++ raid1_submit_write(bio);
+ bio = next;
+ cond_resched();
+ }
+@@ -1343,8 +1334,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ struct bitmap *bitmap = mddev->bitmap;
+ unsigned long flags;
+ struct md_rdev *blocked_rdev;
+- struct blk_plug_cb *cb;
+- struct raid1_plug_cb *plug = NULL;
+ int first_clone;
+ int max_sectors;
+ bool write_behind = false;
+@@ -1573,15 +1562,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ r1_bio->sector);
+ /* flush_pending_writes() needs access to the rdev so...*/
+ mbio->bi_bdev = (void *)rdev;
+-
+- cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
+- if (cb)
+- plug = container_of(cb, struct raid1_plug_cb, cb);
+- else
+- plug = NULL;
+- if (plug) {
+- bio_list_add(&plug->pending, mbio);
+- } else {
++ if (!raid1_add_bio_to_plug(mddev, mbio, raid1_unplug)) {
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio_list_add(&conf->pending_bio_list, mbio);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index ea6967aeaa02a..f2f7538dd2a68 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -779,8 +779,16 @@ static struct md_rdev *read_balance(struct r10conf *conf,
+ disk = r10_bio->devs[slot].devnum;
+ rdev = rcu_dereference(conf->mirrors[disk].replacement);
+ if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
+- r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
++ r10_bio->devs[slot].addr + sectors >
++ rdev->recovery_offset) {
++ /*
++ * Read replacement first to prevent reading both rdev
++ * and replacement as NULL during replacement replace
++ * rdev.
++ */
++ smp_mb();
+ rdev = rcu_dereference(conf->mirrors[disk].rdev);
++ }
+ if (rdev == NULL ||
+ test_bit(Faulty, &rdev->flags))
+ continue;
+@@ -909,17 +917,8 @@ static void flush_pending_writes(struct r10conf *conf)
+
+ while (bio) { /* submit pending writes */
+ struct bio *next = bio->bi_next;
+- struct md_rdev *rdev = (void*)bio->bi_bdev;
+- bio->bi_next = NULL;
+- bio_set_dev(bio, rdev->bdev);
+- if (test_bit(Faulty, &rdev->flags)) {
+- bio_io_error(bio);
+- } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+- !bdev_max_discard_sectors(bio->bi_bdev)))
+- /* Just ignore it */
+- bio_endio(bio);
+- else
+- submit_bio_noacct(bio);
++
++ raid1_submit_write(bio);
+ bio = next;
+ }
+ blk_finish_plug(&plug);
+@@ -1128,17 +1127,8 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
+
+ while (bio) { /* submit pending writes */
+ struct bio *next = bio->bi_next;
+- struct md_rdev *rdev = (void*)bio->bi_bdev;
+- bio->bi_next = NULL;
+- bio_set_dev(bio, rdev->bdev);
+- if (test_bit(Faulty, &rdev->flags)) {
+- bio_io_error(bio);
+- } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+- !bdev_max_discard_sectors(bio->bi_bdev)))
+- /* Just ignore it */
+- bio_endio(bio);
+- else
+- submit_bio_noacct(bio);
++
++ raid1_submit_write(bio);
+ bio = next;
+ }
+ kfree(plug);
+@@ -1280,8 +1270,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
+ const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
+ const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
+ unsigned long flags;
+- struct blk_plug_cb *cb;
+- struct raid1_plug_cb *plug = NULL;
+ struct r10conf *conf = mddev->private;
+ struct md_rdev *rdev;
+ int devnum = r10_bio->devs[n_copy].devnum;
+@@ -1321,14 +1309,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
+
+ atomic_inc(&r10_bio->remaining);
+
+- cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
+- if (cb)
+- plug = container_of(cb, struct raid1_plug_cb, cb);
+- else
+- plug = NULL;
+- if (plug) {
+- bio_list_add(&plug->pending, mbio);
+- } else {
++ if (!raid1_add_bio_to_plug(mddev, mbio, raid10_unplug)) {
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio_list_add(&conf->pending_bio_list, mbio);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+@@ -1477,9 +1458,15 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+
+ for (i = 0; i < conf->copies; i++) {
+ int d = r10_bio->devs[i].devnum;
+- struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
+- struct md_rdev *rrdev = rcu_dereference(
+- conf->mirrors[d].replacement);
++ struct md_rdev *rdev, *rrdev;
++
++ rrdev = rcu_dereference(conf->mirrors[d].replacement);
++ /*
++ * Read replacement first to prevent reading both rdev and
++ * replacement as NULL during replacement replace rdev.
++ */
++ smp_mb();
++ rdev = rcu_dereference(conf->mirrors[d].rdev);
+ if (rdev == rrdev)
+ rrdev = NULL;
+ if (rdev && (test_bit(Faulty, &rdev->flags)))
+@@ -3436,7 +3423,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ int must_sync;
+ int any_working;
+ int need_recover = 0;
+- int need_replace = 0;
+ struct raid10_info *mirror = &conf->mirrors[i];
+ struct md_rdev *mrdev, *mreplace;
+
+@@ -3448,11 +3434,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ !test_bit(Faulty, &mrdev->flags) &&
+ !test_bit(In_sync, &mrdev->flags))
+ need_recover = 1;
+- if (mreplace != NULL &&
+- !test_bit(Faulty, &mreplace->flags))
+- need_replace = 1;
++ if (mreplace && test_bit(Faulty, &mreplace->flags))
++ mreplace = NULL;
+
+- if (!need_recover && !need_replace) {
++ if (!need_recover && !mreplace) {
+ rcu_read_unlock();
+ continue;
+ }
+@@ -3468,8 +3453,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ rcu_read_unlock();
+ continue;
+ }
+- if (mreplace && test_bit(Faulty, &mreplace->flags))
+- mreplace = NULL;
+ /* Unless we are doing a full sync, or a replacement
+ * we only need to recover the block if it is set in
+ * the bitmap
+@@ -3592,11 +3575,11 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ bio = r10_bio->devs[1].repl_bio;
+ if (bio)
+ bio->bi_end_io = NULL;
+- /* Note: if need_replace, then bio
++ /* Note: if replace is not NULL, then bio
+ * cannot be NULL as r10buf_pool_alloc will
+ * have allocated it.
+ */
+- if (!need_replace)
++ if (!mreplace)
+ break;
+ bio->bi_next = biolist;
+ biolist = bio;
+diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
+index 76c82e9c8fceb..9339f80b21c50 100644
+--- a/drivers/memory/brcmstb_dpfe.c
++++ b/drivers/memory/brcmstb_dpfe.c
+@@ -434,15 +434,17 @@ static void __finalize_command(struct brcmstb_dpfe_priv *priv)
+ static int __send_command(struct brcmstb_dpfe_priv *priv, unsigned int cmd,
+ u32 result[])
+ {
+- const u32 *msg = priv->dpfe_api->command[cmd];
+ void __iomem *regs = priv->regs;
+ unsigned int i, chksum, chksum_idx;
++ const u32 *msg;
+ int ret = 0;
+ u32 resp;
+
+ if (cmd >= DPFE_CMD_MAX)
+ return -1;
+
++ msg = priv->dpfe_api->command[cmd];
++
+ mutex_lock(&priv->lock);
+
+ /* Wait for DCPU to become ready */
+diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
+index 42bfc46842b82..461f5ffd02bc1 100644
+--- a/drivers/memstick/host/r592.c
++++ b/drivers/memstick/host/r592.c
+@@ -44,12 +44,10 @@ static const char *tpc_names[] = {
+ * memstick_debug_get_tpc_name - debug helper that returns string for
+ * a TPC number
+ */
+-const char *memstick_debug_get_tpc_name(int tpc)
++static __maybe_unused const char *memstick_debug_get_tpc_name(int tpc)
+ {
+ return tpc_names[tpc-1];
+ }
+-EXPORT_SYMBOL(memstick_debug_get_tpc_name);
+-
+
+ /* Read a register*/
+ static inline u32 r592_read_reg(struct r592_device *dev, int address)
+diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
+index fb733288cca3b..faea4ff44c6fe 100644
+--- a/drivers/mfd/tps65010.c
++++ b/drivers/mfd/tps65010.c
+@@ -506,12 +506,8 @@ static void tps65010_remove(struct i2c_client *client)
+ struct tps65010 *tps = i2c_get_clientdata(client);
+ struct tps65010_board *board = dev_get_platdata(&client->dev);
+
+- if (board && board->teardown) {
+- int status = board->teardown(client, board->context);
+- if (status < 0)
+- dev_dbg(&client->dev, "board %s %s err %d\n",
+- "teardown", client->name, status);
+- }
++ if (board && board->teardown)
++ board->teardown(client, &tps->chip);
+ if (client->irq > 0)
+ free_irq(client->irq, tps);
+ cancel_delayed_work_sync(&tps->work);
+@@ -619,7 +615,7 @@ static int tps65010_probe(struct i2c_client *client)
+ tps, DEBUG_FOPS);
+
+ /* optionally register GPIOs */
+- if (board && board->base != 0) {
++ if (board) {
+ tps->outmask = board->outmask;
+
+ tps->chip.label = client->name;
+@@ -632,7 +628,7 @@ static int tps65010_probe(struct i2c_client *client)
+ /* NOTE: only partial support for inputs; nyet IRQs */
+ tps->chip.get = tps65010_gpio_get;
+
+- tps->chip.base = board->base;
++ tps->chip.base = -1;
+ tps->chip.ngpio = 7;
+ tps->chip.can_sleep = 1;
+
+@@ -641,7 +637,7 @@ static int tps65010_probe(struct i2c_client *client)
+ dev_err(&client->dev, "can't add gpiochip, err %d\n",
+ status);
+ else if (board->setup) {
+- status = board->setup(client, board->context);
++ status = board->setup(client, &tps->chip);
+ if (status < 0) {
+ dev_dbg(&client->dev,
+ "board %s %s err %d\n",
+diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
+index cfdd1ff40b865..4edf9057fa79d 100644
+--- a/drivers/mmc/core/card.h
++++ b/drivers/mmc/core/card.h
+@@ -53,6 +53,10 @@ struct mmc_fixup {
+ unsigned int manfid;
+ unsigned short oemid;
+
++ /* Manufacturing date */
++ unsigned short year;
++ unsigned char month;
++
+ /* SDIO-specific fields. You can use SDIO_ANY_ID here of course */
+ u16 cis_vendor, cis_device;
+
+@@ -68,6 +72,8 @@ struct mmc_fixup {
+
+ #define CID_MANFID_ANY (-1u)
+ #define CID_OEMID_ANY ((unsigned short) -1)
++#define CID_YEAR_ANY ((unsigned short) -1)
++#define CID_MONTH_ANY ((unsigned char) -1)
+ #define CID_NAME_ANY (NULL)
+
+ #define EXT_CSD_REV_ANY (-1u)
+@@ -81,17 +87,21 @@ struct mmc_fixup {
+ #define CID_MANFID_APACER 0x27
+ #define CID_MANFID_KINGSTON 0x70
+ #define CID_MANFID_HYNIX 0x90
++#define CID_MANFID_KINGSTON_SD 0x9F
+ #define CID_MANFID_NUMONYX 0xFE
+
+ #define END_FIXUP { NULL }
+
+-#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \
+- _cis_vendor, _cis_device, \
+- _fixup, _data, _ext_csd_rev) \
++#define _FIXUP_EXT(_name, _manfid, _oemid, _year, _month, \
++ _rev_start, _rev_end, \
++ _cis_vendor, _cis_device, \
++ _fixup, _data, _ext_csd_rev) \
+ { \
+ .name = (_name), \
+ .manfid = (_manfid), \
+ .oemid = (_oemid), \
++ .year = (_year), \
++ .month = (_month), \
+ .rev_start = (_rev_start), \
+ .rev_end = (_rev_end), \
+ .cis_vendor = (_cis_vendor), \
+@@ -103,8 +113,8 @@ struct mmc_fixup {
+
+ #define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \
+ _fixup, _data, _ext_csd_rev) \
+- _FIXUP_EXT(_name, _manfid, \
+- _oemid, _rev_start, _rev_end, \
++ _FIXUP_EXT(_name, _manfid, _oemid, CID_YEAR_ANY, CID_MONTH_ANY, \
++ _rev_start, _rev_end, \
+ SDIO_ANY_ID, SDIO_ANY_ID, \
+ _fixup, _data, _ext_csd_rev) \
+
+@@ -118,8 +128,9 @@ struct mmc_fixup {
+ _ext_csd_rev)
+
+ #define SDIO_FIXUP(_vendor, _device, _fixup, _data) \
+- _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \
+- CID_OEMID_ANY, 0, -1ull, \
++ _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, CID_OEMID_ANY, \
++ CID_YEAR_ANY, CID_MONTH_ANY, \
++ 0, -1ull, \
+ _vendor, _device, \
+ _fixup, _data, EXT_CSD_REV_ANY) \
+
+@@ -264,4 +275,9 @@ static inline int mmc_card_broken_sd_discard(const struct mmc_card *c)
+ return c->quirks & MMC_QUIRK_BROKEN_SD_DISCARD;
+ }
+
++static inline int mmc_card_broken_sd_cache(const struct mmc_card *c)
++{
++ return c->quirks & MMC_QUIRK_BROKEN_SD_CACHE;
++}
++
+ #endif
+diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
+index 29b9497936df9..a7ffbc930ea9d 100644
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -53,6 +53,15 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
+ MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+
++ /*
++ * Kingston Canvas Go! Plus microSD cards never finish SD cache flush.
++ * This has so far only been observed on cards from 11/2019, while new
++ * cards from 2023/05 do not exhibit this behavior.
++ */
++ _FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11,
++ 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
++ MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
++
+ /*
+ * Some SD cards lockup while using CMD23 multiblock transfers.
+ */
+@@ -209,6 +218,10 @@ static inline void mmc_fixup_device(struct mmc_card *card,
+ if (f->of_compatible &&
+ !mmc_fixup_of_compatible_match(card, f->of_compatible))
+ continue;
++ if (f->year != CID_YEAR_ANY && f->year != card->cid.year)
++ continue;
++ if (f->month != CID_MONTH_ANY && f->month != card->cid.month)
++ continue;
+
+ dev_dbg(&card->dev, "calling %ps\n", f->vendor_fixup);
+ f->vendor_fixup(card, f->data);
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index 72b664ed90cf6..246ce027ae0aa 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -1170,7 +1170,7 @@ static int sd_parse_ext_reg_perf(struct mmc_card *card, u8 fno, u8 page,
+ card->ext_perf.feature_support |= SD_EXT_PERF_HOST_MAINT;
+
+ /* Cache support at bit 0. */
+- if (reg_buf[4] & BIT(0))
++ if ((reg_buf[4] & BIT(0)) && !mmc_card_broken_sd_cache(card))
+ card->ext_perf.feature_support |= SD_EXT_PERF_CACHE;
+
+ /* Command queue support indicated via queue depth bits (0 to 4). */
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 9785ec91654f7..97c42aacaf346 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -2707,7 +2707,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
+
+ /* Support for SDIO eint irq ? */
+ if ((mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ) && (mmc->pm_caps & MMC_PM_KEEP_POWER)) {
+- host->eint_irq = platform_get_irq_byname(pdev, "sdio_wakeup");
++ host->eint_irq = platform_get_irq_byname_optional(pdev, "sdio_wakeup");
+ if (host->eint_irq > 0) {
+ host->pins_eint = pinctrl_lookup_state(host->pinctrl, "state_eint");
+ if (IS_ERR(host->pins_eint)) {
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 806d33d9f7124..172939e275a3b 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -4171,7 +4171,7 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
+ return skb->hash;
+
+ return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
+- skb_mac_offset(skb), skb_network_offset(skb),
++ 0, skb_network_offset(skb),
+ skb_headlen(skb));
+ }
+
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index 956a4a57396f9..74a47244f1291 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -538,6 +538,13 @@ static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
+ return 0;
+ }
+
++static inline void kvaser_pciefd_set_skb_timestamp(const struct kvaser_pciefd *pcie,
++ struct sk_buff *skb, u64 timestamp)
++{
++ skb_hwtstamps(skb)->hwtstamp =
++ ns_to_ktime(div_u64(timestamp * 1000, pcie->freq_to_ticks_div));
++}
++
+ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
+ {
+ u32 mode;
+@@ -1171,7 +1178,6 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
+ struct canfd_frame *cf;
+ struct can_priv *priv;
+ struct net_device_stats *stats;
+- struct skb_shared_hwtstamps *shhwtstamps;
+ u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+
+ if (ch_id >= pcie->nr_channels)
+@@ -1214,12 +1220,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
+ stats->rx_bytes += cf->len;
+ }
+ stats->rx_packets++;
+-
+- shhwtstamps = skb_hwtstamps(skb);
+-
+- shhwtstamps->hwtstamp =
+- ns_to_ktime(div_u64(p->timestamp * 1000,
+- pcie->freq_to_ticks_div));
++ kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
+
+ return netif_rx(skb);
+ }
+@@ -1282,7 +1283,6 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
+ struct net_device *ndev = can->can.dev;
+ struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+- struct skb_shared_hwtstamps *shhwtstamps;
+ struct net_device_stats *stats = &ndev->stats;
+
+ old_state = can->can.state;
+@@ -1323,10 +1323,7 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
+ return -ENOMEM;
+ }
+
+- shhwtstamps = skb_hwtstamps(skb);
+- shhwtstamps->hwtstamp =
+- ns_to_ktime(div_u64(p->timestamp * 1000,
+- can->kv_pcie->freq_to_ticks_div));
++ kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
+
+ cf->data[6] = bec.txerr;
+@@ -1374,7 +1371,6 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
+ struct net_device *ndev = can->can.dev;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+- struct skb_shared_hwtstamps *shhwtstamps;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb) {
+@@ -1394,10 +1390,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+
+- shhwtstamps = skb_hwtstamps(skb);
+- shhwtstamps->hwtstamp =
+- ns_to_ktime(div_u64(p->timestamp * 1000,
+- can->kv_pcie->freq_to_ticks_div));
++ kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+@@ -1526,6 +1519,7 @@ static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
+
+ if (skb) {
+ cf->can_id |= CAN_ERR_BUSERROR;
++ kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ netif_rx(skb);
+ } else {
+ stats->rx_dropped++;
+@@ -1557,8 +1551,15 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
+ netdev_dbg(can->can.dev, "Packet was flushed\n");
+ } else {
+ int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
+- int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
+- u8 count = ioread32(can->reg_base +
++ int dlc;
++ u8 count;
++ struct sk_buff *skb;
++
++ skb = can->can.echo_skb[echo_idx];
++ if (skb)
++ kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
++ dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
++ count = ioread32(can->reg_base +
+ KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
+
+ if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index e809249500e18..03c5aecd61402 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -515,6 +515,12 @@ enum ice_pf_flags {
+ ICE_PF_FLAGS_NBITS /* must be last */
+ };
+
++enum ice_misc_thread_tasks {
++ ICE_MISC_THREAD_EXTTS_EVENT,
++ ICE_MISC_THREAD_TX_TSTAMP,
++ ICE_MISC_THREAD_NBITS /* must be last */
++};
++
+ struct ice_switchdev_info {
+ struct ice_vsi *control_vsi;
+ struct ice_vsi *uplink_vsi;
+@@ -557,6 +563,7 @@ struct ice_pf {
+ DECLARE_BITMAP(features, ICE_F_MAX);
+ DECLARE_BITMAP(state, ICE_STATE_NBITS);
+ DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
++ DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS);
+ unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
+ unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */
+ unsigned long serv_tmr_period;
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 98e8ce743fb2e..65f77ab3fc806 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -3134,20 +3134,28 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
+
+ if (oicr & PFINT_OICR_TSYN_TX_M) {
+ ena_mask &= ~PFINT_OICR_TSYN_TX_M;
+- if (!hw->reset_ongoing)
++ if (!hw->reset_ongoing) {
++ set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
+ ret = IRQ_WAKE_THREAD;
++ }
+ }
+
+ if (oicr & PFINT_OICR_TSYN_EVNT_M) {
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
+
+- /* Save EVENTs from GTSYN register */
+- pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
+- GLTSYN_STAT_EVENT1_M |
+- GLTSYN_STAT_EVENT2_M);
+ ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
+- kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
++
++ if (hw->func_caps.ts_func_info.src_tmr_owned) {
++ /* Save EVENTs from GLTSYN register */
++ pf->ptp.ext_ts_irq |= gltsyn_stat &
++ (GLTSYN_STAT_EVENT0_M |
++ GLTSYN_STAT_EVENT1_M |
++ GLTSYN_STAT_EVENT2_M);
++
++ set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread);
++ ret = IRQ_WAKE_THREAD;
++ }
+ }
+
+ #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
+@@ -3191,8 +3199,13 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
+ if (ice_is_reset_in_progress(pf->state))
+ return IRQ_HANDLED;
+
+- while (!ice_ptp_process_ts(pf))
+- usleep_range(50, 100);
++ if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread))
++ ice_ptp_extts_event(pf);
++
++ if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
++ while (!ice_ptp_process_ts(pf))
++ usleep_range(50, 100);
++ }
+
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index ac6f06f9a2ed0..e8507d09b8488 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -1458,15 +1458,11 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+ }
+
+ /**
+- * ice_ptp_extts_work - Workqueue task function
+- * @work: external timestamp work structure
+- *
+- * Service for PTP external clock event
++ * ice_ptp_extts_event - Process PTP external clock event
++ * @pf: Board private structure
+ */
+-static void ice_ptp_extts_work(struct kthread_work *work)
++void ice_ptp_extts_event(struct ice_pf *pf)
+ {
+- struct ice_ptp *ptp = container_of(work, struct ice_ptp, extts_work);
+- struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+ struct ptp_clock_event event;
+ struct ice_hw *hw = &pf->hw;
+ u8 chan, tmr_idx;
+@@ -2558,7 +2554,6 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf)
+ ice_ptp_cfg_timestamp(pf, false);
+
+ kthread_cancel_delayed_work_sync(&ptp->work);
+- kthread_cancel_work_sync(&ptp->extts_work);
+
+ if (test_bit(ICE_PFR_REQ, pf->state))
+ return;
+@@ -2656,7 +2651,6 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
+
+ /* Initialize work functions */
+ kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
+- kthread_init_work(&ptp->extts_work, ice_ptp_extts_work);
+
+ /* Allocate a kworker for handling work required for the ports
+ * connected to the PTP hardware clock.
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
+index 9cda2f43e0e56..9f8902c1e743d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
+@@ -169,7 +169,6 @@ struct ice_ptp_port {
+ * struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
+ * @port: data for the PHY port initialization procedure
+ * @work: delayed work function for periodic tasks
+- * @extts_work: work function for handling external Tx timestamps
+ * @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @cached_phc_jiffies: jiffies when cached_phc_time was last updated
+ * @ext_ts_chan: the external timestamp channel in use
+@@ -190,7 +189,6 @@ struct ice_ptp_port {
+ struct ice_ptp {
+ struct ice_ptp_port port;
+ struct kthread_delayed_work work;
+- struct kthread_work extts_work;
+ u64 cached_phc_time;
+ unsigned long cached_phc_jiffies;
+ u8 ext_ts_chan;
+@@ -256,6 +254,7 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
+ void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
+ int ice_get_ptp_clock_index(struct ice_pf *pf);
+
++void ice_ptp_extts_event(struct ice_pf *pf);
+ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
+ bool ice_ptp_process_ts(struct ice_pf *pf);
+
+@@ -284,6 +283,7 @@ static inline int ice_get_ptp_clock_index(struct ice_pf *pf)
+ return -1;
+ }
+
++static inline void ice_ptp_extts_event(struct ice_pf *pf) { }
+ static inline s8
+ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
+ {
+diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
+index df3e26c0cf01a..f83cbc4a1afa8 100644
+--- a/drivers/net/ethernet/intel/igc/igc.h
++++ b/drivers/net/ethernet/intel/igc/igc.h
+@@ -13,6 +13,7 @@
+ #include <linux/ptp_clock_kernel.h>
+ #include <linux/timecounter.h>
+ #include <linux/net_tstamp.h>
++#include <linux/bitfield.h>
+
+ #include "igc_hw.h"
+
+@@ -311,6 +312,33 @@ extern char igc_driver_name[];
+ #define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+ #define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+
++/* RX-desc Write-Back format RSS Type's */
++enum igc_rss_type_num {
++ IGC_RSS_TYPE_NO_HASH = 0,
++ IGC_RSS_TYPE_HASH_TCP_IPV4 = 1,
++ IGC_RSS_TYPE_HASH_IPV4 = 2,
++ IGC_RSS_TYPE_HASH_TCP_IPV6 = 3,
++ IGC_RSS_TYPE_HASH_IPV6_EX = 4,
++ IGC_RSS_TYPE_HASH_IPV6 = 5,
++ IGC_RSS_TYPE_HASH_TCP_IPV6_EX = 6,
++ IGC_RSS_TYPE_HASH_UDP_IPV4 = 7,
++ IGC_RSS_TYPE_HASH_UDP_IPV6 = 8,
++ IGC_RSS_TYPE_HASH_UDP_IPV6_EX = 9,
++ IGC_RSS_TYPE_MAX = 10,
++};
++#define IGC_RSS_TYPE_MAX_TABLE 16
++#define IGC_RSS_TYPE_MASK GENMASK(3,0) /* 4-bits (3:0) = mask 0x0F */
++
++/* igc_rss_type - Rx descriptor RSS type field */
++static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
++{
++ /* RSS Type 4-bits (3:0) number: 0-9 (above 9 is reserved)
++ * Accessing the same bits via u16 (wb.lower.lo_dword.hs_rss.pkt_info)
++ * is slightly slower than via u32 (wb.lower.lo_dword.data)
++ */
++ return le32_get_bits(rx_desc->wb.lower.lo_dword.data, IGC_RSS_TYPE_MASK);
++}
++
+ /* Interrupt defines */
+ #define IGC_START_ITR 648 /* ~6000 ints/sec */
+ #define IGC_4K_ITR 980
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index b35f5ff3536e5..c85ceed443223 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -1687,14 +1687,36 @@ static void igc_rx_checksum(struct igc_ring *ring,
+ le32_to_cpu(rx_desc->wb.upper.status_error));
+ }
+
++/* Mapping HW RSS Type to enum pkt_hash_types */
++static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = {
++ [IGC_RSS_TYPE_NO_HASH] = PKT_HASH_TYPE_L2,
++ [IGC_RSS_TYPE_HASH_TCP_IPV4] = PKT_HASH_TYPE_L4,
++ [IGC_RSS_TYPE_HASH_IPV4] = PKT_HASH_TYPE_L3,
++ [IGC_RSS_TYPE_HASH_TCP_IPV6] = PKT_HASH_TYPE_L4,
++ [IGC_RSS_TYPE_HASH_IPV6_EX] = PKT_HASH_TYPE_L3,
++ [IGC_RSS_TYPE_HASH_IPV6] = PKT_HASH_TYPE_L3,
++ [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4,
++ [IGC_RSS_TYPE_HASH_UDP_IPV4] = PKT_HASH_TYPE_L4,
++ [IGC_RSS_TYPE_HASH_UDP_IPV6] = PKT_HASH_TYPE_L4,
++ [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4,
++ [10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */
++ [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */
++ [12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisons */
++ [13] = PKT_HASH_TYPE_NONE,
++ [14] = PKT_HASH_TYPE_NONE,
++ [15] = PKT_HASH_TYPE_NONE,
++};
++
+ static inline void igc_rx_hash(struct igc_ring *ring,
+ union igc_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+ {
+- if (ring->netdev->features & NETIF_F_RXHASH)
+- skb_set_hash(skb,
+- le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+- PKT_HASH_TYPE_L3);
++ if (ring->netdev->features & NETIF_F_RXHASH) {
++ u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
++ u32 rss_type = igc_rss_type(rx_desc);
++
++ skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]);
++ }
+ }
+
+ static void igc_rx_vlan(struct igc_ring *rx_ring,
+@@ -6553,6 +6575,7 @@ static int igc_probe(struct pci_dev *pdev,
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ netdev->features |= NETIF_F_TSO_ECN;
++ netdev->features |= NETIF_F_RXHASH;
+ netdev->features |= NETIF_F_RXCSUM;
+ netdev->features |= NETIF_F_HW_CSUM;
+ netdev->features |= NETIF_F_SCTP_CRC;
+diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
+index b63e47af63655..8c019f382a7f3 100644
+--- a/drivers/net/ethernet/sfc/ef10.c
++++ b/drivers/net/ethernet/sfc/ef10.c
+@@ -1297,8 +1297,10 @@ static void efx_ef10_fini_nic(struct efx_nic *efx)
+ {
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
++ spin_lock_bh(&efx->stats_lock);
+ kfree(nic_data->mc_stats);
+ nic_data->mc_stats = NULL;
++ spin_unlock_bh(&efx->stats_lock);
+ }
+
+ static int efx_ef10_init_nic(struct efx_nic *efx)
+@@ -1852,9 +1854,14 @@ static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
+
+ efx_ef10_get_stat_mask(efx, mask);
+
+- efx_nic_copy_stats(efx, nic_data->mc_stats);
+- efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
+- mask, stats, nic_data->mc_stats, false);
++ /* If NIC was fini'd (probably resetting), then we can't read
++ * updated stats right now.
++ */
++ if (nic_data->mc_stats) {
++ efx_nic_copy_stats(efx, nic_data->mc_stats);
++ efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
++ mask, stats, nic_data->mc_stats, false);
++ }
+
+ /* Update derived statistics */
+ efx_nic_fix_nodesc_drop_stat(efx,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 4e59f0e164ec0..29d70ecdac846 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -7394,12 +7394,6 @@ void stmmac_dvr_remove(struct device *dev)
+ netif_carrier_off(ndev);
+ unregister_netdev(ndev);
+
+- /* Serdes power down needs to happen after VLAN filter
+- * is deleted that is triggered by unregister_netdev().
+- */
+- if (priv->plat->serdes_powerdown)
+- priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
+-
+ #ifdef CONFIG_DEBUG_FS
+ stmmac_exit_fs(ndev);
+ #endif
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 3e310b55bce2b..734822321e0ab 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -2042,6 +2042,11 @@ static int axienet_probe(struct platform_device *pdev)
+ goto cleanup_clk;
+ }
+
++ /* Reset core now that clocks are enabled, prior to accessing MDIO */
++ ret = __axienet_device_reset(lp);
++ if (ret)
++ goto cleanup_clk;
++
+ /* Autodetect the need for 64-bit DMA pointers.
+ * When the IP is configured for a bus width bigger than 32 bits,
+ * writing the MSB registers is mandatory, even if they are all 0.
+@@ -2096,11 +2101,6 @@ static int axienet_probe(struct platform_device *pdev)
+ lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+ lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
+
+- /* Reset core now that clocks are enabled, prior to accessing MDIO */
+- ret = __axienet_device_reset(lp);
+- if (ret)
+- goto cleanup_clk;
+-
+ ret = axienet_mdio_setup(lp);
+ if (ret)
+ dev_warn(&pdev->dev,
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 15c7dc82107f4..acb20ad4e37eb 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -631,7 +631,9 @@ static void __gtp_encap_destroy(struct sock *sk)
+ gtp->sk1u = NULL;
+ udp_sk(sk)->encap_type = 0;
+ rcu_assign_sk_user_data(sk, NULL);
++ release_sock(sk);
+ sock_put(sk);
++ return;
+ }
+ release_sock(sk);
+ }
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index ab5133eb1d517..e45817caaee8d 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -585,7 +585,8 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
+ consume_skb(skb);
+ return NET_XMIT_DROP;
+ }
+- return ipvlan_rcv_frame(addr, &skb, true);
++ ipvlan_rcv_frame(addr, &skb, true);
++ return NET_XMIT_SUCCESS;
+ }
+ }
+ out:
+@@ -611,7 +612,8 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
+ consume_skb(skb);
+ return NET_XMIT_DROP;
+ }
+- return ipvlan_rcv_frame(addr, &skb, true);
++ ipvlan_rcv_frame(addr, &skb, true);
++ return NET_XMIT_SUCCESS;
+ }
+ }
+ skb = skb_share_check(skb, GFP_ATOMIC);
+@@ -623,7 +625,8 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
+ * the skb for the main-dev. At the RX side we just return
+ * RX_PASS for it to be processed further on the stack.
+ */
+- return dev_forward_skb(ipvlan->phy_dev, skb);
++ dev_forward_skb(ipvlan->phy_dev, skb);
++ return NET_XMIT_SUCCESS;
+
+ } else if (is_multicast_ether_addr(eth->h_dest)) {
+ skb_reset_mac_header(skb);
+diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
+index 5eb131ab916fd..b6052dcc45ebf 100644
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -2504,7 +2504,6 @@ EXPORT_SYMBOL(ath10k_core_napi_sync_disable);
+ static void ath10k_core_restart(struct work_struct *work)
+ {
+ struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+- struct ath10k_vif *arvif;
+ int ret;
+
+ set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+@@ -2543,14 +2542,6 @@ static void ath10k_core_restart(struct work_struct *work)
+ ar->state = ATH10K_STATE_RESTARTING;
+ ath10k_halt(ar);
+ ath10k_scan_finish(ar);
+- if (ar->hw_params.hw_restart_disconnect) {
+- list_for_each_entry(arvif, &ar->arvifs, list) {
+- if (arvif->is_up &&
+- arvif->vdev_type == WMI_VDEV_TYPE_STA)
+- ieee80211_hw_restart_disconnect(arvif->vif);
+- }
+- }
+-
+ ieee80211_restart_hw(ar->hw);
+ break;
+ case ATH10K_STATE_OFF:
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index ec8d5b29bc72c..f0729acdec50a 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -8108,6 +8108,7 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+ {
+ struct ath10k *ar = hw->priv;
++ struct ath10k_vif *arvif;
+
+ if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+ return;
+@@ -8122,6 +8123,12 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
+ ar->state = ATH10K_STATE_ON;
+ ieee80211_wake_queues(ar->hw);
+ clear_bit(ATH10K_FLAG_RESTARTING, &ar->dev_flags);
++ if (ar->hw_params.hw_restart_disconnect) {
++ list_for_each_entry(arvif, &ar->arvifs, list) {
++ if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
++ ieee80211_hw_restart_disconnect(arvif->vif);
++ }
++ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
+index 5cbba9a8b6ba9..396548e57022f 100644
+--- a/drivers/net/wireless/ath/ath11k/ahb.c
++++ b/drivers/net/wireless/ath/ath11k/ahb.c
+@@ -1127,6 +1127,7 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
+ switch (hw_rev) {
+ case ATH11K_HW_IPQ8074:
+ case ATH11K_HW_IPQ6018_HW10:
++ case ATH11K_HW_IPQ5018_HW10:
+ hif_ops = &ath11k_ahb_hif_ops_ipq8074;
+ pci_ops = NULL;
+ break;
+diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
+index 75fdbe4ef83a4..329f0957f9f09 100644
+--- a/drivers/net/wireless/ath/ath11k/core.c
++++ b/drivers/net/wireless/ath/ath11k/core.c
+@@ -671,6 +671,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
+ .hal_params = &ath11k_hw_hal_params_ipq8074,
+ .single_pdev_only = false,
+ .cold_boot_calib = true,
++ .cbcal_restart_fw = true,
+ .fix_l1ss = true,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = true,
+diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
+index ab8f0ccacc6be..727e6a785bb98 100644
+--- a/drivers/net/wireless/ath/ath11k/hw.c
++++ b/drivers/net/wireless/ath/ath11k/hw.c
+@@ -1165,7 +1165,7 @@ const struct ath11k_hw_ops ipq5018_ops = {
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
+-
++ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
+ };
+
+ #define ATH11K_TX_RING_MASK_0 BIT(0)
+diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
+index ab923e24b0a9c..2328b9447cf1b 100644
+--- a/drivers/net/wireless/ath/ath11k/qmi.c
++++ b/drivers/net/wireless/ath/ath11k/qmi.c
+@@ -2058,6 +2058,9 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
+ ab->qmi.target_mem[idx].iaddr =
+ ioremap(ab->qmi.target_mem[idx].paddr,
+ ab->qmi.target_mem[i].size);
++ if (!ab->qmi.target_mem[idx].iaddr)
++ return -EIO;
++
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ host_ddr_sz = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+@@ -2083,6 +2086,8 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
+ ab->qmi.target_mem[idx].iaddr =
+ ioremap(ab->qmi.target_mem[idx].paddr,
+ ab->qmi.target_mem[i].size);
++ if (!ab->qmi.target_mem[idx].iaddr)
++ return -EIO;
+ } else {
+ ab->qmi.target_mem[idx].paddr =
+ ATH11K_QMI_CALDB_ADDRESS;
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+index 4f27a9fb1482b..e9bd13eeee92f 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+@@ -1099,17 +1099,22 @@ static bool ath9k_hw_verify_hang(struct ath_hw *ah, unsigned int queue)
+ {
+ u32 dma_dbg_chain, dma_dbg_complete;
+ u8 dcu_chain_state, dcu_complete_state;
++ unsigned int dbg_reg, reg_offset;
+ int i;
+
+- for (i = 0; i < NUM_STATUS_READS; i++) {
+- if (queue < 6)
+- dma_dbg_chain = REG_READ(ah, AR_DMADBG_4);
+- else
+- dma_dbg_chain = REG_READ(ah, AR_DMADBG_5);
++ if (queue < 6) {
++ dbg_reg = AR_DMADBG_4;
++ reg_offset = queue * 5;
++ } else {
++ dbg_reg = AR_DMADBG_5;
++ reg_offset = (queue - 6) * 5;
++ }
+
++ for (i = 0; i < NUM_STATUS_READS; i++) {
++ dma_dbg_chain = REG_READ(ah, dbg_reg);
+ dma_dbg_complete = REG_READ(ah, AR_DMADBG_6);
+
+- dcu_chain_state = (dma_dbg_chain >> (5 * queue)) & 0x1f;
++ dcu_chain_state = (dma_dbg_chain >> reg_offset) & 0x1f;
+ dcu_complete_state = dma_dbg_complete & 0x3;
+
+ if ((dcu_chain_state != 0x6) || (dcu_complete_state != 0x1))
+@@ -1128,6 +1133,7 @@ static bool ar9003_hw_detect_mac_hang(struct ath_hw *ah)
+ u8 dcu_chain_state, dcu_complete_state;
+ bool dcu_wait_frdone = false;
+ unsigned long chk_dcu = 0;
++ unsigned int reg_offset;
+ unsigned int i = 0;
+
+ dma_dbg_4 = REG_READ(ah, AR_DMADBG_4);
+@@ -1139,12 +1145,15 @@ static bool ar9003_hw_detect_mac_hang(struct ath_hw *ah)
+ goto exit;
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+- if (i < 6)
++ if (i < 6) {
+ chk_dbg = dma_dbg_4;
+- else
++ reg_offset = i * 5;
++ } else {
+ chk_dbg = dma_dbg_5;
++ reg_offset = (i - 6) * 5;
++ }
+
+- dcu_chain_state = (chk_dbg >> (5 * i)) & 0x1f;
++ dcu_chain_state = (chk_dbg >> reg_offset) & 0x1f;
+ if (dcu_chain_state == 0x6) {
+ dcu_wait_frdone = true;
+ chk_dcu |= BIT(i);
+diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
+index fe62ff668f757..99667aba289df 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
+@@ -114,7 +114,13 @@ static void htc_process_conn_rsp(struct htc_target *target,
+
+ if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) {
+ epid = svc_rspmsg->endpoint_id;
+- if (epid < 0 || epid >= ENDPOINT_MAX)
++
++ /* Check that the received epid for the endpoint to attach
++ * a new service is valid. ENDPOINT0 can't be used here as it
++ * is already reserved for HTC_CTRL_RSVD_SVC service and thus
++ * should not be modified.
++ */
++ if (epid <= ENDPOINT0 || epid >= ENDPOINT_MAX)
+ return;
+
+ service_id = be16_to_cpu(svc_rspmsg->service_id);
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index a4197c14f0a92..6360d3356e256 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -203,7 +203,7 @@ void ath_cancel_work(struct ath_softc *sc)
+ void ath_restart_work(struct ath_softc *sc)
+ {
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,
+- ATH_HW_CHECK_POLL_INT);
++ msecs_to_jiffies(ATH_HW_CHECK_POLL_INT));
+
+ if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
+@@ -850,7 +850,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
+ static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix)
+ {
+ struct ath_hw *ah = sc->sc_ah;
+- int i;
++ int i, j;
+ struct ath_txq *txq;
+ bool key_in_use = false;
+
+@@ -868,8 +868,9 @@ static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix)
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ int idx = txq->txq_tailidx;
+
+- while (!key_in_use &&
+- !list_empty(&txq->txq_fifo[idx])) {
++ for (j = 0; !key_in_use &&
++ !list_empty(&txq->txq_fifo[idx]) &&
++ j < ATH_TXFIFO_DEPTH; j++) {
+ key_in_use = ath9k_txq_list_has_key(
+ &txq->txq_fifo[idx], keyix);
+ INCR(idx, ATH_TXFIFO_DEPTH);
+@@ -2239,7 +2240,7 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
+ }
+
+ ieee80211_queue_delayed_work(hw, &sc->hw_check_work,
+- ATH_HW_CHECK_POLL_INT);
++ msecs_to_jiffies(ATH_HW_CHECK_POLL_INT));
+ }
+
+ static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
+diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
+index 19345b8f7bfd5..d652c647d56b5 100644
+--- a/drivers/net/wireless/ath/ath9k/wmi.c
++++ b/drivers/net/wireless/ath/ath9k/wmi.c
+@@ -221,6 +221,10 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
+ if (unlikely(wmi->stopped))
+ goto free_skb;
+
++ /* Validate the obtained SKB. */
++ if (unlikely(skb->len < sizeof(struct wmi_cmd_hdr)))
++ goto free_skb;
++
+ hdr = (struct wmi_cmd_hdr *) skb->data;
+ cmd_id = be16_to_cpu(hdr->command_id);
+
+diff --git a/drivers/net/wireless/atmel/atmel_cs.c b/drivers/net/wireless/atmel/atmel_cs.c
+index 453bb84cb3386..58bba9875d366 100644
+--- a/drivers/net/wireless/atmel/atmel_cs.c
++++ b/drivers/net/wireless/atmel/atmel_cs.c
+@@ -72,6 +72,7 @@ struct local_info {
+ static int atmel_probe(struct pcmcia_device *p_dev)
+ {
+ struct local_info *local;
++ int ret;
+
+ dev_dbg(&p_dev->dev, "atmel_attach()\n");
+
+@@ -82,8 +83,16 @@ static int atmel_probe(struct pcmcia_device *p_dev)
+
+ p_dev->priv = local;
+
+- return atmel_config(p_dev);
+-} /* atmel_attach */
++ ret = atmel_config(p_dev);
++ if (ret)
++ goto err_free_priv;
++
++ return 0;
++
++err_free_priv:
++ kfree(p_dev->priv);
++ return ret;
++}
+
+ static void atmel_detach(struct pcmcia_device *link)
+ {
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index 9711841bb4564..3d91293cc250d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -1697,8 +1697,11 @@ static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
+ else
+ set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
+
+- if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
++ if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) {
++ local_bh_disable();
+ iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
++ local_bh_enable();
++ }
+ }
+
+ out:
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+index ad410b6efce73..7ebcf0ef29255 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -274,7 +274,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
+ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ struct iwl_rx_mpdu_desc *desc,
+- u32 status)
++ u32 status,
++ struct ieee80211_rx_status *stats)
+ {
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_vif *mvmvif;
+@@ -303,8 +304,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+
+ /* good cases */
+ if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
+- !(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)))
++ !(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR))) {
++ stats->flag |= RX_FLAG_DECRYPTED;
+ return 0;
++ }
+
+ if (!sta)
+ return -1;
+@@ -373,7 +376,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+
+ if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_has_protected(hdr->frame_control)))
+- return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status);
++ return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status, stats);
+
+ if (!ieee80211_has_protected(hdr->frame_control) ||
+ (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+index 9c9f87fe83777..b455e981faa1f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+@@ -1620,14 +1620,14 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
+ struct msix_entry *entry = dev_id;
+ struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
+ struct iwl_trans *trans = trans_pcie->trans;
+- struct iwl_rxq *rxq = &trans_pcie->rxq[entry->entry];
++ struct iwl_rxq *rxq;
+
+ trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
+
+ if (WARN_ON(entry->entry >= trans->num_rx_queues))
+ return IRQ_NONE;
+
+- if (!rxq) {
++ if (!trans_pcie->rxq) {
+ if (net_ratelimit())
+ IWL_ERR(trans,
+ "[%d] Got MSI-X interrupt before we have Rx queues\n",
+@@ -1635,6 +1635,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
+ return IRQ_NONE;
+ }
+
++ rxq = &trans_pcie->rxq[entry->entry];
+ lock_map_acquire(&trans->sync_cmd_lockdep_map);
+ IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
+
+diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_cs.c b/drivers/net/wireless/intersil/orinoco/orinoco_cs.c
+index a956f965a1e5e..03bfd2482656c 100644
+--- a/drivers/net/wireless/intersil/orinoco/orinoco_cs.c
++++ b/drivers/net/wireless/intersil/orinoco/orinoco_cs.c
+@@ -96,6 +96,7 @@ orinoco_cs_probe(struct pcmcia_device *link)
+ {
+ struct orinoco_private *priv;
+ struct orinoco_pccard *card;
++ int ret;
+
+ priv = alloc_orinocodev(sizeof(*card), &link->dev,
+ orinoco_cs_hard_reset, NULL);
+@@ -107,8 +108,16 @@ orinoco_cs_probe(struct pcmcia_device *link)
+ card->p_dev = link;
+ link->priv = priv;
+
+- return orinoco_cs_config(link);
+-} /* orinoco_cs_attach */
++ ret = orinoco_cs_config(link);
++ if (ret)
++ goto err_free_orinocodev;
++
++ return 0;
++
++err_free_orinocodev:
++ free_orinocodev(priv);
++ return ret;
++}
+
+ static void orinoco_cs_detach(struct pcmcia_device *link)
+ {
+diff --git a/drivers/net/wireless/intersil/orinoco/spectrum_cs.c b/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
+index 291ef97ed45ec..841d623c621ac 100644
+--- a/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
++++ b/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
+@@ -157,6 +157,7 @@ spectrum_cs_probe(struct pcmcia_device *link)
+ {
+ struct orinoco_private *priv;
+ struct orinoco_pccard *card;
++ int ret;
+
+ priv = alloc_orinocodev(sizeof(*card), &link->dev,
+ spectrum_cs_hard_reset,
+@@ -169,8 +170,16 @@ spectrum_cs_probe(struct pcmcia_device *link)
+ card->p_dev = link;
+ link->priv = priv;
+
+- return spectrum_cs_config(link);
+-} /* spectrum_cs_attach */
++ ret = spectrum_cs_config(link);
++ if (ret)
++ goto err_free_orinocodev;
++
++ return 0;
++
++err_free_orinocodev:
++ free_orinocodev(priv);
++ return ret;
++}
+
+ static void spectrum_cs_detach(struct pcmcia_device *link)
+ {
+diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
+index ac8001c842935..644b1e134b01c 100644
+--- a/drivers/net/wireless/marvell/mwifiex/scan.c
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
+@@ -2187,9 +2187,9 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+
+ if (nd_config) {
+ adapter->nd_info =
+- kzalloc(sizeof(struct cfg80211_wowlan_nd_match) +
+- sizeof(struct cfg80211_wowlan_nd_match *) *
+- scan_rsp->number_of_sets, GFP_ATOMIC);
++ kzalloc(struct_size(adapter->nd_info, matches,
++ scan_rsp->number_of_sets),
++ GFP_ATOMIC);
+
+ if (adapter->nd_info)
+ adapter->nd_info->n_matches = scan_rsp->number_of_sets;
+diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
+index 5adc69d5bcae3..a28da59384813 100644
+--- a/drivers/net/wireless/microchip/wilc1000/hif.c
++++ b/drivers/net/wireless/microchip/wilc1000/hif.c
+@@ -485,6 +485,9 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ int rsn_ie_len = sizeof(struct element) + rsn_ie[1];
+ int offset = 8;
+
++ param->mode_802_11i = 2;
++ param->rsn_found = true;
++
+ /* extract RSN capabilities */
+ if (offset < rsn_ie_len) {
+ /* skip over pairwise suites */
+@@ -494,11 +497,8 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ /* skip over authentication suites */
+ offset += (rsn_ie[offset] * 4) + 2;
+
+- if (offset + 1 < rsn_ie_len) {
+- param->mode_802_11i = 2;
+- param->rsn_found = true;
++ if (offset + 1 < rsn_ie_len)
+ memcpy(param->rsn_cap, &rsn_ie[offset], 2);
+- }
+ }
+ }
+ }
+diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
+index 1f57a0055bbd8..38782d4c4694a 100644
+--- a/drivers/net/wireless/ray_cs.c
++++ b/drivers/net/wireless/ray_cs.c
+@@ -270,13 +270,14 @@ static int ray_probe(struct pcmcia_device *p_dev)
+ {
+ ray_dev_t *local;
+ struct net_device *dev;
++ int ret;
+
+ dev_dbg(&p_dev->dev, "ray_attach()\n");
+
+ /* Allocate space for private device-specific data */
+ dev = alloc_etherdev(sizeof(ray_dev_t));
+ if (!dev)
+- goto fail_alloc_dev;
++ return -ENOMEM;
+
+ local = netdev_priv(dev);
+ local->finder = p_dev;
+@@ -313,11 +314,16 @@ static int ray_probe(struct pcmcia_device *p_dev)
+ timer_setup(&local->timer, NULL, 0);
+
+ this_device = p_dev;
+- return ray_config(p_dev);
++ ret = ray_config(p_dev);
++ if (ret)
++ goto err_free_dev;
++
++ return 0;
+
+-fail_alloc_dev:
+- return -ENOMEM;
+-} /* ray_attach */
++err_free_dev:
++ free_netdev(dev);
++ return ret;
++}
+
+ static void ray_detach(struct pcmcia_device *link)
+ {
+diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
+index 44a5fafb99055..976eafa739a2d 100644
+--- a/drivers/net/wireless/realtek/rtw88/usb.c
++++ b/drivers/net/wireless/realtek/rtw88/usb.c
+@@ -535,7 +535,7 @@ static void rtw_usb_rx_handler(struct work_struct *work)
+ }
+
+ if (skb_queue_len(&rtwusb->rx_queue) >= RTW_USB_MAX_RXQ_LEN) {
+- rtw_err(rtwdev, "failed to get rx_queue, overflow\n");
++ dev_dbg_ratelimited(rtwdev->dev, "failed to get rx_queue, overflow\n");
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
+index d09998796ac08..1911fef3bbad6 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
++++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
+@@ -1463,10 +1463,8 @@ static void rsi_shutdown(struct device *dev)
+
+ rsi_dbg(ERR_ZONE, "SDIO Bus shutdown =====>\n");
+
+- if (hw) {
+- struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
+-
+- if (rsi_config_wowlan(adapter, wowlan))
++ if (hw && hw->wiphy && hw->wiphy->wowlan_config) {
++ if (rsi_config_wowlan(adapter, hw->wiphy->wowlan_config))
+ rsi_dbg(ERR_ZONE, "Failed to configure WoWLAN\n");
+ }
+
+@@ -1481,9 +1479,6 @@ static void rsi_shutdown(struct device *dev)
+ if (sdev->write_fail)
+ rsi_dbg(INFO_ZONE, "###### Device is not ready #######\n");
+
+- if (rsi_set_sdio_pm_caps(adapter))
+- rsi_dbg(INFO_ZONE, "Setting power management caps failed\n");
+-
+ rsi_dbg(INFO_ZONE, "***** RSI module shut down *****\n");
+ }
+
+diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
+index 7fb2f95134760..c45c4b7cbbaf1 100644
+--- a/drivers/net/wireless/wl3501_cs.c
++++ b/drivers/net/wireless/wl3501_cs.c
+@@ -1862,6 +1862,7 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
+ {
+ struct net_device *dev;
+ struct wl3501_card *this;
++ int ret;
+
+ /* The io structure describes IO port mapping */
+ p_dev->resource[0]->end = 16;
+@@ -1873,8 +1874,7 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
+
+ dev = alloc_etherdev(sizeof(struct wl3501_card));
+ if (!dev)
+- goto out_link;
+-
++ return -ENOMEM;
+
+ dev->netdev_ops = &wl3501_netdev_ops;
+ dev->watchdog_timeo = 5 * HZ;
+@@ -1887,9 +1887,15 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
+ netif_stop_queue(dev);
+ p_dev->priv = dev;
+
+- return wl3501_config(p_dev);
+-out_link:
+- return -ENOMEM;
++ ret = wl3501_config(p_dev);
++ if (ret)
++ goto out_free_etherdev;
++
++ return 0;
++
++out_free_etherdev:
++ free_netdev(dev);
++ return ret;
+ }
+
+ static int wl3501_config(struct pcmcia_device *link)
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 8a632bf7f5a8f..8d8403b65e1b3 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3872,8 +3872,10 @@ static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &key);
+- if (ret)
++ if (ret) {
++ kfree(dhchap_secret);
+ return ret;
++ }
+ kfree(opts->dhchap_secret);
+ opts->dhchap_secret = dhchap_secret;
+ host_key = ctrl->host_key;
+@@ -3881,7 +3883,8 @@ static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
+ ctrl->host_key = key;
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ nvme_auth_free_key(host_key);
+- }
++ } else
++ kfree(dhchap_secret);
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+@@ -3926,8 +3929,10 @@ static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &key);
+- if (ret)
++ if (ret) {
++ kfree(dhchap_secret);
+ return ret;
++ }
+ kfree(opts->dhchap_ctrl_secret);
+ opts->dhchap_ctrl_secret = dhchap_secret;
+ ctrl_key = ctrl->ctrl_key;
+@@ -3935,7 +3940,8 @@ static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
+ ctrl->ctrl_key = key;
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ nvme_auth_free_key(ctrl_key);
+- }
++ } else
++ kfree(dhchap_secret);
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+@@ -5243,6 +5249,8 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+
+ return 0;
+ out_free_cdev:
++ nvme_fault_inject_fini(&ctrl->fault_inject);
++ dev_pm_qos_hide_latency_tolerance(ctrl->device);
+ cdev_device_del(&ctrl->cdev, ctrl->device);
+ out_free_name:
+ nvme_put_ctrl(ctrl);
+diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
+index 940c7dd701d68..5b14f7ee3c798 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
++++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
+@@ -12,6 +12,8 @@
+
+ #include "pcie-cadence.h"
+
++#define LINK_RETRAIN_TIMEOUT HZ
++
+ static u64 bar_max_size[] = {
+ [RP_BAR0] = _ULL(128 * SZ_2G),
+ [RP_BAR1] = SZ_2G,
+@@ -77,6 +79,27 @@ static struct pci_ops cdns_pcie_host_ops = {
+ .write = pci_generic_config_write,
+ };
+
++static int cdns_pcie_host_training_complete(struct cdns_pcie *pcie)
++{
++ u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
++ unsigned long end_jiffies;
++ u16 lnk_stat;
++
++ /* Wait for link training to complete. Exit after timeout. */
++ end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
++ do {
++ lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
++ if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
++ break;
++ usleep_range(0, 1000);
++ } while (time_before(jiffies, end_jiffies));
++
++ if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
++ return 0;
++
++ return -ETIMEDOUT;
++}
++
+ static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
+ {
+ struct device *dev = pcie->dev;
+@@ -118,6 +141,10 @@ static int cdns_pcie_retrain(struct cdns_pcie *pcie)
+ cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
+ lnk_ctl);
+
++ ret = cdns_pcie_host_training_complete(pcie);
++ if (ret)
++ return ret;
++
+ ret = cdns_pcie_host_wait_for_link(pcie);
+ }
+ return ret;
+diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
+index ecd3009df586d..6e7981d2ed5e1 100644
+--- a/drivers/pci/controller/pci-ftpci100.c
++++ b/drivers/pci/controller/pci-ftpci100.c
+@@ -429,22 +429,12 @@ static int faraday_pci_probe(struct platform_device *pdev)
+ p->dev = dev;
+
+ /* Retrieve and enable optional clocks */
+- clk = devm_clk_get(dev, "PCLK");
++ clk = devm_clk_get_enabled(dev, "PCLK");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+- ret = clk_prepare_enable(clk);
+- if (ret) {
+- dev_err(dev, "could not prepare PCLK\n");
+- return ret;
+- }
+- p->bus_clk = devm_clk_get(dev, "PCICLK");
++ p->bus_clk = devm_clk_get_enabled(dev, "PCICLK");
+ if (IS_ERR(p->bus_clk))
+ return PTR_ERR(p->bus_clk);
+- ret = clk_prepare_enable(p->bus_clk);
+- if (ret) {
+- dev_err(dev, "could not prepare PCICLK\n");
+- return ret;
+- }
+
+ p->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(p->base))
+diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
+index 990630ec57c6a..e718a816d4814 100644
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -927,7 +927,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
+ if (!list_empty(&child->devices)) {
+ dev = list_first_entry(&child->devices,
+ struct pci_dev, bus_list);
+- if (pci_reset_bus(dev))
++ ret = pci_reset_bus(dev);
++ if (ret)
+ pci_warn(dev, "can't reset device: %d\n", ret);
+
+ break;
+@@ -1036,6 +1037,13 @@ static void vmd_remove(struct pci_dev *dev)
+ ida_simple_remove(&vmd_instance_ida, vmd->instance);
+ }
+
++static void vmd_shutdown(struct pci_dev *dev)
++{
++ struct vmd_dev *vmd = pci_get_drvdata(dev);
++
++ vmd_remove_irq_domain(vmd);
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static int vmd_suspend(struct device *dev)
+ {
+@@ -1101,6 +1109,7 @@ static struct pci_driver vmd_drv = {
+ .id_table = vmd_ids,
+ .probe = vmd_probe,
+ .remove = vmd_remove,
++ .shutdown = vmd_shutdown,
+ .driver = {
+ .pm = &vmd_dev_pm_ops,
+ },
+diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
+index 9fd5608868718..8efb6a869e7ce 100644
+--- a/drivers/pci/endpoint/functions/Kconfig
++++ b/drivers/pci/endpoint/functions/Kconfig
+@@ -27,7 +27,7 @@ config PCI_EPF_NTB
+ If in doubt, say "N" to disable Endpoint NTB driver.
+
+ config PCI_EPF_VNTB
+- tristate "PCI Endpoint NTB driver"
++ tristate "PCI Endpoint Virtual NTB driver"
+ depends on PCI_ENDPOINT
+ depends on NTB
+ select CONFIGFS_FS
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
+index 0f9d2ec822ac6..172e5ac0bd96c 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -112,7 +112,7 @@ static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
+ size_t len, dma_addr_t dma_remote,
+ enum dma_transfer_direction dir)
+ {
+- struct dma_chan *chan = (dir == DMA_DEV_TO_MEM) ?
++ struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
+ epf_test->dma_chan_tx : epf_test->dma_chan_rx;
+ dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
+ enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
+index 529c348084401..32baba1b7f131 100644
+--- a/drivers/pci/hotplug/pciehp_ctrl.c
++++ b/drivers/pci/hotplug/pciehp_ctrl.c
+@@ -256,6 +256,14 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
+ present = pciehp_card_present(ctrl);
+ link_active = pciehp_check_link_active(ctrl);
+ if (present <= 0 && link_active <= 0) {
++ if (ctrl->state == BLINKINGON_STATE) {
++ ctrl->state = OFF_STATE;
++ cancel_delayed_work(&ctrl->button_work);
++ pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
++ INDICATOR_NOOP);
++ ctrl_info(ctrl, "Slot(%s): Card not present\n",
++ slot_name(ctrl));
++ }
+ mutex_unlock(&ctrl->state_lock);
+ return;
+ }
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 66d7514ca111b..db32335039d61 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -1010,21 +1010,24 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
+
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+- /*
+- * All PCIe functions are in one slot, remove one function will remove
+- * the whole slot, so just wait until we are the last function left.
+- */
+- if (!list_empty(&parent->subordinate->devices))
+- goto out;
+
+ link = parent->link_state;
+ root = link->root;
+ parent_link = link->parent;
+
+- /* All functions are removed, so just disable ASPM for the link */
++ /*
++ * link->downstream is a pointer to the pci_dev of function 0. If
++ * we remove that function, the pci_dev is about to be deallocated,
++ * so we can't use link->downstream again. Free the link state to
++ * avoid this.
++ *
++ * If we're removing a non-0 function, it's possible we could
++ * retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends
++ * programming the same ASPM Control value for all functions of
++ * multi-function devices, so disable ASPM for all of them.
++ */
+ pcie_config_aspm_link(link, 0);
+ list_del(&link->sibling);
+- /* Clock PM is for endpoint device */
+ free_link_state(link);
+
+ /* Recheck latencies and configure upstream links */
+@@ -1032,7 +1035,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
+ pcie_update_aspm_capable(root);
+ pcie_config_aspm_path(parent_link);
+ }
+-out:
++
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
+ }
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index 44b719f39c3b3..4f86b7fd9823f 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -1899,9 +1899,10 @@ static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int id
+ if (dtc->irq < 0)
+ return dtc->irq;
+
+- writel_relaxed(0, dtc->base + CMN_DT_PMCR);
++ writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL);
++ writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR);
++ writeq_relaxed(0, dtc->base + CMN_DT_PMCCNTR);
+ writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR);
+- writel_relaxed(CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR);
+
+ return 0;
+ }
+@@ -1961,7 +1962,7 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
+ dn->type = CMN_TYPE_CCLA;
+ }
+
+- writel_relaxed(CMN_DT_DTC_CTL_DT_EN, cmn->dtc[0].base + CMN_DT_DTC_CTL);
++ arm_cmn_set_state(cmn, CMN_STATE_DISABLED);
+
+ return 0;
+ }
+diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
+index e31302ab7e37c..35d2fe33a7b6f 100644
+--- a/drivers/perf/arm_cspmu/arm_cspmu.c
++++ b/drivers/perf/arm_cspmu/arm_cspmu.c
+@@ -189,10 +189,10 @@ static inline bool use_64b_counter_reg(const struct arm_cspmu *cspmu)
+ ssize_t arm_cspmu_sysfs_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+- struct dev_ext_attribute *eattr =
+- container_of(attr, struct dev_ext_attribute, attr);
+- return sysfs_emit(buf, "event=0x%llx\n",
+- (unsigned long long)eattr->var);
++ struct perf_pmu_events_attr *pmu_attr;
++
++ pmu_attr = container_of(attr, typeof(*pmu_attr), attr);
++ return sysfs_emit(buf, "event=0x%llx\n", pmu_attr->id);
+ }
+ EXPORT_SYMBOL_GPL(arm_cspmu_sysfs_event_show);
+
+@@ -1230,7 +1230,8 @@ static struct platform_driver arm_cspmu_driver = {
+ static void arm_cspmu_set_active_cpu(int cpu, struct arm_cspmu *cspmu)
+ {
+ cpumask_set_cpu(cpu, &cspmu->active_cpu);
+- WARN_ON(irq_set_affinity(cspmu->irq, &cspmu->active_cpu));
++ if (cspmu->irq)
++ WARN_ON(irq_set_affinity(cspmu->irq, &cspmu->active_cpu));
+ }
+
+ static int arm_cspmu_cpu_online(unsigned int cpu, struct hlist_node *node)
+diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+index 6fee0b6e163bb..e10fc7cb9493a 100644
+--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
++++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+@@ -683,7 +683,7 @@ static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+
+ pcie_pmu->on_cpu = -1;
+ /* Choose a new CPU from all online cpus. */
+- target = cpumask_first(cpu_online_mask);
++ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids) {
+ pci_err(pcie_pmu->pdev, "There is no CPU to set\n");
+ return 0;
+diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+index 7435173e10f43..1489191a213fe 100644
+--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
++++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+@@ -376,10 +376,8 @@ static int bcm2835_add_pin_ranges_fallback(struct gpio_chip *gc)
+ if (!pctldev)
+ return 0;
+
+- gpiochip_add_pin_range(gc, pinctrl_dev_get_devname(pctldev), 0, 0,
+- gc->ngpio);
+-
+- return 0;
++ return gpiochip_add_pin_range(gc, pinctrl_dev_get_devname(pctldev), 0, 0,
++ gc->ngpio);
+ }
+
+ static const struct gpio_chip bcm2835_gpio_chip = {
+diff --git a/drivers/pinctrl/freescale/pinctrl-scu.c b/drivers/pinctrl/freescale/pinctrl-scu.c
+index ea261b6e74581..3b252d684d723 100644
+--- a/drivers/pinctrl/freescale/pinctrl-scu.c
++++ b/drivers/pinctrl/freescale/pinctrl-scu.c
+@@ -90,7 +90,7 @@ int imx_pinconf_set_scu(struct pinctrl_dev *pctldev, unsigned pin_id,
+ struct imx_sc_msg_req_pad_set msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ unsigned int mux = configs[0];
+- unsigned int conf = configs[1];
++ unsigned int conf;
+ unsigned int val;
+ int ret;
+
+@@ -115,6 +115,7 @@ int imx_pinconf_set_scu(struct pinctrl_dev *pctldev, unsigned pin_id,
+ * Set mux and conf together in one IPC call
+ */
+ WARN_ON(num_configs != 2);
++ conf = configs[1];
+
+ val = conf | BM_PAD_CTL_IFMUX_ENABLE | BM_PAD_CTL_GP_ENABLE;
+ val |= mux << BP_PAD_CTL_IFMUX;
+diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
+index 722990e278361..87cf1e7403979 100644
+--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
+@@ -949,11 +949,6 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
+
+ break;
+
+- case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+- if (!(ctrl1 & CHV_PADCTRL1_ODEN))
+- return -EINVAL;
+- break;
+-
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: {
+ u32 cfg;
+
+@@ -963,6 +958,16 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ return -EINVAL;
+
+ break;
++
++ case PIN_CONFIG_DRIVE_PUSH_PULL:
++ if (ctrl1 & CHV_PADCTRL1_ODEN)
++ return -EINVAL;
++ break;
++
++ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
++ if (!(ctrl1 & CHV_PADCTRL1_ODEN))
++ return -EINVAL;
++ break;
+ }
+
+ default:
+diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+index ff5bcea172e84..071bdfd570f94 100644
+--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
++++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+@@ -1881,6 +1881,8 @@ static int npcm7xx_gpio_of(struct npcm7xx_pinctrl *pctrl)
+ }
+
+ pctrl->gpio_bank[id].base = ioremap(res.start, resource_size(&res));
++ if (!pctrl->gpio_bank[id].base)
++ return -EINVAL;
+
+ ret = bgpio_init(&pctrl->gpio_bank[id].gc, dev, 4,
+ pctrl->gpio_bank[id].base + NPCM7XX_GP_N_DIN,
+diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
+index c775d239444a6..20433c1745805 100644
+--- a/drivers/pinctrl/pinctrl-at91-pio4.c
++++ b/drivers/pinctrl/pinctrl-at91-pio4.c
+@@ -1151,6 +1151,8 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
+ /* Pin naming convention: P(bank_name)(bank_pin_number). */
+ pin_desc[i].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "P%c%d",
+ bank + 'A', line);
++ if (!pin_desc[i].name)
++ return -ENOMEM;
+
+ group->name = group_names[i] = pin_desc[i].name;
+ group->pin = pin_desc[i].number;
+diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
+index 9fa68ca4a412d..9184d457edf8d 100644
+--- a/drivers/pinctrl/pinctrl-at91.c
++++ b/drivers/pinctrl/pinctrl-at91.c
+@@ -1294,10 +1294,11 @@ static const struct of_device_id at91_pinctrl_of_match[] = {
+ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
+ struct at91_pinctrl *info)
+ {
++ struct device *dev = &pdev->dev;
+ int ret = 0;
+ int i, j, ngpio_chips_enabled = 0;
+ uint32_t *tmp;
+- struct device_node *np = pdev->dev.of_node;
++ struct device_node *np = dev->of_node;
+ struct device_node *child;
+
+ if (!np)
+@@ -1361,9 +1362,8 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
+ continue;
+ ret = at91_pinctrl_parse_functions(child, info, i++);
+ if (ret) {
+- dev_err(&pdev->dev, "failed to parse function\n");
+ of_node_put(child);
+- return ret;
++ return dev_err_probe(dev, ret, "failed to parse function\n");
+ }
+ }
+
+@@ -1399,8 +1399,8 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
+ char **names;
+
+ names = devm_kasprintf_strarray(dev, "pio", MAX_NB_GPIO_PER_BANK);
+- if (!names)
+- return -ENOMEM;
++ if (IS_ERR(names))
++ return PTR_ERR(names);
+
+ for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) {
+ char *name = names[j];
+@@ -1416,11 +1416,8 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, info);
+ info->pctl = devm_pinctrl_register(&pdev->dev, &at91_pinctrl_desc,
+ info);
+-
+- if (IS_ERR(info->pctl)) {
+- dev_err(&pdev->dev, "could not register AT91 pinctrl driver\n");
+- return PTR_ERR(info->pctl);
+- }
++ if (IS_ERR(info->pctl))
++ return dev_err_probe(dev, PTR_ERR(info->pctl), "could not register AT91 pinctrl driver\n");
+
+ /* We will handle a range of GPIO pins */
+ for (i = 0; i < gpio_banks; i++)
+@@ -1821,46 +1818,29 @@ static int at91_gpio_probe(struct platform_device *pdev)
+ char **names;
+
+ BUG_ON(alias_idx >= ARRAY_SIZE(gpio_chips));
+- if (gpio_chips[alias_idx]) {
+- ret = -EBUSY;
+- goto err;
+- }
++ if (gpio_chips[alias_idx])
++ return dev_err_probe(dev, -EBUSY, "%d slot is occupied.\n", alias_idx);
+
+ irq = platform_get_irq(pdev, 0);
+- if (irq < 0) {
+- ret = irq;
+- goto err;
+- }
++ if (irq < 0)
++ return irq;
+
+ at91_chip = devm_kzalloc(&pdev->dev, sizeof(*at91_chip), GFP_KERNEL);
+- if (!at91_chip) {
+- ret = -ENOMEM;
+- goto err;
+- }
++ if (!at91_chip)
++ return -ENOMEM;
+
+ at91_chip->regbase = devm_platform_ioremap_resource(pdev, 0);
+- if (IS_ERR(at91_chip->regbase)) {
+- ret = PTR_ERR(at91_chip->regbase);
+- goto err;
+- }
++ if (IS_ERR(at91_chip->regbase))
++ return PTR_ERR(at91_chip->regbase);
+
+ at91_chip->ops = (const struct at91_pinctrl_mux_ops *)
+ of_match_device(at91_gpio_of_match, &pdev->dev)->data;
+ at91_chip->pioc_virq = irq;
+ at91_chip->pioc_idx = alias_idx;
+
+- at91_chip->clock = devm_clk_get(&pdev->dev, NULL);
+- if (IS_ERR(at91_chip->clock)) {
+- dev_err(&pdev->dev, "failed to get clock, ignoring.\n");
+- ret = PTR_ERR(at91_chip->clock);
+- goto err;
+- }
+-
+- ret = clk_prepare_enable(at91_chip->clock);
+- if (ret) {
+- dev_err(&pdev->dev, "failed to prepare and enable clock, ignoring.\n");
+- goto clk_enable_err;
+- }
++ at91_chip->clock = devm_clk_get_enabled(&pdev->dev, NULL);
++ if (IS_ERR(at91_chip->clock))
++ return dev_err_probe(dev, PTR_ERR(at91_chip->clock), "failed to get clock, ignoring.\n");
+
+ at91_chip->chip = at91_gpio_template;
+ at91_chip->id = alias_idx;
+@@ -1873,17 +1853,15 @@ static int at91_gpio_probe(struct platform_device *pdev)
+
+ if (!of_property_read_u32(np, "#gpio-lines", &ngpio)) {
+ if (ngpio >= MAX_NB_GPIO_PER_BANK)
+- pr_err("at91_gpio.%d, gpio-nb >= %d failback to %d\n",
+- alias_idx, MAX_NB_GPIO_PER_BANK, MAX_NB_GPIO_PER_BANK);
++ dev_err(dev, "at91_gpio.%d, gpio-nb >= %d failback to %d\n",
++ alias_idx, MAX_NB_GPIO_PER_BANK, MAX_NB_GPIO_PER_BANK);
+ else
+ chip->ngpio = ngpio;
+ }
+
+ names = devm_kasprintf_strarray(dev, "pio", chip->ngpio);
+- if (!names) {
+- ret = -ENOMEM;
+- goto clk_enable_err;
+- }
++ if (IS_ERR(names))
++ return PTR_ERR(names);
+
+ for (i = 0; i < chip->ngpio; i++)
+ strreplace(names[i], '-', alias_idx + 'A');
+@@ -1900,11 +1878,11 @@ static int at91_gpio_probe(struct platform_device *pdev)
+
+ ret = at91_gpio_of_irq_setup(pdev, at91_chip);
+ if (ret)
+- goto gpiochip_add_err;
++ return ret;
+
+ ret = gpiochip_add_data(chip, at91_chip);
+ if (ret)
+- goto gpiochip_add_err;
++ return ret;
+
+ gpio_chips[alias_idx] = at91_chip;
+ platform_set_drvdata(pdev, at91_chip);
+@@ -1913,14 +1891,6 @@ static int at91_gpio_probe(struct platform_device *pdev)
+ dev_info(&pdev->dev, "at address %p\n", at91_chip->regbase);
+
+ return 0;
+-
+-gpiochip_add_err:
+-clk_enable_err:
+- clk_disable_unprepare(at91_chip->clock);
+-err:
+- dev_err(&pdev->dev, "Failure %i for GPIO %i\n", ret, alias_idx);
+-
+- return ret;
+ }
+
+ static const struct dev_pm_ops at91_gpio_pm_ops = {
+diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
+index 4794602316e7d..666d8b7cdbad3 100644
+--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
++++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
+@@ -818,6 +818,9 @@ static int microchip_sgpio_register_bank(struct device *dev,
+ pctl_desc->name = devm_kasprintf(dev, GFP_KERNEL, "%s-%sput",
+ dev_name(dev),
+ bank->is_input ? "in" : "out");
++ if (!pctl_desc->name)
++ return -ENOMEM;
++
+ pctl_desc->pctlops = &sgpio_pctl_ops;
+ pctl_desc->pmxops = &sgpio_pmx_ops;
+ pctl_desc->confops = &sgpio_confops;
+diff --git a/drivers/pinctrl/sunplus/sppctl.c b/drivers/pinctrl/sunplus/sppctl.c
+index 6bbbab3a6fdf3..150996949ede7 100644
+--- a/drivers/pinctrl/sunplus/sppctl.c
++++ b/drivers/pinctrl/sunplus/sppctl.c
+@@ -834,11 +834,6 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
+ int i, size = 0;
+
+ list = of_get_property(np_config, "sunplus,pins", &size);
+-
+- if (nmG <= 0)
+- nmG = 0;
+-
+- parent = of_get_parent(np_config);
+ *num_maps = size / sizeof(*list);
+
+ /*
+@@ -866,10 +861,14 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
+ }
+ }
+
++ if (nmG <= 0)
++ nmG = 0;
++
+ *map = kcalloc(*num_maps + nmG, sizeof(**map), GFP_KERNEL);
+- if (*map == NULL)
++ if (!(*map))
+ return -ENOMEM;
+
++ parent = of_get_parent(np_config);
+ for (i = 0; i < (*num_maps); i++) {
+ dt_pin = be32_to_cpu(list[i]);
+ pin_num = FIELD_GET(GENMASK(31, 24), dt_pin);
+@@ -883,6 +882,8 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
+ (*map)[i].data.configs.num_configs = 1;
+ (*map)[i].data.configs.group_or_pin = pin_get_name(pctldev, pin_num);
+ configs = kmalloc(sizeof(*configs), GFP_KERNEL);
++ if (!configs)
++ goto sppctl_map_err;
+ *configs = FIELD_GET(GENMASK(7, 0), dt_pin);
+ (*map)[i].data.configs.configs = configs;
+
+@@ -896,6 +897,8 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
+ (*map)[i].data.configs.num_configs = 1;
+ (*map)[i].data.configs.group_or_pin = pin_get_name(pctldev, pin_num);
+ configs = kmalloc(sizeof(*configs), GFP_KERNEL);
++ if (!configs)
++ goto sppctl_map_err;
+ *configs = SPPCTL_IOP_CONFIGS;
+ (*map)[i].data.configs.configs = configs;
+
+@@ -965,6 +968,14 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
+ of_node_put(parent);
+ dev_dbg(pctldev->dev, "%d pins mapped\n", *num_maps);
+ return 0;
++
++sppctl_map_err:
++ for (i = 0; i < (*num_maps); i++)
++ if ((*map)[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
++ kfree((*map)[i].data.configs.configs);
++ kfree(*map);
++ of_node_put(parent);
++ return -ENOMEM;
+ }
+
+ static const struct pinctrl_ops sppctl_pctl_ops = {
+diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
+index 1729b7ddfa946..21e08fbd1df0e 100644
+--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
++++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
+@@ -232,7 +232,7 @@ static const char *tegra_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+ {
+ struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+- return pmx->soc->functions[function].name;
++ return pmx->functions[function].name;
+ }
+
+ static int tegra_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
+@@ -242,8 +242,8 @@ static int tegra_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
+ {
+ struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+- *groups = pmx->soc->functions[function].groups;
+- *num_groups = pmx->soc->functions[function].ngroups;
++ *groups = pmx->functions[function].groups;
++ *num_groups = pmx->functions[function].ngroups;
+
+ return 0;
+ }
+@@ -795,10 +795,17 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
+ if (!pmx->group_pins)
+ return -ENOMEM;
+
++ pmx->functions = devm_kcalloc(&pdev->dev, pmx->soc->nfunctions,
++ sizeof(*pmx->functions), GFP_KERNEL);
++ if (!pmx->functions)
++ return -ENOMEM;
++
+ group_pins = pmx->group_pins;
++
+ for (fn = 0; fn < soc_data->nfunctions; fn++) {
+- struct tegra_function *func = &soc_data->functions[fn];
++ struct tegra_function *func = &pmx->functions[fn];
+
++ func->name = pmx->soc->functions[fn];
+ func->groups = group_pins;
+
+ for (gn = 0; gn < soc_data->ngroups; gn++) {
+diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h
+index 6130cba7cce54..b3289bdf727d8 100644
+--- a/drivers/pinctrl/tegra/pinctrl-tegra.h
++++ b/drivers/pinctrl/tegra/pinctrl-tegra.h
+@@ -13,6 +13,7 @@ struct tegra_pmx {
+ struct pinctrl_dev *pctl;
+
+ const struct tegra_pinctrl_soc_data *soc;
++ struct tegra_function *functions;
+ const char **group_pins;
+
+ struct pinctrl_gpio_range gpio_range;
+@@ -191,7 +192,7 @@ struct tegra_pinctrl_soc_data {
+ const char *gpio_compatible;
+ const struct pinctrl_pin_desc *pins;
+ unsigned npins;
+- struct tegra_function *functions;
++ const char * const *functions;
+ unsigned nfunctions;
+ const struct tegra_pingroup *groups;
+ unsigned ngroups;
+diff --git a/drivers/pinctrl/tegra/pinctrl-tegra114.c b/drivers/pinctrl/tegra/pinctrl-tegra114.c
+index e72ab1eb23983..3d425b2018e78 100644
+--- a/drivers/pinctrl/tegra/pinctrl-tegra114.c
++++ b/drivers/pinctrl/tegra/pinctrl-tegra114.c
+@@ -1452,12 +1452,9 @@ enum tegra_mux {
+ TEGRA_MUX_VI_ALT3,
+ };
+
+-#define FUNCTION(fname) \
+- { \
+- .name = #fname, \
+- }
++#define FUNCTION(fname) #fname
+
+-static struct tegra_function tegra114_functions[] = {
++static const char * const tegra114_functions[] = {
+ FUNCTION(blink),
+ FUNCTION(cec),
+ FUNCTION(cldvfs),
+diff --git a/drivers/pinctrl/tegra/pinctrl-tegra124.c b/drivers/pinctrl/tegra/pinctrl-tegra124.c
+index 26096c6b967e2..2a50c5c7516c3 100644
+--- a/drivers/pinctrl/tegra/pinctrl-tegra124.c
++++ b/drivers/pinctrl/tegra/pinctrl-tegra124.c
+@@ -1611,12 +1611,9 @@ enum tegra_mux {
+ TEGRA_MUX_VIMCLK2_ALT,
+ };
+
+-#define FUNCTION(fname) \
+- { \
+- .name = #fname, \
+- }
++#define FUNCTION(fname) #fname
+
+-static struct tegra_function tegra124_functions[] = {
++static const char * const tegra124_functions[] = {
+ FUNCTION(blink),
+ FUNCTION(ccla),
+ FUNCTION(cec),
+diff --git a/drivers/pinctrl/tegra/pinctrl-tegra194.c b/drivers/pinctrl/tegra/pinctrl-tegra194.c
+index 277973c884344..69f58df628977 100644
+--- a/drivers/pinctrl/tegra/pinctrl-tegra194.c
++++ b/drivers/pinctrl/tegra/pinctrl-tegra194.c
+@@ -1189,12 +1189,9 @@ enum tegra_mux_dt {
+ };
+
+ /* Make list of each function name */
+-#define TEGRA_PIN_FUNCTION(lid) \
+- { \
+- .name = #lid, \
+- }
++#define TEGRA_PIN_FUNCTION(lid) #lid
+
+-static struct tegra_function tegra194_functions[] = {
++static const char * const tegra194_functions[] = {
+ TEGRA_PIN_FUNCTION(rsvd0),
+ TEGRA_PIN_FUNCTION(rsvd1),
+ TEGRA_PIN_FUNCTION(rsvd2),
+diff --git a/drivers/pinctrl/tegra/pinctrl-tegra20.c b/drivers/pinctrl/tegra/pinctrl-tegra20.c
+index 0dc2cf0d05b1e..737fc2000f66b 100644
+--- a/drivers/pinctrl/tegra/pinctrl-tegra20.c
++++ b/drivers/pinctrl/tegra/pinctrl-tegra20.c
+@@ -1889,12 +1889,9 @@ enum tegra_mux {
+ TEGRA_MUX_XIO,
+ };
+
+-#define FUNCTION(fname) \
+- { \
+- .name = #fname, \
+- }
++#define FUNCTION(fname) #fname
+
+-static struct tegra_function tegra20_functions[] = {
++static const char * const tegra20_functions[] = {
+ FUNCTION(ahb_clk),
+ FUNCTION(apb_clk),
+ FUNCTION(audio_sync),
+diff --git a/drivers/pinctrl/tegra/pinctrl-tegra210.c b/drivers/pinctrl/tegra/pinctrl-tegra210.c
+index b480f607fa16f..9bb29146dfff7 100644
+--- a/drivers/pinctrl/tegra/pinctrl-tegra210.c
++++ b/drivers/pinctrl/tegra/pinctrl-tegra210.c
+@@ -1185,12 +1185,9 @@ enum tegra_mux {
+ TEGRA_MUX_VIMCLK2,
+ };
+
+-#define FUNCTION(fname) \
+- { \
+- .name = #fname, \
+- }
++#define FUNCTION(fname) #fname
+
+-static struct tegra_function tegra210_functions[] = {
++static const char * const tegra210_functions[] = {
+ FUNCTION(aud),
+ FUNCTION(bcl),
+ FUNCTION(blink),
+diff --git a/drivers/pinctrl/tegra/pinctrl-tegra30.c b/drivers/pinctrl/tegra/pinctrl-tegra30.c
+index 7299a371827f1..de5aa2d4d28d3 100644
+--- a/drivers/pinctrl/tegra/pinctrl-tegra30.c
++++ b/drivers/pinctrl/tegra/pinctrl-tegra30.c
+@@ -2010,12 +2010,9 @@ enum tegra_mux {
+ TEGRA_MUX_VI_ALT3,
+ };
+
+-#define FUNCTION(fname) \
+- { \
+- .name = #fname, \
+- }
++#define FUNCTION(fname) #fname
+
+-static struct tegra_function tegra30_functions[] = {
++static const char * const tegra30_functions[] = {
+ FUNCTION(blink),
+ FUNCTION(cec),
+ FUNCTION(clk_12m_out),
+diff --git a/drivers/platform/x86/dell/dell-rbtn.c b/drivers/platform/x86/dell/dell-rbtn.c
+index aa0e6c9074942..c8fcb537fd65d 100644
+--- a/drivers/platform/x86/dell/dell-rbtn.c
++++ b/drivers/platform/x86/dell/dell-rbtn.c
+@@ -395,16 +395,16 @@ static int rbtn_add(struct acpi_device *device)
+ return -EINVAL;
+ }
+
++ rbtn_data = devm_kzalloc(&device->dev, sizeof(*rbtn_data), GFP_KERNEL);
++ if (!rbtn_data)
++ return -ENOMEM;
++
+ ret = rbtn_acquire(device, true);
+ if (ret < 0) {
+ dev_err(&device->dev, "Cannot enable device\n");
+ return ret;
+ }
+
+- rbtn_data = devm_kzalloc(&device->dev, sizeof(*rbtn_data), GFP_KERNEL);
+- if (!rbtn_data)
+- return -ENOMEM;
+-
+ rbtn_data->type = type;
+ device->driver_data = rbtn_data;
+
+@@ -420,10 +420,12 @@ static int rbtn_add(struct acpi_device *device)
+ break;
+ default:
+ ret = -EINVAL;
++ break;
+ }
++ if (ret)
++ rbtn_acquire(device, false);
+
+ return ret;
+-
+ }
+
+ static void rbtn_remove(struct acpi_device *device)
+@@ -442,7 +444,6 @@ static void rbtn_remove(struct acpi_device *device)
+ }
+
+ rbtn_acquire(device, false);
+- device->driver_data = NULL;
+ }
+
+ static void rbtn_notify(struct acpi_device *device, u32 event)
+diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
+index b9591969e0fa1..bed083525fbe7 100644
+--- a/drivers/platform/x86/intel/pmc/core.c
++++ b/drivers/platform/x86/intel/pmc/core.c
+@@ -1039,7 +1039,6 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, tgl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, adl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, adl_core_init),
+- X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, mtl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, mtl_core_init),
+ {}
+ };
+diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
+index 810204d758ab3..d1c29fcd07ae9 100644
+--- a/drivers/platform/x86/intel/pmc/core.h
++++ b/drivers/platform/x86/intel/pmc/core.h
+@@ -247,6 +247,14 @@ enum ppfear_regs {
+ #define MTL_LPM_STATUS_LATCH_EN_OFFSET 0x16F8
+ #define MTL_LPM_STATUS_OFFSET 0x1700
+ #define MTL_LPM_LIVE_STATUS_OFFSET 0x175C
++#define MTL_PMC_LTR_IOE_PMC 0x1C0C
++#define MTL_PMC_LTR_ESE 0x1BAC
++#define MTL_SOCM_NUM_IP_IGN_ALLOWED 25
++#define MTL_SOC_PMC_MMIO_REG_LEN 0x2708
++#define MTL_PMC_LTR_SPG 0x1B74
++
++/* Meteor Lake PGD PFET Enable Ack Status */
++#define MTL_SOCM_PPFEAR_NUM_ENTRIES 8
+
+ extern const char *pmc_lpm_modes[];
+
+@@ -393,7 +401,25 @@ extern const struct pmc_bit_map adl_vnn_req_status_3_map[];
+ extern const struct pmc_bit_map adl_vnn_misc_status_map[];
+ extern const struct pmc_bit_map *adl_lpm_maps[];
+ extern const struct pmc_reg_map adl_reg_map;
+-extern const struct pmc_reg_map mtl_reg_map;
++extern const struct pmc_bit_map mtl_socm_pfear_map[];
++extern const struct pmc_bit_map *ext_mtl_socm_pfear_map[];
++extern const struct pmc_bit_map mtl_socm_ltr_show_map[];
++extern const struct pmc_bit_map mtl_socm_clocksource_status_map[];
++extern const struct pmc_bit_map mtl_socm_power_gating_status_0_map[];
++extern const struct pmc_bit_map mtl_socm_power_gating_status_1_map[];
++extern const struct pmc_bit_map mtl_socm_power_gating_status_2_map[];
++extern const struct pmc_bit_map mtl_socm_d3_status_0_map[];
++extern const struct pmc_bit_map mtl_socm_d3_status_1_map[];
++extern const struct pmc_bit_map mtl_socm_d3_status_2_map[];
++extern const struct pmc_bit_map mtl_socm_d3_status_3_map[];
++extern const struct pmc_bit_map mtl_socm_vnn_req_status_0_map[];
++extern const struct pmc_bit_map mtl_socm_vnn_req_status_1_map[];
++extern const struct pmc_bit_map mtl_socm_vnn_req_status_2_map[];
++extern const struct pmc_bit_map mtl_socm_vnn_req_status_3_map[];
++extern const struct pmc_bit_map mtl_socm_vnn_misc_status_map[];
++extern const struct pmc_bit_map mtl_socm_signal_status_map[];
++extern const struct pmc_bit_map *mtl_socm_lpm_maps[];
++extern const struct pmc_reg_map mtl_socm_reg_map;
+
+ extern void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev);
+ extern int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value);
+diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c
+index eeb3bd8c2502d..de9348e031003 100644
+--- a/drivers/platform/x86/intel/pmc/mtl.c
++++ b/drivers/platform/x86/intel/pmc/mtl.c
+@@ -10,28 +10,458 @@
+
+ #include "core.h"
+
+-const struct pmc_reg_map mtl_reg_map = {
+- .pfear_sts = ext_tgl_pfear_map,
++/*
++ * Die Mapping to Product.
++ * Product SOCDie IOEDie PCHDie
++ * MTL-M SOC-M IOE-M None
++ * MTL-P SOC-M IOE-P None
++ * MTL-S SOC-S IOE-P PCH-S
++ */
++
++const struct pmc_bit_map mtl_socm_pfear_map[] = {
++ {"PMC", BIT(0)},
++ {"OPI", BIT(1)},
++ {"SPI", BIT(2)},
++ {"XHCI", BIT(3)},
++ {"SPA", BIT(4)},
++ {"SPB", BIT(5)},
++ {"SPC", BIT(6)},
++ {"GBE", BIT(7)},
++
++ {"SATA", BIT(0)},
++ {"DSP0", BIT(1)},
++ {"DSP1", BIT(2)},
++ {"DSP2", BIT(3)},
++ {"DSP3", BIT(4)},
++ {"SPD", BIT(5)},
++ {"LPSS", BIT(6)},
++ {"LPC", BIT(7)},
++
++ {"SMB", BIT(0)},
++ {"ISH", BIT(1)},
++ {"P2SB", BIT(2)},
++ {"NPK_VNN", BIT(3)},
++ {"SDX", BIT(4)},
++ {"SPE", BIT(5)},
++ {"FUSE", BIT(6)},
++ {"SBR8", BIT(7)},
++
++ {"RSVD24", BIT(0)},
++ {"OTG", BIT(1)},
++ {"EXI", BIT(2)},
++ {"CSE", BIT(3)},
++ {"CSME_KVM", BIT(4)},
++ {"CSME_PMT", BIT(5)},
++ {"CSME_CLINK", BIT(6)},
++ {"CSME_PTIO", BIT(7)},
++
++ {"CSME_USBR", BIT(0)},
++ {"CSME_SUSRAM", BIT(1)},
++ {"CSME_SMT1", BIT(2)},
++ {"RSVD35", BIT(3)},
++ {"CSME_SMS2", BIT(4)},
++ {"CSME_SMS", BIT(5)},
++ {"CSME_RTC", BIT(6)},
++ {"CSME_PSF", BIT(7)},
++
++ {"SBR0", BIT(0)},
++ {"SBR1", BIT(1)},
++ {"SBR2", BIT(2)},
++ {"SBR3", BIT(3)},
++ {"SBR4", BIT(4)},
++ {"SBR5", BIT(5)},
++ {"RSVD46", BIT(6)},
++ {"PSF1", BIT(7)},
++
++ {"PSF2", BIT(0)},
++ {"PSF3", BIT(1)},
++ {"PSF4", BIT(2)},
++ {"CNVI", BIT(3)},
++ {"UFSX2", BIT(4)},
++ {"EMMC", BIT(5)},
++ {"SPF", BIT(6)},
++ {"SBR6", BIT(7)},
++
++ {"SBR7", BIT(0)},
++ {"NPK_AON", BIT(1)},
++ {"HDA4", BIT(2)},
++ {"HDA5", BIT(3)},
++ {"HDA6", BIT(4)},
++ {"PSF6", BIT(5)},
++ {"RSVD62", BIT(6)},
++ {"RSVD63", BIT(7)},
++ {}
++};
++
++const struct pmc_bit_map *ext_mtl_socm_pfear_map[] = {
++ mtl_socm_pfear_map,
++ NULL
++};
++
++const struct pmc_bit_map mtl_socm_ltr_show_map[] = {
++ {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
++ {"SOUTHPORT_B", CNP_PMC_LTR_SPB},
++ {"SATA", CNP_PMC_LTR_SATA},
++ {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
++ {"XHCI", CNP_PMC_LTR_XHCI},
++ {"SOUTHPORT_F", ADL_PMC_LTR_SPF},
++ {"ME", CNP_PMC_LTR_ME},
++ {"SATA1", CNP_PMC_LTR_EVA},
++ {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
++ {"HD_AUDIO", CNP_PMC_LTR_AZ},
++ {"CNV", CNP_PMC_LTR_CNV},
++ {"LPSS", CNP_PMC_LTR_LPSS},
++ {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
++ {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
++ {"SATA2", CNP_PMC_LTR_CAM},
++ {"ESPI", CNP_PMC_LTR_ESPI},
++ {"SCC", CNP_PMC_LTR_SCC},
++ {"ISH", CNP_PMC_LTR_ISH},
++ {"UFSX2", CNP_PMC_LTR_UFSX2},
++ {"EMMC", CNP_PMC_LTR_EMMC},
++ {"WIGIG", ICL_PMC_LTR_WIGIG},
++ {"THC0", TGL_PMC_LTR_THC0},
++ {"THC1", TGL_PMC_LTR_THC1},
++ {"SOUTHPORT_G", MTL_PMC_LTR_SPG},
++ {"ESE", MTL_PMC_LTR_ESE},
++ {"IOE_PMC", MTL_PMC_LTR_IOE_PMC},
++
++ /* Below two cannot be used for LTR_IGNORE */
++ {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
++ {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
++ {}
++};
++
++const struct pmc_bit_map mtl_socm_clocksource_status_map[] = {
++ {"AON2_OFF_STS", BIT(0)},
++ {"AON3_OFF_STS", BIT(1)},
++ {"AON4_OFF_STS", BIT(2)},
++ {"AON5_OFF_STS", BIT(3)},
++ {"AON1_OFF_STS", BIT(4)},
++ {"XTAL_LVM_OFF_STS", BIT(5)},
++ {"MPFPW1_0_PLL_OFF_STS", BIT(6)},
++ {"MPFPW1_1_PLL_OFF_STS", BIT(7)},
++ {"USB3_PLL_OFF_STS", BIT(8)},
++ {"AON3_SPL_OFF_STS", BIT(9)},
++ {"MPFPW2_0_PLL_OFF_STS", BIT(12)},
++ {"MPFPW3_0_PLL_OFF_STS", BIT(13)},
++ {"XTAL_AGGR_OFF_STS", BIT(17)},
++ {"USB2_PLL_OFF_STS", BIT(18)},
++ {"FILTER_PLL_OFF_STS", BIT(22)},
++ {"ACE_PLL_OFF_STS", BIT(24)},
++ {"FABRIC_PLL_OFF_STS", BIT(25)},
++ {"SOC_PLL_OFF_STS", BIT(26)},
++ {"PCIFAB_PLL_OFF_STS", BIT(27)},
++ {"REF_PLL_OFF_STS", BIT(28)},
++ {"IMG_PLL_OFF_STS", BIT(29)},
++ {"RTC_PLL_OFF_STS", BIT(31)},
++ {}
++};
++
++const struct pmc_bit_map mtl_socm_power_gating_status_0_map[] = {
++ {"PMC_PGD0_PG_STS", BIT(0)},
++ {"DMI_PGD0_PG_STS", BIT(1)},
++ {"ESPISPI_PGD0_PG_STS", BIT(2)},
++ {"XHCI_PGD0_PG_STS", BIT(3)},
++ {"SPA_PGD0_PG_STS", BIT(4)},
++ {"SPB_PGD0_PG_STS", BIT(5)},
++ {"SPC_PGD0_PG_STS", BIT(6)},
++ {"GBE_PGD0_PG_STS", BIT(7)},
++ {"SATA_PGD0_PG_STS", BIT(8)},
++ {"PSF13_PGD0_PG_STS", BIT(9)},
++ {"SOC_D2D_PGD3_PG_STS", BIT(10)},
++ {"MPFPW3_PGD0_PG_STS", BIT(11)},
++ {"ESE_PGD0_PG_STS", BIT(12)},
++ {"SPD_PGD0_PG_STS", BIT(13)},
++ {"LPSS_PGD0_PG_STS", BIT(14)},
++ {"LPC_PGD0_PG_STS", BIT(15)},
++ {"SMB_PGD0_PG_STS", BIT(16)},
++ {"ISH_PGD0_PG_STS", BIT(17)},
++ {"P2S_PGD0_PG_STS", BIT(18)},
++ {"NPK_PGD0_PG_STS", BIT(19)},
++ {"DBG_SBR_PGD0_PG_STS", BIT(20)},
++ {"SBRG_PGD0_PG_STS", BIT(21)},
++ {"FUSE_PGD0_PG_STS", BIT(22)},
++ {"SBR8_PGD0_PG_STS", BIT(23)},
++ {"SOC_D2D_PGD2_PG_STS", BIT(24)},
++ {"XDCI_PGD0_PG_STS", BIT(25)},
++ {"EXI_PGD0_PG_STS", BIT(26)},
++ {"CSE_PGD0_PG_STS", BIT(27)},
++ {"KVMCC_PGD0_PG_STS", BIT(28)},
++ {"PMT_PGD0_PG_STS", BIT(29)},
++ {"CLINK_PGD0_PG_STS", BIT(30)},
++ {"PTIO_PGD0_PG_STS", BIT(31)},
++ {}
++};
++
++const struct pmc_bit_map mtl_socm_power_gating_status_1_map[] = {
++ {"USBR0_PGD0_PG_STS", BIT(0)},
++ {"SUSRAM_PGD0_PG_STS", BIT(1)},
++ {"SMT1_PGD0_PG_STS", BIT(2)},
++ {"FIACPCB_U_PGD0_PG_STS", BIT(3)},
++ {"SMS2_PGD0_PG_STS", BIT(4)},
++ {"SMS1_PGD0_PG_STS", BIT(5)},
++ {"CSMERTC_PGD0_PG_STS", BIT(6)},
++ {"CSMEPSF_PGD0_PG_STS", BIT(7)},
++ {"SBR0_PGD0_PG_STS", BIT(8)},
++ {"SBR1_PGD0_PG_STS", BIT(9)},
++ {"SBR2_PGD0_PG_STS", BIT(10)},
++ {"SBR3_PGD0_PG_STS", BIT(11)},
++ {"U3FPW1_PGD0_PG_STS", BIT(12)},
++ {"SBR5_PGD0_PG_STS", BIT(13)},
++ {"MPFPW1_PGD0_PG_STS", BIT(14)},
++ {"UFSPW1_PGD0_PG_STS", BIT(15)},
++ {"FIA_X_PGD0_PG_STS", BIT(16)},
++ {"SOC_D2D_PGD0_PG_STS", BIT(17)},
++ {"MPFPW2_PGD0_PG_STS", BIT(18)},
++ {"CNVI_PGD0_PG_STS", BIT(19)},
++ {"UFSX2_PGD0_PG_STS", BIT(20)},
++ {"ENDBG_PGD0_PG_STS", BIT(21)},
++ {"DBG_PSF_PGD0_PG_STS", BIT(22)},
++ {"SBR6_PGD0_PG_STS", BIT(23)},
++ {"SBR7_PGD0_PG_STS", BIT(24)},
++ {"NPK_PGD1_PG_STS", BIT(25)},
++ {"FIACPCB_X_PGD0_PG_STS", BIT(26)},
++ {"DBC_PGD0_PG_STS", BIT(27)},
++ {"FUSEGPSB_PGD0_PG_STS", BIT(28)},
++ {"PSF6_PGD0_PG_STS", BIT(29)},
++ {"PSF7_PGD0_PG_STS", BIT(30)},
++ {"GBETSN1_PGD0_PG_STS", BIT(31)},
++ {}
++};
++
++const struct pmc_bit_map mtl_socm_power_gating_status_2_map[] = {
++ {"PSF8_PGD0_PG_STS", BIT(0)},
++ {"FIA_PGD0_PG_STS", BIT(1)},
++ {"SOC_D2D_PGD1_PG_STS", BIT(2)},
++ {"FIA_U_PGD0_PG_STS", BIT(3)},
++ {"TAM_PGD0_PG_STS", BIT(4)},
++ {"GBETSN_PGD0_PG_STS", BIT(5)},
++ {"TBTLSX_PGD0_PG_STS", BIT(6)},
++ {"THC0_PGD0_PG_STS", BIT(7)},
++ {"THC1_PGD0_PG_STS", BIT(8)},
++ {"PMC_PGD1_PG_STS", BIT(9)},
++ {"GNA_PGD0_PG_STS", BIT(10)},
++ {"ACE_PGD0_PG_STS", BIT(11)},
++ {"ACE_PGD1_PG_STS", BIT(12)},
++ {"ACE_PGD2_PG_STS", BIT(13)},
++ {"ACE_PGD3_PG_STS", BIT(14)},
++ {"ACE_PGD4_PG_STS", BIT(15)},
++ {"ACE_PGD5_PG_STS", BIT(16)},
++ {"ACE_PGD6_PG_STS", BIT(17)},
++ {"ACE_PGD7_PG_STS", BIT(18)},
++ {"ACE_PGD8_PG_STS", BIT(19)},
++ {"FIA_PGS_PGD0_PG_STS", BIT(20)},
++ {"FIACPCB_PGS_PGD0_PG_STS", BIT(21)},
++ {"FUSEPMSB_PGD0_PG_STS", BIT(22)},
++ {}
++};
++
++const struct pmc_bit_map mtl_socm_d3_status_0_map[] = {
++ {"LPSS_D3_STS", BIT(3)},
++ {"XDCI_D3_STS", BIT(4)},
++ {"XHCI_D3_STS", BIT(5)},
++ {"SPA_D3_STS", BIT(12)},
++ {"SPB_D3_STS", BIT(13)},
++ {"SPC_D3_STS", BIT(14)},
++ {"SPD_D3_STS", BIT(15)},
++ {"ESPISPI_D3_STS", BIT(18)},
++ {"SATA_D3_STS", BIT(20)},
++ {"PSTH_D3_STS", BIT(21)},
++ {"DMI_D3_STS", BIT(22)},
++ {}
++};
++
++const struct pmc_bit_map mtl_socm_d3_status_1_map[] = {
++ {"GBETSN1_D3_STS", BIT(14)},
++ {"GBE_D3_STS", BIT(19)},
++ {"ITSS_D3_STS", BIT(23)},
++ {"P2S_D3_STS", BIT(24)},
++ {"CNVI_D3_STS", BIT(27)},
++ {"UFSX2_D3_STS", BIT(28)},
++ {}
++};
++
++const struct pmc_bit_map mtl_socm_d3_status_2_map[] = {
++ {"GNA_D3_STS", BIT(0)},
++ {"CSMERTC_D3_STS", BIT(1)},
++ {"SUSRAM_D3_STS", BIT(2)},
++ {"CSE_D3_STS", BIT(4)},
++ {"KVMCC_D3_STS", BIT(5)},
++ {"USBR0_D3_STS", BIT(6)},
++ {"ISH_D3_STS", BIT(7)},
++ {"SMT1_D3_STS", BIT(8)},
++ {"SMT2_D3_STS", BIT(9)},
++ {"SMT3_D3_STS", BIT(10)},
++ {"CLINK_D3_STS", BIT(14)},
++ {"PTIO_D3_STS", BIT(16)},
++ {"PMT_D3_STS", BIT(17)},
++ {"SMS1_D3_STS", BIT(18)},
++ {"SMS2_D3_STS", BIT(19)},
++ {}
++};
++
++const struct pmc_bit_map mtl_socm_d3_status_3_map[] = {
++ {"ESE_D3_STS", BIT(2)},
++ {"GBETSN_D3_STS", BIT(13)},
++ {"THC0_D3_STS", BIT(14)},
++ {"THC1_D3_STS", BIT(15)},
++ {"ACE_D3_STS", BIT(23)},
++ {}
++};
++
++const struct pmc_bit_map mtl_socm_vnn_req_status_0_map[] = {
++ {"LPSS_VNN_REQ_STS", BIT(3)},
++ {"FIA_VNN_REQ_STS", BIT(17)},
++ {"ESPISPI_VNN_REQ_STS", BIT(18)},
++ {}
++};
++
++const struct pmc_bit_map mtl_socm_vnn_req_status_1_map[] = {
++ {"NPK_VNN_REQ_STS", BIT(4)},
++ {"DFXAGG_VNN_REQ_STS", BIT(8)},
++ {"EXI_VNN_REQ_STS", BIT(9)},
++ {"P2D_VNN_REQ_STS", BIT(18)},
++ {"GBE_VNN_REQ_STS", BIT(19)},
++ {"SMB_VNN_REQ_STS", BIT(25)},
++ {"LPC_VNN_REQ_STS", BIT(26)},
++ {}
++};
++
++const struct pmc_bit_map mtl_socm_vnn_req_status_2_map[] = {
++ {"CSMERTC_VNN_REQ_STS", BIT(1)},
++ {"CSE_VNN_REQ_STS", BIT(4)},
++ {"ISH_VNN_REQ_STS", BIT(7)},
++ {"SMT1_VNN_REQ_STS", BIT(8)},
++ {"CLINK_VNN_REQ_STS", BIT(14)},
++ {"SMS1_VNN_REQ_STS", BIT(18)},
++ {"SMS2_VNN_REQ_STS", BIT(19)},
++ {"GPIOCOM4_VNN_REQ_STS", BIT(20)},
++ {"GPIOCOM3_VNN_REQ_STS", BIT(21)},
++ {"GPIOCOM2_VNN_REQ_STS", BIT(22)},
++ {"GPIOCOM1_VNN_REQ_STS", BIT(23)},
++ {"GPIOCOM0_VNN_REQ_STS", BIT(24)},
++ {}
++};
++
++const struct pmc_bit_map mtl_socm_vnn_req_status_3_map[] = {
++ {"ESE_VNN_REQ_STS", BIT(2)},
++ {"DTS0_VNN_REQ_STS", BIT(7)},
++ {"GPIOCOM5_VNN_REQ_STS", BIT(11)},
++ {}
++};
++
++const struct pmc_bit_map mtl_socm_vnn_misc_status_map[] = {
++ {"CPU_C10_REQ_STS", BIT(0)},
++ {"TS_OFF_REQ_STS", BIT(1)},
++ {"PNDE_MET_REQ_STS", BIT(2)},
++ {"PCIE_DEEP_PM_REQ_STS", BIT(3)},
++ {"PMC_CLK_THROTTLE_EN_REQ_STS", BIT(4)},
++ {"NPK_VNNAON_REQ_STS", BIT(5)},
++ {"VNN_SOC_REQ_STS", BIT(6)},
++ {"ISH_VNNAON_REQ_STS", BIT(7)},
++ {"IOE_COND_MET_S02I2_0_REQ_STS", BIT(8)},
++ {"IOE_COND_MET_S02I2_1_REQ_STS", BIT(9)},
++ {"IOE_COND_MET_S02I2_2_REQ_STS", BIT(10)},
++ {"PLT_GREATER_REQ_STS", BIT(11)},
++ {"PCIE_CLKREQ_REQ_STS", BIT(12)},
++ {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13)},
++ {"PM_SYNC_STATES_REQ_STS", BIT(14)},
++ {"EA_REQ_STS", BIT(15)},
++ {"MPHY_CORE_OFF_REQ_STS", BIT(16)},
++ {"BRK_EV_EN_REQ_STS", BIT(17)},
++ {"AUTO_DEMO_EN_REQ_STS", BIT(18)},
++ {"ITSS_CLK_SRC_REQ_STS", BIT(19)},
++ {"LPC_CLK_SRC_REQ_STS", BIT(20)},
++ {"ARC_IDLE_REQ_STS", BIT(21)},
++ {"MPHY_SUS_REQ_STS", BIT(22)},
++ {"FIA_DEEP_PM_REQ_STS", BIT(23)},
++ {"UXD_CONNECTED_REQ_STS", BIT(24)},
++ {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25)},
++ {"USB2_VNNAON_ACT_REQ_STS", BIT(26)},
++ {"PRE_WAKE0_REQ_STS", BIT(27)},
++ {"PRE_WAKE1_REQ_STS", BIT(28)},
++ {"PRE_WAKE2_EN_REQ_STS", BIT(29)},
++ {"WOV_REQ_STS", BIT(30)},
++ {"CNVI_V1P05_REQ_STS", BIT(31)},
++ {}
++};
++
++const struct pmc_bit_map mtl_socm_signal_status_map[] = {
++ {"LSX_Wake0_En_STS", BIT(0)},
++ {"LSX_Wake0_Pol_STS", BIT(1)},
++ {"LSX_Wake1_En_STS", BIT(2)},
++ {"LSX_Wake1_Pol_STS", BIT(3)},
++ {"LSX_Wake2_En_STS", BIT(4)},
++ {"LSX_Wake2_Pol_STS", BIT(5)},
++ {"LSX_Wake3_En_STS", BIT(6)},
++ {"LSX_Wake3_Pol_STS", BIT(7)},
++ {"LSX_Wake4_En_STS", BIT(8)},
++ {"LSX_Wake4_Pol_STS", BIT(9)},
++ {"LSX_Wake5_En_STS", BIT(10)},
++ {"LSX_Wake5_Pol_STS", BIT(11)},
++ {"LSX_Wake6_En_STS", BIT(12)},
++ {"LSX_Wake6_Pol_STS", BIT(13)},
++ {"LSX_Wake7_En_STS", BIT(14)},
++ {"LSX_Wake7_Pol_STS", BIT(15)},
++ {"LPSS_Wake0_En_STS", BIT(16)},
++ {"LPSS_Wake0_Pol_STS", BIT(17)},
++ {"LPSS_Wake1_En_STS", BIT(18)},
++ {"LPSS_Wake1_Pol_STS", BIT(19)},
++ {"Int_Timer_SS_Wake0_En_STS", BIT(20)},
++ {"Int_Timer_SS_Wake0_Pol_STS", BIT(21)},
++ {"Int_Timer_SS_Wake1_En_STS", BIT(22)},
++ {"Int_Timer_SS_Wake1_Pol_STS", BIT(23)},
++ {"Int_Timer_SS_Wake2_En_STS", BIT(24)},
++ {"Int_Timer_SS_Wake2_Pol_STS", BIT(25)},
++ {"Int_Timer_SS_Wake3_En_STS", BIT(26)},
++ {"Int_Timer_SS_Wake3_Pol_STS", BIT(27)},
++ {"Int_Timer_SS_Wake4_En_STS", BIT(28)},
++ {"Int_Timer_SS_Wake4_Pol_STS", BIT(29)},
++ {"Int_Timer_SS_Wake5_En_STS", BIT(30)},
++ {"Int_Timer_SS_Wake5_Pol_STS", BIT(31)},
++ {}
++};
++
++const struct pmc_bit_map *mtl_socm_lpm_maps[] = {
++ mtl_socm_clocksource_status_map,
++ mtl_socm_power_gating_status_0_map,
++ mtl_socm_power_gating_status_1_map,
++ mtl_socm_power_gating_status_2_map,
++ mtl_socm_d3_status_0_map,
++ mtl_socm_d3_status_1_map,
++ mtl_socm_d3_status_2_map,
++ mtl_socm_d3_status_3_map,
++ mtl_socm_vnn_req_status_0_map,
++ mtl_socm_vnn_req_status_1_map,
++ mtl_socm_vnn_req_status_2_map,
++ mtl_socm_vnn_req_status_3_map,
++ mtl_socm_vnn_misc_status_map,
++ mtl_socm_signal_status_map,
++ NULL
++};
++
++const struct pmc_reg_map mtl_socm_reg_map = {
++ .pfear_sts = ext_mtl_socm_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
+- .ltr_show_sts = adl_ltr_show_map,
++ .ltr_show_sts = mtl_socm_ltr_show_map,
+ .msr_sts = msr_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+- .regmap_length = CNP_PMC_MMIO_REG_LEN,
++ .regmap_length = MTL_SOC_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+- .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
++ .ppfear_buckets = MTL_SOCM_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+- .ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED,
+- .lpm_num_modes = ADL_LPM_NUM_MODES,
+ .lpm_num_maps = ADL_LPM_NUM_MAPS,
++ .ltr_ignore_max = MTL_SOCM_NUM_IP_IGN_ALLOWED,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .etr3_offset = ETR3_OFFSET,
+ .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET,
+ .lpm_priority_offset = MTL_LPM_PRI_OFFSET,
+ .lpm_en_offset = MTL_LPM_EN_OFFSET,
+ .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET,
+- .lpm_sts = adl_lpm_maps,
++ .lpm_sts = mtl_socm_lpm_maps,
+ .lpm_status_offset = MTL_LPM_STATUS_OFFSET,
+ .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
+ };
+@@ -47,6 +477,6 @@ void mtl_core_configure(struct pmc_dev *pmcdev)
+
+ void mtl_core_init(struct pmc_dev *pmcdev)
+ {
+- pmcdev->map = &mtl_reg_map;
++ pmcdev->map = &mtl_socm_reg_map;
+ pmcdev->core_configure = mtl_core_configure;
+ }
+diff --git a/drivers/platform/x86/lenovo-yogabook-wmi.c b/drivers/platform/x86/lenovo-yogabook-wmi.c
+index 5f4bd1eec38a9..d57fcc8388519 100644
+--- a/drivers/platform/x86/lenovo-yogabook-wmi.c
++++ b/drivers/platform/x86/lenovo-yogabook-wmi.c
+@@ -2,7 +2,6 @@
+ /* WMI driver for Lenovo Yoga Book YB1-X90* / -X91* tablets */
+
+ #include <linux/acpi.h>
+-#include <linux/devm-helpers.h>
+ #include <linux/gpio/consumer.h>
+ #include <linux/gpio/machine.h>
+ #include <linux/interrupt.h>
+@@ -248,10 +247,7 @@ static int yogabook_wmi_probe(struct wmi_device *wdev, const void *context)
+ data->brightness = YB_KBD_BL_DEFAULT;
+ set_bit(YB_KBD_IS_ON, &data->flags);
+ set_bit(YB_DIGITIZER_IS_ON, &data->flags);
+-
+- r = devm_work_autocancel(&wdev->dev, &data->work, yogabook_wmi_work);
+- if (r)
+- return r;
++ INIT_WORK(&data->work, yogabook_wmi_work);
+
+ data->kbd_adev = acpi_dev_get_first_match_dev("GDIX1001", NULL, -1);
+ if (!data->kbd_adev) {
+@@ -299,10 +295,12 @@ static int yogabook_wmi_probe(struct wmi_device *wdev, const void *context)
+ }
+ data->backside_hall_irq = r;
+
+- r = devm_request_irq(&wdev->dev, data->backside_hall_irq,
+- yogabook_backside_hall_irq,
+- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+- "backside_hall_sw", data);
++ /* Set default brightness before enabling the IRQ */
++ yogabook_wmi_set_kbd_backlight(data->wdev, YB_KBD_BL_DEFAULT);
++
++ r = request_irq(data->backside_hall_irq, yogabook_backside_hall_irq,
++ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
++ "backside_hall_sw", data);
+ if (r) {
+ dev_err_probe(&wdev->dev, r, "Requesting backside_hall_sw IRQ\n");
+ goto error_put_devs;
+@@ -318,11 +316,14 @@ static int yogabook_wmi_probe(struct wmi_device *wdev, const void *context)
+ r = devm_led_classdev_register(&wdev->dev, &data->kbd_bl_led);
+ if (r < 0) {
+ dev_err_probe(&wdev->dev, r, "Registering backlight LED device\n");
+- goto error_put_devs;
++ goto error_free_irq;
+ }
+
+ return 0;
+
++error_free_irq:
++ free_irq(data->backside_hall_irq, data);
++ cancel_work_sync(&data->work);
+ error_put_devs:
+ put_device(data->dig_dev);
+ put_device(data->kbd_dev);
+@@ -334,6 +335,19 @@ error_put_devs:
+ static void yogabook_wmi_remove(struct wmi_device *wdev)
+ {
+ struct yogabook_wmi *data = dev_get_drvdata(&wdev->dev);
++ int r = 0;
++
++ free_irq(data->backside_hall_irq, data);
++ cancel_work_sync(&data->work);
++
++ if (!test_bit(YB_KBD_IS_ON, &data->flags))
++ r |= device_reprobe(data->kbd_dev);
++
++ if (!test_bit(YB_DIGITIZER_IS_ON, &data->flags))
++ r |= device_reprobe(data->dig_dev);
++
++ if (r)
++ dev_warn(&wdev->dev, "Reprobe of devices failed\n");
+
+ put_device(data->dig_dev);
+ put_device(data->kbd_dev);
+diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
+index 78dc82bda4dde..4b7f2a969dfec 100644
+--- a/drivers/platform/x86/think-lmi.c
++++ b/drivers/platform/x86/think-lmi.c
+@@ -14,6 +14,7 @@
+ #include <linux/acpi.h>
+ #include <linux/errno.h>
+ #include <linux/fs.h>
++#include <linux/mutex.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
+ #include <linux/dmi.h>
+@@ -171,7 +172,7 @@ MODULE_PARM_DESC(debug_support, "Enable debug command support");
+ #define TLMI_POP_PWD (1 << 0)
+ #define TLMI_PAP_PWD (1 << 1)
+ #define TLMI_HDD_PWD (1 << 2)
+-#define TLMI_SYS_PWD (1 << 3)
++#define TLMI_SMP_PWD (1 << 6) /* System Management */
+ #define TLMI_CERT (1 << 7)
+
+ #define to_tlmi_pwd_setting(kobj) container_of(kobj, struct tlmi_pwd_setting, kobj)
+@@ -195,6 +196,7 @@ static const char * const level_options[] = {
+ };
+ static struct think_lmi tlmi_priv;
+ static struct class *fw_attr_class;
++static DEFINE_MUTEX(tlmi_mutex);
+
+ /* ------ Utility functions ------------*/
+ /* Strip out CR if one is present */
+@@ -437,6 +439,9 @@ static ssize_t new_password_store(struct kobject *kobj,
+ /* Strip out CR if one is present, setting password won't work if it is present */
+ strip_cr(new_pwd);
+
++ /* Use lock in case multiple WMI operations needed */
++ mutex_lock(&tlmi_mutex);
++
+ pwdlen = strlen(new_pwd);
+ /* pwdlen == 0 is allowed to clear the password */
+ if (pwdlen && ((pwdlen < setting->minlen) || (pwdlen > setting->maxlen))) {
+@@ -456,9 +461,9 @@ static ssize_t new_password_store(struct kobject *kobj,
+ sprintf(pwd_type, "mhdp%d", setting->index);
+ } else if (setting == tlmi_priv.pwd_nvme) {
+ if (setting->level == TLMI_LEVEL_USER)
+- sprintf(pwd_type, "unvp%d", setting->index);
++ sprintf(pwd_type, "udrp%d", setting->index);
+ else
+- sprintf(pwd_type, "mnvp%d", setting->index);
++ sprintf(pwd_type, "adrp%d", setting->index);
+ } else {
+ sprintf(pwd_type, "%s", setting->pwd_type);
+ }
+@@ -493,6 +498,7 @@ static ssize_t new_password_store(struct kobject *kobj,
+ kfree(auth_str);
+ }
+ out:
++ mutex_unlock(&tlmi_mutex);
+ kfree(new_pwd);
+ return ret ?: count;
+ }
+@@ -982,6 +988,9 @@ static ssize_t current_value_store(struct kobject *kobj,
+ /* Strip out CR if one is present */
+ strip_cr(new_setting);
+
++ /* Use lock in case multiple WMI operations needed */
++ mutex_lock(&tlmi_mutex);
++
+ /* Check if certificate authentication is enabled and active */
+ if (tlmi_priv.certificate_support && tlmi_priv.pwd_admin->cert_installed) {
+ if (!tlmi_priv.pwd_admin->signature || !tlmi_priv.pwd_admin->save_signature) {
+@@ -1040,6 +1049,7 @@ static ssize_t current_value_store(struct kobject *kobj,
+ kobject_uevent(&tlmi_priv.class_dev->kobj, KOBJ_CHANGE);
+ }
+ out:
++ mutex_unlock(&tlmi_mutex);
+ kfree(auth_str);
+ kfree(set_str);
+ kfree(new_setting);
+@@ -1512,11 +1522,11 @@ static int tlmi_analyze(void)
+ tlmi_priv.pwd_power->valid = true;
+
+ if (tlmi_priv.opcode_support) {
+- tlmi_priv.pwd_system = tlmi_create_auth("sys", "system");
++ tlmi_priv.pwd_system = tlmi_create_auth("smp", "system");
+ if (!tlmi_priv.pwd_system)
+ goto fail_clear_attr;
+
+- if (tlmi_priv.pwdcfg.core.password_state & TLMI_SYS_PWD)
++ if (tlmi_priv.pwdcfg.core.password_state & TLMI_SMP_PWD)
+ tlmi_priv.pwd_system->valid = true;
+
+ tlmi_priv.pwd_hdd = tlmi_create_auth("hdd", "hdd");
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index e40cbe81b12c1..97c6ec12d0829 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -10524,8 +10524,8 @@ unlock:
+ static void dytc_profile_refresh(void)
+ {
+ enum platform_profile_option profile;
+- int output, err = 0;
+- int perfmode, funcmode;
++ int output = 0, err = 0;
++ int perfmode, funcmode = 0;
+
+ mutex_lock(&dytc_mutex);
+ if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
+@@ -10538,6 +10538,8 @@ static void dytc_profile_refresh(void)
+ err = dytc_command(DYTC_CMD_GET, &output);
+ /* Check if we are PSC mode, or have AMT enabled */
+ funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
++ } else { /* Unknown profile mode */
++ err = -ENODEV;
+ }
+ mutex_unlock(&dytc_mutex);
+ if (err)
+diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
+index 90d33cd1b670a..b063f75117738 100644
+--- a/drivers/powercap/Kconfig
++++ b/drivers/powercap/Kconfig
+@@ -18,10 +18,12 @@ if POWERCAP
+ # Client driver configurations go here.
+ config INTEL_RAPL_CORE
+ tristate
++ depends on PCI
++ select IOSF_MBI
+
+ config INTEL_RAPL
+ tristate "Intel RAPL Support via MSR Interface"
+- depends on X86 && IOSF_MBI
++ depends on X86 && PCI
+ select INTEL_RAPL_CORE
+ help
+ This enables support for the Intel Running Average Power Limit (RAPL)
+diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
+index a27673706c3d6..9ea4797d70b44 100644
+--- a/drivers/powercap/intel_rapl_msr.c
++++ b/drivers/powercap/intel_rapl_msr.c
+@@ -22,7 +22,6 @@
+ #include <linux/processor.h>
+ #include <linux/platform_device.h>
+
+-#include <asm/iosf_mbi.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/intel-family.h>
+
+@@ -137,14 +136,14 @@ static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
+
+ /* List of verified CPUs. */
+ static const struct x86_cpu_id pl4_support_ids[] = {
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_TIGERLAKE_L, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_L, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_N, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE_P, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_METEORLAKE, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_METEORLAKE_L, X86_FEATURE_ANY },
++ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
++ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
++ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
++ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, NULL),
++ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
++ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
++ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL),
++ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, NULL),
+ {}
+ };
+
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 323e8187a98ff..443be7b6e31df 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1918,19 +1918,17 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+
+ if (err != -EEXIST)
+ regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
+- if (!regulator->debugfs) {
++ if (IS_ERR(regulator->debugfs))
+ rdev_dbg(rdev, "Failed to create debugfs directory\n");
+- } else {
+- debugfs_create_u32("uA_load", 0444, regulator->debugfs,
+- &regulator->uA_load);
+- debugfs_create_u32("min_uV", 0444, regulator->debugfs,
+- &regulator->voltage[PM_SUSPEND_ON].min_uV);
+- debugfs_create_u32("max_uV", 0444, regulator->debugfs,
+- &regulator->voltage[PM_SUSPEND_ON].max_uV);
+- debugfs_create_file("constraint_flags", 0444,
+- regulator->debugfs, regulator,
+- &constraint_flags_fops);
+- }
++
++ debugfs_create_u32("uA_load", 0444, regulator->debugfs,
++ &regulator->uA_load);
++ debugfs_create_u32("min_uV", 0444, regulator->debugfs,
++ &regulator->voltage[PM_SUSPEND_ON].min_uV);
++ debugfs_create_u32("max_uV", 0444, regulator->debugfs,
++ &regulator->voltage[PM_SUSPEND_ON].max_uV);
++ debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
++ regulator, &constraint_flags_fops);
+
+ /*
+ * Check now if the regulator is an always on regulator - if
+@@ -5263,10 +5261,8 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
+ }
+
+ rdev->debugfs = debugfs_create_dir(rname, debugfs_root);
+- if (IS_ERR(rdev->debugfs)) {
+- rdev_warn(rdev, "Failed to create debugfs directory\n");
+- return;
+- }
++ if (IS_ERR(rdev->debugfs))
++ rdev_dbg(rdev, "Failed to create debugfs directory\n");
+
+ debugfs_create_u32("use_count", 0444, rdev->debugfs,
+ &rdev->use_count);
+@@ -6186,7 +6182,7 @@ static int __init regulator_init(void)
+
+ debugfs_root = debugfs_create_dir("regulator", NULL);
+ if (IS_ERR(debugfs_root))
+- pr_warn("regulator: Failed to create debugfs directory\n");
++ pr_debug("regulator: Failed to create debugfs directory\n");
+
+ #ifdef CONFIG_DEBUG_FS
+ debugfs_create_file("supply_map", 0444, debugfs_root, NULL,
+diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
+index ffdecb12d654c..9bd70e4618d52 100644
+--- a/drivers/scsi/3w-xxxx.c
++++ b/drivers/scsi/3w-xxxx.c
+@@ -2305,8 +2305,10 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
+ TW_DISABLE_INTERRUPTS(tw_dev);
+
+ /* Initialize the card */
+- if (tw_reset_sequence(tw_dev))
++ if (tw_reset_sequence(tw_dev)) {
++ retval = -EINVAL;
+ goto out_release_mem_region;
++ }
+
+ /* Set host specific parameters */
+ host->max_id = TW_MAX_UNITS;
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index 62d2ca688cd14..e07242ac0f014 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -5466,9 +5466,19 @@ out:
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ spin_unlock_irq(&ndlp->lock);
+ }
++ lpfc_drop_node(vport, ndlp);
++ } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
++ ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE &&
++ ndlp->nlp_state != NLP_STE_PRLI_ISSUE) {
++ /* Drop ndlp if there is no planned or outstanding
++ * issued PRLI.
++ *
++ * In cases when the ndlp is acting as both an initiator
++ * and target function, let our issued PRLI determine
++ * the final ndlp kref drop.
++ */
++ lpfc_drop_node(vport, ndlp);
+ }
+-
+- lpfc_drop_node(vport, ndlp);
+ }
+
+ /* Release the originating I/O reference. */
+diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
+index 35e16600fc637..f2c7dd4db9c64 100644
+--- a/drivers/scsi/qedf/qedf_main.c
++++ b/drivers/scsi/qedf/qedf_main.c
+@@ -3043,9 +3043,8 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
+ * addresses of our queues
+ */
+ if (!qedf->p_cpuq) {
+- status = -EINVAL;
+ QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
+- goto mem_alloc_failure;
++ return -EINVAL;
+ }
+
+ qedf->global_queues = kzalloc((sizeof(struct global_queue *)
+diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c
+index e935187635267..25b4b71df9b89 100644
+--- a/drivers/soc/amlogic/meson-secure-pwrc.c
++++ b/drivers/soc/amlogic/meson-secure-pwrc.c
+@@ -105,7 +105,7 @@ static struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = {
+ SEC_PD(ACODEC, 0),
+ SEC_PD(AUDIO, 0),
+ SEC_PD(OTP, 0),
+- SEC_PD(DMA, 0),
++ SEC_PD(DMA, GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_IRQ_SAFE),
+ SEC_PD(SD_EMMC, 0),
+ SEC_PD(RAMA, 0),
+ /* SRAMB is used as ATF runtime memory, and should be always on */
+diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
+index 357c5800b112f..7afa796dbbb89 100644
+--- a/drivers/soc/fsl/qe/Kconfig
++++ b/drivers/soc/fsl/qe/Kconfig
+@@ -39,6 +39,7 @@ config QE_TDM
+
+ config QE_USB
+ bool
++ depends on QUICC_ENGINE
+ default y if USB_FSL_QE
+ help
+ QE USB Controller support
+diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
+index f26eb2f637d52..77d6299774427 100644
+--- a/drivers/soc/mediatek/mtk-svs.c
++++ b/drivers/soc/mediatek/mtk-svs.c
+@@ -2101,9 +2101,9 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
+ svsb = &svsp->banks[idx];
+
+ if (svsb->type == SVSB_HIGH)
+- svsb->opp_dev = svs_add_device_link(svsp, "mali");
++ svsb->opp_dev = svs_add_device_link(svsp, "gpu");
+ else if (svsb->type == SVSB_LOW)
+- svsb->opp_dev = svs_get_subsys_device(svsp, "mali");
++ svsb->opp_dev = svs_get_subsys_device(svsp, "gpu");
+
+ if (IS_ERR(svsb->opp_dev))
+ return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
+diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
+index c76381899ef49..f9d9b82b562da 100644
+--- a/drivers/soc/xilinx/xlnx_event_manager.c
++++ b/drivers/soc/xilinx/xlnx_event_manager.c
+@@ -192,11 +192,12 @@ static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
+ struct registered_event_data *eve_data;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
++ struct hlist_node *tmp;
+
+ is_need_to_unregister = false;
+
+ /* Check for existing entry in hash table for given cb_type */
+- hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
++ hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, PM_INIT_SUSPEND_CB) {
+ if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
+ /* Delete the list of callback */
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+@@ -228,11 +229,12 @@ static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event,
+ u64 key = ((u64)node_id << 32U) | (u64)event;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
++ struct hlist_node *tmp;
+
+ is_need_to_unregister = false;
+
+ /* Check for existing entry in hash table for given key id */
+- hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
++ hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, key) {
+ if (eve_data->key == key) {
+ /* Delete the list of callback */
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
+index c3bfb6c84cab2..4976e3b8923ee 100644
+--- a/drivers/spi/spi-dw-core.c
++++ b/drivers/spi/spi-dw-core.c
+@@ -426,7 +426,10 @@ static int dw_spi_transfer_one(struct spi_controller *master,
+ int ret;
+
+ dws->dma_mapped = 0;
+- dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
++ dws->n_bytes =
++ roundup_pow_of_two(DIV_ROUND_UP(transfer->bits_per_word,
++ BITS_PER_BYTE));
++
+ dws->tx = (void *)transfer->tx_buf;
+ dws->tx_len = transfer->len / dws->n_bytes;
+ dws->rx = transfer->rx_buf;
+diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
+index baf477383682d..d147519fe1089 100644
+--- a/drivers/spi/spi-geni-qcom.c
++++ b/drivers/spi/spi-geni-qcom.c
+@@ -35,7 +35,7 @@
+ #define CS_DEMUX_OUTPUT_SEL GENMASK(3, 0)
+
+ #define SE_SPI_TRANS_CFG 0x25c
+-#define CS_TOGGLE BIT(0)
++#define CS_TOGGLE BIT(1)
+
+ #define SE_SPI_WORD_LEN 0x268
+ #define WORD_LEN_MSK GENMASK(9, 0)
+diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c
+index 9235fda4ec1eb..337153042318f 100644
+--- a/drivers/thermal/amlogic_thermal.c
++++ b/drivers/thermal/amlogic_thermal.c
+@@ -285,7 +285,7 @@ static int amlogic_thermal_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- if (devm_thermal_add_hwmon_sysfs(pdata->tzd))
++ if (devm_thermal_add_hwmon_sysfs(&pdev->dev, pdata->tzd))
+ dev_warn(&pdev->dev, "Failed to add hwmon sysfs attributes\n");
+
+ ret = amlogic_thermal_initialize(pdata);
+diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c
+index 72b5d6f319c1d..e1bec196c5350 100644
+--- a/drivers/thermal/imx8mm_thermal.c
++++ b/drivers/thermal/imx8mm_thermal.c
+@@ -343,7 +343,7 @@ static int imx8mm_tmu_probe(struct platform_device *pdev)
+ }
+ tmu->sensors[i].hw_id = i;
+
+- if (devm_thermal_add_hwmon_sysfs(tmu->sensors[i].tzd))
++ if (devm_thermal_add_hwmon_sysfs(&pdev->dev, tmu->sensors[i].tzd))
+ dev_warn(&pdev->dev, "failed to add hwmon sysfs attributes\n");
+ }
+
+diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
+index f32e59e746231..e24572fc9e731 100644
+--- a/drivers/thermal/imx_sc_thermal.c
++++ b/drivers/thermal/imx_sc_thermal.c
+@@ -119,7 +119,7 @@ static int imx_sc_thermal_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- if (devm_thermal_add_hwmon_sysfs(sensor->tzd))
++ if (devm_thermal_add_hwmon_sysfs(&pdev->dev, sensor->tzd))
+ dev_warn(&pdev->dev, "failed to add hwmon sysfs attributes\n");
+ }
+
+diff --git a/drivers/thermal/k3_bandgap.c b/drivers/thermal/k3_bandgap.c
+index 22c9bcb899c37..df184b837cdd0 100644
+--- a/drivers/thermal/k3_bandgap.c
++++ b/drivers/thermal/k3_bandgap.c
+@@ -222,7 +222,7 @@ static int k3_bandgap_probe(struct platform_device *pdev)
+ goto err_alloc;
+ }
+
+- if (devm_thermal_add_hwmon_sysfs(data[id].tzd))
++ if (devm_thermal_add_hwmon_sysfs(dev, data[id].tzd))
+ dev_warn(dev, "Failed to add hwmon sysfs attributes\n");
+ }
+
+diff --git a/drivers/thermal/mediatek/auxadc_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c
+index ab730f9552d0e..585704d2df2be 100644
+--- a/drivers/thermal/mediatek/auxadc_thermal.c
++++ b/drivers/thermal/mediatek/auxadc_thermal.c
+@@ -1210,7 +1210,7 @@ static int mtk_thermal_probe(struct platform_device *pdev)
+ goto err_disable_clk_peri_therm;
+ }
+
+- ret = devm_thermal_add_hwmon_sysfs(tzdev);
++ ret = devm_thermal_add_hwmon_sysfs(&pdev->dev, tzdev);
+ if (ret)
+ dev_warn(&pdev->dev, "error in thermal_add_hwmon_sysfs");
+
+diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+index 31164ade2dd11..dcb24a94f3fb4 100644
+--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
++++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+@@ -689,7 +689,7 @@ static int adc_tm5_register_tzd(struct adc_tm5_chip *adc_tm)
+ return PTR_ERR(tzd);
+ }
+ adc_tm->channels[i].tzd = tzd;
+- if (devm_thermal_add_hwmon_sysfs(tzd))
++ if (devm_thermal_add_hwmon_sysfs(adc_tm->dev, tzd))
+ dev_warn(adc_tm->dev,
+ "Failed to add hwmon sysfs attributes\n");
+ }
+diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+index 101c75d0e13f3..c0cfb255c14e2 100644
+--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
++++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+@@ -459,7 +459,7 @@ static int qpnp_tm_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- if (devm_thermal_add_hwmon_sysfs(chip->tz_dev))
++ if (devm_thermal_add_hwmon_sysfs(&pdev->dev, chip->tz_dev))
+ dev_warn(&pdev->dev,
+ "Failed to add hwmon sysfs attributes\n");
+
+diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
+index e89c6f39a3aea..e9ce7b62b3818 100644
+--- a/drivers/thermal/qcom/tsens-v0_1.c
++++ b/drivers/thermal/qcom/tsens-v0_1.c
+@@ -243,6 +243,18 @@ static int calibrate_8974(struct tsens_priv *priv)
+ return 0;
+ }
+
++static int __init init_8226(struct tsens_priv *priv)
++{
++ priv->sensor[0].slope = 2901;
++ priv->sensor[1].slope = 2846;
++ priv->sensor[2].slope = 3038;
++ priv->sensor[3].slope = 2955;
++ priv->sensor[4].slope = 2901;
++ priv->sensor[5].slope = 2846;
++
++ return init_common(priv);
++}
++
+ static int __init init_8939(struct tsens_priv *priv) {
+ priv->sensor[0].slope = 2911;
+ priv->sensor[1].slope = 2789;
+@@ -258,7 +270,28 @@ static int __init init_8939(struct tsens_priv *priv) {
+ return init_common(priv);
+ }
+
+-/* v0.1: 8916, 8939, 8974, 9607 */
++static int __init init_9607(struct tsens_priv *priv)
++{
++ int i;
++
++ for (i = 0; i < priv->num_sensors; ++i)
++ priv->sensor[i].slope = 3000;
++
++ priv->sensor[0].p1_calib_offset = 1;
++ priv->sensor[0].p2_calib_offset = 1;
++ priv->sensor[1].p1_calib_offset = -4;
++ priv->sensor[1].p2_calib_offset = -2;
++ priv->sensor[2].p1_calib_offset = 4;
++ priv->sensor[2].p2_calib_offset = 8;
++ priv->sensor[3].p1_calib_offset = -3;
++ priv->sensor[3].p2_calib_offset = -5;
++ priv->sensor[4].p1_calib_offset = -4;
++ priv->sensor[4].p2_calib_offset = -4;
++
++ return init_common(priv);
++}
++
++/* v0.1: 8226, 8916, 8939, 8974, 9607 */
+
+ static struct tsens_features tsens_v0_1_feat = {
+ .ver_major = VER_0_1,
+@@ -313,6 +346,19 @@ static const struct tsens_ops ops_v0_1 = {
+ .get_temp = get_temp_common,
+ };
+
++static const struct tsens_ops ops_8226 = {
++ .init = init_8226,
++ .calibrate = tsens_calibrate_common,
++ .get_temp = get_temp_common,
++};
++
++struct tsens_plat_data data_8226 = {
++ .num_sensors = 6,
++ .ops = &ops_8226,
++ .feat = &tsens_v0_1_feat,
++ .fields = tsens_v0_1_regfields,
++};
++
+ static const struct tsens_ops ops_8916 = {
+ .init = init_common,
+ .calibrate = calibrate_8916,
+@@ -356,9 +402,15 @@ struct tsens_plat_data data_8974 = {
+ .fields = tsens_v0_1_regfields,
+ };
+
++static const struct tsens_ops ops_9607 = {
++ .init = init_9607,
++ .calibrate = tsens_calibrate_common,
++ .get_temp = get_temp_common,
++};
++
+ struct tsens_plat_data data_9607 = {
+ .num_sensors = 5,
+- .ops = &ops_v0_1,
++ .ops = &ops_9607,
+ .feat = &tsens_v0_1_feat,
+ .fields = tsens_v0_1_regfields,
+ };
+diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
+index 8020ead2794e9..38f5c783fb297 100644
+--- a/drivers/thermal/qcom/tsens.c
++++ b/drivers/thermal/qcom/tsens.c
+@@ -134,10 +134,12 @@ int tsens_read_calibration(struct tsens_priv *priv, int shift, u32 *p1, u32 *p2,
+ p1[i] = p1[i] + (base1 << shift);
+ break;
+ case TWO_PT_CALIB:
++ case TWO_PT_CALIB_NO_OFFSET:
+ for (i = 0; i < priv->num_sensors; i++)
+ p2[i] = (p2[i] + base2) << shift;
+ fallthrough;
+ case ONE_PT_CALIB2:
++ case ONE_PT_CALIB2_NO_OFFSET:
+ for (i = 0; i < priv->num_sensors; i++)
+ p1[i] = (p1[i] + base1) << shift;
+ break;
+@@ -149,6 +151,18 @@ int tsens_read_calibration(struct tsens_priv *priv, int shift, u32 *p1, u32 *p2,
+ }
+ }
+
++ /* Apply calibration offset workaround except for _NO_OFFSET modes */
++ switch (mode) {
++ case TWO_PT_CALIB:
++ for (i = 0; i < priv->num_sensors; i++)
++ p2[i] += priv->sensor[i].p2_calib_offset;
++ fallthrough;
++ case ONE_PT_CALIB2:
++ for (i = 0; i < priv->num_sensors; i++)
++ p1[i] += priv->sensor[i].p1_calib_offset;
++ break;
++ }
++
+ return mode;
+ }
+
+@@ -254,7 +268,7 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
+
+ if (!priv->sensor[i].slope)
+ priv->sensor[i].slope = SLOPE_DEFAULT;
+- if (mode == TWO_PT_CALIB) {
++ if (mode == TWO_PT_CALIB || mode == TWO_PT_CALIB_NO_OFFSET) {
+ /*
+ * slope (m) = adc_code2 - adc_code1 (y2 - y1)/
+ * temp_120_degc - temp_30_degc (x2 - x1)
+@@ -1095,6 +1109,9 @@ static const struct of_device_id tsens_table[] = {
+ }, {
+ .compatible = "qcom,mdm9607-tsens",
+ .data = &data_9607,
++ }, {
++ .compatible = "qcom,msm8226-tsens",
++ .data = &data_8226,
+ }, {
+ .compatible = "qcom,msm8916-tsens",
+ .data = &data_8916,
+@@ -1189,7 +1206,7 @@ static int tsens_register(struct tsens_priv *priv)
+ if (priv->ops->enable)
+ priv->ops->enable(priv, i);
+
+- if (devm_thermal_add_hwmon_sysfs(tzd))
++ if (devm_thermal_add_hwmon_sysfs(priv->dev, tzd))
+ dev_warn(priv->dev,
+ "Failed to add hwmon sysfs attributes\n");
+ }
+diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
+index dba9cd38f637c..1cd8f4fe0971f 100644
+--- a/drivers/thermal/qcom/tsens.h
++++ b/drivers/thermal/qcom/tsens.h
+@@ -10,6 +10,8 @@
+ #define ONE_PT_CALIB 0x1
+ #define ONE_PT_CALIB2 0x2
+ #define TWO_PT_CALIB 0x3
++#define ONE_PT_CALIB2_NO_OFFSET 0x6
++#define TWO_PT_CALIB_NO_OFFSET 0x7
+ #define CAL_DEGC_PT1 30
+ #define CAL_DEGC_PT2 120
+ #define SLOPE_FACTOR 1000
+@@ -57,6 +59,8 @@ struct tsens_sensor {
+ unsigned int hw_id;
+ int slope;
+ u32 status;
++ int p1_calib_offset;
++ int p2_calib_offset;
+ };
+
+ /**
+@@ -635,7 +639,7 @@ int get_temp_common(const struct tsens_sensor *s, int *temp);
+ extern struct tsens_plat_data data_8960;
+
+ /* TSENS v0.1 targets */
+-extern struct tsens_plat_data data_8916, data_8939, data_8974, data_9607;
++extern struct tsens_plat_data data_8226, data_8916, data_8939, data_8974, data_9607;
+
+ /* TSENS v1 targets */
+ extern struct tsens_plat_data data_tsens_v1, data_8976, data_8956;
+diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
+index 431c29c0898a7..dec66cf3eba2c 100644
+--- a/drivers/thermal/qoriq_thermal.c
++++ b/drivers/thermal/qoriq_thermal.c
+@@ -31,7 +31,6 @@
+ #define TMR_DISABLE 0x0
+ #define TMR_ME 0x80000000
+ #define TMR_ALPF 0x0c000000
+-#define TMR_MSITE_ALL GENMASK(15, 0)
+
+ #define REGS_TMTMIR 0x008 /* Temperature measurement interval Register */
+ #define TMTMIR_DEFAULT 0x0000000f
+@@ -105,6 +104,11 @@ static int tmu_get_temp(struct thermal_zone_device *tz, int *temp)
+ * within sensor range. TEMP is an 9 bit value representing
+ * temperature in KelVin.
+ */
++
++ regmap_read(qdata->regmap, REGS_TMR, &val);
++ if (!(val & TMR_ME))
++ return -EAGAIN;
++
+ if (regmap_read_poll_timeout(qdata->regmap,
+ REGS_TRITSR(qsensor->id),
+ val,
+@@ -128,15 +132,7 @@ static const struct thermal_zone_device_ops tmu_tz_ops = {
+ static int qoriq_tmu_register_tmu_zone(struct device *dev,
+ struct qoriq_tmu_data *qdata)
+ {
+- int id;
+-
+- if (qdata->ver == TMU_VER1) {
+- regmap_write(qdata->regmap, REGS_TMR,
+- TMR_MSITE_ALL | TMR_ME | TMR_ALPF);
+- } else {
+- regmap_write(qdata->regmap, REGS_V2_TMSR, TMR_MSITE_ALL);
+- regmap_write(qdata->regmap, REGS_TMR, TMR_ME | TMR_ALPF_V2);
+- }
++ int id, sites = 0;
+
+ for (id = 0; id < SITES_MAX; id++) {
+ struct thermal_zone_device *tzd;
+@@ -153,14 +149,26 @@ static int qoriq_tmu_register_tmu_zone(struct device *dev,
+ if (ret == -ENODEV)
+ continue;
+
+- regmap_write(qdata->regmap, REGS_TMR, TMR_DISABLE);
+ return ret;
+ }
+
+- if (devm_thermal_add_hwmon_sysfs(tzd))
++ if (qdata->ver == TMU_VER1)
++ sites |= 0x1 << (15 - id);
++ else
++ sites |= 0x1 << id;
++
++ if (devm_thermal_add_hwmon_sysfs(dev, tzd))
+ dev_warn(dev,
+ "Failed to add hwmon sysfs attributes\n");
++ }
+
++ if (sites) {
++ if (qdata->ver == TMU_VER1) {
++ regmap_write(qdata->regmap, REGS_TMR, TMR_ME | TMR_ALPF | sites);
++ } else {
++ regmap_write(qdata->regmap, REGS_V2_TMSR, sites);
++ regmap_write(qdata->regmap, REGS_TMR, TMR_ME | TMR_ALPF_V2);
++ }
+ }
+
+ return 0;
+diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
+index 497beac63e5d9..7517067d6e817 100644
+--- a/drivers/thermal/sun8i_thermal.c
++++ b/drivers/thermal/sun8i_thermal.c
+@@ -319,6 +319,11 @@ out:
+ return ret;
+ }
+
++static void sun8i_ths_reset_control_assert(void *data)
++{
++ reset_control_assert(data);
++}
++
+ static int sun8i_ths_resource_init(struct ths_device *tmdev)
+ {
+ struct device *dev = tmdev->dev;
+@@ -339,47 +344,35 @@ static int sun8i_ths_resource_init(struct ths_device *tmdev)
+ if (IS_ERR(tmdev->reset))
+ return PTR_ERR(tmdev->reset);
+
+- tmdev->bus_clk = devm_clk_get(&pdev->dev, "bus");
++ ret = reset_control_deassert(tmdev->reset);
++ if (ret)
++ return ret;
++
++ ret = devm_add_action_or_reset(dev, sun8i_ths_reset_control_assert,
++ tmdev->reset);
++ if (ret)
++ return ret;
++
++ tmdev->bus_clk = devm_clk_get_enabled(&pdev->dev, "bus");
+ if (IS_ERR(tmdev->bus_clk))
+ return PTR_ERR(tmdev->bus_clk);
+ }
+
+ if (tmdev->chip->has_mod_clk) {
+- tmdev->mod_clk = devm_clk_get(&pdev->dev, "mod");
++ tmdev->mod_clk = devm_clk_get_enabled(&pdev->dev, "mod");
+ if (IS_ERR(tmdev->mod_clk))
+ return PTR_ERR(tmdev->mod_clk);
+ }
+
+- ret = reset_control_deassert(tmdev->reset);
+- if (ret)
+- return ret;
+-
+- ret = clk_prepare_enable(tmdev->bus_clk);
+- if (ret)
+- goto assert_reset;
+-
+ ret = clk_set_rate(tmdev->mod_clk, 24000000);
+ if (ret)
+- goto bus_disable;
+-
+- ret = clk_prepare_enable(tmdev->mod_clk);
+- if (ret)
+- goto bus_disable;
++ return ret;
+
+ ret = sun8i_ths_calibrate(tmdev);
+ if (ret)
+- goto mod_disable;
++ return ret;
+
+ return 0;
+-
+-mod_disable:
+- clk_disable_unprepare(tmdev->mod_clk);
+-bus_disable:
+- clk_disable_unprepare(tmdev->bus_clk);
+-assert_reset:
+- reset_control_assert(tmdev->reset);
+-
+- return ret;
+ }
+
+ static int sun8i_h3_thermal_init(struct ths_device *tmdev)
+@@ -475,7 +468,7 @@ static int sun8i_ths_register(struct ths_device *tmdev)
+ if (IS_ERR(tmdev->sensor[i].tzd))
+ return PTR_ERR(tmdev->sensor[i].tzd);
+
+- if (devm_thermal_add_hwmon_sysfs(tmdev->sensor[i].tzd))
++ if (devm_thermal_add_hwmon_sysfs(tmdev->dev, tmdev->sensor[i].tzd))
+ dev_warn(tmdev->dev,
+ "Failed to add hwmon sysfs attributes\n");
+ }
+@@ -530,17 +523,6 @@ static int sun8i_ths_probe(struct platform_device *pdev)
+ return 0;
+ }
+
+-static int sun8i_ths_remove(struct platform_device *pdev)
+-{
+- struct ths_device *tmdev = platform_get_drvdata(pdev);
+-
+- clk_disable_unprepare(tmdev->mod_clk);
+- clk_disable_unprepare(tmdev->bus_clk);
+- reset_control_assert(tmdev->reset);
+-
+- return 0;
+-}
+-
+ static const struct ths_thermal_chip sun8i_a83t_ths = {
+ .sensor_num = 3,
+ .scale = 705,
+@@ -642,7 +624,6 @@ MODULE_DEVICE_TABLE(of, of_ths_match);
+
+ static struct platform_driver ths_driver = {
+ .probe = sun8i_ths_probe,
+- .remove = sun8i_ths_remove,
+ .driver = {
+ .name = "sun8i-thermal",
+ .of_match_table = of_ths_match,
+diff --git a/drivers/thermal/tegra/tegra30-tsensor.c b/drivers/thermal/tegra/tegra30-tsensor.c
+index b3218b71b6d97..823560c82aaee 100644
+--- a/drivers/thermal/tegra/tegra30-tsensor.c
++++ b/drivers/thermal/tegra/tegra30-tsensor.c
+@@ -528,7 +528,7 @@ static int tegra_tsensor_register_channel(struct tegra_tsensor *ts,
+ return 0;
+ }
+
+- if (devm_thermal_add_hwmon_sysfs(tsc->tzd))
++ if (devm_thermal_add_hwmon_sysfs(ts->dev, tsc->tzd))
+ dev_warn(ts->dev, "failed to add hwmon sysfs attributes\n");
+
+ return 0;
+diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
+index c594c42bea6da..964db7941e310 100644
+--- a/drivers/thermal/thermal_hwmon.c
++++ b/drivers/thermal/thermal_hwmon.c
+@@ -263,7 +263,7 @@ static void devm_thermal_hwmon_release(struct device *dev, void *res)
+ thermal_remove_hwmon_sysfs(*(struct thermal_zone_device **)res);
+ }
+
+-int devm_thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
++int devm_thermal_add_hwmon_sysfs(struct device *dev, struct thermal_zone_device *tz)
+ {
+ struct thermal_zone_device **ptr;
+ int ret;
+@@ -280,7 +280,7 @@ int devm_thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+ }
+
+ *ptr = tz;
+- devres_add(&tz->device, ptr);
++ devres_add(dev, ptr);
+
+ return ret;
+ }
+diff --git a/drivers/thermal/thermal_hwmon.h b/drivers/thermal/thermal_hwmon.h
+index 1a9d65f6a6a8b..b429f6e7abdb2 100644
+--- a/drivers/thermal/thermal_hwmon.h
++++ b/drivers/thermal/thermal_hwmon.h
+@@ -17,7 +17,7 @@
+
+ #ifdef CONFIG_THERMAL_HWMON
+ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz);
+-int devm_thermal_add_hwmon_sysfs(struct thermal_zone_device *tz);
++int devm_thermal_add_hwmon_sysfs(struct device *dev, struct thermal_zone_device *tz);
+ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz);
+ #else
+ static inline int
+@@ -27,7 +27,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+ }
+
+ static inline int
+-devm_thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
++devm_thermal_add_hwmon_sysfs(struct device *dev, struct thermal_zone_device *tz)
+ {
+ return 0;
+ }
+diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+index 8a9055bd376ec..42d0ffd82514d 100644
+--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
++++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+@@ -182,7 +182,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
+ ti_bandgap_set_sensor_data(bgp, id, data);
+ ti_bandgap_write_update_interval(bgp, data->sensor_id, interval);
+
+- if (devm_thermal_add_hwmon_sysfs(data->ti_thermal))
++ if (devm_thermal_add_hwmon_sysfs(bgp->dev, data->ti_thermal))
+ dev_warn(bgp->dev, "failed to add hwmon sysfs attributes\n");
+
+ return 0;
+diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
+index 529f8507a5e4c..7d8ff743a1b28 100644
+--- a/drivers/ufs/core/ufshcd-priv.h
++++ b/drivers/ufs/core/ufshcd-priv.h
+@@ -84,9 +84,6 @@ unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
+ u8 **buf, bool ascii);
+
+-int ufshcd_hold(struct ufs_hba *hba, bool async);
+-void ufshcd_release(struct ufs_hba *hba);
+-
+ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
+
+ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index aec74987cb4e0..e67981317dcbf 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -2917,7 +2917,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+ (hba->clk_gating.state != CLKS_ON));
+
+ lrbp = &hba->lrb[tag];
+- WARN_ON(lrbp->cmd);
+ lrbp->cmd = cmd;
+ lrbp->task_tag = tag;
+ lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
+@@ -2933,7 +2932,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+
+ err = ufshcd_map_sg(hba, lrbp);
+ if (err) {
+- lrbp->cmd = NULL;
+ ufshcd_release(hba);
+ goto out;
+ }
+@@ -3071,7 +3069,7 @@ retry:
+ * not trigger any race conditions.
+ */
+ hba->dev_cmd.complete = NULL;
+- err = ufshcd_get_tr_ocs(lrbp, hba->dev_cmd.cqe);
++ err = ufshcd_get_tr_ocs(lrbp, NULL);
+ if (!err)
+ err = ufshcd_dev_cmd_completion(hba, lrbp);
+ } else {
+@@ -3152,13 +3150,12 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
+ down_read(&hba->clk_scaling_lock);
+
+ lrbp = &hba->lrb[tag];
+- WARN_ON(lrbp->cmd);
++ lrbp->cmd = NULL;
+ err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
+ if (unlikely(err))
+ goto out;
+
+ hba->dev_cmd.complete = &wait;
+- hba->dev_cmd.cqe = NULL;
+
+ ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
+
+@@ -5391,7 +5388,6 @@ static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
+ struct scsi_cmnd *cmd = lrbp->cmd;
+
+ scsi_dma_unmap(cmd);
+- lrbp->cmd = NULL; /* Mark the command as completed. */
+ ufshcd_release(hba);
+ ufshcd_clk_scaling_update_busy(hba);
+ }
+@@ -5407,6 +5403,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
+ {
+ struct ufshcd_lrb *lrbp;
+ struct scsi_cmnd *cmd;
++ enum utp_ocs ocs;
+
+ lrbp = &hba->lrb[task_tag];
+ lrbp->compl_time_stamp = ktime_get();
+@@ -5422,8 +5419,11 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
+ } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
+ lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
+ if (hba->dev_cmd.complete) {
+- hba->dev_cmd.cqe = cqe;
+- ufshcd_add_command_trace(hba, task_tag, UFS_DEV_COMP);
++ if (cqe) {
++ ocs = le32_to_cpu(cqe->status) & MASK_OCS;
++ lrbp->utr_descriptor_ptr->header.dword_2 =
++ cpu_to_le32(ocs);
++ }
+ complete(hba->dev_cmd.complete);
+ ufshcd_clk_scaling_update_busy(hba);
+ }
+@@ -7006,7 +7006,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
+ down_read(&hba->clk_scaling_lock);
+
+ lrbp = &hba->lrb[tag];
+- WARN_ON(lrbp->cmd);
+ lrbp->cmd = NULL;
+ lrbp->task_tag = tag;
+ lrbp->lun = 0;
+@@ -7178,7 +7177,6 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
+ down_read(&hba->clk_scaling_lock);
+
+ lrbp = &hba->lrb[tag];
+- WARN_ON(lrbp->cmd);
+ lrbp->cmd = NULL;
+ lrbp->task_tag = tag;
+ lrbp->lun = UFS_UPIU_RPMB_WLUN;
+@@ -9153,7 +9151,8 @@ static int ufshcd_execute_start_stop(struct scsi_device *sdev,
+ };
+
+ return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL,
+- /*bufflen=*/0, /*timeout=*/HZ, /*retries=*/0, &args);
++ /*bufflen=*/0, /*timeout=*/10 * HZ, /*retries=*/0,
++ &args);
+ }
+
+ /**
+diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
+index 58f91b3bd670c..ed4737de45289 100644
+--- a/drivers/vfio/mdev/mdev_core.c
++++ b/drivers/vfio/mdev/mdev_core.c
+@@ -72,12 +72,6 @@ int mdev_register_parent(struct mdev_parent *parent, struct device *dev,
+ parent->nr_types = nr_types;
+ atomic_set(&parent->available_instances, mdev_driver->max_instances);
+
+- if (!mdev_bus_compat_class) {
+- mdev_bus_compat_class = class_compat_register("mdev_bus");
+- if (!mdev_bus_compat_class)
+- return -ENOMEM;
+- }
+-
+ ret = parent_create_sysfs_files(parent);
+ if (ret)
+ return ret;
+@@ -251,13 +245,24 @@ int mdev_device_remove(struct mdev_device *mdev)
+
+ static int __init mdev_init(void)
+ {
+- return bus_register(&mdev_bus_type);
++ int ret;
++
++ ret = bus_register(&mdev_bus_type);
++ if (ret)
++ return ret;
++
++ mdev_bus_compat_class = class_compat_register("mdev_bus");
++ if (!mdev_bus_compat_class) {
++ bus_unregister(&mdev_bus_type);
++ return -ENOMEM;
++ }
++
++ return 0;
+ }
+
+ static void __exit mdev_exit(void)
+ {
+- if (mdev_bus_compat_class)
+- class_compat_unregister(mdev_bus_compat_class);
++ class_compat_unregister(mdev_bus_compat_class);
+ bus_unregister(&mdev_bus_type);
+ }
+
+diff --git a/drivers/video/fbdev/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c
+index 03cff39d392db..cc1079aad61f2 100644
+--- a/drivers/video/fbdev/omap/lcd_mipid.c
++++ b/drivers/video/fbdev/omap/lcd_mipid.c
+@@ -563,11 +563,15 @@ static int mipid_spi_probe(struct spi_device *spi)
+
+ r = mipid_detect(md);
+ if (r < 0)
+- return r;
++ goto free_md;
+
+ omapfb_register_panel(&md->panel);
+
+ return 0;
++
++free_md:
++ kfree(md);
++ return r;
+ }
+
+ static void mipid_spi_remove(struct spi_device *spi)
+diff --git a/drivers/virt/coco/sev-guest/Kconfig b/drivers/virt/coco/sev-guest/Kconfig
+index f9db0799ae67c..da2d7ca531f0f 100644
+--- a/drivers/virt/coco/sev-guest/Kconfig
++++ b/drivers/virt/coco/sev-guest/Kconfig
+@@ -2,6 +2,7 @@ config SEV_GUEST
+ tristate "AMD SEV Guest driver"
+ default m
+ depends on AMD_MEM_ENCRYPT
++ select CRYPTO
+ select CRYPTO_AEAD2
+ select CRYPTO_GCM
+ help
+diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
+index ada899613486a..4bb2c6f4ad0e7 100644
+--- a/fs/btrfs/bio.c
++++ b/fs/btrfs/bio.c
+@@ -59,30 +59,30 @@ struct bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
+ return bio;
+ }
+
+-static struct bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
+- struct bio *orig, u64 map_length,
+- bool use_append)
++static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
++ struct btrfs_bio *orig_bbio,
++ u64 map_length, bool use_append)
+ {
+- struct btrfs_bio *orig_bbio = btrfs_bio(orig);
++ struct btrfs_bio *bbio;
+ struct bio *bio;
+
+ if (use_append) {
+ unsigned int nr_segs;
+
+- bio = bio_split_rw(orig, &fs_info->limits, &nr_segs,
++ bio = bio_split_rw(&orig_bbio->bio, &fs_info->limits, &nr_segs,
+ &btrfs_clone_bioset, map_length);
+ } else {
+- bio = bio_split(orig, map_length >> SECTOR_SHIFT, GFP_NOFS,
+- &btrfs_clone_bioset);
++ bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT,
++ GFP_NOFS, &btrfs_clone_bioset);
+ }
+- btrfs_bio_init(btrfs_bio(bio), orig_bbio->inode, NULL, orig_bbio);
++ bbio = btrfs_bio(bio);
++ btrfs_bio_init(bbio, orig_bbio->inode, NULL, orig_bbio);
+
+- btrfs_bio(bio)->file_offset = orig_bbio->file_offset;
+- if (!(orig->bi_opf & REQ_BTRFS_ONE_ORDERED))
+- orig_bbio->file_offset += map_length;
++ bbio->file_offset = orig_bbio->file_offset;
++ orig_bbio->file_offset += map_length;
+
+ atomic_inc(&orig_bbio->pending_ios);
+- return bio;
++ return bbio;
+ }
+
+ static void btrfs_orig_write_end_io(struct bio *bio);
+@@ -631,8 +631,8 @@ static bool btrfs_submit_chunk(struct bio *bio, int mirror_num)
+ map_length = min(map_length, fs_info->max_zone_append_size);
+
+ if (map_length < length) {
+- bio = btrfs_split_bio(fs_info, bio, map_length, use_append);
+- bbio = btrfs_bio(bio);
++ bbio = btrfs_split_bio(fs_info, bbio, map_length, use_append);
++ bio = &bbio->bio;
+ }
+
+ /*
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index d4ed200a94714..7e0667bdfba07 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -121,6 +121,12 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
+ seq_puts(m, " nosparse");
+ if (tcon->need_reconnect)
+ seq_puts(m, "\tDISCONNECTED ");
++ spin_lock(&tcon->tc_lock);
++ if (tcon->origin_fullpath) {
++ seq_printf(m, "\n\tDFS origin fullpath: %s",
++ tcon->origin_fullpath);
++ }
++ spin_unlock(&tcon->tc_lock);
+ seq_putc(m, '\n');
+ }
+
+@@ -377,13 +383,9 @@ skip_rdma:
+ seq_printf(m, "\nIn Send: %d In MaxReq Wait: %d",
+ atomic_read(&server->in_send),
+ atomic_read(&server->num_waiters));
+- if (IS_ENABLED(CONFIG_CIFS_DFS_UPCALL)) {
+- if (server->origin_fullpath)
+- seq_printf(m, "\nDFS origin full path: %s",
+- server->origin_fullpath);
+- if (server->leaf_fullpath)
+- seq_printf(m, "\nDFS leaf full path: %s",
+- server->leaf_fullpath);
++ if (server->leaf_fullpath) {
++ seq_printf(m, "\nDFS leaf full path: %s",
++ server->leaf_fullpath);
+ }
+
+ seq_printf(m, "\n\n\tSessions: ");
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 5f8fd20951af3..56d440772e029 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -736,23 +736,20 @@ struct TCP_Server_Info {
+ #endif
+ struct mutex refpath_lock; /* protects leaf_fullpath */
+ /*
+- * origin_fullpath: Canonical copy of smb3_fs_context::source.
+- * It is used for matching existing DFS tcons.
+- *
+ * leaf_fullpath: Canonical DFS referral path related to this
+ * connection.
+ * It is used in DFS cache refresher, reconnect and may
+ * change due to nested DFS links.
+ *
+- * Both protected by @refpath_lock and @srv_lock. The @refpath_lock is
+- * mosly used for not requiring a copy of @leaf_fullpath when getting
++ * Protected by @refpath_lock and @srv_lock. The @refpath_lock is
++ * mostly used for not requiring a copy of @leaf_fullpath when getting
+ * cached or new DFS referrals (which might also sleep during I/O).
+ * While @srv_lock is held for making string and NULL comparions against
+ * both fields as in mount(2) and cache refresh.
+ *
+ * format: \\HOST\SHARE[\OPTIONAL PATH]
+ */
+- char *origin_fullpath, *leaf_fullpath;
++ char *leaf_fullpath;
+ };
+
+ static inline bool is_smb1(struct TCP_Server_Info *server)
+@@ -1242,6 +1239,7 @@ struct cifs_tcon {
+ struct delayed_work dfs_cache_work;
+ #endif
+ struct delayed_work query_interfaces; /* query interfaces workqueue job */
++ char *origin_fullpath; /* canonical copy of smb3_fs_context::source */
+ };
+
+ /*
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index c1c704990b986..16eac67fd48ac 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -649,7 +649,7 @@ int smb2_parse_query_directory(struct cifs_tcon *tcon, struct kvec *rsp_iov,
+ int resp_buftype,
+ struct cifs_search_info *srch_inf);
+
+-struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server);
++struct super_block *cifs_get_dfs_tcon_super(struct cifs_tcon *tcon);
+ void cifs_put_tcp_super(struct super_block *sb);
+ int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix);
+ char *extract_hostname(const char *unc);
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 1250d156619b7..f0189afd940cf 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -996,7 +996,6 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
+ */
+ }
+
+- kfree(server->origin_fullpath);
+ kfree(server->leaf_fullpath);
+ kfree(server);
+
+@@ -1386,7 +1385,9 @@ match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ }
+
+ /* this function must be called with srv_lock held */
+-static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
++static int match_server(struct TCP_Server_Info *server,
++ struct smb3_fs_context *ctx,
++ bool match_super)
+ {
+ struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
+
+@@ -1417,36 +1418,38 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *
+ (struct sockaddr *)&server->srcaddr))
+ return 0;
+ /*
+- * - Match for an DFS tcon (@server->origin_fullpath).
+- * - Match for an DFS root server connection (@server->leaf_fullpath).
+- * - If none of the above and @ctx->leaf_fullpath is set, then
+- * it is a new DFS connection.
+- * - If 'nodfs' mount option was passed, then match only connections
+- * that have no DFS referrals set
+- * (e.g. can't failover to other targets).
++ * When matching cifs.ko superblocks (@match_super == true), we can't
++ * really match either @server->leaf_fullpath or @server->dstaddr
++ * directly since this @server might belong to a completely different
++ * server -- in case of domain-based DFS referrals or DFS links -- as
++ * provided earlier by mount(2) through 'source' and 'ip' options.
++ *
++ * Otherwise, match the DFS referral in @server->leaf_fullpath or the
++ * destination address in @server->dstaddr.
++ *
++ * When using 'nodfs' mount option, we avoid sharing it with DFS
++ * connections as they might failover.
+ */
+- if (!ctx->nodfs) {
+- if (ctx->source && server->origin_fullpath) {
+- if (!dfs_src_pathname_equal(ctx->source,
+- server->origin_fullpath))
++ if (!match_super) {
++ if (!ctx->nodfs) {
++ if (server->leaf_fullpath) {
++ if (!ctx->leaf_fullpath ||
++ strcasecmp(server->leaf_fullpath,
++ ctx->leaf_fullpath))
++ return 0;
++ } else if (ctx->leaf_fullpath) {
+ return 0;
++ }
+ } else if (server->leaf_fullpath) {
+- if (!ctx->leaf_fullpath ||
+- strcasecmp(server->leaf_fullpath,
+- ctx->leaf_fullpath))
+- return 0;
+- } else if (ctx->leaf_fullpath) {
+ return 0;
+ }
+- } else if (server->origin_fullpath || server->leaf_fullpath) {
+- return 0;
+ }
+
+ /*
+ * Match for a regular connection (address/hostname/port) which has no
+ * DFS referrals set.
+ */
+- if (!server->origin_fullpath && !server->leaf_fullpath &&
++ if (!server->leaf_fullpath &&
+ (strcasecmp(server->hostname, ctx->server_hostname) ||
+ !match_server_address(server, addr) ||
+ !match_port(server, addr)))
+@@ -1482,7 +1485,8 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx)
+ * Skip ses channels since they're only handled in lower layers
+ * (e.g. cifs_send_recv).
+ */
+- if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) {
++ if (CIFS_SERVER_IS_CHAN(server) ||
++ !match_server(server, ctx, false)) {
+ spin_unlock(&server->srv_lock);
+ continue;
+ }
+@@ -2270,10 +2274,16 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+
+ if (tcon->status == TID_EXITING)
+ return 0;
+- /* Skip UNC validation when matching DFS connections or superblocks */
+- if (!server->origin_fullpath && !server->leaf_fullpath &&
+- strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE))
++
++ if (tcon->origin_fullpath) {
++ if (!ctx->source ||
++ !dfs_src_pathname_equal(ctx->source,
++ tcon->origin_fullpath))
++ return 0;
++ } else if (!server->leaf_fullpath &&
++ strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE)) {
+ return 0;
++ }
+ if (tcon->seal != ctx->seal)
+ return 0;
+ if (tcon->snapshot_time != ctx->snapshot_time)
+@@ -2672,7 +2682,7 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+ }
+
+ static int match_prepath(struct super_block *sb,
+- struct TCP_Server_Info *server,
++ struct cifs_tcon *tcon,
+ struct cifs_mnt_data *mnt_data)
+ {
+ struct smb3_fs_context *ctx = mnt_data->ctx;
+@@ -2683,8 +2693,8 @@ static int match_prepath(struct super_block *sb,
+ bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
+ new->prepath;
+
+- if (server->origin_fullpath &&
+- dfs_src_pathname_equal(server->origin_fullpath, ctx->source))
++ if (tcon->origin_fullpath &&
++ dfs_src_pathname_equal(tcon->origin_fullpath, ctx->source))
+ return 1;
+
+ if (old_set && new_set && !strcmp(new->prepath, old->prepath))
+@@ -2732,10 +2742,10 @@ cifs_match_super(struct super_block *sb, void *data)
+ spin_lock(&ses->ses_lock);
+ spin_lock(&ses->chan_lock);
+ spin_lock(&tcon->tc_lock);
+- if (!match_server(tcp_srv, ctx) ||
++ if (!match_server(tcp_srv, ctx, true) ||
+ !match_session(ses, ctx) ||
+ !match_tcon(tcon, ctx) ||
+- !match_prepath(sb, tcp_srv, mnt_data)) {
++ !match_prepath(sb, tcon, mnt_data)) {
+ rc = 0;
+ goto out;
+ }
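The connect.c hunk above keys DFS tcon matching on a canonical source path (tcon->origin_fullpath) rather than on per-server state. As a rough illustration of what such a comparison has to tolerate, here is a minimal userspace sketch of a case-insensitive, separator-agnostic path match; the actual semantics of dfs_src_pathname_equal() in cifs.ko may differ, so treat the '/'-versus-'\' handling below as an assumption for illustration only.

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

/* Compare two DFS source paths, ignoring case and path-separator style. */
static bool dfs_path_equal(const char *a, const char *b)
{
	for (; *a && *b; a++, b++) {
		bool sep_a = (*a == '/' || *a == '\\');
		bool sep_b = (*b == '/' || *b == '\\');

		if (sep_a && sep_b)
			continue;
		if (tolower((unsigned char)*a) != tolower((unsigned char)*b))
			return false;
	}
	return *a == '\0' && *b == '\0';
}

int main(void)
{
	/* prints 1: same share spelled with different separators and case */
	printf("%d\n", dfs_path_equal("\\\\srv\\share\\dir", "//SRV/share/DIR"));
	return 0;
}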
+diff --git a/fs/cifs/dfs.c b/fs/cifs/dfs.c
+index 2390b2fedd6a3..267536a7531df 100644
+--- a/fs/cifs/dfs.c
++++ b/fs/cifs/dfs.c
+@@ -249,14 +249,12 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
+ server = mnt_ctx->server;
+ tcon = mnt_ctx->tcon;
+
+- mutex_lock(&server->refpath_lock);
+- spin_lock(&server->srv_lock);
+- if (!server->origin_fullpath) {
+- server->origin_fullpath = origin_fullpath;
++ spin_lock(&tcon->tc_lock);
++ if (!tcon->origin_fullpath) {
++ tcon->origin_fullpath = origin_fullpath;
+ origin_fullpath = NULL;
+ }
+- spin_unlock(&server->srv_lock);
+- mutex_unlock(&server->refpath_lock);
++ spin_unlock(&tcon->tc_lock);
+
+ if (list_empty(&tcon->dfs_ses_list)) {
+ list_replace_init(&mnt_ctx->dfs_ses_list,
+@@ -279,18 +277,13 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
+ {
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct cifs_ses *ses;
+- char *source = ctx->source;
+ bool nodfs = ctx->nodfs;
+ int rc;
+
+ *isdfs = false;
+- /* Temporarily set @ctx->source to NULL as we're not matching DFS
+- * superblocks yet. See cifs_match_super() and match_server().
+- */
+- ctx->source = NULL;
+ rc = get_session(mnt_ctx, NULL);
+ if (rc)
+- goto out;
++ return rc;
+
+ ctx->dfs_root_ses = mnt_ctx->ses;
+ /*
+@@ -304,7 +297,7 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
+ rc = dfs_get_referral(mnt_ctx, ctx->UNC + 1, NULL, NULL);
+ if (rc) {
+ if (rc != -ENOENT && rc != -EOPNOTSUPP && rc != -EIO)
+- goto out;
++ return rc;
+ nodfs = true;
+ }
+ }
+@@ -312,7 +305,7 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
+ rc = cifs_mount_get_tcon(mnt_ctx);
+ if (!rc)
+ rc = cifs_is_path_remote(mnt_ctx);
+- goto out;
++ return rc;
+ }
+
+ *isdfs = true;
+@@ -328,12 +321,7 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
+ rc = __dfs_mount_share(mnt_ctx);
+ if (ses == ctx->dfs_root_ses)
+ cifs_put_smb_ses(ses);
+-out:
+- /*
+- * Restore previous value of @ctx->source so DFS superblock can be
+- * matched in cifs_match_super().
+- */
+- ctx->source = source;
++
+ return rc;
+ }
+
+@@ -567,11 +555,11 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+ int rc;
+ struct TCP_Server_Info *server = tcon->ses->server;
+ const struct smb_version_operations *ops = server->ops;
+- struct super_block *sb = NULL;
+- struct cifs_sb_info *cifs_sb;
+ struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+- char *tree;
++ struct cifs_sb_info *cifs_sb = NULL;
++ struct super_block *sb = NULL;
+ struct dfs_info3_param ref = {0};
++ char *tree;
+
+ /* only send once per connect */
+ spin_lock(&tcon->tc_lock);
+@@ -603,19 +591,18 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+ goto out;
+ }
+
+- sb = cifs_get_tcp_super(server);
+- if (IS_ERR(sb)) {
+- rc = PTR_ERR(sb);
+- cifs_dbg(VFS, "%s: could not find superblock: %d\n", __func__, rc);
+- goto out;
+- }
+-
+- cifs_sb = CIFS_SB(sb);
++ sb = cifs_get_dfs_tcon_super(tcon);
++ if (!IS_ERR(sb))
++ cifs_sb = CIFS_SB(sb);
+
+- /* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
+- if (!server->leaf_fullpath ||
++ /*
++	 * Tree connect to the last share in @tcon->tree_name when no DFS
++	 * superblock or cached DFS referral was found.
++ */
++ if (!cifs_sb || !server->leaf_fullpath ||
+ dfs_cache_noreq_find(server->leaf_fullpath + 1, &ref, &tl)) {
+- rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, cifs_sb->local_nls);
++ rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon,
++ cifs_sb ? cifs_sb->local_nls : nlsc);
+ goto out;
+ }
+
+diff --git a/fs/cifs/dfs.h b/fs/cifs/dfs.h
+index 1c90df5ecfbda..98e9d2aca6a7a 100644
+--- a/fs/cifs/dfs.h
++++ b/fs/cifs/dfs.h
+@@ -39,16 +39,15 @@ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
+ {
+ struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+- struct TCP_Server_Info *server = tcon->ses->server;
+ size_t len;
+ char *s;
+
+- spin_lock(&server->srv_lock);
+- if (unlikely(!server->origin_fullpath)) {
+- spin_unlock(&server->srv_lock);
++ spin_lock(&tcon->tc_lock);
++ if (unlikely(!tcon->origin_fullpath)) {
++ spin_unlock(&tcon->tc_lock);
+ return ERR_PTR(-EREMOTE);
+ }
+- spin_unlock(&server->srv_lock);
++ spin_unlock(&tcon->tc_lock);
+
+ s = dentry_path_raw(dentry, page, PATH_MAX);
+ if (IS_ERR(s))
+@@ -57,16 +56,16 @@ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
+ if (!s[1])
+ s++;
+
+- spin_lock(&server->srv_lock);
+- len = strlen(server->origin_fullpath);
++ spin_lock(&tcon->tc_lock);
++ len = strlen(tcon->origin_fullpath);
+ if (s < (char *)page + len) {
+- spin_unlock(&server->srv_lock);
++ spin_unlock(&tcon->tc_lock);
+ return ERR_PTR(-ENAMETOOLONG);
+ }
+
+ s -= len;
+- memcpy(s, server->origin_fullpath, len);
+- spin_unlock(&server->srv_lock);
++ memcpy(s, tcon->origin_fullpath, len);
++ spin_unlock(&tcon->tc_lock);
+ convert_delimiter(s, '/');
+
+ return s;
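The dfs.h hunk above builds the automount device name by rendering the dentry path at the end of a page-sized buffer and then copying tcon->origin_fullpath directly in front of it, so no second buffer or concatenation pass is needed. A small userspace sketch of that right-to-left construction; the buffer size, strings, and error handling are illustrative only.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096

/* Copy @prefix immediately before @pos inside @page, or fail if it won't fit. */
static char *prepend(char *pos, char *page, const char *prefix)
{
	size_t len = strlen(prefix);

	if ((size_t)(pos - page) < len)
		return NULL;		/* would not fit: the -ENAMETOOLONG case */
	pos -= len;
	memcpy(pos, prefix, len);
	return pos;
}

int main(void)
{
	char page[PAGE_SZ];
	/* pretend dentry_path_raw() already placed the relative path at the end */
	char *s = page + PAGE_SZ - sizeof("/dir/file");

	memcpy(s, "/dir/file", sizeof("/dir/file"));
	s = prepend(s, page, "\\\\server\\share");
	if (s)
		printf("%s\n", s);	/* \\server\share/dir/file */
	return 0;
}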
+diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
+index 1513b2709889b..33adf43a01f1d 100644
+--- a/fs/cifs/dfs_cache.c
++++ b/fs/cifs/dfs_cache.c
+@@ -1248,18 +1248,20 @@ static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
+ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
+ {
+ struct cifs_tcon *tcon;
+- struct TCP_Server_Info *server;
+
+ if (!cifs_sb || !cifs_sb->master_tlink)
+ return -EINVAL;
+
+ tcon = cifs_sb_master_tcon(cifs_sb);
+- server = tcon->ses->server;
+
+- if (!server->origin_fullpath) {
++ spin_lock(&tcon->tc_lock);
++ if (!tcon->origin_fullpath) {
++ spin_unlock(&tcon->tc_lock);
+ cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
+ return 0;
+ }
++ spin_unlock(&tcon->tc_lock);
++
+ /*
+ * After reconnecting to a different server, unique ids won't match anymore, so we disable
+ * serverino. This prevents dentry revalidation to think the dentry are stale (ESTALE).
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 051283386e229..1a854dc204823 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -4936,20 +4936,19 @@ oplock_break_ack:
+
+ _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
+ /*
+- * releasing stale oplock after recent reconnect of smb session using
+- * a now incorrect file handle is not a data integrity issue but do
+- * not bother sending an oplock release if session to server still is
+- * disconnected since oplock already released by the server
++ * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
++ * an acknowledgment to be sent when the file has already been closed.
++	 * Check for a NULL server, since this can race with kill_sb() calling tree disconnect.
+ */
+- if (!oplock_break_cancelled) {
+- /* check for server null since can race with kill_sb calling tree disconnect */
+- if (tcon->ses && tcon->ses->server) {
+- rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
+- volatile_fid, net_fid, cinode);
+- cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+- } else
+- pr_warn_once("lease break not sent for unmounted share\n");
+- }
++ spin_lock(&cinode->open_file_lock);
++ if (tcon->ses && tcon->ses->server && !oplock_break_cancelled &&
++ !list_empty(&cinode->openFileList)) {
++ spin_unlock(&cinode->open_file_lock);
++ rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
++ volatile_fid, net_fid, cinode);
++ cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
++ } else
++ spin_unlock(&cinode->open_file_lock);
+
+ cifs_done_oplock_break(cinode);
+ }
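The file.c change above decides under cinode->open_file_lock whether an oplock acknowledgment is still worth sending (the break was not cancelled and a handle is still open), then performs the actual send outside the lock. A pthread-based sketch of that check-under-lock / act-outside-lock pattern; the names and the printf stand-in for the SMB round trip are made up for illustration.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t open_file_lock = PTHREAD_MUTEX_INITIALIZER;
static int open_handles = 1;		/* stand-in for cinode->openFileList */

static void send_oplock_ack(void)
{
	printf("oplock release sent\n");	/* slow network I/O in cifs.ko */
}

static void oplock_break_done(bool cancelled)
{
	bool need_ack;

	/* decide under the lock... */
	pthread_mutex_lock(&open_file_lock);
	need_ack = !cancelled && open_handles > 0;
	pthread_mutex_unlock(&open_file_lock);

	/* ...but do the slow work outside of it */
	if (need_ack)
		send_oplock_ack();
}

int main(void)
{
	oplock_break_done(false);
	return 0;
}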
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index cd914be905b24..b0dedc26643b6 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -156,6 +156,7 @@ tconInfoFree(struct cifs_tcon *tcon)
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+ dfs_put_root_smb_sessions(&tcon->dfs_ses_list);
+ #endif
++ kfree(tcon->origin_fullpath);
+ kfree(tcon);
+ }
+
+@@ -1106,20 +1107,25 @@ struct super_cb_data {
+ struct super_block *sb;
+ };
+
+-static void tcp_super_cb(struct super_block *sb, void *arg)
++static void tcon_super_cb(struct super_block *sb, void *arg)
+ {
+ struct super_cb_data *sd = arg;
+- struct TCP_Server_Info *server = sd->data;
+ struct cifs_sb_info *cifs_sb;
+- struct cifs_tcon *tcon;
++ struct cifs_tcon *t1 = sd->data, *t2;
+
+ if (sd->sb)
+ return;
+
+ cifs_sb = CIFS_SB(sb);
+- tcon = cifs_sb_master_tcon(cifs_sb);
+- if (tcon->ses->server == server)
++ t2 = cifs_sb_master_tcon(cifs_sb);
++
++ spin_lock(&t2->tc_lock);
++ if (t1->ses == t2->ses &&
++ t1->ses->server == t2->ses->server &&
++ t2->origin_fullpath &&
++ dfs_src_pathname_equal(t2->origin_fullpath, t1->origin_fullpath))
+ sd->sb = sb;
++ spin_unlock(&t2->tc_lock);
+ }
+
+ static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
+@@ -1145,6 +1151,7 @@ static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void
+ return sd.sb;
+ }
+ }
++ pr_warn_once("%s: could not find dfs superblock\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+@@ -1154,9 +1161,15 @@ static void __cifs_put_super(struct super_block *sb)
+ cifs_sb_deactive(sb);
+ }
+
+-struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server)
++struct super_block *cifs_get_dfs_tcon_super(struct cifs_tcon *tcon)
+ {
+- return __cifs_get_super(tcp_super_cb, server);
++ spin_lock(&tcon->tc_lock);
++ if (!tcon->origin_fullpath) {
++ spin_unlock(&tcon->tc_lock);
++ return ERR_PTR(-ENOENT);
++ }
++ spin_unlock(&tcon->tc_lock);
++ return __cifs_get_super(tcon_super_cb, tcon);
+ }
+
+ void cifs_put_tcp_super(struct super_block *sb)
+@@ -1238,9 +1251,16 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid,
+ */
+ if (strlen(full_path) < 2 || !cifs_sb ||
+ (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
+- !is_tcon_dfs(tcon) || !ses->server->origin_fullpath)
++ !is_tcon_dfs(tcon))
+ return 0;
+
++ spin_lock(&tcon->tc_lock);
++ if (!tcon->origin_fullpath) {
++ spin_unlock(&tcon->tc_lock);
++ return 0;
++ }
++ spin_unlock(&tcon->tc_lock);
++
+ /*
+ * Slow path - tcon is DFS and @full_path has prefix path, so attempt
+ * to get a referral to figure out whether it is an DFS link.
+@@ -1264,7 +1284,7 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid,
+
+ /*
+ * XXX: we are not using dfs_cache_find() here because we might
+- * end filling all the DFS cache and thus potentially
++ * end up filling all the DFS cache and thus potentially
+ * removing cached DFS targets that the client would eventually
+ * need during failover.
+ */
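The tcon_super_cb() change above selects a superblock by comparing the master tcon's session and canonical origin path instead of only the TCP server pointer. A userspace sketch of that callback-driven search with invented types; it assumes the path comparison behaves like a plain string compare, which is a simplification of dfs_src_pathname_equal().

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct tcon { const void *ses; const char *origin_fullpath; };
struct mount { struct tcon *master; const char *name; };
struct search { const struct tcon *key; const struct mount *found; };

/* Remember the first mount whose master tcon matches by session and origin path. */
static void match_cb(const struct mount *m, void *arg)
{
	struct search *sd = arg;

	if (sd->found)
		return;
	if (m->master->ses == sd->key->ses &&
	    m->master->origin_fullpath &&
	    !strcmp(m->master->origin_fullpath, sd->key->origin_fullpath))
		sd->found = m;
}

int main(void)
{
	static const char ses[1];	/* dummy session identity */
	struct tcon t1 = { ses, "\\\\srv\\share" };
	struct tcon t2 = { ses, "\\\\srv\\other" };
	struct mount mounts[] = { { &t2, "/mnt/a" }, { &t1, "/mnt/b" } };
	struct tcon key = { ses, "\\\\srv\\share" };
	struct search sd = { &key, NULL };

	for (size_t i = 0; i < sizeof(mounts) / sizeof(mounts[0]); i++)
		match_cb(&mounts[i], &sd);
	printf("%s\n", sd.found ? sd.found->name : "no dfs superblock");
	return 0;
}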
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 163a03298430d..8e696fbd72fa8 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -398,9 +398,6 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ rsp_iov);
+
+ finished:
+- if (cfile)
+- cifsFileInfo_put(cfile);
+-
+ SMB2_open_free(&rqst[0]);
+ if (rc == -EREMCHG) {
+ pr_warn_once("server share %s deleted\n", tcon->tree_name);
+@@ -529,6 +526,9 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ break;
+ }
+
++ if (cfile)
++ cifsFileInfo_put(cfile);
++
+ if (rc && err_iov && err_buftype) {
+ memcpy(err_iov, rsp_iov, 3 * sizeof(*err_iov));
+ memcpy(err_buftype, resp_buftype, 3 * sizeof(*err_buftype));
+@@ -609,9 +609,6 @@ int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ if (islink)
+ rc = -EREMOTE;
+ }
+- if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
+- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
+- rc = -EOPNOTSUPP;
+ }
+
+ out:
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 5065398665f11..bb41b9bae262d 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -208,6 +208,16 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
+
+ spin_lock(&server->req_lock);
+ while (1) {
++ spin_unlock(&server->req_lock);
++
++ spin_lock(&server->srv_lock);
++ if (server->tcpStatus == CifsExiting) {
++ spin_unlock(&server->srv_lock);
++ return -ENOENT;
++ }
++ spin_unlock(&server->srv_lock);
++
++ spin_lock(&server->req_lock);
+ if (server->credits <= 0) {
+ spin_unlock(&server->req_lock);
+ cifs_num_waiters_inc(server);
+@@ -218,15 +228,6 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
+ return rc;
+ spin_lock(&server->req_lock);
+ } else {
+- spin_unlock(&server->req_lock);
+- spin_lock(&server->srv_lock);
+- if (server->tcpStatus == CifsExiting) {
+- spin_unlock(&server->srv_lock);
+- return -ENOENT;
+- }
+- spin_unlock(&server->srv_lock);
+-
+- spin_lock(&server->req_lock);
+ scredits = server->credits;
+ /* can deadlock with reopen */
+ if (scredits <= 8) {
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 24bdd5f4d3bcc..968bfd029b8eb 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -522,6 +522,16 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
+ }
+
+ while (1) {
++ spin_unlock(&server->req_lock);
++
++ spin_lock(&server->srv_lock);
++ if (server->tcpStatus == CifsExiting) {
++ spin_unlock(&server->srv_lock);
++ return -ENOENT;
++ }
++ spin_unlock(&server->srv_lock);
++
++ spin_lock(&server->req_lock);
+ if (*credits < num_credits) {
+ scredits = *credits;
+ spin_unlock(&server->req_lock);
+@@ -547,15 +557,6 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
+ return -ERESTARTSYS;
+ spin_lock(&server->req_lock);
+ } else {
+- spin_unlock(&server->req_lock);
+-
+- spin_lock(&server->srv_lock);
+- if (server->tcpStatus == CifsExiting) {
+- spin_unlock(&server->srv_lock);
+- return -ENOENT;
+- }
+- spin_unlock(&server->srv_lock);
+-
+ /*
+ * For normal commands, reserve the last MAX_COMPOUND
+ * credits to compound requests.
+@@ -569,7 +570,6 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
+ * for servers that are slow to hand out credits on
+ * new sessions.
+ */
+- spin_lock(&server->req_lock);
+ if (!optype && num_credits == 1 &&
+ server->in_flight > 2 * MAX_COMPOUND &&
+ *credits <= MAX_COMPOUND) {
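Both credit-wait hunks above (smb2ops.c and transport.c) hoist the CifsExiting test to the top of the loop, so a dying connection is noticed on every iteration rather than only on the path that already held enough credits. A compact sketch of the resulting loop shape, with plain variables standing in for the spinlock-protected server state.

#include <stdbool.h>
#include <stdio.h>

static bool tcp_exiting;
static int credits = 0;

static int wait_for_credits(int needed)
{
	while (1) {
		if (tcp_exiting)		/* checked on every pass now */
			return -1;		/* -ENOENT in cifs.ko */

		if (credits < needed) {
			/* would sleep here waiting for the server */
			credits += needed;	/* simulate credits arriving */
			continue;
		}

		credits -= needed;
		return 0;
	}
}

int main(void)
{
	printf("%d\n", wait_for_credits(1));	/* 0 */
	tcp_exiting = true;
	printf("%d\n", wait_for_credits(1));	/* -1: teardown seen immediately */
	return 0;
}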
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index d7add72a09437..72325d4b98f9d 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -94,11 +94,8 @@ struct z_erofs_pcluster {
+
+ /* let's avoid the valid 32-bit kernel addresses */
+
+-/* the chained workgroup has't submitted io (still open) */
++/* the end of a chain of pclusters */
+ #define Z_EROFS_PCLUSTER_TAIL ((void *)0x5F0ECAFE)
+-/* the chained workgroup has already submitted io */
+-#define Z_EROFS_PCLUSTER_TAIL_CLOSED ((void *)0x5F0EDEAD)
+-
+ #define Z_EROFS_PCLUSTER_NIL (NULL)
+
+ struct z_erofs_decompressqueue {
+@@ -499,20 +496,6 @@ out_error_pcluster_pool:
+
+ enum z_erofs_pclustermode {
+ Z_EROFS_PCLUSTER_INFLIGHT,
+- /*
+- * The current pclusters was the tail of an exist chain, in addition
+- * that the previous processed chained pclusters are all decided to
+- * be hooked up to it.
+- * A new chain will be created for the remaining pclusters which are
+- * not processed yet, so different from Z_EROFS_PCLUSTER_FOLLOWED,
+- * the next pcluster cannot reuse the whole page safely for inplace I/O
+- * in the following scenario:
+- * ________________________________________________________________
+- * | tail (partial) page | head (partial) page |
+- * | (belongs to the next pcl) | (belongs to the current pcl) |
+- * |_______PCLUSTER_FOLLOWED______|________PCLUSTER_HOOKED__________|
+- */
+- Z_EROFS_PCLUSTER_HOOKED,
+ /*
+ * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
+ * could be dispatched into bypass queue later due to uptodated managed
+@@ -530,8 +513,8 @@ enum z_erofs_pclustermode {
+ * ________________________________________________________________
+ * | tail (partial) page | head (partial) page |
+ * | (of the current cl) | (of the previous collection) |
+- * | PCLUSTER_FOLLOWED or | |
+- * |_____PCLUSTER_HOOKED__|___________PCLUSTER_FOLLOWED____________|
++ * | | |
++ * |__PCLUSTER_FOLLOWED___|___________PCLUSTER_FOLLOWED____________|
+ *
+ * [ (*) the above page can be used as inplace I/O. ]
+ */
+@@ -544,7 +527,7 @@ struct z_erofs_decompress_frontend {
+ struct z_erofs_bvec_iter biter;
+
+ struct page *candidate_bvpage;
+- struct z_erofs_pcluster *pcl, *tailpcl;
++ struct z_erofs_pcluster *pcl;
+ z_erofs_next_pcluster_t owned_head;
+ enum z_erofs_pclustermode mode;
+
+@@ -750,19 +733,7 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
+ return;
+ }
+
+- /*
+- * type 2, link to the end of an existing open chain, be careful
+- * that its submission is controlled by the original attached chain.
+- */
+- if (*owned_head != &pcl->next && pcl != f->tailpcl &&
+- cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
+- *owned_head) == Z_EROFS_PCLUSTER_TAIL) {
+- *owned_head = Z_EROFS_PCLUSTER_TAIL;
+- f->mode = Z_EROFS_PCLUSTER_HOOKED;
+- f->tailpcl = NULL;
+- return;
+- }
+- /* type 3, it belongs to a chain, but it isn't the end of the chain */
++ /* type 2, it belongs to an ongoing chain */
+ f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
+ }
+
+@@ -823,9 +794,6 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+ goto err_out;
+ }
+ }
+- /* used to check tail merging loop due to corrupted images */
+- if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
+- fe->tailpcl = pcl;
+ fe->owned_head = &pcl->next;
+ fe->pcl = pcl;
+ return 0;
+@@ -846,7 +814,6 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
+
+ /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
+ DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
+- DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+
+ if (!(map->m_flags & EROFS_MAP_META)) {
+ grp = erofs_find_workgroup(fe->inode->i_sb,
+@@ -865,10 +832,6 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
+
+ if (ret == -EEXIST) {
+ mutex_lock(&fe->pcl->lock);
+- /* used to check tail merging loop due to corrupted images */
+- if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
+- fe->tailpcl = fe->pcl;
+-
+ z_erofs_try_to_claim_pcluster(fe);
+ } else if (ret) {
+ return ret;
+@@ -1025,8 +988,7 @@ hitted:
+ * those chains are handled asynchronously thus the page cannot be used
+ * for inplace I/O or bvpage (should be processed in a strict order.)
+ */
+- tight &= (fe->mode >= Z_EROFS_PCLUSTER_HOOKED &&
+- fe->mode != Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
++ tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
+
+ cur = end - min_t(unsigned int, offset + end - map->m_la, end);
+ if (!(map->m_flags & EROFS_MAP_MAPPED)) {
+@@ -1407,10 +1369,7 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
+ };
+ z_erofs_next_pcluster_t owned = io->head;
+
+- while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
+- /* impossible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */
+- DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
+- /* impossible that 'owned' equals Z_EROFS_PCLUSTER_NIL */
++ while (owned != Z_EROFS_PCLUSTER_TAIL) {
+ DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
+
+ be.pcl = container_of(owned, struct z_erofs_pcluster, next);
+@@ -1427,7 +1386,7 @@ static void z_erofs_decompressqueue_work(struct work_struct *work)
+ container_of(work, struct z_erofs_decompressqueue, u.work);
+ struct page *pagepool = NULL;
+
+- DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
++ DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL);
+ z_erofs_decompress_queue(bgq, &pagepool);
+ erofs_release_pages(&pagepool);
+ kvfree(bgq);
+@@ -1615,7 +1574,7 @@ fg_out:
+ q->sync = true;
+ }
+ q->sb = sb;
+- q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
++ q->head = Z_EROFS_PCLUSTER_TAIL;
+ return q;
+ }
+
+@@ -1633,11 +1592,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
+ z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
+ z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
+
+- DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+- if (owned_head == Z_EROFS_PCLUSTER_TAIL)
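The kexec hunk above aligns the requested size once, up front, and derives the new end of the crash region as simply start + new_size. A quick userspace check of that arithmetic; the 4096-byte alignment below is only an example, since KEXEC_CRASH_MEM_ALIGN is architecture-specific.

#include <stdio.h>

#define ALIGN_EXAMPLE 4096UL	/* stand-in for KEXEC_CRASH_MEM_ALIGN */
#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long start = 0x10000000UL;
	unsigned long new_size = roundup(5000UL, ALIGN_EXAMPLE);	/* -> 8192 */
	unsigned long end = start + new_size;

	printf("new_size=%lu end=%#lx\n", new_size, end);
	return 0;
}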
+- owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
+-
+- WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);
++ WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);
+
+ WRITE_ONCE(*submit_qtail, owned_head);
+ WRITE_ONCE(*bypass_qtail, &pcl->next);
+@@ -1708,15 +1663,10 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
+ unsigned int i = 0;
+ bool bypass = true;
+
+- /* no possible 'owned_head' equals the following */
+- DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
+-
+ pcl = container_of(owned_head, struct z_erofs_pcluster, next);
++ owned_head = READ_ONCE(pcl->next);
+
+- /* close the main owned chain at first */
+- owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
+- Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ if (z_erofs_is_inline_pcluster(pcl)) {
+ move_to_bypass_jobqueue(pcl, qtail, owned_head);
+ continue;
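After the zdata.c changes above, a pcluster chain is just a singly linked list terminated by the single Z_EROFS_PCLUSTER_TAIL sentinel, and the decompression and submission paths walk it until they hit that sentinel (the removed TAIL_CLOSED/HOOKED states are gone). A userspace sketch of that structure; as in the kernel code, the sentinel value is only compared, never dereferenced.

#include <stdio.h>

struct pcluster {
	struct pcluster *next;
	int id;
};

#define PCLUSTER_TAIL ((struct pcluster *)0x5F0ECAFE)	/* sentinel, never dereferenced */

int main(void)
{
	struct pcluster c = { PCLUSTER_TAIL, 3 };
	struct pcluster b = { &c, 2 };
	struct pcluster a = { &b, 1 };

	for (struct pcluster *p = &a; p != PCLUSTER_TAIL; p = p->next)
		printf("submit pcluster %d\n", p->id);
	return 0;
}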
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index b5f4086537548..322f110b3c8f4 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -148,7 +148,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
+ u8 *in, type;
+ bool big_pcluster;
+
+- if (1 << amortizedshift == 4)
++ if (1 << amortizedshift == 4 && lclusterbits <= 14)
+ vcnt = 2;
+ else if (1 << amortizedshift == 2 && lclusterbits == 12)
+ vcnt = 16;
+@@ -250,7 +250,6 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+ {
+ struct inode *const inode = m->inode;
+ struct erofs_inode *const vi = EROFS_I(inode);
+- const unsigned int lclusterbits = vi->z_logical_clusterbits;
+ const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
+ ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
+ const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
+@@ -258,9 +257,6 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+ unsigned int amortizedshift;
+ erofs_off_t pos;
+
+- if (lclusterbits != 12)
+- return -EOPNOTSUPP;
+-
+ if (lcn >= totalidx)
+ return -EINVAL;
+
+diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
+index 569e5eecdf3db..3e391a7d5a3ab 100644
+--- a/fs/ksmbd/smb_common.c
++++ b/fs/ksmbd/smb_common.c
+@@ -536,7 +536,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
+ out[baselen + 3] = PERIOD;
+
+ if (dot_present)
+- memcpy(&out[baselen + 4], extension, 4);
++ memcpy(out + baselen + 4, extension, 4);
+ else
+ out[baselen + 4] = '\0';
+ smbConvertToUTF16((__le16 *)shortname, out, PATH_MAX,
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 9a47303b2cba6..0c05668019c2b 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -355,7 +355,6 @@ static int lockd_get(void)
+ int error;
+
+ if (nlmsvc_serv) {
+- svc_get(nlmsvc_serv);
+ nlmsvc_users++;
+ return 0;
+ }
+diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c
+index 76ae118342066..911f634ba3da7 100644
+--- a/fs/nfs/nfs42xattr.c
++++ b/fs/nfs/nfs42xattr.c
+@@ -991,6 +991,29 @@ static void nfs4_xattr_cache_init_once(void *p)
+ INIT_LIST_HEAD(&cache->dispose);
+ }
+
++static int nfs4_xattr_shrinker_init(struct shrinker *shrinker,
++ struct list_lru *lru, const char *name)
++{
++ int ret = 0;
++
++ ret = register_shrinker(shrinker, name);
++ if (ret)
++ return ret;
++
++ ret = list_lru_init_memcg(lru, shrinker);
++ if (ret)
++ unregister_shrinker(shrinker);
++
++ return ret;
++}
++
++static void nfs4_xattr_shrinker_destroy(struct shrinker *shrinker,
++ struct list_lru *lru)
++{
++ unregister_shrinker(shrinker);
++ list_lru_destroy(lru);
++}
++
+ int __init nfs4_xattr_cache_init(void)
+ {
+ int ret = 0;
+@@ -1002,44 +1025,30 @@ int __init nfs4_xattr_cache_init(void)
+ if (nfs4_xattr_cache_cachep == NULL)
+ return -ENOMEM;
+
+- ret = list_lru_init_memcg(&nfs4_xattr_large_entry_lru,
+- &nfs4_xattr_large_entry_shrinker);
+- if (ret)
+- goto out4;
+-
+- ret = list_lru_init_memcg(&nfs4_xattr_entry_lru,
+- &nfs4_xattr_entry_shrinker);
+- if (ret)
+- goto out3;
+-
+- ret = list_lru_init_memcg(&nfs4_xattr_cache_lru,
+- &nfs4_xattr_cache_shrinker);
+- if (ret)
+- goto out2;
+-
+- ret = register_shrinker(&nfs4_xattr_cache_shrinker, "nfs-xattr_cache");
++ ret = nfs4_xattr_shrinker_init(&nfs4_xattr_cache_shrinker,
++ &nfs4_xattr_cache_lru,
++ "nfs-xattr_cache");
+ if (ret)
+ goto out1;
+
+- ret = register_shrinker(&nfs4_xattr_entry_shrinker, "nfs-xattr_entry");
++ ret = nfs4_xattr_shrinker_init(&nfs4_xattr_entry_shrinker,
++ &nfs4_xattr_entry_lru,
++ "nfs-xattr_entry");
+ if (ret)
+- goto out;
++ goto out2;
+
+- ret = register_shrinker(&nfs4_xattr_large_entry_shrinker,
+- "nfs-xattr_large_entry");
++ ret = nfs4_xattr_shrinker_init(&nfs4_xattr_large_entry_shrinker,
++ &nfs4_xattr_large_entry_lru,
++ "nfs-xattr_large_entry");
+ if (!ret)
+ return 0;
+
+- unregister_shrinker(&nfs4_xattr_entry_shrinker);
+-out:
+- unregister_shrinker(&nfs4_xattr_cache_shrinker);
+-out1:
+- list_lru_destroy(&nfs4_xattr_cache_lru);
++ nfs4_xattr_shrinker_destroy(&nfs4_xattr_entry_shrinker,
++ &nfs4_xattr_entry_lru);
+ out2:
+- list_lru_destroy(&nfs4_xattr_entry_lru);
+-out3:
+- list_lru_destroy(&nfs4_xattr_large_entry_lru);
+-out4:
++ nfs4_xattr_shrinker_destroy(&nfs4_xattr_cache_shrinker,
++ &nfs4_xattr_cache_lru);
++out1:
+ kmem_cache_destroy(nfs4_xattr_cache_cachep);
+
+ return ret;
+@@ -1047,11 +1056,11 @@ out4:
+
+ void nfs4_xattr_cache_exit(void)
+ {
+- unregister_shrinker(&nfs4_xattr_large_entry_shrinker);
+- unregister_shrinker(&nfs4_xattr_entry_shrinker);
+- unregister_shrinker(&nfs4_xattr_cache_shrinker);
+- list_lru_destroy(&nfs4_xattr_large_entry_lru);
+- list_lru_destroy(&nfs4_xattr_entry_lru);
+- list_lru_destroy(&nfs4_xattr_cache_lru);
++ nfs4_xattr_shrinker_destroy(&nfs4_xattr_large_entry_shrinker,
++ &nfs4_xattr_large_entry_lru);
++ nfs4_xattr_shrinker_destroy(&nfs4_xattr_entry_shrinker,
++ &nfs4_xattr_entry_lru);
++ nfs4_xattr_shrinker_destroy(&nfs4_xattr_cache_shrinker,
++ &nfs4_xattr_cache_lru);
+ kmem_cache_destroy(nfs4_xattr_cache_cachep);
+ }
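The nfs42xattr.c refactor above pairs register_shrinker() with list_lru_init_memcg() in one init helper that unwinds the first step if the second fails, plus a matching destroy helper, so the error paths become symmetric. A generic sketch of that init/unwind pairing with placeholder resources, not NFS APIs.

#include <stdio.h>

static int register_a(const char *name) { printf("register %s\n", name); return 0; }
static void unregister_a(const char *name) { printf("unregister %s\n", name); }
static int init_b(const char *name) { printf("init lru for %s\n", name); return 0; }
static void destroy_b(const char *name) { printf("destroy lru for %s\n", name); }

/* Callers only ever see a fully-initialized or fully-torn-down pair. */
static int pair_init(const char *name)
{
	int ret = register_a(name);

	if (ret)
		return ret;
	ret = init_b(name);
	if (ret)
		unregister_a(name);	/* unwind the first step */
	return ret;
}

static void pair_destroy(const char *name)
{
	unregister_a(name);
	destroy_b(name);
}

int main(void)
{
	if (!pair_init("cache"))
		pair_destroy("cache");
	return 0;
}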
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 5607b1e2b8212..23a23387211ba 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -921,6 +921,7 @@ out:
+ out_noaction:
+ return ret;
+ session_recover:
++ set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
+ nfs4_schedule_session_recovery(session, status);
+ dprintk("%s ERROR: %d Reset session\n", __func__, status);
+ nfs41_sequence_free_slot(res);
+diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
+index aecbd712a00cf..929a1133bc180 100644
+--- a/fs/ocfs2/cluster/tcp.c
++++ b/fs/ocfs2/cluster/tcp.c
+@@ -2087,18 +2087,24 @@ void o2net_stop_listening(struct o2nm_node *node)
+
+ int o2net_init(void)
+ {
++ struct folio *folio;
++ void *p;
+ unsigned long i;
+
+ o2quo_init();
+-
+ o2net_debugfs_init();
+
+- o2net_hand = kzalloc(sizeof(struct o2net_handshake), GFP_KERNEL);
+- o2net_keep_req = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
+- o2net_keep_resp = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
+- if (!o2net_hand || !o2net_keep_req || !o2net_keep_resp)
++ folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 0);
++ if (!folio)
+ goto out;
+
++ p = folio_address(folio);
++ o2net_hand = p;
++ p += sizeof(struct o2net_handshake);
++ o2net_keep_req = p;
++ p += sizeof(struct o2net_msg);
++ o2net_keep_resp = p;
++
+ o2net_hand->protocol_version = cpu_to_be64(O2NET_PROTOCOL_VERSION);
+ o2net_hand->connector_id = cpu_to_be64(1);
+
+@@ -2124,9 +2130,6 @@ int o2net_init(void)
+ return 0;
+
+ out:
+- kfree(o2net_hand);
+- kfree(o2net_keep_req);
+- kfree(o2net_keep_resp);
+ o2net_debugfs_exit();
+ o2quo_exit();
+ return -ENOMEM;
+@@ -2135,8 +2138,6 @@ out:
+ void o2net_exit(void)
+ {
+ o2quo_exit();
+- kfree(o2net_hand);
+- kfree(o2net_keep_req);
+- kfree(o2net_keep_resp);
+ o2net_debugfs_exit();
++ folio_put(virt_to_folio(o2net_hand));
+ }
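The o2net_init() change above carves the handshake and the two keepalive messages out of a single zeroed page and releases them all with one folio_put(). A userspace approximation using calloc() and pointer bumping; the struct contents and sizes are illustrative, and the kernel code uses a folio rather than malloc.

#include <stdio.h>
#include <stdlib.h>

struct handshake { unsigned long long protocol_version; };
struct msg { unsigned int magic; };

int main(void)
{
	char *p = calloc(1, 4096);	/* one zeroed "page" */
	struct handshake *hand;
	struct msg *keep_req, *keep_resp;

	if (!p)
		return 1;
	hand = (struct handshake *)p;
	p += sizeof(*hand);
	keep_req = (struct msg *)p;
	p += sizeof(*keep_req);
	keep_resp = (struct msg *)p;

	hand->protocol_version = 11;
	keep_req->magic = 0xfa57;
	keep_resp->magic = 0xfa58;
	printf("%llu %x %x\n", hand->protocol_version,
	       keep_req->magic, keep_resp->magic);

	free(hand);		/* one allocation, one free */
	return 0;
}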
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index c14e90764e356..7bf101e756c8c 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -576,6 +576,7 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
+ /* Restore timestamps on parent (best effort) */
+ ovl_set_timestamps(ofs, upperdir, &c->pstat);
+ ovl_dentry_set_upper_alias(c->dentry);
++ ovl_dentry_update_reval(c->dentry, upper);
+ }
+ }
+ inode_unlock(udir);
+@@ -895,6 +896,7 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
+ inode_unlock(udir);
+
+ ovl_dentry_set_upper_alias(c->dentry);
++ ovl_dentry_update_reval(c->dentry, ovl_dentry_upper(c->dentry));
+ }
+
+ out:
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index fc25fb95d5fc0..9be52d8013c83 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -269,8 +269,7 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode,
+
+ ovl_dir_modified(dentry->d_parent, false);
+ ovl_dentry_set_upper_alias(dentry);
+- ovl_dentry_update_reval(dentry, newdentry,
+- DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
++ ovl_dentry_init_reval(dentry, newdentry);
+
+ if (!hardlink) {
+ /*
+diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
+index defd4e231ad2c..5c36fb3a7bab1 100644
+--- a/fs/overlayfs/export.c
++++ b/fs/overlayfs/export.c
+@@ -326,8 +326,7 @@ static struct dentry *ovl_obtain_alias(struct super_block *sb,
+ if (upper_alias)
+ ovl_dentry_set_upper_alias(dentry);
+
+- ovl_dentry_update_reval(dentry, upper,
+- DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
++ ovl_dentry_init_reval(dentry, upper);
+
+ return d_instantiate_anon(dentry, inode);
+
+diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
+index cfb3420b7df0e..100a492d2b2a6 100644
+--- a/fs/overlayfs/namei.c
++++ b/fs/overlayfs/namei.c
+@@ -1122,8 +1122,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
+ ovl_set_flag(OVL_UPPERDATA, inode);
+ }
+
+- ovl_dentry_update_reval(dentry, upperdentry,
+- DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
++ ovl_dentry_init_reval(dentry, upperdentry);
+
+ revert_creds(old_cred);
+ if (origin_path) {
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index 4d0b278f5630e..e100c55bb924a 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -375,8 +375,10 @@ bool ovl_index_all(struct super_block *sb);
+ bool ovl_verify_lower(struct super_block *sb);
+ struct ovl_entry *ovl_alloc_entry(unsigned int numlower);
+ bool ovl_dentry_remote(struct dentry *dentry);
+-void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *upperdentry,
+- unsigned int mask);
++void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *realdentry);
++void ovl_dentry_init_reval(struct dentry *dentry, struct dentry *upperdentry);
++void ovl_dentry_init_flags(struct dentry *dentry, struct dentry *upperdentry,
++ unsigned int mask);
+ bool ovl_dentry_weird(struct dentry *dentry);
+ enum ovl_path_type ovl_path_type(struct dentry *dentry);
+ void ovl_path_upper(struct dentry *dentry, struct path *path);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index f1d9f75f8786c..49b6956468f9e 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -1885,7 +1885,7 @@ static struct dentry *ovl_get_root(struct super_block *sb,
+ ovl_dentry_set_flag(OVL_E_CONNECTED, root);
+ ovl_set_upperdata(d_inode(root));
+ ovl_inode_init(d_inode(root), &oip, ino, fsid);
+- ovl_dentry_update_reval(root, upperdentry, DCACHE_OP_WEAK_REVALIDATE);
++ ovl_dentry_init_flags(root, upperdentry, DCACHE_OP_WEAK_REVALIDATE);
+
+ return root;
+ }
+diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
+index 923d66d131c16..6a0652bd51f24 100644
+--- a/fs/overlayfs/util.c
++++ b/fs/overlayfs/util.c
+@@ -94,14 +94,30 @@ struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
+ return oe;
+ }
+
++#define OVL_D_REVALIDATE (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE)
++
+ bool ovl_dentry_remote(struct dentry *dentry)
+ {
+- return dentry->d_flags &
+- (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
++ return dentry->d_flags & OVL_D_REVALIDATE;
++}
++
++void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *realdentry)
++{
++ if (!ovl_dentry_remote(realdentry))
++ return;
++
++ spin_lock(&dentry->d_lock);
++ dentry->d_flags |= realdentry->d_flags & OVL_D_REVALIDATE;
++ spin_unlock(&dentry->d_lock);
++}
++
++void ovl_dentry_init_reval(struct dentry *dentry, struct dentry *upperdentry)
++{
++ return ovl_dentry_init_flags(dentry, upperdentry, OVL_D_REVALIDATE);
+ }
+
+-void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *upperdentry,
+- unsigned int mask)
++void ovl_dentry_init_flags(struct dentry *dentry, struct dentry *upperdentry,
++ unsigned int mask)
+ {
+ struct ovl_entry *oe = OVL_E(dentry);
+ unsigned int i, flags = 0;
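ovl_dentry_update_reval() above copies only the two revalidate bits from the real dentry's flags into the overlay dentry, under the destination's d_lock. A pthread sketch of that masked flag propagation; the flag values are invented for illustration.

#include <pthread.h>
#include <stdio.h>

#define OP_REVALIDATE		0x01
#define OP_WEAK_REVALIDATE	0x02
#define D_REVALIDATE_MASK	(OP_REVALIDATE | OP_WEAK_REVALIDATE)

struct dentry {
	pthread_mutex_t lock;
	unsigned int flags;
};

static void update_reval(struct dentry *dst, const struct dentry *real)
{
	if (!(real->flags & D_REVALIDATE_MASK))
		return;			/* nothing to propagate */

	pthread_mutex_lock(&dst->lock);
	dst->flags |= real->flags & D_REVALIDATE_MASK;
	pthread_mutex_unlock(&dst->lock);
}

int main(void)
{
	struct dentry real = { PTHREAD_MUTEX_INITIALIZER, OP_WEAK_REVALIDATE | 0x80 };
	struct dentry ovl = { PTHREAD_MUTEX_INITIALIZER, 0 };

	update_reval(&ovl, &real);
	printf("0x%x\n", ovl.flags);	/* 0x2: only the masked bits moved */
	return 0;
}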
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
+index 966191d3a5ba2..85aaf0fc6d7d1 100644
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -599,6 +599,8 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+ raw_spin_lock_init(&prz->buffer_lock);
+ prz->flags = flags;
+ prz->label = kstrdup(label, GFP_KERNEL);
++ if (!prz->label)
++ goto err;
+
+ ret = persistent_ram_buffer_map(start, size, prz, memtype);
+ if (ret)
+diff --git a/fs/splice.c b/fs/splice.c
+index 2c3dec2b6dfaf..5eca589fe8479 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -338,7 +338,6 @@ ssize_t direct_splice_read(struct file *in, loff_t *ppos,
+ reclaim -= ret;
+ remain = ret;
+ *ppos = kiocb.ki_pos;
+- file_accessed(in);
+ } else if (ret < 0) {
+ /*
+ * callers of ->splice_read() expect -EAGAIN on
+diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
+index 255645c1f9a89..6ea339d5de088 100644
+--- a/include/drm/drm_fixed.h
++++ b/include/drm/drm_fixed.h
+@@ -71,6 +71,7 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
+ }
+
+ #define DRM_FIXED_POINT 32
++#define DRM_FIXED_POINT_HALF 16
+ #define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
+ #define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
+ #define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
+@@ -87,6 +88,11 @@ static inline int drm_fixp2int(s64 a)
+ return ((s64)a) >> DRM_FIXED_POINT;
+ }
+
++static inline int drm_fixp2int_round(s64 a)
++{
++ return drm_fixp2int(a + (1 << (DRM_FIXED_POINT_HALF - 1)));
++}
++
+ static inline int drm_fixp2int_ceil(s64 a)
+ {
+ if (a > 0)
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index de0b0c3e7395a..4110d6e99b2b9 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -748,8 +748,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
+ struct blk_mq_tags {
+ unsigned int nr_tags;
+ unsigned int nr_reserved_tags;
+-
+- atomic_t active_queues;
++ unsigned int active_queues;
+
+ struct sbitmap_queue bitmap_tags;
+ struct sbitmap_queue breserved_tags;
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 941304f17492f..3d620f298aebd 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -1297,7 +1297,7 @@ static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
+ }
+
+ static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
+- blk_opf_t op)
++ enum req_op op)
+ {
+ if (!bdev_is_zoned(bdev))
+ return false;
+diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
+index cfbda114348c9..122c62e561fc7 100644
+--- a/include/linux/blktrace_api.h
++++ b/include/linux/blktrace_api.h
+@@ -85,10 +85,14 @@ extern int blk_trace_remove(struct request_queue *q);
+ # define blk_add_driver_data(rq, data, len) do {} while (0)
+ # define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
+ # define blk_trace_startstop(q, start) (-ENOTTY)
+-# define blk_trace_remove(q) (-ENOTTY)
+ # define blk_add_trace_msg(q, fmt, ...) do { } while (0)
+ # define blk_add_cgroup_trace_msg(q, cg, fmt, ...) do { } while (0)
+ # define blk_trace_note_message_enabled(q) (false)
++
++static inline int blk_trace_remove(struct request_queue *q)
++{
++ return -ENOTTY;
++}
+ #endif /* CONFIG_BLK_DEV_IO_TRACE */
+
+ #ifdef CONFIG_COMPAT
+diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
+index cc35d010fa949..e1a3c9c9754c5 100644
+--- a/include/linux/bootmem_info.h
++++ b/include/linux/bootmem_info.h
+@@ -3,6 +3,7 @@
+ #define __LINUX_BOOTMEM_INFO_H
+
+ #include <linux/mm.h>
++#include <linux/kmemleak.h>
+
+ /*
+ * Types for free bootmem stored in page->lru.next. These have to be in
+@@ -59,6 +60,7 @@ static inline void get_page_bootmem(unsigned long info, struct page *page,
+
+ static inline void free_bootmem_page(struct page *page)
+ {
++ kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
+ free_reserved_page(page);
+ }
+ #endif
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 5bd6ac04773aa..18397e54bac18 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1082,7 +1082,6 @@ struct bpf_trampoline {
+ int progs_cnt[BPF_TRAMP_MAX];
+ /* Executable image of trampoline */
+ struct bpf_tramp_image *cur_image;
+- u64 selector;
+ struct module *mod;
+ };
+
+diff --git a/include/linux/can/length.h b/include/linux/can/length.h
+index 6995092b774ec..ef1fd32cef16b 100644
+--- a/include/linux/can/length.h
++++ b/include/linux/can/length.h
+@@ -69,17 +69,18 @@
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ * Data field 0...512
+- * Stuff Bit Count (SBC) 0...16: 4 20...64:5
++ * Stuff Bit Count (SBC) 4
+ * CRC 0...16: 17 20...64:21
+ * CRC delimiter (CD) 1
++ * Fixed Stuff bits (FSB) 0...16: 6 20...64:7
+ * ACK slot (AS) 1
+ * ACK delimiter (AD) 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+- * assuming CRC21, rounded up and ignoring bitstuffing
++ * assuming CRC21, rounded up and ignoring dynamic bitstuffing
+ */
+-#define CANFD_FRAME_OVERHEAD_SFF DIV_ROUND_UP(61, 8)
++#define CANFD_FRAME_OVERHEAD_SFF DIV_ROUND_UP(67, 8)
+
+ /*
+ * Size of a CAN-FD Extended Frame
+@@ -98,17 +99,18 @@
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ * Data field 0...512
+- * Stuff Bit Count (SBC) 0...16: 4 20...64:5
++ * Stuff Bit Count (SBC) 4
+ * CRC 0...16: 17 20...64:21
+ * CRC delimiter (CD) 1
++ * Fixed Stuff bits (FSB) 0...16: 6 20...64:7
+ * ACK slot (AS) 1
+ * ACK delimiter (AD) 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+- * assuming CRC21, rounded up and ignoring bitstuffing
++ * assuming CRC21, rounded up and ignoring dynamic bitstuffing
+ */
+-#define CANFD_FRAME_OVERHEAD_EFF DIV_ROUND_UP(80, 8)
++#define CANFD_FRAME_OVERHEAD_EFF DIV_ROUND_UP(86, 8)
+
+ /*
+ * Maximum size of a Classical CAN frame
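The can/length.h update above grows the worst-case CAN-FD overhead because the fixed stuff bits are now counted on top of the 4-bit stuff bit count. A quick check of the byte-level effect of the new constants; the bit totals come from the macros in the header itself.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* SFF: 61 -> 67 bits, EFF: 80 -> 86 bits (CRC21 worst case) */
	printf("SFF overhead: %d -> %d bytes\n",
	       DIV_ROUND_UP(61, 8), DIV_ROUND_UP(67, 8));	/* 8 -> 9 */
	printf("EFF overhead: %d -> %d bytes\n",
	       DIV_ROUND_UP(80, 8), DIV_ROUND_UP(86, 8));	/* 10 -> 11 */
	return 0;
}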
+diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
+index 2463bdd2a382d..1e25c9060225c 100644
+--- a/include/linux/ieee80211.h
++++ b/include/linux/ieee80211.h
+@@ -4592,15 +4592,12 @@ static inline u8 ieee80211_mle_common_size(const u8 *data)
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ case IEEE80211_ML_CONTROL_TYPE_PREQ:
+ case IEEE80211_ML_CONTROL_TYPE_TDLS:
++ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ /*
+ * The length is the first octet pointed by mle->variable so no
+ * need to add anything
+ */
+ break;
+- case IEEE80211_ML_CONTROL_TYPE_RECONF:
+- if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR)
+- common += ETH_ALEN;
+- return common;
+ case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
+ common += ETH_ALEN;
+diff --git a/include/linux/mfd/tps65010.h b/include/linux/mfd/tps65010.h
+index a1fb9bc5311de..5edf1aef11185 100644
+--- a/include/linux/mfd/tps65010.h
++++ b/include/linux/mfd/tps65010.h
+@@ -28,6 +28,8 @@
+ #ifndef __LINUX_I2C_TPS65010_H
+ #define __LINUX_I2C_TPS65010_H
+
++struct gpio_chip;
++
+ /*
+ * ----------------------------------------------------------------------------
+ * Registers, all 8 bits
+@@ -176,12 +178,10 @@ struct i2c_client;
+
+ /**
+ * struct tps65010_board - packages GPIO and LED lines
+- * @base: the GPIO number to assign to GPIO-1
+ * @outmask: bit (N-1) is set to allow GPIO-N to be used as an
+ * (open drain) output
+ * @setup: optional callback issued once the GPIOs are valid
+ * @teardown: optional callback issued before the GPIOs are invalidated
+- * @context: optional parameter passed to setup() and teardown()
+ *
+ * Board data may be used to package the GPIO (and LED) lines for use
+ * in by the generic GPIO and LED frameworks. The first four GPIOs
+@@ -193,12 +193,9 @@ struct i2c_client;
+ * devices in their initial states using these GPIOs.
+ */
+ struct tps65010_board {
+- int base;
+ unsigned outmask;
+-
+- int (*setup)(struct i2c_client *client, void *context);
+- int (*teardown)(struct i2c_client *client, void *context);
+- void *context;
++ int (*setup)(struct i2c_client *client, struct gpio_chip *gc);
++ void (*teardown)(struct i2c_client *client, struct gpio_chip *gc);
+ };
+
+ #endif /* __LINUX_I2C_TPS65010_H */
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index c726ea7812552..daa2f40d9ce65 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -294,6 +294,7 @@ struct mmc_card {
+ #define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
+ #define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
+ #define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */
++#define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support */
+
+ bool reenable_cmdq; /* Re-enable Command Queue */
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 7ed63f5bbe056..02ac3a058c09f 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -5063,6 +5063,15 @@ static inline bool netif_is_l3_slave(const struct net_device *dev)
+ return dev->priv_flags & IFF_L3MDEV_SLAVE;
+ }
+
++static inline int dev_sdif(const struct net_device *dev)
++{
++#ifdef CONFIG_NET_L3_MASTER_DEV
++ if (netif_is_l3_slave(dev))
++ return dev->ifindex;
++#endif
++ return 0;
++}
++
+ static inline bool netif_is_bridge_master(const struct net_device *dev)
+ {
+ return dev->priv_flags & IFF_EBRIDGE;
+diff --git a/include/linux/nmi.h b/include/linux/nmi.h
+index 048c0b9aa623d..771d77b62bc10 100644
+--- a/include/linux/nmi.h
++++ b/include/linux/nmi.h
+@@ -197,7 +197,7 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh);
+ #endif
+
+ #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
+- defined(CONFIG_HARDLOCKUP_DETECTOR)
++ defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
+ void watchdog_update_hrtimer_threshold(u64 period);
+ #else
+ static inline void watchdog_update_hrtimer_threshold(u64 period) { }
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index a5dda515fcd1d..87d499ca7e176 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1866,6 +1866,7 @@ static inline int pci_dev_present(const struct pci_device_id *ids)
+ #define pci_dev_put(dev) do { } while (0)
+
+ static inline void pci_set_master(struct pci_dev *dev) { }
++static inline void pci_clear_master(struct pci_dev *dev) { }
+ static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
+ static inline void pci_disable_device(struct pci_dev *dev) { }
+ static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index d2c3f16cf6b18..02e0086b10f6f 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -261,18 +261,14 @@ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
+
+ extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
+
+-#ifdef CONFIG_WATCH_QUEUE
+ unsigned long account_pipe_buffers(struct user_struct *user,
+ unsigned long old, unsigned long new);
+ bool too_many_pipe_buffers_soft(unsigned long user_bufs);
+ bool too_many_pipe_buffers_hard(unsigned long user_bufs);
+ bool pipe_is_unprivileged_user(void);
+-#endif
+
+ /* for F_SETPIPE_SZ and F_GETPIPE_SZ */
+-#ifdef CONFIG_WATCH_QUEUE
+ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots);
+-#endif
+ long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
+ struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice);
+
+diff --git a/include/net/dsa.h b/include/net/dsa.h
+index def06ef676dd8..56e297e7848d6 100644
+--- a/include/net/dsa.h
++++ b/include/net/dsa.h
+@@ -329,9 +329,17 @@ struct dsa_port {
+ struct list_head fdbs;
+ struct list_head mdbs;
+
+- /* List of VLANs that CPU and DSA ports are members of. */
+ struct mutex vlans_lock;
+- struct list_head vlans;
++ union {
++ /* List of VLANs that CPU and DSA ports are members of.
++ * Access to this is serialized by the sleepable @vlans_lock.
++ */
++ struct list_head vlans;
++ /* List of VLANs that user ports are members of.
++ * Access to this is serialized by netif_addr_lock_bh().
++ */
++ struct list_head user_vlans;
++ };
+ };
+
+ /* TODO: ideally DSA ports would have a single dp->link_dp member,
+diff --git a/include/net/regulatory.h b/include/net/regulatory.h
+index 896191f420d50..b2cb4a9eb04dc 100644
+--- a/include/net/regulatory.h
++++ b/include/net/regulatory.h
+@@ -140,17 +140,6 @@ struct regulatory_request {
+ * otherwise initiating radiation is not allowed. This will enable the
+ * relaxations enabled under the CFG80211_REG_RELAX_NO_IR configuration
+ * option
+- * @REGULATORY_IGNORE_STALE_KICKOFF: the regulatory core will _not_ make sure
+- * all interfaces on this wiphy reside on allowed channels. If this flag
+- * is not set, upon a regdomain change, the interfaces are given a grace
+- * period (currently 60 seconds) to disconnect or move to an allowed
+- * channel. Interfaces on forbidden channels are forcibly disconnected.
+- * Currently these types of interfaces are supported for enforcement:
+- * NL80211_IFTYPE_ADHOC, NL80211_IFTYPE_STATION, NL80211_IFTYPE_AP,
+- * NL80211_IFTYPE_AP_VLAN, NL80211_IFTYPE_MONITOR,
+- * NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO,
+- * NL80211_IFTYPE_P2P_DEVICE. The flag will be set by default if a device
+- * includes any modes unsupported for enforcement checking.
+ * @REGULATORY_WIPHY_SELF_MANAGED: for devices that employ wiphy-specific
+ * regdom management. These devices will ignore all regdom changes not
+ * originating from their own wiphy.
+@@ -177,7 +166,7 @@ enum ieee80211_regulatory_flags {
+ REGULATORY_COUNTRY_IE_FOLLOW_POWER = BIT(3),
+ REGULATORY_COUNTRY_IE_IGNORE = BIT(4),
+ REGULATORY_ENABLE_RELAX_NO_IR = BIT(5),
+- REGULATORY_IGNORE_STALE_KICKOFF = BIT(6),
++ /* reuse bit 6 next time */
+ REGULATORY_WIPHY_SELF_MANAGED = BIT(7),
+ };
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index f0654c44acf5f..81ad7fa03b73a 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2100,6 +2100,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
+ }
+
+ kuid_t sock_i_uid(struct sock *sk);
++unsigned long __sock_i_ino(struct sock *sk);
+ unsigned long sock_i_ino(struct sock *sk);
+
+ static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
+diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
+index 3e8619c72f774..b4bc2828fa09f 100644
+--- a/include/trace/events/timer.h
++++ b/include/trace/events/timer.h
+@@ -158,7 +158,11 @@ DEFINE_EVENT(timer_class, timer_cancel,
+ { HRTIMER_MODE_ABS_SOFT, "ABS|SOFT" }, \
+ { HRTIMER_MODE_REL_SOFT, "REL|SOFT" }, \
+ { HRTIMER_MODE_ABS_PINNED_SOFT, "ABS|PINNED|SOFT" }, \
+- { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" })
++ { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" }, \
++ { HRTIMER_MODE_ABS_HARD, "ABS|HARD" }, \
++ { HRTIMER_MODE_REL_HARD, "REL|HARD" }, \
++ { HRTIMER_MODE_ABS_PINNED_HARD, "ABS|PINNED|HARD" }, \
++ { HRTIMER_MODE_REL_PINNED_HARD, "REL|PINNED|HARD" })
+
+ /**
+ * hrtimer_init - called when the hrtimer is initialized
+diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
+index db70944c681aa..91803587691a6 100644
+--- a/include/ufs/ufshcd.h
++++ b/include/ufs/ufshcd.h
+@@ -225,7 +225,6 @@ struct ufs_dev_cmd {
+ struct mutex lock;
+ struct completion *complete;
+ struct ufs_query query;
+- struct cq_entry *cqe;
+ };
+
+ /**
+diff --git a/init/Makefile b/init/Makefile
+index 26de459006c4e..ec557ada3c12e 100644
+--- a/init/Makefile
++++ b/init/Makefile
+@@ -60,3 +60,4 @@ include/generated/utsversion.h: FORCE
+ $(obj)/version-timestamp.o: include/generated/utsversion.h
+ CFLAGS_version-timestamp.o := -include include/generated/utsversion.h
+ KASAN_SANITIZE_version-timestamp.o := n
++GCOV_PROFILE_version-timestamp.o := n
+diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
+index b86b907e566ca..bb70f400c25eb 100644
+--- a/kernel/bpf/cgroup.c
++++ b/kernel/bpf/cgroup.c
+@@ -1826,6 +1826,12 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
+ ret = 1;
+ } else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
+ /* optlen is out of bounds */
++ if (*optlen > PAGE_SIZE && ctx.optlen >= 0) {
++ pr_info_once("bpf setsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
++ ctx.optlen, max_optlen);
++ ret = 0;
++ goto out;
++ }
+ ret = -EFAULT;
+ } else {
+ /* optlen within bounds, run kernel handler */
+@@ -1881,8 +1887,10 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ .optname = optname,
+ .current_task = current,
+ };
++ int orig_optlen;
+ int ret;
+
++ orig_optlen = max_optlen;
+ ctx.optlen = max_optlen;
+ max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
+ if (max_optlen < 0)
+@@ -1905,6 +1913,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ ret = -EFAULT;
+ goto out;
+ }
++ orig_optlen = ctx.optlen;
+
+ if (copy_from_user(ctx.optval, optval,
+ min(ctx.optlen, max_optlen)) != 0) {
+@@ -1922,6 +1931,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ goto out;
+
+ if (optval && (ctx.optlen > max_optlen || ctx.optlen < 0)) {
++ if (orig_optlen > PAGE_SIZE && ctx.optlen >= 0) {
++ pr_info_once("bpf getsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
++ ctx.optlen, max_optlen);
++ ret = retval;
++ goto out;
++ }
+ ret = -EFAULT;
+ goto out;
+ }
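The cgroup.c hunks above stop returning -EFAULT when a BPF program reports an optlen larger than the PAGE_SIZE-capped copy the kernel made, and instead fall back to the kernel's own result (with a one-time warning). A simplified decision function capturing that policy; the constants and return codes are stand-ins, not the real helper.

#include <stdio.h>

#define PAGE_SIZE 4096
#define EFAULT 14

/* 1: use the program's buffer, 0: fall back to the kernel result, <0: error */
static int check_optlen(int user_optlen, int prog_optlen, int max_optlen)
{
	if (prog_optlen >= 0 && prog_optlen <= max_optlen)
		return 1;			/* program buffer is usable */
	if (user_optlen > PAGE_SIZE && prog_optlen >= 0)
		return 0;			/* too big to have been copied: ignore program buffer */
	return -EFAULT;
}

int main(void)
{
	printf("%d\n", check_optlen(8192, 6000, PAGE_SIZE));	/* 0: ignored */
	printf("%d\n", check_optlen(64, 128, 64));		/* -14: real error */
	return 0;
}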
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index d0ed7d6f5eec5..fecb8d2d885a3 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -279,11 +279,8 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_a
+ return tlinks;
+ }
+
+-static void __bpf_tramp_image_put_deferred(struct work_struct *work)
++static void bpf_tramp_image_free(struct bpf_tramp_image *im)
+ {
+- struct bpf_tramp_image *im;
+-
+- im = container_of(work, struct bpf_tramp_image, work);
+ bpf_image_ksym_del(&im->ksym);
+ bpf_jit_free_exec(im->image);
+ bpf_jit_uncharge_modmem(PAGE_SIZE);
+@@ -291,6 +288,14 @@ static void __bpf_tramp_image_put_deferred(struct work_struct *work)
+ kfree_rcu(im, rcu);
+ }
+
++static void __bpf_tramp_image_put_deferred(struct work_struct *work)
++{
++ struct bpf_tramp_image *im;
++
++ im = container_of(work, struct bpf_tramp_image, work);
++ bpf_tramp_image_free(im);
++}
++
+ /* callback, fexit step 3 or fentry step 2 */
+ static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
+ {
+@@ -372,7 +377,7 @@ static void bpf_tramp_image_put(struct bpf_tramp_image *im)
+ call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
+ }
+
+-static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
++static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key)
+ {
+ struct bpf_tramp_image *im;
+ struct bpf_ksym *ksym;
+@@ -399,7 +404,7 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
+
+ ksym = &im->ksym;
+ INIT_LIST_HEAD_RCU(&ksym->lnode);
+- snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
++ snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", key);
+ bpf_image_ksym_add(image, ksym);
+ return im;
+
+@@ -429,11 +434,10 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
+ err = unregister_fentry(tr, tr->cur_image->image);
+ bpf_tramp_image_put(tr->cur_image);
+ tr->cur_image = NULL;
+- tr->selector = 0;
+ goto out;
+ }
+
+- im = bpf_tramp_image_alloc(tr->key, tr->selector);
++ im = bpf_tramp_image_alloc(tr->key);
+ if (IS_ERR(im)) {
+ err = PTR_ERR(im);
+ goto out;
+@@ -466,12 +470,11 @@ again:
+ &tr->func.model, tr->flags, tlinks,
+ tr->func.addr);
+ if (err < 0)
+- goto out;
++ goto out_free;
+
+ set_memory_rox((long)im->image, 1);
+
+- WARN_ON(tr->cur_image && tr->selector == 0);
+- WARN_ON(!tr->cur_image && tr->selector);
++ WARN_ON(tr->cur_image && total == 0);
+ if (tr->cur_image)
+ /* progs already running at this address */
+ err = modify_fentry(tr, tr->cur_image->image, im->image, lock_direct_mutex);
+@@ -496,18 +499,21 @@ again:
+ }
+ #endif
+ if (err)
+- goto out;
++ goto out_free;
+
+ if (tr->cur_image)
+ bpf_tramp_image_put(tr->cur_image);
+ tr->cur_image = im;
+- tr->selector++;
+ out:
+ /* If any error happens, restore previous flags */
+ if (err)
+ tr->flags = orig_flags;
+ kfree(tlinks);
+ return err;
++
++out_free:
++ bpf_tramp_image_free(im);
++ goto out;
+ }
+
+ static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
+diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
+index 5a60cc52adc0c..8a7baf4e332e3 100644
+--- a/kernel/kcsan/core.c
++++ b/kernel/kcsan/core.c
+@@ -1270,7 +1270,9 @@ static __always_inline void kcsan_atomic_builtin_memorder(int memorder)
+ DEFINE_TSAN_ATOMIC_OPS(8);
+ DEFINE_TSAN_ATOMIC_OPS(16);
+ DEFINE_TSAN_ATOMIC_OPS(32);
++#ifdef CONFIG_64BIT
+ DEFINE_TSAN_ATOMIC_OPS(64);
++#endif
+
+ void __tsan_atomic_thread_fence(int memorder);
+ void __tsan_atomic_thread_fence(int memorder)
+diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
+index 3d578c6fefee3..22acee18195a5 100644
+--- a/kernel/kexec_core.c
++++ b/kernel/kexec_core.c
+@@ -1122,6 +1122,7 @@ int crash_shrink_memory(unsigned long new_size)
+ start = crashk_res.start;
+ end = crashk_res.end;
+ old_size = (end == 0) ? 0 : end - start + 1;
++ new_size = roundup(new_size, KEXEC_CRASH_MEM_ALIGN);
+ if (new_size >= old_size) {
+ ret = (new_size == old_size) ? 0 : -EINVAL;
+ goto unlock;
+@@ -1133,9 +1134,7 @@ int crash_shrink_memory(unsigned long new_size)
+ goto unlock;
+ }
+
+- start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
+- end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
+-
++ end = start + new_size;
+ crash_free_reserved_phys_range(end, crashk_res.end);
+
+ if ((start == end) && (crashk_res.parent != NULL))
+diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
+index 115616ac3bfa6..2d7e85dbf6734 100644
+--- a/kernel/rcu/rcu.h
++++ b/kernel/rcu/rcu.h
+@@ -603,4 +603,10 @@ void show_rcu_tasks_trace_gp_kthread(void);
+ static inline void show_rcu_tasks_trace_gp_kthread(void) {}
+ #endif
+
++#ifdef CONFIG_TINY_RCU
++static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; }
++#else
++bool rcu_cpu_beenfullyonline(int cpu);
++#endif
++
+ #endif /* __LINUX_RCU_H */
+diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
+index 91fb5905a008f..602f0958e4362 100644
+--- a/kernel/rcu/rcuscale.c
++++ b/kernel/rcu/rcuscale.c
+@@ -522,89 +522,6 @@ rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
+ scale_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
+ }
+
+-static void
+-rcu_scale_cleanup(void)
+-{
+- int i;
+- int j;
+- int ngps = 0;
+- u64 *wdp;
+- u64 *wdpp;
+-
+- /*
+- * Would like warning at start, but everything is expedited
+- * during the mid-boot phase, so have to wait till the end.
+- */
+- if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
+- SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
+- if (rcu_gp_is_normal() && gp_exp)
+- SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
+- if (gp_exp && gp_async)
+- SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
+-
+- if (torture_cleanup_begin())
+- return;
+- if (!cur_ops) {
+- torture_cleanup_end();
+- return;
+- }
+-
+- if (reader_tasks) {
+- for (i = 0; i < nrealreaders; i++)
+- torture_stop_kthread(rcu_scale_reader,
+- reader_tasks[i]);
+- kfree(reader_tasks);
+- }
+-
+- if (writer_tasks) {
+- for (i = 0; i < nrealwriters; i++) {
+- torture_stop_kthread(rcu_scale_writer,
+- writer_tasks[i]);
+- if (!writer_n_durations)
+- continue;
+- j = writer_n_durations[i];
+- pr_alert("%s%s writer %d gps: %d\n",
+- scale_type, SCALE_FLAG, i, j);
+- ngps += j;
+- }
+- pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
+- scale_type, SCALE_FLAG,
+- t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
+- t_rcu_scale_writer_finished -
+- t_rcu_scale_writer_started,
+- ngps,
+- rcuscale_seq_diff(b_rcu_gp_test_finished,
+- b_rcu_gp_test_started));
+- for (i = 0; i < nrealwriters; i++) {
+- if (!writer_durations)
+- break;
+- if (!writer_n_durations)
+- continue;
+- wdpp = writer_durations[i];
+- if (!wdpp)
+- continue;
+- for (j = 0; j < writer_n_durations[i]; j++) {
+- wdp = &wdpp[j];
+- pr_alert("%s%s %4d writer-duration: %5d %llu\n",
+- scale_type, SCALE_FLAG,
+- i, j, *wdp);
+- if (j % 100 == 0)
+- schedule_timeout_uninterruptible(1);
+- }
+- kfree(writer_durations[i]);
+- }
+- kfree(writer_tasks);
+- kfree(writer_durations);
+- kfree(writer_n_durations);
+- }
+-
+- /* Do torture-type-specific cleanup operations. */
+- if (cur_ops->cleanup != NULL)
+- cur_ops->cleanup();
+-
+- torture_cleanup_end();
+-}
+-
+ /*
+ * Return the number if non-negative. If -1, the number of CPUs.
+ * If less than -1, that much less than the number of CPUs, but
+@@ -624,21 +541,6 @@ static int compute_real(int n)
+ return nr;
+ }
+
+-/*
+- * RCU scalability shutdown kthread. Just waits to be awakened, then shuts
+- * down system.
+- */
+-static int
+-rcu_scale_shutdown(void *arg)
+-{
+- wait_event(shutdown_wq,
+- atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
+- smp_mb(); /* Wake before output. */
+- rcu_scale_cleanup();
+- kernel_power_off();
+- return -EINVAL;
+-}
+-
+ /*
+ * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs for number
+ * of iterations and measure total time and number of GP for all iterations to complete.
+@@ -771,8 +673,8 @@ kfree_scale_cleanup(void)
+ static int
+ kfree_scale_shutdown(void *arg)
+ {
+- wait_event(shutdown_wq,
+- atomic_read(&n_kfree_scale_thread_ended) >= kfree_nrealthreads);
++ wait_event_idle(shutdown_wq,
++ atomic_read(&n_kfree_scale_thread_ended) >= kfree_nrealthreads);
+
+ smp_mb(); /* Wake before output. */
+
+@@ -875,6 +777,108 @@ unwind:
+ return firsterr;
+ }
+
++static void
++rcu_scale_cleanup(void)
++{
++ int i;
++ int j;
++ int ngps = 0;
++ u64 *wdp;
++ u64 *wdpp;
++
++ /*
++ * Would like warning at start, but everything is expedited
++ * during the mid-boot phase, so have to wait till the end.
++ */
++ if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
++ SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
++ if (rcu_gp_is_normal() && gp_exp)
++ SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
++ if (gp_exp && gp_async)
++ SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
++
++ if (kfree_rcu_test) {
++ kfree_scale_cleanup();
++ return;
++ }
++
++ if (torture_cleanup_begin())
++ return;
++ if (!cur_ops) {
++ torture_cleanup_end();
++ return;
++ }
++
++ if (reader_tasks) {
++ for (i = 0; i < nrealreaders; i++)
++ torture_stop_kthread(rcu_scale_reader,
++ reader_tasks[i]);
++ kfree(reader_tasks);
++ }
++
++ if (writer_tasks) {
++ for (i = 0; i < nrealwriters; i++) {
++ torture_stop_kthread(rcu_scale_writer,
++ writer_tasks[i]);
++ if (!writer_n_durations)
++ continue;
++ j = writer_n_durations[i];
++ pr_alert("%s%s writer %d gps: %d\n",
++ scale_type, SCALE_FLAG, i, j);
++ ngps += j;
++ }
++ pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
++ scale_type, SCALE_FLAG,
++ t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
++ t_rcu_scale_writer_finished -
++ t_rcu_scale_writer_started,
++ ngps,
++ rcuscale_seq_diff(b_rcu_gp_test_finished,
++ b_rcu_gp_test_started));
++ for (i = 0; i < nrealwriters; i++) {
++ if (!writer_durations)
++ break;
++ if (!writer_n_durations)
++ continue;
++ wdpp = writer_durations[i];
++ if (!wdpp)
++ continue;
++ for (j = 0; j < writer_n_durations[i]; j++) {
++ wdp = &wdpp[j];
++ pr_alert("%s%s %4d writer-duration: %5d %llu\n",
++ scale_type, SCALE_FLAG,
++ i, j, *wdp);
++ if (j % 100 == 0)
++ schedule_timeout_uninterruptible(1);
++ }
++ kfree(writer_durations[i]);
++ }
++ kfree(writer_tasks);
++ kfree(writer_durations);
++ kfree(writer_n_durations);
++ }
++
++ /* Do torture-type-specific cleanup operations. */
++ if (cur_ops->cleanup != NULL)
++ cur_ops->cleanup();
++
++ torture_cleanup_end();
++}
++
++/*
++ * RCU scalability shutdown kthread. Just waits to be awakened, then shuts
++ * down system.
++ */
++static int
++rcu_scale_shutdown(void *arg)
++{
++ wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
++ smp_mb(); /* Wake before output. */
++ rcu_scale_cleanup();
++ kernel_power_off();
++ return -EINVAL;
++}
++
+ static int __init
+ rcu_scale_init(void)
+ {
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index bfb5e1549f2b2..dfa6ff2eb32f2 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -455,6 +455,7 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu
+ {
+ int cpu;
+ int cpunext;
++ int cpuwq;
+ unsigned long flags;
+ int len;
+ struct rcu_head *rhp;
+@@ -465,11 +466,13 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu
+ cpunext = cpu * 2 + 1;
+ if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
+ rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
+- queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
++ cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
++ queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
+ cpunext++;
+ if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
+ rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
+- queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
++ cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
++ queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
+ }
+ }
+
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index a565dc5c54440..be490f55cb834 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -4279,7 +4279,6 @@ int rcutree_prepare_cpu(unsigned int cpu)
+ */
+ rnp = rdp->mynode;
+ raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+- rdp->beenonline = true; /* We have now been online. */
+ rdp->gp_seq = READ_ONCE(rnp->gp_seq);
+ rdp->gp_seq_needed = rdp->gp_seq;
+ rdp->cpu_no_qs.b.norm = true;
+@@ -4306,6 +4305,16 @@ static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
+ rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
+ }
+
++/*
++ * Has the specified (known valid) CPU ever been fully online?
++ */
++bool rcu_cpu_beenfullyonline(int cpu)
++{
++ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
++
++ return smp_load_acquire(&rdp->beenonline);
++}
++
+ /*
+ * Near the end of the CPU-online process. Pretty much all services
+ * enabled, and the CPU is now very much alive.
+@@ -4364,15 +4373,16 @@ int rcutree_offline_cpu(unsigned int cpu)
+ * Note that this function is special in that it is invoked directly
+ * from the incoming CPU rather than from the cpuhp_step mechanism.
+ * This is because this function must be invoked at a precise location.
++ * This incoming CPU must not have enabled interrupts yet.
+ */
+ void rcu_cpu_starting(unsigned int cpu)
+ {
+- unsigned long flags;
+ unsigned long mask;
+ struct rcu_data *rdp;
+ struct rcu_node *rnp;
+ bool newcpu;
+
++ lockdep_assert_irqs_disabled();
+ rdp = per_cpu_ptr(&rcu_data, cpu);
+ if (rdp->cpu_started)
+ return;
+@@ -4380,7 +4390,6 @@ void rcu_cpu_starting(unsigned int cpu)
+
+ rnp = rdp->mynode;
+ mask = rdp->grpmask;
+- local_irq_save(flags);
+ arch_spin_lock(&rcu_state.ofl_lock);
+ rcu_dynticks_eqs_online();
+ raw_spin_lock(&rcu_state.barrier_lock);
+@@ -4399,17 +4408,17 @@ void rcu_cpu_starting(unsigned int cpu)
+ /* An incoming CPU should never be blocking a grace period. */
+ if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
+ /* rcu_report_qs_rnp() *really* wants some flags to restore */
+- unsigned long flags2;
++ unsigned long flags;
+
+- local_irq_save(flags2);
++ local_irq_save(flags);
+ rcu_disable_urgency_upon_qs(rdp);
+ /* Report QS -after- changing ->qsmaskinitnext! */
+- rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags2);
++ rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
+ } else {
+ raw_spin_unlock_rcu_node(rnp);
+ }
+ arch_spin_unlock(&rcu_state.ofl_lock);
+- local_irq_restore(flags);
++ smp_store_release(&rdp->beenonline, true);
+ smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
+ }
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index ed89be0aa6503..853b7ef9dcafc 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5519,6 +5519,14 @@ static void __cfsb_csd_unthrottle(void *arg)
+
+ rq_lock(rq, &rf);
+
++ /*
++	 * Iterating over the list can trigger several calls to
++ * update_rq_clock() in unthrottle_cfs_rq().
++ * Do it once and skip the potential next ones.
++ */
++ update_rq_clock(rq);
++ rq_clock_start_loop_update(rq);
++
+ /*
+ * Since we hold rq lock we're safe from concurrent manipulation of
+ * the CSD list. However, this RCU critical section annotates the
+@@ -5538,6 +5546,7 @@ static void __cfsb_csd_unthrottle(void *arg)
+
+ rcu_read_unlock();
+
++ rq_clock_stop_loop_update(rq);
+ rq_unlock(rq, &rf);
+ }
+
+@@ -6054,6 +6063,13 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
+
+ lockdep_assert_rq_held(rq);
+
++ /*
++	 * The rq clock has already been updated in
++	 * set_rq_offline(), so we should skip updating
++ * the rq clock again in unthrottle_cfs_rq().
++ */
++ rq_clock_start_loop_update(rq);
++
+ rcu_read_lock();
+ list_for_each_entry_rcu(tg, &task_groups, list) {
+ struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
+@@ -6076,6 +6092,8 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
+ unthrottle_cfs_rq(cfs_rq);
+ }
+ rcu_read_unlock();
++
++ rq_clock_stop_loop_update(rq);
+ }
+
+ #else /* CONFIG_CFS_BANDWIDTH */
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 3e8df6d31c1e3..3adac73b17ca5 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1546,6 +1546,28 @@ static inline void rq_clock_cancel_skipupdate(struct rq *rq)
+ rq->clock_update_flags &= ~RQCF_REQ_SKIP;
+ }
+
++/*
++ * During cpu offlining and rq-wide unthrottling, we can trigger
++ * an update_rq_clock() for several cfs and rt runqueues (typically
++ * when using list_for_each_entry_*).
++ * rq_clock_start_loop_update() can be called after updating the clock
++ * once and before iterating over the list to prevent multiple updates.
++ * After the iterative traversal, we need to call rq_clock_stop_loop_update()
++ * to clear RQCF_ACT_SKIP of rq->clock_update_flags.
++ */
++static inline void rq_clock_start_loop_update(struct rq *rq)
++{
++ lockdep_assert_rq_held(rq);
++ SCHED_WARN_ON(rq->clock_update_flags & RQCF_ACT_SKIP);
++ rq->clock_update_flags |= RQCF_ACT_SKIP;
++}
++
++static inline void rq_clock_stop_loop_update(struct rq *rq)
++{
++ lockdep_assert_rq_held(rq);
++ rq->clock_update_flags &= ~RQCF_ACT_SKIP;
++}
++
+ struct rq_flags {
+ unsigned long flags;
+ struct pin_cookie cookie;
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index 808a247205a9a..ed3c4a9543982 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -1037,27 +1037,52 @@ retry_delete:
+ }
+
+ /*
+- * return timer owned by the process, used by exit_itimers
++ * Delete a timer if it is armed, remove it from the hash and schedule it
++ * for RCU freeing.
+ */
+ static void itimer_delete(struct k_itimer *timer)
+ {
+-retry_delete:
+- spin_lock_irq(&timer->it_lock);
++ unsigned long flags;
++
++ /*
++ * irqsave is required to make timer_wait_running() work.
++ */
++ spin_lock_irqsave(&timer->it_lock, flags);
+
++retry_delete:
++ /*
++	 * Even if the timer is no longer accessible from other tasks,
++	 * it still might be armed and queued in the underlying timer
++ * mechanism. Worse, that timer mechanism might run the expiry
++ * function concurrently.
++ */
+ if (timer_delete_hook(timer) == TIMER_RETRY) {
+- spin_unlock_irq(&timer->it_lock);
++ /*
++		 * The timer is expiring concurrently; prevent livelocks
++ * and pointless spinning on RT.
++ *
++ * timer_wait_running() drops timer::it_lock, which opens
++ * the possibility for another task to delete the timer.
++ *
++ * That's not possible here because this is invoked from
++ * do_exit() only for the last thread of the thread group.
++ * So no other task can access and delete that timer.
++ */
++ if (WARN_ON_ONCE(timer_wait_running(timer, &flags) != timer))
++ return;
++
+ goto retry_delete;
+ }
+ list_del(&timer->list);
+
+- spin_unlock_irq(&timer->it_lock);
++ spin_unlock_irqrestore(&timer->it_lock, flags);
+ release_posix_timer(timer, IT_ID_SET);
+ }
+
+ /*
+- * This is called by do_exit or de_thread, only when nobody else can
+- * modify the signal->posix_timers list. Yet we need sighand->siglock
+- * to prevent the race with /proc/pid/timers.
++ * Invoked from do_exit() when the last thread of a thread group exits.
++ * At that point no other task can access the timers of the dying
++ * task anymore.
+ */
+ void exit_itimers(struct task_struct *tsk)
+ {
+@@ -1067,10 +1092,12 @@ void exit_itimers(struct task_struct *tsk)
+ if (list_empty(&tsk->signal->posix_timers))
+ return;
+
++ /* Protect against concurrent read via /proc/$PID/timers */
+ spin_lock_irq(&tsk->sighand->siglock);
+ list_replace_init(&tsk->signal->posix_timers, &timers);
+ spin_unlock_irq(&tsk->sighand->siglock);
+
++	/* The timers are no longer accessible via tsk::signal */
+ while (!list_empty(&timers)) {
+ tmr = list_first_entry(&timers, struct k_itimer, list);
+ itimer_delete(tmr);
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index d6fb6a676bbbb..1ad89eec2a55f 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -1046,7 +1046,7 @@ static bool report_idle_softirq(void)
+ return false;
+ }
+
+- if (ratelimit < 10)
++ if (ratelimit >= 10)
+ return false;
+
+ /* On RT, softirqs handling may be waiting on some lock */
+diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
+index 247bf0b1582ca..1e8a49dc956e2 100644
+--- a/kernel/watchdog_hld.c
++++ b/kernel/watchdog_hld.c
+@@ -114,14 +114,14 @@ static void watchdog_overflow_callback(struct perf_event *event,
+ /* Ensure the watchdog never gets throttled */
+ event->hw.interrupts = 0;
+
++ if (!watchdog_check_timestamp())
++ return;
++
+ if (__this_cpu_read(watchdog_nmi_touch) == true) {
+ __this_cpu_write(watchdog_nmi_touch, false);
+ return;
+ }
+
+- if (!watchdog_check_timestamp())
+- return;
+-
+ /* check for a hardlockup
+ * This is done by making sure our timer interrupt
+ * is incrementing. The timer interrupt should have
+diff --git a/lib/ts_bm.c b/lib/ts_bm.c
+index 1f2234221dd11..c8ecbf74ef295 100644
+--- a/lib/ts_bm.c
++++ b/lib/ts_bm.c
+@@ -60,10 +60,12 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
+ struct ts_bm *bm = ts_config_priv(conf);
+ unsigned int i, text_len, consumed = state->offset;
+ const u8 *text;
+- int shift = bm->patlen - 1, bs;
++ int bs;
+ const u8 icase = conf->flags & TS_IGNORECASE;
+
+ for (;;) {
++ int shift = bm->patlen - 1;
++
+ text_len = conf->get_next_block(consumed, &text, conf, state);
+
+ if (unlikely(text_len == 0))
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 2723104cc06a1..8f048e62279a2 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2903,7 +2903,7 @@ ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
+ do {
+ cond_resched();
+
+- if (*ppos >= i_size_read(file_inode(in)))
++ if (*ppos >= i_size_read(in->f_mapping->host))
+ break;
+
+ iocb.ki_pos = *ppos;
+@@ -2919,7 +2919,7 @@ ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
+ * part of the page is not copied back to userspace (unless
+ * another truncate extends the file - this is desired though).
+ */
+- isize = i_size_read(file_inode(in));
++ isize = i_size_read(in->f_mapping->host);
+ if (unlikely(*ppos >= isize))
+ break;
+ end_offset = min_t(loff_t, isize, *ppos + len);
+diff --git a/mm/memory.c b/mm/memory.c
+index 2c2caae6fb3b6..e9dcc1c1eb6e9 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3914,6 +3914,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ }
+ }
+
++ /*
++ * Some architectures may have to restore extra metadata to the page
++ * when reading from swap. This metadata may be indexed by swap entry
++ * so this must be called before swap_free().
++ */
++ arch_swap_restore(entry, folio);
++
+ /*
+ * Remove the swap entry and conditionally try to free up the swapcache.
+ * We're already holding a reference on the page but haven't mapped it
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 1d6f165923bff..ebbab8c24beaf 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -6529,12 +6529,11 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
+ static struct sock *
+ __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
+- u64 flags)
++ u64 flags, int sdif)
+ {
+ struct sock *sk = NULL;
+ struct net *net;
+ u8 family;
+- int sdif;
+
+ if (len == sizeof(tuple->ipv4))
+ family = AF_INET;
+@@ -6546,10 +6545,12 @@ __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ if (unlikely(flags || !((s32)netns_id < 0 || netns_id <= S32_MAX)))
+ goto out;
+
+- if (family == AF_INET)
+- sdif = inet_sdif(skb);
+- else
+- sdif = inet6_sdif(skb);
++ if (sdif < 0) {
++ if (family == AF_INET)
++ sdif = inet_sdif(skb);
++ else
++ sdif = inet6_sdif(skb);
++ }
+
+ if ((s32)netns_id < 0) {
+ net = caller_net;
+@@ -6569,10 +6570,11 @@ out:
+ static struct sock *
+ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
+- u64 flags)
++ u64 flags, int sdif)
+ {
+ struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
+- ifindex, proto, netns_id, flags);
++ ifindex, proto, netns_id, flags,
++ sdif);
+
+ if (sk) {
+ struct sock *sk2 = sk_to_full_sk(sk);
+@@ -6612,7 +6614,7 @@ bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ }
+
+ return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto,
+- netns_id, flags);
++ netns_id, flags, -1);
+ }
+
+ static struct sock *
+@@ -6701,6 +6703,78 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
+ .arg5_type = ARG_ANYTHING,
+ };
+
++BPF_CALL_5(bpf_tc_skc_lookup_tcp, struct sk_buff *, skb,
++ struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
++{
++ struct net_device *dev = skb->dev;
++ int ifindex = dev->ifindex, sdif = dev_sdif(dev);
++ struct net *caller_net = dev_net(dev);
++
++ return (unsigned long)__bpf_skc_lookup(skb, tuple, len, caller_net,
++ ifindex, IPPROTO_TCP, netns_id,
++ flags, sdif);
++}
++
++static const struct bpf_func_proto bpf_tc_skc_lookup_tcp_proto = {
++ .func = bpf_tc_skc_lookup_tcp,
++ .gpl_only = false,
++ .pkt_access = true,
++ .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
++ .arg1_type = ARG_PTR_TO_CTX,
++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
++ .arg3_type = ARG_CONST_SIZE,
++ .arg4_type = ARG_ANYTHING,
++ .arg5_type = ARG_ANYTHING,
++};
++
++BPF_CALL_5(bpf_tc_sk_lookup_tcp, struct sk_buff *, skb,
++ struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
++{
++ struct net_device *dev = skb->dev;
++ int ifindex = dev->ifindex, sdif = dev_sdif(dev);
++ struct net *caller_net = dev_net(dev);
++
++ return (unsigned long)__bpf_sk_lookup(skb, tuple, len, caller_net,
++ ifindex, IPPROTO_TCP, netns_id,
++ flags, sdif);
++}
++
++static const struct bpf_func_proto bpf_tc_sk_lookup_tcp_proto = {
++ .func = bpf_tc_sk_lookup_tcp,
++ .gpl_only = false,
++ .pkt_access = true,
++ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
++ .arg1_type = ARG_PTR_TO_CTX,
++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
++ .arg3_type = ARG_CONST_SIZE,
++ .arg4_type = ARG_ANYTHING,
++ .arg5_type = ARG_ANYTHING,
++};
++
++BPF_CALL_5(bpf_tc_sk_lookup_udp, struct sk_buff *, skb,
++ struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
++{
++ struct net_device *dev = skb->dev;
++ int ifindex = dev->ifindex, sdif = dev_sdif(dev);
++ struct net *caller_net = dev_net(dev);
++
++ return (unsigned long)__bpf_sk_lookup(skb, tuple, len, caller_net,
++ ifindex, IPPROTO_UDP, netns_id,
++ flags, sdif);
++}
++
++static const struct bpf_func_proto bpf_tc_sk_lookup_udp_proto = {
++ .func = bpf_tc_sk_lookup_udp,
++ .gpl_only = false,
++ .pkt_access = true,
++ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
++ .arg1_type = ARG_PTR_TO_CTX,
++ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
++ .arg3_type = ARG_CONST_SIZE,
++ .arg4_type = ARG_ANYTHING,
++ .arg5_type = ARG_ANYTHING,
++};
++
+ BPF_CALL_1(bpf_sk_release, struct sock *, sk)
+ {
+ if (sk && sk_is_refcounted(sk))
+@@ -6718,12 +6792,13 @@ static const struct bpf_func_proto bpf_sk_release_proto = {
+ BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
+ struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+ {
+- struct net *caller_net = dev_net(ctx->rxq->dev);
+- int ifindex = ctx->rxq->dev->ifindex;
++ struct net_device *dev = ctx->rxq->dev;
++ int ifindex = dev->ifindex, sdif = dev_sdif(dev);
++ struct net *caller_net = dev_net(dev);
+
+ return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
+ ifindex, IPPROTO_UDP, netns_id,
+- flags);
++ flags, sdif);
+ }
+
+ static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
+@@ -6741,12 +6816,13 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
+ BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx,
+ struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+ {
+- struct net *caller_net = dev_net(ctx->rxq->dev);
+- int ifindex = ctx->rxq->dev->ifindex;
++ struct net_device *dev = ctx->rxq->dev;
++ int ifindex = dev->ifindex, sdif = dev_sdif(dev);
++ struct net *caller_net = dev_net(dev);
+
+ return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net,
+ ifindex, IPPROTO_TCP, netns_id,
+- flags);
++ flags, sdif);
+ }
+
+ static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
+@@ -6764,12 +6840,13 @@ static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
+ BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
+ struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+ {
+- struct net *caller_net = dev_net(ctx->rxq->dev);
+- int ifindex = ctx->rxq->dev->ifindex;
++ struct net_device *dev = ctx->rxq->dev;
++ int ifindex = dev->ifindex, sdif = dev_sdif(dev);
++ struct net *caller_net = dev_net(dev);
+
+ return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
+ ifindex, IPPROTO_TCP, netns_id,
+- flags);
++ flags, sdif);
+ }
+
+ static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
+@@ -6789,7 +6866,8 @@ BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
+ {
+ return (unsigned long)__bpf_skc_lookup(NULL, tuple, len,
+ sock_net(ctx->sk), 0,
+- IPPROTO_TCP, netns_id, flags);
++ IPPROTO_TCP, netns_id, flags,
++ -1);
+ }
+
+ static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
+@@ -6808,7 +6886,7 @@ BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
+ {
+ return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
+ sock_net(ctx->sk), 0, IPPROTO_TCP,
+- netns_id, flags);
++ netns_id, flags, -1);
+ }
+
+ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
+@@ -6827,7 +6905,7 @@ BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
+ {
+ return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
+ sock_net(ctx->sk), 0, IPPROTO_UDP,
+- netns_id, flags);
++ netns_id, flags, -1);
+ }
+
+ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
+@@ -7954,9 +8032,9 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+ #endif
+ #ifdef CONFIG_INET
+ case BPF_FUNC_sk_lookup_tcp:
+- return &bpf_sk_lookup_tcp_proto;
++ return &bpf_tc_sk_lookup_tcp_proto;
+ case BPF_FUNC_sk_lookup_udp:
+- return &bpf_sk_lookup_udp_proto;
++ return &bpf_tc_sk_lookup_udp_proto;
+ case BPF_FUNC_sk_release:
+ return &bpf_sk_release_proto;
+ case BPF_FUNC_tcp_sock:
+@@ -7964,7 +8042,7 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+ case BPF_FUNC_get_listener_sock:
+ return &bpf_get_listener_sock_proto;
+ case BPF_FUNC_skc_lookup_tcp:
+- return &bpf_skc_lookup_tcp_proto;
++ return &bpf_tc_skc_lookup_tcp_proto;
+ case BPF_FUNC_tcp_check_syncookie:
+ return &bpf_tcp_check_syncookie_proto;
+ case BPF_FUNC_skb_ecn_set_ce:
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index f235cc6832767..f21254a9cd373 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -958,24 +958,27 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
+ nla_total_size(sizeof(struct ifla_vf_rate)) +
+ nla_total_size(sizeof(struct ifla_vf_link_state)) +
+ nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
+- nla_total_size(0) + /* nest IFLA_VF_STATS */
+- /* IFLA_VF_STATS_RX_PACKETS */
+- nla_total_size_64bit(sizeof(__u64)) +
+- /* IFLA_VF_STATS_TX_PACKETS */
+- nla_total_size_64bit(sizeof(__u64)) +
+- /* IFLA_VF_STATS_RX_BYTES */
+- nla_total_size_64bit(sizeof(__u64)) +
+- /* IFLA_VF_STATS_TX_BYTES */
+- nla_total_size_64bit(sizeof(__u64)) +
+- /* IFLA_VF_STATS_BROADCAST */
+- nla_total_size_64bit(sizeof(__u64)) +
+- /* IFLA_VF_STATS_MULTICAST */
+- nla_total_size_64bit(sizeof(__u64)) +
+- /* IFLA_VF_STATS_RX_DROPPED */
+- nla_total_size_64bit(sizeof(__u64)) +
+- /* IFLA_VF_STATS_TX_DROPPED */
+- nla_total_size_64bit(sizeof(__u64)) +
+ nla_total_size(sizeof(struct ifla_vf_trust)));
++ if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
++ size += num_vfs *
++ (nla_total_size(0) + /* nest IFLA_VF_STATS */
++ /* IFLA_VF_STATS_RX_PACKETS */
++ nla_total_size_64bit(sizeof(__u64)) +
++ /* IFLA_VF_STATS_TX_PACKETS */
++ nla_total_size_64bit(sizeof(__u64)) +
++ /* IFLA_VF_STATS_RX_BYTES */
++ nla_total_size_64bit(sizeof(__u64)) +
++ /* IFLA_VF_STATS_TX_BYTES */
++ nla_total_size_64bit(sizeof(__u64)) +
++ /* IFLA_VF_STATS_BROADCAST */
++ nla_total_size_64bit(sizeof(__u64)) +
++ /* IFLA_VF_STATS_MULTICAST */
++ nla_total_size_64bit(sizeof(__u64)) +
++ /* IFLA_VF_STATS_RX_DROPPED */
++ nla_total_size_64bit(sizeof(__u64)) +
++ /* IFLA_VF_STATS_TX_DROPPED */
++ nla_total_size_64bit(sizeof(__u64)));
++ }
+ return size;
+ } else
+ return 0;
+@@ -1267,7 +1270,8 @@ static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
+ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
+ struct net_device *dev,
+ int vfs_num,
+- struct nlattr *vfinfo)
++ struct nlattr *vfinfo,
++ u32 ext_filter_mask)
+ {
+ struct ifla_vf_rss_query_en vf_rss_query_en;
+ struct nlattr *vf, *vfstats, *vfvlanlist;
+@@ -1373,33 +1377,35 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
+ goto nla_put_vf_failure;
+ }
+ nla_nest_end(skb, vfvlanlist);
+- memset(&vf_stats, 0, sizeof(vf_stats));
+- if (dev->netdev_ops->ndo_get_vf_stats)
+- dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
+- &vf_stats);
+- vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
+- if (!vfstats)
+- goto nla_put_vf_failure;
+- if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
+- vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
+- nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
+- vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
+- nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
+- vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
+- nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
+- vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
+- nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
+- vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
+- nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
+- vf_stats.multicast, IFLA_VF_STATS_PAD) ||
+- nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
+- vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
+- nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
+- vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
+- nla_nest_cancel(skb, vfstats);
+- goto nla_put_vf_failure;
++ if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
++ memset(&vf_stats, 0, sizeof(vf_stats));
++ if (dev->netdev_ops->ndo_get_vf_stats)
++ dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
++ &vf_stats);
++ vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
++ if (!vfstats)
++ goto nla_put_vf_failure;
++ if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
++ vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
++ nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
++ vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
++ nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
++ vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
++ nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
++ vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
++ nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
++ vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
++ nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
++ vf_stats.multicast, IFLA_VF_STATS_PAD) ||
++ nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
++ vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
++ nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
++ vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
++ nla_nest_cancel(skb, vfstats);
++ goto nla_put_vf_failure;
++ }
++ nla_nest_end(skb, vfstats);
+ }
+- nla_nest_end(skb, vfstats);
+ nla_nest_end(skb, vf);
+ return 0;
+
+@@ -1432,7 +1438,7 @@ static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
+ return -EMSGSIZE;
+
+ for (i = 0; i < num_vfs; i++) {
+- if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
++ if (rtnl_fill_vfinfo(skb, dev, i, vfinfo, ext_filter_mask))
+ return -EMSGSIZE;
+ }
+
+@@ -4087,7 +4093,7 @@ static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
+ ndm->ndm_ifindex = dev->ifindex;
+ ndm->ndm_state = ndm_state;
+
+- if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
++ if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr))
+ goto nla_put_failure;
+ if (vid)
+ if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
+@@ -4101,10 +4107,10 @@ nla_put_failure:
+ return -EMSGSIZE;
+ }
+
+-static inline size_t rtnl_fdb_nlmsg_size(void)
++static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev)
+ {
+ return NLMSG_ALIGN(sizeof(struct ndmsg)) +
+- nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
++ nla_total_size(dev->addr_len) + /* NDA_LLADDR */
+ nla_total_size(sizeof(u16)) + /* NDA_VLAN */
+ 0;
+ }
+@@ -4116,7 +4122,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
+ struct sk_buff *skb;
+ int err = -ENOBUFS;
+
+- skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
++ skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC);
+ if (!skb)
+ goto errout;
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index b34c48f802e98..a009e1fac4a69 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2555,13 +2555,24 @@ kuid_t sock_i_uid(struct sock *sk)
+ }
+ EXPORT_SYMBOL(sock_i_uid);
+
+-unsigned long sock_i_ino(struct sock *sk)
++unsigned long __sock_i_ino(struct sock *sk)
+ {
+ unsigned long ino;
+
+- read_lock_bh(&sk->sk_callback_lock);
++ read_lock(&sk->sk_callback_lock);
+ ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
+- read_unlock_bh(&sk->sk_callback_lock);
++ read_unlock(&sk->sk_callback_lock);
++ return ino;
++}
++EXPORT_SYMBOL(__sock_i_ino);
++
++unsigned long sock_i_ino(struct sock *sk)
++{
++ unsigned long ino;
++
++ local_bh_disable();
++ ino = __sock_i_ino(sk);
++ local_bh_enable();
+ return ino;
+ }
+ EXPORT_SYMBOL(sock_i_ino);
+diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
+index 6cd8607a3928f..390d790c0b056 100644
+--- a/net/dsa/dsa.c
++++ b/net/dsa/dsa.c
+@@ -1105,7 +1105,7 @@ static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
+ mutex_init(&dp->vlans_lock);
+ INIT_LIST_HEAD(&dp->fdbs);
+ INIT_LIST_HEAD(&dp->mdbs);
+- INIT_LIST_HEAD(&dp->vlans);
++ INIT_LIST_HEAD(&dp->vlans); /* also initializes &dp->user_vlans */
+ INIT_LIST_HEAD(&dp->list);
+ list_add_tail(&dp->list, &dst->ports);
+
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index 165bb2cb84316..527b1d576460f 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -27,6 +27,7 @@
+ #include "master.h"
+ #include "netlink.h"
+ #include "slave.h"
++#include "switch.h"
+ #include "tag.h"
+
+ struct dsa_switchdev_event_work {
+@@ -161,8 +162,7 @@ static int dsa_slave_schedule_standalone_work(struct net_device *dev,
+ return 0;
+ }
+
+-static int dsa_slave_host_vlan_rx_filtering(struct net_device *vdev, int vid,
+- void *arg)
++static int dsa_slave_host_vlan_rx_filtering(void *arg, int vid)
+ {
+ struct dsa_host_vlan_rx_filtering_ctx *ctx = arg;
+
+@@ -170,6 +170,28 @@ static int dsa_slave_host_vlan_rx_filtering(struct net_device *vdev, int vid,
+ ctx->addr, vid);
+ }
+
++static int dsa_slave_vlan_for_each(struct net_device *dev,
++ int (*cb)(void *arg, int vid), void *arg)
++{
++ struct dsa_port *dp = dsa_slave_to_port(dev);
++ struct dsa_vlan *v;
++ int err;
++
++ lockdep_assert_held(&dev->addr_list_lock);
++
++ err = cb(arg, 0);
++ if (err)
++ return err;
++
++ list_for_each_entry(v, &dp->user_vlans, list) {
++ err = cb(arg, v->vid);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
+ static int dsa_slave_sync_uc(struct net_device *dev,
+ const unsigned char *addr)
+ {
+@@ -180,18 +202,14 @@ static int dsa_slave_sync_uc(struct net_device *dev,
+ .addr = addr,
+ .event = DSA_UC_ADD,
+ };
+- int err;
+
+ dev_uc_add(master, addr);
+
+ if (!dsa_switch_supports_uc_filtering(dp->ds))
+ return 0;
+
+- err = dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
+- if (err)
+- return err;
+-
+- return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx);
++ return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
++ &ctx);
+ }
+
+ static int dsa_slave_unsync_uc(struct net_device *dev,
+@@ -204,18 +222,14 @@ static int dsa_slave_unsync_uc(struct net_device *dev,
+ .addr = addr,
+ .event = DSA_UC_DEL,
+ };
+- int err;
+
+ dev_uc_del(master, addr);
+
+ if (!dsa_switch_supports_uc_filtering(dp->ds))
+ return 0;
+
+- err = dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
+- if (err)
+- return err;
+-
+- return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx);
++ return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
++ &ctx);
+ }
+
+ static int dsa_slave_sync_mc(struct net_device *dev,
+@@ -228,18 +242,14 @@ static int dsa_slave_sync_mc(struct net_device *dev,
+ .addr = addr,
+ .event = DSA_MC_ADD,
+ };
+- int err;
+
+ dev_mc_add(master, addr);
+
+ if (!dsa_switch_supports_mc_filtering(dp->ds))
+ return 0;
+
+- err = dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
+- if (err)
+- return err;
+-
+- return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx);
++ return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
++ &ctx);
+ }
+
+ static int dsa_slave_unsync_mc(struct net_device *dev,
+@@ -252,18 +262,14 @@ static int dsa_slave_unsync_mc(struct net_device *dev,
+ .addr = addr,
+ .event = DSA_MC_DEL,
+ };
+- int err;
+
+ dev_mc_del(master, addr);
+
+ if (!dsa_switch_supports_mc_filtering(dp->ds))
+ return 0;
+
+- err = dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
+- if (err)
+- return err;
+-
+- return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx);
++ return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
++ &ctx);
+ }
+
+ void dsa_slave_sync_ha(struct net_device *dev)
+@@ -1759,6 +1765,7 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
+ struct netlink_ext_ack extack = {0};
+ struct dsa_switch *ds = dp->ds;
+ struct netdev_hw_addr *ha;
++ struct dsa_vlan *v;
+ int ret;
+
+ /* User port... */
+@@ -1782,8 +1789,17 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
+ !dsa_switch_supports_mc_filtering(ds))
+ return 0;
+
++ v = kzalloc(sizeof(*v), GFP_KERNEL);
++ if (!v) {
++ ret = -ENOMEM;
++ goto rollback;
++ }
++
+ netif_addr_lock_bh(dev);
+
++ v->vid = vid;
++ list_add_tail(&v->list, &dp->user_vlans);
++
+ if (dsa_switch_supports_mc_filtering(ds)) {
+ netdev_for_each_synced_mc_addr(ha, dev) {
+ dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD,
+@@ -1803,6 +1819,12 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
+ dsa_flush_workqueue();
+
+ return 0;
++
++rollback:
++ dsa_port_host_vlan_del(dp, &vlan);
++ dsa_port_vlan_del(dp, &vlan);
++
++ return ret;
+ }
+
+ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+@@ -1816,6 +1838,7 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+ };
+ struct dsa_switch *ds = dp->ds;
+ struct netdev_hw_addr *ha;
++ struct dsa_vlan *v;
+ int err;
+
+ err = dsa_port_vlan_del(dp, &vlan);
+@@ -1832,6 +1855,15 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+
+ netif_addr_lock_bh(dev);
+
++ v = dsa_vlan_find(&dp->user_vlans, &vlan);
++ if (!v) {
++ netif_addr_unlock_bh(dev);
++ return -ENOENT;
++ }
++
++ list_del(&v->list);
++ kfree(v);
++
+ if (dsa_switch_supports_mc_filtering(ds)) {
+ netdev_for_each_synced_mc_addr(ha, dev) {
+ dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL,
+diff --git a/net/dsa/switch.c b/net/dsa/switch.c
+index d5bc4bb7310dc..36f8ffea2d168 100644
+--- a/net/dsa/switch.c
++++ b/net/dsa/switch.c
+@@ -634,8 +634,8 @@ static bool dsa_port_host_vlan_match(struct dsa_port *dp,
+ return false;
+ }
+
+-static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
+- const struct switchdev_obj_port_vlan *vlan)
++struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
++ const struct switchdev_obj_port_vlan *vlan)
+ {
+ struct dsa_vlan *v;
+
+diff --git a/net/dsa/switch.h b/net/dsa/switch.h
+index 15e67b95eb6e1..ea034677da153 100644
+--- a/net/dsa/switch.h
++++ b/net/dsa/switch.h
+@@ -111,6 +111,9 @@ struct dsa_notifier_master_state_info {
+ bool operational;
+ };
+
++struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
++ const struct switchdev_obj_port_vlan *vlan);
++
+ int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
+ int dsa_broadcast(unsigned long e, void *v);
+
+diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
+index 0bac9af3ca966..371add9061f1f 100644
+--- a/net/mac80211/debugfs_netdev.c
++++ b/net/mac80211/debugfs_netdev.c
+@@ -691,7 +691,7 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
+ DEBUGFS_ADD_MODE(uapsd_queues, 0600);
+ DEBUGFS_ADD_MODE(uapsd_max_sp_len, 0600);
+ DEBUGFS_ADD_MODE(tdls_wider_bw, 0600);
+- DEBUGFS_ADD_MODE(valid_links, 0200);
++ DEBUGFS_ADD_MODE(valid_links, 0400);
+ DEBUGFS_ADD_MODE(active_links, 0600);
+ }
+
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 941bda9141faa..2ec9e1ead127e 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -2901,6 +2901,8 @@ int ieee80211_sta_activate_link(struct sta_info *sta, unsigned int link_id)
+ if (!test_sta_flag(sta, WLAN_STA_INSERTED))
+ goto hash;
+
++ ieee80211_recalc_min_chandef(sdata, link_id);
++
+ /* Ensure the values are updated for the driver,
+ * redone by sta_remove_link on failure.
+ */
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 1a0d38cd46337..ce25d14db4d28 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -3707,10 +3707,8 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
+ }
+
+ eht_cap = ieee80211_get_eht_iftype_cap(sband, iftype);
+- if (!eht_cap) {
+- sdata_info(sdata, "Missing iftype sband data/EHT cap");
++ if (!eht_cap)
+ eht_oper = NULL;
+- }
+
+ he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
+
+diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
+index c1557d47ccd1e..d4fd626d2b8c3 100644
+--- a/net/netfilter/nf_conntrack_proto_dccp.c
++++ b/net/netfilter/nf_conntrack_proto_dccp.c
+@@ -432,9 +432,19 @@ static bool dccp_error(const struct dccp_hdr *dh,
+ struct sk_buff *skb, unsigned int dataoff,
+ const struct nf_hook_state *state)
+ {
++ static const unsigned long require_seq48 = 1 << DCCP_PKT_REQUEST |
++ 1 << DCCP_PKT_RESPONSE |
++ 1 << DCCP_PKT_CLOSEREQ |
++ 1 << DCCP_PKT_CLOSE |
++ 1 << DCCP_PKT_RESET |
++ 1 << DCCP_PKT_SYNC |
++ 1 << DCCP_PKT_SYNCACK;
+ unsigned int dccp_len = skb->len - dataoff;
+ unsigned int cscov;
+ const char *msg;
++ u8 type;
++
++ BUILD_BUG_ON(DCCP_PKT_INVALID >= BITS_PER_LONG);
+
+ if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) ||
+ dh->dccph_doff * 4 > dccp_len) {
+@@ -459,34 +469,70 @@ static bool dccp_error(const struct dccp_hdr *dh,
+ goto out_invalid;
+ }
+
+- if (dh->dccph_type >= DCCP_PKT_INVALID) {
++ type = dh->dccph_type;
++ if (type >= DCCP_PKT_INVALID) {
+ msg = "nf_ct_dccp: reserved packet type ";
+ goto out_invalid;
+ }
++
++ if (test_bit(type, &require_seq48) && !dh->dccph_x) {
++ msg = "nf_ct_dccp: type lacks 48bit sequence numbers";
++ goto out_invalid;
++ }
++
+ return false;
+ out_invalid:
+ nf_l4proto_log_invalid(skb, state, IPPROTO_DCCP, "%s", msg);
+ return true;
+ }
+
++struct nf_conntrack_dccp_buf {
++ struct dccp_hdr dh; /* generic header part */
++ struct dccp_hdr_ext ext; /* optional depending dh->dccph_x */
++ union { /* depends on header type */
++ struct dccp_hdr_ack_bits ack;
++ struct dccp_hdr_request req;
++ struct dccp_hdr_response response;
++ struct dccp_hdr_reset rst;
++ } u;
++};
++
++static struct dccp_hdr *
++dccp_header_pointer(const struct sk_buff *skb, int offset, const struct dccp_hdr *dh,
++ struct nf_conntrack_dccp_buf *buf)
++{
++ unsigned int hdrlen = __dccp_hdr_len(dh);
++
++ if (hdrlen > sizeof(*buf))
++ return NULL;
++
++ return skb_header_pointer(skb, offset, hdrlen, buf);
++}
++
+ int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
+ {
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+- struct dccp_hdr _dh, *dh;
++ struct nf_conntrack_dccp_buf _dh;
+ u_int8_t type, old_state, new_state;
+ enum ct_dccp_roles role;
+ unsigned int *timeouts;
++ struct dccp_hdr *dh;
+
+- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
++ dh = skb_header_pointer(skb, dataoff, sizeof(*dh), &_dh.dh);
+ if (!dh)
+ return NF_DROP;
+
+ if (dccp_error(dh, skb, dataoff, state))
+ return -NF_ACCEPT;
+
++ /* pull again, including possible 48 bit sequences and subtype header */
++ dh = dccp_header_pointer(skb, dataoff, dh, &_dh);
++ if (!dh)
++ return NF_DROP;
++
+ type = dh->dccph_type;
+ if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh, state))
+ return -NF_ACCEPT;
+diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
+index 77f5e82d8e3fe..d0eac27f6ba03 100644
+--- a/net/netfilter/nf_conntrack_sip.c
++++ b/net/netfilter/nf_conntrack_sip.c
+@@ -611,7 +611,7 @@ int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
+ start += strlen(name);
+ *val = simple_strtoul(start, &end, 0);
+ if (start == end)
+- return 0;
++ return -1;
+ if (matchoff && matchlen) {
+ *matchoff = start - dptr;
+ *matchlen = end - start;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 717e27a4b66a0..f4adccb8d49a2 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1600,6 +1600,7 @@ out:
+ int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
+ {
+ struct netlink_set_err_data info;
++ unsigned long flags;
+ struct sock *sk;
+ int ret = 0;
+
+@@ -1609,12 +1610,12 @@ int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
+ /* sk->sk_err wants a positive error value */
+ info.code = -code;
+
+- read_lock(&nl_table_lock);
++ read_lock_irqsave(&nl_table_lock, flags);
+
+ sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
+ ret += do_one_set_err(sk, &info);
+
+- read_unlock(&nl_table_lock);
++ read_unlock_irqrestore(&nl_table_lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL(netlink_set_err);
+diff --git a/net/netlink/diag.c b/net/netlink/diag.c
+index c6255eac305c7..e4f21b1067bcc 100644
+--- a/net/netlink/diag.c
++++ b/net/netlink/diag.c
+@@ -94,6 +94,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net *net = sock_net(skb->sk);
+ struct netlink_diag_req *req;
+ struct netlink_sock *nlsk;
++ unsigned long flags;
+ struct sock *sk;
+ int num = 2;
+ int ret = 0;
+@@ -152,7 +153,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ num++;
+
+ mc_list:
+- read_lock(&nl_table_lock);
++ read_lock_irqsave(&nl_table_lock, flags);
+ sk_for_each_bound(sk, &tbl->mc_list) {
+ if (sk_hashed(sk))
+ continue;
+@@ -167,13 +168,13 @@ mc_list:
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI,
+- sock_i_ino(sk)) < 0) {
++ __sock_i_ino(sk)) < 0) {
+ ret = 1;
+ break;
+ }
+ num++;
+ }
+- read_unlock(&nl_table_lock);
++ read_unlock_irqrestore(&nl_table_lock, flags);
+
+ done:
+ cb->args[0] = num;
+diff --git a/net/nfc/llcp.h b/net/nfc/llcp.h
+index c1d9be636933c..d8345ed57c954 100644
+--- a/net/nfc/llcp.h
++++ b/net/nfc/llcp.h
+@@ -201,7 +201,6 @@ void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *s);
+ void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *s);
+ void nfc_llcp_socket_remote_param_init(struct nfc_llcp_sock *sock);
+ struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
+-struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local);
+ int nfc_llcp_local_put(struct nfc_llcp_local *local);
+ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
+ struct nfc_llcp_sock *sock);
+diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
+index 41e3a20c89355..e2680a3bef799 100644
+--- a/net/nfc/llcp_commands.c
++++ b/net/nfc/llcp_commands.c
+@@ -359,6 +359,7 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
+ struct sk_buff *skb;
+ struct nfc_llcp_local *local;
+ u16 size = 0;
++ int err;
+
+ local = nfc_llcp_find_local(dev);
+ if (local == NULL)
+@@ -368,8 +369,10 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
+ size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
+
+ skb = alloc_skb(size, GFP_KERNEL);
+- if (skb == NULL)
+- return -ENOMEM;
++ if (skb == NULL) {
++ err = -ENOMEM;
++ goto out;
++ }
+
+ skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+
+@@ -379,8 +382,11 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
+
+ nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_TX);
+
+- return nfc_data_exchange(dev, local->target_idx, skb,
++ err = nfc_data_exchange(dev, local->target_idx, skb,
+ nfc_llcp_recv, local);
++out:
++ nfc_llcp_local_put(local);
++ return err;
+ }
+
+ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
+@@ -390,7 +396,8 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
+ const u8 *service_name_tlv = NULL;
+ const u8 *miux_tlv = NULL;
+ const u8 *rw_tlv = NULL;
+- u8 service_name_tlv_length, miux_tlv_length, rw_tlv_length, rw;
++ u8 service_name_tlv_length = 0;
++ u8 miux_tlv_length, rw_tlv_length, rw;
+ int err;
+ u16 size = 0;
+ __be16 miux;
+diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
+index a27e1842b2a09..f60e424e06076 100644
+--- a/net/nfc/llcp_core.c
++++ b/net/nfc/llcp_core.c
+@@ -17,6 +17,8 @@
+ static u8 llcp_magic[3] = {0x46, 0x66, 0x6d};
+
+ static LIST_HEAD(llcp_devices);
++/* Protects llcp_devices list */
++static DEFINE_SPINLOCK(llcp_devices_lock);
+
+ static void nfc_llcp_rx_skb(struct nfc_llcp_local *local, struct sk_buff *skb);
+
+@@ -141,7 +143,7 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool device,
+ write_unlock(&local->raw_sockets.lock);
+ }
+
+-struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
++static struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
+ {
+ kref_get(&local->ref);
+
+@@ -169,7 +171,6 @@ static void local_release(struct kref *ref)
+
+ local = container_of(ref, struct nfc_llcp_local, ref);
+
+- list_del(&local->list);
+ local_cleanup(local);
+ kfree(local);
+ }
+@@ -282,12 +283,33 @@ static void nfc_llcp_sdreq_timer(struct timer_list *t)
+ struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev)
+ {
+ struct nfc_llcp_local *local;
++ struct nfc_llcp_local *res = NULL;
+
++ spin_lock(&llcp_devices_lock);
+ list_for_each_entry(local, &llcp_devices, list)
+- if (local->dev == dev)
++ if (local->dev == dev) {
++ res = nfc_llcp_local_get(local);
++ break;
++ }
++ spin_unlock(&llcp_devices_lock);
++
++ return res;
++}
++
++static struct nfc_llcp_local *nfc_llcp_remove_local(struct nfc_dev *dev)
++{
++ struct nfc_llcp_local *local, *tmp;
++
++ spin_lock(&llcp_devices_lock);
++ list_for_each_entry_safe(local, tmp, &llcp_devices, list)
++ if (local->dev == dev) {
++ list_del(&local->list);
++ spin_unlock(&llcp_devices_lock);
+ return local;
++ }
++ spin_unlock(&llcp_devices_lock);
+
+- pr_debug("No device found\n");
++ pr_warn("Shutting down device not found\n");
+
+ return NULL;
+ }
+@@ -608,12 +630,15 @@ u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
+
+ *general_bytes_len = local->gb_len;
+
++ nfc_llcp_local_put(local);
++
+ return local->gb;
+ }
+
+ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len)
+ {
+ struct nfc_llcp_local *local;
++ int err;
+
+ if (gb_len < 3 || gb_len > NFC_MAX_GT_LEN)
+ return -EINVAL;
+@@ -630,12 +655,16 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len)
+
+ if (memcmp(local->remote_gb, llcp_magic, 3)) {
+ pr_err("MAC does not support LLCP\n");
+- return -EINVAL;
++ err = -EINVAL;
++ goto out;
+ }
+
+- return nfc_llcp_parse_gb_tlv(local,
++ err = nfc_llcp_parse_gb_tlv(local,
+ &local->remote_gb[3],
+ local->remote_gb_len - 3);
++out:
++ nfc_llcp_local_put(local);
++ return err;
+ }
+
+ static u8 nfc_llcp_dsap(const struct sk_buff *pdu)
+@@ -1517,6 +1546,8 @@ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
+
+ __nfc_llcp_recv(local, skb);
+
++ nfc_llcp_local_put(local);
++
+ return 0;
+ }
+
+@@ -1533,6 +1564,8 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev)
+
+ /* Close and purge all existing sockets */
+ nfc_llcp_socket_release(local, true, 0);
++
++ nfc_llcp_local_put(local);
+ }
+
+ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
+@@ -1558,6 +1591,8 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
+ mod_timer(&local->link_timer,
+ jiffies + msecs_to_jiffies(local->remote_lto));
+ }
++
++ nfc_llcp_local_put(local);
+ }
+
+ int nfc_llcp_register_device(struct nfc_dev *ndev)
+@@ -1608,7 +1643,7 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
+
+ void nfc_llcp_unregister_device(struct nfc_dev *dev)
+ {
+- struct nfc_llcp_local *local = nfc_llcp_find_local(dev);
++ struct nfc_llcp_local *local = nfc_llcp_remove_local(dev);
+
+ if (local == NULL) {
+ pr_debug("No such device\n");
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index 77642d18a3b43..645677f84dba2 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -99,7 +99,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+ }
+
+ llcp_sock->dev = dev;
+- llcp_sock->local = nfc_llcp_local_get(local);
++ llcp_sock->local = local;
+ llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
+ llcp_sock->service_name_len = min_t(unsigned int,
+ llcp_addr.service_name_len,
+@@ -186,7 +186,7 @@ static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr,
+ }
+
+ llcp_sock->dev = dev;
+- llcp_sock->local = nfc_llcp_local_get(local);
++ llcp_sock->local = local;
+ llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
+
+ nfc_llcp_sock_link(&local->raw_sockets, sk);
+@@ -696,22 +696,22 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
+ if (dev->dep_link_up == false) {
+ ret = -ENOLINK;
+ device_unlock(&dev->dev);
+- goto put_dev;
++ goto sock_llcp_put_local;
+ }
+ device_unlock(&dev->dev);
+
+ if (local->rf_mode == NFC_RF_INITIATOR &&
+ addr->target_idx != local->target_idx) {
+ ret = -ENOLINK;
+- goto put_dev;
++ goto sock_llcp_put_local;
+ }
+
+ llcp_sock->dev = dev;
+- llcp_sock->local = nfc_llcp_local_get(local);
++ llcp_sock->local = local;
+ llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
+ if (llcp_sock->ssap == LLCP_SAP_MAX) {
+ ret = -ENOMEM;
+- goto sock_llcp_put_local;
++ goto sock_llcp_nullify;
+ }
+
+ llcp_sock->reserved_ssap = llcp_sock->ssap;
+@@ -757,11 +757,13 @@ sock_unlink:
+ sock_llcp_release:
+ nfc_llcp_put_ssap(local, llcp_sock->ssap);
+
+-sock_llcp_put_local:
+- nfc_llcp_local_put(llcp_sock->local);
++sock_llcp_nullify:
+ llcp_sock->local = NULL;
+ llcp_sock->dev = NULL;
+
++sock_llcp_put_local:
++ nfc_llcp_local_put(local);
++
+ put_dev:
+ nfc_put_device(dev);
+
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index b9264e730fd93..e9ac6a6f934e7 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -1039,11 +1039,14 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ rc = -ENOMEM;
+- goto exit;
++ goto put_local;
+ }
+
+ rc = nfc_genl_send_params(msg, local, info->snd_portid, info->snd_seq);
+
++put_local:
++ nfc_llcp_local_put(local);
++
+ exit:
+ device_unlock(&dev->dev);
+
+@@ -1105,7 +1108,7 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
+ if (info->attrs[NFC_ATTR_LLC_PARAM_LTO]) {
+ if (dev->dep_link_up) {
+ rc = -EINPROGRESS;
+- goto exit;
++ goto put_local;
+ }
+
+ local->lto = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_LTO]);
+@@ -1117,6 +1120,9 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
+ if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX])
+ local->miux = cpu_to_be16(miux);
+
++put_local:
++ nfc_llcp_local_put(local);
++
+ exit:
+ device_unlock(&dev->dev);
+
+@@ -1172,7 +1178,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
+
+ if (rc != 0) {
+ rc = -EINVAL;
+- goto exit;
++ goto put_local;
+ }
+
+ if (!sdp_attrs[NFC_SDP_ATTR_URI])
+@@ -1191,7 +1197,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
+ sdreq = nfc_llcp_build_sdreq_tlv(tid, uri, uri_len);
+ if (sdreq == NULL) {
+ rc = -ENOMEM;
+- goto exit;
++ goto put_local;
+ }
+
+ tlvs_len += sdreq->tlv_len;
+@@ -1201,10 +1207,14 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
+
+ if (hlist_empty(&sdreq_list)) {
+ rc = -EINVAL;
+- goto exit;
++ goto put_local;
+ }
+
+ rc = nfc_llcp_send_snl_sdreq(local, &sdreq_list, tlvs_len);
++
++put_local:
++ nfc_llcp_local_put(local);
++
+ exit:
+ device_unlock(&dev->dev);
+
+diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
+index de2ec66d7e83a..0b1e6466f4fbf 100644
+--- a/net/nfc/nfc.h
++++ b/net/nfc/nfc.h
+@@ -52,6 +52,7 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len);
+ u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len);
+ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb);
+ struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
++int nfc_llcp_local_put(struct nfc_llcp_local *local);
+ int __init nfc_llcp_init(void);
+ void nfc_llcp_exit(void);
+ void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp);
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 218e0982c3707..0932cbf568ee9 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -8280,6 +8280,22 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
+ return retval;
+ }
+
++static bool sctp_bpf_bypass_getsockopt(int level, int optname)
++{
++ if (level == SOL_SCTP) {
++ switch (optname) {
++ case SCTP_SOCKOPT_PEELOFF:
++ case SCTP_SOCKOPT_PEELOFF_FLAGS:
++ case SCTP_SOCKOPT_CONNECTX3:
++ return true;
++ default:
++ return false;
++ }
++ }
++
++ return false;
++}
++
+ static int sctp_hash(struct sock *sk)
+ {
+ /* STUB */
+@@ -9649,6 +9665,7 @@ struct proto sctp_prot = {
+ .shutdown = sctp_shutdown,
+ .setsockopt = sctp_setsockopt,
+ .getsockopt = sctp_getsockopt,
++ .bpf_bypass_getsockopt = sctp_bpf_bypass_getsockopt,
+ .sendmsg = sctp_sendmsg,
+ .recvmsg = sctp_recvmsg,
+ .bind = sctp_bind,
+@@ -9704,6 +9721,7 @@ struct proto sctpv6_prot = {
+ .shutdown = sctp_shutdown,
+ .setsockopt = sctp_setsockopt,
+ .getsockopt = sctp_getsockopt,
++ .bpf_bypass_getsockopt = sctp_bpf_bypass_getsockopt,
+ .sendmsg = sctp_sendmsg,
+ .recvmsg = sctp_recvmsg,
+ .bind = sctp_bind,
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index a22fe7587fa6f..70207d8a318a4 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -796,6 +796,12 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
+ struct svc_rdma_recv_ctxt *ctxt;
+ int ret;
+
++ /* Prevent svc_xprt_release() from releasing pages in rq_pages
++ * when returning 0 or an error.
++ */
++ rqstp->rq_respages = rqstp->rq_pages;
++ rqstp->rq_next_page = rqstp->rq_respages;
++
+ rqstp->rq_xprt_ctxt = NULL;
+
+ ctxt = NULL;
+@@ -819,12 +825,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
+ DMA_FROM_DEVICE);
+ svc_rdma_build_arg_xdr(rqstp, ctxt);
+
+- /* Prevent svc_xprt_release from releasing pages in rq_pages
+- * if we return 0 or an error.
+- */
+- rqstp->rq_respages = rqstp->rq_pages;
+- rqstp->rq_next_page = rqstp->rq_respages;
+-
+ ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt);
+ if (ret < 0)
+ goto out_err;
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index b3ec9eaec36b3..609b79fe4a748 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -721,22 +721,6 @@ int wiphy_register(struct wiphy *wiphy)
+ return -EINVAL;
+ }
+
+- /*
+- * if a wiphy has unsupported modes for regulatory channel enforcement,
+- * opt-out of enforcement checking
+- */
+- if (wiphy->interface_modes & ~(BIT(NL80211_IFTYPE_STATION) |
+- BIT(NL80211_IFTYPE_P2P_CLIENT) |
+- BIT(NL80211_IFTYPE_AP) |
+- BIT(NL80211_IFTYPE_MESH_POINT) |
+- BIT(NL80211_IFTYPE_P2P_GO) |
+- BIT(NL80211_IFTYPE_ADHOC) |
+- BIT(NL80211_IFTYPE_P2P_DEVICE) |
+- BIT(NL80211_IFTYPE_NAN) |
+- BIT(NL80211_IFTYPE_AP_VLAN) |
+- BIT(NL80211_IFTYPE_MONITOR)))
+- wiphy->regulatory_flags |= REGULATORY_IGNORE_STALE_KICKOFF;
+-
+ if (WARN_ON((wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) &&
+ (wiphy->regulatory_flags &
+ (REGULATORY_CUSTOM_REG |
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 26f11e4746c05..c8a1b925413b3 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -2391,9 +2391,17 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
+ case NL80211_IFTYPE_P2P_DEVICE:
+ /* no enforcement required */
+ break;
++ case NL80211_IFTYPE_OCB:
++ if (!wdev->u.ocb.chandef.chan)
++ continue;
++ chandef = wdev->u.ocb.chandef;
++ break;
++ case NL80211_IFTYPE_NAN:
++ /* we have no info, but NAN is also pretty universal */
++ continue;
+ default:
+ /* others not implemented for now */
+- WARN_ON(1);
++ WARN_ON_ONCE(1);
+ break;
+ }
+
+@@ -2452,9 +2460,7 @@ static void reg_check_chans_work(struct work_struct *work)
+ rtnl_lock();
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list)
+- if (!(rdev->wiphy.regulatory_flags &
+- REGULATORY_IGNORE_STALE_KICKOFF))
+- reg_leave_invalid_chans(&rdev->wiphy);
++ reg_leave_invalid_chans(&rdev->wiphy);
+
+ rtnl_unlock();
+ }
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index b3829ed844f84..68f9b6f7bf584 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -259,117 +259,152 @@ bool cfg80211_is_element_inherited(const struct element *elem,
+ }
+ EXPORT_SYMBOL(cfg80211_is_element_inherited);
+
+-static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
+- const u8 *subelement, size_t subie_len,
+- u8 *new_ie, gfp_t gfp)
++static size_t cfg80211_copy_elem_with_frags(const struct element *elem,
++ const u8 *ie, size_t ie_len,
++ u8 **pos, u8 *buf, size_t buf_len)
+ {
+- u8 *pos, *tmp;
+- const u8 *tmp_old, *tmp_new;
+- const struct element *non_inherit_elem;
+- u8 *sub_copy;
++ if (WARN_ON((u8 *)elem < ie || elem->data > ie + ie_len ||
++ elem->data + elem->datalen > ie + ie_len))
++ return 0;
+
+- /* copy subelement as we need to change its content to
+- * mark an ie after it is processed.
+- */
+- sub_copy = kmemdup(subelement, subie_len, gfp);
+- if (!sub_copy)
++ if (elem->datalen + 2 > buf + buf_len - *pos)
+ return 0;
+
+- pos = &new_ie[0];
++ memcpy(*pos, elem, elem->datalen + 2);
++ *pos += elem->datalen + 2;
++
++ /* Finish if it is not fragmented */
++ if (elem->datalen != 255)
++ return *pos - buf;
++
++ ie_len = ie + ie_len - elem->data - elem->datalen;
++ ie = (const u8 *)elem->data + elem->datalen;
++
++ for_each_element(elem, ie, ie_len) {
++ if (elem->id != WLAN_EID_FRAGMENT)
++ break;
++
++ if (elem->datalen + 2 > buf + buf_len - *pos)
++ return 0;
++
++ memcpy(*pos, elem, elem->datalen + 2);
++ *pos += elem->datalen + 2;
+
+- /* set new ssid */
+- tmp_new = cfg80211_find_ie(WLAN_EID_SSID, sub_copy, subie_len);
+- if (tmp_new) {
+- memcpy(pos, tmp_new, tmp_new[1] + 2);
+- pos += (tmp_new[1] + 2);
++ if (elem->datalen != 255)
++ break;
+ }
+
+- /* get non inheritance list if exists */
+- non_inherit_elem =
+- cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+- sub_copy, subie_len);
++ return *pos - buf;
++}
+
+- /* go through IEs in ie (skip SSID) and subelement,
+- * merge them into new_ie
++static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
++ const u8 *subie, size_t subie_len,
++ u8 *new_ie, size_t new_ie_len)
++{
++ const struct element *non_inherit_elem, *parent, *sub;
++ u8 *pos = new_ie;
++ u8 id, ext_id;
++ unsigned int match_len;
++
++ non_inherit_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
++ subie, subie_len);
++
++ /* We copy the elements one by one from the parent to the generated
++ * elements.
++ * If they are not inherited (included in subie or in the non
++ * inheritance element), then we copy all occurrences the first time
++ * we see this element type.
+ */
+- tmp_old = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen);
+- tmp_old = (tmp_old) ? tmp_old + tmp_old[1] + 2 : ie;
+-
+- while (tmp_old + 2 - ie <= ielen &&
+- tmp_old + tmp_old[1] + 2 - ie <= ielen) {
+- if (tmp_old[0] == 0) {
+- tmp_old++;
++ for_each_element(parent, ie, ielen) {
++ if (parent->id == WLAN_EID_FRAGMENT)
+ continue;
++
++ if (parent->id == WLAN_EID_EXTENSION) {
++ if (parent->datalen < 1)
++ continue;
++
++ id = WLAN_EID_EXTENSION;
++ ext_id = parent->data[0];
++ match_len = 1;
++ } else {
++ id = parent->id;
++ match_len = 0;
+ }
+
+- if (tmp_old[0] == WLAN_EID_EXTENSION)
+- tmp = (u8 *)cfg80211_find_ext_ie(tmp_old[2], sub_copy,
+- subie_len);
+- else
+- tmp = (u8 *)cfg80211_find_ie(tmp_old[0], sub_copy,
+- subie_len);
++ /* Find first occurrence in subie */
++ sub = cfg80211_find_elem_match(id, subie, subie_len,
++ &ext_id, match_len, 0);
+
+- if (!tmp) {
+- const struct element *old_elem = (void *)tmp_old;
++ /* Copy from parent if not in subie and inherited */
++ if (!sub &&
++ cfg80211_is_element_inherited(parent, non_inherit_elem)) {
++ if (!cfg80211_copy_elem_with_frags(parent,
++ ie, ielen,
++ &pos, new_ie,
++ new_ie_len))
++ return 0;
+
+- /* ie in old ie but not in subelement */
+- if (cfg80211_is_element_inherited(old_elem,
+- non_inherit_elem)) {
+- memcpy(pos, tmp_old, tmp_old[1] + 2);
+- pos += tmp_old[1] + 2;
+- }
+- } else {
+- /* ie in transmitting ie also in subelement,
+- * copy from subelement and flag the ie in subelement
+- * as copied (by setting eid field to WLAN_EID_SSID,
+- * which is skipped anyway).
+- * For vendor ie, compare OUI + type + subType to
+- * determine if they are the same ie.
+- */
+- if (tmp_old[0] == WLAN_EID_VENDOR_SPECIFIC) {
+- if (tmp_old[1] >= 5 && tmp[1] >= 5 &&
+- !memcmp(tmp_old + 2, tmp + 2, 5)) {
+- /* same vendor ie, copy from
+- * subelement
+- */
+- memcpy(pos, tmp, tmp[1] + 2);
+- pos += tmp[1] + 2;
+- tmp[0] = WLAN_EID_SSID;
+- } else {
+- memcpy(pos, tmp_old, tmp_old[1] + 2);
+- pos += tmp_old[1] + 2;
+- }
+- } else {
+- /* copy ie from subelement into new ie */
+- memcpy(pos, tmp, tmp[1] + 2);
+- pos += tmp[1] + 2;
+- tmp[0] = WLAN_EID_SSID;
+- }
++ continue;
+ }
+
+- if (tmp_old + tmp_old[1] + 2 - ie == ielen)
+- break;
++ /* Already copied if an earlier element had the same type */
++ if (cfg80211_find_elem_match(id, ie, (u8 *)parent - ie,
++ &ext_id, match_len, 0))
++ continue;
+
+- tmp_old += tmp_old[1] + 2;
++ /* Not inheriting, copy all similar elements from subie */
++ while (sub) {
++ if (!cfg80211_copy_elem_with_frags(sub,
++ subie, subie_len,
++ &pos, new_ie,
++ new_ie_len))
++ return 0;
++
++ sub = cfg80211_find_elem_match(id,
++ sub->data + sub->datalen,
++ subie_len + subie -
++ (sub->data +
++ sub->datalen),
++ &ext_id, match_len, 0);
++ }
+ }
+
+- /* go through subelement again to check if there is any ie not
+- * copied to new ie, skip ssid, capability, bssid-index ie
++ /* The above misses elements that are included in subie but not in the
++ * parent, so do a pass over subie and append those.
++ * Skip the non-tx BSSID caps and non-inheritance element.
+ */
+- tmp_new = sub_copy;
+- while (tmp_new + 2 - sub_copy <= subie_len &&
+- tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) {
+- if (!(tmp_new[0] == WLAN_EID_NON_TX_BSSID_CAP ||
+- tmp_new[0] == WLAN_EID_SSID)) {
+- memcpy(pos, tmp_new, tmp_new[1] + 2);
+- pos += tmp_new[1] + 2;
++ for_each_element(sub, subie, subie_len) {
++ if (sub->id == WLAN_EID_NON_TX_BSSID_CAP)
++ continue;
++
++ if (sub->id == WLAN_EID_FRAGMENT)
++ continue;
++
++ if (sub->id == WLAN_EID_EXTENSION) {
++ if (sub->datalen < 1)
++ continue;
++
++ id = WLAN_EID_EXTENSION;
++ ext_id = sub->data[0];
++ match_len = 1;
++
++ if (ext_id == WLAN_EID_EXT_NON_INHERITANCE)
++ continue;
++ } else {
++ id = sub->id;
++ match_len = 0;
+ }
+- if (tmp_new + tmp_new[1] + 2 - sub_copy == subie_len)
+- break;
+- tmp_new += tmp_new[1] + 2;
++
++ /* Processed if one was included in the parent */
++ if (cfg80211_find_elem_match(id, ie, ielen,
++ &ext_id, match_len, 0))
++ continue;
++
++ if (!cfg80211_copy_elem_with_frags(sub, subie, subie_len,
++ &pos, new_ie, new_ie_len))
++ return 0;
+ }
+
+- kfree(sub_copy);
+ return pos - new_ie;
+ }
+
+@@ -2217,7 +2252,7 @@ static void cfg80211_parse_mbssid_data(struct wiphy *wiphy,
+ new_ie_len = cfg80211_gen_new_ie(ie, ielen,
+ profile,
+ profile_len, new_ie,
+- gfp);
++ IEEE80211_MAX_DATA_LEN);
+ if (!new_ie_len)
+ continue;
+
+@@ -2266,118 +2301,6 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
+ }
+ EXPORT_SYMBOL(cfg80211_inform_bss_data);
+
+-static void
+-cfg80211_parse_mbssid_frame_data(struct wiphy *wiphy,
+- struct cfg80211_inform_bss *data,
+- struct ieee80211_mgmt *mgmt, size_t len,
+- struct cfg80211_non_tx_bss *non_tx_data,
+- gfp_t gfp)
+-{
+- enum cfg80211_bss_frame_type ftype;
+- const u8 *ie = mgmt->u.probe_resp.variable;
+- size_t ielen = len - offsetof(struct ieee80211_mgmt,
+- u.probe_resp.variable);
+-
+- ftype = ieee80211_is_beacon(mgmt->frame_control) ?
+- CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP;
+-
+- cfg80211_parse_mbssid_data(wiphy, data, ftype, mgmt->bssid,
+- le64_to_cpu(mgmt->u.probe_resp.timestamp),
+- le16_to_cpu(mgmt->u.probe_resp.beacon_int),
+- ie, ielen, non_tx_data, gfp);
+-}
+-
+-static void
+-cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
+- struct cfg80211_bss *nontrans_bss,
+- struct ieee80211_mgmt *mgmt, size_t len)
+-{
+- u8 *ie, *new_ie, *pos;
+- const struct element *nontrans_ssid;
+- const u8 *trans_ssid, *mbssid;
+- size_t ielen = len - offsetof(struct ieee80211_mgmt,
+- u.probe_resp.variable);
+- size_t new_ie_len;
+- struct cfg80211_bss_ies *new_ies;
+- const struct cfg80211_bss_ies *old;
+- size_t cpy_len;
+-
+- lockdep_assert_held(&wiphy_to_rdev(wiphy)->bss_lock);
+-
+- ie = mgmt->u.probe_resp.variable;
+-
+- new_ie_len = ielen;
+- trans_ssid = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen);
+- if (!trans_ssid)
+- return;
+- new_ie_len -= trans_ssid[1];
+- mbssid = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen);
+- /*
+- * It's not valid to have the MBSSID element before SSID
+- * ignore if that happens - the code below assumes it is
+- * after (while copying things inbetween).
+- */
+- if (!mbssid || mbssid < trans_ssid)
+- return;
+- new_ie_len -= mbssid[1];
+-
+- nontrans_ssid = ieee80211_bss_get_elem(nontrans_bss, WLAN_EID_SSID);
+- if (!nontrans_ssid)
+- return;
+-
+- new_ie_len += nontrans_ssid->datalen;
+-
+- /* generate new ie for nontrans BSS
+- * 1. replace SSID with nontrans BSS' SSID
+- * 2. skip MBSSID IE
+- */
+- new_ie = kzalloc(new_ie_len, GFP_ATOMIC);
+- if (!new_ie)
+- return;
+-
+- new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, GFP_ATOMIC);
+- if (!new_ies)
+- goto out_free;
+-
+- pos = new_ie;
+-
+- /* copy the nontransmitted SSID */
+- cpy_len = nontrans_ssid->datalen + 2;
+- memcpy(pos, nontrans_ssid, cpy_len);
+- pos += cpy_len;
+- /* copy the IEs between SSID and MBSSID */
+- cpy_len = trans_ssid[1] + 2;
+- memcpy(pos, (trans_ssid + cpy_len), (mbssid - (trans_ssid + cpy_len)));
+- pos += (mbssid - (trans_ssid + cpy_len));
+- /* copy the IEs after MBSSID */
+- cpy_len = mbssid[1] + 2;
+- memcpy(pos, mbssid + cpy_len, ((ie + ielen) - (mbssid + cpy_len)));
+-
+- /* update ie */
+- new_ies->len = new_ie_len;
+- new_ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
+- new_ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control);
+- memcpy(new_ies->data, new_ie, new_ie_len);
+- if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+- old = rcu_access_pointer(nontrans_bss->proberesp_ies);
+- rcu_assign_pointer(nontrans_bss->proberesp_ies, new_ies);
+- rcu_assign_pointer(nontrans_bss->ies, new_ies);
+- if (old)
+- kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
+- } else {
+- old = rcu_access_pointer(nontrans_bss->beacon_ies);
+- rcu_assign_pointer(nontrans_bss->beacon_ies, new_ies);
+- cfg80211_update_hidden_bsses(bss_from_pub(nontrans_bss),
+- new_ies, old);
+- rcu_assign_pointer(nontrans_bss->ies, new_ies);
+- if (old)
+- kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
+- }
+-
+-out_free:
+- kfree(new_ie);
+-}
+-
+ /* cfg80211_inform_bss_width_frame helper */
+ static struct cfg80211_bss *
+ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
+@@ -2519,51 +2442,31 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ gfp_t gfp)
+ {
+- struct cfg80211_bss *res, *tmp_bss;
++ struct cfg80211_bss *res;
+ const u8 *ie = mgmt->u.probe_resp.variable;
+- const struct cfg80211_bss_ies *ies1, *ies2;
+ size_t ielen = len - offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
++ enum cfg80211_bss_frame_type ftype;
+ struct cfg80211_non_tx_bss non_tx_data = {};
+
+ res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt,
+ len, gfp);
++ if (!res)
++ return NULL;
+
+ /* don't do any further MBSSID handling for S1G */
+ if (ieee80211_is_s1g_beacon(mgmt->frame_control))
+ return res;
+
+- if (!res || !wiphy->support_mbssid ||
+- !cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+- return res;
+- if (wiphy->support_only_he_mbssid &&
+- !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+- return res;
+-
++ ftype = ieee80211_is_beacon(mgmt->frame_control) ?
++ CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP;
+ non_tx_data.tx_bss = res;
+- /* process each non-transmitting bss */
+- cfg80211_parse_mbssid_frame_data(wiphy, data, mgmt, len,
+- &non_tx_data, gfp);
+-
+- spin_lock_bh(&wiphy_to_rdev(wiphy)->bss_lock);
+
+- /* check if the res has other nontransmitting bss which is not
+- * in MBSSID IE
+- */
+- ies1 = rcu_access_pointer(res->ies);
+-
+- /* go through nontrans_list, if the timestamp of the BSS is
+- * earlier than the timestamp of the transmitting BSS then
+- * update it
+- */
+- list_for_each_entry(tmp_bss, &res->nontrans_list,
+- nontrans_list) {
+- ies2 = rcu_access_pointer(tmp_bss->ies);
+- if (ies2->tsf < ies1->tsf)
+- cfg80211_update_notlisted_nontrans(wiphy, tmp_bss,
+- mgmt, len);
+- }
+- spin_unlock_bh(&wiphy_to_rdev(wiphy)->bss_lock);
++ /* process each non-transmitting bss */
++ cfg80211_parse_mbssid_data(wiphy, data, ftype, mgmt->bssid,
++ le64_to_cpu(mgmt->u.probe_resp.timestamp),
++ le16_to_cpu(mgmt->u.probe_resp.beacon_int),
++ ie, ielen, &non_tx_data, gfp);
+
+ return res;
+ }
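The rewritten cfg80211_gen_new_ie() above iterates 802.11 information elements, each laid out as a one-byte ID, a one-byte length and up to 255 bytes of payload, where a 255-byte payload means the element continues in a following fragment element (that is what cfg80211_copy_elem_with_frags() copies). A minimal standalone walker over that layout, with hypothetical sample data and plain pointer arithmetic instead of the kernel's for_each_element() helpers:

#include <stdio.h>
#include <stddef.h>

struct element {
	unsigned char id;
	unsigned char datalen;
	unsigned char data[];
};

static void walk_elements(const unsigned char *ies, size_t len)
{
	size_t off = 0;

	while (off + 2 <= len) {
		const struct element *elem = (const void *)(ies + off);

		if (off + 2 + elem->datalen > len)
			break;			/* truncated element */
		printf("id %u, %u byte(s)%s\n", elem->id, elem->datalen,
		       elem->datalen == 255 ? " (continues in a fragment)" : "");
		off += 2 + elem->datalen;
	}
}

int main(void)
{
	/* hypothetical buffer: SSID "ab" followed by three supported rates */
	const unsigned char ies[] = { 0, 2, 'a', 'b', 1, 3, 0x82, 0x84, 0x8b };

	walk_elements(ies, sizeof(ies));
	return 0;
}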
+diff --git a/samples/bpf/tcp_basertt_kern.c b/samples/bpf/tcp_basertt_kern.c
+index 8dfe09a92feca..822b0742b8154 100644
+--- a/samples/bpf/tcp_basertt_kern.c
++++ b/samples/bpf/tcp_basertt_kern.c
+@@ -47,7 +47,7 @@ int bpf_basertt(struct bpf_sock_ops *skops)
+ case BPF_SOCK_OPS_BASE_RTT:
+ n = bpf_getsockopt(skops, SOL_TCP, TCP_CONGESTION,
+ cong, sizeof(cong));
+- if (!n && !__builtin_memcmp(cong, nv, sizeof(nv)+1)) {
++ if (!n && !__builtin_memcmp(cong, nv, sizeof(nv))) {
+ /* Set base_rtt to 80us */
+ rv = 80;
+ } else if (n) {
+diff --git a/samples/bpf/xdp1_kern.c b/samples/bpf/xdp1_kern.c
+index 0a5c704badd00..d91f27cbcfa99 100644
+--- a/samples/bpf/xdp1_kern.c
++++ b/samples/bpf/xdp1_kern.c
+@@ -39,7 +39,7 @@ static int parse_ipv6(void *data, u64 nh_off, void *data_end)
+ return ip6h->nexthdr;
+ }
+
+-#define XDPBUFSIZE 64
++#define XDPBUFSIZE 60
+ SEC("xdp.frags")
+ int xdp_prog1(struct xdp_md *ctx)
+ {
+diff --git a/samples/bpf/xdp2_kern.c b/samples/bpf/xdp2_kern.c
+index 67804ecf7ce37..8bca674451ed1 100644
+--- a/samples/bpf/xdp2_kern.c
++++ b/samples/bpf/xdp2_kern.c
+@@ -55,7 +55,7 @@ static int parse_ipv6(void *data, u64 nh_off, void *data_end)
+ return ip6h->nexthdr;
+ }
+
+-#define XDPBUFSIZE 64
++#define XDPBUFSIZE 60
+ SEC("xdp.frags")
+ int xdp_prog1(struct xdp_md *ctx)
+ {
+diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
+index 4703f652c0098..fc19f67039bda 100644
+--- a/scripts/Makefile.modfinal
++++ b/scripts/Makefile.modfinal
+@@ -23,7 +23,7 @@ modname = $(notdir $(@:.mod.o=))
+ part-of-module = y
+
+ quiet_cmd_cc_o_c = CC [M] $@
+- cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI), $(c_flags)) -c -o $@ $<
++ cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI) $(CFLAGS_GCOV), $(c_flags)) -c -o $@ $<
+
+ %.mod.o: %.mod.c FORCE
+ $(call if_changed_dep,cc_o_c)
+diff --git a/scripts/Makefile.vmlinux b/scripts/Makefile.vmlinux
+index 10176dec97eac..3cd6ca15f390d 100644
+--- a/scripts/Makefile.vmlinux
++++ b/scripts/Makefile.vmlinux
+@@ -19,6 +19,7 @@ quiet_cmd_cc_o_c = CC $@
+
+ ifdef CONFIG_MODULES
+ KASAN_SANITIZE_.vmlinux.export.o := n
++GCOV_PROFILE_.vmlinux.export.o := n
+ targets += .vmlinux.export.o
+ vmlinux: .vmlinux.export.o
+ endif
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 5b3964b39709f..b1163bad652aa 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -1156,6 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
+ if (relsym->st_name != 0)
+ return relsym;
+
++ /*
++ * Strive to find a better symbol name, but the resulting name may not
++ * match the symbol referenced in the original code.
++ */
+ relsym_secindex = get_secindex(elf, relsym);
+ for (sym = elf->symtab_start; sym < elf->symtab_stop; sym++) {
+ if (get_secindex(elf, sym) != relsym_secindex)
+@@ -1292,49 +1296,12 @@ static void default_mismatch_handler(const char *modname, struct elf_info *elf,
+
+ static int is_executable_section(struct elf_info* elf, unsigned int section_index)
+ {
+- if (section_index > elf->num_sections)
++ if (section_index >= elf->num_sections)
+ fatal("section_index is outside elf->num_sections!\n");
+
+ return ((elf->sechdrs[section_index].sh_flags & SHF_EXECINSTR) == SHF_EXECINSTR);
+ }
+
+-/*
+- * We rely on a gross hack in section_rel[a]() calling find_extable_entry_size()
+- * to know the sizeof(struct exception_table_entry) for the target architecture.
+- */
+-static unsigned int extable_entry_size = 0;
+-static void find_extable_entry_size(const char* const sec, const Elf_Rela* r)
+-{
+- /*
+- * If we're currently checking the second relocation within __ex_table,
+- * that relocation offset tells us the offsetof(struct
+- * exception_table_entry, fixup) which is equal to sizeof(struct
+- * exception_table_entry) divided by two. We use that to our advantage
+- * since there's no portable way to get that size as every architecture
+- * seems to go with different sized types. Not pretty but better than
+- * hard-coding the size for every architecture..
+- */
+- if (!extable_entry_size)
+- extable_entry_size = r->r_offset * 2;
+-}
+-
+-static inline bool is_extable_fault_address(Elf_Rela *r)
+-{
+- /*
+- * extable_entry_size is only discovered after we've handled the
+- * _second_ relocation in __ex_table, so only abort when we're not
+- * handling the first reloc and extable_entry_size is zero.
+- */
+- if (r->r_offset && extable_entry_size == 0)
+- fatal("extable_entry size hasn't been discovered!\n");
+-
+- return ((r->r_offset == 0) ||
+- (r->r_offset % extable_entry_size == 0));
+-}
+-
+-#define is_second_extable_reloc(Start, Cur, Sec) \
+- (((Cur) == (Start) + 1) && (strcmp("__ex_table", (Sec)) == 0))
+-
+ static void report_extable_warnings(const char* modname, struct elf_info* elf,
+ const struct sectioncheck* const mismatch,
+ Elf_Rela* r, Elf_Sym* sym,
+@@ -1390,22 +1357,9 @@ static void extable_mismatch_handler(const char* modname, struct elf_info *elf,
+ "You might get more information about where this is\n"
+ "coming from by using scripts/check_extable.sh %s\n",
+ fromsec, (long)r->r_offset, tosec, modname);
+- else if (!is_executable_section(elf, get_secindex(elf, sym))) {
+- if (is_extable_fault_address(r))
+- fatal("The relocation at %s+0x%lx references\n"
+- "section \"%s\" which is not executable, IOW\n"
+- "it is not possible for the kernel to fault\n"
+- "at that address. Something is seriously wrong\n"
+- "and should be fixed.\n",
+- fromsec, (long)r->r_offset, tosec);
+- else
+- fatal("The relocation at %s+0x%lx references\n"
+- "section \"%s\" which is not executable, IOW\n"
+- "the kernel will fault if it ever tries to\n"
+- "jump to it. Something is seriously wrong\n"
+- "and should be fixed.\n",
+- fromsec, (long)r->r_offset, tosec);
+- }
++ else if (!is_executable_section(elf, get_secindex(elf, sym)))
++ error("%s+0x%lx references non-executable section '%s'\n",
++ fromsec, (long)r->r_offset, tosec);
+ }
+
+ static void check_section_mismatch(const char *modname, struct elf_info *elf,
+@@ -1463,19 +1417,33 @@ static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
+ #define R_ARM_THM_JUMP19 51
+ #endif
+
++static int32_t sign_extend32(int32_t value, int index)
++{
++ uint8_t shift = 31 - index;
++
++ return (int32_t)(value << shift) >> shift;
++}
++
+ static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
+ {
+ unsigned int r_typ = ELF_R_TYPE(r->r_info);
++ Elf_Sym *sym = elf->symtab_start + ELF_R_SYM(r->r_info);
++ void *loc = reloc_location(elf, sechdr, r);
++ uint32_t inst;
++ int32_t offset;
+
+ switch (r_typ) {
+ case R_ARM_ABS32:
+- /* From ARM ABI: (S + A) | T */
+- r->r_addend = (int)(long)
+- (elf->symtab_start + ELF_R_SYM(r->r_info));
++ inst = TO_NATIVE(*(uint32_t *)loc);
++ r->r_addend = inst + sym->st_value;
+ break;
+ case R_ARM_PC24:
+ case R_ARM_CALL:
+ case R_ARM_JUMP24:
++ inst = TO_NATIVE(*(uint32_t *)loc);
++ offset = sign_extend32((inst & 0x00ffffff) << 2, 25);
++ r->r_addend = offset + sym->st_value + 8;
++ break;
+ case R_ARM_THM_CALL:
+ case R_ARM_THM_JUMP24:
+ case R_ARM_THM_JUMP19:
+@@ -1580,8 +1548,6 @@ static void section_rela(const char *modname, struct elf_info *elf,
+ /* Skip special sections */
+ if (is_shndx_special(sym->st_shndx))
+ continue;
+- if (is_second_extable_reloc(start, rela, fromsec))
+- find_extable_entry_size(fromsec, &r);
+ check_section_mismatch(modname, elf, &r, sym, fromsec);
+ }
+ }
+@@ -1639,8 +1605,6 @@ static void section_rel(const char *modname, struct elf_info *elf,
+ /* Skip special sections */
+ if (is_shndx_special(sym->st_shndx))
+ continue;
+- if (is_second_extable_reloc(start, rel, fromsec))
+- find_extable_entry_size(fromsec, &r);
+ check_section_mismatch(modname, elf, &r, sym, fromsec);
+ }
+ }
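The sign_extend32() helper added above recovers the signed branch offset from an ARM instruction word: the 24-bit immediate is shifted left by 2, making bit 25 the sign bit, and addend_arm_rel() then adds the symbol value plus 8 for the PC offset. A standalone check of that arithmetic with a hypothetical instruction immediate:

#include <stdio.h>
#include <stdint.h>

/* helper reproduced from the modpost hunk above */
static int32_t sign_extend32(int32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t inst = 0x00fffffe;	/* hypothetical word: 24-bit immediate 0xfffffe (-2) */
	int32_t offset = sign_extend32((inst & 0x00ffffff) << 2, 25);

	printf("offset = %d\n", (int)offset);	/* prints -8 */
	return 0;
}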
+diff --git a/scripts/package/builddeb b/scripts/package/builddeb
+index 7b23f52c70c5f..a0af4c0f971ca 100755
+--- a/scripts/package/builddeb
++++ b/scripts/package/builddeb
+@@ -62,18 +62,14 @@ install_linux_image () {
+ ${MAKE} -f ${srctree}/Makefile INSTALL_DTBS_PATH="${pdir}/usr/lib/linux-image-${KERNELRELEASE}" dtbs_install
+ fi
+
+- if is_enabled CONFIG_MODULES; then
+- ${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${pdir}" modules_install
+- rm -f "${pdir}/lib/modules/${KERNELRELEASE}/build"
+- rm -f "${pdir}/lib/modules/${KERNELRELEASE}/source"
+- if [ "${SRCARCH}" = um ] ; then
+- mkdir -p "${pdir}/usr/lib/uml/modules"
+- mv "${pdir}/lib/modules/${KERNELRELEASE}" "${pdir}/usr/lib/uml/modules/${KERNELRELEASE}"
+- fi
+- fi
++ ${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${pdir}" modules_install
++ rm -f "${pdir}/lib/modules/${KERNELRELEASE}/build"
++ rm -f "${pdir}/lib/modules/${KERNELRELEASE}/source"
+
+ # Install the kernel
+ if [ "${ARCH}" = um ] ; then
++ mkdir -p "${pdir}/usr/lib/uml/modules"
++ mv "${pdir}/lib/modules/${KERNELRELEASE}" "${pdir}/usr/lib/uml/modules/${KERNELRELEASE}"
+ mkdir -p "${pdir}/usr/bin" "${pdir}/usr/share/doc/${pname}"
+ cp System.map "${pdir}/usr/lib/uml/modules/${KERNELRELEASE}/System.map"
+ cp ${KCONFIG_CONFIG} "${pdir}/usr/share/doc/${pname}/config"
+diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
+index 033804f5a5f20..0dae649f3740c 100644
+--- a/security/integrity/evm/evm_crypto.c
++++ b/security/integrity/evm/evm_crypto.c
+@@ -40,7 +40,7 @@ static const char evm_hmac[] = "hmac(sha1)";
+ /**
+ * evm_set_key() - set EVM HMAC key from the kernel
+ * @key: pointer to a buffer with the key data
+- * @size: length of the key data
++ * @keylen: length of the key data
+ *
+ * This function allows setting the EVM HMAC key from the kernel
+ * without using the "encrypted" key subsystem keys. It can be used
+diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
+index cf24c5255583c..c9b6e2a43478a 100644
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -318,7 +318,6 @@ int evm_protected_xattr_if_enabled(const char *req_xattr_name)
+ /**
+ * evm_read_protected_xattrs - read EVM protected xattr names, lengths, values
+ * @dentry: dentry of the read xattrs
+- * @inode: inode of the read xattrs
+ * @buffer: buffer xattr names, lengths or values are copied to
+ * @buffer_size: size of buffer
+ * @type: n: names, l: lengths, v: values
+@@ -390,6 +389,7 @@ int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
+ * @xattr_name: requested xattr
+ * @xattr_value: requested xattr value
+ * @xattr_value_len: requested xattr value length
++ * @iint: inode integrity metadata
+ *
+ * Calculate the HMAC for the given dentry and verify it against the stored
+ * security.evm xattr. For performance, use the xattr value and length
+@@ -795,7 +795,9 @@ static int evm_attr_change(struct mnt_idmap *idmap,
+
+ /**
+ * evm_inode_setattr - prevent updating an invalid EVM extended attribute
++ * @idmap: idmap of the mount
+ * @dentry: pointer to the affected dentry
++ * @attr: iattr structure containing the new file attributes
+ *
+ * Permit update of file attributes when files have a valid EVM signature,
+ * except in the case of them having an immutable portable signature.
+diff --git a/security/integrity/ima/ima_modsig.c b/security/integrity/ima/ima_modsig.c
+index fb25723c65bc4..3e7bee30080f2 100644
+--- a/security/integrity/ima/ima_modsig.c
++++ b/security/integrity/ima/ima_modsig.c
+@@ -89,6 +89,9 @@ int ima_read_modsig(enum ima_hooks func, const void *buf, loff_t buf_len,
+
+ /**
+ * ima_collect_modsig - Calculate the file hash without the appended signature.
++ * @modsig: parsed module signature
++ * @buf: data to verify the signature on
++ * @size: data size
+ *
+ * Since the modsig is part of the file contents, the hash used in its signature
+ * isn't the same one ordinarily calculated by IMA. Therefore PKCS7 code
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index 3ca8b7348c2e4..c9b3bd8f1bb9c 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -721,6 +721,7 @@ static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func)
+ * @secid: LSM secid of the task to be validated
+ * @func: IMA hook identifier
+ * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
++ * @flags: IMA actions to consider (e.g. IMA_MEASURE | IMA_APPRAISE)
+ * @pcr: set the pcr to extend
+ * @template_desc: the template that should be used for this rule
+ * @func_data: func specific data, may be NULL
+@@ -1915,7 +1916,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+
+ /**
+ * ima_parse_add_rule - add a rule to ima_policy_rules
+- * @rule - ima measurement policy rule
++ * @rule: ima measurement policy rule
+ *
+ * Avoid locking by allowing just one writer at a time in ima_write_policy()
+ * Returns the length of the rule parsed, an error code on failure
+diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
+index 9afc5906d662e..80a65b8ad7b9b 100644
+--- a/sound/pci/ac97/ac97_codec.c
++++ b/sound/pci/ac97/ac97_codec.c
+@@ -2069,8 +2069,8 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
+ .dev_disconnect = snd_ac97_dev_disconnect,
+ };
+
+- if (rac97)
+- *rac97 = NULL;
++ if (!rac97)
++ return -EINVAL;
+ if (snd_BUG_ON(!bus || !template))
+ return -EINVAL;
+ if (snd_BUG_ON(template->num >= 4))
+diff --git a/sound/soc/amd/acp/acp-pdm.c b/sound/soc/amd/acp/acp-pdm.c
+index 66ec6b6a59723..f8030b79ac17c 100644
+--- a/sound/soc/amd/acp/acp-pdm.c
++++ b/sound/soc/amd/acp/acp-pdm.c
+@@ -176,7 +176,7 @@ static void acp_dmic_dai_shutdown(struct snd_pcm_substream *substream,
+
+ /* Disable DMIC interrupts */
+ ext_int_ctrl = readl(ACP_EXTERNAL_INTR_CNTL(adata, 0));
+- ext_int_ctrl |= ~PDM_DMA_INTR_MASK;
++ ext_int_ctrl &= ~PDM_DMA_INTR_MASK;
+ writel(ext_int_ctrl, ACP_EXTERNAL_INTR_CNTL(adata, 0));
+ }
+
+diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
+index f7d7a9c91e04c..87775378362e7 100644
+--- a/sound/soc/codecs/es8316.c
++++ b/sound/soc/codecs/es8316.c
+@@ -52,7 +52,12 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(dac_vol_tlv, -9600, 50, 1);
+ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(adc_vol_tlv, -9600, 50, 1);
+ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_max_gain_tlv, -650, 150, 0);
+ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_min_gain_tlv, -1200, 150, 0);
+-static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_target_tlv, -1650, 150, 0);
++
++static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(alc_target_tlv,
++ 0, 10, TLV_DB_SCALE_ITEM(-1650, 150, 0),
++ 11, 11, TLV_DB_SCALE_ITEM(-150, 0, 0),
++);
++
+ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpmixer_gain_tlv,
+ 0, 4, TLV_DB_SCALE_ITEM(-1200, 150, 0),
+ 8, 11, TLV_DB_SCALE_ITEM(-450, 150, 0),
+@@ -115,7 +120,7 @@ static const struct snd_kcontrol_new es8316_snd_controls[] = {
+ alc_max_gain_tlv),
+ SOC_SINGLE_TLV("ALC Capture Min Volume", ES8316_ADC_ALC2, 0, 28, 0,
+ alc_min_gain_tlv),
+- SOC_SINGLE_TLV("ALC Capture Target Volume", ES8316_ADC_ALC3, 4, 10, 0,
++ SOC_SINGLE_TLV("ALC Capture Target Volume", ES8316_ADC_ALC3, 4, 11, 0,
+ alc_target_tlv),
+ SOC_SINGLE("ALC Capture Hold Time", ES8316_ADC_ALC3, 0, 10, 0),
+ SOC_SINGLE("ALC Capture Decay Time", ES8316_ADC_ALC4, 4, 10, 0),
+@@ -364,13 +369,11 @@ static int es8316_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ int count = 0;
+
+ es8316->sysclk = freq;
++ es8316->sysclk_constraints.list = NULL;
++ es8316->sysclk_constraints.count = 0;
+
+- if (freq == 0) {
+- es8316->sysclk_constraints.list = NULL;
+- es8316->sysclk_constraints.count = 0;
+-
++ if (freq == 0)
+ return 0;
+- }
+
+ ret = clk_set_rate(es8316->mclk, freq);
+ if (ret)
+@@ -386,8 +389,10 @@ static int es8316_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ es8316->allowed_rates[count++] = freq / ratio;
+ }
+
+- es8316->sysclk_constraints.list = es8316->allowed_rates;
+- es8316->sysclk_constraints.count = count;
++ if (count) {
++ es8316->sysclk_constraints.list = es8316->allowed_rates;
++ es8316->sysclk_constraints.count = count;
++ }
+
+ return 0;
+ }
+diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
+index 1292a845c4244..d8e99b263ab21 100644
+--- a/sound/soc/fsl/imx-audmix.c
++++ b/sound/soc/fsl/imx-audmix.c
+@@ -228,6 +228,8 @@ static int imx_audmix_probe(struct platform_device *pdev)
+
+ dai_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s%s",
+ fe_name_pref, args.np->full_name + 1);
++ if (!dai_name)
++ return -ENOMEM;
+
+ dev_info(pdev->dev.parent, "DAI FE name:%s\n", dai_name);
+
+@@ -236,6 +238,8 @@ static int imx_audmix_probe(struct platform_device *pdev)
+ capture_dai_name =
+ devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s %s",
+ dai_name, "CPU-Capture");
++ if (!capture_dai_name)
++ return -ENOMEM;
+ }
+
+ priv->dai[i].cpus = &dlc[0];
+@@ -266,6 +270,8 @@ static int imx_audmix_probe(struct platform_device *pdev)
+ "AUDMIX-Playback-%d", i);
+ be_cp = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "AUDMIX-Capture-%d", i);
++ if (!be_name || !be_pb || !be_cp)
++ return -ENOMEM;
+
+ priv->dai[num_dai + i].cpus = &dlc[3];
+ priv->dai[num_dai + i].codecs = &dlc[4];
+@@ -293,6 +299,9 @@ static int imx_audmix_probe(struct platform_device *pdev)
+ priv->dapm_routes[i].source =
+ devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s %s",
+ dai_name, "CPU-Playback");
++ if (!priv->dapm_routes[i].source)
++ return -ENOMEM;
++
+ priv->dapm_routes[i].sink = be_pb;
+ priv->dapm_routes[num_dai + i].source = be_pb;
+ priv->dapm_routes[num_dai + i].sink = be_cp;
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 767fa89d08708..1ac5abc721c68 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -413,7 +413,7 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_mtlrvp"),
+ },
+- .driver_data = (void *)(RT711_JD1 | SOF_SDW_TGL_HDMI),
++ .driver_data = (void *)(RT711_JD1),
+ },
+ {}
+ };
+diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
+index da16e6a27cccd..0675d6a464138 100644
+--- a/tools/bpf/bpftool/feature.c
++++ b/tools/bpf/bpftool/feature.c
+@@ -167,12 +167,12 @@ static int get_vendor_id(int ifindex)
+ return strtol(buf, NULL, 0);
+ }
+
+-static int read_procfs(const char *path)
++static long read_procfs(const char *path)
+ {
+ char *endptr, *line = NULL;
+ size_t len = 0;
+ FILE *fd;
+- int res;
++ long res;
+
+ fd = fopen(path, "r");
+ if (!fd)
+@@ -194,7 +194,7 @@ static int read_procfs(const char *path)
+
+ static void probe_unprivileged_disabled(void)
+ {
+- int res;
++ long res;
+
+ /* No support for C-style ouptut */
+
+@@ -216,14 +216,14 @@ static void probe_unprivileged_disabled(void)
+ printf("Unable to retrieve required privileges for bpf() syscall\n");
+ break;
+ default:
+- printf("bpf() syscall restriction has unknown value %d\n", res);
++ printf("bpf() syscall restriction has unknown value %ld\n", res);
+ }
+ }
+ }
+
+ static void probe_jit_enable(void)
+ {
+- int res;
++ long res;
+
+ /* No support for C-style ouptut */
+
+@@ -245,7 +245,7 @@ static void probe_jit_enable(void)
+ printf("Unable to retrieve JIT-compiler status\n");
+ break;
+ default:
+- printf("JIT-compiler status has unknown value %d\n",
++ printf("JIT-compiler status has unknown value %ld\n",
+ res);
+ }
+ }
+@@ -253,7 +253,7 @@ static void probe_jit_enable(void)
+
+ static void probe_jit_harden(void)
+ {
+- int res;
++ long res;
+
+ /* No support for C-style ouptut */
+
+@@ -275,7 +275,7 @@ static void probe_jit_harden(void)
+ printf("Unable to retrieve JIT hardening status\n");
+ break;
+ default:
+- printf("JIT hardening status has unknown value %d\n",
++ printf("JIT hardening status has unknown value %ld\n",
+ res);
+ }
+ }
+@@ -283,7 +283,7 @@ static void probe_jit_harden(void)
+
+ static void probe_jit_kallsyms(void)
+ {
+- int res;
++ long res;
+
+ /* No support for C-style ouptut */
+
+@@ -302,14 +302,14 @@ static void probe_jit_kallsyms(void)
+ printf("Unable to retrieve JIT kallsyms export status\n");
+ break;
+ default:
+- printf("JIT kallsyms exports status has unknown value %d\n", res);
++ printf("JIT kallsyms exports status has unknown value %ld\n", res);
+ }
+ }
+ }
+
+ static void probe_jit_limit(void)
+ {
+- int res;
++ long res;
+
+ /* No support for C-style ouptut */
+
+@@ -322,7 +322,7 @@ static void probe_jit_limit(void)
+ printf("Unable to retrieve global memory limit for JIT compiler for unprivileged users\n");
+ break;
+ default:
+- printf("Global memory limit for JIT compiler for unprivileged users is %d bytes\n", res);
++ printf("Global memory limit for JIT compiler for unprivileged users is %ld bytes\n", res);
+ }
+ }
+ }
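The bpftool hunks above widen read_procfs() from int to long because sysctl values such as /proc/sys/net/core/bpf_jit_limit can exceed INT_MAX on 64-bit kernels. A simplified standalone reader showing the same parse (fgets() here instead of bpftool's getline() loop):

#include <stdio.h>
#include <stdlib.h>

static long read_procfs(const char *path)
{
	char buf[64];
	FILE *fd = fopen(path, "r");
	long res;

	if (!fd)
		return -1;
	if (!fgets(buf, sizeof(buf), fd)) {
		fclose(fd);
		return -1;
	}
	fclose(fd);

	res = strtol(buf, NULL, 0);
	return res;
}

int main(void)
{
	long limit = read_procfs("/proc/sys/net/core/bpf_jit_limit");

	/* may well exceed INT_MAX, hence the long return type */
	printf("bpf_jit_limit = %ld\n", limit);
	return 0;
}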
+diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
+index ac548a7baa73e..4b8079f294f65 100644
+--- a/tools/bpf/resolve_btfids/Makefile
++++ b/tools/bpf/resolve_btfids/Makefile
+@@ -67,7 +67,7 @@ $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OU
+ LIBELF_FLAGS := $(shell $(HOSTPKG_CONFIG) libelf --cflags 2>/dev/null)
+ LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
+
+-HOSTCFLAGS += -g \
++HOSTCFLAGS_resolve_btfids += -g \
+ -I$(srctree)/tools/include \
+ -I$(srctree)/tools/include/uapi \
+ -I$(LIBBPF_INCLUDE) \
+@@ -76,7 +76,7 @@ HOSTCFLAGS += -g \
+
+ LIBS = $(LIBELF_LIBS) -lz
+
+-export srctree OUTPUT HOSTCFLAGS Q HOSTCC HOSTLD HOSTAR
++export srctree OUTPUT HOSTCFLAGS_resolve_btfids Q HOSTCC HOSTLD HOSTAR
+ include $(srctree)/tools/build/Makefile.include
+
+ $(BINARY_IN): fixdep FORCE prepare | $(OUTPUT)
+diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
+index 5ec1871acb2fc..85a29cd69154e 100644
+--- a/tools/lib/bpf/bpf_helpers.h
++++ b/tools/lib/bpf/bpf_helpers.h
+@@ -77,16 +77,21 @@
+ /*
+ * Helper macros to manipulate data structures
+ */
+-#ifndef offsetof
+-#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
+-#endif
+-#ifndef container_of
++
++/* offsetof() definition that uses __builtin_offset() might not preserve field
++ * offset CO-RE relocation properly, so force-redefine offsetof() using
++ * old-school approach which works with CO-RE correctly
++ */
++#undef offsetof
++#define offsetof(type, member) ((unsigned long)&((type *)0)->member)
++
++/* redefined container_of() to ensure we use the above offsetof() macro */
++#undef container_of
+ #define container_of(ptr, type, member) \
+ ({ \
+ void *__mptr = (void *)(ptr); \
+ ((type *)(__mptr - offsetof(type, member))); \
+ })
+-#endif
+
+ /*
+ * Compiler (optimization) barrier.
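The bpf_helpers.h hunk above force-redefines offsetof() in the classic null-pointer form and rebuilds container_of() on top of it so that field-offset CO-RE relocations survive. A small host-side demonstration of the same two macros, compiled as ordinary GNU C (statement expressions and void-pointer arithmetic, as in the header) rather than as a BPF program:

#include <stdio.h>

#undef offsetof
#define offsetof(type, member) ((unsigned long)&((type *)0)->member)

#undef container_of
#define container_of(ptr, type, member)				\
({								\
	void *__mptr = (void *)(ptr);				\
	((type *)(__mptr - offsetof(type, member)));		\
})

struct task {
	int pid;
	int prio;
};

int main(void)
{
	struct task t = { .pid = 42, .prio = 3 };
	int *prio = &t.prio;
	struct task *back = container_of(prio, struct task, prio);

	printf("offsetof(prio) = %lu, pid via container_of = %d\n",
	       offsetof(struct task, prio), back->pid);	/* typically "4, 42" */
	return 0;
}

The null-pointer form keeps the member access visible to clang's CO-RE relocation machinery, which is why the header prefers it over a __builtin_offsetof()-based definition.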
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index 580985ee55458..4d9f30bf7f014 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -2250,9 +2250,25 @@ static int btf_dump_type_data_check_overflow(struct btf_dump *d,
+ const struct btf_type *t,
+ __u32 id,
+ const void *data,
+- __u8 bits_offset)
++ __u8 bits_offset,
++ __u8 bit_sz)
+ {
+- __s64 size = btf__resolve_size(d->btf, id);
++ __s64 size;
++
++ if (bit_sz) {
++ /* bits_offset is at most 7. bit_sz is at most 128. */
++ __u8 nr_bytes = (bits_offset + bit_sz + 7) / 8;
++
++ /* When bit_sz is non zero, it is called from
++ * btf_dump_struct_data() where it only cares about
++ * negative error value.
++ * Return nr_bytes in success case to make it
++ * consistent as the regular integer case below.
++ */
++ return data + nr_bytes > d->typed_dump->data_end ? -E2BIG : nr_bytes;
++ }
++
++ size = btf__resolve_size(d->btf, id);
+
+ if (size < 0 || size >= INT_MAX) {
+ pr_warn("unexpected size [%zu] for id [%u]\n",
+@@ -2407,7 +2423,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d,
+ {
+ int size, err = 0;
+
+- size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset);
++ size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset, bit_sz);
+ if (size < 0)
+ return size;
+ err = btf_dump_type_data_check_zero(d, t, id, data, bits_offset, bit_sz);
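For the bitfield branch added above, the bounds check rounds the bitfield up to whole bytes before comparing against data_end: with the stated maxima bits_offset = 7 and bit_sz = 128, nr_bytes = (7 + 128 + 7) / 8 = 17. A standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int bits_offset = 7;	/* at most 7 */
	unsigned int bit_sz = 128;	/* at most 128 */
	unsigned int nr_bytes = (bits_offset + bit_sz + 7) / 8;

	printf("nr_bytes = %u\n", nr_bytes);	/* prints 17 */
	return 0;
}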
+diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
+index 195ccfdef7aa1..005907cb97d8c 100644
+--- a/tools/perf/arch/x86/util/Build
++++ b/tools/perf/arch/x86/util/Build
+@@ -10,6 +10,7 @@ perf-y += evlist.o
+ perf-y += mem-events.o
+ perf-y += evsel.o
+ perf-y += iostat.o
++perf-y += env.o
+
+ perf-$(CONFIG_DWARF) += dwarf-regs.o
+ perf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
+diff --git a/tools/perf/arch/x86/util/env.c b/tools/perf/arch/x86/util/env.c
+new file mode 100644
+index 0000000000000..3e537ffb1353a
+--- /dev/null
++++ b/tools/perf/arch/x86/util/env.c
+@@ -0,0 +1,19 @@
++// SPDX-License-Identifier: GPL-2.0
++#include "linux/string.h"
++#include "util/env.h"
++#include "env.h"
++
++bool x86__is_amd_cpu(void)
++{
++ struct perf_env env = { .total_mem = 0, };
++ static int is_amd; /* 0: Uninitialized, 1: Yes, -1: No */
++
++ if (is_amd)
++ goto ret;
++
++ perf_env__cpuid(&env);
++ is_amd = env.cpuid && strstarts(env.cpuid, "AuthenticAMD") ? 1 : -1;
++ perf_env__exit(&env);
++ret:
++ return is_amd >= 1 ? true : false;
++}
+diff --git a/tools/perf/arch/x86/util/env.h b/tools/perf/arch/x86/util/env.h
+new file mode 100644
+index 0000000000000..d78f080b6b3f8
+--- /dev/null
++++ b/tools/perf/arch/x86/util/env.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _X86_ENV_H
++#define _X86_ENV_H
++
++bool x86__is_amd_cpu(void);
++
++#endif /* _X86_ENV_H */
+diff --git a/tools/perf/arch/x86/util/evsel.c b/tools/perf/arch/x86/util/evsel.c
+index ea3972d785d10..d72390cdf391d 100644
+--- a/tools/perf/arch/x86/util/evsel.c
++++ b/tools/perf/arch/x86/util/evsel.c
+@@ -7,6 +7,7 @@
+ #include "linux/string.h"
+ #include "evsel.h"
+ #include "util/debug.h"
++#include "env.h"
+
+ #define IBS_FETCH_L3MISSONLY (1ULL << 59)
+ #define IBS_OP_L3MISSONLY (1ULL << 16)
+@@ -97,23 +98,10 @@ void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
+ {
+ struct perf_pmu *evsel_pmu, *ibs_fetch_pmu, *ibs_op_pmu;
+ static int warned_once;
+- /* 0: Uninitialized, 1: Yes, -1: No */
+- static int is_amd;
+
+- if (warned_once || is_amd == -1)
++ if (warned_once || !x86__is_amd_cpu())
+ return;
+
+- if (!is_amd) {
+- struct perf_env *env = evsel__env(evsel);
+-
+- if (!perf_env__cpuid(env) || !env->cpuid ||
+- !strstarts(env->cpuid, "AuthenticAMD")) {
+- is_amd = -1;
+- return;
+- }
+- is_amd = 1;
+- }
+-
+ evsel_pmu = evsel__find_pmu(evsel);
+ if (!evsel_pmu)
+ return;
+diff --git a/tools/perf/arch/x86/util/mem-events.c b/tools/perf/arch/x86/util/mem-events.c
+index f683ac702247c..efc0fae9ed0a7 100644
+--- a/tools/perf/arch/x86/util/mem-events.c
++++ b/tools/perf/arch/x86/util/mem-events.c
+@@ -4,6 +4,7 @@
+ #include "map_symbol.h"
+ #include "mem-events.h"
+ #include "linux/string.h"
++#include "env.h"
+
+ static char mem_loads_name[100];
+ static bool mem_loads_name__init;
+@@ -26,28 +27,12 @@ static struct perf_mem_event perf_mem_events_amd[PERF_MEM_EVENTS__MAX] = {
+ E("mem-ldst", "ibs_op//", "ibs_op"),
+ };
+
+-static int perf_mem_is_amd_cpu(void)
+-{
+- struct perf_env env = { .total_mem = 0, };
+-
+- perf_env__cpuid(&env);
+- if (env.cpuid && strstarts(env.cpuid, "AuthenticAMD"))
+- return 1;
+- return -1;
+-}
+-
+ struct perf_mem_event *perf_mem_events__ptr(int i)
+ {
+- /* 0: Uninitialized, 1: Yes, -1: No */
+- static int is_amd;
+-
+ if (i >= PERF_MEM_EVENTS__MAX)
+ return NULL;
+
+- if (!is_amd)
+- is_amd = perf_mem_is_amd_cpu();
+-
+- if (is_amd == 1)
++ if (x86__is_amd_cpu())
+ return &perf_mem_events_amd[i];
+
+ return &perf_mem_events_intel[i];
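The perf hunks above centralize AMD detection in the new x86__is_amd_cpu(), which caches a perf_env__cpuid() lookup and tests for the "AuthenticAMD" vendor string. A standalone sketch of the same vendor check, assuming an x86 host and the compiler-provided <cpuid.h> (perf itself goes through perf_env, not __get_cpuid()):

#include <stdio.h>
#include <string.h>
#include <cpuid.h>

static int is_amd_cpu(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 0;

	/* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order */
	memcpy(vendor, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';

	return strcmp(vendor, "AuthenticAMD") == 0;
}

int main(void)
{
	printf("AMD CPU: %s\n", is_amd_cpu() ? "yes" : "no");
	return 0;
}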
+diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
+index 814e9afc86f6e..5efb29a7564af 100644
+--- a/tools/perf/builtin-bench.c
++++ b/tools/perf/builtin-bench.c
+@@ -21,6 +21,7 @@
+ #include "builtin.h"
+ #include "bench/bench.h"
+
++#include <locale.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+@@ -258,6 +259,7 @@ int cmd_bench(int argc, const char **argv)
+
+ /* Unbuffered output */
+ setvbuf(stdout, NULL, _IONBF, 0);
++ setlocale(LC_ALL, "");
+
+ if (argc < 2) {
+ /* No collection specified. */
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index d8c174a719383..72a3faa28c394 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -2425,6 +2425,9 @@ out_put:
+ return ret;
+ }
+
++// Used when scr->per_event_dump is not set
++static struct evsel_script es_stdout;
++
+ static int process_attr(struct perf_tool *tool, union perf_event *event,
+ struct evlist **pevlist)
+ {
+@@ -2433,7 +2436,6 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
+ struct evsel *evsel, *pos;
+ u64 sample_type;
+ int err;
+- static struct evsel_script *es;
+
+ err = perf_event__process_attr(tool, event, pevlist);
+ if (err)
+@@ -2443,14 +2445,13 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
+ evsel = evlist__last(*pevlist);
+
+ if (!evsel->priv) {
+- if (scr->per_event_dump) {
++ if (scr->per_event_dump) {
+ evsel->priv = evsel_script__new(evsel, scr->session->data);
+- } else {
+- es = zalloc(sizeof(*es));
+- if (!es)
++ if (!evsel->priv)
+ return -ENOMEM;
+- es->fp = stdout;
+- evsel->priv = es;
++ } else { // Replicate what is done in perf_script__setup_per_event_dump()
++ es_stdout.fp = stdout;
++ evsel->priv = &es_stdout;
+ }
+ }
+
+@@ -2756,7 +2757,6 @@ out_err_fclose:
+ static int perf_script__setup_per_event_dump(struct perf_script *script)
+ {
+ struct evsel *evsel;
+- static struct evsel_script es_stdout;
+
+ if (script->per_event_dump)
+ return perf_script__fopen_per_event_dump(script);
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index eeba93ae3b584..f5a6d08cf07f6 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -777,6 +777,8 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ all_counters_use_bpf = false;
+ }
+
++ evlist__reset_aggr_stats(evsel_list);
++
+ evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
+ counter = evlist_cpu_itr.evsel;
+
+diff --git a/tools/perf/tests/shell/test_task_analyzer.sh b/tools/perf/tests/shell/test_task_analyzer.sh
+index a98e4ab66040e..365b61aea519a 100755
+--- a/tools/perf/tests/shell/test_task_analyzer.sh
++++ b/tools/perf/tests/shell/test_task_analyzer.sh
+@@ -5,6 +5,12 @@
+ tmpdir=$(mktemp -d /tmp/perf-script-task-analyzer-XXXXX)
+ err=0
+
++# set PERF_EXEC_PATH to find scripts in the source directory
++perfdir=$(dirname "$0")/../..
++if [ -e "$perfdir/scripts/python/Perf-Trace-Util" ]; then
++ export PERF_EXEC_PATH=$perfdir
++fi
++
+ cleanup() {
+ rm -f perf.data
+ rm -f perf.data.old
+@@ -31,7 +37,7 @@ report() {
+
+ check_exec_0() {
+ if [ $? != 0 ]; then
+- report 1 "invokation of ${$1} command failed"
++ report 1 "invocation of $1 command failed"
+ fi
+ }
+
+@@ -44,9 +50,20 @@ find_str_or_fail() {
+ fi
+ }
+
++# check if perf is compiled with libtraceevent support
++skip_no_probe_record_support() {
++ perf record -e "sched:sched_switch" -a -- sleep 1 2>&1 | grep "libtraceevent is necessary for tracepoint support" && return 2
++ return 0
++}
++
+ prepare_perf_data() {
+ # 1s should be sufficient to catch at least some switches
+ perf record -e sched:sched_switch -a -- sleep 1 > /dev/null 2>&1
++ # check if perf data file got created in above step.
++ if [ ! -e "perf.data" ]; then
++ printf "FAIL: perf record failed to create \"perf.data\" \n"
++ return 1
++ fi
+ }
+
+ # check standard inkvokation with no arguments
+@@ -134,6 +151,13 @@ test_csvsummary_extended() {
+ find_str_or_fail "Out-Out;" csvsummary ${FUNCNAME[0]}
+ }
+
++skip_no_probe_record_support
++err=$?
++if [ $err -ne 0 ]; then
++ echo "WARN: Skipping tests. No libtraceevent support"
++ cleanup
++ exit $err
++fi
+ prepare_perf_data
+ test_basic
+ test_ns_rename
+diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
+index b074144097710..3bff678745635 100644
+--- a/tools/perf/util/dwarf-aux.c
++++ b/tools/perf/util/dwarf-aux.c
+@@ -1103,7 +1103,7 @@ int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf)
+ ret = die_get_typename(vr_die, buf);
+ if (ret < 0) {
+ pr_debug("Failed to get type, make it unknown.\n");
+- ret = strbuf_add(buf, " (unknown_type)", 14);
++ ret = strbuf_add(buf, "(unknown_type)", 14);
+ }
+
+ return ret < 0 ? ret : strbuf_addf(buf, "\t%s", dwarf_diename(vr_die));
+diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
+index 1a7358b46ad4e..72549fd79992b 100644
+--- a/tools/perf/util/evsel.h
++++ b/tools/perf/util/evsel.h
+@@ -457,16 +457,24 @@ static inline int evsel__group_idx(struct evsel *evsel)
+ }
+
+ /* Iterates group WITHOUT the leader. */
+-#define for_each_group_member(_evsel, _leader) \
+-for ((_evsel) = list_entry((_leader)->core.node.next, struct evsel, core.node); \
+- (_evsel) && (_evsel)->core.leader == (&_leader->core); \
+- (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
++#define for_each_group_member_head(_evsel, _leader, _head) \
++for ((_evsel) = list_entry((_leader)->core.node.next, struct evsel, core.node); \
++ (_evsel) && &(_evsel)->core.node != (_head) && \
++ (_evsel)->core.leader == &(_leader)->core; \
++ (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
++
++#define for_each_group_member(_evsel, _leader) \
++ for_each_group_member_head(_evsel, _leader, &(_leader)->evlist->core.entries)
+
+ /* Iterates group WITH the leader. */
+-#define for_each_group_evsel(_evsel, _leader) \
+-for ((_evsel) = _leader; \
+- (_evsel) && (_evsel)->core.leader == (&_leader->core); \
+- (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
++#define for_each_group_evsel_head(_evsel, _leader, _head) \
++for ((_evsel) = _leader; \
++ (_evsel) && &(_evsel)->core.node != (_head) && \
++ (_evsel)->core.leader == &(_leader)->core; \
++ (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
++
++#define for_each_group_evsel(_evsel, _leader) \
++ for_each_group_evsel_head(_evsel, _leader, &(_leader)->evlist->core.entries)
+
+ static inline bool evsel__has_branch_callstack(const struct evsel *evsel)
+ {
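The evsel.h macros above gain a _head bound so group iteration stops at the evlist head rather than following ->next past the last entry. A standalone sketch of the same idea on a hand-rolled circular list, with hypothetical node/leader fields instead of the perf core types:

#include <stdio.h>

struct node {
	struct node *next;
	struct node *leader;
	int id;
};

int main(void)
{
	struct node head = { .next = &head };		/* list head, not a member */
	struct node a = { .leader = &a, .id = 1 };	/* group leader */
	struct node b = { .next = &head, .leader = &a, .id = 2 };

	/* head -> a -> b -> back to head */
	head.next = &a;
	a.next = &b;

	/* group members of 'a' (leader excluded), bounded by the list head */
	for (struct node *n = a.next; n != &head && n->leader == &a; n = n->next)
		printf("member %d\n", n->id);

	return 0;
}

In the kernel version the head is an embedded list_head rather than an evsel, so reading its would-be leader field would be reading unrelated memory; the explicit head comparison is what the extra bound avoids relying on.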
+diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
+index bd22c4932d10e..6fa3a306f301d 100644
+--- a/tools/perf/util/evsel_fprintf.c
++++ b/tools/perf/util/evsel_fprintf.c
+@@ -2,6 +2,7 @@
+ #include <inttypes.h>
+ #include <stdio.h>
+ #include <stdbool.h>
++#include "util/evlist.h"
+ #include "evsel.h"
+ #include "util/evsel_fprintf.h"
+ #include "util/event.h"
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index ad01c9e1ff12b..625eedb84eecc 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -88,8 +88,7 @@ TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
+ xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \
+ xdp_features
+
+-TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read $(OUTPUT)/sign-file
+-TEST_GEN_FILES += liburandom_read.so
++TEST_GEN_FILES += liburandom_read.so urandom_read sign-file
+
+ # Emit succinct information message describing current building step
+ # $1 - generic step name (e.g., CC, LINK, etc);
+diff --git a/tools/testing/selftests/bpf/prog_tests/check_mtu.c b/tools/testing/selftests/bpf/prog_tests/check_mtu.c
+index 5338d2ea04603..2a9a30650350e 100644
+--- a/tools/testing/selftests/bpf/prog_tests/check_mtu.c
++++ b/tools/testing/selftests/bpf/prog_tests/check_mtu.c
+@@ -183,7 +183,7 @@ cleanup:
+
+ void serial_test_check_mtu(void)
+ {
+- __u32 mtu_lo;
++ int mtu_lo;
+
+ if (test__start_subtest("bpf_check_mtu XDP-attach"))
+ test_check_mtu_xdp_attach();
+diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
+index 8b9949bb833d7..2e9bdf2e91351 100644
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -1232,45 +1232,46 @@ static bool cmp_str_seq(const char *log, const char *exp)
+ return true;
+ }
+
+-static int get_xlated_program(int fd_prog, struct bpf_insn **buf, int *cnt)
++static struct bpf_insn *get_xlated_program(int fd_prog, int *cnt)
+ {
++ __u32 buf_element_size = sizeof(struct bpf_insn);
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ __u32 xlated_prog_len;
+- __u32 buf_element_size = sizeof(struct bpf_insn);
++ struct bpf_insn *buf;
+
+ if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
+ perror("bpf_prog_get_info_by_fd failed");
+- return -1;
++ return NULL;
+ }
+
+ xlated_prog_len = info.xlated_prog_len;
+ if (xlated_prog_len % buf_element_size) {
+ printf("Program length %d is not multiple of %d\n",
+ xlated_prog_len, buf_element_size);
+- return -1;
++ return NULL;
+ }
+
+ *cnt = xlated_prog_len / buf_element_size;
+- *buf = calloc(*cnt, buf_element_size);
++ buf = calloc(*cnt, buf_element_size);
+ if (!buf) {
+ perror("can't allocate xlated program buffer");
+- return -ENOMEM;
++ return NULL;
+ }
+
+ bzero(&info, sizeof(info));
+ info.xlated_prog_len = xlated_prog_len;
+- info.xlated_prog_insns = (__u64)(unsigned long)*buf;
++ info.xlated_prog_insns = (__u64)(unsigned long)buf;
+ if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
+ perror("second bpf_prog_get_info_by_fd failed");
+ goto out_free_buf;
+ }
+
+- return 0;
++ return buf;
+
+ out_free_buf:
+- free(*buf);
+- return -1;
++ free(buf);
++ return NULL;
+ }
+
+ static bool is_null_insn(struct bpf_insn *insn)
+@@ -1403,7 +1404,8 @@ static bool check_xlated_program(struct bpf_test *test, int fd_prog)
+ if (!check_expected && !check_unexpected)
+ goto out;
+
+- if (get_xlated_program(fd_prog, &buf, &cnt)) {
++ buf = get_xlated_program(fd_prog, &cnt);
++ if (!buf) {
+ printf("FAIL: can't get xlated program\n");
+ result = false;
+ goto out;
+diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
+index f4f7c0aef702b..a2a90f4bfe9fe 100644
+--- a/tools/testing/selftests/cgroup/test_memcontrol.c
++++ b/tools/testing/selftests/cgroup/test_memcontrol.c
+@@ -292,6 +292,7 @@ static int test_memcg_protection(const char *root, bool min)
+ char *children[4] = {NULL};
+ const char *attribute = min ? "memory.min" : "memory.low";
+ long c[4];
++ long current;
+ int i, attempts;
+ int fd;
+
+@@ -400,7 +401,8 @@ static int test_memcg_protection(const char *root, bool min)
+ goto cleanup;
+ }
+
+- if (!values_close(cg_read_long(parent[1], "memory.current"), MB(50), 3))
++ current = min ? MB(50) : MB(30);
++ if (!values_close(cg_read_long(parent[1], "memory.current"), current, 3))
+ goto cleanup;
+
+ if (!reclaim_until(children[0], MB(10)))
+diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
+index 275491be3da2f..cafd14b1ed2ab 100755
+--- a/tools/testing/selftests/net/rtnetlink.sh
++++ b/tools/testing/selftests/net/rtnetlink.sh
+@@ -835,6 +835,7 @@ EOF
+ fi
+
+ # clean up any leftovers
++ echo 0 > /sys/bus/netdevsim/del_device
+ $probed && rmmod netdevsim
+
+ if [ $ret -ne 0 ]; then
+diff --git a/tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST.boot b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST.boot
+index f57720c52c0f9..84f6bb98ce993 100644
+--- a/tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST.boot
++++ b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST.boot
+@@ -5,4 +5,4 @@ rcutree.gp_init_delay=3
+ rcutree.gp_cleanup_delay=3
+ rcutree.kthread_prio=2
+ threadirqs
+-tree.use_softirq=0
++rcutree.use_softirq=0
+diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
+index 64f864f1f361f..8e50bfd4b710d 100644
+--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
++++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
+@@ -4,4 +4,4 @@ rcutree.gp_init_delay=3
+ rcutree.gp_cleanup_delay=3
+ rcutree.kthread_prio=2
+ threadirqs
+-tree.use_softirq=0
++rcutree.use_softirq=0
+diff --git a/tools/testing/selftests/vDSO/vdso_test_clock_getres.c b/tools/testing/selftests/vDSO/vdso_test_clock_getres.c
+index 15dcee16ff726..38d46a8bf7cba 100644
+--- a/tools/testing/selftests/vDSO/vdso_test_clock_getres.c
++++ b/tools/testing/selftests/vDSO/vdso_test_clock_getres.c
+@@ -84,12 +84,12 @@ static inline int vdso_test_clock(unsigned int clock_id)
+
+ int main(int argc, char **argv)
+ {
+- int ret;
++ int ret = 0;
+
+ #if _POSIX_TIMERS > 0
+
+ #ifdef CLOCK_REALTIME
+- ret = vdso_test_clock(CLOCK_REALTIME);
++ ret += vdso_test_clock(CLOCK_REALTIME);
+ #endif
+
+ #ifdef CLOCK_BOOTTIME