Re: Linux 6.1.28

From: Greg Kroah-Hartman
Date: Thu May 11 2023 - 10:37:32 EST


diff --git a/Documentation/block/inline-encryption.rst b/Documentation/block/inline-encryption.rst
index 4d151fbe2058..f9bf18ea6509 100644
--- a/Documentation/block/inline-encryption.rst
+++ b/Documentation/block/inline-encryption.rst
@@ -142,7 +142,7 @@ Therefore, we also introduce *blk-crypto-fallback*, which is an implementation
of inline encryption using the kernel crypto API. blk-crypto-fallback is built
into the block layer, so it works on any block device without any special setup.
Essentially, when a bio with an encryption context is submitted to a
-request_queue that doesn't support that encryption context, the block layer will
+block_device that doesn't support that encryption context, the block layer will
handle en/decryption of the bio using blk-crypto-fallback.

For encryption, the data cannot be encrypted in-place, as callers usually rely
@@ -187,7 +187,7 @@ API presented to users of the block layer

``blk_crypto_config_supported()`` allows users to check ahead of time whether
inline encryption with particular crypto settings will work on a particular
-request_queue -- either via hardware or via blk-crypto-fallback. This function
+block_device -- either via hardware or via blk-crypto-fallback. This function
takes in a ``struct blk_crypto_config`` which is like blk_crypto_key, but omits
the actual bytes of the key and instead just contains the algorithm, data unit
size, etc. This function can be useful if blk-crypto-fallback is disabled.
@@ -195,7 +195,7 @@ size, etc. This function can be useful if blk-crypto-fallback is disabled.
``blk_crypto_init_key()`` allows users to initialize a blk_crypto_key.

Users must call ``blk_crypto_start_using_key()`` before actually starting to use
-a blk_crypto_key on a request_queue (even if ``blk_crypto_config_supported()``
+a blk_crypto_key on a block_device (even if ``blk_crypto_config_supported()``
was called earlier). This is needed to initialize blk-crypto-fallback if it
will be needed. This must not be called from the data path, as this may have to
allocate resources, which may deadlock in that case.
@@ -207,7 +207,7 @@ for en/decryption. Users don't need to worry about freeing the bio_crypt_ctx
later, as that happens automatically when the bio is freed or reset.

Finally, when done using inline encryption with a blk_crypto_key on a
-request_queue, users must call ``blk_crypto_evict_key()``. This ensures that
+block_device, users must call ``blk_crypto_evict_key()``. This ensures that
the key is evicted from all keyslots it may be programmed into and unlinked from
any kernel data structures it may be linked into.

@@ -221,9 +221,9 @@ as follows:
5. ``blk_crypto_evict_key()`` (after all I/O has completed)
6. Zeroize the blk_crypto_key (this has no dedicated function)

-If a blk_crypto_key is being used on multiple request_queues, then
+If a blk_crypto_key is being used on multiple block_devices, then
``blk_crypto_config_supported()`` (if used), ``blk_crypto_start_using_key()``,
-and ``blk_crypto_evict_key()`` must be called on each request_queue.
+and ``blk_crypto_evict_key()`` must be called on each block_device.

API presented to device drivers
===============================
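
As a rough, illustrative sketch (separate from the diff itself), the key lifecycle these documentation hunks describe might look something like the following for a block_device-based user of blk-crypto. The blk-crypto function names are the real API; the helper name, the AES-256-XTS mode choice, the 4096-byte data unit size, the 8-byte DUN width, and the GFP flag are assumptions for illustration, and exact signatures and return types vary between kernel versions.

/*
 * Hedged sketch of the documented blk-crypto key lifecycle, anchored on a
 * block_device as per the updated documentation.  Error handling is
 * abbreviated; signatures may differ on older kernels.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-crypto.h>
#include <linux/string.h>

static int example_inline_crypto_io(struct block_device *bdev,
				    const u8 *raw_key, struct bio *bio,
				    const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	struct blk_crypto_key key;
	const struct blk_crypto_config cfg = {
		.crypto_mode	= BLK_ENCRYPTION_MODE_AES_256_XTS,
		.data_unit_size	= 4096,
		.dun_bytes	= 8,
	};
	int err;

	/* 1. Optional: will this work, via hardware or blk-crypto-fallback? */
	if (!blk_crypto_config_supported(bdev, &cfg))
		return -EOPNOTSUPP;

	/* 2. Initialize the blk_crypto_key from the raw key bytes. */
	err = blk_crypto_init_key(&key, raw_key,
				  BLK_ENCRYPTION_MODE_AES_256_XTS,
				  8 /* dun_bytes */, 4096 /* data_unit_size */);
	if (err)
		return err;

	/* 3. Must precede any I/O; may allocate, so never from the data path. */
	err = blk_crypto_start_using_key(bdev, &key);
	if (err)
		return err;

	/* 4. Attach the encryption context to the bio and submit it. */
	bio_crypt_set_ctx(bio, &key, dun, GFP_NOFS);
	submit_bio(bio);

	/* ... wait for all I/O that uses this key to complete ... */

	/* 5. Evict the key from any keyslots it was programmed into. */
	blk_crypto_evict_key(bdev, &key);

	/* 6. Zeroize the key material; there is no dedicated helper. */
	memzero_explicit(&key, sizeof(key));
	return 0;
}

If the key is used on more than one block_device, steps 1, 3, and 5 would be repeated per device, as the documentation above notes.
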
diff --git a/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml b/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml
index 1de11e7f33bb..c8d803097d81 100644
--- a/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml
+++ b/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml
@@ -27,6 +27,7 @@ properties:
const: 0

clocks:
+ minItems: 3
maxItems: 5

clock-names:
diff --git a/Makefile b/Makefile
index a5cfcd0a85a9..2d221b879c48 100644
--- a/Makefile
+++ b/Makefile
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
-SUBLEVEL = 27
+SUBLEVEL = 28
EXTRAVERSION =
-NAME = Hurr durr I'ma ninja sloth
+NAME = Curry Ramen

# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index 28a6a9345be5..2dbee248a126 100644
--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -612,6 +612,22 @@ &i2c3 {
clock-frequency = <100000>;
};

+&mcspi1 {
+ status = "disabled";
+};
+
+&mcspi2 {
+ status = "disabled";
+};
+
+&mcspi3 {
+ status = "disabled";
+};
+
+&mcspi4 {
+ status = "disabled";
+};
+
&usb_otg_hs {
interface-type = <0>;
usb-phy = <&usb2_phy>;
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index a39b940d5853..4b57e9f5bc64 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -1271,7 +1271,7 @@ &gfx3d1 30
gpu_opp_table: opp-table {
compatible = "operating-points-v2";

- opp-320000000 {
+ opp-450000000 {
opp-hz = /bits/ 64 <450000000>;
};

diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index b23591110bd2..02e13d8c222a 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -426,8 +426,8 @@ pcie0: pci@40000000 {
#address-cells = <3>;
#size-cells = <2>;

- ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000>,
- <0x82000000 0 0x40300000 0x40300000 0 0x00d00000>;
+ ranges = <0x81000000 0x0 0x00000000 0x40200000 0x0 0x00100000>,
+ <0x82000000 0x0 0x40300000 0x40300000 0x0 0x00d00000>;

interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi
index 90c08b51680a..0ce58cff3016 100644
--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
@@ -1085,8 +1085,8 @@ pcie0: pci@1b500000 {
#address-cells = <3>;
#size-cells = <2>;

- ranges = <0x81000000 0 0x0fe00000 0x0fe00000 0 0x00010000 /* downstream I/O */
- 0x82000000 0 0x08000000 0x08000000 0 0x07e00000>; /* non-prefetchable memory */
+ ranges = <0x81000000 0x0 0x00000000 0x0fe00000 0x0 0x00010000 /* I/O */
+ 0x82000000 0x0 0x08000000 0x08000000 0x0 0x07e00000>; /* MEM */

interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
@@ -1136,8 +1136,8 @@ pcie1: pci@1b700000 {
#address-cells = <3>;
#size-cells = <2>;

- ranges = <0x81000000 0 0x31e00000 0x31e00000 0 0x00010000 /* downstream I/O */
- 0x82000000 0 0x2e000000 0x2e000000 0 0x03e00000>; /* non-prefetchable memory */
+ ranges = <0x81000000 0x0 0x00000000 0x31e00000 0x0 0x00010000 /* I/O */
+ 0x82000000 0x0 0x2e000000 0x2e000000 0x0 0x03e00000>; /* MEM */

interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
@@ -1187,8 +1187,8 @@ pcie2: pci@1b900000 {
#address-cells = <3>;
#size-cells = <2>;

- ranges = <0x81000000 0 0x35e00000 0x35e00000 0 0x00010000 /* downstream I/O */
- 0x82000000 0 0x32000000 0x32000000 0 0x03e00000>; /* non-prefetchable memory */
+ ranges = <0x81000000 0x0 0x00000000 0x35e00000 0x0 0x00010000 /* I/O */
+ 0x82000000 0x0 0x32000000 0x32000000 0x0 0x03e00000>; /* MEM */

interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi
index 29fdf29fdb8c..a4bf1d5ee206 100644
--- a/arch/arm/boot/dts/qcom-sdx55.dtsi
+++ b/arch/arm/boot/dts/qcom-sdx55.dtsi
@@ -303,6 +303,45 @@ qpic_nand: nand-controller@1b30000 {
status = "disabled";
};

+ pcie_ep: pcie-ep@1c00000 {
+ compatible = "qcom,sdx55-pcie-ep";
+ reg = <0x01c00000 0x3000>,
+ <0x40000000 0xf1d>,
+ <0x40000f20 0xc8>,
+ <0x40001000 0x1000>,
+ <0x40200000 0x100000>,
+ <0x01c03000 0x3000>;
+ reg-names = "parf", "dbi", "elbi", "atu", "addr_space",
+ "mmio";
+
+ qcom,perst-regs = <&tcsr 0xb258 0xb270>;
+
+ clocks = <&gcc GCC_PCIE_AUX_CLK>,
+ <&gcc GCC_PCIE_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_SLV_AXI_CLK>,
+ <&gcc GCC_PCIE_SLV_Q2A_AXI_CLK>,
+ <&gcc GCC_PCIE_SLEEP_CLK>,
+ <&gcc GCC_PCIE_0_CLKREF_CLK>;
+ clock-names = "aux", "cfg", "bus_master", "bus_slave",
+ "slave_q2a", "sleep", "ref";
+
+ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global", "doorbell";
+ reset-gpios = <&tlmm 57 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 53 GPIO_ACTIVE_LOW>;
+ resets = <&gcc GCC_PCIE_BCR>;
+ reset-names = "core";
+ power-domains = <&gcc PCIE_GDSC>;
+ phys = <&pcie0_lane>;
+ phy-names = "pciephy";
+ max-link-speed = <3>;
+ num-lanes = <2>;
+
+ status = "disabled";
+ };
+
pcie0_phy: phy@1c07000 {
compatible = "qcom,sdx55-qmp-pcie-phy";
reg = <0x01c07000 0x1c4>;
@@ -400,45 +439,6 @@ sdhc_1: mmc@8804000 {
status = "disabled";
};

- pcie_ep: pcie-ep@40000000 {
- compatible = "qcom,sdx55-pcie-ep";
- reg = <0x01c00000 0x3000>,
- <0x40000000 0xf1d>,
- <0x40000f20 0xc8>,
- <0x40001000 0x1000>,
- <0x40200000 0x100000>,
- <0x01c03000 0x3000>;
- reg-names = "parf", "dbi", "elbi", "atu", "addr_space",
- "mmio";
-
- qcom,perst-regs = <&tcsr 0xb258 0xb270>;
-
- clocks = <&gcc GCC_PCIE_AUX_CLK>,
- <&gcc GCC_PCIE_CFG_AHB_CLK>,
- <&gcc GCC_PCIE_MSTR_AXI_CLK>,
- <&gcc GCC_PCIE_SLV_AXI_CLK>,
- <&gcc GCC_PCIE_SLV_Q2A_AXI_CLK>,
- <&gcc GCC_PCIE_SLEEP_CLK>,
- <&gcc GCC_PCIE_0_CLKREF_CLK>;
- clock-names = "aux", "cfg", "bus_master", "bus_slave",
- "slave_q2a", "sleep", "ref";
-
- interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "global", "doorbell";
- reset-gpios = <&tlmm 57 GPIO_ACTIVE_LOW>;
- wake-gpios = <&tlmm 53 GPIO_ACTIVE_LOW>;
- resets = <&gcc GCC_PCIE_BCR>;
- reset-names = "core";
- power-domains = <&gcc PCIE_GDSC>;
- phys = <&pcie0_lane>;
- phy-names = "pciephy";
- max-link-speed = <3>;
- num-lanes = <2>;
-
- status = "disabled";
- };
-
remoteproc_mpss: remoteproc@4080000 {
compatible = "qcom,sdx55-mpss-pas";
reg = <0x04080000 0x4040>;
diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
index a9d2bec99014..e15a3b2a9b39 100644
--- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
@@ -1880,6 +1880,21 @@ pins {
};
};

+ spi1_pins_b: spi1-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('A', 5, AF5)>, /* SPI1_SCK */
+ <STM32_PINMUX('B', 5, AF5)>; /* SPI1_MOSI */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <1>;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('A', 6, AF5)>; /* SPI1_MISO */
+ bias-disable;
+ };
+ };
+
spi2_pins_a: spi2-0 {
pins1 {
pinmux = <STM32_PINMUX('B', 10, AF5)>, /* SPI2_SCK */
@@ -2448,19 +2463,4 @@ pins2 {
bias-disable;
};
};
-
- spi1_pins_b: spi1-1 {
- pins1 {
- pinmux = <STM32_PINMUX('A', 5, AF5)>, /* SPI1_SCK */
- <STM32_PINMUX('B', 5, AF5)>; /* SPI1_MOSI */
- bias-disable;
- drive-push-pull;
- slew-rate = <1>;
- };
-
- pins2 {
- pinmux = <STM32_PINMUX('A', 6, AF5)>; /* SPI1_MISO */
- bias-disable;
- };
- };
};
diff --git a/arch/arm64/boot/dts/apple/t8103-j274.dts b/arch/arm64/boot/dts/apple/t8103-j274.dts
index c1f3ba9c39f6..997ef90614c1 100644
--- a/arch/arm64/boot/dts/apple/t8103-j274.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j274.dts
@@ -33,10 +33,12 @@ &wifi0 {

&port01 {
bus-range = <2 2>;
+ status = "okay";
};

&port02 {
bus-range = <3 3>;
+ status = "okay";
ethernet0: ethernet@0,0 {
reg = <0x30000 0x0 0x0 0x0 0x0>;
/* To be filled by the loader */
@@ -44,6 +46,14 @@ ethernet0: ethernet@0,0 {
};
};

+&pcie0_dart_1 {
+ status = "okay";
+};
+
+&pcie0_dart_2 {
+ status = "okay";
+};
+
&i2c2 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/apple/t8103-j293.dts b/arch/arm64/boot/dts/apple/t8103-j293.dts
index ecb10d237a05..3fd444ac8ae4 100644
--- a/arch/arm64/boot/dts/apple/t8103-j293.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j293.dts
@@ -21,21 +21,6 @@ &wifi0 {
brcm,board-type = "apple,honshu";
};

-/*
- * Remove unused PCIe ports and disable the associated DARTs.
- */
-
-&pcie0_dart_1 {
- status = "disabled";
-};
-
-&pcie0_dart_2 {
- status = "disabled";
-};
-
-/delete-node/ &port01;
-/delete-node/ &port02;
-
&i2c2 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/apple/t8103-j313.dts b/arch/arm64/boot/dts/apple/t8103-j313.dts
index df741737b8e6..9399c42de8a4 100644
--- a/arch/arm64/boot/dts/apple/t8103-j313.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j313.dts
@@ -20,18 +20,3 @@ / {
&wifi0 {
brcm,board-type = "apple,shikoku";
};
-
-/*
- * Remove unused PCIe ports and disable the associated DARTs.
- */
-
-&pcie0_dart_1 {
- status = "disabled";
-};
-
-&pcie0_dart_2 {
- status = "disabled";
-};
-
-/delete-node/ &port01;
-/delete-node/ &port02;
diff --git a/arch/arm64/boot/dts/apple/t8103-j456.dts b/arch/arm64/boot/dts/apple/t8103-j456.dts
index 8c6bf9592510..d80223fa4229 100644
--- a/arch/arm64/boot/dts/apple/t8103-j456.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j456.dts
@@ -51,13 +51,23 @@ hpm3: usb-pd@3c {

&port01 {
bus-range = <2 2>;
+ status = "okay";
};

&port02 {
bus-range = <3 3>;
+ status = "okay";
ethernet0: ethernet@0,0 {
reg = <0x30000 0x0 0x0 0x0 0x0>;
/* To be filled by the loader */
local-mac-address = [00 10 18 00 00 00];
};
};
+
+&pcie0_dart_1 {
+ status = "okay";
+};
+
+&pcie0_dart_2 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t8103-j457.dts b/arch/arm64/boot/dts/apple/t8103-j457.dts
index fe7c0aaf7d62..e24ccb49e33c 100644
--- a/arch/arm64/boot/dts/apple/t8103-j457.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j457.dts
@@ -33,6 +33,7 @@ &wifi0 {

&port02 {
bus-range = <3 3>;
+ status = "okay";
ethernet0: ethernet@0,0 {
reg = <0x30000 0x0 0x0 0x0 0x0>;
/* To be filled by the loader */
@@ -40,12 +41,6 @@ ethernet0: ethernet@0,0 {
};
};

-/*
- * Remove unused PCIe port and disable the associated DART.
- */
-
-&pcie0_dart_1 {
- status = "disabled";
+&pcie0_dart_2 {
+ status = "okay";
};
-
-/delete-node/ &port01;
diff --git a/arch/arm64/boot/dts/apple/t8103.dtsi b/arch/arm64/boot/dts/apple/t8103.dtsi
index a4d195e9eb8c..84fd1b1b48f6 100644
--- a/arch/arm64/boot/dts/apple/t8103.dtsi
+++ b/arch/arm64/boot/dts/apple/t8103.dtsi
@@ -428,6 +428,7 @@ pcie0_dart_1: iommu@682008000 {
interrupt-parent = <&aic>;
interrupts = <AIC_IRQ 699 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&ps_apcie_gp>;
+ status = "disabled";
};

pcie0_dart_2: iommu@683008000 {
@@ -437,6 +438,7 @@ pcie0_dart_2: iommu@683008000 {
interrupt-parent = <&aic>;
interrupts = <AIC_IRQ 702 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&ps_apcie_gp>;
+ status = "disabled";
};

pcie0: pcie@690000000 {
@@ -511,6 +513,7 @@ port01: pci@1,0 {
<0 0 0 2 &port01 0 0 0 1>,
<0 0 0 3 &port01 0 0 0 2>,
<0 0 0 4 &port01 0 0 0 3>;
+ status = "disabled";
};

port02: pci@2,0 {
@@ -530,6 +533,7 @@ port02: pci@2,0 {
<0 0 0 2 &port02 0 0 0 1>,
<0 0 0 3 &port02 0 0 0 2>,
<0 0 0 4 &port02 0 0 0 3>;
+ status = "disabled";
};
};
};
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts
index 839ca33178b0..d94a53d68320 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts
@@ -120,7 +120,7 @@ ethernet-phy@3 {
};

&leds {
- led-power@11 {
+ led@11 {
reg = <0x11>;
function = LED_FUNCTION_POWER;
color = <LED_COLOR_ID_WHITE>;
@@ -130,7 +130,7 @@ led-power@11 {
pinctrl-0 = <&pins_led_17_a>;
};

- led-wan-red@12 {
+ led@12 {
reg = <0x12>;
function = LED_FUNCTION_WAN;
color = <LED_COLOR_ID_RED>;
@@ -139,7 +139,7 @@ led-wan-red@12 {
pinctrl-0 = <&pins_led_18_a>;
};

- led-wps@14 {
+ led@14 {
reg = <0x14>;
function = LED_FUNCTION_WPS;
color = <LED_COLOR_ID_WHITE>;
@@ -148,7 +148,7 @@ led-wps@14 {
pinctrl-0 = <&pins_led_20_a>;
};

- led-wan-white@15 {
+ led@15 {
reg = <0x15>;
function = LED_FUNCTION_WAN;
color = <LED_COLOR_ID_WHITE>;
@@ -157,7 +157,7 @@ led-wan-white@15 {
pinctrl-0 = <&pins_led_21_a>;
};

- led-lan@19 {
+ led@19 {
reg = <0x19>;
function = LED_FUNCTION_LAN;
color = <LED_COLOR_ID_WHITE>;
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
index dac9d3b4e91d..df7134854206 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
@@ -253,7 +253,7 @@ phy12: ethernet-phy@c {
};
};

- procmon: syscon@280000 {
+ procmon: bus@280000 {
compatible = "simple-bus";
reg = <0x280000 0x1000>;
ranges;
@@ -532,7 +532,7 @@ nand-controller@1800 {
reg = <0x1800 0x600>, <0x2000 0x10>;
reg-names = "nand", "nand-int-base";
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "nand";
+ interrupt-names = "nand_ctlrdy";
status = "okay";

nandcs: nand@0 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
index 4b314435f8fd..50367da93cd7 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
@@ -935,7 +935,7 @@ mt6315_7_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vgpu";
regulator-min-microvolt = <606250>;
- regulator-max-microvolt = <1193750>;
+ regulator-max-microvolt = <800000>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2>;
};
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
index 5cdc7ac1a9c0..c7de5e3b071e 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
@@ -742,8 +742,7 @@ pmi8994_mpp2_userled4: mpp2-userled4-state {
&pmi8994_spmi_regulators {
vdd_s2-supply = <&vph_pwr>;

- vdd_gfx: s2@1700 {
- reg = <0x1700 0x100>;
+ vdd_gfx: s2 {
regulator-name = "VDD_GFX";
regulator-min-microvolt = <980000>;
regulator-max-microvolt = <980000>;
diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
index a7c7ca980a71..c3492a383155 100644
--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
@@ -436,10 +436,8 @@ pcie0: pci@20000000 {
phys = <&pcie_phy0>;
phy-names = "pciephy";

- ranges = <0x81000000 0 0x20200000 0 0x20200000
- 0 0x10000>, /* downstream I/O */
- <0x82000000 0 0x20220000 0 0x20220000
- 0 0xfde0000>; /* non-prefetchable memory */
+ ranges = <0x81000000 0x0 0x00000000 0x0 0x20200000 0x0 0x10000>,
+ <0x82000000 0x0 0x20220000 0x0 0x20220000 0x0 0xfde0000>;

interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
index 05b97b05d446..3f7cf3fdd319 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
@@ -750,10 +750,8 @@ pcie1: pci@10000000 {
phys = <&pcie_phy1>;
phy-names = "pciephy";

- ranges = <0x81000000 0 0x10200000 0x10200000
- 0 0x10000>, /* downstream I/O */
- <0x82000000 0 0x10220000 0x10220000
- 0 0xfde0000>; /* non-prefetchable memory */
+ ranges = <0x81000000 0x0 0x00000000 0x10200000 0x0 0x10000>, /* I/O */
+ <0x82000000 0x0 0x10220000 0x10220000 0x0 0xfde0000>; /* MEM */

interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
@@ -814,10 +812,8 @@ pcie0: pci@20000000 {
phys = <&pcie_phy0>;
phy-names = "pciephy";

- ranges = <0x81000000 0 0x20200000 0x20200000
- 0 0x10000>, /* downstream I/O */
- <0x82000000 0 0x20220000 0x20220000
- 0 0xfde0000>; /* non-prefetchable memory */
+ ranges = <0x81000000 0x0 0x00000000 0x20200000 0x0 0x10000>, /* I/O */
+ <0x82000000 0x0 0x20220000 0x20220000 0x0 0xfde0000>; /* MEM */

interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
diff --git a/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi b/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
index 465b2828acbd..13b8823ae063 100644
--- a/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
@@ -60,11 +60,6 @@ reserved@5000000 {
reg = <0x0 0x05000000 0x0 0x1a00000>;
no-map;
};
-
- reserved@6c00000 {
- reg = <0x0 0x06c00000 0x0 0x400000>;
- no-map;
- };
};
};

diff --git a/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
index 7e2c0dcc11ab..4801d973f9d7 100644
--- a/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
+++ b/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, Huawei Inc. All rights reserved.
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, Petr Vorel <petr.vorel@xxxxxxxxx>
+ * Copyright (c) 2021-2023, Petr Vorel <petr.vorel@xxxxxxxxx>
*/

/dts-v1/;
@@ -30,13 +30,18 @@ reserved-memory {
#size-cells = <2>;
ranges;

+ cont_splash_mem: memory@3401000 {
+ reg = <0 0x03401000 0 0x1000000>;
+ no-map;
+ };
+
tzapp_mem: tzapp@4800000 {
reg = <0 0x04800000 0 0x1900000>;
no-map;
};

- removed_region: reserved@6300000 {
- reg = <0 0x06300000 0 0xD00000>;
+ reserved@6300000 {
+ reg = <0 0x06300000 0 0x700000>;
no-map;
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi b/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi
index f9d8bd09e074..c6145ee8b278 100644
--- a/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi
@@ -542,8 +542,7 @@ hd3ss460_en: en-high-state {
};

&pmi8994_spmi_regulators {
- vdd_gfx: s2@1700 {
- reg = <0x1700 0x100>;
+ vdd_gfx: s2 {
regulator-min-microvolt = <980000>;
regulator-max-microvolt = <980000>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
index ff60b7004d26..2ecf455db830 100644
--- a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
@@ -173,8 +173,7 @@ &pmi8994_spmi_regulators {
* power domain.. which still isn't enough and forces us to bind
* OXILI_CX and OXILI_GX together!
*/
- vdd_gfx: s2@1700 {
- reg = <0x1700 0x100>;
+ vdd_gfx: s2 {
regulator-name = "VDD_GFX";
regulator-min-microvolt = <980000>;
regulator-max-microvolt = <980000>;
diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
index ded5b7ceeaf9..7ed59e698c14 100644
--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
@@ -227,6 +227,11 @@ adsp_mem: memory@c9400000 {
reg = <0 0xc9400000 0 0x3f00000>;
no-map;
};
+
+ reserved@6c00000 {
+ reg = <0 0x06c00000 0 0x400000>;
+ no-map;
+ };
};

smd {
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index c103034372fd..67b87915d822 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -1828,8 +1828,8 @@ pcie0: pcie@600000 {

#address-cells = <3>;
#size-cells = <2>;
- ranges = <0x01000000 0x0 0x0c200000 0x0c200000 0x0 0x100000>,
- <0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;
+ ranges = <0x01000000 0x0 0x00000000 0x0c200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;

device_type = "pci";

@@ -1882,8 +1882,8 @@ pcie1: pcie@608000 {

#address-cells = <3>;
#size-cells = <2>;
- ranges = <0x01000000 0x0 0x0d200000 0x0d200000 0x0 0x100000>,
- <0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>;
+ ranges = <0x01000000 0x0 0x00000000 0x0d200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>;

device_type = "pci";

@@ -1933,8 +1933,8 @@ pcie2: pcie@610000 {

#address-cells = <3>;
#size-cells = <2>;
- ranges = <0x01000000 0x0 0x0e200000 0x0e200000 0x0 0x100000>,
- <0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>;
+ ranges = <0x01000000 0x0 0x00000000 0x0e200000 0x0 0x100000>,
+ <0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>;

device_type = "pci";

diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
index f05f16ac5cc1..29c60bb56ed5 100644
--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
@@ -916,7 +916,7 @@ pcie0: pci@1c00000 {
phy-names = "pciephy";
status = "disabled";

- ranges = <0x01000000 0x0 0x1b200000 0x1b200000 0x0 0x100000>,
+ ranges = <0x01000000 0x0 0x00000000 0x1b200000 0x0 0x100000>,
<0x02000000 0x0 0x1b300000 0x1b300000 0x0 0xd00000>;

#interrupt-cells = <1>;
@@ -1513,7 +1513,7 @@ stm: stm@6002000 {
compatible = "arm,coresight-stm", "arm,primecell";
reg = <0x06002000 0x1000>,
<0x16280000 0x180000>;
- reg-names = "stm-base", "stm-data-base";
+ reg-names = "stm-base", "stm-stimulus-base";
status = "disabled";

clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>;
diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
index 542c215dde10..82b60e988d0f 100644
--- a/arch/arm64/boot/dts/qcom/pmi8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
@@ -49,8 +49,6 @@ pmi8994_lpg: pwm {

pmi8994_spmi_regulators: regulators {
compatible = "qcom,pmi8994-regulators";
- #address-cells = <1>;
- #size-cells = <1>;
};

pmi8994_wled: wled@d800 {
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts
index 850776c5323d..70d5a7aa8873 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts
@@ -26,7 +26,7 @@ trackpad: trackpad@2c {
interrupt-parent = <&tlmm>;
interrupts = <58 IRQ_TYPE_EDGE_FALLING>;

- vcc-supply = <&pp3300_fp_tp>;
+ vdd-supply = <&pp3300_fp_tp>;
hid-descr-addr = <0x20>;

wakeup-source;
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi
index 56d787785fd5..2e35c69a978f 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi
@@ -39,7 +39,7 @@ trackpad: trackpad@15 {
interrupt-parent = <&tlmm>;
interrupts = <0 IRQ_TYPE_EDGE_FALLING>;

- vcc-supply = <&pp3300_fp_tp>;
+ vdd-supply = <&pp3300_fp_tp>;
post-power-on-delay-ms = <100>;
hid-descr-addr = <0x0001>;

diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
index b16886f71517..6f0ee4e13ef1 100644
--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
@@ -1521,7 +1521,7 @@ pinmux-data {
};
};

- qspi_data12: qspi-data12 {
+ qspi_data23: qspi-data23 {
pinmux-data {
pins = "gpio66", "gpio67";
function = "qspi_data";
diff --git a/arch/arm64/boot/dts/qcom/sc7280-herobrine-villager.dtsi b/arch/arm64/boot/dts/qcom/sc7280-herobrine-villager.dtsi
index 4566722bf4dd..8f5d82885e44 100644
--- a/arch/arm64/boot/dts/qcom/sc7280-herobrine-villager.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280-herobrine-villager.dtsi
@@ -33,7 +33,7 @@ trackpad: trackpad@2c {
interrupts = <7 IRQ_TYPE_EDGE_FALLING>;

hid-descr-addr = <0x20>;
- vcc-supply = <&pp3300_z1>;
+ vdd-supply = <&pp3300_z1>;

wakeup-source;
};
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index 346da6af51ac..0cdc579f26de 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -2023,7 +2023,7 @@ pcie1: pci@1c08000 {
#address-cells = <3>;
#size-cells = <2>;

- ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>,
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
<0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;

interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
@@ -3590,12 +3590,17 @@ eud: eud@88e0000 {
<0 0x88e2000 0 0x1000>;
interrupts-extended = <&pdc 11 IRQ_TYPE_LEVEL_HIGH>;
ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
port@0 {
+ reg = <0>;
eud_ep: endpoint {
remote-endpoint = <&usb2_role_switch>;
};
};
port@1 {
+ reg = <1>;
eud_con: endpoint {
remote-endpoint = <&con_eud>;
};
@@ -3606,7 +3611,11 @@ eud_con: endpoint {
eud_typec: connector {
compatible = "usb-c-connector";
ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
port@0 {
+ reg = <0>;
con_eud: endpoint {
remote-endpoint = <&eud_con>;
};
@@ -4336,7 +4345,7 @@ qspi_data01: qspi-data01-pins {
function = "qspi_data";
};

- qspi_data12: qspi-data12-pins {
+ qspi_data23: qspi-data23-pins {
pins = "gpio16", "gpio17";
function = "qspi_data";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index d761da47220d..a99eda497101 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -198,7 +198,7 @@ CPU0: cpu@0 {
reg = <0x0 0x0>;
enable-method = "psci";
capacity-dmips-mhz = <611>;
- dynamic-power-coefficient = <290>;
+ dynamic-power-coefficient = <154>;
qcom,freq-domain = <&cpufreq_hw 0>;
operating-points-v2 = <&cpu0_opp_table>;
interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>,
@@ -222,7 +222,7 @@ CPU1: cpu@100 {
reg = <0x0 0x100>;
enable-method = "psci";
capacity-dmips-mhz = <611>;
- dynamic-power-coefficient = <290>;
+ dynamic-power-coefficient = <154>;
qcom,freq-domain = <&cpufreq_hw 0>;
operating-points-v2 = <&cpu0_opp_table>;
interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>,
@@ -243,7 +243,7 @@ CPU2: cpu@200 {
reg = <0x0 0x200>;
enable-method = "psci";
capacity-dmips-mhz = <611>;
- dynamic-power-coefficient = <290>;
+ dynamic-power-coefficient = <154>;
qcom,freq-domain = <&cpufreq_hw 0>;
operating-points-v2 = <&cpu0_opp_table>;
interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>,
@@ -264,7 +264,7 @@ CPU3: cpu@300 {
reg = <0x0 0x300>;
enable-method = "psci";
capacity-dmips-mhz = <611>;
- dynamic-power-coefficient = <290>;
+ dynamic-power-coefficient = <154>;
qcom,freq-domain = <&cpufreq_hw 0>;
operating-points-v2 = <&cpu0_opp_table>;
interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>,
@@ -2226,8 +2226,8 @@ pcie0: pci@1c00000 {
#address-cells = <3>;
#size-cells = <2>;

- ranges = <0x01000000 0x0 0x60200000 0 0x60200000 0x0 0x100000>,
- <0x02000000 0x0 0x60300000 0 0x60300000 0x0 0xd00000>;
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
+ <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0xd00000>;

interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
@@ -2331,7 +2331,7 @@ pcie1: pci@1c08000 {
#address-cells = <3>;
#size-cells = <2>;

- ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>,
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
<0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;

interrupts = <GIC_SPI 307 IRQ_TYPE_EDGE_RISING>;
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index 47e09d96f609..78ae4b9eaa10 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -1783,8 +1783,8 @@ pcie0: pci@1c00000 {
#address-cells = <3>;
#size-cells = <2>;

- ranges = <0x01000000 0x0 0x60200000 0 0x60200000 0x0 0x100000>,
- <0x02000000 0x0 0x60300000 0 0x60300000 0x0 0x3d00000>;
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
+ <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x3d00000>;

interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
@@ -1879,7 +1879,7 @@ pcie1: pci@1c08000 {
#address-cells = <3>;
#size-cells = <2>;

- ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>,
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
<0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;

interrupts = <GIC_SPI 307 IRQ_TYPE_EDGE_RISING>;
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index 29e352a57731..e93955525a10 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -1808,8 +1808,8 @@ pcie0: pci@1c00000 {
#address-cells = <3>;
#size-cells = <2>;

- ranges = <0x01000000 0x0 0x60200000 0 0x60200000 0x0 0x100000>,
- <0x02000000 0x0 0x60300000 0 0x60300000 0x0 0x3d00000>;
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
+ <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x3d00000>;

interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
@@ -1917,7 +1917,7 @@ pcie1: pci@1c08000 {
#address-cells = <3>;
#size-cells = <2>;

- ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>,
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
<0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;

interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
@@ -2025,7 +2025,7 @@ pcie2: pci@1c10000 {
#address-cells = <3>;
#size-cells = <2>;

- ranges = <0x01000000 0x0 0x64200000 0x0 0x64200000 0x0 0x100000>,
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x64200000 0x0 0x100000>,
<0x02000000 0x0 0x64300000 0x0 0x64300000 0x0 0x3d00000>;

interrupts = <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/sm8350-microsoft-surface-duo2.dts b/arch/arm64/boot/dts/qcom/sm8350-microsoft-surface-duo2.dts
index 9c4cfd995ff2..e87514d8fd84 100644
--- a/arch/arm64/boot/dts/qcom/sm8350-microsoft-surface-duo2.dts
+++ b/arch/arm64/boot/dts/qcom/sm8350-microsoft-surface-duo2.dts
@@ -341,6 +341,9 @@ &ufs_mem_phy {

&usb_1 {
status = "okay";
+};
+
+&usb_1_dwc3 {
dr_mode = "peripheral";
};

diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
index 4714d7bf03b9..128542582b3d 100644
--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
@@ -1722,8 +1722,8 @@ pcie0: pci@1c00000 {
#address-cells = <3>;
#size-cells = <2>;

- ranges = <0x01000000 0x0 0x60200000 0 0x60200000 0x0 0x100000>,
- <0x02000000 0x0 0x60300000 0 0x60300000 0x0 0x3d00000>;
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
+ <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x3d00000>;

interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
@@ -1831,8 +1831,8 @@ pcie1: pci@1c08000 {
#address-cells = <3>;
#size-cells = <2>;

- ranges = <0x01000000 0x0 0x40200000 0 0x40200000 0x0 0x100000>,
- <0x02000000 0x0 0x40300000 0 0x40300000 0x0 0x1fd00000>;
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
+ <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;

interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
@@ -1879,8 +1879,8 @@ pcie1: pci@1c08000 {
phys = <&pcie1_lane>;
phy-names = "pciephy";

- perst-gpio = <&tlmm 97 GPIO_ACTIVE_LOW>;
- enable-gpio = <&tlmm 99 GPIO_ACTIVE_HIGH>;
+ perst-gpios = <&tlmm 97 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 99 GPIO_ACTIVE_HIGH>;

pinctrl-names = "default";
pinctrl-0 = <&pcie1_default_state>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index 151e32ac0368..ec7c7851519f 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -49,17 +49,14 @@ cluster1_opp: opp-table-1 {
opp-shared;
opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
};
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
};
opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
opp-suspend;
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index 3053b4b21497..3ed31ffd73a2 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -49,17 +49,14 @@ cluster1_opp: opp-table-1 {
opp-shared;
opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
};
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
};
opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
opp-suspend;
};
diff --git a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
index 689aa4ba416b..a4738842f064 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
@@ -5,7 +5,6 @@
* Copyright (C) 2022 Renesas Electronics Corp.
*/

-#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/r9a07g043-cpg.h>

/ {
@@ -107,11 +106,10 @@ ssi0: ssi@10049c00 {
compatible = "renesas,r9a07g043-ssi",
"renesas,rz-ssi";
reg = <0 0x10049c00 0 0x400>;
- interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 327 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 328 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 329 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
+ interrupts = <SOC_PERIPHERAL_IRQ(326) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(327) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(328) IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "int_req", "dma_rx", "dma_tx";
clocks = <&cpg CPG_MOD R9A07G043_SSI0_PCLK2>,
<&cpg CPG_MOD R9A07G043_SSI0_PCLK_SFR>,
<&audio_clk1>, <&audio_clk2>;
@@ -128,11 +126,10 @@ ssi1: ssi@1004a000 {
compatible = "renesas,r9a07g043-ssi",
"renesas,rz-ssi";
reg = <0 0x1004a000 0 0x400>;
- interrupts = <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 331 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 332 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 333 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
+ interrupts = <SOC_PERIPHERAL_IRQ(330) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(331) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(332) IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "int_req", "dma_rx", "dma_tx";
clocks = <&cpg CPG_MOD R9A07G043_SSI1_PCLK2>,
<&cpg CPG_MOD R9A07G043_SSI1_PCLK_SFR>,
<&audio_clk1>, <&audio_clk2>;
@@ -149,11 +146,9 @@ ssi2: ssi@1004a400 {
compatible = "renesas,r9a07g043-ssi",
"renesas,rz-ssi";
reg = <0 0x1004a400 0 0x400>;
- interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 335 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 336 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 337 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
+ interrupts = <SOC_PERIPHERAL_IRQ(334) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(337) IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "int_req", "dma_rt";
clocks = <&cpg CPG_MOD R9A07G043_SSI2_PCLK2>,
<&cpg CPG_MOD R9A07G043_SSI2_PCLK_SFR>,
<&audio_clk1>, <&audio_clk2>;
@@ -170,11 +165,10 @@ ssi3: ssi@1004a800 {
compatible = "renesas,r9a07g043-ssi",
"renesas,rz-ssi";
reg = <0 0x1004a800 0 0x400>;
- interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 339 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 341 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
+ interrupts = <SOC_PERIPHERAL_IRQ(338) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(339) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(340) IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "int_req", "dma_rx", "dma_tx";
clocks = <&cpg CPG_MOD R9A07G043_SSI3_PCLK2>,
<&cpg CPG_MOD R9A07G043_SSI3_PCLK_SFR>,
<&audio_clk1>, <&audio_clk2>;
@@ -190,9 +184,9 @@ ssi3: ssi@1004a800 {
spi0: spi@1004ac00 {
compatible = "renesas,r9a07g043-rspi", "renesas,rspi-rz";
reg = <0 0x1004ac00 0 0x400>;
- interrupts = <GIC_SPI 415 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 413 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 414 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(415) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(413) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(414) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "error", "rx", "tx";
clocks = <&cpg CPG_MOD R9A07G043_RSPI0_CLKB>;
resets = <&cpg R9A07G043_RSPI0_RST>;
@@ -208,9 +202,9 @@ spi0: spi@1004ac00 {
spi1: spi@1004b000 {
compatible = "renesas,r9a07g043-rspi", "renesas,rspi-rz";
reg = <0 0x1004b000 0 0x400>;
- interrupts = <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(418) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(416) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(417) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "error", "rx", "tx";
clocks = <&cpg CPG_MOD R9A07G043_RSPI1_CLKB>;
resets = <&cpg R9A07G043_RSPI1_RST>;
@@ -226,9 +220,9 @@ spi1: spi@1004b000 {
spi2: spi@1004b400 {
compatible = "renesas,r9a07g043-rspi", "renesas,rspi-rz";
reg = <0 0x1004b400 0 0x400>;
- interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(421) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(419) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(420) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "error", "rx", "tx";
clocks = <&cpg CPG_MOD R9A07G043_RSPI2_CLKB>;
resets = <&cpg R9A07G043_RSPI2_RST>;
@@ -245,12 +239,12 @@ scif0: serial@1004b800 {
compatible = "renesas,scif-r9a07g043",
"renesas,scif-r9a07g044";
reg = <0 0x1004b800 0 0x400>;
- interrupts = <GIC_SPI 380 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(380) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(382) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(383) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(381) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(384) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(384) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "eri", "rxi", "txi",
"bri", "dri", "tei";
clocks = <&cpg CPG_MOD R9A07G043_SCIF0_CLK_PCK>;
@@ -264,12 +258,12 @@ scif1: serial@1004bc00 {
compatible = "renesas,scif-r9a07g043",
"renesas,scif-r9a07g044";
reg = <0 0x1004bc00 0 0x400>;
- interrupts = <GIC_SPI 385 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 387 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 388 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 386 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 389 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 389 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(385) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(387) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(388) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(386) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(389) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(389) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "eri", "rxi", "txi",
"bri", "dri", "tei";
clocks = <&cpg CPG_MOD R9A07G043_SCIF1_CLK_PCK>;
@@ -283,12 +277,12 @@ scif2: serial@1004c000 {
compatible = "renesas,scif-r9a07g043",
"renesas,scif-r9a07g044";
reg = <0 0x1004c000 0 0x400>;
- interrupts = <GIC_SPI 390 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 392 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 391 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(390) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(392) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(393) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(391) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(394) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(394) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "eri", "rxi", "txi",
"bri", "dri", "tei";
clocks = <&cpg CPG_MOD R9A07G043_SCIF2_CLK_PCK>;
@@ -302,12 +296,12 @@ scif3: serial@1004c400 {
compatible = "renesas,scif-r9a07g043",
"renesas,scif-r9a07g044";
reg = <0 0x1004c400 0 0x400>;
- interrupts = <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(395) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(397) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(398) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(396) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(399) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(399) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "eri", "rxi", "txi",
"bri", "dri", "tei";
clocks = <&cpg CPG_MOD R9A07G043_SCIF3_CLK_PCK>;
@@ -321,12 +315,12 @@ scif4: serial@1004c800 {
compatible = "renesas,scif-r9a07g043",
"renesas,scif-r9a07g044";
reg = <0 0x1004c800 0 0x400>;
- interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(400) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(402) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(403) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(401) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(404) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(404) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "eri", "rxi", "txi",
"bri", "dri", "tei";
clocks = <&cpg CPG_MOD R9A07G043_SCIF4_CLK_PCK>;
@@ -339,10 +333,10 @@ scif4: serial@1004c800 {
sci0: serial@1004d000 {
compatible = "renesas,r9a07g043-sci", "renesas,sci";
reg = <0 0x1004d000 0 0x400>;
- interrupts = <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 406 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 407 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(405) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(406) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(407) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(408) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "eri", "rxi", "txi", "tei";
clocks = <&cpg CPG_MOD R9A07G043_SCI0_CLKP>;
clock-names = "fck";
@@ -354,10 +348,10 @@ sci0: serial@1004d000 {
sci1: serial@1004d400 {
compatible = "renesas,r9a07g043-sci", "renesas,sci";
reg = <0 0x1004d400 0 0x400>;
- interrupts = <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 410 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 411 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(409) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(410) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(411) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(412) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "eri", "rxi", "txi", "tei";
clocks = <&cpg CPG_MOD R9A07G043_SCI1_CLKP>;
clock-names = "fck";
@@ -369,14 +363,14 @@ sci1: serial@1004d400 {
canfd: can@10050000 {
compatible = "renesas,r9a07g043-canfd", "renesas,rzg2l-canfd";
reg = <0 0x10050000 0 0x8000>;
- interrupts = <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(426) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(427) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(422) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(424) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(428) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(423) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(425) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(429) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "g_err", "g_recc",
"ch0_err", "ch0_rec", "ch0_trx",
"ch1_err", "ch1_rec", "ch1_trx";
@@ -405,14 +399,14 @@ i2c0: i2c@10058000 {
#size-cells = <0>;
compatible = "renesas,riic-r9a07g043", "renesas,riic-rz";
reg = <0 0x10058000 0 0x400>;
- interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 348 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 349 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(350) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(348) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(349) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(352) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(353) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(351) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(354) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(355) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "tei", "ri", "ti", "spi", "sti",
"naki", "ali", "tmoi";
clocks = <&cpg CPG_MOD R9A07G043_I2C0_PCLK>;
@@ -427,14 +421,14 @@ i2c1: i2c@10058400 {
#size-cells = <0>;
compatible = "renesas,riic-r9a07g043", "renesas,riic-rz";
reg = <0 0x10058400 0 0x400>;
- interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 356 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 357 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 362 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 363 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(358) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(356) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(357) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(360) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(361) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(359) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(362) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(363) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "tei", "ri", "ti", "spi", "sti",
"naki", "ali", "tmoi";
clocks = <&cpg CPG_MOD R9A07G043_I2C1_PCLK>;
@@ -449,14 +443,14 @@ i2c2: i2c@10058800 {
#size-cells = <0>;
compatible = "renesas,riic-r9a07g043", "renesas,riic-rz";
reg = <0 0x10058800 0 0x400>;
- interrupts = <GIC_SPI 366 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 364 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 365 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 368 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(366) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(364) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(365) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(368) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(369) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(367) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(370) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(371) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "tei", "ri", "ti", "spi", "sti",
"naki", "ali", "tmoi";
clocks = <&cpg CPG_MOD R9A07G043_I2C2_PCLK>;
@@ -471,14 +465,14 @@ i2c3: i2c@10058c00 {
#size-cells = <0>;
compatible = "renesas,riic-r9a07g043", "renesas,riic-rz";
reg = <0 0x10058c00 0 0x400>;
- interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 372 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 373 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(374) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(372) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(373) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(376) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(377) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(375) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(378) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(379) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "tei", "ri", "ti", "spi", "sti",
"naki", "ali", "tmoi";
clocks = <&cpg CPG_MOD R9A07G043_I2C3_PCLK>;
@@ -491,7 +485,7 @@ i2c3: i2c@10058c00 {
adc: adc@10059000 {
compatible = "renesas,r9a07g043-adc", "renesas,rzg2l-adc";
reg = <0 0x10059000 0 0x400>;
- interrupts = <GIC_SPI 347 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <SOC_PERIPHERAL_IRQ(347) IRQ_TYPE_EDGE_RISING>;
clocks = <&cpg CPG_MOD R9A07G043_ADC_ADCLK>,
<&cpg CPG_MOD R9A07G043_ADC_PCLK>;
clock-names = "adclk", "pclk";
@@ -551,10 +545,10 @@ cpg: clock-controller@11010000 {
sysc: system-controller@11020000 {
compatible = "renesas,r9a07g043-sysc";
reg = <0 0x11020000 0 0x10000>;
- interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(42) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(43) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(44) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(45) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "lpm_int", "ca55stbydone_int",
"cm33stbyr_int", "ca55_deny";
status = "disabled";
@@ -578,23 +572,23 @@ dmac: dma-controller@11820000 {
"renesas,rz-dmac";
reg = <0 0x11820000 0 0x10000>,
<0 0x11830000 0 0x10000>;
- interrupts = <GIC_SPI 141 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 125 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 126 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 127 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 128 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 129 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 130 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 131 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 132 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 133 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 134 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 135 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 136 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 137 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 138 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 139 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 140 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <SOC_PERIPHERAL_IRQ(141) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(125) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(126) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(127) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(128) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(129) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(130) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(131) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(132) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(133) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(134) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(135) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(136) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(137) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(138) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(139) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(140) IRQ_TYPE_EDGE_RISING>;
interrupt-names = "error",
"ch0", "ch1", "ch2", "ch3",
"ch4", "ch5", "ch6", "ch7",
@@ -623,8 +617,8 @@ sdhi0: mmc@11c00000 {
compatible = "renesas,sdhi-r9a07g043",
"renesas,rcar-gen3-sdhi";
reg = <0x0 0x11c00000 0 0x10000>;
- interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(104) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(105) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD R9A07G043_SDHI0_IMCLK>,
<&cpg CPG_MOD R9A07G043_SDHI0_CLK_HS>,
<&cpg CPG_MOD R9A07G043_SDHI0_IMCLK2>,
@@ -639,8 +633,8 @@ sdhi1: mmc@11c10000 {
compatible = "renesas,sdhi-r9a07g043",
"renesas,rcar-gen3-sdhi";
reg = <0x0 0x11c10000 0 0x10000>;
- interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(106) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(107) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD R9A07G043_SDHI1_IMCLK>,
<&cpg CPG_MOD R9A07G043_SDHI1_CLK_HS>,
<&cpg CPG_MOD R9A07G043_SDHI1_IMCLK2>,
@@ -655,9 +649,9 @@ eth0: ethernet@11c20000 {
compatible = "renesas,r9a07g043-gbeth",
"renesas,rzg2l-gbeth";
reg = <0 0x11c20000 0 0x10000>;
- interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(84) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(85) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(86) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "mux", "fil", "arp_ns";
phy-mode = "rgmii";
clocks = <&cpg CPG_MOD R9A07G043_ETH0_CLK_AXI>,
@@ -675,9 +669,9 @@ eth1: ethernet@11c30000 {
compatible = "renesas,r9a07g043-gbeth",
"renesas,rzg2l-gbeth";
reg = <0 0x11c30000 0 0x10000>;
- interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(87) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(88) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(89) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "mux", "fil", "arp_ns";
phy-mode = "rgmii";
clocks = <&cpg CPG_MOD R9A07G043_ETH1_CLK_AXI>,
@@ -705,7 +699,7 @@ phyrst: usbphy-ctrl@11c40000 {
ohci0: usb@11c50000 {
compatible = "generic-ohci";
reg = <0 0x11c50000 0 0x100>;
- interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(91) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD R9A07G043_USB_PCLK>,
<&cpg CPG_MOD R9A07G043_USB_U2H0_HCLK>;
resets = <&phyrst 0>,
@@ -719,7 +713,7 @@ ohci0: usb@11c50000 {
ohci1: usb@11c70000 {
compatible = "generic-ohci";
reg = <0 0x11c70000 0 0x100>;
- interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(96) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD R9A07G043_USB_PCLK>,
<&cpg CPG_MOD R9A07G043_USB_U2H1_HCLK>;
resets = <&phyrst 1>,
@@ -733,7 +727,7 @@ ohci1: usb@11c70000 {
ehci0: usb@11c50100 {
compatible = "generic-ehci";
reg = <0 0x11c50100 0 0x100>;
- interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(92) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD R9A07G043_USB_PCLK>,
<&cpg CPG_MOD R9A07G043_USB_U2H0_HCLK>;
resets = <&phyrst 0>,
@@ -748,7 +742,7 @@ ehci0: usb@11c50100 {
ehci1: usb@11c70100 {
compatible = "generic-ehci";
reg = <0 0x11c70100 0 0x100>;
- interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(97) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD R9A07G043_USB_PCLK>,
<&cpg CPG_MOD R9A07G043_USB_U2H1_HCLK>;
resets = <&phyrst 1>,
@@ -764,7 +758,7 @@ usb2_phy0: usb-phy@11c50200 {
compatible = "renesas,usb2-phy-r9a07g043",
"renesas,rzg2l-usb2-phy";
reg = <0 0x11c50200 0 0x700>;
- interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(94) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD R9A07G043_USB_PCLK>,
<&cpg CPG_MOD R9A07G043_USB_U2H0_HCLK>;
resets = <&phyrst 0>;
@@ -777,7 +771,7 @@ usb2_phy1: usb-phy@11c70200 {
compatible = "renesas,usb2-phy-r9a07g043",
"renesas,rzg2l-usb2-phy";
reg = <0 0x11c70200 0 0x700>;
- interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(99) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD R9A07G043_USB_PCLK>,
<&cpg CPG_MOD R9A07G043_USB_U2H1_HCLK>;
resets = <&phyrst 1>;
@@ -790,10 +784,10 @@ hsusb: usb@11c60000 {
compatible = "renesas,usbhs-r9a07g043",
"renesas,rza2-usbhs";
reg = <0 0x11c60000 0 0x10000>;
- interrupts = <GIC_SPI 100 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(100) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(101) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(102) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(103) IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD R9A07G043_USB_PCLK>,
<&cpg CPG_MOD R9A07G043_USB_U2P_EXR_CPUCLK>;
resets = <&phyrst 0>,
@@ -812,8 +806,8 @@ wdt0: watchdog@12800800 {
clocks = <&cpg CPG_MOD R9A07G043_WDT0_PCLK>,
<&cpg CPG_MOD R9A07G043_WDT0_CLK>;
clock-names = "pclk", "oscclk";
- interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <SOC_PERIPHERAL_IRQ(49) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(50) IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "wdt", "perrout";
resets = <&cpg R9A07G043_WDT0_PRESETN>;
power-domains = <&cpg>;
@@ -839,7 +833,7 @@ ostm0: timer@12801000 {
compatible = "renesas,r9a07g043-ostm",
"renesas,ostm";
reg = <0x0 0x12801000 0x0 0x400>;
- interrupts = <GIC_SPI 46 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <SOC_PERIPHERAL_IRQ(46) IRQ_TYPE_EDGE_RISING>;
clocks = <&cpg CPG_MOD R9A07G043_OSTM0_PCLK>;
resets = <&cpg R9A07G043_OSTM0_PRESETZ>;
power-domains = <&cpg>;
@@ -850,7 +844,7 @@ ostm1: timer@12801400 {
compatible = "renesas,r9a07g043-ostm",
"renesas,ostm";
reg = <0x0 0x12801400 0x0 0x400>;
- interrupts = <GIC_SPI 47 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <SOC_PERIPHERAL_IRQ(47) IRQ_TYPE_EDGE_RISING>;
clocks = <&cpg CPG_MOD R9A07G043_OSTM1_PCLK>;
resets = <&cpg R9A07G043_OSTM1_PRESETZ>;
power-domains = <&cpg>;
@@ -861,7 +855,7 @@ ostm2: timer@12801800 {
compatible = "renesas,r9a07g043-ostm",
"renesas,ostm";
reg = <0x0 0x12801800 0x0 0x400>;
- interrupts = <GIC_SPI 48 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <SOC_PERIPHERAL_IRQ(48) IRQ_TYPE_EDGE_RISING>;
clocks = <&cpg CPG_MOD R9A07G043_OSTM2_PCLK>;
resets = <&cpg R9A07G043_OSTM2_PRESETZ>;
power-domains = <&cpg>;
diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
new file mode 100644
index 000000000000..96f935bc2d4d
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the RZ/G2UL SoC
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define SOC_PERIPHERAL_IRQ(nr) GIC_SPI nr
+
+#include "r9a07g043.dtsi"
diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u11-smarc.dts b/arch/arm64/boot/dts/renesas/r9a07g043u11-smarc.dts
index 059885a01ede..01483b4302c2 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g043u11-smarc.dts
+++ b/arch/arm64/boot/dts/renesas/r9a07g043u11-smarc.dts
@@ -17,7 +17,7 @@
#define SW_SW0_DEV_SEL 1
#define SW_ET0_EN_N 1

-#include "r9a07g043.dtsi"
+#include "r9a07g043u.dtsi"
#include "rzg2ul-smarc-som.dtsi"
#include "rzg2ul-smarc.dtsi"

diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
index 2283d4fb8736..7dbf6a6292f4 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
@@ -174,9 +174,8 @@ ssi0: ssi@10049c00 {
reg = <0 0x10049c00 0 0x400>;
interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 327 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 328 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 329 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
+ <GIC_SPI 328 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "int_req", "dma_rx", "dma_tx";
clocks = <&cpg CPG_MOD R9A07G044_SSI0_PCLK2>,
<&cpg CPG_MOD R9A07G044_SSI0_PCLK_SFR>,
<&audio_clk1>, <&audio_clk2>;
@@ -195,9 +194,8 @@ ssi1: ssi@1004a000 {
reg = <0 0x1004a000 0 0x400>;
interrupts = <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 331 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 332 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 333 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
+ <GIC_SPI 332 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "int_req", "dma_rx", "dma_tx";
clocks = <&cpg CPG_MOD R9A07G044_SSI1_PCLK2>,
<&cpg CPG_MOD R9A07G044_SSI1_PCLK_SFR>,
<&audio_clk1>, <&audio_clk2>;
@@ -215,10 +213,8 @@ ssi2: ssi@1004a400 {
"renesas,rz-ssi";
reg = <0 0x1004a400 0 0x400>;
interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 335 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 336 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 337 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
+ interrupt-names = "int_req", "dma_rt";
clocks = <&cpg CPG_MOD R9A07G044_SSI2_PCLK2>,
<&cpg CPG_MOD R9A07G044_SSI2_PCLK_SFR>,
<&audio_clk1>, <&audio_clk2>;
@@ -237,9 +233,8 @@ ssi3: ssi@1004a800 {
reg = <0 0x1004a800 0 0x400>;
interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 339 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 341 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
+ <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "int_req", "dma_rx", "dma_tx";
clocks = <&cpg CPG_MOD R9A07G044_SSI3_PCLK2>,
<&cpg CPG_MOD R9A07G044_SSI3_PCLK_SFR>,
<&audio_clk1>, <&audio_clk2>;
diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
index 358d4c34465f..e000510b90a4 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
@@ -174,9 +174,8 @@ ssi0: ssi@10049c00 {
reg = <0 0x10049c00 0 0x400>;
interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 327 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 328 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 329 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
+ <GIC_SPI 328 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "int_req", "dma_rx", "dma_tx";
clocks = <&cpg CPG_MOD R9A07G054_SSI0_PCLK2>,
<&cpg CPG_MOD R9A07G054_SSI0_PCLK_SFR>,
<&audio_clk1>, <&audio_clk2>;
@@ -195,9 +194,8 @@ ssi1: ssi@1004a000 {
reg = <0 0x1004a000 0 0x400>;
interrupts = <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 331 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 332 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 333 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
+ <GIC_SPI 332 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "int_req", "dma_rx", "dma_tx";
clocks = <&cpg CPG_MOD R9A07G054_SSI1_PCLK2>,
<&cpg CPG_MOD R9A07G054_SSI1_PCLK_SFR>,
<&audio_clk1>, <&audio_clk2>;
@@ -215,10 +213,8 @@ ssi2: ssi@1004a400 {
"renesas,rz-ssi";
reg = <0 0x1004a400 0 0x400>;
interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 335 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 336 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 337 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
+ interrupt-names = "int_req", "dma_rt";
clocks = <&cpg CPG_MOD R9A07G054_SSI2_PCLK2>,
<&cpg CPG_MOD R9A07G054_SSI2_PCLK_SFR>,
<&audio_clk1>, <&audio_clk2>;
@@ -237,9 +233,8 @@ ssi3: ssi@1004a800 {
reg = <0 0x1004a800 0 0x400>;
interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 339 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 341 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
+ <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "int_req", "dma_rx", "dma_tx";
clocks = <&cpg CPG_MOD R9A07G054_SSI3_PCLK2>,
<&cpg CPG_MOD R9A07G054_SSI3_PCLK_SFR>,
<&audio_clk1>, <&audio_clk2>;
diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
index edcf6b271881..eb8690a6be16 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
@@ -354,7 +354,7 @@ main_gpio0: gpio@600000 {
<193>, <194>, <195>;
interrupt-controller;
#interrupt-cells = <2>;
- ti,ngpio = <87>;
+ ti,ngpio = <92>;
ti,davinci-gpio-unbanked = <0>;
power-domains = <&k3_pds 77 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 77 0>;
@@ -371,7 +371,7 @@ main_gpio1: gpio@601000 {
<183>, <184>, <185>;
interrupt-controller;
#interrupt-cells = <2>;
- ti,ngpio = <88>;
+ ti,ngpio = <52>;
ti,davinci-gpio-unbanked = <0>;
power-domains = <&k3_pds 78 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 78 0>;
diff --git a/arch/arm64/boot/dts/ti/k3-am625.dtsi b/arch/arm64/boot/dts/ti/k3-am625.dtsi
index 887f31c23fef..31b37abbb8d5 100644
--- a/arch/arm64/boot/dts/ti/k3-am625.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am625.dtsi
@@ -96,7 +96,7 @@ cpu3: cpu@3 {
L2_0: l2-cache0 {
compatible = "cache";
cache-level = <2>;
- cache-size = <0x40000>;
+ cache-size = <0x80000>;
cache-line-size = <64>;
cache-sets = <512>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
index 576dbce80ad8..b08a083d722d 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
@@ -26,8 +26,9 @@ chosen {

memory@80000000 {
device_type = "memory";
- /* 2G RAM */
- reg = <0x00000000 0x80000000 0x00000000 0x80000000>;
+ /* 4G RAM */
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>,
+ <0x00000008 0x80000000 0x00000000 0x80000000>;
};

reserved-memory {
diff --git a/arch/arm64/boot/dts/ti/k3-am62a7.dtsi b/arch/arm64/boot/dts/ti/k3-am62a7.dtsi
index 331d89fda29d..f1ebaec404fb 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a7.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a7.dtsi
@@ -96,7 +96,7 @@ cpu3: cpu@3 {
L2_0: l2-cache0 {
compatible = "cache";
cache-level = <2>;
- cache-size = <0x40000>;
+ cache-size = <0x80000>;
cache-line-size = <64>;
cache-sets = <512>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
index 603ddda5127f..e9b36c419bec 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
@@ -1093,7 +1093,6 @@ main_sdhci0: mmc@4f80000 {
ti,itap-del-sel-mmc-hs = <0xa>;
ti,itap-del-sel-ddr52 = <0x3>;
ti,trm-icp = <0x8>;
- ti,strobe-sel = <0x77>;
dma-coherent;
};

diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
index d427f4556b6e..b2062eeee59e 100644
--- a/arch/arm64/crypto/aes-neonbs-core.S
+++ b/arch/arm64/crypto/aes-neonbs-core.S
@@ -15,6 +15,7 @@
*/

#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>

.text
@@ -620,12 +621,12 @@ SYM_FUNC_END(aesbs_decrypt8)
.endm

.align 4
-SYM_FUNC_START(aesbs_ecb_encrypt)
+SYM_TYPED_FUNC_START(aesbs_ecb_encrypt)
__ecb_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
SYM_FUNC_END(aesbs_ecb_encrypt)

.align 4
-SYM_FUNC_START(aesbs_ecb_decrypt)
+SYM_TYPED_FUNC_START(aesbs_ecb_decrypt)
__ecb_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
SYM_FUNC_END(aesbs_ecb_decrypt)

@@ -799,11 +800,11 @@ SYM_FUNC_END(__xts_crypt8)
ret
.endm

-SYM_FUNC_START(aesbs_xts_encrypt)
+SYM_TYPED_FUNC_START(aesbs_xts_encrypt)
__xts_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
SYM_FUNC_END(aesbs_xts_encrypt)

-SYM_FUNC_START(aesbs_xts_decrypt)
+SYM_TYPED_FUNC_START(aesbs_xts_decrypt)
__xts_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
SYM_FUNC_END(aesbs_xts_decrypt)

diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 7b7e05c02691..13d437bcbf58 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -104,6 +104,7 @@ void user_regs_reset_single_step(struct user_pt_regs *regs,
void kernel_enable_single_step(struct pt_regs *regs);
void kernel_disable_single_step(void);
int kernel_active_single_step(void);
+void kernel_rewind_single_step(struct pt_regs *regs);

#ifdef CONFIG_HAVE_HW_BREAKPOINT
int reinstall_suspended_bps(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e2b45c937c58..b5a8e8b3c691 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -127,6 +127,9 @@ struct kvm_arch {
/* Mandated version of PSCI */
u32 psci_version;

+ /* Protects VM-scoped configuration data */
+ struct mutex config_lock;
+
/*
* If we encounter a data abort without valid instruction syndrome
* information, report this to user space. User space can (and
@@ -398,6 +401,7 @@ struct kvm_vcpu_arch {

/* vcpu power state */
struct kvm_mp_state mp_state;
+ spinlock_t mp_state_lock;

/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index 8297bccf0784..5cd4d09bc69d 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -9,15 +9,16 @@
#ifdef CONFIG_SHADOW_CALL_STACK
scs_sp .req x18

- .macro scs_load tsk
- ldr scs_sp, [\tsk, #TSK_TI_SCS_SP]
+ .macro scs_load_current
+ get_current_task scs_sp
+ ldr scs_sp, [scs_sp, #TSK_TI_SCS_SP]
.endm

.macro scs_save tsk
str scs_sp, [\tsk, #TSK_TI_SCS_SP]
.endm
#else
- .macro scs_load tsk
+ .macro scs_load_current
.endm

.macro scs_save tsk
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 3da09778267e..64f2ecbdfe5c 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -438,6 +438,11 @@ int kernel_active_single_step(void)
}
NOKPROBE_SYMBOL(kernel_active_single_step);

+void kernel_rewind_single_step(struct pt_regs *regs)
+{
+ set_regs_spsr_ss(regs);
+}
+
/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e28137d64b76..3671d9521d4f 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -272,7 +272,7 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
alternative_else_nop_endif
1:

- scs_load tsk
+ scs_load_current
.else
add x21, sp, #PT_REGS_SIZE
get_current_task tsk
@@ -845,7 +845,7 @@ SYM_FUNC_START(cpu_switch_to)
msr sp_el0, x1
ptrauth_keys_install_kernel x1, x8, x9, x10
scs_save x0
- scs_load x1
+ scs_load_current
ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
@@ -873,19 +873,19 @@ NOKPROBE(ret_from_fork)
*/
SYM_FUNC_START(call_on_irq_stack)
#ifdef CONFIG_SHADOW_CALL_STACK
- stp scs_sp, xzr, [sp, #-16]!
+ get_current_task x16
+ scs_save x16
ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
#endif
+
/* Create a frame record to save our LR and SP (implicit in FP) */
stp x29, x30, [sp, #-16]!
mov x29, sp

ldr_this_cpu x16, irq_stack_ptr, x17
- mov x15, #IRQ_STACK_SIZE
- add x16, x16, x15

/* Move to the new stack and call the function there */
- mov sp, x16
+ add sp, x16, #IRQ_STACK_SIZE
blr x1

/*
@@ -894,9 +894,7 @@ SYM_FUNC_START(call_on_irq_stack)
*/
mov sp, x29
ldp x29, x30, [sp], #16
-#ifdef CONFIG_SHADOW_CALL_STACK
- ldp scs_sp, xzr, [sp], #16
-#endif
+ scs_load_current
ret
SYM_FUNC_END(call_on_irq_stack)
NOKPROBE(call_on_irq_stack)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 2196aad7b55b..cdbbc95eb49d 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -404,7 +404,7 @@ SYM_FUNC_END(create_kernel_mapping)
stp xzr, xzr, [sp, #S_STACKFRAME]
add x29, sp, #S_STACKFRAME

- scs_load \tsk
+ scs_load_current

adr_l \tmp1, __per_cpu_offset
ldr w\tmp2, [\tsk, #TSK_TI_CPU]
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index cda9c1e9864f..4e1f983df3d1 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -224,6 +224,8 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
*/
if (!kernel_active_single_step())
kernel_enable_single_step(linux_regs);
+ else
+ kernel_rewind_single_step(linux_regs);
err = 0;
break;
default:
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 6ce6888cf73d..35481d51aada 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -146,6 +146,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (ret)
return ret;

+ mutex_init(&kvm->arch.config_lock);
+
+#ifdef CONFIG_LOCKDEP
+ /* Clue in lockdep that the config_lock must be taken inside kvm->lock */
+ mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
+ mutex_unlock(&kvm->arch.config_lock);
+ mutex_unlock(&kvm->lock);
+#endif
+
ret = kvm_share_hyp(kvm, kvm + 1);
if (ret)
goto out_free_stage2_pgd;
@@ -324,6 +334,16 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
int err;

+ spin_lock_init(&vcpu->arch.mp_state_lock);
+
+#ifdef CONFIG_LOCKDEP
+ /* Inform lockdep that the config_lock is acquired after vcpu->mutex */
+ mutex_lock(&vcpu->mutex);
+ mutex_lock(&vcpu->kvm->arch.config_lock);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
+ mutex_unlock(&vcpu->mutex);
+#endif
+
/* Force users to call KVM_ARM_VCPU_INIT */
vcpu->arch.target = -1;
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
@@ -441,34 +461,41 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->cpu = -1;
}

-void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
+static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
- vcpu->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
+ WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
kvm_make_request(KVM_REQ_SLEEP, vcpu);
kvm_vcpu_kick(vcpu);
}

+void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
+{
+ spin_lock(&vcpu->arch.mp_state_lock);
+ __kvm_arm_vcpu_power_off(vcpu);
+ spin_unlock(&vcpu->arch.mp_state_lock);
+}
+
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_STOPPED;
+ return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
}

static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
{
- vcpu->arch.mp_state.mp_state = KVM_MP_STATE_SUSPENDED;
+ WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
kvm_make_request(KVM_REQ_SUSPEND, vcpu);
kvm_vcpu_kick(vcpu);
}

static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_SUSPENDED;
+ return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- *mp_state = vcpu->arch.mp_state;
+ *mp_state = READ_ONCE(vcpu->arch.mp_state);

return 0;
}
@@ -478,12 +505,14 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
{
int ret = 0;

+ spin_lock(&vcpu->arch.mp_state_lock);
+
switch (mp_state->mp_state) {
case KVM_MP_STATE_RUNNABLE:
- vcpu->arch.mp_state = *mp_state;
+ WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
break;
case KVM_MP_STATE_STOPPED:
- kvm_arm_vcpu_power_off(vcpu);
+ __kvm_arm_vcpu_power_off(vcpu);
break;
case KVM_MP_STATE_SUSPENDED:
kvm_arm_vcpu_suspend(vcpu);
@@ -492,6 +521,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
ret = -EINVAL;
}

+ spin_unlock(&vcpu->arch.mp_state_lock);
+
return ret;
}

@@ -585,9 +616,9 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
if (kvm_vm_is_protected(kvm))
kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);

- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);

return ret;
}
@@ -1202,7 +1233,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
kvm_arm_vcpu_power_off(vcpu);
else
- vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
+ WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);

return 0;
}
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 817fdd1ab778..dd20b8688d23 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -951,7 +951,9 @@ int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,

switch (attr->group) {
case KVM_ARM_VCPU_PMU_V3_CTRL:
+ mutex_lock(&vcpu->kvm->arch.config_lock);
ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
break;
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_set_attr(vcpu, attr);
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 950e35b993d2..1f5beebf6217 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -377,7 +377,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
if (val & ~fw_reg_features)
return -EINVAL;

- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);

if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) &&
val != *fw_reg_bmap) {
@@ -387,7 +387,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)

WRITE_ONCE(*fw_reg_bmap, val);
out:
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);
return ret;
}

diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index ed12c5355afb..c7e5f6a28c28 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -850,7 +850,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
struct arm_pmu *arm_pmu;
int ret = -ENXIO;

- mutex_lock(&kvm->lock);
+ lockdep_assert_held(&kvm->arch.config_lock);
mutex_lock(&arm_pmus_lock);

list_for_each_entry(entry, &arm_pmus, entry) {
@@ -870,7 +870,6 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
}

mutex_unlock(&arm_pmus_lock);
- mutex_unlock(&kvm->lock);
return ret;
}

@@ -878,22 +877,20 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
struct kvm *kvm = vcpu->kvm;

+ lockdep_assert_held(&kvm->arch.config_lock);
+
if (!kvm_vcpu_has_pmu(vcpu))
return -ENODEV;

if (vcpu->arch.pmu.created)
return -EBUSY;

- mutex_lock(&kvm->lock);
if (!kvm->arch.arm_pmu) {
/* No PMU set, get the default one */
kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
- if (!kvm->arch.arm_pmu) {
- mutex_unlock(&kvm->lock);
+ if (!kvm->arch.arm_pmu)
return -ENODEV;
- }
}
- mutex_unlock(&kvm->lock);

switch (attr->attr) {
case KVM_ARM_VCPU_PMU_V3_IRQ: {
@@ -937,19 +934,13 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
filter.action != KVM_PMU_EVENT_DENY))
return -EINVAL;

- mutex_lock(&kvm->lock);
-
- if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
- mutex_unlock(&kvm->lock);
+ if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags))
return -EBUSY;
- }

if (!kvm->arch.pmu_filter) {
kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
- if (!kvm->arch.pmu_filter) {
- mutex_unlock(&kvm->lock);
+ if (!kvm->arch.pmu_filter)
return -ENOMEM;
- }

/*
* The default depends on the first applied filter.
@@ -968,8 +959,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
else
bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);

- mutex_unlock(&kvm->lock);
-
return 0;
}
case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 7fbc4c1b9df0..5767e6baa61a 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -62,6 +62,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
struct vcpu_reset_state *reset_state;
struct kvm *kvm = source_vcpu->kvm;
struct kvm_vcpu *vcpu = NULL;
+ int ret = PSCI_RET_SUCCESS;
unsigned long cpu_id;

cpu_id = smccc_get_arg1(source_vcpu);
@@ -76,11 +77,15 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
*/
if (!vcpu)
return PSCI_RET_INVALID_PARAMS;
+
+ spin_lock(&vcpu->arch.mp_state_lock);
if (!kvm_arm_vcpu_stopped(vcpu)) {
if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
- return PSCI_RET_ALREADY_ON;
+ ret = PSCI_RET_ALREADY_ON;
else
- return PSCI_RET_INVALID_PARAMS;
+ ret = PSCI_RET_INVALID_PARAMS;
+
+ goto out_unlock;
}

reset_state = &vcpu->arch.reset_state;
@@ -96,7 +101,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
*/
reset_state->r0 = smccc_get_arg3(source_vcpu);

- WRITE_ONCE(reset_state->reset, true);
+ reset_state->reset = true;
kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);

/*
@@ -108,7 +113,9 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
kvm_vcpu_wake_up(vcpu);

- return PSCI_RET_SUCCESS;
+out_unlock:
+ spin_unlock(&vcpu->arch.mp_state_lock);
+ return ret;
}

static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
@@ -168,8 +175,11 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type, u64 flags)
* after this call is handled and before the VCPUs have been
* re-initialized.
*/
- kvm_for_each_vcpu(i, tmp, vcpu->kvm)
- tmp->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ spin_lock(&tmp->arch.mp_state_lock);
+ WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
+ spin_unlock(&tmp->arch.mp_state_lock);
+ }
kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
@@ -229,7 +239,6 @@ static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32

static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
- struct kvm *kvm = vcpu->kvm;
u32 psci_fn = smccc_get_function(vcpu);
unsigned long val;
int ret = 1;
@@ -254,9 +263,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
kvm_psci_narrow_to_32bit(vcpu);
fallthrough;
case PSCI_0_2_FN64_CPU_ON:
- mutex_lock(&kvm->lock);
val = kvm_psci_vcpu_on(vcpu);
- mutex_unlock(&kvm->lock);
break;
case PSCI_0_2_FN_AFFINITY_INFO:
kvm_psci_narrow_to_32bit(vcpu);
@@ -395,7 +402,6 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)

static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
- struct kvm *kvm = vcpu->kvm;
u32 psci_fn = smccc_get_function(vcpu);
unsigned long val;

@@ -405,9 +411,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
val = PSCI_RET_SUCCESS;
break;
case KVM_PSCI_FN_CPU_ON:
- mutex_lock(&kvm->lock);
val = kvm_psci_vcpu_on(vcpu);
- mutex_unlock(&kvm->lock);
break;
default:
val = PSCI_RET_NOT_SUPPORTED;
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 5ae18472205a..f9d070473614 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -200,7 +200,7 @@ static int kvm_set_vm_width(struct kvm_vcpu *vcpu)

is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);

- lockdep_assert_held(&kvm->lock);
+ lockdep_assert_held(&kvm->arch.config_lock);

if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
/*
@@ -253,17 +253,18 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
bool loaded;
u32 pstate;

- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.config_lock);
ret = kvm_set_vm_width(vcpu);
- if (!ret) {
- reset_state = vcpu->arch.reset_state;
- WRITE_ONCE(vcpu->arch.reset_state.reset, false);
- }
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);

if (ret)
return ret;

+ spin_lock(&vcpu->arch.mp_state_lock);
+ reset_state = vcpu->arch.reset_state;
+ vcpu->arch.reset_state.reset = false;
+ spin_unlock(&vcpu->arch.mp_state_lock);
+
/* Reset PMU outside of the non-preemptible section */
kvm_pmu_vcpu_reset(vcpu);

diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c
index 78cde687383c..07aa0437125a 100644
--- a/arch/arm64/kvm/vgic/vgic-debug.c
+++ b/arch/arm64/kvm/vgic/vgic-debug.c
@@ -85,7 +85,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
struct kvm *kvm = s->private;
struct vgic_state_iter *iter;

- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
iter = kvm->arch.vgic.iter;
if (iter) {
iter = ERR_PTR(-EBUSY);
@@ -104,7 +104,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
if (end_of_vgic(iter))
iter = NULL;
out:
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);
return iter;
}

@@ -132,12 +132,12 @@ static void vgic_debug_stop(struct seq_file *s, void *v)
if (IS_ERR(v))
return;

- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
iter = kvm->arch.vgic.iter;
kfree(iter->lpi_array);
kfree(iter);
kvm->arch.vgic.iter = NULL;
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);
}

static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index f6d4f4052555..8c1d2d7128db 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -74,9 +74,6 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
unsigned long i;
int ret;

- if (irqchip_in_kernel(kvm))
- return -EEXIST;
-
/*
* This function is also called by the KVM_CREATE_IRQCHIP handler,
* which had no chance yet to check the availability of the GICv2
@@ -87,10 +84,20 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
!kvm_vgic_global_state.can_emulate_gicv2)
return -ENODEV;

+ /* Must be held to avoid race with vCPU creation */
+ lockdep_assert_held(&kvm->lock);
+
ret = -EBUSY;
if (!lock_all_vcpus(kvm))
return ret;

+ mutex_lock(&kvm->arch.config_lock);
+
+ if (irqchip_in_kernel(kvm)) {
+ ret = -EEXIST;
+ goto out_unlock;
+ }
+
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu_has_run_once(vcpu))
goto out_unlock;
@@ -118,6 +125,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);

out_unlock:
+ mutex_unlock(&kvm->arch.config_lock);
unlock_all_vcpus(kvm);
return ret;
}
@@ -227,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
* KVM io device for the redistributor that belongs to this VCPU.
*/
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.config_lock);
ret = vgic_register_redist_iodev(vcpu);
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
}
return ret;
}
@@ -250,7 +258,6 @@ static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
* The function is generally called when nr_spis has been explicitly set
* by the guest through the KVM DEVICE API. If not nr_spis is set to 256.
* vgic_initialized() returns true when this function has succeeded.
- * Must be called with kvm->lock held!
*/
int vgic_init(struct kvm *kvm)
{
@@ -259,6 +266,8 @@ int vgic_init(struct kvm *kvm)
int ret = 0, i;
unsigned long idx;

+ lockdep_assert_held(&kvm->arch.config_lock);
+
if (vgic_initialized(kvm))
return 0;

@@ -373,12 +382,13 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
}

-/* To be called with kvm->lock held */
static void __kvm_vgic_destroy(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
unsigned long i;

+ lockdep_assert_held(&kvm->arch.config_lock);
+
vgic_debug_destroy(kvm);

kvm_for_each_vcpu(i, vcpu, kvm)
@@ -389,9 +399,9 @@ static void __kvm_vgic_destroy(struct kvm *kvm)

void kvm_vgic_destroy(struct kvm *kvm)
{
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
__kvm_vgic_destroy(kvm);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);
}

/**
@@ -414,9 +424,9 @@ int vgic_lazy_init(struct kvm *kvm)
if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
return -EBUSY;

- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
ret = vgic_init(kvm);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);
}

return ret;
@@ -441,7 +451,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
if (likely(vgic_ready(kvm)))
return 0;

- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
if (vgic_ready(kvm))
goto out;

@@ -459,7 +469,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
dist->ready = true;

out:
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);
return ret;
}

diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 733b53055f97..c9a03033d507 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -1958,6 +1958,16 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
mutex_init(&its->its_lock);
mutex_init(&its->cmd_lock);

+ /* Yep, even more trickery for lock ordering... */
+#ifdef CONFIG_LOCKDEP
+ mutex_lock(&dev->kvm->arch.config_lock);
+ mutex_lock(&its->cmd_lock);
+ mutex_lock(&its->its_lock);
+ mutex_unlock(&its->its_lock);
+ mutex_unlock(&its->cmd_lock);
+ mutex_unlock(&dev->kvm->arch.config_lock);
+#endif
+
its->vgic_its_base = VGIC_ADDR_UNDEF;

INIT_LIST_HEAD(&its->device_list);
@@ -2045,6 +2055,13 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,

mutex_lock(&dev->kvm->lock);

+ if (!lock_all_vcpus(dev->kvm)) {
+ mutex_unlock(&dev->kvm->lock);
+ return -EBUSY;
+ }
+
+ mutex_lock(&dev->kvm->arch.config_lock);
+
if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
ret = -ENXIO;
goto out;
@@ -2058,11 +2075,6 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
goto out;
}

- if (!lock_all_vcpus(dev->kvm)) {
- ret = -EBUSY;
- goto out;
- }
-
addr = its->vgic_its_base + offset;

len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
@@ -2076,8 +2088,9 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
} else {
*reg = region->its_read(dev->kvm, its, addr, len);
}
- unlock_all_vcpus(dev->kvm);
out:
+ mutex_unlock(&dev->kvm->arch.config_lock);
+ unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
return ret;
}
@@ -2749,14 +2762,15 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
return 0;

mutex_lock(&kvm->lock);
- mutex_lock(&its->its_lock);

if (!lock_all_vcpus(kvm)) {
- mutex_unlock(&its->its_lock);
mutex_unlock(&kvm->lock);
return -EBUSY;
}

+ mutex_lock(&kvm->arch.config_lock);
+ mutex_lock(&its->its_lock);
+
switch (attr) {
case KVM_DEV_ARM_ITS_CTRL_RESET:
vgic_its_reset(kvm, its);
@@ -2769,8 +2783,9 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
break;
}

- unlock_all_vcpus(kvm);
mutex_unlock(&its->its_lock);
+ mutex_unlock(&kvm->arch.config_lock);
+ unlock_all_vcpus(kvm);
mutex_unlock(&kvm->lock);
return ret;
}
diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
index edeac2380591..07e727023deb 100644
--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
+++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -46,7 +46,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
struct vgic_dist *vgic = &kvm->arch.vgic;
int r;

- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -68,7 +68,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
r = -ENODEV;
}

- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);

return r;
}
@@ -102,7 +102,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
if (get_user(addr, uaddr))
return -EFAULT;

- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.config_lock);
switch (attr->attr) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -191,7 +191,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
}

out:
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.config_lock);

if (!r && !write)
r = put_user(addr, uaddr);
@@ -227,7 +227,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
(val & 31))
return -EINVAL;

- mutex_lock(&dev->kvm->lock);
+ mutex_lock(&dev->kvm->arch.config_lock);

if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
ret = -EBUSY;
@@ -235,16 +235,16 @@ static int vgic_set_common_attr(struct kvm_device *dev,
dev->kvm->arch.vgic.nr_spis =
val - VGIC_NR_PRIVATE_IRQS;

- mutex_unlock(&dev->kvm->lock);
+ mutex_unlock(&dev->kvm->arch.config_lock);

return ret;
}
case KVM_DEV_ARM_VGIC_GRP_CTRL: {
switch (attr->attr) {
case KVM_DEV_ARM_VGIC_CTRL_INIT:
- mutex_lock(&dev->kvm->lock);
+ mutex_lock(&dev->kvm->arch.config_lock);
r = vgic_init(dev->kvm);
- mutex_unlock(&dev->kvm->lock);
+ mutex_unlock(&dev->kvm->arch.config_lock);
return r;
case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
/*
@@ -260,7 +260,10 @@ static int vgic_set_common_attr(struct kvm_device *dev,
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
+
+ mutex_lock(&dev->kvm->arch.config_lock);
r = vgic_v3_save_pending_tables(dev->kvm);
+ mutex_unlock(&dev->kvm->arch.config_lock);
unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
return r;
@@ -411,15 +414,17 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,

mutex_lock(&dev->kvm->lock);

+ if (!lock_all_vcpus(dev->kvm)) {
+ mutex_unlock(&dev->kvm->lock);
+ return -EBUSY;
+ }
+
+ mutex_lock(&dev->kvm->arch.config_lock);
+
ret = vgic_init(dev->kvm);
if (ret)
goto out;

- if (!lock_all_vcpus(dev->kvm)) {
- ret = -EBUSY;
- goto out;
- }
-
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
@@ -432,8 +437,9 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
break;
}

- unlock_all_vcpus(dev->kvm);
out:
+ mutex_unlock(&dev->kvm->arch.config_lock);
+ unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);

if (!ret && !is_write)
@@ -569,12 +575,14 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,

mutex_lock(&dev->kvm->lock);

- if (unlikely(!vgic_initialized(dev->kvm))) {
- ret = -EBUSY;
- goto out;
+ if (!lock_all_vcpus(dev->kvm)) {
+ mutex_unlock(&dev->kvm->lock);
+ return -EBUSY;
}

- if (!lock_all_vcpus(dev->kvm)) {
+ mutex_lock(&dev->kvm->arch.config_lock);
+
+ if (unlikely(!vgic_initialized(dev->kvm))) {
ret = -EBUSY;
goto out;
}
@@ -609,8 +617,9 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
break;
}

- unlock_all_vcpus(dev->kvm);
out:
+ mutex_unlock(&dev->kvm->arch.config_lock);
+ unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);

if (!ret && uaccess && !is_write) {
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index 91201f743033..472b18ac92a2 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -111,7 +111,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
case GICD_CTLR: {
bool was_enabled, is_hwsgi;

- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.config_lock);

was_enabled = dist->enabled;
is_hwsgi = dist->nassgireq;
@@ -139,7 +139,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
else if (!was_enabled && dist->enabled)
vgic_kick_vcpus(vcpu->kvm);

- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
break;
}
case GICD_TYPER:
diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c
index b32d434c1d4a..a95f99b93dd6 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio.c
@@ -527,13 +527,13 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
u32 val;

- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.config_lock);
vgic_access_active_prepare(vcpu, intid);

val = __vgic_mmio_read_active(vcpu, addr, len);

vgic_access_active_finish(vcpu, intid);
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);

return val;
}
@@ -622,13 +622,13 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.config_lock);
vgic_access_active_prepare(vcpu, intid);

__vgic_mmio_write_cactive(vcpu, addr, len, val);

vgic_access_active_finish(vcpu, intid);
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
@@ -659,13 +659,13 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.config_lock);
vgic_access_active_prepare(vcpu, intid);

__vgic_mmio_write_sactive(vcpu, addr, len, val);

vgic_access_active_finish(vcpu, intid);
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index a413718be92b..3bb003478060 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -232,9 +232,8 @@ int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
* @kvm: Pointer to the VM being initialized
*
* We may be called each time a vITS is created, or when the
- * vgic is initialized. This relies on kvm->lock to be
- * held. In both cases, the number of vcpus should now be
- * fixed.
+ * vgic is initialized. In both cases, the number of vcpus
+ * should now be fixed.
*/
int vgic_v4_init(struct kvm *kvm)
{
@@ -243,6 +242,8 @@ int vgic_v4_init(struct kvm *kvm)
int nr_vcpus, ret;
unsigned long i;

+ lockdep_assert_held(&kvm->arch.config_lock);
+
if (!kvm_vgic_global_state.has_gicv4)
return 0; /* Nothing to see here... move along. */

@@ -309,14 +310,14 @@ int vgic_v4_init(struct kvm *kvm)
/**
* vgic_v4_teardown - Free the GICv4 data structures
* @kvm: Pointer to the VM being destroyed
- *
- * Relies on kvm->lock to be held.
*/
void vgic_v4_teardown(struct kvm *kvm)
{
struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
int i;

+ lockdep_assert_held(&kvm->arch.config_lock);
+
if (!its_vm->vpes)
return;

diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index d97e6080b421..0a005da83ae6 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -24,11 +24,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
/*
* Locking order is always:
* kvm->lock (mutex)
- * its->cmd_lock (mutex)
- * its->its_lock (mutex)
- * vgic_cpu->ap_list_lock must be taken with IRQs disabled
- * kvm->lpi_list_lock must be taken with IRQs disabled
- * vgic_irq->irq_lock must be taken with IRQs disabled
+ * vcpu->mutex (mutex)
+ * kvm->arch.config_lock (mutex)
+ * its->cmd_lock (mutex)
+ * its->its_lock (mutex)
+ * vgic_cpu->ap_list_lock must be taken with IRQs disabled
+ * kvm->lpi_list_lock must be taken with IRQs disabled
+ * vgic_irq->irq_lock must be taken with IRQs disabled
*
* As the ap_list_lock might be taken from the timer interrupt handler,
* we have to disable IRQs before taking this lock and everything lower
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index bd3ba276e69c..03b632c56899 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -581,7 +581,7 @@ static int salinfo_cpu_pre_down(unsigned int cpu)
* 'data' contains an integer that corresponds to the feature we're
* testing
*/
-static int proc_salinfo_show(struct seq_file *m, void *v)
+static int __maybe_unused proc_salinfo_show(struct seq_file *m, void *v)
{
unsigned long data = (unsigned long)v;
seq_puts(m, (sal_platform_features & data) ? "1\n" : "0\n");
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 24901d809301..1e9eaa107eb7 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -77,7 +77,7 @@ void *per_cpu_init(void)
return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}

-static inline void
+static inline __init void
alloc_per_cpu_data(void)
{
size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index f993cb36c062..921db957d2e6 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -58,7 +58,7 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)

pgd = pgd_offset(mm, taddr);
if (pgd_present(*pgd)) {
- p4d = p4d_offset(pgd, addr);
+ p4d = p4d_offset(pgd, taddr);
if (p4d_present(*p4d)) {
pud = pud_offset(p4d, taddr);
if (pud_present(*pud)) {
diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c
index f24cbb4a39b5..892765b742bb 100644
--- a/arch/mips/fw/lib/cmdline.c
+++ b/arch/mips/fw/lib/cmdline.c
@@ -53,7 +53,7 @@ char *fw_getenv(char *envname)
{
char *result = NULL;

- if (_fw_envp != NULL) {
+ if (_fw_envp != NULL && fw_envp(0) != NULL) {
/*
* Return a pointer to the given environment variable.
* YAMON uses "name", "value" pairs, while U-Boot uses
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index 54a87bba35ca..a130c4dac48d 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -173,7 +173,6 @@ handler: ;\
l.sw PT_GPR28(r1),r28 ;\
l.sw PT_GPR29(r1),r29 ;\
/* r30 already save */ ;\
-/* l.sw PT_GPR30(r1),r30*/ ;\
l.sw PT_GPR31(r1),r31 ;\
TRACE_IRQS_OFF_ENTRY ;\
/* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\
@@ -211,9 +210,8 @@ handler: ;\
l.sw PT_GPR27(r1),r27 ;\
l.sw PT_GPR28(r1),r28 ;\
l.sw PT_GPR29(r1),r29 ;\
- /* r31 already saved */ ;\
- l.sw PT_GPR30(r1),r30 ;\
-/* l.sw PT_GPR31(r1),r31 */ ;\
+ /* r30 already saved */ ;\
+ l.sw PT_GPR31(r1),r31 ;\
/* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\
l.addi r30,r0,-1 ;\
l.sw PT_ORIG_GPR11(r1),r30 ;\
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 9a0018f1f42c..541370d14559 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -889,6 +889,7 @@ ENDPROC_CFI(flush_icache_page_asm)
ENTRY_CFI(flush_kernel_dcache_page_asm)
88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
+ depi_safe 0, 31,PAGE_SHIFT, %r26 /* Clear any offset bits */

#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
@@ -925,6 +926,7 @@ ENDPROC_CFI(flush_kernel_dcache_page_asm)
ENTRY_CFI(purge_kernel_dcache_page_asm)
88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
+ depi_safe 0, 31,PAGE_SHIFT, %r26 /* Clear any offset bits */

#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index 2b16d8d6598f..c37010a13586 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -248,9 +248,6 @@ ENTRY_CFI(real64_call_asm)
/* save fn */
copy %arg2, %r31

- /* set up the new ap */
- ldo 64(%arg1), %r29
-
/* load up the arg registers from the saved arg area */
/* 32-bit calling convention passes first 4 args in registers */
ldd 0*REG_SZ(%arg1), %arg0 /* note overwriting arg0 */
@@ -262,7 +259,9 @@ ENTRY_CFI(real64_call_asm)
ldd 7*REG_SZ(%arg1), %r19
ldd 1*REG_SZ(%arg1), %arg1 /* do this one last! */

+ /* set up real-mode stack and real-mode ap */
tophys_r1 %sp
+ ldo -16(%sp), %r29 /* Reference param save area */

b,l rfi_virt2real,%r2
nop
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 295f76df13b5..13fad4f0a6d8 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -34,6 +34,8 @@ endif

BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
+ $(call cc-option,-mno-prefixed) $(call cc-option,-mno-pcrel) \
+ $(call cc-option,-mno-mma) \
$(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \
-pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
$(LINUXINCLUDE)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 1e8b2e04e626..8fda87af2fa5 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1310,6 +1310,11 @@
#define PVR_VER_E500MC 0x8023
#define PVR_VER_E5500 0x8024
#define PVR_VER_E6500 0x8040
+#define PVR_VER_7450 0x8000
+#define PVR_VER_7455 0x8001
+#define PVR_VER_7447 0x8002
+#define PVR_VER_7447A 0x8003
+#define PVR_VER_7448 0x8004

/*
* For the 8xx processors, all of them report the same PVR family for
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 767ab166933b..f8d3caad4cf3 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -421,7 +421,7 @@ static char *__fetch_rtas_last_error(char *altbuf)
buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
}
if (buf)
- memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
+ memmove(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
}

return buf;
diff --git a/arch/powerpc/perf/mpc7450-pmu.c b/arch/powerpc/perf/mpc7450-pmu.c
index 552d51a925d3..db451b9aac35 100644
--- a/arch/powerpc/perf/mpc7450-pmu.c
+++ b/arch/powerpc/perf/mpc7450-pmu.c
@@ -417,9 +417,9 @@ struct power_pmu mpc7450_pmu = {

static int __init init_mpc7450_pmu(void)
{
- unsigned int pvr = mfspr(SPRN_PVR);
-
- if (PVR_VER(pvr) != PVR_7450)
+ if (!pvr_version_is(PVR_VER_7450) && !pvr_version_is(PVR_VER_7455) &&
+ !pvr_version_is(PVR_VER_7447) && !pvr_version_is(PVR_VER_7447A) &&
+ !pvr_version_is(PVR_VER_7448))
return -ENODEV;

return register_power_pmu(&mpc7450_pmu);
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
index 42abeba4f698..079cb3627eac 100644
--- a/arch/powerpc/platforms/512x/clock-commonclk.c
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -986,7 +986,7 @@ static void __init mpc5121_clk_provide_migration_support(void)

#define NODE_PREP do { \
of_address_to_resource(np, 0, &res); \
- snprintf(devname, sizeof(devname), "%08x.%s", res.start, np->name); \
+ snprintf(devname, sizeof(devname), "%pa.%s", &res.start, np->name); \
} while (0)

#define NODE_CHK(clkname, clkitem, regnode, regflag) do { \
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index 609bda2ad5dd..4d9200bdba78 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -145,7 +145,7 @@ static struct irq_domain * __init flipper_pic_init(struct device_node *np)
}
io_base = ioremap(res.start, resource_size(&res));

- pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base);
+ pr_info("controller at 0x%pa mapped to 0x%p\n", &res.start, io_base);

__flipper_quiesce(io_base);

diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 380b4285cce4..4d2d92de30af 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -171,7 +171,7 @@ static struct irq_domain *__init hlwd_pic_init(struct device_node *np)
return NULL;
}

- pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base);
+ pr_info("controller at 0x%pa mapped to 0x%p\n", &res.start, io_base);

__hlwd_quiesce(io_base);

diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index f4e654a9d4ff..219659f2ede0 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -74,8 +74,8 @@ static void __iomem *__init wii_ioremap_hw_regs(char *name, char *compatible)

hw_regs = ioremap(res.start, resource_size(&res));
if (hw_regs) {
- pr_info("%s at 0x%08x mapped to 0x%p\n", name,
- res.start, hw_regs);
+ pr_info("%s at 0x%pa mapped to 0x%p\n", name,
+ &res.start, hw_regs);
}

out_put:
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 5af4c35ff584..0e42f7bad7db 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -217,9 +217,8 @@ int __init tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary)

(hose)->ops = &tsi108_direct_pci_ops;

- printk(KERN_INFO "Found tsi108 PCI host bridge at 0x%08x. "
- "Firmware bus number: %d->%d\n",
- rsrc.start, hose->first_busno, hose->last_busno);
+ pr_info("Found tsi108 PCI host bridge at 0x%pa. Firmware bus number: %d->%d\n",
+ &rsrc.start, hose->first_busno, hose->last_busno);

/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 2a0ef738695e..9baddaee5623 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -293,7 +293,7 @@ int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size,
unsigned long asid);
-int sbi_probe_extension(int ext);
+long sbi_probe_extension(int ext);

/* Check if current SBI specification version is 0.1 or not */
static inline int sbi_spec_is_0_1(void)
diff --git a/arch/riscv/kernel/cpu_ops.c b/arch/riscv/kernel/cpu_ops.c
index 8275f237a59d..eb479a88a954 100644
--- a/arch/riscv/kernel/cpu_ops.c
+++ b/arch/riscv/kernel/cpu_ops.c
@@ -27,7 +27,7 @@ const struct cpu_operations cpu_ops_spinwait = {
void __init cpu_set_ops(int cpuid)
{
#if IS_ENABLED(CONFIG_RISCV_SBI)
- if (sbi_probe_extension(SBI_EXT_HSM) > 0) {
+ if (sbi_probe_extension(SBI_EXT_HSM)) {
if (!cpuid)
pr_info("SBI HSM extension detected\n");
cpu_ops[cpuid] = &cpu_ops_sbi;
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index 775d3322b422..5238026f7c0d 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -581,19 +581,18 @@ static void sbi_srst_power_off(void)
* sbi_probe_extension() - Check if an SBI extension ID is supported or not.
* @extid: The extension ID to be probed.
*
- * Return: Extension specific nonzero value f yes, -ENOTSUPP otherwise.
+ * Return: 1 or an extension specific nonzero value if yes, 0 otherwise.
*/
-int sbi_probe_extension(int extid)
+long sbi_probe_extension(int extid)
{
struct sbiret ret;

ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid,
0, 0, 0, 0, 0);
if (!ret.error)
- if (ret.value)
- return ret.value;
+ return ret.value;

- return -ENOTSUPP;
+ return 0;
}
EXPORT_SYMBOL(sbi_probe_extension);

@@ -662,26 +661,26 @@ void __init sbi_init(void)
if (!sbi_spec_is_0_1()) {
pr_info("SBI implementation ID=0x%lx Version=0x%lx\n",
sbi_get_firmware_id(), sbi_get_firmware_version());
- if (sbi_probe_extension(SBI_EXT_TIME) > 0) {
+ if (sbi_probe_extension(SBI_EXT_TIME)) {
__sbi_set_timer = __sbi_set_timer_v02;
pr_info("SBI TIME extension detected\n");
} else {
__sbi_set_timer = __sbi_set_timer_v01;
}
- if (sbi_probe_extension(SBI_EXT_IPI) > 0) {
+ if (sbi_probe_extension(SBI_EXT_IPI)) {
__sbi_send_ipi = __sbi_send_ipi_v02;
pr_info("SBI IPI extension detected\n");
} else {
__sbi_send_ipi = __sbi_send_ipi_v01;
}
- if (sbi_probe_extension(SBI_EXT_RFENCE) > 0) {
+ if (sbi_probe_extension(SBI_EXT_RFENCE)) {
__sbi_rfence = __sbi_rfence_v02;
pr_info("SBI RFENCE extension detected\n");
} else {
__sbi_rfence = __sbi_rfence_v01;
}
if ((sbi_spec_version >= sbi_mk_version(0, 3)) &&
- (sbi_probe_extension(SBI_EXT_SRST) > 0)) {
+ sbi_probe_extension(SBI_EXT_SRST)) {
pr_info("SBI SRST extension detected\n");
pm_power_off = sbi_srst_power_off;
sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot;
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index df2d8716851f..62eedad7ec09 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -84,7 +84,7 @@ int kvm_arch_init(void *opaque)
return -ENODEV;
}

- if (sbi_probe_extension(SBI_EXT_RFENCE) <= 0) {
+ if (!sbi_probe_extension(SBI_EXT_RFENCE)) {
kvm_info("require SBI RFENCE extension\n");
return -ENODEV;
}
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 3620ecac2fa1..e47aeb6f05a6 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -630,6 +630,13 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
unsigned long vma_pagesize, mmu_seq;

+ /* We need minimum second+third level pages */
+ ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
+ if (ret) {
+ kvm_err("Failed to topup G-stage cache\n");
+ return ret;
+ }
+
mmap_read_lock(current->mm);

vma = find_vma_intersection(current->mm, hva, hva + 1);
@@ -650,6 +657,15 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
if (vma_pagesize == PMD_SIZE || vma_pagesize == PGDIR_SIZE)
gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;

+ /*
+ * Read mmu_invalidate_seq so that KVM can detect if the results of
+ * vma_lookup() or gfn_to_pfn_prot() become stale prior to acquiring
+ * kvm->mmu_lock.
+ *
+ * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
+ * with the smp_wmb() in kvm_mmu_invalidate_end().
+ */
+ mmu_seq = kvm->mmu_invalidate_seq;
mmap_read_unlock(current->mm);

if (vma_pagesize != PGDIR_SIZE &&
@@ -659,15 +675,6 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
return -EFAULT;
}

- /* We need minimum second+third level pages */
- ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
- if (ret) {
- kvm_err("Failed to topup G-stage cache\n");
- return ret;
- }
-
- mmu_seq = kvm->mmu_invalidate_seq;
-
hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
if (hfn == KVM_PFN_ERR_HWPOISON) {
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 6f47ced3175b..6315a3c94225 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -842,8 +842,7 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
* this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
* entry.
*/
-static void __init create_fdt_early_page_table(pgd_t *pgdir,
- uintptr_t fix_fdt_va,
+static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
uintptr_t dtb_pa)
{
uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
@@ -1033,8 +1032,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
create_kernel_page_table(early_pg_dir, true);

/* Setup early mapping for FDT early scan */
- create_fdt_early_page_table(early_pg_dir,
- __fix_to_virt(FIX_FDT), dtb_pa);
+ create_fdt_early_page_table(__fix_to_virt(FIX_FDT), dtb_pa);

/*
* Bootime fixmap only can handle PMD_SIZE mapping. Thus, boot-ioremap
diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
index 830e7de65e3a..20a9f991a6d7 100644
--- a/arch/riscv/mm/ptdump.c
+++ b/arch/riscv/mm/ptdump.c
@@ -59,10 +59,6 @@ struct ptd_mm_info {
};

enum address_markers_idx {
-#ifdef CONFIG_KASAN
- KASAN_SHADOW_START_NR,
- KASAN_SHADOW_END_NR,
-#endif
FIXMAP_START_NR,
FIXMAP_END_NR,
PCI_IO_START_NR,
@@ -74,6 +70,10 @@ enum address_markers_idx {
VMALLOC_START_NR,
VMALLOC_END_NR,
PAGE_OFFSET_NR,
+#ifdef CONFIG_KASAN
+ KASAN_SHADOW_START_NR,
+ KASAN_SHADOW_END_NR,
+#endif
#ifdef CONFIG_64BIT
MODULES_MAPPING_NR,
KERNEL_MAPPING_NR,
@@ -82,10 +82,6 @@ enum address_markers_idx {
};

static struct addr_marker address_markers[] = {
-#ifdef CONFIG_KASAN
- {0, "Kasan shadow start"},
- {0, "Kasan shadow end"},
-#endif
{0, "Fixmap start"},
{0, "Fixmap end"},
{0, "PCI I/O start"},
@@ -97,6 +93,10 @@ static struct addr_marker address_markers[] = {
{0, "vmalloc() area"},
{0, "vmalloc() end"},
{0, "Linear mapping"},
+#ifdef CONFIG_KASAN
+ {0, "Kasan shadow start"},
+ {0, "Kasan shadow end"},
+#endif
#ifdef CONFIG_64BIT
{0, "Modules/BPF mapping"},
{0, "Kernel mapping"},
@@ -362,10 +362,6 @@ static int __init ptdump_init(void)
{
unsigned int i, j;

-#ifdef CONFIG_KASAN
- address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
- address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
-#endif
address_markers[FIXMAP_START_NR].start_address = FIXADDR_START;
address_markers[FIXMAP_END_NR].start_address = FIXADDR_TOP;
address_markers[PCI_IO_START_NR].start_address = PCI_IO_START;
@@ -377,6 +373,10 @@ static int __init ptdump_init(void)
address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET;
+#ifdef CONFIG_KASAN
+ address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
+ address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
+#endif
#ifdef CONFIG_64BIT
address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR;
address_markers[KERNEL_MAPPING_NR].start_address = kernel_map.virt_addr;
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index a76b94e41e91..8ddfe9989f5f 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -382,7 +382,7 @@ static int __init sq_api_init(void)
if (unlikely(!sq_cache))
return ret;

- sq_bitmap = kzalloc(size, GFP_KERNEL);
+ sq_bitmap = kcalloc(size, sizeof(long), GFP_KERNEL);
if (unlikely(!sq_bitmap))
goto out;

diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index cbaf174d8efd..b3af2d45bbbb 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -125,6 +125,8 @@

#define INTEL_FAM6_LUNARLAKE_M 0xBD

+#define INTEL_FAM6_ARROWLAKE 0xC6
+
/* "Small Core" Processors (Atom/E-Core) */

#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 20d9a604da7c..770557110051 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -422,10 +422,9 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
if (vector && !eilvt_entry_is_changeable(vector, new))
/* may not change if vectors are different */
return rsvd;
- rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
- } while (rsvd != new);
+ } while (!atomic_try_cmpxchg(&eilvt_offsets[offset], &rsvd, new));

- rsvd &= ~APIC_EILVT_MASKED;
+ rsvd = new & ~APIC_EILVT_MASKED;
if (rsvd && rsvd != vector)
pr_info("LVT offset %d assigned for vector 0x%02x\n",
offset, rsvd);
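
The conversion above relies on atomic_try_cmpxchg() returning a bool and, on failure, writing the value it observed back through its second argument, so the loop no longer needs to capture the return value of atomic_cmpxchg(); once the loop exits, the slot is known to hold "new", hence the "rsvd = new & ~APIC_EILVT_MASKED" line. A minimal userspace sketch of the same pattern with C11 atomics (illustrative only, names made up):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned int slot;

static bool reserve(unsigned int new)
{
        unsigned int expected = atomic_load(&slot);

        do {
                if (expected && expected != new)
                        return false;   /* already taken by someone else */
                /* On failure, compare_exchange writes the current value
                 * into 'expected', so no explicit re-read is needed. */
        } while (!atomic_compare_exchange_weak(&slot, &expected, new));

        return true;
}

int main(void)
{
        printf("%d\n", reserve(5));     /* 1: slot was free */
        printf("%d\n", reserve(7));     /* 0: slot already holds 5 */
        return 0;
}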
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index a868b76cd3d4..efa87b6bb1cd 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2480,17 +2480,21 @@ static int io_apic_get_redir_entries(int ioapic)

unsigned int arch_dynirq_lower_bound(unsigned int from)
{
+ unsigned int ret;
+
/*
* dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
* gsi_top if ioapic_dynirq_base hasn't been initialized yet.
*/
- if (!ioapic_initialized)
- return gsi_top;
+ ret = ioapic_dynirq_base ? : gsi_top;
+
/*
- * For DT enabled machines ioapic_dynirq_base is irrelevant and not
- * updated. So simply return @from if ioapic_dynirq_base == 0.
+ * For DT enabled machines ioapic_dynirq_base is irrelevant and
+ * always 0. gsi_top can be 0 if there is no IO/APIC registered.
+ * 0 is an invalid interrupt number for dynamic allocations. Return
+ * @from instead.
*/
- return ioapic_dynirq_base ? : from;
+ return ret ? : from;
}

#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 10fb5b5c9efa..5518272061bf 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -235,10 +235,10 @@ static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
* A list of the banks enabled on each logical CPU. Controls which respective
* descriptors to initialize later in mce_threshold_create_device().
*/
-static DEFINE_PER_CPU(unsigned int, bank_map);
+static DEFINE_PER_CPU(u64, bank_map);

/* Map of banks that have more than MCA_MISC0 available. */
-static DEFINE_PER_CPU(u32, smca_misc_banks_map);
+static DEFINE_PER_CPU(u64, smca_misc_banks_map);

static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);
@@ -267,7 +267,7 @@ static void smca_set_misc_banks_map(unsigned int bank, unsigned int cpu)
return;

if (low & MASK_BLKPTR_LO)
- per_cpu(smca_misc_banks_map, cpu) |= BIT(bank);
+ per_cpu(smca_misc_banks_map, cpu) |= BIT_ULL(bank);

}

@@ -528,7 +528,7 @@ static u32 smca_get_block_address(unsigned int bank, unsigned int block,
if (!block)
return MSR_AMD64_SMCA_MCx_MISC(bank);

- if (!(per_cpu(smca_misc_banks_map, cpu) & BIT(bank)))
+ if (!(per_cpu(smca_misc_banks_map, cpu) & BIT_ULL(bank)))
return 0;

return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
@@ -572,7 +572,7 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
int new;

if (!block)
- per_cpu(bank_map, cpu) |= (1 << bank);
+ per_cpu(bank_map, cpu) |= BIT_ULL(bank);

memset(&b, 0, sizeof(b));
b.cpu = cpu;
@@ -884,7 +884,7 @@ static void amd_threshold_interrupt(void)
return;

for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
- if (!(per_cpu(bank_map, cpu) & (1 << bank)))
+ if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank)))
continue;

first_block = bp[bank]->blocks;
@@ -1362,7 +1362,7 @@ int mce_threshold_create_device(unsigned int cpu)
return -ENOMEM;

for (bank = 0; bank < numbanks; ++bank) {
- if (!(this_cpu_read(bank_map) & (1 << bank)))
+ if (!(this_cpu_read(bank_map) & BIT_ULL(bank)))
continue;
err = threshold_create_bank(bp, cpu, bank);
if (err) {
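
The widening above is what lets more than 32 MCA banks per CPU be tracked: the per-CPU maps become u64 and every mask is built with BIT_ULL(), which is always 64 bits wide, instead of BIT() or the open-coded "1 << bank", which cannot represent banks at or above 32 in a 32-bit map (and, for the plain int shift, is undefined behaviour from bank 31 upwards). A tiny standalone illustration (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int bank = 33;

        uint64_t wide = 1ULL << bank;           /* BIT_ULL(bank): 0x200000000 */
        /* uint32_t narrow = 1 << bank; */      /* undefined: shift count
                                                   exceeds the width of int  */
        printf("0x%llx\n", (unsigned long long)wide);
        return 0;
}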
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 831613959a92..34d9e899e471 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -291,12 +291,16 @@ static void __init ms_hyperv_init_platform(void)
* To mirror what Windows does we should extract CPU management
* features and use the ReservedIdentityBit to detect if Linux is the
* root partition. But that requires negotiating CPU management
- * interface (a process to be finalized).
+ * interface (a process to be finalized). For now, use the privilege
+ * flag as the indicator for running as root.
*
- * For now, use the privilege flag as the indicator for running as
- * root.
+ * Hyper-V should never specify running as root and as a Confidential
+ * VM. But to protect against a compromised/malicious Hyper-V trying
+ * to exploit root behavior to expose Confidential VM memory, ignore
+ * the root partition setting if also a Confidential VM.
*/
- if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_CPU_MANAGEMENT) {
+ if ((ms_hyperv.priv_high & HV_CPU_MANAGEMENT) &&
+ !(ms_hyperv.priv_high & HV_ISOLATION)) {
hv_root_partition = true;
pr_info("Hyper-V: running as root partition\n");
}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index bc868958e91f..4c9116d223df 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7830,6 +7830,21 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
break;

+ case x86_intercept_pause:
+ /*
+ * PAUSE is a single-byte NOP with a REPE prefix, i.e. collides
+ * with vanilla NOPs in the emulator. Apply the interception
+ * check only to actual PAUSE instructions. Don't check
+ * PAUSE-loop-exiting, software can't expect a given PAUSE to
+ * exit, i.e. KVM is within its rights to allow L2 to execute
+ * the PAUSE.
+ */
+ if ((info->rep_prefix != REPE_PREFIX) ||
+ !nested_cpu_has2(vmcs12, CPU_BASED_PAUSE_EXITING))
+ return X86EMUL_CONTINUE;
+
+ break;
+
/* TODO: check more intercepts... */
default:
break;
diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h
index e6818ffaddbf..8ac5597dc69c 100644
--- a/block/blk-crypto-internal.h
+++ b/block/blk-crypto-internal.h
@@ -65,6 +65,23 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
return rq->crypt_ctx;
}

+static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
+{
+ return rq->crypt_keyslot;
+}
+
+blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ struct blk_crypto_keyslot **slot_ptr);
+
+void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot);
+
+int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key);
+
+bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
+ const struct blk_crypto_config *cfg);
+
#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline int blk_crypto_sysfs_register(struct request_queue *q)
@@ -105,6 +122,11 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
return false;
}

+static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
+{
+ return false;
+}
+
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
@@ -139,14 +161,21 @@ static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
return true;
}

-blk_status_t __blk_crypto_init_request(struct request *rq);
-static inline blk_status_t blk_crypto_init_request(struct request *rq)
+blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
+static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
{
if (blk_crypto_rq_is_encrypted(rq))
- return __blk_crypto_init_request(rq);
+ return __blk_crypto_rq_get_keyslot(rq);
return BLK_STS_OK;
}

+void __blk_crypto_rq_put_keyslot(struct request *rq);
+static inline void blk_crypto_rq_put_keyslot(struct request *rq)
+{
+ if (blk_crypto_rq_has_keyslot(rq))
+ __blk_crypto_rq_put_keyslot(rq);
+}
+
void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
@@ -185,7 +214,7 @@ static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
{

if (blk_crypto_rq_is_encrypted(rq))
- return blk_crypto_init_request(rq);
+ return blk_crypto_rq_get_keyslot(rq);
return BLK_STS_OK;
}

diff --git a/block/blk-crypto-profile.c b/block/blk-crypto-profile.c
index 96c511967386..3290c03c9918 100644
--- a/block/blk-crypto-profile.c
+++ b/block/blk-crypto-profile.c
@@ -32,6 +32,7 @@
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
+#include "blk-crypto-internal.h"

struct blk_crypto_keyslot {
atomic_t slot_refs;
@@ -353,28 +354,16 @@ bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
return true;
}

-/**
- * __blk_crypto_evict_key() - Evict a key from a device.
- * @profile: the crypto profile of the device
- * @key: the key to evict. It must not still be used in any I/O.
- *
- * If the device has keyslots, this finds the keyslot (if any) that contains the
- * specified key and calls the driver's keyslot_evict function to evict it.
- *
- * Otherwise, this just calls the driver's keyslot_evict function if it is
- * implemented, passing just the key (without any particular keyslot). This
- * allows layered devices to evict the key from their underlying devices.
- *
- * Context: Process context. Takes and releases profile->lock.
- * Return: 0 on success or if there's no keyslot with the specified key, -EBUSY
- * if the keyslot is still in use, or another -errno value on other
- * error.
+/*
+ * This is an internal function that evicts a key from an inline encryption
+ * device that can be either a real device or the blk-crypto-fallback "device".
+ * It is used only by blk_crypto_evict_key(); see that function for details.
*/
int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key)
{
struct blk_crypto_keyslot *slot;
- int err = 0;
+ int err;

if (profile->num_slots == 0) {
if (profile->ll_ops.keyslot_evict) {
@@ -388,22 +377,30 @@ int __blk_crypto_evict_key(struct blk_crypto_profile *profile,

blk_crypto_hw_enter(profile);
slot = blk_crypto_find_keyslot(profile, key);
- if (!slot)
- goto out_unlock;
+ if (!slot) {
+ /*
+ * Not an error, since a key not in use by I/O is not guaranteed
+ * to be in a keyslot. There can be more keys than keyslots.
+ */
+ err = 0;
+ goto out;
+ }

if (WARN_ON_ONCE(atomic_read(&slot->slot_refs) != 0)) {
+ /* BUG: key is still in use by I/O */
err = -EBUSY;
- goto out_unlock;
+ goto out_remove;
}
err = profile->ll_ops.keyslot_evict(profile, key,
blk_crypto_keyslot_index(slot));
- if (err)
- goto out_unlock;
-
+out_remove:
+ /*
+ * Callers free the key even on error, so unlink the key from the hash
+ * table and clear slot->key even on error.
+ */
hlist_del(&slot->hash_node);
slot->key = NULL;
- err = 0;
-out_unlock:
+out:
blk_crypto_hw_exit(profile);
return err;
}
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
index a496aaef85ba..6733286d506f 100644
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -13,6 +13,7 @@
#include <linux/blkdev.h>
#include <linux/blk-crypto-profile.h>
#include <linux/module.h>
+#include <linux/ratelimit.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"
@@ -218,27 +219,27 @@ static bool bio_crypt_check_alignment(struct bio *bio)
return true;
}

-blk_status_t __blk_crypto_init_request(struct request *rq)
+blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
{
return blk_crypto_get_keyslot(rq->q->crypto_profile,
rq->crypt_ctx->bc_key,
&rq->crypt_keyslot);
}

-/**
- * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
- *
- * @rq: The request whose crypto fields to uninitialize.
- *
- * Completely uninitializes the crypto fields of a request. If a keyslot has
- * been programmed into some inline encryption hardware, that keyslot is
- * released. The rq->crypt_ctx is also freed.
- */
-void __blk_crypto_free_request(struct request *rq)
+void __blk_crypto_rq_put_keyslot(struct request *rq)
{
blk_crypto_put_keyslot(rq->crypt_keyslot);
+ rq->crypt_keyslot = NULL;
+}
+
+void __blk_crypto_free_request(struct request *rq)
+{
+ /* The keyslot, if one was needed, should have been released earlier. */
+ if (WARN_ON_ONCE(rq->crypt_keyslot))
+ __blk_crypto_rq_put_keyslot(rq);
+
mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
- blk_crypto_rq_set_defaults(rq);
+ rq->crypt_ctx = NULL;
}

/**
@@ -267,7 +268,6 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
struct bio *bio = *bio_ptr;
const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;
- struct blk_crypto_profile *profile;

/* Error if bio has no data. */
if (WARN_ON_ONCE(!bio_has_data(bio))) {
@@ -284,10 +284,9 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
* Success if device supports the encryption context, or if we succeeded
* in falling back to the crypto API.
*/
- profile = bdev_get_queue(bio->bi_bdev)->crypto_profile;
- if (__blk_crypto_cfg_supported(profile, &bc_key->crypto_cfg))
+ if (blk_crypto_config_supported_natively(bio->bi_bdev,
+ &bc_key->crypto_cfg))
return true;
-
if (blk_crypto_fallback_bio_prep(bio_ptr))
return true;
fail:
@@ -352,22 +351,29 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
return 0;
}

+bool blk_crypto_config_supported_natively(struct block_device *bdev,
+ const struct blk_crypto_config *cfg)
+{
+ return __blk_crypto_cfg_supported(bdev_get_queue(bdev)->crypto_profile,
+ cfg);
+}
+
/*
* Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
- * request queue it's submitted to supports inline crypto, or the
+ * block_device it's submitted to supports inline crypto, or the
* blk-crypto-fallback is enabled and supports the cfg).
*/
-bool blk_crypto_config_supported(struct request_queue *q,
+bool blk_crypto_config_supported(struct block_device *bdev,
const struct blk_crypto_config *cfg)
{
return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
- __blk_crypto_cfg_supported(q->crypto_profile, cfg);
+ blk_crypto_config_supported_natively(bdev, cfg);
}

/**
* blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
+ * @bdev: block device to operate on
* @key: A key to use on the device
- * @q: the request queue for the device
*
* Upper layers must call this function to ensure that either the hardware
* supports the key's crypto settings, or the crypto API fallback has transforms
@@ -379,37 +385,48 @@ bool blk_crypto_config_supported(struct request_queue *q,
* blk-crypto-fallback is either disabled or the needed algorithm
* is disabled in the crypto API; or another -errno code.
*/
-int blk_crypto_start_using_key(const struct blk_crypto_key *key,
- struct request_queue *q)
+int blk_crypto_start_using_key(struct block_device *bdev,
+ const struct blk_crypto_key *key)
{
- if (__blk_crypto_cfg_supported(q->crypto_profile, &key->crypto_cfg))
+ if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
return 0;
return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}

/**
- * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
- * it may have been programmed into
- * @q: The request queue who's associated inline encryption hardware this key
- * might have been programmed into
- * @key: The key to evict
+ * blk_crypto_evict_key() - Evict a blk_crypto_key from a block_device
+ * @bdev: a block_device on which I/O using the key may have been done
+ * @key: the key to evict
+ *
+ * For a given block_device, this function removes the given blk_crypto_key from
+ * the keyslot management structures and evicts it from any underlying hardware
+ * keyslot(s) or blk-crypto-fallback keyslot it may have been programmed into.
*
- * Upper layers (filesystems) must call this function to ensure that a key is
- * evicted from any hardware that it might have been programmed into. The key
- * must not be in use by any in-flight IO when this function is called.
+ * Upper layers must call this before freeing the blk_crypto_key. It must be
+ * called for every block_device the key may have been used on. The key must no
+ * longer be in use by any I/O when this function is called.
*
- * Return: 0 on success or if the key wasn't in any keyslot; -errno on error.
+ * Context: May sleep.
*/
-int blk_crypto_evict_key(struct request_queue *q,
- const struct blk_crypto_key *key)
+void blk_crypto_evict_key(struct block_device *bdev,
+ const struct blk_crypto_key *key)
{
- if (__blk_crypto_cfg_supported(q->crypto_profile, &key->crypto_cfg))
- return __blk_crypto_evict_key(q->crypto_profile, key);
+ struct request_queue *q = bdev_get_queue(bdev);
+ int err;

+ if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
+ err = __blk_crypto_evict_key(q->crypto_profile, key);
+ else
+ err = blk_crypto_fallback_evict_key(key);
/*
- * If the request_queue didn't support the key, then blk-crypto-fallback
- * may have been used, so try to evict the key from blk-crypto-fallback.
+ * An error can only occur here if the key failed to be evicted from a
+ * keyslot (due to a hardware or driver issue) or is allegedly still in
+ * use by I/O (due to a kernel bug). Even in these cases, the key is
+ * still unlinked from the keyslot management structures, and the caller
+ * is allowed and expected to free it right away. There's nothing
+ * callers can do to handle errors, so just log them and return void.
*/
- return blk_crypto_fallback_evict_key(key);
+ if (err)
+ pr_warn_ratelimited("%pg: error %d evicting key\n", bdev, err);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
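
Taken together, the blk-crypto hunks above move the caller-facing API from request_queue to block_device and turn key eviction into a void, log-on-failure operation. A minimal sketch of how an upper layer would now drive it, assuming 'bdev' and an initialized 'key' come from elsewhere (error handling trimmed, illustrative only):

static int example_use_key(struct block_device *bdev,
                           struct blk_crypto_key *key)
{
        int err;

        err = blk_crypto_start_using_key(bdev, key);
        if (err)
                return err;

        /* ... submit encrypted bios against bdev ... */

        /* After the last I/O has completed: eviction no longer returns an
         * error, and the caller may free the key right afterwards. */
        blk_crypto_evict_key(bdev, key);
        return 0;
}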
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index bfc33fa9a063..00d59d2288f0 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -232,7 +232,9 @@ enum {

/* 1/64k is granular enough and can easily be handled w/ u32 */
WEIGHT_ONE = 1 << 16,
+};

+enum {
/*
* As vtime is used to calculate the cost of each IO, it needs to
* be fairly high precision. For example, it should be able to
@@ -256,6 +258,11 @@ enum {
VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
VRATE_CLAMP_ADJ_PCT = 4,

+ /* switch iff the conditions are met for longer than this */
+ AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
+};
+
+enum {
/* if IOs end up waiting for requests, issue less */
RQ_WAIT_BUSY_PCT = 5,

@@ -294,9 +301,6 @@ enum {
/* don't let cmds which take a very long time pin lagging for too long */
MAX_LAGGING_PERIODS = 10,

- /* switch iff the conditions are met for longer than this */
- AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
-
/*
* Count IO size in 4k pages. The 12bit shift helps keeping
* size-proportional components of cost calculation in closer
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 17ac532105a9..cc7f6a4a255c 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -863,6 +863,8 @@ static struct request *attempt_merge(struct request_queue *q,
if (!blk_discard_mergable(req))
elv_merge_requests(q, req, next);

+ blk_crypto_rq_put_keyslot(next);
+
/*
* 'next' is going away, so update stats accordingly
*/
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1b04a1c48ee5..1ab41fbca094 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -808,6 +808,12 @@ static void blk_complete_request(struct request *req)
req->q->integrity.profile->complete_fn(req, total_bytes);
#endif

+ /*
+ * Upper layers may call blk_crypto_evict_key() anytime after the last
+ * bio_endio(). Therefore, the keyslot must be released before that.
+ */
+ blk_crypto_rq_put_keyslot(req);
+
blk_account_io_completion(req, total_bytes);

do {
@@ -873,6 +879,13 @@ bool blk_update_request(struct request *req, blk_status_t error,
req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

+ /*
+ * Upper layers may call blk_crypto_evict_key() anytime after the last
+ * bio_endio(). Therefore, the keyslot must be released before that.
+ */
+ if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
+ __blk_crypto_rq_put_keyslot(req);
+
if (unlikely(error && !blk_rq_is_passthrough(req) &&
!(req->rq_flags & RQF_QUIET)) &&
!test_bit(GD_DEAD, &req->q->disk->state)) {
@@ -1300,7 +1313,7 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head)
* device, directly accessing the plug instead of using blk_mq_plug()
* should not have any consequences.
*/
- if (current->plug)
+ if (current->plug && !at_head)
blk_add_rq_to_plug(current->plug, rq);
else
blk_mq_sched_insert_request(rq, at_head, true, false);
@@ -2955,7 +2968,7 @@ void blk_mq_submit_bio(struct bio *bio)

blk_mq_bio_to_request(rq, bio, nr_segs);

- ret = blk_crypto_init_request(rq);
+ ret = blk_crypto_rq_get_keyslot(rq);
if (ret != BLK_STS_OK) {
bio->bi_status = ret;
bio_endio(bio);
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 2ea01b5c1aca..da9407b7d4ab 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -189,7 +189,7 @@ void blk_stat_disable_accounting(struct request_queue *q)
unsigned long flags;

spin_lock_irqsave(&q->stats->lock, flags);
- if (!--q->stats->accounting)
+ if (!--q->stats->accounting && list_empty(&q->stats->callbacks))
blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
spin_unlock_irqrestore(&q->stats->lock, flags);
}
@@ -200,7 +200,7 @@ void blk_stat_enable_accounting(struct request_queue *q)
unsigned long flags;

spin_lock_irqsave(&q->stats->lock, flags);
- if (!q->stats->accounting++)
+ if (!q->stats->accounting++ && list_empty(&q->stats->callbacks))
blk_queue_flag_set(QUEUE_FLAG_STATS, q);
spin_unlock_irqrestore(&q->stats->lock, flags);
}
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 5c69ff8e8fa5..c72622f20f52 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -472,7 +472,9 @@ void crypto_unregister_alg(struct crypto_alg *alg)
if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
return;

- BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
+ if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
+ return;
+
if (alg->cra_destroy)
alg->cra_destroy(alg);

diff --git a/crypto/drbg.c b/crypto/drbg.c
index 982d4ca4526d..ff4ebbc68efa 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1546,7 +1546,7 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
const int err = PTR_ERR(drbg->jent);

drbg->jent = NULL;
- if (fips_enabled || err != -ENOENT)
+ if (fips_enabled)
return err;
pr_info("DRBG: Continuing without Jitter RNG\n");
}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 3b6146b1e25c..a16b7de73d16 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -586,6 +586,7 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device,
acpi_remove_notify_handler(device->handle, type,
acpi_notify_device);
}
+ acpi_os_wait_events_complete();
}

/* Handle events targeting \_SB device (at present only graceful shutdown) */
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index f2588aba8421..aea8c994caea 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -23,6 +23,7 @@

#define pr_fmt(fmt) "ACPI: PM: " fmt

+#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -1022,6 +1023,21 @@ void acpi_resume_power_resources(void)
}
#endif

+static const struct dmi_system_id dmi_leave_unused_power_resources_on[] = {
+ {
+ /*
+ * The Toshiba Click Mini has a CPR3 power-resource which must
+ * be on for the touchscreen to work, but which is not in any
+ * _PR? lists. The other 2 affected power-resources are no-ops.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE Click Mini L9W-B"),
+ },
+ },
+ {}
+};
+
/**
* acpi_turn_off_unused_power_resources - Turn off power resources not in use.
*/
@@ -1029,6 +1045,9 @@ void acpi_turn_off_unused_power_resources(void)
{
struct acpi_power_resource *resource;

+ if (dmi_check_system(dmi_leave_unused_power_resources_on))
+ return;
+
mutex_lock(&power_resource_list_lock);

list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
index 8c3f82c9fff3..18fb04523f93 100644
--- a/drivers/acpi/processor_pdc.c
+++ b/drivers/acpi/processor_pdc.c
@@ -14,6 +14,8 @@
#include <linux/acpi.h>
#include <acpi/processor.h>

+#include <xen/xen.h>
+
#include "internal.h"

static bool __init processor_physically_present(acpi_handle handle)
@@ -47,6 +49,15 @@ static bool __init processor_physically_present(acpi_handle handle)
return false;
}

+ if (xen_initial_domain())
+ /*
+ * When running as a Xen dom0 the number of processors Linux
+ * sees can be different from the real number of processors on
+ * the system, and we still need to execute _PDC for all of
+ * them.
+ */
+ return xen_processor_present(acpi_id);
+
type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
cpuid = acpi_get_cpuid(handle, type, acpi_id);

diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index f0f41959faea..0556c4720d3f 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -297,20 +297,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},

- /*
- * Older models with nvidia GPU which need acpi_video backlight
- * control and where the old nvidia binary driver series does not
- * call acpi_video_register_backlight().
- */
- {
- .callback = video_detect_force_video,
- /* ThinkPad W530 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
- },
- },
-
/*
* These models have a working acpi_video backlight control, and using
* native backlight causes a regression where backlight does not work
diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
index ed752cbbe636..c8025921c129 100644
--- a/drivers/acpi/viot.c
+++ b/drivers/acpi/viot.c
@@ -328,6 +328,7 @@ static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
{
u32 epid;
struct viot_endpoint *ep;
+ struct device *aliased_dev = data;
u32 domain_nr = pci_domain_nr(pdev->bus);

list_for_each_entry(ep, &viot_pci_ranges, list) {
@@ -338,7 +339,7 @@ static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
epid = ((domain_nr - ep->segment_start) << 16) +
dev_id - ep->bdf_start + ep->endpoint_id;

- return viot_dev_iommu_init(&pdev->dev, ep->viommu,
+ return viot_dev_iommu_init(aliased_dev, ep->viommu,
epid);
}
}
@@ -372,7 +373,7 @@ int viot_iommu_configure(struct device *dev)
{
if (dev_is_pci(dev))
return pci_for_each_dma_alias(to_pci_dev(dev),
- viot_pci_dev_iommu_init, NULL);
+ viot_pci_dev_iommu_init, dev);
else if (dev_is_platform(dev))
return viot_mmio_dev_iommu_init(to_platform_device(dev));
return -ENODEV;
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index f30256a524be..c440d1af197a 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -38,11 +38,10 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
{
/*
* For non DT/ACPI systems, assume unique level 1 caches,
- * system-wide shared caches for all other levels. This will be used
- * only if arch specific code has not populated shared_cpu_map
+ * system-wide shared caches for all other levels.
*/
if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
- return !(this_leaf->level == 1);
+ return (this_leaf->level != 1) && (sib_leaf->level != 1);

if ((sib_leaf->attributes & CACHE_ID) &&
(this_leaf->attributes & CACHE_ID))
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4c98849577d4..7af8e33735a3 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -487,7 +487,8 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
bool cpu_is_hotpluggable(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
- return dev && container_of(dev, struct cpu, dev)->hotpluggable;
+ return dev && container_of(dev, struct cpu, dev)->hotpluggable
+ && tick_nohz_cpu_hotpluggable(cpu);
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);

diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 54010eac6ca9..4ba09abbcaf6 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1280,7 +1280,7 @@ static void one_flush_endio(struct bio *bio)
static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
- REQ_OP_FLUSH | REQ_PREFLUSH, GFP_NOIO);
+ REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO);
struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);

if (!octx) {
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 02893600db39..795be33f2892 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -354,7 +354,6 @@ static void btsdio_remove(struct sdio_func *func)

BT_DBG("func %p", func);

- cancel_work_sync(&data->work);
if (!data)
return;

diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index 26d0eddb1477..55e909f8cb25 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -393,6 +393,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
const struct firmware *firmware = NULL;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state new_state;
const char *fw_name;
void *buf;
dma_addr_t dma_addr;
@@ -510,14 +511,18 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
}

error_fw_load:
- mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
- wake_up_all(&mhi_cntrl->state_event);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (new_state == MHI_PM_FW_DL_ERR)
+ wake_up_all(&mhi_cntrl->state_event);
}

int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
{
struct image_info *image_info = mhi_cntrl->fbc_image;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state new_state;
int ret;

if (!image_info)
@@ -528,8 +533,11 @@ int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
&image_info->mhi_buf[image_info->entries - 1]);
if (ret) {
dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
- mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
- wake_up_all(&mhi_cntrl->state_event);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (new_state == MHI_PM_FW_DL_ERR)
+ wake_up_all(&mhi_cntrl->state_event);
}

return ret;
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index bf672de35131..04fbccff65ac 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -516,6 +516,12 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
return -EIO;
}

+ if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
+ dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
+ val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
+ return -ERANGE;
+ }
+
/* Setup wake db */
mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
mhi_cntrl->wake_set = false;
@@ -532,6 +538,12 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
return -EIO;
}

+ if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
+ dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n",
+ val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
+ return -ERANGE;
+ }
+
/* Setup event db address for each ev_ring */
mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index df0fbfee7b78..0c3a009ed9bb 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -503,7 +503,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
}
write_unlock_irq(&mhi_cntrl->pm_lock);

- if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
+ if (pm_state != MHI_PM_SYS_ERR_DETECT)
goto exit_intvec;

switch (ee) {
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 39565cf74b2c..df45e0af9238 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -162,7 +162,8 @@ config IPMI_KCS_BMC_SERIO

config ASPEED_BT_IPMI_BMC
depends on ARCH_ASPEED || COMPILE_TEST
- depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
+ depends on MFD_SYSCON
+ select REGMAP_MMIO
tristate "BT IPMI bmc driver"
help
Provides a driver for the BT (Block Transfer) IPMI interface
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index a5ddebb1edea..d48061ec27dd 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -557,8 +557,10 @@ static void retry_timeout(struct timer_list *t)

if (waiting)
start_get(ssif_info);
- if (resend)
+ if (resend) {
start_resend(ssif_info);
+ ssif_inc_stat(ssif_info, send_retries);
+ }
}

static void watch_timeout(struct timer_list *t)
@@ -784,9 +786,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
|| data[1] != IPMI_GET_MSG_FLAGS_CMD) {
/*
- * Don't abort here, maybe it was a queued
- * response to a previous command.
+ * Recv error response, give up.
*/
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
dev_warn(&ssif_info->client->dev,
"Invalid response getting flags: %x %x\n",
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 409682d06309..5165f6d3da22 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -678,7 +678,8 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
void tpm_chip_unregister(struct tpm_chip *chip)
{
tpm_del_legacy_sysfs(chip);
- if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip))
+ if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip) &&
+ !tpm_amd_is_rng_defective(chip))
hwrng_unregister(&chip->hwrng);
tpm_bios_log_teardown(chip);
if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip))
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 3f98e587b3e8..eecfbd7e9786 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -136,16 +136,27 @@ static bool check_locality(struct tpm_chip *chip, int l)
return false;
}

-static int release_locality(struct tpm_chip *chip, int l)
+static int __tpm_tis_relinquish_locality(struct tpm_tis_data *priv, int l)
+{
+ tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
+
+ return 0;
+}
+
+static int tpm_tis_relinquish_locality(struct tpm_chip *chip, int l)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);

- tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
+ mutex_lock(&priv->locality_count_mutex);
+ priv->locality_count--;
+ if (priv->locality_count == 0)
+ __tpm_tis_relinquish_locality(priv, l);
+ mutex_unlock(&priv->locality_count_mutex);

return 0;
}

-static int request_locality(struct tpm_chip *chip, int l)
+static int __tpm_tis_request_locality(struct tpm_chip *chip, int l)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
unsigned long stop, timeout;
@@ -186,6 +197,20 @@ static int request_locality(struct tpm_chip *chip, int l)
return -1;
}

+static int tpm_tis_request_locality(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int ret = 0;
+
+ mutex_lock(&priv->locality_count_mutex);
+ if (priv->locality_count == 0)
+ ret = __tpm_tis_request_locality(chip, l);
+ if (!ret)
+ priv->locality_count++;
+ mutex_unlock(&priv->locality_count_mutex);
+ return ret;
+}
+
static u8 tpm_tis_status(struct tpm_chip *chip)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
@@ -652,7 +677,7 @@ static int probe_itpm(struct tpm_chip *chip)
if (vendor != TPM_VID_INTEL)
return 0;

- if (request_locality(chip, 0) != 0)
+ if (tpm_tis_request_locality(chip, 0) != 0)
return -EBUSY;

rc = tpm_tis_send_data(chip, cmd_getticks, len);
@@ -673,7 +698,7 @@ static int probe_itpm(struct tpm_chip *chip)

out:
tpm_tis_ready(chip);
- release_locality(chip, priv->locality);
+ tpm_tis_relinquish_locality(chip, priv->locality);

return rc;
}
@@ -732,25 +757,17 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
return IRQ_HANDLED;
}

-static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
+static void tpm_tis_gen_interrupt(struct tpm_chip *chip)
{
const char *desc = "attempting to generate an interrupt";
u32 cap2;
cap_t cap;
int ret;

- ret = request_locality(chip, 0);
- if (ret < 0)
- return ret;
-
if (chip->flags & TPM_CHIP_FLAG_TPM2)
ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
else
ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
-
- release_locality(chip, 0);
-
- return ret;
}

/* Register the IRQ and issue a command that will cause an interrupt. If an
@@ -773,52 +790,55 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
}
priv->irq = irq;

+ rc = tpm_tis_request_locality(chip, 0);
+ if (rc < 0)
+ return rc;
+
rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality),
&original_int_vec);
- if (rc < 0)
+ if (rc < 0) {
+ tpm_tis_relinquish_locality(chip, priv->locality);
return rc;
+ }

rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq);
if (rc < 0)
- return rc;
+ goto restore_irqs;

rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status);
if (rc < 0)
- return rc;
+ goto restore_irqs;

/* Clear all existing */
rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status);
if (rc < 0)
- return rc;
-
+ goto restore_irqs;
/* Turn on */
rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality),
intmask | TPM_GLOBAL_INT_ENABLE);
if (rc < 0)
- return rc;
+ goto restore_irqs;

priv->irq_tested = false;

/* Generate an interrupt by having the core call through to
* tpm_tis_send
*/
- rc = tpm_tis_gen_interrupt(chip);
- if (rc < 0)
- return rc;
+ tpm_tis_gen_interrupt(chip);

+restore_irqs:
/* tpm_tis_send will either confirm the interrupt is working or it
* will call disable_irq which undoes all of the above.
*/
if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
- rc = tpm_tis_write8(priv, original_int_vec,
- TPM_INT_VECTOR(priv->locality));
- if (rc < 0)
- return rc;
-
- return 1;
+ tpm_tis_write8(priv, original_int_vec,
+ TPM_INT_VECTOR(priv->locality));
+ rc = -1;
}

- return 0;
+ tpm_tis_relinquish_locality(chip, priv->locality);
+
+ return rc;
}

/* Try to find the IRQ the TPM is using. This is for legacy x86 systems that
@@ -932,8 +952,8 @@ static const struct tpm_class_ops tpm_tis = {
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_canceled = tpm_tis_req_canceled,
- .request_locality = request_locality,
- .relinquish_locality = release_locality,
+ .request_locality = tpm_tis_request_locality,
+ .relinquish_locality = tpm_tis_relinquish_locality,
.clk_enable = tpm_tis_clkrun_enable,
};

@@ -967,6 +987,8 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->timeout_min = TPM_TIMEOUT_USECS_MIN;
priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
priv->phy_ops = phy_ops;
+ priv->locality_count = 0;
+ mutex_init(&priv->locality_count_mutex);

dev_set_drvdata(&chip->dev, priv);

@@ -1013,14 +1035,14 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
intmask &= ~TPM_GLOBAL_INT_ENABLE;

- rc = request_locality(chip, 0);
+ rc = tpm_tis_request_locality(chip, 0);
if (rc < 0) {
rc = -ENODEV;
goto out_err;
}

tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
- release_locality(chip, 0);
+ tpm_tis_relinquish_locality(chip, 0);

rc = tpm_chip_start(chip);
if (rc)
@@ -1080,13 +1102,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
* proper timeouts for the driver.
*/

- rc = request_locality(chip, 0);
+ rc = tpm_tis_request_locality(chip, 0);
if (rc < 0)
goto out_err;

rc = tpm_get_timeouts(chip);

- release_locality(chip, 0);
+ tpm_tis_relinquish_locality(chip, 0);

if (rc) {
dev_err(dev, "Could not get TPM timeouts and durations\n");
@@ -1094,17 +1116,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
goto out_err;
}

- if (irq) {
+ if (irq)
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
irq);
- if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
- dev_err(&chip->dev, FW_BUG
+ else
+ tpm_tis_probe_irq(chip, intmask);
+
+ if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
+ dev_err(&chip->dev, FW_BUG
"TPM interrupt not working, polling instead\n");

- disable_interrupts(chip);
- }
- } else {
- tpm_tis_probe_irq(chip, intmask);
+ rc = tpm_tis_request_locality(chip, 0);
+ if (rc < 0)
+ goto out_err;
+ disable_interrupts(chip);
+ tpm_tis_relinquish_locality(chip, 0);
}
}

@@ -1165,28 +1191,27 @@ int tpm_tis_resume(struct device *dev)
struct tpm_chip *chip = dev_get_drvdata(dev);
int ret;

+ ret = tpm_tis_request_locality(chip, 0);
+ if (ret < 0)
+ return ret;
+
if (chip->flags & TPM_CHIP_FLAG_IRQ)
tpm_tis_reenable_interrupts(chip);

ret = tpm_pm_resume(dev);
if (ret)
- return ret;
+ goto out;

/*
* TPM 1.2 requires self-test on resume. This function actually returns
* an error code but for unknown reason it isn't handled.
*/
- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
- ret = request_locality(chip, 0);
- if (ret < 0)
- return ret;
-
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
tpm1_do_selftest(chip);
+out:
+ tpm_tis_relinquish_locality(chip, 0);

- release_locality(chip, 0);
- }
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(tpm_tis_resume);
#endif
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index b68479e0de10..1d51d5168fb6 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -91,6 +91,8 @@ enum tpm_tis_flags {

struct tpm_tis_data {
u16 manufacturer_id;
+ struct mutex locality_count_mutex;
+ unsigned int locality_count;
int locality;
int irq;
bool irq_tested;
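
The tpm_tis changes above put a reference count (locality_count, protected by locality_count_mutex) around locality handling, so nested request/relinquish pairs only touch the TPM_ACCESS register on the first request and the final relinquish. A minimal userspace sketch of that counting scheme (illustrative only, names made up, error paths omitted):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t count_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int locality_count;

static void hw_claim(void)   { puts("claim locality in hardware"); }
static void hw_release(void) { puts("release locality in hardware"); }

static void request_locality(void)
{
        pthread_mutex_lock(&count_lock);
        if (locality_count++ == 0)
                hw_claim();             /* only the first user touches HW */
        pthread_mutex_unlock(&count_lock);
}

static void relinquish_locality(void)
{
        pthread_mutex_lock(&count_lock);
        if (--locality_count == 0)
                hw_release();           /* only the last user touches HW */
        pthread_mutex_unlock(&count_lock);
}

int main(void)
{
        request_locality();
        request_locality();             /* nested: no second hardware claim */
        relinquish_locality();
        relinquish_locality();          /* last put: hardware release */
        return 0;
}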
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index d757003004cb..0882ed01d5c2 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -668,7 +668,7 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,

ret = sam9x60_frac_pll_compute_mul_frac(&frac->core, FCORE_MIN,
parent_rate, true);
- if (ret <= 0) {
+ if (ret < 0) {
hw = ERR_PTR(ret);
goto free;
}
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 2ef819606c41..1a4e6340f95c 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -33,9 +33,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
else
return rc;
}
- if (clkspec.np == node && !clk_supplier)
+ if (clkspec.np == node && !clk_supplier) {
+ of_node_put(clkspec.np);
return 0;
+ }
pclk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
if (IS_ERR(pclk)) {
if (PTR_ERR(pclk) != -EPROBE_DEFER)
pr_warn("clk: couldn't get parent clock %d for %pOF\n",
@@ -48,10 +51,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
if (rc < 0)
goto err;
if (clkspec.np == node && !clk_supplier) {
+ of_node_put(clkspec.np);
rc = 0;
goto err;
}
clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
pr_warn("clk: couldn't get assigned clock %d for %pOF\n",
@@ -93,10 +98,13 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
else
return rc;
}
- if (clkspec.np == node && !clk_supplier)
+ if (clkspec.np == node && !clk_supplier) {
+ of_node_put(clkspec.np);
return 0;
+ }

clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
pr_warn("clk: couldn't get clock %d for %pOF\n",
diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
index a2aaa14fc1ae..f6674110a88e 100644
--- a/drivers/clk/imx/clk-fracn-gppll.c
+++ b/drivers/clk/imx/clk-fracn-gppll.c
@@ -15,6 +15,7 @@
#include "clk.h"

#define PLL_CTRL 0x0
+#define HW_CTRL_SEL BIT(16)
#define CLKMUX_BYPASS BIT(2)
#define CLKMUX_EN BIT(1)
#define POWERUP_MASK BIT(0)
@@ -60,18 +61,20 @@ struct clk_fracn_gppll {
};

/*
- * Fvco = Fref * (MFI + MFN / MFD)
- * Fout = Fvco / (rdiv * odiv)
+ * Fvco = (Fref / rdiv) * (MFI + MFN / MFD)
+ * Fout = Fvco / odiv
+ * The (Fref / rdiv) should be in range 20MHz to 40MHz
+ * The Fvco should be in range 2.5GHz to 5GHz
*/
static const struct imx_fracn_gppll_rate_table fracn_tbl[] = {
- PLL_FRACN_GP(650000000U, 81, 0, 1, 0, 3),
+ PLL_FRACN_GP(650000000U, 162, 50, 100, 0, 6),
PLL_FRACN_GP(594000000U, 198, 0, 1, 0, 8),
- PLL_FRACN_GP(560000000U, 70, 0, 1, 0, 3),
- PLL_FRACN_GP(498000000U, 83, 0, 1, 0, 4),
+ PLL_FRACN_GP(560000000U, 140, 0, 1, 0, 6),
+ PLL_FRACN_GP(498000000U, 166, 0, 1, 0, 8),
PLL_FRACN_GP(484000000U, 121, 0, 1, 0, 6),
PLL_FRACN_GP(445333333U, 167, 0, 1, 0, 9),
- PLL_FRACN_GP(400000000U, 50, 0, 1, 0, 3),
- PLL_FRACN_GP(393216000U, 81, 92, 100, 0, 5)
+ PLL_FRACN_GP(400000000U, 200, 0, 1, 0, 12),
+ PLL_FRACN_GP(393216000U, 163, 84, 100, 0, 10)
};

struct imx_fracn_gppll_clk imx_fracn_gppll = {
@@ -191,6 +194,11 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate,

rate = imx_get_pll_settings(pll, drate);

+ /* Hardware control select disable. PLL is controlled by register */
+ tmp = readl_relaxed(pll->base + PLL_CTRL);
+ tmp &= ~HW_CTRL_SEL;
+ writel_relaxed(tmp, pll->base + PLL_CTRL);
+
/* Disable output */
tmp = readl_relaxed(pll->base + PLL_CTRL);
tmp &= ~CLKMUX_EN;
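
The retuned table keeps every output rate while satisfying the constraints stated in the new comment. As a quick check of the 650 MHz row, assuming a 24 MHz reference clock (typical for i.MX parts, but not stated in this hunk) and treating the rdiv field value 0 as divide-by-one:

#include <stdio.h>

int main(void)
{
        double fref = 24e6;                     /* assumed reference clock  */
        double mfi = 162, mfn = 50, mfd = 100;  /* 650 MHz table entry      */
        double rdiv = 1, odiv = 6;              /* rdiv field 0 taken as /1 */

        double fvco = (fref / rdiv) * (mfi + mfn / mfd);  /* 3.9 GHz */
        printf("Fout = %.0f Hz\n", fvco / odiv);          /* 650000000 */
        return 0;
}

Under that same assumption, Fref/rdiv (24 MHz) lands inside the stated 20-40 MHz window and Fvco (3.9 GHz) inside the 2.5-5 GHz window.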
diff --git a/drivers/clk/imx/clk-imx8ulp.c b/drivers/clk/imx/clk-imx8ulp.c
index 8eb1af2d6429..ca0e4a3aa454 100644
--- a/drivers/clk/imx/clk-imx8ulp.c
+++ b/drivers/clk/imx/clk-imx8ulp.c
@@ -200,8 +200,8 @@ static int imx8ulp_clk_cgc1_init(struct platform_device *pdev)
clks[IMX8ULP_CLK_NIC_AD_DIVPLAT] = imx_clk_hw_divider_flags("nic_ad_divplat", "nic_sel", base + 0x34, 21, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
clks[IMX8ULP_CLK_NIC_PER_DIVPLAT] = imx_clk_hw_divider_flags("nic_per_divplat", "nic_ad_divplat", base + 0x34, 14, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
clks[IMX8ULP_CLK_XBAR_AD_DIVPLAT] = imx_clk_hw_divider_flags("xbar_ad_divplat", "nic_ad_divplat", base + 0x38, 14, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
- clks[IMX8ULP_CLK_XBAR_DIVBUS] = imx_clk_hw_divider_flags("xbar_divbus", "nic_ad_divplat", base + 0x38, 7, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
- clks[IMX8ULP_CLK_XBAR_AD_SLOW] = imx_clk_hw_divider_flags("xbar_ad_slow", "nic_ad_divplat", base + 0x38, 0, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_XBAR_DIVBUS] = imx_clk_hw_divider_flags("xbar_divbus", "xbar_ad_divplat", base + 0x38, 7, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_XBAR_AD_SLOW] = imx_clk_hw_divider_flags("xbar_ad_slow", "xbar_divbus", base + 0x38, 0, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);

clks[IMX8ULP_CLK_SOSC_DIV1_GATE] = imx_clk_hw_gate_dis("sosc_div1_gate", "sosc", base + 0x108, 7);
clks[IMX8ULP_CLK_SOSC_DIV2_GATE] = imx_clk_hw_gate_dis("sosc_div2_gate", "sosc", base + 0x108, 15);
diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
index 6ba398eb7df9..4287bd3f545e 100644
--- a/drivers/clk/mediatek/clk-mt2701-aud.c
+++ b/drivers/clk/mediatek/clk-mt2701-aud.c
@@ -15,41 +15,17 @@

#include <dt-bindings/clock/mt2701-clk.h>

-#define GATE_AUDIO0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

-#define GATE_AUDIO1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

-#define GATE_AUDIO2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

-#define GATE_AUDIO3(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio3_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO3(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio3_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

static const struct mtk_gate_regs audio0_cg_regs = {
.set_ofs = 0x0,
diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
index 435ed4819d56..b0f057207945 100644
--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
@@ -24,23 +24,11 @@ static const struct mtk_gate_regs bdp1_cg_regs = {
.sta_ofs = 0x0110,
};

-#define GATE_BDP0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &bdp0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_BDP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &bdp0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)

-#define GATE_BDP1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &bdp1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_BDP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &bdp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)

static const struct mtk_gate bdp_clks[] = {
GATE_BDP0(CLK_BDP_BRG_BA, "brg_baclk", "mm_sel", 0),
diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c
index edf1e2ed2b59..601358748750 100644
--- a/drivers/clk/mediatek/clk-mt2701-eth.c
+++ b/drivers/clk/mediatek/clk-mt2701-eth.c
@@ -16,14 +16,8 @@ static const struct mtk_gate_regs eth_cg_regs = {
.sta_ofs = 0x0030,
};

-#define GATE_ETH(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &eth_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_ETH(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &eth_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

static const struct mtk_gate eth_clks[] = {
GATE_ETH(CLK_ETHSYS_HSDMA, "hsdma_clk", "ethif_sel", 5),
diff --git a/drivers/clk/mediatek/clk-mt2701-g3d.c b/drivers/clk/mediatek/clk-mt2701-g3d.c
index 1458109d99d9..8d1fc8e3336e 100644
--- a/drivers/clk/mediatek/clk-mt2701-g3d.c
+++ b/drivers/clk/mediatek/clk-mt2701-g3d.c
@@ -16,14 +16,8 @@

#include <dt-bindings/clock/mt2701-clk.h>

-#define GATE_G3D(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &g3d_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_G3D(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &g3d_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate_regs g3d_cg_regs = {
.sta_ofs = 0x0,
diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c
index 434cbbe8c037..edeeb033a235 100644
--- a/drivers/clk/mediatek/clk-mt2701-hif.c
+++ b/drivers/clk/mediatek/clk-mt2701-hif.c
@@ -16,14 +16,8 @@ static const struct mtk_gate_regs hif_cg_regs = {
.sta_ofs = 0x0030,
};

-#define GATE_HIF(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &hif_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_HIF(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &hif_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

static const struct mtk_gate hif_clks[] = {
GATE_HIF(CLK_HIFSYS_USB0PHY, "usb0_phy_clk", "ethpll_500m_ck", 21),
diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
index 7e53deb7f990..eb172473f075 100644
--- a/drivers/clk/mediatek/clk-mt2701-img.c
+++ b/drivers/clk/mediatek/clk-mt2701-img.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs img_cg_regs = {
.sta_ofs = 0x0000,
};

-#define GATE_IMG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &img_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_SMI_COMM, "img_smi_comm", "mm_sel", 0),
diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
index 9ea7abad99d2..eb069f3bc9a2 100644
--- a/drivers/clk/mediatek/clk-mt2701-mm.c
+++ b/drivers/clk/mediatek/clk-mt2701-mm.c
@@ -24,23 +24,11 @@ static const struct mtk_gate_regs disp1_cg_regs = {
.sta_ofs = 0x0110,
};

-#define GATE_DISP0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &disp0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_DISP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &disp0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_DISP1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &disp1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_DISP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &disp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate mm_clks[] = {
GATE_DISP0(CLK_MM_SMI_COMMON, "mm_smi_comm", "mm_sel", 0),
diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
index d3089da0ab62..0f07c5d731df 100644
--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
@@ -24,23 +24,11 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
.sta_ofs = 0x0008,
};

-#define GATE_VDEC0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VDEC0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)

-#define GATE_VDEC1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VDEC1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)

static const struct mtk_gate vdec_clks[] = {
GATE_VDEC0(CLK_VDEC_CKGEN, "vdec_cken", "vdec_sel", 0),
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index 9b442af37e67..1c3a93143dc5 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -636,14 +636,8 @@ static const struct mtk_gate_regs top_aud_cg_regs = {
.sta_ofs = 0x012C,
};

-#define GATE_TOP_AUD(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top_aud_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_TOP_AUD(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top_aud_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

static const struct mtk_gate top_clks[] = {
GATE_TOP_AUD(CLK_TOP_AUD_48K_TIMING, "a1sys_hp_ck", "aud_mux1_div",
@@ -701,14 +695,8 @@ static const struct mtk_gate_regs infra_cg_regs = {
.sta_ofs = 0x0048,
};

-#define GATE_ICG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_ICG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate infra_clks[] = {
GATE_ICG(CLK_INFRA_DBG, "dbgclk", "axi_sel", 0),
@@ -822,23 +810,11 @@ static const struct mtk_gate_regs peri1_cg_regs = {
.sta_ofs = 0x001c,
};

-#define GATE_PERI0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_PERI0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_PERI1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_PERI1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate peri_clks[] = {
GATE_PERI0(CLK_PERI_USB0_MCU, "usb0_mcu_ck", "axi_sel", 31),
diff --git a/drivers/clk/mediatek/clk-mt2712-bdp.c b/drivers/clk/mediatek/clk-mt2712-bdp.c
index 684d03e9f6de..5e668651dd90 100644
--- a/drivers/clk/mediatek/clk-mt2712-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2712-bdp.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs bdp_cg_regs = {
.sta_ofs = 0x100,
};

-#define GATE_BDP(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &bdp_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_BDP(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &bdp_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

static const struct mtk_gate bdp_clks[] = {
GATE_BDP(CLK_BDP_BRIDGE_B, "bdp_bridge_b", "mm_sel", 0),
diff --git a/drivers/clk/mediatek/clk-mt2712-img.c b/drivers/clk/mediatek/clk-mt2712-img.c
index 335049cdc856..3ffa51384e6b 100644
--- a/drivers/clk/mediatek/clk-mt2712-img.c
+++ b/drivers/clk/mediatek/clk-mt2712-img.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs img_cg_regs = {
.sta_ofs = 0x0,
};

-#define GATE_IMG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &img_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_SMI_LARB2, "img_smi_larb2", "mm_sel", 0),
diff --git a/drivers/clk/mediatek/clk-mt2712-jpgdec.c b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
index 07ba7c5e80af..8c768d5ce24d 100644
--- a/drivers/clk/mediatek/clk-mt2712-jpgdec.c
+++ b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs jpgdec_cg_regs = {
.sta_ofs = 0x0,
};

-#define GATE_JPGDEC(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &jpgdec_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_JPGDEC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &jpgdec_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)

static const struct mtk_gate jpgdec_clks[] = {
GATE_JPGDEC(CLK_JPGDEC_JPGDEC1, "jpgdec_jpgdec1", "jpgdec_sel", 0),
diff --git a/drivers/clk/mediatek/clk-mt2712-mfg.c b/drivers/clk/mediatek/clk-mt2712-mfg.c
index 42f8cf3ecf4c..8949315c2dd2 100644
--- a/drivers/clk/mediatek/clk-mt2712-mfg.c
+++ b/drivers/clk/mediatek/clk-mt2712-mfg.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs mfg_cg_regs = {
.sta_ofs = 0x0,
};

-#define GATE_MFG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mfg_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate mfg_clks[] = {
GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0),
diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c
index 7d44b09b8a0a..ad6daa8f28a8 100644
--- a/drivers/clk/mediatek/clk-mt2712-mm.c
+++ b/drivers/clk/mediatek/clk-mt2712-mm.c
@@ -30,32 +30,14 @@ static const struct mtk_gate_regs mm2_cg_regs = {
.sta_ofs = 0x220,
};

-#define GATE_MM0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_MM1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_MM2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_MM0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_MM1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_MM2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate mm_clks[] = {
/* MM0 */
diff --git a/drivers/clk/mediatek/clk-mt2712-vdec.c b/drivers/clk/mediatek/clk-mt2712-vdec.c
index 6296ed5c5b55..572290dd43c8 100644
--- a/drivers/clk/mediatek/clk-mt2712-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2712-vdec.c
@@ -24,23 +24,11 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
.sta_ofs = 0x8,
};

-#define GATE_VDEC0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VDEC0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)

-#define GATE_VDEC1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VDEC1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)

static const struct mtk_gate vdec_clks[] = {
/* VDEC0 */
diff --git a/drivers/clk/mediatek/clk-mt2712-venc.c b/drivers/clk/mediatek/clk-mt2712-venc.c
index b9bfc35de629..9588eb03016e 100644
--- a/drivers/clk/mediatek/clk-mt2712-venc.c
+++ b/drivers/clk/mediatek/clk-mt2712-venc.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs venc_cg_regs = {
.sta_ofs = 0x0,
};

-#define GATE_VENC(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &venc_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VENC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)

static const struct mtk_gate venc_clks[] = {
GATE_VENC(CLK_VENC_SMI_COMMON_CON, "venc_smi", "mm_sel", 0),
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index 56980dd6c2ea..d6c2cc183b1a 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -958,23 +958,11 @@ static const struct mtk_gate_regs top1_cg_regs = {
.sta_ofs = 0x424,
};

-#define GATE_TOP0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_TOP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

-#define GATE_TOP1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_TOP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

static const struct mtk_gate top_clks[] = {
/* TOP0 */
@@ -998,14 +986,8 @@ static const struct mtk_gate_regs infra_cg_regs = {
.sta_ofs = 0x48,
};

-#define GATE_INFRA(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_INFRA(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate infra_clks[] = {
GATE_INFRA(CLK_INFRA_DBGCLK, "infra_dbgclk", "axi_sel", 0),
@@ -1035,32 +1017,14 @@ static const struct mtk_gate_regs peri2_cg_regs = {
.sta_ofs = 0x42c,
};

-#define GATE_PERI0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_PERI0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_PERI1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_PERI1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_PERI2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_PERI2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

static const struct mtk_gate peri_clks[] = {
/* PERI0 */
@@ -1283,15 +1247,25 @@ static int clk_mt2712_apmixed_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;

clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;

- mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ r = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (r)
+ goto free_clk_data;

r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r) {
+ dev_err(&pdev->dev, "Cannot register clock provider: %d\n", r);
+ goto unregister_plls;
+ }

- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
+ return 0;

+unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
return r;
}

diff --git a/drivers/clk/mediatek/clk-mt6765-audio.c b/drivers/clk/mediatek/clk-mt6765-audio.c
index 0aa6c0d352ca..5682e0302eee 100644
--- a/drivers/clk/mediatek/clk-mt6765-audio.c
+++ b/drivers/clk/mediatek/clk-mt6765-audio.c
@@ -24,23 +24,11 @@ static const struct mtk_gate_regs audio1_cg_regs = {
.sta_ofs = 0x4,
};

-#define GATE_AUDIO0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

-#define GATE_AUDIO1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

static const struct mtk_gate audio_clks[] = {
/* AUDIO0 */
diff --git a/drivers/clk/mediatek/clk-mt6765-cam.c b/drivers/clk/mediatek/clk-mt6765-cam.c
index 25f2bef38126..6e7d192c19cb 100644
--- a/drivers/clk/mediatek/clk-mt6765-cam.c
+++ b/drivers/clk/mediatek/clk-mt6765-cam.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs cam_cg_regs = {
.sta_ofs = 0x0,
};

-#define GATE_CAM(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &cam_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_CAM(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate cam_clks[] = {
GATE_CAM(CLK_CAM_LARB3, "cam_larb3", "mm_ck", 0),
diff --git a/drivers/clk/mediatek/clk-mt6765-img.c b/drivers/clk/mediatek/clk-mt6765-img.c
index a62303ef4f41..cfbc907988af 100644
--- a/drivers/clk/mediatek/clk-mt6765-img.c
+++ b/drivers/clk/mediatek/clk-mt6765-img.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs img_cg_regs = {
.sta_ofs = 0x0,
};

-#define GATE_IMG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &img_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_LARB2, "img_larb2", "mm_ck", 0),
diff --git a/drivers/clk/mediatek/clk-mt6765-mipi0a.c b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
index 25c829fc3866..f2b9dc808480 100644
--- a/drivers/clk/mediatek/clk-mt6765-mipi0a.c
+++ b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs mipi0a_cg_regs = {
.sta_ofs = 0x80,
};

-#define GATE_MIPI0A(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mipi0a_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_MIPI0A(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mipi0a_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

static const struct mtk_gate mipi0a_clks[] = {
GATE_MIPI0A(CLK_MIPI0A_CSR_CSI_EN_0A,
diff --git a/drivers/clk/mediatek/clk-mt6765-mm.c b/drivers/clk/mediatek/clk-mt6765-mm.c
index bda774668a36..a4570c9dbefa 100644
--- a/drivers/clk/mediatek/clk-mt6765-mm.c
+++ b/drivers/clk/mediatek/clk-mt6765-mm.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs mm_cg_regs = {
.sta_ofs = 0x100,
};

-#define GATE_MM(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_MM(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate mm_clks[] = {
/* MM */
diff --git a/drivers/clk/mediatek/clk-mt6765-vcodec.c b/drivers/clk/mediatek/clk-mt6765-vcodec.c
index 2bc1fbde87da..75d72b9b4032 100644
--- a/drivers/clk/mediatek/clk-mt6765-vcodec.c
+++ b/drivers/clk/mediatek/clk-mt6765-vcodec.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs venc_cg_regs = {
.sta_ofs = 0x0,
};

-#define GATE_VENC(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &venc_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VENC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)

static const struct mtk_gate venc_clks[] = {
GATE_VENC(CLK_VENC_SET0_LARB, "venc_set0_larb", "mm_ck", 0),
diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
index e9b9e6729733..665981fc411f 100644
--- a/drivers/clk/mediatek/clk-mt6765.c
+++ b/drivers/clk/mediatek/clk-mt6765.c
@@ -483,32 +483,14 @@ static const struct mtk_gate_regs top2_cg_regs = {
.sta_ofs = 0x320,
};

-#define GATE_TOP0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_TOP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

-#define GATE_TOP1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_TOP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

-#define GATE_TOP2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_TOP2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

static const struct mtk_gate top_clks[] = {
/* TOP0 */
@@ -559,41 +541,17 @@ static const struct mtk_gate_regs ifr5_cg_regs = {
.sta_ofs = 0xc8,
};

-#define GATE_IFR2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ifr2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_IFR2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ifr2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_IFR3(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ifr3_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_IFR3(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ifr3_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_IFR4(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ifr4_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_IFR4(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ifr4_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_IFR5(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ifr5_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_IFR5(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ifr5_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate ifr_clks[] = {
/* INFRA_TOPAXI */
@@ -674,14 +632,8 @@ static const struct mtk_gate_regs apmixed_cg_regs = {
.sta_ofs = 0x14,
};

-#define GATE_APMIXED(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &apmixed_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_APMIXED(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &apmixed_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

static const struct mtk_gate apmixed_clks[] = {
/* AUDIO0 */
diff --git a/drivers/clk/mediatek/clk-mt6797-img.c b/drivers/clk/mediatek/clk-mt6797-img.c
index 7c6a53fbb8be..06441393478f 100644
--- a/drivers/clk/mediatek/clk-mt6797-img.c
+++ b/drivers/clk/mediatek/clk-mt6797-img.c
@@ -16,14 +16,8 @@ static const struct mtk_gate_regs img_cg_regs = {
.sta_ofs = 0x0000,
};

-#define GATE_IMG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &img_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_FDVT, "img_fdvt", "mm_sel", 11),
diff --git a/drivers/clk/mediatek/clk-mt6797-mm.c b/drivers/clk/mediatek/clk-mt6797-mm.c
index 0846011fc894..99a63f46642f 100644
--- a/drivers/clk/mediatek/clk-mt6797-mm.c
+++ b/drivers/clk/mediatek/clk-mt6797-mm.c
@@ -23,23 +23,11 @@ static const struct mtk_gate_regs mm1_cg_regs = {
.sta_ofs = 0x0110,
};

-#define GATE_MM0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
-}
+#define GATE_MM0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_MM1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
-}
+#define GATE_MM1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate mm_clks[] = {
GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
diff --git a/drivers/clk/mediatek/clk-mt6797-vdec.c b/drivers/clk/mediatek/clk-mt6797-vdec.c
index 6120fccc859f..8622ddd87a5b 100644
--- a/drivers/clk/mediatek/clk-mt6797-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6797-vdec.c
@@ -24,23 +24,11 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
.sta_ofs = 0x0008,
};

-#define GATE_VDEC0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
-}
+#define GATE_VDEC0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)

-#define GATE_VDEC1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
-}
+#define GATE_VDEC1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)

static const struct mtk_gate vdec_clks[] = {
GATE_VDEC0(CLK_VDEC_CKEN_ENG, "vdec_cken_eng", "vdec_sel", 8),
diff --git a/drivers/clk/mediatek/clk-mt6797-venc.c b/drivers/clk/mediatek/clk-mt6797-venc.c
index 834d3834d2bb..928d611a476e 100644
--- a/drivers/clk/mediatek/clk-mt6797-venc.c
+++ b/drivers/clk/mediatek/clk-mt6797-venc.c
@@ -18,14 +18,8 @@ static const struct mtk_gate_regs venc_cg_regs = {
.sta_ofs = 0x0000,
};

-#define GATE_VENC(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &venc_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VENC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)

static const struct mtk_gate venc_clks[] = {
GATE_VENC(CLK_VENC_0, "venc_0", "mm_sel", 0),
diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
index b89f325a4b9b..78339cb35beb 100644
--- a/drivers/clk/mediatek/clk-mt6797.c
+++ b/drivers/clk/mediatek/clk-mt6797.c
@@ -420,40 +420,22 @@ static const struct mtk_gate_regs infra2_cg_regs = {
.sta_ofs = 0x00b0,
};

-#define GATE_ICG0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
-}
+#define GATE_ICG0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_ICG1(_id, _name, _parent, _shift) \
- GATE_ICG1_FLAGS(_id, _name, _parent, _shift, 0)
+#define GATE_ICG1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_ICG1_FLAGS(_id, _name, _parent, _shift, _flags) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- .flags = _flags, \
-}
+#define GATE_ICG1_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flags)

-#define GATE_ICG2(_id, _name, _parent, _shift) \
- GATE_ICG2_FLAGS(_id, _name, _parent, _shift, 0)
+#define GATE_ICG2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_ICG2_FLAGS(_id, _name, _parent, _shift, _flags) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- .flags = _flags, \
-}
+#define GATE_ICG2_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra2_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flags)

/*
* Clock gates dramc and dramc_b are needed by the DRAM controller.
diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
index 9f2e5aa7b5d9..b17731fa1144 100644
--- a/drivers/clk/mediatek/clk-mt7622-aud.c
+++ b/drivers/clk/mediatek/clk-mt7622-aud.c
@@ -16,41 +16,17 @@

#include <dt-bindings/clock/mt7622-clk.h>

-#define GATE_AUDIO0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

-#define GATE_AUDIO1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

-#define GATE_AUDIO2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

-#define GATE_AUDIO3(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &audio3_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUDIO3(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio3_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

static const struct mtk_gate_regs audio0_cg_regs = {
.set_ofs = 0x0,
diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c
index 43de0477d5d9..a60190e83418 100644
--- a/drivers/clk/mediatek/clk-mt7622-eth.c
+++ b/drivers/clk/mediatek/clk-mt7622-eth.c
@@ -16,14 +16,8 @@

#include <dt-bindings/clock/mt7622-clk.h>

-#define GATE_ETH(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &eth_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_ETH(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &eth_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

static const struct mtk_gate_regs eth_cg_regs = {
.set_ofs = 0x30,
@@ -45,14 +39,8 @@ static const struct mtk_gate_regs sgmii_cg_regs = {
.sta_ofs = 0xE4,
};

-#define GATE_SGMII(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &sgmii_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_SGMII(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &sgmii_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

static const struct mtk_gate sgmii_clks[] = {
GATE_SGMII(CLK_SGMII_TX250M_EN, "sgmii_tx250m_en",
diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c
index 67e96231dd25..55baa6d06a20 100644
--- a/drivers/clk/mediatek/clk-mt7622-hif.c
+++ b/drivers/clk/mediatek/clk-mt7622-hif.c
@@ -16,23 +16,11 @@

#include <dt-bindings/clock/mt7622-clk.h>

-#define GATE_PCIE(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &pcie_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_PCIE(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &pcie_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

-#define GATE_SSUSB(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ssusb_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_SSUSB(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ssusb_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

static const struct mtk_gate_regs pcie_cg_regs = {
.set_ofs = 0x30,
diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
index 3b55f8641fae..eebbb8790693 100644
--- a/drivers/clk/mediatek/clk-mt7622.c
+++ b/drivers/clk/mediatek/clk-mt7622.c
@@ -50,59 +50,28 @@
_pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
NULL, "clkxtal")

-#define GATE_APMIXED(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &apmixed_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_APMIXED_AO(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &apmixed_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr_inv, CLK_IS_CRITICAL)

-#define GATE_INFRA(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_INFRA(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_TOP0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_TOP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

-#define GATE_TOP1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_TOP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

-#define GATE_PERI0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_PERI0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_PERI1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_PERI0_AO(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &peri0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, CLK_IS_CRITICAL)
+
+#define GATE_PERI1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static DEFINE_SPINLOCK(mt7622_clk_lock);

@@ -350,7 +319,7 @@ static const struct mtk_pll_data plls[] = {
};

static const struct mtk_gate apmixed_clks[] = {
- GATE_APMIXED(CLK_APMIXED_MAIN_CORE_EN, "main_core_en", "mainpll", 5),
+ GATE_APMIXED_AO(CLK_APMIXED_MAIN_CORE_EN, "main_core_en", "mainpll", 5),
};

static const struct mtk_gate infra_clks[] = {
@@ -485,7 +454,7 @@ static const struct mtk_gate peri_clks[] = {
GATE_PERI0(CLK_PERI_AP_DMA_PD, "peri_ap_dma_pd", "axi_sel", 12),
GATE_PERI0(CLK_PERI_MSDC30_0_PD, "peri_msdc30_0", "msdc30_0_sel", 13),
GATE_PERI0(CLK_PERI_MSDC30_1_PD, "peri_msdc30_1", "msdc30_1_sel", 14),
- GATE_PERI0(CLK_PERI_UART0_PD, "peri_uart0_pd", "axi_sel", 17),
+ GATE_PERI0_AO(CLK_PERI_UART0_PD, "peri_uart0_pd", "axi_sel", 17),
GATE_PERI0(CLK_PERI_UART1_PD, "peri_uart1_pd", "axi_sel", 18),
GATE_PERI0(CLK_PERI_UART2_PD, "peri_uart2_pd", "axi_sel", 19),
GATE_PERI0(CLK_PERI_UART3_PD, "peri_uart3_pd", "axi_sel", 20),
@@ -513,12 +482,12 @@ static struct mtk_composite infra_muxes[] = {

static struct mtk_composite top_muxes[] = {
/* CLK_CFG_0 */
- MUX_GATE(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
- 0x040, 0, 3, 7),
- MUX_GATE(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
- 0x040, 8, 1, 15),
- MUX_GATE(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
- 0x040, 16, 1, 23),
+ MUX_GATE_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+ 0x040, 0, 3, 7, CLK_IS_CRITICAL),
+ MUX_GATE_FLAGS(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
+ 0x040, 8, 1, 15, CLK_IS_CRITICAL),
+ MUX_GATE_FLAGS(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
+ 0x040, 16, 1, 23, CLK_IS_CRITICAL),
MUX_GATE(CLK_TOP_ETH_SEL, "eth_sel", eth_parents,
0x040, 24, 3, 31),

@@ -655,10 +624,6 @@ static int mtk_topckgen_init(struct platform_device *pdev)
mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
clk_data);

- clk_prepare_enable(clk_data->hws[CLK_TOP_AXI_SEL]->clk);
- clk_prepare_enable(clk_data->hws[CLK_TOP_MEM_SEL]->clk);
- clk_prepare_enable(clk_data->hws[CLK_TOP_DDRPHYCFG_SEL]->clk);
-
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}

@@ -701,9 +666,6 @@ static int mtk_apmixedsys_init(struct platform_device *pdev)
mtk_clk_register_gates(node, apmixed_clks,
ARRAY_SIZE(apmixed_clks), clk_data);

- clk_prepare_enable(clk_data->hws[CLK_APMIXED_ARMPLL]->clk);
- clk_prepare_enable(clk_data->hws[CLK_APMIXED_MAIN_CORE_EN]->clk);
-
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}

@@ -730,8 +692,6 @@ static int mtk_pericfg_init(struct platform_device *pdev)
if (r)
return r;

- clk_prepare_enable(clk_data->hws[CLK_PERI_UART0_PD]->clk);
-
mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[1]);

return 0;
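
The clk_prepare_enable() calls dropped from the mt7622 init paths above (and from mt8135 further down) are superseded by registering the same clocks with CLK_IS_CRITICAL: the clk core prepares and enables a critical clock when it is registered and never gates it, so the driver no longer needs to pin it by hand. A minimal before/after sketch, reusing lines from this diff:

/* Before (removed above): the always-on mux was pinned manually at init. */
clk_prepare_enable(clk_data->hws[CLK_TOP_AXI_SEL]->clk);

/* After: the flag makes the clk core keep it enabled from registration on. */
MUX_GATE_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
	       0x040, 0, 3, 7, CLK_IS_CRITICAL),
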
diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
index 282dd6559465..b0c8fa3b8bbe 100644
--- a/drivers/clk/mediatek/clk-mt7629-eth.c
+++ b/drivers/clk/mediatek/clk-mt7629-eth.c
@@ -16,14 +16,8 @@

#include <dt-bindings/clock/mt7629-clk.h>

-#define GATE_ETH(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &eth_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_ETH(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &eth_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

static const struct mtk_gate_regs eth_cg_regs = {
.set_ofs = 0x30,
@@ -45,14 +39,8 @@ static const struct mtk_gate_regs sgmii_cg_regs = {
.sta_ofs = 0xE4,
};

-#define GATE_SGMII(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &sgmii_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_SGMII(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &sgmii_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

static const struct mtk_gate sgmii_clks[2][4] = {
{
diff --git a/drivers/clk/mediatek/clk-mt7629-hif.c b/drivers/clk/mediatek/clk-mt7629-hif.c
index 0c8b9e139789..3628811a2f57 100644
--- a/drivers/clk/mediatek/clk-mt7629-hif.c
+++ b/drivers/clk/mediatek/clk-mt7629-hif.c
@@ -16,23 +16,11 @@

#include <dt-bindings/clock/mt7629-clk.h>

-#define GATE_PCIE(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &pcie_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_PCIE(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &pcie_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

-#define GATE_SSUSB(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &ssusb_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_SSUSB(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ssusb_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

static const struct mtk_gate_regs pcie_cg_regs = {
.set_ofs = 0x30,
diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
index e4a08c811adc..0bc88b7d171b 100644
--- a/drivers/clk/mediatek/clk-mt7629.c
+++ b/drivers/clk/mediatek/clk-mt7629.c
@@ -50,41 +50,17 @@
_pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
NULL, "clk20m")

-#define GATE_APMIXED(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &apmixed_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_APMIXED(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &apmixed_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

-#define GATE_INFRA(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_INFRA(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_PERI0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_PERI0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_PERI1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_PERI1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static DEFINE_SPINLOCK(mt7629_clk_lock);

diff --git a/drivers/clk/mediatek/clk-mt7986-eth.c b/drivers/clk/mediatek/clk-mt7986-eth.c
index 7868c0728e96..c21e1d672384 100644
--- a/drivers/clk/mediatek/clk-mt7986-eth.c
+++ b/drivers/clk/mediatek/clk-mt7986-eth.c
@@ -22,12 +22,8 @@ static const struct mtk_gate_regs sgmii0_cg_regs = {
.sta_ofs = 0xe4,
};

-#define GATE_SGMII0(_id, _name, _parent, _shift) \
- { \
- .id = _id, .name = _name, .parent_name = _parent, \
- .regs = &sgmii0_cg_regs, .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_SGMII0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &sgmii0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

static const struct mtk_gate sgmii0_clks[] __initconst = {
GATE_SGMII0(CLK_SGMII0_TX250M_EN, "sgmii0_tx250m_en", "top_xtal", 2),
@@ -42,12 +38,8 @@ static const struct mtk_gate_regs sgmii1_cg_regs = {
.sta_ofs = 0xe4,
};

-#define GATE_SGMII1(_id, _name, _parent, _shift) \
- { \
- .id = _id, .name = _name, .parent_name = _parent, \
- .regs = &sgmii1_cg_regs, .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_SGMII1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &sgmii1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

static const struct mtk_gate sgmii1_clks[] __initconst = {
GATE_SGMII1(CLK_SGMII1_TX250M_EN, "sgmii1_tx250m_en", "top_xtal", 2),
@@ -62,12 +54,8 @@ static const struct mtk_gate_regs eth_cg_regs = {
.sta_ofs = 0x30,
};

-#define GATE_ETH(_id, _name, _parent, _shift) \
- { \
- .id = _id, .name = _name, .parent_name = _parent, \
- .regs = &eth_cg_regs, .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr_inv, \
- }
+#define GATE_ETH(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &eth_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)

static const struct mtk_gate eth_clks[] __initconst = {
GATE_ETH(CLK_ETH_FE_EN, "eth_fe_en", "netsys_2x_sel", 6),
diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c
index 49666047bf0e..74e68a719730 100644
--- a/drivers/clk/mediatek/clk-mt7986-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c
@@ -87,26 +87,14 @@ static const struct mtk_gate_regs infra2_cg_regs = {
.sta_ofs = 0x68,
};

-#define GATE_INFRA0(_id, _name, _parent, _shift) \
- { \
- .id = _id, .name = _name, .parent_name = _parent, \
- .regs = &infra0_cg_regs, .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_INFRA0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_INFRA1(_id, _name, _parent, _shift) \
- { \
- .id = _id, .name = _name, .parent_name = _parent, \
- .regs = &infra1_cg_regs, .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_INFRA1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_INFRA2(_id, _name, _parent, _shift) \
- { \
- .id = _id, .name = _name, .parent_name = _parent, \
- .regs = &infra2_cg_regs, .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_INFRA2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate infra_clks[] = {
/* INFRA0 */
diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c
index b68888a034c4..3ea06d2ec2f1 100644
--- a/drivers/clk/mediatek/clk-mt8135.c
+++ b/drivers/clk/mediatek/clk-mt8135.c
@@ -2,6 +2,8 @@
/*
* Copyright (c) 2014 MediaTek Inc.
* Author: James Liao <jamesjj.liao@xxxxxxxxxxxx>
+ * Copyright (c) 2023 Collabora, Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@xxxxxxxxxxxxx>
*/

#include <linux/clk.h>
@@ -390,7 +392,7 @@ static const struct mtk_composite top_muxes[] __initconst = {
MUX_GATE(CLK_TOP_GCPU_SEL, "gcpu_sel", gcpu_parents, 0x0164, 24, 3, 31),
/* CLK_CFG_9 */
MUX_GATE(CLK_TOP_DPI1_SEL, "dpi1_sel", dpi1_parents, 0x0168, 0, 2, 7),
- MUX_GATE(CLK_TOP_CCI_SEL, "cci_sel", cci_parents, 0x0168, 8, 3, 15),
+ MUX_GATE_FLAGS(CLK_TOP_CCI_SEL, "cci_sel", cci_parents, 0x0168, 8, 3, 15, CLK_IS_CRITICAL),
MUX_GATE(CLK_TOP_APLL_SEL, "apll_sel", apll_parents, 0x0168, 16, 3, 23),
MUX_GATE(CLK_TOP_HDMIPLL_SEL, "hdmipll_sel", hdmipll_parents, 0x0168, 24, 2, 31),
};
@@ -401,14 +403,12 @@ static const struct mtk_gate_regs infra_cg_regs = {
.sta_ofs = 0x0048,
};

-#define GATE_ICG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &infra_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_ICG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_ICG_AO(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, CLK_IS_CRITICAL)

static const struct mtk_gate infra_clks[] __initconst = {
GATE_ICG(CLK_INFRA_PMIC_WRAP, "pmic_wrap_ck", "axi_sel", 23),
@@ -417,7 +417,7 @@ static const struct mtk_gate infra_clks[] __initconst = {
GATE_ICG(CLK_INFRA_CCIF0_AP_CTRL, "ccif0_ap_ctrl", "axi_sel", 20),
GATE_ICG(CLK_INFRA_KP, "kp_ck", "axi_sel", 16),
GATE_ICG(CLK_INFRA_CPUM, "cpum_ck", "cpum_tck_in", 15),
- GATE_ICG(CLK_INFRA_M4U, "m4u_ck", "mem_sel", 8),
+ GATE_ICG_AO(CLK_INFRA_M4U, "m4u_ck", "mem_sel", 8),
GATE_ICG(CLK_INFRA_MFGAXI, "mfgaxi_ck", "axi_sel", 7),
GATE_ICG(CLK_INFRA_DEVAPC, "devapc_ck", "axi_sel", 6),
GATE_ICG(CLK_INFRA_AUDIO, "audio_ck", "aud_intbus_sel", 5),
@@ -438,23 +438,11 @@ static const struct mtk_gate_regs peri1_cg_regs = {
.sta_ofs = 0x001c,
};

-#define GATE_PERI0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_PERI0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_PERI1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &peri1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_PERI1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate peri_gates[] __initconst = {
/* PERI0 */
@@ -551,8 +539,6 @@ static void __init mtk_topckgen_init(struct device_node *node)
mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
&mt8135_clk_lock, clk_data);

- clk_prepare_enable(clk_data->hws[CLK_TOP_CCI_SEL]->clk);
-
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
@@ -570,8 +556,6 @@ static void __init mtk_infrasys_init(struct device_node *node)
mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
clk_data);

- clk_prepare_enable(clk_data->hws[CLK_INFRA_M4U]->clk);
-
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt8167-aud.c b/drivers/clk/mediatek/clk-mt8167-aud.c
index ce1ae8d243c3..b5ac196cd945 100644
--- a/drivers/clk/mediatek/clk-mt8167-aud.c
+++ b/drivers/clk/mediatek/clk-mt8167-aud.c
@@ -23,14 +23,9 @@ static const struct mtk_gate_regs aud_cg_regs = {
.sta_ofs = 0x0,
};

-#define GATE_AUD(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &aud_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUD(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &aud_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+

static const struct mtk_gate aud_clks[] __initconst = {
GATE_AUD(CLK_AUD_AFE, "aud_afe", "clk26m_ck", 2),
diff --git a/drivers/clk/mediatek/clk-mt8167-img.c b/drivers/clk/mediatek/clk-mt8167-img.c
index e359e563d2b7..4e7c0772b4f9 100644
--- a/drivers/clk/mediatek/clk-mt8167-img.c
+++ b/drivers/clk/mediatek/clk-mt8167-img.c
@@ -23,14 +23,8 @@ static const struct mtk_gate_regs img_cg_regs = {
.sta_ofs = 0x0,
};

-#define GATE_IMG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &img_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate img_clks[] __initconst = {
GATE_IMG(CLK_IMG_LARB1_SMI, "img_larb1_smi", "smi_mm", 0),
diff --git a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
index 4fd82fe87d6e..192714498b2e 100644
--- a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
@@ -23,14 +23,8 @@ static const struct mtk_gate_regs mfg_cg_regs = {
.sta_ofs = 0x0,
};

-#define GATE_MFG(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mfg_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate mfg_clks[] __initconst = {
GATE_MFG(CLK_MFG_BAXI, "mfg_baxi", "ahb_infra_sel", 0),
diff --git a/drivers/clk/mediatek/clk-mt8167-mm.c b/drivers/clk/mediatek/clk-mt8167-mm.c
index 73910060577f..a94961b7b8cc 100644
--- a/drivers/clk/mediatek/clk-mt8167-mm.c
+++ b/drivers/clk/mediatek/clk-mt8167-mm.c
@@ -29,23 +29,11 @@ static const struct mtk_gate_regs mm1_cg_regs = {
.sta_ofs = 0x110,
};

-#define GATE_MM0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_MM1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_MM0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_MM1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate mm_clks[] = {
/* MM0 */
diff --git a/drivers/clk/mediatek/clk-mt8167-vdec.c b/drivers/clk/mediatek/clk-mt8167-vdec.c
index ee4fffb6859d..38f0ba357d59 100644
--- a/drivers/clk/mediatek/clk-mt8167-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8167-vdec.c
@@ -29,23 +29,11 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
.sta_ofs = 0x8,
};

-#define GATE_VDEC0_I(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VDEC0_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)

-#define GATE_VDEC1_I(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &vdec1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_VDEC1_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)

static const struct mtk_gate vdec_clks[] __initconst = {
/* VDEC0 */
diff --git a/drivers/clk/mediatek/clk-mt8173-mm.c b/drivers/clk/mediatek/clk-mt8173-mm.c
index 8abf42c2030c..5826eabdc9c7 100644
--- a/drivers/clk/mediatek/clk-mt8173-mm.c
+++ b/drivers/clk/mediatek/clk-mt8173-mm.c
@@ -25,23 +25,11 @@ static const struct mtk_gate_regs mm1_cg_regs = {
.sta_ofs = 0x0110,
};

-#define GATE_MM0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_MM1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_MM0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_MM1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate mt8173_mm_clks[] = {
/* MM0 */
diff --git a/drivers/clk/mediatek/clk-mt8516-aud.c b/drivers/clk/mediatek/clk-mt8516-aud.c
index 90f48068a8de..a3dafc719799 100644
--- a/drivers/clk/mediatek/clk-mt8516-aud.c
+++ b/drivers/clk/mediatek/clk-mt8516-aud.c
@@ -22,14 +22,8 @@ static const struct mtk_gate_regs aud_cg_regs = {
.sta_ofs = 0x0,
};

-#define GATE_AUD(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &aud_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_AUD(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &aud_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)

static const struct mtk_gate aud_clks[] __initconst = {
GATE_AUD(CLK_AUD_AFE, "aud_afe", "clk26m_ck", 2),
diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c
index b96db88893e2..056953d594c6 100644
--- a/drivers/clk/mediatek/clk-mt8516.c
+++ b/drivers/clk/mediatek/clk-mt8516.c
@@ -525,59 +525,23 @@ static const struct mtk_gate_regs top5_cg_regs = {
.sta_ofs = 0x44,
};

-#define GATE_TOP1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_TOP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_TOP2(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_TOP2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_TOP2_I(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top2_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_TOP2_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top2_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)

-#define GATE_TOP3(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top3_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
+#define GATE_TOP3(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top3_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

-#define GATE_TOP4_I(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top4_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr_inv, \
- }
+#define GATE_TOP4_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top4_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)

-#define GATE_TOP5(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &top5_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_no_setclr, \
- }
+#define GATE_TOP5(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top5_cg_regs, _shift, &mtk_clk_gate_ops_setclr)

static const struct mtk_gate top_clks[] __initconst = {
/* TOP1 */
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
index 4f0a19db7ed7..cc5d7dee59f0 100644
--- a/drivers/clk/microchip/clk-mpfs.c
+++ b/drivers/clk/microchip/clk-mpfs.c
@@ -374,14 +374,13 @@ static void mpfs_reset_unregister_adev(void *_adev)
struct auxiliary_device *adev = _adev;

auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
}

static void mpfs_reset_adev_release(struct device *dev)
{
struct auxiliary_device *adev = to_auxiliary_dev(dev);

- auxiliary_device_uninit(adev);
-
kfree(adev);
}

diff --git a/drivers/clk/qcom/dispcc-qcm2290.c b/drivers/clk/qcom/dispcc-qcm2290.c
index 96b149365912..24755dc841f9 100644
--- a/drivers/clk/qcom/dispcc-qcm2290.c
+++ b/drivers/clk/qcom/dispcc-qcm2290.c
@@ -26,7 +26,6 @@ enum {
P_DISP_CC_PLL0_OUT_MAIN,
P_DSI0_PHY_PLL_OUT_BYTECLK,
P_DSI0_PHY_PLL_OUT_DSICLK,
- P_DSI1_PHY_PLL_OUT_DSICLK,
P_GPLL0_OUT_MAIN,
P_SLEEP_CLK,
};
@@ -71,7 +70,6 @@ static const struct parent_map disp_cc_parent_map_0[] = {
static const struct clk_parent_data disp_cc_parent_data_0[] = {
{ .fw_name = "bi_tcxo" },
{ .fw_name = "dsi0_phy_pll_out_byteclk" },
- { .fw_name = "core_bi_pll_test_se" },
};

static const struct parent_map disp_cc_parent_map_1[] = {
@@ -80,7 +78,6 @@ static const struct parent_map disp_cc_parent_map_1[] = {

static const struct clk_parent_data disp_cc_parent_data_1[] = {
{ .fw_name = "bi_tcxo" },
- { .fw_name = "core_bi_pll_test_se" },
};

static const struct parent_map disp_cc_parent_map_2[] = {
@@ -91,7 +88,6 @@ static const struct parent_map disp_cc_parent_map_2[] = {
static const struct clk_parent_data disp_cc_parent_data_2[] = {
{ .fw_name = "bi_tcxo_ao" },
{ .fw_name = "gcc_disp_gpll0_div_clk_src" },
- { .fw_name = "core_bi_pll_test_se" },
};

static const struct parent_map disp_cc_parent_map_3[] = {
@@ -104,20 +100,16 @@ static const struct clk_parent_data disp_cc_parent_data_3[] = {
{ .fw_name = "bi_tcxo" },
{ .hw = &disp_cc_pll0.clkr.hw },
{ .fw_name = "gcc_disp_gpll0_clk_src" },
- { .fw_name = "core_bi_pll_test_se" },
};

static const struct parent_map disp_cc_parent_map_4[] = {
{ P_BI_TCXO, 0 },
{ P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
- { P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
};

static const struct clk_parent_data disp_cc_parent_data_4[] = {
{ .fw_name = "bi_tcxo" },
{ .fw_name = "dsi0_phy_pll_out_dsiclk" },
- { .fw_name = "dsi1_phy_pll_out_dsiclk" },
- { .fw_name = "core_bi_pll_test_se" },
};

static const struct parent_map disp_cc_parent_map_5[] = {
@@ -126,7 +118,6 @@ static const struct parent_map disp_cc_parent_map_5[] = {

static const struct clk_parent_data disp_cc_parent_data_5[] = {
{ .fw_name = "sleep_clk" },
- { .fw_name = "core_bi_pll_test_se" },
};

static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
diff --git a/drivers/clk/qcom/gcc-qcm2290.c b/drivers/clk/qcom/gcc-qcm2290.c
index 7792b8f23704..096deff2ba25 100644
--- a/drivers/clk/qcom/gcc-qcm2290.c
+++ b/drivers/clk/qcom/gcc-qcm2290.c
@@ -1243,7 +1243,8 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
.name = "gcc_sdcc2_apps_clk_src",
.parent_data = gcc_parents_12,
.num_parents = ARRAY_SIZE(gcc_parents_12),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
+ .flags = CLK_OPS_PARENT_ENABLE,
},
};

diff --git a/drivers/clk/qcom/gcc-sm6115.c b/drivers/clk/qcom/gcc-sm6115.c
index 565f9912039f..631419caf695 100644
--- a/drivers/clk/qcom/gcc-sm6115.c
+++ b/drivers/clk/qcom/gcc-sm6115.c
@@ -694,7 +694,7 @@ static struct clk_rcg2 gcc_camss_axi_clk_src = {
.parent_data = gcc_parents_7,
.num_parents = ARRAY_SIZE(gcc_parents_7),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -715,7 +715,7 @@ static struct clk_rcg2 gcc_camss_cci_clk_src = {
.parent_data = gcc_parents_9,
.num_parents = ARRAY_SIZE(gcc_parents_9),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -738,7 +738,7 @@ static struct clk_rcg2 gcc_camss_csi0phytimer_clk_src = {
.parent_data = gcc_parents_4,
.num_parents = ARRAY_SIZE(gcc_parents_4),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -753,7 +753,7 @@ static struct clk_rcg2 gcc_camss_csi1phytimer_clk_src = {
.parent_data = gcc_parents_4,
.num_parents = ARRAY_SIZE(gcc_parents_4),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -768,7 +768,7 @@ static struct clk_rcg2 gcc_camss_csi2phytimer_clk_src = {
.parent_data = gcc_parents_4,
.num_parents = ARRAY_SIZE(gcc_parents_4),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -790,7 +790,7 @@ static struct clk_rcg2 gcc_camss_mclk0_clk_src = {
.parent_data = gcc_parents_3,
.num_parents = ARRAY_SIZE(gcc_parents_3),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -805,7 +805,7 @@ static struct clk_rcg2 gcc_camss_mclk1_clk_src = {
.parent_data = gcc_parents_3,
.num_parents = ARRAY_SIZE(gcc_parents_3),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -820,7 +820,7 @@ static struct clk_rcg2 gcc_camss_mclk2_clk_src = {
.parent_data = gcc_parents_3,
.num_parents = ARRAY_SIZE(gcc_parents_3),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -835,7 +835,7 @@ static struct clk_rcg2 gcc_camss_mclk3_clk_src = {
.parent_data = gcc_parents_3,
.num_parents = ARRAY_SIZE(gcc_parents_3),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -857,7 +857,7 @@ static struct clk_rcg2 gcc_camss_ope_ahb_clk_src = {
.parent_data = gcc_parents_8,
.num_parents = ARRAY_SIZE(gcc_parents_8),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -881,7 +881,7 @@ static struct clk_rcg2 gcc_camss_ope_clk_src = {
.parent_data = gcc_parents_8,
.num_parents = ARRAY_SIZE(gcc_parents_8),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -916,7 +916,7 @@ static struct clk_rcg2 gcc_camss_tfe_0_clk_src = {
.parent_data = gcc_parents_5,
.num_parents = ARRAY_SIZE(gcc_parents_5),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -941,7 +941,7 @@ static struct clk_rcg2 gcc_camss_tfe_0_csid_clk_src = {
.parent_data = gcc_parents_6,
.num_parents = ARRAY_SIZE(gcc_parents_6),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -956,7 +956,7 @@ static struct clk_rcg2 gcc_camss_tfe_1_clk_src = {
.parent_data = gcc_parents_5,
.num_parents = ARRAY_SIZE(gcc_parents_5),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -971,7 +971,7 @@ static struct clk_rcg2 gcc_camss_tfe_1_csid_clk_src = {
.parent_data = gcc_parents_6,
.num_parents = ARRAY_SIZE(gcc_parents_6),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -986,7 +986,7 @@ static struct clk_rcg2 gcc_camss_tfe_2_clk_src = {
.parent_data = gcc_parents_5,
.num_parents = ARRAY_SIZE(gcc_parents_5),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1001,7 +1001,7 @@ static struct clk_rcg2 gcc_camss_tfe_2_csid_clk_src = {
.parent_data = gcc_parents_6,
.num_parents = ARRAY_SIZE(gcc_parents_6),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1024,7 +1024,7 @@ static struct clk_rcg2 gcc_camss_tfe_cphy_rx_clk_src = {
.parent_data = gcc_parents_10,
.num_parents = ARRAY_SIZE(gcc_parents_10),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1046,7 +1046,7 @@ static struct clk_rcg2 gcc_camss_top_ahb_clk_src = {
.parent_data = gcc_parents_7,
.num_parents = ARRAY_SIZE(gcc_parents_7),
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1116,7 +1116,7 @@ static struct clk_rcg2 gcc_pdm2_clk_src = {
.name = "gcc_pdm2_clk_src",
.parent_data = gcc_parents_0,
.num_parents = ARRAY_SIZE(gcc_parents_0),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1329,7 +1329,7 @@ static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
.name = "gcc_ufs_phy_axi_clk_src",
.parent_data = gcc_parents_0,
.num_parents = ARRAY_SIZE(gcc_parents_0),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1351,7 +1351,7 @@ static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
.name = "gcc_ufs_phy_ice_core_clk_src",
.parent_data = gcc_parents_0,
.num_parents = ARRAY_SIZE(gcc_parents_0),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1392,7 +1392,7 @@ static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
.name = "gcc_ufs_phy_unipro_core_clk_src",
.parent_data = gcc_parents_0,
.num_parents = ARRAY_SIZE(gcc_parents_0),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1414,7 +1414,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
.name = "gcc_usb30_prim_master_clk_src",
.parent_data = gcc_parents_0,
.num_parents = ARRAY_SIZE(gcc_parents_0),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1483,7 +1483,7 @@ static struct clk_rcg2 gcc_video_venus_clk_src = {
.parent_data = gcc_parents_13,
.num_parents = ARRAY_SIZE(gcc_parents_13),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

diff --git a/drivers/clk/qcom/gcc-sm8350.c b/drivers/clk/qcom/gcc-sm8350.c
index c3731f96c8e6..430ef407a834 100644
--- a/drivers/clk/qcom/gcc-sm8350.c
+++ b/drivers/clk/qcom/gcc-sm8350.c
@@ -17,6 +17,7 @@
#include "clk-regmap.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
#include "gdsc.h"
#include "reset.h"

@@ -167,26 +168,6 @@ static const struct clk_parent_data gcc_parent_data_3[] = {
{ .fw_name = "core_bi_pll_test_se" },
};

-static const struct parent_map gcc_parent_map_4[] = {
- { P_PCIE_0_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_4[] = {
- { .fw_name = "pcie_0_pipe_clk", },
- { .fw_name = "bi_tcxo" },
-};
-
-static const struct parent_map gcc_parent_map_5[] = {
- { P_PCIE_1_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_5[] = {
- { .fw_name = "pcie_1_pipe_clk" },
- { .fw_name = "bi_tcxo" },
-};
-
static const struct parent_map gcc_parent_map_6[] = {
{ P_BI_TCXO, 0 },
{ P_GCC_GPLL0_OUT_MAIN, 1 },
@@ -289,32 +270,30 @@ static const struct clk_parent_data gcc_parent_data_14[] = {
{ .fw_name = "bi_tcxo" },
};

-static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
.reg = 0x6b054,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk_src",
- .parent_data = gcc_parent_data_4,
- .num_parents = ARRAY_SIZE(gcc_parent_data_4),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pcie_0_pipe_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};

-static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
.reg = 0x8d054,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_5,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk_src",
- .parent_data = gcc_parent_data_5,
- .num_parents = ARRAY_SIZE(gcc_parent_data_5),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pcie_1_pipe_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
index 1339f9211a14..134eb1529ede 100644
--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
+++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
@@ -696,6 +696,8 @@ static const struct qcom_cc_desc lpass_cc_sc7280_desc = {
.config = &lpass_audio_cc_sc7280_regmap_config,
.clks = lpass_cc_sc7280_clocks,
.num_clks = ARRAY_SIZE(lpass_cc_sc7280_clocks),
+ .gdscs = lpass_aon_cc_sc7280_gdscs,
+ .num_gdscs = ARRAY_SIZE(lpass_aon_cc_sc7280_gdscs),
};

static const struct qcom_cc_desc lpass_audio_cc_sc7280_desc = {
diff --git a/drivers/clk/qcom/lpasscc-sc7280.c b/drivers/clk/qcom/lpasscc-sc7280.c
index 5c1e17bd0d76..8486d7135ab1 100644
--- a/drivers/clk/qcom/lpasscc-sc7280.c
+++ b/drivers/clk/qcom/lpasscc-sc7280.c
@@ -118,14 +118,18 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
goto destroy_pm_clk;
}

- lpass_regmap_config.name = "qdsp6ss";
- desc = &lpass_qdsp6ss_sc7280_desc;
-
- ret = qcom_cc_probe_by_index(pdev, 0, desc);
- if (ret)
- goto destroy_pm_clk;
+ if (!of_property_read_bool(pdev->dev.of_node, "qcom,adsp-pil-mode")) {
+ lpass_regmap_config.name = "qdsp6ss";
+ lpass_regmap_config.max_register = 0x3f;
+ desc = &lpass_qdsp6ss_sc7280_desc;
+
+ ret = qcom_cc_probe_by_index(pdev, 0, desc);
+ if (ret)
+ goto destroy_pm_clk;
+ }

lpass_regmap_config.name = "top_cc";
+ lpass_regmap_config.max_register = 0x4;
desc = &lpass_cc_top_sc7280_desc;

ret = qcom_cc_probe_by_index(pdev, 1, desc);
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 306910a3a0d3..9ebd6c451b3d 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -1263,7 +1263,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
RK3399_CLKSEL_CON(56), 6, 2, MFLAGS,
RK3399_CLKGATE_CON(10), 7, GFLAGS),

- COMPOSITE_NOGATE(SCLK_CIF_OUT, "clk_cifout", mux_clk_cif_p, 0,
+ COMPOSITE_NOGATE(SCLK_CIF_OUT, "clk_cifout", mux_clk_cif_p, CLK_SET_RATE_PARENT,
RK3399_CLKSEL_CON(56), 5, 1, MFLAGS, 0, 5, DFLAGS),

/* gic */
diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c
index 9996c0542520..b1c248498be4 100644
--- a/drivers/clocksource/timer-davinci.c
+++ b/drivers/clocksource/timer-davinci.c
@@ -257,21 +257,25 @@ int __init davinci_timer_register(struct clk *clk,
resource_size(&timer_cfg->reg),
"davinci-timer")) {
pr_err("Unable to request memory region\n");
- return -EBUSY;
+ rv = -EBUSY;
+ goto exit_clk_disable;
}

base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg));
if (!base) {
pr_err("Unable to map the register range\n");
- return -ENOMEM;
+ rv = -ENOMEM;
+ goto exit_mem_region;
}

davinci_timer_init(base);
tick_rate = clk_get_rate(clk);

clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL);
- if (!clockevent)
- return -ENOMEM;
+ if (!clockevent) {
+ rv = -ENOMEM;
+ goto exit_iounmap_base;
+ }

clockevent->dev.name = "tim12";
clockevent->dev.features = CLOCK_EVT_FEAT_ONESHOT;
@@ -296,7 +300,7 @@ int __init davinci_timer_register(struct clk *clk,
"clockevent/tim12", clockevent);
if (rv) {
pr_err("Unable to request the clockevent interrupt\n");
- return rv;
+ goto exit_free_clockevent;
}

davinci_clocksource.dev.rating = 300;
@@ -323,13 +327,27 @@ int __init davinci_timer_register(struct clk *clk,
rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
if (rv) {
pr_err("Unable to register clocksource\n");
- return rv;
+ goto exit_free_irq;
}

sched_clock_register(davinci_timer_read_sched_clock,
DAVINCI_TIMER_CLKSRC_BITS, tick_rate);

return 0;
+
+exit_free_irq:
+ free_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start,
+ clockevent);
+exit_free_clockevent:
+ kfree(clockevent);
+exit_iounmap_base:
+ iounmap(base);
+exit_mem_region:
+ release_mem_region(timer_cfg->reg.start,
+ resource_size(&timer_cfg->reg));
+exit_clk_disable:
+ clk_disable_unprepare(clk);
+ return rv;
}

static int __init of_davinci_timer_register(struct device_node *np)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7e56a42750ea..285ba51b31f6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1727,7 +1727,7 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
* MHz. In such cases it is better to avoid getting into
* unnecessary frequency updates.
*/
- if (abs(policy->cur - new_freq) < HZ_PER_MHZ)
+ if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
return policy->cur;

cpufreq_out_of_sync(policy, new_freq);
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 7f2680bc9a0f..9a39a7ccfae9 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -373,13 +373,13 @@ static struct device *of_get_cci(struct device *cpu_dev)
struct platform_device *pdev;

np = of_parse_phandle(cpu_dev->of_node, "mediatek,cci", 0);
- if (IS_ERR_OR_NULL(np))
- return NULL;
+ if (!np)
+ return ERR_PTR(-ENODEV);

pdev = of_find_device_by_node(np);
of_node_put(np);
- if (IS_ERR_OR_NULL(pdev))
- return NULL;
+ if (!pdev)
+ return ERR_PTR(-ENODEV);

return &pdev->dev;
}
@@ -401,7 +401,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
info->ccifreq_bound = false;
if (info->soc_data->ccifreq_supported) {
info->cci_dev = of_get_cci(info->cpu_dev);
- if (IS_ERR_OR_NULL(info->cci_dev)) {
+ if (IS_ERR(info->cci_dev)) {
ret = PTR_ERR(info->cci_dev);
dev_err(cpu_dev, "cpu%d: failed to get cci device\n", cpu);
return -ENODEV;
@@ -420,7 +420,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
ret = PTR_ERR(info->inter_clk);
dev_err_probe(cpu_dev, ret,
"cpu%d: failed to get intermediate clk\n", cpu);
- goto out_free_resources;
+ goto out_free_mux_clock;
}

info->proc_reg = regulator_get_optional(cpu_dev, "proc");
@@ -428,13 +428,13 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
ret = PTR_ERR(info->proc_reg);
dev_err_probe(cpu_dev, ret,
"cpu%d: failed to get proc regulator\n", cpu);
- goto out_free_resources;
+ goto out_free_inter_clock;
}

ret = regulator_enable(info->proc_reg);
if (ret) {
dev_warn(cpu_dev, "cpu%d: failed to enable vproc\n", cpu);
- goto out_free_resources;
+ goto out_free_proc_reg;
}

/* Both presence and absence of sram regulator are valid cases. */
@@ -442,14 +442,14 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
if (IS_ERR(info->sram_reg)) {
ret = PTR_ERR(info->sram_reg);
if (ret == -EPROBE_DEFER)
- goto out_free_resources;
+ goto out_disable_proc_reg;

info->sram_reg = NULL;
} else {
ret = regulator_enable(info->sram_reg);
if (ret) {
dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu);
- goto out_free_resources;
+ goto out_free_sram_reg;
}
}

@@ -458,13 +458,13 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
if (ret) {
dev_err(cpu_dev,
"cpu%d: failed to get OPP-sharing information\n", cpu);
- goto out_free_resources;
+ goto out_disable_sram_reg;
}

ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);
if (ret) {
dev_warn(cpu_dev, "cpu%d: no OPP table\n", cpu);
- goto out_free_resources;
+ goto out_disable_sram_reg;
}

ret = clk_prepare_enable(info->cpu_clk);
@@ -533,43 +533,41 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
out_free_opp_table:
dev_pm_opp_of_cpumask_remove_table(&info->cpus);

-out_free_resources:
- if (regulator_is_enabled(info->proc_reg))
- regulator_disable(info->proc_reg);
- if (info->sram_reg && regulator_is_enabled(info->sram_reg))
+out_disable_sram_reg:
+ if (info->sram_reg)
regulator_disable(info->sram_reg);

- if (!IS_ERR(info->proc_reg))
- regulator_put(info->proc_reg);
- if (!IS_ERR(info->sram_reg))
+out_free_sram_reg:
+ if (info->sram_reg)
regulator_put(info->sram_reg);
- if (!IS_ERR(info->cpu_clk))
- clk_put(info->cpu_clk);
- if (!IS_ERR(info->inter_clk))
- clk_put(info->inter_clk);
+
+out_disable_proc_reg:
+ regulator_disable(info->proc_reg);
+
+out_free_proc_reg:
+ regulator_put(info->proc_reg);
+
+out_free_inter_clock:
+ clk_put(info->inter_clk);
+
+out_free_mux_clock:
+ clk_put(info->cpu_clk);

return ret;
}

static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
{
- if (!IS_ERR(info->proc_reg)) {
- regulator_disable(info->proc_reg);
- regulator_put(info->proc_reg);
- }
- if (!IS_ERR(info->sram_reg)) {
+ regulator_disable(info->proc_reg);
+ regulator_put(info->proc_reg);
+ if (info->sram_reg) {
regulator_disable(info->sram_reg);
regulator_put(info->sram_reg);
}
- if (!IS_ERR(info->cpu_clk)) {
- clk_disable_unprepare(info->cpu_clk);
- clk_put(info->cpu_clk);
- }
- if (!IS_ERR(info->inter_clk)) {
- clk_disable_unprepare(info->inter_clk);
- clk_put(info->inter_clk);
- }
-
+ clk_disable_unprepare(info->cpu_clk);
+ clk_put(info->cpu_clk);
+ clk_disable_unprepare(info->inter_clk);
+ clk_put(info->inter_clk);
dev_pm_opp_of_cpumask_remove_table(&info->cpus);
dev_pm_opp_unregister_notifier(info->cpu_dev, &info->opp_nb);
}
@@ -695,6 +693,15 @@ static const struct mtk_cpufreq_platform_data mt2701_platform_data = {
.ccifreq_supported = false,
};

+static const struct mtk_cpufreq_platform_data mt7622_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 1360000,
+ .sram_min_volt = 0,
+ .sram_max_volt = 1360000,
+ .ccifreq_supported = false,
+};
+
static const struct mtk_cpufreq_platform_data mt8183_platform_data = {
.min_volt_shift = 100000,
.max_volt_shift = 200000,
@@ -713,20 +720,29 @@ static const struct mtk_cpufreq_platform_data mt8186_platform_data = {
.ccifreq_supported = true,
};

+static const struct mtk_cpufreq_platform_data mt8516_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 1310000,
+ .sram_min_volt = 0,
+ .sram_max_volt = 1310000,
+ .ccifreq_supported = false,
+};
+
/* List of machines supported by this driver */
static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
{ .compatible = "mediatek,mt2701", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
- { .compatible = "mediatek,mt7622", .data = &mt2701_platform_data },
- { .compatible = "mediatek,mt7623", .data = &mt2701_platform_data },
- { .compatible = "mediatek,mt8167", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt7622", .data = &mt7622_platform_data },
+ { .compatible = "mediatek,mt7623", .data = &mt7622_platform_data },
+ { .compatible = "mediatek,mt8167", .data = &mt8516_platform_data },
{ .compatible = "mediatek,mt817x", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt8173", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt8176", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt8183", .data = &mt8183_platform_data },
{ .compatible = "mediatek,mt8186", .data = &mt8186_platform_data },
{ .compatible = "mediatek,mt8365", .data = &mt2701_platform_data },
- { .compatible = "mediatek,mt8516", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt8516", .data = &mt8516_platform_data },
{ }
};
MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index d10bf7635a0d..749b60c78da5 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -13,7 +13,6 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
-#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/units.h>
@@ -57,8 +56,6 @@ struct qcom_cpufreq_data {
struct cpufreq_policy *policy;

bool per_core_dcvs;
-
- struct freq_qos_request throttle_freq_req;
};

static unsigned long cpu_hw_rate, xo_rate;
@@ -343,8 +340,6 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)

throttled_freq = freq_hz / HZ_PER_KHZ;

- freq_qos_update_request(&data->throttle_freq_req, throttled_freq);
-
/* Update thermal pressure (the boost frequencies are accepted) */
arch_update_thermal_pressure(policy->related_cpus, throttled_freq);

@@ -437,14 +432,6 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
if (data->throttle_irq < 0)
return data->throttle_irq;

- ret = freq_qos_add_request(&policy->constraints,
- &data->throttle_freq_req, FREQ_QOS_MAX,
- FREQ_QOS_MAX_DEFAULT_VALUE);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to add freq constraint (%d)\n", ret);
- return ret;
- }
-
data->cancel_throttle = false;
data->policy = policy;

@@ -511,7 +498,6 @@ static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
if (data->throttle_irq <= 0)
return;

- freq_qos_remove_request(&data->throttle_freq_req);
free_irq(data->throttle_irq, data);
}

diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index 05fe2902df9a..af7320a768d2 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -612,7 +612,7 @@ static int __init sbi_cpuidle_init(void)
* 2) SBI HSM extension is available
*/
if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
- sbi_probe_extension(SBI_EXT_HSM) <= 0) {
+ !sbi_probe_extension(SBI_EXT_HSM)) {
pr_info("HSM suspend not available\n");
return 0;
}
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4a618d80e106..db242234c1cf 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -810,6 +810,7 @@ config CRYPTO_DEV_SA2UL
select CRYPTO_AES
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
+ select CRYPTO_DES
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 32253a064d0f..3b79e0d83d40 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -284,6 +284,10 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
const u32 rdsta_if = RDSTA_IF0 << sh_idx;
const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
const u32 rdsta_mask = rdsta_if | rdsta_pr;
+
+ /* Clear the contents before using the descriptor */
+ memset(desc, 0x00, CAAM_CMD_SZ * 7);
+
/*
* If the corresponding bit is set, this state handle
* was initialized by somebody else, so it's left alone.
@@ -327,8 +331,6 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
}

dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
- /* Clear the contents before recreating the descriptor */
- memset(desc, 0x00, CAAM_CMD_SZ * 7);
}

kfree(desc);
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 084d052fddcc..55411b494d69 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -451,9 +451,9 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
{ PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
- { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
+ { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index ad0d8c4a71ac..ba4613a8e84f 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1631,19 +1631,23 @@ static int safexcel_probe_generic(void *pdev,
&priv->ring[i].rdr);
if (ret) {
dev_err(dev, "Failed to initialize rings\n");
- return ret;
+ goto err_cleanup_rings;
}

priv->ring[i].rdr_req = devm_kcalloc(dev,
EIP197_DEFAULT_RING_SIZE,
sizeof(*priv->ring[i].rdr_req),
GFP_KERNEL);
- if (!priv->ring[i].rdr_req)
- return -ENOMEM;
+ if (!priv->ring[i].rdr_req) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }

ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
- if (!ring_irq)
- return -ENOMEM;
+ if (!ring_irq) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }

ring_irq->priv = priv;
ring_irq->ring = i;
@@ -1657,7 +1661,8 @@ static int safexcel_probe_generic(void *pdev,
ring_irq);
if (irq < 0) {
dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
- return irq;
+ ret = irq;
+ goto err_cleanup_rings;
}

priv->ring[i].irq = irq;
@@ -1669,8 +1674,10 @@ static int safexcel_probe_generic(void *pdev,
snprintf(wq_name, 9, "wq_ring%d", i);
priv->ring[i].workqueue =
create_singlethread_workqueue(wq_name);
- if (!priv->ring[i].workqueue)
- return -ENOMEM;
+ if (!priv->ring[i].workqueue) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }

priv->ring[i].requests = 0;
priv->ring[i].busy = false;
@@ -1687,16 +1694,26 @@ static int safexcel_probe_generic(void *pdev,
ret = safexcel_hw_init(priv);
if (ret) {
dev_err(dev, "HW init failed (%d)\n", ret);
- return ret;
+ goto err_cleanup_rings;
}

ret = safexcel_register_algorithms(priv);
if (ret) {
dev_err(dev, "Failed to register algorithms (%d)\n", ret);
- return ret;
+ goto err_cleanup_rings;
}

return 0;
+
+err_cleanup_rings:
+ for (i = 0; i < priv->config.rings; i++) {
+ if (priv->ring[i].irq)
+ irq_set_affinity_hint(priv->ring[i].irq, NULL);
+ if (priv->ring[i].workqueue)
+ destroy_workqueue(priv->ring[i].workqueue);
+ }
+
+ return ret;
}

static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 0a55a4f34dcf..20f50d0e65f8 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -296,6 +296,7 @@ struct adf_accel_dev {
u8 pf_compat_ver;
} vf;
};
+ struct mutex state_lock; /* protect state of the device */
bool is_vf;
u32 accel_id;
};
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 7bb477c3ce25..bff613eec5c4 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -58,6 +58,9 @@ void adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev);

+int adf_dev_up(struct adf_accel_dev *accel_dev, bool init_config);
+int adf_dev_down(struct adf_accel_dev *accel_dev, bool cache_config);
+
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
void adf_clean_vf_map(bool);

diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index 4c752eed10fe..86ee36feefad 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -223,6 +223,7 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
map->attached = true;
list_add_tail(&map->list, &vfs_table);
}
+ mutex_init(&accel_dev->state_lock);
unlock:
mutex_unlock(&table_lock);
return ret;
@@ -269,6 +270,7 @@ void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
}
}
unlock:
+ mutex_destroy(&accel_dev->state_lock);
list_del(&accel_dev->list);
mutex_unlock(&table_lock);
}
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 33a9a46d6949..d6f331424617 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -389,3 +389,67 @@ int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)

return 0;
}
+
+int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
+{
+ int ret = 0;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ mutex_lock(&accel_dev->state_lock);
+
+ if (!adf_dev_started(accel_dev)) {
+ dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
+ accel_dev->accel_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (reconfig) {
+ ret = adf_dev_shutdown_cache_cfg(accel_dev);
+ goto out;
+ }
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
+
+out:
+ mutex_unlock(&accel_dev->state_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_down);
+
+int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
+{
+ int ret = 0;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ mutex_lock(&accel_dev->state_lock);
+
+ if (adf_dev_started(accel_dev)) {
+ dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
+ accel_dev->accel_id);
+ ret = -EALREADY;
+ goto out;
+ }
+
+ if (config && GET_HW_DATA(accel_dev)->dev_config) {
+ ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
+ if (unlikely(ret))
+ goto out;
+ }
+
+ ret = adf_dev_init(accel_dev);
+ if (unlikely(ret))
+ goto out;
+
+ ret = adf_dev_start(accel_dev);
+
+out:
+ mutex_unlock(&accel_dev->state_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_up);
diff --git a/drivers/crypto/qat/qat_common/adf_sysfs.c b/drivers/crypto/qat/qat_common/adf_sysfs.c
index e8b078e719c2..3eb6611ab1b1 100644
--- a/drivers/crypto/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/qat/qat_common/adf_sysfs.c
@@ -50,38 +50,21 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,

switch (ret) {
case DEV_DOWN:
- if (!adf_dev_started(accel_dev)) {
- dev_info(dev, "Device qat_dev%d already down\n",
- accel_id);
- return -EINVAL;
- }
-
dev_info(dev, "Stopping device qat_dev%d\n", accel_id);

- ret = adf_dev_shutdown_cache_cfg(accel_dev);
+ ret = adf_dev_down(accel_dev, true);
if (ret < 0)
return -EINVAL;

break;
case DEV_UP:
- if (adf_dev_started(accel_dev)) {
- dev_info(dev, "Device qat_dev%d already up\n",
- accel_id);
- return -EINVAL;
- }
-
dev_info(dev, "Starting device qat_dev%d\n", accel_id);

- ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
- if (!ret)
- ret = adf_dev_init(accel_dev);
- if (!ret)
- ret = adf_dev_start(accel_dev);
-
+ ret = adf_dev_up(accel_dev, true);
if (ret < 0) {
dev_err(dev, "Failed to start device qat_dev%d\n",
accel_id);
- adf_dev_shutdown_cache_cfg(accel_dev);
+ adf_dev_down(accel_dev, true);
return ret;
}
break;
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index d1d2caea5c62..5aa0726aafe6 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -214,8 +214,11 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,

lockdep_assert_held_write(&cxl_dpa_rwsem);

- if (!len)
- goto success;
+ if (!len) {
+ dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
+ port->id, cxled->cxld.id);
+ return -EINVAL;
+ }

if (cxled->dpa_res) {
dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
@@ -268,7 +271,6 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
cxled->mode = CXL_DECODER_MIXED;
}

-success:
port->hdm_end++;
get_device(&cxled->cxld.dev);
return 0;
@@ -727,6 +729,13 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
port->id, cxld->id);
return -ENXIO;
}
+
+ if (size == 0) {
+ dev_warn(&port->dev,
+ "decoder%d.%d: Committed with zero size\n",
+ port->id, cxld->id);
+ return -ENXIO;
+ }
port->commit_end = cxld->id;
} else {
/* unless / until type-2 drivers arrive, assume type-3 */
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index d6c9781cd46a..bfc8ae214395 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -243,6 +243,7 @@ struct at_xdmac {
int irq;
struct clk *clk;
u32 save_gim;
+ u32 save_gs;
struct dma_pool *at_xdmac_desc_pool;
const struct at_xdmac_layout *layout;
struct at_xdmac_chan chan[];
@@ -1988,6 +1989,7 @@ static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
}
}
atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
+ atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);

at_xdmac_off(atxdmac);
clk_disable_unprepare(atxdmac->clk);
@@ -2027,7 +2029,8 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev)
at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
wmb();
- at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
+ if (atxdmac->save_gs & atchan->mask)
+ at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
}
}
return 0;
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 52bdf04aff51..ef4cdcf6beba 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -170,7 +170,7 @@ static void vchan_free_desc(struct virt_dma_desc *vdesc)
dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}

-static void dw_edma_start_transfer(struct dw_edma_chan *chan)
+static int dw_edma_start_transfer(struct dw_edma_chan *chan)
{
struct dw_edma_chunk *child;
struct dw_edma_desc *desc;
@@ -178,16 +178,16 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)

vd = vchan_next_desc(&chan->vc);
if (!vd)
- return;
+ return 0;

desc = vd2dw_edma_desc(vd);
if (!desc)
- return;
+ return 0;

child = list_first_entry_or_null(&desc->chunk->list,
struct dw_edma_chunk, list);
if (!child)
- return;
+ return 0;

dw_edma_v0_core_start(child, !desc->xfer_sz);
desc->xfer_sz += child->ll_region.sz;
@@ -195,6 +195,8 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
list_del(&child->list);
kfree(child);
desc->chunks_alloc--;
+
+ return 1;
}

static int dw_edma_device_config(struct dma_chan *dchan,
@@ -277,9 +279,12 @@ static void dw_edma_device_issue_pending(struct dma_chan *dchan)
struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
unsigned long flags;

+ if (!chan->configured)
+ return;
+
spin_lock_irqsave(&chan->vc.lock, flags);
- if (chan->configured && chan->request == EDMA_REQ_NONE &&
- chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
+ if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
+ chan->status == EDMA_ST_IDLE) {
chan->status = EDMA_ST_BUSY;
dw_edma_start_transfer(chan);
}
@@ -572,14 +577,14 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
switch (chan->request) {
case EDMA_REQ_NONE:
desc = vd2dw_edma_desc(vd);
- if (desc->chunks_alloc) {
- chan->status = EDMA_ST_BUSY;
- dw_edma_start_transfer(chan);
- } else {
+ if (!desc->chunks_alloc) {
list_del(&vd->node);
vchan_cookie_complete(vd);
- chan->status = EDMA_ST_IDLE;
}
+
+ /* Continue transferring if there are remaining chunks or issued requests.
+ */
+ chan->status = dw_edma_start_transfer(chan) ? EDMA_ST_BUSY : EDMA_ST_IDLE;
break;

case EDMA_REQ_STOP:
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 113834e1167b..d086ff1824f8 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -755,7 +755,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)

xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
if (PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
- ret = EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
goto disable_reg_clk;
}
if (!IS_ERR(xor_dev->clk)) {
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 98d45ee4b4e3..db6d0dc308d2 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1966,7 +1966,6 @@ static int gpi_ch_init(struct gchan *gchan)
error_config_int:
gpi_free_ring(&gpii->ev_ring, gpii);
exit_gpi_init:
- mutex_unlock(&gpii->ctrl_lock);
return ret;
}

diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 7e2762f62eec..bc399469e959 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -510,7 +510,7 @@ static bool skx_rir_decode(struct decoded_addr *res)
}

static u8 skx_close_row[] = {
- 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
+ 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33, 34
};

static u8 skx_close_column[] = {
@@ -518,7 +518,7 @@ static u8 skx_close_column[] = {
};

static u8 skx_open_row[] = {
- 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
+ 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34
};

static u8 skx_open_column[] = {
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index ffdad59ec81f..fe06dc193689 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -1981,7 +1981,7 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
return ret;

ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
- if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
+ if (!ret && !idr_is_empty(&sinfo->rx_idr))
ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);

return ret;
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index cdbfe54c8146..51eb85354c05 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -1418,8 +1418,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
static void qcom_scm_shutdown(struct platform_device *pdev)
{
/* Clean shutdown, disable download mode to allow normal restart */
- if (download_mode)
- qcom_scm_set_download_mode(false);
+ qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index bde1f543f529..80f4e2d14e04 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -1133,8 +1133,8 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
return ret;

genpool = svc_create_memory_pool(pdev, sh_memory);
- if (!genpool)
- return -ENOMEM;
+ if (IS_ERR(genpool))
+ return PTR_ERR(genpool);

/* allocate service controller and supporting channel */
controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL);
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 13918c8c839e..833ce13ff6f8 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -115,7 +115,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
/**
* fpga_bridge_get - get an exclusive reference to an fpga bridge
* @dev: parent device that fpga bridge was registered with
- * @info: fpga manager info
+ * @info: fpga image specific information
*
* Given a device, get an exclusive reference to an fpga bridge.
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9df5dcedaf3e..9776e0b488cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -35,6 +35,7 @@
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>
+#include <linux/apple-gmux.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
@@ -3942,12 +3943,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);

- if (amdgpu_device_supports_px(ddev)) {
- px = true;
+ px = amdgpu_device_supports_px(ddev);
+
+ if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+ apple_gmux_detect(NULL, NULL)))
vga_switcheroo_register_client(adev->pdev,
&amdgpu_switcheroo_ops, px);
+
+ if (px)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
- }

if (adev->gmc.xgmi.pending_reset)
queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
@@ -4053,6 +4057,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
void amdgpu_device_fini_sw(struct amdgpu_device *adev)
{
int idx;
+ bool px;

amdgpu_fence_driver_sw_fini(adev);
amdgpu_device_ip_fini(adev);
@@ -4072,10 +4077,16 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)

kfree(adev->bios);
adev->bios = NULL;
- if (amdgpu_device_supports_px(adev_to_drm(adev))) {
+
+ px = amdgpu_device_supports_px(adev_to_drm(adev));
+
+ if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+ apple_gmux_detect(NULL, NULL)))
vga_switcheroo_unregister_client(adev->pdev);
+
+ if (px)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
- }
+
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_unregister(adev->pdev);

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6c5ea99223ba..99b99f0b42c0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1763,7 +1763,8 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
dc_deinit_callbacks(adev->dm.dc);
#endif

- dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
+ if (adev->dm.dc)
+ dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

if (dc_enable_dmub_notifications(adev->dm.dc)) {
kfree(adev->dm.dmub_notify);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 26291db0a3cf..872d06fe1436 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -122,6 +122,9 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
psr_config.allow_multi_disp_optimizations =
(amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT);

+ if (!psr_su_set_y_granularity(dc, link, stream, &psr_config))
+ return false;
+
ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);

}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
index 3e5df27aa96f..1ce19d875358 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
@@ -26,6 +26,8 @@
#ifndef DAL_DC_RN_CLK_MGR_VBIOS_SMU_H_
#define DAL_DC_RN_CLK_MGR_VBIOS_SMU_H_

+enum dcn_pwr_state;
+
int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr);
int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
@@ -33,7 +35,7 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz);
-void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, int display_count);
+void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state);
void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable);
void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
int rn_vbios_smu_is_periodic_retraining_disabled(struct clk_mgr_internal *clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index dda596fa1cd7..fee331accc0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.

-CFLAGS_AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)

DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
dce60_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 020f512e9690..e958f838c804 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1641,7 +1641,8 @@ noinline bool dcn30_internal_validate_bw(
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate)
+ bool fast_validate,
+ bool allow_self_refresh_only)
{
bool out = false;
bool repopulate_pipes = false;
@@ -1668,7 +1669,7 @@ noinline bool dcn30_internal_validate_bw(

dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);

- if (!fast_validate) {
+ if (!fast_validate || !allow_self_refresh_only) {
/*
* DML favors voltage over p-state, but we're more interested in
* supporting p-state over voltage. We can't support p-state in
@@ -1681,11 +1682,12 @@ noinline bool dcn30_internal_validate_bw(
if (vlevel < context->bw_ctx.dml.soc.num_states)
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
}
- if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
+ if (allow_self_refresh_only &&
+ (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) {
/*
- * If mode is unsupported or there's still no p-state support then
- * fall back to favoring voltage.
+ * If mode is unsupported or there's still no p-state support
+ * then fall back to favoring voltage.
*
* We don't actually support prefetch mode 2, so require that we
* at least support prefetch mode 1.
@@ -2056,7 +2058,7 @@ bool dcn30_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();

DC_FP_START();
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
DC_FP_END();

if (pipe_cnt == 0)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
index 7d063c7d6a4b..8e6b8b7368fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
@@ -64,7 +64,8 @@ bool dcn30_internal_validate_bw(
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate);
+ bool fast_validate,
+ bool allow_self_refresh_only);
void dcn30_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index fddc21a5a04c..d825f11b4fea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -1770,7 +1770,7 @@ bool dcn31_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();

DC_FP_START();
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
DC_FP_END();

// Disable fast_validate to set min dcfclk in alculate_wm_and_dlg
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 9918bccd6def..ffaa4e5b3fca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -1689,6 +1689,81 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi
*panel_config = panel_config_defaults;
}

+static bool filter_modes_for_single_channel_workaround(struct dc *dc,
+ struct dc_state *context)
+{
+ // Filter 2K@240Hz+8K@24fps above combination timing if memory only has single dimm LPDDR
+ if (dc->clk_mgr->bw_params->vram_type == 34 && dc->clk_mgr->bw_params->num_channels < 2) {
+ int total_phy_pix_clk = 0;
+
+ for (int i = 0; i < context->stream_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream)
+ total_phy_pix_clk += context->res_ctx.pipe_ctx[i].stream->phy_pix_clk;
+
+ if (total_phy_pix_clk >= (1148928+826260)) //2K@240Hz+8K@24fps
+ return true;
+ }
+ return false;
+}
+
+bool dcn314_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate)
+{
+ bool out = false;
+
+ BW_VAL_TRACE_SETUP();
+
+ int vlevel = 0;
+ int pipe_cnt = 0;
+ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ BW_VAL_TRACE_COUNT();
+
+ if (filter_modes_for_single_channel_workaround(dc, context))
+ goto validate_fail;
+
+ DC_FP_START();
+ // do not support self refresh only
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false);
+ DC_FP_END();
+
+ // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
+ if (pipe_cnt == 0)
+ fast_validate = false;
+
+ if (!out)
+ goto validate_fail;
+
+ BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+ if (fast_validate) {
+ BW_VAL_TRACE_SKIP(fast);
+ goto validate_out;
+ }
+
+ dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+
+ BW_VAL_TRACE_END_WATERMARKS();
+
+ goto validate_out;
+
+validate_fail:
+ DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
+ dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
+
+ BW_VAL_TRACE_SKIP(fail);
+ out = false;
+
+validate_out:
+ kfree(pipes);
+
+ BW_VAL_TRACE_FINISH();
+
+ return out;
+}
+
static struct resource_funcs dcn314_res_pool_funcs = {
.destroy = dcn314_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
@@ -1696,7 +1771,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.link_encs_assign = link_enc_cfg_link_encs_assign,
.link_enc_unassign = link_enc_cfg_link_enc_unassign,
.panel_cntl_create = dcn31_panel_cntl_create,
- .validate_bandwidth = dcn31_validate_bandwidth,
+ .validate_bandwidth = dcn314_validate_bandwidth,
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
.populate_dml_pipes = dcn314_populate_dml_pipes_from_context,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
index 0dd3153aa5c1..49ffe71018df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
@@ -39,6 +39,10 @@ struct dcn314_resource_pool {
struct resource_pool base;
};

+bool dcn314_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate);
+
struct resource_pool *dcn314_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index e1e92daba668..990dbd736e2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -636,7 +636,7 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
while (dummy_latency_index < max_latency_table_entries) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
- dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);

if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank ==
dm_allow_self_refresh_and_mclk_switch)
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 235259d6c5a1..9edd39322c82 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -907,3 +907,38 @@ bool mod_power_only_edp(const struct dc_state *context, const struct dc_stream_s
{
return context && context->stream_count == 1 && dc_is_embedded_signal(stream->signal);
}
+
+bool psr_su_set_y_granularity(struct dc *dc, struct dc_link *link,
+ struct dc_stream_state *stream,
+ struct psr_config *config)
+{
+ uint16_t pic_height;
+ uint8_t slice_height;
+
+ if ((link->connector_signal & SIGNAL_TYPE_EDP) &&
+ (!dc->caps.edp_dsc_support ||
+ link->panel_config.dsc.disable_dsc_edp ||
+ !link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
+ !stream->timing.dsc_cfg.num_slices_v))
+ return true;
+
+ pic_height = stream->timing.v_addressable +
+ stream->timing.v_border_top + stream->timing.v_border_bottom;
+
+ if (stream->timing.dsc_cfg.num_slices_v == 0)
+ return false;
+
+ slice_height = pic_height / stream->timing.dsc_cfg.num_slices_v;
+
+ if (slice_height) {
+ if (config->su_y_granularity &&
+ (slice_height % config->su_y_granularity)) {
+ ASSERT(0);
+ return false;
+ }
+
+ config->su_y_granularity = slice_height;
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index 316452e9dbc9..bb16b37b83da 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -59,4 +59,7 @@ void mod_power_calc_psr_configs(struct psr_config *psr_config,
const struct dc_stream_state *stream);
bool mod_power_only_edp(const struct dc_state *context,
const struct dc_stream_state *stream);
+bool psr_su_set_y_granularity(struct dc *dc, struct dc_link *link,
+ struct dc_stream_state *stream,
+ struct psr_config *config);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index ed36088ebcfd..6d03459de561 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -161,10 +161,15 @@ int smu_get_dpm_freq_range(struct smu_context *smu,

int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{
- if (!smu->ppt_funcs && !smu->ppt_funcs->set_gfx_power_up_by_imu)
- return -EOPNOTSUPP;
+ int ret = 0;
+ struct amdgpu_device *adev = smu->adev;

- return smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
+ if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
+ ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
+ if (ret)
+ dev_err(adev->dev, "Failed to enable gfx imu!\n");
+ }
+ return ret;
}

static u32 smu_get_mclk(void *handle, bool low)
@@ -195,6 +200,19 @@ static u32 smu_get_sclk(void *handle, bool low)
return clk_freq * 100;
}

+static int smu_set_gfx_imu_enable(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
+ if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
+ return 0;
+
+ return smu_set_gfx_power_up_by_imu(smu);
+}
+
static int smu_dpm_set_vcn_enable(struct smu_context *smu,
bool enable)
{
@@ -1386,15 +1404,9 @@ static int smu_hw_init(void *handle)
}

if (smu->is_apu) {
- if ((smu->ppt_funcs->set_gfx_power_up_by_imu) &&
- likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
- ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to Enable gfx imu!\n");
- return ret;
- }
- }
-
+ ret = smu_set_gfx_imu_enable(smu);
+ if (ret)
+ return ret;
smu_dpm_set_vcn_enable(smu, true);
smu_dpm_set_jpeg_enable(smu, true);
smu_set_gfx_cgpg(smu, true);
@@ -1670,6 +1682,10 @@ static int smu_resume(void *handle)
return ret;
}

+ ret = smu_set_gfx_imu_enable(smu);
+ if (ret)
+ return ret;
+
smu_set_gfx_cgpg(smu, true);

smu->disable_uclk_switch = 0;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index 258c79d4dab0..b8eeaf4736e7 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -103,22 +103,19 @@ void adv7533_dsi_power_off(struct adv7511 *adv)
enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
const struct drm_display_mode *mode)
{
- int lanes;
+ unsigned long max_lane_freq;
struct mipi_dsi_device *dsi = adv->dsi;
+ u8 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);

- if (mode->clock > 80000)
- lanes = 4;
- else
- lanes = 3;
-
- /*
- * TODO: add support for dynamic switching of lanes
- * by using the bridge pre_enable() op . Till then filter
- * out the modes which shall need different number of lanes
- * than what was configured in the device tree.
- */
- if (lanes != dsi->lanes)
- return MODE_BAD;
+ /* Check max clock for either 7533 or 7535 */
+ if (mode->clock > (adv->type == ADV7533 ? 80000 : 148500))
+ return MODE_CLOCK_HIGH;
+
+ /* Check max clock for each lane */
+ max_lane_freq = (adv->type == ADV7533 ? 800000 : 891000);
+
+ if (mode->clock * bpp > max_lane_freq * adv->num_dsi_lanes)
+ return MODE_CLOCK_HIGH;

return MODE_OK;
}
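
The reworked adv7533_mode_valid() above is effectively a link-bandwidth check: pixel clock (kHz) times bits per pixel must not exceed the per-lane limit (kHz) times the configured lane count. A minimal standalone illustration of that comparison; the sample numbers are assumptions, not values read from a real bridge.

/* Illustrative DSI bandwidth check, mirroring the comparison
 * mode->clock * bpp > max_lane_freq * num_dsi_lanes (all in kHz).
 */
#include <stdio.h>

int main(void)
{
        unsigned long pixel_clock_khz = 148500;   /* e.g. 1080p60 */
        unsigned int bpp = 24;                    /* RGB888 DSI format */
        unsigned int lanes = 4;
        unsigned long max_lane_freq_khz = 891000; /* per-lane limit */

        if (pixel_clock_khz * bpp > max_lane_freq_khz * lanes)
                printf("mode rejected: needs %lu, budget is %lu\n",
                       pixel_clock_khz * bpp, max_lane_freq_khz * lanes);
        else
                printf("mode fits: %lu <= %lu (clock * bpp vs lane budget)\n",
                       pixel_clock_khz * bpp, max_lane_freq_khz * lanes);

        return 0;
}
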
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 69b0b2b9cc1c..3b968ad187cf 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -557,8 +557,9 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
*/
dev->mode_config.delayed_event = true;
if (dev->mode_config.poll_enabled)
- schedule_delayed_work(&dev->mode_config.output_poll_work,
- 0);
+ mod_delayed_work(system_wq,
+ &dev->mode_config.output_poll_work,
+ 0);
}

/* Re-enable polling in case the global poll config changed. */
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 3f3982ae9974..455d9ae6c41c 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -1079,7 +1079,7 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
num_encoders++;
}

- drm_WARN(encoder->base.dev, num_encoders != 1,
+ drm_WARN(state->base.dev, num_encoders != 1,
"%d encoders for pipe %c\n",
num_encoders, pipe_name(master_crtc->pipe));

diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 7b8d7178d09a..39cab4a55f57 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -392,8 +392,10 @@ static int lima_pdev_probe(struct platform_device *pdev)

/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&lima_drm_driver, &pdev->dev);
- if (IS_ERR(ddev))
- return PTR_ERR(ddev);
+ if (IS_ERR(ddev)) {
+ err = PTR_ERR(ddev);
+ goto err_out0;
+ }

ddev->dev_private = ldev;
ldev->ddev = ddev;
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 9d085c05c49c..007af69e5026 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -806,10 +806,9 @@ static int mtk_dp_aux_wait_for_completion(struct mtk_dp *mtk_dp, bool is_read)
}

static int mtk_dp_aux_do_transfer(struct mtk_dp *mtk_dp, bool is_read, u8 cmd,
- u32 addr, u8 *buf, size_t length)
+ u32 addr, u8 *buf, size_t length, u8 *reply_cmd)
{
int ret;
- u32 reply_cmd;

if (is_read && (length > DP_AUX_MAX_PAYLOAD_BYTES ||
(cmd == DP_AUX_NATIVE_READ && !length)))
@@ -841,10 +840,10 @@ static int mtk_dp_aux_do_transfer(struct mtk_dp *mtk_dp, bool is_read, u8 cmd,
/* Wait for feedback from sink device. */
ret = mtk_dp_aux_wait_for_completion(mtk_dp, is_read);

- reply_cmd = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3624) &
- AUX_RX_REPLY_COMMAND_AUX_TX_P0_MASK;
+ *reply_cmd = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3624) &
+ AUX_RX_REPLY_COMMAND_AUX_TX_P0_MASK;

- if (ret || reply_cmd) {
+ if (ret) {
u32 phy_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3628) &
AUX_RX_PHY_STATE_AUX_TX_P0_MASK;
if (phy_status != AUX_RX_PHY_STATE_AUX_TX_P0_RX_IDLE) {
@@ -1823,7 +1822,8 @@ static irqreturn_t mtk_dp_hpd_event_thread(int hpd, void *dev)
spin_unlock_irqrestore(&mtk_dp->irq_thread_lock, flags);

if (status & MTK_DP_THREAD_CABLE_STATE_CHG) {
- drm_helper_hpd_irq_event(mtk_dp->bridge.dev);
+ if (mtk_dp->bridge.dev)
+ drm_helper_hpd_irq_event(mtk_dp->bridge.dev);

if (!mtk_dp->train_info.cable_plugged_in) {
mtk_dp_disable_sdp_aui(mtk_dp);
@@ -2070,7 +2070,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
ret = mtk_dp_aux_do_transfer(mtk_dp, is_read, request,
msg->address + accessed_bytes,
msg->buffer + accessed_bytes,
- to_access);
+ to_access, &msg->reply);

if (ret) {
drm_info(mtk_dp->drm_dev,
@@ -2080,7 +2080,6 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
accessed_bytes += to_access;
} while (accessed_bytes < msg->size);

- msg->reply = DP_AUX_NATIVE_REPLY_ACK | DP_AUX_I2C_REPLY_ACK;
return msg->size;
err:
msg->reply = DP_AUX_NATIVE_REPLY_NACK | DP_AUX_I2C_REPLY_NACK;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 4f0dbeebb79f..02ff306f96f4 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1750,6 +1750,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
struct a5xx_gpu *a5xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
+ unsigned int nr_rings;
int ret;

if (!pdev) {
@@ -1770,7 +1771,12 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)

check_speed_bin(&pdev->dev);

- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
+ nr_rings = 4;
+
+ if (adreno_is_a510(adreno_gpu))
+ nr_rings = 1;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index c5c4c93b3689..cd009d56d35d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -438,9 +438,6 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
*/
pm_runtime_enable(&pdev->dev);

- /* Make sure pm runtime is active and reset any previous errors */
- pm_runtime_set_active(&pdev->dev);
-
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
pm_runtime_put_sync(&pdev->dev);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 9c6817b5a194..547f9f2b9fcb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -654,7 +654,7 @@ static int dpu_encoder_virt_atomic_check(
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
dpu_rm_release(global_state, drm_enc);

- if (!crtc_state->active_changed || crtc_state->active)
+ if (!crtc_state->active_changed || crtc_state->enable)
ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
drm_enc, crtc_state, topology);
}
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index 3a844917da07..5d04957b1144 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -593,8 +593,12 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
DRM_MODE_CONNECTOR_DSI);

ret = drm_panel_of_backlight(&nt->panel);
- if (ret)
+ if (ret) {
+ if (num_dsis == 2)
+ mipi_dsi_device_unregister(nt->dsi[1]);
+
return dev_err_probe(dev, ret, "Failed to get backlight\n");
+ }

drm_panel_add(&nt->panel);

@@ -610,6 +614,10 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)

ret = mipi_dsi_attach(nt->dsi[i]);
if (ret < 0) {
+ /* If we fail to attach to either host, we're done */
+ if (num_dsis == 2)
+ mipi_dsi_device_unregister(nt->dsi[1]);
+
return dev_err_probe(dev, ret,
"Cannot attach to DSI%d host.\n", i);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index b1787be31e92..7ecec7b04a8d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -109,8 +109,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base,
&rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE,
NULL);
- if (!renc)
- return -ENOMEM;
+ if (IS_ERR(renc))
+ return PTR_ERR(renc);

renc->output = output;

diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index da8a69953706..9426f7976d22 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -261,9 +261,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
else
ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

- if (ret)
- drm_gem_vm_close(vma);
-
return ret;
}

diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 21b61631f73a..86affe987a1c 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -344,6 +344,65 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
return p->private;
}

+/* Called when we got a page, either from a pool or newly allocated */
+static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
+ struct page *p, dma_addr_t **dma_addr,
+ unsigned long *num_pages,
+ struct page ***pages)
+{
+ unsigned int i;
+ int r;
+
+ if (*dma_addr) {
+ r = ttm_pool_map(pool, order, p, dma_addr);
+ if (r)
+ return r;
+ }
+
+ *num_pages -= 1 << order;
+ for (i = 1 << order; i; --i, ++(*pages), ++p)
+ **pages = p;
+
+ return 0;
+}
+
+/**
+ * ttm_pool_free_range() - Free a range of TTM pages
+ * @pool: The pool used for allocating.
+ * @tt: The struct ttm_tt holding the page pointers.
+ * @caching: The page caching mode used by the range.
+ * @start_page: index for first page to free.
+ * @end_page: index for last page to free + 1.
+ *
+ * During allocation the ttm_tt page-vector may be populated with ranges of
+ * pages with different attributes if allocation hit an error without being
+ * able to completely fulfill the allocation. This function can be used
+ * to free these individual ranges.
+ */
+static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
+ enum ttm_caching caching,
+ pgoff_t start_page, pgoff_t end_page)
+{
+ struct page **pages = tt->pages;
+ unsigned int order;
+ pgoff_t i, nr;
+
+ for (i = start_page; i < end_page; i += nr, pages += nr) {
+ struct ttm_pool_type *pt = NULL;
+
+ order = ttm_pool_page_order(pool, *pages);
+ nr = (1UL << order);
+ if (tt->dma_address)
+ ttm_pool_unmap(pool, tt->dma_address[i], nr);
+
+ pt = ttm_pool_select_type(pool, caching, order);
+ if (pt)
+ ttm_pool_type_give(pt, *pages);
+ else
+ ttm_pool_free_page(pool, caching, order, *pages);
+ }
+}
+
/**
* ttm_pool_alloc - Fill a ttm_tt object
*
@@ -359,12 +418,14 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
struct ttm_operation_ctx *ctx)
{
- unsigned long num_pages = tt->num_pages;
+ pgoff_t num_pages = tt->num_pages;
dma_addr_t *dma_addr = tt->dma_address;
struct page **caching = tt->pages;
struct page **pages = tt->pages;
+ enum ttm_caching page_caching;
gfp_t gfp_flags = GFP_USER;
- unsigned int i, order;
+ pgoff_t caching_divide;
+ unsigned int order;
struct page *p;
int r;

@@ -385,45 +446,61 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
num_pages;
order = min_t(unsigned int, order, __fls(num_pages))) {
- bool apply_caching = false;
struct ttm_pool_type *pt;

+ page_caching = tt->caching;
pt = ttm_pool_select_type(pool, tt->caching, order);
p = pt ? ttm_pool_type_take(pt) : NULL;
if (p) {
- apply_caching = true;
- } else {
- p = ttm_pool_alloc_page(pool, gfp_flags, order);
- if (p && PageHighMem(p))
- apply_caching = true;
- }
-
- if (!p) {
- if (order) {
- --order;
- continue;
- }
- r = -ENOMEM;
- goto error_free_all;
- }
-
- if (apply_caching) {
r = ttm_pool_apply_caching(caching, pages,
tt->caching);
if (r)
goto error_free_page;
- caching = pages + (1 << order);
+
+ caching = pages;
+ do {
+ r = ttm_pool_page_allocated(pool, order, p,
+ &dma_addr,
+ &num_pages,
+ &pages);
+ if (r)
+ goto error_free_page;
+
+ caching = pages;
+ if (num_pages < (1 << order))
+ break;
+
+ p = ttm_pool_type_take(pt);
+ } while (p);
}

- if (dma_addr) {
- r = ttm_pool_map(pool, order, p, &dma_addr);
+ page_caching = ttm_cached;
+ while (num_pages >= (1 << order) &&
+ (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
+
+ if (PageHighMem(p)) {
+ r = ttm_pool_apply_caching(caching, pages,
+ tt->caching);
+ if (r)
+ goto error_free_page;
+ caching = pages;
+ }
+ r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
+ &num_pages, &pages);
if (r)
goto error_free_page;
+ if (PageHighMem(p))
+ caching = pages;
}

- num_pages -= 1 << order;
- for (i = 1 << order; i; --i)
- *(pages++) = p++;
+ if (!p) {
+ if (order) {
+ --order;
+ continue;
+ }
+ r = -ENOMEM;
+ goto error_free_all;
+ }
}

r = ttm_pool_apply_caching(caching, pages, tt->caching);
@@ -433,15 +510,13 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
return 0;

error_free_page:
- ttm_pool_free_page(pool, tt->caching, order, p);
+ ttm_pool_free_page(pool, page_caching, order, p);

error_free_all:
num_pages = tt->num_pages - num_pages;
- for (i = 0; i < num_pages; ) {
- order = ttm_pool_page_order(pool, tt->pages[i]);
- ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
- i += 1 << order;
- }
+ caching_divide = caching - tt->pages;
+ ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
+ ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);

return r;
}
@@ -457,27 +532,7 @@ EXPORT_SYMBOL(ttm_pool_alloc);
*/
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
- unsigned int i;
-
- for (i = 0; i < tt->num_pages; ) {
- struct page *p = tt->pages[i];
- unsigned int order, num_pages;
- struct ttm_pool_type *pt;
-
- order = ttm_pool_page_order(pool, p);
- num_pages = 1ULL << order;
- if (tt->dma_address)
- ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
-
- pt = ttm_pool_select_type(pool, tt->caching, order);
- if (pt)
- ttm_pool_type_give(pt, tt->pages[i]);
- else
- ttm_pool_free_page(pool, tt->caching, order,
- tt->pages[i]);
-
- i += num_pages;
- }
+ ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);

while (atomic_long_read(&allocated_pages) > page_pool_size)
ttm_pool_shrink();
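
The ttm_pool_alloc() error path above splits the partially populated page vector at the point the caching cursor had reached: pages before it already carry the requested caching attribute, pages after it are still ordinary cached memory, and each range is freed accordingly. A toy model of that split, using mock types rather than the TTM pool itself:

/* Toy model of the error-path split in ttm_pool_alloc(): pages before the
 * "caching divide" already carry the requested caching attribute, pages
 * after it are still in the default (cached) state, so each range is
 * returned with the attribute it actually has.  Purely illustrative.
 */
#include <stdio.h>

enum caching { CACHED, WRITE_COMBINED };

static void free_range(const char *what, enum caching c,
                       unsigned int start, unsigned int end)
{
        printf("%s: pages [%u, %u) freed as %s\n", what, start, end,
               c == CACHED ? "cached" : "write-combined");
}

int main(void)
{
        unsigned int num_pages = 16;
        unsigned int caching_divide = 10; /* how far caching was applied */

        free_range("converted  ", WRITE_COMBINED, 0, caching_divide);
        free_range("unconverted", CACHED, caching_divide, num_pages);
        return 0;
}
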
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index c2a879734d40..e15754178395 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -249,4 +249,5 @@ void vgem_fence_close(struct vgem_file *vfile)
{
idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
idr_destroy(&vfile->fence_idr);
+ mutex_destroy(&vfile->fence_mutex);
}
diff --git a/drivers/gpu/host1x/context.c b/drivers/gpu/host1x/context.c
index b08cf11f9a66..047696432eb2 100644
--- a/drivers/gpu/host1x/context.c
+++ b/drivers/gpu/host1x/context.c
@@ -13,6 +13,11 @@
#include "context.h"
#include "dev.h"

+static void host1x_memory_context_release(struct device *dev)
+{
+ /* context device is freed in host1x_memory_context_list_free() */
+}
+
int host1x_memory_context_list_init(struct host1x *host1x)
{
struct host1x_memory_context_list *cdl = &host1x->context_list;
@@ -53,28 +58,30 @@ int host1x_memory_context_list_init(struct host1x *host1x)
dev_set_name(&ctx->dev, "host1x-ctx.%d", i);
ctx->dev.bus = &host1x_context_device_bus_type;
ctx->dev.parent = host1x->dev;
+ ctx->dev.release = host1x_memory_context_release;

dma_set_max_seg_size(&ctx->dev, UINT_MAX);

err = device_add(&ctx->dev);
if (err) {
dev_err(host1x->dev, "could not add context device %d: %d\n", i, err);
- goto del_devices;
+ put_device(&ctx->dev);
+ goto unreg_devices;
}

err = of_dma_configure_id(&ctx->dev, node, true, &i);
if (err) {
dev_err(host1x->dev, "IOMMU configuration failed for context device %d: %d\n",
i, err);
- device_del(&ctx->dev);
- goto del_devices;
+ device_unregister(&ctx->dev);
+ goto unreg_devices;
}

fwspec = dev_iommu_fwspec_get(&ctx->dev);
if (!fwspec || !device_iommu_mapped(&ctx->dev)) {
dev_err(host1x->dev, "Context device %d has no IOMMU!\n", i);
- device_del(&ctx->dev);
- goto del_devices;
+ device_unregister(&ctx->dev);
+ goto unreg_devices;
}

ctx->stream_id = fwspec->ids[0] & 0xffff;
@@ -82,11 +89,12 @@ int host1x_memory_context_list_init(struct host1x *host1x)

return 0;

-del_devices:
+unreg_devices:
while (i--)
- device_del(&cdl->devs[i].dev);
+ device_unregister(&cdl->devs[i].dev);

kfree(cdl->devs);
+ cdl->devs = NULL;
cdl->len = 0;

return err;
@@ -97,7 +105,7 @@ void host1x_memory_context_list_free(struct host1x_memory_context_list *cdl)
unsigned int i;

for (i = 0; i < cdl->len; i++)
- device_del(&cdl->devs[i].dev);
+ device_unregister(&cdl->devs[i].dev);

kfree(cdl->devs);
cdl->len = 0;
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 47774b9ab3de..c936d6a51c0c 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -367,6 +367,14 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
return devm_add_action_or_reset(&pdev->dev, privdata->mp2_ops->remove, privdata);
}

+static void amd_sfh_shutdown(struct pci_dev *pdev)
+{
+ struct amd_mp2_dev *mp2 = pci_get_drvdata(pdev);
+
+ if (mp2 && mp2->mp2_ops)
+ mp2->mp2_ops->stop_all(mp2);
+}
+
static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
{
struct amd_mp2_dev *mp2 = dev_get_drvdata(dev);
@@ -401,6 +409,7 @@ static struct pci_driver amd_mp2_pci_driver = {
.id_table = amd_mp2_pci_tbl,
.probe = amd_mp2_pci_probe,
.driver.pm = &amd_mp2_pm_ops,
+ .shutdown = amd_sfh_shutdown,
};
module_pci_driver(amd_mp2_pci_driver);

diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
index 0609fea581c9..6f0d332ccf51 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
@@ -218,7 +218,7 @@ static u8 get_input_rep(u8 current_index, int sensor_idx, int report_id,
OFFSET_SENSOR_DATA_DEFAULT;
memcpy_fromio(&als_data, sensoraddr, sizeof(struct sfh_als_data));
get_common_inputs(&als_input.common_property, report_id);
- als_input.illuminance_value = als_data.lux;
+ als_input.illuminance_value = float_to_int(als_data.lux);
report_size = sizeof(als_input);
memcpy(input_report, &als_input, sizeof(als_input));
break;
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index a1d6e08fab7d..bb8bd7892b67 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -112,6 +112,7 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
cl_data->num_hid_devices = amd_sfh_get_sensor_num(privdata, &cl_data->sensor_idx[0]);
if (cl_data->num_hid_devices == 0)
return -ENODEV;
+ cl_data->is_any_sensor_enabled = false;

INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
@@ -170,6 +171,7 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
status = (status == 0) ? SENSOR_ENABLED : SENSOR_DISABLED;

if (status == SENSOR_ENABLED) {
+ cl_data->is_any_sensor_enabled = true;
cl_data->sensor_sts[i] = SENSOR_ENABLED;
rc = amdtp_hid_probe(i, cl_data);
if (rc) {
@@ -186,12 +188,21 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
cl_data->sensor_sts[i]);
goto cleanup;
}
+ } else {
+ cl_data->sensor_sts[i] = SENSOR_DISABLED;
}
dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
cl_data->sensor_sts[i]);
}

+ if (!cl_data->is_any_sensor_enabled) {
+ dev_warn(dev, "Failed to discover, sensors not enabled is %d\n",
+ cl_data->is_any_sensor_enabled);
+ rc = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
return 0;

diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
index c6df959ec725..4f81ef2d4f56 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
@@ -16,11 +16,11 @@ static int amd_sfh_wait_response(struct amd_mp2_dev *mp2, u8 sid, u32 cmd_id)
{
struct sfh_cmd_response cmd_resp;

- /* Get response with status within a max of 1600 ms timeout */
+ /* Get response with status within a max of 10000 ms timeout */
if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
(cmd_resp.response.response == 0 &&
cmd_resp.response.cmd_id == cmd_id && (sid == 0xff ||
- cmd_resp.response.sensor_id == sid)), 500, 1600000))
+ cmd_resp.response.sensor_id == sid)), 500, 10000000))
return cmd_resp.response.response;

return -1;
@@ -33,6 +33,7 @@ static void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor
cmd_base.ul = 0;
cmd_base.cmd.cmd_id = ENABLE_SENSOR;
cmd_base.cmd.intr_disable = 0;
+ cmd_base.cmd.sub_cmd_value = 1;
cmd_base.cmd.sensor_id = info.sensor_idx;

writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG(0));
@@ -45,6 +46,7 @@ static void amd_stop_sensor(struct amd_mp2_dev *privdata, u16 sensor_idx)
cmd_base.ul = 0;
cmd_base.cmd.cmd_id = DISABLE_SENSOR;
cmd_base.cmd.intr_disable = 0;
+ cmd_base.cmd.sub_cmd_value = 1;
cmd_base.cmd.sensor_id = sensor_idx;

writeq(0x0, privdata->mmio + AMD_C2P_MSG(1));
@@ -56,8 +58,10 @@ static void amd_stop_all_sensor(struct amd_mp2_dev *privdata)
struct sfh_cmd_base cmd_base;

cmd_base.ul = 0;
- cmd_base.cmd.cmd_id = STOP_ALL_SENSORS;
+ cmd_base.cmd.cmd_id = DISABLE_SENSOR;
cmd_base.cmd.intr_disable = 0;
+ /* 0xf indicates all sensors */
+ cmd_base.cmd.sensor_id = 0xf;

writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG(0));
}
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
index ae47a369dc05..9d31d5b510eb 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
@@ -33,9 +33,9 @@ struct sfh_cmd_base {
struct {
u32 sensor_id : 4;
u32 cmd_id : 4;
- u32 sub_cmd_id : 6;
- u32 length : 12;
- u32 rsvd : 5;
+ u32 sub_cmd_id : 8;
+ u32 sub_cmd_value : 12;
+ u32 rsvd : 3;
u32 intr_disable : 1;
} cmd;
};
@@ -133,7 +133,7 @@ struct sfh_mag_data {

struct sfh_als_data {
struct sfh_common_data commondata;
- u16 lux;
+ u32 lux;
};

struct hpd_status {
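
The widened sub_cmd_id/sub_cmd_value fields above repack the 32-bit C2P command word as sensor_id:4, cmd_id:4, sub_cmd_id:8, sub_cmd_value:12, rsvd:3, intr_disable:1. A sketch of that packing with explicit shifts; since C bit-field layout is implementation-defined, the LSB-first ordering and the example cmd_id value here are assumptions for illustration only.

/* Illustration of the 32-bit command word after the change:
 * sensor_id:4 | cmd_id:4 | sub_cmd_id:8 | sub_cmd_value:12 | rsvd:3 |
 * intr_disable:1, packed with explicit shifts and assumed LSB-first.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_cmd(uint32_t sensor_id, uint32_t cmd_id,
                         uint32_t sub_cmd_id, uint32_t sub_cmd_value,
                         uint32_t intr_disable)
{
        return (sensor_id & 0xf) |
               ((cmd_id & 0xf) << 4) |
               ((sub_cmd_id & 0xff) << 8) |
               ((sub_cmd_value & 0xfff) << 16) |
               /* bits 28-30 reserved */
               ((intr_disable & 0x1) << 31);
}

int main(void)
{
        /* "All sensors" id 0xf with sub_cmd_value = 1; the cmd_id value
         * 0x4 is a placeholder, not the real DISABLE_SENSOR opcode.
         */
        printf("cmd word: 0x%08" PRIx32 "\n", pack_cmd(0xf, 0x4, 0, 1, 0));
        return 0;
}
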
diff --git a/drivers/hte/hte-tegra194-test.c b/drivers/hte/hte-tegra194-test.c
index 5d776a185bd6..ce8c44e79221 100644
--- a/drivers/hte/hte-tegra194-test.c
+++ b/drivers/hte/hte-tegra194-test.c
@@ -6,6 +6,7 @@
*/

#include <linux/err.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
diff --git a/drivers/hte/hte-tegra194.c b/drivers/hte/hte-tegra194.c
index 49a27af22742..d1b579c82279 100644
--- a/drivers/hte/hte-tegra194.c
+++ b/drivers/hte/hte-tegra194.c
@@ -251,7 +251,7 @@ static int tegra_hte_map_to_line_id(u32 eid,
{

if (m) {
- if (eid > map_sz)
+ if (eid >= map_sz)
return -EINVAL;
if (m[eid].slice == NV_AON_SLICE_INVALID)
return -EINVAL;
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 6e4c92b500b8..6a6ebcc896b1 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -1604,9 +1604,9 @@ static int adt7475_set_pwm_polarity(struct i2c_client *client)
int ret, i;
u8 val;

- ret = of_property_read_u32_array(client->dev.of_node,
- "adi,pwm-active-state", states,
- ARRAY_SIZE(states));
+ ret = device_property_read_u32_array(&client->dev,
+ "adi,pwm-active-state", states,
+ ARRAY_SIZE(states));
if (ret)
return ret;

diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 5a9d47a229e4..be8bbb1c3a02 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -75,6 +75,7 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);

#define ZEN_CUR_TEMP_SHIFT 21
#define ZEN_CUR_TEMP_RANGE_SEL_MASK BIT(19)
+#define ZEN_CUR_TEMP_TJ_SEL_MASK GENMASK(17, 16)

struct k10temp_data {
struct pci_dev *pdev;
@@ -155,7 +156,8 @@ static long get_raw_temp(struct k10temp_data *data)

data->read_tempreg(data->pdev, &regval);
temp = (regval >> ZEN_CUR_TEMP_SHIFT) * 125;
- if (regval & data->temp_adjust_mask)
+ if ((regval & data->temp_adjust_mask) ||
+ (regval & ZEN_CUR_TEMP_TJ_SEL_MASK) == ZEN_CUR_TEMP_TJ_SEL_MASK)
temp -= 49000;
return temp;
}
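
The k10temp fix above extends the existing decode: the reading is the top 11 bits of the register times 125 millidegrees, with a fixed 49 degC offset subtracted when the range-select bit or, now, both Tj-select bits (17:16) are set. A standalone decode sketch with an invented register value:

/* Decode sketch for the Zen current-temperature register, following the
 * hunk above: temp = (regval >> 21) * 125 millidegrees, minus 49000 when
 * the range-select bit or both Tj-select bits are set.  The sample
 * register value is invented.
 */
#include <stdint.h>
#include <stdio.h>

#define ZEN_CUR_TEMP_SHIFT              21
#define ZEN_CUR_TEMP_RANGE_SEL_MASK     (1u << 19)
#define ZEN_CUR_TEMP_TJ_SEL_MASK        (0x3u << 16)

static long decode_temp(uint32_t regval)
{
        long temp = (regval >> ZEN_CUR_TEMP_SHIFT) * 125;

        if ((regval & ZEN_CUR_TEMP_RANGE_SEL_MASK) ||
            (regval & ZEN_CUR_TEMP_TJ_SEL_MASK) == ZEN_CUR_TEMP_TJ_SEL_MASK)
                temp -= 49000;
        return temp;
}

int main(void)
{
        uint32_t regval = (620u << ZEN_CUR_TEMP_SHIFT) | (0x3u << 16);

        /* 620 * 125 - 49000 = 28500 */
        printf("%ld millicelsius\n", decode_temp(regval));
        return 0;
}
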
diff --git a/drivers/hwmon/pmbus/fsp-3y.c b/drivers/hwmon/pmbus/fsp-3y.c
index aec294cc72d1..c7469d2cdedc 100644
--- a/drivers/hwmon/pmbus/fsp-3y.c
+++ b/drivers/hwmon/pmbus/fsp-3y.c
@@ -180,7 +180,6 @@ static struct pmbus_driver_info fsp3y_info[] = {
PMBUS_HAVE_FAN12,
.func[YM2151_PAGE_5VSB_LOG] =
PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT,
- PMBUS_HAVE_IIN,
.read_word_data = fsp3y_read_word_data,
.read_byte_data = fsp3y_read_byte_data,
},
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 43bbd5dc3d3b..f9a0ee49d8e8 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -870,6 +870,7 @@ int __init etm_perf_init(void)
etm_pmu.addr_filters_sync = etm_addr_filters_sync;
etm_pmu.addr_filters_validate = etm_addr_filters_validate;
etm_pmu.nr_addr_filters = ETM_ADDR_CMP_MAX;
+ etm_pmu.module = THIS_MODULE;

ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
if (ret == 0)
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index f58943cb1341..8a5fdb150c44 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -833,8 +833,10 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
#if IS_ENABLED(CONFIG_I2C_SLAVE)
/* Check i2c operating mode and switch if possible */
if (id->dev_mode == CDNS_I2C_MODE_SLAVE) {
- if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE)
- return -EAGAIN;
+ if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE) {
+ ret = -EAGAIN;
+ goto out;
+ }

/* Set mode to master */
cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index f9ae520aed22..7ec252199706 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1058,7 +1058,7 @@ omap_i2c_isr(int irq, void *dev_id)
u16 stat;

stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
- mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
+ mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK;

if (stat & mask)
ret = IRQ_WAKE_THREAD;
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 277a02455cdd..effae4d46729 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -704,7 +704,7 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
err = xiic_start_xfer(i2c, msgs, num);
if (err < 0) {
dev_err(adap->dev.parent, "Error xiic_start_xfer\n");
- return err;
+ goto out;
}

err = wait_for_completion_timeout(&i2c->completion, XIIC_XFER_TIMEOUT);
@@ -722,6 +722,8 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
err = (i2c->state == STATE_DONE) ? num : -EIO;
}
mutex_unlock(&i2c->lock);
+
+out:
pm_runtime_mark_last_busy(i2c->dev);
pm_runtime_put_autosuspend(i2c->dev);
return err;
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index fd000345ec5c..849a697a467e 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -639,7 +639,7 @@ static int palmas_gpadc_probe(struct platform_device *pdev)

static int palmas_gpadc_remove(struct platform_device *pdev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(&pdev->dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(&pdev->dev);
struct palmas_gpadc *adc = iio_priv(indio_dev);

if (adc->wakeup1_enable || adc->wakeup2_enable)
diff --git a/drivers/iio/addac/stx104.c b/drivers/iio/addac/stx104.c
index 48a91a95e597..b658a75d4e3a 100644
--- a/drivers/iio/addac/stx104.c
+++ b/drivers/iio/addac/stx104.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>

@@ -69,10 +70,12 @@ struct stx104_reg {

/**
* struct stx104_iio - IIO device private data structure
+ * @lock: synchronization lock to prevent I/O race conditions
* @chan_out_states: channels' output states
* @reg: I/O address offset for the device registers
*/
struct stx104_iio {
+ struct mutex lock;
unsigned int chan_out_states[STX104_NUM_OUT_CHAN];
struct stx104_reg __iomem *reg;
};
@@ -114,6 +117,8 @@ static int stx104_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
}

+ mutex_lock(&priv->lock);
+
/* select ADC channel */
iowrite8(chan->channel | (chan->channel << 4), &reg->achan);

@@ -124,6 +129,8 @@ static int stx104_read_raw(struct iio_dev *indio_dev,
while (ioread8(&reg->cir_asr) & BIT(7));

*val = ioread16(&reg->ssr_ad);
+
+ mutex_unlock(&priv->lock);
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
/* get ADC bipolar/unipolar configuration */
@@ -178,9 +185,12 @@ static int stx104_write_raw(struct iio_dev *indio_dev,
if ((unsigned int)val > 65535)
return -EINVAL;

+ mutex_lock(&priv->lock);
+
priv->chan_out_states[chan->channel] = val;
iowrite16(val, &priv->reg->dac[chan->channel]);

+ mutex_unlock(&priv->lock);
return 0;
}
return -EINVAL;
@@ -351,6 +361,8 @@ static int stx104_probe(struct device *dev, unsigned int id)

indio_dev->name = dev_name(dev);

+ mutex_init(&priv->lock);
+
/* configure device for software trigger operation */
iowrite8(0, &priv->reg->acr);

diff --git a/drivers/iio/light/max44009.c b/drivers/iio/light/max44009.c
index 801e5a0ad496..f3648f20ef2c 100644
--- a/drivers/iio/light/max44009.c
+++ b/drivers/iio/light/max44009.c
@@ -528,6 +528,12 @@ static int max44009_probe(struct i2c_client *client,
return devm_iio_device_register(&client->dev, indio_dev);
}

+static const struct of_device_id max44009_of_match[] = {
+ { .compatible = "maxim,max44009" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max44009_of_match);
+
static const struct i2c_device_id max44009_id[] = {
{ "max44009", 0 },
{ }
@@ -537,18 +543,13 @@ MODULE_DEVICE_TABLE(i2c, max44009_id);
static struct i2c_driver max44009_driver = {
.driver = {
.name = MAX44009_DRV_NAME,
+ .of_match_table = max44009_of_match,
},
.probe = max44009_probe,
.id_table = max44009_id,
};
module_i2c_driver(max44009_driver);

-static const struct of_device_id max44009_of_match[] = {
- { .compatible = "maxim,max44009" },
- { }
-};
-MODULE_DEVICE_TABLE(of, max44009_of_match);
-
MODULE_AUTHOR("Robert Eshleman <bobbyeshleman@xxxxxxxxx>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MAX44009 ambient light sensor driver");
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 1f9938a2c475..b7f902344289 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -2912,6 +2912,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
(ari && ari_length > IB_CM_REJ_ARI_LENGTH))
return -EINVAL;

+ trace_icm_send_rej(&cm_id_priv->id, reason);
+
switch (state) {
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
@@ -2942,7 +2944,6 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
return -EINVAL;
}

- trace_icm_send_rej(&cm_id_priv->id, reason);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
cm_free_msg(msg);
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index c533c693e5e3..2eb41e6d94c4 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -112,6 +112,10 @@

#define ERDMA_PAGE_SIZE_SUPPORT 0x7FFFF000

+/* Hardware page size definition */
+#define ERDMA_HW_PAGE_SHIFT 12
+#define ERDMA_HW_PAGE_SIZE 4096
+
/* WQE related. */
#define EQE_SIZE 16
#define EQE_SHIFT 4
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 19c69ea1b0c0..654d8513873e 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -38,7 +38,7 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);

if (rdma_is_kernel_res(&qp->ibqp.res)) {
- u32 pgsz_range = ilog2(SZ_1M) - PAGE_SHIFT;
+ u32 pgsz_range = ilog2(SZ_1M) - ERDMA_HW_PAGE_SHIFT;

req.sq_cqn_mtt_cfg =
FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
@@ -66,13 +66,13 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
user_qp = &qp->user_qp;
req.sq_cqn_mtt_cfg = FIELD_PREP(
ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
- ilog2(user_qp->sq_mtt.page_size) - PAGE_SHIFT);
+ ilog2(user_qp->sq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
req.sq_cqn_mtt_cfg |=
FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);

req.rq_cqn_mtt_cfg = FIELD_PREP(
ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
- ilog2(user_qp->rq_mtt.page_size) - PAGE_SHIFT);
+ ilog2(user_qp->rq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
req.rq_cqn_mtt_cfg |=
FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);

@@ -163,7 +163,7 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
if (rdma_is_kernel_res(&cq->ibcq.res)) {
page_size = SZ_32M;
req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
- ilog2(page_size) - PAGE_SHIFT);
+ ilog2(page_size) - ERDMA_HW_PAGE_SHIFT);
req.qbuf_addr_l = lower_32_bits(cq->kern_cq.qbuf_dma_addr);
req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);

@@ -176,8 +176,9 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
} else {
mtt = &cq->user_cq.qbuf_mtt;
- req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
- ilog2(mtt->page_size) - PAGE_SHIFT);
+ req.cfg0 |=
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
+ ilog2(mtt->page_size) - ERDMA_HW_PAGE_SHIFT);
if (mtt->mtt_nents == 1) {
req.qbuf_addr_l = lower_32_bits(*(u64 *)mtt->mtt_buf);
req.qbuf_addr_h = upper_32_bits(*(u64 *)mtt->mtt_buf);
@@ -618,7 +619,7 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
u32 rq_offset;
int ret;

- if (len < (PAGE_ALIGN(qp->attrs.sq_size * SQEBB_SIZE) +
+ if (len < (ALIGN(qp->attrs.sq_size * SQEBB_SIZE, ERDMA_HW_PAGE_SIZE) +
qp->attrs.rq_size * RQE_SIZE))
return -EINVAL;

@@ -628,7 +629,7 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
if (ret)
return ret;

- rq_offset = PAGE_ALIGN(qp->attrs.sq_size << SQEBB_SHIFT);
+ rq_offset = ALIGN(qp->attrs.sq_size << SQEBB_SHIFT, ERDMA_HW_PAGE_SIZE);
qp->user_qp.rq_offset = rq_offset;

ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mtt, va + rq_offset,
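
The erdma changes above switch the queue page-size encoding from the host PAGE_SHIFT to the fixed 4 KiB hardware page, i.e. field = ilog2(page_size) - 12, which keeps the value correct on 64 KiB-page hosts. A small standalone illustration of the encoding; ilog2_u32() here is a local stand-in for the kernel's ilog2().

/* Encoding sketch for the ERDMA queue page-size field: the hardware
 * always counts from a 4 KiB page, so the field is ilog2(page_size) - 12
 * regardless of the host PAGE_SHIFT.  Standalone illustration only.
 */
#include <stdio.h>

#define ERDMA_HW_PAGE_SHIFT 12

static unsigned int ilog2_u32(unsigned int v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        unsigned int page_size = 1 << 20; /* 1 MiB queue buffer page */

        /* 20 - 12 = 8 */
        printf("pgsz field = %u\n",
               ilog2_u32(page_size) - ERDMA_HW_PAGE_SHIFT);
        return 0;
}
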
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index 5d9a7b09ca37..8973a081d641 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -215,6 +215,7 @@ static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

ret = sdma_txadd_page(dd,
+ NULL,
txreq,
skb_frag_page(frag),
frag->bv_offset,
@@ -737,10 +738,13 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
txq->tx_ring.shift = ilog2(tx_item_size);
txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq);
tx_ring = &txq->tx_ring;
- for (j = 0; j < tx_ring_size; j++)
+ for (j = 0; j < tx_ring_size; j++) {
hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr =
kzalloc_node(sizeof(*tx->sdma_hdr),
GFP_KERNEL, priv->dd->node);
+ if (!hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr)
+ goto free_txqs;
+ }

netif_napi_add_tx(dev, &txq->napi, hfi1_ipoib_poll_tx_ring);
}
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 7333646021bb..71b9ac018887 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -126,11 +126,11 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
spin_lock_irqsave(&handler->lock, flags);
node = __mmu_rb_search(handler, mnode->addr, mnode->len);
if (node) {
- ret = -EINVAL;
+ ret = -EEXIST;
goto unlock;
}
__mmu_int_rb_insert(mnode, &handler->root);
- list_add(&mnode->list, &handler->lru_list);
+ list_add_tail(&mnode->list, &handler->lru_list);

ret = handler->ops->insert(handler->ops_arg, mnode);
if (ret) {
@@ -143,6 +143,19 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
return ret;
}

+/* Caller must hold handler lock */
+struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
+ unsigned long addr, unsigned long len)
+{
+ struct mmu_rb_node *node;
+
+ trace_hfi1_mmu_rb_search(addr, len);
+ node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1);
+ if (node)
+ list_move_tail(&node->list, &handler->lru_list);
+ return node;
+}
+
/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
unsigned long addr,
@@ -167,32 +180,6 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
return node;
}

-bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
- unsigned long addr, unsigned long len,
- struct mmu_rb_node **rb_node)
-{
- struct mmu_rb_node *node;
- unsigned long flags;
- bool ret = false;
-
- if (current->mm != handler->mn.mm)
- return ret;
-
- spin_lock_irqsave(&handler->lock, flags);
- node = __mmu_rb_search(handler, addr, len);
- if (node) {
- if (node->addr == addr && node->len == len)
- goto unlock;
- __mmu_int_rb_remove(node, &handler->root);
- list_del(&node->list); /* remove from LRU list */
- ret = true;
- }
-unlock:
- spin_unlock_irqrestore(&handler->lock, flags);
- *rb_node = node;
- return ret;
-}
-
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
struct mmu_rb_node *rbnode, *ptr;
@@ -206,8 +193,7 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
INIT_LIST_HEAD(&del_list);

spin_lock_irqsave(&handler->lock, flags);
- list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
- list) {
+ list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
&stop)) {
__mmu_int_rb_remove(rbnode, &handler->root);
@@ -219,36 +205,11 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
}
spin_unlock_irqrestore(&handler->lock, flags);

- while (!list_empty(&del_list)) {
- rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
- list_del(&rbnode->list);
+ list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
handler->ops->remove(handler->ops_arg, rbnode);
}
}

-/*
- * It is up to the caller to ensure that this function does not race with the
- * mmu invalidate notifier which may be calling the users remove callback on
- * 'node'.
- */
-void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
- struct mmu_rb_node *node)
-{
- unsigned long flags;
-
- if (current->mm != handler->mn.mm)
- return;
-
- /* Validity of handler and node pointers has been checked by caller. */
- trace_hfi1_mmu_rb_remove(node->addr, node->len);
- spin_lock_irqsave(&handler->lock, flags);
- __mmu_int_rb_remove(node, &handler->root);
- list_del(&node->list); /* remove from LRU list */
- spin_unlock_irqrestore(&handler->lock, flags);
-
- handler->ops->remove(handler->ops_arg, node);
-}
-
static int mmu_notifier_range_start(struct mmu_notifier *mn,
const struct mmu_notifier_range *range)
{
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
index 7417be2b9dc8..ed75acdb7b83 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.h
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
@@ -52,10 +52,8 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
struct mmu_rb_node *mnode);
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
-void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
- struct mmu_rb_node *mnode);
-bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
- unsigned long addr, unsigned long len,
- struct mmu_rb_node **rb_node);
+struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
+ unsigned long addr,
+ unsigned long len);

#endif /* _HFI1_MMU_RB_H */
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 8ed20392e9f0..bb2552dd29c1 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1593,22 +1593,7 @@ static inline void sdma_unmap_desc(
struct hfi1_devdata *dd,
struct sdma_desc *descp)
{
- switch (sdma_mapping_type(descp)) {
- case SDMA_MAP_SINGLE:
- dma_unmap_single(
- &dd->pcidev->dev,
- sdma_mapping_addr(descp),
- sdma_mapping_len(descp),
- DMA_TO_DEVICE);
- break;
- case SDMA_MAP_PAGE:
- dma_unmap_page(
- &dd->pcidev->dev,
- sdma_mapping_addr(descp),
- sdma_mapping_len(descp),
- DMA_TO_DEVICE);
- break;
- }
+ system_descriptor_complete(dd, descp);
}

/*
@@ -3128,7 +3113,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,

/* Add descriptor for coalesce buffer */
tx->desc_limit = MAX_DESC;
- return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
+ return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx,
addr, tx->tlen);
}

@@ -3167,10 +3152,12 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
return rval;
}
}
+
/* finish the one just added */
make_tx_sdma_desc(
tx,
SDMA_MAP_NONE,
+ NULL,
dd->sdma_pad_phys,
sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
tx->num_desc++;
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index b023fc461bd5..95aaec14c6c2 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -594,6 +594,7 @@ static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
static inline void make_tx_sdma_desc(
struct sdma_txreq *tx,
int type,
+ void *pinning_ctx,
dma_addr_t addr,
size_t len)
{
@@ -612,6 +613,7 @@ static inline void make_tx_sdma_desc(
<< SDMA_DESC0_PHY_ADDR_SHIFT) |
(((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
<< SDMA_DESC0_BYTE_COUNT_SHIFT);
+ desc->pinning_ctx = pinning_ctx;
}

/* helper to extend txreq */
@@ -643,6 +645,7 @@ static inline void _sdma_close_tx(struct hfi1_devdata *dd,
static inline int _sdma_txadd_daddr(
struct hfi1_devdata *dd,
int type,
+ void *pinning_ctx,
struct sdma_txreq *tx,
dma_addr_t addr,
u16 len)
@@ -652,6 +655,7 @@ static inline int _sdma_txadd_daddr(
make_tx_sdma_desc(
tx,
type,
+ pinning_ctx,
addr, len);
WARN_ON(len > tx->tlen);
tx->num_desc++;
@@ -672,6 +676,7 @@ static inline int _sdma_txadd_daddr(
/**
* sdma_txadd_page() - add a page to the sdma_txreq
* @dd: the device to use for mapping
+ * @pinning_ctx: context to be released at descriptor retirement
* @tx: tx request to which the page is added
* @page: page to map
* @offset: offset within the page
@@ -687,6 +692,7 @@ static inline int _sdma_txadd_daddr(
*/
static inline int sdma_txadd_page(
struct hfi1_devdata *dd,
+ void *pinning_ctx,
struct sdma_txreq *tx,
struct page *page,
unsigned long offset,
@@ -714,8 +720,7 @@ static inline int sdma_txadd_page(
return -ENOSPC;
}

- return _sdma_txadd_daddr(
- dd, SDMA_MAP_PAGE, tx, addr, len);
+ return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, pinning_ctx, tx, addr, len);
}

/**
@@ -749,7 +754,8 @@ static inline int sdma_txadd_daddr(
return rval;
}

- return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
+ return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, NULL, tx,
+ addr, len);
}

/**
@@ -795,8 +801,7 @@ static inline int sdma_txadd_kvaddr(
return -ENOSPC;
}

- return _sdma_txadd_daddr(
- dd, SDMA_MAP_SINGLE, tx, addr, len);
+ return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx, addr, len);
}

struct iowait_work;
@@ -1030,4 +1035,5 @@ extern uint mod_num_sdma;

void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);

+void system_descriptor_complete(struct hfi1_devdata *dd, struct sdma_desc *descp);
#endif
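
The pinning_ctx pointer added to the SDMA descriptors above lets whatever pinned the user pages be released when the descriptor retires (via system_descriptor_complete()) instead of being unmapped inline. A toy refcounted version of that idea, with mocked-up types rather than the hfi1 structures:

/* Toy version of carrying a "pinning context" in a DMA descriptor and
 * releasing it at retirement.  Types and refcounting are mocked up; the
 * real driver ties this to sdma_desc and the MMU rb-tree cache.
 */
#include <stdio.h>
#include <stdlib.h>

struct pin_ctx {
        int refcount;
};

struct desc {
        struct pin_ctx *pinning_ctx; /* NULL for kernel-owned buffers */
};

static void desc_fill(struct desc *d, struct pin_ctx *ctx)
{
        if (ctx)
                ctx->refcount++;
        d->pinning_ctx = ctx;
}

static void desc_retire(struct desc *d)
{
        struct pin_ctx *ctx = d->pinning_ctx;

        if (ctx && --ctx->refcount == 0) {
                printf("last user gone, unpinning pages\n");
                free(ctx);
        }
}

int main(void)
{
        struct pin_ctx *ctx = calloc(1, sizeof(*ctx));
        struct desc d[2];

        if (!ctx)
                return 1;

        desc_fill(&d[0], ctx);
        desc_fill(&d[1], ctx);
        desc_retire(&d[0]);
        desc_retire(&d[1]); /* prints the unpin message */
        return 0;
}
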
diff --git a/drivers/infiniband/hw/hfi1/sdma_txreq.h b/drivers/infiniband/hw/hfi1/sdma_txreq.h
index e262fb5c5ec6..fad946cb5e0d 100644
--- a/drivers/infiniband/hw/hfi1/sdma_txreq.h
+++ b/drivers/infiniband/hw/hfi1/sdma_txreq.h
@@ -19,6 +19,7 @@
struct sdma_desc {
/* private: don't use directly */
u64 qw[2];
+ void *pinning_ctx;
};

/**
diff --git a/drivers/infiniband/hw/hfi1/trace_mmu.h b/drivers/infiniband/hw/hfi1/trace_mmu.h
index 187e9244fe5e..57900ebb7702 100644
--- a/drivers/infiniband/hw/hfi1/trace_mmu.h
+++ b/drivers/infiniband/hw/hfi1/trace_mmu.h
@@ -37,10 +37,6 @@ DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_search,
TP_PROTO(unsigned long addr, unsigned long len),
TP_ARGS(addr, len));

-DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_remove,
- TP_PROTO(unsigned long addr, unsigned long len),
- TP_ARGS(addr, len));
-
DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_mem_invalidate,
TP_PROTO(unsigned long addr, unsigned long len),
TP_ARGS(addr, len));
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index a71c5a36ceba..ae58b48afe07 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -24,7 +24,6 @@

#include "hfi.h"
#include "sdma.h"
-#include "mmu_rb.h"
#include "user_sdma.h"
#include "verbs.h" /* for the headers */
#include "common.h" /* for struct hfi1_tid_info */
@@ -39,11 +38,7 @@ static unsigned initial_pkt_count = 8;
static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
-static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
-static int pin_vector_pages(struct user_sdma_request *req,
- struct user_sdma_iovec *iovec);
-static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
- unsigned start, unsigned npages);
+static void user_sdma_free_request(struct user_sdma_request *req);
static int check_header_template(struct user_sdma_request *req,
struct hfi1_pkt_header *hdr, u32 lrhlen,
u32 datalen);
@@ -81,6 +76,11 @@ static struct mmu_rb_ops sdma_rb_ops = {
.invalidate = sdma_rb_invalidate
};

+static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct user_sdma_iovec *iovec,
+ u32 *pkt_remaining);
+
static int defer_packet_queue(
struct sdma_engine *sde,
struct iowait_work *wait,
@@ -410,6 +410,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
ret = -EINVAL;
goto free_req;
}
+
/* Copy the header from the user buffer */
ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
sizeof(req->hdr));
@@ -484,9 +485,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
memcpy(&req->iovs[i].iov,
iovec + idx++,
sizeof(req->iovs[i].iov));
- ret = pin_vector_pages(req, &req->iovs[i]);
- if (ret) {
- req->data_iovs = i;
+ if (req->iovs[i].iov.iov_len == 0) {
+ ret = -EINVAL;
goto free_req;
}
req->data_len += req->iovs[i].iov.iov_len;
@@ -584,7 +584,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
if (req->seqsubmitted)
wait_event(pq->busy.wait_dma,
(req->seqcomp == req->seqsubmitted - 1));
- user_sdma_free_request(req, true);
+ user_sdma_free_request(req);
pq_update(pq);
set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
}
@@ -696,48 +696,6 @@ static int user_sdma_txadd_ahg(struct user_sdma_request *req,
return ret;
}

-static int user_sdma_txadd(struct user_sdma_request *req,
- struct user_sdma_txreq *tx,
- struct user_sdma_iovec *iovec, u32 datalen,
- u32 *queued_ptr, u32 *data_sent_ptr,
- u64 *iov_offset_ptr)
-{
- int ret;
- unsigned int pageidx, len;
- unsigned long base, offset;
- u64 iov_offset = *iov_offset_ptr;
- u32 queued = *queued_ptr, data_sent = *data_sent_ptr;
- struct hfi1_user_sdma_pkt_q *pq = req->pq;
-
- base = (unsigned long)iovec->iov.iov_base;
- offset = offset_in_page(base + iovec->offset + iov_offset);
- pageidx = (((iovec->offset + iov_offset + base) - (base & PAGE_MASK)) >>
- PAGE_SHIFT);
- len = offset + req->info.fragsize > PAGE_SIZE ?
- PAGE_SIZE - offset : req->info.fragsize;
- len = min((datalen - queued), len);
- ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],
- offset, len);
- if (ret) {
- SDMA_DBG(req, "SDMA txreq add page failed %d\n", ret);
- return ret;
- }
- iov_offset += len;
- queued += len;
- data_sent += len;
- if (unlikely(queued < datalen && pageidx == iovec->npages &&
- req->iov_idx < req->data_iovs - 1)) {
- iovec->offset += iov_offset;
- iovec = &req->iovs[++req->iov_idx];
- iov_offset = 0;
- }
-
- *queued_ptr = queued;
- *data_sent_ptr = data_sent;
- *iov_offset_ptr = iov_offset;
- return ret;
-}
-
static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
{
int ret = 0;
@@ -769,8 +727,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
maxpkts = req->info.npkts - req->seqnum;

while (npkts < maxpkts) {
- u32 datalen = 0, queued = 0, data_sent = 0;
- u64 iov_offset = 0;
+ u32 datalen = 0;

/*
* Check whether any of the completions have come back
@@ -863,27 +820,17 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
goto free_txreq;
}

- /*
- * If the request contains any data vectors, add up to
- * fragsize bytes to the descriptor.
- */
- while (queued < datalen &&
- (req->sent + data_sent) < req->data_len) {
- ret = user_sdma_txadd(req, tx, iovec, datalen,
- &queued, &data_sent, &iov_offset);
- if (ret)
- goto free_txreq;
- }
- /*
- * The txreq was submitted successfully so we can update
- * the counters.
- */
req->koffset += datalen;
if (req_opcode(req->info.ctrl) == EXPECTED)
req->tidoffset += datalen;
- req->sent += data_sent;
- if (req->data_len)
- iovec->offset += iov_offset;
+ req->sent += datalen;
+ while (datalen) {
+ ret = add_system_pages_to_sdma_packet(req, tx, iovec,
+ &datalen);
+ if (ret)
+ goto free_txreq;
+ iovec = &req->iovs[req->iov_idx];
+ }
list_add_tail(&tx->txreq.list, &req->txps);
/*
* It is important to increment this here as it is used to
@@ -920,133 +867,14 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
{
struct evict_data evict_data;
+ struct mmu_rb_handler *handler = pq->handler;

evict_data.cleared = 0;
evict_data.target = npages;
- hfi1_mmu_rb_evict(pq->handler, &evict_data);
+ hfi1_mmu_rb_evict(handler, &evict_data);
return evict_data.cleared;
}

-static int pin_sdma_pages(struct user_sdma_request *req,
- struct user_sdma_iovec *iovec,
- struct sdma_mmu_node *node,
- int npages)
-{
- int pinned, cleared;
- struct page **pages;
- struct hfi1_user_sdma_pkt_q *pq = req->pq;
-
- pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
- memcpy(pages, node->pages, node->npages * sizeof(*pages));
-
- npages -= node->npages;
-retry:
- if (!hfi1_can_pin_pages(pq->dd, current->mm,
- atomic_read(&pq->n_locked), npages)) {
- cleared = sdma_cache_evict(pq, npages);
- if (cleared >= npages)
- goto retry;
- }
- pinned = hfi1_acquire_user_pages(current->mm,
- ((unsigned long)iovec->iov.iov_base +
- (node->npages * PAGE_SIZE)), npages, 0,
- pages + node->npages);
- if (pinned < 0) {
- kfree(pages);
- return pinned;
- }
- if (pinned != npages) {
- unpin_vector_pages(current->mm, pages, node->npages, pinned);
- return -EFAULT;
- }
- kfree(node->pages);
- node->rb.len = iovec->iov.iov_len;
- node->pages = pages;
- atomic_add(pinned, &pq->n_locked);
- return pinned;
-}
-
-static void unpin_sdma_pages(struct sdma_mmu_node *node)
-{
- if (node->npages) {
- unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
- node->npages);
- atomic_sub(node->npages, &node->pq->n_locked);
- }
-}
-
-static int pin_vector_pages(struct user_sdma_request *req,
- struct user_sdma_iovec *iovec)
-{
- int ret = 0, pinned, npages;
- struct hfi1_user_sdma_pkt_q *pq = req->pq;
- struct sdma_mmu_node *node = NULL;
- struct mmu_rb_node *rb_node;
- struct iovec *iov;
- bool extracted;
-
- extracted =
- hfi1_mmu_rb_remove_unless_exact(pq->handler,
- (unsigned long)
- iovec->iov.iov_base,
- iovec->iov.iov_len, &rb_node);
- if (rb_node) {
- node = container_of(rb_node, struct sdma_mmu_node, rb);
- if (!extracted) {
- atomic_inc(&node->refcount);
- iovec->pages = node->pages;
- iovec->npages = node->npages;
- iovec->node = node;
- return 0;
- }
- }
-
- if (!node) {
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
- node->rb.addr = (unsigned long)iovec->iov.iov_base;
- node->pq = pq;
- atomic_set(&node->refcount, 0);
- }
-
- iov = &iovec->iov;
- npages = num_user_pages((unsigned long)iov->iov_base, iov->iov_len);
- if (node->npages < npages) {
- pinned = pin_sdma_pages(req, iovec, node, npages);
- if (pinned < 0) {
- ret = pinned;
- goto bail;
- }
- node->npages += pinned;
- npages = node->npages;
- }
- iovec->pages = node->pages;
- iovec->npages = npages;
- iovec->node = node;
-
- ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
- if (ret) {
- iovec->node = NULL;
- goto bail;
- }
- return 0;
-bail:
- unpin_sdma_pages(node);
- kfree(node);
- return ret;
-}
-
-static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
- unsigned start, unsigned npages)
-{
- hfi1_release_user_pages(mm, pages + start, npages, false);
- kfree(pages);
-}
-
static int check_header_template(struct user_sdma_request *req,
struct hfi1_pkt_header *hdr, u32 lrhlen,
u32 datalen)
@@ -1388,7 +1216,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
if (req->seqcomp != req->info.npkts - 1)
return;

- user_sdma_free_request(req, false);
+ user_sdma_free_request(req);
set_comp_state(pq, cq, req->info.comp_idx, state, status);
pq_update(pq);
}
@@ -1399,10 +1227,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
wake_up(&pq->wait);
}

-static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
+static void user_sdma_free_request(struct user_sdma_request *req)
{
- int i;
-
if (!list_empty(&req->txps)) {
struct sdma_txreq *t, *p;

@@ -1415,21 +1241,6 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
}
}

- for (i = 0; i < req->data_iovs; i++) {
- struct sdma_mmu_node *node = req->iovs[i].node;
-
- if (!node)
- continue;
-
- req->iovs[i].node = NULL;
-
- if (unpin)
- hfi1_mmu_rb_remove(req->pq->handler,
- &node->rb);
- else
- atomic_dec(&node->refcount);
- }
-
kfree(req->tids);
clear_bit(req->info.comp_idx, req->pq->req_in_use);
}
@@ -1447,6 +1258,368 @@ static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
idx, state, ret);
}

+static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
+ unsigned int start, unsigned int npages)
+{
+ hfi1_release_user_pages(mm, pages + start, npages, false);
+ kfree(pages);
+}
+
+static void free_system_node(struct sdma_mmu_node *node)
+{
+ if (node->npages) {
+ unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
+ node->npages);
+ atomic_sub(node->npages, &node->pq->n_locked);
+ }
+ kfree(node);
+}
+
+static inline void acquire_node(struct sdma_mmu_node *node)
+{
+ atomic_inc(&node->refcount);
+ WARN_ON(atomic_read(&node->refcount) < 0);
+}
+
+static inline void release_node(struct mmu_rb_handler *handler,
+ struct sdma_mmu_node *node)
+{
+ atomic_dec(&node->refcount);
+ WARN_ON(atomic_read(&node->refcount) < 0);
+}
+
+static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
+ unsigned long start,
+ unsigned long end)
+{
+ struct mmu_rb_node *rb_node;
+ struct sdma_mmu_node *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&handler->lock, flags);
+ rb_node = hfi1_mmu_rb_get_first(handler, start, (end - start));
+ if (!rb_node) {
+ spin_unlock_irqrestore(&handler->lock, flags);
+ return NULL;
+ }
+ node = container_of(rb_node, struct sdma_mmu_node, rb);
+ acquire_node(node);
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+ return node;
+}
+
+static int pin_system_pages(struct user_sdma_request *req,
+ uintptr_t start_address, size_t length,
+ struct sdma_mmu_node *node, int npages)
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ int pinned, cleared;
+ struct page **pages;
+
+ pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+retry:
+ if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked),
+ npages)) {
+ SDMA_DBG(req, "Evicting: nlocked %u npages %u",
+ atomic_read(&pq->n_locked), npages);
+ cleared = sdma_cache_evict(pq, npages);
+ if (cleared >= npages)
+ goto retry;
+ }
+
+ SDMA_DBG(req, "Acquire user pages start_address %lx node->npages %u npages %u",
+ start_address, node->npages, npages);
+ pinned = hfi1_acquire_user_pages(current->mm, start_address, npages, 0,
+ pages);
+
+ if (pinned < 0) {
+ kfree(pages);
+ SDMA_DBG(req, "pinned %d", pinned);
+ return pinned;
+ }
+ if (pinned != npages) {
+ unpin_vector_pages(current->mm, pages, node->npages, pinned);
+ SDMA_DBG(req, "npages %u pinned %d", npages, pinned);
+ return -EFAULT;
+ }
+ node->rb.addr = start_address;
+ node->rb.len = length;
+ node->pages = pages;
+ node->npages = npages;
+ atomic_add(pinned, &pq->n_locked);
+ SDMA_DBG(req, "done. pinned %d", pinned);
+ return 0;
+}
+
+static int add_system_pinning(struct user_sdma_request *req,
+ struct sdma_mmu_node **node_p,
+ unsigned long start, unsigned long len)
+
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ struct sdma_mmu_node *node;
+ int ret;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ node->pq = pq;
+ ret = pin_system_pages(req, start, len, node, PFN_DOWN(len));
+ if (ret == 0) {
+ ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
+ if (ret)
+ free_system_node(node);
+ else
+ *node_p = node;
+
+ return ret;
+ }
+
+ kfree(node);
+ return ret;
+}
+
+static int get_system_cache_entry(struct user_sdma_request *req,
+ struct sdma_mmu_node **node_p,
+ size_t req_start, size_t req_len)
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ u64 start = ALIGN_DOWN(req_start, PAGE_SIZE);
+ u64 end = PFN_ALIGN(req_start + req_len);
+ struct mmu_rb_handler *handler = pq->handler;
+ int ret;
+
+ if ((end - start) == 0) {
+ SDMA_DBG(req,
+ "Request for empty cache entry req_start %lx req_len %lx start %llx end %llx",
+ req_start, req_len, start, end);
+ return -EINVAL;
+ }
+
+ SDMA_DBG(req, "req_start %lx req_len %lu", req_start, req_len);
+
+ while (1) {
+ struct sdma_mmu_node *node =
+ find_system_node(handler, start, end);
+ u64 prepend_len = 0;
+
+ SDMA_DBG(req, "node %p start %llx end %llu", node, start, end);
+ if (!node) {
+ ret = add_system_pinning(req, node_p, start,
+ end - start);
+ if (ret == -EEXIST) {
+ /*
+ * Another execution context has inserted a
+				 * conflicting entry first.
+ */
+ continue;
+ }
+ return ret;
+ }
+
+ if (node->rb.addr <= start) {
+ /*
+ * This entry covers at least part of the region. If it doesn't extend
+ * to the end, then this will be called again for the next segment.
+ */
+ *node_p = node;
+ return 0;
+ }
+
+ SDMA_DBG(req, "prepend: node->rb.addr %lx, node->refcount %d",
+ node->rb.addr, atomic_read(&node->refcount));
+ prepend_len = node->rb.addr - start;
+
+ /*
+		 * This node will not be returned; a new node will be
+		 * created instead, so release the reference.
+ */
+ release_node(handler, node);
+
+ /* Prepend a node to cover the beginning of the allocation */
+ ret = add_system_pinning(req, node_p, start, prepend_len);
+ if (ret == -EEXIST) {
+			/* Another execution context has inserted a conflicting entry first. */
+ continue;
+ }
+ return ret;
+ }
+}
+
+static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct sdma_mmu_node *cache_entry,
+ size_t start,
+ size_t from_this_cache_entry)
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ unsigned int page_offset;
+ unsigned int from_this_page;
+ size_t page_index;
+ void *ctx;
+ int ret;
+
+ /*
+ * Because the cache may be more fragmented than the memory that is being accessed,
+ * it's not strictly necessary to have a descriptor per cache entry.
+ */
+
+ while (from_this_cache_entry) {
+ page_index = PFN_DOWN(start - cache_entry->rb.addr);
+
+ if (page_index >= cache_entry->npages) {
+ SDMA_DBG(req,
+ "Request for page_index %zu >= cache_entry->npages %u",
+ page_index, cache_entry->npages);
+ return -EINVAL;
+ }
+
+ page_offset = start - ALIGN_DOWN(start, PAGE_SIZE);
+ from_this_page = PAGE_SIZE - page_offset;
+
+ if (from_this_page < from_this_cache_entry) {
+ ctx = NULL;
+ } else {
+ /*
+			 * If they are equal, the next line has no practical effect,
+			 * but it's better to do a register-to-register copy than a
+			 * conditional branch.
+ */
+ from_this_page = from_this_cache_entry;
+ ctx = cache_entry;
+ }
+
+ ret = sdma_txadd_page(pq->dd, ctx, &tx->txreq,
+ cache_entry->pages[page_index],
+ page_offset, from_this_page);
+ if (ret) {
+ /*
+ * When there's a failure, the entire request is freed by
+ * user_sdma_send_pkts().
+ */
+ SDMA_DBG(req,
+ "sdma_txadd_page failed %d page_index %lu page_offset %u from_this_page %u",
+ ret, page_index, page_offset, from_this_page);
+ return ret;
+ }
+ start += from_this_page;
+ from_this_cache_entry -= from_this_page;
+ }
+ return 0;
+}
+
+static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct user_sdma_iovec *iovec,
+ size_t from_this_iovec)
+{
+ struct mmu_rb_handler *handler = req->pq->handler;
+
+ while (from_this_iovec > 0) {
+ struct sdma_mmu_node *cache_entry;
+ size_t from_this_cache_entry;
+ size_t start;
+ int ret;
+
+ start = (uintptr_t)iovec->iov.iov_base + iovec->offset;
+ ret = get_system_cache_entry(req, &cache_entry, start,
+ from_this_iovec);
+ if (ret) {
+ SDMA_DBG(req, "pin system segment failed %d", ret);
+ return ret;
+ }
+
+ from_this_cache_entry = cache_entry->rb.len - (start - cache_entry->rb.addr);
+ if (from_this_cache_entry > from_this_iovec)
+ from_this_cache_entry = from_this_iovec;
+
+ ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start,
+ from_this_cache_entry);
+ if (ret) {
+ /*
+ * We're guaranteed that there will be no descriptor
+ * completion callback that releases this node
+ * because only the last descriptor referencing it
+ * has a context attached, and a failure means the
+ * last descriptor was never added.
+ */
+ release_node(handler, cache_entry);
+ SDMA_DBG(req, "add system segment failed %d", ret);
+ return ret;
+ }
+
+ iovec->offset += from_this_cache_entry;
+ from_this_iovec -= from_this_cache_entry;
+ }
+
+ return 0;
+}
+
+static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct user_sdma_iovec *iovec,
+ u32 *pkt_data_remaining)
+{
+ size_t remaining_to_add = *pkt_data_remaining;
+ /*
+ * Walk through iovec entries, ensure the associated pages
+ * are pinned and mapped, add data to the packet until no more
+ * data remains to be added.
+ */
+ while (remaining_to_add > 0) {
+ struct user_sdma_iovec *cur_iovec;
+ size_t from_this_iovec;
+ int ret;
+
+ cur_iovec = iovec;
+ from_this_iovec = iovec->iov.iov_len - iovec->offset;
+
+ if (from_this_iovec > remaining_to_add) {
+ from_this_iovec = remaining_to_add;
+ } else {
+ /* The current iovec entry will be consumed by this pass. */
+ req->iov_idx++;
+ iovec++;
+ }
+
+ ret = add_system_iovec_to_sdma_packet(req, tx, cur_iovec,
+ from_this_iovec);
+ if (ret)
+ return ret;
+
+ remaining_to_add -= from_this_iovec;
+ }
+ *pkt_data_remaining = remaining_to_add;
+
+ return 0;
+}
+
+void system_descriptor_complete(struct hfi1_devdata *dd,
+ struct sdma_desc *descp)
+{
+ switch (sdma_mapping_type(descp)) {
+ case SDMA_MAP_SINGLE:
+ dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp),
+ sdma_mapping_len(descp), DMA_TO_DEVICE);
+ break;
+ case SDMA_MAP_PAGE:
+ dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp),
+ sdma_mapping_len(descp), DMA_TO_DEVICE);
+ break;
+ }
+
+ if (descp->pinning_ctx) {
+ struct sdma_mmu_node *node = descp->pinning_ctx;
+
+ release_node(node->rb.handler, node);
+ }
+}
+
static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
unsigned long len)
{
@@ -1493,8 +1666,7 @@ static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
struct sdma_mmu_node *node =
container_of(mnode, struct sdma_mmu_node, rb);

- unpin_sdma_pages(node);
- kfree(node);
+ free_system_node(node);
}

static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
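
The hfi1 hunks above move user SDMA page pinning into a shared, reference-counted cache: get_system_cache_entry() looks up an existing pinning that covers the start of the requested range and only pins and inserts a new node when none is found, retrying when a racing context inserts a conflicting entry first (-EEXIST). Below is a minimal sketch of that lookup-or-insert-with-retry shape; cache_find() and cache_insert() are hypothetical stand-ins for the hfi1_mmu_rb_* helpers, and EEXIST comes from errno.h (or linux/errno.h in kernel code).

/* Hypothetical stand-ins for hfi1_mmu_rb_get_first()/hfi1_mmu_rb_insert(). */
struct entry;
struct entry *cache_find(unsigned long start, unsigned long end);
int cache_insert(unsigned long start, unsigned long len, struct entry **out);

static int get_entry(unsigned long start, unsigned long end, struct entry **out)
{
	int ret;

	for (;;) {
		/* cache_find() takes a reference on the node it returns. */
		struct entry *e = cache_find(start, end);

		if (e) {
			/* Covers at least the start of the range. */
			*out = e;
			return 0;
		}
		ret = cache_insert(start, end - start, out);
		if (ret != -EEXIST)
			return ret;	/* 0 on success, other errors fatal */
		/* Another context inserted a conflicting entry; look it up. */
	}
}

The real code additionally prepends a new node when an existing entry starts above the requested address, but the retry-on-EEXIST loop is the core of the race handling.
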
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index ea56eb57e656..a241836371dc 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -112,16 +112,11 @@ struct sdma_mmu_node {
struct user_sdma_iovec {
struct list_head list;
struct iovec iov;
- /* number of pages in this vector */
- unsigned int npages;
- /* array of pinned pages for this vector */
- struct page **pages;
/*
* offset into the virtual address space of the vector at
* which we last left off.
*/
u64 offset;
- struct sdma_mmu_node *node;
};

/* evict operation argument */
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index e6e17984553c..39ca32d9ae6a 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -778,8 +778,8 @@ static int build_verbs_tx_desc(

/* add icrc, lt byte, and padding to flit */
if (extra_bytes)
- ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
- sde->dd->sdma_pad_phys, extra_bytes);
+ ret = sdma_txadd_daddr(sde->dd, &tx->txreq, sde->dd->sdma_pad_phys,
+ extra_bytes);

bail_txadd:
return ret;
diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c
index c3f0f8d877c3..727eedfba332 100644
--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
+++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
@@ -64,6 +64,7 @@ static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,

/* combine physically continuous fragments later? */
ret = sdma_txadd_page(sde->dd,
+ NULL,
&tx->txreq,
skb_frag_page(frag),
skb_frag_off(frag),
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b17d6ebc5b70..488c906c0432 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -412,9 +412,13 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev,
struct mlx4_ib_qp *qp,
struct mlx4_ib_create_qp *ucmd)
{
+ u32 cnt;
+
/* Sanity check SQ size before proceeding */
- if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes ||
- ucmd->log_sq_stride >
+ if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
+ cnt > dev->dev->caps.max_wqes)
+ return -EINVAL;
+ if (ucmd->log_sq_stride >
ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
return -EINVAL;
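
The mlx4 change above guards the user-supplied log_sq_bb_count with check_shl_overflow() so that a hostile log value cannot produce an undefined or wrapped shift before the max_wqes comparison. A small stand-alone illustration of the same guard, with the overflow test open-coded for userspace (check_shl_overflow() itself is a kernel helper from include/linux/overflow.h):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Returns true if (val << shift) cannot be represented in a u32. */
static bool shl_overflows_u32(uint32_t val, unsigned int shift, uint32_t *res)
{
	if (shift >= 32)		/* shifting directly would be undefined */
		return true;
	*res = val << shift;
	return (*res >> shift) != val;	/* high bits were lost */
}

int main(void)
{
	uint32_t cnt;
	unsigned int log_sq_bb_count = 40;	/* hostile userspace value */

	if (shl_overflows_u32(1, log_sq_bb_count, &cnt)) {
		fprintf(stderr, "log_sq_bb_count too large, rejecting\n");
		return 1;
	}
	printf("sq bb count = %u\n", cnt);
	return 0;
}
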
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 2211a0be16f3..f8e2baed27a5 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -666,7 +666,21 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
obj_id;

case MLX5_IB_OBJECT_DEVX_OBJ:
- return ((struct devx_obj *)uobj->object)->obj_id == obj_id;
+ {
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+ struct devx_obj *devx_uobj = uobj->object;
+
+ if (opcode == MLX5_CMD_OP_QUERY_FLOW_COUNTER &&
+ devx_uobj->flow_counter_bulk_size) {
+ u64 end;
+
+ end = devx_uobj->obj_id +
+ devx_uobj->flow_counter_bulk_size;
+ return devx_uobj->obj_id <= obj_id && end > obj_id;
+ }
+
+ return devx_uobj->obj_id == obj_id;
+ }

default:
return false;
@@ -1517,10 +1531,17 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
goto obj_free;

if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
- u8 bulk = MLX5_GET(alloc_flow_counter_in,
- cmd_in,
- flow_counter_bulk);
- obj->flow_counter_bulk_size = 128UL * bulk;
+ u32 bulk = MLX5_GET(alloc_flow_counter_in,
+ cmd_in,
+ flow_counter_bulk_log_size);
+
+ if (bulk)
+ bulk = 1 << bulk;
+ else
+ bulk = 128UL * MLX5_GET(alloc_flow_counter_in,
+ cmd_in,
+ flow_counter_bulk);
+ obj->flow_counter_bulk_size = bulk;
}

uobj->object = obj;
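
The devx hunks above do two related things: they record the size of a bulk ALLOC_FLOW_COUNTER allocation (1 << flow_counter_bulk_log_size when the log field is set, otherwise 128 * flow_counter_bulk), and they accept a QUERY_FLOW_COUNTER whose object id falls anywhere inside the allocated range rather than only at its base. A minimal sketch of that range test, mirroring the u64 end computation used above to avoid 32-bit wrap:

#include <stdbool.h>
#include <stdint.h>

static bool bulk_counter_owns_id(uint32_t obj_id, uint32_t bulk_size,
				 uint32_t query_id)
{
	uint64_t end = (uint64_t)obj_id + bulk_size;	/* avoid u32 wrap */

	if (!bulk_size)			/* single counter: exact match only */
		return query_id == obj_id;
	return query_id >= obj_id && query_id < end;
}

int main(void)
{
	/* A bulk of 128 counters starting at id 0x400 owns ids 0x400..0x47f. */
	return bulk_counter_owns_id(0x400, 128, 0x47f) ? 0 : 1;
}
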
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index cf953d23d18d..f7d3643b08f5 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4408,7 +4408,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return -EINVAL;

if (attr->port_num == 0 ||
- attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) {
+ attr->port_num > dev->num_ports) {
mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
attr->port_num, dev->num_ports);
return -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index d5105b5c9979..cb5cee3dee2b 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -380,6 +380,9 @@ static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
struct mlx5_mkey_seg *seg,
unsigned int access_flags)
{
+ bool ro_read = (access_flags & IB_ACCESS_RELAXED_ORDERING) &&
+ pcie_relaxed_ordering_enabled(dev->mdev->pdev);
+
MLX5_SET(mkc, seg, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
MLX5_SET(mkc, seg, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
MLX5_SET(mkc, seg, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
@@ -387,8 +390,7 @@ static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
MLX5_SET(mkc, seg, lr, 1);
MLX5_SET(mkc, seg, relaxed_ordering_write,
!!(access_flags & IB_ACCESS_RELAXED_ORDERING));
- MLX5_SET(mkc, seg, relaxed_ordering_read,
- !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+ MLX5_SET(mkc, seg, relaxed_ordering_read, ro_read);
}

int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 3acab569fbb9..2bdc4486c3da 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -464,8 +464,6 @@ void rvt_qp_exit(struct rvt_dev_info *rdi)
if (qps_inuse)
rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
qps_inuse);
- if (!rdi->qp_dev)
- return;

kfree(rdi->qp_dev->qp_table);
free_qpn_table(&rdi->qp_dev->qpn_table);
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index dacc174604bf..65b5cda5457b 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -437,9 +437,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,

dev_dbg(&netdev->dev, "siw: event %lu\n", event);

- if (dev_net(netdev) != &init_net)
- return NOTIFY_OK;
-
base_dev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_SIW);
if (!base_dev)
return NOTIFY_OK;
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 05052b49107f..6bb9e9e81ff4 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -558,7 +558,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
data_len -= plen;
fp_off = 0;

- if (++seg > (int)MAX_ARRAY) {
+ if (++seg >= (int)MAX_ARRAY) {
siw_dbg_qp(tx_qp(c_tx), "to many fragments\n");
siw_unmap_pages(iov, kmap_mask, seg-1);
wqe->processed -= c_tx->bytes_unsent;
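
The siw change above tightens the bound check on the fragment counter from '>' to '>=', so the transmit path bails out one iteration earlier instead of touching a slot past the end of the per-transmit arrays. The generic off-by-one it guards against is easy to show with a self-contained example (the array below is illustrative, not the siw structures):

#include <stdio.h>

#define MAX_ARRAY 4

int main(void)
{
	int used[MAX_ARRAY] = { 0 };
	int seg = 0;

	while (1) {
		used[seg] = 1;			/* safe: seg < MAX_ARRAY here */
		if (++seg >= MAX_ARRAY) {	/* ">" would later allow used[4] */
			printf("too many fragments at seg=%d\n", seg);
			break;
		}
	}
	return 0;
}
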
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index b360a1527cd1..7cca171478a2 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2507,8 +2507,8 @@ isert_wait4cmds(struct iscsit_conn *conn)
isert_info("iscsit_conn %p\n", conn);

if (conn->sess) {
- target_stop_session(conn->sess->se_sess);
- target_wait_for_sess_cmds(conn->sess->se_sess);
+ target_stop_cmd_counter(conn->cmd_cnt);
+ target_wait_for_cmds(conn->cmd_cnt);
}
}

diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 3c3fae738c3e..25e799dba999 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -549,6 +549,7 @@ static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
*/
static int srpt_refresh_port(struct srpt_port *sport)
{
+ struct ib_mad_agent *mad_agent;
struct ib_mad_reg_req reg_req;
struct ib_port_modify port_modify;
struct ib_port_attr port_attr;
@@ -593,24 +594,26 @@ static int srpt_refresh_port(struct srpt_port *sport)
set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

- sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
- sport->port,
- IB_QPT_GSI,
- &reg_req, 0,
- srpt_mad_send_handler,
- srpt_mad_recv_handler,
- sport, 0);
- if (IS_ERR(sport->mad_agent)) {
+ mad_agent = ib_register_mad_agent(sport->sdev->device,
+ sport->port,
+ IB_QPT_GSI,
+ &reg_req, 0,
+ srpt_mad_send_handler,
+ srpt_mad_recv_handler,
+ sport, 0);
+ if (IS_ERR(mad_agent)) {
pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
dev_name(&sport->sdev->device->dev), sport->port,
- PTR_ERR(sport->mad_agent));
+ PTR_ERR(mad_agent));
sport->mad_agent = NULL;
memset(&port_modify, 0, sizeof(port_modify));
port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
ib_modify_port(sport->sdev->device, sport->port, 0,
&port_modify);
-
+ return 0;
}
+
+ sport->mad_agent = mad_agent;
}

return 0;
diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c
index 5000f5fd9ec3..45c575df994e 100644
--- a/drivers/input/touchscreen/raspberrypi-ts.c
+++ b/drivers/input/touchscreen/raspberrypi-ts.c
@@ -134,7 +134,7 @@ static int rpi_ts_probe(struct platform_device *pdev)
return -ENOENT;
}

- fw = rpi_firmware_get(fw_node);
+ fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
of_node_put(fw_node);
if (!fw)
return -EPROBE_DEFER;
@@ -160,7 +160,6 @@ static int rpi_ts_probe(struct platform_device *pdev)
touchbuf = (u32)ts->fw_regs_phys;
error = rpi_firmware_property(fw, RPI_FIRMWARE_FRAMEBUFFER_SET_TOUCHBUF,
&touchbuf, sizeof(touchbuf));
- rpi_firmware_put(fw);
if (error || touchbuf != 0) {
dev_warn(dev, "Failed to set touchbuf, %d\n", error);
return error;
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index 6a9e6b563320..9047481fafd4 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -11,7 +11,6 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/slab.h>

@@ -499,12 +498,6 @@ int qnoc_probe(struct platform_device *pdev)
if (ret)
return ret;

- if (desc->has_bus_pd) {
- ret = dev_pm_domain_attach(dev, true);
- if (ret)
- return ret;
- }
-
provider = &qp->provider;
provider->dev = dev;
provider->set = qcom_icc_set;
diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h
index a49af844ab13..02257b0d3d5c 100644
--- a/drivers/interconnect/qcom/icc-rpm.h
+++ b/drivers/interconnect/qcom/icc-rpm.h
@@ -91,7 +91,6 @@ struct qcom_icc_desc {
size_t num_nodes;
const char * const *clocks;
size_t num_clocks;
- bool has_bus_pd;
enum qcom_icc_type type;
const struct regmap_config *regmap_cfg;
unsigned int qos_offset;
diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c
index 25a1a32bc611..14efd2761b7a 100644
--- a/drivers/interconnect/qcom/msm8996.c
+++ b/drivers/interconnect/qcom/msm8996.c
@@ -1823,7 +1823,6 @@ static const struct qcom_icc_desc msm8996_a0noc = {
.num_nodes = ARRAY_SIZE(a0noc_nodes),
.clocks = bus_a0noc_clocks,
.num_clocks = ARRAY_SIZE(bus_a0noc_clocks),
- .has_bus_pd = true,
.regmap_cfg = &msm8996_a0noc_regmap_config
};

diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 1d0a70c85333..5ecc17240eff 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -1002,8 +1002,8 @@ struct amd_ir_data {
*/
struct irq_cfg *cfg;
int ga_vector;
- int ga_root_ptr;
- int ga_tag;
+ u64 ga_root_ptr;
+ u32 ga_tag;
};

struct amd_irte_ops {
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 20adb9b323d8..26fb78003889 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1657,10 +1657,6 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_iommu[iommu->index] += 1;
domain->dev_cnt += 1;

- /* Override supported page sizes */
- if (domain->flags & PD_GIOV_MASK)
- domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
-
/* Update device table */
set_dte_entry(iommu, dev_data->devid, domain,
ats, dev_data->iommu_v2);
@@ -2039,6 +2035,8 @@ static int protection_domain_init_v2(struct protection_domain *domain)

domain->flags |= PD_GIOV_MASK;

+ domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
+
if (domain_enable_v2(domain, 1)) {
domain_id_free(domain->id);
return -ENOMEM;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index bfb2f163c691..2bcd1f23d07d 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1912,8 +1912,13 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
return NULL;

domain->type = type;
- /* Assume all sizes by default; the driver may override this later */
- domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
+ /*
+ * If not already set, assume all sizes by default; the driver
+ * may override this later
+ */
+ if (!domain->pgsize_bitmap)
+ domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
+
if (!domain->ops)
domain->ops = bus->iommu_ops->default_domain_ops;

diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 56d007582b6f..e93ca9dc37c8 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -1237,6 +1237,14 @@ static int mtk_iommu_probe(struct platform_device *pdev)
return PTR_ERR(data->bclk);
}

+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN)) {
+ ret = dma_set_mask(dev, DMA_BIT_MASK(35));
+ if (ret) {
+ dev_err(dev, "Failed to set dma_mask 35.\n");
+ return ret;
+ }
+ }
+
pm_runtime_enable(dev);

if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 499d0f215a8b..2378cfb7443e 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -814,7 +814,7 @@ config LEDS_SPI_BYTE
config LEDS_TI_LMU_COMMON
tristate "LED driver for TI LMU"
depends on LEDS_CLASS
- depends on REGMAP
+ select REGMAP
help
Say Y to enable the LED driver for TI LMU devices.
This supports common features between the TI LM3532, LM3631, LM3632,
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 161bef65c6b7..62a968613cc3 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -691,8 +691,9 @@ tca6507_led_dt_init(struct device *dev)
if (fwnode_property_read_string(child, "label", &led.name))
led.name = fwnode_get_name(child);

- fwnode_property_read_string(child, "linux,default-trigger",
- &led.default_trigger);
+ if (fwnode_property_read_string(child, "linux,default-trigger",
+ &led.default_trigger))
+ led.default_trigger = NULL;

led.flags = 0;
if (fwnode_property_match_string(child, "compatible",
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 539a2ed4e13d..a0e717a986dc 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -86,6 +86,7 @@ config ADB_PMU_LED

config ADB_PMU_LED_DISK
bool "Use front LED as DISK LED by default"
+ depends on ATA
depends on ADB_PMU_LED
depends on LEDS_CLASS
select LEDS_TRIGGERS
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index be5d4593db93..98fb31e00e20 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -171,6 +171,7 @@ static void wf_sat_release(struct kref *ref)

if (sat->nr >= 0)
sats[sat->nr] = NULL;
+ of_node_put(sat->node);
kfree(sat);
}

diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
index 853901acaeec..08aa840cccac 100644
--- a/drivers/mailbox/mailbox-mpfs.c
+++ b/drivers/mailbox/mailbox-mpfs.c
@@ -79,6 +79,13 @@ static bool mpfs_mbox_busy(struct mpfs_mbox *mbox)
return status & SCB_STATUS_BUSY_MASK;
}

+static bool mpfs_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+
+ return !mpfs_mbox_busy(mbox);
+}
+
static int mpfs_mbox_send_data(struct mbox_chan *chan, void *data)
{
struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
@@ -182,7 +189,6 @@ static irqreturn_t mpfs_mbox_inbox_isr(int irq, void *data)

mpfs_mbox_rx_data(chan);

- mbox_chan_txdone(chan, 0);
return IRQ_HANDLED;
}

@@ -212,6 +218,7 @@ static const struct mbox_chan_ops mpfs_mbox_ops = {
.send_data = mpfs_mbox_send_data,
.startup = mpfs_mbox_startup,
.shutdown = mpfs_mbox_shutdown,
+ .last_tx_done = mpfs_mbox_last_tx_done,
};

static int mpfs_mbox_probe(struct platform_device *pdev)
@@ -247,7 +254,8 @@ static int mpfs_mbox_probe(struct platform_device *pdev)
mbox->controller.num_chans = 1;
mbox->controller.chans = mbox->chans;
mbox->controller.ops = &mpfs_mbox_ops;
- mbox->controller.txdone_irq = true;
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 10u;

ret = devm_mbox_controller_register(&pdev->dev, &mbox->controller);
if (ret) {
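
The mailbox-mpfs change above switches the controller from interrupt-signalled TX completion to polled completion: txdone_irq is dropped in favour of txdone_poll plus a last_tx_done() callback that reports the inverse of the busy bit, and the inbox ISR no longer calls mbox_chan_txdone(). A condensed sketch of the polled-completion wiring for a mailbox controller driver; my_hw_busy(), my_send_data() and ctlr are hypothetical names standing in for the driver's own:

/* my_hw_busy()/my_send_data() are hypothetical driver helpers. */
static bool my_last_tx_done(struct mbox_chan *chan)
{
	return !my_hw_busy(chan->con_priv);
}

static const struct mbox_chan_ops my_ops = {
	.send_data	= my_send_data,
	.last_tx_done	= my_last_tx_done,
};

/* In probe, before registering the controller: */
	ctlr->ops = &my_ops;
	ctlr->txdone_poll = true;	/* instead of txdone_irq */
	ctlr->txpoll_period = 10;	/* ms between last_tx_done() checks */
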
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index 12e004ff1a14..e02a4a18e8c2 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -152,7 +152,7 @@ static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
struct zynqmp_ipi_message *msg;
u64 arg0, arg3;
struct arm_smccc_res res;
- int ret, i;
+ int ret, i, status = IRQ_NONE;

(void)irq;
arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
@@ -170,11 +170,11 @@ static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
memcpy_fromio(msg->data, mchan->req_buf,
msg->len);
mbox_chan_received_data(chan, (void *)msg);
- return IRQ_HANDLED;
+ status = IRQ_HANDLED;
}
}
}
- return IRQ_NONE;
+ return status;
}

/**
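
The zynqmp-ipi change above stops returning from the interrupt handler after the first serviced channel; it accumulates IRQ_HANDLED in a status variable so every channel whose bit is set in the notification gets its data delivered, and IRQ_NONE is only returned when nothing was serviced. The general shape, independent of the SMC plumbing (channel_has_data(), deliver() and num_chans are hypothetical):

static irqreturn_t my_isr(int irq, void *data)
{
	irqreturn_t status = IRQ_NONE;
	int i;

	for (i = 0; i < num_chans; i++) {
		if (!channel_has_data(i))
			continue;
		deliver(i);
		status = IRQ_HANDLED;	/* keep scanning, don't return early */
	}
	return status;
}
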
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 29e0b85eeaf0..e088081b7a8a 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -2205,6 +2205,7 @@ static int __init dm_clone_init(void)
r = dm_register_target(&clone_target);
if (r < 0) {
DMERR("Failed to register clone target");
+ kmem_cache_destroy(_hydration_cache);
return r;
}

diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 7efbdb42cf3b..3b34270ce607 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -124,9 +124,9 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
* Direction r or w?
*/
arg_name = dm_shift_arg(as);
- if (!strcasecmp(arg_name, "w"))
+ if (arg_name && !strcasecmp(arg_name, "w"))
fc->corrupt_bio_rw = WRITE;
- else if (!strcasecmp(arg_name, "r"))
+ else if (arg_name && !strcasecmp(arg_name, "r"))
fc->corrupt_bio_rw = READ;
else {
ti->error = "Invalid corrupt bio direction (r or w)";
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 53f9f765df9f..a2b8f8781a99 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -4646,11 +4646,13 @@ static int __init dm_integrity_init(void)
}

r = dm_register_target(&integrity_target);
-
- if (r < 0)
+ if (r < 0) {
DMERR("register failed %d", r);
+ kmem_cache_destroy(journal_io_cache);
+ return r;
+ }

- return r;
+ return 0;
}

static void __exit dm_integrity_exit(void)
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 41d55218b076..83aecd9250ba 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1151,10 +1151,13 @@ static int do_resume(struct dm_ioctl *param)
/* Do we need to load a new map ? */
if (new_map) {
sector_t old_size, new_size;
+ int srcu_idx;

/* Suspend if it isn't already suspended */
- if (param->flags & DM_SKIP_LOCKFS_FLAG)
+ old_map = dm_get_live_table(md, &srcu_idx);
+ if ((param->flags & DM_SKIP_LOCKFS_FLAG) || !old_map)
suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
+ dm_put_live_table(md, srcu_idx);
if (param->flags & DM_NOFLUSH_FLAG)
suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
if (!dm_suspended_md(md))
@@ -1539,11 +1542,12 @@ static int table_clear(struct file *filp, struct dm_ioctl *param, size_t param_s
has_new_map = true;
}

- param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
-
- __dev_status(hc->md, param);
md = hc->md;
up_write(&_hash_lock);
+
+ param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
+ __dev_status(md, param);
+
if (old_map) {
dm_sync_table(md);
dm_table_destroy(old_map);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 32b2d3b99d78..3acded2f976d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1203,21 +1203,12 @@ struct dm_crypto_profile {
struct mapped_device *md;
};

-struct dm_keyslot_evict_args {
- const struct blk_crypto_key *key;
- int err;
-};
-
static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct dm_keyslot_evict_args *args = data;
- int err;
+ const struct blk_crypto_key *key = data;

- err = blk_crypto_evict_key(bdev_get_queue(dev->bdev), args->key);
- if (!args->err)
- args->err = err;
- /* Always try to evict the key from all devices. */
+ blk_crypto_evict_key(dev->bdev, key);
return 0;
}

@@ -1230,7 +1221,6 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
{
struct mapped_device *md =
container_of(profile, struct dm_crypto_profile, profile)->md;
- struct dm_keyslot_evict_args args = { key };
struct dm_table *t;
int srcu_idx;

@@ -1243,11 +1233,12 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,

if (!ti->type->iterate_devices)
continue;
- ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
+ ti->type->iterate_devices(ti, dm_keyslot_evict_callback,
+ (void *)key);
}

dm_put_live_table(md, srcu_idx);
- return args.err;
+ return 0;
}

static int
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 64e8ac429984..14a9988ec30b 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -523,7 +523,7 @@ static int verity_verify_io(struct dm_verity_io *io)
sector_t cur_block = io->block + b;
struct ahash_request *req = verity_io_hash_req(v, io);

- if (v->validated_blocks &&
+ if (v->validated_blocks && bio->bi_status == BLK_STS_OK &&
likely(test_bit(cur_block, v->validated_blocks))) {
verity_bv_skip_block(v, io, iter);
continue;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 9a6503f5cb98..67398394cc9c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -995,11 +995,15 @@ static bool stop_waiting_barrier(struct r10conf *conf)
(!bio_list_empty(&bio_list[0]) || !bio_list_empty(&bio_list[1])))
return true;

- /* move on if recovery thread is blocked by us */
- if (conf->mddev->thread->tsk == current &&
- test_bit(MD_RECOVERY_RUNNING, &conf->mddev->recovery) &&
- conf->nr_queued > 0)
+ /*
+	 * move on if the io is issued from raid10d(); nr_pending is not
+	 * released from the original io (see handle_read_error()), so all
+	 * raise_barrier() callers are blocked until this io is done.
+ */
+ if (conf->mddev->thread->tsk == current) {
+ WARN_ON_ONCE(atomic_read(&conf->nr_pending) == 0);
return true;
+ }

return false;
}
@@ -1244,7 +1248,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
}
slot = r10_bio->read_slot;

- if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
+ if (!r10_bio->start_time &&
+ blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
r10_bio->start_time = bio_start_io_acct(bio);
read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);

@@ -1574,6 +1579,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
r10_bio->sector = bio->bi_iter.bi_sector;
r10_bio->state = 0;
r10_bio->read_slot = -1;
+ r10_bio->start_time = 0;
memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) *
conf->geo.raid_disks);

@@ -2609,11 +2615,22 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
{
struct r10conf *conf = mddev->private;
int d;
- struct bio *wbio, *wbio2;
+ struct bio *wbio = r10_bio->devs[1].bio;
+ struct bio *wbio2 = r10_bio->devs[1].repl_bio;
+
+ /* Need to test wbio2->bi_end_io before we call
+ * submit_bio_noacct as if the former is NULL,
+ * the latter is free to free wbio2.
+ */
+ if (wbio2 && !wbio2->bi_end_io)
+ wbio2 = NULL;

if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
fix_recovery_read_error(r10_bio);
- end_sync_request(r10_bio);
+ if (wbio->bi_end_io)
+ end_sync_request(r10_bio);
+ if (wbio2)
+ end_sync_request(r10_bio);
return;
}

@@ -2622,14 +2639,6 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* and submit the write request
*/
d = r10_bio->devs[1].devnum;
- wbio = r10_bio->devs[1].bio;
- wbio2 = r10_bio->devs[1].repl_bio;
- /* Need to test wbio2->bi_end_io before we call
- * submit_bio_noacct as if the former is NULL,
- * the latter is free to free wbio2.
- */
- if (wbio2 && !wbio2->bi_end_io)
- wbio2 = NULL;
if (wbio->bi_end_io) {
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
@@ -2978,9 +2987,13 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
md_error(mddev, rdev);

rdev_dec_pending(rdev, mddev);
- allow_barrier(conf);
r10_bio->state = 0;
raid10_read_request(mddev, r10_bio->master_bio, r10_bio);
+ /*
+ * allow_barrier after re-submit to ensure no sync io
+	 * can be issued while regular io is pending.
+ */
+ allow_barrier(conf);
}

static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
@@ -3289,10 +3302,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t chunk_mask = conf->geo.chunk_mask;
int page_idx = 0;

- if (!mempool_initialized(&conf->r10buf_pool))
- if (init_resync(conf))
- return 0;
-
/*
* Allow skipping a full rebuild for incremental assembly
* of a clean array, like RAID1 does.
@@ -3308,6 +3317,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
return mddev->dev_sectors - sector_nr;
}

+ if (!mempool_initialized(&conf->r10buf_pool))
+ if (init_resync(conf))
+ return 0;
+
skipped:
max_sector = mddev->dev_sectors;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
@@ -4004,6 +4017,20 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
return nc*fc;
}

+static void raid10_free_conf(struct r10conf *conf)
+{
+ if (!conf)
+ return;
+
+ mempool_exit(&conf->r10bio_pool);
+ kfree(conf->mirrors);
+ kfree(conf->mirrors_old);
+ kfree(conf->mirrors_new);
+ safe_put_page(conf->tmppage);
+ bioset_exit(&conf->bio_split);
+ kfree(conf);
+}
+
static struct r10conf *setup_conf(struct mddev *mddev)
{
struct r10conf *conf = NULL;
@@ -4086,13 +4113,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return conf;

out:
- if (conf) {
- mempool_exit(&conf->r10bio_pool);
- kfree(conf->mirrors);
- safe_put_page(conf->tmppage);
- bioset_exit(&conf->bio_split);
- kfree(conf);
- }
+ raid10_free_conf(conf);
return ERR_PTR(err);
}

@@ -4129,6 +4150,9 @@ static int raid10_run(struct mddev *mddev)
if (!conf)
goto out;

+ mddev->thread = conf->thread;
+ conf->thread = NULL;
+
if (mddev_is_clustered(conf->mddev)) {
int fc, fo;

@@ -4141,9 +4165,6 @@ static int raid10_run(struct mddev *mddev)
}
}

- mddev->thread = conf->thread;
- conf->thread = NULL;
-
if (mddev->queue) {
blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
@@ -4283,10 +4304,7 @@ static int raid10_run(struct mddev *mddev)

out_free_conf:
md_unregister_thread(&mddev->thread);
- mempool_exit(&conf->r10bio_pool);
- safe_put_page(conf->tmppage);
- kfree(conf->mirrors);
- kfree(conf);
+ raid10_free_conf(conf);
mddev->private = NULL;
out:
return -EIO;
@@ -4294,15 +4312,7 @@ static int raid10_run(struct mddev *mddev)

static void raid10_free(struct mddev *mddev, void *priv)
{
- struct r10conf *conf = priv;
-
- mempool_exit(&conf->r10bio_pool);
- safe_put_page(conf->tmppage);
- kfree(conf->mirrors);
- kfree(conf->mirrors_old);
- kfree(conf->mirrors_new);
- bioset_exit(&conf->bio_split);
- kfree(conf);
+ raid10_free_conf(priv);
}

static void raid10_quiesce(struct mddev *mddev, int quiesce)
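
Among the raid10 fixes above, the three duplicated teardown sequences (the setup_conf() error path, the raid10_run() error path and raid10_free()) are folded into a single raid10_free_conf() helper; the previous copies had drifted (mirrors_old/mirrors_new and bio_split were only freed in some of them), which is exactly the leak the consolidation closes. The generic shape of the pattern, with a hypothetical my_conf structure:

/* One NULL-tolerant free helper, called from every error and teardown path. */
struct my_conf {
	void *a;
	void *b;
};

static void my_free_conf(struct my_conf *conf)
{
	if (!conf)
		return;
	kfree(conf->a);		/* kfree(NULL) is a no-op, so partial setup is fine */
	kfree(conf->b);
	kfree(conf);
}
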
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7b820b81d8c2..f787c9e5b10e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6079,6 +6079,38 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
return ret;
}

+/*
+ * If the bio covers multiple data disks, find sector within the bio that has
+ * the lowest chunk offset in the first chunk.
+ */
+static sector_t raid5_bio_lowest_chunk_sector(struct r5conf *conf,
+ struct bio *bi)
+{
+ int sectors_per_chunk = conf->chunk_sectors;
+ int raid_disks = conf->raid_disks;
+ int dd_idx;
+ struct stripe_head sh;
+ unsigned int chunk_offset;
+ sector_t r_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
+ sector_t sector;
+
+ /* We pass in fake stripe_head to get back parity disk numbers */
+ sector = raid5_compute_sector(conf, r_sector, 0, &dd_idx, &sh);
+ chunk_offset = sector_div(sector, sectors_per_chunk);
+ if (sectors_per_chunk - chunk_offset >= bio_sectors(bi))
+ return r_sector;
+ /*
+ * Bio crosses to the next data disk. Check whether it's in the same
+ * chunk.
+ */
+ dd_idx++;
+ while (dd_idx == sh.pd_idx || dd_idx == sh.qd_idx)
+ dd_idx++;
+ if (dd_idx >= raid_disks)
+ return r_sector;
+ return r_sector + sectors_per_chunk - chunk_offset;
+}
+
static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -6150,6 +6182,17 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
}
md_account_bio(mddev, &bi);

+ /*
+	 * Let's start with the stripe with the lowest chunk offset in the first
+ * chunk. That has the best chances of creating IOs adjacent to
+ * previous IOs in case of sequential IO and thus creates the most
+ * sequential IO pattern. We don't bother with the optimization when
+ * reshaping as the performance benefit is not worth the complexity.
+ */
+ if (likely(conf->reshape_progress == MaxSector))
+ logical_sector = raid5_bio_lowest_chunk_sector(conf, bi);
+ s = (logical_sector - ctx.first_sector) >> RAID5_STRIPE_SHIFT(conf);
+
add_wait_queue(&conf->wait_for_overlap, &wait);
while (1) {
res = make_stripe_request(mddev, conf, &ctx, logical_sector,
@@ -6178,7 +6221,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
continue;
}

- s = find_first_bit(ctx.sectors_to_do, stripe_cnt);
+ s = find_next_bit_wrap(ctx.sectors_to_do, stripe_cnt, s);
if (s == stripe_cnt)
break;
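
The raid5 addition above starts submission at the sector within the bio that has the lowest offset inside the first chunk, so sequential bios generate per-disk IO in ascending order, and the later find_first_bit() becomes find_next_bit_wrap() so the remaining stripes are still all visited. A small worked example of the offset arithmetic with illustrative numbers, ignoring the parity-disk skip and stripe alignment that raid5_bio_lowest_chunk_sector() also performs:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sectors_per_chunk = 1024;	/* illustrative chunk size */
	uint64_t r_sector = 204800 + 256;	/* bio starts 256 sectors into a chunk */
	uint64_t bio_sectors = 2048;		/* spans more than one data disk */
	uint64_t chunk_offset = r_sector % sectors_per_chunk;	/* 256 */

	if (sectors_per_chunk - chunk_offset >= bio_sectors) {
		/* Bio fits in one chunk: keep the original start. */
		printf("start at %llu\n", (unsigned long long)r_sector);
	} else {
		/*
		 * Start at the first sector of the next chunk, which has
		 * chunk offset 0 -- the lowest offset covered by this bio.
		 */
		printf("start at %llu\n",
		       (unsigned long long)(r_sector + sectors_per_chunk - chunk_offset));
	}
	return 0;
}
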

diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
index 7c61873b7198..306dc35e925f 100644
--- a/drivers/media/i2c/hi846.c
+++ b/drivers/media/i2c/hi846.c
@@ -1472,21 +1472,26 @@ static int hi846_init_controls(struct hi846 *hi846)
if (ctrl_hdlr->error) {
dev_err(&client->dev, "v4l ctrl handler error: %d\n",
ctrl_hdlr->error);
- return ctrl_hdlr->error;
+ ret = ctrl_hdlr->error;
+ goto error;
}

ret = v4l2_fwnode_device_parse(&client->dev, &props);
if (ret)
- return ret;
+ goto error;

ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &hi846_ctrl_ops,
&props);
if (ret)
- return ret;
+ goto error;

hi846->sd.ctrl_handler = ctrl_hdlr;

return 0;
+
+error:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+ return ret;
}

static int hi846_set_video_mode(struct hi846 *hi846, int fps)
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index d034a67042e3..892cd97b7cab 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -941,6 +941,7 @@ static int max9286_v4l2_register(struct max9286_priv *priv)
static void max9286_v4l2_unregister(struct max9286_priv *priv)
{
fwnode_handle_put(priv->sd.fwnode);
+ v4l2_ctrl_handler_free(&priv->ctrls);
v4l2_async_unregister_subdev(&priv->sd);
max9286_v4l2_notifier_unregister(priv);
}
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index efa18d026ac3..aaf42ece0a11 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -1709,46 +1709,6 @@ static int ov8856_identify_module(struct ov8856 *ov8856)
return -ENXIO;
}

- ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
- OV8856_REG_VALUE_08BIT, OV8856_MODE_STREAMING);
- if (ret)
- return ret;
-
- ret = ov8856_write_reg(ov8856, OV8856_OTP_MODE_CTRL,
- OV8856_REG_VALUE_08BIT, OV8856_OTP_MODE_AUTO);
- if (ret) {
- dev_err(&client->dev, "failed to set otp mode");
- return ret;
- }
-
- ret = ov8856_write_reg(ov8856, OV8856_OTP_LOAD_CTRL,
- OV8856_REG_VALUE_08BIT,
- OV8856_OTP_LOAD_CTRL_ENABLE);
- if (ret) {
- dev_err(&client->dev, "failed to enable load control");
- return ret;
- }
-
- ret = ov8856_read_reg(ov8856, OV8856_MODULE_REVISION,
- OV8856_REG_VALUE_08BIT, &val);
- if (ret) {
- dev_err(&client->dev, "failed to read module revision");
- return ret;
- }
-
- dev_info(&client->dev, "OV8856 revision %x (%s) at address 0x%02x\n",
- val,
- val == OV8856_2A_MODULE ? "2A" :
- val == OV8856_1B_MODULE ? "1B" : "unknown revision",
- client->addr);
-
- ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
- OV8856_REG_VALUE_08BIT, OV8856_MODE_STANDBY);
- if (ret) {
- dev_err(&client->dev, "failed to exit streaming mode");
- return ret;
- }
-
ov8856->identified = true;

return 0;
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index 4ac645a56c14..9e9c7c071acc 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -1176,6 +1176,7 @@ static void dm1105_remove(struct pci_dev *pdev)
struct dvb_demux *dvbdemux = &dev->demux;
struct dmx_demux *dmx = &dvbdemux->dmx;

+ cancel_work_sync(&dev->ir.work);
dm1105_ir_exit(dev);
dmx->close(dmx);
dvb_net_release(&dev->dvbnet);
diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c
index 6a5053126237..437dbe5e75e2 100644
--- a/drivers/media/pci/saa7134/saa7134-ts.c
+++ b/drivers/media/pci/saa7134/saa7134-ts.c
@@ -300,6 +300,7 @@ int saa7134_ts_start(struct saa7134_dev *dev)

int saa7134_ts_fini(struct saa7134_dev *dev)
{
+ del_timer_sync(&dev->ts_q.timeout);
saa7134_pgtable_free(dev->pci, &dev->ts_q.pt);
return 0;
}
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index 3f0b0933eed6..3e773690468b 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -185,6 +185,7 @@ int saa7134_vbi_init1(struct saa7134_dev *dev)
int saa7134_vbi_fini(struct saa7134_dev *dev)
{
/* nothing */
+ del_timer_sync(&dev->vbi_q.timeout);
return 0;
}

diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 4d8974c9fcc9..29124756a62b 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -2146,6 +2146,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)

void saa7134_video_fini(struct saa7134_dev *dev)
{
+ del_timer_sync(&dev->video_q.timeout);
/* free stuff */
saa7134_pgtable_free(dev->pci, &dev->video_q.pt);
saa7134_pgtable_free(dev->pci, &dev->vbi_q.pt);
diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
index b27e6bed85f0..4918547793dc 100644
--- a/drivers/media/platform/amphion/vdec.c
+++ b/drivers/media/platform/amphion/vdec.c
@@ -139,7 +139,31 @@ static const struct vpu_format vdec_formats[] = {
{0, 0, 0, 0},
};

+static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vpu_inst *inst = ctrl_to_inst(ctrl);
+ struct vdec_t *vdec = inst->priv;
+ int ret = 0;
+
+ vpu_inst_lock(inst);
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE:
+ vdec->params.display_delay_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY:
+ vdec->params.display_delay = ctrl->val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ vpu_inst_unlock(inst);
+
+ return ret;
+}
+
static const struct v4l2_ctrl_ops vdec_ctrl_ops = {
+ .s_ctrl = vdec_op_s_ctrl,
.g_volatile_ctrl = vpu_helper_g_volatile_ctrl,
};

@@ -152,6 +176,14 @@ static int vdec_ctrl_init(struct vpu_inst *inst)
if (ret)
return ret;

+ v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY,
+ 0, 0, 1, 0);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE,
+ 0, 1, 1, 0);
+
ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 2);
if (ctrl)
diff --git a/drivers/media/platform/amphion/vpu_codec.h b/drivers/media/platform/amphion/vpu_codec.h
index 528a93f08ecd..bac6d0d94f8a 100644
--- a/drivers/media/platform/amphion/vpu_codec.h
+++ b/drivers/media/platform/amphion/vpu_codec.h
@@ -55,7 +55,8 @@ struct vpu_encode_params {
struct vpu_decode_params {
u32 codec_format;
u32 output_format;
- u32 b_dis_reorder;
+ u32 display_delay_enable;
+ u32 display_delay;
u32 b_non_frame;
u32 frame_count;
u32 end_flag;
diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
index 9f2890730fd7..ae094cdc9bfc 100644
--- a/drivers/media/platform/amphion/vpu_malone.c
+++ b/drivers/media/platform/amphion/vpu_malone.c
@@ -640,7 +640,9 @@ static int vpu_malone_set_params(struct vpu_shared_addr *shared,
hc->jpg[instance].jpg_mjpeg_interlaced = 0;
}

- hc->codec_param[instance].disp_imm = params->b_dis_reorder ? 1 : 0;
+ hc->codec_param[instance].disp_imm = params->display_delay_enable ? 1 : 0;
+ if (malone_format != MALONE_FMT_AVC)
+ hc->codec_param[instance].disp_imm = 0;
hc->codec_param[instance].dbglog_enable = 0;
iface->dbglog_desc.level = 0;
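
The amphion hunks above replace the internal b_dis_reorder flag with the standard V4L2 controls V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE and V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY, wiring them to the firmware's disp_imm ("display immediately") parameter, which vpu_malone now only honours for the AVC format. A minimal userspace sketch of setting the new controls; the device node path is illustrative and error handling is reduced to the essentials:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE,
		.value = 1,
	};
	int fd = open("/dev/video0", O_RDWR);	/* illustrative node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl))	/* enable low-delay output */
		return 1;
	ctrl.id = V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY;
	ctrl.value = 0;				/* display delay of 0 frames */
	return ioctl(fd, VIDIOC_S_CTRL, &ctrl) ? 1 : 0;
}
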

diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
index 5f74ea3b7a52..8612a48bde10 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
@@ -566,7 +566,11 @@ static int mdp_m2m_open(struct file *file)
goto err_free_ctx;
}

- ctx->id = ida_alloc(&mdp->mdp_ida, GFP_KERNEL);
+ ret = ida_alloc(&mdp->mdp_ida, GFP_KERNEL);
+ if (ret < 0)
+ goto err_unlock_mutex;
+ ctx->id = ret;
+
ctx->mdp_dev = mdp;

v4l2_fh_init(&ctx->fh, vdev);
@@ -617,6 +621,8 @@ static int mdp_m2m_open(struct file *file)
v4l2_fh_del(&ctx->fh);
err_exit_fh:
v4l2_fh_exit(&ctx->fh);
+ ida_free(&mdp->mdp_ida, ctx->id);
+err_unlock_mutex:
mutex_unlock(&mdp->m2m_lock);
err_free_ctx:
kfree(ctx);
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
index 4e84a37ecdfc..36336d169bd9 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
@@ -4,6 +4,7 @@
* Author: Ping-Hsun Wu <ping-hsun.wu@xxxxxxxxxxxx>
*/

+#include <linux/math64.h>
#include <media/v4l2-common.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
@@ -428,14 +429,15 @@ const struct mdp_format *mdp_try_fmt_mplane(struct v4l2_format *f,
u32 bpl = pix_mp->plane_fmt[i].bytesperline;
u32 min_si, max_si;
u32 si = pix_mp->plane_fmt[i].sizeimage;
+ u64 di;

bpl = clamp(bpl, min_bpl, max_bpl);
pix_mp->plane_fmt[i].bytesperline = bpl;

- min_si = (bpl * pix_mp->height * fmt->depth[i]) /
- fmt->row_depth[i];
- max_si = (bpl * s.max_height * fmt->depth[i]) /
- fmt->row_depth[i];
+ di = (u64)bpl * pix_mp->height * fmt->depth[i];
+ min_si = (u32)div_u64(di, fmt->row_depth[i]);
+ di = (u64)bpl * s.max_height * fmt->depth[i];
+ max_si = (u32)div_u64(di, fmt->row_depth[i]);

si = clamp(si, min_si, max_si);
pix_mp->plane_fmt[i].sizeimage = si;
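
The mdp3 change above widens the sizeimage computation: bytesperline * height * depth can exceed 32 bits, so the product is now formed in a u64 and divided with div_u64(), since plain 64-bit division is not available to 32-bit kernels. The difference is easy to demonstrate in isolation; the numbers below are chosen purely to make the 32-bit product wrap, not to match a real format:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bpl = 1u << 16, height = 1u << 16;
	uint32_t depth = 2, row_depth = 1;

	uint32_t narrow = bpl * height * depth / row_depth;		/* wraps in 32 bits */
	uint64_t wide = (uint64_t)bpl * height * depth / row_depth;	/* correct */

	printf("32-bit: %u, 64-bit: %llu\n", narrow, (unsigned long long)wide);
	return 0;
}
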
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
index 641f533c417f..c99705681a03 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
@@ -39,10 +39,9 @@ static bool mtk_vdec_get_cap_fmt(struct mtk_vcodec_ctx *ctx, int format_index)
{
const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
const struct mtk_video_fmt *fmt;
- struct mtk_q_data *q_data;
int num_frame_count = 0, i;
- bool ret = true;

+ fmt = &dec_pdata->vdec_formats[format_index];
for (i = 0; i < *dec_pdata->num_formats; i++) {
if (dec_pdata->vdec_formats[i].type != MTK_FMT_FRAME)
continue;
@@ -50,27 +49,10 @@ static bool mtk_vdec_get_cap_fmt(struct mtk_vcodec_ctx *ctx, int format_index)
num_frame_count++;
}

- if (num_frame_count == 1)
+ if (num_frame_count == 1 || fmt->fourcc == V4L2_PIX_FMT_MM21)
return true;

- fmt = &dec_pdata->vdec_formats[format_index];
- q_data = &ctx->q_data[MTK_Q_DATA_SRC];
- switch (q_data->fmt->fourcc) {
- case V4L2_PIX_FMT_VP8_FRAME:
- if (fmt->fourcc == V4L2_PIX_FMT_MM21)
- ret = true;
- break;
- case V4L2_PIX_FMT_H264_SLICE:
- case V4L2_PIX_FMT_VP9_FRAME:
- if (fmt->fourcc == V4L2_PIX_FMT_MM21)
- ret = false;
- break;
- default:
- ret = true;
- break;
- }
-
- return ret;
+ return false;
}

static struct mtk_q_data *mtk_vdec_get_q_data(struct mtk_vcodec_ctx *ctx,
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
index 174a6eec2f54..42df901e8beb 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
@@ -451,7 +451,8 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
if (IS_VDEC_LAT_ARCH(dev->vdec_pdata->hw_arch))
destroy_workqueue(dev->core_workqueue);
err_res:
- pm_runtime_disable(dev->pm.dev);
+ if (!dev->vdec_pdata->is_subdev_supported)
+ pm_runtime_disable(dev->pm.dev);
err_dec_pm:
mtk_vcodec_fw_release(dev->fw_handler);
return ret;
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
index 376db0e433d7..b753bf54ebd9 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
@@ -193,8 +193,16 @@ static int mtk_vdec_hw_probe(struct platform_device *pdev)
return ret;
}

+static int mtk_vdec_hw_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
static struct platform_driver mtk_vdec_driver = {
.probe = mtk_vdec_hw_probe,
+ .remove = mtk_vdec_hw_remove,
.driver = {
.name = "mtk-vdec-comp",
.of_match_table = mtk_vdec_hw_match,
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
index 035c86e7809f..29991551cf61 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
@@ -11,7 +11,7 @@
#include "mtk_vcodec_dec_pm.h"
#include "vdec_drv_if.h"

-static const struct mtk_video_fmt mtk_video_formats[] = {
+static struct mtk_video_fmt mtk_video_formats[] = {
{
.fourcc = V4L2_PIX_FMT_H264,
.type = MTK_FMT_DEC,
@@ -580,6 +580,16 @@ static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)

static void mtk_init_vdec_params(struct mtk_vcodec_ctx *ctx)
{
+ unsigned int i;
+
+ if (!(ctx->dev->dec_capability & VCODEC_CAPABILITY_4K_DISABLED)) {
+ for (i = 0; i < num_supported_formats; i++) {
+ mtk_video_formats[i].frmsize.max_width =
+ VCODEC_DEC_4K_CODED_WIDTH;
+ mtk_video_formats[i].frmsize.max_height =
+ VCODEC_DEC_4K_CODED_HEIGHT;
+ }
+ }
}

static struct vb2_ops mtk_vdec_frame_vb2_ops = {
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
index ffbcee04dc26..3000db975e5f 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
@@ -258,8 +258,10 @@ static void mtk_vdec_worker(struct work_struct *work)
if (src_buf_req)
v4l2_ctrl_request_complete(src_buf_req, &ctx->ctrl_hdl);
} else {
- v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- v4l2_m2m_buf_done(vb2_v4l2_src, state);
+ if (ret != -EAGAIN) {
+ v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(vb2_v4l2_src, state);
+ }
v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
}
}
@@ -390,14 +392,14 @@ static void mtk_vcodec_get_supported_formats(struct mtk_vcodec_ctx *ctx)
if (num_formats)
return;

- if (ctx->dev->dec_capability & MTK_VDEC_FORMAT_MM21) {
- mtk_vcodec_add_formats(V4L2_PIX_FMT_MM21, ctx);
- cap_format_count++;
- }
if (ctx->dev->dec_capability & MTK_VDEC_FORMAT_MT21C) {
mtk_vcodec_add_formats(V4L2_PIX_FMT_MT21C, ctx);
cap_format_count++;
}
+ if (ctx->dev->dec_capability & MTK_VDEC_FORMAT_MM21) {
+ mtk_vcodec_add_formats(V4L2_PIX_FMT_MM21, ctx);
+ cap_format_count++;
+ }
if (ctx->dev->dec_capability & MTK_VDEC_FORMAT_H264_SLICE) {
mtk_vcodec_add_formats(V4L2_PIX_FMT_H264_SLICE, ctx);
out_format_count++;
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
index 955b2d0c8f53..999ce7ee5fdc 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
@@ -597,7 +597,7 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
lat_buf = vdec_msg_queue_dqbuf(&inst->ctx->msg_queue.lat_ctx);
if (!lat_buf) {
mtk_vcodec_err(inst, "failed to get lat buffer");
- return -EINVAL;
+ return -EAGAIN;
}
share_info = lat_buf->private_data;
src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
index cbb6728b8a40..cf16cf2807f0 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
@@ -2070,7 +2070,7 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
lat_buf = vdec_msg_queue_dqbuf(&instance->ctx->msg_queue.lat_ctx);
if (!lat_buf) {
mtk_vcodec_err(instance, "Failed to get VP9 lat buf\n");
- return -EBUSY;
+ return -EAGAIN;
}
pfc = (struct vdec_vp9_slice_pfc *)lat_buf->private_data;
if (!pfc) {
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
index dc2004790a47..f3073d1e7f42 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
@@ -52,9 +52,26 @@ static struct list_head *vdec_get_buf_list(int hardware_index, struct vdec_lat_b
}
}

+static void vdec_msg_queue_inc(struct vdec_msg_queue *msg_queue, int hardware_index)
+{
+ if (hardware_index == MTK_VDEC_CORE)
+ atomic_inc(&msg_queue->core_list_cnt);
+ else
+ atomic_inc(&msg_queue->lat_list_cnt);
+}
+
+static void vdec_msg_queue_dec(struct vdec_msg_queue *msg_queue, int hardware_index)
+{
+ if (hardware_index == MTK_VDEC_CORE)
+ atomic_dec(&msg_queue->core_list_cnt);
+ else
+ atomic_dec(&msg_queue->lat_list_cnt);
+}
+
int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *msg_ctx, struct vdec_lat_buf *buf)
{
struct list_head *head;
+ int status;

head = vdec_get_buf_list(msg_ctx->hardware_index, buf);
if (!head) {
@@ -66,11 +83,18 @@ int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *msg_ctx, struct vdec_lat_buf
list_add_tail(head, &msg_ctx->ready_queue);
msg_ctx->ready_num++;

- if (msg_ctx->hardware_index != MTK_VDEC_CORE)
+ vdec_msg_queue_inc(&buf->ctx->msg_queue, msg_ctx->hardware_index);
+ if (msg_ctx->hardware_index != MTK_VDEC_CORE) {
wake_up_all(&msg_ctx->ready_to_use);
- else
- queue_work(buf->ctx->dev->core_workqueue,
- &buf->ctx->msg_queue.core_work);
+ } else {
+ if (buf->ctx->msg_queue.core_work_cnt <
+ atomic_read(&buf->ctx->msg_queue.core_list_cnt)) {
+ status = queue_work(buf->ctx->dev->core_workqueue,
+ &buf->ctx->msg_queue.core_work);
+ if (status)
+ buf->ctx->msg_queue.core_work_cnt++;
+ }
+ }

mtk_v4l2_debug(3, "enqueue buf type: %d addr: 0x%p num: %d",
msg_ctx->hardware_index, buf, msg_ctx->ready_num);
@@ -127,6 +151,7 @@ struct vdec_lat_buf *vdec_msg_queue_dqbuf(struct vdec_msg_queue_ctx *msg_ctx)
return NULL;
}
list_del(head);
+ vdec_msg_queue_dec(&buf->ctx->msg_queue, msg_ctx->hardware_index);

msg_ctx->ready_num--;
mtk_v4l2_debug(3, "dqueue buf type:%d addr: 0x%p num: %d",
@@ -156,11 +181,29 @@ void vdec_msg_queue_update_ube_wptr(struct vdec_msg_queue *msg_queue, uint64_t u

bool vdec_msg_queue_wait_lat_buf_full(struct vdec_msg_queue *msg_queue)
{
+ struct vdec_lat_buf *buf, *tmp;
+ struct list_head *list_core[3];
+ struct vdec_msg_queue_ctx *core_ctx;
+ int ret, i, in_core_count = 0, count = 0;
long timeout_jiff;
- int ret;
+
+ core_ctx = &msg_queue->ctx->dev->msg_queue_core_ctx;
+ spin_lock(&core_ctx->ready_lock);
+ list_for_each_entry_safe(buf, tmp, &core_ctx->ready_queue, core_list) {
+ if (buf && buf->ctx == msg_queue->ctx) {
+ list_core[in_core_count++] = &buf->core_list;
+ list_del(&buf->core_list);
+ }
+ }
+
+ for (i = 0; i < in_core_count; i++) {
+ list_add(list_core[in_core_count - (1 + i)], &core_ctx->ready_queue);
+ queue_work(msg_queue->ctx->dev->core_workqueue, &msg_queue->core_work);
+ }
+ spin_unlock(&core_ctx->ready_lock);

timeout_jiff = msecs_to_jiffies(1000 * (NUM_BUFFER_COUNT + 2));
- ret = wait_event_timeout(msg_queue->lat_ctx.ready_to_use,
+ ret = wait_event_timeout(msg_queue->ctx->msg_queue.core_dec_done,
msg_queue->lat_ctx.ready_num == NUM_BUFFER_COUNT,
timeout_jiff);
if (ret) {
@@ -168,8 +211,20 @@ bool vdec_msg_queue_wait_lat_buf_full(struct vdec_msg_queue *msg_queue)
msg_queue->lat_ctx.ready_num);
return true;
}
- mtk_v4l2_err("failed with lat buf isn't full: %d",
- msg_queue->lat_ctx.ready_num);
+
+ spin_lock(&core_ctx->ready_lock);
+ list_for_each_entry_safe(buf, tmp, &core_ctx->ready_queue, core_list) {
+ if (buf && buf->ctx == msg_queue->ctx) {
+ count++;
+ list_del(&buf->core_list);
+ }
+ }
+ spin_unlock(&core_ctx->ready_lock);
+
+ mtk_v4l2_err("failed with lat buf isn't full: list(%d %d) count:%d",
+ atomic_read(&msg_queue->lat_list_cnt),
+ atomic_read(&msg_queue->core_list_cnt), count);
+
return false;
}

@@ -206,6 +261,7 @@ static void vdec_msg_queue_core_work(struct work_struct *work)
container_of(msg_queue, struct mtk_vcodec_ctx, msg_queue);
struct mtk_vcodec_dev *dev = ctx->dev;
struct vdec_lat_buf *lat_buf;
+ int status;

lat_buf = vdec_msg_queue_dqbuf(&dev->msg_queue_core_ctx);
if (!lat_buf)
@@ -221,11 +277,18 @@ static void vdec_msg_queue_core_work(struct work_struct *work)
mtk_vcodec_dec_disable_hardware(ctx, MTK_VDEC_CORE);
vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);

- if (!list_empty(&dev->msg_queue_core_ctx.ready_queue)) {
- mtk_v4l2_debug(3, "re-schedule to decode for core: %d",
- dev->msg_queue_core_ctx.ready_num);
- queue_work(dev->core_workqueue, &msg_queue->core_work);
+ wake_up_all(&ctx->msg_queue.core_dec_done);
+ spin_lock(&dev->msg_queue_core_ctx.ready_lock);
+ lat_buf->ctx->msg_queue.core_work_cnt--;
+
+ if (lat_buf->ctx->msg_queue.core_work_cnt <
+ atomic_read(&lat_buf->ctx->msg_queue.core_list_cnt)) {
+ status = queue_work(lat_buf->ctx->dev->core_workqueue,
+ &lat_buf->ctx->msg_queue.core_work);
+ if (status)
+ lat_buf->ctx->msg_queue.core_work_cnt++;
}
+ spin_unlock(&dev->msg_queue_core_ctx.ready_lock);
}

int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
@@ -239,12 +302,18 @@ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
if (msg_queue->wdma_addr.size)
return 0;

+ msg_queue->ctx = ctx;
+ msg_queue->core_work_cnt = 0;
vdec_msg_queue_init_ctx(&msg_queue->lat_ctx, MTK_VDEC_LAT0);
INIT_WORK(&msg_queue->core_work, vdec_msg_queue_core_work);
+
+ atomic_set(&msg_queue->lat_list_cnt, 0);
+ atomic_set(&msg_queue->core_list_cnt, 0);
+ init_waitqueue_head(&msg_queue->core_dec_done);
+
msg_queue->wdma_addr.size =
vde_msg_queue_get_trans_size(ctx->picinfo.buf_w,
ctx->picinfo.buf_h);
-
err = mtk_vcodec_mem_alloc(ctx, &msg_queue->wdma_addr);
if (err) {
mtk_v4l2_err("failed to allocate wdma_addr buf");
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
index c43d427f5f54..a5d44bc97c16 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
+++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
@@ -72,6 +72,12 @@ struct vdec_lat_buf {
* @wdma_wptr_addr: ube write point
* @core_work: core hardware work
* @lat_ctx: used to store lat buffer list
+ * @ctx: pointer to mtk_vcodec_ctx
+ *
+ * @lat_list_cnt: used to record the lat list count of each instance
+ * @core_list_cnt: used to record the core list count of each instance
+ * @core_dec_done: core work queue decode done event
+ * @core_work_cnt: the number of core works in the work queue
*/
struct vdec_msg_queue {
struct vdec_lat_buf lat_buf[NUM_BUFFER_COUNT];
@@ -82,6 +88,12 @@ struct vdec_msg_queue {

struct work_struct core_work;
struct vdec_msg_queue_ctx lat_ctx;
+ struct mtk_vcodec_ctx *ctx;
+
+ atomic_t lat_list_cnt;
+ atomic_t core_list_cnt;
+ wait_queue_head_t core_dec_done;
+ int core_work_cnt;
};

/**
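[Not part of the patch: illustration of the hunks above.] The vdec_msg_queue changes gate how often core_work is scheduled: a new work item is queued only while the number of scheduled works (core_work_cnt) is below the number of buffers actually sitting on the core list (core_list_cnt), and the worker re-queues itself under ready_lock on the same condition. Below is a minimal userspace sketch of that counting scheme, assuming serialized callers (the driver serializes with ready_lock); the field names mirror the patch, but queue_work_stub() is an invented stand-in for queue_work(), and this is not driver code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct msg_queue {
	atomic_int core_list_cnt;   /* buffers waiting for the core stage */
	int core_work_cnt;          /* work items currently scheduled */
};

/* Stand-in for queue_work(): trace the call and pretend it was queued. */
static bool queue_work_stub(struct msg_queue *q)
{
	printf("queue core work (work_cnt=%d list_cnt=%d)\n",
	       q->core_work_cnt, atomic_load(&q->core_list_cnt));
	return true;
}

/* Roughly follows the qbuf path: count the buffer, then top up the workers. */
static void buf_queued(struct msg_queue *q)
{
	atomic_fetch_add(&q->core_list_cnt, 1);
	if (q->core_work_cnt < atomic_load(&q->core_list_cnt) &&
	    queue_work_stub(q))
		q->core_work_cnt++;
}

/* Roughly follows dqbuf plus the core-work tail: drop both counts and
 * re-queue only if buffers are still waiting. */
static void buf_decoded(struct msg_queue *q)
{
	atomic_fetch_sub(&q->core_list_cnt, 1);
	q->core_work_cnt--;
	if (q->core_work_cnt < atomic_load(&q->core_list_cnt) &&
	    queue_work_stub(q))
		q->core_work_cnt++;
}

int main(void)
{
	struct msg_queue q = { .core_work_cnt = 0 };

	atomic_init(&q.core_list_cnt, 0);
	buf_queued(&q);   /* first buffer: one work item scheduled */
	buf_queued(&q);   /* second buffer: a second work item */
	buf_decoded(&q);  /* worker done with one buffer, nothing left over */
	buf_decoded(&q);  /* worker done with the other */
	return 0;
}
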
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 4ceaba37e2e5..1a52c2ea2da5 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -31,15 +31,15 @@
*/
static const struct venus_format vdec_formats[] = {
{
- .pixfmt = V4L2_PIX_FMT_QC08C,
+ .pixfmt = V4L2_PIX_FMT_NV12,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
}, {
- .pixfmt = V4L2_PIX_FMT_QC10C,
+ .pixfmt = V4L2_PIX_FMT_QC08C,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
- },{
- .pixfmt = V4L2_PIX_FMT_NV12,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_QC10C,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
}, {
@@ -526,6 +526,7 @@ static int
vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
{
struct venus_inst *inst = to_inst(file);
+ struct vb2_queue *dst_vq;
struct hfi_frame_data fdata = {0};
int ret;

@@ -556,6 +557,13 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
inst->codec_state = VENUS_DEC_STATE_DRAIN;
inst->drain_active = true;
}
+ } else if (cmd->cmd == V4L2_DEC_CMD_START &&
+ inst->codec_state == VENUS_DEC_STATE_STOPPED) {
+ dst_vq = v4l2_m2m_get_vq(inst->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ vb2_clear_last_buffer_dequeued(dst_vq);
+
+ inst->codec_state = VENUS_DEC_STATE_DECODING;
}

unlock:
diff --git a/drivers/media/platform/renesas/rcar_fdp1.c b/drivers/media/platform/renesas/rcar_fdp1.c
index 37ecf489d112..dea22e357905 100644
--- a/drivers/media/platform/renesas/rcar_fdp1.c
+++ b/drivers/media/platform/renesas/rcar_fdp1.c
@@ -2313,8 +2313,10 @@ static int fdp1_probe(struct platform_device *pdev)

/* Determine our clock rate */
clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto put_dev;
+ }

fdp1->clk_rate = clk_get_rate(clk);
clk_put(clk);
@@ -2323,7 +2325,7 @@ static int fdp1_probe(struct platform_device *pdev)
ret = v4l2_device_register(&pdev->dev, &fdp1->v4l2_dev);
if (ret) {
v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
- return ret;
+ goto put_dev;
}

/* M2M registration */
@@ -2393,6 +2395,8 @@ static int fdp1_probe(struct platform_device *pdev)
unreg_dev:
v4l2_device_unregister(&fdp1->v4l2_dev);

+put_dev:
+ rcar_fcp_put(fdp1->fcp);
return ret;
}

@@ -2404,6 +2408,7 @@ static int fdp1_remove(struct platform_device *pdev)
video_unregister_device(&fdp1->vfd);
v4l2_device_unregister(&fdp1->v4l2_dev);
pm_runtime_disable(&pdev->dev);
+ rcar_fcp_put(fdp1->fcp);

return 0;
}
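[Not part of the patch: illustration of the hunks above.] The fdp1_probe()/fdp1_remove() changes plug a reference leak: a reference taken earlier in probe (not shown in the hunk) was never released on the clk_get() and v4l2_device_register() error paths, nor in remove(). A minimal, self-contained sketch of the same goto-unwind idiom follows; resource_get()/resource_put() and step_that_may_fail() are invented stand-ins for illustration, not the rcar_fcp API, and on success the reference is intentionally kept for a later remove-style teardown.

#include <stdio.h>
#include <stdlib.h>

struct resource {
	int refs;
};

static struct resource *resource_get(void)
{
	struct resource *r = calloc(1, sizeof(*r));

	if (r)
		r->refs = 1;
	return r;
}

static void resource_put(struct resource *r)
{
	if (r && --r->refs == 0)
		free(r);
}

static int step_that_may_fail(int fail)
{
	return fail ? -1 : 0;
}

static int probe(int fail_early, int fail_late)
{
	struct resource *res = resource_get();
	int ret;

	if (!res)
		return -1;

	ret = step_that_may_fail(fail_early);
	if (ret)
		goto put_res;   /* early failure still drops the reference */

	ret = step_that_may_fail(fail_late);
	if (ret)
		goto put_res;   /* later failures share the same exit path */

	/* Success: keep the reference; remove() would call resource_put(). */
	return 0;

put_res:
	resource_put(res);
	return ret;
}

int main(void)
{
	printf("early failure: %d\n", probe(1, 0));
	printf("late failure:  %d\n", probe(0, 1));
	printf("success:       %d\n", probe(0, 0));
	return 0;
}
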
diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
index dd74cc43920d..080da254b910 100644
--- a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
@@ -1309,6 +1309,8 @@ static int bdisp_probe(struct platform_device *pdev)
init_waitqueue_head(&bdisp->irq_queue);
INIT_DELAYED_WORK(&bdisp->timeout_work, bdisp_irq_timeout);
bdisp->work_queue = create_workqueue(BDISP_NAME);
+ if (!bdisp->work_queue)
+ return -ENOMEM;

spin_lock_init(&bdisp->slock);
mutex_init(&bdisp->lock);
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index a56c844d7f81..16795e07dc10 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -107,6 +107,8 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
rcdev->map_name = RC_MAP_EMPTY;

gpio_dev->rcdev = rcdev;
+ if (of_property_read_bool(np, "wakeup-source"))
+ device_init_wakeup(dev, true);

rc = devm_rc_register_device(dev, rcdev);
if (rc < 0) {
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index 2f1b718a9189..008a2a3e312e 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -414,7 +414,8 @@ static void v4l2_async_cleanup(struct v4l2_subdev *sd)

/* Unbind all sub-devices in the notifier tree. */
static void
-v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier,
+ bool readd)
{
struct v4l2_subdev *sd, *tmp;

@@ -423,9 +424,11 @@ v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
v4l2_async_find_subdev_notifier(sd);

if (subdev_notifier)
- v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
+ v4l2_async_nf_unbind_all_subdevs(subdev_notifier, true);

v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
+ if (readd)
+ list_add_tail(&sd->asd->list, &notifier->waiting);
v4l2_async_cleanup(sd);

list_move(&sd->async_list, &subdev_list);
@@ -557,7 +560,7 @@ static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
/*
* On failure, unbind all sub-devices registered through this notifier.
*/
- v4l2_async_nf_unbind_all_subdevs(notifier);
+ v4l2_async_nf_unbind_all_subdevs(notifier, false);

err_unlock:
mutex_unlock(&list_lock);
@@ -607,7 +610,7 @@ __v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
return;

- v4l2_async_nf_unbind_all_subdevs(notifier);
+ v4l2_async_nf_unbind_all_subdevs(notifier, false);

notifier->sd = NULL;
notifier->v4l2_dev = NULL;
@@ -805,7 +808,7 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd)
*/
subdev_notifier = v4l2_async_find_subdev_notifier(sd);
if (subdev_notifier)
- v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
+ v4l2_async_nf_unbind_all_subdevs(subdev_notifier, false);

if (sd->asd)
v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
index 941b0267d09d..5c4af05ed044 100644
--- a/drivers/mfd/arizona-spi.c
+++ b/drivers/mfd/arizona-spi.c
@@ -277,6 +277,7 @@ static const struct of_device_id arizona_spi_of_match[] = {
{ .compatible = "cirrus,cs47l24", .data = (void *)CS47L24 },
{},
};
+MODULE_DEVICE_TABLE(of, arizona_spi_of_match);
#endif

static struct spi_driver arizona_spi_driver = {
diff --git a/drivers/mfd/ocelot-spi.c b/drivers/mfd/ocelot-spi.c
index 2ecd271de2fb..85021f94e587 100644
--- a/drivers/mfd/ocelot-spi.c
+++ b/drivers/mfd/ocelot-spi.c
@@ -130,6 +130,7 @@ static const struct regmap_config ocelot_spi_regmap_config = {

.write_flag_mask = 0x80,

+ .use_single_read = true,
.use_single_write = true,
.can_multi_write = false,

diff --git a/drivers/mfd/tqmx86.c b/drivers/mfd/tqmx86.c
index 7ae906ff8e35..fac02875fe7d 100644
--- a/drivers/mfd/tqmx86.c
+++ b/drivers/mfd/tqmx86.c
@@ -16,8 +16,8 @@
#include <linux/platform_data/i2c-ocores.h>
#include <linux/platform_device.h>

-#define TQMX86_IOBASE 0x160
-#define TQMX86_IOSIZE 0x3f
+#define TQMX86_IOBASE 0x180
+#define TQMX86_IOSIZE 0x20
#define TQMX86_IOBASE_I2C 0x1a0
#define TQMX86_IOSIZE_I2C 0xa
#define TQMX86_IOBASE_WATCHDOG 0x18b
@@ -25,14 +25,14 @@
#define TQMX86_IOBASE_GPIO 0x18d
#define TQMX86_IOSIZE_GPIO 0x4

-#define TQMX86_REG_BOARD_ID 0x20
+#define TQMX86_REG_BOARD_ID 0x00
#define TQMX86_REG_BOARD_ID_E38M 1
#define TQMX86_REG_BOARD_ID_50UC 2
#define TQMX86_REG_BOARD_ID_E38C 3
#define TQMX86_REG_BOARD_ID_60EB 4
-#define TQMX86_REG_BOARD_ID_E39M 5
-#define TQMX86_REG_BOARD_ID_E39C 6
-#define TQMX86_REG_BOARD_ID_E39x 7
+#define TQMX86_REG_BOARD_ID_E39MS 5
+#define TQMX86_REG_BOARD_ID_E39C1 6
+#define TQMX86_REG_BOARD_ID_E39C2 7
#define TQMX86_REG_BOARD_ID_70EB 8
#define TQMX86_REG_BOARD_ID_80UC 9
#define TQMX86_REG_BOARD_ID_110EB 11
@@ -40,18 +40,18 @@
#define TQMX86_REG_BOARD_ID_E40S 13
#define TQMX86_REG_BOARD_ID_E40C1 14
#define TQMX86_REG_BOARD_ID_E40C2 15
-#define TQMX86_REG_BOARD_REV 0x21
-#define TQMX86_REG_IO_EXT_INT 0x26
+#define TQMX86_REG_BOARD_REV 0x01
+#define TQMX86_REG_IO_EXT_INT 0x06
#define TQMX86_REG_IO_EXT_INT_NONE 0
#define TQMX86_REG_IO_EXT_INT_7 1
#define TQMX86_REG_IO_EXT_INT_9 2
#define TQMX86_REG_IO_EXT_INT_12 3
#define TQMX86_REG_IO_EXT_INT_MASK 0x3
#define TQMX86_REG_IO_EXT_INT_GPIO_SHIFT 4
+#define TQMX86_REG_SAUC 0x17

-#define TQMX86_REG_I2C_DETECT 0x47
+#define TQMX86_REG_I2C_DETECT 0x1a7
#define TQMX86_REG_I2C_DETECT_SOFT 0xa5
-#define TQMX86_REG_I2C_INT_EN 0x49

static uint gpio_irq;
module_param(gpio_irq, uint, 0);
@@ -111,7 +111,7 @@ static const struct mfd_cell tqmx86_devs[] = {
},
};

-static const char *tqmx86_board_id_to_name(u8 board_id)
+static const char *tqmx86_board_id_to_name(u8 board_id, u8 sauc)
{
switch (board_id) {
case TQMX86_REG_BOARD_ID_E38M:
@@ -122,12 +122,12 @@ static const char *tqmx86_board_id_to_name(u8 board_id)
return "TQMxE38C";
case TQMX86_REG_BOARD_ID_60EB:
return "TQMx60EB";
- case TQMX86_REG_BOARD_ID_E39M:
- return "TQMxE39M";
- case TQMX86_REG_BOARD_ID_E39C:
- return "TQMxE39C";
- case TQMX86_REG_BOARD_ID_E39x:
- return "TQMxE39x";
+ case TQMX86_REG_BOARD_ID_E39MS:
+ return (sauc == 0xff) ? "TQMxE39M" : "TQMxE39S";
+ case TQMX86_REG_BOARD_ID_E39C1:
+ return "TQMxE39C1";
+ case TQMX86_REG_BOARD_ID_E39C2:
+ return "TQMxE39C2";
case TQMX86_REG_BOARD_ID_70EB:
return "TQMx70EB";
case TQMX86_REG_BOARD_ID_80UC:
@@ -160,9 +160,9 @@ static int tqmx86_board_id_to_clk_rate(struct device *dev, u8 board_id)
case TQMX86_REG_BOARD_ID_E40C1:
case TQMX86_REG_BOARD_ID_E40C2:
return 24000;
- case TQMX86_REG_BOARD_ID_E39M:
- case TQMX86_REG_BOARD_ID_E39C:
- case TQMX86_REG_BOARD_ID_E39x:
+ case TQMX86_REG_BOARD_ID_E39MS:
+ case TQMX86_REG_BOARD_ID_E39C1:
+ case TQMX86_REG_BOARD_ID_E39C2:
return 25000;
case TQMX86_REG_BOARD_ID_E38M:
case TQMX86_REG_BOARD_ID_E38C:
@@ -176,7 +176,7 @@ static int tqmx86_board_id_to_clk_rate(struct device *dev, u8 board_id)

static int tqmx86_probe(struct platform_device *pdev)
{
- u8 board_id, rev, i2c_det, io_ext_int_val;
+ u8 board_id, sauc, rev, i2c_det, io_ext_int_val;
struct device *dev = &pdev->dev;
u8 gpio_irq_cfg, readback;
const char *board_name;
@@ -206,14 +206,20 @@ static int tqmx86_probe(struct platform_device *pdev)
return -ENOMEM;

board_id = ioread8(io_base + TQMX86_REG_BOARD_ID);
- board_name = tqmx86_board_id_to_name(board_id);
+ sauc = ioread8(io_base + TQMX86_REG_SAUC);
+ board_name = tqmx86_board_id_to_name(board_id, sauc);
rev = ioread8(io_base + TQMX86_REG_BOARD_REV);

dev_info(dev,
"Found %s - Board ID %d, PCB Revision %d, PLD Revision %d\n",
board_name, board_id, rev >> 4, rev & 0xf);

- i2c_det = ioread8(io_base + TQMX86_REG_I2C_DETECT);
+ /*
+ * The I2C_DETECT register is in the range assigned to the I2C driver
+ * later, so we don't extend TQMX86_IOSIZE. Use inb() for this one-off
+ * access instead of ioport_map + unmap.
+ */
+ i2c_det = inb(TQMX86_REG_I2C_DETECT);

if (gpio_irq_cfg) {
io_ext_int_val =
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 857b9851402a..abe79f6fd2a7 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -165,10 +165,16 @@ static int vmci_host_close(struct inode *inode, struct file *filp)
static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
{
struct vmci_host_dev *vmci_host_dev = filp->private_data;
- struct vmci_ctx *context = vmci_host_dev->context;
+ struct vmci_ctx *context;
__poll_t mask = 0;

if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+ /*
+ * Read context only if ct_type == VMCIOBJ_CONTEXT to make
+ * sure that context is initialized
+ */
+ context = vmci_host_dev->context;
+
/* Check for VMCI calls to this VM context. */
if (wait)
poll_wait(filp, &context->host_context.wait_queue,
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index e0266638381d..6ae68e379f7e 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -126,6 +126,7 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
return ret;
}
}
+
/*
* The DAT[3:0] line signal levels and the CMD line signal level are
* not compatible with standard SDHC register. The line signal levels
@@ -137,6 +138,16 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
ret = value & 0x000fffff;
ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
ret |= (value << 1) & SDHCI_CMD_LVL;
+
+ /*
+ * Some controllers have unreliable Data Line Active
+ * bit for commands with busy signal. This affects
+ * Command Inhibit (data) bit. Just ignore it since
+ * MMC core driver has already polled card status
+ * with CMD13 after any command with busy signal.
+ */
+ if (esdhc->quirk_ignore_data_inhibit)
+ ret &= ~SDHCI_DATA_INHIBIT;
return ret;
}

@@ -151,19 +162,6 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
return ret;
}

- /*
- * Some controllers have unreliable Data Line Active
- * bit for commands with busy signal. This affects
- * Command Inhibit (data) bit. Just ignore it since
- * MMC core driver has already polled card status
- * with CMD13 after any command with busy siganl.
- */
- if ((spec_reg == SDHCI_PRESENT_STATE) &&
- (esdhc->quirk_ignore_data_inhibit == true)) {
- ret = value & ~SDHCI_DATA_INHIBIT;
- return ret;
- }
-
ret = value;
return ret;
}
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 686ada1a63e9..24518e5e1b5e 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -884,8 +884,8 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,

/* OTP nvmem will be registered on the physical device */
config.dev = mtd->dev.parent;
- config.name = kasprintf(GFP_KERNEL, "%s-%s", dev_name(&mtd->dev), compatible);
- config.id = NVMEM_DEVID_NONE;
+ config.name = compatible;
+ config.id = NVMEM_DEVID_AUTO;
config.owner = THIS_MODULE;
config.type = NVMEM_TYPE_OTP;
config.root_only = true;
@@ -901,7 +901,6 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
nvmem = NULL;

of_node_put(np);
- kfree(config.name);

return nvmem;
}
@@ -936,6 +935,7 @@ static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,

static int mtd_otp_nvmem_add(struct mtd_info *mtd)
{
+ struct device *dev = mtd->dev.parent;
struct nvmem_device *nvmem;
ssize_t size;
int err;
@@ -949,7 +949,7 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
mtd_nvmem_user_otp_reg_read);
if (IS_ERR(nvmem)) {
- dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
+ dev_err(dev, "Failed to register OTP NVMEM device\n");
return PTR_ERR(nvmem);
}
mtd->otp_user_nvmem = nvmem;
@@ -967,7 +967,7 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
mtd_nvmem_fact_otp_reg_read);
if (IS_ERR(nvmem)) {
- dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
+ dev_err(dev, "Failed to register OTP NVMEM device\n");
err = PTR_ERR(nvmem);
goto err;
}
@@ -1019,10 +1019,14 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,

mtd_set_dev_defaults(mtd);

+ ret = mtd_otp_nvmem_add(mtd);
+ if (ret)
+ goto out;
+
if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
ret = add_mtd_device(mtd);
if (ret)
- return ret;
+ goto out;
}

/* Prefer parsed partitions over driver-provided fallback */
@@ -1057,9 +1061,12 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
register_reboot_notifier(&mtd->reboot_notifier);
}

- ret = mtd_otp_nvmem_add(mtd);
-
out:
+ if (ret) {
+ nvmem_unregister(mtd->otp_user_nvmem);
+ nvmem_unregister(mtd->otp_factory_nvmem);
+ }
+
if (ret && device_is_registered(&mtd->dev))
del_mtd_device(mtd);

diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 75e694791d8d..9a7bea365acb 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -2696,6 +2696,7 @@ static int spi_nor_quad_enable(struct spi_nor *nor)

static int spi_nor_init(struct spi_nor *nor)
{
+ struct spi_nor_flash_parameter *params = nor->params;
int err;

err = spi_nor_octal_dtr_enable(nor, true);
@@ -2737,9 +2738,10 @@ static int spi_nor_init(struct spi_nor *nor)
*/
WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
"enabling reset hack; may not recover from unexpected reboots\n");
- err = nor->params->set_4byte_addr_mode(nor, true);
+ err = params->set_4byte_addr_mode(nor, true);
if (err && err != -ENOTSUPP)
return err;
+ params->addr_mode_nbytes = 4;
}

return 0;
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 09c408c45a62..4e1d80746b04 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -946,7 +946,7 @@ static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
int offset, int len)
{
struct ubi_device *ubi = vol->ubi;
- int pnum, opnum, err, vol_id = vol->vol_id;
+ int pnum, opnum, err, err2, vol_id = vol->vol_id;

pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
@@ -981,10 +981,19 @@ static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
out_put:
up_read(&ubi->fm_eba_sem);

- if (err && pnum >= 0)
- err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
- else if (!err && opnum >= 0)
- err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
+ if (err && pnum >= 0) {
+ err2 = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+ if (err2) {
+ ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
+ pnum, err2);
+ }
+ } else if (!err && opnum >= 0) {
+ err2 = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
+ if (err2) {
+ ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
+ opnum, err2);
+ }
+ }

return err;
}
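[Not part of the patch: illustration of the hunk above.] The try_write_vid_and_data() change keeps the cleanup path from clobbering the primary error: the result of ubi_wl_put_peb() now lands in a separate err2 and is only warned about, so the caller still sees the original write error. A minimal sketch of that pattern, using placeholder functions rather than UBI APIs and error values chosen only for the demo:

#include <stdio.h>

/* Placeholder "write" and "return the block" steps; the negative values
 * are arbitrary stand-ins for errno-style codes. */
static int do_write(int fail)
{
	return fail ? -5 : 0;
}

static int put_block(int fail)
{
	return fail ? -12 : 0;
}

static int try_write(int write_fails, int put_fails)
{
	int err, err2;

	err = do_write(write_fails);
	if (err) {
		/* Report the cleanup failure, but keep the original error. */
		err2 = put_block(put_fails);
		if (err2)
			fprintf(stderr, "failed to return block, error %d\n",
				err2);
	}
	return err;
}

int main(void)
{
	/* Before the fix, the put result here would have replaced -5. */
	printf("write and put both fail: %d\n", try_write(1, 1));
	printf("everything succeeds:     %d\n", try_write(0, 0));
	return 0;
}
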
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index fbcd5c2b13ae..7a6166a0c9bc 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -1365,7 +1365,6 @@ static void qca8k_pcs_get_state(struct phylink_pcs *pcs,

state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
state->an_complete = state->link;
- state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
DUPLEX_HALF;

diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 823a329a921f..0dd391c84c13 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -651,7 +651,7 @@ static int nmclan_config(struct pcmcia_device *link)
} else {
pr_notice("mace id not found: %x %x should be 0x40 0x?9\n",
sig[0], sig[1]);
- return -ENODEV;
+ goto failed;
}
}

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index d8fb7d4ebd51..981cc3248047 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -283,7 +283,8 @@ static int dpaa_stop(struct net_device *net_dev)
{
struct mac_device *mac_dev;
struct dpaa_priv *priv;
- int i, err, error;
+ int i, error;
+ int err = 0;

priv = netdev_priv(net_dev);
mac_dev = priv->mac_dev;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 9a60d6b207f7..a81f918091cc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1286,9 +1286,9 @@ void i40e_ptp_stop(struct i40e_pf *pf);
int i40e_ptp_alloc_pins(struct i40e_pf *pf);
int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
-i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
+int i40e_get_partition_bw_setting(struct i40e_pf *pf);
+int i40e_set_partition_bw_setting(struct i40e_pf *pf);
+int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);

void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 42439f725aa4..86fac8f959bb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -47,9 +47,9 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
* i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ int ret_code;

ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
i40e_mem_atq_ring,
@@ -74,9 +74,9 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
* i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ int ret_code;

ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
i40e_mem_arq_ring,
@@ -115,11 +115,11 @@ static void i40e_free_adminq_arq(struct i40e_hw *hw)
* i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
- i40e_status ret_code;
struct i40e_aq_desc *desc;
struct i40e_dma_mem *bi;
+ int ret_code;
int i;

/* We'll be allocating the buffer info memory first, then we can
@@ -182,10 +182,10 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
* i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+static int i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
- i40e_status ret_code;
struct i40e_dma_mem *bi;
+ int ret_code;
int i;

/* No mapped memory needed yet, just the buffer info structures */
@@ -266,9 +266,9 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
*
* Configure base address and length registers for the transmit queue
**/
-static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
+static int i40e_config_asq_regs(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
u32 reg = 0;

/* Clear Head and Tail */
@@ -295,9 +295,9 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
*
* Configure base address and length registers for the receive (event queue)
**/
-static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
+static int i40e_config_arq_regs(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
u32 reg = 0;

/* Clear Head and Tail */
@@ -334,9 +334,9 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-static i40e_status i40e_init_asq(struct i40e_hw *hw)
+static int i40e_init_asq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;

if (hw->aq.asq.count > 0) {
/* queue already initialized */
@@ -393,9 +393,9 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-static i40e_status i40e_init_arq(struct i40e_hw *hw)
+static int i40e_init_arq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;

if (hw->aq.arq.count > 0) {
/* queue already initialized */
@@ -445,9 +445,9 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
*
* The main shutdown routine for the Admin Send Queue
**/
-static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+static int i40e_shutdown_asq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;

mutex_lock(&hw->aq.asq_mutex);

@@ -479,9 +479,9 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
*
* The main shutdown routine for the Admin Receive Queue
**/
-static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+static int i40e_shutdown_arq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;

mutex_lock(&hw->aq.arq_mutex);

@@ -582,12 +582,12 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
* - hw->aq.arq_buf_size
* - hw->aq.asq_buf_size
**/
-i40e_status i40e_init_adminq(struct i40e_hw *hw)
+int i40e_init_adminq(struct i40e_hw *hw)
{
u16 cfg_ptr, oem_hi, oem_lo;
u16 eetrack_lo, eetrack_hi;
- i40e_status ret_code;
int retry = 0;
+ int ret_code;

/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
@@ -780,7 +780,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
**/
-static i40e_status
+static int
i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
@@ -788,12 +788,12 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context)
{
- i40e_status status = 0;
struct i40e_dma_mem *dma_buff = NULL;
struct i40e_asq_cmd_details *details;
struct i40e_aq_desc *desc_on_ring;
bool cmd_completed = false;
u16 retval = 0;
+ int status = 0;
u32 val = 0;

if (hw->aq.asq.count == 0) {
@@ -984,7 +984,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
* Acquires the lock and calls the main send command execution
* routine.
**/
-i40e_status
+int
i40e_asq_send_command_atomic(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
@@ -992,7 +992,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context)
{
- i40e_status status;
+ int status;

mutex_lock(&hw->aq.asq_mutex);
status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
@@ -1003,7 +1003,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw,
return status;
}

-i40e_status
+int
i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
@@ -1026,7 +1026,7 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
* routine. Returns the last Admin Queue status in aq_status
* to avoid race conditions in access to hw->aq.asq_last_status.
**/
-i40e_status
+int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
@@ -1035,7 +1035,7 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
bool is_atomic_context,
enum i40e_admin_queue_err *aq_status)
{
- i40e_status status;
+ int status;

mutex_lock(&hw->aq.asq_mutex);
status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
@@ -1048,7 +1048,7 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
return status;
}

-i40e_status
+int
i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
@@ -1084,14 +1084,14 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
* the contents through e. It can also return how many events are
* left to process through 'pending'
**/
-i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
- struct i40e_arq_event_info *e,
- u16 *pending)
+int i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
{
- i40e_status ret_code = 0;
u16 ntc = hw->aq.arq.next_to_clean;
struct i40e_aq_desc *desc;
struct i40e_dma_mem *bi;
+ int ret_code = 0;
u16 desc_idx;
u16 datalen;
u16 flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index cb8689222c8b..a6c9a9e343d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -20,16 +20,16 @@ enum i40e_memory_type {
};

/* prototype for functions used for dynamic memory allocation */
-i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
- struct i40e_dma_mem *mem,
- enum i40e_memory_type type,
- u64 size, u32 alignment);
-i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
- struct i40e_dma_mem *mem);
-i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
- struct i40e_virt_mem *mem,
- u32 size);
-i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
- struct i40e_virt_mem *mem);
+int i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+int i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+int i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+int i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);

#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 10d7a982a5b9..8bcb98b85e3d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -541,7 +541,7 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev,
{
struct i40e_pf *pf = ldev->pf;
struct i40e_hw *hw = &pf->hw;
- i40e_status err;
+ int err;

err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP,
0, msg, len, NULL);
@@ -674,7 +674,7 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
bool update = true;
- i40e_status err;
+ int err;

/* TODO: for now do not allow setting VF's VSI setting */
if (is_vf)
@@ -686,8 +686,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (err) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "couldn't get PF vsi config, err %d aq_err %s\n",
+ err,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
@@ -714,8 +714,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (err) {
dev_info(&pf->pdev->dev,
- "update VSI ctxt for PE failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "update VSI ctxt for PE failed, err %d aq_err %s\n",
+ err,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 4f01e2a6b6bb..82e06272158d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -14,9 +14,9 @@
* This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
**/
-i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+int i40e_set_mac_type(struct i40e_hw *hw)
{
- i40e_status status = 0;
+ int status = 0;

if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
@@ -124,154 +124,6 @@ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
return hw->err_str;
}

-/**
- * i40e_stat_str - convert status err code to a string
- * @hw: pointer to the HW structure
- * @stat_err: the status error code to convert
- **/
-const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
-{
- switch (stat_err) {
- case 0:
- return "OK";
- case I40E_ERR_NVM:
- return "I40E_ERR_NVM";
- case I40E_ERR_NVM_CHECKSUM:
- return "I40E_ERR_NVM_CHECKSUM";
- case I40E_ERR_PHY:
- return "I40E_ERR_PHY";
- case I40E_ERR_CONFIG:
- return "I40E_ERR_CONFIG";
- case I40E_ERR_PARAM:
- return "I40E_ERR_PARAM";
- case I40E_ERR_MAC_TYPE:
- return "I40E_ERR_MAC_TYPE";
- case I40E_ERR_UNKNOWN_PHY:
- return "I40E_ERR_UNKNOWN_PHY";
- case I40E_ERR_LINK_SETUP:
- return "I40E_ERR_LINK_SETUP";
- case I40E_ERR_ADAPTER_STOPPED:
- return "I40E_ERR_ADAPTER_STOPPED";
- case I40E_ERR_INVALID_MAC_ADDR:
- return "I40E_ERR_INVALID_MAC_ADDR";
- case I40E_ERR_DEVICE_NOT_SUPPORTED:
- return "I40E_ERR_DEVICE_NOT_SUPPORTED";
- case I40E_ERR_PRIMARY_REQUESTS_PENDING:
- return "I40E_ERR_PRIMARY_REQUESTS_PENDING";
- case I40E_ERR_INVALID_LINK_SETTINGS:
- return "I40E_ERR_INVALID_LINK_SETTINGS";
- case I40E_ERR_AUTONEG_NOT_COMPLETE:
- return "I40E_ERR_AUTONEG_NOT_COMPLETE";
- case I40E_ERR_RESET_FAILED:
- return "I40E_ERR_RESET_FAILED";
- case I40E_ERR_SWFW_SYNC:
- return "I40E_ERR_SWFW_SYNC";
- case I40E_ERR_NO_AVAILABLE_VSI:
- return "I40E_ERR_NO_AVAILABLE_VSI";
- case I40E_ERR_NO_MEMORY:
- return "I40E_ERR_NO_MEMORY";
- case I40E_ERR_BAD_PTR:
- return "I40E_ERR_BAD_PTR";
- case I40E_ERR_RING_FULL:
- return "I40E_ERR_RING_FULL";
- case I40E_ERR_INVALID_PD_ID:
- return "I40E_ERR_INVALID_PD_ID";
- case I40E_ERR_INVALID_QP_ID:
- return "I40E_ERR_INVALID_QP_ID";
- case I40E_ERR_INVALID_CQ_ID:
- return "I40E_ERR_INVALID_CQ_ID";
- case I40E_ERR_INVALID_CEQ_ID:
- return "I40E_ERR_INVALID_CEQ_ID";
- case I40E_ERR_INVALID_AEQ_ID:
- return "I40E_ERR_INVALID_AEQ_ID";
- case I40E_ERR_INVALID_SIZE:
- return "I40E_ERR_INVALID_SIZE";
- case I40E_ERR_INVALID_ARP_INDEX:
- return "I40E_ERR_INVALID_ARP_INDEX";
- case I40E_ERR_INVALID_FPM_FUNC_ID:
- return "I40E_ERR_INVALID_FPM_FUNC_ID";
- case I40E_ERR_QP_INVALID_MSG_SIZE:
- return "I40E_ERR_QP_INVALID_MSG_SIZE";
- case I40E_ERR_QP_TOOMANY_WRS_POSTED:
- return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
- case I40E_ERR_INVALID_FRAG_COUNT:
- return "I40E_ERR_INVALID_FRAG_COUNT";
- case I40E_ERR_QUEUE_EMPTY:
- return "I40E_ERR_QUEUE_EMPTY";
- case I40E_ERR_INVALID_ALIGNMENT:
- return "I40E_ERR_INVALID_ALIGNMENT";
- case I40E_ERR_FLUSHED_QUEUE:
- return "I40E_ERR_FLUSHED_QUEUE";
- case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
- return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
- case I40E_ERR_INVALID_IMM_DATA_SIZE:
- return "I40E_ERR_INVALID_IMM_DATA_SIZE";
- case I40E_ERR_TIMEOUT:
- return "I40E_ERR_TIMEOUT";
- case I40E_ERR_OPCODE_MISMATCH:
- return "I40E_ERR_OPCODE_MISMATCH";
- case I40E_ERR_CQP_COMPL_ERROR:
- return "I40E_ERR_CQP_COMPL_ERROR";
- case I40E_ERR_INVALID_VF_ID:
- return "I40E_ERR_INVALID_VF_ID";
- case I40E_ERR_INVALID_HMCFN_ID:
- return "I40E_ERR_INVALID_HMCFN_ID";
- case I40E_ERR_BACKING_PAGE_ERROR:
- return "I40E_ERR_BACKING_PAGE_ERROR";
- case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
- return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
- case I40E_ERR_INVALID_PBLE_INDEX:
- return "I40E_ERR_INVALID_PBLE_INDEX";
- case I40E_ERR_INVALID_SD_INDEX:
- return "I40E_ERR_INVALID_SD_INDEX";
- case I40E_ERR_INVALID_PAGE_DESC_INDEX:
- return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
- case I40E_ERR_INVALID_SD_TYPE:
- return "I40E_ERR_INVALID_SD_TYPE";
- case I40E_ERR_MEMCPY_FAILED:
- return "I40E_ERR_MEMCPY_FAILED";
- case I40E_ERR_INVALID_HMC_OBJ_INDEX:
- return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
- case I40E_ERR_INVALID_HMC_OBJ_COUNT:
- return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
- case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
- return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
- case I40E_ERR_SRQ_ENABLED:
- return "I40E_ERR_SRQ_ENABLED";
- case I40E_ERR_ADMIN_QUEUE_ERROR:
- return "I40E_ERR_ADMIN_QUEUE_ERROR";
- case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
- return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
- case I40E_ERR_BUF_TOO_SHORT:
- return "I40E_ERR_BUF_TOO_SHORT";
- case I40E_ERR_ADMIN_QUEUE_FULL:
- return "I40E_ERR_ADMIN_QUEUE_FULL";
- case I40E_ERR_ADMIN_QUEUE_NO_WORK:
- return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
- case I40E_ERR_BAD_IWARP_CQE:
- return "I40E_ERR_BAD_IWARP_CQE";
- case I40E_ERR_NVM_BLANK_MODE:
- return "I40E_ERR_NVM_BLANK_MODE";
- case I40E_ERR_NOT_IMPLEMENTED:
- return "I40E_ERR_NOT_IMPLEMENTED";
- case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
- return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
- case I40E_ERR_DIAG_TEST_FAILED:
- return "I40E_ERR_DIAG_TEST_FAILED";
- case I40E_ERR_NOT_READY:
- return "I40E_ERR_NOT_READY";
- case I40E_NOT_SUPPORTED:
- return "I40E_NOT_SUPPORTED";
- case I40E_ERR_FIRMWARE_API_VERSION:
- return "I40E_ERR_FIRMWARE_API_VERSION";
- case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
- return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
- }
-
- snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
- return hw->err_str;
-}
-
/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
@@ -355,13 +207,13 @@ bool i40e_check_asq_alive(struct i40e_hw *hw)
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well.
**/
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading)
+int i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
{
struct i40e_aq_desc desc;
struct i40e_aqc_queue_shutdown *cmd =
(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_queue_shutdown);
@@ -384,15 +236,15 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
*
* Internal function to get or set RSS look up table
**/
-static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
- u16 vsi_id, bool pf_lut,
- u8 *lut, u16 lut_size,
- bool set)
+static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
{
- i40e_status status;
struct i40e_aq_desc desc;
struct i40e_aqc_get_set_rss_lut *cmd_resp =
(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+ int status;

if (set)
i40e_fill_default_direct_cmd_desc(&desc,
@@ -437,8 +289,8 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
*
* get the RSS lookup table, PF or VSI type
**/
-i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
- bool pf_lut, u8 *lut, u16 lut_size)
+int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
{
return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
false);
@@ -454,8 +306,8 @@ i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
*
* set the RSS lookup table, PF or VSI type
**/
-i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
- bool pf_lut, u8 *lut, u16 lut_size)
+int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
{
return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}
@@ -469,16 +321,16 @@ i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
*
* get the RSS key per VSI
**/
-static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
- u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key,
- bool set)
+static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
{
- i40e_status status;
struct i40e_aq_desc desc;
struct i40e_aqc_get_set_rss_key *cmd_resp =
(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+ int status;

if (set)
i40e_fill_default_direct_cmd_desc(&desc,
@@ -509,9 +361,9 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
* @key: pointer to key info struct
*
**/
-i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
- u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key)
+int i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
{
return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}
@@ -524,9 +376,9 @@ i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
*
* set the RSS key per VSI
**/
-i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
- u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key)
+int i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
{
return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
@@ -796,10 +648,10 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
* hw_addr, back, device_id, vendor_id, subsystem_device_id,
* subsystem_vendor_id, and revision_id
**/
-i40e_status i40e_init_shared_code(struct i40e_hw *hw)
+int i40e_init_shared_code(struct i40e_hw *hw)
{
- i40e_status status = 0;
u32 port, ari, func_rid;
+ int status = 0;

i40e_set_mac_type(hw);

@@ -836,15 +688,16 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
* @addrs: the requestor's mac addr store
* @cmd_details: pointer to command details structure or NULL
**/
-static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
- u16 *flags,
- struct i40e_aqc_mac_address_read_data *addrs,
- struct i40e_asq_cmd_details *cmd_details)
+static int
+i40e_aq_mac_address_read(struct i40e_hw *hw,
+ u16 *flags,
+ struct i40e_aqc_mac_address_read_data *addrs,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_mac_address_read *cmd_data =
(struct i40e_aqc_mac_address_read *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
@@ -863,14 +716,14 @@ static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
* @mac_addr: address to write
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
- u16 flags, u8 *mac_addr,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_mac_address_write *cmd_data =
(struct i40e_aqc_mac_address_write *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_mac_address_write);
@@ -893,11 +746,11 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
*
* Reads the adapter's MAC address from register
**/
-i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
struct i40e_aqc_mac_address_read_data addrs;
- i40e_status status;
u16 flags = 0;
+ int status;

status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);

@@ -914,11 +767,11 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
*
* Reads the adapter's Port MAC address
**/
-i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
struct i40e_aqc_mac_address_read_data addrs;
- i40e_status status;
u16 flags = 0;
+ int status;

status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
if (status)
@@ -972,13 +825,13 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
*
* Reads the part number string from the EEPROM.
**/
-i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
- u32 pba_num_size)
+int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
{
- i40e_status status = 0;
u16 pba_word = 0;
u16 pba_size = 0;
u16 pba_ptr = 0;
+ int status = 0;
u16 i = 0;

status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
@@ -1087,8 +940,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
* @hw: pointer to the hardware structure
* @retry_limit: how many times to retry before failure
**/
-static i40e_status i40e_poll_globr(struct i40e_hw *hw,
- u32 retry_limit)
+static int i40e_poll_globr(struct i40e_hw *hw,
+ u32 retry_limit)
{
u32 cnt, reg = 0;

@@ -1114,7 +967,7 @@ static i40e_status i40e_poll_globr(struct i40e_hw *hw,
* Assuming someone else has triggered a global reset,
* assure the global reset is complete and then reset the PF
**/
-i40e_status i40e_pf_reset(struct i40e_hw *hw)
+int i40e_pf_reset(struct i40e_hw *hw)
{
u32 cnt = 0;
u32 cnt1 = 0;
@@ -1453,15 +1306,16 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
*
* Returns the various PHY abilities supported on the Port.
**/
-i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
- bool qualified_modules, bool report_init,
- struct i40e_aq_get_phy_abilities_resp *abilities,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- i40e_status status;
u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
+ struct i40e_aq_desc desc;
+ int status;

if (!abilities)
return I40E_ERR_PARAM;
@@ -1532,14 +1386,14 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
* of the PHY Config parameters. This status will be indicated by the
* command response.
**/
-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
- struct i40e_aq_set_phy_config *config,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aq_set_phy_config *cmd =
(struct i40e_aq_set_phy_config *)&desc.params.raw;
- enum i40e_status_code status;
+ int status;

if (!config)
return I40E_ERR_PARAM;
@@ -1554,7 +1408,7 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
return status;
}

-static noinline_for_stack enum i40e_status_code
+static noinline_for_stack int
i40e_set_fc_status(struct i40e_hw *hw,
struct i40e_aq_get_phy_abilities_resp *abilities,
bool atomic_restart)
@@ -1612,11 +1466,11 @@ i40e_set_fc_status(struct i40e_hw *hw,
*
* Set the requested flow control mode using set_phy_config.
**/
-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
- bool atomic_restart)
+int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_restart)
{
struct i40e_aq_get_phy_abilities_resp abilities;
- enum i40e_status_code status;
+ int status;

*aq_failures = 0x0;

@@ -1655,13 +1509,13 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
*
* Tell the firmware that the driver is taking over from PXE
**/
-i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
{
- i40e_status status;
struct i40e_aq_desc desc;
struct i40e_aqc_clear_pxe *cmd =
(struct i40e_aqc_clear_pxe *)&desc.params.raw;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_clear_pxe_mode);
@@ -1683,14 +1537,14 @@ i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
*
* Sets up the link and restarts the Auto-Negotiation over the link.
**/
-i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
- bool enable_link,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_link_restart_an *cmd =
(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_link_restart_an);
@@ -1715,17 +1569,17 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
*
* Returns the link status of the adapter.
**/
-i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
- bool enable_lse, struct i40e_link_status *link,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_link_status *resp =
(struct i40e_aqc_get_link_status *)&desc.params.raw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
- i40e_status status;
bool tx_pause, rx_pause;
u16 command_flags;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);

@@ -1811,14 +1665,14 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
*
* Set link interrupt mask.
**/
-i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
- u16 mask,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+ u16 mask,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_phy_int_mask *cmd =
(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_int_mask);
@@ -1838,13 +1692,13 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
*
* Reset the external PHY.
**/
-i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_phy_debug *cmd =
(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_debug);
@@ -1879,9 +1733,9 @@ static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
*
* Add a VSI context to the hardware.
**/
-i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
@@ -1889,7 +1743,7 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_vsi);
@@ -1923,15 +1777,15 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
* @seid: vsi number
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
- u16 seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)
&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -1951,15 +1805,15 @@ i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
* @seid: vsi number
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
- u16 seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)
&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -1981,16 +1835,16 @@ i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
* @cmd_details: pointer to command details structure or NULL
* @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
**/
-i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 seid, bool set,
- struct i40e_asq_cmd_details *cmd_details,
- bool rx_only_promisc)
+int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
u16 flags = 0;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2021,14 +1875,15 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
* @set: set multicast promiscuous enable/disable
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
- u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
u16 flags = 0;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2054,16 +1909,16 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
* @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
* @cmd_details: pointer to command details structure or NULL
**/
-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable,
- u16 vid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- enum i40e_status_code status;
u16 flags = 0;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2090,16 +1945,16 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
* @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
* @cmd_details: pointer to command details structure or NULL
**/
-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable,
- u16 vid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- enum i40e_status_code status;
u16 flags = 0;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2132,15 +1987,15 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
* @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable, u16 vid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
u16 flags = 0;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2167,14 +2022,14 @@ i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
*
* Set or clear the broadcast promiscuous flag (filter) for a given VSI.
**/
-i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
- u16 seid, bool set_filter,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 seid, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2200,15 +2055,15 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
* @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
u16 flags = 0;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2230,9 +2085,9 @@ i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
* @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
@@ -2240,7 +2095,7 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_vsi_parameters);
@@ -2272,9 +2127,9 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
*
* Update a VSI context.
**/
-i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
@@ -2282,7 +2137,7 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_update_vsi_parameters);
@@ -2310,15 +2165,15 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
*
* Fill the buf with switch configuration returned from AdminQ command
**/
-i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
- struct i40e_aqc_get_switch_config_resp *buf,
- u16 buf_size, u16 *start_seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_switch_seid *scfg =
(struct i40e_aqc_switch_seid *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_switch_config);
@@ -2344,15 +2199,15 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
*
* Set switch configuration bits
**/
-enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
- u16 flags,
- u16 valid_flags, u8 mode,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags, u8 mode,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_switch_config *scfg =
(struct i40e_aqc_set_switch_config *)&desc.params.raw;
- enum i40e_status_code status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_switch_config);
@@ -2381,16 +2236,16 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
*
* Get the firmware version from the admin queue commands
**/
-i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
- u16 *fw_major_version, u16 *fw_minor_version,
- u32 *fw_build,
- u16 *api_major_version, u16 *api_minor_version,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_version *resp =
(struct i40e_aqc_get_version *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);

@@ -2420,14 +2275,14 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
*
* Send the driver version to the firmware
**/
-i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+int i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_driver_version *dv,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_driver_version *cmd =
(struct i40e_aqc_driver_version *)&desc.params.raw;
- i40e_status status;
+ int status;
u16 len;

if (dv == NULL)
@@ -2462,9 +2317,9 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
*
* Side effect: LinkStatusEvent reporting becomes enabled
**/
-i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
+int i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
{
- i40e_status status = 0;
+ int status = 0;

if (hw->phy.get_link_info) {
status = i40e_update_link_info(hw);
@@ -2483,10 +2338,10 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
* i40e_update_link_info - update status of the HW network link
* @hw: pointer to the hw struct
**/
-noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
+noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw)
{
struct i40e_aq_get_phy_abilities_resp abilities;
- i40e_status status = 0;
+ int status = 0;

status = i40e_aq_get_link_info(hw, true, NULL, NULL);
if (status)
@@ -2533,19 +2388,19 @@ noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
* This asks the FW to add a VEB between the uplink and downlink
* elements. If the uplink SEID is 0, this will be a floating VEB.
**/
-i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
- u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *veb_seid,
- bool enable_stats,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *veb_seid,
+ bool enable_stats,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_veb *cmd =
(struct i40e_aqc_add_veb *)&desc.params.raw;
struct i40e_aqc_add_veb_completion *resp =
(struct i40e_aqc_add_veb_completion *)&desc.params.raw;
- i40e_status status;
u16 veb_flags = 0;
+ int status;

/* SEIDs need to either both be set or both be 0 for floating VEB */
if (!!uplink_seid != !!downlink_seid)
@@ -2591,17 +2446,17 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
* This retrieves the parameters for a particular VEB, specified by
* uplink_seid, and returns them to the caller.
**/
-i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
- u16 veb_seid, u16 *switch_id,
- bool *floating, u16 *statistic_index,
- u16 *vebs_used, u16 *vebs_free,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id,
+ bool *floating, u16 *statistic_index,
+ u16 *vebs_used, u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
(struct i40e_aqc_get_veb_parameters_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;

if (veb_seid == 0)
return I40E_ERR_PARAM;
@@ -2685,7 +2540,7 @@ i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
*
* Add MAC/VLAN addresses to the HW filtering
**/
-i40e_status
+int
i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details)
@@ -2717,7 +2572,7 @@ i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
* It also calls _v2 versions of asq_send_command functions to
* get the aq_status on the stack.
**/
-i40e_status
+int
i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
@@ -2745,15 +2600,16 @@ i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
*
* Remove MAC/VLAN addresses from the HW filtering
**/
-i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_remove_macvlan_element_data *mv_list,
- u16 count, struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_macvlan *cmd =
(struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
u16 buf_size;
+ int status;

if (count == 0 || !mv_list || !hw)
return I40E_ERR_PARAM;
@@ -2792,7 +2648,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
* It also calls _v2 versions of asq_send_command functions to
* get the aq_status on the stack.
**/
-i40e_status
+int
i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
@@ -2840,19 +2696,19 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
* Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
* VEBs/VEPA elements only
**/
-static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
- u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
- u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
+static int i40e_mirrorrule_op(struct i40e_hw *hw,
+ u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
+ u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_delete_mirror_rule *cmd =
(struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
struct i40e_aqc_add_delete_mirror_rule_completion *resp =
(struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
- i40e_status status;
u16 buf_size;
+ int status;

buf_size = count * sizeof(*mr_list);

@@ -2900,10 +2756,11 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
*
* Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
**/
-i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
+int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 dest_vsi, u16 count,
+ __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
{
if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
@@ -2931,10 +2788,11 @@ i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
*
* Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
**/
-i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free)
+int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 rule_id, u16 count,
+ __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rules_used, u16 *rules_free)
{
/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
@@ -2963,14 +2821,14 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
*
* send msg to vf
**/
-i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
- u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_pf_vf_message *cmd =
(struct i40e_aqc_pf_vf_message *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
cmd->id = cpu_to_le32(vfid);
@@ -2998,14 +2856,14 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
*
* Read the register using the admin queue commands
**/
-i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+int i40e_aq_debug_read_register(struct i40e_hw *hw,
u32 reg_addr, u64 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_debug_reg_read_write *cmd_resp =
(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
- i40e_status status;
+ int status;

if (reg_val == NULL)
return I40E_ERR_PARAM;
@@ -3033,14 +2891,14 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
*
* Write to a register using the admin queue commands
**/
-i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
- u32 reg_addr, u64 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_debug_reg_read_write *cmd =
(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);

@@ -3064,16 +2922,16 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
*
* requests common resource using the admin queue commands
**/
-i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
- enum i40e_aq_resources_ids resource,
- enum i40e_aq_resource_access_type access,
- u8 sdp_number, u64 *timeout,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_request_resource *cmd_resp =
(struct i40e_aqc_request_resource *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);

@@ -3103,15 +2961,15 @@ i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
*
* release common resource using the admin queue commands
**/
-i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
- enum i40e_aq_resources_ids resource,
- u8 sdp_number,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_request_resource *cmd =
(struct i40e_aqc_request_resource *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);

@@ -3135,15 +2993,15 @@ i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
*
* Read the NVM using the admin queue commands
**/
-i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, void *data,
- bool last_command,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_nvm_update *cmd =
(struct i40e_aqc_nvm_update *)&desc.params.raw;
- i40e_status status;
+ int status;

/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000) {
@@ -3181,14 +3039,14 @@ i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
*
* Erase the NVM sector using the admin queue commands
**/
-i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, bool last_command,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_nvm_update *cmd =
(struct i40e_aqc_nvm_update *)&desc.params.raw;
- i40e_status status;
+ int status;

/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000) {
@@ -3229,8 +3087,8 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
u16 id, ocp_cfg_word0;
- i40e_status status;
u8 major_rev;
+ int status;
u32 i = 0;

cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
@@ -3471,14 +3329,14 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
*
* Get the device capabilities descriptions from the firmware
**/
-i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
- void *buff, u16 buff_size, u16 *data_size,
- enum i40e_admin_queue_opc list_type_opc,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_list_capabilites *cmd;
struct i40e_aq_desc desc;
- i40e_status status = 0;
+ int status = 0;

cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;

@@ -3520,15 +3378,15 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
*
* Update the NVM using the admin queue commands
**/
-i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, void *data,
- bool last_command, u8 preservation_flags,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 preservation_flags,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_nvm_update *cmd =
(struct i40e_aqc_nvm_update *)&desc.params.raw;
- i40e_status status;
+ int status;

/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000) {
@@ -3573,13 +3431,13 @@ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
*
* Rearrange NVM structure, available only for transition FW
**/
-i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_nvm_update *cmd;
- i40e_status status;
struct i40e_aq_desc desc;
+ int status;

cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;

@@ -3613,17 +3471,17 @@ i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
*
* Requests the complete LLDP MIB (entire packet).
**/
-i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
- u8 mib_type, void *buff, u16 buff_size,
- u16 *local_len, u16 *remote_len,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_get_mib *cmd =
(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
struct i40e_aqc_lldp_get_mib *resp =
(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
- i40e_status status;
+ int status;

if (buff_size == 0 || !buff)
return I40E_ERR_PARAM;
@@ -3663,14 +3521,14 @@ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
*
* Set the LLDP MIB.
**/
-enum i40e_status_code
+int
i40e_aq_set_lldp_mib(struct i40e_hw *hw,
u8 mib_type, void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_lldp_set_local_mib *cmd;
- enum i40e_status_code status;
struct i40e_aq_desc desc;
+ int status;

cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
if (buff_size == 0 || !buff)
@@ -3702,14 +3560,14 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
* Enable or Disable posting of an event on ARQ when LLDP MIB
* associated with the interface changes
**/
-i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
- bool enable_update,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_update_mib *cmd =
(struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);

@@ -3731,14 +3589,14 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
* Restore LLDP Agent factory settings if @restore set to True. In other case
* only returns factory setting in AQ response.
**/
-enum i40e_status_code
+int
i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_restore *cmd =
(struct i40e_aqc_lldp_restore *)&desc.params.raw;
- i40e_status status;
+ int status;

if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
i40e_debug(hw, I40E_DEBUG_ALL,
@@ -3768,14 +3626,14 @@ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
*
* Stop or Shutdown the embedded LLDP Agent
**/
-i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
- bool persist,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_stop *cmd =
(struct i40e_aqc_lldp_stop *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);

@@ -3803,13 +3661,13 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
*
* Start the embedded LLDP Agent on all ports.
**/
-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_start *cmd =
(struct i40e_aqc_lldp_start *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);

@@ -3835,14 +3693,14 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
* @dcb_enable: True if DCB configuration needs to be applied
*
**/
-enum i40e_status_code
+int
i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_dcb_parameters *cmd =
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
- i40e_status status;
+ int status;

if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -3868,12 +3726,12 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
*
* Get CEE DCBX mode operational configuration from firmware
**/
-i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
- void *buff, u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- i40e_status status;
+ int status;

if (buff_size == 0 || !buff)
return I40E_ERR_PARAM;
@@ -3899,17 +3757,17 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
* and this function will call cpu_to_le16 to convert from Host byte order to
* Little Endian order.
**/
-i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 protocol_index,
- u8 *filter_index,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_udp_tunnel *cmd =
(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
struct i40e_aqc_del_udp_tunnel_completion *resp =
(struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);

@@ -3930,13 +3788,13 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
* @index: filter index
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_remove_udp_tunnel *cmd =
(struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);

@@ -3955,13 +3813,13 @@ i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
*
* This deletes a switch element from the switch.
**/
-i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_switch_seid *cmd =
(struct i40e_aqc_switch_seid *)&desc.params.raw;
- i40e_status status;
+ int status;

if (seid == 0)
return I40E_ERR_PARAM;
@@ -3985,11 +3843,11 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
* recomputed and modified. The retval field in the descriptor
* will be set to 0 when RPB is modified.
**/
-i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);

@@ -4009,15 +3867,15 @@ i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
*
* Generic command handler for Tx scheduler AQ commands
**/
-static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
void *buff, u16 buff_size,
- enum i40e_admin_queue_opc opcode,
+ enum i40e_admin_queue_opc opcode,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_tx_sched_ind *cmd =
(struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
- i40e_status status;
+ int status;
bool cmd_param_flag = false;

switch (opcode) {
@@ -4067,14 +3925,14 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
* @max_credit: Max BW limit credits
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_credit,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_configure_vsi_bw_limit *cmd =
(struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_vsi_bw_limit);
@@ -4095,10 +3953,10 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
* @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_configure_vsi_tc_bw,
@@ -4113,11 +3971,12 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
* @opcode: Tx scheduler AQ command opcode
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
- enum i40e_admin_queue_opc opcode,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
sizeof(*ets_data), opcode, cmd_details);
@@ -4130,7 +3989,8 @@ i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
* @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+int
+i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
struct i40e_asq_cmd_details *cmd_details)
@@ -4147,10 +4007,11 @@ i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold VSI BW configuration
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_vsi_bw_config,
@@ -4164,10 +4025,11 @@ i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold VSI BW configuration per TC
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_vsi_ets_sla_config,
@@ -4181,10 +4043,11 @@ i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold switching component's per TC BW config
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_switching_comp_ets_config,
@@ -4198,10 +4061,11 @@ i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold current ETS configuration for the Physical Port
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_port_ets_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_port_ets_config,
@@ -4215,10 +4079,11 @@ i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold switching component's BW configuration
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_switching_comp_bw_config,
@@ -4237,8 +4102,9 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
* Returns 0 if the values passed are valid and within
* range else returns an error.
**/
-static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
- struct i40e_filter_control_settings *settings)
+static int
+i40e_validate_filter_settings(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
{
u32 fcoe_cntx_size, fcoe_filt_size;
u32 fcoe_fmax;
@@ -4324,11 +4190,11 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
* for a single PF. It is expected that these settings are programmed
* at the driver initialization time.
**/
-i40e_status i40e_set_filter_control(struct i40e_hw *hw,
- struct i40e_filter_control_settings *settings)
+int i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
{
- i40e_status ret = 0;
u32 hash_lut_size = 0;
+ int ret = 0;
u32 val;

if (!settings)
@@ -4398,11 +4264,11 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
* In return it will update the total number of perfect filter count in
* the stats member.
**/
-i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
- u8 *mac_addr, u16 ethtype, u16 flags,
- u16 vsi_seid, u16 queue, bool is_add,
- struct i40e_control_filter_stats *stats,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_control_packet_filter *cmd =
@@ -4411,7 +4277,7 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
(struct i40e_aqc_add_remove_control_packet_filter_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;

if (vsi_seid == 0)
return I40E_ERR_PARAM;
@@ -4457,7 +4323,7 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
- i40e_status status;
+ int status;

status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
seid, 0, true, NULL,
@@ -4479,14 +4345,14 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
* is not passed then only register at 'reg_addr0' is read.
*
**/
-static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
- u32 reg_addr0, u32 *reg_val0,
- u32 reg_addr1, u32 *reg_val1)
+static int i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1)
{
struct i40e_aq_desc desc;
struct i40e_aqc_alternate_write *cmd_resp =
(struct i40e_aqc_alternate_write *)&desc.params.raw;
- i40e_status status;
+ int status;

if (!reg_val0)
return I40E_ERR_PARAM;
@@ -4515,12 +4381,12 @@ static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
*
* Suspend port's Tx traffic
**/
-i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_tx_sched_ind *cmd;
struct i40e_aq_desc desc;
- i40e_status status;
+ int status;

cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
@@ -4537,11 +4403,11 @@ i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
*
* Resume port's Tx traffic
**/
-i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);

@@ -4611,18 +4477,18 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
* Dump internal FW/HW data for debug purposes.
*
**/
-i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
- u8 table_id, u32 start_index, u16 buff_size,
- void *buff, u16 *ret_buff_size,
- u8 *ret_next_table, u32 *ret_next_index,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_debug_dump_internals *cmd =
(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
struct i40e_aqc_debug_dump_internals *resp =
(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
- i40e_status status;
+ int status;

if (buff_size == 0 || !buff)
return I40E_ERR_PARAM;
@@ -4663,12 +4529,12 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
*
* Read bw from the alternate ram for the given pf
**/
-i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
- u32 *max_bw, u32 *min_bw,
- bool *min_valid, bool *max_valid)
+int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw,
+ bool *min_valid, bool *max_valid)
{
- i40e_status status;
u32 max_bw_addr, min_bw_addr;
+ int status;

/* Calculate the address of the min/max bw registers */
max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
@@ -4703,13 +4569,14 @@ i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
*
* Configure partitions guaranteed/max bw
**/
-i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
- struct i40e_aqc_configure_partition_bw_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
- i40e_status status;
- struct i40e_aq_desc desc;
u16 bwd_size = sizeof(*bw_data);
+ struct i40e_aq_desc desc;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_partition_bw);
@@ -4738,11 +4605,11 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
- u16 reg, u8 phy_addr, u16 *value)
+int i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value)
{
- i40e_status status = I40E_ERR_TIMEOUT;
u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
u32 command = 0;
u16 retry = 1000;

@@ -4783,11 +4650,11 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
*
* Writes specified PHY register value
**/
-i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
- u16 reg, u8 phy_addr, u16 value)
+int i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value)
{
- i40e_status status = I40E_ERR_TIMEOUT;
u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
u32 command = 0;
u16 retry = 1000;

@@ -4824,13 +4691,13 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value)
+int i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
{
- i40e_status status = I40E_ERR_TIMEOUT;
+ u8 port_num = hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
u32 command = 0;
u16 retry = 1000;
- u8 port_num = hw->func_caps.mdio_port_num;

command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
@@ -4898,13 +4765,13 @@ i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
*
* Writes value to specified PHY register
**/
-i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value)
+int i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
{
- i40e_status status = I40E_ERR_TIMEOUT;
- u32 command = 0;
- u16 retry = 1000;
u8 port_num = hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
+ u16 retry = 1000;
+ u32 command = 0;

command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
@@ -4965,10 +4832,10 @@ i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
*
* Writes value to specified PHY register
**/
-i40e_status i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value)
+int i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
{
- i40e_status status;
+ int status;

switch (hw->device_id) {
case I40E_DEV_ID_1G_BASE_T_X722:
@@ -5004,10 +4871,10 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-i40e_status i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value)
+int i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
{
- i40e_status status;
+ int status;

switch (hw->device_id) {
case I40E_DEV_ID_1G_BASE_T_X722:
@@ -5056,17 +4923,17 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
*
* Blinks PHY link LED
**/
-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval)
+int i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval)
{
- i40e_status status = 0;
- u32 i;
- u16 led_ctl;
- u16 gpio_led_port;
- u16 led_reg;
u16 led_addr = I40E_PHY_LED_PROV_REG_1;
+ u16 gpio_led_port;
u8 phy_addr = 0;
+ int status = 0;
+ u16 led_ctl;
u8 port_num;
+ u16 led_reg;
+ u32 i;

i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
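/*
 * Editorial sketch, not part of the patch: hunks like the one above also
 * re-sort the local declarations into the networking tree's "reverse
 * Christmas tree" order (longest declaration line first, shortest last)
 * while 'status' becomes an int.  The layout below is an invented
 * illustration, not copied from i40e.
 */
#include <stdint.h>

struct demo_hw {		/* stand-in for struct i40e_hw */
	uint32_t port_reg;
};

static int demo_rct_layout(struct demo_hw *hw)
{
	uint32_t led_addr = 0x1000;	/* longest lines first ... */
	uint16_t gpio_led_port = 0;
	uint8_t phy_addr = 0;
	int status = 0;			/* ... shortest lines last */

	(void)hw;
	(void)led_addr;
	(void)gpio_led_port;
	(void)phy_addr;
	return status;
}

int main(void)
{
	struct demo_hw hw = { .port_reg = 0 };

	return demo_rct_layout(&hw);
}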
@@ -5128,12 +4995,12 @@ i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
* @led_addr: LED register address
* @reg_val: read register value
**/
-static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
- u32 *reg_val)
+static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 *reg_val)
{
- enum i40e_status_code status;
u8 phy_addr = 0;
u8 port_num;
+ int status;
u32 i;

*reg_val = 0;
@@ -5162,12 +5029,12 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
* @led_addr: LED register address
* @reg_val: register value to write
**/
-static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
- u32 reg_val)
+static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 reg_val)
{
- enum i40e_status_code status;
u8 phy_addr = 0;
u8 port_num;
+ int status;
u32 i;

if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
@@ -5197,17 +5064,17 @@ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
* @val: original value of register to use
*
**/
-i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
- u16 *val)
+int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val)
{
- i40e_status status = 0;
u16 gpio_led_port;
u8 phy_addr = 0;
- u16 reg_val;
+ u32 reg_val_aq;
+ int status = 0;
u16 temp_addr;
+ u16 reg_val;
u8 port_num;
u32 i;
- u32 reg_val_aq;

if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
status =
@@ -5252,12 +5119,12 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
* Set led's on or off when controlled by the PHY
*
**/
-i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
- u16 led_addr, u32 mode)
+int i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode)
{
- i40e_status status = 0;
u32 led_ctl = 0;
u32 led_reg = 0;
+ int status = 0;

status = i40e_led_get_reg(hw, led_addr, &led_reg);
if (status)
@@ -5301,14 +5168,14 @@ i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
* Use the firmware to read the Rx control register,
* especially useful if the Rx unit is under heavy pressure
**/
-i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
- i40e_status status;
+ int status;

if (!reg_val)
return I40E_ERR_PARAM;
@@ -5332,8 +5199,8 @@ i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
**/
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
{
- i40e_status status = 0;
bool use_register;
+ int status = 0;
int retry = 5;
u32 val = 0;

@@ -5367,14 +5234,14 @@ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
* Use the firmware to write to an Rx control register,
* especially useful if the Rx unit is under heavy pressure
**/
-i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_rx_ctl_reg_read_write *cmd =
(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);

@@ -5394,8 +5261,8 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
**/
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
- i40e_status status = 0;
bool use_register;
+ int status = 0;
int retry = 5;

use_register = (((hw->aq.api_maj_ver == 1) &&
@@ -5457,16 +5324,16 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
* NOTE: In common cases MDIO I/F number should not be changed, thats why you
* may use simple wrapper i40e_aq_set_phy_register.
**/
-enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr, bool page_change,
- bool set_mdio, u8 mdio_num,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
(struct i40e_aqc_phy_register_access *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_register);
@@ -5502,16 +5369,16 @@ enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
* NOTE: In common cases MDIO I/F number should not be changed, thats why you
* may use simple wrapper i40e_aq_get_phy_register.
**/
-enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr, bool page_change,
- bool set_mdio, u8 mdio_num,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
(struct i40e_aqc_phy_register_access *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_phy_register);
@@ -5542,18 +5409,17 @@ enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
* @error_info: returns error information
* @cmd_details: pointer to command details structure or NULL
**/
-enum
-i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
- u16 buff_size, u32 track_id,
- u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_write_personalization_profile *cmd =
(struct i40e_aqc_write_personalization_profile *)
&desc.params.raw;
struct i40e_aqc_write_ddp_resp *resp;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_write_personalization_profile);
@@ -5586,15 +5452,14 @@ i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
* @flags: AdminQ command flags
* @cmd_details: pointer to command details structure or NULL
**/
-enum
-i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
- u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_applied_profiles *cmd =
(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
- i40e_status status;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_personalization_profile_list);
@@ -5693,14 +5558,13 @@ i40e_find_section_in_profile(u32 section_type,
* @hw: pointer to the hw struct
* @aq: command buffer containing all data to execute AQ
**/
-static enum
-i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
- struct i40e_profile_aq_section *aq)
+static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+ struct i40e_profile_aq_section *aq)
{
- i40e_status status;
struct i40e_aq_desc desc;
u8 *msg = NULL;
u16 msglen;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
desc.flags |= cpu_to_le16(aq->flags);
@@ -5740,14 +5604,14 @@ i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
*
* Validates supported devices and profile's sections.
*/
-static enum i40e_status_code
+static int
i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 track_id, bool rollback)
{
struct i40e_profile_section_header *sec = NULL;
- i40e_status status = 0;
struct i40e_section_table *sec_tbl;
u32 vendor_dev_id;
+ int status = 0;
u32 dev_cnt;
u32 sec_off;
u32 i;
@@ -5805,16 +5669,16 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
*
* Handles the download of a complete package.
*/
-enum i40e_status_code
+int
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 track_id)
{
- i40e_status status = 0;
- struct i40e_section_table *sec_tbl;
struct i40e_profile_section_header *sec = NULL;
struct i40e_profile_aq_section *ddp_aq;
- u32 section_size = 0;
+ struct i40e_section_table *sec_tbl;
u32 offset = 0, info = 0;
+ u32 section_size = 0;
+ int status = 0;
u32 sec_off;
u32 i;

@@ -5868,15 +5732,15 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
*
* Rolls back previously loaded package.
*/
-enum i40e_status_code
+int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 track_id)
{
struct i40e_profile_section_header *sec = NULL;
- i40e_status status = 0;
struct i40e_section_table *sec_tbl;
u32 offset = 0, info = 0;
u32 section_size = 0;
+ int status = 0;
u32 sec_off;
int i;

@@ -5920,15 +5784,15 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
*
* Register a profile to the list of loaded profiles.
*/
-enum i40e_status_code
+int
i40e_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id)
{
- i40e_status status = 0;
struct i40e_profile_section_header *sec = NULL;
struct i40e_profile_info *pinfo;
u32 offset = 0, info = 0;
+ int status = 0;

sec = (struct i40e_profile_section_header *)profile_info_sec;
sec->tbl_size = 1;
@@ -5962,7 +5826,7 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
* of the function.
*
**/
-enum i40e_status_code
+int
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
@@ -5970,8 +5834,8 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- enum i40e_status_code status;
u16 buff_len;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
@@ -5999,7 +5863,7 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
* function.
*
**/
-enum i40e_status_code
+int
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
@@ -6007,8 +5871,8 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- i40e_status status;
u16 buff_len;
+ int status;
int i;

i40e_fill_default_direct_cmd_desc(&desc,
@@ -6056,7 +5920,7 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
* of the function.
*
**/
-enum i40e_status_code
+int
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
@@ -6064,8 +5928,8 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- enum i40e_status_code status;
u16 buff_len;
+ int status;

i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_remove_cloud_filters);
@@ -6093,7 +5957,7 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
* function.
*
**/
-enum i40e_status_code
+int
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
@@ -6101,8 +5965,8 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- i40e_status status;
u16 buff_len;
+ int status;
int i;

i40e_fill_default_direct_cmd_desc(&desc,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 673f341f4c0c..90638b67f8dc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -12,7 +12,7 @@
*
* Get the DCBX status from the Firmware
**/
-i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
{
u32 reg;

@@ -497,15 +497,15 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
*
* Parse DCB configuration from the LLDPDU
**/
-i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
- struct i40e_dcbx_config *dcbcfg)
+int i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg)
{
- i40e_status ret = 0;
struct i40e_lldp_org_tlv *tlv;
- u16 type;
- u16 length;
u16 typelength;
u16 offset = 0;
+ int ret = 0;
+ u16 length;
+ u16 type;

if (!lldpmib || !dcbcfg)
return I40E_ERR_PARAM;
@@ -551,12 +551,12 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
*
* Query DCB configuration from the Firmware
**/
-i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
- u8 bridgetype,
- struct i40e_dcbx_config *dcbcfg)
+int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg)
{
- i40e_status ret = 0;
struct i40e_virt_mem mem;
+ int ret = 0;
u8 *lldpmib;

/* Allocate the LLDPDU */
@@ -767,9 +767,9 @@ static void i40e_cee_to_dcb_config(
*
* Get IEEE mode DCB configuration from the Firmware
**/
-static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+static int i40e_get_ieee_dcb_config(struct i40e_hw *hw)
{
- i40e_status ret = 0;
+ int ret = 0;

/* IEEE mode */
hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
@@ -797,11 +797,11 @@ static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
*
* Get DCB configuration from the Firmware
**/
-i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
+int i40e_get_dcb_config(struct i40e_hw *hw)
{
- i40e_status ret = 0;
- struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
+ struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ int ret = 0;

/* If Firmware version < v4.33 on X710/XL710, IEEE only */
if ((hw->mac.type == I40E_MAC_XL710) &&
@@ -867,11 +867,11 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
*
* Update DCB configuration from the Firmware
**/
-i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
+int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
{
- i40e_status ret = 0;
struct i40e_lldp_variables lldp_cfg;
u8 adminstatus = 0;
+ int ret = 0;

if (!hw->func_caps.dcb)
return I40E_NOT_SUPPORTED;
@@ -940,13 +940,13 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
* Get status of FW Link Layer Discovery Protocol (LLDP) Agent.
* Status of agent is reported via @lldp_status parameter.
**/
-enum i40e_status_code
+int
i40e_get_fw_lldp_status(struct i40e_hw *hw,
enum i40e_get_fw_lldp_status_resp *lldp_status)
{
struct i40e_virt_mem mem;
- i40e_status ret;
u8 *lldpmib;
+ int ret;

if (!lldp_status)
return I40E_ERR_PARAM;
@@ -1238,13 +1238,13 @@ static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
*
* Set DCB configuration to the Firmware
**/
-i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
+int i40e_set_dcb_config(struct i40e_hw *hw)
{
struct i40e_dcbx_config *dcbcfg;
struct i40e_virt_mem mem;
u8 mib_type, *lldpmib;
- i40e_status ret;
u16 miblen;
+ int ret;

/* update the hw local config */
dcbcfg = &hw->local_dcbx_config;
@@ -1274,8 +1274,8 @@ i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
*
* send DCB configuration to FW
**/
-i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
- struct i40e_dcbx_config *dcbcfg)
+int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg)
{
u16 length, offset = 0, tlvid, typelength;
struct i40e_lldp_org_tlv *tlv;
@@ -1888,13 +1888,13 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
*
* Reads the LLDP configuration data from NVM using passed addresses
**/
-static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw,
- struct i40e_lldp_variables *lldp_cfg,
- u8 module, u32 word_offset)
+static int _i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg,
+ u8 module, u32 word_offset)
{
u32 address, offset = (2 * word_offset);
- i40e_status ret;
__le16 raw_mem;
+ int ret;
u16 mem;

ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
@@ -1950,10 +1950,10 @@ static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw,
*
* Reads the LLDP configuration data from NVM
**/
-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
- struct i40e_lldp_variables *lldp_cfg)
+int i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg)
{
- i40e_status ret = 0;
+ int ret = 0;
u32 mem;

if (!lldp_cfg)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 2370ceecb061..6b60dc9b7736 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -264,20 +264,20 @@ void i40e_dcb_hw_calculate_pool_sizes(struct i40e_hw *hw,
void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
struct i40e_rx_pb_config *old_pb_cfg,
struct i40e_rx_pb_config *new_pb_cfg);
-i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
- u16 *status);
-i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
- struct i40e_dcbx_config *dcbcfg);
-i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
- u8 bridgetype,
- struct i40e_dcbx_config *dcbcfg);
-i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
-i40e_status i40e_init_dcb(struct i40e_hw *hw,
- bool enable_mib_change);
-enum i40e_status_code
+int i40e_get_dcbx_status(struct i40e_hw *hw,
+ u16 *status);
+int i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg);
+int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg);
+int i40e_get_dcb_config(struct i40e_hw *hw);
+int i40e_init_dcb(struct i40e_hw *hw,
+ bool enable_mib_change);
+int
i40e_get_fw_lldp_status(struct i40e_hw *hw,
enum i40e_get_fw_lldp_status_resp *lldp_status);
-i40e_status i40e_set_dcb_config(struct i40e_hw *hw);
-i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
- struct i40e_dcbx_config *dcbcfg);
+int i40e_set_dcb_config(struct i40e_hw *hw);
+int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg);
#endif /* _I40E_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index e32c61909b31..bba70bd5703b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -135,8 +135,8 @@ static int i40e_dcbnl_ieee_setets(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed setting DCB ETS configuration err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed setting DCB ETS configuration err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -174,8 +174,8 @@ static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed setting DCB PFC configuration err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed setting DCB PFC configuration err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -225,8 +225,8 @@ static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed setting DCB configuration err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed setting DCB configuration err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -290,8 +290,8 @@ static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed setting DCB configuration err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed setting DCB configuration err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
index e1069ae658ad..7e8183762fd9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -36,7 +36,7 @@ static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,
{
struct i40e_ddp_profile_list *profile_list;
u8 buff[I40E_PROFILE_LIST_SIZE];
- i40e_status status;
+ int status;
int i;

status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
@@ -91,7 +91,7 @@ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
{
struct i40e_ddp_profile_list *profile_list;
u8 buff[I40E_PROFILE_LIST_SIZE];
- i40e_status status;
+ int status;
int i;

status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
@@ -117,14 +117,14 @@ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
*
* Register a profile to the list of loaded profiles.
*/
-static enum i40e_status_code
+static int
i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id)
{
struct i40e_profile_section_header *sec;
struct i40e_profile_info *pinfo;
- i40e_status status;
u32 offset = 0, info = 0;
+ int status;

sec = (struct i40e_profile_section_header *)profile_info_sec;
sec->tbl_size = 1;
@@ -157,14 +157,14 @@ i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
*
* Removes DDP profile from the NIC.
**/
-static enum i40e_status_code
+static int
i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id)
{
struct i40e_profile_section_header *sec;
struct i40e_profile_info *pinfo;
- i40e_status status;
u32 offset = 0, info = 0;
+ int status;

sec = (struct i40e_profile_section_header *)profile_info_sec;
sec->tbl_size = 1;
@@ -270,12 +270,12 @@ int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
struct i40e_profile_segment *profile_hdr;
struct i40e_profile_info pinfo;
struct i40e_package_header *pkg_hdr;
- i40e_status status;
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 track_id;
int istatus;
+ int status;

pkg_hdr = (struct i40e_package_header *)data;
if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index c9dcd6d92c83..9954493cd448 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -918,9 +918,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
i40e_veb_release(pf->veb[i]);
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
- i40e_status ret;
- u16 vid;
unsigned int v;
+ int ret;
+ u16 vid;

cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
if (cnt != 2) {
@@ -1284,7 +1284,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
struct i40e_aq_desc *desc;
- i40e_status ret;
+ int ret;

desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
if (!desc)
@@ -1330,9 +1330,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
desc = NULL;
} else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
struct i40e_aq_desc *desc;
- i40e_status ret;
u16 buffer_len;
u8 *buff;
+ int ret;

desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
if (!desc)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index ca229b0efeb6..97fe1787a8f4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -10,8 +10,8 @@
* @reg: reg to be tested
* @mask: bits to be touched
**/
-static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
- u32 reg, u32 mask)
+static int i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+ u32 reg, u32 mask)
{
static const u32 patterns[] = {
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
@@ -74,9 +74,9 @@ const struct i40e_diag_reg_test_info i40e_reg_list[] = {
*
* Perform registers diagnostic test
**/
-i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
+int i40e_diag_reg_test(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
u32 reg, mask;
u32 elements;
u32 i, j;
@@ -115,9 +115,9 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
*
* Perform EEPROM diagnostic test
**/
-i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
+int i40e_diag_eeprom_test(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ int ret_code;
u16 reg_val;

/* read NVM control word and if NVM valid, validate EEPROM checksum*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index 1db7c6d57231..c3ce5f35211f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -22,7 +22,7 @@ struct i40e_diag_reg_test_info {

extern const struct i40e_diag_reg_test_info i40e_reg_list[];

-i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
-i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
+int i40e_diag_reg_test(struct i40e_hw *hw);
+int i40e_diag_eeprom_test(struct i40e_hw *hw);

#endif /* _I40E_DIAG_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index f6fa63e4253c..e632041aed5f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1226,8 +1226,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_hw *hw = &pf->hw;
bool autoneg_changed = false;
- i40e_status status = 0;
int timeout = 50;
+ int status = 0;
int err = 0;
__u32 speed;
u8 autoneg;
@@ -1453,8 +1453,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status) {
netdev_info(netdev,
- "Set phy config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "Set phy config failed, err %d aq_err %s\n",
+ status,
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
@@ -1463,8 +1463,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
status = i40e_update_link_info(hw);
if (status)
netdev_dbg(netdev,
- "Updating link info failed with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "Updating link info failed with err %d aq_err %s\n",
+ status,
i40e_aq_str(hw, hw->aq.asq_last_status));

} else {
@@ -1483,7 +1483,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
- i40e_status status = 0;
+ int status = 0;
u32 flags = 0;
int err = 0;

@@ -1515,8 +1515,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status) {
netdev_info(netdev,
- "Set phy config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "Set phy config failed, err %d aq_err %s\n",
+ status,
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
@@ -1529,8 +1529,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
* (e.g. no physical connection etc.)
*/
netdev_dbg(netdev,
- "Updating link info failed with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "Updating link info failed with err %d aq_err %s\n",
+ status,
i40e_aq_str(hw, hw->aq.asq_last_status));
}

@@ -1545,7 +1545,7 @@ static int i40e_get_fec_param(struct net_device *netdev,
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
- i40e_status status = 0;
+ int status = 0;
int err = 0;
u8 fec_cfg;

@@ -1632,12 +1632,12 @@ static int i40e_nway_reset(struct net_device *netdev)
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
- i40e_status ret = 0;
+ int ret = 0;

ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
if (ret) {
- netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ netdev_info(netdev, "link restart failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@@ -1697,9 +1697,9 @@ static int i40e_set_pauseparam(struct net_device *netdev,
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
- i40e_status status;
u8 aq_failures;
int err = 0;
+ int status;
u32 is_an;

/* Changing the port's flow control is not supported if this isn't the
@@ -1753,20 +1753,20 @@ static int i40e_set_pauseparam(struct net_device *netdev,
status = i40e_set_fc(hw, &aq_failures, link_up);

if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
- netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
+ status,
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
- netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
+ status,
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
- netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
+ status,
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
@@ -2581,8 +2581,8 @@ static u64 i40e_link_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- i40e_status status;
bool link_up = false;
+ int status;

netif_info(pf, hw, netdev, "link test\n");
status = i40e_get_link_status(&pf->hw, &link_up);
@@ -2805,11 +2805,11 @@ static int i40e_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- i40e_status ret = 0;
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
int blink_freq = 2;
u16 temp_status;
+ int ret = 0;

switch (state) {
case ETHTOOL_ID_ACTIVE:
@@ -5245,7 +5245,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 reset_needed = 0;
- i40e_status status;
+ int status;
u32 i, j;

orig_flags = READ_ONCE(pf->flags);
@@ -5360,8 +5360,8 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
0, NULL);
if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
- "couldn't set switch config bits, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't set switch config bits, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
@@ -5433,9 +5433,8 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
return -EBUSY;
default:
dev_warn(&pf->pdev->dev,
- "Starting FW LLDP agent failed: error: %s, %s\n",
- i40e_stat_str(&pf->hw,
- status),
+ "Starting FW LLDP agent failed: error: %d, %s\n",
+ status,
i40e_aq_str(&pf->hw,
adq_err));
return -EINVAL;
@@ -5475,8 +5474,8 @@ static int i40e_get_module_info(struct net_device *netdev,
u32 sff8472_comp = 0;
u32 sff8472_swap = 0;
u32 sff8636_rev = 0;
- i40e_status status;
u32 type = 0;
+ int status;

/* Check if firmware supports reading module EEPROM. */
if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
@@ -5580,8 +5579,8 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
bool is_sfp = false;
- i40e_status status;
u32 value = 0;
+ int status;
int i;

if (!ee || !ee->len || !data)
@@ -5622,10 +5621,10 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp phy_cfg;
- enum i40e_status_code status = 0;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
+ int status = 0;

/* Get initial PHY capabilities */
status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL);
@@ -5687,11 +5686,11 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp abilities;
- enum i40e_status_code status = I40E_SUCCESS;
struct i40e_aq_set_phy_config config;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
+ int status = I40E_SUCCESS;
__le16 eee_capability;

/* Deny parameters we don't support */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 163ee8c6311c..46f7950a0049 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -17,17 +17,17 @@
* @type: what type of segment descriptor we're manipulating
* @direct_mode_sz: size to alloc in direct mode
**/
-i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 sd_index,
- enum i40e_sd_entry_type type,
- u64 direct_mode_sz)
+int i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz)
{
enum i40e_memory_type mem_type __attribute__((unused));
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
+ int ret_code = I40E_SUCCESS;
struct i40e_dma_mem mem;
- i40e_status ret_code = I40E_SUCCESS;
u64 alloc_len;

if (NULL == hmc_info->sd_table.sd_entry) {
@@ -106,19 +106,19 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
* aligned on 4K boundary and zeroed memory.
* 2. It should be 4K in size.
**/
-i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 pd_index,
- struct i40e_dma_mem *rsrc_pg)
+int i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg)
{
- i40e_status ret_code = 0;
struct i40e_hmc_pd_table *pd_table;
struct i40e_hmc_pd_entry *pd_entry;
struct i40e_dma_mem mem;
struct i40e_dma_mem *page = &mem;
u32 sd_idx, rel_pd_idx;
- u64 *pd_addr;
+ int ret_code = 0;
u64 page_desc;
+ u64 *pd_addr;

if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
@@ -185,15 +185,15 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
* 1. Caller can deallocate the memory used by backing storage after this
* function returns.
**/
-i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx)
+int i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
struct i40e_hmc_pd_entry *pd_entry;
struct i40e_hmc_pd_table *pd_table;
struct i40e_hmc_sd_entry *sd_entry;
u32 sd_idx, rel_pd_idx;
+ int ret_code = 0;
u64 *pd_addr;

/* calculate index */
@@ -241,11 +241,11 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
**/
-i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
- u32 idx)
+int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
+ int ret_code = 0;

/* get the entry and decrease its ref counter */
sd_entry = &hmc_info->sd_table.sd_entry[idx];
@@ -269,9 +269,9 @@ i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
* @idx: the page index
* @is_pf: used to distinguish between VF and PF
**/
-i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf)
+int i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
{
struct i40e_hmc_sd_entry *sd_entry;

@@ -290,11 +290,11 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
* @hmc_info: pointer to the HMC configuration information structure
* @idx: segment descriptor index to find the relevant page descriptor
**/
-i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
- u32 idx)
+int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
+ int ret_code = 0;

sd_entry = &hmc_info->sd_table.sd_entry[idx];

@@ -318,9 +318,9 @@ i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
* @idx: segment descriptor index to find the relevant page descriptor
* @is_pf: used to distinguish between VF and PF
**/
-i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf)
+int i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
{
struct i40e_hmc_sd_entry *sd_entry;

diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 3113792afaff..9960da07a573 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -187,28 +187,28 @@ struct i40e_hmc_info {
/* add one more to the limit to correct our range */ \
*(pd_limit) += 1; \
}
-i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 sd_index,
- enum i40e_sd_entry_type type,
- u64 direct_mode_sz);
-
-i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 pd_index,
- struct i40e_dma_mem *rsrc_pg);
-i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
-i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
+
+int i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+int i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg);
+int i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx);
+int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+int i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+int i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);

#endif /* _I40E_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index d6e92ecddfbd..40c101f286d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -74,12 +74,12 @@ static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
* Assumptions:
* - HMC Resource Profile has been selected before calling this function.
**/
-i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
- u32 rxq_num, u32 fcoe_cntx_num,
- u32 fcoe_filt_num)
+int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num)
{
struct i40e_hmc_obj_info *obj, *full_obj;
- i40e_status ret_code = 0;
+ int ret_code = 0;
u64 l2fpm_size;
u32 size_exp;

@@ -229,11 +229,11 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
* 1. caller can deallocate the memory used by pd after this function
* returns.
**/
-static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx)
+static int i40e_remove_pd_page(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;

if (!i40e_prep_remove_pd_page(hmc_info, idx))
ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
@@ -256,11 +256,11 @@ static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
* 1. caller can deallocate the memory used by backing storage after this
* function returns.
**/
-static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx)
+static int i40e_remove_sd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;

if (!i40e_prep_remove_sd_bp(hmc_info, idx))
ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
@@ -276,15 +276,15 @@ static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
* This will allocate memory for PDs and backing pages and populate
* the sd and pd entries.
**/
-static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
- struct i40e_hmc_lan_create_obj_info *info)
+static int i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info)
{
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
u32 pd_idx1 = 0, pd_lmt1 = 0;
u32 pd_idx = 0, pd_lmt = 0;
bool pd_error = false;
u32 sd_idx, sd_lmt;
+ int ret_code = 0;
u64 sd_size;
u32 i, j;

@@ -435,13 +435,13 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
* - This function will be called after i40e_init_lan_hmc() and before
* any LAN/FCoE HMC objects can be created.
**/
-i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
- enum i40e_hmc_model model)
+int i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model)
{
struct i40e_hmc_lan_create_obj_info info;
- i40e_status ret_code = 0;
u8 hmc_fn_id = hw->hmc.hmc_fn_id;
struct i40e_hmc_obj_info *obj;
+ int ret_code = 0;

/* Initialize part of the create object info struct */
info.hmc_info = &hw->hmc;
@@ -520,13 +520,13 @@ i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
* caller should deallocate memory allocated previously for
* book-keeping information about PDs and backing storage.
**/
-static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
- struct i40e_hmc_lan_delete_obj_info *info)
+static int i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info)
{
- i40e_status ret_code = 0;
struct i40e_hmc_pd_table *pd_table;
u32 pd_idx, pd_lmt, rel_pd_idx;
u32 sd_idx, sd_lmt;
+ int ret_code = 0;
u32 i, j;

if (NULL == info) {
@@ -632,10 +632,10 @@ static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
* This must be called by drivers as they are shutting down and being
* removed from the OS.
**/
-i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+int i40e_shutdown_lan_hmc(struct i40e_hw *hw)
{
struct i40e_hmc_lan_delete_obj_info info;
- i40e_status ret_code;
+ int ret_code;

info.hmc_info = &hw->hmc;
info.rsrc_type = I40E_HMC_LAN_FULL;
@@ -915,9 +915,9 @@ static void i40e_write_qword(u8 *hmc_bits,
* @context_bytes: pointer to the context bit array (DMA memory)
* @hmc_type: the type of HMC resource
**/
-static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
- u8 *context_bytes,
- enum i40e_hmc_lan_rsrc_type hmc_type)
+static int i40e_clear_hmc_context(struct i40e_hw *hw,
+ u8 *context_bytes,
+ enum i40e_hmc_lan_rsrc_type hmc_type)
{
/* clean the bit array */
memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);
@@ -931,9 +931,9 @@ static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
* @ce_info: a description of the struct to be filled
* @dest: the struct to be filled
**/
-static i40e_status i40e_set_hmc_context(u8 *context_bytes,
- struct i40e_context_ele *ce_info,
- u8 *dest)
+static int i40e_set_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
{
int f;

@@ -973,18 +973,18 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes,
* base pointer. This function is used for LAN Queue contexts.
**/
static
-i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
- enum i40e_hmc_lan_rsrc_type rsrc_type,
- u32 obj_idx)
+int i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
+ enum i40e_hmc_lan_rsrc_type rsrc_type,
+ u32 obj_idx)
{
struct i40e_hmc_info *hmc_info = &hw->hmc;
u32 obj_offset_in_sd, obj_offset_in_pd;
struct i40e_hmc_sd_entry *sd_entry;
struct i40e_hmc_pd_entry *pd_entry;
u32 pd_idx, pd_lmt, rel_pd_idx;
- i40e_status ret_code = 0;
u64 obj_offset_in_fpm;
u32 sd_idx, sd_lmt;
+ int ret_code = 0;

if (NULL == hmc_info) {
ret_code = I40E_ERR_BAD_PTR;
@@ -1042,11 +1042,11 @@ i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
* @hw: the hardware struct
* @queue: the queue we care about
**/
-i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue)
+int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue)
{
- i40e_status err;
u8 *context_bytes;
+ int err;

err = i40e_hmc_get_object_va(hw, &context_bytes,
I40E_HMC_LAN_TX, queue);
@@ -1062,12 +1062,12 @@ i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
* @queue: the queue we care about
* @s: the struct to be filled
**/
-i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_txq *s)
+int i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
{
- i40e_status err;
u8 *context_bytes;
+ int err;

err = i40e_hmc_get_object_va(hw, &context_bytes,
I40E_HMC_LAN_TX, queue);
@@ -1083,11 +1083,11 @@ i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
* @hw: the hardware struct
* @queue: the queue we care about
**/
-i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue)
+int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue)
{
- i40e_status err;
u8 *context_bytes;
+ int err;

err = i40e_hmc_get_object_va(hw, &context_bytes,
I40E_HMC_LAN_RX, queue);
@@ -1103,12 +1103,12 @@ i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
* @queue: the queue we care about
* @s: the struct to be filled
**/
-i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_rxq *s)
+int i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
{
- i40e_status err;
u8 *context_bytes;
+ int err;

err = i40e_hmc_get_object_va(hw, &context_bytes,
I40E_HMC_LAN_RX, queue);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index c46a2c449e60..9f960404c2b3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -137,22 +137,22 @@ struct i40e_hmc_lan_delete_obj_info {
u32 count;
};

-i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
- u32 rxq_num, u32 fcoe_cntx_num,
- u32 fcoe_filt_num);
-i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
- enum i40e_hmc_model model);
-i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
-
-i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue);
-i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_txq *s);
-i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue);
-i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_rxq *s);
+int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+int i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+int i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+int i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+int i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);

#endif /* _I40E_LAN_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 68f390ce4f6e..0e01b1927c1c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1817,13 +1817,13 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
spin_unlock_bh(&vsi->mac_filter_hash_lock);

if (vsi->type == I40E_VSI_MAIN) {
- i40e_status ret;
+ int ret;

ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
addr->sa_data, NULL);
if (ret)
- netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
- i40e_stat_str(hw, ret),
+ netdev_info(netdev, "Ignoring error from firmware on LAA update, status %d, AQ ret %s\n",
+ ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
}

@@ -1854,8 +1854,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot set RSS key, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Cannot set RSS key, err %d aq_err %s\n",
+ ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@@ -1866,8 +1866,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot set RSS lut, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Cannot set RSS lut, err %d aq_err %s\n",
+ ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@@ -2349,7 +2349,7 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
{
struct i40e_hw *hw = &vsi->back->hw;
enum i40e_admin_queue_err aq_status;
- i40e_status aq_ret;
+ int aq_ret;

aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
&aq_status);
@@ -2358,8 +2358,8 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
*retval = -EIO;
dev_info(&vsi->back->pdev->dev,
- "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
- vsi_name, i40e_stat_str(hw, aq_ret),
+ "ignoring delete macvlan error on %s, err %d, aq_err %s\n",
+ vsi_name, aq_ret,
i40e_aq_str(hw, aq_status));
}
}
@@ -2423,13 +2423,13 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
*
* Returns status indicating success or failure;
**/
-static i40e_status
+static int
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
struct i40e_mac_filter *f)
{
bool enable = f->state == I40E_FILTER_NEW;
struct i40e_hw *hw = &vsi->back->hw;
- i40e_status aq_ret;
+ int aq_ret;

if (f->vlan == I40E_VLAN_ANY) {
aq_ret = i40e_aq_set_vsi_broadcast(hw,
@@ -2468,7 +2468,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
- i40e_status aq_ret;
+ int aq_ret;

if (vsi->type == I40E_VSI_MAIN &&
pf->lan_veb != I40E_NO_VEB &&
@@ -2488,8 +2488,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "Set default VSI failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
+ "Set default VSI failed, err %d, aq_err %s\n",
+ aq_ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
}
} else {
@@ -2500,8 +2500,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
true);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "set unicast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
+ "set unicast promisc failed, err %d, aq_err %s\n",
+ aq_ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
}
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
@@ -2510,8 +2510,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
promisc, NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "set multicast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
+ "set multicast promisc failed, err %d, aq_err %s\n",
+ aq_ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
@@ -2541,12 +2541,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
unsigned int vlan_filters = 0;
char vsi_name[16] = "PF";
int filter_list_len = 0;
- i40e_status aq_ret = 0;
u32 changed_flags = 0;
struct hlist_node *h;
struct i40e_pf *pf;
int num_add = 0;
int num_del = 0;
+ int aq_ret = 0;
int retval = 0;
u16 cmd_flags;
int list_size;
@@ -2814,9 +2814,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
retval = i40e_aq_rc_to_posix(aq_ret,
hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set multi promisc failed on %s, err %s aq_err %s\n",
+ "set multi promisc failed on %s, err %d aq_err %s\n",
vsi_name,
- i40e_stat_str(hw, aq_ret),
+ aq_ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
@@ -2834,10 +2834,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
retval = i40e_aq_rc_to_posix(aq_ret,
hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
+ "Setting promiscuous %s failed on %s, err %d aq_err %s\n",
cur_promisc ? "on" : "off",
vsi_name,
- i40e_stat_str(hw, aq_ret),
+ aq_ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
@@ -2965,7 +2965,7 @@ int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
struct i40e_vsi_context ctxt;
- i40e_status ret;
+ int ret;

/* Don't modify stripping options if a port VLAN is active */
if (vsi->info.pvid)
@@ -2985,8 +2985,8 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "update vlan stripping failed, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
+ "update vlan stripping failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&vsi->back->hw,
vsi->back->hw.aq.asq_last_status));
}
@@ -2999,7 +2999,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
struct i40e_vsi_context ctxt;
- i40e_status ret;
+ int ret;

/* Don't modify stripping options if a port VLAN is active */
if (vsi->info.pvid)
@@ -3020,8 +3020,8 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "update vlan stripping failed, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
+ "update vlan stripping failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&vsi->back->hw,
vsi->back->hw.aq.asq_last_status));
}
@@ -3252,7 +3252,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
struct i40e_vsi_context ctxt;
- i40e_status ret;
+ int ret;

vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi->info.pvid = cpu_to_le16(vid);
@@ -3265,8 +3265,8 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "add pvid failed, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
+ "add pvid failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&vsi->back->hw,
vsi->back->hw.aq.asq_last_status));
return -ENOENT;
@@ -3429,8 +3429,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
u16 pf_q = vsi->base_queue + ring->queue_index;
struct i40e_hw *hw = &vsi->back->hw;
struct i40e_hmc_obj_txq tx_ctx;
- i40e_status err = 0;
u32 qtx_ctl = 0;
+ int err = 0;

if (ring_is_xdp(ring))
ring->xsk_pool = i40e_xsk_pool(ring);
@@ -3554,7 +3554,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
u16 pf_q = vsi->base_queue + ring->queue_index;
struct i40e_hw *hw = &vsi->back->hw;
struct i40e_hmc_obj_rxq rx_ctx;
- i40e_status err = 0;
+ int err = 0;
bool ok;
int ret;

@@ -5524,16 +5524,16 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
u32 tc_bw_max;
+ int ret;
int i;

/* Get the VSI level BW configuration */
ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi bw config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi bw config, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -5543,8 +5543,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi ets bw config, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -5585,7 +5585,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
{
struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
struct i40e_pf *pf = vsi->back;
- i40e_status ret;
+ int ret;
int i;

/* There is no need to reset BW when mqprio mode is on. */
@@ -5733,8 +5733,8 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)

ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
- dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ dev_info(&pf->pdev->dev, "Update vsi config failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@@ -5789,8 +5789,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
&bw_config, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed querying vsi bw info, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Failed querying vsi bw info, err %d aq_err %s\n",
+ ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@@ -5856,8 +5856,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Update vsi tc config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Update vsi tc config failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@@ -5869,8 +5869,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed updating vsi bw info, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Failed updating vsi bw info, err %d aq_err %s\n",
+ ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@@ -5961,8 +5961,8 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
I40E_MAX_BW_INACTIVE_ACCUM, NULL);
if (ret)
dev_err(&pf->pdev->dev,
- "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
- max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
+ "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %d aq_err %s\n",
+ max_tx_rate, seid, ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return ret;
}
@@ -6037,8 +6037,8 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret)
dev_info(&pf->pdev->dev,
- "Failed to delete cloud filter, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed to delete cloud filter, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, last_aq_status));
kfree(cfilter);
}
@@ -6172,8 +6172,8 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot set RSS lut, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Cannot set RSS lut, err %d aq_err %s\n",
+ ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
kfree(lut);
return ret;
@@ -6271,8 +6271,8 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "add new vsi failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "add new vsi failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
@@ -6303,7 +6303,7 @@ static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
u8 *bw_share)
{
struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
- i40e_status ret;
+ int ret;
int i;

memset(&bw_data, 0, sizeof(bw_data));
@@ -6339,9 +6339,9 @@ static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
struct i40e_vsi *vsi,
struct i40e_channel *ch)
{
- i40e_status ret;
- int i;
u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+ int ret;
+ int i;

/* Enable ETS TCs with equal BW Share for now across all VSIs */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -6517,8 +6517,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
mode, NULL);
if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
dev_err(&pf->pdev->dev,
- "couldn't set switch config bits, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "couldn't set switch config bits, err %d aq_err %s\n",
+ ret,
i40e_aq_str(hw,
hw->aq.asq_last_status));

@@ -6718,8 +6718,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
&bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "VEB bw config failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "VEB bw config failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -6728,8 +6728,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed getting veb bw config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed getting veb bw config, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}

@@ -6812,8 +6812,8 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
ret = i40e_aq_resume_port_tx(hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Resume Port Tx failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Resume Port Tx failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
@@ -6837,8 +6837,8 @@ static int i40e_suspend_port_tx(struct i40e_pf *pf)
ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Suspend Port Tx failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Suspend Port Tx failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
@@ -6877,8 +6877,8 @@ static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
ret = i40e_set_dcb_config(&pf->hw);
if (ret) {
dev_info(&pf->pdev->dev,
- "Set DCB Config failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Set DCB Config failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -6994,8 +6994,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
i40e_aqc_opc_modify_switching_comp_ets, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Modify Port ETS failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Modify Port ETS failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -7032,8 +7032,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
ret = i40e_aq_dcb_updated(&pf->hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "DCB Updated failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "DCB Updated failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -7116,8 +7116,8 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)
i40e_aqc_opc_enable_switching_comp_ets, NULL);
if (err) {
dev_info(&pf->pdev->dev,
- "Enable Port ETS failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "Enable Port ETS failed, err %d aq_err %s\n",
+ err,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
err = -ENOENT;
goto out;
@@ -7196,8 +7196,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
} else {
dev_info(&pf->pdev->dev,
- "Query for DCB configuration failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "Query for DCB configuration failed, err %d aq_err %s\n",
+ err,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}

@@ -7415,15 +7415,15 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
* @pf: board private structure
* @is_up: whether the link state should be forced up or down
**/
-static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
{
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_aq_set_phy_config config = {0};
bool non_zero_phy_type = is_up;
struct i40e_hw *hw = &pf->hw;
- i40e_status err;
u64 mask;
u8 speed;
+ int err;

/* Card might've been put in an unstable state by other drivers
* and applications, which causes incorrect speed values being
@@ -7435,8 +7435,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
NULL);
if (err) {
dev_err(&pf->pdev->dev,
- "failed to get phy cap., ret = %s last_status = %s\n",
- i40e_stat_str(hw, err),
+ "failed to get phy cap., ret = %d last_status = %s\n",
+ err,
i40e_aq_str(hw, hw->aq.asq_last_status));
return err;
}
@@ -7447,8 +7447,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
NULL);
if (err) {
dev_err(&pf->pdev->dev,
- "failed to get phy cap., ret = %s last_status = %s\n",
- i40e_stat_str(hw, err),
+ "failed to get phy cap., ret = %d last_status = %s\n",
+ err,
i40e_aq_str(hw, hw->aq.asq_last_status));
return err;
}
@@ -7492,8 +7492,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)

if (err) {
dev_err(&pf->pdev->dev,
- "set phy config ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, err),
+ "set phy config ret = %d last_status = %s\n",
+ err,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return err;
}
@@ -7656,11 +7656,11 @@ static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
* This function deletes a mac filter on the channel VSI which serves as the
* macvlan. Returns 0 on success.
**/
-static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
- const u8 *macaddr, int *aq_err)
+static int i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
+ const u8 *macaddr, int *aq_err)
{
struct i40e_aqc_remove_macvlan_element_data element;
- i40e_status status;
+ int status;

memset(&element, 0, sizeof(element));
ether_addr_copy(element.mac_addr, macaddr);
@@ -7682,12 +7682,12 @@ static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
* This function adds a mac filter on the channel VSI which serves as the
* macvlan. Returns 0 on success.
**/
-static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
- const u8 *macaddr, int *aq_err)
+static int i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
+ const u8 *macaddr, int *aq_err)
{
struct i40e_aqc_add_macvlan_element_data element;
- i40e_status status;
u16 cmd_flags = 0;
+ int status;

ether_addr_copy(element.mac_addr, macaddr);
element.vlan_tag = 0;
@@ -7833,8 +7833,8 @@ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
rx_ring->netdev = NULL;
}
dev_info(&pf->pdev->dev,
- "Error adding mac filter on macvlan err %s, aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Error adding mac filter on macvlan err %d, aq_err %s\n",
+ ret,
i40e_aq_str(hw, aq_err));
netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
}
@@ -7906,8 +7906,8 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Update vsi tc config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Update vsi tc config failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@@ -8122,8 +8122,8 @@ static void i40e_fwd_del(struct net_device *netdev, void *vdev)
ch->fwd = NULL;
} else {
dev_info(&pf->pdev->dev,
- "Error deleting mac filter on macvlan err %s, aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Error deleting mac filter on macvlan err %d, aq_err %s\n",
+ ret,
i40e_aq_str(hw, aq_err));
}
break;
@@ -8874,8 +8874,7 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
kfree(filter);
if (err) {
dev_err(&pf->pdev->dev,
- "Failed to delete cloud filter, err %s\n",
- i40e_stat_str(&pf->hw, err));
+ "Failed to delete cloud filter, err %d\n", err);
return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
}

@@ -9437,8 +9436,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
} else {
dev_info(&pf->pdev->dev,
- "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed querying DCB configuration data from firmware, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@@ -9886,8 +9885,8 @@ static void i40e_link_event(struct i40e_pf *pf)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
u8 new_link_speed, old_link_speed;
- i40e_status status;
bool new_link, old_link;
+ int status;
#ifdef CONFIG_I40E_DCB
int err;
#endif /* CONFIG_I40E_DCB */
@@ -10098,9 +10097,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
struct i40e_arq_event_info event;
struct i40e_hw *hw = &pf->hw;
u16 pending, i = 0;
- i40e_status ret;
u16 opcode;
u32 oldval;
+ int ret;
u32 val;

/* Do not run clean AQ when PF reset fails */
@@ -10264,8 +10263,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi config, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return;
}
@@ -10276,8 +10275,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi switch failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "update vsi switch failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
}
@@ -10300,8 +10299,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi config, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return;
}
@@ -10312,8 +10311,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi switch failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "update vsi switch failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
}
@@ -10457,8 +10456,8 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
buf_len = data_size;
} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
dev_info(&pf->pdev->dev,
- "capability discovery failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "capability discovery failed, err %d aq_err %s\n",
+ err,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENODEV;
@@ -10579,7 +10578,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
struct i40e_cloud_filter *cfilter;
struct i40e_pf *pf = vsi->back;
struct hlist_node *node;
- i40e_status ret;
+ int ret;

/* Add cloud filters back if they exist */
hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
@@ -10595,8 +10594,8 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)

if (ret) {
dev_dbg(&pf->pdev->dev,
- "Failed to rebuild cloud filter, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed to rebuild cloud filter, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return ret;
@@ -10614,7 +10613,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
static int i40e_rebuild_channels(struct i40e_vsi *vsi)
{
struct i40e_channel *ch, *ch_tmp;
- i40e_status ret;
+ int ret;

if (list_empty(&vsi->ch_list))
return 0;
@@ -10690,7 +10689,7 @@ static void i40e_clean_xps_state(struct i40e_vsi *vsi)
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret = 0;
+ int ret = 0;
u32 v;

clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
@@ -10795,7 +10794,7 @@ static void i40e_get_oem_version(struct i40e_hw *hw)
static int i40e_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ int ret;

ret = i40e_pf_reset(hw);
if (ret) {
@@ -10820,7 +10819,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ int ret;
u32 val;
int v;

@@ -10836,8 +10835,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
ret = i40e_init_adminq(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto clear_recovery;
}
@@ -10948,8 +10947,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (ret)
- dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ dev_info(&pf->pdev->dev, "set phy mask fail, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

/* Rebuild the VSIs and VEBs that existed before reset.
@@ -11052,8 +11051,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
msleep(75);
ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (ret)
- dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ dev_info(&pf->pdev->dev, "link restart failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@@ -11084,9 +11083,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
ret = i40e_set_promiscuous(pf, pf->cur_promisc);
if (ret)
dev_warn(&pf->pdev->dev,
- "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
+ "Failed to restore promiscuous setting: %s, err %d aq_err %s\n",
pf->cur_promisc ? "on" : "off",
- i40e_stat_str(&pf->hw, ret),
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

i40e_reset_all_vfs(pf, true);
@@ -12220,8 +12219,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
(struct i40e_aqc_get_set_rss_key_data *)seed);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot get RSS key, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Cannot get RSS key, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return ret;
@@ -12234,8 +12233,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot get RSS lut, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Cannot get RSS lut, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return ret;
@@ -12510,11 +12509,11 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
* i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
+int i40e_get_partition_bw_setting(struct i40e_pf *pf)
{
- i40e_status status;
bool min_valid, max_valid;
u32 max_bw, min_bw;
+ int status;

status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
&min_valid, &max_valid);
@@ -12533,10 +12532,10 @@ i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
* i40e_set_partition_bw_setting - Set BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
+int i40e_set_partition_bw_setting(struct i40e_pf *pf)
{
struct i40e_aqc_configure_partition_bw_data bw_data;
- i40e_status status;
+ int status;

memset(&bw_data, 0, sizeof(bw_data));

@@ -12555,12 +12554,12 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
* i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
{
/* Commit temporary BW setting to permanent NVM image */
enum i40e_admin_queue_err last_aq_status;
- i40e_status ret;
u16 nvm_word;
+ int ret;

if (pf->hw.partition_id != 1) {
dev_info(&pf->pdev->dev,
@@ -12575,8 +12574,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for read access, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Cannot acquire NVM for read access, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -12592,8 +12591,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
i40e_release_nvm(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -12606,8 +12605,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for write access, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Cannot acquire NVM for write access, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -12626,8 +12625,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
i40e_release_nvm(&pf->hw);
if (ret)
dev_info(&pf->pdev->dev,
- "BW settings NOT SAVED, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "BW settings NOT SAVED, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:

@@ -12648,7 +12647,7 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
- i40e_status read_status = I40E_SUCCESS;
+ int read_status = I40E_SUCCESS;
u16 sr_emp_sr_settings_ptr = 0;
u16 features_enable = 0;
u16 link_behavior = 0;
@@ -12681,8 +12680,8 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)

err_nvm:
dev_warn(&pf->pdev->dev,
- "total-port-shutdown feature is off due to read nvm error: %s\n",
- i40e_stat_str(&pf->hw, read_status));
+ "total-port-shutdown feature is off due to read nvm error: %d\n",
+ read_status);
return ret;
}

@@ -13001,7 +13000,7 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
u8 type, filter_index;
- i40e_status ret;
+ int ret;

type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
I40E_AQC_TUNNEL_TYPE_NGE;
@@ -13009,8 +13008,8 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
NULL);
if (ret) {
- netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ netdev_info(netdev, "add UDP port failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@@ -13025,12 +13024,12 @@ static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
- i40e_status ret;
+ int ret;

ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
if (ret) {
- netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ netdev_info(netdev, "delete UDP port failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@@ -13919,8 +13918,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi config, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
@@ -13949,8 +13948,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "update vsi failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
ret = -ENOENT;
@@ -13969,8 +13968,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "update vsi failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
ret = -ENOENT;
@@ -13992,9 +13991,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
* message and continue
*/
dev_info(&pf->pdev->dev,
- "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
+ "failed to configure TCs for main VSI tc_map 0x%08x, err %d aq_err %s\n",
enabled_tc,
- i40e_stat_str(&pf->hw, ret),
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@@ -14088,8 +14087,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "add vsi failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "add vsi failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
ret = -ENOENT;
@@ -14120,8 +14119,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get vsi bw info, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get vsi bw info, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* VSI is already added so not tearing that up */
ret = 0;
@@ -14567,8 +14566,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
&bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "query veb bw config failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "query veb bw config failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
@@ -14577,8 +14576,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
&ets_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "query veb bw ets config failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "query veb bw ets config failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
@@ -14774,8 +14773,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
/* get a VEB from the hardware */
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't add VEB, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't add VEB, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EPERM;
}
@@ -14785,16 +14784,16 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
&veb->stats_idx, NULL, NULL, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get VEB statistics idx, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get VEB statistics idx, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EPERM;
}
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get VEB bw info, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get VEB bw info, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
return -ENOENT;
@@ -15004,8 +15003,8 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
&next_seid, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "get switch config failed err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "get switch config failed err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
kfree(aq_buf);
@@ -15050,8 +15049,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
ret = i40e_fetch_switch_configuration(pf, false);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't fetch switch config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't fetch switch config, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return ret;
}
@@ -15077,8 +15076,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
NULL);
if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
- "couldn't set switch config bits, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't set switch config bits, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
@@ -15415,13 +15414,12 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf)
*
* Return 0 on success, negative on failure.
**/
-static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
+static int i40e_pf_loop_reset(struct i40e_pf *pf)
{
/* wait max 10 seconds for PF reset to succeed */
const unsigned long time_end = jiffies + 10 * HZ;
-
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ int ret;

ret = i40e_pf_reset(hw);
while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
@@ -15467,9 +15465,9 @@ static bool i40e_check_fw_empr(struct i40e_pf *pf)
* Return 0 if NIC is healthy or negative value when there are issues
* with resets
**/
-static i40e_status i40e_handle_resets(struct i40e_pf *pf)
+static int i40e_handle_resets(struct i40e_pf *pf)
{
- const i40e_status pfr = i40e_pf_loop_reset(pf);
+ const int pfr = i40e_pf_loop_reset(pf);
const bool is_empr = i40e_check_fw_empr(pf);

if (is_empr || pfr != I40E_SUCCESS)
@@ -15608,13 +15606,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct i40e_aq_get_phy_abilities_resp abilities;
#ifdef CONFIG_I40E_DCB
enum i40e_get_fw_lldp_status_resp lldp_status;
- i40e_status status;
#endif /* CONFIG_I40E_DCB */
struct i40e_pf *pf;
struct i40e_hw *hw;
static u16 pfs_found;
u16 wol_nvm_bits;
u16 link_status;
+#ifdef CONFIG_I40E_DCB
+ int status;
+#endif /* CONFIG_I40E_DCB */
int err;
u32 val;
u32 i;
@@ -15983,8 +15983,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (err)
- dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ dev_info(&pf->pdev->dev, "set phy mask fail, err %d aq_err %s\n",
+ err,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

/* Reconfigure hardware for allowing smaller MSS in the case
@@ -16002,8 +16002,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
msleep(75);
err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (err)
- dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ dev_info(&pf->pdev->dev, "link restart failed, err %d aq_err %s\n",
+ err,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@@ -16135,8 +16135,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get the requested speeds from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
if (err)
- dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, err),
+ dev_dbg(&pf->pdev->dev, "get requested speeds ret = %d last_status = %s\n",
+ err,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

@@ -16146,8 +16146,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get the supported phy types from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
if (err)
- dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, err),
+ dev_dbg(&pf->pdev->dev, "get supported phy types ret = %d last_status = %s\n",
+ err,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

/* make sure the MFS hasn't been set lower than the default */
@@ -16218,7 +16218,7 @@ static void i40e_remove(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw;
- i40e_status ret_code;
+ int ret_code;
int i;

i40e_dbg_pf_exit(pf);
@@ -16466,9 +16466,9 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
u8 mac_addr[6];
u16 flags = 0;
+ int ret;

/* Get current MAC address in case it's an LAA */
if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 3a38bf8bcde7..17e3f26eee4a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -13,10 +13,10 @@
* in this file) as an equivalent of the FLASH part mapped into the SR.
* We are accessing FLASH always thru the Shadow RAM.
**/
-i40e_status i40e_init_nvm(struct i40e_hw *hw)
+int i40e_init_nvm(struct i40e_hw *hw)
{
struct i40e_nvm_info *nvm = &hw->nvm;
- i40e_status ret_code = 0;
+ int ret_code = 0;
u32 fla, gens;
u8 sr_size;

@@ -52,12 +52,12 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
* This function will request NVM ownership for reading
* via the proper Admin Command.
**/
-i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
- enum i40e_aq_resource_access_type access)
+int i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access)
{
- i40e_status ret_code = 0;
u64 gtime, timeout;
u64 time_left = 0;
+ int ret_code = 0;

if (hw->nvm.blank_nvm_mode)
goto i40e_i40e_acquire_nvm_exit;
@@ -111,7 +111,7 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
**/
void i40e_release_nvm(struct i40e_hw *hw)
{
- i40e_status ret_code = I40E_SUCCESS;
+ int ret_code = I40E_SUCCESS;
u32 total_delay = 0;

if (hw->nvm.blank_nvm_mode)
@@ -138,9 +138,9 @@ void i40e_release_nvm(struct i40e_hw *hw)
*
* Polls the SRCTL Shadow RAM register done bit.
**/
-static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
- i40e_status ret_code = I40E_ERR_TIMEOUT;
+ int ret_code = I40E_ERR_TIMEOUT;
u32 srctl, wait_cnt;

/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
@@ -165,10 +165,10 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
*
* Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
-static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
- u16 *data)
+static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
- i40e_status ret_code = I40E_ERR_TIMEOUT;
+ int ret_code = I40E_ERR_TIMEOUT;
u32 sr_reg;

if (offset >= hw->nvm.sr_size) {
@@ -216,13 +216,13 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
*
* Writes a 16 bit words buffer to the Shadow RAM using the admin command.
**/
-static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
- u8 module_pointer, u32 offset,
- u16 words, void *data,
- bool last_command)
+static int i40e_read_nvm_aq(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data,
+ bool last_command)
{
- i40e_status ret_code = I40E_ERR_NVM;
struct i40e_asq_cmd_details cmd_details;
+ int ret_code = I40E_ERR_NVM;

memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@@ -264,10 +264,10 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
*
* Reads one 16 bit word from the Shadow RAM using the AdminQ
**/
-static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
- u16 *data)
+static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
- i40e_status ret_code = I40E_ERR_TIMEOUT;
+ int ret_code = I40E_ERR_TIMEOUT;

ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
*data = le16_to_cpu(*(__le16 *)data);
@@ -286,8 +286,8 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
* Do not use this function except in cases where the nvm lock is already
* taken via i40e_acquire_nvm().
**/
-static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
- u16 offset, u16 *data)
+static int __i40e_read_nvm_word(struct i40e_hw *hw,
+ u16 offset, u16 *data)
{
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
return i40e_read_nvm_word_aq(hw, offset, data);
@@ -303,10 +303,10 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
*
* Reads one 16 bit word from the Shadow RAM.
**/
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data)
+int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;

if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
@@ -330,17 +330,17 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
* @words_data_size: Words to read from NVM
* @data_ptr: Pointer to memory location where resulting buffer will be stored
**/
-enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
- u8 module_ptr,
- u16 module_offset,
- u16 data_offset,
- u16 words_data_size,
- u16 *data_ptr)
+int i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr)
{
- i40e_status status;
u16 specific_ptr = 0;
u16 ptr_value = 0;
u32 offset = 0;
+ int status;

if (module_ptr != 0) {
status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
@@ -406,10 +406,10 @@ enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+static int i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
u16 index, word;

/* Loop thru the selected region */
@@ -437,13 +437,13 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+static int i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
- i40e_status ret_code;
- u16 read_size;
bool last_cmd = false;
u16 words_read = 0;
+ u16 read_size;
+ int ret_code;
u16 i = 0;

do {
@@ -493,9 +493,9 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
* Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
* method.
**/
-static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
- u16 offset, u16 *words,
- u16 *data)
+static int __i40e_read_nvm_buffer(struct i40e_hw *hw,
+ u16 offset, u16 *words,
+ u16 *data)
{
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
return i40e_read_nvm_buffer_aq(hw, offset, words, data);
@@ -514,10 +514,10 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;

if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
@@ -544,12 +544,12 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
*
* Writes a 16 bit words buffer to the Shadow RAM using the admin command.
**/
-static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 words, void *data,
- bool last_command)
+static int i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command)
{
- i40e_status ret_code = I40E_ERR_NVM;
struct i40e_asq_cmd_details cmd_details;
+ int ret_code = I40E_ERR_NVM;

memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@@ -594,14 +594,14 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
* is customer specific and unknown. Therefore, this function skips all maximum
* possible size of VPD (1kB).
**/
-static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
- u16 *checksum)
+static int i40e_calc_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
{
- i40e_status ret_code;
struct i40e_virt_mem vmem;
u16 pcie_alt_module = 0;
u16 checksum_local = 0;
u16 vpd_module = 0;
+ int ret_code;
u16 *data;
u16 i = 0;

@@ -675,11 +675,11 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
* on ARQ completion event reception by caller.
* This function will commit SR to NVM.
**/
-i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
+int i40e_update_nvm_checksum(struct i40e_hw *hw)
{
- i40e_status ret_code;
- u16 checksum;
__le16 le_sum;
+ int ret_code;
+ u16 checksum;

ret_code = i40e_calc_nvm_checksum(hw, &checksum);
if (!ret_code) {
@@ -699,12 +699,12 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
* Performs checksum calculation and validates the NVM SW checksum. If the
* caller does not need checksum, the value can be NULL.
**/
-i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
- u16 *checksum)
+int i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
{
- i40e_status ret_code = 0;
- u16 checksum_sr = 0;
u16 checksum_local = 0;
+ u16 checksum_sr = 0;
+ int ret_code = 0;

/* We must acquire the NVM lock in order to correctly synchronize the
* NVM accesses across multiple PFs. Without doing so it is possible
@@ -733,36 +733,36 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
return ret_code;
}

-static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno);
+static int i40e_nvmupd_state_init(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *errno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
int *perrno);
-static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno);
-static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
+static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno);
+static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
static inline u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@@ -807,12 +807,12 @@ static const char * const i40e_nvm_update_state_str[] = {
*
* Dispatches command depending on what update state is current
**/
-i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+int i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status;
enum i40e_nvmupd_cmd upd_cmd;
+ int status;

/* assume success */
*perrno = 0;
@@ -923,12 +923,12 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
* Process legitimate commands of the Init state and conditionally set next
* state. Reject all other commands.
**/
-static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_state_init(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
+ int status = 0;

upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

@@ -1062,12 +1062,12 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands.
**/
-static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
+ int status = 0;

upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

@@ -1104,13 +1104,13 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands
**/
-static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
bool retry_attempt = false;
+ int status = 0;

upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

@@ -1187,8 +1187,8 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
*/
if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
!retry_attempt) {
- i40e_status old_status = status;
u32 old_asq_status = hw->aq.asq_last_status;
+ int old_status = status;
u32 gtime;

gtime = rd32(hw, I40E_GLVFGEN_TIMER);
@@ -1370,17 +1370,17 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
*
* cmd structure contains identifiers and data buffer
**/
-static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
struct i40e_asq_cmd_details cmd_details;
- i40e_status status;
struct i40e_aq_desc *aq_desc;
u32 buff_size = 0;
u8 *buff = NULL;
u32 aq_desc_len;
u32 aq_data_len;
+ int status;

i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
if (cmd->offset == 0xffff)
@@ -1429,8 +1429,8 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
buff_size, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_exec_aq err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "%s err %d aq_err %s\n",
+ __func__, status,
i40e_aq_str(hw, hw->aq.asq_last_status));
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
return status;
@@ -1454,9 +1454,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
*
* cmd structure contains identifiers and data buffer
**/
-static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
u32 aq_total_len;
u32 aq_desc_len;
@@ -1523,9 +1523,9 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
*
* cmd structure contains identifiers and data buffer
**/
-static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
u32 aq_total_len;
u32 aq_desc_len;
@@ -1557,13 +1557,13 @@ static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
*
* cmd structure contains identifiers and data buffer
**/
-static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
struct i40e_asq_cmd_details cmd_details;
- i40e_status status;
u8 module, transaction;
+ int status;
bool last;

transaction = i40e_nvmupd_get_transaction(cmd->config);
@@ -1596,13 +1596,13 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
*
* module, offset, data_size and data are in cmd structure
**/
-static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno)
+static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno)
{
- i40e_status status = 0;
struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
+ int status = 0;
bool last;

transaction = i40e_nvmupd_get_transaction(cmd->config);
@@ -1636,14 +1636,14 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
*
* module, offset, data_size and data are in cmd structure
**/
-static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status = 0;
struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
u8 preservation_flags;
+ int status = 0;
bool last;

transaction = i40e_nvmupd_get_transaction(cmd->config);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 2f6815b2f8df..2bd4de03dafa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -56,5 +56,4 @@ do { \
(h)->bus.func, ##__VA_ARGS__); \
} while (0)

-typedef enum i40e_status_code i40e_status;
#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index ebdcde6f1aeb..c9c3726eafbe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -16,29 +16,29 @@
*/

/* adminq functions */
-i40e_status i40e_init_adminq(struct i40e_hw *hw);
+int i40e_init_adminq(struct i40e_hw *hw);
void i40e_shutdown_adminq(struct i40e_hw *hw);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
-i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
- struct i40e_arq_event_info *e,
- u16 *events_pending);
-i40e_status
+int i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+int
i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status
+int
i40e_asq_send_command_v2(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
enum i40e_admin_queue_err *aq_status);
-i40e_status
+int
i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context);
-i40e_status
+int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
@@ -53,324 +53,307 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,

void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
-const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);

-i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
- bool pf_lut, u8 *lut, u16 lut_size);
-i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
- bool pf_lut, u8 *lut, u16 lut_size);
-i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_get_set_rss_key_data *key);
-i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_get_set_rss_key_data *key);
+int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+int i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+int i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);

u32 i40e_led_get(struct i40e_hw *hw);
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
-i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
- u16 led_addr, u32 mode);
-i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
- u16 *val);
-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
+int i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode);
+int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val);
+int i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);

/* admin send queue commands */

-i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
- u16 *fw_major_version, u16 *fw_minor_version,
- u32 *fw_build,
- u16 *api_major_version, u16 *api_minor_version,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
- u32 reg_addr, u64 reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
- u32 reg_addr, u64 *reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
- bool qualified_modules, bool report_init,
- struct i40e_aq_get_phy_abilities_resp *abilities,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
- struct i40e_aq_set_phy_config *config,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
- bool atomic_reset);
-i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
- bool enable_link,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
- bool enable_lse, struct i40e_link_status *link,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
- u64 advt_reg,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+int i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_debug_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, bool atomic_reset);
+int i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_driver_version *dv,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
- u16 vsi_id, bool set_filter,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
- bool rx_only_promisc);
-i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable,
- u16 vid,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable,
- u16 vid,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable, u16 vid,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
- u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *pveb_seid,
- bool enable_stats,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
- u16 veb_seid, u16 *switch_id, bool *floating,
- u16 *statistic_index, u16 *vebs_used,
- u16 *vebs_free,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+int i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 vsi_id, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc);
+int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *pveb_seid,
+ bool enable_stats,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id, bool *floating,
+ u16 *statistic_index, u16 *vebs_used,
+ u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status
-i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_macvlan_element_data *mv_list,
- u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status);
-i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_remove_macvlan_element_data *mv_list,
- u16 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status
-i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_remove_macvlan_element_data *mv_list,
- u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status);
-i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free);
-i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free);
+int i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
+int i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
+int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free);
+int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rules_used, u16 *rules_free);

-i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
- u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
- struct i40e_aqc_get_switch_config_resp *buf,
- u16 buf_size, u16 *start_seid,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
- u16 flags,
- u16 valid_flags, u8 mode,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
- enum i40e_aq_resources_ids resource,
- enum i40e_aq_resource_access_type access,
- u8 sdp_number, u64 *timeout,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
- enum i40e_aq_resources_ids resource,
- u8 sdp_number,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, void *data,
- bool last_command,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, bool last_command,
+int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
- void *buff, u16 buff_size, u16 *data_size,
- enum i40e_admin_queue_opc list_type_opc,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, void *data,
- bool last_command, u8 preservation_flags,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
- u8 mib_type, void *buff, u16 buff_size,
- u16 *local_len, u16 *remote_len,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
-i40e_aq_set_lldp_mib(struct i40e_hw *hw,
- u8 mib_type, void *buff, u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
- bool enable_update,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
-i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+int i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags, u8 mode,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
- bool persist,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
- bool dcb_enable,
- struct i40e_asq_cmd_details
- *cmd_details);
-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 preservation_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ u8 mib_type, void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
+ bool dcb_enable,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
- void *buff, u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 protocol_index,
- u8 *filter_index,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
- u16 flags, u8 *mac_addr,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_credit,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
- u16 seid, u16 credit, u8 max_bw,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+int i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
- enum i40e_admin_queue_opc opcode,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_port_ets_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
+int i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
-i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_cloud_filters_element_bb *filters,
- u8 filter_count);
-enum i40e_status_code
-i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,
- struct i40e_aqc_cloud_filters_element_data *filters,
- u8 filter_count);
-enum i40e_status_code
-i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,
- struct i40e_aqc_cloud_filters_element_data *filters,
- u8 filter_count);
-enum i40e_status_code
-i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_cloud_filters_element_bb *filters,
- u8 filter_count);
-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
- struct i40e_lldp_variables *lldp_cfg);
-enum i40e_status_code
-i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
- struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count);
+int i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,
+ struct i40e_aqc_cloud_filters_element_data *filters,
+ u8 filter_count);
+int i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,
+ struct i40e_aqc_cloud_filters_element_data *filters,
+ u8 filter_count);
+int i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count);
+int i40e_read_lldp_cfg(struct i40e_hw *hw, struct i40e_lldp_variables *lldp_cfg);
+int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
/* i40e_common */
-i40e_status i40e_init_shared_code(struct i40e_hw *hw);
-i40e_status i40e_pf_reset(struct i40e_hw *hw);
+int i40e_init_shared_code(struct i40e_hw *hw);
+int i40e_pf_reset(struct i40e_hw *hw);
void i40e_clear_hw(struct i40e_hw *hw);
void i40e_clear_pxe_mode(struct i40e_hw *hw);
-i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
-i40e_status i40e_update_link_info(struct i40e_hw *hw);
-i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
-i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
- u32 *max_bw, u32 *min_bw, bool *min_valid,
- bool *max_valid);
-i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
- struct i40e_aqc_configure_partition_bw_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
-i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
- u32 pba_num_size);
-i40e_status i40e_validate_mac_addr(u8 *mac_addr);
+int i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+int i40e_update_link_info(struct i40e_hw *hw);
+int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw, bool *min_valid,
+ bool *max_valid);
+int i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, u32 pba_num_size);
+int i40e_validate_mac_addr(u8 *mac_addr);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
/* prototype for functions used for NVM access */
-i40e_status i40e_init_nvm(struct i40e_hw *hw);
-i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
- enum i40e_aq_resource_access_type access);
+int i40e_init_nvm(struct i40e_hw *hw);
+int i40e_acquire_nvm(struct i40e_hw *hw, enum i40e_aq_resource_access_type access);
void i40e_release_nvm(struct i40e_hw *hw);
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data);
-enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
- u8 module_ptr,
- u16 module_offset,
- u16 data_offset,
- u16 words_data_size,
- u16 *data_ptr);
-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data);
-i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
-i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
- u16 *checksum);
-i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *);
+int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data);
+int i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr);
+int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data);
+int i40e_update_nvm_checksum(struct i40e_hw *hw);
+int i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum);
+int i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *);
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
struct i40e_aq_desc *desc);
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);

-i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+int i40e_set_mac_type(struct i40e_hw *hw);

extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];

@@ -419,41 +402,41 @@ i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct virtchnl_vf_resource *msg);
-i40e_status i40e_vf_reset(struct i40e_hw *hw);
-i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum virtchnl_ops v_opcode,
- i40e_status v_retval,
- u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_set_filter_control(struct i40e_hw *hw,
- struct i40e_filter_control_settings *settings);
-i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
- u8 *mac_addr, u16 ethtype, u16 flags,
- u16 vsi_seid, u16 queue, bool is_add,
- struct i40e_control_filter_stats *stats,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
- u8 table_id, u32 start_index, u16 buff_size,
- void *buff, u16 *ret_buff_size,
- u8 *ret_next_table, u32 *ret_next_index,
- struct i40e_asq_cmd_details *cmd_details);
+int i40e_vf_reset(struct i40e_hw *hw);
+int i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum virtchnl_ops v_opcode,
+ int v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details);
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 vsi_seid);
-i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
-i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-enum i40e_status_code
+int
i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
u8 phy_select, u8 dev_addr, bool page_change,
bool set_mdio, u8 mdio_num,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
+int
i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
u8 phy_select, u8 dev_addr, bool page_change,
bool set_mdio, u8 mdio_num,
@@ -466,43 +449,43 @@ i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
#define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \
i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)

-i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
- u16 reg, u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
- u16 reg, u8 phy_addr, u16 value);
-i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value);
-i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 value);
+int i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value);
+int i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value);
+int i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+int i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 *value);
+int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
-i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
- u16 buff_size, u32 track_id,
- u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *
- cmd_details);
-i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
- u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *
- cmd_details);
+int i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
+int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *
+ cmd_details);
+int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *
+ cmd_details);
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
struct i40e_profile_section_header *
i40e_find_section_in_profile(u32 section_type,
struct i40e_profile_segment *profile);
-enum i40e_status_code
+int
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
-enum i40e_status_code
+int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
-enum i40e_status_code
+int
i40e_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index db3714a65dc7..4d2782e76038 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -9,65 +9,30 @@ enum i40e_status_code {
I40E_SUCCESS = 0,
I40E_ERR_NVM = -1,
I40E_ERR_NVM_CHECKSUM = -2,
- I40E_ERR_PHY = -3,
I40E_ERR_CONFIG = -4,
I40E_ERR_PARAM = -5,
- I40E_ERR_MAC_TYPE = -6,
I40E_ERR_UNKNOWN_PHY = -7,
- I40E_ERR_LINK_SETUP = -8,
- I40E_ERR_ADAPTER_STOPPED = -9,
I40E_ERR_INVALID_MAC_ADDR = -10,
I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
- I40E_ERR_PRIMARY_REQUESTS_PENDING = -12,
- I40E_ERR_INVALID_LINK_SETTINGS = -13,
- I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
I40E_ERR_RESET_FAILED = -15,
- I40E_ERR_SWFW_SYNC = -16,
I40E_ERR_NO_AVAILABLE_VSI = -17,
I40E_ERR_NO_MEMORY = -18,
I40E_ERR_BAD_PTR = -19,
- I40E_ERR_RING_FULL = -20,
- I40E_ERR_INVALID_PD_ID = -21,
- I40E_ERR_INVALID_QP_ID = -22,
- I40E_ERR_INVALID_CQ_ID = -23,
- I40E_ERR_INVALID_CEQ_ID = -24,
- I40E_ERR_INVALID_AEQ_ID = -25,
I40E_ERR_INVALID_SIZE = -26,
- I40E_ERR_INVALID_ARP_INDEX = -27,
- I40E_ERR_INVALID_FPM_FUNC_ID = -28,
- I40E_ERR_QP_INVALID_MSG_SIZE = -29,
- I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
- I40E_ERR_INVALID_FRAG_COUNT = -31,
I40E_ERR_QUEUE_EMPTY = -32,
- I40E_ERR_INVALID_ALIGNMENT = -33,
- I40E_ERR_FLUSHED_QUEUE = -34,
- I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
- I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
I40E_ERR_TIMEOUT = -37,
- I40E_ERR_OPCODE_MISMATCH = -38,
- I40E_ERR_CQP_COMPL_ERROR = -39,
- I40E_ERR_INVALID_VF_ID = -40,
- I40E_ERR_INVALID_HMCFN_ID = -41,
- I40E_ERR_BACKING_PAGE_ERROR = -42,
- I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
- I40E_ERR_INVALID_PBLE_INDEX = -44,
I40E_ERR_INVALID_SD_INDEX = -45,
I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
I40E_ERR_INVALID_SD_TYPE = -47,
- I40E_ERR_MEMCPY_FAILED = -48,
I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
- I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
- I40E_ERR_SRQ_ENABLED = -52,
I40E_ERR_ADMIN_QUEUE_ERROR = -53,
I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
I40E_ERR_BUF_TOO_SHORT = -55,
I40E_ERR_ADMIN_QUEUE_FULL = -56,
I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
- I40E_ERR_BAD_IWARP_CQE = -58,
I40E_ERR_NVM_BLANK_MODE = -59,
I40E_ERR_NOT_IMPLEMENTED = -60,
- I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
I40E_ERR_DIAG_TEST_FAILED = -62,
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 635f93d60318..cb7cf672f697 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -17,7 +17,7 @@
**/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
enum virtchnl_ops v_opcode,
- i40e_status v_retval, u8 *msg,
+ int v_retval, u8 *msg,
u16 msglen)
{
struct i40e_hw *hw = &pf->hw;
@@ -1246,13 +1246,13 @@ static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
* @vl: List of VLANs - apply filter for given VLANs
* @num_vlans: Number of elements in @vl
**/
-static i40e_status
+static int
i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
bool unicast_enable, s16 *vl, u16 num_vlans)
{
- i40e_status aq_ret, aq_tmp = 0;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
+ int aq_ret, aq_tmp = 0;
int i;

/* No VLAN to set promisc on, set on VSI */
@@ -1264,9 +1264,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;

dev_err(&pf->pdev->dev,
- "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ "VF %d failed to set multicast promiscuous mode err %d aq_err %s\n",
vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
+ aq_ret,
i40e_aq_str(&pf->hw, aq_err));

return aq_ret;
@@ -1280,9 +1280,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;

dev_err(&pf->pdev->dev,
- "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+ "VF %d failed to set unicast promiscuous mode err %d aq_err %s\n",
vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
+ aq_ret,
i40e_aq_str(&pf->hw, aq_err));
}

@@ -1297,9 +1297,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;

dev_err(&pf->pdev->dev,
- "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ "VF %d failed to set multicast promiscuous mode err %d aq_err %s\n",
vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
+ aq_ret,
i40e_aq_str(&pf->hw, aq_err));

if (!aq_tmp)
@@ -1313,9 +1313,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;

dev_err(&pf->pdev->dev,
- "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+ "VF %d failed to set unicast promiscuous mode err %d aq_err %s\n",
vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
+ aq_ret,
i40e_aq_str(&pf->hw, aq_err));

if (!aq_tmp)
@@ -1339,13 +1339,13 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
* Called from the VF to configure the promiscuous mode of
* VF vsis and from the VF reset path to reset promiscuous mode.
**/
-static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
- u16 vsi_id,
- bool allmulti,
- bool alluni)
+static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+ u16 vsi_id,
+ bool allmulti,
+ bool alluni)
{
- i40e_status aq_ret = I40E_SUCCESS;
struct i40e_pf *pf = vf->pf;
+ int aq_ret = I40E_SUCCESS;
struct i40e_vsi *vsi;
u16 num_vlans;
s16 *vl;
@@ -1955,7 +1955,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
struct i40e_pf *pf;
struct i40e_hw *hw;
int abs_vf_id;
- i40e_status aq_ret;
+ int aq_ret;

/* validate the request */
if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
@@ -1987,7 +1987,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
**/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
enum virtchnl_ops opcode,
- i40e_status retval)
+ int retval)
{
return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
@@ -2091,9 +2091,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_vf_resource *vfres = NULL;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
int num_vsis = 1;
+ int aq_ret = 0;
size_t len = 0;
int ret;

@@ -2221,9 +2221,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_promisc_info *info =
(struct virtchnl_promisc_info *)msg;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
bool allmulti = false;
bool alluni = false;
+ int aq_ret = 0;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -2308,10 +2308,10 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_queue_pair_info *qpi;
u16 vsi_id, vsi_queue_id = 0;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
int i, j = 0, idx = 0;
struct i40e_vsi *vsi;
u16 num_qps_all = 0;
+ int aq_ret = 0;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -2458,8 +2458,8 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_irq_map_info *irqmap_info =
(struct virtchnl_irq_map_info *)msg;
struct virtchnl_vector_map *map;
+ int aq_ret = 0;
u16 vsi_id;
- i40e_status aq_ret = 0;
int i;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -2574,7 +2574,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
@@ -2632,7 +2632,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -2783,7 +2783,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_eth_stats stats;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
struct i40e_vsi *vsi;

memset(&stats, 0, sizeof(struct i40e_eth_stats));
@@ -2926,7 +2926,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status ret = 0;
+ int ret = 0;
int i;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
@@ -2998,7 +2998,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
bool was_unimac_deleted = false;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status ret = 0;
+ int ret = 0;
int i;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
@@ -3071,7 +3071,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_vlan_filter_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i;

if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
@@ -3142,7 +3142,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_vlan_filter_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
@@ -3198,7 +3198,7 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
struct i40e_pf *pf = vf->pf;
int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
@@ -3227,7 +3227,7 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
{
struct virtchnl_iwarp_qvlist_info *qvlist_info =
(struct virtchnl_iwarp_qvlist_info *)msg;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
@@ -3263,7 +3263,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_rss_key *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
@@ -3293,7 +3293,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_rss_lut *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
u16 i;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
@@ -3328,7 +3328,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_rss_hena *vrh = NULL;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int len = 0;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -3365,7 +3365,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_rss_hena *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -3389,8 +3389,8 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
**/
static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
- i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
+ int aq_ret = 0;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -3415,8 +3415,8 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
**/
static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
- i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
+ int aq_ret = 0;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -3615,8 +3615,8 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
if (ret)
dev_err(&pf->pdev->dev,
- "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
- vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ "VF %d: Failed to delete cloud filter, err %d aq_err %s\n",
+ vf->vf_id, ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));

@@ -3642,7 +3642,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
struct hlist_node *node;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i, ret;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -3718,8 +3718,8 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
if (ret) {
dev_err(&pf->pdev->dev,
- "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
- vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ "VF %d: Failed to delete cloud filter, err %d aq_err %s\n",
+ vf->vf_id, ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto err;
}
@@ -3773,7 +3773,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
struct i40e_cloud_filter *cfilter = NULL;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i, ret;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -3852,8 +3852,8 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
if (ret) {
dev_err(&pf->pdev->dev,
- "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
- vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ "VF %d: Failed to add cloud filter, err %d aq_err %s\n",
+ vf->vf_id, ret,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto err_free;
}
@@ -3882,7 +3882,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
struct i40e_link_status *ls = &pf->hw.phy.link_info;
int i, adq_request_qps = 0;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
u64 speed = 0;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -3994,7 +3994,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
{
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;

if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index e88e3dfac8c2..0051aa676e19 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2634,6 +2634,14 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
return 0;
}

+static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
+{
+ if (adapter->hw.mac.type < ixgbe_mac_X550)
+ return 16;
+ else
+ return 64;
+}
+
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -2642,7 +2650,8 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,

switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_queues;
+ cmd->data = min_t(int, adapter->num_rx_queues,
+ ixgbe_rss_indir_tbl_max(adapter));
ret = 0;
break;
case ETHTOOL_GRXCLSRLCNT:
@@ -3044,14 +3053,6 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}

-static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
-{
- if (adapter->hw.mac.type < ixgbe_mac_X550)
- return 16;
- else
- return 64;
-}
-
static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
return IXGBE_RSS_KEY_SIZE;
@@ -3100,8 +3101,8 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
int i;
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);

- if (hfunc)
- return -EINVAL;
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;

/* Fill out the redirection table */
if (indir) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 0571e40c6ee5..02bb9d43ff9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -396,7 +396,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
return ret;
}

-void mlx5_detach_device(struct mlx5_core_dev *dev)
+void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
{
struct mlx5_priv *priv = &dev->priv;
struct auxiliary_device *adev;
@@ -426,7 +426,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)

adrv = to_auxiliary_drv(adev->dev.driver);

- if (adrv->suspend) {
+ if (adrv->suspend && suspend) {
adrv->suspend(adev, pm);
continue;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 97e9ec44a759..3749eb83d9e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -108,7 +108,7 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
if (err)
return err;

- mlx5_unload_one_devl_locked(dev);
+ mlx5_unload_one_devl_locked(dev, true);
err = mlx5_health_wait_pci_up(dev);
if (err)
NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
@@ -166,7 +166,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,

switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- mlx5_unload_one_devl_locked(dev);
+ mlx5_unload_one_devl_locked(dev, false);
break;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
@@ -200,7 +200,7 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
break;
/* On fw_activate action, also driver is reloaded and reinit performed */
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- ret = mlx5_load_one_devl_locked(dev, false);
+ ret = mlx5_load_one_devl_locked(dev, true);
break;
default:
/* Unsupported action should not get to this function */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index 4e48946c4c2a..0290e0dea539 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -106,22 +106,17 @@ mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
}

struct mlx5e_post_act_handle *
-mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr)
+mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *post_attr)
{
- u32 attr_sz = ns_to_attr_sz(post_act->ns_type);
struct mlx5e_post_act_handle *handle;
- struct mlx5_flow_attr *post_attr;
int err;

handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- post_attr = mlx5_alloc_flow_attr(post_act->ns_type);
- if (!handle || !post_attr) {
- kfree(post_attr);
+ if (!handle) {
kfree(handle);
return ERR_PTR(-ENOMEM);
}

- memcpy(post_attr, attr, attr_sz);
post_attr->chain = 0;
post_attr->prio = 0;
post_attr->ft = post_act->ft;
@@ -145,7 +140,6 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
return handle;

err_xarray:
- kfree(post_attr);
kfree(handle);
return ERR_PTR(err);
}
@@ -164,7 +158,6 @@ mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_han
if (!IS_ERR_OR_NULL(handle->rule))
mlx5e_tc_post_act_unoffload(post_act, handle);
xa_erase(&post_act->ids, handle->id);
- kfree(handle->attr);
kfree(handle);
}

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
index f476774c0b75..40b8df184af5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
@@ -19,7 +19,7 @@ void
mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act);

struct mlx5e_post_act_handle *
-mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr);
+mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *post_attr);

void
mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
index f2c2c752bd1c..c57b09727524 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -14,10 +14,10 @@

#define MLX5_ESW_VPORT_TBL_SIZE_SAMPLE (64 * 1024)

-static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = {
+static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = {
.max_fte = MLX5_ESW_VPORT_TBL_SIZE_SAMPLE,
.max_num_groups = 0, /* default num of groups */
- .flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | MLX5_FLOW_TABLE_TUNNEL_EN_DECAP,
+ .flags = 0,
};

struct mlx5e_tc_psample {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 7cd36f4ac3ef..eba601487eb7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -776,6 +776,7 @@ static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
+ ft->t = NULL;
fs_err(fs, "fail to create promisc table err=%d\n", err);
return err;
}
@@ -803,7 +804,7 @@ static void mlx5e_del_promisc_rule(struct mlx5e_flow_steering *fs)

static void mlx5e_destroy_promisc_table(struct mlx5e_flow_steering *fs)
{
- if (WARN(!fs->promisc.ft.t, "Trying to remove non-existing promiscuous table"))
+ if (!fs->promisc.ft.t)
return;
mlx5e_del_promisc_rule(fs);
mlx5_destroy_flow_table(fs->promisc.ft.t);
@@ -1471,6 +1472,8 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,

void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
+ if (!fs)
+ return;
mlx5e_fs_ethtool_free(fs);
mlx5e_fs_tc_free(fs);
mlx5e_fs_vlan_free(fs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5e01de4c3203..94d010e2d5ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5201,6 +5201,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_ktls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
mlx5e_fs_cleanup(priv->fs);
+ priv->fs = NULL;
}

static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 0f744131c686..9bd1a93a512d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -783,6 +783,7 @@ static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
mlx5e_fs_cleanup(priv->fs);
mlx5e_ipsec_cleanup(priv);
+ priv->fs = NULL;
}

static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
@@ -949,6 +950,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
priv->rx_res = NULL;
err_free_fs:
mlx5e_fs_cleanup(priv->fs);
+ priv->fs = NULL;
return err;
}

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
index 9e72118f2e4c..749c3957a128 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
@@ -11,7 +11,7 @@ struct mlx5_vport_key {
u16 prio;
u16 vport;
u16 vhca_id;
- const struct esw_vport_tbl_namespace *vport_ns;
+ struct esw_vport_tbl_namespace *vport_ns;
} __packed;

struct mlx5_vport_table {
@@ -21,6 +21,14 @@ struct mlx5_vport_table {
struct mlx5_vport_key key;
};

+static void
+esw_vport_tbl_init(struct mlx5_eswitch *esw, struct esw_vport_tbl_namespace *ns)
+{
+ if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
+ ns->flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+}
+
static struct mlx5_flow_table *
esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns,
const struct esw_vport_tbl_namespace *vport_ns)
@@ -80,6 +88,7 @@ mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr
u32 hkey;

mutex_lock(&esw->fdb_table.offloads.vports.lock);
+ esw_vport_tbl_init(esw, attr->vport_ns);
hkey = flow_attr_to_vport_key(esw, attr, &skey);
e = esw_vport_tbl_lookup(esw, &skey, hkey);
if (e) {
@@ -127,6 +136,7 @@ mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr
u32 hkey;

mutex_lock(&esw->fdb_table.offloads.vports.lock);
+ esw_vport_tbl_init(esw, attr->vport_ns);
hkey = flow_attr_to_vport_key(esw, attr, &key);
e = esw_vport_tbl_lookup(esw, &key, hkey);
if (!e || --e->num_rules)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 5db76af35d3f..6e6e0864063f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -668,7 +668,7 @@ struct mlx5_vport_tbl_attr {
u32 chain;
u16 prio;
u16 vport;
- const struct esw_vport_tbl_namespace *vport_ns;
+ struct esw_vport_tbl_namespace *vport_ns;
};

struct mlx5_flow_table *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 64e5b9f29206..519526a4810e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -72,7 +72,7 @@

#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)

-static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
+static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
.flags = 0,
@@ -733,7 +733,6 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
kfree(dest);
return rule;
err_chain_src_rewrite:
- esw_put_dest_tables_loop(esw, attr, 0, i);
mlx5_esw_vporttbl_put(esw, &fwd_attr);
err_get_fwd:
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
@@ -776,7 +775,6 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
if (fwd_rule) {
mlx5_esw_vporttbl_put(esw, &fwd_attr);
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
- esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
} else {
if (split)
mlx5_esw_vporttbl_put(esw, &fwd_attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 1e46f9afa40e..d219f8417d93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -150,11 +150,11 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
- mlx5_unload_one(dev);
+ mlx5_unload_one(dev, false);
if (mlx5_health_wait_pci_up(dev))
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
else
- mlx5_load_one(dev, false);
+ mlx5_load_one(dev, true);
devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
@@ -484,8 +484,8 @@ int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
}
err = fw_reset->ret;
if (test_and_clear_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags)) {
- mlx5_unload_one_devl_locked(dev);
- mlx5_load_one_devl_locked(dev, false);
+ mlx5_unload_one_devl_locked(dev, false);
+ mlx5_load_one_devl_locked(dev, true);
}
out:
clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 879555ba847d..e42e4ac231c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -699,7 +699,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
* requests from the kernel.
*/
mlx5_core_err(dev, "Driver is in error state. Unloading\n");
- mlx5_unload_one(dev);
+ mlx5_unload_one(dev, false);
}
}

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 59914f66857d..cc8057c4f908 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1495,12 +1495,12 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
return ret;
}

-void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev)
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend)
{
devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&dev->intf_state_mutex);

- mlx5_detach_device(dev);
+ mlx5_detach_device(dev, suspend);

if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
mlx5_core_warn(dev, "%s: interface is down, NOP\n",
@@ -1515,12 +1515,12 @@ void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev)
mutex_unlock(&dev->intf_state_mutex);
}

-void mlx5_unload_one(struct mlx5_core_dev *dev)
+void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend)
{
struct devlink *devlink = priv_to_devlink(dev);

devl_lock(devlink);
- mlx5_unload_one_devl_locked(dev);
+ mlx5_unload_one_devl_locked(dev, suspend);
devl_unlock(devlink);
}

@@ -1793,7 +1793,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,

mlx5_enter_error_state(dev, false);
mlx5_error_sw_reset(dev);
- mlx5_unload_one(dev);
+ mlx5_unload_one(dev, true);
mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);

@@ -1950,7 +1950,7 @@ static void shutdown(struct pci_dev *pdev)
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
err = mlx5_try_fast_unload(dev);
if (err)
- mlx5_unload_one(dev);
+ mlx5_unload_one(dev, false);
mlx5_pci_disable_device(dev);
}

@@ -1958,7 +1958,7 @@ static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

- mlx5_unload_one(dev);
+ mlx5_unload_one(dev, true);

return 0;
}
@@ -2001,7 +2001,7 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
mlx5_error_sw_reset(dev);
- mlx5_unload_one_devl_locked(dev);
+ mlx5_unload_one_devl_locked(dev, false);
}

int mlx5_recover_device(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a806e3de7b7c..1a35b3c2a367 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -236,7 +236,7 @@ void mlx5_adev_cleanup(struct mlx5_core_dev *dev);
int mlx5_adev_init(struct mlx5_core_dev *dev);

int mlx5_attach_device(struct mlx5_core_dev *dev);
-void mlx5_detach_device(struct mlx5_core_dev *dev);
+void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend);
int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
@@ -319,8 +319,8 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
int mlx5_init_one(struct mlx5_core_dev *dev);
void mlx5_uninit_one(struct mlx5_core_dev *dev);
-void mlx5_unload_one(struct mlx5_core_dev *dev);
-void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev);
+void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend);
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend);
int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 7b4783ce213e..a7377619ba6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -74,7 +74,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);

- mlx5_unload_one(sf_dev->mdev);
+ mlx5_unload_one(sf_dev->mdev, false);
}

static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index 102ddc7e206a..29ffaf35559d 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -367,7 +367,8 @@ void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
* Returns 0 on success, error code otherwise. In case of an error this
* function will free the SKB.
*/
-int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb)
{
return __ef100_enqueue_skb(tx_queue, skb, NULL);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 6656d76b6766..cf682a9e3fff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -39,6 +39,24 @@ struct rk_gmac_ops {
u32 regs[];
};

+static const char * const rk_clocks[] = {
+ "aclk_mac", "pclk_mac", "mac_clk_tx", "clk_mac_speed",
+};
+
+static const char * const rk_rmii_clocks[] = {
+ "mac_clk_rx", "clk_mac_ref", "clk_mac_refout",
+};
+
+enum rk_clocks_index {
+ RK_ACLK_MAC = 0,
+ RK_PCLK_MAC,
+ RK_MAC_CLK_TX,
+ RK_CLK_MAC_SPEED,
+ RK_MAC_CLK_RX,
+ RK_CLK_MAC_REF,
+ RK_CLK_MAC_REFOUT,
+};
+
struct rk_priv_data {
struct platform_device *pdev;
phy_interface_t phy_iface;
@@ -51,15 +69,9 @@ struct rk_priv_data {
bool clock_input;
bool integrated_phy;

+ struct clk_bulk_data *clks;
+ int num_clks;
struct clk *clk_mac;
- struct clk *gmac_clkin;
- struct clk *mac_clk_rx;
- struct clk *mac_clk_tx;
- struct clk *clk_mac_ref;
- struct clk *clk_mac_refout;
- struct clk *clk_mac_speed;
- struct clk *aclk_mac;
- struct clk *pclk_mac;
struct clk *clk_phy;

struct reset_control *phy_reset;
@@ -104,10 +116,11 @@ static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)

static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
+ struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
struct device *dev = &bsp_priv->pdev->dev;
int ret;

- if (IS_ERR(bsp_priv->clk_mac_speed)) {
+ if (!clk_mac_speed) {
dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
return;
}
@@ -116,7 +129,7 @@ static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
PX30_GMAC_SPEED_10M);

- ret = clk_set_rate(bsp_priv->clk_mac_speed, 2500000);
+ ret = clk_set_rate(clk_mac_speed, 2500000);
if (ret)
dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
__func__, ret);
@@ -124,7 +137,7 @@ static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
PX30_GMAC_SPEED_100M);

- ret = clk_set_rate(bsp_priv->clk_mac_speed, 25000000);
+ ret = clk_set_rate(clk_mac_speed, 25000000);
if (ret)
dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
__func__, ret);
@@ -1066,6 +1079,7 @@ static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)

static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
{
+ struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
struct device *dev = &bsp_priv->pdev->dev;
unsigned long rate;
int ret;
@@ -1085,7 +1099,7 @@ static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
return;
}

- ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
+ ret = clk_set_rate(clk_mac_speed, rate);
if (ret)
dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
__func__, rate, ret);
@@ -1371,6 +1385,7 @@ static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)

static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
+ struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
struct device *dev = &bsp_priv->pdev->dev;
unsigned long rate;
int ret;
@@ -1390,7 +1405,7 @@ static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
return;
}

- ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
+ ret = clk_set_rate(clk_mac_speed, rate);
if (ret)
dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
__func__, rate, ret);
@@ -1398,6 +1413,7 @@ static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)

static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
+ struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
struct device *dev = &bsp_priv->pdev->dev;
unsigned long rate;
int ret;
@@ -1414,7 +1430,7 @@ static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
return;
}

- ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
+ ret = clk_set_rate(clk_mac_speed, rate);
if (ret)
dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
__func__, rate, ret);
@@ -1475,68 +1491,50 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
{
struct rk_priv_data *bsp_priv = plat->bsp_priv;
struct device *dev = &bsp_priv->pdev->dev;
- int ret;
+ int phy_iface = bsp_priv->phy_iface;
+ int i, j, ret;

bsp_priv->clk_enabled = false;

- bsp_priv->mac_clk_rx = devm_clk_get(dev, "mac_clk_rx");
- if (IS_ERR(bsp_priv->mac_clk_rx))
- dev_err(dev, "cannot get clock %s\n",
- "mac_clk_rx");
+ bsp_priv->num_clks = ARRAY_SIZE(rk_clocks);
+ if (phy_iface == PHY_INTERFACE_MODE_RMII)
+ bsp_priv->num_clks += ARRAY_SIZE(rk_rmii_clocks);

- bsp_priv->mac_clk_tx = devm_clk_get(dev, "mac_clk_tx");
- if (IS_ERR(bsp_priv->mac_clk_tx))
- dev_err(dev, "cannot get clock %s\n",
- "mac_clk_tx");
+ bsp_priv->clks = devm_kcalloc(dev, bsp_priv->num_clks,
+ sizeof(*bsp_priv->clks), GFP_KERNEL);
+ if (!bsp_priv->clks)
+ return -ENOMEM;

- bsp_priv->aclk_mac = devm_clk_get(dev, "aclk_mac");
- if (IS_ERR(bsp_priv->aclk_mac))
- dev_err(dev, "cannot get clock %s\n",
- "aclk_mac");
+ for (i = 0; i < ARRAY_SIZE(rk_clocks); i++)
+ bsp_priv->clks[i].id = rk_clocks[i];

- bsp_priv->pclk_mac = devm_clk_get(dev, "pclk_mac");
- if (IS_ERR(bsp_priv->pclk_mac))
- dev_err(dev, "cannot get clock %s\n",
- "pclk_mac");
-
- bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
- if (IS_ERR(bsp_priv->clk_mac))
- dev_err(dev, "cannot get clock %s\n",
- "stmmaceth");
-
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
- bsp_priv->clk_mac_ref = devm_clk_get(dev, "clk_mac_ref");
- if (IS_ERR(bsp_priv->clk_mac_ref))
- dev_err(dev, "cannot get clock %s\n",
- "clk_mac_ref");
-
- if (!bsp_priv->clock_input) {
- bsp_priv->clk_mac_refout =
- devm_clk_get(dev, "clk_mac_refout");
- if (IS_ERR(bsp_priv->clk_mac_refout))
- dev_err(dev, "cannot get clock %s\n",
- "clk_mac_refout");
- }
+ if (phy_iface == PHY_INTERFACE_MODE_RMII) {
+ for (j = 0; j < ARRAY_SIZE(rk_rmii_clocks); j++)
+ bsp_priv->clks[i++].id = rk_rmii_clocks[j];
}

- bsp_priv->clk_mac_speed = devm_clk_get(dev, "clk_mac_speed");
- if (IS_ERR(bsp_priv->clk_mac_speed))
- dev_err(dev, "cannot get clock %s\n", "clk_mac_speed");
+ ret = devm_clk_bulk_get_optional(dev, bsp_priv->num_clks,
+ bsp_priv->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+
+ /* "stmmaceth" will be enabled by the core */
+ bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
+ ret = PTR_ERR_OR_ZERO(bsp_priv->clk_mac);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot get stmmaceth clock\n");

if (bsp_priv->clock_input) {
dev_info(dev, "clock input from PHY\n");
- } else {
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
- clk_set_rate(bsp_priv->clk_mac, 50000000);
+ } else if (phy_iface == PHY_INTERFACE_MODE_RMII) {
+ clk_set_rate(bsp_priv->clk_mac, 50000000);
}

if (plat->phy_node && bsp_priv->integrated_phy) {
bsp_priv->clk_phy = of_clk_get(plat->phy_node, 0);
- if (IS_ERR(bsp_priv->clk_phy)) {
- ret = PTR_ERR(bsp_priv->clk_phy);
- dev_err(dev, "Cannot get PHY clock: %d\n", ret);
- return -EINVAL;
- }
+ ret = PTR_ERR_OR_ZERO(bsp_priv->clk_phy);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot get PHY clock\n");
clk_set_rate(bsp_priv->clk_phy, 50000000);
}

@@ -1545,77 +1543,36 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)

static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
{
- int phy_iface = bsp_priv->phy_iface;
+ int ret;

if (enable) {
if (!bsp_priv->clk_enabled) {
- if (phy_iface == PHY_INTERFACE_MODE_RMII) {
- if (!IS_ERR(bsp_priv->mac_clk_rx))
- clk_prepare_enable(
- bsp_priv->mac_clk_rx);
-
- if (!IS_ERR(bsp_priv->clk_mac_ref))
- clk_prepare_enable(
- bsp_priv->clk_mac_ref);
-
- if (!IS_ERR(bsp_priv->clk_mac_refout))
- clk_prepare_enable(
- bsp_priv->clk_mac_refout);
- }
-
- if (!IS_ERR(bsp_priv->clk_phy))
- clk_prepare_enable(bsp_priv->clk_phy);
+ ret = clk_bulk_prepare_enable(bsp_priv->num_clks,
+ bsp_priv->clks);
+ if (ret)
+ return ret;

- if (!IS_ERR(bsp_priv->aclk_mac))
- clk_prepare_enable(bsp_priv->aclk_mac);
-
- if (!IS_ERR(bsp_priv->pclk_mac))
- clk_prepare_enable(bsp_priv->pclk_mac);
-
- if (!IS_ERR(bsp_priv->mac_clk_tx))
- clk_prepare_enable(bsp_priv->mac_clk_tx);
-
- if (!IS_ERR(bsp_priv->clk_mac_speed))
- clk_prepare_enable(bsp_priv->clk_mac_speed);
+ ret = clk_prepare_enable(bsp_priv->clk_phy);
+ if (ret)
+ return ret;

if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
bsp_priv->ops->set_clock_selection(bsp_priv,
bsp_priv->clock_input, true);

- /**
- * if (!IS_ERR(bsp_priv->clk_mac))
- * clk_prepare_enable(bsp_priv->clk_mac);
- */
mdelay(5);
bsp_priv->clk_enabled = true;
}
} else {
if (bsp_priv->clk_enabled) {
- if (phy_iface == PHY_INTERFACE_MODE_RMII) {
- clk_disable_unprepare(bsp_priv->mac_clk_rx);
-
- clk_disable_unprepare(bsp_priv->clk_mac_ref);
-
- clk_disable_unprepare(bsp_priv->clk_mac_refout);
- }
-
+ clk_bulk_disable_unprepare(bsp_priv->num_clks,
+ bsp_priv->clks);
clk_disable_unprepare(bsp_priv->clk_phy);

- clk_disable_unprepare(bsp_priv->aclk_mac);
-
- clk_disable_unprepare(bsp_priv->pclk_mac);
-
- clk_disable_unprepare(bsp_priv->mac_clk_tx);
-
- clk_disable_unprepare(bsp_priv->clk_mac_speed);
-
if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
bsp_priv->ops->set_clock_selection(bsp_priv,
bsp_priv->clock_input, false);
- /**
- * if (!IS_ERR(bsp_priv->clk_mac))
- * clk_disable_unprepare(bsp_priv->clk_mac);
- */
+
bsp_priv->clk_enabled = false;
}
}
@@ -1629,9 +1586,6 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
int ret;
struct device *dev = &bsp_priv->pdev->dev;

- if (!ldo)
- return 0;
-
if (enable) {
ret = regulator_enable(ldo);
if (ret)
@@ -1679,14 +1633,11 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
}
}

- bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
+ bsp_priv->regulator = devm_regulator_get(dev, "phy");
if (IS_ERR(bsp_priv->regulator)) {
- if (PTR_ERR(bsp_priv->regulator) == -EPROBE_DEFER) {
- dev_err(dev, "phy regulator is not available yet, deferred probing\n");
- return ERR_PTR(-EPROBE_DEFER);
- }
- dev_err(dev, "no regulator found\n");
- bsp_priv->regulator = NULL;
+ ret = PTR_ERR(bsp_priv->regulator);
+ dev_err_probe(dev, ret, "failed to get phy regulator\n");
+ return ERR_PTR(ret);
}

ret = of_property_read_string(dev->of_node, "clock_in_out", &strings);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index fa3ce3b0d9a5..490315723062 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -6346,6 +6346,10 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
bool is_double = false;
int ret;

+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ return ret;
+
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;

@@ -6353,16 +6357,18 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
ret = stmmac_vlan_update(priv, is_double);
if (ret) {
clear_bit(vid, priv->active_vlans);
- return ret;
+ goto err_pm_put;
}

if (priv->hw->num_vlan) {
ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
if (ret)
- return ret;
+ goto err_pm_put;
}
+err_pm_put:
+ pm_runtime_put(priv->device);

- return 0;
+ return ret;
}

static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index b0c7ab74a82e..7cf8210ebbec 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2834,7 +2834,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
int i, qfe_slot = -1;
char prom_name[64];
u8 addr[ETH_ALEN];
- int err;
+ int err = -ENODEV;

/* Now make sure pci_dev cookie is there. */
#ifdef CONFIG_SPARC
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 70f88eae2a9e..dd88624593c7 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -329,7 +329,7 @@ static int xpcs_read_fault_c73(struct dw_xpcs *xpcs,
return 0;
}

-static int xpcs_read_link_c73(struct dw_xpcs *xpcs, bool an)
+static int xpcs_read_link_c73(struct dw_xpcs *xpcs)
{
bool link = true;
int ret;
@@ -341,15 +341,6 @@ static int xpcs_read_link_c73(struct dw_xpcs *xpcs, bool an)
if (!(ret & MDIO_STAT1_LSTATUS))
link = false;

- if (an) {
- ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1);
- if (ret < 0)
- return ret;
-
- if (!(ret & MDIO_STAT1_LSTATUS))
- link = false;
- }
-
return link;
}

@@ -943,7 +934,7 @@ static int xpcs_get_state_c73(struct dw_xpcs *xpcs,
int ret;

/* Link needs to be read first ... */
- state->link = xpcs_read_link_c73(xpcs, state->an_enabled) > 0 ? 1 : 0;
+ state->link = xpcs_read_link_c73(xpcs) > 0 ? 1 : 0;

/* ... and then we check the faults. */
ret = xpcs_read_fault_c73(xpcs, state);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 5663a184644d..766f86bdc4a0 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -395,6 +395,10 @@ static const struct sfp_quirk sfp_quirks[] = {

SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),

+ // HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
+ // 2600MBd in their EEPROM
+ SFP_QUIRK_M("HG GENUINE", "MXPD-483II", sfp_quirk_2500basex),
+
// Huawei MA5671A can operate at 2500base-X, but report 1.2GBd NRZ in
// their EEPROM
SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
diff --git a/drivers/net/wireguard/timers.c b/drivers/net/wireguard/timers.c
index d54d32ac9bc4..91f5d6d2d4e2 100644
--- a/drivers/net/wireguard/timers.c
+++ b/drivers/net/wireguard/timers.c
@@ -46,7 +46,7 @@ static void wg_expired_retransmit_handshake(struct timer_list *timer)
if (peer->timer_handshake_attempts > MAX_TIMER_HANDSHAKES) {
pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d attempts, giving up\n",
peer->device->dev->name, peer->internal_id,
- &peer->endpoint.addr, MAX_TIMER_HANDSHAKES + 2);
+ &peer->endpoint.addr, (int)MAX_TIMER_HANDSHAKES + 2);

del_timer(&peer->timer_send_keepalive);
/* We drop all packets without a keypair and don't try again,
@@ -64,7 +64,7 @@ static void wg_expired_retransmit_handshake(struct timer_list *timer)
++peer->timer_handshake_attempts;
pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d seconds, retrying (try %d)\n",
peer->device->dev->name, peer->internal_id,
- &peer->endpoint.addr, REKEY_TIMEOUT,
+ &peer->endpoint.addr, (int)REKEY_TIMEOUT,
peer->timer_handshake_attempts + 1);

/* We clear the endpoint address src address, in case this is
@@ -94,7 +94,7 @@ static void wg_expired_new_handshake(struct timer_list *timer)

pr_debug("%s: Retrying handshake with peer %llu (%pISpfsc) because we stopped hearing back after %d seconds\n",
peer->device->dev->name, peer->internal_id,
- &peer->endpoint.addr, KEEPALIVE_TIMEOUT + REKEY_TIMEOUT);
+ &peer->endpoint.addr, (int)(KEEPALIVE_TIMEOUT + REKEY_TIMEOUT));
/* We clear the endpoint address src address, in case this is the cause
* of trouble.
*/
@@ -126,7 +126,7 @@ static void wg_queued_expired_zero_key_material(struct work_struct *work)

pr_debug("%s: Zeroing out all keys for peer %llu (%pISpfsc), since we haven't received a new one in %d seconds\n",
peer->device->dev->name, peer->internal_id,
- &peer->endpoint.addr, REJECT_AFTER_TIME * 3);
+ &peer->endpoint.addr, (int)REJECT_AFTER_TIME * 3);
wg_noise_handshake_clear(&peer->handshake);
wg_noise_keypairs_clear(&peer->keypairs);
wg_peer_put(peer);
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index d34a4d6325b2..76f275ca53e9 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -859,11 +859,11 @@ static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
ab->pci.msi.ep_base_data = int_prop + 32;

for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
- res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
- if (!res)
- return -ENODEV;
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0)
+ return ret;

- ab->pci.msi.irqs[i] = res->start;
+ ab->pci.msi.irqs[i] = ret;
}

set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
@@ -1063,6 +1063,12 @@ static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
struct iommu_domain *iommu;
size_t unmapped_size;

+ /* Chipsets not requiring MSA would have not initialized
+ * MSA resources, return success in such cases.
+ */
+ if (!ab->hw_params.fixed_fw_mem)
+ return 0;
+
if (ab_ahb->fw.use_tz)
return 0;

diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
index 2107ec05d14f..5536e8642331 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.c
+++ b/drivers/net/wireless/ath/ath11k/dbring.c
@@ -26,13 +26,13 @@ int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
void *buffer, u32 size)
{
- u32 *temp;
- int idx;
-
- size = size >> 2;
+ /* memset32 function fills buffer payload with the ATH11K_DB_MAGIC_VALUE
+ * and the variable size is expected to be the number of u32 values
+ * to be stored, not the number of bytes.
+ */
+ size = size / sizeof(u32);

- for (idx = 0, temp = buffer; idx < size; idx++, temp++)
- *temp++ = ATH11K_DB_MAGIC_VALUE;
+ memset32(buffer, ATH11K_DB_MAGIC_VALUE, size);
}

static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 2a0d3afb0c99..0231783ad754 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -163,7 +163,7 @@ void ath11k_mac_drain_tx(struct ath11k *ar);
void ath11k_mac_peer_cleanup_all(struct ath11k *ar);
int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
u8 ath11k_mac_bw_to_mac80211_bw(u8 bw);
-u32 ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi);
+enum nl80211_he_gi ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi);
enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy);
enum nl80211_he_ru_alloc ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones);
enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw);
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 86995e8dc913..a62ee05c5409 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -16,7 +16,7 @@
#include "pci.h"
#include "pcic.h"

-#define MHI_TIMEOUT_DEFAULT_MS 90000
+#define MHI_TIMEOUT_DEFAULT_MS 20000
#define RDDM_DUMP_SIZE 0x420000

static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 1ae7af02c364..1380811827a8 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -382,22 +382,23 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
return -ENOBUFS;
}

+ mutex_lock(&ar->ab->tbl_mtx_lock);
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr);
if (peer) {
if (peer->vdev_id == param->vdev_id) {
spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
return -EINVAL;
}

/* Assume sta is transitioning to another band.
* Remove here the peer from rhash.
*/
- mutex_lock(&ar->ab->tbl_mtx_lock);
ath11k_peer_rhash_delete(ar->ab, peer);
- mutex_unlock(&ar->ab->tbl_mtx_lock);
}
spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);

ret = ath11k_wmi_send_peer_create_cmd(ar, param);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 2c9cec8b53d9..28a1e5eff204 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -113,15 +113,13 @@ static int ath_ahb_probe(struct platform_device *pdev)
goto err_out;
}

- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "no IRQ resource found\n");
- ret = -ENXIO;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no IRQ resource found: %d\n", irq);
+ ret = irq;
goto err_iounmap;
}

- irq = res->start;
-
hw = ieee80211_alloc_hw(sizeof(struct ath5k_hw), &ath5k_hw_ops);
if (hw == NULL) {
dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index d444b3d70ba2..58d3e86f6256 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -529,7 +529,7 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
ee->ee_n_piers[mode]++;

freq2 = (val >> 8) & 0xff;
- if (!freq2)
+ if (!freq2 || i >= max)
break;

pc[i++].freq = ath5k_eeprom_bin2freq(ee,
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
index bde5a10d470c..af98e871199d 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.c
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -246,7 +246,7 @@ int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
return -EACCES;
}

- size = sizeof(cid) + sizeof(addr) + sizeof(param);
+ size = sizeof(cid) + sizeof(addr) + sizeof(*param);
if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index c68848819a52..9b88d96bfe96 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -960,8 +960,8 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
* Thus the possibility of ar->htc_target being NULL
* via ath6kl_recv_complete -> ath6kl_usb_io_comp_work.
*/
- if (WARN_ON_ONCE(!target)) {
- ath6kl_err("Target not yet initialized\n");
+ if (!target) {
+ ath6kl_dbg(ATH6KL_DBG_HTC, "Target not yet initialized\n");
status = -EINVAL;
goto free_skb;
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index f521dfa2f194..e0130beb304d 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -534,6 +534,24 @@ static struct ath9k_htc_hif hif_usb = {
.send = hif_usb_send,
};

+/* Need to free remain_skb allocated in ath9k_hif_usb_rx_stream
+ * in case ath9k_hif_usb_rx_stream wasn't called next time to
+ * process the buffer and subsequently free it.
+ */
+static void ath9k_hif_usb_free_rx_remain_skb(struct hif_device_usb *hif_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hif_dev->rx_lock, flags);
+ if (hif_dev->remain_skb) {
+ dev_kfree_skb_any(hif_dev->remain_skb);
+ hif_dev->remain_skb = NULL;
+ hif_dev->rx_remain_len = 0;
+ RX_STAT_INC(hif_dev, skb_dropped);
+ }
+ spin_unlock_irqrestore(&hif_dev->rx_lock, flags);
+}
+
static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
struct sk_buff *skb)
{
@@ -868,6 +886,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
{
usb_kill_anchored_urbs(&hif_dev->rx_submitted);
+ ath9k_hif_usb_free_rx_remain_skb(hif_dev);
}

static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 12c4408bbc3b..2cc913acfc2d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -6210,18 +6210,20 @@ static s32 brcmf_notify_rssi(struct brcmf_if *ifp,
{
struct brcmf_cfg80211_vif *vif = ifp->vif;
struct brcmf_rssi_be *info = data;
- s32 rssi, snr, noise;
+ s32 rssi, snr = 0, noise = 0;
s32 low, high, last;

- if (e->datalen < sizeof(*info)) {
+ if (e->datalen >= sizeof(*info)) {
+ rssi = be32_to_cpu(info->rssi);
+ snr = be32_to_cpu(info->snr);
+ noise = be32_to_cpu(info->noise);
+ } else if (e->datalen >= sizeof(rssi)) {
+ rssi = be32_to_cpu(*(__be32 *)data);
+ } else {
brcmf_err("insufficient RSSI event data\n");
return 0;
}

- rssi = be32_to_cpu(info->rssi);
- snr = be32_to_cpu(info->snr);
- noise = be32_to_cpu(info->noise);
-
low = vif->cqm_rssi_low;
high = vif->cqm_rssi_high;
last = vif->cqm_rssi_last;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index abf49022edbe..027360e63b92 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1038,7 +1038,7 @@ iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
range->range_data_size = reg->dev_addr.size;
for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
prph_val = iwl_read_prph(fwrt->trans, addr + i);
- if (prph_val == 0x5a5a5a5a)
+ if ((prph_val & ~0xf) == 0xa5a5a5a0)
return -EBUSY;
*val++ = cpu_to_le32(prph_val);
}
@@ -1388,13 +1388,13 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
if (!data)
return;

+ memset(data, 0, sizeof(*data));
+
/* make sure only one bit is set in only one fid */
if (WARN_ONCE(hweight_long(fid1) + hweight_long(fid2) != 1,
"fid1=%x, fid2=%x\n", fid1, fid2))
return;

- memset(data, 0, sizeof(*data));
-
if (fid1) {
fifo_idx = ffs(fid1) - 1;
if (WARN_ONCE(fifo_idx >= MAX_NUM_LMAC, "fifo_idx=%d\n",
@@ -1562,7 +1562,7 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ?
DBGI_SRAM_TARGET_ACCESS_RDATA_MSB :
DBGI_SRAM_TARGET_ACCESS_RDATA_LSB);
- if (prph_data == 0x5a5a5a5a) {
+ if ((prph_data & ~0xf) == 0xa5a5a5a0) {
iwl_trans_release_nic_access(fwrt->trans);
return -EBUSY;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index 43e997283db0..607e07ed2477 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -317,8 +317,10 @@ static void *iwl_dbgfs_fw_info_seq_next(struct seq_file *seq,
const struct iwl_fw *fw = priv->fwrt->fw;

*pos = ++state->pos;
- if (*pos >= fw->ucode_capa.n_cmd_versions)
+ if (*pos >= fw->ucode_capa.n_cmd_versions) {
+ kfree(state);
return NULL;
+ }

return state;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 3237d4b528b5..a1d34f3e7a9f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -138,6 +138,12 @@ static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
goto err;

+ if (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
+ alloc->req_size == 0) {
+ IWL_ERR(trans, "WRT: Invalid DRAM buffer allocation requested size (0)\n");
+ return -EINVAL;
+ }
+
trans->dbg.fw_mon_cfg[alloc_id] = *alloc;

return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
index ae4c2a3d63d5..3a3c13a41fc6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2011, 2021 Intel Corporation
+ * Copyright (C) 2005-2011, 2021-2022 Intel Corporation
*/
#include <linux/device.h>
#include <linux/interrupt.h>
@@ -57,6 +57,7 @@ void __iwl_err(struct device *dev, enum iwl_err_mode mode, const char *fmt, ...)
default:
break;
}
+ vaf.va = &args;
trace_iwlwifi_err(&vaf);
va_end(args);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 919b1f478b4c..bbdda3e1ff3f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -563,6 +563,7 @@ static void iwl_mvm_wowlan_get_tkip_data(struct ieee80211_hw *hw,
}

for (i = 0; i < IWL_NUM_RSC; i++) {
+ ieee80211_get_key_rx_seq(key, i, &seq);
/* wrapping isn't allowed, AP must rekey */
if (seq.tkip.iv32 > cur_rx_iv32)
cur_rx_iv32 = seq.tkip.iv32;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 1e8123140973..022ec7ec0a2f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1745,6 +1745,11 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
if (ret < 0)
return ret;

+ if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) {
+ ret = -EIO;
+ goto out;
+ }
+
rsp = (void *)hcmd.resp_pkt->data;
if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
ret = -ENXIO;
@@ -1821,6 +1826,11 @@ static ssize_t iwl_dbgfs_mem_write(struct file *file,
if (ret < 0)
return ret;

+ if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) {
+ ret = -EIO;
+ goto out;
+ }
+
rsp = (void *)hcmd.resp_pkt->data;
if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) {
ret = -ENXIO;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 49ca1e168fc5..eee98cebbb46 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -384,9 +384,10 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
* Don't even try to decrypt a MCAST frame that was received
* before the managed vif is authorized, we'd fail anyway.
*/
- if (vif->type == NL80211_IFTYPE_STATION &&
+ if (is_multicast_ether_addr(hdr->addr1) &&
+ vif->type == NL80211_IFTYPE_STATION &&
!mvmvif->authorized &&
- is_multicast_ether_addr(hdr->addr1)) {
+ ieee80211_has_protected(hdr->frame_control)) {
IWL_DEBUG_DROP(mvm, "MCAST before the vif is authorized\n");
kfree_skb(skb);
rcu_read_unlock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 1aadccd8841f..091225894037 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -193,8 +193,7 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
* Starting from Bz hardware, it calculates starting directly after
* the MAC header, so that matches mac80211's expectation.
*/
- if (skb->ip_summed == CHECKSUM_COMPLETE &&
- mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) {
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
struct {
u8 hdr[6];
__be16 type;
@@ -209,7 +208,7 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
shdr->type != htons(ETH_P_PAE) &&
shdr->type != htons(ETH_P_TDLS))))
skb->ip_summed = CHECKSUM_NONE;
- else
+ else if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
/* mac80211 assumes full CSUM including SNAP header */
skb_postpush_rcsum(skb, shdr, sizeof(*shdr));
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 4f699862e7f7..85fadd1ef1ff 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -565,7 +565,6 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name),
IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name),
IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index bd50f52a1aad..54f11f60f11c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -599,7 +599,6 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
int ret;
- int t = 0;
int iter;

IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
@@ -616,6 +615,8 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
usleep_range(1000, 2000);

for (iter = 0; iter < 10; iter++) {
+ int t = 0;
+
/* If HW is not ready, prepare the conditions to check again */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PREPARE);
@@ -1522,19 +1523,16 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;

- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :
UREG_DOORBELL_TO_ISR6_RESUME);
- } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,
suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :
CSR_IPC_SLEEP_CONTROL_RESUME);
- iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
- UREG_DOORBELL_TO_ISR6_SLEEP_CTRL);
- } else {
+ else
return 0;
- }

ret = wait_event_timeout(trans_pcie->sx_waitq,
trans_pcie->sx_complete, 2 * HZ);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 478bffb7418d..c406cb1a102f 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -436,7 +436,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
free_skb:
status.skb = tx_info.skb;
hw = mt76_tx_status_get_hw(dev, tx_info.skb);
+ spin_lock_bh(&dev->rx_lock);
ieee80211_tx_status_ext(hw, &status);
+ spin_unlock_bh(&dev->rx_lock);

return ret;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 7bcf7a6b67df..9c753c6aabef 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -904,10 +904,11 @@ bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

-bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
- int timeout);
-
-#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
+bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout, int kick);
+#define __mt76_poll_msec(...) ____mt76_poll_msec(__VA_ARGS__, 10)
+#define mt76_poll_msec(dev, ...) ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__, 10)
+#define mt76_poll_msec_tick(dev, ...) ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 49a511ae8161..6cff346d57a7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -1279,8 +1279,11 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
if (wcidx >= MT7603_WTBL_STA || !sta)
goto out;

- if (mt7603_fill_txs(dev, msta, &info, txs_data))
+ if (mt7603_fill_txs(dev, msta, &info, txs_data)) {
+ spin_lock_bh(&dev->mt76.rx_lock);
ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+ spin_unlock_bh(&dev->mt76.rx_lock);
+ }

out:
rcu_read_unlock();
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index a0412a29fb49..faed43b11ec9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -1517,8 +1517,11 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
if (wcid->phy_idx && dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];

- if (mt7615_fill_txs(dev, msta, &info, txs_data))
+ if (mt7615_fill_txs(dev, msta, &info, txs_data)) {
+ spin_lock_bh(&dev->mt76.rx_lock);
ieee80211_tx_status_noskb(mphy->hw, sta, &info);
+ spin_unlock_bh(&dev->mt76.rx_lock);
+ }

out:
rcu_read_unlock();
@@ -2340,7 +2343,7 @@ void mt7615_coredump_work(struct work_struct *work)
break;

skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
- if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
+ if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
dev_kfree_skb(skb);
continue;
}
@@ -2350,6 +2353,8 @@ void mt7615_coredump_work(struct work_struct *work)

dev_kfree_skb(skb);
}
- dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
- GFP_KERNEL);
+
+ if (dump)
+ dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
+ GFP_KERNEL);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index 46ede1b72bbe..19f02b632a20 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -539,7 +539,8 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
/* Fixed rata is available just for 802.11 txd */
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- bool multicast = is_multicast_ether_addr(hdr->addr1);
+ bool multicast = ieee80211_is_data(hdr->frame_control) &&
+ is_multicast_ether_addr(hdr->addr1);
u16 rate = mt76_connac2_mac_tx_rate_val(mphy, vif, beacon,
multicast);
u32 val = MT_TXD6_FIXED_BW;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 025a237c1cce..546cbe21aab3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -1561,8 +1561,16 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
req->channel_min_dwell_time = cpu_to_le16(duration);
req->channel_dwell_time = cpu_to_le16(duration);

- req->channels_num = min_t(u8, sreq->n_channels, 32);
- req->ext_channels_num = min_t(u8, ext_channels_num, 32);
+ if (sreq->n_channels == 0 || sreq->n_channels > 64) {
+ req->channel_type = 0;
+ req->channels_num = 0;
+ req->ext_channels_num = 0;
+ } else {
+ req->channel_type = 4;
+ req->channels_num = min_t(u8, sreq->n_channels, 32);
+ req->ext_channels_num = min_t(u8, ext_channels_num, 32);
+ }
+
for (i = 0; i < req->channels_num + req->ext_channels_num; i++) {
if (i >= 32)
chan = &req->ext_channels[i - 32];
@@ -1582,7 +1590,6 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
}
chan->channel_num = scan_list[i]->hw_value;
}
- req->channel_type = sreq->n_channels ? 4 : 0;

if (sreq->ie_len > 0) {
memcpy(req->ies, sreq->ie, sreq->ie_len);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 93d96739f802..48ef2a599267 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -631,8 +631,11 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,

mt76_tx_status_unlock(mdev, &list);

- if (!status.skb)
+ if (!status.skb) {
+ spin_lock_bh(&dev->mt76.rx_lock);
ieee80211_tx_status_ext(mt76_hw(dev), &status);
+ spin_unlock_bh(&dev->mt76.rx_lock);
+ }

if (!len)
goto out;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
index ee7ddda4288b..20a6724ab5db 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
@@ -1228,6 +1228,8 @@ static const struct of_device_id mt7986_wmac_of_match[] = {
{},
};

+MODULE_DEVICE_TABLE(of, mt7986_wmac_of_match);
+
struct platform_driver mt7986_wmac_driver = {
.driver = {
.name = "mt7986-wmac",
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
index d1f10f6d9adc..fd57c87a29ae 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
@@ -66,6 +66,24 @@ static void mt7921_dma_prefetch(struct mt7921_dev *dev)

static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
{
+ /* disable WFDMA0 */
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (!mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
+ MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1))
+ return -ETIMEDOUT;
+
+ /* disable dmashdl */
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
+ MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
+ mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
+
if (force) {
/* reset */
mt76_clear(dev, MT_WFDMA0_RST,
@@ -77,24 +95,6 @@ static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
MT_WFDMA0_RST_LOGIC_RST);
}

- /* disable dmashdl */
- mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
- MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
- mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
-
- /* disable WFDMA0 */
- mt76_clear(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
- MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
- MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
- MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
-
- if (!mt76_poll(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
- MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000))
- return -ETIMEDOUT;
-
return 0;
}

@@ -301,6 +301,10 @@ void mt7921_dma_cleanup(struct mt7921_dev *dev)
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

+ mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
+ MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1);
+
/* reset */
mt76_clear(dev, MT_WFDMA0_RST,
MT_WFDMA0_RST_DMASHDL_ALL_RST |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 67bf92969a7b..d3507e86e9cf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -16,24 +16,6 @@ static bool mt7921_disable_clc;
module_param_named(disable_clc, mt7921_disable_clc, bool, 0644);
MODULE_PARM_DESC(disable_clc, "disable CLC support");

-static int
-mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
-{
- struct mt7921_mcu_eeprom_info *res;
- u8 *buf;
-
- if (!skb)
- return -EINVAL;
-
- skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
-
- res = (struct mt7921_mcu_eeprom_info *)skb->data;
- buf = dev->eeprom.data + le32_to_cpu(res->addr);
- memcpy(buf, res->data, 16);
-
- return 0;
-}
-
int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq)
{
@@ -60,8 +42,6 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
} else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
skb_pull(skb, sizeof(*rxd) + 4);
ret = le32_to_cpu(*(__le32 *)skb->data);
- } else if (cmd == MCU_EXT_CMD(EFUSE_ACCESS)) {
- ret = mt7921_mcu_parse_eeprom(mdev, skb);
} else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) ||
cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) ||
cmd == MCU_UNI_CMD(STA_REC_UPDATE) ||
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index 8a53d8f286db..c64b0b4e9358 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -111,9 +111,10 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
napi_disable(&dev->mt76.napi[i]);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
+ cancel_work_sync(&dev->reset_work);

mt7921_tx_token_put(dev);
- mt7921_mcu_drv_pmctrl(dev);
+ __mt7921_mcu_drv_pmctrl(dev);
mt7921_dma_cleanup(dev);
mt7921_wfsys_reset(dev);
skb_queue_purge(&dev->mt76.mcu.res_q);
@@ -256,6 +257,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
struct mt7921_dev *dev;
struct mt76_dev *mdev;
int ret;
+ u16 cmd;

ret = pcim_enable_device(pdev);
if (ret)
@@ -265,6 +267,11 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;

+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (!(cmd & PCI_COMMAND_MEMORY)) {
+ cmd |= PCI_COMMAND_MEMORY;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
pci_set_master(pdev);

ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index 29c0ee330dbe..521bcd577640 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -252,7 +252,7 @@ static int mt7921u_probe(struct usb_interface *usb_intf,

ret = mt7921u_dma_init(dev, false);
if (ret)
- return ret;
+ goto error;

hw = mt76_hw(dev);
/* check hw sg support in order to enable AMSDU */
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 6c054850363f..4482e4ff7804 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -77,7 +77,9 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
}

hw = mt76_tx_status_get_hw(dev, skb);
+ spin_lock_bh(&dev->rx_lock);
ieee80211_tx_status_ext(hw, &status);
+ spin_unlock_bh(&dev->rx_lock);
}
rcu_read_unlock();
}
@@ -263,7 +265,9 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
if (cb->pktid < MT_PACKET_ID_FIRST) {
hw = mt76_tx_status_get_hw(dev, skb);
status.sta = wcid_to_sta(wcid);
+ spin_lock_bh(&dev->rx_lock);
ieee80211_tx_status_ext(hw, &status);
+ spin_unlock_bh(&dev->rx_lock);
goto out;
}

diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index 581964425468..fc76c66ff1a5 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -24,23 +24,23 @@ bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
}
EXPORT_SYMBOL_GPL(__mt76_poll);

-bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
- int timeout)
+bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout, int tick)
{
u32 cur;

- timeout /= 10;
+ timeout /= tick;
do {
cur = __mt76_rr(dev, offset) & mask;
if (cur == val)
return true;

- usleep_range(10000, 20000);
+ usleep_range(1000 * tick, 2000 * tick);
} while (timeout-- > 0);

return false;
}
-EXPORT_SYMBOL_GPL(__mt76_poll_msec);
+EXPORT_SYMBOL_GPL(____mt76_poll_msec);

int mt76_wcid_alloc(u32 *mask, int size)
{
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 3a035afcf7f9..9a9cfd0ce402 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1091,6 +1091,7 @@ static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
}

kfree(rt2x00dev->spec.channels_info);
+ kfree(rt2x00dev->chan_survey);
}

static const struct ieee80211_tpt_blink rt2x00_tpt_blink[] = {
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index 46767dc6d649..761aeec07cdd 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1700,6 +1700,7 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
.has_s0s1 = 0,
.gen2_thermal_meter = 1,
+ .needs_full_init = 1,
.adda_1t_init = 0x0fc01616,
.adda_1t_path_on = 0x0fc01616,
.adda_2t_path_on_a = 0x0fc01616,
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
index 0b1bc04cb6ad..9eb26dfe4ca9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
@@ -278,8 +278,8 @@ static ssize_t rtl_debugfs_set_write_reg(struct file *filp,

tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);

- if (!buffer || copy_from_user(tmp, buffer, tmp_len))
- return count;
+ if (copy_from_user(tmp, buffer, tmp_len))
+ return -EFAULT;

tmp[tmp_len] = '\0';

@@ -287,7 +287,7 @@ static ssize_t rtl_debugfs_set_write_reg(struct file *filp,
num = sscanf(tmp, "%x %x %x", &addr, &val, &len);

if (num != 3)
- return count;
+ return -EINVAL;

switch (len) {
case 1:
@@ -375,8 +375,8 @@ static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,

tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);

- if (!buffer || copy_from_user(tmp, buffer, tmp_len))
- return count;
+ if (copy_from_user(tmp, buffer, tmp_len))
+ return -EFAULT;

tmp[tmp_len] = '\0';

@@ -386,7 +386,7 @@ static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
if (num != 4) {
rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
"Format is <path> <addr> <mask> <data>\n");
- return count;
+ return -EINVAL;
}

rtl_set_rfreg(hw, path, addr, bitmask, data);
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 2afe64f2abe6..589caeff2033 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -233,7 +233,7 @@ static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,

ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
if (ret)
- return -EBUSY;
+ return ret;

idx++;
} while (1);
@@ -247,6 +247,7 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
const struct rtw_pwr_seq_cmd **pwr_seq;
u8 rpwm;
bool cur_pwr;
+ int ret;

if (rtw_chip_wcpu_11ac(rtwdev)) {
rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
@@ -270,8 +271,9 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
return -EALREADY;

pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
- if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
- return -EINVAL;
+ ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);
+ if (ret)
+ return ret;

if (pwr_on)
set_bit(RTW_FLAG_POWERON, rtwdev->flags);
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index a703bb70b8f5..9e4a02a322ff 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -3290,18 +3290,22 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ret = ieee80211_register_hw(hw);
if (ret) {
rtw89_err(rtwdev, "failed to register hw\n");
- goto err;
+ goto err_free_supported_band;
}

ret = rtw89_regd_init(rtwdev, rtw89_regd_notifier);
if (ret) {
rtw89_err(rtwdev, "failed to init regd\n");
- goto err;
+ goto err_unregister_hw;
}

return 0;

-err:
+err_unregister_hw:
+ ieee80211_unregister_hw(hw);
+err_free_supported_band:
+ rtw89_core_clr_supported_band(rtwdev);
+
return ret;
}

diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 5f8e19639362..4a012962cd44 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -3828,25 +3828,26 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rtw89_pci_link_cfg(rtwdev);
rtw89_pci_l1ss_cfg(rtwdev);

- ret = rtw89_core_register(rtwdev);
- if (ret) {
- rtw89_err(rtwdev, "failed to register core\n");
- goto err_clear_resource;
- }
-
rtw89_core_napi_init(rtwdev);

ret = rtw89_pci_request_irq(rtwdev, pdev);
if (ret) {
rtw89_err(rtwdev, "failed to request pci irq\n");
- goto err_unregister;
+ goto err_deinit_napi;
+ }
+
+ ret = rtw89_core_register(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to register core\n");
+ goto err_free_irq;
}

return 0;

-err_unregister:
+err_free_irq:
+ rtw89_pci_free_irq(rtwdev, pdev);
+err_deinit_napi:
rtw89_core_napi_deinit(rtwdev);
- rtw89_core_unregister(rtwdev);
err_clear_resource:
rtw89_pci_clear_resource(rtwdev, pdev);
err_declaim_pci:
diff --git a/drivers/net/wwan/t7xx/Makefile b/drivers/net/wwan/t7xx/Makefile
index dc6a7d682c15..5e6398b527e7 100644
--- a/drivers/net/wwan/t7xx/Makefile
+++ b/drivers/net/wwan/t7xx/Makefile
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only

-ccflags-y += -Werror
-
obj-${CONFIG_MTK_T7XX} := mtk_t7xx.o
mtk_t7xx-y:= t7xx_pci.o \
t7xx_pcie_mac.o \
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c54c6ffba0bc..f502e032e7e4 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4769,8 +4769,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
u32 aer_notice_type = nvme_aer_subtype(result);
bool requeue = true;

- trace_nvme_async_event(ctrl, aer_notice_type);
-
switch (aer_notice_type) {
case NVME_AER_NOTICE_NS_CHANGED:
set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
@@ -4806,7 +4804,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)

static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
{
- trace_nvme_async_event(ctrl, NVME_AER_ERROR);
dev_warn(ctrl->device, "resetting controller due to AER\n");
nvme_reset_ctrl(ctrl);
}
@@ -4822,6 +4819,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
return;

+ trace_nvme_async_event(ctrl, result);
switch (aer_type) {
case NVME_AER_NOTICE:
requeue = nvme_handle_aen_notice(ctrl, result);
@@ -4839,7 +4837,6 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
case NVME_AER_SMART:
case NVME_AER_CSS:
case NVME_AER_VS:
- trace_nvme_async_event(ctrl, aer_type);
ctrl->aen_result = result;
break;
default:
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 6f0eaf6a1528..4fb5922ffdac 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -127,15 +127,12 @@ TRACE_EVENT(nvme_async_event,
),
TP_printk("nvme%d: NVME_AEN=%#08x [%s]",
__entry->ctrl_id, __entry->result,
- __print_symbolic(__entry->result,
- aer_name(NVME_AER_NOTICE_NS_CHANGED),
- aer_name(NVME_AER_NOTICE_ANA),
- aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
- aer_name(NVME_AER_NOTICE_DISC_CHANGED),
- aer_name(NVME_AER_ERROR),
- aer_name(NVME_AER_SMART),
- aer_name(NVME_AER_CSS),
- aer_name(NVME_AER_VS))
+ __print_symbolic(__entry->result & 0x7,
+ aer_name(NVME_AER_ERROR),
+ aer_name(NVME_AER_SMART),
+ aer_name(NVME_AER_NOTICE),
+ aer_name(NVME_AER_CSS),
+ aer_name(NVME_AER_VS))
)
);

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 76ceaadd6eea..31d35279b37a 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -686,6 +686,13 @@ static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
}
}

+static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
+{
+ /* Not supported: return zeroes */
+ nvmet_req_complete(req,
+ nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
+}
+
static void nvmet_execute_identify(struct nvmet_req *req)
{
if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
@@ -693,13 +700,8 @@ static void nvmet_execute_identify(struct nvmet_req *req)

switch (req->cmd->identify.cns) {
case NVME_ID_CNS_NS:
- switch (req->cmd->identify.csi) {
- case NVME_CSI_NVM:
- return nvmet_execute_identify_ns(req);
- default:
- break;
- }
- break;
+ nvmet_execute_identify_ns(req);
+ return;
case NVME_ID_CNS_CS_NS:
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
switch (req->cmd->identify.csi) {
@@ -711,29 +713,24 @@ static void nvmet_execute_identify(struct nvmet_req *req)
}
break;
case NVME_ID_CNS_CTRL:
- switch (req->cmd->identify.csi) {
- case NVME_CSI_NVM:
- return nvmet_execute_identify_ctrl(req);
- }
- break;
+ nvmet_execute_identify_ctrl(req);
+ return;
case NVME_ID_CNS_CS_CTRL:
- if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
- switch (req->cmd->identify.csi) {
- case NVME_CSI_ZNS:
- return nvmet_execute_identify_cns_cs_ctrl(req);
- default:
- break;
- }
- }
- break;
- case NVME_ID_CNS_NS_ACTIVE_LIST:
switch (req->cmd->identify.csi) {
case NVME_CSI_NVM:
- return nvmet_execute_identify_nslist(req);
- default:
+ nvmet_execute_identify_ctrl_nvm(req);
+ return;
+ case NVME_CSI_ZNS:
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ nvmet_execute_identify_ctrl_zns(req);
+ return;
+ }
break;
}
break;
+ case NVME_ID_CNS_NS_ACTIVE_LIST:
+ nvmet_execute_identify_nslist(req);
+ return;
case NVME_ID_CNS_NS_DESC_LIST:
if (nvmet_handle_identify_desclist(req) == true)
return;
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 5c16372f3b53..c780af36c1d4 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -614,10 +614,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
struct fcloop_fcpreq *tfcp_req =
container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+ unsigned long flags;
int ret = 0;
bool aborted = false;

- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
switch (tfcp_req->inistate) {
case INI_IO_START:
tfcp_req->inistate = INI_IO_ACTIVE;
@@ -626,11 +627,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
aborted = true;
break;
default:
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
WARN_ON(1);
return;
}
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

if (unlikely(aborted))
ret = -ECANCELED;
@@ -655,8 +656,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
container_of(work, struct fcloop_fcpreq, abort_rcv_work);
struct nvmefc_fcp_req *fcpreq;
bool completed = false;
+ unsigned long flags;

- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
fcpreq = tfcp_req->fcpreq;
switch (tfcp_req->inistate) {
case INI_IO_ABORTED:
@@ -665,11 +667,11 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
completed = true;
break;
default:
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
WARN_ON(1);
return;
}
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

if (unlikely(completed)) {
/* remove reference taken in original abort downcall */
@@ -681,9 +683,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
&tfcp_req->tgt_fcp_req);

- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
tfcp_req->fcpreq = NULL;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
/* call_host_done releases reference for abort downcall */
@@ -699,11 +701,12 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work)
struct fcloop_fcpreq *tfcp_req =
container_of(work, struct fcloop_fcpreq, tio_done_work);
struct nvmefc_fcp_req *fcpreq;
+ unsigned long flags;

- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
fcpreq = tfcp_req->fcpreq;
tfcp_req->inistate = INI_IO_COMPLETED;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
@@ -807,13 +810,14 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
u32 rsplen = 0, xfrlen = 0;
int fcp_err = 0, active, aborted;
u8 op = tgt_fcpreq->op;
+ unsigned long flags;

- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
fcpreq = tfcp_req->fcpreq;
active = tfcp_req->active;
aborted = tfcp_req->aborted;
tfcp_req->active = true;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

if (unlikely(active))
/* illegal - call while i/o active */
@@ -821,9 +825,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,

if (unlikely(aborted)) {
/* target transport has aborted i/o prior */
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
tfcp_req->active = false;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
tgt_fcpreq->transferred_length = 0;
tgt_fcpreq->fcp_error = -ECANCELED;
tgt_fcpreq->done(tgt_fcpreq);
@@ -880,9 +884,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
break;
}

- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
tfcp_req->active = false;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

tgt_fcpreq->transferred_length = xfrlen;
tgt_fcpreq->fcp_error = fcp_err;
@@ -896,15 +900,16 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+ unsigned long flags;

/*
* mark aborted only in case there were 2 threads in transport
* (one doing io, other doing abort) and only kills ops posted
* after the abort request
*/
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
tfcp_req->aborted = true;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

tfcp_req->status = NVME_SC_INTERNAL;

@@ -946,6 +951,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
struct fcloop_fcpreq *tfcp_req;
bool abortio = true;
+ unsigned long flags;

spin_lock(&inireq->inilock);
tfcp_req = inireq->tfcp_req;
@@ -958,7 +964,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
return;

/* break initiator/target relationship for io */
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
switch (tfcp_req->inistate) {
case INI_IO_START:
case INI_IO_ACTIVE:
@@ -968,11 +974,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
abortio = false;
break;
default:
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
WARN_ON(1);
return;
}
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

if (abortio)
/* leave the reference while the work item is scheduled */
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index bda1c1f71f39..273cca49a040 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -578,7 +578,7 @@ bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
-void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req);
+void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 1254cf57e008..d93ee4ae1945 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -70,7 +70,7 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
return true;
}

-void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
+void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req)
{
u8 zasl = req->sq->ctrl->subsys->zasl;
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -97,7 +97,7 @@ void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)

void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
{
- struct nvme_id_ns_zns *id_zns;
+ struct nvme_id_ns_zns *id_zns = NULL;
u64 zsze;
u16 status;
u32 mar, mor;
@@ -118,16 +118,18 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
if (status)
goto done;

- if (!bdev_is_zoned(req->ns->bdev)) {
- req->error_loc = offsetof(struct nvme_identify, nsid);
- goto done;
- }
-
if (nvmet_ns_revalidate(req->ns)) {
mutex_lock(&req->ns->subsys->lock);
nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
mutex_unlock(&req->ns->subsys->lock);
}
+
+ if (!bdev_is_zoned(req->ns->bdev)) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc = offsetof(struct nvme_identify, nsid);
+ goto out;
+ }
+
zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
req->ns->blksize_shift;
id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
@@ -148,8 +150,8 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)

done:
status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
- kfree(id_zns);
out:
+ kfree(id_zns);
nvmet_req_complete(req, status);
}
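
The zns.c hunk above moves the kfree() below the "out:" label so that every
exit taken after the allocation, including the new "not a zoned block device"
error return, frees the identify buffer exactly once. A standalone sketch of
that single-exit cleanup shape (hypothetical helper, not the nvmet code):

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

static int example_build_report(bool zoned, void __user *dst, size_t len)
{
        void *buf = kzalloc(len, GFP_KERNEL);
        int ret = 0;

        if (!buf)
                return -ENOMEM;

        if (!zoned) {
                ret = -EINVAL;
                goto out;               /* error path still reaches the kfree() */
        }

        /* ... fill buf with the report ... */
        if (copy_to_user(dst, buf, len))
                ret = -EFAULT;
out:
        kfree(buf);                     /* single free covers success and error paths */
        return ret;
}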

diff --git a/drivers/of/device.c b/drivers/of/device.c
index 8cefe5a7d04e..ce225d2590b5 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -297,12 +297,15 @@ int of_device_request_module(struct device *dev)
if (size < 0)
return size;

- str = kmalloc(size + 1, GFP_KERNEL);
+ /* Reserve an additional byte for the trailing '\0' */
+ size++;
+
+ str = kmalloc(size, GFP_KERNEL);
if (!str)
return -ENOMEM;

of_device_get_modalias(dev, str, size);
- str[size] = '\0';
+ str[size - 1] = '\0';
ret = request_module(str);
kfree(str);
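
The of/device.c fix above accounts for the trailing NUL inside the allocation
size instead of writing one byte past the end of the buffer. The same sizing
pattern in isolation, with a hypothetical fill helper:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/minmax.h>

/* Hypothetical: writes up to len bytes and does not guarantee termination. */
static void example_fill(char *buf, ssize_t len)
{
        memcpy(buf, "example:payload", min_t(ssize_t, len, 15));
}

static char *example_alloc_string(ssize_t payload_len)
{
        char *str;

        /* Reserve an additional byte for the trailing '\0'. */
        payload_len++;

        str = kmalloc(payload_len, GFP_KERNEL);
        if (!str)
                return NULL;

        example_fill(str, payload_len);
        str[payload_len - 1] = '\0';    /* terminator stays inside the buffer */
        return str;
}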

diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 62ce3abf0f19..ae9d083c406f 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -294,6 +294,7 @@ config PCI_MESON
default m if ARCH_MESON
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+ select REGMAP_MMIO
help
Say Y here if you want to enable PCI controller support on Amlogic
SoCs. The PCI controller on Amlogic is based on DesignWare hardware
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 1dde5c579edc..47db2d20568e 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -1402,6 +1402,13 @@ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
static int __init imx6_pcie_init(void)
{
#ifdef CONFIG_ARM
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, imx6_pcie_of_match);
+ if (!np)
+ return -ENODEV;
+ of_node_put(np);
+
/*
* Since probe() can be deferred we need to make sure that
* hook_fault_code is not called after __init memory is freed
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index f8e512540fb8..dbe6df0cb611 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1276,11 +1276,9 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
val &= ~REQ_NOT_ENTR_L1;
writel(val, pcie->parf + PCIE20_PARF_PM_CTRL);

- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
- val |= BIT(31);
- writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
- }
+ val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

return 0;
err_disable_clocks:
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index d17f3bf36f70..ad12515a4a12 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -63,7 +63,14 @@ int pciehp_configure_device(struct controller *ctrl)

pci_assign_unassigned_bridge_resources(bridge);
pcie_bus_configure_settings(parent);
+
+ /*
+ * Release reset_lock during driver binding
+ * to avoid AB-BA deadlock with device_lock.
+ */
+ up_read(&ctrl->reset_lock);
pci_bus_add_devices(parent);
+ down_read_nested(&ctrl->reset_lock, ctrl->depth);

out:
pci_unlock_rescan_remove();
@@ -104,7 +111,15 @@ void pciehp_unconfigure_device(struct controller *ctrl, bool presence)
list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
bus_list) {
pci_dev_get(dev);
+
+ /*
+ * Release reset_lock during driver unbinding
+ * to avoid AB-BA deadlock with device_lock.
+ */
+ up_read(&ctrl->reset_lock);
pci_stop_and_remove_bus_device(dev);
+ down_read_nested(&ctrl->reset_lock, ctrl->depth);
+
/*
* Ensure that no new Requests will be generated from
* the device.
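
The pciehp hunks above drop ctrl->reset_lock around the calls that bind and
unbind drivers, because those paths take device_lock() while a concurrent
reset path can hold device_lock() and then wait for reset_lock; the real code
re-acquires with down_read_nested() for lockdep's benefit. A compressed
sketch of that AB-BA situation, with hypothetical locks standing in for
reset_lock and device_lock():

#include <linux/rwsem.h>
#include <linux/mutex.h>

static DECLARE_RWSEM(reset_sem);        /* hypothetical: role of ctrl->reset_lock */
static DEFINE_MUTEX(dev_mutex);         /* hypothetical: role of device_lock() */

/* Path A: runs under the reset lock, then needs the device lock to bind. */
static void path_a_bind(void)
{
        down_read(&reset_sem);
        /* ... hotplug work that needs the reset lock ... */

        up_read(&reset_sem);            /* drop before taking dev_mutex */
        mutex_lock(&dev_mutex);
        /* ... bind or unbind the driver ... */
        mutex_unlock(&dev_mutex);
        down_read(&reset_sem);          /* re-acquire afterwards */

        /* ... remaining work under the reset lock ... */
        up_read(&reset_sem);
}

/* Path B: holds the device lock, then needs the reset lock (e.g. a reset). */
static void path_b_reset(void)
{
        mutex_lock(&dev_mutex);
        down_write(&reset_sem);
        /* ... perform the reset ... */
        up_write(&reset_sem);
        mutex_unlock(&dev_mutex);
}
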
diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
index a6b9b479b97a..87734e4c3c20 100644
--- a/drivers/pci/pcie/edr.c
+++ b/drivers/pci/pcie/edr.c
@@ -193,6 +193,7 @@ static void edr_handle_event(acpi_handle handle, u32 event, void *data)
*/
if (estate == PCI_ERS_RESULT_RECOVERED) {
pci_dbg(edev, "DPC port successfully recovered\n");
+ pcie_clear_device_status(edev);
acpi_send_edr_status(pdev, edev, EDR_OST_SUCCESS);
} else {
pci_dbg(edev, "DPC port recovery failed\n");
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 494fa46f5767..8d32a3834688 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1939,6 +1939,19 @@ static void quirk_radeon_pm(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);

+/*
+ * NVIDIA Ampere-based HDA controllers can wedge the whole device if a bus
+ * reset is performed too soon after transition to D0, extend d3hot_delay
+ * to previous effective default for all NVIDIA HDA controllers.
+ */
+static void quirk_nvidia_hda_pm(struct pci_dev *dev)
+{
+ quirk_d3hot_delay(dev, 20);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8,
+ quirk_nvidia_hda_pm);
+
/*
* Ryzen5/7 XHCI controllers fail upon resume from runtime suspend or s2idle.
* https://bugzilla.kernel.org/show_bug.cgi?id=205587
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 1deb61b22bc7..ff86075edca4 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -57,14 +57,12 @@
#define CMN_INFO_REQ_VC_NUM GENMASK_ULL(1, 0)

/* XPs also have some local topology info which has uses too */
-#define CMN_MXP__CONNECT_INFO_P0 0x0008
-#define CMN_MXP__CONNECT_INFO_P1 0x0010
-#define CMN_MXP__CONNECT_INFO_P2 0x0028
-#define CMN_MXP__CONNECT_INFO_P3 0x0030
-#define CMN_MXP__CONNECT_INFO_P4 0x0038
-#define CMN_MXP__CONNECT_INFO_P5 0x0040
+#define CMN_MXP__CONNECT_INFO(p) (0x0008 + 8 * (p))
#define CMN__CONNECT_INFO_DEVICE_TYPE GENMASK_ULL(4, 0)

+#define CMN_MAX_PORTS 6
+#define CI700_CONNECT_INFO_P2_5_OFFSET 0x10
+
/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET 0x2000

@@ -166,7 +164,7 @@
#define CMN_EVENT_BYNODEID(event) FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config)
#define CMN_EVENT_NODEID(event) FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config)

-#define CMN_CONFIG_WP_COMBINE GENMASK_ULL(27, 24)
+#define CMN_CONFIG_WP_COMBINE GENMASK_ULL(30, 27)
#define CMN_CONFIG_WP_DEV_SEL GENMASK_ULL(50, 48)
#define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(55, 51)
/* Note that we don't yet support the tertiary match group on newer IPs */
@@ -396,6 +394,25 @@ static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
return NULL;
}

+static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
+ const struct arm_cmn_node *xp, int port)
+{
+ int offset = CMN_MXP__CONNECT_INFO(port);
+
+ if (port >= 2) {
+ if (cmn->model & (CMN600 | CMN650))
+ return 0;
+ /*
+ * CI-700 may have extra ports, but still has the
+ * mesh_port_connect_info registers in the way.
+ */
+ if (cmn->model == CI700)
+ offset += CI700_CONNECT_INFO_P2_5_OFFSET;
+ }
+
+ return readl_relaxed(xp->pmu_base - CMN_PMU_OFFSET + offset);
+}
+
static struct dentry *arm_cmn_debugfs;

#ifdef CONFIG_DEBUG_FS
@@ -469,7 +486,7 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
y = cmn->mesh_y;
while (y--) {
int xp_base = cmn->mesh_x * y;
- u8 port[6][CMN_MAX_DIMENSION];
+ u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION];

for (x = 0; x < cmn->mesh_x; x++)
seq_puts(s, "--------+");
@@ -477,14 +494,9 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
seq_printf(s, "\n%d |", y);
for (x = 0; x < cmn->mesh_x; x++) {
struct arm_cmn_node *xp = cmn->xps + xp_base + x;
- void __iomem *base = xp->pmu_base - CMN_PMU_OFFSET;
-
- port[0][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P0);
- port[1][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P1);
- port[2][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P2);
- port[3][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P3);
- port[4][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P4);
- port[5][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P5);
+
+ for (p = 0; p < CMN_MAX_PORTS; p++)
+ port[p][x] = arm_cmn_device_connect_info(cmn, xp, p);
seq_printf(s, " XP #%-2d |", xp_base + x);
}

@@ -2082,18 +2094,9 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
* from this, since in that case we will see at least one XP
* with port 2 connected, for the HN-D.
*/
- if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P0))
- xp_ports |= BIT(0);
- if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P1))
- xp_ports |= BIT(1);
- if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P2))
- xp_ports |= BIT(2);
- if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P3))
- xp_ports |= BIT(3);
- if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P4))
- xp_ports |= BIT(4);
- if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P5))
- xp_ports |= BIT(5);
+ for (int p = 0; p < CMN_MAX_PORTS; p++)
+ if (arm_cmn_device_connect_info(cmn, xp, p))
+ xp_ports |= BIT(p);

if (cmn->multi_dtm && (xp_ports & 0xc))
arm_cmn_init_dtm(dtm++, xp, 1);
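
The arm-cmn change above folds the six per-port defines into one
parameterised offset, CMN_MXP__CONNECT_INFO(p) = 0x0008 + 8 * p, with ports
2-5 shifted by a further 0x10 on CI-700 and returned as 0 (no device) on
CMN-600 and CMN-650. Working through the arithmetic, using only the values
visible in the hunks:

/*
 *   p = 0: 0x0008                      p = 3: 0x0020 (CI-700: 0x0030)
 *   p = 1: 0x0010                      p = 4: 0x0028 (CI-700: 0x0038)
 *   p = 2: 0x0018 (CI-700: 0x0028)     p = 5: 0x0030 (CI-700: 0x0040)
 *
 * The CI-700-adjusted column reproduces the offsets that were previously
 * hard-coded (0x0008, 0x0010, 0x0028, 0x0030, 0x0038, 0x0040), while the
 * contiguous column is what the helper now returns for the remaining models.
 */
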
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 3852c18362f5..382fe5ee6100 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -869,7 +869,7 @@ static int __init pmu_sbi_devinit(void)
struct platform_device *pdev;

if (sbi_spec_version < sbi_mk_version(0, 3) ||
- sbi_probe_extension(SBI_EXT_PMU) <= 0) {
+ !sbi_probe_extension(SBI_EXT_PMU)) {
return 0;
}

diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index bb40172e23d4..876a713e3874 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -1738,7 +1738,7 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
};

static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
- .lanes = 1,
+ .lanes = 2,

.tables = {
.serdes = sc8180x_qmp_pcie_serdes_tbl,
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index dce45fbbd699..ce14645a86ec 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -782,6 +782,7 @@ static int tegra_xusb_add_usb2_port(struct tegra_xusb_padctl *padctl,
usb2->base.lane = usb2->base.ops->map(&usb2->base);
if (IS_ERR(usb2->base.lane)) {
err = PTR_ERR(usb2->base.lane);
+ tegra_xusb_port_unregister(&usb2->base);
goto out;
}

@@ -848,6 +849,7 @@ static int tegra_xusb_add_ulpi_port(struct tegra_xusb_padctl *padctl,
ulpi->base.lane = ulpi->base.ops->map(&ulpi->base);
if (IS_ERR(ulpi->base.lane)) {
err = PTR_ERR(ulpi->base.lane);
+ tegra_xusb_port_unregister(&ulpi->base);
goto out;
}

diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index 41725c6bcdf6..6a63380f6a71 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -422,18 +422,17 @@ static int wiz_mode_select(struct wiz *wiz)
int i;

for (i = 0; i < num_lanes; i++) {
- if (wiz->lane_phy_type[i] == PHY_TYPE_DP)
+ if (wiz->lane_phy_type[i] == PHY_TYPE_DP) {
mode = LANE_MODE_GEN1;
- else if (wiz->lane_phy_type[i] == PHY_TYPE_QSGMII)
+ } else if (wiz->lane_phy_type[i] == PHY_TYPE_QSGMII) {
mode = LANE_MODE_GEN2;
- else
- continue;
-
- if (wiz->lane_phy_type[i] == PHY_TYPE_USXGMII) {
+ } else if (wiz->lane_phy_type[i] == PHY_TYPE_USXGMII) {
ret = regmap_field_write(wiz->p0_mac_src_sel[i], 0x3);
ret = regmap_field_write(wiz->p0_rxfclk_sel[i], 0x3);
ret = regmap_field_write(wiz->p0_refclk_sel[i], 0x3);
mode = LANE_MODE_GEN1;
+ } else {
+ continue;
}

ret = regmap_field_write(wiz->p_standard_mode[i], mode);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index c7cdccdb4332..0f1ab0829ffe 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -90,6 +90,8 @@ struct bcm2835_pinctrl {
struct pinctrl_gpio_range gpio_range;

raw_spinlock_t irq_lock[BCM2835_NUM_BANKS];
+ /* Protect FSEL registers */
+ spinlock_t fsel_lock;
};

/* pins are just named GPIO0..GPIO53 */
@@ -284,14 +286,19 @@ static inline void bcm2835_pinctrl_fsel_set(
struct bcm2835_pinctrl *pc, unsigned pin,
enum bcm2835_fsel fsel)
{
- u32 val = bcm2835_gpio_rd(pc, FSEL_REG(pin));
- enum bcm2835_fsel cur = (val >> FSEL_SHIFT(pin)) & BCM2835_FSEL_MASK;
+ u32 val;
+ enum bcm2835_fsel cur;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pc->fsel_lock, flags);
+ val = bcm2835_gpio_rd(pc, FSEL_REG(pin));
+ cur = (val >> FSEL_SHIFT(pin)) & BCM2835_FSEL_MASK;

dev_dbg(pc->dev, "read %08x (%u => %s)\n", val, pin,
- bcm2835_functions[cur]);
+ bcm2835_functions[cur]);

if (cur == fsel)
- return;
+ goto unlock;

if (cur != BCM2835_FSEL_GPIO_IN && fsel != BCM2835_FSEL_GPIO_IN) {
/* always transition through GPIO_IN */
@@ -309,6 +316,9 @@ static inline void bcm2835_pinctrl_fsel_set(
dev_dbg(pc->dev, "write %08x (%u <= %s)\n", val, pin,
bcm2835_functions[fsel]);
bcm2835_gpio_wr(pc, FSEL_REG(pin), val);
+
+unlock:
+ spin_unlock_irqrestore(&pc->fsel_lock, flags);
}

static int bcm2835_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -1248,6 +1258,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
pc->gpio_chip = *pdata->gpio_chip;
pc->gpio_chip.parent = dev;

+ spin_lock_init(&pc->fsel_lock);
for (i = 0; i < BCM2835_NUM_BANKS; i++) {
unsigned long events;
unsigned offset;
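
The bcm2835 change above puts the function-select updates under a dedicated
spinlock: each FSEL register packs the select fields of several pins, so an
unlocked read-modify-write from two contexts can silently drop one of the
updates. The same pattern in isolation, with a hypothetical register and
field layout:

#include <linux/spinlock.h>
#include <linux/io.h>

static DEFINE_SPINLOCK(rmw_lock);       /* hypothetical: protects one register block */

/* Update a field that shares its 32-bit register with other pins. */
static void example_update_field(void __iomem *reg, u32 mask, u32 shift, u32 val)
{
        unsigned long flags;
        u32 tmp;

        spin_lock_irqsave(&rmw_lock, flags);
        tmp = readl(reg);
        tmp &= ~(mask << shift);
        tmp |= (val & mask) << shift;
        writel(tmp, reg);
        spin_unlock_irqrestore(&rmw_lock, flags);
}
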
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index e97ce45b6d53..a55998ae29fa 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -216,6 +216,15 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
}
}

+ /*
+ * As per Hardware Programming Guide, when configuring pin as output,
+ * set the pin value before setting output-enable (OE).
+ */
+ if (output_enabled) {
+ val = u32_encode_bits(value ? 1 : 0, LPI_GPIO_VALUE_OUT_MASK);
+ lpi_gpio_write(pctrl, group, LPI_GPIO_VALUE_REG, val);
+ }
+
val = lpi_gpio_read(pctrl, group, LPI_GPIO_CFG_REG);

u32p_replace_bits(&val, pullup, LPI_GPIO_PULL_MASK);
@@ -225,11 +234,6 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,

lpi_gpio_write(pctrl, group, LPI_GPIO_CFG_REG, val);

- if (output_enabled) {
- val = u32_encode_bits(value ? 1 : 0, LPI_GPIO_VALUE_OUT_MASK);
- lpi_gpio_write(pctrl, group, LPI_GPIO_VALUE_REG, val);
- }
-
return 0;
}
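
The lpass-lpi reorder above programs the output value before setting
output-enable, per the note in the new comment, so the pad never drives a
stale level in the window between the two register writes. A minimal sketch
with hypothetical VALUE and CFG registers (OE assumed in bit 0):

#include <linux/io.h>
#include <linux/bits.h>

static void example_set_output(void __iomem *value_reg, void __iomem *cfg_reg,
                               bool level)
{
        u32 cfg;

        /* 1. Latch the desired level while the pad is still an input. */
        writel(level ? 1 : 0, value_reg);

        /* 2. Only then turn on the output driver. */
        cfg = readl(cfg_reg);
        writel(cfg | BIT(0), cfg_reg);
}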

diff --git a/drivers/pinctrl/ralink/pinctrl-mt7620.c b/drivers/pinctrl/ralink/pinctrl-mt7620.c
index 22ff16eff02f..929a1ace56ae 100644
--- a/drivers/pinctrl/ralink/pinctrl-mt7620.c
+++ b/drivers/pinctrl/ralink/pinctrl-mt7620.c
@@ -372,6 +372,7 @@ static int mt7620_pinctrl_probe(struct platform_device *pdev)

static const struct of_device_id mt7620_pinctrl_match[] = {
{ .compatible = "ralink,mt7620-pinctrl" },
+ { .compatible = "ralink,rt2880-pinmux" },
{}
};
MODULE_DEVICE_TABLE(of, mt7620_pinctrl_match);
diff --git a/drivers/pinctrl/ralink/pinctrl-mt7621.c b/drivers/pinctrl/ralink/pinctrl-mt7621.c
index b47968f40e0c..0297cf455b3a 100644
--- a/drivers/pinctrl/ralink/pinctrl-mt7621.c
+++ b/drivers/pinctrl/ralink/pinctrl-mt7621.c
@@ -97,6 +97,7 @@ static int mt7621_pinctrl_probe(struct platform_device *pdev)

static const struct of_device_id mt7621_pinctrl_match[] = {
{ .compatible = "ralink,mt7621-pinctrl" },
+ { .compatible = "ralink,rt2880-pinmux" },
{}
};
MODULE_DEVICE_TABLE(of, mt7621_pinctrl_match);
diff --git a/drivers/pinctrl/ralink/pinctrl-rt2880.c b/drivers/pinctrl/ralink/pinctrl-rt2880.c
index 811e12df1133..fd9af7c2ffd0 100644
--- a/drivers/pinctrl/ralink/pinctrl-rt2880.c
+++ b/drivers/pinctrl/ralink/pinctrl-rt2880.c
@@ -41,6 +41,7 @@ static int rt2880_pinctrl_probe(struct platform_device *pdev)

static const struct of_device_id rt2880_pinctrl_match[] = {
{ .compatible = "ralink,rt2880-pinctrl" },
+ { .compatible = "ralink,rt2880-pinmux" },
{}
};
MODULE_DEVICE_TABLE(of, rt2880_pinctrl_match);
diff --git a/drivers/pinctrl/ralink/pinctrl-rt305x.c b/drivers/pinctrl/ralink/pinctrl-rt305x.c
index 5b204b7ca1f3..13a012a65d1d 100644
--- a/drivers/pinctrl/ralink/pinctrl-rt305x.c
+++ b/drivers/pinctrl/ralink/pinctrl-rt305x.c
@@ -118,6 +118,7 @@ static int rt305x_pinctrl_probe(struct platform_device *pdev)

static const struct of_device_id rt305x_pinctrl_match[] = {
{ .compatible = "ralink,rt305x-pinctrl" },
+ { .compatible = "ralink,rt2880-pinmux" },
{}
};
MODULE_DEVICE_TABLE(of, rt305x_pinctrl_match);
diff --git a/drivers/pinctrl/ralink/pinctrl-rt3883.c b/drivers/pinctrl/ralink/pinctrl-rt3883.c
index 44a66c3d2d2a..b263764011e7 100644
--- a/drivers/pinctrl/ralink/pinctrl-rt3883.c
+++ b/drivers/pinctrl/ralink/pinctrl-rt3883.c
@@ -88,6 +88,7 @@ static int rt3883_pinctrl_probe(struct platform_device *pdev)

static const struct of_device_id rt3883_pinctrl_match[] = {
{ .compatible = "ralink,rt3883-pinctrl" },
+ { .compatible = "ralink,rt2880-pinmux" },
{}
};
MODULE_DEVICE_TABLE(of, rt3883_pinctrl_match);
diff --git a/drivers/pinctrl/renesas/pfc-r8a779a0.c b/drivers/pinctrl/renesas/pfc-r8a779a0.c
index 760c83a8740b..6069869353bb 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779a0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779a0.c
@@ -696,16 +696,8 @@ static const u16 pinmux_data[] = {
PINMUX_SINGLE(PCIE0_CLKREQ_N),

PINMUX_SINGLE(AVB0_PHY_INT),
- PINMUX_SINGLE(AVB0_MAGIC),
- PINMUX_SINGLE(AVB0_MDC),
- PINMUX_SINGLE(AVB0_MDIO),
- PINMUX_SINGLE(AVB0_TXCREFCLK),

PINMUX_SINGLE(AVB1_PHY_INT),
- PINMUX_SINGLE(AVB1_MAGIC),
- PINMUX_SINGLE(AVB1_MDC),
- PINMUX_SINGLE(AVB1_MDIO),
- PINMUX_SINGLE(AVB1_TXCREFCLK),

PINMUX_SINGLE(AVB2_AVTP_PPS),
PINMUX_SINGLE(AVB2_AVTP_CAPTURE),
diff --git a/drivers/pinctrl/renesas/pfc-r8a779f0.c b/drivers/pinctrl/renesas/pfc-r8a779f0.c
index 417c357f16b1..65c141ce909a 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779f0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779f0.c
@@ -1213,7 +1213,7 @@ static const unsigned int tsn1_avtp_pps_pins[] = {
RCAR_GP_PIN(3, 13),
};
static const unsigned int tsn1_avtp_pps_mux[] = {
- TSN0_AVTP_PPS_MARK,
+ TSN1_AVTP_PPS_MARK,
};
static const unsigned int tsn1_avtp_capture_a_pins[] = {
/* TSN1_AVTP_CAPTURE_A */
diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
index 5dd1c2c7708a..43a63a21a6fb 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779g0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
@@ -156,54 +156,54 @@
#define GPSR3_0 F_(MMC_SD_D1, IP0SR3_3_0)

/* GPSR4 */
-#define GPSR4_24 FM(AVS1)
-#define GPSR4_23 FM(AVS0)
-#define GPSR4_22 FM(PCIE1_CLKREQ_N)
-#define GPSR4_21 FM(PCIE0_CLKREQ_N)
-#define GPSR4_20 FM(TSN0_TXCREFCLK)
-#define GPSR4_19 FM(TSN0_TD2)
-#define GPSR4_18 FM(TSN0_TD3)
-#define GPSR4_17 FM(TSN0_RD2)
-#define GPSR4_16 FM(TSN0_RD3)
-#define GPSR4_15 FM(TSN0_TD0)
-#define GPSR4_14 FM(TSN0_TD1)
-#define GPSR4_13 FM(TSN0_RD1)
-#define GPSR4_12 FM(TSN0_TXC)
-#define GPSR4_11 FM(TSN0_RXC)
-#define GPSR4_10 FM(TSN0_RD0)
-#define GPSR4_9 FM(TSN0_TX_CTL)
-#define GPSR4_8 FM(TSN0_AVTP_PPS0)
-#define GPSR4_7 FM(TSN0_RX_CTL)
-#define GPSR4_6 FM(TSN0_AVTP_CAPTURE)
-#define GPSR4_5 FM(TSN0_AVTP_MATCH)
-#define GPSR4_4 FM(TSN0_LINK)
-#define GPSR4_3 FM(TSN0_PHY_INT)
-#define GPSR4_2 FM(TSN0_AVTP_PPS1)
-#define GPSR4_1 FM(TSN0_MDC)
-#define GPSR4_0 FM(TSN0_MDIO)
+#define GPSR4_24 F_(AVS1, IP3SR4_3_0)
+#define GPSR4_23 F_(AVS0, IP2SR4_31_28)
+#define GPSR4_22 F_(PCIE1_CLKREQ_N, IP2SR4_27_24)
+#define GPSR4_21 F_(PCIE0_CLKREQ_N, IP2SR4_23_20)
+#define GPSR4_20 F_(TSN0_TXCREFCLK, IP2SR4_19_16)
+#define GPSR4_19 F_(TSN0_TD2, IP2SR4_15_12)
+#define GPSR4_18 F_(TSN0_TD3, IP2SR4_11_8)
+#define GPSR4_17 F_(TSN0_RD2, IP2SR4_7_4)
+#define GPSR4_16 F_(TSN0_RD3, IP2SR4_3_0)
+#define GPSR4_15 F_(TSN0_TD0, IP1SR4_31_28)
+#define GPSR4_14 F_(TSN0_TD1, IP1SR4_27_24)
+#define GPSR4_13 F_(TSN0_RD1, IP1SR4_23_20)
+#define GPSR4_12 F_(TSN0_TXC, IP1SR4_19_16)
+#define GPSR4_11 F_(TSN0_RXC, IP1SR4_15_12)
+#define GPSR4_10 F_(TSN0_RD0, IP1SR4_11_8)
+#define GPSR4_9 F_(TSN0_TX_CTL, IP1SR4_7_4)
+#define GPSR4_8 F_(TSN0_AVTP_PPS0, IP1SR4_3_0)
+#define GPSR4_7 F_(TSN0_RX_CTL, IP0SR4_31_28)
+#define GPSR4_6 F_(TSN0_AVTP_CAPTURE, IP0SR4_27_24)
+#define GPSR4_5 F_(TSN0_AVTP_MATCH, IP0SR4_23_20)
+#define GPSR4_4 F_(TSN0_LINK, IP0SR4_19_16)
+#define GPSR4_3 F_(TSN0_PHY_INT, IP0SR4_15_12)
+#define GPSR4_2 F_(TSN0_AVTP_PPS1, IP0SR4_11_8)
+#define GPSR4_1 F_(TSN0_MDC, IP0SR4_7_4)
+#define GPSR4_0 F_(TSN0_MDIO, IP0SR4_3_0)

/* GPSR 5 */
-#define GPSR5_20 FM(AVB2_RX_CTL)
-#define GPSR5_19 FM(AVB2_TX_CTL)
-#define GPSR5_18 FM(AVB2_RXC)
-#define GPSR5_17 FM(AVB2_RD0)
-#define GPSR5_16 FM(AVB2_TXC)
-#define GPSR5_15 FM(AVB2_TD0)
-#define GPSR5_14 FM(AVB2_RD1)
-#define GPSR5_13 FM(AVB2_RD2)
-#define GPSR5_12 FM(AVB2_TD1)
-#define GPSR5_11 FM(AVB2_TD2)
-#define GPSR5_10 FM(AVB2_MDIO)
-#define GPSR5_9 FM(AVB2_RD3)
-#define GPSR5_8 FM(AVB2_TD3)
-#define GPSR5_7 FM(AVB2_TXCREFCLK)
-#define GPSR5_6 FM(AVB2_MDC)
-#define GPSR5_5 FM(AVB2_MAGIC)
-#define GPSR5_4 FM(AVB2_PHY_INT)
-#define GPSR5_3 FM(AVB2_LINK)
-#define GPSR5_2 FM(AVB2_AVTP_MATCH)
-#define GPSR5_1 FM(AVB2_AVTP_CAPTURE)
-#define GPSR5_0 FM(AVB2_AVTP_PPS)
+#define GPSR5_20 F_(AVB2_RX_CTL, IP2SR5_19_16)
+#define GPSR5_19 F_(AVB2_TX_CTL, IP2SR5_15_12)
+#define GPSR5_18 F_(AVB2_RXC, IP2SR5_11_8)
+#define GPSR5_17 F_(AVB2_RD0, IP2SR5_7_4)
+#define GPSR5_16 F_(AVB2_TXC, IP2SR5_3_0)
+#define GPSR5_15 F_(AVB2_TD0, IP1SR5_31_28)
+#define GPSR5_14 F_(AVB2_RD1, IP1SR5_27_24)
+#define GPSR5_13 F_(AVB2_RD2, IP1SR5_23_20)
+#define GPSR5_12 F_(AVB2_TD1, IP1SR5_19_16)
+#define GPSR5_11 F_(AVB2_TD2, IP1SR5_15_12)
+#define GPSR5_10 F_(AVB2_MDIO, IP1SR5_11_8)
+#define GPSR5_9 F_(AVB2_RD3, IP1SR5_7_4)
+#define GPSR5_8 F_(AVB2_TD3, IP1SR5_3_0)
+#define GPSR5_7 F_(AVB2_TXCREFCLK, IP0SR5_31_28)
+#define GPSR5_6 F_(AVB2_MDC, IP0SR5_27_24)
+#define GPSR5_5 F_(AVB2_MAGIC, IP0SR5_23_20)
+#define GPSR5_4 F_(AVB2_PHY_INT, IP0SR5_19_16)
+#define GPSR5_3 F_(AVB2_LINK, IP0SR5_15_12)
+#define GPSR5_2 F_(AVB2_AVTP_MATCH, IP0SR5_11_8)
+#define GPSR5_1 F_(AVB2_AVTP_CAPTURE, IP0SR5_7_4)
+#define GPSR5_0 F_(AVB2_AVTP_PPS, IP0SR5_3_0)

/* GPSR 6 */
#define GPSR6_20 F_(AVB1_TXCREFCLK, IP2SR6_19_16)
@@ -268,209 +268,271 @@
#define GPSR8_0 F_(SCL0, IP0SR8_3_0)

/* SR0 */
-/* IP0SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_B) FM(TCLK2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_7_4 F_(0, 0) FM(MSIOF3_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_11_8 F_(0, 0) FM(MSIOF3_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_15_12 FM(IRQ3) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_19_16 FM(IRQ2) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_23_20 FM(IRQ1) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_27_24 FM(IRQ0) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_31_28 FM(MSIOF5_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IP1SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP1SR0_3_0 FM(MSIOF5_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR0_7_4 FM(MSIOF5_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR0_11_8 FM(MSIOF5_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR0_15_12 FM(MSIOF5_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR0_19_16 FM(MSIOF5_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR0_23_20 FM(MSIOF2_SS2) FM(TCLK1) FM(IRQ2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR0_27_24 FM(MSIOF2_SS1) FM(HTX1) FM(TX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR0_31_28 FM(MSIOF2_SYNC) FM(HRX1) FM(RX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IP2SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP2SR0_3_0 FM(MSIOF2_TXD) FM(HCTS1_N) FM(CTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR0_7_4 FM(MSIOF2_SCK) FM(HRTS1_N) FM(RTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR0_11_8 FM(MSIOF2_RXD) FM(HSCK1) FM(SCK1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+/* IP0SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_N_B) FM(TCLK2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_7_4 F_(0, 0) FM(MSIOF3_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_11_8 F_(0, 0) FM(MSIOF3_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_15_12 FM(IRQ3) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_19_16 FM(IRQ2) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_23_20 FM(IRQ1) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_27_24 FM(IRQ0) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_31_28 FM(MSIOF5_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR0_3_0 FM(MSIOF5_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_7_4 FM(MSIOF5_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_11_8 FM(MSIOF5_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_15_12 FM(MSIOF5_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_19_16 FM(MSIOF5_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_23_20 FM(MSIOF2_SS2) FM(TCLK1) FM(IRQ2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_27_24 FM(MSIOF2_SS1) FM(HTX1) FM(TX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_31_28 FM(MSIOF2_SYNC) FM(HRX1) FM(RX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR0_3_0 FM(MSIOF2_TXD) FM(HCTS1_N) FM(CTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_7_4 FM(MSIOF2_SCK) FM(HRTS1_N) FM(RTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_11_8 FM(MSIOF2_RXD) FM(HSCK1) FM(SCK1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)

/* SR1 */
-/* IP0SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP0SR1_3_0 FM(MSIOF1_SS2) FM(HTX3_A) FM(TX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR1_7_4 FM(MSIOF1_SS1) FM(HCTS3_N_A) FM(RX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR1_11_8 FM(MSIOF1_SYNC) FM(HRTS3_N_A) FM(RTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR1_15_12 FM(MSIOF1_SCK) FM(HSCK3_A) FM(CTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR1_19_16 FM(MSIOF1_TXD) FM(HRX3_A) FM(SCK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR1_23_20 FM(MSIOF1_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR1_27_24 FM(MSIOF0_SS2) FM(HTX1_X) FM(TX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR1_31_28 FM(MSIOF0_SS1) FM(HRX1_X) FM(RX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IP1SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP1SR1_3_0 FM(MSIOF0_SYNC) FM(HCTS1_N_X) FM(CTS1_N_X) FM(CANFD5_TX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR1_7_4 FM(MSIOF0_TXD) FM(HRTS1_N_X) FM(RTS1_N_X) FM(CANFD5_RX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR1_11_8 FM(MSIOF0_SCK) FM(HSCK1_X) FM(SCK1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR1_15_12 FM(MSIOF0_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR1_19_16 FM(HTX0) FM(TX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR1_23_20 FM(HCTS0_N) FM(CTS0_N) FM(PWM8_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR1_27_24 FM(HRTS0_N) FM(RTS0_N) FM(PWM9_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR1_31_28 FM(HSCK0) FM(SCK0) FM(PWM0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IP2SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP2SR1_3_0 FM(HRX0) FM(RX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR1_7_4 FM(SCIF_CLK) FM(IRQ4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR1_11_8 FM(SSI_SCK) FM(TCLK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR1_15_12 FM(SSI_WS) FM(TCLK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR1_19_16 FM(SSI_SD) FM(IRQ0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR1_23_20 FM(AUDIO_CLKOUT) FM(IRQ1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR1_27_24 FM(AUDIO_CLKIN) FM(PWM3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR1_31_28 F_(0, 0) FM(TCLK2) FM(MSIOF4_SS1) FM(IRQ3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IP3SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP3SR1_3_0 FM(HRX3) FM(SCK3_A) FM(MSIOF4_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3SR1_7_4 FM(HSCK3) FM(CTS3_N_A) FM(MSIOF4_SCK) FM(TPU0TO0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3SR1_11_8 FM(HRTS3_N) FM(RTS3_N_A) FM(MSIOF4_TXD) FM(TPU0TO1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3SR1_15_12 FM(HCTS3_N) FM(RX3_A) FM(MSIOF4_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3SR1_19_16 FM(HTX3) FM(TX3_A) FM(MSIOF4_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+/* IP0SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR1_3_0 FM(MSIOF1_SS2) FM(HTX3_A) FM(TX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_7_4 FM(MSIOF1_SS1) FM(HCTS3_N_A) FM(RX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_11_8 FM(MSIOF1_SYNC) FM(HRTS3_N_A) FM(RTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_15_12 FM(MSIOF1_SCK) FM(HSCK3_A) FM(CTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_19_16 FM(MSIOF1_TXD) FM(HRX3_A) FM(SCK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_23_20 FM(MSIOF1_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_27_24 FM(MSIOF0_SS2) FM(HTX1_X) FM(TX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_31_28 FM(MSIOF0_SS1) FM(HRX1_X) FM(RX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR1_3_0 FM(MSIOF0_SYNC) FM(HCTS1_N_X) FM(CTS1_N_X) FM(CANFD5_TX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_7_4 FM(MSIOF0_TXD) FM(HRTS1_N_X) FM(RTS1_N_X) FM(CANFD5_RX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_11_8 FM(MSIOF0_SCK) FM(HSCK1_X) FM(SCK1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_15_12 FM(MSIOF0_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_19_16 FM(HTX0) FM(TX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_23_20 FM(HCTS0_N) FM(CTS0_N) FM(PWM8_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_27_24 FM(HRTS0_N) FM(RTS0_N) FM(PWM9_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_31_28 FM(HSCK0) FM(SCK0) FM(PWM0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR1_3_0 FM(HRX0) FM(RX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_7_4 FM(SCIF_CLK) FM(IRQ4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_11_8 FM(SSI_SCK) FM(TCLK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_15_12 FM(SSI_WS) FM(TCLK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_19_16 FM(SSI_SD) FM(IRQ0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_23_20 FM(AUDIO_CLKOUT) FM(IRQ1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_27_24 FM(AUDIO_CLKIN) FM(PWM3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_31_28 F_(0, 0) FM(TCLK2) FM(MSIOF4_SS1) FM(IRQ3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR1_3_0 FM(HRX3) FM(SCK3_A) FM(MSIOF4_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_7_4 FM(HSCK3) FM(CTS3_N_A) FM(MSIOF4_SCK) FM(TPU0TO0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_11_8 FM(HRTS3_N) FM(RTS3_N_A) FM(MSIOF4_TXD) FM(TPU0TO1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_15_12 FM(HCTS3_N) FM(RX3_A) FM(MSIOF4_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_19_16 FM(HTX3) FM(TX3_A) FM(MSIOF4_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)

/* SR2 */
-/* IP0SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP0SR2_3_0 FM(FXR_TXDA) FM(CANFD1_TX) FM(TPU0TO2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR2_7_4 FM(FXR_TXENA_N) FM(CANFD1_RX) FM(TPU0TO3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR2_11_8 FM(RXDA_EXTFXR) FM(CANFD5_TX) FM(IRQ5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR2_15_12 FM(CLK_EXTFXR) FM(CANFD5_RX) FM(IRQ4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR2_19_16 FM(RXDB_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR2_23_20 FM(FXR_TXENB_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR2_27_24 FM(FXR_TXDB) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR2_31_28 FM(TPU0TO1) FM(CANFD6_TX) F_(0, 0) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IP1SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP1SR2_3_0 FM(TPU0TO0) FM(CANFD6_RX) F_(0, 0) FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR2_7_4 FM(CAN_CLK) FM(FXR_TXENA_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR2_11_8 FM(CANFD0_TX) FM(FXR_TXENB_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR2_15_12 FM(CANFD0_RX) FM(STPWT_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR2_19_16 FM(CANFD2_TX) FM(TPU0TO2) F_(0, 0) FM(TCLK3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR2_23_20 FM(CANFD2_RX) FM(TPU0TO3) FM(PWM1_B) FM(TCLK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR2_27_24 FM(CANFD3_TX) F_(0, 0) FM(PWM2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR2_31_28 FM(CANFD3_RX) F_(0, 0) FM(PWM3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IP2SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP2SR2_3_0 FM(CANFD4_TX) F_(0, 0) FM(PWM4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR2_7_4 FM(CANFD4_RX) F_(0, 0) FM(PWM5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR2_11_8 FM(CANFD7_TX) F_(0, 0) FM(PWM6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR2_15_12 FM(CANFD7_RX) F_(0, 0) FM(PWM7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+/* IP0SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR2_3_0 FM(FXR_TXDA) FM(CANFD1_TX) FM(TPU0TO2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_7_4 FM(FXR_TXENA_N) FM(CANFD1_RX) FM(TPU0TO3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_11_8 FM(RXDA_EXTFXR) FM(CANFD5_TX) FM(IRQ5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_15_12 FM(CLK_EXTFXR) FM(CANFD5_RX) FM(IRQ4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_19_16 FM(RXDB_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_23_20 FM(FXR_TXENB_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_27_24 FM(FXR_TXDB) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_31_28 FM(TPU0TO1) FM(CANFD6_TX) F_(0, 0) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR2_3_0 FM(TPU0TO0) FM(CANFD6_RX) F_(0, 0) FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_7_4 FM(CAN_CLK) FM(FXR_TXENA_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_11_8 FM(CANFD0_TX) FM(FXR_TXENB_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_15_12 FM(CANFD0_RX) FM(STPWT_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_19_16 FM(CANFD2_TX) FM(TPU0TO2) F_(0, 0) FM(TCLK3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_23_20 FM(CANFD2_RX) FM(TPU0TO3) FM(PWM1_B) FM(TCLK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_27_24 FM(CANFD3_TX) F_(0, 0) FM(PWM2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_31_28 FM(CANFD3_RX) F_(0, 0) FM(PWM3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR2_3_0 FM(CANFD4_TX) F_(0, 0) FM(PWM4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_7_4 FM(CANFD4_RX) F_(0, 0) FM(PWM5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_11_8 FM(CANFD7_TX) F_(0, 0) FM(PWM6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_15_12 FM(CANFD7_RX) F_(0, 0) FM(PWM7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)

/* SR3 */
-/* IP0SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP0SR3_3_0 FM(MMC_SD_D1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR3_7_4 FM(MMC_SD_D0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR3_11_8 FM(MMC_SD_D2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR3_15_12 FM(MMC_SD_CLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR3_19_16 FM(MMC_DS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR3_23_20 FM(MMC_SD_D3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR3_27_24 FM(MMC_D5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR3_31_28 FM(MMC_D4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IP1SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP1SR3_3_0 FM(MMC_D7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR3_7_4 FM(MMC_D6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR3_11_8 FM(MMC_SD_CMD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR3_15_12 FM(SD_CD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR3_19_16 FM(SD_WP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR3_23_20 FM(IPC_CLKIN) FM(IPC_CLKEN_IN) FM(PWM1_A) FM(TCLK3_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR3_27_24 FM(IPC_CLKOUT) FM(IPC_CLKEN_OUT) FM(ERROROUTC_A) FM(TCLK4_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR3_31_28 FM(QSPI0_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IP2SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP2SR3_3_0 FM(QSPI0_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR3_7_4 FM(QSPI0_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR3_11_8 FM(QSPI0_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR3_15_12 FM(QSPI0_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR3_19_16 FM(QSPI0_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR3_23_20 FM(QSPI1_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR3_27_24 FM(QSPI1_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR3_31_28 FM(QSPI1_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IP3SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP3SR3_3_0 FM(QSPI1_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3SR3_7_4 FM(QSPI1_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3SR3_11_8 FM(QSPI1_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3SR3_15_12 FM(RPC_RESET_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3SR3_19_16 FM(RPC_WP_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3SR3_23_20 FM(RPC_INT_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+/* IP0SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR3_3_0 FM(MMC_SD_D1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_7_4 FM(MMC_SD_D0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_11_8 FM(MMC_SD_D2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_15_12 FM(MMC_SD_CLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_19_16 FM(MMC_DS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_23_20 FM(MMC_SD_D3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_27_24 FM(MMC_D5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_31_28 FM(MMC_D4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR3_3_0 FM(MMC_D7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_7_4 FM(MMC_D6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_11_8 FM(MMC_SD_CMD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_15_12 FM(SD_CD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_19_16 FM(SD_WP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_23_20 FM(IPC_CLKIN) FM(IPC_CLKEN_IN) FM(PWM1_A) FM(TCLK3_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_27_24 FM(IPC_CLKOUT) FM(IPC_CLKEN_OUT) FM(ERROROUTC_N_A) FM(TCLK4_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_31_28 FM(QSPI0_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR3_3_0 FM(QSPI0_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_7_4 FM(QSPI0_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_11_8 FM(QSPI0_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_15_12 FM(QSPI0_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_19_16 FM(QSPI0_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_23_20 FM(QSPI1_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_27_24 FM(QSPI1_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_31_28 FM(QSPI1_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR3_3_0 FM(QSPI1_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_7_4 FM(QSPI1_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_11_8 FM(QSPI1_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_15_12 FM(RPC_RESET_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_19_16 FM(RPC_WP_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_23_20 FM(RPC_INT_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR4 */
+/* IP0SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR4_3_0 FM(TSN0_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_7_4 FM(TSN0_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_11_8 FM(TSN0_AVTP_PPS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_15_12 FM(TSN0_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_19_16 FM(TSN0_LINK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_23_20 FM(TSN0_AVTP_MATCH) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_27_24 FM(TSN0_AVTP_CAPTURE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR4_31_28 FM(TSN0_RX_CTL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR4_3_0 FM(TSN0_AVTP_PPS0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_7_4 FM(TSN0_TX_CTL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_11_8 FM(TSN0_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_15_12 FM(TSN0_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_19_16 FM(TSN0_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_23_20 FM(TSN0_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_27_24 FM(TSN0_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR4_31_28 FM(TSN0_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR4_3_0 FM(TSN0_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR4_7_4 FM(TSN0_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR4_11_8 FM(TSN0_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR4_15_12 FM(TSN0_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR4_19_16 FM(TSN0_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR4_23_20 FM(PCIE0_CLKREQ_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR4_27_24 FM(PCIE1_CLKREQ_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR4_31_28 FM(AVS0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR4_3_0 FM(AVS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR5 */
+/* IP0SR5 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR5_3_0 FM(AVB2_AVTP_PPS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_7_4 FM(AVB2_AVTP_CAPTURE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_11_8 FM(AVB2_AVTP_MATCH) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_15_12 FM(AVB2_LINK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_19_16 FM(AVB2_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_23_20 FM(AVB2_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_27_24 FM(AVB2_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR5_31_28 FM(AVB2_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR5 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR5_3_0 FM(AVB2_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_7_4 FM(AVB2_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_11_8 FM(AVB2_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_15_12 FM(AVB2_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_19_16 FM(AVB2_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_23_20 FM(AVB2_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_27_24 FM(AVB2_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR5_31_28 FM(AVB2_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR5 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR5_3_0 FM(AVB2_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_7_4 FM(AVB2_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_11_8 FM(AVB2_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_15_12 FM(AVB2_TX_CTL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR5_19_16 FM(AVB2_RX_CTL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)

/* SR6 */
-/* IP0SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP0SR6_3_0 FM(AVB1_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR6_7_4 FM(AVB1_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR6_11_8 FM(AVB1_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR6_15_12 FM(AVB1_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR6_19_16 FM(AVB1_LINK) FM(AVB1_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR6_23_20 FM(AVB1_AVTP_MATCH) FM(AVB1_MII_RX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR6_27_24 FM(AVB1_TXC) FM(AVB1_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR6_31_28 FM(AVB1_TX_CTL) FM(AVB1_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IP1SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP1SR6_3_0 FM(AVB1_RXC) FM(AVB1_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR6_7_4 FM(AVB1_RX_CTL) FM(AVB1_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR6_11_8 FM(AVB1_AVTP_PPS) FM(AVB1_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR6_15_12 FM(AVB1_AVTP_CAPTURE) FM(AVB1_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR6_19_16 FM(AVB1_TD1) FM(AVB1_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR6_23_20 FM(AVB1_TD0) FM(AVB1_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR6_27_24 FM(AVB1_RD1) FM(AVB1_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR6_31_28 FM(AVB1_RD0) FM(AVB1_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IP2SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP2SR6_3_0 FM(AVB1_TD2) FM(AVB1_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR6_7_4 FM(AVB1_RD2) FM(AVB1_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR6_11_8 FM(AVB1_TD3) FM(AVB1_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR6_15_12 FM(AVB1_RD3) FM(AVB1_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR6_19_16 FM(AVB1_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+/* IP0SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR6_3_0 FM(AVB1_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_7_4 FM(AVB1_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_11_8 FM(AVB1_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_15_12 FM(AVB1_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_19_16 FM(AVB1_LINK) FM(AVB1_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_23_20 FM(AVB1_AVTP_MATCH) FM(AVB1_MII_RX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_27_24 FM(AVB1_TXC) FM(AVB1_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_31_28 FM(AVB1_TX_CTL) FM(AVB1_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR6_3_0 FM(AVB1_RXC) FM(AVB1_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_7_4 FM(AVB1_RX_CTL) FM(AVB1_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_11_8 FM(AVB1_AVTP_PPS) FM(AVB1_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_15_12 FM(AVB1_AVTP_CAPTURE) FM(AVB1_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_19_16 FM(AVB1_TD1) FM(AVB1_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_23_20 FM(AVB1_TD0) FM(AVB1_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_27_24 FM(AVB1_RD1) FM(AVB1_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_31_28 FM(AVB1_RD0) FM(AVB1_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR6_3_0 FM(AVB1_TD2) FM(AVB1_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_7_4 FM(AVB1_RD2) FM(AVB1_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_11_8 FM(AVB1_TD3) FM(AVB1_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_15_12 FM(AVB1_RD3) FM(AVB1_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_19_16 FM(AVB1_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)

/* SR7 */
-/* IP0SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP0SR7_3_0 FM(AVB0_AVTP_PPS) FM(AVB0_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR7_7_4 FM(AVB0_AVTP_CAPTURE) FM(AVB0_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR7_11_8 FM(AVB0_AVTP_MATCH) FM(AVB0_MII_RX_ER) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR7_15_12 FM(AVB0_TD3) FM(AVB0_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR7_19_16 FM(AVB0_LINK) FM(AVB0_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR7_23_20 FM(AVB0_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR7_27_24 FM(AVB0_TD2) FM(AVB0_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR7_31_28 FM(AVB0_TD1) FM(AVB0_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IP1SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP1SR7_3_0 FM(AVB0_RD3) FM(AVB0_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR7_7_4 FM(AVB0_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR7_11_8 FM(AVB0_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR7_15_12 FM(AVB0_TD0) FM(AVB0_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR7_19_16 FM(AVB0_RD2) FM(AVB0_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR7_23_20 FM(AVB0_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR7_27_24 FM(AVB0_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR7_31_28 FM(AVB0_TXC) FM(AVB0_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IP2SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP2SR7_3_0 FM(AVB0_TX_CTL) FM(AVB0_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR7_7_4 FM(AVB0_RD1) FM(AVB0_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR7_11_8 FM(AVB0_RD0) FM(AVB0_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR7_15_12 FM(AVB0_RXC) FM(AVB0_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR7_19_16 FM(AVB0_RX_CTL) FM(AVB0_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+/* IP0SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR7_3_0 FM(AVB0_AVTP_PPS) FM(AVB0_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_7_4 FM(AVB0_AVTP_CAPTURE) FM(AVB0_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_11_8 FM(AVB0_AVTP_MATCH) FM(AVB0_MII_RX_ER) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_15_12 FM(AVB0_TD3) FM(AVB0_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_19_16 FM(AVB0_LINK) FM(AVB0_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_23_20 FM(AVB0_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_27_24 FM(AVB0_TD2) FM(AVB0_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_31_28 FM(AVB0_TD1) FM(AVB0_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR7_3_0 FM(AVB0_RD3) FM(AVB0_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_7_4 FM(AVB0_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_11_8 FM(AVB0_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_15_12 FM(AVB0_TD0) FM(AVB0_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_19_16 FM(AVB0_RD2) FM(AVB0_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_23_20 FM(AVB0_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_27_24 FM(AVB0_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_31_28 FM(AVB0_TXC) FM(AVB0_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR7_3_0 FM(AVB0_TX_CTL) FM(AVB0_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_7_4 FM(AVB0_RD1) FM(AVB0_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_11_8 FM(AVB0_RD0) FM(AVB0_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_15_12 FM(AVB0_RXC) FM(AVB0_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_19_16 FM(AVB0_RX_CTL) FM(AVB0_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)

/* SR8 */
-/* IP0SR8 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP0SR8_3_0 FM(SCL0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR8_7_4 FM(SDA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR8_11_8 FM(SCL1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR8_15_12 FM(SDA1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR8_19_16 FM(SCL2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR8_23_20 FM(SDA2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR8_27_24 FM(SCL3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR8_31_28 FM(SDA3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-
-/* IP1SR8 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP1SR8_3_0 FM(SCL4) FM(HRX2) FM(SCK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR8_7_4 FM(SDA4) FM(HTX2) FM(CTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR8_11_8 FM(SCL5) FM(HRTS2_N) FM(RTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR8_15_12 FM(SDA5) FM(SCIF_CLK2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR8_19_16 F_(0, 0) FM(HCTS2_N) FM(TX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR8_23_20 F_(0, 0) FM(HSCK2) FM(RX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+/* IP0SR8 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR8_3_0 FM(SCL0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_7_4 FM(SDA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_11_8 FM(SCL1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_15_12 FM(SDA1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_19_16 FM(SCL2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_23_20 FM(SDA2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_27_24 FM(SCL3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_31_28 FM(SDA3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR8 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR8_3_0 FM(SCL4) FM(HRX2) FM(SCK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_7_4 FM(SDA4) FM(HTX2) FM(CTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_11_8 FM(SCL5) FM(HRTS2_N) FM(RTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_15_12 FM(SDA5) FM(SCIF_CLK2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_19_16 F_(0, 0) FM(HCTS2_N) FM(TX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_23_20 F_(0, 0) FM(HSCK2) FM(RX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)

#define PINMUX_GPSR \
GPSR3_29 \
@@ -542,6 +604,24 @@ FM(IP0SR3_23_20) IP0SR3_23_20 FM(IP1SR3_23_20) IP1SR3_23_20 FM(IP2SR3_23_20) IP2
FM(IP0SR3_27_24) IP0SR3_27_24 FM(IP1SR3_27_24) IP1SR3_27_24 FM(IP2SR3_27_24) IP2SR3_27_24 \
FM(IP0SR3_31_28) IP0SR3_31_28 FM(IP1SR3_31_28) IP1SR3_31_28 FM(IP2SR3_31_28) IP2SR3_31_28 \
\
+FM(IP0SR4_3_0) IP0SR4_3_0 FM(IP1SR4_3_0) IP1SR4_3_0 FM(IP2SR4_3_0) IP2SR4_3_0 FM(IP3SR4_3_0) IP3SR4_3_0 \
+FM(IP0SR4_7_4) IP0SR4_7_4 FM(IP1SR4_7_4) IP1SR4_7_4 FM(IP2SR4_7_4) IP2SR4_7_4 \
+FM(IP0SR4_11_8) IP0SR4_11_8 FM(IP1SR4_11_8) IP1SR4_11_8 FM(IP2SR4_11_8) IP2SR4_11_8 \
+FM(IP0SR4_15_12) IP0SR4_15_12 FM(IP1SR4_15_12) IP1SR4_15_12 FM(IP2SR4_15_12) IP2SR4_15_12 \
+FM(IP0SR4_19_16) IP0SR4_19_16 FM(IP1SR4_19_16) IP1SR4_19_16 FM(IP2SR4_19_16) IP2SR4_19_16 \
+FM(IP0SR4_23_20) IP0SR4_23_20 FM(IP1SR4_23_20) IP1SR4_23_20 FM(IP2SR4_23_20) IP2SR4_23_20 \
+FM(IP0SR4_27_24) IP0SR4_27_24 FM(IP1SR4_27_24) IP1SR4_27_24 FM(IP2SR4_27_24) IP2SR4_27_24 \
+FM(IP0SR4_31_28) IP0SR4_31_28 FM(IP1SR4_31_28) IP1SR4_31_28 FM(IP2SR4_31_28) IP2SR4_31_28 \
+\
+FM(IP0SR5_3_0) IP0SR5_3_0 FM(IP1SR5_3_0) IP1SR5_3_0 FM(IP2SR5_3_0) IP2SR5_3_0 \
+FM(IP0SR5_7_4) IP0SR5_7_4 FM(IP1SR5_7_4) IP1SR5_7_4 FM(IP2SR5_7_4) IP2SR5_7_4 \
+FM(IP0SR5_11_8) IP0SR5_11_8 FM(IP1SR5_11_8) IP1SR5_11_8 FM(IP2SR5_11_8) IP2SR5_11_8 \
+FM(IP0SR5_15_12) IP0SR5_15_12 FM(IP1SR5_15_12) IP1SR5_15_12 FM(IP2SR5_15_12) IP2SR5_15_12 \
+FM(IP0SR5_19_16) IP0SR5_19_16 FM(IP1SR5_19_16) IP1SR5_19_16 FM(IP2SR5_19_16) IP2SR5_19_16 \
+FM(IP0SR5_23_20) IP0SR5_23_20 FM(IP1SR5_23_20) IP1SR5_23_20 \
+FM(IP0SR5_27_24) IP0SR5_27_24 FM(IP1SR5_27_24) IP1SR5_27_24 \
+FM(IP0SR5_31_28) IP0SR5_31_28 FM(IP1SR5_31_28) IP1SR5_31_28 \
+\
FM(IP0SR6_3_0) IP0SR6_3_0 FM(IP1SR6_3_0) IP1SR6_3_0 FM(IP2SR6_3_0) IP2SR6_3_0 \
FM(IP0SR6_7_4) IP0SR6_7_4 FM(IP1SR6_7_4) IP1SR6_7_4 FM(IP2SR6_7_4) IP2SR6_7_4 \
FM(IP0SR6_11_8) IP0SR6_11_8 FM(IP1SR6_11_8) IP1SR6_11_8 FM(IP2SR6_11_8) IP2SR6_11_8 \
@@ -569,54 +649,6 @@ FM(IP0SR8_23_20) IP0SR8_23_20 FM(IP1SR8_23_20) IP1SR8_23_20 \
FM(IP0SR8_27_24) IP0SR8_27_24 \
FM(IP0SR8_31_28) IP0SR8_31_28

-/* MOD_SEL4 */ /* 0 */ /* 1 */
-#define MOD_SEL4_19 FM(SEL_TSN0_TD2_0) FM(SEL_TSN0_TD2_1)
-#define MOD_SEL4_18 FM(SEL_TSN0_TD3_0) FM(SEL_TSN0_TD3_1)
-#define MOD_SEL4_15 FM(SEL_TSN0_TD0_0) FM(SEL_TSN0_TD0_1)
-#define MOD_SEL4_14 FM(SEL_TSN0_TD1_0) FM(SEL_TSN0_TD1_1)
-#define MOD_SEL4_12 FM(SEL_TSN0_TXC_0) FM(SEL_TSN0_TXC_1)
-#define MOD_SEL4_9 FM(SEL_TSN0_TX_CTL_0) FM(SEL_TSN0_TX_CTL_1)
-#define MOD_SEL4_8 FM(SEL_TSN0_AVTP_PPS0_0) FM(SEL_TSN0_AVTP_PPS0_1)
-#define MOD_SEL4_5 FM(SEL_TSN0_AVTP_MATCH_0) FM(SEL_TSN0_AVTP_MATCH_1)
-#define MOD_SEL4_2 FM(SEL_TSN0_AVTP_PPS1_0) FM(SEL_TSN0_AVTP_PPS1_1)
-#define MOD_SEL4_1 FM(SEL_TSN0_MDC_0) FM(SEL_TSN0_MDC_1)
-
-/* MOD_SEL5 */ /* 0 */ /* 1 */
-#define MOD_SEL5_19 FM(SEL_AVB2_TX_CTL_0) FM(SEL_AVB2_TX_CTL_1)
-#define MOD_SEL5_16 FM(SEL_AVB2_TXC_0) FM(SEL_AVB2_TXC_1)
-#define MOD_SEL5_15 FM(SEL_AVB2_TD0_0) FM(SEL_AVB2_TD0_1)
-#define MOD_SEL5_12 FM(SEL_AVB2_TD1_0) FM(SEL_AVB2_TD1_1)
-#define MOD_SEL5_11 FM(SEL_AVB2_TD2_0) FM(SEL_AVB2_TD2_1)
-#define MOD_SEL5_8 FM(SEL_AVB2_TD3_0) FM(SEL_AVB2_TD3_1)
-#define MOD_SEL5_6 FM(SEL_AVB2_MDC_0) FM(SEL_AVB2_MDC_1)
-#define MOD_SEL5_5 FM(SEL_AVB2_MAGIC_0) FM(SEL_AVB2_MAGIC_1)
-#define MOD_SEL5_2 FM(SEL_AVB2_AVTP_MATCH_0) FM(SEL_AVB2_AVTP_MATCH_1)
-#define MOD_SEL5_0 FM(SEL_AVB2_AVTP_PPS_0) FM(SEL_AVB2_AVTP_PPS_1)
-
-/* MOD_SEL6 */ /* 0 */ /* 1 */
-#define MOD_SEL6_18 FM(SEL_AVB1_TD3_0) FM(SEL_AVB1_TD3_1)
-#define MOD_SEL6_16 FM(SEL_AVB1_TD2_0) FM(SEL_AVB1_TD2_1)
-#define MOD_SEL6_13 FM(SEL_AVB1_TD0_0) FM(SEL_AVB1_TD0_1)
-#define MOD_SEL6_12 FM(SEL_AVB1_TD1_0) FM(SEL_AVB1_TD1_1)
-#define MOD_SEL6_10 FM(SEL_AVB1_AVTP_PPS_0) FM(SEL_AVB1_AVTP_PPS_1)
-#define MOD_SEL6_7 FM(SEL_AVB1_TX_CTL_0) FM(SEL_AVB1_TX_CTL_1)
-#define MOD_SEL6_6 FM(SEL_AVB1_TXC_0) FM(SEL_AVB1_TXC_1)
-#define MOD_SEL6_5 FM(SEL_AVB1_AVTP_MATCH_0) FM(SEL_AVB1_AVTP_MATCH_1)
-#define MOD_SEL6_2 FM(SEL_AVB1_MDC_0) FM(SEL_AVB1_MDC_1)
-#define MOD_SEL6_1 FM(SEL_AVB1_MAGIC_0) FM(SEL_AVB1_MAGIC_1)
-
-/* MOD_SEL7 */ /* 0 */ /* 1 */
-#define MOD_SEL7_16 FM(SEL_AVB0_TX_CTL_0) FM(SEL_AVB0_TX_CTL_1)
-#define MOD_SEL7_15 FM(SEL_AVB0_TXC_0) FM(SEL_AVB0_TXC_1)
-#define MOD_SEL7_13 FM(SEL_AVB0_MDC_0) FM(SEL_AVB0_MDC_1)
-#define MOD_SEL7_11 FM(SEL_AVB0_TD0_0) FM(SEL_AVB0_TD0_1)
-#define MOD_SEL7_10 FM(SEL_AVB0_MAGIC_0) FM(SEL_AVB0_MAGIC_1)
-#define MOD_SEL7_7 FM(SEL_AVB0_TD1_0) FM(SEL_AVB0_TD1_1)
-#define MOD_SEL7_6 FM(SEL_AVB0_TD2_0) FM(SEL_AVB0_TD2_1)
-#define MOD_SEL7_3 FM(SEL_AVB0_TD3_0) FM(SEL_AVB0_TD3_1)
-#define MOD_SEL7_2 FM(SEL_AVB0_AVTP_MATCH_0) FM(SEL_AVB0_AVTP_MATCH_1)
-#define MOD_SEL7_0 FM(SEL_AVB0_AVTP_PPS_0) FM(SEL_AVB0_AVTP_PPS_1)
-
/* MOD_SEL8 */ /* 0 */ /* 1 */
#define MOD_SEL8_11 FM(SEL_SDA5_0) FM(SEL_SDA5_1)
#define MOD_SEL8_10 FM(SEL_SCL5_0) FM(SEL_SCL5_1)
@@ -633,26 +665,18 @@ FM(IP0SR8_31_28) IP0SR8_31_28

#define PINMUX_MOD_SELS \
\
-MOD_SEL4_19 MOD_SEL5_19 \
-MOD_SEL4_18 MOD_SEL6_18 \
- \
- MOD_SEL5_16 MOD_SEL6_16 MOD_SEL7_16 \
-MOD_SEL4_15 MOD_SEL5_15 MOD_SEL7_15 \
-MOD_SEL4_14 \
- MOD_SEL6_13 MOD_SEL7_13 \
-MOD_SEL4_12 MOD_SEL5_12 MOD_SEL6_12 \
- MOD_SEL5_11 MOD_SEL7_11 MOD_SEL8_11 \
- MOD_SEL6_10 MOD_SEL7_10 MOD_SEL8_10 \
-MOD_SEL4_9 MOD_SEL8_9 \
-MOD_SEL4_8 MOD_SEL5_8 MOD_SEL8_8 \
- MOD_SEL6_7 MOD_SEL7_7 MOD_SEL8_7 \
- MOD_SEL5_6 MOD_SEL6_6 MOD_SEL7_6 MOD_SEL8_6 \
-MOD_SEL4_5 MOD_SEL5_5 MOD_SEL6_5 MOD_SEL8_5 \
- MOD_SEL8_4 \
- MOD_SEL7_3 MOD_SEL8_3 \
-MOD_SEL4_2 MOD_SEL5_2 MOD_SEL6_2 MOD_SEL7_2 MOD_SEL8_2 \
-MOD_SEL4_1 MOD_SEL6_1 MOD_SEL8_1 \
- MOD_SEL5_0 MOD_SEL7_0 MOD_SEL8_0
+MOD_SEL8_11 \
+MOD_SEL8_10 \
+MOD_SEL8_9 \
+MOD_SEL8_8 \
+MOD_SEL8_7 \
+MOD_SEL8_6 \
+MOD_SEL8_5 \
+MOD_SEL8_4 \
+MOD_SEL8_3 \
+MOD_SEL8_2 \
+MOD_SEL8_1 \
+MOD_SEL8_0

enum {
PINMUX_RESERVED = 0,
@@ -686,61 +710,8 @@ enum {
static const u16 pinmux_data[] = {
PINMUX_DATA_GP_ALL(),

- PINMUX_SINGLE(AVS1),
- PINMUX_SINGLE(AVS0),
- PINMUX_SINGLE(PCIE1_CLKREQ_N),
- PINMUX_SINGLE(PCIE0_CLKREQ_N),
-
- /* TSN0 without MODSEL4 */
- PINMUX_SINGLE(TSN0_TXCREFCLK),
- PINMUX_SINGLE(TSN0_RD2),
- PINMUX_SINGLE(TSN0_RD3),
- PINMUX_SINGLE(TSN0_RD1),
- PINMUX_SINGLE(TSN0_RXC),
- PINMUX_SINGLE(TSN0_RD0),
- PINMUX_SINGLE(TSN0_RX_CTL),
- PINMUX_SINGLE(TSN0_AVTP_CAPTURE),
- PINMUX_SINGLE(TSN0_LINK),
- PINMUX_SINGLE(TSN0_PHY_INT),
- PINMUX_SINGLE(TSN0_MDIO),
- /* TSN0 with MODSEL4 */
- PINMUX_IPSR_NOGM(0, TSN0_TD2, SEL_TSN0_TD2_1),
- PINMUX_IPSR_NOGM(0, TSN0_TD3, SEL_TSN0_TD3_1),
- PINMUX_IPSR_NOGM(0, TSN0_TD0, SEL_TSN0_TD0_1),
- PINMUX_IPSR_NOGM(0, TSN0_TD1, SEL_TSN0_TD1_1),
- PINMUX_IPSR_NOGM(0, TSN0_TXC, SEL_TSN0_TXC_1),
- PINMUX_IPSR_NOGM(0, TSN0_TX_CTL, SEL_TSN0_TX_CTL_1),
- PINMUX_IPSR_NOGM(0, TSN0_AVTP_PPS0, SEL_TSN0_AVTP_PPS0_1),
- PINMUX_IPSR_NOGM(0, TSN0_AVTP_MATCH, SEL_TSN0_AVTP_MATCH_1),
- PINMUX_IPSR_NOGM(0, TSN0_AVTP_PPS1, SEL_TSN0_AVTP_PPS1_1),
- PINMUX_IPSR_NOGM(0, TSN0_MDC, SEL_TSN0_MDC_1),
-
- /* TSN0 without MODSEL5 */
- PINMUX_SINGLE(AVB2_RX_CTL),
- PINMUX_SINGLE(AVB2_RXC),
- PINMUX_SINGLE(AVB2_RD0),
- PINMUX_SINGLE(AVB2_RD1),
- PINMUX_SINGLE(AVB2_RD2),
- PINMUX_SINGLE(AVB2_MDIO),
- PINMUX_SINGLE(AVB2_RD3),
- PINMUX_SINGLE(AVB2_TXCREFCLK),
- PINMUX_SINGLE(AVB2_PHY_INT),
- PINMUX_SINGLE(AVB2_LINK),
- PINMUX_SINGLE(AVB2_AVTP_CAPTURE),
- /* TSN0 with MODSEL5 */
- PINMUX_IPSR_NOGM(0, AVB2_TX_CTL, SEL_AVB2_TX_CTL_1),
- PINMUX_IPSR_NOGM(0, AVB2_TXC, SEL_AVB2_TXC_1),
- PINMUX_IPSR_NOGM(0, AVB2_TD0, SEL_AVB2_TD0_1),
- PINMUX_IPSR_NOGM(0, AVB2_TD1, SEL_AVB2_TD1_1),
- PINMUX_IPSR_NOGM(0, AVB2_TD2, SEL_AVB2_TD2_1),
- PINMUX_IPSR_NOGM(0, AVB2_TD3, SEL_AVB2_TD3_1),
- PINMUX_IPSR_NOGM(0, AVB2_MDC, SEL_AVB2_MDC_1),
- PINMUX_IPSR_NOGM(0, AVB2_MAGIC, SEL_AVB2_MAGIC_1),
- PINMUX_IPSR_NOGM(0, AVB2_AVTP_MATCH, SEL_AVB2_AVTP_MATCH_1),
- PINMUX_IPSR_NOGM(0, AVB2_AVTP_PPS, SEL_AVB2_AVTP_PPS_1),
-
/* IP0SR0 */
- PINMUX_IPSR_GPSR(IP0SR0_3_0, ERROROUTC_B),
+ PINMUX_IPSR_GPSR(IP0SR0_3_0, ERROROUTC_N_B),
PINMUX_IPSR_GPSR(IP0SR0_3_0, TCLK2_A),

PINMUX_IPSR_GPSR(IP0SR0_7_4, MSIOF3_SS1),
@@ -1006,7 +977,7 @@ static const u16 pinmux_data[] = {

PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKOUT),
PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKEN_OUT),
- PINMUX_IPSR_GPSR(IP1SR3_27_24, ERROROUTC_A),
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, ERROROUTC_N_A),
PINMUX_IPSR_GPSR(IP1SR3_27_24, TCLK4_X),

PINMUX_IPSR_GPSR(IP1SR3_31_28, QSPI0_SSL),
@@ -1029,26 +1000,86 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP3SR3_19_16, RPC_WP_N),
PINMUX_IPSR_GPSR(IP3SR3_23_20, RPC_INT_N),

+ /* IP0SR4 */
+ PINMUX_IPSR_GPSR(IP0SR4_3_0, TSN0_MDIO),
+ PINMUX_IPSR_GPSR(IP0SR4_7_4, TSN0_MDC),
+ PINMUX_IPSR_GPSR(IP0SR4_11_8, TSN0_AVTP_PPS1),
+ PINMUX_IPSR_GPSR(IP0SR4_15_12, TSN0_PHY_INT),
+ PINMUX_IPSR_GPSR(IP0SR4_19_16, TSN0_LINK),
+ PINMUX_IPSR_GPSR(IP0SR4_23_20, TSN0_AVTP_MATCH),
+ PINMUX_IPSR_GPSR(IP0SR4_27_24, TSN0_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP0SR4_31_28, TSN0_RX_CTL),
+
+ /* IP1SR4 */
+ PINMUX_IPSR_GPSR(IP1SR4_3_0, TSN0_AVTP_PPS0),
+ PINMUX_IPSR_GPSR(IP1SR4_7_4, TSN0_TX_CTL),
+ PINMUX_IPSR_GPSR(IP1SR4_11_8, TSN0_RD0),
+ PINMUX_IPSR_GPSR(IP1SR4_15_12, TSN0_RXC),
+ PINMUX_IPSR_GPSR(IP1SR4_19_16, TSN0_TXC),
+ PINMUX_IPSR_GPSR(IP1SR4_23_20, TSN0_RD1),
+ PINMUX_IPSR_GPSR(IP1SR4_27_24, TSN0_TD1),
+ PINMUX_IPSR_GPSR(IP1SR4_31_28, TSN0_TD0),
+
+ /* IP2SR4 */
+ PINMUX_IPSR_GPSR(IP2SR4_3_0, TSN0_RD3),
+ PINMUX_IPSR_GPSR(IP2SR4_7_4, TSN0_RD2),
+ PINMUX_IPSR_GPSR(IP2SR4_11_8, TSN0_TD3),
+ PINMUX_IPSR_GPSR(IP2SR4_15_12, TSN0_TD2),
+ PINMUX_IPSR_GPSR(IP2SR4_19_16, TSN0_TXCREFCLK),
+ PINMUX_IPSR_GPSR(IP2SR4_23_20, PCIE0_CLKREQ_N),
+ PINMUX_IPSR_GPSR(IP2SR4_27_24, PCIE1_CLKREQ_N),
+ PINMUX_IPSR_GPSR(IP2SR4_31_28, AVS0),
+
+ /* IP3SR4 */
+ PINMUX_IPSR_GPSR(IP3SR4_3_0, AVS1),
+
+ /* IP0SR5 */
+ PINMUX_IPSR_GPSR(IP0SR5_3_0, AVB2_AVTP_PPS),
+ PINMUX_IPSR_GPSR(IP0SR5_7_4, AVB2_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP0SR5_11_8, AVB2_AVTP_MATCH),
+ PINMUX_IPSR_GPSR(IP0SR5_15_12, AVB2_LINK),
+ PINMUX_IPSR_GPSR(IP0SR5_19_16, AVB2_PHY_INT),
+ PINMUX_IPSR_GPSR(IP0SR5_23_20, AVB2_MAGIC),
+ PINMUX_IPSR_GPSR(IP0SR5_27_24, AVB2_MDC),
+ PINMUX_IPSR_GPSR(IP0SR5_31_28, AVB2_TXCREFCLK),
+
+ /* IP1SR5 */
+ PINMUX_IPSR_GPSR(IP1SR5_3_0, AVB2_TD3),
+ PINMUX_IPSR_GPSR(IP1SR5_7_4, AVB2_RD3),
+ PINMUX_IPSR_GPSR(IP1SR5_11_8, AVB2_MDIO),
+ PINMUX_IPSR_GPSR(IP1SR5_15_12, AVB2_TD2),
+ PINMUX_IPSR_GPSR(IP1SR5_19_16, AVB2_TD1),
+ PINMUX_IPSR_GPSR(IP1SR5_23_20, AVB2_RD2),
+ PINMUX_IPSR_GPSR(IP1SR5_27_24, AVB2_RD1),
+ PINMUX_IPSR_GPSR(IP1SR5_31_28, AVB2_TD0),
+
+ /* IP2SR5 */
+ PINMUX_IPSR_GPSR(IP2SR5_3_0, AVB2_TXC),
+ PINMUX_IPSR_GPSR(IP2SR5_7_4, AVB2_RD0),
+ PINMUX_IPSR_GPSR(IP2SR5_11_8, AVB2_RXC),
+ PINMUX_IPSR_GPSR(IP2SR5_15_12, AVB2_TX_CTL),
+ PINMUX_IPSR_GPSR(IP2SR5_19_16, AVB2_RX_CTL),
+
/* IP0SR6 */
PINMUX_IPSR_GPSR(IP0SR6_3_0, AVB1_MDIO),

- PINMUX_IPSR_MSEL(IP0SR6_7_4, AVB1_MAGIC, SEL_AVB1_MAGIC_1),
+ PINMUX_IPSR_GPSR(IP0SR6_7_4, AVB1_MAGIC),

- PINMUX_IPSR_MSEL(IP0SR6_11_8, AVB1_MDC, SEL_AVB1_MDC_1),
+ PINMUX_IPSR_GPSR(IP0SR6_11_8, AVB1_MDC),

PINMUX_IPSR_GPSR(IP0SR6_15_12, AVB1_PHY_INT),

PINMUX_IPSR_GPSR(IP0SR6_19_16, AVB1_LINK),
PINMUX_IPSR_GPSR(IP0SR6_19_16, AVB1_MII_TX_ER),

- PINMUX_IPSR_MSEL(IP0SR6_23_20, AVB1_AVTP_MATCH, SEL_AVB1_AVTP_MATCH_1),
- PINMUX_IPSR_MSEL(IP0SR6_23_20, AVB1_MII_RX_ER, SEL_AVB1_AVTP_MATCH_0),
+ PINMUX_IPSR_GPSR(IP0SR6_23_20, AVB1_AVTP_MATCH),
+ PINMUX_IPSR_GPSR(IP0SR6_23_20, AVB1_MII_RX_ER),

- PINMUX_IPSR_MSEL(IP0SR6_27_24, AVB1_TXC, SEL_AVB1_TXC_1),
- PINMUX_IPSR_MSEL(IP0SR6_27_24, AVB1_MII_TXC, SEL_AVB1_TXC_0),
+ PINMUX_IPSR_GPSR(IP0SR6_27_24, AVB1_TXC),
+ PINMUX_IPSR_GPSR(IP0SR6_27_24, AVB1_MII_TXC),

- PINMUX_IPSR_MSEL(IP0SR6_31_28, AVB1_TX_CTL, SEL_AVB1_TX_CTL_1),
- PINMUX_IPSR_MSEL(IP0SR6_31_28, AVB1_MII_TX_EN, SEL_AVB1_TX_CTL_0),
+ PINMUX_IPSR_GPSR(IP0SR6_31_28, AVB1_TX_CTL),
+ PINMUX_IPSR_GPSR(IP0SR6_31_28, AVB1_MII_TX_EN),

/* IP1SR6 */
PINMUX_IPSR_GPSR(IP1SR6_3_0, AVB1_RXC),
@@ -1057,17 +1088,17 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP1SR6_7_4, AVB1_RX_CTL),
PINMUX_IPSR_GPSR(IP1SR6_7_4, AVB1_MII_RX_DV),

- PINMUX_IPSR_MSEL(IP1SR6_11_8, AVB1_AVTP_PPS, SEL_AVB1_AVTP_PPS_1),
- PINMUX_IPSR_MSEL(IP1SR6_11_8, AVB1_MII_COL, SEL_AVB1_AVTP_PPS_0),
+ PINMUX_IPSR_GPSR(IP1SR6_11_8, AVB1_AVTP_PPS),
+ PINMUX_IPSR_GPSR(IP1SR6_11_8, AVB1_MII_COL),

PINMUX_IPSR_GPSR(IP1SR6_15_12, AVB1_AVTP_CAPTURE),
PINMUX_IPSR_GPSR(IP1SR6_15_12, AVB1_MII_CRS),

- PINMUX_IPSR_MSEL(IP1SR6_19_16, AVB1_TD1, SEL_AVB1_TD1_1),
- PINMUX_IPSR_MSEL(IP1SR6_19_16, AVB1_MII_TD1, SEL_AVB1_TD1_0),
+ PINMUX_IPSR_GPSR(IP1SR6_19_16, AVB1_TD1),
+ PINMUX_IPSR_GPSR(IP1SR6_19_16, AVB1_MII_TD1),

- PINMUX_IPSR_MSEL(IP1SR6_23_20, AVB1_TD0, SEL_AVB1_TD0_1),
- PINMUX_IPSR_MSEL(IP1SR6_23_20, AVB1_MII_TD0, SEL_AVB1_TD0_0),
+ PINMUX_IPSR_GPSR(IP1SR6_23_20, AVB1_TD0),
+ PINMUX_IPSR_GPSR(IP1SR6_23_20, AVB1_MII_TD0),

PINMUX_IPSR_GPSR(IP1SR6_27_24, AVB1_RD1),
PINMUX_IPSR_GPSR(IP1SR6_27_24, AVB1_MII_RD1),
@@ -1076,14 +1107,14 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP1SR6_31_28, AVB1_MII_RD0),

/* IP2SR6 */
- PINMUX_IPSR_MSEL(IP2SR6_3_0, AVB1_TD2, SEL_AVB1_TD2_1),
- PINMUX_IPSR_MSEL(IP2SR6_3_0, AVB1_MII_TD2, SEL_AVB1_TD2_0),
+ PINMUX_IPSR_GPSR(IP2SR6_3_0, AVB1_TD2),
+ PINMUX_IPSR_GPSR(IP2SR6_3_0, AVB1_MII_TD2),

PINMUX_IPSR_GPSR(IP2SR6_7_4, AVB1_RD2),
PINMUX_IPSR_GPSR(IP2SR6_7_4, AVB1_MII_RD2),

- PINMUX_IPSR_MSEL(IP2SR6_11_8, AVB1_TD3, SEL_AVB1_TD3_1),
- PINMUX_IPSR_MSEL(IP2SR6_11_8, AVB1_MII_TD3, SEL_AVB1_TD3_0),
+ PINMUX_IPSR_GPSR(IP2SR6_11_8, AVB1_TD3),
+ PINMUX_IPSR_GPSR(IP2SR6_11_8, AVB1_MII_TD3),

PINMUX_IPSR_GPSR(IP2SR6_15_12, AVB1_RD3),
PINMUX_IPSR_GPSR(IP2SR6_15_12, AVB1_MII_RD3),
@@ -1091,29 +1122,29 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP2SR6_19_16, AVB1_TXCREFCLK),

/* IP0SR7 */
- PINMUX_IPSR_MSEL(IP0SR7_3_0, AVB0_AVTP_PPS, SEL_AVB0_AVTP_PPS_1),
- PINMUX_IPSR_MSEL(IP0SR7_3_0, AVB0_MII_COL, SEL_AVB0_AVTP_PPS_0),
+ PINMUX_IPSR_GPSR(IP0SR7_3_0, AVB0_AVTP_PPS),
+ PINMUX_IPSR_GPSR(IP0SR7_3_0, AVB0_MII_COL),

PINMUX_IPSR_GPSR(IP0SR7_7_4, AVB0_AVTP_CAPTURE),
PINMUX_IPSR_GPSR(IP0SR7_7_4, AVB0_MII_CRS),

- PINMUX_IPSR_MSEL(IP0SR7_11_8, AVB0_AVTP_MATCH, SEL_AVB0_AVTP_MATCH_1),
- PINMUX_IPSR_MSEL(IP0SR7_11_8, AVB0_MII_RX_ER, SEL_AVB0_AVTP_MATCH_0),
- PINMUX_IPSR_MSEL(IP0SR7_11_8, CC5_OSCOUT, SEL_AVB0_AVTP_MATCH_0),
+ PINMUX_IPSR_GPSR(IP0SR7_11_8, AVB0_AVTP_MATCH),
+ PINMUX_IPSR_GPSR(IP0SR7_11_8, AVB0_MII_RX_ER),
+ PINMUX_IPSR_GPSR(IP0SR7_11_8, CC5_OSCOUT),

- PINMUX_IPSR_MSEL(IP0SR7_15_12, AVB0_TD3, SEL_AVB0_TD3_1),
- PINMUX_IPSR_MSEL(IP0SR7_15_12, AVB0_MII_TD3, SEL_AVB0_TD3_0),
+ PINMUX_IPSR_GPSR(IP0SR7_15_12, AVB0_TD3),
+ PINMUX_IPSR_GPSR(IP0SR7_15_12, AVB0_MII_TD3),

PINMUX_IPSR_GPSR(IP0SR7_19_16, AVB0_LINK),
PINMUX_IPSR_GPSR(IP0SR7_19_16, AVB0_MII_TX_ER),

PINMUX_IPSR_GPSR(IP0SR7_23_20, AVB0_PHY_INT),

- PINMUX_IPSR_MSEL(IP0SR7_27_24, AVB0_TD2, SEL_AVB0_TD2_1),
- PINMUX_IPSR_MSEL(IP0SR7_27_24, AVB0_MII_TD2, SEL_AVB0_TD2_0),
+ PINMUX_IPSR_GPSR(IP0SR7_27_24, AVB0_TD2),
+ PINMUX_IPSR_GPSR(IP0SR7_27_24, AVB0_MII_TD2),

- PINMUX_IPSR_MSEL(IP0SR7_31_28, AVB0_TD1, SEL_AVB0_TD1_1),
- PINMUX_IPSR_MSEL(IP0SR7_31_28, AVB0_MII_TD1, SEL_AVB0_TD1_0),
+ PINMUX_IPSR_GPSR(IP0SR7_31_28, AVB0_TD1),
+ PINMUX_IPSR_GPSR(IP0SR7_31_28, AVB0_MII_TD1),

/* IP1SR7 */
PINMUX_IPSR_GPSR(IP1SR7_3_0, AVB0_RD3),
@@ -1121,24 +1152,24 @@ static const u16 pinmux_data[] = {

PINMUX_IPSR_GPSR(IP1SR7_7_4, AVB0_TXCREFCLK),

- PINMUX_IPSR_MSEL(IP1SR7_11_8, AVB0_MAGIC, SEL_AVB0_MAGIC_1),
+ PINMUX_IPSR_GPSR(IP1SR7_11_8, AVB0_MAGIC),

- PINMUX_IPSR_MSEL(IP1SR7_15_12, AVB0_TD0, SEL_AVB0_TD0_1),
- PINMUX_IPSR_MSEL(IP1SR7_15_12, AVB0_MII_TD0, SEL_AVB0_TD0_0),
+ PINMUX_IPSR_GPSR(IP1SR7_15_12, AVB0_TD0),
+ PINMUX_IPSR_GPSR(IP1SR7_15_12, AVB0_MII_TD0),

PINMUX_IPSR_GPSR(IP1SR7_19_16, AVB0_RD2),
PINMUX_IPSR_GPSR(IP1SR7_19_16, AVB0_MII_RD2),

- PINMUX_IPSR_MSEL(IP1SR7_23_20, AVB0_MDC, SEL_AVB0_MDC_1),
+ PINMUX_IPSR_GPSR(IP1SR7_23_20, AVB0_MDC),

PINMUX_IPSR_GPSR(IP1SR7_27_24, AVB0_MDIO),

- PINMUX_IPSR_MSEL(IP1SR7_31_28, AVB0_TXC, SEL_AVB0_TXC_1),
- PINMUX_IPSR_MSEL(IP1SR7_31_28, AVB0_MII_TXC, SEL_AVB0_TXC_0),
+ PINMUX_IPSR_GPSR(IP1SR7_31_28, AVB0_TXC),
+ PINMUX_IPSR_GPSR(IP1SR7_31_28, AVB0_MII_TXC),

/* IP2SR7 */
- PINMUX_IPSR_MSEL(IP2SR7_3_0, AVB0_TX_CTL, SEL_AVB0_TX_CTL_1),
- PINMUX_IPSR_MSEL(IP2SR7_3_0, AVB0_MII_TX_EN, SEL_AVB0_TX_CTL_0),
+ PINMUX_IPSR_GPSR(IP2SR7_3_0, AVB0_TX_CTL),
+ PINMUX_IPSR_GPSR(IP2SR7_3_0, AVB0_MII_TX_EN),

PINMUX_IPSR_GPSR(IP2SR7_7_4, AVB0_RD1),
PINMUX_IPSR_GPSR(IP2SR7_7_4, AVB0_MII_RD1),
@@ -3419,6 +3450,82 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
IP3SR3_7_4
IP3SR3_3_0))
},
+ { PINMUX_CFG_REG_VAR("IP0SR4", 0xE6060060, 32,
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ IP0SR4_31_28
+ IP0SR4_27_24
+ IP0SR4_23_20
+ IP0SR4_19_16
+ IP0SR4_15_12
+ IP0SR4_11_8
+ IP0SR4_7_4
+ IP0SR4_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP1SR4", 0xE6060064, 32,
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ IP1SR4_31_28
+ IP1SR4_27_24
+ IP1SR4_23_20
+ IP1SR4_19_16
+ IP1SR4_15_12
+ IP1SR4_11_8
+ IP1SR4_7_4
+ IP1SR4_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR4", 0xE6060068, 32,
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ IP2SR4_31_28
+ IP2SR4_27_24
+ IP2SR4_23_20
+ IP2SR4_19_16
+ IP2SR4_15_12
+ IP2SR4_11_8
+ IP2SR4_7_4
+ IP2SR4_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP3SR4", 0xE606006C, 32,
+ GROUP(-28, 4),
+ GROUP(
+ /* IP3SR4_31_4 RESERVED */
+ IP3SR4_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP0SR5", 0xE6060860, 32,
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ IP0SR5_31_28
+ IP0SR5_27_24
+ IP0SR5_23_20
+ IP0SR5_19_16
+ IP0SR5_15_12
+ IP0SR5_11_8
+ IP0SR5_7_4
+ IP0SR5_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP1SR5", 0xE6060864, 32,
+ GROUP(4, 4, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ IP1SR5_31_28
+ IP1SR5_27_24
+ IP1SR5_23_20
+ IP1SR5_19_16
+ IP1SR5_15_12
+ IP1SR5_11_8
+ IP1SR5_7_4
+ IP1SR5_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR5", 0xE6060868, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR5_31_20 RESERVED */
+ IP2SR5_19_16
+ IP2SR5_15_12
+ IP2SR5_11_8
+ IP2SR5_7_4
+ IP2SR5_3_0))
+ },
{ PINMUX_CFG_REG("IP0SR6", 0xE6061060, 32, 4, GROUP(
IP0SR6_31_28
IP0SR6_27_24
@@ -3505,95 +3612,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {

#define F_(x, y) x,
#define FM(x) FN_##x,
- { PINMUX_CFG_REG_VAR("MOD_SEL4", 0xE6060100, 32,
- GROUP(-12, 1, 1, -2, 1, 1, -1, 1, -2, 1, 1, -2, 1,
- -2, 1, 1, -1),
- GROUP(
- /* RESERVED 31-20 */
- MOD_SEL4_19
- MOD_SEL4_18
- /* RESERVED 17-16 */
- MOD_SEL4_15
- MOD_SEL4_14
- /* RESERVED 13 */
- MOD_SEL4_12
- /* RESERVED 11-10 */
- MOD_SEL4_9
- MOD_SEL4_8
- /* RESERVED 7-6 */
- MOD_SEL4_5
- /* RESERVED 4-3 */
- MOD_SEL4_2
- MOD_SEL4_1
- /* RESERVED 0 */
- ))
- },
- { PINMUX_CFG_REG_VAR("MOD_SEL5", 0xE6060900, 32,
- GROUP(-12, 1, -2, 1, 1, -2, 1, 1, -2, 1, -1,
- 1, 1, -2, 1, -1, 1),
- GROUP(
- /* RESERVED 31-20 */
- MOD_SEL5_19
- /* RESERVED 18-17 */
- MOD_SEL5_16
- MOD_SEL5_15
- /* RESERVED 14-13 */
- MOD_SEL5_12
- MOD_SEL5_11
- /* RESERVED 10-9 */
- MOD_SEL5_8
- /* RESERVED 7 */
- MOD_SEL5_6
- MOD_SEL5_5
- /* RESERVED 4-3 */
- MOD_SEL5_2
- /* RESERVED 1 */
- MOD_SEL5_0))
- },
- { PINMUX_CFG_REG_VAR("MOD_SEL6", 0xE6061100, 32,
- GROUP(-13, 1, -1, 1, -2, 1, 1,
- -1, 1, -2, 1, 1, 1, -2, 1, 1, -1),
- GROUP(
- /* RESERVED 31-19 */
- MOD_SEL6_18
- /* RESERVED 17 */
- MOD_SEL6_16
- /* RESERVED 15-14 */
- MOD_SEL6_13
- MOD_SEL6_12
- /* RESERVED 11 */
- MOD_SEL6_10
- /* RESERVED 9-8 */
- MOD_SEL6_7
- MOD_SEL6_6
- MOD_SEL6_5
- /* RESERVED 4-3 */
- MOD_SEL6_2
- MOD_SEL6_1
- /* RESERVED 0 */
- ))
- },
- { PINMUX_CFG_REG_VAR("MOD_SEL7", 0xE6061900, 32,
- GROUP(-15, 1, 1, -1, 1, -1, 1, 1, -2, 1, 1,
- -2, 1, 1, -1, 1),
- GROUP(
- /* RESERVED 31-17 */
- MOD_SEL7_16
- MOD_SEL7_15
- /* RESERVED 14 */
- MOD_SEL7_13
- /* RESERVED 12 */
- MOD_SEL7_11
- MOD_SEL7_10
- /* RESERVED 9-8 */
- MOD_SEL7_7
- MOD_SEL7_6
- /* RESERVED 5-4 */
- MOD_SEL7_3
- MOD_SEL7_2
- /* RESERVED 1 */
- MOD_SEL7_0))
- },
{ PINMUX_CFG_REG_VAR("MOD_SEL8", 0xE6068100, 32,
GROUP(-20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
GROUP(
diff --git a/drivers/platform/chrome/cros_typec_switch.c b/drivers/platform/chrome/cros_typec_switch.c
index a26219e97c93..26af51952f7f 100644
--- a/drivers/platform/chrome/cros_typec_switch.c
+++ b/drivers/platform/chrome/cros_typec_switch.c
@@ -268,6 +268,7 @@ static int cros_typec_register_switches(struct cros_typec_switch_data *sdata)

return 0;
err_switch:
+ fwnode_handle_put(fwnode);
cros_typec_unregister_switches(sdata);
return ret;
}
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
index 2ce8cb2170df..d9685aef0887 100644
--- a/drivers/platform/x86/amd/Kconfig
+++ b/drivers/platform/x86/amd/Kconfig
@@ -7,7 +7,7 @@ source "drivers/platform/x86/amd/pmf/Kconfig"

config AMD_PMC
tristate "AMD SoC PMC driver"
- depends on ACPI && PCI && RTC_CLASS
+ depends on ACPI && PCI && RTC_CLASS && AMD_NB
select SERIO
help
The driver provides support for AMD Power Management Controller
diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c
index be1b49824edb..eb9fc6cb13e3 100644
--- a/drivers/platform/x86/amd/pmc.c
+++ b/drivers/platform/x86/amd/pmc.c
@@ -10,6 +10,7 @@

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

+#include <asm/amd_nb.h>
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
@@ -37,8 +38,6 @@
#define AMD_PMC_SCRATCH_REG_YC 0xD14

/* STB Registers */
-#define AMD_PMC_STB_INDEX_ADDRESS 0xF8
-#define AMD_PMC_STB_INDEX_DATA 0xFC
#define AMD_PMC_STB_PMI_0 0x03E30600
#define AMD_PMC_STB_S2IDLE_PREPARE 0xC6000001
#define AMD_PMC_STB_S2IDLE_RESTORE 0xC6000002
@@ -55,8 +54,6 @@
#define S2D_TELEMETRY_DRAMBYTES_MAX 0x1000000

/* Base address of SMU for mapping physical address to virtual address */
-#define AMD_PMC_SMU_INDEX_ADDRESS 0xB8
-#define AMD_PMC_SMU_INDEX_DATA 0xBC
#define AMD_PMC_MAPPING_SIZE 0x01000
#define AMD_PMC_BASE_ADDR_OFFSET 0x10000
#define AMD_PMC_BASE_ADDR_LO 0x13B102E8
@@ -310,33 +307,6 @@ static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
return 0;
}

-static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
- struct seq_file *s)
-{
- u32 val;
-
- switch (pdev->cpu_id) {
- case AMD_CPU_ID_CZN:
- val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);
- break;
- case AMD_CPU_ID_YC:
- case AMD_CPU_ID_CB:
- case AMD_CPU_ID_PS:
- val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
- break;
- default:
- return -EINVAL;
- }
-
- if (dev)
- dev_dbg(pdev->dev, "SMU idlemask s0i3: 0x%x\n", val);
-
- if (s)
- seq_printf(s, "SMU idlemask : 0x%x\n", val);
-
- return 0;
-}
-
static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table)
{
if (!pdev->smu_virt_addr) {
@@ -373,6 +343,9 @@ static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
int rc;
u32 val;

+ if (dev->cpu_id == AMD_CPU_ID_PCO)
+ return -ENODEV;
+
rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, 1);
if (rc)
return rc;
@@ -419,12 +392,31 @@ static ssize_t smu_program_show(struct device *d, struct device_attribute *attr,
static DEVICE_ATTR_RO(smu_fw_version);
static DEVICE_ATTR_RO(smu_program);

+static umode_t pmc_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+
+ if (pdev->cpu_id == AMD_CPU_ID_PCO)
+ return 0;
+ return 0444;
+}
+
static struct attribute *pmc_attrs[] = {
&dev_attr_smu_fw_version.attr,
&dev_attr_smu_program.attr,
NULL,
};
-ATTRIBUTE_GROUPS(pmc);
+
+static struct attribute_group pmc_attr_group = {
+ .attrs = pmc_attrs,
+ .is_visible = pmc_attr_is_visible,
+};
+
+static const struct attribute_group *pmc_groups[] = {
+ &pmc_attr_group,
+ NULL,
+};

static int smu_fw_info_show(struct seq_file *s, void *unused)
{
@@ -491,28 +483,47 @@ static int s0ix_stats_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(s0ix_stats);

-static int amd_pmc_idlemask_show(struct seq_file *s, void *unused)
+static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
+ struct seq_file *s)
{
- struct amd_pmc_dev *dev = s->private;
+ u32 val;
int rc;

- /* we haven't yet read SMU version */
- if (!dev->major) {
- rc = amd_pmc_get_smu_version(dev);
- if (rc)
- return rc;
+ switch (pdev->cpu_id) {
+ case AMD_CPU_ID_CZN:
+ /* we haven't yet read SMU version */
+ if (!pdev->major) {
+ rc = amd_pmc_get_smu_version(pdev);
+ if (rc)
+ return rc;
+ }
+ if (pdev->major > 56 || (pdev->major >= 55 && pdev->minor >= 37))
+ val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);
+ else
+ return -EINVAL;
+ break;
+ case AMD_CPU_ID_YC:
+ case AMD_CPU_ID_CB:
+ case AMD_CPU_ID_PS:
+ val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
+ break;
+ default:
+ return -EINVAL;
}

- if (dev->major > 56 || (dev->major >= 55 && dev->minor >= 37)) {
- rc = amd_pmc_idlemask_read(dev, NULL, s);
- if (rc)
- return rc;
- } else {
- seq_puts(s, "Unsupported SMU version for Idlemask\n");
- }
+ if (dev)
+ dev_dbg(pdev->dev, "SMU idlemask s0i3: 0x%x\n", val);
+
+ if (s)
+ seq_printf(s, "SMU idlemask : 0x%x\n", val);

return 0;
}
+
+static int amd_pmc_idlemask_show(struct seq_file *s, void *unused)
+{
+ return amd_pmc_idlemask_read(s->private, NULL, s);
+}
DEFINE_SHOW_ATTRIBUTE(amd_pmc_idlemask);

static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
@@ -783,6 +794,14 @@ static void amd_pmc_s2idle_check(void)
dev_err(pdev->dev, "error writing to STB: %d\n", rc);
}

+static int amd_pmc_dump_data(struct amd_pmc_dev *pdev)
+{
+ if (pdev->cpu_id == AMD_CPU_ID_PCO)
+ return -ENODEV;
+
+ return amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
+}
+
static void amd_pmc_s2idle_restore(void)
{
struct amd_pmc_dev *pdev = &pmc;
@@ -795,7 +814,7 @@ static void amd_pmc_s2idle_restore(void)
dev_err(pdev->dev, "resume failed: %d\n", rc);

/* Let SMU know that we are looking for stats */
- amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
+ amd_pmc_dump_data(pdev);

rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_RESTORE);
if (rc)
@@ -876,17 +895,9 @@ static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
{
int err;

- err = pci_write_config_dword(dev->rdev, AMD_PMC_STB_INDEX_ADDRESS, AMD_PMC_STB_PMI_0);
+ err = amd_smn_write(0, AMD_PMC_STB_PMI_0, data);
if (err) {
- dev_err(dev->dev, "failed to write addr in stb: 0x%X\n",
- AMD_PMC_STB_INDEX_ADDRESS);
- return pcibios_err_to_errno(err);
- }
-
- err = pci_write_config_dword(dev->rdev, AMD_PMC_STB_INDEX_DATA, data);
- if (err) {
- dev_err(dev->dev, "failed to write data in stb: 0x%X\n",
- AMD_PMC_STB_INDEX_DATA);
+ dev_err(dev->dev, "failed to write data in stb: 0x%X\n", AMD_PMC_STB_PMI_0);
return pcibios_err_to_errno(err);
}

@@ -898,18 +909,10 @@ static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
{
int i, err;

- err = pci_write_config_dword(dev->rdev, AMD_PMC_STB_INDEX_ADDRESS, AMD_PMC_STB_PMI_0);
- if (err) {
- dev_err(dev->dev, "error writing addr to stb: 0x%X\n",
- AMD_PMC_STB_INDEX_ADDRESS);
- return pcibios_err_to_errno(err);
- }
-
for (i = 0; i < FIFO_SIZE; i++) {
- err = pci_read_config_dword(dev->rdev, AMD_PMC_STB_INDEX_DATA, buf++);
+ err = amd_smn_read(0, AMD_PMC_STB_PMI_0, buf++);
if (err) {
- dev_err(dev->dev, "error reading data from stb: 0x%X\n",
- AMD_PMC_STB_INDEX_DATA);
+ dev_err(dev->dev, "error reading data from stb: 0x%X\n", AMD_PMC_STB_PMI_0);
return pcibios_err_to_errno(err);
}
}
@@ -936,30 +939,18 @@ static int amd_pmc_probe(struct platform_device *pdev)

dev->cpu_id = rdev->device;
dev->rdev = rdev;
- err = pci_write_config_dword(rdev, AMD_PMC_SMU_INDEX_ADDRESS, AMD_PMC_BASE_ADDR_LO);
- if (err) {
- dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMC_SMU_INDEX_ADDRESS);
- err = pcibios_err_to_errno(err);
- goto err_pci_dev_put;
- }
-
- err = pci_read_config_dword(rdev, AMD_PMC_SMU_INDEX_DATA, &val);
+ err = amd_smn_read(0, AMD_PMC_BASE_ADDR_LO, &val);
if (err) {
+ dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_LO);
err = pcibios_err_to_errno(err);
goto err_pci_dev_put;
}

base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK;

- err = pci_write_config_dword(rdev, AMD_PMC_SMU_INDEX_ADDRESS, AMD_PMC_BASE_ADDR_HI);
- if (err) {
- dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMC_SMU_INDEX_ADDRESS);
- err = pcibios_err_to_errno(err);
- goto err_pci_dev_put;
- }
-
- err = pci_read_config_dword(rdev, AMD_PMC_SMU_INDEX_DATA, &val);
+ err = amd_smn_read(0, AMD_PMC_BASE_ADDR_HI, &val);
if (err) {
+ dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_HI);
err = pcibios_err_to_errno(err);
goto err_pci_dev_put;
}
diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
index 6d89528c3177..d87986adf91e 100644
--- a/drivers/platform/x86/amd/pmf/Kconfig
+++ b/drivers/platform/x86/amd/pmf/Kconfig
@@ -7,6 +7,7 @@ config AMD_PMF
tristate "AMD Platform Management Framework"
depends on ACPI && PCI
depends on POWER_SUPPLY
+ depends on AMD_NB
select ACPI_PLATFORM_PROFILE
help
This driver provides support for the AMD Platform Management Framework.
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index da23639071d7..0acc0b622129 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -8,6 +8,7 @@
* Author: Shyam Sundar S K <Shyam-sundar.S-k@xxxxxxx>
*/

+#include <asm/amd_nb.h>
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -22,8 +23,6 @@
#define AMD_PMF_REGISTER_ARGUMENT 0xA58

/* Base address of SMU for mapping physical address to virtual address */
-#define AMD_PMF_SMU_INDEX_ADDRESS 0xB8
-#define AMD_PMF_SMU_INDEX_DATA 0xBC
#define AMD_PMF_MAPPING_SIZE 0x01000
#define AMD_PMF_BASE_ADDR_OFFSET 0x10000
#define AMD_PMF_BASE_ADDR_LO 0x13B102E8
@@ -348,30 +347,19 @@ static int amd_pmf_probe(struct platform_device *pdev)
}

dev->cpu_id = rdev->device;
- err = pci_write_config_dword(rdev, AMD_PMF_SMU_INDEX_ADDRESS, AMD_PMF_BASE_ADDR_LO);
- if (err) {
- dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMF_SMU_INDEX_ADDRESS);
- pci_dev_put(rdev);
- return pcibios_err_to_errno(err);
- }

- err = pci_read_config_dword(rdev, AMD_PMF_SMU_INDEX_DATA, &val);
+ err = amd_smn_read(0, AMD_PMF_BASE_ADDR_LO, &val);
if (err) {
+ dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_LO);
pci_dev_put(rdev);
return pcibios_err_to_errno(err);
}

base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;

- err = pci_write_config_dword(rdev, AMD_PMF_SMU_INDEX_ADDRESS, AMD_PMF_BASE_ADDR_HI);
- if (err) {
- dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMF_SMU_INDEX_ADDRESS);
- pci_dev_put(rdev);
- return pcibios_err_to_errno(err);
- }
-
- err = pci_read_config_dword(rdev, AMD_PMF_SMU_INDEX_DATA, &val);
+ err = amd_smn_read(0, AMD_PMF_BASE_ADDR_HI, &val);
if (err) {
+ dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_HI);
pci_dev_put(rdev);
return pcibios_err_to_errno(err);
}
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 2a48a2d880d8..d1ec31086e9b 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -4481,6 +4481,14 @@ static const struct dmi_system_id fwbug_list[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "20UH"),
}
},
+ {
+ .ident = "T14s Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"),
+ }
+ },
{
.ident = "P14s Gen1 AMD",
.driver_data = &quirk_s2idle_bug,
diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
index 66039c665dd1..0af536f4932f 100644
--- a/drivers/power/supply/generic-adc-battery.c
+++ b/drivers/power/supply/generic-adc-battery.c
@@ -135,6 +135,9 @@ static int read_channel(struct gab *adc_bat, enum power_supply_property psp,
result);
if (ret < 0)
pr_err("read channel error\n");
+ else
+ *result *= 1000;
+
return ret;
}

diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
index 36f807b5ec44..f1b431aa0e4f 100644
--- a/drivers/power/supply/rk817_charger.c
+++ b/drivers/power/supply/rk817_charger.c
@@ -335,6 +335,20 @@ static int rk817_bat_calib_cap(struct rk817_charger *charger)
charger->fcc_mah * 1000);
}

+ /*
+ * Set the SOC to 0 if we are below the minimum system voltage.
+ */
+ if (volt_avg <= charger->bat_voltage_min_design_uv) {
+ charger->soc = 0;
+ charge_now_adc = CHARGE_TO_ADC(0, charger->res_div);
+ put_unaligned_be32(charge_now_adc, bulk_reg);
+ regmap_bulk_write(rk808->regmap,
+ RK817_GAS_GAUGE_Q_INIT_H3, bulk_reg, 4);
+ dev_warn(charger->dev,
+ "Battery voltage %d below minimum voltage %d\n",
+ volt_avg, charger->bat_voltage_min_design_uv);
+ }
+
rk817_record_battery_nvram_values(charger);

return 0;
@@ -710,9 +724,10 @@ static int rk817_read_battery_nvram_values(struct rk817_charger *charger)

/*
* Read the nvram for state of charge. Sanity check for values greater
- * than 100 (10000). If the value is off it should get corrected
- * automatically when the voltage drops to the min (soc is 0) or when
- * the battery is full (soc is 100).
+ * than 100 (10000) or less than 0, because other things (BSP kernels,
+ * U-Boot, or even i2cset) can write to this register. If the value is
+ * off it should get corrected automatically when the voltage drops to
+ * the min (soc is 0) or when the battery is full (soc is 100).
*/
ret = regmap_bulk_read(charger->rk808->regmap,
RK817_GAS_GAUGE_BAT_R1, bulk_reg, 3);
@@ -721,6 +736,8 @@ static int rk817_read_battery_nvram_values(struct rk817_charger *charger)
charger->soc = get_unaligned_le24(bulk_reg);
if (charger->soc > 10000)
charger->soc = 10000;
+ if (charger->soc < 0)
+ charger->soc = 0;

return 0;
}
@@ -731,8 +748,8 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
{
struct rk808 *rk808 = charger->rk808;
u8 bulk_reg[4];
- u32 boot_voltage, boot_charge_mah, tmp;
- int ret, reg, off_time;
+ u32 boot_voltage, boot_charge_mah;
+ int ret, reg, off_time, tmp;
bool first_boot;

/*
@@ -785,10 +802,12 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
bulk_reg, 4);
tmp = get_unaligned_be32(bulk_reg);
+ if (tmp < 0)
+ tmp = 0;
boot_charge_mah = ADC_TO_CHARGE_UAH(tmp,
charger->res_div) / 1000;
/*
- * Check if the columb counter has been off for more than 300
+ * Check if the columb counter has been off for more than 30
* minutes as it tends to drift downward. If so, re-init soc
* with the boot voltage instead. Note the unit values for the
* OFF_CNT register appear to be in decaminutes and stops
@@ -799,7 +818,7 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
* than 0 on a reboot anyway.
*/
regmap_read(rk808->regmap, RK817_GAS_GAUGE_OFF_CNT, &off_time);
- if (off_time >= 30) {
+ if (off_time >= 3) {
regmap_bulk_read(rk808->regmap,
RK817_GAS_GAUGE_PWRON_VOL_H,
bulk_reg, 2);
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 5cd7b90872c6..5732300eb004 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -418,7 +418,7 @@ static const struct meson_pwm_data pwm_axg_ee_data = {
};

static const char * const pwm_axg_ao_parent_names[] = {
- "aoclk81", "xtal", "fclk_div4", "fclk_div5"
+ "xtal", "axg_ao_clk81", "fclk_div4", "fclk_div5"
};

static const struct meson_pwm_data pwm_axg_ao_data = {
@@ -427,7 +427,7 @@ static const struct meson_pwm_data pwm_axg_ao_data = {
};

static const char * const pwm_g12a_ao_ab_parent_names[] = {
- "xtal", "aoclk81", "fclk_div4", "fclk_div5"
+ "xtal", "g12a_ao_clk81", "fclk_div4", "fclk_div5"
};

static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
@@ -436,7 +436,7 @@ static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
};

static const char * const pwm_g12a_ao_cd_parent_names[] = {
- "xtal", "aoclk81",
+ "xtal", "g12a_ao_clk81",
};

static const struct meson_pwm_data pwm_g12a_ao_cd_data = {
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index ccf0ccdef29d..3811578fcff0 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -138,6 +138,19 @@ static int mtk_disp_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
high_width = mul_u64_u64_div_u64(state->duty_cycle, rate, div);
value = period | (high_width << PWM_HIGH_WIDTH_SHIFT);

+ if (mdp->data->bls_debug && !mdp->data->has_commit) {
+ /*
+ * For MT2701, disable double buffer before writing register
+ * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
+ */
+ mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
+ mdp->data->bls_debug_mask,
+ mdp->data->bls_debug_mask);
+ mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
+ mdp->data->con0_sel,
+ mdp->data->con0_sel);
+ }
+
mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
PWM_CLKDIV_MASK,
clk_div << PWM_CLKDIV_SHIFT);
@@ -152,17 +165,6 @@ static int mtk_disp_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
mtk_disp_pwm_update_bits(mdp, mdp->data->commit,
mdp->data->commit_mask,
0x0);
- } else {
- /*
- * For MT2701, disable double buffer before writing register
- * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
- */
- mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
- mdp->data->bls_debug_mask,
- mdp->data->bls_debug_mask);
- mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
- mdp->data->con0_sel,
- mdp->data->con0_sel);
}

mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
@@ -194,6 +196,16 @@ static int mtk_disp_pwm_get_state(struct pwm_chip *chip,
return 0;
}

+ /*
+ * Apply DISP_PWM_DEBUG settings to choose whether to enable or disable
+ * registers double buffer and manual commit to working register before
+ * performing any read/write operation
+ */
+ if (mdp->data->bls_debug)
+ mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
+ mdp->data->bls_debug_mask,
+ mdp->data->bls_debug_mask);
+
rate = clk_get_rate(mdp->clk_main);
con0 = readl(mdp->base + mdp->data->con0);
con1 = readl(mdp->base + mdp->data->con1);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index cdac193634e0..c417eae887b2 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -207,6 +207,78 @@ static void regulator_unlock(struct regulator_dev *rdev)
mutex_unlock(&regulator_nesting_mutex);
}

+/**
+ * regulator_lock_two - lock two regulators
+ * @rdev1: first regulator
+ * @rdev2: second regulator
+ * @ww_ctx: w/w mutex acquire context
+ *
+ * Locks both rdevs using the regulator_ww_class.
+ */
+static void regulator_lock_two(struct regulator_dev *rdev1,
+ struct regulator_dev *rdev2,
+ struct ww_acquire_ctx *ww_ctx)
+{
+ struct regulator_dev *tmp;
+ int ret;
+
+ ww_acquire_init(ww_ctx, &regulator_ww_class);
+
+ /* Try to just grab both of them */
+ ret = regulator_lock_nested(rdev1, ww_ctx);
+ WARN_ON(ret);
+ ret = regulator_lock_nested(rdev2, ww_ctx);
+ if (ret != -EDEADLOCK) {
+ WARN_ON(ret);
+ goto exit;
+ }
+
+ while (true) {
+ /*
+ * Start of loop: rdev1 was locked and rdev2 was contended.
+ * Need to unlock rdev1, slowly lock rdev2, then try rdev1
+ * again.
+ */
+ regulator_unlock(rdev1);
+
+ ww_mutex_lock_slow(&rdev2->mutex, ww_ctx);
+ rdev2->ref_cnt++;
+ rdev2->mutex_owner = current;
+ ret = regulator_lock_nested(rdev1, ww_ctx);
+
+ if (ret == -EDEADLOCK) {
+ /* More contention; swap which needs to be slow */
+ tmp = rdev1;
+ rdev1 = rdev2;
+ rdev2 = tmp;
+ } else {
+ WARN_ON(ret);
+ break;
+ }
+ }
+
+exit:
+ ww_acquire_done(ww_ctx);
+}
+
+/**
+ * regulator_unlock_two - unlock two regulators
+ * @rdev1: first regulator
+ * @rdev2: second regulator
+ * @ww_ctx: w/w mutex acquire context
+ *
+ * The inverse of regulator_lock_two().
+ */
+
+static void regulator_unlock_two(struct regulator_dev *rdev1,
+ struct regulator_dev *rdev2,
+ struct ww_acquire_ctx *ww_ctx)
+{
+ regulator_unlock(rdev2);
+ regulator_unlock(rdev1);
+ ww_acquire_fini(ww_ctx);
+}
+
static bool regulator_supply_is_couple(struct regulator_dev *rdev)
{
struct regulator_dev *c_rdev;
@@ -334,6 +406,7 @@ static void regulator_lock_dependent(struct regulator_dev *rdev,
ww_mutex_lock_slow(&new_contended_rdev->mutex, ww_ctx);
old_contended_rdev = new_contended_rdev;
old_contended_rdev->ref_cnt++;
+ old_contended_rdev->mutex_owner = current;
}

err = regulator_lock_recursive(rdev,
@@ -1583,9 +1656,6 @@ static int set_machine_constraints(struct regulator_dev *rdev)
rdev->constraints->always_on = true;
}

- if (rdev->desc->off_on_delay)
- rdev->last_off = ktime_get_boottime();
-
/* If the constraints say the regulator should be on at this point
* and we have control then make sure it is enabled.
*/
@@ -1619,6 +1689,8 @@ static int set_machine_constraints(struct regulator_dev *rdev)

if (rdev->constraints->always_on)
rdev->use_count++;
+ } else if (rdev->desc->off_on_delay) {
+ rdev->last_off = ktime_get();
}

print_constraints(rdev);
@@ -1627,8 +1699,8 @@ static int set_machine_constraints(struct regulator_dev *rdev)

/**
* set_supply - set regulator supply regulator
- * @rdev: regulator name
- * @supply_rdev: supply regulator name
+ * @rdev: regulator (locked)
+ * @supply_rdev: supply regulator (locked))
*
* Called by platform initialisation code to set the supply regulator for this
* regulator. This ensures that a regulators supply will also be enabled by the
@@ -1800,6 +1872,8 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
struct regulator *regulator;
int err = 0;

+ lockdep_assert_held_once(&rdev->mutex.base);
+
if (dev) {
char buf[REG_STR_SIZE];
int size;
@@ -1827,9 +1901,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
regulator->rdev = rdev;
regulator->supply_name = supply_name;

- regulator_lock(rdev);
list_add(&regulator->list, &rdev->consumer_list);
- regulator_unlock(rdev);

if (dev) {
regulator->dev = dev;
@@ -1995,6 +2067,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
{
struct regulator_dev *r;
struct device *dev = rdev->dev.parent;
+ struct ww_acquire_ctx ww_ctx;
int ret = 0;

/* No supply to resolve? */
@@ -2061,23 +2134,23 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
* between rdev->supply null check and setting rdev->supply in
* set_supply() from concurrent tasks.
*/
- regulator_lock(rdev);
+ regulator_lock_two(rdev, r, &ww_ctx);

/* Supply just resolved by a concurrent task? */
if (rdev->supply) {
- regulator_unlock(rdev);
+ regulator_unlock_two(rdev, r, &ww_ctx);
put_device(&r->dev);
goto out;
}

ret = set_supply(rdev, r);
if (ret < 0) {
- regulator_unlock(rdev);
+ regulator_unlock_two(rdev, r, &ww_ctx);
put_device(&r->dev);
goto out;
}

- regulator_unlock(rdev);
+ regulator_unlock_two(rdev, r, &ww_ctx);

/*
* In set_machine_constraints() we may have turned this regulator on
@@ -2190,7 +2263,9 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
return regulator;
}

+ regulator_lock(rdev);
regulator = create_regulator(rdev, dev, id);
+ regulator_unlock(rdev);
if (regulator == NULL) {
regulator = ERR_PTR(-ENOMEM);
module_put(rdev->owner);
@@ -2668,7 +2743,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev)

trace_regulator_enable(rdev_get_name(rdev));

- if (rdev->desc->off_on_delay && rdev->last_off) {
+ if (rdev->desc->off_on_delay) {
/* if needed, keep a distance of off_on_delay from last time
* this regulator was disabled.
*/
@@ -6043,6 +6118,7 @@ static void regulator_summary_lock(struct ww_acquire_ctx *ww_ctx)
ww_mutex_lock_slow(&new_contended_rdev->mutex, ww_ctx);
old_contended_rdev = new_contended_rdev;
old_contended_rdev->ref_cnt++;
+ old_contended_rdev->mutex_owner = current;
}

err = regulator_summary_lock_all(ww_ctx,
diff --git a/drivers/regulator/stm32-pwr.c b/drivers/regulator/stm32-pwr.c
index 2a42acb7c24e..e5dd4db6403b 100644
--- a/drivers/regulator/stm32-pwr.c
+++ b/drivers/regulator/stm32-pwr.c
@@ -129,17 +129,16 @@ static const struct regulator_desc stm32_pwr_desc[] = {

static int stm32_pwr_regulator_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct stm32_pwr_reg *priv;
void __iomem *base;
struct regulator_dev *rdev;
struct regulator_config config = { };
int i, ret = 0;

- base = of_iomap(np, 0);
- if (!base) {
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
dev_err(&pdev->dev, "Unable to map IO memory\n");
- return -ENOMEM;
+ return PTR_ERR(base);
}

config.dev = &pdev->dev;
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 35df1b0a515b..67e7664efb0d 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -1348,8 +1348,9 @@ static int __qcom_glink_send(struct glink_channel *channel,
ret = qcom_glink_tx(glink, &req, sizeof(req), data, chunk_size, wait);

/* Mark intent available if we failed */
- if (ret && intent) {
- intent->in_use = false;
+ if (ret) {
+ if (intent)
+ intent->in_use = false;
return ret;
}

@@ -1370,8 +1371,9 @@ static int __qcom_glink_send(struct glink_channel *channel,
chunk_size, wait);

/* Mark intent available if we failed */
- if (ret && intent) {
- intent->in_use = false;
+ if (ret) {
+ if (intent)
+ intent->in_use = false;
break;
}
}
diff --git a/drivers/rtc/rtc-meson-vrtc.c b/drivers/rtc/rtc-meson-vrtc.c
index 1463c8621561..648fa362ec44 100644
--- a/drivers/rtc/rtc-meson-vrtc.c
+++ b/drivers/rtc/rtc-meson-vrtc.c
@@ -23,7 +23,7 @@ static int meson_vrtc_read_time(struct device *dev, struct rtc_time *tm)
struct timespec64 time;

dev_dbg(dev, "%s\n", __func__);
- ktime_get_raw_ts64(&time);
+ ktime_get_real_ts64(&time);
rtc_time64_to_tm(time.tv_sec, tm);

return 0;
@@ -96,7 +96,7 @@ static int __maybe_unused meson_vrtc_suspend(struct device *dev)
long alarm_secs;
struct timespec64 time;

- ktime_get_raw_ts64(&time);
+ ktime_get_real_ts64(&time);
local_time = time.tv_sec;

dev_dbg(dev, "alarm_time = %lus, local_time=%lus\n",
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 4d4f3b1a7309..73634a3ccfd3 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -25,6 +25,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rtc.h>
+#include <linux/rtc/rtc-omap.h>

/*
* The OMAP RTC is a year/month/day/hours/minutes/seconds BCD clock
diff --git a/drivers/rtc/rtc-ti-k3.c b/drivers/rtc/rtc-ti-k3.c
index ba23163cc042..0d90fe923355 100644
--- a/drivers/rtc/rtc-ti-k3.c
+++ b/drivers/rtc/rtc-ti-k3.c
@@ -632,7 +632,8 @@ static int __maybe_unused ti_k3_rtc_suspend(struct device *dev)
struct ti_k3_rtc *priv = dev_get_drvdata(dev);

if (device_may_wakeup(dev))
- enable_irq_wake(priv->irq);
+ return enable_irq_wake(priv->irq);
+
return 0;
}

diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 5a6d9c15395f..bce3422d8564 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2941,7 +2941,7 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
return 0;
spin_lock_irq(&cqr->dq->lock);
req = (struct request *) cqr->callback_data;
- blk_mq_requeue_request(req, false);
+ blk_mq_requeue_request(req, true);
spin_unlock_irq(&cqr->dq->lock);

return 0;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index d643c5a49aa9..70c24377c6a1 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1258,7 +1258,11 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,

slot_err_v1_hw(hisi_hba, task, slot);
if (unlikely(slot->abort)) {
- sas_task_abort(task);
+ if (dev_is_sata(device) && task->ata_task.use_ncq)
+ sas_ata_device_link_abort(device, true);
+ else
+ sas_task_abort(task);
+
return;
}
goto out;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index cded42f4ca44..02575d81afca 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2404,7 +2404,11 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
error_info[2], error_info[3]);

if (unlikely(slot->abort)) {
- sas_task_abort(task);
+ if (dev_is_sata(device) && task->ata_task.use_ncq)
+ sas_ata_device_link_abort(device, true);
+ else
+ sas_task_abort(task);
+
return;
}
goto out;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 620dcefe7b6f..e8a3511040af 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2293,7 +2293,11 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
error_info[0], error_info[1],
error_info[2], error_info[3]);
if (unlikely(slot->abort)) {
- sas_task_abort(task);
+ if (dev_is_sata(device) && task->ata_task.use_ncq)
+ sas_ata_device_link_abort(device, true);
+ else
+ sas_task_abort(task);
+
return;
}
goto out;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 2fd55ef9ffca..6b045be947b1 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -886,6 +886,24 @@ void sas_ata_wait_eh(struct domain_device *dev)
ata_port_wait_eh(ap);
}

+void sas_ata_device_link_abort(struct domain_device *device, bool force_reset)
+{
+ struct ata_port *ap = device->sata_dev.ap;
+ struct ata_link *link = &ap->link;
+ unsigned long flags;
+
+ spin_lock_irqsave(ap->lock, flags);
+ device->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */
+ device->sata_dev.fis[3] = ATA_ABORTED; /* tf error */
+
+ link->eh_info.err_mask |= AC_ERR_DEV;
+ if (force_reset)
+ link->eh_info.action |= ATA_EH_RESET;
+ ata_link_abort(link);
+ spin_unlock_irqrestore(ap->lock, flags);
+}
+EXPORT_SYMBOL_GPL(sas_ata_device_link_abort);
+
int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id)
{
struct sas_tmf_task tmf_task = {};
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2f38c8d5a48a..d54fd153cb11 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -11971,7 +11971,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
goto out_iounmap_all;
} else {
error = -ENOMEM;
- goto out_iounmap_all;
+ goto out_iounmap_ctrl;
}
}

@@ -11989,7 +11989,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
dev_err(&pdev->dev,
"ioremap failed for SLI4 HBA dpp registers.\n");
error = -ENOMEM;
- goto out_iounmap_ctrl;
+ goto out_iounmap_all;
}
phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
}
@@ -12014,9 +12014,11 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
return 0;

out_iounmap_all:
- iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+ if (phba->sli4_hba.drbl_regs_memmap_p)
+ iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
- iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+ if (phba->sli4_hba.ctrl_regs_memmap_p)
+ iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
iounmap(phba->sli4_hba.conf_regs_memmap_p);

diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index bf491af9f0d6..16e2cf848c6e 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1441,6 +1441,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
*/
if (cmdid == CMDID_INT_CMDS) {
scb = &adapter->int_scb;
+ cmd = scb->cmd;

list_del_init(&scb->list);
scb->state = SCB_FREE;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index ea9e69fb6282..64355d0baa5f 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -2526,7 +2526,7 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
mrioc->unrecoverable = 1;
goto schedule_work;
case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
- return;
+ goto schedule_work;
case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
break;
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index e1d7b4543248..364ddbe365c2 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -288,6 +288,10 @@ static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
if (BRCM_ID(family_id) == 0x7260 && BRCM_REV(family_id) == 0)
cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
out:
+ if (ret && cpubiuctrl_base) {
+ iounmap(cpubiuctrl_base);
+ cpubiuctrl_base = NULL;
+ }
return ret;
}

diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 621ceaa047d4..057b85b158f9 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -454,8 +454,11 @@ static int __init renesas_soc_init(void)
}

soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
- if (!soc_dev_attr)
+ if (!soc_dev_attr) {
+ if (chipid)
+ iounmap(chipid);
return -ENOMEM;
+ }

np = of_find_node_by_path("/");
of_property_read_string(np, "model", &soc_dev_attr->machine);
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index ce09c42eaed2..f04c21157904 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -527,7 +527,7 @@ static int am33xx_pm_probe(struct platform_device *pdev)

ret = am33xx_pm_alloc_sram();
if (ret)
- return ret;
+ goto err_wkup_m3_ipc_put;

ret = am33xx_pm_rtc_setup();
if (ret)
@@ -572,13 +572,14 @@ static int am33xx_pm_probe(struct platform_device *pdev)
pm_runtime_put_sync(dev);
err_pm_runtime_disable:
pm_runtime_disable(dev);
- wkup_m3_ipc_put(m3_ipc);
err_unsetup_rtc:
iounmap(rtc_base_virt);
clk_put(rtc_fck);
err_free_sram:
am33xx_pm_free_sram();
pm33xx_dev = NULL;
+err_wkup_m3_ipc_put:
+ wkup_m3_ipc_put(m3_ipc);
return ret;
}

diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index e7da7d7b213f..7286c9b3be69 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -1719,40 +1719,40 @@ int cdns_set_sdw_stream(struct snd_soc_dai *dai,
void *stream, int direction)
{
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
- struct sdw_cdns_dma_data *dma;
+ struct sdw_cdns_dai_runtime *dai_runtime;

if (stream) {
/* first paranoia check */
if (direction == SNDRV_PCM_STREAM_PLAYBACK)
- dma = dai->playback_dma_data;
+ dai_runtime = dai->playback_dma_data;
else
- dma = dai->capture_dma_data;
+ dai_runtime = dai->capture_dma_data;

- if (dma) {
+ if (dai_runtime) {
dev_err(dai->dev,
- "dma_data already allocated for dai %s\n",
+ "dai_runtime already allocated for dai %s\n",
dai->name);
return -EINVAL;
}

- /* allocate and set dma info */
- dma = kzalloc(sizeof(*dma), GFP_KERNEL);
- if (!dma)
+ /* allocate and set dai_runtime info */
+ dai_runtime = kzalloc(sizeof(*dai_runtime), GFP_KERNEL);
+ if (!dai_runtime)
return -ENOMEM;

- dma->stream_type = SDW_STREAM_PCM;
+ dai_runtime->stream_type = SDW_STREAM_PCM;

- dma->bus = &cdns->bus;
- dma->link_id = cdns->instance;
+ dai_runtime->bus = &cdns->bus;
+ dai_runtime->link_id = cdns->instance;

- dma->stream = stream;
+ dai_runtime->stream = stream;

if (direction == SNDRV_PCM_STREAM_PLAYBACK)
- dai->playback_dma_data = dma;
+ dai->playback_dma_data = dai_runtime;
else
- dai->capture_dma_data = dma;
+ dai->capture_dma_data = dai_runtime;
} else {
- /* for NULL stream we release allocated dma_data */
+ /* for NULL stream we release allocated dai_runtime */
if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
kfree(dai->playback_dma_data);
dai->playback_dma_data = NULL;
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index 51e6ecc027cb..fea3a90550d3 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -76,7 +76,7 @@ struct sdw_cdns_stream_config {
};

/**
- * struct sdw_cdns_dma_data: Cadence DMA data
+ * struct sdw_cdns_dai_runtime: Cadence DAI runtime data
*
* @name: SoundWire stream name
* @stream: stream runtime
@@ -84,18 +84,16 @@ struct sdw_cdns_stream_config {
* @bus: Bus handle
* @stream_type: Stream type
* @link_id: Master link id
- * @hw_params: hw_params to be applied in .prepare step
* @suspended: status set when suspended, to be used in .prepare
* @paused: status set in .trigger, to be used in suspend
*/
-struct sdw_cdns_dma_data {
+struct sdw_cdns_dai_runtime {
char *name;
struct sdw_stream_runtime *stream;
struct sdw_cdns_pdi *pdi;
struct sdw_bus *bus;
enum sdw_stream_type stream_type;
int link_id;
- struct snd_pcm_hw_params *hw_params;
bool suspended;
bool paused;
};
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 8c76541d553f..89f8cab3f514 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -824,15 +824,15 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
{
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_intel *sdw = cdns_to_intel(cdns);
- struct sdw_cdns_dma_data *dma;
+ struct sdw_cdns_dai_runtime *dai_runtime;
struct sdw_cdns_pdi *pdi;
struct sdw_stream_config sconfig;
struct sdw_port_config *pconfig;
int ch, dir;
int ret;

- dma = snd_soc_dai_get_dma_data(dai, substream);
- if (!dma)
+ dai_runtime = snd_soc_dai_get_dma_data(dai, substream);
+ if (!dai_runtime)
return -EIO;

ch = params_channels(params);
@@ -854,10 +854,9 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
sdw_cdns_config_stream(cdns, ch, dir, pdi);

/* store pdi and hw_params, may be needed in prepare step */
- dma->paused = false;
- dma->suspended = false;
- dma->pdi = pdi;
- dma->hw_params = params;
+ dai_runtime->paused = false;
+ dai_runtime->suspended = false;
+ dai_runtime->pdi = pdi;

/* Inform DSP about PDI stream number */
ret = intel_params_stream(sdw, substream->stream, dai, params,
@@ -869,7 +868,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
sconfig.direction = dir;
sconfig.ch_count = ch;
sconfig.frame_rate = params_rate(params);
- sconfig.type = dma->stream_type;
+ sconfig.type = dai_runtime->stream_type;

sconfig.bps = snd_pcm_format_width(params_format(params));

@@ -884,7 +883,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
pconfig->ch_mask = (1 << ch) - 1;

ret = sdw_stream_add_master(&cdns->bus, &sconfig,
- pconfig, 1, dma->stream);
+ pconfig, 1, dai_runtime->stream);
if (ret)
dev_err(cdns->dev, "add master to stream failed:%d\n", ret);

@@ -898,19 +897,24 @@ static int intel_prepare(struct snd_pcm_substream *substream,
{
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_intel *sdw = cdns_to_intel(cdns);
- struct sdw_cdns_dma_data *dma;
+ struct sdw_cdns_dai_runtime *dai_runtime;
int ch, dir;
int ret = 0;

- dma = snd_soc_dai_get_dma_data(dai, substream);
- if (!dma) {
- dev_err(dai->dev, "failed to get dma data in %s\n",
+ dai_runtime = snd_soc_dai_get_dma_data(dai, substream);
+ if (!dai_runtime) {
+ dev_err(dai->dev, "failed to get dai runtime in %s\n",
__func__);
return -EIO;
}

- if (dma->suspended) {
- dma->suspended = false;
+ if (dai_runtime->suspended) {
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_pcm_hw_params *hw_params;
+
+ hw_params = &rtd->dpcm[substream->stream].hw_params;
+
+ dai_runtime->suspended = false;

/*
* .prepare() is called after system resume, where we
@@ -921,21 +925,21 @@ static int intel_prepare(struct snd_pcm_substream *substream,
*/

/* configure stream */
- ch = params_channels(dma->hw_params);
+ ch = params_channels(hw_params);
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
dir = SDW_DATA_DIR_RX;
else
dir = SDW_DATA_DIR_TX;

- intel_pdi_shim_configure(sdw, dma->pdi);
- intel_pdi_alh_configure(sdw, dma->pdi);
- sdw_cdns_config_stream(cdns, ch, dir, dma->pdi);
+ intel_pdi_shim_configure(sdw, dai_runtime->pdi);
+ intel_pdi_alh_configure(sdw, dai_runtime->pdi);
+ sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);

/* Inform DSP about PDI stream number */
ret = intel_params_stream(sdw, substream->stream, dai,
- dma->hw_params,
+ hw_params,
sdw->instance,
- dma->pdi->intel_alh_id);
+ dai_runtime->pdi->intel_alh_id);
}

return ret;
@@ -946,11 +950,11 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_intel *sdw = cdns_to_intel(cdns);
- struct sdw_cdns_dma_data *dma;
+ struct sdw_cdns_dai_runtime *dai_runtime;
int ret;

- dma = snd_soc_dai_get_dma_data(dai, substream);
- if (!dma)
+ dai_runtime = snd_soc_dai_get_dma_data(dai, substream);
+ if (!dai_runtime)
return -EIO;

/*
@@ -959,10 +963,10 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
* DEPREPARED for the first cpu-dai and to RELEASED for the last
* cpu-dai.
*/
- ret = sdw_stream_remove_master(&cdns->bus, dma->stream);
+ ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream);
if (ret < 0) {
dev_err(dai->dev, "remove master from stream %s failed: %d\n",
- dma->stream->name, ret);
+ dai_runtime->stream->name, ret);
return ret;
}

@@ -972,8 +976,7 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
return ret;
}

- dma->hw_params = NULL;
- dma->pdi = NULL;
+ dai_runtime->pdi = NULL;

return 0;
}
@@ -996,17 +999,17 @@ static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
int direction)
{
- struct sdw_cdns_dma_data *dma;
+ struct sdw_cdns_dai_runtime *dai_runtime;

if (direction == SNDRV_PCM_STREAM_PLAYBACK)
- dma = dai->playback_dma_data;
+ dai_runtime = dai->playback_dma_data;
else
- dma = dai->capture_dma_data;
+ dai_runtime = dai->capture_dma_data;

- if (!dma)
+ if (!dai_runtime)
return ERR_PTR(-EINVAL);

- return dma->stream;
+ return dai_runtime->stream;
}

static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
@@ -1014,7 +1017,7 @@ static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct sn
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_intel_link_res *res = sdw->link_res;
- struct sdw_cdns_dma_data *dma;
+ struct sdw_cdns_dai_runtime *dai_runtime;
int ret = 0;

/*
@@ -1025,9 +1028,9 @@ static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct sn
if (res->ops && res->ops->trigger)
res->ops->trigger(dai, cmd, substream->stream);

- dma = snd_soc_dai_get_dma_data(dai, substream);
- if (!dma) {
- dev_err(dai->dev, "failed to get dma data in %s\n",
+ dai_runtime = snd_soc_dai_get_dma_data(dai, substream);
+ if (!dai_runtime) {
+ dev_err(dai->dev, "failed to get dai runtime in %s\n",
__func__);
return -EIO;
}
@@ -1042,17 +1045,17 @@ static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct sn
* the .trigger callback is used to track the suspend case only.
*/

- dma->suspended = true;
+ dai_runtime->suspended = true;

ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
break;

case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- dma->paused = true;
+ dai_runtime->paused = true;
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- dma->paused = false;
+ dai_runtime->paused = false;
break;
default:
break;
@@ -1091,25 +1094,25 @@ static int intel_component_dais_suspend(struct snd_soc_component *component)
for_each_component_dais(component, dai) {
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_intel *sdw = cdns_to_intel(cdns);
- struct sdw_cdns_dma_data *dma;
+ struct sdw_cdns_dai_runtime *dai_runtime;
int stream;
int ret;

- dma = dai->playback_dma_data;
+ dai_runtime = dai->playback_dma_data;
stream = SNDRV_PCM_STREAM_PLAYBACK;
- if (!dma) {
- dma = dai->capture_dma_data;
+ if (!dai_runtime) {
+ dai_runtime = dai->capture_dma_data;
stream = SNDRV_PCM_STREAM_CAPTURE;
}

- if (!dma)
+ if (!dai_runtime)
continue;

- if (dma->suspended)
+ if (dai_runtime->suspended)
continue;

- if (dma->paused) {
- dma->suspended = true;
+ if (dai_runtime->paused) {
+ dai_runtime->suspended = true;

ret = intel_free_stream(sdw, stream, dai, sdw->instance);
if (ret < 0)
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index cee2b2223141..866026185c66 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -696,7 +696,7 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)

ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL, SWRM_MCP_BUS_CLK_START);
/* Configure number of retries of a read/write cmd */
- if (ctrl->version > 0x01050001) {
+ if (ctrl->version >= 0x01050001) {
/* Only for versions >= 1.5.1 */
ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CFG_ADDR,
SWRM_RD_WR_CMD_RETRIES |
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 976a217e356d..7e05b48dbd71 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -672,18 +672,28 @@ static int atmel_qspi_remove(struct platform_device *pdev)
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
int ret;

- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
-
spi_unregister_controller(ctrl);
- atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret >= 0) {
+ atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
+ clk_disable(aq->qspick);
+ clk_disable(aq->pclk);
+ } else {
+ /*
+ * atmel_qspi_runtime_{suspend,resume} just disable and enable
+ * the two clks respectively. So after resume failed these are
+ * off, and we skip hardware access and disabling these clks again.
+ */
+ dev_warn(&pdev->dev, "Failed to resume device on remove\n");
+ }
+
+ clk_unprepare(aq->qspick);
+ clk_unprepare(aq->pclk);

pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);

- clk_disable_unprepare(aq->qspick);
- clk_disable_unprepare(aq->pclk);
return 0;
}

@@ -752,7 +762,11 @@ static int __maybe_unused atmel_qspi_runtime_resume(struct device *dev)
if (ret)
return ret;

- return clk_enable(aq->qspick);
+ ret = clk_enable(aq->qspick);
+ if (ret)
+ clk_disable(aq->pclk);
+
+ return ret;
}

static const struct dev_pm_ops __maybe_unused atmel_qspi_pm_ops = {
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 447230547945..30fd4bc90580 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1763,32 +1763,36 @@ static int cqspi_remove(struct platform_device *pdev)
return 0;
}

-#ifdef CONFIG_PM_SLEEP
static int cqspi_suspend(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;

+ ret = spi_master_suspend(master);
cqspi_controller_enable(cqspi, 0);
- return 0;
+
+ clk_disable_unprepare(cqspi->clk);
+
+ return ret;
}

static int cqspi_resume(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);

- cqspi_controller_enable(cqspi, 1);
- return 0;
-}
+ clk_prepare_enable(cqspi->clk);
+ cqspi_wait_idle(cqspi);
+ cqspi_controller_init(cqspi);

-static const struct dev_pm_ops cqspi__dev_pm_ops = {
- .suspend = cqspi_suspend,
- .resume = cqspi_resume,
-};
+ cqspi->current_cs = -1;
+ cqspi->sclk = 0;
+
+ return spi_master_resume(master);
+}

-#define CQSPI_DEV_PM_OPS (&cqspi__dev_pm_ops)
-#else
-#define CQSPI_DEV_PM_OPS NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend, cqspi_resume);

static const struct cqspi_driver_platdata cdns_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
@@ -1855,7 +1859,7 @@ static struct platform_driver cqspi_platform_driver = {
.remove = cqspi_remove,
.driver = {
.name = CQSPI_NAME,
- .pm = CQSPI_DEV_PM_OPS,
+ .pm = &cqspi_dev_pm_ops,
.of_match_table = cqspi_dt_ids,
},
};
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 93152144fd2e..5602f052b2b5 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -181,8 +181,8 @@ static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
struct spi_device *spi,
int bits_per_word)
{
- /* QE uses Little Endian for words > 8
- * so transform all words > 8 into 8 bits
+ /* CPM/QE uses Little Endian for words > 8
+ * so transform 16 and 32 bits words into 8 bits
* Unfortnatly that doesn't work for LSB so
* reject these for now */
/* Note: 32 bits word, LSB works iff
@@ -190,9 +190,11 @@ static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
if (spi->mode & SPI_LSB_FIRST &&
bits_per_word > 8)
return -EINVAL;
- if (bits_per_word > 8)
+ if (bits_per_word <= 8)
+ return bits_per_word;
+ if (bits_per_word == 16 || bits_per_word == 32)
return 8; /* pretend its 8 bits */
- return bits_per_word;
+ return -EINVAL;
}

static int fsl_spi_setup_transfer(struct spi_device *spi,
@@ -222,7 +224,7 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
mpc8xxx_spi,
bits_per_word);
- else if (mpc8xxx_spi->flags & SPI_QE)
+ else
bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
bits_per_word);

diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index d209930069cf..fbd7b354dd36 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1864,13 +1864,11 @@ static int spi_imx_remove(struct platform_device *pdev)

spi_unregister_controller(controller);

- ret = pm_runtime_resume_and_get(spi_imx->dev);
- if (ret < 0) {
- dev_err(spi_imx->dev, "failed to enable clock\n");
- return ret;
- }
-
- writel(0, spi_imx->base + MXC_CSPICTRL);
+ ret = pm_runtime_get_sync(spi_imx->dev);
+ if (ret >= 0)
+ writel(0, spi_imx->base + MXC_CSPICTRL);
+ else
+ dev_warn(spi_imx->dev, "failed to enable clock, skip hw disable\n");

pm_runtime_dont_use_autosuspend(spi_imx->dev);
pm_runtime_put_sync(spi_imx->dev);
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 678dc51ef017..205e54f157b4 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -1277,18 +1277,22 @@ static int spi_qup_remove(struct platform_device *pdev)
struct spi_qup *controller = spi_master_get_devdata(master);
int ret;

- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
+ ret = pm_runtime_get_sync(&pdev->dev);

- ret = spi_qup_set_state(controller, QUP_STATE_RESET);
- if (ret)
- return ret;
+ if (ret >= 0) {
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to reset controller (%pe)\n",
+ ERR_PTR(ret));

- spi_qup_release_dma(master);
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+ } else {
+ dev_warn(&pdev->dev, "failed to resume, skip hw disable (%pe)\n",
+ ERR_PTR(ret));
+ }

- clk_disable_unprepare(controller->cclk);
- clk_disable_unprepare(controller->iclk);
+ spi_qup_release_dma(master);

pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 55381592bb5a..e73d3017863c 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -350,7 +350,8 @@ static void spmi_drv_remove(struct device *dev)
const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);

pm_runtime_get_sync(dev);
- sdrv->remove(to_spmi_device(dev));
+ if (sdrv->remove)
+ sdrv->remove(to_spmi_device(dev));
pm_runtime_put_noidle(dev);

pm_runtime_disable(dev);
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index e4cf42438487..636c45b12843 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -101,7 +101,7 @@ struct ad2s1210_state {
static const int ad2s1210_mode_vals[4][2] = {
[MOD_POS] = { 0, 0 },
[MOD_VEL] = { 0, 1 },
- [MOD_CONFIG] = { 1, 0 },
+ [MOD_CONFIG] = { 1, 1 },
};

static inline void ad2s1210_set_mode(enum ad2s1210_mode mode,
diff --git a/drivers/staging/media/deprecated/saa7146/av7110/av7110_av.c b/drivers/staging/media/deprecated/saa7146/av7110/av7110_av.c
index 0bf513c26b6b..a5c5bebad306 100644
--- a/drivers/staging/media/deprecated/saa7146/av7110/av7110_av.c
+++ b/drivers/staging/media/deprecated/saa7146/av7110/av7110_av.c
@@ -823,10 +823,10 @@ static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, s
av7110_ipack_flush(ipack);

if (buf[3] & ADAPT_FIELD) {
+ if (buf[4] > len - 1 - 4)
+ return 0;
len -= buf[4] + 1;
buf += buf[4] + 1;
- if (!len)
- return 0;
}

av7110_ipack_instant_repack(buf + 4, len - 4, ipack);
diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
index 7bab7586918c..82806f198074 100644
--- a/drivers/staging/media/rkvdec/rkvdec.c
+++ b/drivers/staging/media/rkvdec/rkvdec.c
@@ -1066,6 +1066,8 @@ static int rkvdec_remove(struct platform_device *pdev)
{
struct rkvdec_dev *rkvdec = platform_get_drvdata(pdev);

+ cancel_delayed_work_sync(&rkvdec->watchdog_work);
+
rkvdec_v4l2_cleanup(rkvdec);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 55c54dfdc585..d2419319afd7 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -541,6 +541,7 @@ static int cedrus_remove(struct platform_device *pdev)
{
struct cedrus_dev *dev = platform_get_drvdata(pdev);

+ cancel_delayed_work_sync(&dev->watchdog_work);
if (media_devnode_is_registered(dev->mdev.devnode)) {
media_device_unregister(&dev->mdev);
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 89bc989cffba..c1e50084172d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -717,6 +717,7 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
else
netif_wake_queue(dev);

+ priv->bfirst_after_down = false;
return 0;
}

diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index 6498fd17e1d3..8159bb651c44 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -1549,7 +1549,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)
if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
return;

- spin_lock_irq(&pmlmepriv->lock);
+ spin_lock_bh(&pmlmepriv->lock);

if (rtw_to_roam(adapter) > 0) { /* join timeout caused by roaming */
while (1) {
@@ -1577,7 +1577,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)

}

- spin_unlock_irq(&pmlmepriv->lock);
+ spin_unlock_bh(&pmlmepriv->lock);
}

/*
@@ -1590,11 +1590,11 @@ void rtw_scan_timeout_handler(struct timer_list *t)
mlmepriv.scan_to_timer);
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;

- spin_lock_irq(&pmlmepriv->lock);
+ spin_lock_bh(&pmlmepriv->lock);

_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);

- spin_unlock_irq(&pmlmepriv->lock);
+ spin_unlock_bh(&pmlmepriv->lock);

rtw_indicate_scan_done(adapter, true);
}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index baf4da7bb3b4..3f7a9f7f5f4e 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1190,9 +1190,10 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
- conn->sess->se_sess, be32_to_cpu(hdr->data_length),
- cmd->data_direction, sam_task_attr,
- cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun));
+ conn->sess->se_sess, be32_to_cpu(hdr->data_length),
+ cmd->data_direction, sam_task_attr,
+ cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun),
+ conn->cmd_cnt);

pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
@@ -2055,7 +2056,8 @@ iscsit_handle_task_mgt_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
conn->sess->se_sess, 0, DMA_NONE,
TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
- scsilun_to_int(&hdr->lun));
+ scsilun_to_int(&hdr->lun),
+ conn->cmd_cnt);

target_get_sess_cmd(&cmd->se_cmd, true);

@@ -4218,9 +4220,12 @@ static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
struct se_cmd *se_cmd = &cmd->se_cmd;

- if (se_cmd->se_tfo != NULL) {
- spin_lock_irq(&se_cmd->t_state_lock);
- if (se_cmd->transport_state & CMD_T_ABORTED) {
+ if (!se_cmd->se_tfo)
+ continue;
+
+ spin_lock_irq(&se_cmd->t_state_lock);
+ if (se_cmd->transport_state & CMD_T_ABORTED) {
+ if (!(se_cmd->transport_state & CMD_T_TAS))
/*
* LIO's abort path owns the cleanup for this,
* so put it back on the list and let
@@ -4228,11 +4233,10 @@ static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
*/
list_move_tail(&cmd->i_conn_node,
&conn->conn_cmd_list);
- } else {
- se_cmd->transport_state |= CMD_T_FABRIC_STOP;
- }
- spin_unlock_irq(&se_cmd->t_state_lock);
+ } else {
+ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
}
+ spin_unlock_irq(&se_cmd->t_state_lock);
}
spin_unlock_bh(&conn->cmd_lock);

@@ -4243,6 +4247,16 @@ static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
iscsit_free_cmd(cmd, true);

}
+
+ /*
+ * Wait on commands that were cleaned up via the aborted_task path.
+ * LLDs that implement iscsit_wait_conn will already have waited for
+ * commands.
+ */
+ if (!conn->conn_transport->iscsit_wait_conn) {
+ target_stop_cmd_counter(conn->cmd_cnt);
+ target_wait_for_cmds(conn->cmd_cnt);
+ }
}

static void iscsit_stop_timers_for_cmds(
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 27e448c2d066..274bdd7845ca 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1147,8 +1147,14 @@ static struct iscsit_conn *iscsit_alloc_conn(struct iscsi_np *np)
goto free_conn_cpumask;
}

+ conn->cmd_cnt = target_alloc_cmd_counter();
+ if (!conn->cmd_cnt)
+ goto free_conn_allowed_cpumask;
+
return conn;

+free_conn_allowed_cpumask:
+ free_cpumask_var(conn->allowed_cpumask);
free_conn_cpumask:
free_cpumask_var(conn->conn_cpumask);
free_conn_ops:
@@ -1162,6 +1168,7 @@ static struct iscsit_conn *iscsit_alloc_conn(struct iscsi_np *np)

void iscsit_free_conn(struct iscsit_conn *conn)
{
+ target_free_cmd_counter(conn->cmd_cnt);
free_cpumask_var(conn->allowed_cpumask);
free_cpumask_var(conn->conn_cpumask);
kfree(conn->conn_ops);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index cb4f7cc02f8f..d21f88de197c 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -782,6 +782,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
spin_lock_init(&dev->t10_alua.lba_map_lock);

INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
+ mutex_init(&dev->lun_reset_mutex);

dev->t10_wwn.t10_dev = dev;
/*
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 38a6d08f75b3..85e35cf582e5 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -138,7 +138,6 @@ int init_se_kmem_caches(void);
void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
-void transport_uninit_session(struct se_session *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 2b95b4550a63..4718db628222 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -188,14 +188,23 @@ static void core_tmr_drain_tmr_list(
* LUN_RESET tmr..
*/
spin_lock_irqsave(&dev->se_tmr_lock, flags);
- if (tmr)
- list_del_init(&tmr->tmr_list);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
+ if (tmr_p == tmr)
+ continue;
+
cmd = tmr_p->task_cmd;
if (!cmd) {
pr_err("Unable to locate struct se_cmd for TMR\n");
continue;
}
+
+ /*
+ * We only execute one LUN_RESET at a time so we can't wait
+ * on them below.
+ */
+ if (tmr_p->function == TMR_LUN_RESET)
+ continue;
+
/*
* If this function was called with a valid pr_res_key
* parameter (eg: for PROUT PREEMPT_AND_ABORT service action
@@ -379,14 +388,25 @@ int core_tmr_lun_reset(
tmr_nacl->initiatorname);
}
}
+
+
+ /*
+ * We only allow one reset or preempt and abort to execute at a time
+ * to prevent one call from claiming all the cmds causing a second
+ * call from returning while cmds it should have waited on are still
+ * running.
+ */
+ mutex_lock(&dev->lun_reset_mutex);
+
pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
dev->transport->name, tas);
-
core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
preempt_and_abort_list);

+ mutex_unlock(&dev->lun_reset_mutex);
+
/*
* Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 736847c933e5..8ebccdbd94f0 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -328,7 +328,7 @@ static void target_shutdown_sessions(struct se_node_acl *acl)
restart:
spin_lock_irqsave(&acl->nacl_sess_lock, flags);
list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
- if (atomic_read(&sess->stopped))
+ if (sess->cmd_cnt && atomic_read(&sess->cmd_cnt->stopped))
continue;

list_del_init(&sess->sess_acl_list);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 5926316252eb..86adff2a86ed 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -220,12 +220,52 @@ void transport_subsystem_check_init(void)
sub_api_initialized = 1;
}

-static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
+static void target_release_cmd_refcnt(struct percpu_ref *ref)
{
- struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
+ struct target_cmd_counter *cmd_cnt = container_of(ref,
+ typeof(*cmd_cnt),
+ refcnt);
+ wake_up(&cmd_cnt->refcnt_wq);
+}
+
+struct target_cmd_counter *target_alloc_cmd_counter(void)
+{
+ struct target_cmd_counter *cmd_cnt;
+ int rc;
+
+ cmd_cnt = kzalloc(sizeof(*cmd_cnt), GFP_KERNEL);
+ if (!cmd_cnt)
+ return NULL;
+
+ init_completion(&cmd_cnt->stop_done);
+ init_waitqueue_head(&cmd_cnt->refcnt_wq);
+ atomic_set(&cmd_cnt->stopped, 0);

- wake_up(&sess->cmd_count_wq);
+ rc = percpu_ref_init(&cmd_cnt->refcnt, target_release_cmd_refcnt, 0,
+ GFP_KERNEL);
+ if (rc)
+ goto free_cmd_cnt;
+
+ return cmd_cnt;
+
+free_cmd_cnt:
+ kfree(cmd_cnt);
+ return NULL;
}
+EXPORT_SYMBOL_GPL(target_alloc_cmd_counter);
+
+void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt)
+{
+ /*
+ * Drivers like loop do not call target_stop_session during session
+ * shutdown so we have to drop the ref taken at init time here.
+ */
+ if (!atomic_read(&cmd_cnt->stopped))
+ percpu_ref_put(&cmd_cnt->refcnt);
+
+ percpu_ref_exit(&cmd_cnt->refcnt);
+}
+EXPORT_SYMBOL_GPL(target_free_cmd_counter);

/**
* transport_init_session - initialize a session object
@@ -233,32 +273,14 @@ static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
*
* The caller must have zero-initialized @se_sess before calling this function.
*/
-int transport_init_session(struct se_session *se_sess)
+void transport_init_session(struct se_session *se_sess)
{
INIT_LIST_HEAD(&se_sess->sess_list);
INIT_LIST_HEAD(&se_sess->sess_acl_list);
spin_lock_init(&se_sess->sess_cmd_lock);
- init_waitqueue_head(&se_sess->cmd_count_wq);
- init_completion(&se_sess->stop_done);
- atomic_set(&se_sess->stopped, 0);
- return percpu_ref_init(&se_sess->cmd_count,
- target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
}
EXPORT_SYMBOL(transport_init_session);

-void transport_uninit_session(struct se_session *se_sess)
-{
- /*
- * Drivers like iscsi and loop do not call target_stop_session
- * during session shutdown so we have to drop the ref taken at init
- * time here.
- */
- if (!atomic_read(&se_sess->stopped))
- percpu_ref_put(&se_sess->cmd_count);
-
- percpu_ref_exit(&se_sess->cmd_count);
-}
-
/**
* transport_alloc_session - allocate a session object and initialize it
* @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
@@ -266,7 +288,6 @@ void transport_uninit_session(struct se_session *se_sess)
struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
- int ret;

se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
if (!se_sess) {
@@ -274,11 +295,7 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
" se_sess_cache\n");
return ERR_PTR(-ENOMEM);
}
- ret = transport_init_session(se_sess);
- if (ret < 0) {
- kmem_cache_free(se_sess_cache, se_sess);
- return ERR_PTR(ret);
- }
+ transport_init_session(se_sess);
se_sess->sup_prot_ops = sup_prot_ops;

return se_sess;
@@ -444,8 +461,13 @@ target_setup_session(struct se_portal_group *tpg,
int (*callback)(struct se_portal_group *,
struct se_session *, void *))
{
+ struct target_cmd_counter *cmd_cnt;
struct se_session *sess;
+ int rc;

+ cmd_cnt = target_alloc_cmd_counter();
+ if (!cmd_cnt)
+ return ERR_PTR(-ENOMEM);
/*
* If the fabric driver is using percpu-ida based pre allocation
* of I/O descriptor tags, go ahead and perform that setup now..
@@ -455,29 +477,36 @@ target_setup_session(struct se_portal_group *tpg,
else
sess = transport_alloc_session(prot_op);

- if (IS_ERR(sess))
- return sess;
+ if (IS_ERR(sess)) {
+ rc = PTR_ERR(sess);
+ goto free_cnt;
+ }
+ sess->cmd_cnt = cmd_cnt;

sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
(unsigned char *)initiatorname);
if (!sess->se_node_acl) {
- transport_free_session(sess);
- return ERR_PTR(-EACCES);
+ rc = -EACCES;
+ goto free_sess;
}
/*
* Go ahead and perform any remaining fabric setup that is
* required before transport_register_session().
*/
if (callback != NULL) {
- int rc = callback(tpg, sess, private);
- if (rc) {
- transport_free_session(sess);
- return ERR_PTR(rc);
- }
+ rc = callback(tpg, sess, private);
+ if (rc)
+ goto free_sess;
}

transport_register_session(tpg, sess->se_node_acl, sess, private);
return sess;
+
+free_sess:
+ transport_free_session(sess);
+free_cnt:
+ target_free_cmd_counter(cmd_cnt);
+ return ERR_PTR(rc);
}
EXPORT_SYMBOL(target_setup_session);

@@ -602,7 +631,8 @@ void transport_free_session(struct se_session *se_sess)
sbitmap_queue_free(&se_sess->sess_tag_pool);
kvfree(se_sess->sess_cmd_map);
}
- transport_uninit_session(se_sess);
+ if (se_sess->cmd_cnt)
+ target_free_cmd_counter(se_sess->cmd_cnt);
kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);
@@ -1412,14 +1442,12 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
*
* Preserves the value of @cmd->tag.
*/
-void __target_init_cmd(
- struct se_cmd *cmd,
- const struct target_core_fabric_ops *tfo,
- struct se_session *se_sess,
- u32 data_length,
- int data_direction,
- int task_attr,
- unsigned char *sense_buffer, u64 unpacked_lun)
+void __target_init_cmd(struct se_cmd *cmd,
+ const struct target_core_fabric_ops *tfo,
+ struct se_session *se_sess, u32 data_length,
+ int data_direction, int task_attr,
+ unsigned char *sense_buffer, u64 unpacked_lun,
+ struct target_cmd_counter *cmd_cnt)
{
INIT_LIST_HEAD(&cmd->se_delayed_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
@@ -1439,6 +1467,7 @@ void __target_init_cmd(
cmd->sam_task_attr = task_attr;
cmd->sense_buffer = sense_buffer;
cmd->orig_fe_lun = unpacked_lun;
+ cmd->cmd_cnt = cmd_cnt;

if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
cmd->cpuid = raw_smp_processor_id();
@@ -1658,7 +1687,8 @@ int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
* target_core_fabric_ops->queue_status() callback
*/
__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length,
- data_dir, task_attr, sense, unpacked_lun);
+ data_dir, task_attr, sense, unpacked_lun,
+ se_sess->cmd_cnt);

/*
* Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
@@ -1953,7 +1983,8 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
BUG_ON(!se_tpg);

__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
- 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
+ 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun,
+ se_sess->cmd_cnt);
/*
* FIXME: Currently expect caller to handle se_cmd->se_tmr_req
* allocation failure.
@@ -2957,7 +2988,6 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
*/
int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
{
- struct se_session *se_sess = se_cmd->se_sess;
int ret = 0;

/*
@@ -2970,9 +3000,14 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
se_cmd->se_cmd_flags |= SCF_ACK_KREF;
}

- if (!percpu_ref_tryget_live(&se_sess->cmd_count))
- ret = -ESHUTDOWN;
-
+ /*
+ * Users like xcopy do not use counters since they never do a stop
+ * and wait.
+ */
+ if (se_cmd->cmd_cnt) {
+ if (!percpu_ref_tryget_live(&se_cmd->cmd_cnt->refcnt))
+ ret = -ESHUTDOWN;
+ }
if (ret && ack_kref)
target_put_sess_cmd(se_cmd);

@@ -2993,7 +3028,7 @@ static void target_free_cmd_mem(struct se_cmd *cmd)
static void target_release_cmd_kref(struct kref *kref)
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
- struct se_session *se_sess = se_cmd->se_sess;
+ struct target_cmd_counter *cmd_cnt = se_cmd->cmd_cnt;
struct completion *free_compl = se_cmd->free_compl;
struct completion *abrt_compl = se_cmd->abrt_compl;

@@ -3004,7 +3039,8 @@ static void target_release_cmd_kref(struct kref *kref)
if (abrt_compl)
complete(abrt_compl);

- percpu_ref_put(&se_sess->cmd_count);
+ if (cmd_cnt)
+ percpu_ref_put(&cmd_cnt->refcnt);
}

/**
@@ -3123,46 +3159,67 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd)
}
EXPORT_SYMBOL(target_show_cmd);

-static void target_stop_session_confirm(struct percpu_ref *ref)
+static void target_stop_cmd_counter_confirm(struct percpu_ref *ref)
+{
+ struct target_cmd_counter *cmd_cnt = container_of(ref,
+ struct target_cmd_counter,
+ refcnt);
+ complete_all(&cmd_cnt->stop_done);
+}
+
+/**
+ * target_stop_cmd_counter - Stop new IO from being added to the counter.
+ * @cmd_cnt: counter to stop
+ */
+void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt)
{
- struct se_session *se_sess = container_of(ref, struct se_session,
- cmd_count);
- complete_all(&se_sess->stop_done);
+ pr_debug("Stopping command counter.\n");
+ if (!atomic_cmpxchg(&cmd_cnt->stopped, 0, 1))
+ percpu_ref_kill_and_confirm(&cmd_cnt->refcnt,
+ target_stop_cmd_counter_confirm);
}
+EXPORT_SYMBOL_GPL(target_stop_cmd_counter);

/**
* target_stop_session - Stop new IO from being queued on the session.
- * @se_sess: session to stop
+ * @se_sess: session to stop
*/
void target_stop_session(struct se_session *se_sess)
{
- pr_debug("Stopping session queue.\n");
- if (atomic_cmpxchg(&se_sess->stopped, 0, 1) == 0)
- percpu_ref_kill_and_confirm(&se_sess->cmd_count,
- target_stop_session_confirm);
+ target_stop_cmd_counter(se_sess->cmd_cnt);
}
EXPORT_SYMBOL(target_stop_session);

/**
- * target_wait_for_sess_cmds - Wait for outstanding commands
- * @se_sess: session to wait for active I/O
+ * target_wait_for_cmds - Wait for outstanding cmds.
+ * @cmd_cnt: counter to wait for active I/O for.
*/
-void target_wait_for_sess_cmds(struct se_session *se_sess)
+void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt)
{
int ret;

- WARN_ON_ONCE(!atomic_read(&se_sess->stopped));
+ WARN_ON_ONCE(!atomic_read(&cmd_cnt->stopped));

do {
pr_debug("Waiting for running cmds to complete.\n");
- ret = wait_event_timeout(se_sess->cmd_count_wq,
- percpu_ref_is_zero(&se_sess->cmd_count),
- 180 * HZ);
+ ret = wait_event_timeout(cmd_cnt->refcnt_wq,
+ percpu_ref_is_zero(&cmd_cnt->refcnt),
+ 180 * HZ);
} while (ret <= 0);

- wait_for_completion(&se_sess->stop_done);
+ wait_for_completion(&cmd_cnt->stop_done);
pr_debug("Waiting for cmds done.\n");
}
+EXPORT_SYMBOL_GPL(target_wait_for_cmds);
+
+/**
+ * target_wait_for_sess_cmds - Wait for outstanding commands
+ * @se_sess: session to wait for active I/O
+ */
+void target_wait_for_sess_cmds(struct se_session *se_sess)
+{
+ target_wait_for_cmds(se_sess->cmd_cnt);
+}
EXPORT_SYMBOL(target_wait_for_sess_cmds);

/*
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 8713cda0c2fb..d2900d375151 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -461,8 +461,6 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {

int target_xcopy_setup_pt(void)
{
- int ret;
-
xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
if (!xcopy_wq) {
pr_err("Unable to allocate xcopy_wq\n");
@@ -479,9 +477,7 @@ int target_xcopy_setup_pt(void)
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
- ret = transport_init_session(&xcopy_pt_sess);
- if (ret < 0)
- goto destroy_wq;
+ transport_init_session(&xcopy_pt_sess);

xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
@@ -490,19 +486,12 @@ int target_xcopy_setup_pt(void)
xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;

return 0;
-
-destroy_wq:
- destroy_workqueue(xcopy_wq);
- xcopy_wq = NULL;
- return ret;
}

void target_xcopy_release_pt(void)
{
- if (xcopy_wq) {
+ if (xcopy_wq)
destroy_workqueue(xcopy_wq);
- transport_uninit_session(&xcopy_pt_sess);
- }
}

/*
@@ -582,11 +571,11 @@ static int target_xcopy_read_source(
struct xcopy_op *xop,
struct se_device *src_dev,
sector_t src_lba,
- u32 src_sectors)
+ u32 src_bytes)
{
struct xcopy_pt_cmd xpt_cmd;
struct se_cmd *se_cmd = &xpt_cmd.se_cmd;
- u32 length = (src_sectors * src_dev->dev_attrib.block_size);
+ u32 transfer_length_block = src_bytes / src_dev->dev_attrib.block_size;
int rc;
unsigned char cdb[16];
bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);
@@ -597,13 +586,13 @@ static int target_xcopy_read_source(
memset(&cdb[0], 0, 16);
cdb[0] = READ_16;
put_unaligned_be64(src_lba, &cdb[2]);
- put_unaligned_be32(src_sectors, &cdb[10]);
- pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
- (unsigned long long)src_lba, src_sectors, length);
-
- __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
- DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
+ put_unaligned_be32(transfer_length_block, &cdb[10]);
+ pr_debug("XCOPY: Built READ_16: LBA: %llu Blocks: %u Length: %u\n",
+ (unsigned long long)src_lba, transfer_length_block, src_bytes);

+ __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, src_bytes,
+ DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0,
+ NULL);
rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0],
remote_port);
if (rc < 0) {
@@ -627,11 +616,11 @@ static int target_xcopy_write_destination(
struct xcopy_op *xop,
struct se_device *dst_dev,
sector_t dst_lba,
- u32 dst_sectors)
+ u32 dst_bytes)
{
struct xcopy_pt_cmd xpt_cmd;
struct se_cmd *se_cmd = &xpt_cmd.se_cmd;
- u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
+ u32 transfer_length_block = dst_bytes / dst_dev->dev_attrib.block_size;
int rc;
unsigned char cdb[16];
bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);
@@ -642,13 +631,13 @@ static int target_xcopy_write_destination(
memset(&cdb[0], 0, 16);
cdb[0] = WRITE_16;
put_unaligned_be64(dst_lba, &cdb[2]);
- put_unaligned_be32(dst_sectors, &cdb[10]);
- pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
- (unsigned long long)dst_lba, dst_sectors, length);
-
- __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
- DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
+ put_unaligned_be32(transfer_length_block, &cdb[10]);
+ pr_debug("XCOPY: Built WRITE_16: LBA: %llu Blocks: %u Length: %u\n",
+ (unsigned long long)dst_lba, transfer_length_block, dst_bytes);

+ __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, dst_bytes,
+ DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0,
+ NULL);
rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0],
remote_port);
if (rc < 0) {
@@ -670,9 +659,10 @@ static void target_xcopy_do_work(struct work_struct *work)
struct se_cmd *ec_cmd = xop->xop_se_cmd;
struct se_device *src_dev, *dst_dev;
sector_t src_lba, dst_lba, end_lba;
- unsigned int max_sectors;
+ unsigned long long max_bytes, max_bytes_src, max_bytes_dst, max_blocks;
int rc = 0;
- unsigned short nolb, max_nolb, copied_nolb = 0;
+ unsigned short nolb;
+ unsigned int copied_bytes = 0;
sense_reason_t sense_rc;

sense_rc = target_parse_xcopy_cmd(xop);
@@ -691,23 +681,31 @@ static void target_xcopy_do_work(struct work_struct *work)
nolb = xop->nolb;
end_lba = src_lba + nolb;
/*
- * Break up XCOPY I/O into hw_max_sectors sized I/O based on the
- * smallest max_sectors between src_dev + dev_dev, or
+ * Break up XCOPY I/O into hw_max_sectors * hw_block_size sized
+ * I/O based on the smallest max_bytes between src_dev + dst_dev
*/
- max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
- dst_dev->dev_attrib.hw_max_sectors);
- max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);
+ max_bytes_src = (unsigned long long) src_dev->dev_attrib.hw_max_sectors *
+ src_dev->dev_attrib.hw_block_size;
+ max_bytes_dst = (unsigned long long) dst_dev->dev_attrib.hw_max_sectors *
+ dst_dev->dev_attrib.hw_block_size;
+
+ max_bytes = min_t(u64, max_bytes_src, max_bytes_dst);
+ max_bytes = min_t(u64, max_bytes, XCOPY_MAX_BYTES);

- max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));
+ /*
+ * Using shift instead of the division because otherwise GCC
+ * generates __udivdi3 that is missing on i386
+ */
+ max_blocks = max_bytes >> ilog2(src_dev->dev_attrib.block_size);

- pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
- nolb, max_nolb, (unsigned long long)end_lba);
- pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
+ pr_debug("%s: nolb: %u, max_blocks: %llu end_lba: %llu\n", __func__,
+ nolb, max_blocks, (unsigned long long)end_lba);
+ pr_debug("%s: Starting src_lba: %llu, dst_lba: %llu\n", __func__,
(unsigned long long)src_lba, (unsigned long long)dst_lba);

- while (src_lba < end_lba) {
- unsigned short cur_nolb = min(nolb, max_nolb);
- u32 cur_bytes = cur_nolb * src_dev->dev_attrib.block_size;
+ while (nolb) {
+ u32 cur_bytes = min_t(u64, max_bytes, nolb * src_dev->dev_attrib.block_size);
+ unsigned short cur_nolb = cur_bytes / src_dev->dev_attrib.block_size;

if (cur_bytes != xop->xop_data_bytes) {
/*
@@ -724,43 +722,43 @@ static void target_xcopy_do_work(struct work_struct *work)
xop->xop_data_bytes = cur_bytes;
}

- pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
- " cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);
+ pr_debug("%s: Calling read src_dev: %p src_lba: %llu, cur_nolb: %hu\n",
+ __func__, src_dev, (unsigned long long)src_lba, cur_nolb);

- rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
+ rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_bytes);
if (rc < 0)
goto out;

- src_lba += cur_nolb;
- pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
+ src_lba += cur_bytes / src_dev->dev_attrib.block_size;
+ pr_debug("%s: Incremented READ src_lba to %llu\n", __func__,
(unsigned long long)src_lba);

- pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
- " cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);
+ pr_debug("%s: Calling write dst_dev: %p dst_lba: %llu, cur_nolb: %u\n",
+ __func__, dst_dev, (unsigned long long)dst_lba, cur_nolb);

rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
- dst_lba, cur_nolb);
+ dst_lba, cur_bytes);
if (rc < 0)
goto out;

- dst_lba += cur_nolb;
- pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
+ dst_lba += cur_bytes / dst_dev->dev_attrib.block_size;
+ pr_debug("%s: Incremented WRITE dst_lba to %llu\n", __func__,
(unsigned long long)dst_lba);

- copied_nolb += cur_nolb;
- nolb -= cur_nolb;
+ copied_bytes += cur_bytes;
+ nolb -= cur_bytes / src_dev->dev_attrib.block_size;
}

xcopy_pt_undepend_remotedev(xop);
target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
kfree(xop);

- pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
+ pr_debug("%s: Final src_lba: %llu, dst_lba: %llu\n", __func__,
(unsigned long long)src_lba, (unsigned long long)dst_lba);
- pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
- copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);
+ pr_debug("%s: Blocks copied: %u, Bytes Copied: %u\n", __func__,
+ copied_bytes / dst_dev->dev_attrib.block_size, copied_bytes);

- pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
+ pr_debug("%s: Setting X-COPY GOOD status -> sending response\n", __func__);
target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
return;

@@ -776,8 +774,8 @@ static void target_xcopy_do_work(struct work_struct *work)

err_free:
kfree(xop);
- pr_warn_ratelimited("target_xcopy_do_work: rc: %d, sense: %u, XCOPY operation failed\n",
- rc, sense_rc);
+ pr_warn_ratelimited("%s: rc: %d, sense: %u, XCOPY operation failed\n",
+ __func__, rc, sense_rc);
target_complete_cmd_with_sense(ec_cmd, SAM_STAT_CHECK_CONDITION, sense_rc);
}

diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
index e5f20005179a..0aad7dc65895 100644
--- a/drivers/target/target_core_xcopy.h
+++ b/drivers/target/target_core_xcopy.h
@@ -5,7 +5,7 @@
#define XCOPY_TARGET_DESC_LEN 32
#define XCOPY_SEGMENT_DESC_LEN 28
#define XCOPY_NAA_IEEE_REGEX_LEN 16
-#define XCOPY_MAX_SECTORS 4096
+#define XCOPY_MAX_BYTES 16777216 /* 16 MB */

/*
* SPC4r37 6.4.6.1
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 8440692e3890..62f1e691659e 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -1028,7 +1028,12 @@ static int mtk_thermal_probe(struct platform_device *pdev)
return -ENODEV;
}

- auxadc_base = of_iomap(auxadc, 0);
+ auxadc_base = devm_of_iomap(&pdev->dev, auxadc, 0, NULL);
+ if (IS_ERR(auxadc_base)) {
+ of_node_put(auxadc);
+ return PTR_ERR(auxadc_base);
+ }
+
auxadc_phys_base = of_get_phys_base(auxadc);

of_node_put(auxadc);
@@ -1044,7 +1049,12 @@ static int mtk_thermal_probe(struct platform_device *pdev)
return -ENODEV;
}

- apmixed_base = of_iomap(apmixedsys, 0);
+ apmixed_base = devm_of_iomap(&pdev->dev, apmixedsys, 0, NULL);
+ if (IS_ERR(apmixed_base)) {
+ of_node_put(apmixedsys);
+ return PTR_ERR(apmixed_base);
+ }
+
apmixed_phys_base = of_get_phys_base(apmixedsys);

of_node_put(apmixedsys);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index f034723b1b40..f79cae48a8ea 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -1058,7 +1058,7 @@ void tb_port_lane_bonding_disable(struct tb_port *port);
int tb_port_wait_for_link_width(struct tb_port *port, int width,
int timeout_msec);
int tb_port_update_credits(struct tb_port *port);
-bool tb_port_is_clx_enabled(struct tb_port *port, enum tb_clx clx);
+bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx);

int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 287153d32536..1e8fe44a7099 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -365,6 +365,13 @@ static inline void serial8250_do_prepare_rx_dma(struct uart_8250_port *p)
if (dma->prepare_rx_dma)
dma->prepare_rx_dma(p);
}
+
+static inline bool serial8250_tx_dma_running(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+
+ return dma && dma->tx_running;
+}
#else
static inline int serial8250_tx_dma(struct uart_8250_port *p)
{
@@ -380,6 +387,11 @@ static inline int serial8250_request_dma(struct uart_8250_port *p)
return -1;
}
static inline void serial8250_release_dma(struct uart_8250_port *p) { }
+
+static inline bool serial8250_tx_dma_running(struct uart_8250_port *p)
+{
+ return false;
+}
#endif

static inline int ns16550a_goto_highspeed(struct uart_8250_port *up)
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index 89bfcefbea84..36e31b96ef4a 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -1016,14 +1016,16 @@ static int brcmuart_probe(struct platform_device *pdev)
/* See if a Baud clock has been specified */
baud_mux_clk = of_clk_get_by_name(np, "sw_baud");
if (IS_ERR(baud_mux_clk)) {
- if (PTR_ERR(baud_mux_clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(baud_mux_clk) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto release_dma;
+ }
dev_dbg(dev, "BAUD MUX clock not specified\n");
} else {
dev_dbg(dev, "BAUD MUX clock found\n");
ret = clk_prepare_enable(baud_mux_clk);
if (ret)
- return ret;
+ goto release_dma;
priv->baud_mux_clk = baud_mux_clk;
init_real_clk_rates(dev, priv);
clk_rate = priv->default_mux_rate;
@@ -1031,7 +1033,8 @@ static int brcmuart_probe(struct platform_device *pdev)

if (clk_rate == 0) {
dev_err(dev, "clock-frequency or clk not defined\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto release_dma;
}

dev_dbg(dev, "DMA is %senabled\n", priv->dma_enabled ? "" : "not ");
@@ -1118,7 +1121,9 @@ static int brcmuart_probe(struct platform_device *pdev)
serial8250_unregister_port(priv->line);
err:
brcmuart_free_bufs(dev, priv);
- brcmuart_arbitration(priv, 0);
+release_dma:
+ if (priv->dma_enabled)
+ brcmuart_arbitration(priv, 0);
return ret;
}

@@ -1130,7 +1135,8 @@ static int brcmuart_remove(struct platform_device *pdev)
hrtimer_cancel(&priv->hrt);
serial8250_unregister_port(priv->line);
brcmuart_free_bufs(&pdev->dev, priv);
- brcmuart_arbitration(priv, 0);
+ if (priv->dma_enabled)
+ brcmuart_arbitration(priv, 0);
return 0;
}

diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 2a3bd6918c77..b8e8a96c3eb6 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -15,6 +15,7 @@
#include <linux/moduleparam.h>
#include <linux/ioport.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <linux/console.h>
#include <linux/gpio/consumer.h>
#include <linux/sysrq.h>
@@ -1926,6 +1927,7 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
{
struct uart_8250_port *up = up_to_u8250p(port);
+ struct tty_port *tport = &port->state->port;
bool skip_rx = false;
unsigned long flags;
u16 status;
@@ -1951,6 +1953,8 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
skip_rx = true;

if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
+ if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
+ pm_wakeup_event(tport->tty->dev, 0);
if (!up->dma || handle_rx_dma(up, iir))
status = serial8250_rx_chars(up, status);
}
@@ -2010,18 +2014,19 @@ static int serial8250_tx_threshold_handle_irq(struct uart_port *port)
static unsigned int serial8250_tx_empty(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned int result = 0;
unsigned long flags;
- u16 lsr;

serial8250_rpm_get(up);

spin_lock_irqsave(&port->lock, flags);
- lsr = serial_lsr_in(up);
+ if (!serial8250_tx_dma_running(up) && uart_lsr_tx_empty(serial_lsr_in(up)))
+ result = TIOCSER_TEMT;
spin_unlock_irqrestore(&port->lock, flags);

serial8250_rpm_put(up);

- return uart_lsr_tx_empty(lsr) ? TIOCSER_TEMT : 0;
+ return result;
}

unsigned int serial8250_do_get_mctrl(struct uart_port *port)
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 48eb5fea62fd..81467e93c7d5 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1276,7 +1276,7 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
* 10ms at any baud rate.
*/
sport->rx_dma_rng_buf_len = (DMA_RX_TIMEOUT * baud / bits / 1000) * 2;
- sport->rx_dma_rng_buf_len = (1 << (fls(sport->rx_dma_rng_buf_len) - 1));
+ sport->rx_dma_rng_buf_len = (1 << fls(sport->rx_dma_rng_buf_len));
if (sport->rx_dma_rng_buf_len < 16)
sport->rx_dma_rng_buf_len = 16;

diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index fbf6e2b3161c..338cb19dec23 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -525,6 +525,11 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg)
return false;
}

+static bool max310x_reg_noinc(struct device *dev, unsigned int reg)
+{
+ return reg == MAX310X_RHR_REG;
+}
+
static int max310x_set_baud(struct uart_port *port, int baud)
{
unsigned int mode = 0, div = 0, frac = 0, c = 0, F = 0;
@@ -651,14 +656,14 @@ static void max310x_batch_write(struct uart_port *port, u8 *txbuf, unsigned int
{
struct max310x_one *one = to_max310x_port(port);

- regmap_raw_write(one->regmap, MAX310X_THR_REG, txbuf, len);
+ regmap_noinc_write(one->regmap, MAX310X_THR_REG, txbuf, len);
}

static void max310x_batch_read(struct uart_port *port, u8 *rxbuf, unsigned int len)
{
struct max310x_one *one = to_max310x_port(port);

- regmap_raw_read(one->regmap, MAX310X_RHR_REG, rxbuf, len);
+ regmap_noinc_read(one->regmap, MAX310X_RHR_REG, rxbuf, len);
}

static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
@@ -1472,6 +1477,10 @@ static struct regmap_config regcfg = {
.writeable_reg = max310x_reg_writeable,
.volatile_reg = max310x_reg_volatile,
.precious_reg = max310x_reg_precious,
+ .writeable_noinc_reg = max310x_reg_noinc,
+ .readable_noinc_reg = max310x_reg_noinc,
+ .max_raw_read = MAX310X_FIFO_SIZE,
+ .max_raw_write = MAX310X_FIFO_SIZE,
};

#ifdef CONFIG_SPI_MASTER
@@ -1557,6 +1566,10 @@ static struct regmap_config regcfg_i2c = {
.volatile_reg = max310x_reg_volatile,
.precious_reg = max310x_reg_precious,
.max_register = MAX310X_I2C_REVID_EXTREG,
+ .writeable_noinc_reg = max310x_reg_noinc,
+ .readable_noinc_reg = max310x_reg_noinc,
+ .max_raw_read = MAX310X_FIFO_SIZE,
+ .max_raw_write = MAX310X_FIFO_SIZE,
};

static const struct max310x_if_cfg max310x_i2c_if_cfg = {
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 179ee199df34..23a7ab0de444 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1552,7 +1552,7 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
goto out;

/* rs485_config requires more locking than others */
- if (cmd == TIOCGRS485)
+ if (cmd == TIOCSRS485)
down_write(&tty->termios_rwsem);

mutex_lock(&port->mutex);
@@ -1595,7 +1595,7 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
}
out_up:
mutex_unlock(&port->mutex);
- if (cmd == TIOCGRS485)
+ if (cmd == TIOCSRS485)
up_write(&tty->termios_rwsem);
out:
return ret;
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 0e6ef24419c8..28edbaf7bb32 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -690,8 +690,9 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
int ret;

if (!stm32_port->hw_flow_control &&
- port->rs485.flags & SER_RS485_ENABLED) {
- stm32_port->txdone = false;
+ port->rs485.flags & SER_RS485_ENABLED &&
+ (port->x_char ||
+ !(uart_circ_empty(xmit) || uart_tx_stopped(port)))) {
stm32_usart_tc_interrupt_disable(port);
stm32_usart_rs485_rts_enable(port);
}
diff --git a/drivers/tty/tty.h b/drivers/tty/tty.h
index 1c08c9b67b16..c5ee21912755 100644
--- a/drivers/tty/tty.h
+++ b/drivers/tty/tty.h
@@ -62,6 +62,8 @@ int __tty_check_change(struct tty_struct *tty, int sig);
int tty_check_change(struct tty_struct *tty);
void __stop_tty(struct tty_struct *tty);
void __start_tty(struct tty_struct *tty);
+void tty_write_unlock(struct tty_struct *tty);
+int tty_write_lock(struct tty_struct *tty, int ndelay);
void tty_vhangup_session(struct tty_struct *tty);
void tty_open_proc_set_tty(struct file *filp, struct tty_struct *tty);
int tty_signal_session_leader(struct tty_struct *tty, int exit_session);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 1ac6784ea1f9..8fb6c6853556 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -933,13 +933,13 @@ static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
return i;
}

-static void tty_write_unlock(struct tty_struct *tty)
+void tty_write_unlock(struct tty_struct *tty)
{
mutex_unlock(&tty->atomic_write_lock);
wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
}

-static int tty_write_lock(struct tty_struct *tty, int ndelay)
+int tty_write_lock(struct tty_struct *tty, int ndelay)
{
if (!mutex_trylock(&tty->atomic_write_lock)) {
if (ndelay)
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index ce511557b98b..ad1cf51ecd11 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -500,21 +500,42 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
tmp_termios.c_ispeed = tty_termios_input_baud_rate(&tmp_termios);
tmp_termios.c_ospeed = tty_termios_baud_rate(&tmp_termios);

- ld = tty_ldisc_ref(tty);
+ if (opt & (TERMIOS_FLUSH|TERMIOS_WAIT)) {
+retry_write_wait:
+ retval = wait_event_interruptible(tty->write_wait, !tty_chars_in_buffer(tty));
+ if (retval < 0)
+ return retval;

- if (ld != NULL) {
- if ((opt & TERMIOS_FLUSH) && ld->ops->flush_buffer)
- ld->ops->flush_buffer(tty);
- tty_ldisc_deref(ld);
- }
+ if (tty_write_lock(tty, 0) < 0)
+ goto retry_write_wait;

- if (opt & TERMIOS_WAIT) {
- tty_wait_until_sent(tty, 0);
- if (signal_pending(current))
- return -ERESTARTSYS;
- }
+ /* Racing writer? */
+ if (tty_chars_in_buffer(tty)) {
+ tty_write_unlock(tty);
+ goto retry_write_wait;
+ }

- tty_set_termios(tty, &tmp_termios);
+ ld = tty_ldisc_ref(tty);
+ if (ld != NULL) {
+ if ((opt & TERMIOS_FLUSH) && ld->ops->flush_buffer)
+ ld->ops->flush_buffer(tty);
+ tty_ldisc_deref(ld);
+ }
+
+ if ((opt & TERMIOS_WAIT) && tty->ops->wait_until_sent) {
+ tty->ops->wait_until_sent(tty, 0);
+ if (signal_pending(current)) {
+ tty_write_unlock(tty);
+ return -ERESTARTSYS;
+ }
+ }
+
+ tty_set_termios(tty, &tmp_termios);
+
+ tty_write_unlock(tty);
+ } else {
+ tty_set_termios(tty, &tmp_termios);
+ }

/* FIXME: Arguably if tmp_termios == tty->termios AND the
actual requested termios was not tmp_termios then we may
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 5abdc2b0f506..71f172ecfaab 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1101,7 +1101,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ret = ci_usb_phy_init(ci);
if (ret) {
dev_err(dev, "unable to init phy: %d\n", ret);
- return ret;
+ goto ulpi_exit;
}

ci->hw_bank.phys = res->start;
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 476b63618511..9f8c988c25cb 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1883,13 +1883,11 @@ static int dwc3_probe(struct platform_device *pdev)
spin_lock_init(&dwc->lock);
mutex_init(&dwc->mutex);

+ pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- goto err1;

pm_runtime_forbid(dev);

@@ -1954,12 +1952,10 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_free_event_buffers(dwc);

err2:
- pm_runtime_allow(&pdev->dev);
-
-err1:
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
-
+ pm_runtime_allow(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
disable_clks:
dwc3_clk_disable(dwc);
assert_reset:
@@ -1983,6 +1979,7 @@ static int dwc3_remove(struct platform_device *pdev)
dwc3_core_exit(dwc);
dwc3_ulpi_exit(dwc);

+ pm_runtime_allow(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 5997d7f943fe..d2622378ce04 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2522,29 +2522,17 @@ static int __dwc3_gadget_start(struct dwc3 *dwc);
static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
{
unsigned long flags;
+ int ret;

spin_lock_irqsave(&dwc->lock, flags);
dwc->connected = false;

/*
- * Per databook, when we want to stop the gadget, if a control transfer
- * is still in process, complete it and get the core into setup phase.
+ * Attempt to end pending SETUP status phase, and not wait for the
+ * function to do so.
*/
- if (dwc->ep0state != EP0_SETUP_PHASE) {
- int ret;
-
- if (dwc->delayed_status)
- dwc3_ep0_send_delayed_status(dwc);
-
- reinit_completion(&dwc->ep0_in_setup);
-
- spin_unlock_irqrestore(&dwc->lock, flags);
- ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
- msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
- spin_lock_irqsave(&dwc->lock, flags);
- if (ret == 0)
- dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
- }
+ if (dwc->delayed_status)
+ dwc3_ep0_send_delayed_status(dwc);

/*
* In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
@@ -2557,6 +2545,33 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
__dwc3_gadget_stop(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);

+ /*
+ * Per databook, when we want to stop the gadget, if a control transfer
+ * is still in process, complete it and get the core into setup phase.
+ * In case the host is unresponsive to a SETUP transaction, forcefully
+ * stall the transfer, and move back to the SETUP phase, so that any
+ * pending endxfers can be executed.
+ */
+ if (dwc->ep0state != EP0_SETUP_PHASE) {
+ reinit_completion(&dwc->ep0_in_setup);
+
+ ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
+ msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
+ if (ret == 0) {
+ unsigned int dir;
+
+ dev_warn(dwc->dev, "wait for SETUP phase timed out\n");
+ spin_lock_irqsave(&dwc->lock, flags);
+ dir = !!dwc->ep0_expect_in;
+ if (dwc->ep0state == EP0_DATA_PHASE)
+ dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
+ else
+ dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
+ dwc3_ep0_stall_and_restart(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ }
+ }
+
/*
* Note: if the GEVNTCOUNT indicates events in the event buffer, the
* driver needs to acknowledge them before the controller can halt.
@@ -4237,15 +4252,8 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
break;
case DWC3_DEVICE_EVENT_SUSPEND:
/* It changed to be suspend event for version 2.30a and above */
- if (!DWC3_VER_IS_PRIOR(DWC3, 230A)) {
- /*
- * Ignore suspend event until the gadget enters into
- * USB_STATE_CONFIGURED state.
- */
- if (dwc->gadget->state >= USB_STATE_CONFIGURED)
- dwc3_gadget_suspend_interrupt(dwc,
- event->event_info);
- }
+ if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
+ dwc3_gadget_suspend_interrupt(dwc, event->event_info);
break;
case DWC3_DEVICE_EVENT_SOF:
case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 658e2e21fdd0..c21acebe8aae 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1054,7 +1054,7 @@ static void usbg_cmd_work(struct work_struct *work)
tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
cmd->prio_attr, cmd->sense_iu.sense,
- cmd->unpacked_lun);
+ cmd->unpacked_lun, NULL);
goto out;
}

@@ -1183,7 +1183,7 @@ static void bot_cmd_work(struct work_struct *work)
tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
cmd->prio_attr, cmd->sense_iu.sense,
- cmd->unpacked_lun);
+ cmd->unpacked_lun, NULL);
goto out;
}

diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index bf9878e1a72a..e85706812d61 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -37,6 +37,10 @@ static struct bus_type gadget_bus_type;
* @vbus: for udcs who care about vbus status, this value is real vbus status;
* for udcs who do not care about vbus status, this value is always true
* @started: the UDC's started state. True if the UDC had started.
+ * @connect_lock: protects udc->vbus, udc->started, gadget->connect, gadget->deactivate related
+ * functions. usb_gadget_connect_locked, usb_gadget_disconnect_locked,
+ * usb_udc_connect_control_locked, usb_gadget_udc_start_locked, usb_gadget_udc_stop_locked are
+ * called with this lock held.
*
* This represents the internal data structure which is used by the UDC-class
* to hold information about udc driver and gadget together.
@@ -48,6 +52,7 @@ struct usb_udc {
struct list_head list;
bool vbus;
bool started;
+ struct mutex connect_lock;
};

static struct class *udc_class;
@@ -660,17 +665,9 @@ int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
}
EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);

-/**
- * usb_gadget_connect - software-controlled connect to USB host
- * @gadget:the peripheral being connected
- *
- * Enables the D+ (or potentially D-) pullup. The host will start
- * enumerating this gadget when the pullup is active and a VBUS session
- * is active (the link is powered).
- *
- * Returns zero on success, else negative errno.
- */
-int usb_gadget_connect(struct usb_gadget *gadget)
+/* Internal version of usb_gadget_connect needs to be called with connect_lock held. */
+static int usb_gadget_connect_locked(struct usb_gadget *gadget)
+ __must_hold(&gadget->udc->connect_lock)
{
int ret = 0;

@@ -679,10 +676,15 @@ int usb_gadget_connect(struct usb_gadget *gadget)
goto out;
}

- if (gadget->deactivated) {
+ if (gadget->connected)
+ goto out;
+
+ if (gadget->deactivated || !gadget->udc->started) {
/*
* If gadget is deactivated we only save new state.
* Gadget will be connected automatically after activation.
+ *
+ * udc first needs to be started before gadget can be pulled up.
*/
gadget->connected = true;
goto out;
@@ -697,22 +699,32 @@ int usb_gadget_connect(struct usb_gadget *gadget)

return ret;
}
-EXPORT_SYMBOL_GPL(usb_gadget_connect);

/**
- * usb_gadget_disconnect - software-controlled disconnect from USB host
- * @gadget:the peripheral being disconnected
- *
- * Disables the D+ (or potentially D-) pullup, which the host may see
- * as a disconnect (when a VBUS session is active). Not all systems
- * support software pullup controls.
+ * usb_gadget_connect - software-controlled connect to USB host
+ * @gadget:the peripheral being connected
*
- * Following a successful disconnect, invoke the ->disconnect() callback
- * for the current gadget driver so that UDC drivers don't need to.
+ * Enables the D+ (or potentially D-) pullup. The host will start
+ * enumerating this gadget when the pullup is active and a VBUS session
+ * is active (the link is powered).
*
* Returns zero on success, else negative errno.
*/
-int usb_gadget_disconnect(struct usb_gadget *gadget)
+int usb_gadget_connect(struct usb_gadget *gadget)
+{
+ int ret;
+
+ mutex_lock(&gadget->udc->connect_lock);
+ ret = usb_gadget_connect_locked(gadget);
+ mutex_unlock(&gadget->udc->connect_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_connect);
+
+/* Internal version of usb_gadget_disconnect needs to be called with connect_lock held. */
+static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
+ __must_hold(&gadget->udc->connect_lock)
{
int ret = 0;

@@ -724,10 +736,12 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
if (!gadget->connected)
goto out;

- if (gadget->deactivated) {
+ if (gadget->deactivated || !gadget->udc->started) {
/*
* If gadget is deactivated we only save new state.
* Gadget will stay disconnected after activation.
+ *
+ * udc should have been started before gadget being pulled down.
*/
gadget->connected = false;
goto out;
@@ -747,6 +761,30 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)

return ret;
}
+
+/**
+ * usb_gadget_disconnect - software-controlled disconnect from USB host
+ * @gadget:the peripheral being disconnected
+ *
+ * Disables the D+ (or potentially D-) pullup, which the host may see
+ * as a disconnect (when a VBUS session is active). Not all systems
+ * support software pullup controls.
+ *
+ * Following a successful disconnect, invoke the ->disconnect() callback
+ * for the current gadget driver so that UDC drivers don't need to.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_disconnect(struct usb_gadget *gadget)
+{
+ int ret;
+
+ mutex_lock(&gadget->udc->connect_lock);
+ ret = usb_gadget_disconnect_locked(gadget);
+ mutex_unlock(&gadget->udc->connect_lock);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(usb_gadget_disconnect);

/**
@@ -767,10 +805,11 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
if (gadget->deactivated)
goto out;

+ mutex_lock(&gadget->udc->connect_lock);
if (gadget->connected) {
- ret = usb_gadget_disconnect(gadget);
+ ret = usb_gadget_disconnect_locked(gadget);
if (ret)
- goto out;
+ goto unlock;

/*
* If gadget was being connected before deactivation, we want
@@ -780,6 +819,8 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
}
gadget->deactivated = true;

+unlock:
+ mutex_unlock(&gadget->udc->connect_lock);
out:
trace_usb_gadget_deactivate(gadget, ret);

@@ -803,6 +844,7 @@ int usb_gadget_activate(struct usb_gadget *gadget)
if (!gadget->deactivated)
goto out;

+ mutex_lock(&gadget->udc->connect_lock);
gadget->deactivated = false;

/*
@@ -810,7 +852,8 @@ int usb_gadget_activate(struct usb_gadget *gadget)
* while it was being deactivated, we call usb_gadget_connect().
*/
if (gadget->connected)
- ret = usb_gadget_connect(gadget);
+ ret = usb_gadget_connect_locked(gadget);
+ mutex_unlock(&gadget->udc->connect_lock);

out:
trace_usb_gadget_activate(gadget, ret);
@@ -1051,12 +1094,13 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state);

/* ------------------------------------------------------------------------- */

-static void usb_udc_connect_control(struct usb_udc *udc)
+/* Acquire connect_lock before calling this function. */
+static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
{
- if (udc->vbus)
- usb_gadget_connect(udc->gadget);
+ if (udc->vbus && udc->started)
+ usb_gadget_connect_locked(udc->gadget);
else
- usb_gadget_disconnect(udc->gadget);
+ usb_gadget_disconnect_locked(udc->gadget);
}

/**
@@ -1072,10 +1116,12 @@ void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
{
struct usb_udc *udc = gadget->udc;

+ mutex_lock(&udc->connect_lock);
if (udc) {
udc->vbus = status;
- usb_udc_connect_control(udc);
+ usb_udc_connect_control_locked(udc);
}
+ mutex_unlock(&udc->connect_lock);
}
EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);

@@ -1097,7 +1143,7 @@ void usb_gadget_udc_reset(struct usb_gadget *gadget,
EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);

/**
- * usb_gadget_udc_start - tells usb device controller to start up
+ * usb_gadget_udc_start_locked - tells usb device controller to start up
* @udc: The UDC to be started
*
* This call is issued by the UDC Class driver when it's about
@@ -1108,8 +1154,11 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
* necessary to have it powered on.
*
* Returns zero on success, else negative errno.
+ *
+ * Caller should acquire connect_lock before invoking this function.
*/
-static inline int usb_gadget_udc_start(struct usb_udc *udc)
+static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
+ __must_hold(&udc->connect_lock)
{
int ret;

@@ -1126,7 +1175,7 @@ static inline int usb_gadget_udc_start(struct usb_udc *udc)
}

/**
- * usb_gadget_udc_stop - tells usb device controller we don't need it anymore
+ * usb_gadget_udc_stop_locked - tells usb device controller we don't need it anymore
* @udc: The UDC to be stopped
*
* This call is issued by the UDC Class driver after calling
@@ -1135,8 +1184,11 @@ static inline int usb_gadget_udc_start(struct usb_udc *udc)
* The details are implementation specific, but it can go as
* far as powering off UDC completely and disable its data
* line pullups.
+ *
+ * Caller should acquire connect lock before invoking this function.
*/
-static inline void usb_gadget_udc_stop(struct usb_udc *udc)
+static inline void usb_gadget_udc_stop_locked(struct usb_udc *udc)
+ __must_hold(&udc->connect_lock)
{
if (!udc->started) {
dev_err(&udc->dev, "UDC had already stopped\n");
@@ -1295,6 +1347,7 @@ int usb_add_gadget(struct usb_gadget *gadget)

udc->gadget = gadget;
gadget->udc = udc;
+ mutex_init(&udc->connect_lock);

udc->started = false;

@@ -1496,11 +1549,15 @@ static int gadget_bind_driver(struct device *dev)
if (ret)
goto err_bind;

- ret = usb_gadget_udc_start(udc);
- if (ret)
+ mutex_lock(&udc->connect_lock);
+ ret = usb_gadget_udc_start_locked(udc);
+ if (ret) {
+ mutex_unlock(&udc->connect_lock);
goto err_start;
+ }
usb_gadget_enable_async_callbacks(udc);
- usb_udc_connect_control(udc);
+ usb_udc_connect_control_locked(udc);
+ mutex_unlock(&udc->connect_lock);

kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
return 0;
@@ -1531,12 +1588,14 @@ static void gadget_unbind_driver(struct device *dev)

kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);

- usb_gadget_disconnect(gadget);
+ mutex_lock(&udc->connect_lock);
+ usb_gadget_disconnect_locked(gadget);
usb_gadget_disable_async_callbacks(udc);
if (gadget->irq)
synchronize_irq(gadget->irq);
udc->driver->unbind(gadget);
- usb_gadget_udc_stop(udc);
+ usb_gadget_udc_stop_locked(udc);
+ mutex_unlock(&udc->connect_lock);

mutex_lock(&udc_lock);
driver->is_bound = false;
@@ -1622,11 +1681,15 @@ static ssize_t soft_connect_store(struct device *dev,
}

if (sysfs_streq(buf, "connect")) {
- usb_gadget_udc_start(udc);
- usb_gadget_connect(udc->gadget);
+ mutex_lock(&udc->connect_lock);
+ usb_gadget_udc_start_locked(udc);
+ usb_gadget_connect_locked(udc->gadget);
+ mutex_unlock(&udc->connect_lock);
} else if (sysfs_streq(buf, "disconnect")) {
- usb_gadget_disconnect(udc->gadget);
- usb_gadget_udc_stop(udc);
+ mutex_lock(&udc->connect_lock);
+ usb_gadget_disconnect_locked(udc->gadget);
+ usb_gadget_udc_stop_locked(udc);
+ mutex_unlock(&udc->connect_lock);
} else {
dev_err(dev, "unsupported command '%s'\n", buf);
ret = -EINVAL;
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 615ba0a6fbee..32c9e369216c 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -2596,6 +2596,7 @@ static int renesas_usb3_remove(struct platform_device *pdev)
debugfs_remove_recursive(usb3->dentry);
device_remove_file(&pdev->dev, &dev_attr_role);

+ cancel_work_sync(&usb3->role_work);
usb_role_switch_unregister(usb3->role_sw);

usb_del_gadget_udc(&usb3->gadget);
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index 76919d7570d2..3c7ffb35c35c 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -2160,7 +2160,7 @@ static int tegra_xudc_gadget_vbus_draw(struct usb_gadget *gadget,

dev_dbg(xudc->dev, "%s: %u mA\n", __func__, m_a);

- if (xudc->curr_usbphy->chg_type == SDP_TYPE)
+ if (xudc->curr_usbphy && xudc->curr_usbphy->chg_type == SDP_TYPE)
ret = usb_phy_set_power(xudc->curr_usbphy, m_a);

return ret;
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index dc832ddf7033..bd40caeeb21c 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -133,6 +133,7 @@ static void xhci_debugfs_regset(struct xhci_hcd *xhci, u32 base,
regset->regs = regs;
regset->nregs = nregs;
regset->base = hcd->regs + base;
+ regset->dev = hcd->self.controller;

debugfs_create_regset32((const char *)rgs->name, 0444, parent, regset);
}
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index aef0258a7160..98525704be9d 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -75,7 +75,6 @@ MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V3);

/* For soc_device_attribute */
#define RCAR_XHCI_FIRMWARE_V2 BIT(0) /* FIRMWARE V2 */
-#define RCAR_XHCI_FIRMWARE_V3 BIT(1) /* FIRMWARE V3 */

static const struct soc_device_attribute rcar_quirks_match[] = {
{
@@ -147,8 +146,6 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd)

if (quirks & RCAR_XHCI_FIRMWARE_V2)
firmware_name = XHCI_RCAR_FIRMWARE_NAME_V2;
- else if (quirks & RCAR_XHCI_FIRMWARE_V3)
- firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3;
else
firmware_name = priv->firmware_name;

diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
index 2ea3157ddb6e..e65586147965 100644
--- a/drivers/usb/mtu3/mtu3_qmu.c
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -210,6 +210,7 @@ static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
return ring->enqueue;
}

+/* @dequeue may be NULL if ring is unallocated or freed */
static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
if (ring->dequeue < ring->end)
@@ -484,7 +485,7 @@ static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
__func__, epnum, gpd, gpd_current, ring->enqueue);

- while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
+ while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {

mreq = next_request(mep);

@@ -523,7 +524,7 @@ static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
__func__, epnum, gpd, gpd_current, ring->enqueue);

- while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
+ while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {

mreq = next_request(mep);

diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index b7657984dd8d..6f532da59e08 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -819,11 +819,7 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v,
if (!v->in_batch)
ops->set_map(vdpa, asid, iotlb);
}
- /* If we are in the middle of batch processing, delay the free
- * of AS until BATCH_END.
- */
- if (!v->in_batch && !iotlb->nmaps)
- vhost_vdpa_remove_as(v, asid);
+
}

static int vhost_vdpa_va_map(struct vhost_vdpa *v,
@@ -1080,8 +1076,6 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
if (v->in_batch && ops->set_map)
ops->set_map(vdpa, asid, iotlb);
v->in_batch = false;
- if (!iotlb->nmaps)
- vhost_vdpa_remove_as(v, asid);
break;
default:
r = -EINVAL;
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index a9df8ee79810..51fbf02a0343 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -514,9 +514,9 @@ static int mmphw_probe(struct platform_device *pdev)
/* get clock */
ctrl->clk = devm_clk_get(ctrl->dev, mi->clk_name);
if (IS_ERR(ctrl->clk)) {
+ ret = PTR_ERR(ctrl->clk);
dev_err_probe(ctrl->dev, ret,
"unable to get clk %s\n", mi->clk_name);
- ret = -ENOENT;
goto failed;
}
clk_prepare_enable(ctrl->clk);
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index 741d12f75726..9e172f66a8ed 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -46,7 +46,15 @@ struct snp_guest_dev {

void *certs_data;
struct snp_guest_crypto *crypto;
+ /* request and response are in unencrypted memory */
struct snp_guest_msg *request, *response;
+
+ /*
+ * Avoid information leakage by double-buffering shared messages
+ * in fields that are in regular encrypted memory.
+ */
+ struct snp_guest_msg secret_request, secret_response;
+
struct snp_secrets_page_layout *layout;
struct snp_req_data input;
u32 *os_area_msg_seqno;
@@ -268,14 +276,17 @@ static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz)
{
struct snp_guest_crypto *crypto = snp_dev->crypto;
- struct snp_guest_msg *resp = snp_dev->response;
- struct snp_guest_msg *req = snp_dev->request;
+ struct snp_guest_msg *resp = &snp_dev->secret_response;
+ struct snp_guest_msg *req = &snp_dev->secret_request;
struct snp_guest_msg_hdr *req_hdr = &req->hdr;
struct snp_guest_msg_hdr *resp_hdr = &resp->hdr;

dev_dbg(snp_dev->dev, "response [seqno %lld type %d version %d sz %d]\n",
resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version, resp_hdr->msg_sz);

+ /* Copy response from shared memory to encrypted memory. */
+ memcpy(resp, snp_dev->response, sizeof(*resp));
+
/* Verify that the sequence counter is incremented by 1 */
if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1)))
return -EBADMSG;
@@ -299,7 +310,7 @@ static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload,
static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type,
void *payload, size_t sz)
{
- struct snp_guest_msg *req = snp_dev->request;
+ struct snp_guest_msg *req = &snp_dev->secret_request;
struct snp_guest_msg_hdr *hdr = &req->hdr;

memset(req, 0, sizeof(*req));
@@ -419,13 +430,21 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
if (!seqno)
return -EIO;

+ /* Clear shared memory's response for the host to populate. */
memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));

- /* Encrypt the userspace provided payload */
+ /* Encrypt the userspace provided payload in snp_dev->secret_request. */
rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
if (rc)
return rc;

+ /*
+ * Write the fully encrypted request to the shared unencrypted
+ * request page.
+ */
+ memcpy(snp_dev->request, &snp_dev->secret_request,
+ sizeof(snp_dev->secret_request));
+
rc = __handle_guest_request(snp_dev, exit_code, fw_err);
if (rc) {
if (rc == -EIO && *fw_err == SNP_GUEST_REQ_INVALID_LEN)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 2e7689bb933b..90d514c14179 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -848,6 +848,14 @@ static void virtqueue_disable_cb_split(struct virtqueue *_vq)

if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+
+ /*
+ * If device triggered an event already it won't trigger one again:
+ * no need to disable.
+ */
+ if (vq->event_triggered)
+ return;
+
if (vq->event)
/* TODO: this is a hack. Figure out a cleaner value to write. */
vring_used_event(&vq->split.vring) = 0x0;
@@ -1687,6 +1695,14 @@ static void virtqueue_disable_cb_packed(struct virtqueue *_vq)

if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
+
+ /*
+ * If device triggered an event already it won't trigger one again:
+ * no need to disable.
+ */
+ if (vq->event_triggered)
+ return;
+
vq->packed.vring.driver->flags =
cpu_to_le16(vq->packed.event_flags_shadow);
}
@@ -2309,12 +2325,6 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);

- /* If device triggered an event already it won't trigger one again:
- * no need to disable.
- */
- if (vq->event_triggered)
- return;
-
if (vq->packed_ring)
virtqueue_disable_cb_packed(_vq);
else
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index fd3a644b0855..b3e3d1bb37f3 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -58,6 +58,7 @@ struct pcpu {
struct list_head list;
struct device dev;
uint32_t cpu_id;
+ uint32_t acpi_id;
uint32_t flags;
};

@@ -249,6 +250,7 @@ static struct pcpu *create_and_register_pcpu(struct xenpf_pcpuinfo *info)

INIT_LIST_HEAD(&pcpu->list);
pcpu->cpu_id = info->xen_cpuid;
+ pcpu->acpi_id = info->acpi_id;
pcpu->flags = info->flags;

/* Need hold on xen_pcpu_lock before pcpu list manipulations */
@@ -381,3 +383,21 @@ static int __init xen_pcpu_init(void)
return ret;
}
arch_initcall(xen_pcpu_init);
+
+#ifdef CONFIG_ACPI
+bool __init xen_processor_present(uint32_t acpi_id)
+{
+ const struct pcpu *pcpu;
+ bool online = false;
+
+ mutex_lock(&xen_pcpu_lock);
+ list_for_each_entry(pcpu, &xen_pcpus, list)
+ if (pcpu->acpi_id == acpi_id) {
+ online = pcpu->flags & XEN_PCPU_FLAGS_ONLINE;
+ break;
+ }
+ mutex_unlock(&xen_pcpu_lock);
+
+ return online;
+}
+#endif
diff --git a/fs/Makefile b/fs/Makefile
index 4dea17840761..80ab0154419e 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -6,7 +6,6 @@
# Rewritten to use lists instead of if-statements.
#

-obj-$(CONFIG_SYSCTL) += sysctls.o

obj-y := open.o read_write.o file_table.o super.o \
char_dev.o stat.o exec.o pipe.o namei.o fcntl.o \
@@ -49,7 +48,7 @@ obj-$(CONFIG_FS_MBCACHE) += mbcache.o
obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o
obj-$(CONFIG_NFS_COMMON) += nfs_common/
obj-$(CONFIG_COREDUMP) += coredump.o
-obj-$(CONFIG_SYSCTL) += drop_caches.o
+obj-$(CONFIG_SYSCTL) += drop_caches.o sysctls.o

obj-$(CONFIG_FHANDLE) += fhandle.o
obj-y += iomap/
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 104df2964225..f73b2f62afaa 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -274,6 +274,7 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
loff_t i_size;
int nr_pages, i;
int ret;
+ loff_t remote_size = 0;

_enter("");

@@ -288,6 +289,8 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)

expand:
i_size = i_size_read(&dvnode->netfs.inode);
+ if (i_size < remote_size)
+ i_size = remote_size;
if (i_size < 2048) {
ret = afs_bad(dvnode, afs_file_error_dir_small);
goto error;
@@ -363,6 +366,7 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
* buffer.
*/
up_write(&dvnode->validate_lock);
+ remote_size = req->file_size;
goto expand;
}

diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 6d3a3dbe4928..5921dd3687e3 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -230,6 +230,7 @@ static void afs_apply_status(struct afs_operation *op,
set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
}
change_size = true;
+ data_changed = true;
} else if (vnode->status.type == AFS_FTYPE_DIR) {
/* Expected directory change is handled elsewhere so
* that we can locally edit the directory and save on a
@@ -449,7 +450,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
0 : FSCACHE_ADV_SINGLE_CHUNK,
&key, sizeof(key),
&aux, sizeof(aux),
- vnode->status.size));
+ i_size_read(&vnode->netfs.inode)));
#endif
}

@@ -765,6 +766,13 @@ int afs_getattr(struct user_namespace *mnt_userns, const struct path *path,
if (test_bit(AFS_VNODE_SILLY_DELETED, &vnode->flags) &&
stat->nlink > 0)
stat->nlink -= 1;
+
+ /* Lie about the size of directories. We maintain a locally
+ * edited copy and may make different allocation decisions on
+ * it, but we need to give userspace the server's size.
+ */
+ if (S_ISDIR(inode->i_mode))
+ stat->size = vnode->netfs.remote_i_size;
} while (need_seqretry(&vnode->cb_lock, seq));

done_seqretry(&vnode->cb_lock, seq);
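
Between them, the two afs hunks above keep track of the directory size the server actually reported (remote_size / remote_i_size): the read path retries with at least that size instead of trusting a possibly stale local i_size, and getattr reports the server's size for directories. A toy sketch of the retry-with-the-larger-size loop, with made-up names and a fake fetch that reports the size it really needed:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pretend server state: the directory is really 6000 bytes. */
static const long real_remote_size = 6000;

/* Fake fetch: fails if the buffer is too small, but always reports
 * the size it actually wanted. */
static int fetch_dir(char *buf, long buflen, long *reported_size)
{
    *reported_size = real_remote_size;
    if (buflen < real_remote_size)
        return -1;
    memset(buf, 'd', (size_t)real_remote_size);
    return 0;
}

int main(void)
{
    long local_size = 2048;     /* stale local idea of the size */
    long remote_size = 0;       /* updated after a short read   */
    long used = 0;
    char *buf = NULL;
    int ret;

    for (;;) {
        used = local_size > remote_size ? local_size : remote_size;

        buf = realloc(buf, (size_t)used);
        if (!buf)
            return 1;

        ret = fetch_dir(buf, used, &remote_size);
        if (ret == 0)
            break;
        /* Too small: loop again, now sized from the server's answer. */
        printf("short read, retrying with %ld bytes\n", remote_size);
    }

    printf("directory read OK into %ld-byte buffer\n", used);
    free(buf);
    return 0;
}
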
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index fe2fb81da46b..0cebc203c4cc 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4050,6 +4050,11 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
if (IS_ERR(sa))
return PTR_ERR(sa);

+ if (sa->flags & ~BTRFS_SCRUB_SUPPORTED_FLAGS) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
ret = mnt_want_write_file(file);
if (ret)
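
The new btrfs check rejects scrub flag bits the kernel does not understand instead of silently ignoring them, which leaves room to add flags later without ambiguity. A minimal sketch of that common ioctl pattern (the flag names here are placeholders, not the btrfs definitions):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define SCRUB_READONLY        (1u << 0)
#define SCRUB_SUPPORTED_FLAGS (SCRUB_READONLY)

/* Validate user-supplied flags before acting on any of them. */
static int start_scrub(uint64_t flags)
{
    if (flags & ~(uint64_t)SCRUB_SUPPORTED_FLAGS)
        return -EOPNOTSUPP;   /* unknown bit set: refuse, don't guess */

    if (flags & SCRUB_READONLY)
        printf("scrubbing read-only\n");
    else
        printf("scrubbing read-write\n");
    return 0;
}

int main(void)
{
    printf("known flag:   %d\n", start_scrub(SCRUB_READONLY));
    printf("unknown flag: %d\n", start_scrub(1u << 7));   /* -EOPNOTSUPP */
    return 0;
}
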
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 795fd6d84bde..faf117802112 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -430,7 +430,7 @@ void ceph_reservation_status(struct ceph_fs_client *fsc,
*
* Called with i_ceph_lock held.
*/
-static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
+struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
struct ceph_cap *cap;
struct rb_node *n = ci->i_caps.rb_node;
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index bec3c4549c07..3904333fa6c3 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -248,14 +248,20 @@ static int metrics_caps_show(struct seq_file *s, void *p)
return 0;
}

-static int caps_show_cb(struct inode *inode, struct ceph_cap *cap, void *p)
+static int caps_show_cb(struct inode *inode, int mds, void *p)
{
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct seq_file *s = p;
-
- seq_printf(s, "0x%-17llx%-3d%-17s%-17s\n", ceph_ino(inode),
- cap->session->s_mds,
- ceph_cap_string(cap->issued),
- ceph_cap_string(cap->implemented));
+ struct ceph_cap *cap;
+
+ spin_lock(&ci->i_ceph_lock);
+ cap = __get_cap_for_mds(ci, mds);
+ if (cap)
+ seq_printf(s, "0x%-17llx%-3d%-17s%-17s\n", ceph_ino(inode),
+ cap->session->s_mds,
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(cap->implemented));
+ spin_unlock(&ci->i_ceph_lock);
return 0;
}

diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 27a245d959c0..54e3c2ab21d2 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1632,8 +1632,8 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
* Caller must hold session s_mutex.
*/
int ceph_iterate_session_caps(struct ceph_mds_session *session,
- int (*cb)(struct inode *, struct ceph_cap *,
- void *), void *arg)
+ int (*cb)(struct inode *, int mds, void *),
+ void *arg)
{
struct list_head *p;
struct ceph_cap *cap;
@@ -1645,6 +1645,8 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
spin_lock(&session->s_cap_lock);
p = session->s_caps.next;
while (p != &session->s_caps) {
+ int mds;
+
cap = list_entry(p, struct ceph_cap, session_caps);
inode = igrab(&cap->ci->netfs.inode);
if (!inode) {
@@ -1652,6 +1654,7 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
continue;
}
session->s_cap_iterator = cap;
+ mds = cap->mds;
spin_unlock(&session->s_cap_lock);

if (last_inode) {
@@ -1663,7 +1666,7 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
old_cap = NULL;
}

- ret = cb(inode, cap, arg);
+ ret = cb(inode, mds, arg);
last_inode = inode;

spin_lock(&session->s_cap_lock);
@@ -1696,20 +1699,25 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
return ret;
}

-static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
- void *arg)
+static int remove_session_caps_cb(struct inode *inode, int mds, void *arg)
{
struct ceph_inode_info *ci = ceph_inode(inode);
bool invalidate = false;
- int iputs;
+ struct ceph_cap *cap;
+ int iputs = 0;

- dout("removing cap %p, ci is %p, inode is %p\n",
- cap, ci, &ci->netfs.inode);
spin_lock(&ci->i_ceph_lock);
- iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
+ cap = __get_cap_for_mds(ci, mds);
+ if (cap) {
+ dout(" removing cap %p, ci is %p, inode is %p\n",
+ cap, ci, &ci->netfs.inode);
+
+ iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
+ }
spin_unlock(&ci->i_ceph_lock);

- wake_up_all(&ci->i_cap_wq);
+ if (cap)
+ wake_up_all(&ci->i_cap_wq);
if (invalidate)
ceph_queue_invalidate(inode);
while (iputs--)
@@ -1780,8 +1788,7 @@ enum {
*
* caller must hold s_mutex.
*/
-static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
- void *arg)
+static int wake_up_session_cb(struct inode *inode, int mds, void *arg)
{
struct ceph_inode_info *ci = ceph_inode(inode);
unsigned long ev = (unsigned long)arg;
@@ -1792,12 +1799,14 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
ci->i_requested_max_size = 0;
spin_unlock(&ci->i_ceph_lock);
} else if (ev == RENEWCAPS) {
- if (cap->cap_gen < atomic_read(&cap->session->s_cap_gen)) {
- /* mds did not re-issue stale cap */
- spin_lock(&ci->i_ceph_lock);
+ struct ceph_cap *cap;
+
+ spin_lock(&ci->i_ceph_lock);
+ cap = __get_cap_for_mds(ci, mds);
+ /* mds did not re-issue stale cap */
+ if (cap && cap->cap_gen < atomic_read(&cap->session->s_cap_gen))
cap->issued = cap->implemented = CEPH_CAP_PIN;
- spin_unlock(&ci->i_ceph_lock);
- }
+ spin_unlock(&ci->i_ceph_lock);
} else if (ev == FORCE_RO) {
}
wake_up_all(&ci->i_cap_wq);
@@ -1959,16 +1968,22 @@ static bool drop_negative_children(struct dentry *dentry)
* Yes, this is a bit sloppy. Our only real goal here is to respond to
* memory pressure from the MDS, though, so it needn't be perfect.
*/
-static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
+static int trim_caps_cb(struct inode *inode, int mds, void *arg)
{
int *remaining = arg;
struct ceph_inode_info *ci = ceph_inode(inode);
int used, wanted, oissued, mine;
+ struct ceph_cap *cap;

if (*remaining <= 0)
return -1;

spin_lock(&ci->i_ceph_lock);
+ cap = __get_cap_for_mds(ci, mds);
+ if (!cap) {
+ spin_unlock(&ci->i_ceph_lock);
+ return 0;
+ }
mine = cap->issued | cap->implemented;
used = __ceph_caps_used(ci);
wanted = __ceph_caps_file_wanted(ci);
@@ -3911,26 +3926,22 @@ static struct dentry* d_find_primary(struct inode *inode)
/*
* Encode information about a cap for a reconnect with the MDS.
*/
-static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
- void *arg)
+static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
{
union {
struct ceph_mds_cap_reconnect v2;
struct ceph_mds_cap_reconnect_v1 v1;
} rec;
- struct ceph_inode_info *ci = cap->ci;
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_reconnect_state *recon_state = arg;
struct ceph_pagelist *pagelist = recon_state->pagelist;
struct dentry *dentry;
+ struct ceph_cap *cap;
char *path;
- int pathlen = 0, err;
+ int pathlen = 0, err = 0;
u64 pathbase;
u64 snap_follows;

- dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
- inode, ceph_vinop(inode), cap, cap->cap_id,
- ceph_cap_string(cap->issued));
-
dentry = d_find_primary(inode);
if (dentry) {
/* set pathbase to parent dir when msg_version >= 2 */
@@ -3947,6 +3958,15 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
}

spin_lock(&ci->i_ceph_lock);
+ cap = __get_cap_for_mds(ci, mds);
+ if (!cap) {
+ spin_unlock(&ci->i_ceph_lock);
+ goto out_err;
+ }
+ dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
+ inode, ceph_vinop(inode), cap, cap->cap_id,
+ ceph_cap_string(cap->issued));
+
cap->seq = 0; /* reset cap seq */
cap->issue_seq = 0; /* and issue_seq */
cap->mseq = 0; /* and migrate_seq */
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 0598faa50e2e..18b026b1ac63 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -541,8 +541,7 @@ extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
extern void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc);
extern void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr);
extern int ceph_iterate_session_caps(struct ceph_mds_session *session,
- int (*cb)(struct inode *,
- struct ceph_cap *, void *),
+ int (*cb)(struct inode *, int mds, void *),
void *arg);
extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);

diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 3599fefa91f9..478b741b1107 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -1190,6 +1190,8 @@ extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session);
void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session,
struct ceph_inode_info *ci);
+extern struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci,
+ int mds);
extern struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci,
int mds);
extern void ceph_take_cap_refs(struct ceph_inode_info *ci, int caps,
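
The ceph changes above switch the session-cap iterator callbacks from receiving a struct ceph_cap pointer to receiving the MDS id, and each callback re-looks the cap up under i_ceph_lock; a raw pointer could go stale between the iterator dropping its lock and the callback running. A toy model of that "pass a stable key, re-look it up under the lock" shape, with invented names and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

struct cap {
    int mds;          /* which server issued this capability */
    int issued;
    struct cap *next;
};

struct inode_info {
    pthread_mutex_t lock;
    struct cap *caps; /* list protected by ->lock */
};

/* Look a cap up by mds id; caller must hold ci->lock. */
static struct cap *get_cap_for_mds(struct inode_info *ci, int mds)
{
    struct cap *c;

    for (c = ci->caps; c; c = c->next)
        if (c->mds == mds)
            return c;
    return NULL;
}

/* Callback receives the key (mds), never a raw cap pointer. */
static int show_cap_cb(struct inode_info *ci, int mds)
{
    struct cap *cap;

    pthread_mutex_lock(&ci->lock);
    cap = get_cap_for_mds(ci, mds);
    if (cap)
        printf("mds%d issued 0x%x\n", cap->mds, cap->issued);
    else
        printf("mds%d: cap already gone, nothing to do\n", mds);
    pthread_mutex_unlock(&ci->lock);
    return 0;
}

int main(void)
{
    struct cap c = { .mds = 0, .issued = 0x5 };
    struct inode_info ci = { .lock = PTHREAD_MUTEX_INITIALIZER, .caps = &c };

    show_cap_cb(&ci, 0);  /* found and printed under the lock */
    show_cap_cb(&ci, 3);  /* key no longer resolves: safely skipped */
    return 0;
}
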
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 4952a94e5272..e41154ad96af 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -279,8 +279,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "\n%d) ConnectionId: 0x%llx ",
c, server->conn_id);

+ spin_lock(&server->srv_lock);
if (server->hostname)
seq_printf(m, "Hostname: %s ", server->hostname);
+ spin_unlock(&server->srv_lock);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (!server->rdma)
goto skip_rdma;
@@ -607,10 +609,13 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
server->fastest_cmd[j],
server->slowest_cmd[j]);
for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++)
- if (atomic_read(&server->smb2slowcmd[j]))
+ if (atomic_read(&server->smb2slowcmd[j])) {
+ spin_lock(&server->srv_lock);
seq_printf(m, " %d slow responses from %s for command %d\n",
atomic_read(&server->smb2slowcmd[j]),
server->hostname, j);
+ spin_unlock(&server->srv_lock);
+ }
#endif /* STATS2 */
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index d44808263cfb..ce5cfd236fdb 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -81,19 +81,19 @@ do { \

#define cifs_server_dbg_func(ratefunc, type, fmt, ...) \
do { \
- const char *sn = ""; \
- if (server && server->hostname) \
- sn = server->hostname; \
+ spin_lock(&server->srv_lock); \
if ((type) & FYI && cifsFYI & CIFS_INFO) { \
pr_debug_ ## ratefunc("%s: \\\\%s " fmt, \
- __FILE__, sn, ##__VA_ARGS__); \
+ __FILE__, server->hostname, \
+ ##__VA_ARGS__); \
} else if ((type) & VFS) { \
pr_err_ ## ratefunc("VFS: \\\\%s " fmt, \
- sn, ##__VA_ARGS__); \
+ server->hostname, ##__VA_ARGS__); \
} else if ((type) & NOISY && (NOISY != 0)) { \
pr_debug_ ## ratefunc("\\\\%s " fmt, \
- sn, ##__VA_ARGS__); \
+ server->hostname, ##__VA_ARGS__); \
} \
+ spin_unlock(&server->srv_lock); \
} while (0)

#define cifs_server_dbg(type, fmt, ...) \
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 077c88c49dfd..21b31d1640e5 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -452,8 +452,10 @@ static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const cha
if (server->hostname != target) {
hostname = extract_hostname(target);
if (!IS_ERR(hostname)) {
+ spin_lock(&server->srv_lock);
kfree(server->hostname);
server->hostname = hostname;
+ spin_unlock(&server->srv_lock);
} else {
cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
__func__, PTR_ERR(hostname));
@@ -620,9 +622,7 @@ cifs_echo_request(struct work_struct *work)
goto requeue_echo;

rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
- if (rc)
- cifs_dbg(FYI, "Unable to send echo request to server: %s\n",
- server->hostname);
+ cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc);

/* Check witness registrations */
cifs_swn_check();
@@ -1462,6 +1462,8 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *
{
struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;

+ lockdep_assert_held(&server->srv_lock);
+
if (ctx->nosharesock)
return 0;

@@ -1863,7 +1865,9 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
if (tcon == NULL)
return -ENOMEM;

+ spin_lock(&server->srv_lock);
scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
+ spin_unlock(&server->srv_lock);

xid = get_xid();
tcon->ses = ses;
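
server->hostname can be freed and replaced during reconnect, so the cifs hunks above take srv_lock both where the pointer is swapped and wherever the string is printed. A small user-space sketch of guarding a reallocatable string with one lock on both sides (plain pthreads, invented names):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct server {
    pthread_mutex_t srv_lock;
    char *hostname;          /* may be freed and replaced at any time */
};

/* Writer: swap in a new name with the lock held. */
static void set_hostname(struct server *srv, const char *name)
{
    char *copy = strdup(name);

    if (!copy)
        return;
    pthread_mutex_lock(&srv->srv_lock);
    free(srv->hostname);
    srv->hostname = copy;
    pthread_mutex_unlock(&srv->srv_lock);
}

/* Reader: only dereference the pointer while holding the same lock. */
static void print_hostname(struct server *srv)
{
    pthread_mutex_lock(&srv->srv_lock);
    printf("hostname: %s\n", srv->hostname ? srv->hostname : "(none)");
    pthread_mutex_unlock(&srv->srv_lock);
}

int main(void)
{
    struct server srv = { .srv_lock = PTHREAD_MUTEX_INITIALIZER };

    print_hostname(&srv);
    set_hostname(&srv, "old.example.com");
    print_hostname(&srv);
    set_hostname(&srv, "new.example.com");   /* reconnect picked a new target */
    print_hostname(&srv);
    free(srv.hostname);
    return 0;
}
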
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 6f5fbbbebec3..158a0a5f4007 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -5087,6 +5087,8 @@ void cifs_oplock_break(struct work_struct *work)
struct TCP_Server_Info *server = tcon->ses->server;
int rc = 0;
bool purge_cache = false;
+ struct cifs_deferred_close *dclose;
+ bool is_deferred = false;

wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
TASK_UNINTERRUPTIBLE);
@@ -5122,6 +5124,20 @@ void cifs_oplock_break(struct work_struct *work)
cifs_dbg(VFS, "Push locks rc = %d\n", rc);

oplock_break_ack:
+ /*
+ * When oplock break is received and there are no active
+ * file handles but cached, then schedule deferred close immediately.
+ * So, new open will not use cached handle.
+ */
+ spin_lock(&CIFS_I(inode)->deferred_lock);
+ is_deferred = cifs_is_deferred_close(cfile, &dclose);
+ spin_unlock(&CIFS_I(inode)->deferred_lock);
+
+ if (!CIFS_CACHE_HANDLE(cinode) && is_deferred &&
+ cfile->deferred_close_scheduled && delayed_work_pending(&cfile->deferred)) {
+ cifs_close_deferred_file(cinode);
+ }
+
/*
* releasing stale oplock after recent reconnect of smb session using
* a now incorrect file handle is not a data integrity issue but do
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index cf19e6a81ed9..31e06133acc3 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -742,7 +742,9 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
if (delayed_work_pending(&cfile->deferred)) {
if (cancel_delayed_work(&cfile->deferred)) {
+ spin_lock(&cifs_inode->deferred_lock);
cifs_del_deferred_close(cfile);
+ spin_unlock(&cifs_inode->deferred_lock);

tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
if (tmp_list == NULL)
@@ -755,7 +757,7 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
spin_unlock(&cifs_inode->open_file_lock);

list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
- _cifsFileInfo_put(tmp_list->cfile, true, false);
+ _cifsFileInfo_put(tmp_list->cfile, false, false);
list_del(&tmp_list->list);
kfree(tmp_list);
}
@@ -773,7 +775,9 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
list_for_each_entry(cfile, &tcon->openFileList, tlist) {
if (delayed_work_pending(&cfile->deferred)) {
if (cancel_delayed_work(&cfile->deferred)) {
+ spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
cifs_del_deferred_close(cfile);
+ spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);

tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
if (tmp_list == NULL)
@@ -808,7 +812,9 @@ cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
if (strstr(full_path, path)) {
if (delayed_work_pending(&cfile->deferred)) {
if (cancel_delayed_work(&cfile->deferred)) {
+ spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
cifs_del_deferred_close(cfile);
+ spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);

tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
if (tmp_list == NULL)
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index c47b254f0d1e..81be17845072 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -159,6 +159,7 @@ cifs_chan_is_iface_active(struct cifs_ses *ses,
/* returns number of channels added */
int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
{
+ struct TCP_Server_Info *server = ses->server;
int old_chan_count, new_chan_count;
int left;
int rc = 0;
@@ -178,16 +179,16 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
return 0;
}

- if (ses->server->dialect < SMB30_PROT_ID) {
+ if (server->dialect < SMB30_PROT_ID) {
spin_unlock(&ses->chan_lock);
cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
return 0;
}

- if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+ if (!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
ses->chan_max = 1;
spin_unlock(&ses->chan_lock);
- cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
+ cifs_server_dbg(VFS, "no multichannel support\n");
return 0;
}
spin_unlock(&ses->chan_lock);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index ab59faf8a06a..537e8679900b 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -175,8 +175,17 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
}
}
spin_unlock(&tcon->tc_lock);
- if ((!tcon->ses) || (tcon->ses->ses_status == SES_EXITING) ||
- (!tcon->ses->server) || !server)
+
+ ses = tcon->ses;
+ if (!ses)
+ return -EIO;
+ spin_lock(&ses->ses_lock);
+ if (ses->ses_status == SES_EXITING) {
+ spin_unlock(&ses->ses_lock);
+ return -EIO;
+ }
+ spin_unlock(&ses->ses_lock);
+ if (!ses->server || !server)
return -EIO;

spin_lock(&server->srv_lock);
@@ -204,8 +213,6 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
if (rc)
return rc;

- ses = tcon->ses;
-
spin_lock(&ses->chan_lock);
if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
spin_unlock(&ses->chan_lock);
diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c
index cea8b14007e6..8bfb3ce86476 100644
--- a/fs/crypto/inline_crypt.c
+++ b/fs/crypto/inline_crypt.c
@@ -12,7 +12,7 @@
* provides the key and IV to use.
*/

-#include <linux/blk-crypto-profile.h>
+#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/sched/mm.h>
@@ -77,10 +77,8 @@ static void fscrypt_log_blk_crypto_impl(struct fscrypt_mode *mode,
unsigned int i;

for (i = 0; i < num_devs; i++) {
- struct request_queue *q = bdev_get_queue(devs[i]);
-
if (!IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
- __blk_crypto_cfg_supported(q->crypto_profile, cfg)) {
+ blk_crypto_config_supported_natively(devs[i], cfg)) {
if (!xchg(&mode->logged_blk_crypto_native, 1))
pr_info("fscrypt: %s using blk-crypto (native)\n",
mode->friendly_name);
@@ -139,8 +137,7 @@ int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
return PTR_ERR(devs);

for (i = 0; i < num_devs; i++) {
- if (!blk_crypto_config_supported(bdev_get_queue(devs[i]),
- &crypto_cfg))
+ if (!blk_crypto_config_supported(devs[i], &crypto_cfg))
goto out_free_devs;
}

@@ -184,8 +181,7 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
goto fail;
}
for (i = 0; i < num_devs; i++) {
- err = blk_crypto_start_using_key(blk_key,
- bdev_get_queue(devs[i]));
+ err = blk_crypto_start_using_key(devs[i], blk_key);
if (err)
break;
}
@@ -224,7 +220,7 @@ void fscrypt_destroy_inline_crypt_key(struct super_block *sb,
devs = fscrypt_get_devices(sb, &num_devs);
if (!IS_ERR(devs)) {
for (i = 0; i < num_devs; i++)
- blk_crypto_evict_key(bdev_get_queue(devs[i]), blk_key);
+ blk_crypto_evict_key(devs[i], blk_key);
kfree(devs);
}
kfree_sensitive(blk_key);
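
With the blk-crypto calls now taking a block_device directly, fscrypt simply repeats the check/start/evict calls for every device the filesystem sits on. A rough user-space model of the general "apply one key across N devices, unwind the ones that succeeded on failure" loop; the functions and the unwind below are stand-ins sketching the shape, not the real kernel calls or the exact fscrypt error path:

#include <stdio.h>

#define NUM_DEVS 3

/* Stand-ins for per-device key management; device 2 refuses the key. */
static int start_using_key(int dev, int key)
{
    if (dev == 2)
        return -1;
    printf("dev%d: key %d ready\n", dev, key);
    return 0;
}

static void evict_key(int dev, int key)
{
    printf("dev%d: key %d evicted\n", dev, key);
}

/* Prepare a key on every device, or end up with it on none of them. */
static int prepare_key_on_all(int key)
{
    int i, err = 0;

    for (i = 0; i < NUM_DEVS; i++) {
        err = start_using_key(i, key);
        if (err)
            break;
    }
    if (err) {
        /* Unwind only the devices that actually accepted the key. */
        while (i-- > 0)
            evict_key(i, key);
    }
    return err;
}

int main(void)
{
    printf("result: %d\n", prepare_key_on_all(42));
    return 0;
}
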
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index e51f27b6bde1..340bd56a5755 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -154,6 +154,7 @@ struct erofs_sb_info {

/* what we really care is nid, rather than ino.. */
erofs_nid_t root_nid;
+ erofs_nid_t packed_nid;
/* used for statfs, f_files - f_favail */
u64 inos;

@@ -310,7 +311,7 @@ struct erofs_inode {

unsigned char datalayout;
unsigned char inode_isize;
- unsigned short xattr_isize;
+ unsigned int xattr_isize;

unsigned int xattr_shared_count;
unsigned int *xattr_shared_xattrs;
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 626a615dafc2..bd8bf8fc2f5d 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -381,17 +381,7 @@ static int erofs_read_superblock(struct super_block *sb)
#endif
sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
sbi->root_nid = le16_to_cpu(dsb->root_nid);
-#ifdef CONFIG_EROFS_FS_ZIP
- sbi->packed_inode = NULL;
- if (erofs_sb_has_fragments(sbi) && dsb->packed_nid) {
- sbi->packed_inode =
- erofs_iget(sb, le64_to_cpu(dsb->packed_nid));
- if (IS_ERR(sbi->packed_inode)) {
- ret = PTR_ERR(sbi->packed_inode);
- goto out;
- }
- }
-#endif
+ sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
sbi->inos = le64_to_cpu(dsb->inos);

sbi->build_time = le64_to_cpu(dsb->build_time);
@@ -800,6 +790,16 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)

erofs_shrinker_register(sb);
/* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
+#ifdef CONFIG_EROFS_FS_ZIP
+ if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
+ sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
+ if (IS_ERR(sbi->packed_inode)) {
+ err = PTR_ERR(sbi->packed_inode);
+ sbi->packed_inode = NULL;
+ return err;
+ }
+ }
+#endif
err = erofs_init_managed_cache(sb);
if (err)
return err;
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 39cc014dba40..bb91cc649972 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -211,6 +211,10 @@ static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
if (advise & Z_EROFS_VLE_DI_PARTIAL_REF)
m->partialref = true;
m->clusterofs = le16_to_cpu(di->di_clusterofs);
+ if (m->clusterofs >= 1 << vi->z_logical_clusterbits) {
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
m->pblk = le32_to_cpu(di->di_u.blkaddr);
break;
default:
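
The added erofs check treats a clusterofs that is not strictly smaller than the logical cluster size as corruption rather than indexing with it. A tiny sketch of validating an untrusted on-disk field against a power-of-two bound (names invented; EUCLEAN stands in for the kernel's EFSCORRUPTED):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Reject an on-disk cluster offset that cannot be valid for this layout. */
static int check_clusterofs(uint16_t clusterofs, unsigned int logical_clusterbits)
{
    if (clusterofs >= (1u << logical_clusterbits))
        return -EUCLEAN;   /* corrupted metadata, don't use the value */
    return 0;
}

int main(void)
{
    /* 12-bit logical clusters: offsets 0..4095 are valid. */
    printf("ofs 4095 -> %d\n", check_clusterofs(4095, 12));  /* 0 */
    printf("ofs 4096 -> %d\n", check_clusterofs(4096, 12));  /* -EUCLEAN */
    return 0;
}
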
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 36225ef56b0c..1bb55a6d79c2 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5804,7 +5804,8 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
* mapped - no physical clusters have been allocated, and the
* file has no extents
*/
- if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
+ if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) ||
+ ext4_has_inline_data(inode))
return 0;

/* search for the extent closest to the first block in the cluster */
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index eea11ad84e68..42003b5c4cad 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3077,6 +3077,9 @@ static int ext4_da_write_end(struct file *file,
ext4_has_inline_data(inode))
return ext4_write_inline_data_end(inode, pos, len, copied, page);

+ if (unlikely(copied < len) && !PageUptodate(page))
+ copied = 0;
+
start = pos & (PAGE_SIZE - 1);
end = start + copied - 1;

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 74d3f2d2271f..b160863eca14 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -762,7 +762,12 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)

if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
ret = -EFSCORRUPTED;
- f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
+
+ /* Avoid f2fs_commit_super in irq context */
+ if (in_task)
+ f2fs_save_errors(sbi, ERROR_FAIL_DECOMPRESSION);
+ else
+ f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
goto out_release;
}

@@ -1462,6 +1467,12 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
if (!PageDirty(cc->rpages[i]))
goto continue_unlock;

+ if (PageWriteback(cc->rpages[i])) {
+ if (wbc->sync_mode == WB_SYNC_NONE)
+ goto continue_unlock;
+ f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
+ }
+
if (!clear_page_dirty_for_io(cc->rpages[i]))
goto continue_unlock;

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index f92899bfcbd5..770a606eb3f6 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -858,6 +858,8 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
bool found = false;
struct bio *target = bio ? *bio : NULL;

+ f2fs_bug_on(sbi, !target && !page);
+
for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
struct list_head *head = &io->bio_list;
@@ -2886,7 +2888,8 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,

if (unlikely(f2fs_cp_error(sbi))) {
f2fs_submit_merged_write(sbi, DATA);
- f2fs_submit_merged_ipu_write(sbi, bio, NULL);
+ if (bio && *bio)
+ f2fs_submit_merged_ipu_write(sbi, bio, NULL);
submitted = NULL;
}

diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 87664c309b3c..4b44ca1decdd 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -3569,6 +3569,7 @@ int f2fs_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason);
+void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 773b3ddc2cd7..bf37983304a3 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -2115,7 +2115,11 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
} else {
/* Reuse the already created COW inode */
- f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
+ ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
+ if (ret) {
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+ goto out;
+ }
}

f2fs_write_inode(inode, NULL);
@@ -3004,15 +3008,16 @@ int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
struct dquot *transfer_to[MAXQUOTAS] = {};
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct super_block *sb = sbi->sb;
- int err = 0;
+ int err;

transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
- if (!IS_ERR(transfer_to[PRJQUOTA])) {
- err = __dquot_transfer(inode, transfer_to);
- if (err)
- set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
- dqput(transfer_to[PRJQUOTA]);
- }
+ if (IS_ERR(transfer_to[PRJQUOTA]))
+ return PTR_ERR(transfer_to[PRJQUOTA]);
+
+ err = __dquot_transfer(inode, transfer_to);
+ if (err)
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+ dqput(transfer_to[PRJQUOTA]);
return err;
}

diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index ee6836478efe..aa928d1c8159 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1792,8 +1792,8 @@ int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
prefree_segments(sbi));

cpc.reason = __get_cp_reason(sbi);
- sbi->skipped_gc_rwsem = 0;
gc_more:
+ sbi->skipped_gc_rwsem = 0;
if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
ret = -EINVAL;
goto stop;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 8d1e8c537daf..b0fbdee16a96 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -245,10 +245,16 @@ static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
} else {
blkcnt_t count = 1;

+ err = inc_valid_block_count(sbi, inode, &count);
+ if (err) {
+ f2fs_put_dnode(&dn);
+ return err;
+ }
+
*old_addr = dn.data_blkaddr;
f2fs_truncate_data_blocks_range(&dn, 1);
dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);
- inc_valid_block_count(sbi, inode, &count);
+
f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
ni.version, true, false);
}
@@ -4916,48 +4922,6 @@ int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
return 0;
}

-static bool is_conv_zone(struct f2fs_sb_info *sbi, unsigned int zone_idx,
- unsigned int dev_idx)
-{
- if (!bdev_is_zoned(FDEV(dev_idx).bdev))
- return true;
- return !test_bit(zone_idx, FDEV(dev_idx).blkz_seq);
-}
-
-/* Return the zone index in the given device */
-static unsigned int get_zone_idx(struct f2fs_sb_info *sbi, unsigned int secno,
- int dev_idx)
-{
- block_t sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
-
- return (sec_start_blkaddr - FDEV(dev_idx).start_blk) >>
- sbi->log_blocks_per_blkz;
-}
-
-/*
- * Return the usable segments in a section based on the zone's
- * corresponding zone capacity. Zone is equal to a section.
- */
-static inline unsigned int f2fs_usable_zone_segs_in_sec(
- struct f2fs_sb_info *sbi, unsigned int segno)
-{
- unsigned int dev_idx, zone_idx;
-
- dev_idx = f2fs_target_device_index(sbi, START_BLOCK(sbi, segno));
- zone_idx = get_zone_idx(sbi, GET_SEC_FROM_SEG(sbi, segno), dev_idx);
-
- /* Conventional zone's capacity is always equal to zone size */
- if (is_conv_zone(sbi, zone_idx, dev_idx))
- return sbi->segs_per_sec;
-
- if (!sbi->unusable_blocks_per_sec)
- return sbi->segs_per_sec;
-
- /* Get the segment count beyond zone capacity block */
- return sbi->segs_per_sec - (sbi->unusable_blocks_per_sec >>
- sbi->log_blocks_per_seg);
-}
-
/*
* Return the number of usable blocks in a segment. The number of blocks
* returned is always equal to the number of blocks in a segment for
@@ -4970,23 +4934,13 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(
struct f2fs_sb_info *sbi, unsigned int segno)
{
block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
- unsigned int zone_idx, dev_idx, secno;
-
- secno = GET_SEC_FROM_SEG(sbi, segno);
- seg_start = START_BLOCK(sbi, segno);
- dev_idx = f2fs_target_device_index(sbi, seg_start);
- zone_idx = get_zone_idx(sbi, secno, dev_idx);
-
- /*
- * Conventional zone's capacity is always equal to zone size,
- * so, blocks per segment is unchanged.
- */
- if (is_conv_zone(sbi, zone_idx, dev_idx))
- return sbi->blocks_per_seg;
+ unsigned int secno;

if (!sbi->unusable_blocks_per_sec)
return sbi->blocks_per_seg;

+ secno = GET_SEC_FROM_SEG(sbi, segno);
+ seg_start = START_BLOCK(sbi, segno);
sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);

@@ -5020,11 +4974,6 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi
return 0;
}

-static inline unsigned int f2fs_usable_zone_segs_in_sec(struct f2fs_sb_info *sbi,
- unsigned int segno)
-{
- return 0;
-}
#endif
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
unsigned int segno)
@@ -5039,7 +4988,7 @@ unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
unsigned int segno)
{
if (f2fs_sb_has_blkzoned(sbi))
- return f2fs_usable_zone_segs_in_sec(sbi, segno);
+ return CAP_SEGS_PER_SEC(sbi);

return sbi->segs_per_sec;
}
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index be8f2d7d007b..cd65778fc982 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -104,6 +104,9 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
#define CAP_BLKS_PER_SEC(sbi) \
((sbi)->segs_per_sec * (sbi)->blocks_per_seg - \
(sbi)->unusable_blocks_per_sec)
+#define CAP_SEGS_PER_SEC(sbi) \
+ ((sbi)->segs_per_sec - ((sbi)->unusable_blocks_per_sec >>\
+ (sbi)->log_blocks_per_seg))
#define GET_SEC_FROM_SEG(sbi, segno) \
(((segno) == -1) ? -1: (segno) / (sbi)->segs_per_sec)
#define GET_SEG_FROM_SEC(sbi, secno) \
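
The new CAP_SEGS_PER_SEC converts unusable_blocks_per_sec into whole segments by shifting right by log_blocks_per_seg and subtracts that from segs_per_sec. For example, with 512 blocks per segment (log = 9), 64 segments per section and 1024 unusable blocks, 64 - (1024 >> 9) = 62 segments remain usable. A quick check of that arithmetic; the struct and field names below only mirror the macro text, they are not the real f2fs_sb_info:

#include <stdio.h>

struct sbi {
    unsigned int segs_per_sec;
    unsigned int blocks_per_seg;
    unsigned int log_blocks_per_seg;
    unsigned int unusable_blocks_per_sec;
};

/* Mirrors the shape of the new macro: usable segments in a section. */
static unsigned int cap_segs_per_sec(const struct sbi *s)
{
    return s->segs_per_sec - (s->unusable_blocks_per_sec >> s->log_blocks_per_seg);
}

int main(void)
{
    struct sbi s = {
        .segs_per_sec = 64,
        .blocks_per_seg = 512,
        .log_blocks_per_seg = 9,         /* 2^9 == 512 */
        .unusable_blocks_per_sec = 1024, /* two segments' worth */
    };

    printf("usable segments per section: %u\n", cap_segs_per_sec(&s)); /* 62 */
    return 0;
}
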
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 14c87399efea..5af05411818a 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -3861,7 +3861,7 @@ void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason)
f2fs_up_write(&sbi->sb_lock);
}

-static void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
+void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
{
spin_lock(&sbi->error_lock);
if (!test_bit(flag, (unsigned long *)sbi->errors)) {
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index df27afd71ef4..3d68bfa75cf2 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -550,9 +550,9 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
if (!strcmp(a->attr.name, "iostat_period_ms")) {
if (t < MIN_IOSTAT_PERIOD_MS || t > MAX_IOSTAT_PERIOD_MS)
return -EINVAL;
- spin_lock(&sbi->iostat_lock);
+ spin_lock_irq(&sbi->iostat_lock);
sbi->iostat_period_ms = (unsigned int)t;
- spin_unlock(&sbi->iostat_lock);
+ spin_unlock_irq(&sbi->iostat_lock);
return count;
}
#endif
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 15de1385012e..18611241f451 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -2387,6 +2387,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
spin_unlock(&jh->b_state_lock);
write_unlock(&journal->j_state_lock);
jbd2_journal_put_journal_head(jh);
+ /* Already zapped buffer? Nothing to do... */
+ if (!bh->b_bdev)
+ return 0;
return -EBUSY;
}
/*
diff --git a/fs/ksmbd/auth.c b/fs/ksmbd/auth.c
index cead696b656a..df8fb076f6f1 100644
--- a/fs/ksmbd/auth.c
+++ b/fs/ksmbd/auth.c
@@ -221,22 +221,22 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
{
char ntlmv2_hash[CIFS_ENCPWD_SIZE];
char ntlmv2_rsp[CIFS_HMAC_MD5_HASH_SIZE];
- struct ksmbd_crypto_ctx *ctx;
+ struct ksmbd_crypto_ctx *ctx = NULL;
char *construct = NULL;
int rc, len;

- ctx = ksmbd_crypto_ctx_find_hmacmd5();
- if (!ctx) {
- ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n");
- return -ENOMEM;
- }
-
rc = calc_ntlmv2_hash(conn, sess, ntlmv2_hash, domain_name);
if (rc) {
ksmbd_debug(AUTH, "could not get v2 hash rc %d\n", rc);
goto out;
}

+ ctx = ksmbd_crypto_ctx_find_hmacmd5();
+ if (!ctx) {
+ ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n");
+ return -ENOMEM;
+ }
+
rc = crypto_shash_setkey(CRYPTO_HMACMD5_TFM(ctx),
ntlmv2_hash,
CIFS_HMAC_MD5_HASH_SIZE);
@@ -272,6 +272,8 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
ksmbd_debug(AUTH, "Could not generate md5 hash\n");
goto out;
}
+ ksmbd_release_crypto_ctx(ctx);
+ ctx = NULL;

rc = ksmbd_gen_sess_key(sess, ntlmv2_hash, ntlmv2_rsp);
if (rc) {
@@ -282,7 +284,8 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
if (memcmp(ntlmv2->ntlmv2_hash, ntlmv2_rsp, CIFS_HMAC_MD5_HASH_SIZE) != 0)
rc = -EINVAL;
out:
- ksmbd_release_crypto_ctx(ctx);
+ if (ctx)
+ ksmbd_release_crypto_ctx(ctx);
kfree(construct);
return rc;
}
diff --git a/fs/ksmbd/mgmt/tree_connect.c b/fs/ksmbd/mgmt/tree_connect.c
index 8ce17b3fb8da..f19de20c2960 100644
--- a/fs/ksmbd/mgmt/tree_connect.c
+++ b/fs/ksmbd/mgmt/tree_connect.c
@@ -109,7 +109,15 @@ int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
unsigned int id)
{
- return xa_load(&sess->tree_conns, id);
+ struct ksmbd_tree_connect *tcon;
+
+ tcon = xa_load(&sess->tree_conns, id);
+ if (tcon) {
+ if (test_bit(TREE_CONN_EXPIRE, &tcon->status))
+ tcon = NULL;
+ }
+
+ return tcon;
}

struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
diff --git a/fs/ksmbd/mgmt/tree_connect.h b/fs/ksmbd/mgmt/tree_connect.h
index 0f97ddc1e39c..700df36cf3e3 100644
--- a/fs/ksmbd/mgmt/tree_connect.h
+++ b/fs/ksmbd/mgmt/tree_connect.h
@@ -14,6 +14,8 @@ struct ksmbd_share_config;
struct ksmbd_user;
struct ksmbd_conn;

+#define TREE_CONN_EXPIRE 1
+
struct ksmbd_tree_connect {
int id;

@@ -25,6 +27,7 @@ struct ksmbd_tree_connect {

int maximal_access;
bool posix_extensions;
+ unsigned long status;
};

struct ksmbd_tree_conn_status {
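
The new TREE_CONN_EXPIRE bit makes the first disconnect win (test_and_set_bit) and makes lookups treat an expiring tree connection as already gone. A user-space sketch of that "atomically mark dying, filter it out of lookups" idea using C11 atomics and made-up names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct tree_conn {
    int id;
    atomic_bool expiring;   /* set once, never cleared */
};

/* First caller to mark the connection returns true; later callers false. */
static bool tree_conn_expire(struct tree_conn *tc)
{
    return !atomic_exchange(&tc->expiring, true);
}

/* Lookups ignore a connection that is already on its way out. */
static struct tree_conn *tree_conn_lookup(struct tree_conn *tc, int id)
{
    if (!tc || tc->id != id)
        return NULL;
    if (atomic_load(&tc->expiring))
        return NULL;
    return tc;
}

int main(void)
{
    struct tree_conn tc = { .id = 5 };

    printf("lookup before expire: %s\n", tree_conn_lookup(&tc, 5) ? "found" : "gone");
    printf("first disconnect:  %d\n", tree_conn_expire(&tc));  /* 1 */
    printf("second disconnect: %d\n", tree_conn_expire(&tc));  /* 0 */
    printf("lookup after expire:  %s\n", tree_conn_lookup(&tc, 5) ? "found" : "gone");
    return 0;
}
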
diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
index 651d1d01234b..8c2bc513445c 100644
--- a/fs/ksmbd/server.c
+++ b/fs/ksmbd/server.c
@@ -614,6 +614,7 @@ static int __init ksmbd_server_init(void)
static void __exit ksmbd_server_exit(void)
{
ksmbd_server_shutdown();
+ rcu_barrier();
ksmbd_release_inode_hash();
}

diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 5de7b41d6404..acd66fb40c5f 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -1456,7 +1456,7 @@ static int ntlm_authenticate(struct ksmbd_work *work)
* Reuse session if anonymous try to connect
* on reauthetication.
*/
- if (ksmbd_anonymous_user(user)) {
+ if (conn->binding == false && ksmbd_anonymous_user(user)) {
ksmbd_free_user(user);
return 0;
}
@@ -1470,7 +1470,7 @@ static int ntlm_authenticate(struct ksmbd_work *work)
sess->user = user;
}

- if (user_guest(sess->user)) {
+ if (conn->binding == false && user_guest(sess->user)) {
rsp->SessionFlags = SMB2_SESSION_FLAG_IS_GUEST_LE;
} else {
struct authenticate_message *authblob;
@@ -1713,6 +1713,11 @@ int smb2_sess_setup(struct ksmbd_work *work)
goto out_err;
}

+ if (user_guest(sess->user)) {
+ rc = -EOPNOTSUPP;
+ goto out_err;
+ }
+
conn->binding = true;
} else if ((conn->dialect < SMB30_PROT_ID ||
server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
@@ -1799,6 +1804,10 @@ int smb2_sess_setup(struct ksmbd_work *work)
}
kfree(sess->Preauth_HashValue);
sess->Preauth_HashValue = NULL;
+ } else {
+ pr_info_ratelimited("Unknown NTLMSSP message type : 0x%x\n",
+ le32_to_cpu(negblob->MessageType));
+ rc = -EINVAL;
}
} else {
/* TODO: need one more negotiation */
@@ -1821,6 +1830,8 @@ int smb2_sess_setup(struct ksmbd_work *work)
rsp->hdr.Status = STATUS_NETWORK_SESSION_EXPIRED;
else if (rc == -ENOMEM)
rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
+ else if (rc == -EOPNOTSUPP)
+ rsp->hdr.Status = STATUS_NOT_SUPPORTED;
else if (rc)
rsp->hdr.Status = STATUS_LOGON_FAILURE;

@@ -2053,11 +2064,12 @@ int smb2_tree_disconnect(struct ksmbd_work *work)

ksmbd_debug(SMB, "request\n");

- if (!tcon) {
+ if (!tcon || test_and_set_bit(TREE_CONN_EXPIRE, &tcon->status)) {
struct smb2_tree_disconnect_req *req =
smb2_get_msg(work->request_buf);

ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+
rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
smb2_set_err_rsp(work);
return 0;
@@ -4912,6 +4924,9 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
int rc = 0, len;
int fs_infoclass_size = 0;

+ if (!share->path)
+ return -EIO;
+
rc = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path);
if (rc) {
pr_err("cannot create vfs path\n");
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 03087ef1c7b4..5b49e5365bb3 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -67,6 +67,8 @@

#define OPENOWNER_POOL_SIZE 8

+static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp);
+
const nfs4_stateid zero_stateid = {
{ .data = { 0 } },
.type = NFS4_SPECIAL_STATEID_TYPE,
@@ -330,6 +332,8 @@ int nfs41_init_clientid(struct nfs_client *clp, const struct cred *cred)
status = nfs4_proc_create_session(clp, cred);
if (status != 0)
goto out;
+ if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R))
+ nfs4_state_start_reclaim_reboot(clp);
nfs41_finish_session_reset(clp);
nfs_mark_client_ready(clp, NFS_CS_READY);
out:
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 798a2c1b38c6..7a8f166f2c8d 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -67,20 +67,28 @@ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,

down_read(&bmap->b_sem);
ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp);
- if (ret < 0) {
- ret = nilfs_bmap_convert_error(bmap, __func__, ret);
+ if (ret < 0)
goto out;
- }
+
if (NILFS_BMAP_USE_VBN(bmap)) {
ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp,
&blocknr);
if (!ret)
*ptrp = blocknr;
+ else if (ret == -ENOENT) {
+ /*
+ * If there was no valid entry in DAT for the block
+ * address obtained by b_ops->bop_lookup, then pass
+ * internal code -EINVAL to nilfs_bmap_convert_error
+ * to treat it as metadata corruption.
+ */
+ ret = -EINVAL;
+ }
}

out:
up_read(&bmap->b_sem);
- return ret;
+ return nilfs_bmap_convert_error(bmap, __func__, ret);
}

int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp,
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 101f2ce6ba37..209e46431a5e 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2039,6 +2039,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
int err;

+ if (sb_rdonly(sci->sc_super))
+ return -EROFS;
+
nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
sci->sc_cno = nilfs->ns_cno;

@@ -2722,7 +2725,7 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)

flush_work(&sci->sc_iput_work);

- } while (ret && retrycount-- > 0);
+ } while (ret && ret != -EROFS && retrycount-- > 0);
}

/**
diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
index c662d2a51907..00faf41d8f97 100644
--- a/fs/ntfs3/fslog.c
+++ b/fs/ntfs3/fslog.c
@@ -2575,7 +2575,7 @@ static int read_next_log_rec(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
return find_log_rec(log, *lsn, lcb);
}

-static inline bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes)
+bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes)
{
__le16 mask;
u32 min_de, de_off, used, total;
@@ -4258,6 +4258,10 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
rec_len -= t32;

attr_names = kmemdup(Add2Ptr(lrh, t32), rec_len, GFP_NOFS);
+ if (!attr_names) {
+ err = -ENOMEM;
+ goto out;
+ }

lcb_put(lcb);
lcb = NULL;
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index c27b4fe57513..98491abf95b9 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -679,9 +679,13 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
u32 e_size, e_key_len;
u32 end = le32_to_cpu(hdr->used);
u32 off = le32_to_cpu(hdr->de_off);
+ u32 total = le32_to_cpu(hdr->total);
u16 offs[128];

fill_table:
+ if (end > total)
+ return NULL;
+
if (off + sizeof(struct NTFS_DE) > end)
return NULL;

@@ -798,6 +802,10 @@ static inline struct NTFS_DE *hdr_delete_de(struct INDEX_HDR *hdr,
u32 off = PtrOffset(hdr, re);
int bytes = used - (off + esize);

+ /* check INDEX_HDR valid before using INDEX_HDR */
+ if (!check_index_header(hdr, le32_to_cpu(hdr->total)))
+ return NULL;
+
if (off >= used || esize < sizeof(struct NTFS_DE) ||
bytes < sizeof(struct NTFS_DE))
return NULL;
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index 22152300e60c..57988fedd184 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -257,7 +257,6 @@ static struct inode *ntfs_read_mft(struct inode *inode,
goto out;

root = Add2Ptr(attr, roff);
- is_root = true;

if (attr->name_len != ARRAY_SIZE(I30_NAME) ||
memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
@@ -270,6 +269,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
if (!is_dir)
goto next_attr;

+ is_root = true;
ni->ni_flags |= NI_FLAG_DIR;

err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index 2c791222c4e2..c5c022fef4e0 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -574,6 +574,7 @@ int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
bool ni_is_dirty(struct inode *inode);

/* Globals from fslog.c */
+bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes);
int log_replay(struct ntfs_inode *ni, bool *initialized);

/* Globals from fsntfs.c */
diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c
index 18cf94b597e0..d8542ec2f38c 100644
--- a/fs/pstore/pmsg.c
+++ b/fs/pstore/pmsg.c
@@ -7,10 +7,9 @@
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
-#include <linux/rtmutex.h>
#include "internal.h"

-static DEFINE_RT_MUTEX(pmsg_lock);
+static DEFINE_MUTEX(pmsg_lock);

static ssize_t write_pmsg(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
@@ -29,9 +28,9 @@ static ssize_t write_pmsg(struct file *file, const char __user *buf,
if (!access_ok(buf, count))
return -EFAULT;

- rt_mutex_lock(&pmsg_lock);
+ mutex_lock(&pmsg_lock);
ret = psinfo->write_user(&record, buf);
- rt_mutex_unlock(&pmsg_lock);
+ mutex_unlock(&pmsg_lock);
return ret ? ret : count;
}

diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
index 857a65b05726..157ebfe2456b 100644
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -82,11 +82,15 @@ int reiserfs_security_write(struct reiserfs_transaction_handle *th,
struct inode *inode,
struct reiserfs_security_handle *sec)
{
+ char xattr_name[XATTR_NAME_MAX + 1] = XATTR_SECURITY_PREFIX;
int error;
- if (strlen(sec->name) < sizeof(XATTR_SECURITY_PREFIX))
+
+ if (XATTR_SECURITY_PREFIX_LEN + strlen(sec->name) > XATTR_NAME_MAX)
return -EINVAL;

- error = reiserfs_xattr_set_handle(th, inode, sec->name, sec->value,
+ strlcat(xattr_name, sec->name, sizeof(xattr_name));
+
+ error = reiserfs_xattr_set_handle(th, inode, xattr_name, sec->value,
sec->length, XATTR_CREATE);
if (error == -ENODATA || error == -EOPNOTSUPP)
error = 0;
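
The reiserfs fix builds the full attribute name (the "security." prefix plus the suffix supplied by the LSM) in a bounded buffer and refuses combinations longer than XATTR_NAME_MAX, instead of assuming the caller's string already carries the prefix. A stand-alone sketch of that construction, using snprintf in place of the kernel's strlcat; the names and the "selinux" suffix are just examples:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define XATTR_NAME_MAX        255
#define XATTR_SECURITY_PREFIX "security."

/* Build "security.<suffix>" into name[], refusing oversized results. */
static int build_security_xattr_name(char *name, size_t size, const char *suffix)
{
    if (strlen(XATTR_SECURITY_PREFIX) + strlen(suffix) > XATTR_NAME_MAX)
        return -EINVAL;

    snprintf(name, size, "%s%s", XATTR_SECURITY_PREFIX, suffix);
    return 0;
}

int main(void)
{
    char name[XATTR_NAME_MAX + 1];

    if (build_security_xattr_name(name, sizeof(name), "selinux") == 0)
        printf("would set xattr: %s\n", name);   /* security.selinux */

    char huge[300];
    memset(huge, 'a', sizeof(huge) - 1);
    huge[sizeof(huge) - 1] = '\0';
    printf("oversized suffix -> %d\n",
           build_security_xattr_name(name, sizeof(name), huge));  /* -EINVAL */
    return 0;
}
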
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 5e6bcce94e64..66ba57a139d2 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -358,7 +358,6 @@ static struct inode *create_whiteout(struct inode *dir, struct dentry *dentry)
umode_t mode = S_IFCHR | WHITEOUT_MODE;
struct inode *inode;
struct ubifs_info *c = dir->i_sb->s_fs_info;
- struct fscrypt_name nm;

/*
* Create an inode('nlink = 1') for whiteout without updating journal,
@@ -369,10 +368,6 @@ static struct inode *create_whiteout(struct inode *dir, struct dentry *dentry)
dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
dentry, mode, dir->i_ino);

- err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
- if (err)
- return ERR_PTR(err);
-
inode = ubifs_new_inode(c, dir, mode, false);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
@@ -395,7 +390,6 @@ static struct inode *create_whiteout(struct inode *dir, struct dentry *dentry)
make_bad_inode(inode);
iput(inode);
out_free:
- fscrypt_free_filename(&nm);
ubifs_err(c, "cannot create whiteout file, error %d", err);
return ERR_PTR(err);
}
@@ -492,6 +486,7 @@ static int ubifs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
unlock_2_inodes(dir, inode);

ubifs_release_budget(c, &req);
+ fscrypt_free_filename(&nm);

return finish_open_simple(file, 0);

diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 2469f72eeaab..6b7d95b65f4b 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -44,6 +44,33 @@ enum {
NOT_ON_MEDIA = 3,
};

+static void do_insert_old_idx(struct ubifs_info *c,
+ struct ubifs_old_idx *old_idx)
+{
+ struct ubifs_old_idx *o;
+ struct rb_node **p, *parent = NULL;
+
+ p = &c->old_idx.rb_node;
+ while (*p) {
+ parent = *p;
+ o = rb_entry(parent, struct ubifs_old_idx, rb);
+ if (old_idx->lnum < o->lnum)
+ p = &(*p)->rb_left;
+ else if (old_idx->lnum > o->lnum)
+ p = &(*p)->rb_right;
+ else if (old_idx->offs < o->offs)
+ p = &(*p)->rb_left;
+ else if (old_idx->offs > o->offs)
+ p = &(*p)->rb_right;
+ else {
+ ubifs_err(c, "old idx added twice!");
+ kfree(old_idx);
+ }
+ }
+ rb_link_node(&old_idx->rb, parent, p);
+ rb_insert_color(&old_idx->rb, &c->old_idx);
+}
+
/**
* insert_old_idx - record an index node obsoleted since the last commit start.
* @c: UBIFS file-system description object
@@ -69,35 +96,15 @@ enum {
*/
static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
{
- struct ubifs_old_idx *old_idx, *o;
- struct rb_node **p, *parent = NULL;
+ struct ubifs_old_idx *old_idx;

old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
if (unlikely(!old_idx))
return -ENOMEM;
old_idx->lnum = lnum;
old_idx->offs = offs;
+ do_insert_old_idx(c, old_idx);

- p = &c->old_idx.rb_node;
- while (*p) {
- parent = *p;
- o = rb_entry(parent, struct ubifs_old_idx, rb);
- if (lnum < o->lnum)
- p = &(*p)->rb_left;
- else if (lnum > o->lnum)
- p = &(*p)->rb_right;
- else if (offs < o->offs)
- p = &(*p)->rb_left;
- else if (offs > o->offs)
- p = &(*p)->rb_right;
- else {
- ubifs_err(c, "old idx added twice!");
- kfree(old_idx);
- return 0;
- }
- }
- rb_link_node(&old_idx->rb, parent, p);
- rb_insert_color(&old_idx->rb, &c->old_idx);
return 0;
}

@@ -199,23 +206,6 @@ static struct ubifs_znode *copy_znode(struct ubifs_info *c,
__set_bit(DIRTY_ZNODE, &zn->flags);
__clear_bit(COW_ZNODE, &zn->flags);

- ubifs_assert(c, !ubifs_zn_obsolete(znode));
- __set_bit(OBSOLETE_ZNODE, &znode->flags);
-
- if (znode->level != 0) {
- int i;
- const int n = zn->child_cnt;
-
- /* The children now have new parent */
- for (i = 0; i < n; i++) {
- struct ubifs_zbranch *zbr = &zn->zbranch[i];
-
- if (zbr->znode)
- zbr->znode->parent = zn;
- }
- }
-
- atomic_long_inc(&c->dirty_zn_cnt);
return zn;
}

@@ -233,6 +223,42 @@ static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt)
return ubifs_add_dirt(c, lnum, dirt);
}

+/**
+ * replace_znode - replace old znode with new znode.
+ * @c: UBIFS file-system description object
+ * @new_zn: new znode
+ * @old_zn: old znode
+ * @zbr: the branch of parent znode
+ *
+ * Replace old znode with new znode in TNC.
+ */
+static void replace_znode(struct ubifs_info *c, struct ubifs_znode *new_zn,
+ struct ubifs_znode *old_zn, struct ubifs_zbranch *zbr)
+{
+ ubifs_assert(c, !ubifs_zn_obsolete(old_zn));
+ __set_bit(OBSOLETE_ZNODE, &old_zn->flags);
+
+ if (old_zn->level != 0) {
+ int i;
+ const int n = new_zn->child_cnt;
+
+ /* The children now have new parent */
+ for (i = 0; i < n; i++) {
+ struct ubifs_zbranch *child = &new_zn->zbranch[i];
+
+ if (child->znode)
+ child->znode->parent = new_zn;
+ }
+ }
+
+ zbr->znode = new_zn;
+ zbr->lnum = 0;
+ zbr->offs = 0;
+ zbr->len = 0;
+
+ atomic_long_inc(&c->dirty_zn_cnt);
+}
+
/**
* dirty_cow_znode - ensure a znode is not being committed.
* @c: UBIFS file-system description object
@@ -265,28 +291,32 @@ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
return zn;

if (zbr->len) {
- err = insert_old_idx(c, zbr->lnum, zbr->offs);
- if (unlikely(err))
- /*
- * Obsolete znodes will be freed by tnc_destroy_cnext()
- * or free_obsolete_znodes(), copied up znodes should
- * be added back to tnc and freed by
- * ubifs_destroy_tnc_subtree().
- */
+ struct ubifs_old_idx *old_idx;
+
+ old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
+ if (unlikely(!old_idx)) {
+ err = -ENOMEM;
goto out;
+ }
+ old_idx->lnum = zbr->lnum;
+ old_idx->offs = zbr->offs;
+
err = add_idx_dirt(c, zbr->lnum, zbr->len);
- } else
- err = 0;
+ if (err) {
+ kfree(old_idx);
+ goto out;
+ }

-out:
- zbr->znode = zn;
- zbr->lnum = 0;
- zbr->offs = 0;
- zbr->len = 0;
+ do_insert_old_idx(c, old_idx);
+ }
+
+ replace_znode(c, zn, znode, zbr);

- if (unlikely(err))
- return ERR_PTR(err);
return zn;
+
+out:
+ kfree(zn);
+ return ERR_PTR(err);
}

/**
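
The reworked dirty_cow_znode() above allocates the old-index record and charges the index dirt before anything in the tree is touched, and only then lets replace_znode() switch the branch over, so a failed allocation can no longer leave a half-updated znode behind. A compact sketch of that "do every fallible step first, commit the mutation last" ordering, with toy names:

#include <stdio.h>
#include <stdlib.h>

struct node { int value; };
struct branch { struct node *node; };

/* Fallible preparation steps, done before any shared state changes. */
static struct node *copy_node(const struct node *old)
{
    struct node *n = malloc(sizeof(*n));

    if (n)
        *n = *old;
    return n;
}

static int record_old_location(int lnum, int offs)
{
    /* Imagine this can fail with -ENOMEM as well. */
    printf("recorded old index location %d:%d\n", lnum, offs);
    return 0;
}

/* Infallible commit: only runs after everything above succeeded. */
static void replace_branch(struct branch *br, struct node *newn)
{
    br->node = newn;
}

static struct node *cow_node(struct branch *br, int lnum, int offs)
{
    struct node *newn = copy_node(br->node);

    if (!newn)
        return NULL;
    if (record_old_location(lnum, offs)) {
        free(newn);                /* nothing shared was modified yet */
        return NULL;
    }
    replace_branch(br, newn);      /* the only mutation, and it cannot fail */
    return newn;
}

int main(void)
{
    struct node orig = { .value = 7 };
    struct branch br = { .node = &orig };
    struct node *n = cow_node(&br, 3, 128);

    printf("branch now points at copy: %d (value %d)\n", br.node == n, n->value);
    free(n);
    return 0;
}
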
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index a20cade590e9..b6a584e044be 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -72,7 +72,8 @@ xfs_sb_validate_v5_features(
}

/*
- * We support all XFS versions newer than a v4 superblock with V2 directories.
+ * We current support XFS v5 formats with known features and v4 superblocks with
+ * at least V2 directories.
*/
bool
xfs_sb_good_version(
@@ -86,16 +87,16 @@ xfs_sb_good_version(
if (xfs_sb_is_v5(sbp))
return xfs_sb_validate_v5_features(sbp);

+ /* versions prior to v4 are not supported */
+ if (XFS_SB_VERSION_NUM(sbp) != XFS_SB_VERSION_4)
+ return false;
+
/* We must not have any unknown v4 feature bits set */
if ((sbp->sb_versionnum & ~XFS_SB_VERSION_OKBITS) ||
((sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT) &&
(sbp->sb_features2 & ~XFS_SB_VERSION2_OKBITS)))
return false;

- /* versions prior to v4 are not supported */
- if (XFS_SB_VERSION_NUM(sbp) < XFS_SB_VERSION_4)
- return false;
-
/* V4 filesystems need v2 directories and unwritten extents */
if (!(sbp->sb_versionnum & XFS_SB_VERSION_DIRV2BIT))
return false;
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index a68f8fbf423b..cde032f86856 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -236,7 +236,7 @@ static inline u64 readq(const volatile void __iomem *addr)

log_read_mmio(64, addr, _THIS_IP_);
__io_br();
- val = __le64_to_cpu(__raw_readq(addr));
+ val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
__io_ar(val);
log_post_read_mmio(val, 64, addr, _THIS_IP_);
return val;
@@ -287,7 +287,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
{
log_write_mmio(value, 64, addr, _THIS_IP_);
__io_bw();
- __raw_writeq(__cpu_to_le64(value), addr);
+ __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
__io_aw();
log_post_write_mmio(value, 64, addr, _THIS_IP_);
}
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 4a4c190f7698..8f648c32a965 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -706,7 +706,6 @@
INTEL_VGA_DEVICE(0x5693, info), \
INTEL_VGA_DEVICE(0x5694, info), \
INTEL_VGA_DEVICE(0x5695, info), \
- INTEL_VGA_DEVICE(0x5698, info), \
INTEL_VGA_DEVICE(0x56A5, info), \
INTEL_VGA_DEVICE(0x56A6, info), \
INTEL_VGA_DEVICE(0x56B0, info), \
diff --git a/include/linux/blk-crypto-profile.h b/include/linux/blk-crypto-profile.h
index bbab65bd5428..e6802b69cdd6 100644
--- a/include/linux/blk-crypto-profile.h
+++ b/include/linux/blk-crypto-profile.h
@@ -138,18 +138,6 @@ int devm_blk_crypto_profile_init(struct device *dev,

unsigned int blk_crypto_keyslot_index(struct blk_crypto_keyslot *slot);

-blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
- const struct blk_crypto_key *key,
- struct blk_crypto_keyslot **slot_ptr);
-
-void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot);
-
-bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
- const struct blk_crypto_config *cfg);
-
-int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
- const struct blk_crypto_key *key);
-
void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile);

void blk_crypto_profile_destroy(struct blk_crypto_profile *profile);
diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
index 69b24fe92cbf..ad17eaa192fb 100644
--- a/include/linux/blk-crypto.h
+++ b/include/linux/blk-crypto.h
@@ -71,9 +71,6 @@ struct bio_crypt_ctx {
#include <linux/blk_types.h>
#include <linux/blkdev.h>

-struct request;
-struct request_queue;
-
#ifdef CONFIG_BLK_INLINE_ENCRYPTION

static inline bool bio_has_crypt_ctx(struct bio *bio)
@@ -94,13 +91,15 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
unsigned int dun_bytes,
unsigned int data_unit_size);

-int blk_crypto_start_using_key(const struct blk_crypto_key *key,
- struct request_queue *q);
+int blk_crypto_start_using_key(struct block_device *bdev,
+ const struct blk_crypto_key *key);

-int blk_crypto_evict_key(struct request_queue *q,
- const struct blk_crypto_key *key);
+void blk_crypto_evict_key(struct block_device *bdev,
+ const struct blk_crypto_key *key);

-bool blk_crypto_config_supported(struct request_queue *q,
+bool blk_crypto_config_supported_natively(struct block_device *bdev,
+ const struct blk_crypto_config *cfg);
+bool blk_crypto_config_supported(struct block_device *bdev,
const struct blk_crypto_config *cfg);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */
diff --git a/include/linux/mailbox/zynqmp-ipi-message.h b/include/linux/mailbox/zynqmp-ipi-message.h
index 35ce84c8ca02..31d8046d945e 100644
--- a/include/linux/mailbox/zynqmp-ipi-message.h
+++ b/include/linux/mailbox/zynqmp-ipi-message.h
@@ -9,7 +9,7 @@
* @data: message payload
*
* This is the structure for data used in mbox_send_message
- * the maximum length of data buffer is fixed to 12 bytes.
+ * the maximum length of data buffer is fixed to 32 bytes.
* Client is supposed to be aware of this.
*/
struct zynqmp_ipi_message {
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index e45bdec73baf..097cbf84c1e0 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -9063,7 +9063,8 @@ struct mlx5_ifc_alloc_flow_counter_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];

- u8 reserved_at_40[0x38];
+ u8 reserved_at_40[0x33];
+ u8 flow_counter_bulk_log_size[0x5];
u8 flow_counter_bulk[0x8];
};

diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 241e005f290a..e9a9ab34a7cc 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -45,7 +45,6 @@ struct nfnetlink_subsystem {
int (*commit)(struct net *net, struct sk_buff *skb);
int (*abort)(struct net *net, struct sk_buff *skb,
enum nfnl_abort_action action);
- void (*cleanup)(struct net *net);
bool (*valid_genid)(struct net *net, u32 genid);
};

diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 2c6e99ca48af..d607f51404fc 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -4,6 +4,7 @@

#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/alarmtimer.h>
#include <linux/timerqueue.h>

@@ -62,16 +63,18 @@ static inline int clockid_to_fd(const clockid_t clk)
* cpu_timer - Posix CPU timer representation for k_itimer
* @node: timerqueue node to queue in the task/sig
* @head: timerqueue head on which this timer is queued
- * @task: Pointer to target task
+ * @pid: Pointer to target task PID
* @elist: List head for the expiry list
* @firing: Timer is currently firing
+ * @handling: Pointer to the task which handles expiry
*/
struct cpu_timer {
- struct timerqueue_node node;
- struct timerqueue_head *head;
- struct pid *pid;
- struct list_head elist;
- int firing;
+ struct timerqueue_node node;
+ struct timerqueue_head *head;
+ struct pid *pid;
+ struct list_head elist;
+ int firing;
+ struct task_struct __rcu *handling;
};

static inline bool cpu_timer_enqueue(struct timerqueue_head *head,
@@ -135,10 +138,12 @@ struct posix_cputimers {
/**
* posix_cputimers_work - Container for task work based posix CPU timer expiry
* @work: The task work to be scheduled
+ * @mutex: Mutex held around expiry in context of this task work
* @scheduled: @work has been scheduled already, no further processing
*/
struct posix_cputimers_work {
struct callback_head work;
+ struct mutex mutex;
unsigned int scheduled;
};

diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index b8ca3ecaf8d7..8ada7dc802d3 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -90,8 +90,7 @@ struct rpc_task {
#endif
unsigned char tk_priority : 2,/* Task priority */
tk_garb_retry : 2,
- tk_cred_retry : 2,
- tk_rebind_retry : 2;
+ tk_cred_retry : 2;
};

typedef void (*rpc_action)(struct rpc_task *);
diff --git a/include/linux/tick.h b/include/linux/tick.h
index bfd571f18cfd..9459fef5b857 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -216,6 +216,7 @@ extern void tick_nohz_dep_set_signal(struct task_struct *tsk,
enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
enum tick_dep_bits bit);
+extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);

/*
* The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
@@ -280,6 +281,7 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }

static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
+static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; }

static inline void tick_dep_set(enum tick_dep_bits bit) { }
static inline void tick_dep_clear(enum tick_dep_bits bit) { }
diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h
index 848db1b1569f..919d999a8c1d 100644
--- a/include/linux/vt_buffer.h
+++ b/include/linux/vt_buffer.h
@@ -16,7 +16,7 @@

#include <linux/string.h>

-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
+#if IS_ENABLED(CONFIG_VGA_CONSOLE) || IS_ENABLED(CONFIG_MDA_CONSOLE)
#include <asm/vga.h>
#endif
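
For readers unfamiliar with the distinction the vt_buffer.h hunk above relies on: defined(CONFIG_FOO) only fires for built-in (=y) options, while IS_ENABLED(CONFIG_FOO) also covers =m, because Kconfig emits CONFIG_FOO_MODULE for modular options. Below is a minimal user-space sketch of that mechanism, loosely modelled on include/linux/kconfig.h rather than copied from it; CONFIG_MDA_CONSOLE_MODULE is defined by hand purely for illustration.

#include <stdio.h>

/* Pretend Kconfig selected the MDA console as a module (=m). */
#define CONFIG_MDA_CONSOLE_MODULE 1

/* Simplified IS_ENABLED() machinery, in the spirit of include/linux/kconfig.h. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

#define IS_BUILTIN(option)	__is_defined(option)
#define IS_MODULE(option)	__is_defined(option##_MODULE)
#define IS_ENABLED(option)	(IS_BUILTIN(option) || IS_MODULE(option))

int main(void)
{
	int via_defined;

	/* The old style of test: only sees the option when it is built in (=y). */
#ifdef CONFIG_MDA_CONSOLE
	via_defined = 1;
#else
	via_defined = 0;
#endif

	/* The new test also sees the =m case via CONFIG_MDA_CONSOLE_MODULE. */
	printf("defined(CONFIG_MDA_CONSOLE):    %d\n", via_defined);	/* prints 0 */
	printf("IS_ENABLED(CONFIG_MDA_CONSOLE): %d\n",
	       IS_ENABLED(CONFIG_MDA_CONSOLE));				/* prints 1 */
	return 0;
}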

diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h
index 191c36afa1f4..9dc082b2d543 100644
--- a/include/net/bond_alb.h
+++ b/include/net/bond_alb.h
@@ -156,8 +156,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave);
void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
-int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
-int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
struct sk_buff *skb);
struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index b2b9de70d9f4..a36f87af415c 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -90,7 +90,11 @@ static inline void __nf_ct_set_timeout(struct nf_conn *ct, u64 timeout)
{
if (timeout > INT_MAX)
timeout = INT_MAX;
- WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
+
+ if (nf_ct_is_confirmed(ct))
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
+ else
+ ct->timeout = (u32)timeout;
}

int __nf_ct_change_timeout(struct nf_conn *ct, u64 cta_timeout);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 6bacbf57ac17..a1ccf1276f3e 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -614,6 +614,7 @@ struct nft_set_binding {
};

enum nft_trans_phase;
+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set);
void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding,
enum nft_trans_phase phase);
diff --git a/include/net/scm.h b/include/net/scm.h
index 1ce365f4c256..585adc1346bd 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -105,16 +105,27 @@ static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct sc
}
}
}
+
+static inline bool scm_has_secdata(struct socket *sock)
+{
+ return test_bit(SOCK_PASSSEC, &sock->flags);
+}
#else
static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
{ }
+
+static inline bool scm_has_secdata(struct socket *sock)
+{
+ return false;
+}
#endif /* CONFIG_SECURITY_NETWORK */

static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
struct scm_cookie *scm, int flags)
{
if (!msg->msg_control) {
- if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp)
+ if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp ||
+ scm_has_secdata(sock))
msg->msg_flags |= MSG_CTRUNC;
scm_destroy(scm);
return;
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index f787c3f524b0..996eaf1ef1a1 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -175,13 +175,8 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
if (likely(!cross_pg))
return false;

- if (pool->dma_pages_cnt) {
- return !(pool->dma_pages[addr >> PAGE_SHIFT] &
- XSK_NEXT_PG_CONTIG_MASK);
- }
-
- /* skb path */
- return addr + len > pool->addrs_cnt;
+ return pool->dma_pages_cnt &&
+ !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
}

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
index ec646217e7f6..e7d466df8157 100644
--- a/include/scsi/sas_ata.h
+++ b/include/scsi/sas_ata.h
@@ -32,6 +32,7 @@ void sas_probe_sata(struct asd_sas_port *port);
void sas_suspend_sata(struct asd_sas_port *port);
void sas_resume_sata(struct asd_sas_port *port);
void sas_ata_end_eh(struct ata_port *ap);
+void sas_ata_device_link_abort(struct domain_device *dev, bool force_reset);
int sas_execute_ata_cmd(struct domain_device *device, u8 *fis,
int force_phy_id);
int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline);
@@ -88,6 +89,11 @@ static inline void sas_ata_end_eh(struct ata_port *ap)
{
}

+static inline void sas_ata_device_link_abort(struct domain_device *dev,
+ bool force_reset)
+{
+}
+
static inline int sas_execute_ata_cmd(struct domain_device *device, u8 *fis,
int force_phy_id)
{
diff --git a/include/sound/acp62_chip_offset_byte.h b/include/sound/acp62_chip_offset_byte.h
deleted file mode 100644
index f03992f81168..000000000000
--- a/include/sound/acp62_chip_offset_byte.h
+++ /dev/null
@@ -1,444 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * AMD ACP 6.2 Register Documentation
- *
- * Copyright 2022 Advanced Micro Devices, Inc.
- */
-
-#ifndef _acp_ip_OFFSET_HEADER
-#define _acp_ip_OFFSET_HEADER
-
-/* Registers from ACP_DMA block */
-#define ACP_DMA_CNTL_0 0x0000000
-#define ACP_DMA_CNTL_1 0x0000004
-#define ACP_DMA_CNTL_2 0x0000008
-#define ACP_DMA_CNTL_3 0x000000C
-#define ACP_DMA_CNTL_4 0x0000010
-#define ACP_DMA_CNTL_5 0x0000014
-#define ACP_DMA_CNTL_6 0x0000018
-#define ACP_DMA_CNTL_7 0x000001C
-#define ACP_DMA_DSCR_STRT_IDX_0 0x0000020
-#define ACP_DMA_DSCR_STRT_IDX_1 0x0000024
-#define ACP_DMA_DSCR_STRT_IDX_2 0x0000028
-#define ACP_DMA_DSCR_STRT_IDX_3 0x000002C
-#define ACP_DMA_DSCR_STRT_IDX_4 0x0000030
-#define ACP_DMA_DSCR_STRT_IDX_5 0x0000034
-#define ACP_DMA_DSCR_STRT_IDX_6 0x0000038
-#define ACP_DMA_DSCR_STRT_IDX_7 0x000003C
-#define ACP_DMA_DSCR_CNT_0 0x0000040
-#define ACP_DMA_DSCR_CNT_1 0x0000044
-#define ACP_DMA_DSCR_CNT_2 0x0000048
-#define ACP_DMA_DSCR_CNT_3 0x000004C
-#define ACP_DMA_DSCR_CNT_4 0x0000050
-#define ACP_DMA_DSCR_CNT_5 0x0000054
-#define ACP_DMA_DSCR_CNT_6 0x0000058
-#define ACP_DMA_DSCR_CNT_7 0x000005C
-#define ACP_DMA_PRIO_0 0x0000060
-#define ACP_DMA_PRIO_1 0x0000064
-#define ACP_DMA_PRIO_2 0x0000068
-#define ACP_DMA_PRIO_3 0x000006C
-#define ACP_DMA_PRIO_4 0x0000070
-#define ACP_DMA_PRIO_5 0x0000074
-#define ACP_DMA_PRIO_6 0x0000078
-#define ACP_DMA_PRIO_7 0x000007C
-#define ACP_DMA_CUR_DSCR_0 0x0000080
-#define ACP_DMA_CUR_DSCR_1 0x0000084
-#define ACP_DMA_CUR_DSCR_2 0x0000088
-#define ACP_DMA_CUR_DSCR_3 0x000008C
-#define ACP_DMA_CUR_DSCR_4 0x0000090
-#define ACP_DMA_CUR_DSCR_5 0x0000094
-#define ACP_DMA_CUR_DSCR_6 0x0000098
-#define ACP_DMA_CUR_DSCR_7 0x000009C
-#define ACP_DMA_CUR_TRANS_CNT_0 0x00000A0
-#define ACP_DMA_CUR_TRANS_CNT_1 0x00000A4
-#define ACP_DMA_CUR_TRANS_CNT_2 0x00000A8
-#define ACP_DMA_CUR_TRANS_CNT_3 0x00000AC
-#define ACP_DMA_CUR_TRANS_CNT_4 0x00000B0
-#define ACP_DMA_CUR_TRANS_CNT_5 0x00000B4
-#define ACP_DMA_CUR_TRANS_CNT_6 0x00000B8
-#define ACP_DMA_CUR_TRANS_CNT_7 0x00000BC
-#define ACP_DMA_ERR_STS_0 0x00000C0
-#define ACP_DMA_ERR_STS_1 0x00000C4
-#define ACP_DMA_ERR_STS_2 0x00000C8
-#define ACP_DMA_ERR_STS_3 0x00000CC
-#define ACP_DMA_ERR_STS_4 0x00000D0
-#define ACP_DMA_ERR_STS_5 0x00000D4
-#define ACP_DMA_ERR_STS_6 0x00000D8
-#define ACP_DMA_ERR_STS_7 0x00000DC
-#define ACP_DMA_DESC_BASE_ADDR 0x00000E0
-#define ACP_DMA_DESC_MAX_NUM_DSCR 0x00000E4
-#define ACP_DMA_CH_STS 0x00000E8
-#define ACP_DMA_CH_GROUP 0x00000EC
-#define ACP_DMA_CH_RST_STS 0x00000F0
-
-/* Registers from ACP_AXI2AXIATU block */
-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1 0x0000C00
-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_1 0x0000C04
-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_2 0x0000C08
-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_2 0x0000C0C
-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_3 0x0000C10
-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_3 0x0000C14
-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_4 0x0000C18
-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_4 0x0000C1C
-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_5 0x0000C20
-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_5 0x0000C24
-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_6 0x0000C28
-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_6 0x0000C2C
-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_7 0x0000C30
-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_7 0x0000C34
-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_8 0x0000C38
-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_8 0x0000C3C
-#define ACPAXI2AXI_ATU_CTRL 0x0000C40
-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_9 0x0000C44
-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_9 0x0000C48
-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_10 0x0000C4C
-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_10 0x0000C50
-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_11 0x0000C54
-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_11 0x0000C58
-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_12 0x0000C5C
-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_12 0x0000C60
-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_13 0x0000C64
-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_13 0x0000C68
-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_14 0x0000C6C
-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_14 0x0000C70
-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_15 0x0000C74
-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_15 0x0000C78
-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_16 0x0000C7C
-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_16 0x0000C80
-
-/* Registers from ACP_CLKRST block */
-#define ACP_SOFT_RESET 0x0001000
-#define ACP_CONTROL 0x0001004
-#define ACP_STATUS 0x0001008
-#define ACP_DYNAMIC_CG_MASTER_CONTROL 0x0001010
-#define ACP_ZSC_DSP_CTRL 0x0001014
-#define ACP_ZSC_STS 0x0001018
-#define ACP_PGFSM_CONTROL 0x0001024
-#define ACP_PGFSM_STATUS 0x0001028
-#define ACP_CLKMUX_SEL 0x000102C
-
-/* Registers from ACP_AON block */
-#define ACP_PME_EN 0x0001400
-#define ACP_DEVICE_STATE 0x0001404
-#define AZ_DEVICE_STATE 0x0001408
-#define ACP_PIN_CONFIG 0x0001440
-#define ACP_PAD_PULLUP_CTRL 0x0001444
-#define ACP_PAD_PULLDOWN_CTRL 0x0001448
-#define ACP_PAD_DRIVE_STRENGTH_CTRL 0x000144C
-#define ACP_PAD_SCHMEN_CTRL 0x0001450
-#define ACP_SW_PAD_KEEPER_EN 0x0001454
-#define ACP_SW_WAKE_EN 0x0001458
-#define ACP_I2S_WAKE_EN 0x000145C
-#define ACP_SW1_WAKE_EN 0x0001460
-
-/* Registers from ACP_P1_MISC block */
-#define ACP_EXTERNAL_INTR_ENB 0x0001A00
-#define ACP_EXTERNAL_INTR_CNTL 0x0001A04
-#define ACP_EXTERNAL_INTR_CNTL1 0x0001A08
-#define ACP_EXTERNAL_INTR_STAT 0x0001A0C
-#define ACP_EXTERNAL_INTR_STAT1 0x0001A10
-#define ACP_ERROR_STATUS 0x0001A4C
-#define ACP_P1_SW_I2S_ERROR_REASON 0x0001A50
-#define ACP_P1_SW_POS_TRACK_I2S_TX_CTRL 0x0001A6C
-#define ACP_P1_SW_I2S_TX_DMA_POS 0x0001A70
-#define ACP_P1_SW_POS_TRACK_I2S_RX_CTRL 0x0001A74
-#define ACP_P1_SW_I2S_RX_DMA_POS 0x0001A78
-#define ACP_P1_DMIC_I2S_GPIO_INTR_CTRL 0x0001A7C
-#define ACP_P1_DMIC_I2S_GPIO_INTR_STATUS 0x0001A80
-#define ACP_SCRATCH_REG_BASE_ADDR 0x0001A84
-#define ACP_P1_SW_POS_TRACK_BT_TX_CTRL 0x0001A88
-#define ACP_P1_SW_BT_TX_DMA_POS 0x0001A8C
-#define ACP_P1_SW_POS_TRACK_HS_TX_CTRL 0x0001A90
-#define ACP_P1_SW_HS_TX_DMA_POS 0x0001A94
-#define ACP_P1_SW_POS_TRACK_BT_RX_CTRL 0x0001A98
-#define ACP_P1_SW_BT_RX_DMA_POS 0x0001A9C
-#define ACP_P1_SW_POS_TRACK_HS_RX_CTRL 0x0001AA0
-#define ACP_P1_SW_HS_RX_DMA_POS 0x0001AA4
-
-/* Registers from ACP_AUDIO_BUFFERS block */
-#define ACP_I2S_RX_RINGBUFADDR 0x0002000
-#define ACP_I2S_RX_RINGBUFSIZE 0x0002004
-#define ACP_I2S_RX_LINKPOSITIONCNTR 0x0002008
-#define ACP_I2S_RX_FIFOADDR 0x000200C
-#define ACP_I2S_RX_FIFOSIZE 0x0002010
-#define ACP_I2S_RX_DMA_SIZE 0x0002014
-#define ACP_I2S_RX_LINEARPOSITIONCNTR_HIGH 0x0002018
-#define ACP_I2S_RX_LINEARPOSITIONCNTR_LOW 0x000201C
-#define ACP_I2S_RX_INTR_WATERMARK_SIZE 0x0002020
-#define ACP_I2S_TX_RINGBUFADDR 0x0002024
-#define ACP_I2S_TX_RINGBUFSIZE 0x0002028
-#define ACP_I2S_TX_LINKPOSITIONCNTR 0x000202C
-#define ACP_I2S_TX_FIFOADDR 0x0002030
-#define ACP_I2S_TX_FIFOSIZE 0x0002034
-#define ACP_I2S_TX_DMA_SIZE 0x0002038
-#define ACP_I2S_TX_LINEARPOSITIONCNTR_HIGH 0x000203C
-#define ACP_I2S_TX_LINEARPOSITIONCNTR_LOW 0x0002040
-#define ACP_I2S_TX_INTR_WATERMARK_SIZE 0x0002044
-#define ACP_BT_RX_RINGBUFADDR 0x0002048
-#define ACP_BT_RX_RINGBUFSIZE 0x000204C
-#define ACP_BT_RX_LINKPOSITIONCNTR 0x0002050
-#define ACP_BT_RX_FIFOADDR 0x0002054
-#define ACP_BT_RX_FIFOSIZE 0x0002058
-#define ACP_BT_RX_DMA_SIZE 0x000205C
-#define ACP_BT_RX_LINEARPOSITIONCNTR_HIGH 0x0002060
-#define ACP_BT_RX_LINEARPOSITIONCNTR_LOW 0x0002064
-#define ACP_BT_RX_INTR_WATERMARK_SIZE 0x0002068
-#define ACP_BT_TX_RINGBUFADDR 0x000206C
-#define ACP_BT_TX_RINGBUFSIZE 0x0002070
-#define ACP_BT_TX_LINKPOSITIONCNTR 0x0002074
-#define ACP_BT_TX_FIFOADDR 0x0002078
-#define ACP_BT_TX_FIFOSIZE 0x000207C
-#define ACP_BT_TX_DMA_SIZE 0x0002080
-#define ACP_BT_TX_LINEARPOSITIONCNTR_HIGH 0x0002084
-#define ACP_BT_TX_LINEARPOSITIONCNTR_LOW 0x0002088
-#define ACP_BT_TX_INTR_WATERMARK_SIZE 0x000208C
-#define ACP_HS_RX_RINGBUFADDR 0x0002090
-#define ACP_HS_RX_RINGBUFSIZE 0x0002094
-#define ACP_HS_RX_LINKPOSITIONCNTR 0x0002098
-#define ACP_HS_RX_FIFOADDR 0x000209C
-#define ACP_HS_RX_FIFOSIZE 0x00020A0
-#define ACP_HS_RX_DMA_SIZE 0x00020A4
-#define ACP_HS_RX_LINEARPOSITIONCNTR_HIGH 0x00020A8
-#define ACP_HS_RX_LINEARPOSITIONCNTR_LOW 0x00020AC
-#define ACP_HS_RX_INTR_WATERMARK_SIZE 0x00020B0
-#define ACP_HS_TX_RINGBUFADDR 0x00020B4
-#define ACP_HS_TX_RINGBUFSIZE 0x00020B8
-#define ACP_HS_TX_LINKPOSITIONCNTR 0x00020BC
-#define ACP_HS_TX_FIFOADDR 0x00020C0
-#define ACP_HS_TX_FIFOSIZE 0x00020C4
-#define ACP_HS_TX_DMA_SIZE 0x00020C8
-#define ACP_HS_TX_LINEARPOSITIONCNTR_HIGH 0x00020CC
-#define ACP_HS_TX_LINEARPOSITIONCNTR_LOW 0x00020D0
-#define ACP_HS_TX_INTR_WATERMARK_SIZE 0x00020D4
-
-/* Registers from ACP_I2S_TDM block */
-#define ACP_I2STDM_IER 0x0002400
-#define ACP_I2STDM_IRER 0x0002404
-#define ACP_I2STDM_RXFRMT 0x0002408
-#define ACP_I2STDM_ITER 0x000240C
-#define ACP_I2STDM_TXFRMT 0x0002410
-#define ACP_I2STDM0_MSTRCLKGEN 0x0002414
-#define ACP_I2STDM1_MSTRCLKGEN 0x0002418
-#define ACP_I2STDM2_MSTRCLKGEN 0x000241C
-#define ACP_I2STDM_REFCLKGEN 0x0002420
-
-/* Registers from ACP_BT_TDM block */
-#define ACP_BTTDM_IER 0x0002800
-#define ACP_BTTDM_IRER 0x0002804
-#define ACP_BTTDM_RXFRMT 0x0002808
-#define ACP_BTTDM_ITER 0x000280C
-#define ACP_BTTDM_TXFRMT 0x0002810
-#define ACP_HSTDM_IER 0x0002814
-#define ACP_HSTDM_IRER 0x0002818
-#define ACP_HSTDM_RXFRMT 0x000281C
-#define ACP_HSTDM_ITER 0x0002820
-#define ACP_HSTDM_TXFRMT 0x0002824
-
-/* Registers from ACP_WOV block */
-#define ACP_WOV_PDM_ENABLE 0x0002C04
-#define ACP_WOV_PDM_DMA_ENABLE 0x0002C08
-#define ACP_WOV_RX_RINGBUFADDR 0x0002C0C
-#define ACP_WOV_RX_RINGBUFSIZE 0x0002C10
-#define ACP_WOV_RX_LINKPOSITIONCNTR 0x0002C14
-#define ACP_WOV_RX_LINEARPOSITIONCNTR_HIGH 0x0002C18
-#define ACP_WOV_RX_LINEARPOSITIONCNTR_LOW 0x0002C1C
-#define ACP_WOV_RX_INTR_WATERMARK_SIZE 0x0002C20
-#define ACP_WOV_PDM_FIFO_FLUSH 0x0002C24
-#define ACP_WOV_PDM_NO_OF_CHANNELS 0x0002C28
-#define ACP_WOV_PDM_DECIMATION_FACTOR 0x0002C2C
-#define ACP_WOV_PDM_VAD_CTRL 0x0002C30
-#define ACP_WOV_WAKE 0x0002C54
-#define ACP_WOV_BUFFER_STATUS 0x0002C58
-#define ACP_WOV_MISC_CTRL 0x0002C5C
-#define ACP_WOV_CLK_CTRL 0x0002C60
-#define ACP_PDM_VAD_DYNAMIC_CLK_GATING_EN 0x0002C64
-#define ACP_WOV_ERROR_STATUS_REGISTER 0x0002C68
-#define ACP_PDM_CLKDIV 0x0002C6C
-
-/* Registers from ACP_P1_AUDIO_BUFFERS block */
-#define ACP_P1_I2S_RX_RINGBUFADDR 0x0003A00
-#define ACP_P1_I2S_RX_RINGBUFSIZE 0x0003A04
-#define ACP_P1_I2S_RX_LINKPOSITIONCNTR 0x0003A08
-#define ACP_P1_I2S_RX_FIFOADDR 0x0003A0C
-#define ACP_P1_I2S_RX_FIFOSIZE 0x0003A10
-#define ACP_P1_I2S_RX_DMA_SIZE 0x0003A14
-#define ACP_P1_I2S_RX_LINEARPOSITIONCNTR_HIGH 0x0003A18
-#define ACP_P1_I2S_RX_LINEARPOSITIONCNTR_LOW 0x0003A1C
-#define ACP_P1_I2S_RX_INTR_WATERMARK_SIZE 0x0003A20
-#define ACP_P1_I2S_TX_RINGBUFADDR 0x0003A24
-#define ACP_P1_I2S_TX_RINGBUFSIZE 0x0003A28
-#define ACP_P1_I2S_TX_LINKPOSITIONCNTR 0x0003A2C
-#define ACP_P1_I2S_TX_FIFOADDR 0x0003A30
-#define ACP_P1_I2S_TX_FIFOSIZE 0x0003A34
-#define ACP_P1_I2S_TX_DMA_SIZE 0x0003A38
-#define ACP_P1_I2S_TX_LINEARPOSITIONCNTR_HIGH 0x0003A3C
-#define ACP_P1_I2S_TX_LINEARPOSITIONCNTR_LOW 0x0003A40
-#define ACP_P1_I2S_TX_INTR_WATERMARK_SIZE 0x0003A44
-#define ACP_P1_BT_RX_RINGBUFADDR 0x0003A48
-#define ACP_P1_BT_RX_RINGBUFSIZE 0x0003A4C
-#define ACP_P1_BT_RX_LINKPOSITIONCNTR 0x0003A50
-#define ACP_P1_BT_RX_FIFOADDR 0x0003A54
-#define ACP_P1_BT_RX_FIFOSIZE 0x0003A58
-#define ACP_P1_BT_RX_DMA_SIZE 0x0003A5C
-#define ACP_P1_BT_RX_LINEARPOSITIONCNTR_HIGH 0x0003A60
-#define ACP_P1_BT_RX_LINEARPOSITIONCNTR_LOW 0x0003A64
-#define ACP_P1_BT_RX_INTR_WATERMARK_SIZE 0x0003A68
-#define ACP_P1_BT_TX_RINGBUFADDR 0x0003A6C
-#define ACP_P1_BT_TX_RINGBUFSIZE 0x0003A70
-#define ACP_P1_BT_TX_LINKPOSITIONCNTR 0x0003A74
-#define ACP_P1_BT_TX_FIFOADDR 0x0003A78
-#define ACP_P1_BT_TX_FIFOSIZE 0x0003A7C
-#define ACP_P1_BT_TX_DMA_SIZE 0x0003A80
-#define ACP_P1_BT_TX_LINEARPOSITIONCNTR_HIGH 0x0003A84
-#define ACP_P1_BT_TX_LINEARPOSITIONCNTR_LOW 0x0003A88
-#define ACP_P1_BT_TX_INTR_WATERMARK_SIZE 0x0003A8C
-#define ACP_P1_HS_RX_RINGBUFADDR 0x0003A90
-#define ACP_P1_HS_RX_RINGBUFSIZE 0x0003A94
-#define ACP_P1_HS_RX_LINKPOSITIONCNTR 0x0003A98
-#define ACP_P1_HS_RX_FIFOADDR 0x0003A9C
-#define ACP_P1_HS_RX_FIFOSIZE 0x0003AA0
-#define ACP_P1_HS_RX_DMA_SIZE 0x0003AA4
-#define ACP_P1_HS_RX_LINEARPOSITIONCNTR_HIGH 0x0003AA8
-#define ACP_P1_HS_RX_LINEARPOSITIONCNTR_LOW 0x0003AAC
-#define ACP_P1_HS_RX_INTR_WATERMARK_SIZE 0x0003AB0
-#define ACP_P1_HS_TX_RINGBUFADDR 0x0003AB4
-#define ACP_P1_HS_TX_RINGBUFSIZE 0x0003AB8
-#define ACP_P1_HS_TX_LINKPOSITIONCNTR 0x0003ABC
-#define ACP_P1_HS_TX_FIFOADDR 0x0003AC0
-#define ACP_P1_HS_TX_FIFOSIZE 0x0003AC4
-#define ACP_P1_HS_TX_DMA_SIZE 0x0003AC8
-#define ACP_P1_HS_TX_LINEARPOSITIONCNTR_HIGH 0x0003ACC
-#define ACP_P1_HS_TX_LINEARPOSITIONCNTR_LOW 0x0003AD0
-#define ACP_P1_HS_TX_INTR_WATERMARK_SIZE 0x0003AD4
-
-/* Registers from ACP_SCRATCH block */
-#define ACP_SCRATCH_REG_0 0x0010000
-#define ACP_SCRATCH_REG_1 0x0010004
-#define ACP_SCRATCH_REG_2 0x0010008
-#define ACP_SCRATCH_REG_3 0x001000C
-#define ACP_SCRATCH_REG_4 0x0010010
-#define ACP_SCRATCH_REG_5 0x0010014
-#define ACP_SCRATCH_REG_6 0x0010018
-#define ACP_SCRATCH_REG_7 0x001001C
-#define ACP_SCRATCH_REG_8 0x0010020
-#define ACP_SCRATCH_REG_9 0x0010024
-#define ACP_SCRATCH_REG_10 0x0010028
-#define ACP_SCRATCH_REG_11 0x001002C
-#define ACP_SCRATCH_REG_12 0x0010030
-#define ACP_SCRATCH_REG_13 0x0010034
-#define ACP_SCRATCH_REG_14 0x0010038
-#define ACP_SCRATCH_REG_15 0x001003C
-#define ACP_SCRATCH_REG_16 0x0010040
-#define ACP_SCRATCH_REG_17 0x0010044
-#define ACP_SCRATCH_REG_18 0x0010048
-#define ACP_SCRATCH_REG_19 0x001004C
-#define ACP_SCRATCH_REG_20 0x0010050
-#define ACP_SCRATCH_REG_21 0x0010054
-#define ACP_SCRATCH_REG_22 0x0010058
-#define ACP_SCRATCH_REG_23 0x001005C
-#define ACP_SCRATCH_REG_24 0x0010060
-#define ACP_SCRATCH_REG_25 0x0010064
-#define ACP_SCRATCH_REG_26 0x0010068
-#define ACP_SCRATCH_REG_27 0x001006C
-#define ACP_SCRATCH_REG_28 0x0010070
-#define ACP_SCRATCH_REG_29 0x0010074
-#define ACP_SCRATCH_REG_30 0x0010078
-#define ACP_SCRATCH_REG_31 0x001007C
-#define ACP_SCRATCH_REG_32 0x0010080
-#define ACP_SCRATCH_REG_33 0x0010084
-#define ACP_SCRATCH_REG_34 0x0010088
-#define ACP_SCRATCH_REG_35 0x001008C
-#define ACP_SCRATCH_REG_36 0x0010090
-#define ACP_SCRATCH_REG_37 0x0010094
-#define ACP_SCRATCH_REG_38 0x0010098
-#define ACP_SCRATCH_REG_39 0x001009C
-#define ACP_SCRATCH_REG_40 0x00100A0
-#define ACP_SCRATCH_REG_41 0x00100A4
-#define ACP_SCRATCH_REG_42 0x00100A8
-#define ACP_SCRATCH_REG_43 0x00100AC
-#define ACP_SCRATCH_REG_44 0x00100B0
-#define ACP_SCRATCH_REG_45 0x00100B4
-#define ACP_SCRATCH_REG_46 0x00100B8
-#define ACP_SCRATCH_REG_47 0x00100BC
-#define ACP_SCRATCH_REG_48 0x00100C0
-#define ACP_SCRATCH_REG_49 0x00100C4
-#define ACP_SCRATCH_REG_50 0x00100C8
-#define ACP_SCRATCH_REG_51 0x00100CC
-#define ACP_SCRATCH_REG_52 0x00100D0
-#define ACP_SCRATCH_REG_53 0x00100D4
-#define ACP_SCRATCH_REG_54 0x00100D8
-#define ACP_SCRATCH_REG_55 0x00100DC
-#define ACP_SCRATCH_REG_56 0x00100E0
-#define ACP_SCRATCH_REG_57 0x00100E4
-#define ACP_SCRATCH_REG_58 0x00100E8
-#define ACP_SCRATCH_REG_59 0x00100EC
-#define ACP_SCRATCH_REG_60 0x00100F0
-#define ACP_SCRATCH_REG_61 0x00100F4
-#define ACP_SCRATCH_REG_62 0x00100F8
-#define ACP_SCRATCH_REG_63 0x00100FC
-#define ACP_SCRATCH_REG_64 0x0010100
-#define ACP_SCRATCH_REG_65 0x0010104
-#define ACP_SCRATCH_REG_66 0x0010108
-#define ACP_SCRATCH_REG_67 0x001010C
-#define ACP_SCRATCH_REG_68 0x0010110
-#define ACP_SCRATCH_REG_69 0x0010114
-#define ACP_SCRATCH_REG_70 0x0010118
-#define ACP_SCRATCH_REG_71 0x001011C
-#define ACP_SCRATCH_REG_72 0x0010120
-#define ACP_SCRATCH_REG_73 0x0010124
-#define ACP_SCRATCH_REG_74 0x0010128
-#define ACP_SCRATCH_REG_75 0x001012C
-#define ACP_SCRATCH_REG_76 0x0010130
-#define ACP_SCRATCH_REG_77 0x0010134
-#define ACP_SCRATCH_REG_78 0x0010138
-#define ACP_SCRATCH_REG_79 0x001013C
-#define ACP_SCRATCH_REG_80 0x0010140
-#define ACP_SCRATCH_REG_81 0x0010144
-#define ACP_SCRATCH_REG_82 0x0010148
-#define ACP_SCRATCH_REG_83 0x001014C
-#define ACP_SCRATCH_REG_84 0x0010150
-#define ACP_SCRATCH_REG_85 0x0010154
-#define ACP_SCRATCH_REG_86 0x0010158
-#define ACP_SCRATCH_REG_87 0x001015C
-#define ACP_SCRATCH_REG_88 0x0010160
-#define ACP_SCRATCH_REG_89 0x0010164
-#define ACP_SCRATCH_REG_90 0x0010168
-#define ACP_SCRATCH_REG_91 0x001016C
-#define ACP_SCRATCH_REG_92 0x0010170
-#define ACP_SCRATCH_REG_93 0x0010174
-#define ACP_SCRATCH_REG_94 0x0010178
-#define ACP_SCRATCH_REG_95 0x001017C
-#define ACP_SCRATCH_REG_96 0x0010180
-#define ACP_SCRATCH_REG_97 0x0010184
-#define ACP_SCRATCH_REG_98 0x0010188
-#define ACP_SCRATCH_REG_99 0x001018C
-#define ACP_SCRATCH_REG_100 0x0010190
-#define ACP_SCRATCH_REG_101 0x0010194
-#define ACP_SCRATCH_REG_102 0x0010198
-#define ACP_SCRATCH_REG_103 0x001019C
-#define ACP_SCRATCH_REG_104 0x00101A0
-#define ACP_SCRATCH_REG_105 0x00101A4
-#define ACP_SCRATCH_REG_106 0x00101A8
-#define ACP_SCRATCH_REG_107 0x00101AC
-#define ACP_SCRATCH_REG_108 0x00101B0
-#define ACP_SCRATCH_REG_109 0x00101B4
-#define ACP_SCRATCH_REG_110 0x00101B8
-#define ACP_SCRATCH_REG_111 0x00101BC
-#define ACP_SCRATCH_REG_112 0x00101C0
-#define ACP_SCRATCH_REG_113 0x00101C4
-#define ACP_SCRATCH_REG_114 0x00101C8
-#define ACP_SCRATCH_REG_115 0x00101CC
-#define ACP_SCRATCH_REG_116 0x00101D0
-#define ACP_SCRATCH_REG_117 0x00101D4
-#define ACP_SCRATCH_REG_118 0x00101D8
-#define ACP_SCRATCH_REG_119 0x00101DC
-#define ACP_SCRATCH_REG_120 0x00101E0
-#define ACP_SCRATCH_REG_121 0x00101E4
-#define ACP_SCRATCH_REG_122 0x00101E8
-#define ACP_SCRATCH_REG_123 0x00101EC
-#define ACP_SCRATCH_REG_124 0x00101F0
-#define ACP_SCRATCH_REG_125 0x00101F4
-#define ACP_SCRATCH_REG_126 0x00101F8
-#define ACP_SCRATCH_REG_127 0x00101FC
-#define ACP_SCRATCH_REG_128 0x0010200
-#endif
diff --git a/include/sound/acp63_chip_offset_byte.h b/include/sound/acp63_chip_offset_byte.h
new file mode 100644
index 000000000000..b02d0467c3cf
--- /dev/null
+++ b/include/sound/acp63_chip_offset_byte.h
@@ -0,0 +1,444 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * AMD ACP 6.3 Register Documentation
+ *
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ */
+
+#ifndef _acp_ip_OFFSET_HEADER
+#define _acp_ip_OFFSET_HEADER
+
+/* Registers from ACP_DMA block */
+#define ACP_DMA_CNTL_0 0x0000000
+#define ACP_DMA_CNTL_1 0x0000004
+#define ACP_DMA_CNTL_2 0x0000008
+#define ACP_DMA_CNTL_3 0x000000C
+#define ACP_DMA_CNTL_4 0x0000010
+#define ACP_DMA_CNTL_5 0x0000014
+#define ACP_DMA_CNTL_6 0x0000018
+#define ACP_DMA_CNTL_7 0x000001C
+#define ACP_DMA_DSCR_STRT_IDX_0 0x0000020
+#define ACP_DMA_DSCR_STRT_IDX_1 0x0000024
+#define ACP_DMA_DSCR_STRT_IDX_2 0x0000028
+#define ACP_DMA_DSCR_STRT_IDX_3 0x000002C
+#define ACP_DMA_DSCR_STRT_IDX_4 0x0000030
+#define ACP_DMA_DSCR_STRT_IDX_5 0x0000034
+#define ACP_DMA_DSCR_STRT_IDX_6 0x0000038
+#define ACP_DMA_DSCR_STRT_IDX_7 0x000003C
+#define ACP_DMA_DSCR_CNT_0 0x0000040
+#define ACP_DMA_DSCR_CNT_1 0x0000044
+#define ACP_DMA_DSCR_CNT_2 0x0000048
+#define ACP_DMA_DSCR_CNT_3 0x000004C
+#define ACP_DMA_DSCR_CNT_4 0x0000050
+#define ACP_DMA_DSCR_CNT_5 0x0000054
+#define ACP_DMA_DSCR_CNT_6 0x0000058
+#define ACP_DMA_DSCR_CNT_7 0x000005C
+#define ACP_DMA_PRIO_0 0x0000060
+#define ACP_DMA_PRIO_1 0x0000064
+#define ACP_DMA_PRIO_2 0x0000068
+#define ACP_DMA_PRIO_3 0x000006C
+#define ACP_DMA_PRIO_4 0x0000070
+#define ACP_DMA_PRIO_5 0x0000074
+#define ACP_DMA_PRIO_6 0x0000078
+#define ACP_DMA_PRIO_7 0x000007C
+#define ACP_DMA_CUR_DSCR_0 0x0000080
+#define ACP_DMA_CUR_DSCR_1 0x0000084
+#define ACP_DMA_CUR_DSCR_2 0x0000088
+#define ACP_DMA_CUR_DSCR_3 0x000008C
+#define ACP_DMA_CUR_DSCR_4 0x0000090
+#define ACP_DMA_CUR_DSCR_5 0x0000094
+#define ACP_DMA_CUR_DSCR_6 0x0000098
+#define ACP_DMA_CUR_DSCR_7 0x000009C
+#define ACP_DMA_CUR_TRANS_CNT_0 0x00000A0
+#define ACP_DMA_CUR_TRANS_CNT_1 0x00000A4
+#define ACP_DMA_CUR_TRANS_CNT_2 0x00000A8
+#define ACP_DMA_CUR_TRANS_CNT_3 0x00000AC
+#define ACP_DMA_CUR_TRANS_CNT_4 0x00000B0
+#define ACP_DMA_CUR_TRANS_CNT_5 0x00000B4
+#define ACP_DMA_CUR_TRANS_CNT_6 0x00000B8
+#define ACP_DMA_CUR_TRANS_CNT_7 0x00000BC
+#define ACP_DMA_ERR_STS_0 0x00000C0
+#define ACP_DMA_ERR_STS_1 0x00000C4
+#define ACP_DMA_ERR_STS_2 0x00000C8
+#define ACP_DMA_ERR_STS_3 0x00000CC
+#define ACP_DMA_ERR_STS_4 0x00000D0
+#define ACP_DMA_ERR_STS_5 0x00000D4
+#define ACP_DMA_ERR_STS_6 0x00000D8
+#define ACP_DMA_ERR_STS_7 0x00000DC
+#define ACP_DMA_DESC_BASE_ADDR 0x00000E0
+#define ACP_DMA_DESC_MAX_NUM_DSCR 0x00000E4
+#define ACP_DMA_CH_STS 0x00000E8
+#define ACP_DMA_CH_GROUP 0x00000EC
+#define ACP_DMA_CH_RST_STS 0x00000F0
+
+/* Registers from ACP_AXI2AXIATU block */
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1 0x0000C00
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_1 0x0000C04
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_2 0x0000C08
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_2 0x0000C0C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_3 0x0000C10
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_3 0x0000C14
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_4 0x0000C18
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_4 0x0000C1C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_5 0x0000C20
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_5 0x0000C24
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_6 0x0000C28
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_6 0x0000C2C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_7 0x0000C30
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_7 0x0000C34
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_8 0x0000C38
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_8 0x0000C3C
+#define ACPAXI2AXI_ATU_CTRL 0x0000C40
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_9 0x0000C44
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_9 0x0000C48
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_10 0x0000C4C
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_10 0x0000C50
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_11 0x0000C54
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_11 0x0000C58
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_12 0x0000C5C
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_12 0x0000C60
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_13 0x0000C64
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_13 0x0000C68
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_14 0x0000C6C
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_14 0x0000C70
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_15 0x0000C74
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_15 0x0000C78
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_16 0x0000C7C
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_16 0x0000C80
+
+/* Registers from ACP_CLKRST block */
+#define ACP_SOFT_RESET 0x0001000
+#define ACP_CONTROL 0x0001004
+#define ACP_STATUS 0x0001008
+#define ACP_DYNAMIC_CG_MASTER_CONTROL 0x0001010
+#define ACP_ZSC_DSP_CTRL 0x0001014
+#define ACP_ZSC_STS 0x0001018
+#define ACP_PGFSM_CONTROL 0x0001024
+#define ACP_PGFSM_STATUS 0x0001028
+#define ACP_CLKMUX_SEL 0x000102C
+
+/* Registers from ACP_AON block */
+#define ACP_PME_EN 0x0001400
+#define ACP_DEVICE_STATE 0x0001404
+#define AZ_DEVICE_STATE 0x0001408
+#define ACP_PIN_CONFIG 0x0001440
+#define ACP_PAD_PULLUP_CTRL 0x0001444
+#define ACP_PAD_PULLDOWN_CTRL 0x0001448
+#define ACP_PAD_DRIVE_STRENGTH_CTRL 0x000144C
+#define ACP_PAD_SCHMEN_CTRL 0x0001450
+#define ACP_SW_PAD_KEEPER_EN 0x0001454
+#define ACP_SW_WAKE_EN 0x0001458
+#define ACP_I2S_WAKE_EN 0x000145C
+#define ACP_SW1_WAKE_EN 0x0001460
+
+/* Registers from ACP_P1_MISC block */
+#define ACP_EXTERNAL_INTR_ENB 0x0001A00
+#define ACP_EXTERNAL_INTR_CNTL 0x0001A04
+#define ACP_EXTERNAL_INTR_CNTL1 0x0001A08
+#define ACP_EXTERNAL_INTR_STAT 0x0001A0C
+#define ACP_EXTERNAL_INTR_STAT1 0x0001A10
+#define ACP_ERROR_STATUS 0x0001A4C
+#define ACP_P1_SW_I2S_ERROR_REASON 0x0001A50
+#define ACP_P1_SW_POS_TRACK_I2S_TX_CTRL 0x0001A6C
+#define ACP_P1_SW_I2S_TX_DMA_POS 0x0001A70
+#define ACP_P1_SW_POS_TRACK_I2S_RX_CTRL 0x0001A74
+#define ACP_P1_SW_I2S_RX_DMA_POS 0x0001A78
+#define ACP_P1_DMIC_I2S_GPIO_INTR_CTRL 0x0001A7C
+#define ACP_P1_DMIC_I2S_GPIO_INTR_STATUS 0x0001A80
+#define ACP_SCRATCH_REG_BASE_ADDR 0x0001A84
+#define ACP_P1_SW_POS_TRACK_BT_TX_CTRL 0x0001A88
+#define ACP_P1_SW_BT_TX_DMA_POS 0x0001A8C
+#define ACP_P1_SW_POS_TRACK_HS_TX_CTRL 0x0001A90
+#define ACP_P1_SW_HS_TX_DMA_POS 0x0001A94
+#define ACP_P1_SW_POS_TRACK_BT_RX_CTRL 0x0001A98
+#define ACP_P1_SW_BT_RX_DMA_POS 0x0001A9C
+#define ACP_P1_SW_POS_TRACK_HS_RX_CTRL 0x0001AA0
+#define ACP_P1_SW_HS_RX_DMA_POS 0x0001AA4
+
+/* Registers from ACP_AUDIO_BUFFERS block */
+#define ACP_I2S_RX_RINGBUFADDR 0x0002000
+#define ACP_I2S_RX_RINGBUFSIZE 0x0002004
+#define ACP_I2S_RX_LINKPOSITIONCNTR 0x0002008
+#define ACP_I2S_RX_FIFOADDR 0x000200C
+#define ACP_I2S_RX_FIFOSIZE 0x0002010
+#define ACP_I2S_RX_DMA_SIZE 0x0002014
+#define ACP_I2S_RX_LINEARPOSITIONCNTR_HIGH 0x0002018
+#define ACP_I2S_RX_LINEARPOSITIONCNTR_LOW 0x000201C
+#define ACP_I2S_RX_INTR_WATERMARK_SIZE 0x0002020
+#define ACP_I2S_TX_RINGBUFADDR 0x0002024
+#define ACP_I2S_TX_RINGBUFSIZE 0x0002028
+#define ACP_I2S_TX_LINKPOSITIONCNTR 0x000202C
+#define ACP_I2S_TX_FIFOADDR 0x0002030
+#define ACP_I2S_TX_FIFOSIZE 0x0002034
+#define ACP_I2S_TX_DMA_SIZE 0x0002038
+#define ACP_I2S_TX_LINEARPOSITIONCNTR_HIGH 0x000203C
+#define ACP_I2S_TX_LINEARPOSITIONCNTR_LOW 0x0002040
+#define ACP_I2S_TX_INTR_WATERMARK_SIZE 0x0002044
+#define ACP_BT_RX_RINGBUFADDR 0x0002048
+#define ACP_BT_RX_RINGBUFSIZE 0x000204C
+#define ACP_BT_RX_LINKPOSITIONCNTR 0x0002050
+#define ACP_BT_RX_FIFOADDR 0x0002054
+#define ACP_BT_RX_FIFOSIZE 0x0002058
+#define ACP_BT_RX_DMA_SIZE 0x000205C
+#define ACP_BT_RX_LINEARPOSITIONCNTR_HIGH 0x0002060
+#define ACP_BT_RX_LINEARPOSITIONCNTR_LOW 0x0002064
+#define ACP_BT_RX_INTR_WATERMARK_SIZE 0x0002068
+#define ACP_BT_TX_RINGBUFADDR 0x000206C
+#define ACP_BT_TX_RINGBUFSIZE 0x0002070
+#define ACP_BT_TX_LINKPOSITIONCNTR 0x0002074
+#define ACP_BT_TX_FIFOADDR 0x0002078
+#define ACP_BT_TX_FIFOSIZE 0x000207C
+#define ACP_BT_TX_DMA_SIZE 0x0002080
+#define ACP_BT_TX_LINEARPOSITIONCNTR_HIGH 0x0002084
+#define ACP_BT_TX_LINEARPOSITIONCNTR_LOW 0x0002088
+#define ACP_BT_TX_INTR_WATERMARK_SIZE 0x000208C
+#define ACP_HS_RX_RINGBUFADDR 0x0002090
+#define ACP_HS_RX_RINGBUFSIZE 0x0002094
+#define ACP_HS_RX_LINKPOSITIONCNTR 0x0002098
+#define ACP_HS_RX_FIFOADDR 0x000209C
+#define ACP_HS_RX_FIFOSIZE 0x00020A0
+#define ACP_HS_RX_DMA_SIZE 0x00020A4
+#define ACP_HS_RX_LINEARPOSITIONCNTR_HIGH 0x00020A8
+#define ACP_HS_RX_LINEARPOSITIONCNTR_LOW 0x00020AC
+#define ACP_HS_RX_INTR_WATERMARK_SIZE 0x00020B0
+#define ACP_HS_TX_RINGBUFADDR 0x00020B4
+#define ACP_HS_TX_RINGBUFSIZE 0x00020B8
+#define ACP_HS_TX_LINKPOSITIONCNTR 0x00020BC
+#define ACP_HS_TX_FIFOADDR 0x00020C0
+#define ACP_HS_TX_FIFOSIZE 0x00020C4
+#define ACP_HS_TX_DMA_SIZE 0x00020C8
+#define ACP_HS_TX_LINEARPOSITIONCNTR_HIGH 0x00020CC
+#define ACP_HS_TX_LINEARPOSITIONCNTR_LOW 0x00020D0
+#define ACP_HS_TX_INTR_WATERMARK_SIZE 0x00020D4
+
+/* Registers from ACP_I2S_TDM block */
+#define ACP_I2STDM_IER 0x0002400
+#define ACP_I2STDM_IRER 0x0002404
+#define ACP_I2STDM_RXFRMT 0x0002408
+#define ACP_I2STDM_ITER 0x000240C
+#define ACP_I2STDM_TXFRMT 0x0002410
+#define ACP_I2STDM0_MSTRCLKGEN 0x0002414
+#define ACP_I2STDM1_MSTRCLKGEN 0x0002418
+#define ACP_I2STDM2_MSTRCLKGEN 0x000241C
+#define ACP_I2STDM_REFCLKGEN 0x0002420
+
+/* Registers from ACP_BT_TDM block */
+#define ACP_BTTDM_IER 0x0002800
+#define ACP_BTTDM_IRER 0x0002804
+#define ACP_BTTDM_RXFRMT 0x0002808
+#define ACP_BTTDM_ITER 0x000280C
+#define ACP_BTTDM_TXFRMT 0x0002810
+#define ACP_HSTDM_IER 0x0002814
+#define ACP_HSTDM_IRER 0x0002818
+#define ACP_HSTDM_RXFRMT 0x000281C
+#define ACP_HSTDM_ITER 0x0002820
+#define ACP_HSTDM_TXFRMT 0x0002824
+
+/* Registers from ACP_WOV block */
+#define ACP_WOV_PDM_ENABLE 0x0002C04
+#define ACP_WOV_PDM_DMA_ENABLE 0x0002C08
+#define ACP_WOV_RX_RINGBUFADDR 0x0002C0C
+#define ACP_WOV_RX_RINGBUFSIZE 0x0002C10
+#define ACP_WOV_RX_LINKPOSITIONCNTR 0x0002C14
+#define ACP_WOV_RX_LINEARPOSITIONCNTR_HIGH 0x0002C18
+#define ACP_WOV_RX_LINEARPOSITIONCNTR_LOW 0x0002C1C
+#define ACP_WOV_RX_INTR_WATERMARK_SIZE 0x0002C20
+#define ACP_WOV_PDM_FIFO_FLUSH 0x0002C24
+#define ACP_WOV_PDM_NO_OF_CHANNELS 0x0002C28
+#define ACP_WOV_PDM_DECIMATION_FACTOR 0x0002C2C
+#define ACP_WOV_PDM_VAD_CTRL 0x0002C30
+#define ACP_WOV_WAKE 0x0002C54
+#define ACP_WOV_BUFFER_STATUS 0x0002C58
+#define ACP_WOV_MISC_CTRL 0x0002C5C
+#define ACP_WOV_CLK_CTRL 0x0002C60
+#define ACP_PDM_VAD_DYNAMIC_CLK_GATING_EN 0x0002C64
+#define ACP_WOV_ERROR_STATUS_REGISTER 0x0002C68
+#define ACP_PDM_CLKDIV 0x0002C6C
+
+/* Registers from ACP_P1_AUDIO_BUFFERS block */
+#define ACP_P1_I2S_RX_RINGBUFADDR 0x0003A00
+#define ACP_P1_I2S_RX_RINGBUFSIZE 0x0003A04
+#define ACP_P1_I2S_RX_LINKPOSITIONCNTR 0x0003A08
+#define ACP_P1_I2S_RX_FIFOADDR 0x0003A0C
+#define ACP_P1_I2S_RX_FIFOSIZE 0x0003A10
+#define ACP_P1_I2S_RX_DMA_SIZE 0x0003A14
+#define ACP_P1_I2S_RX_LINEARPOSITIONCNTR_HIGH 0x0003A18
+#define ACP_P1_I2S_RX_LINEARPOSITIONCNTR_LOW 0x0003A1C
+#define ACP_P1_I2S_RX_INTR_WATERMARK_SIZE 0x0003A20
+#define ACP_P1_I2S_TX_RINGBUFADDR 0x0003A24
+#define ACP_P1_I2S_TX_RINGBUFSIZE 0x0003A28
+#define ACP_P1_I2S_TX_LINKPOSITIONCNTR 0x0003A2C
+#define ACP_P1_I2S_TX_FIFOADDR 0x0003A30
+#define ACP_P1_I2S_TX_FIFOSIZE 0x0003A34
+#define ACP_P1_I2S_TX_DMA_SIZE 0x0003A38
+#define ACP_P1_I2S_TX_LINEARPOSITIONCNTR_HIGH 0x0003A3C
+#define ACP_P1_I2S_TX_LINEARPOSITIONCNTR_LOW 0x0003A40
+#define ACP_P1_I2S_TX_INTR_WATERMARK_SIZE 0x0003A44
+#define ACP_P1_BT_RX_RINGBUFADDR 0x0003A48
+#define ACP_P1_BT_RX_RINGBUFSIZE 0x0003A4C
+#define ACP_P1_BT_RX_LINKPOSITIONCNTR 0x0003A50
+#define ACP_P1_BT_RX_FIFOADDR 0x0003A54
+#define ACP_P1_BT_RX_FIFOSIZE 0x0003A58
+#define ACP_P1_BT_RX_DMA_SIZE 0x0003A5C
+#define ACP_P1_BT_RX_LINEARPOSITIONCNTR_HIGH 0x0003A60
+#define ACP_P1_BT_RX_LINEARPOSITIONCNTR_LOW 0x0003A64
+#define ACP_P1_BT_RX_INTR_WATERMARK_SIZE 0x0003A68
+#define ACP_P1_BT_TX_RINGBUFADDR 0x0003A6C
+#define ACP_P1_BT_TX_RINGBUFSIZE 0x0003A70
+#define ACP_P1_BT_TX_LINKPOSITIONCNTR 0x0003A74
+#define ACP_P1_BT_TX_FIFOADDR 0x0003A78
+#define ACP_P1_BT_TX_FIFOSIZE 0x0003A7C
+#define ACP_P1_BT_TX_DMA_SIZE 0x0003A80
+#define ACP_P1_BT_TX_LINEARPOSITIONCNTR_HIGH 0x0003A84
+#define ACP_P1_BT_TX_LINEARPOSITIONCNTR_LOW 0x0003A88
+#define ACP_P1_BT_TX_INTR_WATERMARK_SIZE 0x0003A8C
+#define ACP_P1_HS_RX_RINGBUFADDR 0x0003A90
+#define ACP_P1_HS_RX_RINGBUFSIZE 0x0003A94
+#define ACP_P1_HS_RX_LINKPOSITIONCNTR 0x0003A98
+#define ACP_P1_HS_RX_FIFOADDR 0x0003A9C
+#define ACP_P1_HS_RX_FIFOSIZE 0x0003AA0
+#define ACP_P1_HS_RX_DMA_SIZE 0x0003AA4
+#define ACP_P1_HS_RX_LINEARPOSITIONCNTR_HIGH 0x0003AA8
+#define ACP_P1_HS_RX_LINEARPOSITIONCNTR_LOW 0x0003AAC
+#define ACP_P1_HS_RX_INTR_WATERMARK_SIZE 0x0003AB0
+#define ACP_P1_HS_TX_RINGBUFADDR 0x0003AB4
+#define ACP_P1_HS_TX_RINGBUFSIZE 0x0003AB8
+#define ACP_P1_HS_TX_LINKPOSITIONCNTR 0x0003ABC
+#define ACP_P1_HS_TX_FIFOADDR 0x0003AC0
+#define ACP_P1_HS_TX_FIFOSIZE 0x0003AC4
+#define ACP_P1_HS_TX_DMA_SIZE 0x0003AC8
+#define ACP_P1_HS_TX_LINEARPOSITIONCNTR_HIGH 0x0003ACC
+#define ACP_P1_HS_TX_LINEARPOSITIONCNTR_LOW 0x0003AD0
+#define ACP_P1_HS_TX_INTR_WATERMARK_SIZE 0x0003AD4
+
+/* Registers from ACP_SCRATCH block */
+#define ACP_SCRATCH_REG_0 0x0010000
+#define ACP_SCRATCH_REG_1 0x0010004
+#define ACP_SCRATCH_REG_2 0x0010008
+#define ACP_SCRATCH_REG_3 0x001000C
+#define ACP_SCRATCH_REG_4 0x0010010
+#define ACP_SCRATCH_REG_5 0x0010014
+#define ACP_SCRATCH_REG_6 0x0010018
+#define ACP_SCRATCH_REG_7 0x001001C
+#define ACP_SCRATCH_REG_8 0x0010020
+#define ACP_SCRATCH_REG_9 0x0010024
+#define ACP_SCRATCH_REG_10 0x0010028
+#define ACP_SCRATCH_REG_11 0x001002C
+#define ACP_SCRATCH_REG_12 0x0010030
+#define ACP_SCRATCH_REG_13 0x0010034
+#define ACP_SCRATCH_REG_14 0x0010038
+#define ACP_SCRATCH_REG_15 0x001003C
+#define ACP_SCRATCH_REG_16 0x0010040
+#define ACP_SCRATCH_REG_17 0x0010044
+#define ACP_SCRATCH_REG_18 0x0010048
+#define ACP_SCRATCH_REG_19 0x001004C
+#define ACP_SCRATCH_REG_20 0x0010050
+#define ACP_SCRATCH_REG_21 0x0010054
+#define ACP_SCRATCH_REG_22 0x0010058
+#define ACP_SCRATCH_REG_23 0x001005C
+#define ACP_SCRATCH_REG_24 0x0010060
+#define ACP_SCRATCH_REG_25 0x0010064
+#define ACP_SCRATCH_REG_26 0x0010068
+#define ACP_SCRATCH_REG_27 0x001006C
+#define ACP_SCRATCH_REG_28 0x0010070
+#define ACP_SCRATCH_REG_29 0x0010074
+#define ACP_SCRATCH_REG_30 0x0010078
+#define ACP_SCRATCH_REG_31 0x001007C
+#define ACP_SCRATCH_REG_32 0x0010080
+#define ACP_SCRATCH_REG_33 0x0010084
+#define ACP_SCRATCH_REG_34 0x0010088
+#define ACP_SCRATCH_REG_35 0x001008C
+#define ACP_SCRATCH_REG_36 0x0010090
+#define ACP_SCRATCH_REG_37 0x0010094
+#define ACP_SCRATCH_REG_38 0x0010098
+#define ACP_SCRATCH_REG_39 0x001009C
+#define ACP_SCRATCH_REG_40 0x00100A0
+#define ACP_SCRATCH_REG_41 0x00100A4
+#define ACP_SCRATCH_REG_42 0x00100A8
+#define ACP_SCRATCH_REG_43 0x00100AC
+#define ACP_SCRATCH_REG_44 0x00100B0
+#define ACP_SCRATCH_REG_45 0x00100B4
+#define ACP_SCRATCH_REG_46 0x00100B8
+#define ACP_SCRATCH_REG_47 0x00100BC
+#define ACP_SCRATCH_REG_48 0x00100C0
+#define ACP_SCRATCH_REG_49 0x00100C4
+#define ACP_SCRATCH_REG_50 0x00100C8
+#define ACP_SCRATCH_REG_51 0x00100CC
+#define ACP_SCRATCH_REG_52 0x00100D0
+#define ACP_SCRATCH_REG_53 0x00100D4
+#define ACP_SCRATCH_REG_54 0x00100D8
+#define ACP_SCRATCH_REG_55 0x00100DC
+#define ACP_SCRATCH_REG_56 0x00100E0
+#define ACP_SCRATCH_REG_57 0x00100E4
+#define ACP_SCRATCH_REG_58 0x00100E8
+#define ACP_SCRATCH_REG_59 0x00100EC
+#define ACP_SCRATCH_REG_60 0x00100F0
+#define ACP_SCRATCH_REG_61 0x00100F4
+#define ACP_SCRATCH_REG_62 0x00100F8
+#define ACP_SCRATCH_REG_63 0x00100FC
+#define ACP_SCRATCH_REG_64 0x0010100
+#define ACP_SCRATCH_REG_65 0x0010104
+#define ACP_SCRATCH_REG_66 0x0010108
+#define ACP_SCRATCH_REG_67 0x001010C
+#define ACP_SCRATCH_REG_68 0x0010110
+#define ACP_SCRATCH_REG_69 0x0010114
+#define ACP_SCRATCH_REG_70 0x0010118
+#define ACP_SCRATCH_REG_71 0x001011C
+#define ACP_SCRATCH_REG_72 0x0010120
+#define ACP_SCRATCH_REG_73 0x0010124
+#define ACP_SCRATCH_REG_74 0x0010128
+#define ACP_SCRATCH_REG_75 0x001012C
+#define ACP_SCRATCH_REG_76 0x0010130
+#define ACP_SCRATCH_REG_77 0x0010134
+#define ACP_SCRATCH_REG_78 0x0010138
+#define ACP_SCRATCH_REG_79 0x001013C
+#define ACP_SCRATCH_REG_80 0x0010140
+#define ACP_SCRATCH_REG_81 0x0010144
+#define ACP_SCRATCH_REG_82 0x0010148
+#define ACP_SCRATCH_REG_83 0x001014C
+#define ACP_SCRATCH_REG_84 0x0010150
+#define ACP_SCRATCH_REG_85 0x0010154
+#define ACP_SCRATCH_REG_86 0x0010158
+#define ACP_SCRATCH_REG_87 0x001015C
+#define ACP_SCRATCH_REG_88 0x0010160
+#define ACP_SCRATCH_REG_89 0x0010164
+#define ACP_SCRATCH_REG_90 0x0010168
+#define ACP_SCRATCH_REG_91 0x001016C
+#define ACP_SCRATCH_REG_92 0x0010170
+#define ACP_SCRATCH_REG_93 0x0010174
+#define ACP_SCRATCH_REG_94 0x0010178
+#define ACP_SCRATCH_REG_95 0x001017C
+#define ACP_SCRATCH_REG_96 0x0010180
+#define ACP_SCRATCH_REG_97 0x0010184
+#define ACP_SCRATCH_REG_98 0x0010188
+#define ACP_SCRATCH_REG_99 0x001018C
+#define ACP_SCRATCH_REG_100 0x0010190
+#define ACP_SCRATCH_REG_101 0x0010194
+#define ACP_SCRATCH_REG_102 0x0010198
+#define ACP_SCRATCH_REG_103 0x001019C
+#define ACP_SCRATCH_REG_104 0x00101A0
+#define ACP_SCRATCH_REG_105 0x00101A4
+#define ACP_SCRATCH_REG_106 0x00101A8
+#define ACP_SCRATCH_REG_107 0x00101AC
+#define ACP_SCRATCH_REG_108 0x00101B0
+#define ACP_SCRATCH_REG_109 0x00101B4
+#define ACP_SCRATCH_REG_110 0x00101B8
+#define ACP_SCRATCH_REG_111 0x00101BC
+#define ACP_SCRATCH_REG_112 0x00101C0
+#define ACP_SCRATCH_REG_113 0x00101C4
+#define ACP_SCRATCH_REG_114 0x00101C8
+#define ACP_SCRATCH_REG_115 0x00101CC
+#define ACP_SCRATCH_REG_116 0x00101D0
+#define ACP_SCRATCH_REG_117 0x00101D4
+#define ACP_SCRATCH_REG_118 0x00101D8
+#define ACP_SCRATCH_REG_119 0x00101DC
+#define ACP_SCRATCH_REG_120 0x00101E0
+#define ACP_SCRATCH_REG_121 0x00101E4
+#define ACP_SCRATCH_REG_122 0x00101E8
+#define ACP_SCRATCH_REG_123 0x00101EC
+#define ACP_SCRATCH_REG_124 0x00101F0
+#define ACP_SCRATCH_REG_125 0x00101F4
+#define ACP_SCRATCH_REG_126 0x00101F8
+#define ACP_SCRATCH_REG_127 0x00101FC
+#define ACP_SCRATCH_REG_128 0x0010200
+#endif
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 94d06ddfd80a..229118156a1f 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -600,6 +600,7 @@ struct iscsit_conn {
struct iscsi_tpg_np *tpg_np;
/* Pointer to parent session */
struct iscsit_session *sess;
+ struct target_cmd_counter *cmd_cnt;
int bitmap_id;
int rx_thread_active;
struct task_struct *rx_thread;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 8c920456edd9..010e966aee0a 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -492,6 +492,7 @@ struct se_cmd {
struct se_lun *se_lun;
/* Only used for internal passthrough and legacy TCM fabric modules */
struct se_session *se_sess;
+ struct target_cmd_counter *cmd_cnt;
struct se_tmr_req *se_tmr_req;
struct llist_node se_cmd_list;
struct completion *free_compl;
@@ -617,22 +618,26 @@ static inline struct se_node_acl *fabric_stat_to_nacl(struct config_item *item)
acl_fabric_stat_group);
}

-struct se_session {
+struct target_cmd_counter {
+ struct percpu_ref refcnt;
+ wait_queue_head_t refcnt_wq;
+ struct completion stop_done;
atomic_t stopped;
+};
+
+struct se_session {
u64 sess_bin_isid;
enum target_prot_op sup_prot_ops;
enum target_prot_type sess_prot_type;
struct se_node_acl *se_node_acl;
struct se_portal_group *se_tpg;
void *fabric_sess_ptr;
- struct percpu_ref cmd_count;
struct list_head sess_list;
struct list_head sess_acl_list;
spinlock_t sess_cmd_lock;
- wait_queue_head_t cmd_count_wq;
- struct completion stop_done;
void *sess_cmd_map;
struct sbitmap_queue sess_tag_pool;
+ struct target_cmd_counter *cmd_cnt;
};

struct se_device;
@@ -865,6 +870,7 @@ struct se_device {
struct rcu_head rcu_head;
int queue_cnt;
struct se_device_queue *queues;
+ struct mutex lun_reset_mutex;
};

struct se_hba {
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 38f0662476d1..b188b1e90e1e 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -133,7 +133,12 @@ struct se_session *target_setup_session(struct se_portal_group *,
struct se_session *, void *));
void target_remove_session(struct se_session *);

-int transport_init_session(struct se_session *se_sess);
+void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt);
+void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt);
+struct target_cmd_counter *target_alloc_cmd_counter(void);
+void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt);
+
+void transport_init_session(struct se_session *se_sess);
struct se_session *transport_alloc_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
unsigned int);
@@ -149,9 +154,11 @@ void transport_deregister_session_configfs(struct se_session *);
void transport_deregister_session(struct se_session *);


-void __target_init_cmd(struct se_cmd *,
- const struct target_core_fabric_ops *,
- struct se_session *, u32, int, int, unsigned char *, u64);
+void __target_init_cmd(struct se_cmd *cmd,
+ const struct target_core_fabric_ops *tfo,
+ struct se_session *sess, u32 data_length, int data_direction,
+ int task_attr, unsigned char *sense_buffer, u64 unpacked_lun,
+ struct target_cmd_counter *cmd_cnt);
int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *sense, u64 unpacked_lun, u32 data_length,
int task_attr, int data_dir, int flags);
diff --git a/include/trace/events/qrtr.h b/include/trace/events/qrtr.h
index b1de14c3bb93..441132c67133 100644
--- a/include/trace/events/qrtr.h
+++ b/include/trace/events/qrtr.h
@@ -10,15 +10,16 @@

TRACE_EVENT(qrtr_ns_service_announce_new,

- TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
+ TP_PROTO(unsigned int service, unsigned int instance,
+ unsigned int node, unsigned int port),

TP_ARGS(service, instance, node, port),

TP_STRUCT__entry(
- __field(__le32, service)
- __field(__le32, instance)
- __field(__le32, node)
- __field(__le32, port)
+ __field(unsigned int, service)
+ __field(unsigned int, instance)
+ __field(unsigned int, node)
+ __field(unsigned int, port)
),

TP_fast_assign(
@@ -36,15 +37,16 @@ TRACE_EVENT(qrtr_ns_service_announce_new,

TRACE_EVENT(qrtr_ns_service_announce_del,

- TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
+ TP_PROTO(unsigned int service, unsigned int instance,
+ unsigned int node, unsigned int port),

TP_ARGS(service, instance, node, port),

TP_STRUCT__entry(
- __field(__le32, service)
- __field(__le32, instance)
- __field(__le32, node)
- __field(__le32, port)
+ __field(unsigned int, service)
+ __field(unsigned int, instance)
+ __field(unsigned int, node)
+ __field(unsigned int, port)
),

TP_fast_assign(
@@ -62,15 +64,16 @@ TRACE_EVENT(qrtr_ns_service_announce_del,

TRACE_EVENT(qrtr_ns_server_add,

- TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
+ TP_PROTO(unsigned int service, unsigned int instance,
+ unsigned int node, unsigned int port),

TP_ARGS(service, instance, node, port),

TP_STRUCT__entry(
- __field(__le32, service)
- __field(__le32, instance)
- __field(__le32, node)
- __field(__le32, port)
+ __field(unsigned int, service)
+ __field(unsigned int, instance)
+ __field(unsigned int, node)
+ __field(unsigned int, port)
),

TP_fast_assign(
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 2e713a7d9aa3..3e8619c72f77 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -371,7 +371,8 @@ TRACE_EVENT(itimer_expire,
tick_dep_name(PERF_EVENTS) \
tick_dep_name(SCHED) \
tick_dep_name(CLOCK_UNSTABLE) \
- tick_dep_name_end(RCU)
+ tick_dep_name(RCU) \
+ tick_dep_name_end(RCU_EXP)

#undef tick_dep_name
#undef tick_dep_mask_name
diff --git a/include/trace/stages/stage5_get_offsets.h b/include/trace/stages/stage5_get_offsets.h
index fba4c24ed9e6..def36fbb8c5c 100644
--- a/include/trace/stages/stage5_get_offsets.h
+++ b/include/trace/stages/stage5_get_offsets.h
@@ -9,17 +9,30 @@
#undef __entry
#define __entry entry

+/*
+ * Fields should never declare an array: i.e. __field(int, arr[5])
+ * If they do, it will cause issues in parsing and possibly corrupt the
+ * events. To prevent that from happening, test the sizeof() a fictitious
+ * type called "struct _test_no_array_##item" which will fail if "item"
+ * contains array elements (like "arr[5]").
+ *
+ * If you hit this, use __array(int, arr, 5) instead.
+ */
#undef __field
-#define __field(type, item)
+#define __field(type, item) \
+ { (void)sizeof(struct _test_no_array_##item *); }

#undef __field_ext
-#define __field_ext(type, item, filter_type)
+#define __field_ext(type, item, filter_type) \
+ { (void)sizeof(struct _test_no_array_##item *); }

#undef __field_struct
-#define __field_struct(type, item)
+#define __field_struct(type, item) \
+ { (void)sizeof(struct _test_no_array_##item *); }

#undef __field_struct_ext
-#define __field_struct_ext(type, item, filter_type)
+#define __field_struct_ext(type, item, filter_type) \
+ { (void)sizeof(struct _test_no_array_##item *); }

#undef __array
#define __array(type, item, len)
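
Since the stage5_get_offsets.h hunk above leans on a preprocessor trick that is easy to misread, here is a stand-alone sketch of it; the _test_no_array_ prefix and the __field() shape are taken from the hunk, while the demo names are made up for illustration. Taking sizeof() of a pointer to a struct type that is never declared is always valid, but when the field name carries an array suffix the token paste produces a type name that cannot parse, so the offending event definition fails to build.

#define __field(type, item) \
	{ (void)sizeof(struct _test_no_array_##item *); }

static void demo(void)
{
	/* OK: expands to sizeof(struct _test_no_array_counter *). */
	__field(int, counter)

	/*
	 * Not OK: would expand to sizeof(struct _test_no_array_counter[5] *),
	 * which is a syntax error, forcing the author to use __array() instead.
	 */
	/* __field(int, counter[5]) */
}

int main(void)
{
	demo();
	return 0;
}
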
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 5655e89b962b..d4d4fa0bb362 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -181,6 +181,7 @@ struct btrfs_scrub_progress {
};

#define BTRFS_SCRUB_READONLY 1
+#define BTRFS_SCRUB_SUPPORTED_FLAGS (BTRFS_SCRUB_READONLY)
struct btrfs_ioctl_scrub_args {
__u64 devid; /* in */
__u64 start; /* in */
diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
index af2a44c08683..a429381e7ca5 100644
--- a/include/uapi/linux/const.h
+++ b/include/uapi/linux/const.h
@@ -28,7 +28,7 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))

-#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))

#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
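
The small-looking change to __ALIGN_KERNEL() above matters because this is a uapi header: presumably the motivation is that user space may include it under strict ISO modes where the GNU "typeof" keyword is not recognized, whereas the __typeof__ spelling is accepted by gcc and clang in every mode. A sketch, assuming a hypothetical file name and compiler flags, of the same macro pair compiled as ordinary user-space C:

/* Builds cleanly with, e.g.: gcc -std=c99 -pedantic-errors align_demo.c
 * (with plain "typeof" the same command would reject the macro).
 */
#include <stdio.h>

#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))

int main(void)
{
	unsigned long addr = 0x1234fUL;

	/* Rounds 0x1234f up to the next 0x1000 boundary: prints 0x13000. */
	printf("0x%lx -> 0x%lx\n", addr, __ALIGN_KERNEL(addr, 0x1000UL));
	return 0;
}
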
diff --git a/include/xen/xen.h b/include/xen/xen.h
index a99bab817523..b088f0d31689 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -71,4 +71,15 @@ static inline void xen_free_unpopulated_pages(unsigned int nr_pages,
}
#endif

+#if defined(CONFIG_XEN_DOM0) && defined(CONFIG_ACPI) && defined(CONFIG_X86)
+bool __init xen_processor_present(uint32_t acpi_id);
+#else
+#include <linux/bug.h>
+static inline bool xen_processor_present(uint32_t acpi_id)
+{
+ BUG();
+ return false;
+}
+#endif
+
#endif /* _XEN_XEN_H */
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 4426d0e15174..cce95164204f 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -562,7 +562,7 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
}

ctx->user_bufs[i] = imu;
- *io_get_tag_slot(ctx->buf_data, offset) = tag;
+ *io_get_tag_slot(ctx->buf_data, i) = tag;
}

if (needs_switch)
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index b73169737a01..a8838a32f750 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -572,8 +572,8 @@ static s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
*btf_p = btf;
return ret;
}
- spin_lock_bh(&btf_idr_lock);
btf_put(btf);
+ spin_lock_bh(&btf_idr_lock);
}
spin_unlock_bh(&btf_idr_lock);
return ret;
@@ -5333,12 +5333,8 @@ struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)

static bool is_int_ptr(struct btf *btf, const struct btf_type *t)
{
- /* t comes in already as a pointer */
- t = btf_type_by_id(btf, t->type);
-
- /* allow const */
- if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST)
- t = btf_type_by_id(btf, t->type);
+ /* skip modifiers */
+ t = btf_type_skip_modifiers(btf, t->type, NULL);

return btf_type_is_int(t);
}
@@ -7961,12 +7957,10 @@ bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id)
btf_get(mod_btf);
spin_unlock_bh(&btf_idr_lock);
cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
- if (IS_ERR(cands)) {
- btf_put(mod_btf);
+ btf_put(mod_btf);
+ if (IS_ERR(cands))
return ERR_CAST(cands);
- }
spin_lock_bh(&btf_idr_lock);
- btf_put(mod_btf);
}
spin_unlock_bh(&btf_idr_lock);
/* cands is a pointer to kmalloced memory here if cands->cnt > 0
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index bf2fdb33fb31..819f011f0a9c 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -1921,14 +1921,17 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
if (ret < 0)
goto out;

- if (ctx.optlen > max_optlen || ctx.optlen < 0) {
+ if (optval && (ctx.optlen > max_optlen || ctx.optlen < 0)) {
ret = -EFAULT;
goto out;
}

if (ctx.optlen != 0) {
- if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
- put_user(ctx.optlen, optlen)) {
+ if (optval && copy_to_user(optval, ctx.optval, ctx.optlen)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (put_user(ctx.optlen, optlen)) {
ret = -EFAULT;
goto out;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8db2ed564939..872616107613 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1590,9 +1590,9 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
struct tnum var64_off = tnum_intersect(reg->var_off,
tnum_range(reg->umin_value,
reg->umax_value));
- struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
- tnum_range(reg->u32_min_value,
- reg->u32_max_value));
+ struct tnum var32_off = tnum_intersect(tnum_subreg(var64_off),
+ tnum_range(reg->u32_min_value,
+ reg->u32_max_value));

reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
}
@@ -3518,17 +3518,13 @@ static int check_stack_read(struct bpf_verifier_env *env,
}
/* Variable offset is prohibited for unprivileged mode for simplicity
* since it requires corresponding support in Spectre masking for stack
- * ALU. See also retrieve_ptr_limit().
+ * ALU. See also retrieve_ptr_limit(). The check in
+ * check_stack_access_for_ptr_arithmetic() called by
+ * adjust_ptr_min_max_vals() prevents users from creating stack pointers
+ * with variable offsets, therefore no check is required here. Further,
+ * just checking it here would be insufficient as speculative stack
+ * writes could still lead to unsafe speculative behaviour.
*/
- if (!env->bypass_spec_v1 && var_off) {
- char tn_buf[48];
-
- tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
- ptr_regno, tn_buf);
- return -EACCES;
- }
-
if (!var_off) {
off += reg->var_off.value;
err = check_stack_read_fixed_off(env, state, off, size,
@@ -11908,10 +11904,11 @@ static int propagate_precision(struct bpf_verifier_env *env,
state_reg = state->regs;
for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
if (state_reg->type != SCALAR_VALUE ||
- !state_reg->precise)
+ !state_reg->precise ||
+ !(state_reg->live & REG_LIVE_READ))
continue;
if (env->log.level & BPF_LOG_LEVEL2)
- verbose(env, "frame %d: propagating r%d\n", i, fr);
+ verbose(env, "frame %d: propagating r%d\n", fr, i);
err = mark_chain_precision_frame(env, fr, i);
if (err < 0)
return err;
@@ -11922,11 +11919,12 @@ static int propagate_precision(struct bpf_verifier_env *env,
continue;
state_reg = &state->stack[i].spilled_ptr;
if (state_reg->type != SCALAR_VALUE ||
- !state_reg->precise)
+ !state_reg->precise ||
+ !(state_reg->live & REG_LIVE_READ))
continue;
if (env->log.level & BPF_LOG_LEVEL2)
verbose(env, "frame %d: propagating fp%d\n",
- (-i - 1) * BPF_REG_SIZE, fr);
+ fr, (-i - 1) * BPF_REG_SIZE);
err = mark_chain_precision_stack_frame(env, fr, i);
if (err < 0)
return err;
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 339a990554e7..7f4ad5e70b40 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -919,7 +919,9 @@ EXPORT_SYMBOL_GPL(is_swiotlb_active);

static int io_tlb_used_get(void *data, u64 *val)
{
- *val = mem_used(&io_tlb_default_mem);
+ struct io_tlb_mem *mem = data;
+
+ *val = mem_used(mem);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
@@ -932,7 +934,7 @@ static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
return;

debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
- debugfs_create_file("io_tlb_used", 0400, mem->debugfs, NULL,
+ debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem,
&fops_io_tlb_used);
}

@@ -987,6 +989,11 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
/* Set Per-device io tlb area to one */
unsigned int nareas = 1;

+ if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
+ dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping.");
+ return -EINVAL;
+ }
+
/*
* Since multiple devices can share the same pool, the private data,
* io_tlb_mem struct, will be initialized by the first device attached
@@ -1048,11 +1055,6 @@ static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
of_get_flat_dt_prop(node, "no-map", NULL))
return -EINVAL;

- if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
- pr_err("Restricted DMA pool must be accessible within the linear mapping.");
- return -EINVAL;
- }
-
rmem->ops = &rmem_swiotlb_ops;
pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
&rmem->base, (unsigned long)rmem->size / SZ_1M);
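
The swiotlb hunk above relies on debugfs handing the 'data' pointer given to debugfs_create_file() back to the attribute's get() callback, so each restricted pool reports its own usage instead of the default pool's. A minimal sketch of that pattern, with illustrative names (my_used_get, fops_my_used):

    static int my_used_get(void *data, u64 *val)
    {
            struct io_tlb_mem *mem = data;  /* the pointer registered below */

            *val = mem_used(mem);
            return 0;
    }
    DEFINE_DEBUGFS_ATTRIBUTE(fops_my_used, my_used_get, NULL, "%llu\n");

    /* per-pool file: 'mem' is passed as data instead of NULL */
    debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem, &fops_my_used);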
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7699b99706ad..934332b3eb54 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9254,8 +9254,8 @@ __perf_event_account_interrupt(struct perf_event *event, int throttle)
hwc->interrupts = 1;
} else {
hwc->interrupts++;
- if (unlikely(throttle
- && hwc->interrupts >= max_samples_per_tick)) {
+ if (unlikely(throttle &&
+ hwc->interrupts > max_samples_per_tick)) {
__this_cpu_inc(perf_throttled_count);
tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
hwc->interrupts = MAX_INTERRUPTS;
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 54d077e1a2dc..5a60cc52adc0 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -337,11 +337,20 @@ static void delay_access(int type)
*/
static __always_inline u64 read_instrumented_memory(const volatile void *ptr, size_t size)
{
+ /*
+ * In the below we don't necessarily need the read of the location to
+ * be atomic, and we don't use READ_ONCE(), since all we need for race
+ * detection is to observe 2 different values.
+ *
+ * Furthermore, on certain architectures (such as arm64), READ_ONCE()
+ * may turn into more complex instructions than a plain load that cannot
+ * do unaligned accesses.
+ */
switch (size) {
- case 1: return READ_ONCE(*(const u8 *)ptr);
- case 2: return READ_ONCE(*(const u16 *)ptr);
- case 4: return READ_ONCE(*(const u32 *)ptr);
- case 8: return READ_ONCE(*(const u64 *)ptr);
+ case 1: return *(const volatile u8 *)ptr;
+ case 2: return *(const volatile u16 *)ptr;
+ case 4: return *(const volatile u32 *)ptr;
+ case 8: return *(const volatile u64 *)ptr;
default: return 0; /* Ignore; we do not diff the values. */
}
}
diff --git a/kernel/kheaders.c b/kernel/kheaders.c
index 8f69772af77b..42163c9e94e5 100644
--- a/kernel/kheaders.c
+++ b/kernel/kheaders.c
@@ -26,15 +26,15 @@ asm (
" .popsection \n"
);

-extern char kernel_headers_data;
-extern char kernel_headers_data_end;
+extern char kernel_headers_data[];
+extern char kernel_headers_data_end[];

static ssize_t
ikheaders_read(struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t len)
{
- memcpy(buf, &kernel_headers_data + off, len);
+ memcpy(buf, &kernel_headers_data[off], len);
return len;
}

@@ -48,8 +48,8 @@ static struct bin_attribute kheaders_attr __ro_after_init = {

static int __init ikheaders_init(void)
{
- kheaders_attr.size = (&kernel_headers_data_end -
- &kernel_headers_data);
+ kheaders_attr.size = (kernel_headers_data_end -
+ kernel_headers_data);
return sysfs_create_bin_file(kernel_kobj, &kheaders_attr);
}
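
A short illustration (not part of the patch) of why linker/asm-provided boundary symbols are best declared as arrays rather than single chars, as the kheaders change above does; the symbol names are made up:

    extern char blob_start[];   /* hypothetical boundary symbols */
    extern char blob_end[];

    static size_t blob_size(void)
    {
            /*
             * As arrays, the symbols decay to pointers: the size is a plain
             * pointer difference, and blob_start[off] stays within the
             * declared object, instead of taking the address of a one-byte
             * char and doing arithmetic past it.
             */
            return blob_end - blob_start;
    }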

diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 793c55a2becb..30d1274f03f6 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -64,6 +64,7 @@ enum {
static int hibernation_mode = HIBERNATION_SHUTDOWN;

bool freezer_test_done;
+bool snapshot_test;

static const struct platform_hibernation_ops *hibernation_ops;

@@ -687,18 +688,22 @@ static int load_image_and_restore(void)
{
int error;
unsigned int flags;
+ fmode_t mode = FMODE_READ;
+
+ if (snapshot_test)
+ mode |= FMODE_EXCL;

pm_pr_dbg("Loading hibernation image.\n");

lock_device_hotplug();
error = create_basic_memory_bitmaps();
if (error) {
- swsusp_close(FMODE_READ | FMODE_EXCL);
+ swsusp_close(mode);
goto Unlock;
}

error = swsusp_read(&flags);
- swsusp_close(FMODE_READ | FMODE_EXCL);
+ swsusp_close(mode);
if (!error)
error = hibernation_restore(flags & SF_PLATFORM_MODE);

@@ -716,7 +721,6 @@ static int load_image_and_restore(void)
*/
int hibernate(void)
{
- bool snapshot_test = false;
unsigned int sleep_flags;
int error;

@@ -744,6 +748,9 @@ int hibernate(void)
if (error)
goto Exit;

+ /* protected by system_transition_mutex */
+ snapshot_test = false;
+
lock_device_hotplug();
/* Allocate memory management structures */
error = create_basic_memory_bitmaps();
@@ -940,6 +947,8 @@ static int software_resume(void)
*/
mutex_lock_nested(&system_transition_mutex, SINGLE_DEPTH_NESTING);

+ snapshot_test = false;
+
if (swsusp_resume_device)
goto Check_image;

diff --git a/kernel/power/power.h b/kernel/power/power.h
index b4f433943209..b83c8d5e188d 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -59,6 +59,7 @@ asmlinkage int swsusp_save(void);

/* kernel/power/hibernate.c */
extern bool freezer_test_done;
+extern bool snapshot_test;

extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 277434b6c0bf..cc44c37699de 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -1518,9 +1518,13 @@ int swsusp_check(void)
{
int error;
void *holder;
+ fmode_t mode = FMODE_READ;
+
+ if (snapshot_test)
+ mode |= FMODE_EXCL;

hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
- FMODE_READ | FMODE_EXCL, &holder);
+ mode, &holder);
if (!IS_ERR(hib_resume_bdev)) {
set_blocksize(hib_resume_bdev, PAGE_SIZE);
clear_page(swsusp_header);
@@ -1547,7 +1551,7 @@ int swsusp_check(void)

put:
if (error)
- blkdev_put(hib_resume_bdev, FMODE_READ | FMODE_EXCL);
+ blkdev_put(hib_resume_bdev, mode);
else
pr_debug("Image signature found, resuming\n");
} else {
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 14d9384fba05..ce34ca0b5b98 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -665,6 +665,7 @@ void __rcu_irq_enter_check_tick(void)
}
raw_spin_unlock_rcu_node(rdp->mynode);
}
+NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
#endif /* CONFIG_NO_HZ_FULL */

/*
diff --git a/kernel/relay.c b/kernel/relay.c
index 88bcb09f0a1f..1cb4bb9f09a3 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -989,7 +989,8 @@ static size_t relay_file_read_start_pos(struct rchan_buf *buf)
size_t subbuf_size = buf->chan->subbuf_size;
size_t n_subbufs = buf->chan->n_subbufs;
size_t consumed = buf->subbufs_consumed % n_subbufs;
- size_t read_pos = consumed * subbuf_size + buf->bytes_consumed;
+ size_t read_pos = (consumed * subbuf_size + buf->bytes_consumed)
+ % (n_subbufs * subbuf_size);

read_subbuf = read_pos / subbuf_size;
padding = buf->padding[read_subbuf];
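
A worked example of the wraparound the relay change above guards against, using illustrative numbers:

    size_t n_subbufs = 4, subbuf_size = 4096;        /* 16384-byte buffer   */
    size_t consumed = 3;                             /* subbufs_consumed % 4 */
    size_t bytes_consumed = 4096;                    /* whole sub-buffer read */
    size_t read_pos = (consumed * subbuf_size + bytes_consumed)
                      % (n_subbufs * subbuf_size);   /* 16384 % 16384 == 0  */

Without the modulo, read_pos would be 16384 and read_pos / subbuf_size would index one past the end of buf->padding[].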
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 9ae8f41e3372..f7d381b6c313 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2246,6 +2246,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
!cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
task_on_cpu(rq, task) ||
!dl_task(task) ||
+ is_migration_disabled(task) ||
!task_on_rq_queued(task))) {
double_unlock_balance(rq, later_rq);
later_rq = NULL;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f70c4a7fb4ef..fa33c441ae86 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6475,7 +6475,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);

schedstat_inc(p->stats.nr_wakeups_affine_attempts);
- if (target == nr_cpumask_bits)
+ if (target != this_cpu)
return prev_cpu;

schedstat_inc(sd->ttwu_move_affine);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 0a11f44adee5..4f5796dd26a5 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2000,11 +2000,15 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
* the mean time, task could have
* migrated already or had its affinity changed.
* Also make sure that it wasn't scheduled on its rq.
+ * It is possible the task was scheduled, set
+ * "migrate_disabled" and then got preempted, so we must
+ * check the task migration disable flag here too.
*/
if (unlikely(task_rq(task) != rq ||
!cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
task_on_cpu(rq, task) ||
!rt_task(task) ||
+ is_migration_disabled(task) ||
!task_on_rq_queued(task))) {

double_unlock_balance(rq, lowest_rq);
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index cb925e8ef9a8..44b25ff35d28 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -847,6 +847,8 @@ static u64 collect_timerqueue(struct timerqueue_head *head,
return expires;

ctmr->firing = 1;
+ /* See posix_cpu_timer_wait_running() */
+ rcu_assign_pointer(ctmr->handling, current);
cpu_timer_dequeue(ctmr);
list_add_tail(&ctmr->elist, firing);
}
@@ -1162,7 +1164,49 @@ static void handle_posix_cpu_timers(struct task_struct *tsk);
#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
static void posix_cpu_timers_work(struct callback_head *work)
{
+ struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work);
+
+ mutex_lock(&cw->mutex);
handle_posix_cpu_timers(current);
+ mutex_unlock(&cw->mutex);
+}
+
+/*
+ * Invoked from the posix-timer core when a cancel operation failed because
+ * the timer is marked firing. The caller holds rcu_read_lock(), which
+ * protects the timer and the task which is expiring it from being freed.
+ */
+static void posix_cpu_timer_wait_running(struct k_itimer *timr)
+{
+ struct task_struct *tsk = rcu_dereference(timr->it.cpu.handling);
+
+ /* Has the handling task completed expiry already? */
+ if (!tsk)
+ return;
+
+ /* Ensure that the task cannot go away */
+ get_task_struct(tsk);
+ /* Now drop the RCU protection so the mutex can be locked */
+ rcu_read_unlock();
+ /* Wait on the expiry mutex */
+ mutex_lock(&tsk->posix_cputimers_work.mutex);
+ /* Release it immediately again. */
+ mutex_unlock(&tsk->posix_cputimers_work.mutex);
+ /* Drop the task reference. */
+ put_task_struct(tsk);
+ /* Relock RCU so the callsite is balanced */
+ rcu_read_lock();
+}
+
+static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
+{
+ /* Ensure that timr->it.cpu.handling task cannot go away */
+ rcu_read_lock();
+ spin_unlock_irq(&timr->it_lock);
+ posix_cpu_timer_wait_running(timr);
+ rcu_read_unlock();
+ /* @timr is on stack and is valid */
+ spin_lock_irq(&timr->it_lock);
}

/*
@@ -1178,6 +1222,7 @@ void clear_posix_cputimers_work(struct task_struct *p)
sizeof(p->posix_cputimers_work.work));
init_task_work(&p->posix_cputimers_work.work,
posix_cpu_timers_work);
+ mutex_init(&p->posix_cputimers_work.mutex);
p->posix_cputimers_work.scheduled = false;
}

@@ -1256,6 +1301,18 @@ static inline void __run_posix_cpu_timers(struct task_struct *tsk)
lockdep_posixtimer_exit();
}

+static void posix_cpu_timer_wait_running(struct k_itimer *timr)
+{
+ cpu_relax();
+}
+
+static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
+{
+ spin_unlock_irq(&timr->it_lock);
+ cpu_relax();
+ spin_lock_irq(&timr->it_lock);
+}
+
static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
{
return false;
@@ -1364,6 +1421,8 @@ static void handle_posix_cpu_timers(struct task_struct *tsk)
*/
if (likely(cpu_firing >= 0))
cpu_timer_fire(timer);
+ /* See posix_cpu_timer_wait_running() */
+ rcu_assign_pointer(timer->it.cpu.handling, NULL);
spin_unlock(&timer->it_lock);
}
}
@@ -1498,23 +1557,16 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
expires = cpu_timer_getexpires(&timer.it.cpu);
error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
if (!error) {
- /*
- * Timer is now unarmed, deletion can not fail.
- */
+ /* Timer is now unarmed, deletion can not fail. */
posix_cpu_timer_del(&timer);
+ } else {
+ while (error == TIMER_RETRY) {
+ posix_cpu_timer_wait_running_nsleep(&timer);
+ error = posix_cpu_timer_del(&timer);
+ }
}
- spin_unlock_irq(&timer.it_lock);

- while (error == TIMER_RETRY) {
- /*
- * We need to handle case when timer was or is in the
- * middle of firing. In other cases we already freed
- * resources.
- */
- spin_lock_irq(&timer.it_lock);
- error = posix_cpu_timer_del(&timer);
- spin_unlock_irq(&timer.it_lock);
- }
+ spin_unlock_irq(&timer.it_lock);

if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
/*
@@ -1624,6 +1676,7 @@ const struct k_clock clock_posix_cpu = {
.timer_del = posix_cpu_timer_del,
.timer_get = posix_cpu_timer_get,
.timer_rearm = posix_cpu_timer_rearm,
+ .timer_wait_running = posix_cpu_timer_wait_running,
};

const struct k_clock clock_process = {
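
A simplified sketch of the handshake the posix-cpu-timers changes above introduce (task-work case), assuming the expiring task and a deleter racing on the same timer:

    /* expiry side: the whole expiry pass runs under a per-task mutex */
    mutex_lock(&cw->mutex);
    handle_posix_cpu_timers(current);
    mutex_unlock(&cw->mutex);

    /* deleter side, after posix_cpu_timer_del() returned TIMER_RETRY */
    get_task_struct(tsk);                          /* keep the handler task alive */
    rcu_read_unlock();                             /* the mutex may sleep */
    mutex_lock(&tsk->posix_cputimers_work.mutex);  /* blocks until expiry is done */
    mutex_unlock(&tsk->posix_cputimers_work.mutex);
    put_task_struct(tsk);
    rcu_read_lock();                               /* rebalance for the caller */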
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 0c8a87a11b39..808a247205a9 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -846,6 +846,10 @@ static struct k_itimer *timer_wait_running(struct k_itimer *timer,
rcu_read_lock();
unlock_timer(timer, *flags);

+ /*
+ * kc->timer_wait_running() might drop RCU lock. So @timer
+ * cannot be touched anymore after the function returns!
+ */
if (!WARN_ON_ONCE(!kc->timer_wait_running))
kc->timer_wait_running(timer);

diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 46789356f856..65b8658da829 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -218,9 +218,19 @@ static void tick_setup_device(struct tick_device *td,
* this cpu:
*/
if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
+ ktime_t next_p;
+ u32 rem;
+
tick_do_timer_cpu = cpu;

- tick_next_period = ktime_get();
+ next_p = ktime_get();
+ div_u64_rem(next_p, TICK_NSEC, &rem);
+ if (rem) {
+ next_p -= rem;
+ next_p += TICK_NSEC;
+ }
+
+ tick_next_period = next_p;
#ifdef CONFIG_NO_HZ_FULL
/*
* The boot CPU may be nohz_full, in which case set
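
The tick-common change above rounds the first period up to the next tick boundary; a standalone sketch of the same arithmetic:

    /* Round 'now' up to the next multiple of TICK_NSEC. */
    static u64 first_tick_after(u64 now)
    {
            u32 rem;

            div_u64_rem(now, TICK_NSEC, &rem);   /* rem = now % TICK_NSEC */
            if (rem)
                    now += TICK_NSEC - rem;      /* step to the next boundary */
            return now;
    }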
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index b0e3c9205946..a46506f7ec6d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -281,6 +281,11 @@ static bool check_tick_dependency(atomic_t *dep)
return true;
}

+ if (val & TICK_DEP_MASK_RCU_EXP) {
+ trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP);
+ return true;
+ }
+
return false;
}

@@ -527,7 +532,7 @@ void __init tick_nohz_full_setup(cpumask_var_t cpumask)
tick_nohz_full_running = true;
}

-static int tick_nohz_cpu_down(unsigned int cpu)
+bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
{
/*
* The tick_do_timer_cpu CPU handles housekeeping duty (unbound
@@ -535,8 +540,13 @@ static int tick_nohz_cpu_down(unsigned int cpu)
* CPUs. It must remain online when nohz full is enabled.
*/
if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
- return -EBUSY;
- return 0;
+ return false;
+ return true;
+}
+
+static int tick_nohz_cpu_down(unsigned int cpu)
+{
+ return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
}

void __init tick_nohz_init(void)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f72b9f1de178..221c8c404973 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -526,7 +526,7 @@ EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
* partially updated. Since the tk->offs_boot update is a rare event, this
* should be a rare occurrence which postprocessing should be able to handle.
*
- * The caveats vs. timestamp ordering as documented for ktime_get_fast_ns()
+ * The caveats vs. timestamp ordering as documented for ktime_get_mono_fast_ns()
* apply as well.
*/
u64 notrace ktime_get_boot_fast_ns(void)
@@ -576,7 +576,7 @@ static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
/**
* ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
*
- * See ktime_get_fast_ns() for documentation of the time stamp ordering.
+ * See ktime_get_mono_fast_ns() for documentation of the time stamp ordering.
*/
u64 ktime_get_real_fast_ns(void)
{
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9d8538531a54..4acc27cb856f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1778,6 +1778,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
struct list_head *head = cpu_buffer->pages;
struct buffer_page *bpage, *tmp;

+ irq_work_sync(&cpu_buffer->irq_work.work);
+
free_buffer_page(cpu_buffer->reader_page);

if (head) {
@@ -1884,6 +1886,8 @@ ring_buffer_free(struct trace_buffer *buffer)

cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);

+ irq_work_sync(&buffer->irq_work.work);
+
for_each_buffer_cpu(buffer, cpu)
rb_free_cpu_buffer(buffer->buffers[cpu]);

@@ -5333,6 +5337,9 @@ void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

+/* Flag to ensure proper resetting of atomic variables */
+#define RESET_BIT (1 << 30)
+
/**
* ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
* @buffer: The ring buffer to reset a per cpu buffer of
@@ -5349,20 +5356,27 @@ void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
for_each_online_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];

- atomic_inc(&cpu_buffer->resize_disabled);
+ atomic_add(RESET_BIT, &cpu_buffer->resize_disabled);
atomic_inc(&cpu_buffer->record_disabled);
}

/* Make sure all commits have finished */
synchronize_rcu();

- for_each_online_buffer_cpu(buffer, cpu) {
+ for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];

+ /*
+ * If a CPU came online during the synchronize_rcu(), then
+ * ignore it.
+ */
+ if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT))
+ continue;
+
reset_disabled_cpu_buffer(cpu_buffer);

atomic_dec(&cpu_buffer->record_disabled);
- atomic_dec(&cpu_buffer->resize_disabled);
+ atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
}

mutex_unlock(&buffer->mutex);
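
The RESET_BIT trick above packs a "this CPU was part of the reset pass" marker into the high bits of an existing atomic disable counter, so CPUs that come online during synchronize_rcu() are skipped. A minimal sketch of the pattern with illustrative names:

    #define MARK_BIT (1 << 30)                      /* well above any real count */

    static void mark_and_disable(atomic_t *ctr)
    {
            atomic_add(MARK_BIT, ctr);              /* disable + marker in one op */
    }

    static bool marked(atomic_t *ctr)
    {
            return atomic_read(ctr) & MARK_BIT;     /* false for late-arriving CPUs */
    }

    static void unmark_and_enable(atomic_t *ctr)
    {
            atomic_sub(MARK_BIT, ctr);
    }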
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3360d638071a..5c1087df2f1c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -9619,7 +9619,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)

tr->buffer_percent = 50;

- trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
+ trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
tr, &buffer_percent_fops);

create_trace_options_dir(tr);
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index 908e8a13c675..625cab4b9d94 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -1398,6 +1398,9 @@ static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
return -EFAULT;

+ if (idx < 0)
+ return -EINVAL;
+
rcu_read_lock_sched();

refs = rcu_dereference_sched(info->refs);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8e21c352c155..4dd494f786bc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4850,10 +4850,16 @@ static void show_one_worker_pool(struct worker_pool *pool)
struct worker *worker;
bool first = true;
unsigned long flags;
+ unsigned long hung = 0;

raw_spin_lock_irqsave(&pool->lock, flags);
if (pool->nr_workers == pool->nr_idle)
goto next_pool;
+
+ /* How long the first pending work is waiting for a worker. */
+ if (!list_empty(&pool->worklist))
+ hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
+
/*
* Defer printing to avoid deadlocks in console drivers that
* queue work while holding locks also taken in their write
@@ -4862,9 +4868,7 @@ static void show_one_worker_pool(struct worker_pool *pool)
printk_deferred_enter();
pr_info("pool %d:", pool->id);
pr_cont_pool_info(pool);
- pr_cont(" hung=%us workers=%d",
- jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
- pool->nr_workers);
+ pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
if (pool->manager)
pr_cont(" manager: %d",
task_pid_nr(pool->manager->task));
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 6f8e5dd1dcd0..bdfd859cccaf 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -216,10 +216,6 @@ static struct debug_obj *__alloc_object(struct hlist_head *list)
return obj;
}

-/*
- * Allocate a new object. If the pool is empty, switch off the debugger.
- * Must be called with interrupts disabled.
- */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
@@ -552,36 +548,74 @@ static void debug_object_is_on_stack(void *addr, int onstack)
WARN_ON(1);
}

-static void
-__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
+static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
+ const struct debug_obj_descr *descr,
+ bool onstack, bool alloc_ifstatic)
{
- enum debug_obj_state state;
- bool check_stack = false;
- struct debug_bucket *db;
- struct debug_obj *obj;
- unsigned long flags;
+ struct debug_obj *obj = lookup_object(addr, b);
+ enum debug_obj_state state = ODEBUG_STATE_NONE;
+
+ if (likely(obj))
+ return obj;
+
+ /*
+ * debug_object_init() unconditionally allocates untracked
+ * objects. It does not matter whether it is a static object or
+ * not.
+ *
+ * debug_object_assert_init() and debug_object_activate() allow
+ * allocation only if the descriptor callback confirms that the
+ * object is static and considered initialized. For non-static
+ * objects the allocation needs to be done from the fixup callback.
+ */
+ if (unlikely(alloc_ifstatic)) {
+ if (!descr->is_static_object || !descr->is_static_object(addr))
+ return ERR_PTR(-ENOENT);
+ /* Statically allocated objects are considered initialized */
+ state = ODEBUG_STATE_INIT;
+ }
+
+ obj = alloc_object(addr, b, descr);
+ if (likely(obj)) {
+ obj->state = state;
+ debug_object_is_on_stack(addr, onstack);
+ return obj;
+ }
+
+ /* Out of memory. Do the cleanup outside of the locked region */
+ debug_objects_enabled = 0;
+ return NULL;
+}

+static void debug_objects_fill_pool(void)
+{
/*
* On RT enabled kernels the pool refill must happen in preemptible
* context:
*/
if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
fill_pool();
+}
+
+static void
+__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
+{
+ enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ debug_objects_fill_pool();

db = get_bucket((unsigned long) addr);

raw_spin_lock_irqsave(&db->lock, flags);

- obj = lookup_object(addr, db);
- if (!obj) {
- obj = alloc_object(addr, db, descr);
- if (!obj) {
- debug_objects_enabled = 0;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_objects_oom();
- return;
- }
- check_stack = true;
+ obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
+ if (unlikely(!obj)) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_objects_oom();
+ return;
}

switch (obj->state) {
@@ -607,8 +641,6 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
}

raw_spin_unlock_irqrestore(&db->lock, flags);
- if (check_stack)
- debug_object_is_on_stack(addr, onstack);
}

/**
@@ -648,24 +680,24 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
*/
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
enum debug_obj_state state;
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
int ret;
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };

if (!debug_objects_enabled)
return 0;

+ debug_objects_fill_pool();
+
db = get_bucket((unsigned long) addr);

raw_spin_lock_irqsave(&db->lock, flags);

- obj = lookup_object(addr, db);
- if (obj) {
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ if (likely(!IS_ERR_OR_NULL(obj))) {
bool print_object = false;

switch (obj->state) {
@@ -698,24 +730,16 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)

raw_spin_unlock_irqrestore(&db->lock, flags);

- /*
- * We are here when a static object is activated. We
- * let the type specific code confirm whether this is
- * true or not. if true, we just make sure that the
- * static object is tracked in the object tracker. If
- * not, this must be a bug, so we try to fix it up.
- */
- if (descr->is_static_object && descr->is_static_object(addr)) {
- /* track this static object */
- debug_object_init(addr, descr);
- debug_object_activate(addr, descr);
- } else {
- debug_print_object(&o, "activate");
- ret = debug_object_fixup(descr->fixup_activate, addr,
- ODEBUG_STATE_NOTAVAILABLE);
- return ret ? 0 : -EINVAL;
+ /* If NULL the allocation has hit OOM */
+ if (!obj) {
+ debug_objects_oom();
+ return 0;
}
- return 0;
+
+ /* Object is neither static nor tracked. It's not initialized */
+ debug_print_object(&o, "activate");
+ ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
+ return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);

@@ -869,6 +893,7 @@ EXPORT_SYMBOL_GPL(debug_object_free);
*/
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
@@ -876,34 +901,25 @@ void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
if (!debug_objects_enabled)
return;

+ debug_objects_fill_pool();
+
db = get_bucket((unsigned long) addr);

raw_spin_lock_irqsave(&db->lock, flags);
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (likely(!IS_ERR_OR_NULL(obj)))
+ return;

- obj = lookup_object(addr, db);
+ /* If NULL the allocation has hit OOM */
if (!obj) {
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
-
- raw_spin_unlock_irqrestore(&db->lock, flags);
- /*
- * Maybe the object is static, and we let the type specific
- * code confirm. Track this static object if true, else invoke
- * fixup.
- */
- if (descr->is_static_object && descr->is_static_object(addr)) {
- /* Track this static object */
- debug_object_init(addr, descr);
- } else {
- debug_print_object(&o, "assert_init");
- debug_object_fixup(descr->fixup_assert_init, addr,
- ODEBUG_STATE_NOTAVAILABLE);
- }
+ debug_objects_oom();
return;
}

- raw_spin_unlock_irqrestore(&db->lock, flags);
+ /* Object is neither tracked nor static. It's not initialized. */
+ debug_print_object(&o, "assert_init");
+ debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
index 1048ef1b8d6e..b08bb1fba106 100644
--- a/lib/kunit/debugfs.c
+++ b/lib/kunit/debugfs.c
@@ -55,15 +55,25 @@ static int debugfs_print_results(struct seq_file *seq, void *v)
enum kunit_status success = kunit_suite_has_succeeded(suite);
struct kunit_case *test_case;

- if (!suite || !suite->log)
+ if (!suite)
return 0;

- seq_printf(seq, "%s", suite->log);
+ /* Print KTAP header so the debugfs log can be parsed as valid KTAP. */
+ seq_puts(seq, "KTAP version 1\n");
+ seq_puts(seq, "1..1\n");
+
+ /* Print suite header because it is not stored in the test logs. */
+ seq_puts(seq, KUNIT_SUBTEST_INDENT "KTAP version 1\n");
+ seq_printf(seq, KUNIT_SUBTEST_INDENT "# Subtest: %s\n", suite->name);
+ seq_printf(seq, KUNIT_SUBTEST_INDENT "1..%zd\n", kunit_suite_num_test_cases(suite));

kunit_suite_for_each_test_case(suite, test_case)
debugfs_print_result(seq, suite, test_case);

- seq_printf(seq, "%s %d - %s\n",
+ if (suite->log)
+ seq_printf(seq, "%s", suite->log);
+
+ seq_printf(seq, "%s %d %s\n",
kunit_status_to_ok_not_ok(success), 1, suite->name);
return 0;
}
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 9bbc422c284b..74982b83707c 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -166,7 +166,7 @@ static void kunit_exec_run_tests(struct suite_set *suite_set)
{
size_t num_suites = suite_set->end - suite_set->start;

- pr_info("TAP version 14\n");
+ pr_info("KTAP version 1\n");
pr_info("1..%zu\n", num_suites);

__kunit_test_suites_init(suite_set->start, num_suites);
@@ -177,8 +177,8 @@ static void kunit_exec_list_tests(struct suite_set *suite_set)
struct kunit_suite * const *suites;
struct kunit_case *test_case;

- /* Hack: print a tap header so kunit.py can find the start of KUnit output. */
- pr_info("TAP version 14\n");
+ /* Hack: print a ktap header so kunit.py can find the start of KUnit output. */
+ pr_info("KTAP version 1\n");

for (suites = suite_set->start; suites < suite_set->end; suites++)
kunit_suite_for_each_test_case((*suites), test_case) {
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 2a6992fe7c3e..184df6f701b4 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -149,9 +149,18 @@ EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);

static void kunit_print_suite_start(struct kunit_suite *suite)
{
- kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s",
+ /*
+ * We do not log the test suite header as doing so would
+ * mean debugfs display would consist of the test suite
+ * header prior to individual test results.
+ * Hence directly printk the suite status, and we will
+ * separately seq_printf() the suite header for the debugfs
+ * representation.
+ */
+ pr_info(KUNIT_SUBTEST_INDENT "KTAP version 1\n");
+ pr_info(KUNIT_SUBTEST_INDENT "# Subtest: %s\n",
suite->name);
- kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "1..%zd",
+ pr_info(KUNIT_SUBTEST_INDENT "1..%zd\n",
kunit_suite_num_test_cases(suite));
}

@@ -168,20 +177,19 @@ static void kunit_print_ok_not_ok(void *test_or_suite,

/*
* We do not log the test suite results as doing so would
- * mean debugfs display would consist of the test suite
- * description and status prior to individual test results.
- * Hence directly printk the suite status, and we will
- * separately seq_printf() the suite status for the debugfs
+ * mean debugfs display would consist of an incorrect test
+ * number. Hence directly printk the suite result, and we will
+ * separately seq_printf() the suite results for the debugfs
* representation.
*/
if (suite)
- pr_info("%s %zd - %s%s%s\n",
+ pr_info("%s %zd %s%s%s\n",
kunit_status_to_ok_not_ok(status),
test_number, description, directive_header,
(status == KUNIT_SKIPPED) ? directive : "");
else
kunit_log(KERN_INFO, test,
- KUNIT_SUBTEST_INDENT "%s %zd - %s%s%s",
+ KUNIT_SUBTEST_INDENT "%s %zd %s%s%s",
kunit_status_to_ok_not_ok(status),
test_number, description, directive_header,
(status == KUNIT_SKIPPED) ? directive : "");
@@ -542,6 +550,8 @@ int kunit_run_tests(struct kunit_suite *suite)
/* Get initial param. */
param_desc[0] = '\0';
test.param_value = test_case->generate_params(NULL, param_desc);
+ kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
+ "KTAP version 1\n");
kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
"# Subtest: %s", test_case->name);

@@ -555,7 +565,7 @@ int kunit_run_tests(struct kunit_suite *suite)

kunit_log(KERN_INFO, &test,
KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
- "%s %d - %s",
+ "%s %d %s",
kunit_status_to_ok_not_ok(test.status),
test.param_index + 1, param_desc);

diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index b22c4f461cb0..cc9bc99e47cd 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -225,7 +225,7 @@ static void init_vmalloc_pages(const void *start, unsigned long size)
const void *addr;

for (addr = start; addr < start + size; addr += PAGE_SIZE) {
- struct page *page = virt_to_page(addr);
+ struct page *page = vmalloc_to_page(addr);

clear_highpage_kasan_tagged(page);
}
@@ -237,7 +237,7 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
u8 tag;
unsigned long redzone_start, redzone_size;

- if (!kasan_vmalloc_enabled() || !is_vmalloc_or_module_addr(start)) {
+ if (!kasan_vmalloc_enabled()) {
if (flags & KASAN_VMALLOC_INIT)
init_vmalloc_pages(start, size);
return (void *)start;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e132f70a059e..7d36dd95d1ff 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -802,8 +802,10 @@ static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
vmstart = vma->vm_start;
}

- if (mpol_equal(vma_policy(vma), new_pol))
+ if (mpol_equal(vma_policy(vma), new_pol)) {
+ *prev = vma;
return 0;
+ }

pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
merged = vma_merge(vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dc66f6715bfc..d18296109aa7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1887,6 +1887,16 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
}
}

+ /*
+ * Folio is unmapped now so it cannot be newly pinned anymore.
+ * No point in trying to reclaim folio if it is pinned.
+ * Furthermore we don't want to reclaim underlying fs metadata
+ * if the folio is pinned and thus potentially modified by the
+ * pinning process as that may upset the filesystem.
+ */
+ if (folio_maybe_dma_pinned(folio))
+ goto activate_locked;
+
mapping = folio_mapping(folio);
if (folio_test_dirty(folio)) {
/*
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index e1bb41a443c4..07e86d03d4ba 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -365,7 +365,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)

switch (cmd) {
case SIOCSHWTSTAMP:
- if (!net_eq(dev_net(dev), &init_net))
+ if (!net_eq(dev_net(dev), dev_net(real_dev)))
break;
fallthrough;
case SIOCGMIIPHY:
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index cd4b3a610961..597c1f17d388 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4972,6 +4972,9 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
skb = alloc_skb(0, GFP_ATOMIC);
} else {
skb = skb_clone(orig_skb, GFP_ATOMIC);
+
+ if (skb_orphan_frags_rx(skb, GFP_ATOMIC))
+ return;
}
if (!skb)
return;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index b9d7c3dd1cb3..c0fd8f5f3b94 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -783,6 +783,7 @@ static int dccp_v6_rcv(struct sk_buff *skb)

if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
+ nf_reset_ct(skb);

return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4,
refcounted) ? -1 : 0;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 922c87ef1ab5..2a07588265c7 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1570,9 +1570,19 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
cork->dst = NULL;
skb_dst_set(skb, &rt->dst);

- if (iph->protocol == IPPROTO_ICMP)
- icmp_out_count(net, ((struct icmphdr *)
- skb_transport_header(skb))->type);
+ if (iph->protocol == IPPROTO_ICMP) {
+ u8 icmp_type;
+
+ /* For such sockets, transhdrlen is zero when do ip_append_data(),
+ /* For such sockets, transhdrlen is zero when ip_append_data() is done,
+ * so the icmphdr is not in the skb linear region and icmp_type cannot
+ * be read via icmp_hdr(skb)->type.
+ */
+ if (sk->sk_type == SOCK_RAW && !inet_sk(sk)->hdrincl)
+ icmp_type = fl4->fl4_icmp_type;
+ else
+ icmp_type = icmp_hdr(skb)->type;
+ icmp_out_count(net, icmp_type);
+ }

ip_cork_release(cork);
out:
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index e1ebf5e42ebe..d94041bb4287 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -404,10 +404,6 @@ void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
/* Only do this once for first final protocol */
have_final = true;

- /* Free reference early: we don't need it any more,
- and it may hold ip_conntrack module loaded
- indefinitely. */
- nf_reset_ct(skb);

skb_postpull_rcsum(skb, skb_network_header(skb),
skb_network_header_len(skb));
@@ -430,10 +426,12 @@ void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
goto discard;
}
}
- if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
- !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
- SKB_DR_SET(reason, XFRM_POLICY);
- goto discard;
+ if (!(ipprot->flags & INET6_PROTO_NOPOLICY)) {
+ if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+ SKB_DR_SET(reason, XFRM_POLICY);
+ goto discard;
+ }
+ nf_reset_ct(skb);
}

ret = INDIRECT_CALL_2(ipprot->handler, tcp_v6_rcv, udpv6_rcv,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 4fc511bdf176..f44b99f7ecdc 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -193,10 +193,8 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

/* Not releasing hash table! */
- if (clone) {
- nf_reset_ct(clone);
+ if (clone)
rawv6_rcv(sk, clone);
- }
}
}
rcu_read_unlock();
@@ -387,6 +385,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
kfree_skb(skb);
return NET_RX_DROP;
}
+ nf_reset_ct(skb);

if (!rp->checksum)
skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 81afb40bfc0b..c563a84d67b4 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1722,6 +1722,8 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
if (drop_reason)
goto discard_and_relse;

+ nf_reset_ct(skb);
+
if (tcp_filter(sk, skb)) {
drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
goto discard_and_relse;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 0b8127988adb..c029222ce46b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -701,6 +701,7 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
drop_reason = SKB_DROP_REASON_XFRM_POLICY;
goto drop;
}
+ nf_reset_ct(skb);

if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
@@ -1024,6 +1025,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,

if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
+ nf_reset_ct(skb);

if (udp_lib_checksum_complete(skb))
goto csum_error;
diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
index adae86e8e02e..8639e7efd0e2 100644
--- a/net/netfilter/nf_conntrack_bpf.c
+++ b/net/netfilter/nf_conntrack_bpf.c
@@ -384,6 +384,7 @@ struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
struct nf_conn *nfct = (struct nf_conn *)nfct_i;
int err;

+ nfct->status |= IPS_CONFIRMED;
err = nf_conntrack_hash_check_insert(nfct);
if (err < 0) {
nf_conntrack_free(nfct);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 30ed45b1b57d..a0e9c7af0846 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -938,7 +938,6 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
goto out;
}

- ct->status |= IPS_CONFIRMED;
smp_wmb();
/* The caller holds a reference to this object */
refcount_set(&ct->ct_general.use, 2);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index d095d3c1ceca..cb4325b8ebb1 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -176,7 +176,12 @@ static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct,
bool skip_zero)
{
- long timeout = nf_ct_expires(ct) / HZ;
+ long timeout;
+
+ if (nf_ct_is_confirmed(ct))
+ timeout = nf_ct_expires(ct) / HZ;
+ else
+ timeout = ct->timeout / HZ;

if (skip_zero && timeout == 0)
return 0;
@@ -2253,9 +2258,6 @@ ctnetlink_create_conntrack(struct net *net,
if (!cda[CTA_TIMEOUT])
goto err1;

- timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
- __nf_ct_set_timeout(ct, timeout);
-
rcu_read_lock();
if (cda[CTA_HELP]) {
char *helpname = NULL;
@@ -2316,6 +2318,12 @@ ctnetlink_create_conntrack(struct net *net,
nfct_seqadj_ext_add(ct);
nfct_synproxy_ext_add(ct);

+ /* we must add conntrack extensions before confirmation. */
+ ct->status |= IPS_CONFIRMED;
+
+ timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
+ __nf_ct_set_timeout(ct, timeout);
+
if (cda[CTA_STATUS]) {
err = ctnetlink_change_status(ct, cda);
if (err < 0)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 12d815b9aa13..f663262df698 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4936,12 +4936,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
}
}

+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
+{
+ if (nft_set_is_anonymous(set))
+ nft_clear(ctx->net, set);
+
+ set->use++;
+}
+EXPORT_SYMBOL_GPL(nf_tables_activate_set);
+
void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding,
enum nft_trans_phase phase)
{
switch (phase) {
case NFT_TRANS_PREPARE:
+ if (nft_set_is_anonymous(set))
+ nft_deactivate_next(ctx->net, set);
+
set->use--;
return;
case NFT_TRANS_ABORT:
@@ -8517,6 +8529,8 @@ static int nf_tables_validate(struct net *net)
if (nft_table_validate(net, table) < 0)
return -EAGAIN;
}
+
+ nft_validate_state_update(net, NFT_VALIDATE_SKIP);
break;
}

@@ -9437,11 +9451,6 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
return 0;
}

-static void nf_tables_cleanup(struct net *net)
-{
- nft_validate_state_update(net, NFT_VALIDATE_SKIP);
-}
-
static int nf_tables_abort(struct net *net, struct sk_buff *skb,
enum nfnl_abort_action action)
{
@@ -9475,7 +9484,6 @@ static const struct nfnetlink_subsystem nf_tables_subsys = {
.cb = nf_tables_cb,
.commit = nf_tables_commit,
.abort = nf_tables_abort,
- .cleanup = nf_tables_cleanup,
.valid_genid = nf_tables_valid_genid,
.owner = THIS_MODULE,
};
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 81c7737c803a..ae7146475d17 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -590,8 +590,6 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
goto replay_abort;
}
}
- if (ss->cleanup)
- ss->cleanup(net);

nfnl_err_deliver(&err_list, oskb);
kfree_skb(skb);
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 6983e6ddeef9..e65a83328b55 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -342,7 +342,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx,
{
struct nft_dynset *priv = nft_expr_priv(expr);

- priv->set->use++;
+ nf_tables_activate_set(ctx, priv->set);
}

static void nft_dynset_destroy(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index d9ad1aa81856..68a5dea80548 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -167,7 +167,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx,
{
struct nft_lookup *priv = nft_expr_priv(expr);

- priv->set->use++;
+ nf_tables_activate_set(ctx, priv->set);
}

static void nft_lookup_destroy(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 5d8d91b3904d..7f8e480b6be5 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -184,7 +184,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx,
{
struct nft_objref_map *priv = nft_expr_priv(expr);

- priv->set->use++;
+ nf_tables_activate_set(ctx, priv->set);
}

static void nft_objref_map_destroy(const struct nft_ctx *ctx,
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 99622c64081c..b1dcc536521b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1731,7 +1731,8 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
- int len, val, err;
+ unsigned int flag;
+ int len, val;

if (level != SOL_NETLINK)
return -ENOPROTOOPT;
@@ -1743,39 +1744,17 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,

switch (optname) {
case NETLINK_PKTINFO:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
- if (put_user(len, optlen) ||
- put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_RECV_PKTINFO;
break;
case NETLINK_BROADCAST_ERROR:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
- if (put_user(len, optlen) ||
- put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_BROADCAST_SEND_ERROR;
break;
case NETLINK_NO_ENOBUFS:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
- if (put_user(len, optlen) ||
- put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_RECV_NO_ENOBUFS;
break;
case NETLINK_LIST_MEMBERSHIPS: {
- int pos, idx, shift;
+ int pos, idx, shift, err = 0;

- err = 0;
netlink_lock_table();
for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
if (len - pos < sizeof(u32))
@@ -1792,40 +1771,32 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
err = -EFAULT;
netlink_unlock_table();
- break;
+ return err;
}
case NETLINK_CAP_ACK:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
- if (put_user(len, optlen) ||
- put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_CAP_ACK;
break;
case NETLINK_EXT_ACK:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_EXT_ACK ? 1 : 0;
- if (put_user(len, optlen) || put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_EXT_ACK;
break;
case NETLINK_GET_STRICT_CHK:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_STRICT_CHK ? 1 : 0;
- if (put_user(len, optlen) || put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_STRICT_CHK;
break;
default:
- err = -ENOPROTOOPT;
+ return -ENOPROTOOPT;
}
- return err;
+
+ if (len < sizeof(int))
+ return -EINVAL;
+
+ len = sizeof(int);
+ val = nlk->flags & flag ? 1 : 0;
+
+ if (put_user(len, optlen) ||
+ copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
}

static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 1ab65f7f2a0a..ac9335d76fb7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -307,7 +307,8 @@ static void packet_cached_dev_reset(struct packet_sock *po)

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
- return po->xmit == packet_direct_xmit;
+ /* Paired with WRITE_ONCE() in packet_setsockopt() */
+ return READ_ONCE(po->xmit) == packet_direct_xmit;
}

static u16 packet_pick_tx_queue(struct sk_buff *skb)
@@ -2184,7 +2185,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
sll = &PACKET_SKB_CB(skb)->sa.ll;
sll->sll_hatype = dev->type;
sll->sll_pkttype = skb->pkt_type;
- if (unlikely(po->origdev))
+ if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
sll->sll_ifindex = orig_dev->ifindex;
else
sll->sll_ifindex = dev->ifindex;
@@ -2459,7 +2460,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
sll->sll_hatype = dev->type;
sll->sll_protocol = skb->protocol;
sll->sll_pkttype = skb->pkt_type;
- if (unlikely(po->origdev))
+ if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
sll->sll_ifindex = orig_dev->ifindex;
else
sll->sll_ifindex = dev->ifindex;
@@ -2866,7 +2867,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
packet_inc_pending(&po->tx_ring);

status = TP_STATUS_SEND_REQUEST;
- err = po->xmit(skb);
+ /* Paired with WRITE_ONCE() in packet_setsockopt() */
+ err = READ_ONCE(po->xmit)(skb);
if (unlikely(err != 0)) {
if (err > 0)
err = net_xmit_errno(err);
@@ -3069,7 +3071,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
virtio_net_hdr_set_proto(skb, &vnet_hdr);
}

- err = po->xmit(skb);
+ /* Paired with WRITE_ONCE() in packet_setsockopt() */
+ err = READ_ONCE(po->xmit)(skb);
if (unlikely(err != 0)) {
if (err > 0)
err = net_xmit_errno(err);
@@ -3512,7 +3515,7 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
}

- if (pkt_sk(sk)->auxdata) {
+ if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) {
struct tpacket_auxdata aux;

aux.tp_status = TP_STATUS_USER;
@@ -3896,9 +3899,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;

- lock_sock(sk);
- po->auxdata = !!val;
- release_sock(sk);
+ packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val);
return 0;
}
case PACKET_ORIGDEV:
@@ -3910,9 +3911,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;

- lock_sock(sk);
- po->origdev = !!val;
- release_sock(sk);
+ packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val);
return 0;
}
case PACKET_VNET_HDR:
@@ -4006,7 +4005,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;

- po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
+ /* Paired with all lockless reads of po->xmit */
+ WRITE_ONCE(po->xmit, val ? packet_direct_xmit : dev_queue_xmit);
return 0;
}
default:
@@ -4057,10 +4057,10 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,

break;
case PACKET_AUXDATA:
- val = po->auxdata;
+ val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
break;
case PACKET_ORIGDEV:
- val = po->origdev;
+ val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
break;
case PACKET_VNET_HDR:
val = po->has_vnet_hdr;
diff --git a/net/packet/diag.c b/net/packet/diag.c
index 07812ae5ca07..d704c7bf51b2 100644
--- a/net/packet/diag.c
+++ b/net/packet/diag.c
@@ -23,9 +23,9 @@ static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
pinfo.pdi_flags = 0;
if (po->running)
pinfo.pdi_flags |= PDI_RUNNING;
- if (po->auxdata)
+ if (packet_sock_flag(po, PACKET_SOCK_AUXDATA))
pinfo.pdi_flags |= PDI_AUXDATA;
- if (po->origdev)
+ if (packet_sock_flag(po, PACKET_SOCK_ORIGDEV))
pinfo.pdi_flags |= PDI_ORIGDEV;
if (po->has_vnet_hdr)
pinfo.pdi_flags |= PDI_VNETHDR;
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 48af35b1aed2..3bae8ea7a36f 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -116,10 +116,9 @@ struct packet_sock {
int copy_thresh;
spinlock_t bind_lock;
struct mutex pg_vec_lock;
+ unsigned long flags;
unsigned int running; /* bind_lock must be held */
- unsigned int auxdata:1, /* writer must hold sock lock */
- origdev:1,
- has_vnet_hdr:1,
+ unsigned int has_vnet_hdr:1, /* writer must hold sock lock */
tp_loss:1,
tp_tx_has_off:1;
int pressure;
@@ -144,4 +143,25 @@ static inline struct packet_sock *pkt_sk(struct sock *sk)
return (struct packet_sock *)sk;
}

+enum packet_sock_flags {
+ PACKET_SOCK_ORIGDEV,
+ PACKET_SOCK_AUXDATA,
+};
+
+static inline void packet_sock_flag_set(struct packet_sock *po,
+ enum packet_sock_flags flag,
+ bool val)
+{
+ if (val)
+ set_bit(flag, &po->flags);
+ else
+ clear_bit(flag, &po->flags);
+}
+
+static inline bool packet_sock_flag(const struct packet_sock *po,
+ enum packet_sock_flags flag)
+{
+ return test_bit(flag, &po->flags);
+}
+
#endif
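
Since set_bit()/test_bit() are atomic on the new flags word, callers no longer need the socket lock just to flip these booleans; a usage sketch built from the helpers above:

    /* setsockopt path: no lock_sock()/release_sock() pair needed any more */
    packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, !!val);

    /* receive path */
    if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) {
            struct tpacket_auxdata aux;
            /* fill and attach aux as before */
    }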
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 48d14fb90ba0..f59a2cb2c803 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -779,13 +779,17 @@ static int fq_resize(struct Qdisc *sch, u32 log)
return 0;
}

+static struct netlink_range_validation iq_range = {
+ .max = INT_MAX,
+};
+
static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
[TCA_FQ_UNSPEC] = { .strict_start_type = TCA_FQ_TIMER_SLACK },

[TCA_FQ_PLIMIT] = { .type = NLA_U32 },
[TCA_FQ_FLOW_PLIMIT] = { .type = NLA_U32 },
[TCA_FQ_QUANTUM] = { .type = NLA_U32 },
- [TCA_FQ_INITIAL_QUANTUM] = { .type = NLA_U32 },
+ [TCA_FQ_INITIAL_QUANTUM] = NLA_POLICY_FULL_RANGE(NLA_U32, &iq_range),
[TCA_FQ_RATE_ENABLE] = { .type = NLA_U32 },
[TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 },
[TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index fd7e1c630493..d2ee56634308 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2050,9 +2050,6 @@ call_bind_status(struct rpc_task *task)
status = -EOPNOTSUPP;
break;
}
- if (task->tk_rebind_retry == 0)
- break;
- task->tk_rebind_retry--;
rpc_delay(task, 3*HZ);
goto retry_timeout;
case -ENOBUFS:
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index be587a308e05..c8321de341ee 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -817,7 +817,6 @@ rpc_init_task_statistics(struct rpc_task *task)
/* Initialize retry counters */
task->tk_garb_retry = 2;
task->tk_cred_retry = 2;
- task->tk_rebind_retry = 2;

/* starting timestamp */
task->tk_start = ktime_get();
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index c6fb6b763658..bdeba20aaf8f 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -161,6 +161,7 @@ static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
return false;

if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
+ addr + desc->len > pool->addrs_cnt ||
xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
return false;

diff --git a/scripts/gdb/linux/clk.py b/scripts/gdb/linux/clk.py
index 061aecfa294e..7a01fdc3e844 100644
--- a/scripts/gdb/linux/clk.py
+++ b/scripts/gdb/linux/clk.py
@@ -41,6 +41,8 @@ are cached and potentially out of date"""
self.show_subtree(child, level + 1)

def invoke(self, arg, from_tty):
+ if utils.gdb_eval_or_none("clk_root_list") is None:
+ raise gdb.GdbError("No clocks registered")
gdb.write(" enable prepare protect \n")
gdb.write(" clock count count count rate \n")
gdb.write("------------------------------------------------------------------------\n")
diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
index 2efbec6b6b8d..08f0587d15ea 100644
--- a/scripts/gdb/linux/constants.py.in
+++ b/scripts/gdb/linux/constants.py.in
@@ -39,6 +39,8 @@

import gdb

+LX_CONFIG(CONFIG_DEBUG_INFO_REDUCED)
+
/* linux/clk-provider.h */
if IS_BUILTIN(CONFIG_COMMON_CLK):
LX_GDBPARSED(CLK_GET_RATE_NOCACHE)
diff --git a/scripts/gdb/linux/genpd.py b/scripts/gdb/linux/genpd.py
index 39cd1abd8559..b53649c0a77a 100644
--- a/scripts/gdb/linux/genpd.py
+++ b/scripts/gdb/linux/genpd.py
@@ -5,7 +5,7 @@
import gdb
import sys

-from linux.utils import CachedType
+from linux.utils import CachedType, gdb_eval_or_none
from linux.lists import list_for_each_entry

generic_pm_domain_type = CachedType('struct generic_pm_domain')
@@ -70,6 +70,8 @@ Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''
gdb.write(' %-50s %s\n' % (kobj_path, rtpm_status_str(dev)))

def invoke(self, arg, from_tty):
+ if gdb_eval_or_none("&gpd_list") is None:
+ raise gdb.GdbError("No power domain(s) registered")
gdb.write('domain status children\n');
gdb.write(' /device runtime status\n');
gdb.write('----------------------------------------------------------------------\n');
diff --git a/scripts/gdb/linux/timerlist.py b/scripts/gdb/linux/timerlist.py
index 071d0dd5a634..51def847f1ef 100644
--- a/scripts/gdb/linux/timerlist.py
+++ b/scripts/gdb/linux/timerlist.py
@@ -73,7 +73,7 @@ def print_cpu(hrtimer_bases, cpu, max_clock_bases):
ts = cpus.per_cpu(tick_sched_ptr, cpu)

text = "cpu: {}\n".format(cpu)
- for i in xrange(max_clock_bases):
+ for i in range(max_clock_bases):
text += " clock {}:\n".format(i)
text += print_base(cpu_base['clock_base'][i])

@@ -158,6 +158,8 @@ def pr_cpumask(mask):
num_bytes = (nr_cpu_ids + 7) / 8
buf = utils.read_memoryview(inf, bits, num_bytes).tobytes()
buf = binascii.b2a_hex(buf)
+ if type(buf) is not str:
+ buf=buf.decode()

chunks = []
i = num_bytes
diff --git a/scripts/gdb/linux/utils.py b/scripts/gdb/linux/utils.py
index 1553f68716cc..7f36aee32ac6 100644
--- a/scripts/gdb/linux/utils.py
+++ b/scripts/gdb/linux/utils.py
@@ -88,7 +88,10 @@ def get_target_endianness():


def read_memoryview(inf, start, length):
- return memoryview(inf.read_memory(start, length))
+ m = inf.read_memory(start, length)
+ if type(m) is memoryview:
+ return m
+ return memoryview(m)


def read_u16(buffer, offset):
diff --git a/scripts/gdb/vmlinux-gdb.py b/scripts/gdb/vmlinux-gdb.py
index 3e8d3669f0ce..5564ffe8ae32 100644
--- a/scripts/gdb/vmlinux-gdb.py
+++ b/scripts/gdb/vmlinux-gdb.py
@@ -22,6 +22,10 @@ except:
gdb.write("NOTE: gdb 7.2 or later required for Linux helper scripts to "
"work.\n")
else:
+ import linux.constants
+ if linux.constants.LX_CONFIG_DEBUG_INFO_REDUCED:
+ raise gdb.GdbError("Reduced debug information will prevent GDB "
+ "from having complete types.\n")
import linux.utils
import linux.symbols
import linux.modules
@@ -32,7 +36,6 @@ else:
import linux.lists
import linux.rbtree
import linux.proc
- import linux.constants
import linux.timerlist
import linux.clk
import linux.genpd
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 39caeca47444..60a511c6b583 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -8,7 +8,7 @@ config IMA
select CRYPTO_HMAC
select CRYPTO_SHA1
select CRYPTO_HASH_INFO
- select TCG_TPM if HAS_IOMEM && !UML
+ select TCG_TPM if HAS_IOMEM
select TCG_TIS if TCG_TPM && X86
select TCG_CRB if TCG_TPM && ACPI
select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index 776162444882..0aecf9334ec3 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -23,8 +23,8 @@ ccflags-y := -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include
$(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h

quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h
- cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h
+ cmd_flask = $< $(obj)/flask.h $(obj)/av_permissions.h

targets += flask.h av_permissions.h
-$(obj)/flask.h: $(src)/include/classmap.h FORCE
+$(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/genheaders/genheaders FORCE
$(call if_changed,flask)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f70d6a33421d..172ffc2c332b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -9428,6 +9428,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x8919, "HP Pavilion Aero Laptop 13-be0xxx", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x896d, "HP ZBook Firefly 16 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8971, "HP EliteBook 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
@@ -9478,6 +9479,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8b8d, "HP", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8b8f, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
@@ -9500,6 +9502,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+ SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
@@ -9689,6 +9692,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x17aa, 0x2316, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x17aa, 0x2317, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig
index 150786279257..c88ebd84bdd5 100644
--- a/sound/soc/amd/Kconfig
+++ b/sound/soc/amd/Kconfig
@@ -129,10 +129,10 @@ config SND_SOC_AMD_RPL_ACP6x
If unsure select "N".

config SND_SOC_AMD_PS
- tristate "AMD Audio Coprocessor-v6.2 Pink Sardine support"
+ tristate "AMD Audio Coprocessor-v6.3 Pink Sardine support"
depends on X86 && PCI && ACPI
help
- This option enables Audio Coprocessor i.e ACP v6.2 support on
+ This option enables Audio Coprocessor i.e ACP v6.3 support on
AMD Pink sardine platform. By enabling this flag build will be
triggered for ACP PCI driver, ACP PDM DMA driver.
Say m if you have such a device.
diff --git a/sound/soc/amd/ps/acp62.h b/sound/soc/amd/ps/acp62.h
deleted file mode 100644
index 8b30aefa4cd0..000000000000
--- a/sound/soc/amd/ps/acp62.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * AMD ALSA SoC PDM Driver
- *
- * Copyright (C) 2022 Advanced Micro Devices, Inc. All rights reserved.
- */
-
-#include <sound/acp62_chip_offset_byte.h>
-
-#define ACP_DEVICE_ID 0x15E2
-#define ACP6x_REG_START 0x1240000
-#define ACP6x_REG_END 0x1250200
-#define ACP6x_DEVS 3
-#define ACP6x_PDM_MODE 1
-
-#define ACP_SOFT_RESET_SOFTRESET_AUDDONE_MASK 0x00010001
-#define ACP_PGFSM_CNTL_POWER_ON_MASK 1
-#define ACP_PGFSM_CNTL_POWER_OFF_MASK 0
-#define ACP_PGFSM_STATUS_MASK 3
-#define ACP_POWERED_ON 0
-#define ACP_POWER_ON_IN_PROGRESS 1
-#define ACP_POWERED_OFF 2
-#define ACP_POWER_OFF_IN_PROGRESS 3
-
-#define ACP_ERROR_MASK 0x20000000
-#define ACP_EXT_INTR_STAT_CLEAR_MASK 0xFFFFFFFF
-#define PDM_DMA_STAT 0x10
-
-#define PDM_DMA_INTR_MASK 0x10000
-#define ACP_ERROR_STAT 29
-#define PDM_DECIMATION_FACTOR 2
-#define ACP_PDM_CLK_FREQ_MASK 7
-#define ACP_WOV_MISC_CTRL_MASK 0x10
-#define ACP_PDM_ENABLE 1
-#define ACP_PDM_DISABLE 0
-#define ACP_PDM_DMA_EN_STATUS 2
-#define TWO_CH 2
-#define DELAY_US 5
-#define ACP_COUNTER 20000
-
-#define ACP_SRAM_PTE_OFFSET 0x03800000
-#define PAGE_SIZE_4K_ENABLE 2
-#define PDM_PTE_OFFSET 0
-#define PDM_MEM_WINDOW_START 0x4000000
-
-#define CAPTURE_MIN_NUM_PERIODS 4
-#define CAPTURE_MAX_NUM_PERIODS 4
-#define CAPTURE_MAX_PERIOD_SIZE 8192
-#define CAPTURE_MIN_PERIOD_SIZE 4096
-
-#define MAX_BUFFER (CAPTURE_MAX_PERIOD_SIZE * CAPTURE_MAX_NUM_PERIODS)
-#define MIN_BUFFER MAX_BUFFER
-
-/* time in ms for runtime suspend delay */
-#define ACP_SUSPEND_DELAY_MS 2000
-
-enum acp_config {
- ACP_CONFIG_0 = 0,
- ACP_CONFIG_1,
- ACP_CONFIG_2,
- ACP_CONFIG_3,
- ACP_CONFIG_4,
- ACP_CONFIG_5,
- ACP_CONFIG_6,
- ACP_CONFIG_7,
- ACP_CONFIG_8,
- ACP_CONFIG_9,
- ACP_CONFIG_10,
- ACP_CONFIG_11,
- ACP_CONFIG_12,
- ACP_CONFIG_13,
- ACP_CONFIG_14,
- ACP_CONFIG_15,
-};
-
-struct pdm_stream_instance {
- u16 num_pages;
- u16 channels;
- dma_addr_t dma_addr;
- u64 bytescount;
- void __iomem *acp62_base;
-};
-
-struct pdm_dev_data {
- u32 pdm_irq;
- void __iomem *acp62_base;
- struct snd_pcm_substream *capture_stream;
-};
-
-static inline u32 acp62_readl(void __iomem *base_addr)
-{
- return readl(base_addr);
-}
-
-static inline void acp62_writel(u32 val, void __iomem *base_addr)
-{
- writel(val, base_addr);
-}
diff --git a/sound/soc/amd/ps/acp63.h b/sound/soc/amd/ps/acp63.h
new file mode 100644
index 000000000000..85f869c2229f
--- /dev/null
+++ b/sound/soc/amd/ps/acp63.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * AMD ALSA SoC PDM Driver
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <sound/acp63_chip_offset_byte.h>
+
+#define ACP_DEVICE_ID 0x15E2
+#define ACP6x_REG_START 0x1240000
+#define ACP6x_REG_END 0x1250200
+#define ACP6x_DEVS 3
+#define ACP6x_PDM_MODE 1
+
+#define ACP_SOFT_RESET_SOFTRESET_AUDDONE_MASK 0x00010001
+#define ACP_PGFSM_CNTL_POWER_ON_MASK 1
+#define ACP_PGFSM_CNTL_POWER_OFF_MASK 0
+#define ACP_PGFSM_STATUS_MASK 3
+#define ACP_POWERED_ON 0
+#define ACP_POWER_ON_IN_PROGRESS 1
+#define ACP_POWERED_OFF 2
+#define ACP_POWER_OFF_IN_PROGRESS 3
+
+#define ACP_ERROR_MASK 0x20000000
+#define ACP_EXT_INTR_STAT_CLEAR_MASK 0xFFFFFFFF
+#define PDM_DMA_STAT 0x10
+
+#define PDM_DMA_INTR_MASK 0x10000
+#define ACP_ERROR_STAT 29
+#define PDM_DECIMATION_FACTOR 2
+#define ACP_PDM_CLK_FREQ_MASK 7
+#define ACP_WOV_MISC_CTRL_MASK 0x10
+#define ACP_PDM_ENABLE 1
+#define ACP_PDM_DISABLE 0
+#define ACP_PDM_DMA_EN_STATUS 2
+#define TWO_CH 2
+#define DELAY_US 5
+#define ACP_COUNTER 20000
+
+#define ACP_SRAM_PTE_OFFSET 0x03800000
+#define PAGE_SIZE_4K_ENABLE 2
+#define PDM_PTE_OFFSET 0
+#define PDM_MEM_WINDOW_START 0x4000000
+
+#define CAPTURE_MIN_NUM_PERIODS 4
+#define CAPTURE_MAX_NUM_PERIODS 4
+#define CAPTURE_MAX_PERIOD_SIZE 8192
+#define CAPTURE_MIN_PERIOD_SIZE 4096
+
+#define MAX_BUFFER (CAPTURE_MAX_PERIOD_SIZE * CAPTURE_MAX_NUM_PERIODS)
+#define MIN_BUFFER MAX_BUFFER
+
+/* time in ms for runtime suspend delay */
+#define ACP_SUSPEND_DELAY_MS 2000
+
+enum acp_config {
+ ACP_CONFIG_0 = 0,
+ ACP_CONFIG_1,
+ ACP_CONFIG_2,
+ ACP_CONFIG_3,
+ ACP_CONFIG_4,
+ ACP_CONFIG_5,
+ ACP_CONFIG_6,
+ ACP_CONFIG_7,
+ ACP_CONFIG_8,
+ ACP_CONFIG_9,
+ ACP_CONFIG_10,
+ ACP_CONFIG_11,
+ ACP_CONFIG_12,
+ ACP_CONFIG_13,
+ ACP_CONFIG_14,
+ ACP_CONFIG_15,
+};
+
+struct pdm_stream_instance {
+ u16 num_pages;
+ u16 channels;
+ dma_addr_t dma_addr;
+ u64 bytescount;
+ void __iomem *acp63_base;
+};
+
+struct pdm_dev_data {
+ u32 pdm_irq;
+ void __iomem *acp63_base;
+ struct snd_pcm_substream *capture_stream;
+};
+
+static inline u32 acp63_readl(void __iomem *base_addr)
+{
+ return readl(base_addr);
+}
+
+static inline void acp63_writel(u32 val, void __iomem *base_addr)
+{
+ writel(val, base_addr);
+}
diff --git a/sound/soc/amd/ps/pci-ps.c b/sound/soc/amd/ps/pci-ps.c
index dff2e2376bbf..7c9751a7eedc 100644
--- a/sound/soc/amd/ps/pci-ps.c
+++ b/sound/soc/amd/ps/pci-ps.c
@@ -15,30 +15,30 @@
#include <sound/pcm_params.h>
#include <linux/pm_runtime.h>

-#include "acp62.h"
+#include "acp63.h"

-struct acp62_dev_data {
- void __iomem *acp62_base;
+struct acp63_dev_data {
+ void __iomem *acp63_base;
struct resource *res;
- bool acp62_audio_mode;
+ bool acp63_audio_mode;
struct platform_device *pdev[ACP6x_DEVS];
};

-static int acp62_power_on(void __iomem *acp_base)
+static int acp63_power_on(void __iomem *acp_base)
{
u32 val;
int timeout;

- val = acp62_readl(acp_base + ACP_PGFSM_STATUS);
+ val = acp63_readl(acp_base + ACP_PGFSM_STATUS);

if (!val)
return val;

if ((val & ACP_PGFSM_STATUS_MASK) != ACP_POWER_ON_IN_PROGRESS)
- acp62_writel(ACP_PGFSM_CNTL_POWER_ON_MASK, acp_base + ACP_PGFSM_CONTROL);
+ acp63_writel(ACP_PGFSM_CNTL_POWER_ON_MASK, acp_base + ACP_PGFSM_CONTROL);
timeout = 0;
while (++timeout < 500) {
- val = acp62_readl(acp_base + ACP_PGFSM_STATUS);
+ val = acp63_readl(acp_base + ACP_PGFSM_STATUS);
if (!val)
return 0;
udelay(1);
@@ -46,23 +46,23 @@ static int acp62_power_on(void __iomem *acp_base)
return -ETIMEDOUT;
}

-static int acp62_reset(void __iomem *acp_base)
+static int acp63_reset(void __iomem *acp_base)
{
u32 val;
int timeout;

- acp62_writel(1, acp_base + ACP_SOFT_RESET);
+ acp63_writel(1, acp_base + ACP_SOFT_RESET);
timeout = 0;
while (++timeout < 500) {
- val = acp62_readl(acp_base + ACP_SOFT_RESET);
+ val = acp63_readl(acp_base + ACP_SOFT_RESET);
if (val & ACP_SOFT_RESET_SOFTRESET_AUDDONE_MASK)
break;
cpu_relax();
}
- acp62_writel(0, acp_base + ACP_SOFT_RESET);
+ acp63_writel(0, acp_base + ACP_SOFT_RESET);
timeout = 0;
while (++timeout < 500) {
- val = acp62_readl(acp_base + ACP_SOFT_RESET);
+ val = acp63_readl(acp_base + ACP_SOFT_RESET);
if (!val)
return 0;
cpu_relax();
@@ -70,57 +70,55 @@ static int acp62_reset(void __iomem *acp_base)
return -ETIMEDOUT;
}

-static void acp62_enable_interrupts(void __iomem *acp_base)
+static void acp63_enable_interrupts(void __iomem *acp_base)
{
- acp62_writel(1, acp_base + ACP_EXTERNAL_INTR_ENB);
+ acp63_writel(1, acp_base + ACP_EXTERNAL_INTR_ENB);
}

-static void acp62_disable_interrupts(void __iomem *acp_base)
+static void acp63_disable_interrupts(void __iomem *acp_base)
{
- acp62_writel(ACP_EXT_INTR_STAT_CLEAR_MASK, acp_base +
+ acp63_writel(ACP_EXT_INTR_STAT_CLEAR_MASK, acp_base +
ACP_EXTERNAL_INTR_STAT);
- acp62_writel(0, acp_base + ACP_EXTERNAL_INTR_CNTL);
- acp62_writel(0, acp_base + ACP_EXTERNAL_INTR_ENB);
+ acp63_writel(0, acp_base + ACP_EXTERNAL_INTR_CNTL);
+ acp63_writel(0, acp_base + ACP_EXTERNAL_INTR_ENB);
}

-static int acp62_init(void __iomem *acp_base, struct device *dev)
+static int acp63_init(void __iomem *acp_base, struct device *dev)
{
int ret;

- ret = acp62_power_on(acp_base);
+ ret = acp63_power_on(acp_base);
if (ret) {
dev_err(dev, "ACP power on failed\n");
return ret;
}
- acp62_writel(0x01, acp_base + ACP_CONTROL);
- ret = acp62_reset(acp_base);
+ acp63_writel(0x01, acp_base + ACP_CONTROL);
+ ret = acp63_reset(acp_base);
if (ret) {
dev_err(dev, "ACP reset failed\n");
return ret;
}
- acp62_writel(0x03, acp_base + ACP_CLKMUX_SEL);
- acp62_enable_interrupts(acp_base);
+ acp63_enable_interrupts(acp_base);
return 0;
}

-static int acp62_deinit(void __iomem *acp_base, struct device *dev)
+static int acp63_deinit(void __iomem *acp_base, struct device *dev)
{
int ret;

- acp62_disable_interrupts(acp_base);
- ret = acp62_reset(acp_base);
+ acp63_disable_interrupts(acp_base);
+ ret = acp63_reset(acp_base);
if (ret) {
dev_err(dev, "ACP reset failed\n");
return ret;
}
- acp62_writel(0, acp_base + ACP_CLKMUX_SEL);
- acp62_writel(0, acp_base + ACP_CONTROL);
+ acp63_writel(0, acp_base + ACP_CONTROL);
return 0;
}

-static irqreturn_t acp62_irq_handler(int irq, void *dev_id)
+static irqreturn_t acp63_irq_handler(int irq, void *dev_id)
{
- struct acp62_dev_data *adata;
+ struct acp63_dev_data *adata;
struct pdm_dev_data *ps_pdm_data;
u32 val;

@@ -128,10 +126,10 @@ static irqreturn_t acp62_irq_handler(int irq, void *dev_id)
if (!adata)
return IRQ_NONE;

- val = acp62_readl(adata->acp62_base + ACP_EXTERNAL_INTR_STAT);
+ val = acp63_readl(adata->acp63_base + ACP_EXTERNAL_INTR_STAT);
if (val & BIT(PDM_DMA_STAT)) {
ps_pdm_data = dev_get_drvdata(&adata->pdev[0]->dev);
- acp62_writel(BIT(PDM_DMA_STAT), adata->acp62_base + ACP_EXTERNAL_INTR_STAT);
+ acp63_writel(BIT(PDM_DMA_STAT), adata->acp63_base + ACP_EXTERNAL_INTR_STAT);
if (ps_pdm_data->capture_stream)
snd_pcm_period_elapsed(ps_pdm_data->capture_stream);
return IRQ_HANDLED;
@@ -139,10 +137,10 @@ static irqreturn_t acp62_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
}

-static int snd_acp62_probe(struct pci_dev *pci,
+static int snd_acp63_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
- struct acp62_dev_data *adata;
+ struct acp63_dev_data *adata;
struct platform_device_info pdevinfo[ACP6x_DEVS];
int index, ret;
int val = 0x00;
@@ -157,7 +155,7 @@ static int snd_acp62_probe(struct pci_dev *pci,
case 0x63:
break;
default:
- dev_dbg(&pci->dev, "acp62 pci device not found\n");
+ dev_dbg(&pci->dev, "acp63 pci device not found\n");
return -ENODEV;
}
if (pci_enable_device(pci)) {
@@ -170,7 +168,7 @@ static int snd_acp62_probe(struct pci_dev *pci,
dev_err(&pci->dev, "pci_request_regions failed\n");
goto disable_pci;
}
- adata = devm_kzalloc(&pci->dev, sizeof(struct acp62_dev_data),
+ adata = devm_kzalloc(&pci->dev, sizeof(struct acp63_dev_data),
GFP_KERNEL);
if (!adata) {
ret = -ENOMEM;
@@ -178,18 +176,18 @@ static int snd_acp62_probe(struct pci_dev *pci,
}

addr = pci_resource_start(pci, 0);
- adata->acp62_base = devm_ioremap(&pci->dev, addr,
+ adata->acp63_base = devm_ioremap(&pci->dev, addr,
pci_resource_len(pci, 0));
- if (!adata->acp62_base) {
+ if (!adata->acp63_base) {
ret = -ENOMEM;
goto release_regions;
}
pci_set_master(pci);
pci_set_drvdata(pci, adata);
- ret = acp62_init(adata->acp62_base, &pci->dev);
+ ret = acp63_init(adata->acp63_base, &pci->dev);
if (ret)
goto release_regions;
- val = acp62_readl(adata->acp62_base + ACP_PIN_CONFIG);
+ val = acp63_readl(adata->acp63_base + ACP_PIN_CONFIG);
switch (val) {
case ACP_CONFIG_0:
case ACP_CONFIG_1:
@@ -220,7 +218,7 @@ static int snd_acp62_probe(struct pci_dev *pci,
adata->res->flags = IORESOURCE_MEM;
adata->res->start = addr;
adata->res->end = addr + (ACP6x_REG_END - ACP6x_REG_START);
- adata->acp62_audio_mode = ACP6x_PDM_MODE;
+ adata->acp63_audio_mode = ACP6x_PDM_MODE;

memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo[0].name = "acp_ps_pdm_dma";
@@ -248,7 +246,7 @@ static int snd_acp62_probe(struct pci_dev *pci,
ret = PTR_ERR(adata->pdev[index]);
goto unregister_devs;
}
- ret = devm_request_irq(&pci->dev, pci->irq, acp62_irq_handler,
+ ret = devm_request_irq(&pci->dev, pci->irq, acp63_irq_handler,
irqflags, "ACP_PCI_IRQ", adata);
if (ret) {
dev_err(&pci->dev, "ACP PCI IRQ request failed\n");
@@ -267,7 +265,7 @@ static int snd_acp62_probe(struct pci_dev *pci,
for (--index; index >= 0; index--)
platform_device_unregister(adata->pdev[index]);
de_init:
- if (acp62_deinit(adata->acp62_base, &pci->dev))
+ if (acp63_deinit(adata->acp63_base, &pci->dev))
dev_err(&pci->dev, "ACP de-init failed\n");
release_regions:
pci_release_regions(pci);
@@ -277,46 +275,46 @@ static int snd_acp62_probe(struct pci_dev *pci,
return ret;
}

-static int __maybe_unused snd_acp62_suspend(struct device *dev)
+static int __maybe_unused snd_acp63_suspend(struct device *dev)
{
- struct acp62_dev_data *adata;
+ struct acp63_dev_data *adata;
int ret;

adata = dev_get_drvdata(dev);
- ret = acp62_deinit(adata->acp62_base, dev);
+ ret = acp63_deinit(adata->acp63_base, dev);
if (ret)
dev_err(dev, "ACP de-init failed\n");
return ret;
}

-static int __maybe_unused snd_acp62_resume(struct device *dev)
+static int __maybe_unused snd_acp63_resume(struct device *dev)
{
- struct acp62_dev_data *adata;
+ struct acp63_dev_data *adata;
int ret;

adata = dev_get_drvdata(dev);
- ret = acp62_init(adata->acp62_base, dev);
+ ret = acp63_init(adata->acp63_base, dev);
if (ret)
dev_err(dev, "ACP init failed\n");
return ret;
}

-static const struct dev_pm_ops acp62_pm_ops = {
- SET_RUNTIME_PM_OPS(snd_acp62_suspend, snd_acp62_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(snd_acp62_suspend, snd_acp62_resume)
+static const struct dev_pm_ops acp63_pm_ops = {
+ SET_RUNTIME_PM_OPS(snd_acp63_suspend, snd_acp63_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(snd_acp63_suspend, snd_acp63_resume)
};

-static void snd_acp62_remove(struct pci_dev *pci)
+static void snd_acp63_remove(struct pci_dev *pci)
{
- struct acp62_dev_data *adata;
+ struct acp63_dev_data *adata;
int ret, index;

adata = pci_get_drvdata(pci);
- if (adata->acp62_audio_mode == ACP6x_PDM_MODE) {
+ if (adata->acp63_audio_mode == ACP6x_PDM_MODE) {
for (index = 0; index < ACP6x_DEVS; index++)
platform_device_unregister(adata->pdev[index]);
}
- ret = acp62_deinit(adata->acp62_base, &pci->dev);
+ ret = acp63_deinit(adata->acp63_base, &pci->dev);
if (ret)
dev_err(&pci->dev, "ACP de-init failed\n");
pm_runtime_forbid(&pci->dev);
@@ -325,25 +323,25 @@ static void snd_acp62_remove(struct pci_dev *pci)
pci_disable_device(pci);
}

-static const struct pci_device_id snd_acp62_ids[] = {
+static const struct pci_device_id snd_acp63_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, ACP_DEVICE_ID),
.class = PCI_CLASS_MULTIMEDIA_OTHER << 8,
.class_mask = 0xffffff },
{ 0, },
};
-MODULE_DEVICE_TABLE(pci, snd_acp62_ids);
+MODULE_DEVICE_TABLE(pci, snd_acp63_ids);

-static struct pci_driver ps_acp62_driver = {
+static struct pci_driver ps_acp63_driver = {
.name = KBUILD_MODNAME,
- .id_table = snd_acp62_ids,
- .probe = snd_acp62_probe,
- .remove = snd_acp62_remove,
+ .id_table = snd_acp63_ids,
+ .probe = snd_acp63_probe,
+ .remove = snd_acp63_remove,
.driver = {
- .pm = &acp62_pm_ops,
+ .pm = &acp63_pm_ops,
}
};

-module_pci_driver(ps_acp62_driver);
+module_pci_driver(ps_acp63_driver);

MODULE_AUTHOR("Vijendar.Mukunda@xxxxxxx");
MODULE_AUTHOR("Syed.SabaKareem@xxxxxxx");
diff --git a/sound/soc/amd/ps/ps-mach.c b/sound/soc/amd/ps/ps-mach.c
index b3e97093481d..3ffbe4fdafdf 100644
--- a/sound/soc/amd/ps/ps-mach.c
+++ b/sound/soc/amd/ps/ps-mach.c
@@ -13,11 +13,11 @@
#include <linux/io.h>
#include <linux/dmi.h>

-#include "acp62.h"
+#include "acp63.h"

#define DRV_NAME "acp_ps_mach"

-SND_SOC_DAILINK_DEF(acp62_pdm,
+SND_SOC_DAILINK_DEF(acp63_pdm,
DAILINK_COMP_ARRAY(COMP_CPU("acp_ps_pdm_dma.0")));

SND_SOC_DAILINK_DEF(dmic_codec,
@@ -27,31 +27,31 @@ SND_SOC_DAILINK_DEF(dmic_codec,
SND_SOC_DAILINK_DEF(pdm_platform,
DAILINK_COMP_ARRAY(COMP_PLATFORM("acp_ps_pdm_dma.0")));

-static struct snd_soc_dai_link acp62_dai_pdm[] = {
+static struct snd_soc_dai_link acp63_dai_pdm[] = {
{
- .name = "acp62-dmic-capture",
+ .name = "acp63-dmic-capture",
.stream_name = "DMIC capture",
.capture_only = 1,
- SND_SOC_DAILINK_REG(acp62_pdm, dmic_codec, pdm_platform),
+ SND_SOC_DAILINK_REG(acp63_pdm, dmic_codec, pdm_platform),
},
};

-static struct snd_soc_card acp62_card = {
- .name = "acp62",
+static struct snd_soc_card acp63_card = {
+ .name = "acp63",
.owner = THIS_MODULE,
- .dai_link = acp62_dai_pdm,
+ .dai_link = acp63_dai_pdm,
.num_links = 1,
};

-static int acp62_probe(struct platform_device *pdev)
+static int acp63_probe(struct platform_device *pdev)
{
- struct acp62_pdm *machine = NULL;
+ struct acp63_pdm *machine = NULL;
struct snd_soc_card *card;
int ret;

- platform_set_drvdata(pdev, &acp62_card);
+ platform_set_drvdata(pdev, &acp63_card);
card = platform_get_drvdata(pdev);
- acp62_card.dev = &pdev->dev;
+ acp63_card.dev = &pdev->dev;

snd_soc_card_set_drvdata(card, machine);
ret = devm_snd_soc_register_card(&pdev->dev, card);
@@ -64,15 +64,15 @@ static int acp62_probe(struct platform_device *pdev)
return 0;
}

-static struct platform_driver acp62_mach_driver = {
+static struct platform_driver acp63_mach_driver = {
.driver = {
.name = "acp_ps_mach",
.pm = &snd_soc_pm_ops,
},
- .probe = acp62_probe,
+ .probe = acp63_probe,
};

-module_platform_driver(acp62_mach_driver);
+module_platform_driver(acp63_mach_driver);

MODULE_AUTHOR("Syed.SabaKareem@xxxxxxx");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/amd/ps/ps-pdm-dma.c b/sound/soc/amd/ps/ps-pdm-dma.c
index b207b726cd82..eea71a9d2ef1 100644
--- a/sound/soc/amd/ps/ps-pdm-dma.c
+++ b/sound/soc/amd/ps/ps-pdm-dma.c
@@ -14,11 +14,11 @@
#include <sound/soc-dai.h>
#include <linux/pm_runtime.h>

-#include "acp62.h"
+#include "acp63.h"

#define DRV_NAME "acp_ps_pdm_dma"

-static const struct snd_pcm_hardware acp62_pdm_hardware_capture = {
+static const struct snd_pcm_hardware acp63_pdm_hardware_capture = {
.info = SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP |
@@ -37,61 +37,61 @@ static const struct snd_pcm_hardware acp62_pdm_hardware_capture = {
.periods_max = CAPTURE_MAX_NUM_PERIODS,
};

-static void acp62_init_pdm_ring_buffer(u32 physical_addr, u32 buffer_size,
+static void acp63_init_pdm_ring_buffer(u32 physical_addr, u32 buffer_size,
u32 watermark_size, void __iomem *acp_base)
{
- acp62_writel(physical_addr, acp_base + ACP_WOV_RX_RINGBUFADDR);
- acp62_writel(buffer_size, acp_base + ACP_WOV_RX_RINGBUFSIZE);
- acp62_writel(watermark_size, acp_base + ACP_WOV_RX_INTR_WATERMARK_SIZE);
- acp62_writel(0x01, acp_base + ACPAXI2AXI_ATU_CTRL);
+ acp63_writel(physical_addr, acp_base + ACP_WOV_RX_RINGBUFADDR);
+ acp63_writel(buffer_size, acp_base + ACP_WOV_RX_RINGBUFSIZE);
+ acp63_writel(watermark_size, acp_base + ACP_WOV_RX_INTR_WATERMARK_SIZE);
+ acp63_writel(0x01, acp_base + ACPAXI2AXI_ATU_CTRL);
}

-static void acp62_enable_pdm_clock(void __iomem *acp_base)
+static void acp63_enable_pdm_clock(void __iomem *acp_base)
{
u32 pdm_clk_enable, pdm_ctrl;

pdm_clk_enable = ACP_PDM_CLK_FREQ_MASK;
pdm_ctrl = 0x00;

- acp62_writel(pdm_clk_enable, acp_base + ACP_WOV_CLK_CTRL);
- pdm_ctrl = acp62_readl(acp_base + ACP_WOV_MISC_CTRL);
+ acp63_writel(pdm_clk_enable, acp_base + ACP_WOV_CLK_CTRL);
+ pdm_ctrl = acp63_readl(acp_base + ACP_WOV_MISC_CTRL);
pdm_ctrl |= ACP_WOV_MISC_CTRL_MASK;
- acp62_writel(pdm_ctrl, acp_base + ACP_WOV_MISC_CTRL);
+ acp63_writel(pdm_ctrl, acp_base + ACP_WOV_MISC_CTRL);
}

-static void acp62_enable_pdm_interrupts(void __iomem *acp_base)
+static void acp63_enable_pdm_interrupts(void __iomem *acp_base)
{
u32 ext_int_ctrl;

- ext_int_ctrl = acp62_readl(acp_base + ACP_EXTERNAL_INTR_CNTL);
+ ext_int_ctrl = acp63_readl(acp_base + ACP_EXTERNAL_INTR_CNTL);
ext_int_ctrl |= PDM_DMA_INTR_MASK;
- acp62_writel(ext_int_ctrl, acp_base + ACP_EXTERNAL_INTR_CNTL);
+ acp63_writel(ext_int_ctrl, acp_base + ACP_EXTERNAL_INTR_CNTL);
}

-static void acp62_disable_pdm_interrupts(void __iomem *acp_base)
+static void acp63_disable_pdm_interrupts(void __iomem *acp_base)
{
u32 ext_int_ctrl;

- ext_int_ctrl = acp62_readl(acp_base + ACP_EXTERNAL_INTR_CNTL);
+ ext_int_ctrl = acp63_readl(acp_base + ACP_EXTERNAL_INTR_CNTL);
ext_int_ctrl &= ~PDM_DMA_INTR_MASK;
- acp62_writel(ext_int_ctrl, acp_base + ACP_EXTERNAL_INTR_CNTL);
+ acp63_writel(ext_int_ctrl, acp_base + ACP_EXTERNAL_INTR_CNTL);
}

-static bool acp62_check_pdm_dma_status(void __iomem *acp_base)
+static bool acp63_check_pdm_dma_status(void __iomem *acp_base)
{
bool pdm_dma_status;
u32 pdm_enable, pdm_dma_enable;

pdm_dma_status = false;
- pdm_enable = acp62_readl(acp_base + ACP_WOV_PDM_ENABLE);
- pdm_dma_enable = acp62_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ pdm_enable = acp63_readl(acp_base + ACP_WOV_PDM_ENABLE);
+ pdm_dma_enable = acp63_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
if ((pdm_enable & ACP_PDM_ENABLE) && (pdm_dma_enable & ACP_PDM_DMA_EN_STATUS))
pdm_dma_status = true;

return pdm_dma_status;
}

-static int acp62_start_pdm_dma(void __iomem *acp_base)
+static int acp63_start_pdm_dma(void __iomem *acp_base)
{
u32 pdm_enable;
u32 pdm_dma_enable;
@@ -100,12 +100,12 @@ static int acp62_start_pdm_dma(void __iomem *acp_base)
pdm_enable = 0x01;
pdm_dma_enable = 0x01;

- acp62_enable_pdm_clock(acp_base);
- acp62_writel(pdm_enable, acp_base + ACP_WOV_PDM_ENABLE);
- acp62_writel(pdm_dma_enable, acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ acp63_enable_pdm_clock(acp_base);
+ acp63_writel(pdm_enable, acp_base + ACP_WOV_PDM_ENABLE);
+ acp63_writel(pdm_dma_enable, acp_base + ACP_WOV_PDM_DMA_ENABLE);
timeout = 0;
while (++timeout < ACP_COUNTER) {
- pdm_dma_enable = acp62_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ pdm_dma_enable = acp63_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
if ((pdm_dma_enable & 0x02) == ACP_PDM_DMA_EN_STATUS)
return 0;
udelay(DELAY_US);
@@ -113,7 +113,7 @@ static int acp62_start_pdm_dma(void __iomem *acp_base)
return -ETIMEDOUT;
}

-static int acp62_stop_pdm_dma(void __iomem *acp_base)
+static int acp63_stop_pdm_dma(void __iomem *acp_base)
{
u32 pdm_enable, pdm_dma_enable;
int timeout;
@@ -121,14 +121,14 @@ static int acp62_stop_pdm_dma(void __iomem *acp_base)
pdm_enable = 0x00;
pdm_dma_enable = 0x00;

- pdm_enable = acp62_readl(acp_base + ACP_WOV_PDM_ENABLE);
- pdm_dma_enable = acp62_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ pdm_enable = acp63_readl(acp_base + ACP_WOV_PDM_ENABLE);
+ pdm_dma_enable = acp63_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
if (pdm_dma_enable & 0x01) {
pdm_dma_enable = 0x02;
- acp62_writel(pdm_dma_enable, acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ acp63_writel(pdm_dma_enable, acp_base + ACP_WOV_PDM_DMA_ENABLE);
timeout = 0;
while (++timeout < ACP_COUNTER) {
- pdm_dma_enable = acp62_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ pdm_dma_enable = acp63_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
if ((pdm_dma_enable & 0x02) == 0x00)
break;
udelay(DELAY_US);
@@ -138,13 +138,13 @@ static int acp62_stop_pdm_dma(void __iomem *acp_base)
}
if (pdm_enable == ACP_PDM_ENABLE) {
pdm_enable = ACP_PDM_DISABLE;
- acp62_writel(pdm_enable, acp_base + ACP_WOV_PDM_ENABLE);
+ acp63_writel(pdm_enable, acp_base + ACP_WOV_PDM_ENABLE);
}
- acp62_writel(0x01, acp_base + ACP_WOV_PDM_FIFO_FLUSH);
+ acp63_writel(0x01, acp_base + ACP_WOV_PDM_FIFO_FLUSH);
return 0;
}

-static void acp62_config_dma(struct pdm_stream_instance *rtd, int direction)
+static void acp63_config_dma(struct pdm_stream_instance *rtd, int direction)
{
u16 page_idx;
u32 low, high, val;
@@ -154,24 +154,24 @@ static void acp62_config_dma(struct pdm_stream_instance *rtd, int direction)
val = PDM_PTE_OFFSET;

/* Group Enable */
- acp62_writel(ACP_SRAM_PTE_OFFSET | BIT(31), rtd->acp62_base +
+ acp63_writel(ACP_SRAM_PTE_OFFSET | BIT(31), rtd->acp63_base +
ACPAXI2AXI_ATU_BASE_ADDR_GRP_1);
- acp62_writel(PAGE_SIZE_4K_ENABLE, rtd->acp62_base +
+ acp63_writel(PAGE_SIZE_4K_ENABLE, rtd->acp63_base +
ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1);
for (page_idx = 0; page_idx < rtd->num_pages; page_idx++) {
/* Load the low address of page int ACP SRAM through SRBM */
low = lower_32_bits(addr);
high = upper_32_bits(addr);

- acp62_writel(low, rtd->acp62_base + ACP_SCRATCH_REG_0 + val);
+ acp63_writel(low, rtd->acp63_base + ACP_SCRATCH_REG_0 + val);
high |= BIT(31);
- acp62_writel(high, rtd->acp62_base + ACP_SCRATCH_REG_0 + val + 4);
+ acp63_writel(high, rtd->acp63_base + ACP_SCRATCH_REG_0 + val + 4);
val += 8;
addr += PAGE_SIZE;
}
}

-static int acp62_pdm_dma_open(struct snd_soc_component *component,
+static int acp63_pdm_dma_open(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime;
@@ -186,7 +186,7 @@ static int acp62_pdm_dma_open(struct snd_soc_component *component,
return -EINVAL;

if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
- runtime->hw = acp62_pdm_hardware_capture;
+ runtime->hw = acp63_pdm_hardware_capture;

ret = snd_pcm_hw_constraint_integer(runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
@@ -196,17 +196,17 @@ static int acp62_pdm_dma_open(struct snd_soc_component *component,
return ret;
}

- acp62_enable_pdm_interrupts(adata->acp62_base);
+ acp63_enable_pdm_interrupts(adata->acp63_base);

if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
adata->capture_stream = substream;

- pdm_data->acp62_base = adata->acp62_base;
+ pdm_data->acp63_base = adata->acp63_base;
runtime->private_data = pdm_data;
return ret;
}

-static int acp62_pdm_dma_hw_params(struct snd_soc_component *component,
+static int acp63_pdm_dma_hw_params(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
@@ -220,26 +220,26 @@ static int acp62_pdm_dma_hw_params(struct snd_soc_component *component,
period_bytes = params_period_bytes(params);
rtd->dma_addr = substream->runtime->dma_addr;
rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
- acp62_config_dma(rtd, substream->stream);
- acp62_init_pdm_ring_buffer(PDM_MEM_WINDOW_START, size,
- period_bytes, rtd->acp62_base);
+ acp63_config_dma(rtd, substream->stream);
+ acp63_init_pdm_ring_buffer(PDM_MEM_WINDOW_START, size,
+ period_bytes, rtd->acp63_base);
return 0;
}

-static u64 acp62_pdm_get_byte_count(struct pdm_stream_instance *rtd,
+static u64 acp63_pdm_get_byte_count(struct pdm_stream_instance *rtd,
int direction)
{
u32 high, low;
u64 byte_count;

- high = acp62_readl(rtd->acp62_base + ACP_WOV_RX_LINEARPOSITIONCNTR_HIGH);
+ high = acp63_readl(rtd->acp63_base + ACP_WOV_RX_LINEARPOSITIONCNTR_HIGH);
byte_count = high;
- low = acp62_readl(rtd->acp62_base + ACP_WOV_RX_LINEARPOSITIONCNTR_LOW);
+ low = acp63_readl(rtd->acp63_base + ACP_WOV_RX_LINEARPOSITIONCNTR_LOW);
byte_count = (byte_count << 32) | low;
return byte_count;
}

-static snd_pcm_uframes_t acp62_pdm_dma_pointer(struct snd_soc_component *comp,
+static snd_pcm_uframes_t acp63_pdm_dma_pointer(struct snd_soc_component *comp,
struct snd_pcm_substream *stream)
{
struct pdm_stream_instance *rtd;
@@ -249,14 +249,14 @@ static snd_pcm_uframes_t acp62_pdm_dma_pointer(struct snd_soc_component *comp,
rtd = stream->runtime->private_data;
buffersize = frames_to_bytes(stream->runtime,
stream->runtime->buffer_size);
- bytescount = acp62_pdm_get_byte_count(rtd, stream->stream);
+ bytescount = acp63_pdm_get_byte_count(rtd, stream->stream);
if (bytescount > rtd->bytescount)
bytescount -= rtd->bytescount;
pos = do_div(bytescount, buffersize);
return bytes_to_frames(stream->runtime, pos);
}

-static int acp62_pdm_dma_new(struct snd_soc_component *component,
+static int acp63_pdm_dma_new(struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd)
{
struct device *parent = component->dev->parent;
@@ -266,19 +266,19 @@ static int acp62_pdm_dma_new(struct snd_soc_component *component,
return 0;
}

-static int acp62_pdm_dma_close(struct snd_soc_component *component,
+static int acp63_pdm_dma_close(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct pdm_dev_data *adata = dev_get_drvdata(component->dev);
struct snd_pcm_runtime *runtime = substream->runtime;

- acp62_disable_pdm_interrupts(adata->acp62_base);
+ acp63_disable_pdm_interrupts(adata->acp63_base);
adata->capture_stream = NULL;
kfree(runtime->private_data);
return 0;
}

-static int acp62_pdm_dai_trigger(struct snd_pcm_substream *substream,
+static int acp63_pdm_dai_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
struct pdm_stream_instance *rtd;
@@ -299,20 +299,20 @@ static int acp62_pdm_dai_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- acp62_writel(ch_mask, rtd->acp62_base + ACP_WOV_PDM_NO_OF_CHANNELS);
- acp62_writel(PDM_DECIMATION_FACTOR, rtd->acp62_base +
+ acp63_writel(ch_mask, rtd->acp63_base + ACP_WOV_PDM_NO_OF_CHANNELS);
+ acp63_writel(PDM_DECIMATION_FACTOR, rtd->acp63_base +
ACP_WOV_PDM_DECIMATION_FACTOR);
- rtd->bytescount = acp62_pdm_get_byte_count(rtd, substream->stream);
- pdm_status = acp62_check_pdm_dma_status(rtd->acp62_base);
+ rtd->bytescount = acp63_pdm_get_byte_count(rtd, substream->stream);
+ pdm_status = acp63_check_pdm_dma_status(rtd->acp63_base);
if (!pdm_status)
- ret = acp62_start_pdm_dma(rtd->acp62_base);
+ ret = acp63_start_pdm_dma(rtd->acp63_base);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- pdm_status = acp62_check_pdm_dma_status(rtd->acp62_base);
+ pdm_status = acp63_check_pdm_dma_status(rtd->acp63_base);
if (pdm_status)
- ret = acp62_stop_pdm_dma(rtd->acp62_base);
+ ret = acp63_stop_pdm_dma(rtd->acp63_base);
break;
default:
ret = -EINVAL;
@@ -321,11 +321,11 @@ static int acp62_pdm_dai_trigger(struct snd_pcm_substream *substream,
return ret;
}

-static const struct snd_soc_dai_ops acp62_pdm_dai_ops = {
- .trigger = acp62_pdm_dai_trigger,
+static const struct snd_soc_dai_ops acp63_pdm_dai_ops = {
+ .trigger = acp63_pdm_dai_trigger,
};

-static struct snd_soc_dai_driver acp62_pdm_dai_driver = {
+static struct snd_soc_dai_driver acp63_pdm_dai_driver = {
.name = "acp_ps_pdm_dma.0",
.capture = {
.rates = SNDRV_PCM_RATE_48000,
@@ -335,19 +335,19 @@ static struct snd_soc_dai_driver acp62_pdm_dai_driver = {
.rate_min = 48000,
.rate_max = 48000,
},
- .ops = &acp62_pdm_dai_ops,
+ .ops = &acp63_pdm_dai_ops,
};

-static const struct snd_soc_component_driver acp62_pdm_component = {
+static const struct snd_soc_component_driver acp63_pdm_component = {
.name = DRV_NAME,
- .open = acp62_pdm_dma_open,
- .close = acp62_pdm_dma_close,
- .hw_params = acp62_pdm_dma_hw_params,
- .pointer = acp62_pdm_dma_pointer,
- .pcm_construct = acp62_pdm_dma_new,
+ .open = acp63_pdm_dma_open,
+ .close = acp63_pdm_dma_close,
+ .hw_params = acp63_pdm_dma_hw_params,
+ .pointer = acp63_pdm_dma_pointer,
+ .pcm_construct = acp63_pdm_dma_new,
};

-static int acp62_pdm_audio_probe(struct platform_device *pdev)
+static int acp63_pdm_audio_probe(struct platform_device *pdev)
{
struct resource *res;
struct pdm_dev_data *adata;
@@ -363,16 +363,16 @@ static int acp62_pdm_audio_probe(struct platform_device *pdev)
if (!adata)
return -ENOMEM;

- adata->acp62_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!adata->acp62_base)
+ adata->acp63_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!adata->acp63_base)
return -ENOMEM;

adata->capture_stream = NULL;

dev_set_drvdata(&pdev->dev, adata);
status = devm_snd_soc_register_component(&pdev->dev,
- &acp62_pdm_component,
- &acp62_pdm_dai_driver, 1);
+ &acp63_pdm_component,
+ &acp63_pdm_dai_driver, 1);
if (status) {
dev_err(&pdev->dev, "Fail to register acp pdm dai\n");

@@ -385,13 +385,13 @@ static int acp62_pdm_audio_probe(struct platform_device *pdev)
return 0;
}

-static int acp62_pdm_audio_remove(struct platform_device *pdev)
+static int acp63_pdm_audio_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
return 0;
}

-static int __maybe_unused acp62_pdm_resume(struct device *dev)
+static int __maybe_unused acp63_pdm_resume(struct device *dev)
{
struct pdm_dev_data *adata;
struct snd_pcm_runtime *runtime;
@@ -404,47 +404,47 @@ static int __maybe_unused acp62_pdm_resume(struct device *dev)
rtd = runtime->private_data;
period_bytes = frames_to_bytes(runtime, runtime->period_size);
buffer_len = frames_to_bytes(runtime, runtime->buffer_size);
- acp62_config_dma(rtd, SNDRV_PCM_STREAM_CAPTURE);
- acp62_init_pdm_ring_buffer(PDM_MEM_WINDOW_START, buffer_len,
- period_bytes, adata->acp62_base);
+ acp63_config_dma(rtd, SNDRV_PCM_STREAM_CAPTURE);
+ acp63_init_pdm_ring_buffer(PDM_MEM_WINDOW_START, buffer_len,
+ period_bytes, adata->acp63_base);
}
- acp62_enable_pdm_interrupts(adata->acp62_base);
+ acp63_enable_pdm_interrupts(adata->acp63_base);
return 0;
}

-static int __maybe_unused acp62_pdm_suspend(struct device *dev)
+static int __maybe_unused acp63_pdm_suspend(struct device *dev)
{
struct pdm_dev_data *adata;

adata = dev_get_drvdata(dev);
- acp62_disable_pdm_interrupts(adata->acp62_base);
+ acp63_disable_pdm_interrupts(adata->acp63_base);
return 0;
}

-static int __maybe_unused acp62_pdm_runtime_resume(struct device *dev)
+static int __maybe_unused acp63_pdm_runtime_resume(struct device *dev)
{
struct pdm_dev_data *adata;

adata = dev_get_drvdata(dev);
- acp62_enable_pdm_interrupts(adata->acp62_base);
+ acp63_enable_pdm_interrupts(adata->acp63_base);
return 0;
}

-static const struct dev_pm_ops acp62_pdm_pm_ops = {
- SET_RUNTIME_PM_OPS(acp62_pdm_suspend, acp62_pdm_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(acp62_pdm_suspend, acp62_pdm_resume)
+static const struct dev_pm_ops acp63_pdm_pm_ops = {
+ SET_RUNTIME_PM_OPS(acp63_pdm_suspend, acp63_pdm_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(acp63_pdm_suspend, acp63_pdm_resume)
};

-static struct platform_driver acp62_pdm_dma_driver = {
- .probe = acp62_pdm_audio_probe,
- .remove = acp62_pdm_audio_remove,
+static struct platform_driver acp63_pdm_dma_driver = {
+ .probe = acp63_pdm_audio_probe,
+ .remove = acp63_pdm_audio_remove,
.driver = {
.name = "acp_ps_pdm_dma",
- .pm = &acp62_pdm_pm_ops,
+ .pm = &acp63_pdm_pm_ops,
},
};

-module_platform_driver(acp62_pdm_dma_driver);
+module_platform_driver(acp63_pdm_dma_driver);

MODULE_AUTHOR("Syed.SabaKareem@xxxxxxx");
MODULE_DESCRIPTION("AMD PINK SARDINE PDM Driver");
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index 4a69ce702360..0acdf0156f07 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -269,6 +269,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "8A43"),
}
},
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ DMI_MATCH(DMI_BOARD_NAME, "8A22"),
+ }
+ },
{}
};

diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
index c223d83e02cf..f2b5032daa6a 100644
--- a/sound/soc/codecs/cs35l41.c
+++ b/sound/soc/codecs/cs35l41.c
@@ -356,6 +356,19 @@ static const struct snd_kcontrol_new cs35l41_aud_controls[] = {
WM_ADSP_FW_CONTROL("DSP1", 0),
};

+static void cs35l41_boost_enable(struct cs35l41_private *cs35l41, unsigned int enable)
+{
+ switch (cs35l41->hw_cfg.bst_type) {
+ case CS35L41_INT_BOOST:
+ enable = enable ? CS35L41_BST_EN_DEFAULT : CS35L41_BST_DIS_FET_OFF;
+ regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2, CS35L41_BST_EN_MASK,
+ enable << CS35L41_BST_EN_SHIFT);
+ break;
+ default:
+ break;
+ }
+}
+
static irqreturn_t cs35l41_irq(int irq, void *data)
{
struct cs35l41_private *cs35l41 = data;
@@ -431,8 +444,7 @@ static irqreturn_t cs35l41_irq(int irq, void *data)

if (status[0] & CS35L41_BST_OVP_ERR) {
dev_crit_ratelimited(cs35l41->dev, "VBST Over Voltage error\n");
- regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
- CS35L41_BST_EN_MASK, 0);
+ cs35l41_boost_enable(cs35l41, 0);
regmap_write(cs35l41->regmap, CS35L41_IRQ1_STATUS1,
CS35L41_BST_OVP_ERR);
regmap_write(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN, 0);
@@ -441,16 +453,13 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
CS35L41_BST_OVP_ERR_RLS);
regmap_update_bits(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN,
CS35L41_BST_OVP_ERR_RLS, 0);
- regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
- CS35L41_BST_EN_MASK,
- CS35L41_BST_EN_DEFAULT << CS35L41_BST_EN_SHIFT);
+ cs35l41_boost_enable(cs35l41, 1);
ret = IRQ_HANDLED;
}

if (status[0] & CS35L41_BST_DCM_UVP_ERR) {
dev_crit_ratelimited(cs35l41->dev, "DCM VBST Under Voltage Error\n");
- regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
- CS35L41_BST_EN_MASK, 0);
+ cs35l41_boost_enable(cs35l41, 0);
regmap_write(cs35l41->regmap, CS35L41_IRQ1_STATUS1,
CS35L41_BST_DCM_UVP_ERR);
regmap_write(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN, 0);
@@ -459,16 +468,13 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
CS35L41_BST_UVP_ERR_RLS);
regmap_update_bits(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN,
CS35L41_BST_UVP_ERR_RLS, 0);
- regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
- CS35L41_BST_EN_MASK,
- CS35L41_BST_EN_DEFAULT << CS35L41_BST_EN_SHIFT);
+ cs35l41_boost_enable(cs35l41, 1);
ret = IRQ_HANDLED;
}

if (status[0] & CS35L41_BST_SHORT_ERR) {
dev_crit_ratelimited(cs35l41->dev, "LBST error: powering off!\n");
- regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
- CS35L41_BST_EN_MASK, 0);
+ cs35l41_boost_enable(cs35l41, 0);
regmap_write(cs35l41->regmap, CS35L41_IRQ1_STATUS1,
CS35L41_BST_SHORT_ERR);
regmap_write(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN, 0);
@@ -477,9 +483,7 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
CS35L41_BST_SHORT_ERR_RLS);
regmap_update_bits(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN,
CS35L41_BST_SHORT_ERR_RLS, 0);
- regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
- CS35L41_BST_EN_MASK,
- CS35L41_BST_EN_DEFAULT << CS35L41_BST_EN_SHIFT);
+ cs35l41_boost_enable(cs35l41, 1);
ret = IRQ_HANDLED;
}

diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 544ccbcfc884..5678683c71be 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -1996,6 +1996,11 @@ static int da7213_i2c_probe(struct i2c_client *i2c)
return ret;
}

+static void da7213_i2c_remove(struct i2c_client *i2c)
+{
+ pm_runtime_disable(&i2c->dev);
+}
+
static int __maybe_unused da7213_runtime_suspend(struct device *dev)
{
struct da7213_priv *da7213 = dev_get_drvdata(dev);
@@ -2039,6 +2044,7 @@ static struct i2c_driver da7213_i2c_driver = {
.pm = &da7213_pm,
},
.probe_new = da7213_i2c_probe,
+ .remove = da7213_i2c_remove,
.id_table = da7213_i2c_id,
};

diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
index 056c3082fe02..f7d7a9c91e04 100644
--- a/sound/soc/codecs/es8316.c
+++ b/sound/soc/codecs/es8316.c
@@ -842,12 +842,14 @@ static int es8316_i2c_probe(struct i2c_client *i2c_client)
es8316->irq = i2c_client->irq;
mutex_init(&es8316->lock);

- ret = devm_request_threaded_irq(dev, es8316->irq, NULL, es8316_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN,
- "es8316", es8316);
- if (ret) {
- dev_warn(dev, "Failed to get IRQ %d: %d\n", es8316->irq, ret);
- es8316->irq = -ENXIO;
+ if (es8316->irq > 0) {
+ ret = devm_request_threaded_irq(dev, es8316->irq, NULL, es8316_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ "es8316", es8316);
+ if (ret) {
+ dev_warn(dev, "Failed to get IRQ %d: %d\n", es8316->irq, ret);
+ es8316->irq = -ENXIO;
+ }
}

return devm_snd_soc_register_component(&i2c_client->dev,
diff --git a/sound/soc/fsl/fsl_mqs.c b/sound/soc/fsl/fsl_mqs.c
index 4922e6795b73..32d20d351bbf 100644
--- a/sound/soc/fsl/fsl_mqs.c
+++ b/sound/soc/fsl/fsl_mqs.c
@@ -210,10 +210,10 @@ static int fsl_mqs_probe(struct platform_device *pdev)
}

mqs_priv->regmap = syscon_node_to_regmap(gpr_np);
+ of_node_put(gpr_np);
if (IS_ERR(mqs_priv->regmap)) {
dev_err(&pdev->dev, "failed to get gpr regmap\n");
- ret = PTR_ERR(mqs_priv->regmap);
- goto err_free_gpr_np;
+ return PTR_ERR(mqs_priv->regmap);
}
} else {
regs = devm_platform_ioremap_resource(pdev, 0);
@@ -242,8 +242,7 @@ static int fsl_mqs_probe(struct platform_device *pdev)
if (IS_ERR(mqs_priv->mclk)) {
dev_err(&pdev->dev, "failed to get the clock: %ld\n",
PTR_ERR(mqs_priv->mclk));
- ret = PTR_ERR(mqs_priv->mclk);
- goto err_free_gpr_np;
+ return PTR_ERR(mqs_priv->mclk);
}

dev_set_drvdata(&pdev->dev, mqs_priv);
@@ -252,13 +251,9 @@ static int fsl_mqs_probe(struct platform_device *pdev)
ret = devm_snd_soc_register_component(&pdev->dev, &soc_codec_fsl_mqs,
&fsl_mqs_dai, 1);
if (ret)
- goto err_free_gpr_np;
- return 0;
-
-err_free_gpr_np:
- of_node_put(gpr_np);
+ return ret;

- return ret;
+ return 0;
}

static int fsl_mqs_remove(struct platform_device *pdev)
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 4f46f52c38e4..783c20125992 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -533,6 +533,18 @@ static int byt_rt5640_aif1_hw_params(struct snd_pcm_substream *substream,

/* Please keep this list alphabetically sorted */
static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ { /* Acer Iconia One 7 B1-750 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
+ },
+ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+ BYT_RT5640_JD_SRC_JD1_IN4P |
+ BYT_RT5640_OVCD_TH_1500UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* Acer Iconia Tab 8 W1-810 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index ee9857dc3135..d4f92bb5e29f 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -213,6 +213,17 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
SOF_SDW_PCH_DMIC |
RT711_JD1),
},
+ {
+ /* NUC15 'Rooks County' LAPRC510 and LAPRC710 skews */
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel(R) Client Systems"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LAPRC"),
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ SOF_SDW_PCH_DMIC |
+ RT711_JD2_100K),
+ },
/* TigerLake-SDCA devices */
{
.callback = sof_sdw_quirk_cb,
diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
index 0102574025e9..6e21e1640acf 100644
--- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
@@ -354,6 +354,20 @@ static const struct snd_soc_acpi_link_adr adl_sdw_rt711_link0_rt1316_link3[] = {
{}
};

+static const struct snd_soc_acpi_link_adr adl_sdw_rt711_link0_rt1316_link2[] = {
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(rt711_sdca_0_adr),
+ .adr_d = rt711_sdca_0_adr,
+ },
+ {
+ .mask = BIT(2),
+ .num_adr = ARRAY_SIZE(rt1316_2_single_adr),
+ .adr_d = rt1316_2_single_adr,
+ },
+ {}
+};
+
static const struct snd_soc_acpi_adr_device mx8373_2_adr[] = {
{
.adr = 0x000223019F837300ull,
@@ -612,6 +626,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = {
.drv_name = "sof_sdw",
.sof_tplg_filename = "sof-adl-rt711-l0-rt1316-l3.tplg",
},
+ {
+ .link_mask = 0x5, /* 2 active links required */
+ .links = adl_sdw_rt711_link0_rt1316_link2,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-adl-rt711-l0-rt1316-l2.tplg",
+ },
{
.link_mask = 0x1, /* link0 required */
.links = adl_rvp,
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index e7aa6f360cab..d649b0cf4744 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -622,6 +622,9 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
return ret;
}

+ /* inherit atomicity from DAI link */
+ be_pcm->nonatomic = rtd->dai_link->nonatomic;
+
rtd->pcm = be_pcm;
rtd->fe_compr = 1;
if (rtd->dai_link->dpcm_playback)
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 35a16c3f9591..7a486ca9633c 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1649,10 +1649,14 @@ static void dpcm_runtime_setup_fe(struct snd_pcm_substream *substream)
struct snd_pcm_hardware *hw = &runtime->hw;
struct snd_soc_dai *dai;
int stream = substream->stream;
+ u64 formats = hw->formats;
int i;

soc_pcm_hw_init(hw);

+ if (formats)
+ hw->formats &= formats;
+
for_each_rtd_cpu_dais(fe, i, dai) {
struct snd_soc_pcm_stream *cpu_stream;

diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 271884e35003..efb4a3311cc5 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3884,6 +3884,64 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},

+{
+ /*
+ * PIONEER DJ DDJ-800
+ * PCM is 6 channels out, 6 channels in @ 44.1 fixed
+ * The Feedback for the output is the input
+ */
+ USB_DEVICE_VENDOR_SPEC(0x2b73, 0x0029),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 6,
+ .iface = 0,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .endpoint = 0x01,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC|
+ USB_ENDPOINT_SYNC_ASYNC,
+ .rates = SNDRV_PCM_RATE_44100,
+ .rate_min = 44100,
+ .rate_max = 44100,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) { 44100 }
+ }
+ },
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 6,
+ .iface = 0,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .endpoint = 0x82,
+ .ep_idx = 1,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC|
+ USB_ENDPOINT_SYNC_ASYNC|
+ USB_ENDPOINT_USAGE_IMPLICIT_FB,
+ .rates = SNDRV_PCM_RATE_44100,
+ .rate_min = 44100,
+ .rate_max = 44100,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) { 44100 }
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+
/*
* MacroSilicon MS2100/MS2106 based AV capture cards
*
diff --git a/tools/arch/x86/kcpuid/cpuid.csv b/tools/arch/x86/kcpuid/cpuid.csv
index 4f1c4b0c29e9..9914bdf4fc9e 100644
--- a/tools/arch/x86/kcpuid/cpuid.csv
+++ b/tools/arch/x86/kcpuid/cpuid.csv
@@ -184,8 +184,8 @@
7, 0, EBX, 27, avx512er, AVX512 Exponent Reciproca instr
7, 0, EBX, 28, avx512cd, AVX512 Conflict Detection instr
7, 0, EBX, 29, sha, Intel Secure Hash Algorithm Extensions instr
- 7, 0, EBX, 26, avx512bw, AVX512 Byte & Word instr
- 7, 0, EBX, 28, avx512vl, AVX512 Vector Length Extentions (VL)
+ 7, 0, EBX, 30, avx512bw, AVX512 Byte & Word instr
+ 7, 0, EBX, 31, avx512vl, AVX512 Vector Length Extentions (VL)
7, 0, ECX, 0, prefetchwt1, X
7, 0, ECX, 1, avx512vbmi, AVX512 Vector Byte Manipulation Instructions
7, 0, ECX, 2, umip, User-mode Instruction Prevention
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
index 7fea83bedf48..bca5dd0a59e3 100644
--- a/tools/bpf/bpftool/json_writer.c
+++ b/tools/bpf/bpftool/json_writer.c
@@ -80,9 +80,6 @@ static void jsonw_puts(json_writer_t *self, const char *str)
case '"':
fputs("\\\"", self->out);
break;
- case '\'':
- fputs("\\\'", self->out);
- break;
default:
putc(*str, self->out);
}
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
index 2d9cd6a7b3c8..d9386a1a4df4 100644
--- a/tools/bpf/bpftool/xlated_dumper.c
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -370,8 +370,15 @@ void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
struct bpf_insn *insn_start = buf_start;
struct bpf_insn *insn_end = buf_end;
struct bpf_insn *cur = insn_start;
+ bool double_insn = false;

for (; cur <= insn_end; cur++) {
+ if (double_insn) {
+ double_insn = false;
+ continue;
+ }
+ double_insn = cur->code == (BPF_LD | BPF_IMM | BPF_DW);
+
printf("% 4d: ", (int)(cur - insn_start + start_idx));
print_bpf_insn(&cbs, cur, true);
if (cur != insn_end)
diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
index 23f5c46708f8..b74c82bb831e 100644
--- a/tools/lib/bpf/gen_loader.c
+++ b/tools/lib/bpf/gen_loader.c
@@ -804,11 +804,13 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
return;
/* try to copy from existing ldimm64 insn */
if (kdesc->ref > 1) {
- move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
- kdesc->insn + offsetof(struct bpf_insn, imm));
move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
- /* jump over src_reg adjustment if imm is not 0, reuse BPF_REG_0 from move_blob2blob */
+ move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
+ kdesc->insn + offsetof(struct bpf_insn, imm));
+ /* jump over src_reg adjustment if imm (btf_id) is not 0, reuse BPF_REG_0 from move_blob2blob
+ * If btf_id is zero, clear BPF_PSEUDO_BTF_ID flag in src_reg of ld_imm64 insn
+ */
emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3));
goto clear_src_reg;
}
@@ -831,7 +833,7 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
/* skip src_reg adjustment */
- emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
+ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
clear_src_reg:
/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
reg_mask = src_reg_mask();
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 730b49e255e4..c2c350933a23 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -2757,17 +2757,6 @@ static int update_cfi_state(struct instruction *insn,
break;
}

- if (!cfi->drap && op->src.reg == CFI_SP &&
- op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
- check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) {
-
- /* lea disp(%rsp), %rbp */
- cfa->base = CFI_BP;
- cfa->offset -= op->src.offset;
- cfi->bp_scratch = false;
- break;
- }
-
if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

/* drap: lea disp(%rsp), %drap */
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 6e60b6f06ab0..4445c5c2d4c5 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -2443,6 +2443,7 @@ static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
char type, u64 start)
{
struct sym_args *args = arg;
+ u64 size;

if (!kallsyms__is_function(type))
return 0;
@@ -2452,7 +2453,9 @@ static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
args->start = start;
}
/* Don't know exactly where the kernel ends, so we add a page */
- args->size = round_up(start, page_size) + page_size - args->start;
+ size = round_up(start, page_size) + page_size - args->start;
+ if (size > args->size)
+ args->size = size;

return 0;
}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 0ac860c8dd2b..7145c5890de0 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1998,6 +1998,8 @@ static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)

decoder->cbr = cbr;
decoder->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr;
+ decoder->cyc_ref_timestamp = decoder->timestamp;
+ decoder->cycle_cnt = 0;

intel_pt_mtc_cyc_cnt_cbr(decoder);
}
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index 1f37adff7632..1fa4672380a9 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -95,7 +95,7 @@ static int __start_server(int type, int protocol, const struct sockaddr *addr,
if (reuseport &&
setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on))) {
log_err("Failed to set SO_REUSEPORT");
- return -1;
+ goto error_close;
}

if (bind(fd, addr, addrlen) < 0) {
diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c
index 970f09156eb4..de27a29af270 100644
--- a/tools/testing/selftests/bpf/prog_tests/align.c
+++ b/tools/testing/selftests/bpf/prog_tests/align.c
@@ -565,14 +565,14 @@ static struct bpf_align_test tests[] = {
/* New unknown value in R7 is (4n), >= 76 */
{14, "R7_w=scalar(umin=76,umax=1096,var_off=(0x0; 0x7fc))"},
/* Adding it to packet pointer gives nice bounds again */
- {16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},
+ {16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},
+ {20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0x7fc)"},
},
},
};
diff --git a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
index 621c57222191..63ee892bc757 100644
--- a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
@@ -56,8 +56,9 @@ static bool assert_storage_noexist(struct bpf_map *map, const void *key)

static bool connect_send(const char *cgroup_path)
{
- bool res = true;
int server_fd = -1, client_fd = -1;
+ char message[] = "message";
+ bool res = true;

if (join_cgroup(cgroup_path))
goto out_clean;
@@ -70,7 +71,10 @@ static bool connect_send(const char *cgroup_path)
if (client_fd < 0)
goto out_clean;

- if (send(client_fd, "message", strlen("message"), 0) < 0)
+ if (send(client_fd, &message, sizeof(message), 0) < 0)
+ goto out_clean;
+
+ if (read(server_fd, &message, sizeof(message)) < 0)
goto out_clean;

res = false;
diff --git a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
index 5308de1ed478..2715c68301f5 100644
--- a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
@@ -65,6 +65,7 @@ void test_get_stackid_cannot_attach(void)
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event_callchain");
+ bpf_link__destroy(skel->links.oncpu);
close(pmu_fd);

/* add exclude_callchain_kernel, attach should fail */
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c b/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
index 33144c9432ae..f4aad35afae1 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
@@ -63,7 +63,8 @@ void test_perf_event_stackmap(void)
PERF_SAMPLE_BRANCH_NO_FLAGS |
PERF_SAMPLE_BRANCH_NO_CYCLES |
PERF_SAMPLE_BRANCH_CALL_STACK,
- .sample_period = 5000,
+ .freq = 1,
+ .sample_freq = read_perf_max_sample_freq(),
.size = sizeof(struct perf_event_attr),
};
struct perf_event_stackmap *skel;
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
index f4ea1a215ce4..704f7f6c3704 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
@@ -2,21 +2,6 @@
#include <test_progs.h>
#include "test_stacktrace_build_id.skel.h"

-static __u64 read_perf_max_sample_freq(void)
-{
- __u64 sample_freq = 5000; /* fallback to 5000 on error */
- FILE *f;
- __u32 duration = 0;
-
- f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
- if (f == NULL)
- return sample_freq;
- CHECK(fscanf(f, "%llu", &sample_freq) != 1, "Get max sample rate",
- "return default value: 5000,err %d\n", -errno);
- fclose(f);
- return sample_freq;
-}
-
void test_stacktrace_build_id_nmi(void)
{
int control_map_fd, stackid_hmap_fd, stackmap_fd;
diff --git a/tools/testing/selftests/bpf/test_xsk.sh b/tools/testing/selftests/bpf/test_xsk.sh
index d821fd098504..4e3ec38cbe68 100755
--- a/tools/testing/selftests/bpf/test_xsk.sh
+++ b/tools/testing/selftests/bpf/test_xsk.sh
@@ -118,6 +118,7 @@ setup_vethPairs() {
ip link add ${VETH0} numtxqueues 4 numrxqueues 4 type veth peer name ${VETH1} numtxqueues 4 numrxqueues 4
if [ -f /proc/net/if_inet6 ]; then
echo 1 > /proc/sys/net/ipv6/conf/${VETH0}/disable_ipv6
+ echo 1 > /proc/sys/net/ipv6/conf/${VETH1}/disable_ipv6
fi
if [[ $verbose -eq 1 ]]; then
echo "setting up ${VETH1}: namespace: ${NS1}"
diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
index 9695318e8132..9c3de39023f6 100644
--- a/tools/testing/selftests/bpf/testing_helpers.c
+++ b/tools/testing/selftests/bpf/testing_helpers.c
@@ -229,3 +229,23 @@ int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,

return bpf_prog_load(type, NULL, license, insns, insns_cnt, &opts);
}
+
+__u64 read_perf_max_sample_freq(void)
+{
+ __u64 sample_freq = 5000; /* fallback to 5000 on error */
+ FILE *f;
+
+ f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
+ if (f == NULL) {
+ printf("Failed to open /proc/sys/kernel/perf_event_max_sample_rate: err %d\n"
+ "return default value: 5000\n", -errno);
+ return sample_freq;
+ }
+ if (fscanf(f, "%llu", &sample_freq) != 1) {
+ printf("Failed to parse /proc/sys/kernel/perf_event_max_sample_rate: err %d\n"
+ "return default value: 5000\n", -errno);
+ }
+
+ fclose(f);
+ return sample_freq;
+}
diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h
index 6ec00bf79cb5..eb8790f928e4 100644
--- a/tools/testing/selftests/bpf/testing_helpers.h
+++ b/tools/testing/selftests/bpf/testing_helpers.h
@@ -20,3 +20,5 @@ struct test_filter_set;
int parse_test_list(const char *s,
struct test_filter_set *test_set,
bool is_glob_pattern);
+
+__u64 read_perf_max_sample_freq(void);
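
As an illustrative aside (not applied by this diff), a minimal sketch of how a selftest can consume the shared helper declared above, mirroring the perf_event_stackmap.c hunk earlier in this diff; the function name init_sampling_attr is hypothetical:

#include <string.h>
#include <linux/perf_event.h>
#include "testing_helpers.h"

/* Configure frequency-based sampling capped at the kernel's advertised
 * maximum rate; read_perf_max_sample_freq() falls back to 5000 if
 * /proc/sys/kernel/perf_event_max_sample_rate cannot be read or parsed.
 */
static void init_sampling_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_SOFTWARE;
	attr->config = PERF_COUNT_SW_CPU_CLOCK;
	attr->freq = 1;				/* sample by frequency, not period */
	attr->sample_freq = read_perf_max_sample_freq();
}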
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 8d5d9b94b020..cd6578928c28 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -649,7 +649,6 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb
if (!pkt_stream)
exit_with_error(ENOMEM);

- pkt_stream->nb_pkts = nb_pkts;
for (i = 0; i < nb_pkts; i++) {
pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size,
pkt_len);
@@ -1141,7 +1140,14 @@ static int validate_rx_dropped(struct ifobject *ifobject)
if (err)
return TEST_FAILURE;

- if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2)
+ /* The receiver calls getsockopt after receiving the last (valid)
+ * packet which is not the final packet sent in this test (valid and
+ * invalid packets are sent in alternating fashion with the final
+ * packet being invalid). Since the last packet may or may not have
+ * been dropped already, both outcomes must be allowed.
+ */
+ if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 ||
+ stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 - 1)
return TEST_PASS;

return TEST_FAILURE;
@@ -1661,6 +1667,7 @@ static void testapp_single_pkt(struct test_spec *test)

static void testapp_invalid_desc(struct test_spec *test)
{
+ u64 umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
struct pkt pkts[] = {
/* Zero packet address allowed */
{0, PKT_SIZE, 0, true},
@@ -1671,9 +1678,9 @@ static void testapp_invalid_desc(struct test_spec *test)
/* Packet too large */
{0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
/* After umem ends */
- {UMEM_SIZE, PKT_SIZE, 0, false},
+ {umem_size, PKT_SIZE, 0, false},
/* Straddle the end of umem */
- {UMEM_SIZE - PKT_SIZE / 2, PKT_SIZE, 0, false},
+ {umem_size - PKT_SIZE / 2, PKT_SIZE, 0, false},
/* Straddle a page boundrary */
{0x3000 - PKT_SIZE / 2, PKT_SIZE, 0, false},
/* Straddle a 2K boundrary */
@@ -1691,8 +1698,8 @@ static void testapp_invalid_desc(struct test_spec *test)
}

if (test->ifobj_tx->shared_umem) {
- pkts[4].addr += UMEM_SIZE;
- pkts[5].addr += UMEM_SIZE;
+ pkts[4].addr += umem_size;
+ pkts[5].addr += umem_size;
}

pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index edb76d2def9f..292fc943b8fd 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -52,7 +52,6 @@
#define THREAD_TMOUT 3
#define DEFAULT_PKT_CNT (4 * 1024)
#define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4)
-#define UMEM_SIZE (DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE)
#define RX_FULL_RXQSIZE 32
#define UMEM_HEADROOM_TEST_SIZE 128
#define XSK_UMEM__INVALID_FRAME_SIZE (XSK_UMEM__DEFAULT_FRAME_SIZE + 1)
diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
index 8c5fea68ae67..969647228817 100644
--- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
@@ -18,6 +18,7 @@
#include <grp.h>
#include <stdbool.h>
#include <stdarg.h>
+#include <linux/mount.h>

#include "../kselftest_harness.h"

diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c
index 022cc1655eb5..75527876ad3c 100644
--- a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c
@@ -63,9 +63,9 @@ static int mmcra_thresh_marked_sample(void)
get_mmcra_thd_stop(get_reg_value(intr_regs, "MMCRA"), 4));
FAIL_IF(EV_CODE_EXTRACT(event.attr.config, marked) !=
get_mmcra_marked(get_reg_value(intr_regs, "MMCRA"), 4));
- FAIL_IF(EV_CODE_EXTRACT(event.attr.config, sample >> 2) !=
+ FAIL_IF((EV_CODE_EXTRACT(event.attr.config, sample) >> 2) !=
get_mmcra_rand_samp_elig(get_reg_value(intr_regs, "MMCRA"), 4));
- FAIL_IF(EV_CODE_EXTRACT(event.attr.config, sample & 0x3) !=
+ FAIL_IF((EV_CODE_EXTRACT(event.attr.config, sample) & 0x3) !=
get_mmcra_sample_mode(get_reg_value(intr_regs, "MMCRA"), 4));
FAIL_IF(EV_CODE_EXTRACT(event.attr.config, sm) !=
get_mmcra_sm(get_reg_value(intr_regs, "MMCRA"), 4));
diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
index 68ff856d36f0..0485863a169f 100644
--- a/tools/testing/selftests/resctrl/cache.c
+++ b/tools/testing/selftests/resctrl/cache.c
@@ -244,10 +244,12 @@ int cat_val(struct resctrl_val_param *param)
while (1) {
if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
ret = param->setup(1, param);
- if (ret) {
+ if (ret == END_OF_TESTS) {
ret = 0;
break;
}
+ if (ret < 0)
+ break;
ret = reset_enable_llc_perf(bm_pid, param->cpu_no);
if (ret)
break;
diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
index 1c5e90c63254..2d3c7c77ab6c 100644
--- a/tools/testing/selftests/resctrl/cat_test.c
+++ b/tools/testing/selftests/resctrl/cat_test.c
@@ -40,7 +40,7 @@ static int cat_setup(int num, ...)

/* Run NUM_OF_RUNS times */
if (p->num_of_runs >= NUM_OF_RUNS)
- return -1;
+ return END_OF_TESTS;

if (p->num_of_runs == 0) {
sprintf(schemata, "%lx", p->mask);
diff --git a/tools/testing/selftests/resctrl/cmt_test.c b/tools/testing/selftests/resctrl/cmt_test.c
index 8968e36db99d..3b0454e7fc82 100644
--- a/tools/testing/selftests/resctrl/cmt_test.c
+++ b/tools/testing/selftests/resctrl/cmt_test.c
@@ -32,7 +32,7 @@ static int cmt_setup(int num, ...)

/* Run NUM_OF_RUNS times */
if (p->num_of_runs >= NUM_OF_RUNS)
- return -1;
+ return END_OF_TESTS;

p->num_of_runs++;

diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
index 56ccbeae0638..c20d0a7ecbe6 100644
--- a/tools/testing/selftests/resctrl/fill_buf.c
+++ b/tools/testing/selftests/resctrl/fill_buf.c
@@ -68,6 +68,8 @@ static void *malloc_and_init_memory(size_t s)
size_t s64;

void *p = memalign(PAGE_SIZE, s);
+ if (!p)
+ return NULL;

p64 = (uint64_t *)p;
s64 = s / sizeof(uint64_t);
diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
index 1a1bdb6180cf..97dc98c0c949 100644
--- a/tools/testing/selftests/resctrl/mba_test.c
+++ b/tools/testing/selftests/resctrl/mba_test.c
@@ -28,6 +28,7 @@ static int mba_setup(int num, ...)
struct resctrl_val_param *p;
char allocation_str[64];
va_list param;
+ int ret;

va_start(param, num);
p = va_arg(param, struct resctrl_val_param *);
@@ -41,11 +42,15 @@ static int mba_setup(int num, ...)
return 0;

if (allocation < ALLOCATION_MIN || allocation > ALLOCATION_MAX)
- return -1;
+ return END_OF_TESTS;

sprintf(allocation_str, "%d", allocation);

- write_schemata(p->ctrlgrp, allocation_str, p->cpu_no, p->resctrl_val);
+ ret = write_schemata(p->ctrlgrp, allocation_str, p->cpu_no,
+ p->resctrl_val);
+ if (ret < 0)
+ return ret;
+
allocation -= ALLOCATION_STEP;

return 0;
diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
index 8392e5c55ed0..280187628054 100644
--- a/tools/testing/selftests/resctrl/mbm_test.c
+++ b/tools/testing/selftests/resctrl/mbm_test.c
@@ -95,7 +95,7 @@ static int mbm_setup(int num, ...)

/* Run NUM_OF_RUNS times */
if (num_of_runs++ >= NUM_OF_RUNS)
- return -1;
+ return END_OF_TESTS;

va_start(param, num);
p = va_arg(param, struct resctrl_val_param *);
diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
index f0ded31fb3c7..f44fa2de4d98 100644
--- a/tools/testing/selftests/resctrl/resctrl.h
+++ b/tools/testing/selftests/resctrl/resctrl.h
@@ -37,6 +37,8 @@
#define ARCH_INTEL 1
#define ARCH_AMD 2

+#define END_OF_TESTS 1
+
#define PARENT_EXIT(err_msg) \
do { \
perror(err_msg); \
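
As an illustrative aside (not applied by this diff), a sketch of the setup() callback convention that END_OF_TESTS establishes, as followed by the cat_test/cmt_test/mba_test/mbm_test hunks above and checked by the cache.c and resctrl_val.c loops elsewhere in this diff; the callback name and run limit are hypothetical:

#include "resctrl.h"

/* Return 0 to run another iteration, a negative value on a real error
 * (the caller now aborts and propagates it), or END_OF_TESTS once all
 * runs are done (the caller converts it to 0, a clean stop).
 */
static int example_setup(int num, ...)
{
	static int runs;

	if (runs >= 3)			/* hypothetical per-test run limit */
		return END_OF_TESTS;
	runs++;

	/* ... apply this run's schemata or allocation here ... */

	return 0;
}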
diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
index b32b96356ec7..00864242d76c 100644
--- a/tools/testing/selftests/resctrl/resctrl_val.c
+++ b/tools/testing/selftests/resctrl/resctrl_val.c
@@ -734,29 +734,24 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)

/* Test runs until the callback setup() tells the test to stop. */
while (1) {
+ ret = param->setup(1, param);
+ if (ret == END_OF_TESTS) {
+ ret = 0;
+ break;
+ }
+ if (ret < 0)
+ break;
+
if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
- ret = param->setup(1, param);
- if (ret) {
- ret = 0;
- break;
- }
-
ret = measure_vals(param, &bw_resc_start);
if (ret)
break;
} else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
- ret = param->setup(1, param);
- if (ret) {
- ret = 0;
- break;
- }
sleep(1);
ret = measure_cache_vals(param, bm_pid);
if (ret)
break;
- } else {
- break;
}
}

diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
index 8acb904d1419..3593fb8f79ad 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
@@ -114,6 +114,28 @@
"$IP link del dev $DUMMY type dummy"
]
},
+ {
+ "id": "10f7",
+ "name": "Create FQ with invalid initial_quantum setting",
+ "category": [
+ "qdisc",
+ "fq"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link add dev $DUMMY type dummy || /bin/true"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq initial_quantum 0x80000000",
+ "expExitCode": "2",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc fq 1: root.*initial_quantum 2048Mb",
+ "matchCount": "0",
+ "teardown": [
+ "$IP link del dev $DUMMY type dummy"
+ ]
+ },
{
"id": "9398",
"name": "Create FQ with maxrate setting",
diff --git a/tools/testing/selftests/user_events/ftrace_test.c b/tools/testing/selftests/user_events/ftrace_test.c
index 404a2713dcae..1bc26e6476fc 100644
--- a/tools/testing/selftests/user_events/ftrace_test.c
+++ b/tools/testing/selftests/user_events/ftrace_test.c
@@ -294,6 +294,11 @@ TEST_F(user, write_events) {
ASSERT_NE(-1, writev(self->data_fd, (const struct iovec *)io, 3));
after = trace_bytes();
ASSERT_GT(after, before);
+
+ /* Negative index should fail with EINVAL */
+ reg.write_index = -1;
+ ASSERT_EQ(-1, writev(self->data_fd, (const struct iovec *)io, 3));
+ ASSERT_EQ(EINVAL, errno);
}

TEST_F(user, write_fault) {