Re: Linux 6.6.14

From: Greg Kroah-Hartman
Date: Thu Jan 25 2024 - 19:02:50 EST


diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml
index adbfaea32343..90f31beb80c2 100644
--- a/Documentation/devicetree/bindings/arm/qcom.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom.yaml
@@ -136,7 +136,7 @@ description: |
There are many devices in the list below that run the standard ChromeOS
bootloader setup and use the open source depthcharge bootloader to boot the
OS. These devices do not use the scheme described above. For details, see:
- https://docs.kernel.org/arm/google/chromebook-boot-flow.html
+ https://docs.kernel.org/arch/arm/google/chromebook-boot-flow.html

properties:
$nodename:
diff --git a/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml b/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml
index c1060e5fcef3..d3d8a2e143ed 100644
--- a/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml
+++ b/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml
@@ -126,7 +126,7 @@ examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>

- gpio@e000a000 {
+ gpio@a0020000 {
compatible = "xlnx,xps-gpio-1.00.a";
reg = <0xa0020000 0x10000>;
#gpio-cells = <2>;
diff --git a/Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml b/Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml
index 7032c7e15039..3e128733ef53 100644
--- a/Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml
@@ -61,6 +61,9 @@ properties:
- description: used for 1st data pipe from RDMA
- description: used for 2nd data pipe from RDMA

+ '#dma-cells':
+ const: 1
+
required:
- compatible
- reg
@@ -70,6 +73,7 @@ required:
- clocks
- iommus
- mboxes
+ - '#dma-cells'

additionalProperties: false

@@ -80,16 +84,17 @@ examples:
#include <dt-bindings/power/mt8183-power.h>
#include <dt-bindings/memory/mt8183-larb-port.h>

- mdp3_rdma0: mdp3-rdma0@14001000 {
- compatible = "mediatek,mt8183-mdp3-rdma";
- reg = <0x14001000 0x1000>;
- mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
- mediatek,gce-events = <CMDQ_EVENT_MDP_RDMA0_SOF>,
- <CMDQ_EVENT_MDP_RDMA0_EOF>;
- power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
- clocks = <&mmsys CLK_MM_MDP_RDMA0>,
- <&mmsys CLK_MM_MDP_RSZ1>;
- iommus = <&iommu>;
- mboxes = <&gce 20 CMDQ_THR_PRIO_LOWEST>,
- <&gce 21 CMDQ_THR_PRIO_LOWEST>;
+ dma-controller@14001000 {
+ compatible = "mediatek,mt8183-mdp3-rdma";
+ reg = <0x14001000 0x1000>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
+ mediatek,gce-events = <CMDQ_EVENT_MDP_RDMA0_SOF>,
+ <CMDQ_EVENT_MDP_RDMA0_EOF>;
+ power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ clocks = <&mmsys CLK_MM_MDP_RDMA0>,
+ <&mmsys CLK_MM_MDP_RSZ1>;
+ iommus = <&iommu>;
+ mboxes = <&gce 20 CMDQ_THR_PRIO_LOWEST>,
+ <&gce 21 CMDQ_THR_PRIO_LOWEST>;
+ #dma-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml b/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml
index 0baa77198fa2..64ea98aa0592 100644
--- a/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml
@@ -50,6 +50,9 @@ properties:
iommus:
maxItems: 1

+ '#dma-cells':
+ const: 1
+
required:
- compatible
- reg
@@ -58,6 +61,7 @@ required:
- power-domains
- clocks
- iommus
+ - '#dma-cells'

additionalProperties: false

@@ -68,13 +72,14 @@ examples:
#include <dt-bindings/power/mt8183-power.h>
#include <dt-bindings/memory/mt8183-larb-port.h>

- mdp3_wrot0: mdp3-wrot0@14005000 {
- compatible = "mediatek,mt8183-mdp3-wrot";
- reg = <0x14005000 0x1000>;
- mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x5000 0x1000>;
- mediatek,gce-events = <CMDQ_EVENT_MDP_WROT0_SOF>,
- <CMDQ_EVENT_MDP_WROT0_EOF>;
- power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
- clocks = <&mmsys CLK_MM_MDP_WROT0>;
- iommus = <&iommu>;
+ dma-controller@14005000 {
+ compatible = "mediatek,mt8183-mdp3-wrot";
+ reg = <0x14005000 0x1000>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x5000 0x1000>;
+ mediatek,gce-events = <CMDQ_EVENT_MDP_WROT0_SOF>,
+ <CMDQ_EVENT_MDP_WROT0_EOF>;
+ power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ clocks = <&mmsys CLK_MM_MDP_WROT0>;
+ iommus = <&iommu>;
+ #dma-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/media/rockchip-isp1.yaml b/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
index e466dff8286d..afcaa427d48b 100644
--- a/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
+++ b/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
@@ -90,15 +90,16 @@ properties:
description: connection point for input on the parallel interface

properties:
- bus-type:
- enum: [5, 6]
-
endpoint:
$ref: video-interfaces.yaml#
unevaluatedProperties: false

- required:
- - bus-type
+ properties:
+ bus-type:
+ enum: [5, 6]
+
+ required:
+ - bus-type

anyOf:
- required:
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
index 9af203dc8793..fa7408eb7489 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
@@ -62,12 +62,12 @@ properties:
"#clock-cells":
const: 1
description:
- See include/dt-bindings/dt-bindings/phy/phy-qcom-qmp.h
+ See include/dt-bindings/phy/phy-qcom-qmp.h

"#phy-cells":
const: 1
description:
- See include/dt-bindings/dt-bindings/phy/phy-qcom-qmp.h
+ See include/dt-bindings/phy/phy-qcom-qmp.h

orientation-switch:
description:
diff --git a/Documentation/driver-api/pci/p2pdma.rst b/Documentation/driver-api/pci/p2pdma.rst
index 44deb52beeb4..d0b241628cf1 100644
--- a/Documentation/driver-api/pci/p2pdma.rst
+++ b/Documentation/driver-api/pci/p2pdma.rst
@@ -83,19 +83,9 @@ this to include other types of resources like doorbells.
Client Drivers
--------------

-A client driver typically only has to conditionally change its DMA map
-routine to use the mapping function :c:func:`pci_p2pdma_map_sg()` instead
-of the usual :c:func:`dma_map_sg()` function. Memory mapped in this
-way does not need to be unmapped.
-
-The client may also, optionally, make use of
-:c:func:`is_pci_p2pdma_page()` to determine when to use the P2P mapping
-functions and when to use the regular mapping functions. In some
-situations, it may be more appropriate to use a flag to indicate a
-given request is P2P memory and map appropriately. It is important to
-ensure that struct pages that back P2P memory stay out of code that
-does not have support for them as other code may treat the pages as
-regular memory which may not be appropriate.
+A client driver only has to use the mapping API :c:func:`dma_map_sg()`
+and :c:func:`dma_unmap_sg()` functions as usual, and the implementation
+will do the right thing for the P2P capable memory.


Orchestrator Drivers
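
[Aside, not part of the patch: a minimal, hypothetical client-driver fragment
matching the updated p2pdma.rst wording above. The function name and the I/O
details are illustrative only; the point is that the ordinary dma_map_sg() /
dma_unmap_sg() calls are used even when the scatterlist contains P2P-capable
memory, as the new documentation text states.]

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Map a scatterlist that may contain P2PDMA pages.  Per the updated
 * documentation, the DMA mapping implementation handles P2P-capable
 * memory itself, so no pci_p2pdma_map_sg() call is needed here.
 */
static int my_dev_issue_io(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir)
{
	int mapped = dma_map_sg(dev, sgl, nents, dir);

	if (!mapped)
		return -EIO;

	/* ... program the device using sg_dma_address()/sg_dma_len() ... */

	/* Unmap with the original nents, not the returned count. */
	dma_unmap_sg(dev, sgl, nents, dir);
	return 0;
}
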
diff --git a/Makefile b/Makefile
index a4a2932a4318..bad16eda67e2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
-SUBLEVEL = 13
+SUBLEVEL = 14
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth

diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
index 516f0d2495e2..950adb63af70 100644
--- a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
@@ -738,7 +738,7 @@ pwrkey@1c {

xoadc: xoadc@197 {
compatible = "qcom,pm8921-adc";
- reg = <197>;
+ reg = <0x197>;
interrupts-extended = <&pmicintc 78 IRQ_TYPE_EDGE_RISING>;
#address-cells = <2>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
index 44f3f0127fd7..78738371f634 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
@@ -404,8 +404,8 @@ mmcc: clock-controller@fd8c0000 {
<&gcc GPLL0_VOTE>,
<&gcc GPLL1_VOTE>,
<&rpmcc RPM_SMD_GFX3D_CLK_SRC>,
- <0>,
- <0>;
+ <&mdss_dsi0_phy 1>,
+ <&mdss_dsi0_phy 0>;
clock-names = "xo",
"mmss_gpll0_vote",
"gpll0_vote",
diff --git a/arch/arm/boot/dts/qcom/qcom-sdx65.dtsi b/arch/arm/boot/dts/qcom/qcom-sdx65.dtsi
index 1a3583029a64..271899c861c0 100644
--- a/arch/arm/boot/dts/qcom/qcom-sdx65.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-sdx65.dtsi
@@ -338,7 +338,7 @@ pcie_ep: pcie-ep@1c00000 {
power-domains = <&gcc PCIE_GDSC>;

phys = <&pcie_phy>;
- phy-names = "pcie-phy";
+ phy-names = "pciephy";

max-link-speed = <3>;
num-lanes = <2>;
@@ -530,7 +530,7 @@ restart@c264000 {
reg = <0x0c264000 0x1000>;
};

- spmi_bus: qcom,spmi@c440000 {
+ spmi_bus: spmi@c440000 {
compatible = "qcom,spmi-pmic-arb";
reg = <0xc440000 0xd00>,
<0xc600000 0x2000000>,
diff --git a/arch/arm/boot/dts/st/stm32mp157a-dk1-scmi.dts b/arch/arm/boot/dts/st/stm32mp157a-dk1-scmi.dts
index afcd6285890c..c27963898b5e 100644
--- a/arch/arm/boot/dts/st/stm32mp157a-dk1-scmi.dts
+++ b/arch/arm/boot/dts/st/stm32mp157a-dk1-scmi.dts
@@ -11,7 +11,7 @@

/ {
model = "STMicroelectronics STM32MP157A-DK1 SCMI Discovery Board";
- compatible = "st,stm32mp157a-dk1-scmi", "st,stm32mp157a-dk1", "st,stm32mp157";
+ compatible = "st,stm32mp157a-dk1-scmi", "st,stm32mp157";

reserved-memory {
optee@de000000 {
diff --git a/arch/arm/boot/dts/st/stm32mp157c-dk2-scmi.dts b/arch/arm/boot/dts/st/stm32mp157c-dk2-scmi.dts
index 39358d902000..622618943134 100644
--- a/arch/arm/boot/dts/st/stm32mp157c-dk2-scmi.dts
+++ b/arch/arm/boot/dts/st/stm32mp157c-dk2-scmi.dts
@@ -11,7 +11,7 @@

/ {
model = "STMicroelectronics STM32MP157C-DK2 SCMI Discovery Board";
- compatible = "st,stm32mp157c-dk2-scmi", "st,stm32mp157c-dk2", "st,stm32mp157";
+ compatible = "st,stm32mp157c-dk2-scmi", "st,stm32mp157";

reserved-memory {
optee@de000000 {
diff --git a/arch/arm/boot/dts/st/stm32mp157c-ed1-scmi.dts b/arch/arm/boot/dts/st/stm32mp157c-ed1-scmi.dts
index 07ea765a4553..c7c4d7e89d61 100644
--- a/arch/arm/boot/dts/st/stm32mp157c-ed1-scmi.dts
+++ b/arch/arm/boot/dts/st/stm32mp157c-ed1-scmi.dts
@@ -11,7 +11,7 @@

/ {
model = "STMicroelectronics STM32MP157C-ED1 SCMI eval daughter";
- compatible = "st,stm32mp157c-ed1-scmi", "st,stm32mp157c-ed1", "st,stm32mp157";
+ compatible = "st,stm32mp157c-ed1-scmi", "st,stm32mp157";

reserved-memory {
optee@fe000000 {
diff --git a/arch/arm/boot/dts/st/stm32mp157c-ev1-scmi.dts b/arch/arm/boot/dts/st/stm32mp157c-ev1-scmi.dts
index 813086ec2489..2ab77e64f1bb 100644
--- a/arch/arm/boot/dts/st/stm32mp157c-ev1-scmi.dts
+++ b/arch/arm/boot/dts/st/stm32mp157c-ev1-scmi.dts
@@ -11,8 +11,7 @@

/ {
model = "STMicroelectronics STM32MP157C-EV1 SCMI eval daughter on eval mother";
- compatible = "st,stm32mp157c-ev1-scmi", "st,stm32mp157c-ev1", "st,stm32mp157c-ed1",
- "st,stm32mp157";
+ compatible = "st,stm32mp157c-ev1-scmi", "st,stm32mp157c-ed1", "st,stm32mp157";

reserved-memory {
optee@fe000000 {
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index 4316e1370627..2a8a9fe46586 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -4,12 +4,14 @@ menuconfig ARCH_DAVINCI
bool "TI DaVinci"
depends on ARCH_MULTI_V5
depends on CPU_LITTLE_ENDIAN
+ select CPU_ARM926T
select DAVINCI_TIMER
select ZONE_DMA
select PM_GENERIC_DOMAINS if PM
select PM_GENERIC_DOMAINS_OF if PM && OF
select REGMAP_MMIO
select RESET_CONTROLLER
+ select PINCTRL
select PINCTRL_SINGLE

if ARCH_DAVINCI
diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
index 738024baaa57..54faf83cb436 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
@@ -1408,7 +1408,7 @@ gpu_3d: gpu@38000000 {
assigned-clocks = <&clk IMX8MM_CLK_GPU3D_CORE>,
<&clk IMX8MM_GPU_PLL_OUT>;
assigned-clock-parents = <&clk IMX8MM_GPU_PLL_OUT>;
- assigned-clock-rates = <0>, <1000000000>;
+ assigned-clock-rates = <0>, <800000000>;
power-domains = <&pgc_gpu>;
};

@@ -1423,7 +1423,7 @@ gpu_2d: gpu@38008000 {
assigned-clocks = <&clk IMX8MM_CLK_GPU2D_CORE>,
<&clk IMX8MM_GPU_PLL_OUT>;
assigned-clock-parents = <&clk IMX8MM_GPU_PLL_OUT>;
- assigned-clock-rates = <0>, <1000000000>;
+ assigned-clock-rates = <0>, <800000000>;
power-domains = <&pgc_gpu>;
};

diff --git a/arch/arm64/boot/dts/hisilicon/hikey970-pmic.dtsi b/arch/arm64/boot/dts/hisilicon/hikey970-pmic.dtsi
index 970047f2dabd..c06e011a6c3f 100644
--- a/arch/arm64/boot/dts/hisilicon/hikey970-pmic.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hikey970-pmic.dtsi
@@ -25,9 +25,6 @@ pmic: pmic@0 {
gpios = <&gpio28 0 0>;

regulators {
- #address-cells = <1>;
- #size-cells = <0>;
-
ldo3: ldo3 { /* HDMI */
regulator-name = "ldo3";
regulator-min-microvolt = <1500000>;
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
index 9eab2bb22134..805ef2d79b40 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
@@ -130,7 +130,7 @@ rtc@6f {
compatible = "microchip,mcp7940x";
reg = <0x6f>;
interrupt-parent = <&gpiosb>;
- interrupts = <5 0>; /* GPIO2_5 */
+ interrupts = <5 IRQ_TYPE_EDGE_FALLING>; /* GPIO2_5 */
};
};

diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index 976dc968b3ca..df6e9990cd5f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -1660,7 +1660,7 @@ mmsys: syscon@14000000 {
mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0 0x1000>;
};

- mdp3-rdma0@14001000 {
+ dma-controller0@14001000 {
compatible = "mediatek,mt8183-mdp3-rdma";
reg = <0 0x14001000 0 0x1000>;
mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
@@ -1672,6 +1672,7 @@ mdp3-rdma0@14001000 {
iommus = <&iommu M4U_PORT_MDP_RDMA0>;
mboxes = <&gce 20 CMDQ_THR_PRIO_LOWEST 0>,
<&gce 21 CMDQ_THR_PRIO_LOWEST 0>;
+ #dma-cells = <1>;
};

mdp3-rsz0@14003000 {
@@ -1692,7 +1693,7 @@ mdp3-rsz1@14004000 {
clocks = <&mmsys CLK_MM_MDP_RSZ1>;
};

- mdp3-wrot0@14005000 {
+ dma-controller@14005000 {
compatible = "mediatek,mt8183-mdp3-wrot";
reg = <0 0x14005000 0 0x1000>;
mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x5000 0x1000>;
@@ -1701,6 +1702,7 @@ mdp3-wrot0@14005000 {
power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
clocks = <&mmsys CLK_MM_MDP_WROT0>;
iommus = <&iommu M4U_PORT_MDP_WROT0>;
+ #dma-cells = <1>;
};

mdp3-wdma@14006000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8186.dtsi b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
index df0c04f2ba1d..2fec6fd1c1a7 100644
--- a/arch/arm64/boot/dts/mediatek/mt8186.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
@@ -22,7 +22,7 @@ / {

aliases {
ovl0 = &ovl0;
- ovl_2l0 = &ovl_2l0;
+ ovl-2l0 = &ovl_2l0;
rdma0 = &rdma0;
rdma1 = &rdma1;
};
@@ -1160,14 +1160,14 @@ adsp: adsp@10680000 {
status = "disabled";
};

- adsp_mailbox0: mailbox@10686000 {
+ adsp_mailbox0: mailbox@10686100 {
compatible = "mediatek,mt8186-adsp-mbox";
#mbox-cells = <0>;
reg = <0 0x10686100 0 0x1000>;
interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH 0>;
};

- adsp_mailbox1: mailbox@10687000 {
+ adsp_mailbox1: mailbox@10687100 {
compatible = "mediatek,mt8186-adsp-mbox";
#mbox-cells = <0>;
reg = <0 0x10687100 0 0x1000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
index e0ac2e9f5b72..6708c4d21abf 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
@@ -2873,7 +2873,7 @@ larb3: larb@1c103000 {
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
};

- vdo1_rdma0: rdma@1c104000 {
+ vdo1_rdma0: dma-controller@1c104000 {
compatible = "mediatek,mt8195-vdo1-rdma";
reg = <0 0x1c104000 0 0x1000>;
interrupts = <GIC_SPI 495 IRQ_TYPE_LEVEL_HIGH 0>;
@@ -2881,9 +2881,10 @@ vdo1_rdma0: rdma@1c104000 {
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
iommus = <&iommu_vdo M4U_PORT_L2_MDP_RDMA0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x4000 0x1000>;
+ #dma-cells = <1>;
};

- vdo1_rdma1: rdma@1c105000 {
+ vdo1_rdma1: dma-controller@1c105000 {
compatible = "mediatek,mt8195-vdo1-rdma";
reg = <0 0x1c105000 0 0x1000>;
interrupts = <GIC_SPI 496 IRQ_TYPE_LEVEL_HIGH 0>;
@@ -2891,9 +2892,10 @@ vdo1_rdma1: rdma@1c105000 {
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
iommus = <&iommu_vpp M4U_PORT_L3_MDP_RDMA1>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x5000 0x1000>;
+ #dma-cells = <1>;
};

- vdo1_rdma2: rdma@1c106000 {
+ vdo1_rdma2: dma-controller@1c106000 {
compatible = "mediatek,mt8195-vdo1-rdma";
reg = <0 0x1c106000 0 0x1000>;
interrupts = <GIC_SPI 497 IRQ_TYPE_LEVEL_HIGH 0>;
@@ -2901,9 +2903,10 @@ vdo1_rdma2: rdma@1c106000 {
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
iommus = <&iommu_vdo M4U_PORT_L2_MDP_RDMA2>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x6000 0x1000>;
+ #dma-cells = <1>;
};

- vdo1_rdma3: rdma@1c107000 {
+ vdo1_rdma3: dma-controller@1c107000 {
compatible = "mediatek,mt8195-vdo1-rdma";
reg = <0 0x1c107000 0 0x1000>;
interrupts = <GIC_SPI 498 IRQ_TYPE_LEVEL_HIGH 0>;
@@ -2911,9 +2914,10 @@ vdo1_rdma3: rdma@1c107000 {
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
iommus = <&iommu_vpp M4U_PORT_L3_MDP_RDMA3>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x7000 0x1000>;
+ #dma-cells = <1>;
};

- vdo1_rdma4: rdma@1c108000 {
+ vdo1_rdma4: dma-controller@1c108000 {
compatible = "mediatek,mt8195-vdo1-rdma";
reg = <0 0x1c108000 0 0x1000>;
interrupts = <GIC_SPI 499 IRQ_TYPE_LEVEL_HIGH 0>;
@@ -2921,9 +2925,10 @@ vdo1_rdma4: rdma@1c108000 {
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
iommus = <&iommu_vdo M4U_PORT_L2_MDP_RDMA4>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x8000 0x1000>;
+ #dma-cells = <1>;
};

- vdo1_rdma5: rdma@1c109000 {
+ vdo1_rdma5: dma-controller@1c109000 {
compatible = "mediatek,mt8195-vdo1-rdma";
reg = <0 0x1c109000 0 0x1000>;
interrupts = <GIC_SPI 500 IRQ_TYPE_LEVEL_HIGH 0>;
@@ -2931,9 +2936,10 @@ vdo1_rdma5: rdma@1c109000 {
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
iommus = <&iommu_vpp M4U_PORT_L3_MDP_RDMA5>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x9000 0x1000>;
+ #dma-cells = <1>;
};

- vdo1_rdma6: rdma@1c10a000 {
+ vdo1_rdma6: dma-controller@1c10a000 {
compatible = "mediatek,mt8195-vdo1-rdma";
reg = <0 0x1c10a000 0 0x1000>;
interrupts = <GIC_SPI 501 IRQ_TYPE_LEVEL_HIGH 0>;
@@ -2941,9 +2947,10 @@ vdo1_rdma6: rdma@1c10a000 {
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
iommus = <&iommu_vdo M4U_PORT_L2_MDP_RDMA6>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0xa000 0x1000>;
+ #dma-cells = <1>;
};

- vdo1_rdma7: rdma@1c10b000 {
+ vdo1_rdma7: dma-controller@1c10b000 {
compatible = "mediatek,mt8195-vdo1-rdma";
reg = <0 0x1c10b000 0 0x1000>;
interrupts = <GIC_SPI 502 IRQ_TYPE_LEVEL_HIGH 0>;
@@ -2951,6 +2958,7 @@ vdo1_rdma7: rdma@1c10b000 {
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
iommus = <&iommu_vpp M4U_PORT_L3_MDP_RDMA7>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0xb000 0x1000>;
+ #dma-cells = <1>;
};

merge1: vpp-merge@1c10c000 {
diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
index 264845cecf92..fc907afe5174 100644
--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
@@ -565,7 +565,7 @@ usb3: usb@8af8800 {
<&gcc GCC_USB0_MOCK_UTMI_CLK>;
assigned-clock-rates = <133330000>,
<133330000>,
- <20000000>;
+ <24000000>;

resets = <&gcc GCC_USB0_BCR>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
index 0f7c59187896..37abb83ea464 100644
--- a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
+++ b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
@@ -366,6 +366,16 @@ &usb {
status = "okay";
};

+&usb_qmpphy {
+ vdda-phy-supply = <&pm2250_l12>;
+ vdda-pll-supply = <&pm2250_l13>;
+ status = "okay";
+};
+
+&usb_dwc3 {
+ dr_mode = "host";
+};
+
&usb_hsphy {
vdd-supply = <&pm2250_l12>;
vdda-pll-supply = <&pm2250_l13>;
diff --git a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
index a7278a9472ed..9738c0dacd58 100644
--- a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
+++ b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
@@ -518,7 +518,6 @@ &usb {

&usb_dwc3 {
maximum-speed = "super-speed";
- dr_mode = "peripheral";
};

&usb_hsphy {
diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
index dfa8ee5c75af..e95a004c3391 100644
--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
@@ -63,8 +63,8 @@ led-user4 {
function = LED_FUNCTION_INDICATOR;
color = <LED_COLOR_ID_GREEN>;
gpios = <&pm8150_gpios 10 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "panic-indicator";
default-state = "off";
+ panic-indicator;
};

led-wlan {
diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
index 9f4f58e831a4..d4ca92b98c7d 100644
--- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
@@ -1602,8 +1602,8 @@ usb_0: usb@a6f8800 {
assigned-clock-rates = <19200000>, <200000000>;

interrupts-extended = <&intc GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>,
- <&pdc 14 IRQ_TYPE_EDGE_RISING>,
- <&pdc 15 IRQ_TYPE_EDGE_RISING>,
+ <&pdc 14 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 15 IRQ_TYPE_EDGE_BOTH>,
<&pdc 12 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "pwr_event",
"dp_hs_phy_irq",
@@ -1689,8 +1689,8 @@ usb_1: usb@a8f8800 {
assigned-clock-rates = <19200000>, <200000000>;

interrupts-extended = <&intc GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>,
- <&pdc 8 IRQ_TYPE_EDGE_RISING>,
- <&pdc 7 IRQ_TYPE_EDGE_RISING>,
+ <&pdc 8 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 7 IRQ_TYPE_EDGE_BOTH>,
<&pdc 13 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "pwr_event",
"dp_hs_phy_irq",
@@ -1752,8 +1752,8 @@ usb_2: usb@a4f8800 {
assigned-clock-rates = <19200000>, <200000000>;

interrupts-extended = <&intc GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>,
- <&pdc 10 IRQ_TYPE_EDGE_RISING>,
- <&pdc 9 IRQ_TYPE_EDGE_RISING>;
+ <&pdc 10 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 9 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "pwr_event",
"dp_hs_phy_irq",
"dm_hs_phy_irq";
@@ -2173,7 +2173,7 @@ watchdog@17c10000 {
compatible = "qcom,apss-wdt-sa8775p", "qcom,kpss-wdt";
reg = <0x0 0x17c10000 0x0 0x1000>;
clocks = <&sleep_clk>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
};

memtimer: timer@17c20000 {
diff --git a/arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts b/arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts
index dbb48934d499..3342cb048038 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts
@@ -209,9 +209,22 @@ alc5682: codec@1a {
AVDD-supply = <&vreg_l15a_1p8>;
MICVDD-supply = <&reg_codec_3p3>;
VBAT-supply = <&reg_codec_3p3>;
+ DBVDD-supply = <&vreg_l15a_1p8>;
+ LDO1-IN-supply = <&vreg_l15a_1p8>;
+
+ /*
+ * NOTE: The board has a path from this codec to the
+ * DMIC microphones in the lid, however some of the option
+ * resistors are absent and the microphones are connected
+ * to the SoC instead.
+ *
+ * If the resistors were to be changed by the user to
+ * connect the codec, the following could be used:
+ *
+ * realtek,dmic1-data-pin = <1>;
+ * realtek,dmic1-clk-pin = <1>;
+ */

- realtek,dmic1-data-pin = <1>;
- realtek,dmic1-clk-pin = <1>;
realtek,jd-src = <1>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
index a79c0f2e1879..589ae40cd3cc 100644
--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
@@ -3587,7 +3587,7 @@ watchdog@17c10000 {
compatible = "qcom,apss-wdt-sc7180", "qcom,kpss-wdt";
reg = <0 0x17c10000 0 0x1000>;
clocks = <&sleep_clk>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
};

timer@17c20000 {
diff --git a/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi b/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi
index 2e1cd219fc18..5d462ae14ba1 100644
--- a/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi
@@ -46,6 +46,26 @@ wpss_mem: memory@9ae00000 {
};
};

+&lpass_aon {
+ status = "okay";
+};
+
+&lpass_core {
+ status = "okay";
+};
+
+&lpass_hm {
+ status = "okay";
+};
+
+&lpasscc {
+ status = "okay";
+};
+
+&pdc_reset {
+ status = "okay";
+};
+
/* The PMIC PON code isn't compatible w/ how Chrome EC/BIOS handle things. */
&pmk8350_pon {
status = "disabled";
@@ -84,6 +104,10 @@ &scm {
dma-coherent;
};

+&watchdog {
+ status = "okay";
+};
+
&wifi {
status = "okay";

diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index 91bb58c6b1a6..ec5c36425a22 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -925,6 +925,7 @@ sdhc_1: mmc@7c4000 {

bus-width = <8>;
supports-cqe;
+ dma-coherent;

qcom,dll-config = <0x0007642c>;
qcom,ddr-config = <0x80040868>;
@@ -2255,6 +2256,7 @@ lpasscc: lpasscc@3000000 {
clocks = <&gcc GCC_CFG_NOC_LPASS_CLK>;
clock-names = "iface";
#clock-cells = <1>;
+ status = "reserved"; /* Owned by ADSP firmware */
};

lpass_rx_macro: codec@3200000 {
@@ -2406,6 +2408,7 @@ lpass_aon: clock-controller@3380000 {
clock-names = "bi_tcxo", "bi_tcxo_ao", "iface";
#clock-cells = <1>;
#power-domain-cells = <1>;
+ status = "reserved"; /* Owned by ADSP firmware */
};

lpass_core: clock-controller@3900000 {
@@ -2416,6 +2419,7 @@ lpass_core: clock-controller@3900000 {
power-domains = <&lpass_hm LPASS_CORE_CC_LPASS_CORE_HM_GDSC>;
#clock-cells = <1>;
#power-domain-cells = <1>;
+ status = "reserved"; /* Owned by ADSP firmware */
};

lpass_cpu: audio@3987000 {
@@ -2486,6 +2490,7 @@ lpass_hm: clock-controller@3c00000 {
clock-names = "bi_tcxo";
#clock-cells = <1>;
#power-domain-cells = <1>;
+ status = "reserved"; /* Owned by ADSP firmware */
};

lpass_ag_noc: interconnect@3c40000 {
@@ -2554,7 +2559,8 @@ gpu: gpu@3d00000 {
"cx_mem",
"cx_dbgc";
interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>;
- iommus = <&adreno_smmu 0 0x401>;
+ iommus = <&adreno_smmu 0 0x400>,
+ <&adreno_smmu 1 0x400>;
operating-points-v2 = <&gpu_opp_table>;
qcom,gmu = <&gmu>;
interconnects = <&gem_noc MASTER_GFX3D 0 &mc_virt SLAVE_EBI1 0>;
@@ -2728,6 +2734,7 @@ adreno_smmu: iommu@3da0000 {
"gpu_cc_hub_aon_clk";

power-domains = <&gpucc GPU_CC_CX_GDSC>;
+ dma-coherent;
};

remoteproc_mpss: remoteproc@4080000 {
@@ -3285,6 +3292,7 @@ sdhc_2: mmc@8804000 {
operating-points-v2 = <&sdhc2_opp_table>;

bus-width = <4>;
+ dma-coherent;

qcom,dll-config = <0x0007642c>;

@@ -3405,8 +3413,8 @@ usb_2: usb@8cf8800 {
assigned-clock-rates = <19200000>, <200000000>;

interrupts-extended = <&intc GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>,
- <&pdc 12 IRQ_TYPE_EDGE_RISING>,
- <&pdc 13 IRQ_TYPE_EDGE_RISING>;
+ <&pdc 12 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 13 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "hs_phy_irq",
"dp_hs_phy_irq",
"dm_hs_phy_irq";
@@ -4199,6 +4207,7 @@ pdc_reset: reset-controller@b5e0000 {
compatible = "qcom,sc7280-pdc-global";
reg = <0 0x0b5e0000 0 0x20000>;
#reset-cells = <1>;
+ status = "reserved"; /* Owned by firmware */
};

tsens0: thermal-sensor@c263000 {
@@ -5195,11 +5204,12 @@ msi-controller@17a40000 {
};
};

- watchdog@17c10000 {
+ watchdog: watchdog@17c10000 {
compatible = "qcom,apss-wdt-sc7280", "qcom,kpss-wdt";
reg = <0 0x17c10000 0 0x1000>;
clocks = <&sleep_clk>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ status = "reserved"; /* Owned by Gunyah hyp */
};

timer@17c20000 {
diff --git a/arch/arm64/boot/dts/qcom/sc8180x-primus.dts b/arch/arm64/boot/dts/qcom/sc8180x-primus.dts
index 834e6f9fb7c8..ae008c3b0aed 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x-primus.dts
+++ b/arch/arm64/boot/dts/qcom/sc8180x-primus.dts
@@ -42,7 +42,7 @@ gpio-keys {
pinctrl-0 = <&hall_int_active_state>;

lid-switch {
- gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>;
+ gpios = <&tlmm 121 GPIO_ACTIVE_LOW>;
linux,input-type = <EV_SW>;
linux,code = <SW_LID>;
wakeup-source;
diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
index 486f7ffef43b..f4381424e70a 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
@@ -1749,23 +1749,29 @@ pcie0: pci@1c00000 {
<&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_0 0>;
interconnect-names = "pcie-mem", "cpu-pcie";

- phys = <&pcie0_lane>;
+ phys = <&pcie0_phy>;
phy-names = "pciephy";
+ dma-coherent;

status = "disabled";
};

- pcie0_phy: phy-wrapper@1c06000 {
+ pcie0_phy: phy@1c06000 {
compatible = "qcom,sc8180x-qmp-pcie-phy";
- reg = <0 0x1c06000 0 0x1c0>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
+ reg = <0 0x01c06000 0 0x1000>;
clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
<&gcc GCC_PCIE_0_CFG_AHB_CLK>,
<&gcc GCC_PCIE_0_CLKREF_CLK>,
- <&gcc GCC_PCIE1_PHY_REFGEN_CLK>;
- clock-names = "aux", "cfg_ahb", "ref", "refgen";
+ <&gcc GCC_PCIE0_PHY_REFGEN_CLK>,
+ <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "aux",
+ "cfg_ahb",
+ "ref",
+ "refgen",
+ "pipe";
+ #clock-cells = <0>;
+ clock-output-names = "pcie_0_pipe_clk";
+ #phy-cells = <0>;

resets = <&gcc GCC_PCIE_0_PHY_BCR>;
reset-names = "phy";
@@ -1774,21 +1780,6 @@ pcie0_phy: phy-wrapper@1c06000 {
assigned-clock-rates = <100000000>;

status = "disabled";
-
- pcie0_lane: phy@1c06200 {
- reg = <0 0x1c06200 0 0x170>, /* tx0 */
- <0 0x1c06400 0 0x200>, /* rx0 */
- <0 0x1c06a00 0 0x1f0>, /* pcs */
- <0 0x1c06600 0 0x170>, /* tx1 */
- <0 0x1c06800 0 0x200>, /* rx1 */
- <0 0x1c06e00 0 0xf4>; /* pcs_com */
- clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
- clock-names = "pipe0";
-
- #clock-cells = <0>;
- clock-output-names = "pcie_0_pipe_clk";
- #phy-cells = <0>;
- };
};

pcie3: pci@1c08000 {
@@ -1856,23 +1847,30 @@ pcie3: pci@1c08000 {
<&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_0 0>;
interconnect-names = "pcie-mem", "cpu-pcie";

- phys = <&pcie3_lane>;
+ phys = <&pcie3_phy>;
phy-names = "pciephy";
+ dma-coherent;

status = "disabled";
};

- pcie3_phy: phy-wrapper@1c0c000 {
+ pcie3_phy: phy@1c0c000 {
compatible = "qcom,sc8180x-qmp-pcie-phy";
- reg = <0 0x1c0c000 0 0x1c0>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
+ reg = <0 0x01c0c000 0 0x1000>;
clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
<&gcc GCC_PCIE_3_CFG_AHB_CLK>,
<&gcc GCC_PCIE_3_CLKREF_CLK>,
- <&gcc GCC_PCIE2_PHY_REFGEN_CLK>;
- clock-names = "aux", "cfg_ahb", "ref", "refgen";
+ <&gcc GCC_PCIE3_PHY_REFGEN_CLK>,
+ <&gcc GCC_PCIE_3_PIPE_CLK>;
+ clock-names = "aux",
+ "cfg_ahb",
+ "ref",
+ "refgen",
+ "pipe";
+ #clock-cells = <0>;
+ clock-output-names = "pcie_3_pipe_clk";
+
+ #phy-cells = <0>;

resets = <&gcc GCC_PCIE_3_PHY_BCR>;
reset-names = "phy";
@@ -1881,21 +1879,6 @@ pcie3_phy: phy-wrapper@1c0c000 {
assigned-clock-rates = <100000000>;

status = "disabled";
-
- pcie3_lane: phy@1c0c200 {
- reg = <0 0x1c0c200 0 0x170>, /* tx0 */
- <0 0x1c0c400 0 0x200>, /* rx0 */
- <0 0x1c0ca00 0 0x1f0>, /* pcs */
- <0 0x1c0c600 0 0x170>, /* tx1 */
- <0 0x1c0c800 0 0x200>, /* rx1 */
- <0 0x1c0ce00 0 0xf4>; /* pcs_com */
- clocks = <&gcc GCC_PCIE_3_PIPE_CLK>;
- clock-names = "pipe0";
-
- #clock-cells = <0>;
- clock-output-names = "pcie_3_pipe_clk";
- #phy-cells = <0>;
- };
};

pcie1: pci@1c10000 {
@@ -1963,23 +1946,30 @@ pcie1: pci@1c10000 {
<&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_0 0>;
interconnect-names = "pcie-mem", "cpu-pcie";

- phys = <&pcie1_lane>;
+ phys = <&pcie1_phy>;
phy-names = "pciephy";
+ dma-coherent;

status = "disabled";
};

- pcie1_phy: phy-wrapper@1c16000 {
+ pcie1_phy: phy@1c16000 {
compatible = "qcom,sc8180x-qmp-pcie-phy";
- reg = <0 0x1c16000 0 0x1c0>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
+ reg = <0 0x01c16000 0 0x1000>;
clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
<&gcc GCC_PCIE_1_CFG_AHB_CLK>,
<&gcc GCC_PCIE_1_CLKREF_CLK>,
- <&gcc GCC_PCIE1_PHY_REFGEN_CLK>;
- clock-names = "aux", "cfg_ahb", "ref", "refgen";
+ <&gcc GCC_PCIE1_PHY_REFGEN_CLK>,
+ <&gcc GCC_PCIE_1_PIPE_CLK>;
+ clock-names = "aux",
+ "cfg_ahb",
+ "ref",
+ "refgen",
+ "pipe";
+ #clock-cells = <0>;
+ clock-output-names = "pcie_1_pipe_clk";
+
+ #phy-cells = <0>;

resets = <&gcc GCC_PCIE_1_PHY_BCR>;
reset-names = "phy";
@@ -1988,21 +1978,6 @@ pcie1_phy: phy-wrapper@1c16000 {
assigned-clock-rates = <100000000>;

status = "disabled";
-
- pcie1_lane: phy@1c0e200 {
- reg = <0 0x1c16200 0 0x170>, /* tx0 */
- <0 0x1c16400 0 0x200>, /* rx0 */
- <0 0x1c16a00 0 0x1f0>, /* pcs */
- <0 0x1c16600 0 0x170>, /* tx1 */
- <0 0x1c16800 0 0x200>, /* rx1 */
- <0 0x1c16e00 0 0xf4>; /* pcs_com */
- clocks = <&gcc GCC_PCIE_1_PIPE_CLK>;
- clock-names = "pipe0";
- #clock-cells = <0>;
- clock-output-names = "pcie_1_pipe_clk";
-
- #phy-cells = <0>;
- };
};

pcie2: pci@1c18000 {
@@ -2070,23 +2045,30 @@ pcie2: pci@1c18000 {
<&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_0 0>;
interconnect-names = "pcie-mem", "cpu-pcie";

- phys = <&pcie2_lane>;
+ phys = <&pcie2_phy>;
phy-names = "pciephy";
+ dma-coherent;

status = "disabled";
};

- pcie2_phy: phy-wrapper@1c1c000 {
+ pcie2_phy: phy@1c1c000 {
compatible = "qcom,sc8180x-qmp-pcie-phy";
- reg = <0 0x1c1c000 0 0x1c0>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
+ reg = <0 0x01c1c000 0 0x1000>;
clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
<&gcc GCC_PCIE_2_CFG_AHB_CLK>,
<&gcc GCC_PCIE_2_CLKREF_CLK>,
- <&gcc GCC_PCIE2_PHY_REFGEN_CLK>;
- clock-names = "aux", "cfg_ahb", "ref", "refgen";
+ <&gcc GCC_PCIE2_PHY_REFGEN_CLK>,
+ <&gcc GCC_PCIE_2_PIPE_CLK>;
+ clock-names = "aux",
+ "cfg_ahb",
+ "ref",
+ "refgen",
+ "pipe";
+ #clock-cells = <0>;
+ clock-output-names = "pcie_2_pipe_clk";
+
+ #phy-cells = <0>;

resets = <&gcc GCC_PCIE_2_PHY_BCR>;
reset-names = "phy";
@@ -2095,22 +2077,6 @@ pcie2_phy: phy-wrapper@1c1c000 {
assigned-clock-rates = <100000000>;

status = "disabled";
-
- pcie2_lane: phy@1c0e200 {
- reg = <0 0x1c1c200 0 0x170>, /* tx0 */
- <0 0x1c1c400 0 0x200>, /* rx0 */
- <0 0x1c1ca00 0 0x1f0>, /* pcs */
- <0 0x1c1c600 0 0x170>, /* tx1 */
- <0 0x1c1c800 0 0x200>, /* rx1 */
- <0 0x1c1ce00 0 0xf4>; /* pcs_com */
- clocks = <&gcc GCC_PCIE_2_PIPE_CLK>;
- clock-names = "pipe0";
-
- #clock-cells = <0>;
- clock-output-names = "pcie_2_pipe_clk";
-
- #phy-cells = <0>;
- };
};

ufs_mem_hc: ufshc@1d84000 {
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
index 38edaf51aa34..f2055899ae7a 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
@@ -82,6 +82,9 @@ switch-lid {
leds {
compatible = "gpio-leds";

+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_indicator_en>;
+
led-camera-indicator {
label = "white:camera-indicator";
function = LED_FUNCTION_INDICATOR;
@@ -601,6 +604,7 @@ mdss0_dp3_out: endpoint {
};

&mdss0_dp3_phy {
+ compatible = "qcom,sc8280xp-edp-phy";
vdda-phy-supply = <&vreg_l6b>;
vdda-pll-supply = <&vreg_l3b>;

@@ -1277,6 +1281,13 @@ hstp-sw-ctrl-pins {
};
};

+ cam_indicator_en: cam-indicator-en-state {
+ pins = "gpio28";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
edp_reg_en: edp-reg-en-state {
pins = "gpio25";
function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
index cad59af7ccef..b8081513176a 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
@@ -4225,7 +4225,7 @@ watchdog@17c10000 {
compatible = "qcom,apss-wdt-sc8280xp", "qcom,kpss-wdt";
reg = <0 0x17c10000 0 0x1000>;
clocks = <&sleep_clk>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
};

timer@17c20000 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
index c7eba6c491be..7e7bf3fb3be6 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
@@ -67,8 +67,8 @@ led-0 {
function = LED_FUNCTION_INDICATOR;
color = <LED_COLOR_ID_GREEN>;
gpios = <&pm8998_gpios 13 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "panic-indicator";
default-state = "off";
+ panic-indicator;
};

led-1 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 055ca80c0075..2cf1993a8190 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -5118,7 +5118,7 @@ watchdog@17980000 {
compatible = "qcom,apss-wdt-sdm845", "qcom,kpss-wdt";
reg = <0 0x17980000 0 0x1000>;
clocks = <&sleep_clk>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
};

apss_shared: mailbox@17990000 {
diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi
index 197f8fed19a2..07081088ba14 100644
--- a/arch/arm64/boot/dts/qcom/sm6125.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi
@@ -1165,6 +1165,10 @@ usb3: usb@4ef8800 {
<&gcc GCC_USB30_PRIM_MASTER_CLK>;
assigned-clock-rates = <19200000>, <66666667>;

+ interrupts = <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq";
+
power-domains = <&gcc USB30_PRIM_GDSC>;
qcom,select-utmi-as-pipe-clk;
status = "disabled";
diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
index 8fd6f4d03490..6464e144c228 100644
--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
@@ -2524,7 +2524,7 @@ watchdog@17c10000 {
compatible = "qcom,apss-wdt-sm6350", "qcom,kpss-wdt";
reg = <0 0x17c10000 0 0x1000>;
clocks = <&sleep_clk>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
};

timer@17c20000 {
diff --git a/arch/arm64/boot/dts/qcom/sm6375.dtsi b/arch/arm64/boot/dts/qcom/sm6375.dtsi
index e7ff55443da7..e56f7ea4ebc6 100644
--- a/arch/arm64/boot/dts/qcom/sm6375.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6375.dtsi
@@ -311,6 +311,25 @@ scm {
};
};

+ mpm: interrupt-controller {
+ compatible = "qcom,mpm";
+ qcom,rpm-msg-ram = <&apss_mpm>;
+ interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_SMP2P>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ #power-domain-cells = <0>;
+ interrupt-parent = <&intc>;
+ qcom,mpm-pin-count = <96>;
+ qcom,mpm-pin-map = <5 296>, /* Soundwire wake_irq */
+ <12 422>, /* DWC3 ss_phy_irq */
+ <86 183>, /* MPM wake, SPMI */
+ <89 314>, /* TSENS0 0C */
+ <90 315>, /* TSENS1 0C */
+ <93 164>, /* DWC3 dm_hs_phy_irq */
+ <94 165>; /* DWC3 dp_hs_phy_irq */
+ };
+
memory@80000000 {
device_type = "memory";
/* We expect the bootloader to fill in the size */
@@ -486,6 +505,7 @@ CPU_PD7: power-domain-cpu7 {

CLUSTER_PD: power-domain-cpu-cluster0 {
#power-domain-cells = <0>;
+ power-domains = <&mpm>;
domain-idle-states = <&CLUSTER_SLEEP_0>;
};
};
@@ -808,7 +828,7 @@ tlmm: pinctrl@500000 {
reg = <0 0x00500000 0 0x800000>;
interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
gpio-ranges = <&tlmm 0 0 157>;
- /* TODO: Hook up MPM as wakeup-parent when it's there */
+ wakeup-parent = <&mpm>;
interrupt-controller;
gpio-controller;
#interrupt-cells = <2>;
@@ -930,7 +950,7 @@ spmi_bus: spmi@1c40000 {
<0 0x01c0a000 0 0x26000>;
reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
interrupt-names = "periph_irq";
- interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&mpm 86 IRQ_TYPE_LEVEL_HIGH>;
qcom,ee = <0>;
qcom,channel = <0>;
#address-cells = <2>;
@@ -962,8 +982,15 @@ tsens1: thermal-sensor@4413000 {
};

rpm_msg_ram: sram@45f0000 {
- compatible = "qcom,rpm-msg-ram";
+ compatible = "qcom,rpm-msg-ram", "mmio-sram";
reg = <0 0x045f0000 0 0x7000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x0 0x045f0000 0x7000>;
+
+ apss_mpm: sram@1b8 {
+ reg = <0x1b8 0x48>;
+ };
};

sram@4690000 {
@@ -1360,10 +1387,10 @@ usb_1: usb@4ef8800 {
<&gcc GCC_USB30_PRIM_MASTER_CLK>;
assigned-clock-rates = <19200000>, <133333333>;

- interrupts = <GIC_SPI 302 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&intc GIC_SPI 302 IRQ_TYPE_LEVEL_HIGH>,
+ <&mpm 12 IRQ_TYPE_LEVEL_HIGH>,
+ <&mpm 93 IRQ_TYPE_EDGE_BOTH>,
+ <&mpm 94 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "hs_phy_irq",
"ss_phy_irq",
"dm_hs_phy_irq",
diff --git a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
index bb161b536da4..f4c6e1309a7e 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
@@ -127,8 +127,6 @@ vdda_qrefs_0p875_5:
vdda_sp_sensor:
vdda_ufs_2ln_core_1:
vdda_ufs_2ln_core_2:
- vdda_usb_ss_dp_core_1:
- vdda_usb_ss_dp_core_2:
vdda_qlink_lv:
vdda_qlink_lv_ck:
vreg_l5a_0p875: ldo5 {
@@ -210,6 +208,12 @@ vreg_l17a_3p0: ldo17 {
regulator-max-microvolt = <3008000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
+
+ vreg_l18a_0p8: ldo18 {
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
};

regulators-1 {
@@ -445,13 +449,13 @@ &usb_2_hsphy {
&usb_1_qmpphy {
status = "okay";
vdda-phy-supply = <&vreg_l3c_1p2>;
- vdda-pll-supply = <&vdda_usb_ss_dp_core_1>;
+ vdda-pll-supply = <&vreg_l18a_0p8>;
};

&usb_2_qmpphy {
status = "okay";
vdda-phy-supply = <&vreg_l3c_1p2>;
- vdda-pll-supply = <&vdda_usb_ss_dp_core_1>;
+ vdda-pll-supply = <&vreg_l5a_0p875>;
};

&usb_1 {
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index 19c6003dca15..32045fb48157 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -3959,6 +3959,7 @@ dispcc: clock-controller@af00000 {
"dp_phy_pll_link_clk",
"dp_phy_pll_vco_div_clk";
power-domains = <&rpmhpd SM8150_MMCX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
@@ -4197,7 +4198,7 @@ watchdog@17c10000 {
compatible = "qcom,apss-wdt-sm8150", "qcom,kpss-wdt";
reg = <0 0x17c10000 0 0x1000>;
clocks = <&sleep_clk>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
};

timer@17c20000 {
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index a4e58ad731c3..1a98481d0c7f 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -5664,7 +5664,7 @@ watchdog@17c10000 {
compatible = "qcom,apss-wdt-sm8250", "qcom,kpss-wdt";
reg = <0 0x17c10000 0 0x1000>;
clocks = <&sleep_clk>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
};

timer@17c20000 {
diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
index a94e069da83d..a7cf506f24b6 100644
--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
@@ -918,9 +918,9 @@ spi19: spi@894000 {
};
};

- gpi_dma0: dma-controller@9800000 {
+ gpi_dma0: dma-controller@900000 {
compatible = "qcom,sm8350-gpi-dma", "qcom,sm6350-gpi-dma";
- reg = <0 0x09800000 0 0x60000>;
+ reg = <0 0x00900000 0 0x60000>;
interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
index 2a60cf8bd891..79cc8fbcd846 100644
--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
@@ -2325,7 +2325,7 @@ swr2: soundwire-controller@33b0000 {
<GIC_SPI 520 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "core", "wakeup";

- clocks = <&vamacro>;
+ clocks = <&txmacro>;
clock-names = "iface";
label = "TX";

diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
index d115960bdeec..3a228d4f0c14 100644
--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
@@ -283,9 +283,9 @@ LITTLE_CPU_SLEEP_0: cpu-sleep-0-0 {
compatible = "arm,idle-state";
idle-state-name = "silver-rail-power-collapse";
arm,psci-suspend-param = <0x40000004>;
- entry-latency-us = <800>;
+ entry-latency-us = <550>;
exit-latency-us = <750>;
- min-residency-us = <4090>;
+ min-residency-us = <6700>;
local-timer-stop;
};

@@ -294,8 +294,18 @@ BIG_CPU_SLEEP_0: cpu-sleep-1-0 {
idle-state-name = "gold-rail-power-collapse";
arm,psci-suspend-param = <0x40000004>;
entry-latency-us = <600>;
- exit-latency-us = <1550>;
- min-residency-us = <4791>;
+ exit-latency-us = <1300>;
+ min-residency-us = <8136>;
+ local-timer-stop;
+ };
+
+ PRIME_CPU_SLEEP_0: cpu-sleep-2-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "goldplus-rail-power-collapse";
+ arm,psci-suspend-param = <0x40000004>;
+ entry-latency-us = <500>;
+ exit-latency-us = <1350>;
+ min-residency-us = <7480>;
local-timer-stop;
};
};
@@ -304,17 +314,17 @@ domain-idle-states {
CLUSTER_SLEEP_0: cluster-sleep-0 {
compatible = "domain-idle-state";
arm,psci-suspend-param = <0x41000044>;
- entry-latency-us = <1050>;
- exit-latency-us = <2500>;
- min-residency-us = <5309>;
+ entry-latency-us = <750>;
+ exit-latency-us = <2350>;
+ min-residency-us = <9144>;
};

CLUSTER_SLEEP_1: cluster-sleep-1 {
compatible = "domain-idle-state";
arm,psci-suspend-param = <0x4100c344>;
- entry-latency-us = <2700>;
- exit-latency-us = <3500>;
- min-residency-us = <13959>;
+ entry-latency-us = <2800>;
+ exit-latency-us = <4400>;
+ min-residency-us = <10150>;
};
};
};
@@ -398,7 +408,7 @@ CPU_PD6: power-domain-cpu6 {
CPU_PD7: power-domain-cpu7 {
#power-domain-cells = <0>;
power-domains = <&CLUSTER_PD>;
- domain-idle-states = <&BIG_CPU_SLEEP_0>;
+ domain-idle-states = <&PRIME_CPU_SLEEP_0>;
};

CLUSTER_PD: power-domain-cluster {
@@ -2178,7 +2188,7 @@ swr2: soundwire-controller@6d30000 {
interrupts = <GIC_SPI 496 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 520 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "core", "wakeup";
- clocks = <&lpass_vamacro>;
+ clocks = <&lpass_txmacro>;
clock-names = "iface";
label = "TX";

@@ -2893,8 +2903,8 @@ usb_1: usb@a6f8800 {

interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
<&pdc 17 IRQ_TYPE_LEVEL_HIGH>,
- <&pdc 15 IRQ_TYPE_EDGE_RISING>,
- <&pdc 14 IRQ_TYPE_EDGE_RISING>;
+ <&pdc 15 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 14 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "hs_phy_irq",
"ss_phy_irq",
"dm_hs_phy_irq",
diff --git a/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-cpu.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-cpu.dtsi
index bb4a5270f71b..913f70fe6c5c 100644
--- a/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-cpu.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-cpu.dtsi
@@ -187,6 +187,9 @@ &extalr_clk {
};

&hscif0 {
+ pinctrl-0 = <&hscif0_pins>;
+ pinctrl-names = "default";
+
status = "okay";
};

diff --git a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
index 1c6d83b47cd2..6ecdf5d28339 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
@@ -455,7 +455,7 @@ &pcie2x1 {
&pinctrl {
leds {
sys_led_pin: sys-status-led-pin {
- rockchip,pins = <0 RK_PC7 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};

diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
index 3198af08fb9f..de36abb243f1 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
@@ -462,7 +462,7 @@ main_gpio0: gpio@600000 {
<193>, <194>, <195>;
interrupt-controller;
#interrupt-cells = <2>;
- ti,ngpio = <87>;
+ ti,ngpio = <92>;
ti,davinci-gpio-unbanked = <0>;
power-domains = <&k3_pds 77 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 77 0>;
@@ -480,7 +480,7 @@ main_gpio1: gpio@601000 {
<183>, <184>, <185>;
interrupt-controller;
#interrupt-cells = <2>;
- ti,ngpio = <88>;
+ ti,ngpio = <52>;
ti,davinci-gpio-unbanked = <0>;
power-domains = <&k3_pds 78 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 78 0>;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
index ba1c14a54acf..b849648d51f9 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
@@ -14,6 +14,16 @@

/ {
aliases {
+ serial0 = &wkup_uart0;
+ serial1 = &mcu_uart0;
+ serial2 = &main_uart0;
+ serial3 = &main_uart1;
+ i2c0 = &wkup_i2c0;
+ i2c1 = &mcu_i2c0;
+ i2c2 = &main_i2c0;
+ i2c3 = &main_i2c1;
+ i2c4 = &main_i2c2;
+ i2c5 = &main_i2c3;
spi0 = &mcu_spi0;
mmc0 = &sdhci1;
mmc1 = &sdhci0;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index bc460033a37a..c98068b6c122 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -1034,7 +1034,7 @@ dss: dss@4a00000 {
assigned-clocks = <&k3_clks 67 2>;
assigned-clock-parents = <&k3_clks 67 5>;

- interrupts = <GIC_SPI 166 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;

dma-coherent;

diff --git a/arch/arm64/boot/dts/xilinx/Makefile b/arch/arm64/boot/dts/xilinx/Makefile
index 5e40c0b4fa0a..1068b0fa8e98 100644
--- a/arch/arm64/boot/dts/xilinx/Makefile
+++ b/arch/arm64/boot/dts/xilinx/Makefile
@@ -22,11 +22,10 @@ dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-sm-k26-revA.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-smk-k26-revA.dtb

zynqmp-sm-k26-revA-sck-kv-g-revA-dtbs := zynqmp-sm-k26-revA.dtb zynqmp-sck-kv-g-revA.dtbo
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-sm-k26-revA-sck-kv-g-revA.dtb
zynqmp-sm-k26-revA-sck-kv-g-revB-dtbs := zynqmp-sm-k26-revA.dtb zynqmp-sck-kv-g-revB.dtbo
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-sm-k26-revA-sck-kv-g-revB.dtb
zynqmp-smk-k26-revA-sck-kv-g-revA-dtbs := zynqmp-smk-k26-revA.dtb zynqmp-sck-kv-g-revA.dtbo
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-smk-k26-revA-sck-kv-g-revA.dtb
zynqmp-smk-k26-revA-sck-kv-g-revB-dtbs := zynqmp-smk-k26-revA.dtb zynqmp-sck-kv-g-revB.dtbo
-
-zynqmp-sm-k26-revA-sck-kr-g-revA-dtbs := zynqmp-sm-k26-revA.dtb zynqmp-sck-kr-g-revA.dtbo
-zynqmp-sm-k26-revA-sck-kr-g-revB-dtbs := zynqmp-sm-k26-revA.dtb zynqmp-sck-kr-g-revB.dtbo
-zynqmp-smk-k26-revA-sck-kr-g-revA-dtbs := zynqmp-smk-k26-revA.dtb zynqmp-sck-kr-g-revA.dtbo
-zynqmp-smk-k26-revA-sck-kr-g-revB-dtbs := zynqmp-smk-k26-revA.dtb zynqmp-sck-kr-g-revB.dtbo
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-smk-k26-revA-sck-kv-g-revB.dtb
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 20d7ef82de90..b3f64144b5cd 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1107,12 +1107,13 @@ static int za_set(struct task_struct *target,
}
}

- /* Allocate/reinit ZA storage */
- sme_alloc(target, true);
- if (!target->thread.sme_state) {
- ret = -ENOMEM;
- goto out;
- }
+ /*
+ * Only flush the storage if PSTATE.ZA was not already set,
+ * otherwise preserve any existing data.
+ */
+ sme_alloc(target, !thread_za_enabled(&target->thread));
+ if (!target->thread.sme_state)
+ return -ENOMEM;

/* If there is no data then disable ZA */
if (!count) {
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 5fe2365a629f..c420723548d8 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -584,7 +584,11 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
unsigned long flags;

raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+
irq = __vgic_its_check_cache(dist, db, devid, eventid);
+ if (irq)
+ vgic_get_irq_kref(irq);
+
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

return irq;
@@ -763,6 +767,7 @@ int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = true;
vgic_queue_irq_unlock(kvm, irq, flags);
+ vgic_put_irq(kvm, irq);

return 0;
}
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index 871a45d4fc84..ae5a3a717655 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -365,19 +365,26 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

raw_spin_lock_irqsave(&irq->irq_lock, flags);
- if (test_bit(i, &val)) {
- /*
- * pending_latch is set irrespective of irq type
- * (level or edge) to avoid dependency that VM should
- * restore irq config before pending info.
- */
- irq->pending_latch = true;
- vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
- } else {
+
+ /*
+ * pending_latch is set irrespective of irq type
+ * (level or edge) to avoid dependency that VM should
+ * restore irq config before pending info.
+ */
+ irq->pending_latch = test_bit(i, &val);
+
+ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ irq_set_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ irq->pending_latch);
irq->pending_latch = false;
- raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}

+ if (irq->pending_latch)
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+ else
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+
vgic_put_irq(vcpu->kvm, irq);
}

diff --git a/arch/csky/include/asm/jump_label.h b/arch/csky/include/asm/jump_label.h
index d488ba6084bc..98a3f4b168bd 100644
--- a/arch/csky/include/asm/jump_label.h
+++ b/arch/csky/include/asm/jump_label.h
@@ -43,5 +43,10 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key,
return true;
}

+enum jump_label_type;
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type);
+#define arch_jump_label_transform_static arch_jump_label_transform_static
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_CSKY_JUMP_LABEL_H */
diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h
index 9b16a3b8e706..f16bd42456e4 100644
--- a/arch/loongarch/include/asm/elf.h
+++ b/arch/loongarch/include/asm/elf.h
@@ -241,8 +241,6 @@ void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs);
do { \
current->thread.vdso = &vdso_info; \
\
- loongarch_set_personality_fcsr(state); \
- \
if (personality(current->personality) != PER_LINUX) \
set_personality(PER_LINUX); \
} while (0)
@@ -259,7 +257,6 @@ do { \
clear_thread_flag(TIF_32BIT_ADDR); \
\
current->thread.vdso = &vdso_info; \
- loongarch_set_personality_fcsr(state); \
\
p = personality(current->personality); \
if (p != PER_LINUX32 && p != PER_LINUX) \
@@ -340,6 +337,4 @@ extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
struct arch_elf_state *state);

-extern void loongarch_set_personality_fcsr(struct arch_elf_state *state);
-
#endif /* _ASM_ELF_H */
diff --git a/arch/loongarch/kernel/elf.c b/arch/loongarch/kernel/elf.c
index 183e94fc9c69..0fa81ced28dc 100644
--- a/arch/loongarch/kernel/elf.c
+++ b/arch/loongarch/kernel/elf.c
@@ -23,8 +23,3 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
{
return 0;
}
-
-void loongarch_set_personality_fcsr(struct arch_elf_state *state)
-{
- current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
-}
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index 767d94cce0de..f2ff8b5d591e 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -85,6 +85,7 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
regs->csr_euen = euen;
lose_fpu(0);
lose_lbt(0);
+ current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;

clear_thread_flag(TIF_LSX_CTX_LIVE);
clear_thread_flag(TIF_LASX_CTX_LIVE);
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index 00915fb3cb82..9eb7753d117d 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -461,7 +461,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
const u8 dst = regmap[insn->dst_reg];
const s16 off = insn->off;
const s32 imm = insn->imm;
- const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;

switch (code) {
@@ -865,8 +864,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext

/* dst = imm64 */
case BPF_LD | BPF_IMM | BPF_DW:
+ {
+ const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
+
move_imm(ctx, dst, imm64, is32);
return 1;
+ }

/* dst = *(size *)(src + off) */
case BPF_LDX | BPF_MEM | BPF_B:
diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
index f521874ebb07..67f067706af2 100644
--- a/arch/mips/alchemy/devboards/db1200.c
+++ b/arch/mips/alchemy/devboards/db1200.c
@@ -847,7 +847,7 @@ int __init db1200_dev_setup(void)
i2c_register_board_info(0, db1200_i2c_devs,
ARRAY_SIZE(db1200_i2c_devs));
spi_register_board_info(db1200_spi_devs,
- ARRAY_SIZE(db1200_i2c_devs));
+ ARRAY_SIZE(db1200_spi_devs));

/* SWITCHES: S6.8 I2C/SPI selector (OFF=I2C ON=SPI)
* S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index fd91d9c9a252..6c6837181f55 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -589,7 +589,7 @@ int __init db1550_dev_setup(void)
i2c_register_board_info(0, db1550_i2c_devs,
ARRAY_SIZE(db1550_i2c_devs));
spi_register_board_info(db1550_spi_devs,
- ARRAY_SIZE(db1550_i2c_devs));
+ ARRAY_SIZE(db1550_spi_devs));

c = clk_get(NULL, "psc0_intclk");
if (!IS_ERR(c)) {
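
The db1200/db1550 hunks fix a copy-paste error: spi_register_board_info() was being passed ARRAY_SIZE() of the I2C table rather than of the SPI table it actually registers, so the element count and the array did not match. A minimal userspace sketch of that failure mode, assuming nothing beyond standard C (the device names and the register_devs() helper are illustrative, not kernel API):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct board_info { const char *name; };

static const struct board_info i2c_devs[] = {
	{ "i2c-codec" }, { "i2c-sensor" },
};

static const struct board_info spi_devs[] = {
	{ "spi-touchscreen" },
};

/* Stand-in for spi_register_board_info(): walks exactly n entries. */
static void register_devs(const struct board_info *devs, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++)
		printf("registering %s\n", devs[i].name);
}

int main(void)
{
	/* Buggy pattern: count taken from the wrong array, reads past spi_devs. */
	/* register_devs(spi_devs, ARRAY_SIZE(i2c_devs)); */

	/* Fixed pattern: the array and its count come from the same table. */
	register_devs(spi_devs, ARRAY_SIZE(spi_devs));
	return 0;
}
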
diff --git a/arch/mips/include/asm/dmi.h b/arch/mips/include/asm/dmi.h
index 27415a288adf..dc397f630c66 100644
--- a/arch/mips/include/asm/dmi.h
+++ b/arch/mips/include/asm/dmi.h
@@ -5,7 +5,7 @@
#include <linux/io.h>
#include <linux/memblock.h>

-#define dmi_early_remap(x, l) ioremap_cache(x, l)
+#define dmi_early_remap(x, l) ioremap(x, l)
#define dmi_early_unmap(x, l) iounmap(x)
#define dmi_remap(x, l) ioremap_cache(x, l)
#define dmi_unmap(x) iounmap(x)
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index cb871eb784a7..f88a2f83c5ea 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -326,11 +326,11 @@ static void __init bootmem_init(void)
panic("Incorrect memory mapping !!!");

if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
+ max_low_pfn = PFN_DOWN(HIGHMEM_START);
#ifdef CONFIG_HIGHMEM
- highstart_pfn = PFN_DOWN(HIGHMEM_START);
+ highstart_pfn = max_low_pfn;
highend_pfn = max_pfn;
#else
- max_low_pfn = PFN_DOWN(HIGHMEM_START);
max_pfn = max_low_pfn;
#endif
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 8fbef537fb88..81f6c4f8fbc1 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -351,10 +351,11 @@ early_initcall(mips_smp_ipi_init);
*/
asmlinkage void start_secondary(void)
{
- unsigned int cpu;
+ unsigned int cpu = raw_smp_processor_id();

cpu_probe();
per_cpu_trap_init(false);
+ rcu_cpu_starting(cpu);
mips_clockevent_init();
mp_ops->init_secondary();
cpu_report();
@@ -366,7 +367,6 @@ asmlinkage void start_secondary(void)
*/

calibrate_delay();
- cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;

set_cpu_sibling_map(cpu);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4640cee33f12..2fe51e0ad637 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -857,6 +857,7 @@ config THREAD_SHIFT
int "Thread shift" if EXPERT
range 13 15
default "15" if PPC_256K_PAGES
+ default "15" if PPC_PSERIES || PPC_POWERNV
default "14" if PPC64
default "13"
help
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index eddc031c4b95..87d65bdd3eca 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -544,6 +544,21 @@ static int __init rtas_token_to_function_xarray_init(void)
}
arch_initcall(rtas_token_to_function_xarray_init);

+/*
+ * For use by sys_rtas(), where the token value is provided by user
+ * space and we don't want to warn on failed lookups.
+ */
+static const struct rtas_function *rtas_token_to_function_untrusted(s32 token)
+{
+ return xa_load(&rtas_token_to_function_xarray, token);
+}
+
+/*
+ * Reverse lookup for deriving the function descriptor from a
+ * known-good token value in contexts where the former is not already
+ * available. @token must be valid, e.g. derived from the result of a
+ * prior lookup against the function table.
+ */
static const struct rtas_function *rtas_token_to_function(s32 token)
{
const struct rtas_function *func;
@@ -551,7 +566,7 @@ static const struct rtas_function *rtas_token_to_function(s32 token)
if (WARN_ONCE(token < 0, "invalid token %d", token))
return NULL;

- func = xa_load(&rtas_token_to_function_xarray, token);
+ func = rtas_token_to_function_untrusted(token);

if (WARN_ONCE(!func, "unexpected failed lookup for token %d", token))
return NULL;
@@ -1726,7 +1741,7 @@ static bool block_rtas_call(int token, int nargs,
* If this token doesn't correspond to a function the kernel
* understands, you're not allowed to call it.
*/
- func = rtas_token_to_function(token);
+ func = rtas_token_to_function_untrusted(token);
if (!func)
goto err;
/*
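
The rtas.c change splits the token lookup in two: sys_rtas() now goes through a variant that quietly returns NULL for unknown, user-supplied tokens, while kernel-internal callers keep the WARN for tokens that are expected to resolve. A small sketch of that trusted/untrusted wrapper pattern, with a flat array standing in for the token xarray (all names below are illustrative):

#include <stdio.h>
#include <stddef.h>

struct rtas_fn { int token; const char *name; };

static const struct rtas_fn fn_table[] = {
	{ 1, "ibm,get-system-parameter" },
	{ 2, "ibm,set-system-parameter" },
};

/* Untrusted lookup: the token may come from user space, a miss is expected. */
static const struct rtas_fn *token_to_fn_untrusted(int token)
{
	for (size_t i = 0; i < sizeof(fn_table) / sizeof(fn_table[0]); i++)
		if (fn_table[i].token == token)
			return &fn_table[i];
	return NULL;
}

/* Trusted lookup: the token was derived in-kernel, a miss indicates a bug. */
static const struct rtas_fn *token_to_fn(int token)
{
	const struct rtas_fn *fn = token_to_fn_untrusted(token);

	if (!fn)
		fprintf(stderr, "WARN: unexpected failed lookup for token %d\n", token);
	return fn;
}

int main(void)
{
	if (!token_to_fn_untrusted(999))		/* user-supplied garbage */
		puts("token 999 rejected without warning");

	printf("token 1 -> %s\n", token_to_fn(1)->name);	/* known-good token */
	return 0;
}
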
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index efd0ebf70a5e..fdfc2a62dd67 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -28,6 +28,7 @@
#include <asm/pte-walk.h>

#include "book3s.h"
+#include "book3s_hv.h"
#include "trace_hv.h"

//#define DEBUG_RESIZE_HPT 1
@@ -347,7 +348,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
unsigned long v, orig_v, gr;
__be64 *hptep;
long int index;
- int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
+ int virtmode = __kvmppc_get_msr_hv(vcpu) & (data ? MSR_DR : MSR_IR);

if (kvm_is_radix(vcpu->kvm))
return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);
@@ -385,7 +386,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,

/* Get PP bits and key for permission check */
pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
- key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
+ key = (__kvmppc_get_msr_hv(vcpu) & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
key &= slb_v;

/* Calculate permissions */
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 572707858d65..10aacbf92466 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -15,6 +15,7 @@

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
+#include "book3s_hv.h"
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
@@ -294,9 +295,9 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
} else {
if (!(pte & _PAGE_PRIVILEGED)) {
/* Check AMR/IAMR to see if strict mode is in force */
- if (vcpu->arch.amr & (1ul << 62))
+ if (kvmppc_get_amr_hv(vcpu) & (1ul << 62))
gpte->may_read = 0;
- if (vcpu->arch.amr & (1ul << 63))
+ if (kvmppc_get_amr_hv(vcpu) & (1ul << 63))
gpte->may_write = 0;
if (vcpu->arch.iamr & (1ul << 62))
gpte->may_execute = 0;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 130bafdb1430..0429488ba170 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -868,7 +868,7 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
/* Guests can't breakpoint the hypervisor */
if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
return H_P3;
- vcpu->arch.ciabr = value1;
+ kvmppc_set_ciabr_hv(vcpu, value1);
return H_SUCCESS;
case H_SET_MODE_RESOURCE_SET_DAWR0:
if (!kvmppc_power8_compatible(vcpu))
@@ -879,8 +879,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
return H_UNSUPPORTED_FLAG_START;
if (value2 & DABRX_HYP)
return H_P4;
- vcpu->arch.dawr0 = value1;
- vcpu->arch.dawrx0 = value2;
+ kvmppc_set_dawr0_hv(vcpu, value1);
+ kvmppc_set_dawrx0_hv(vcpu, value2);
return H_SUCCESS;
case H_SET_MODE_RESOURCE_SET_DAWR1:
if (!kvmppc_power8_compatible(vcpu))
@@ -895,8 +895,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
return H_UNSUPPORTED_FLAG_START;
if (value2 & DABRX_HYP)
return H_P4;
- vcpu->arch.dawr1 = value1;
- vcpu->arch.dawrx1 = value2;
+ kvmppc_set_dawr1_hv(vcpu, value1);
+ kvmppc_set_dawrx1_hv(vcpu, value2);
return H_SUCCESS;
case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
/*
@@ -1370,7 +1370,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
*/
static void kvmppc_cede(struct kvm_vcpu *vcpu)
{
- vcpu->arch.shregs.msr |= MSR_EE;
+ __kvmppc_set_msr_hv(vcpu, __kvmppc_get_msr_hv(vcpu) | MSR_EE);
vcpu->arch.ceded = 1;
smp_mb();
if (vcpu->arch.prodded) {
@@ -1544,7 +1544,7 @@ static int kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.hfscr_permitted & HFSCR_PM))
return EMULATE_FAIL;

- vcpu->arch.hfscr |= HFSCR_PM;
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_PM);

return RESUME_GUEST;
}
@@ -1554,7 +1554,7 @@ static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB))
return EMULATE_FAIL;

- vcpu->arch.hfscr |= HFSCR_EBB;
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_EBB);

return RESUME_GUEST;
}
@@ -1564,7 +1564,7 @@ static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.hfscr_permitted & HFSCR_TM))
return EMULATE_FAIL;

- vcpu->arch.hfscr |= HFSCR_TM;
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM);

return RESUME_GUEST;
}
@@ -1585,7 +1585,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* That can happen due to a bug, or due to a machine check
* occurring at just the wrong time.
*/
- if (vcpu->arch.shregs.msr & MSR_HV) {
+ if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
printk(KERN_EMERG "KVM trap in HV mode!\n");
printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
@@ -1636,7 +1636,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* so that it knows that the machine check occurred.
*/
if (!vcpu->kvm->arch.fwnmi_enabled) {
- ulong flags = (vcpu->arch.shregs.msr & 0x083c0000) |
+ ulong flags = (__kvmppc_get_msr_hv(vcpu) & 0x083c0000) |
(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
kvmppc_core_queue_machine_check(vcpu, flags);
r = RESUME_GUEST;
@@ -1666,7 +1666,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* as a result of a hypervisor emulation interrupt
* (e40) getting turned into a 700 by BML RTAS.
*/
- flags = (vcpu->arch.shregs.msr & 0x1f0000ull) |
+ flags = (__kvmppc_get_msr_hv(vcpu) & 0x1f0000ull) |
(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
kvmppc_core_queue_program(vcpu, flags);
r = RESUME_GUEST;
@@ -1676,7 +1676,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
{
int i;

- if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) {
+ if (unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
/*
* Guest userspace executed sc 1. This can only be
* reached by the P9 path because the old path
@@ -1754,7 +1754,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
break;
}

- if (!(vcpu->arch.shregs.msr & MSR_DR))
+ if (!(__kvmppc_get_msr_hv(vcpu) & MSR_DR))
vsid = vcpu->kvm->arch.vrma_slb_v;
else
vsid = vcpu->arch.fault_gpa;
@@ -1778,7 +1778,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
long err;

vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
- vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
+ vcpu->arch.fault_dsisr = __kvmppc_get_msr_hv(vcpu) &
DSISR_SRR1_MATCH_64S;
if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
/*
@@ -1787,7 +1787,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* hash fault handling below is v3 only (it uses ASDR
* via fault_gpa).
*/
- if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+ if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE)
vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
r = RESUME_PAGE_FAULT;
break;
@@ -1801,7 +1801,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
break;
}

- if (!(vcpu->arch.shregs.msr & MSR_IR))
+ if (!(__kvmppc_get_msr_hv(vcpu) & MSR_IR))
vsid = vcpu->kvm->arch.vrma_slb_v;
else
vsid = vcpu->arch.fault_gpa;
@@ -1863,7 +1863,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* Otherwise, we just generate a program interrupt to the guest.
*/
case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: {
- u64 cause = vcpu->arch.hfscr >> 56;
+ u64 cause = kvmppc_get_hfscr_hv(vcpu) >> 56;

r = EMULATE_FAIL;
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
@@ -1891,7 +1891,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
kvmppc_dump_regs(vcpu);
printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
- vcpu->arch.shregs.msr);
+ __kvmppc_get_msr_hv(vcpu));
run->hw.hardware_exit_reason = vcpu->arch.trap;
r = RESUME_HOST;
break;
@@ -1915,11 +1915,11 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
* That can happen due to a bug, or due to a machine check
* occurring at just the wrong time.
*/
- if (vcpu->arch.shregs.msr & MSR_HV) {
+ if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
pr_emerg("KVM trap in HV mode while nested!\n");
pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
- vcpu->arch.shregs.msr);
+ __kvmppc_get_msr_hv(vcpu));
kvmppc_dump_regs(vcpu);
return RESUME_HOST;
}
@@ -1976,7 +1976,7 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
DSISR_SRR1_MATCH_64S;
- if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+ if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE)
vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvmhv_nested_page_fault(vcpu);
@@ -2207,64 +2207,64 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.dabrx);
break;
case KVM_REG_PPC_DSCR:
- *val = get_reg_val(id, vcpu->arch.dscr);
+ *val = get_reg_val(id, kvmppc_get_dscr_hv(vcpu));
break;
case KVM_REG_PPC_PURR:
- *val = get_reg_val(id, vcpu->arch.purr);
+ *val = get_reg_val(id, kvmppc_get_purr_hv(vcpu));
break;
case KVM_REG_PPC_SPURR:
- *val = get_reg_val(id, vcpu->arch.spurr);
+ *val = get_reg_val(id, kvmppc_get_spurr_hv(vcpu));
break;
case KVM_REG_PPC_AMR:
- *val = get_reg_val(id, vcpu->arch.amr);
+ *val = get_reg_val(id, kvmppc_get_amr_hv(vcpu));
break;
case KVM_REG_PPC_UAMOR:
- *val = get_reg_val(id, vcpu->arch.uamor);
+ *val = get_reg_val(id, kvmppc_get_uamor_hv(vcpu));
break;
case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
i = id - KVM_REG_PPC_MMCR0;
- *val = get_reg_val(id, vcpu->arch.mmcr[i]);
+ *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, i));
break;
case KVM_REG_PPC_MMCR2:
- *val = get_reg_val(id, vcpu->arch.mmcr[2]);
+ *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, 2));
break;
case KVM_REG_PPC_MMCRA:
- *val = get_reg_val(id, vcpu->arch.mmcra);
+ *val = get_reg_val(id, kvmppc_get_mmcra_hv(vcpu));
break;
case KVM_REG_PPC_MMCRS:
*val = get_reg_val(id, vcpu->arch.mmcrs);
break;
case KVM_REG_PPC_MMCR3:
- *val = get_reg_val(id, vcpu->arch.mmcr[3]);
+ *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, 3));
break;
case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
i = id - KVM_REG_PPC_PMC1;
- *val = get_reg_val(id, vcpu->arch.pmc[i]);
+ *val = get_reg_val(id, kvmppc_get_pmc_hv(vcpu, i));
break;
case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
i = id - KVM_REG_PPC_SPMC1;
*val = get_reg_val(id, vcpu->arch.spmc[i]);
break;
case KVM_REG_PPC_SIAR:
- *val = get_reg_val(id, vcpu->arch.siar);
+ *val = get_reg_val(id, kvmppc_get_siar_hv(vcpu));
break;
case KVM_REG_PPC_SDAR:
- *val = get_reg_val(id, vcpu->arch.sdar);
+ *val = get_reg_val(id, kvmppc_get_sdar_hv(vcpu));
break;
case KVM_REG_PPC_SIER:
- *val = get_reg_val(id, vcpu->arch.sier[0]);
+ *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 0));
break;
case KVM_REG_PPC_SIER2:
- *val = get_reg_val(id, vcpu->arch.sier[1]);
+ *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 1));
break;
case KVM_REG_PPC_SIER3:
- *val = get_reg_val(id, vcpu->arch.sier[2]);
+ *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 2));
break;
case KVM_REG_PPC_IAMR:
- *val = get_reg_val(id, vcpu->arch.iamr);
+ *val = get_reg_val(id, kvmppc_get_iamr_hv(vcpu));
break;
case KVM_REG_PPC_PSPB:
- *val = get_reg_val(id, vcpu->arch.pspb);
+ *val = get_reg_val(id, kvmppc_get_pspb_hv(vcpu));
break;
case KVM_REG_PPC_DPDES:
/*
@@ -2282,19 +2282,19 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.vcore->vtb);
break;
case KVM_REG_PPC_DAWR:
- *val = get_reg_val(id, vcpu->arch.dawr0);
+ *val = get_reg_val(id, kvmppc_get_dawr0_hv(vcpu));
break;
case KVM_REG_PPC_DAWRX:
- *val = get_reg_val(id, vcpu->arch.dawrx0);
+ *val = get_reg_val(id, kvmppc_get_dawrx0_hv(vcpu));
break;
case KVM_REG_PPC_DAWR1:
- *val = get_reg_val(id, vcpu->arch.dawr1);
+ *val = get_reg_val(id, kvmppc_get_dawr1_hv(vcpu));
break;
case KVM_REG_PPC_DAWRX1:
- *val = get_reg_val(id, vcpu->arch.dawrx1);
+ *val = get_reg_val(id, kvmppc_get_dawrx1_hv(vcpu));
break;
case KVM_REG_PPC_CIABR:
- *val = get_reg_val(id, vcpu->arch.ciabr);
+ *val = get_reg_val(id, kvmppc_get_ciabr_hv(vcpu));
break;
case KVM_REG_PPC_CSIGR:
*val = get_reg_val(id, vcpu->arch.csigr);
@@ -2312,7 +2312,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.acop);
break;
case KVM_REG_PPC_WORT:
- *val = get_reg_val(id, vcpu->arch.wort);
+ *val = get_reg_val(id, kvmppc_get_wort_hv(vcpu));
break;
case KVM_REG_PPC_TIDR:
*val = get_reg_val(id, vcpu->arch.tid);
@@ -2345,7 +2345,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
break;
case KVM_REG_PPC_PPR:
- *val = get_reg_val(id, vcpu->arch.ppr);
+ *val = get_reg_val(id, kvmppc_get_ppr_hv(vcpu));
break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
case KVM_REG_PPC_TFHAR:
@@ -2425,6 +2425,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_PTCR:
*val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
break;
+ case KVM_REG_PPC_FSCR:
+ *val = get_reg_val(id, kvmppc_get_fscr_hv(vcpu));
+ break;
default:
r = -EINVAL;
break;
@@ -2453,29 +2456,29 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
break;
case KVM_REG_PPC_DSCR:
- vcpu->arch.dscr = set_reg_val(id, *val);
+ kvmppc_set_dscr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_PURR:
- vcpu->arch.purr = set_reg_val(id, *val);
+ kvmppc_set_purr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SPURR:
- vcpu->arch.spurr = set_reg_val(id, *val);
+ kvmppc_set_spurr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_AMR:
- vcpu->arch.amr = set_reg_val(id, *val);
+ kvmppc_set_amr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_UAMOR:
- vcpu->arch.uamor = set_reg_val(id, *val);
+ kvmppc_set_uamor_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
i = id - KVM_REG_PPC_MMCR0;
- vcpu->arch.mmcr[i] = set_reg_val(id, *val);
+ kvmppc_set_mmcr_hv(vcpu, i, set_reg_val(id, *val));
break;
case KVM_REG_PPC_MMCR2:
- vcpu->arch.mmcr[2] = set_reg_val(id, *val);
+ kvmppc_set_mmcr_hv(vcpu, 2, set_reg_val(id, *val));
break;
case KVM_REG_PPC_MMCRA:
- vcpu->arch.mmcra = set_reg_val(id, *val);
+ kvmppc_set_mmcra_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_MMCRS:
vcpu->arch.mmcrs = set_reg_val(id, *val);
@@ -2485,32 +2488,32 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
break;
case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
i = id - KVM_REG_PPC_PMC1;
- vcpu->arch.pmc[i] = set_reg_val(id, *val);
+ kvmppc_set_pmc_hv(vcpu, i, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
i = id - KVM_REG_PPC_SPMC1;
vcpu->arch.spmc[i] = set_reg_val(id, *val);
break;
case KVM_REG_PPC_SIAR:
- vcpu->arch.siar = set_reg_val(id, *val);
+ kvmppc_set_siar_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SDAR:
- vcpu->arch.sdar = set_reg_val(id, *val);
+ kvmppc_set_sdar_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SIER:
- vcpu->arch.sier[0] = set_reg_val(id, *val);
+ kvmppc_set_sier_hv(vcpu, 0, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SIER2:
- vcpu->arch.sier[1] = set_reg_val(id, *val);
+ kvmppc_set_sier_hv(vcpu, 1, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SIER3:
- vcpu->arch.sier[2] = set_reg_val(id, *val);
+ kvmppc_set_sier_hv(vcpu, 2, set_reg_val(id, *val));
break;
case KVM_REG_PPC_IAMR:
- vcpu->arch.iamr = set_reg_val(id, *val);
+ kvmppc_set_iamr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_PSPB:
- vcpu->arch.pspb = set_reg_val(id, *val);
+ kvmppc_set_pspb_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DPDES:
if (cpu_has_feature(CPU_FTR_ARCH_300))
@@ -2522,22 +2525,22 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.vcore->vtb = set_reg_val(id, *val);
break;
case KVM_REG_PPC_DAWR:
- vcpu->arch.dawr0 = set_reg_val(id, *val);
+ kvmppc_set_dawr0_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DAWRX:
- vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP;
+ kvmppc_set_dawrx0_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP);
break;
case KVM_REG_PPC_DAWR1:
- vcpu->arch.dawr1 = set_reg_val(id, *val);
+ kvmppc_set_dawr1_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DAWRX1:
- vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP;
+ kvmppc_set_dawrx1_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP);
break;
case KVM_REG_PPC_CIABR:
- vcpu->arch.ciabr = set_reg_val(id, *val);
+ kvmppc_set_ciabr_hv(vcpu, set_reg_val(id, *val));
/* Don't allow setting breakpoints in hypervisor code */
- if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
- vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */
+ if ((kvmppc_get_ciabr_hv(vcpu) & CIABR_PRIV) == CIABR_PRIV_HYPER)
+ kvmppc_set_ciabr_hv(vcpu, kvmppc_get_ciabr_hv(vcpu) & ~CIABR_PRIV);
break;
case KVM_REG_PPC_CSIGR:
vcpu->arch.csigr = set_reg_val(id, *val);
@@ -2555,7 +2558,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.acop = set_reg_val(id, *val);
break;
case KVM_REG_PPC_WORT:
- vcpu->arch.wort = set_reg_val(id, *val);
+ kvmppc_set_wort_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_TIDR:
vcpu->arch.tid = set_reg_val(id, *val);
@@ -2615,7 +2618,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
break;
case KVM_REG_PPC_PPR:
- vcpu->arch.ppr = set_reg_val(id, *val);
+ kvmppc_set_ppr_hv(vcpu, set_reg_val(id, *val));
break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
case KVM_REG_PPC_TFHAR:
@@ -2699,6 +2702,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_PTCR:
vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
break;
+ case KVM_REG_PPC_FSCR:
+ kvmppc_set_fscr_hv(vcpu, set_reg_val(id, *val));
+ break;
default:
r = -EINVAL;
break;
@@ -2916,19 +2922,20 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
vcpu->arch.shared_big_endian = false;
#endif
#endif
- vcpu->arch.mmcr[0] = MMCR0_FC;
+ kvmppc_set_mmcr_hv(vcpu, 0, MMCR0_FC);
+
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- vcpu->arch.mmcr[0] |= MMCR0_PMCCEXT;
- vcpu->arch.mmcra = MMCRA_BHRB_DISABLE;
+ kvmppc_set_mmcr_hv(vcpu, 0, kvmppc_get_mmcr_hv(vcpu, 0) | MMCR0_PMCCEXT);
+ kvmppc_set_mmcra_hv(vcpu, MMCRA_BHRB_DISABLE);
}

- vcpu->arch.ctrl = CTRL_RUNLATCH;
+ kvmppc_set_ctrl_hv(vcpu, CTRL_RUNLATCH);
/* default to host PVR, since we can't spoof it */
kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
spin_lock_init(&vcpu->arch.vpa_update_lock);
spin_lock_init(&vcpu->arch.tbacct_lock);
vcpu->arch.busy_preempt = TB_NIL;
- vcpu->arch.shregs.msr = MSR_ME;
+ __kvmppc_set_msr_hv(vcpu, MSR_ME);
vcpu->arch.intr_msr = MSR_SF | MSR_ME;

/*
@@ -2938,29 +2945,30 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
* don't set the HFSCR_MSGP bit, and that causes those instructions
* to trap and then we emulate them.
*/
- vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
- HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP;
+ kvmppc_set_hfscr_hv(vcpu, HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
+ HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP);

/* On POWER10 and later, allow prefixed instructions */
if (cpu_has_feature(CPU_FTR_ARCH_31))
- vcpu->arch.hfscr |= HFSCR_PREFIX;
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_PREFIX);

if (cpu_has_feature(CPU_FTR_HVMODE)) {
- vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) & mfspr(SPRN_HFSCR));
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
- vcpu->arch.hfscr |= HFSCR_TM;
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM);
#endif
}
if (cpu_has_feature(CPU_FTR_TM_COMP))
vcpu->arch.hfscr |= HFSCR_TM;

- vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
+ vcpu->arch.hfscr_permitted = kvmppc_get_hfscr_hv(vcpu);

/*
* PM, EBB, TM are demand-faulted so start with it clear.
*/
- vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM);
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) & ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM));

kvmppc_mmu_book3s_hv_init(vcpu);

@@ -4176,7 +4184,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
__this_cpu_write(cpu_in_guest, NULL);

if (trap == BOOK3S_INTERRUPT_SYSCALL &&
- !(vcpu->arch.shregs.msr & MSR_PR)) {
+ !(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
unsigned long req = kvmppc_get_gpr(vcpu, 3);

/*
@@ -4655,13 +4663,19 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,

if (!nested) {
kvmppc_core_prepare_to_enter(vcpu);
- if (vcpu->arch.shregs.msr & MSR_EE) {
- if (xive_interrupt_pending(vcpu))
+ if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
+ &vcpu->arch.pending_exceptions) ||
+ xive_interrupt_pending(vcpu)) {
+ /*
+ * For nested HV, don't synthesize but always pass MER,
+ * the L0 will be able to optimise that more
+ * effectively than manipulating registers directly.
+ */
+ if (!kvmhv_on_pseries() && (__kvmppc_get_msr_hv(vcpu) & MSR_EE))
kvmppc_inject_interrupt_hv(vcpu,
- BOOK3S_INTERRUPT_EXTERNAL, 0);
- } else if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
- &vcpu->arch.pending_exceptions)) {
- lpcr |= LPCR_MER;
+ BOOK3S_INTERRUPT_EXTERNAL, 0);
+ else
+ lpcr |= LPCR_MER;
}
} else if (vcpu->arch.pending_exceptions ||
vcpu->arch.doorbell_request ||
@@ -4844,7 +4858,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
msr |= MSR_VSX;
if ((cpu_has_feature(CPU_FTR_TM) ||
cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
- (vcpu->arch.hfscr & HFSCR_TM))
+ (kvmppc_get_hfscr_hv(vcpu) & HFSCR_TM))
msr |= MSR_TM;
msr = msr_check_and_set(msr);

@@ -4868,7 +4882,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
accumulate_time(vcpu, &vcpu->arch.hcall);

- if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) {
+ if (WARN_ON_ONCE(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
/*
* These should have been caught reflected
* into the guest by now. Final sanity check:
diff --git a/arch/powerpc/kvm/book3s_hv.h b/arch/powerpc/kvm/book3s_hv.h
index 2f2e59d7d433..95241764dfb4 100644
--- a/arch/powerpc/kvm/book3s_hv.h
+++ b/arch/powerpc/kvm/book3s_hv.h
@@ -50,3 +50,71 @@ void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next);
#define start_timing(vcpu, next) do {} while (0)
#define end_timing(vcpu) do {} while (0)
#endif
+
+static inline void __kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 val)
+{
+ vcpu->arch.shregs.msr = val;
+}
+
+static inline u64 __kvmppc_get_msr_hv(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.shregs.msr;
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size) \
+static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, u##size val) \
+{ \
+ vcpu->arch.reg = val; \
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size) \
+static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu) \
+{ \
+ return vcpu->arch.reg; \
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(reg, size) \
+ KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size) \
+ KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size) \
+
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size) \
+static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, int i, u##size val) \
+{ \
+ vcpu->arch.reg[i] = val; \
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size) \
+static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu, int i) \
+{ \
+ return vcpu->arch.reg[i]; \
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(reg, size) \
+ KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size) \
+ KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size) \
+
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(mmcra, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hfscr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(fscr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dscr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(purr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(spurr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(amr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(uamor, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(siar, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(sdar, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(iamr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ctrl, 64)
+
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(mmcr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(sier, 64)
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(pmc, 32)
+
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(pspb, 32)
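
The new book3s_hv.h helpers are generated with token-pasting macros, so every register access funnels through one small get/set pair per field instead of open-coded vcpu->arch dereferences scattered across book3s_hv.c. A stripped-down sketch of the same macro technique, using a made-up vcpu structure rather than the real struct kvm_vcpu:

#include <stdio.h>
#include <stdint.h>

struct vcpu_arch { uint64_t dscr; uint64_t mmcr[4]; };
struct vcpu { struct vcpu_arch arch; };

/* Scalar accessors: ##-pasting builds set_<reg>()/get_<reg>() pairs. */
#define VCPU_ACCESSOR(reg)						\
static inline void set_##reg(struct vcpu *v, uint64_t val)		\
{									\
	v->arch.reg = val;						\
}									\
static inline uint64_t get_##reg(struct vcpu *v)			\
{									\
	return v->arch.reg;						\
}

/* Array variant takes an index, mirroring the mmcr/sier/pmc accessors. */
#define VCPU_ARRAY_ACCESSOR(reg)					\
static inline void set_##reg(struct vcpu *v, int i, uint64_t val)	\
{									\
	v->arch.reg[i] = val;						\
}									\
static inline uint64_t get_##reg(struct vcpu *v, int i)			\
{									\
	return v->arch.reg[i];						\
}

VCPU_ACCESSOR(dscr)
VCPU_ARRAY_ACCESSOR(mmcr)

int main(void)
{
	struct vcpu v = { { 0, { 0 } } };

	set_dscr(&v, 0x10);
	set_mmcr(&v, 0, 1ULL << 31);
	printf("dscr=%#llx mmcr0=%#llx\n",
	       (unsigned long long)get_dscr(&v),
	       (unsigned long long)get_mmcr(&v, 0));
	return 0;
}
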
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 0f5b021fa559..663f5222f3d0 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -32,6 +32,7 @@

#include "book3s_xics.h"
#include "book3s_xive.h"
+#include "book3s_hv.h"

/*
* Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
@@ -510,7 +511,7 @@ void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
*/
if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
msr &= ~MSR_TS_MASK;
- vcpu->arch.shregs.msr = msr;
+ __kvmppc_set_msr_hv(vcpu, msr);
kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);
@@ -548,7 +549,7 @@ static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
kvmppc_set_srr0(vcpu, pc);
kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
kvmppc_set_pc(vcpu, new_pc);
- vcpu->arch.shregs.msr = new_msr;
+ __kvmppc_set_msr_hv(vcpu, new_msr);
}

void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 51ad0397c17a..6eac63e79a89 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -45,7 +45,7 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
# so it is only needed for modules, and only for older linkers which
# do not support --save-restore-funcs
ifndef CONFIG_LD_IS_BFD
-extra-$(CONFIG_PPC64) += crtsavres.o
+always-$(CONFIG_PPC64) += crtsavres.o
endif

obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index 39dbe6b348df..27f18119fda1 100644
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
@@ -534,6 +534,9 @@ static ssize_t affinity_domain_via_partition_show(struct device *dev, struct dev
if (!ret)
goto parse_result;

+ if (ret && (ret != H_PARAMETER))
+ goto out;
+
/*
* ret value as 'H_PARAMETER' implies that the current buffer size
* can't accommodate all the information, and a partial buffer
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index ada817c49b72..56d82f7f9734 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -299,6 +299,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
attr_group->attrs = attrs;
do {
ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
+ if (!ev_val_str)
+ continue;
dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
if (!dev_str)
continue;
@@ -306,6 +308,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
attrs[j++] = dev_str;
if (pmu->events[i].scale) {
ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
+ if (!ev_scale_str)
+ continue;
dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
if (!dev_str)
continue;
@@ -315,6 +319,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)

if (pmu->events[i].unit) {
ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
+ if (!ev_unit_str)
+ continue;
dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
if (!dev_str)
continue;
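
The imc-pmu hunks (and the opal-irqchip/powercap/xscom ones further down) add the missing error handling after kasprintf(): the allocation can fail, and the resulting NULL must not be passed on as an attribute name or format string. The same pattern in a self-contained userspace form, with a vsnprintf/malloc helper standing in for kasprintf():

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>

/* Minimal kasprintf() stand-in: heap-allocated string, or NULL on failure. */
static char *xasprintf(const char *fmt, ...)
{
	va_list ap;
	int len;
	char *buf;

	va_start(ap, fmt);
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);
	if (len < 0)
		return NULL;

	buf = malloc((size_t)len + 1);
	if (!buf)
		return NULL;

	va_start(ap, fmt);
	vsnprintf(buf, (size_t)len + 1, fmt, ap);
	va_end(ap);
	return buf;
}

int main(void)
{
	const char *events[] = { "cycles", "instructions" };

	for (size_t i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
		char *ev = xasprintf("event=%s", events[i]);

		if (!ev)	/* the fix: skip the entry instead of using NULL */
			continue;
		printf("%s\n", ev);
		free(ev);
	}
	return 0;
}
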
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 1624ebf95497..35a1f4b9f827 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -173,6 +173,7 @@ config ISS4xx
config CURRITUCK
bool "IBM Currituck (476fpe) Support"
depends on PPC_47x
+ select I2C
select SWIOTLB
select 476FPE
select FORCE_PCI
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index f9a7001dacb7..56a1f7ce78d2 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -275,6 +275,8 @@ int __init opal_event_init(void)
else
name = kasprintf(GFP_KERNEL, "opal");

+ if (!name)
+ continue;
/* Install interrupt handler */
rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK,
name, NULL);
diff --git a/arch/powerpc/platforms/powernv/opal-powercap.c b/arch/powerpc/platforms/powernv/opal-powercap.c
index 7bfe4cbeb35a..ea917266aa17 100644
--- a/arch/powerpc/platforms/powernv/opal-powercap.c
+++ b/arch/powerpc/platforms/powernv/opal-powercap.c
@@ -196,6 +196,12 @@ void __init opal_powercap_init(void)

j = 0;
pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node);
+ if (!pcaps[i].pg.name) {
+ kfree(pcaps[i].pattrs);
+ kfree(pcaps[i].pg.attrs);
+ goto out_pcaps_pattrs;
+ }
+
if (has_min) {
powercap_add_attr(min, "powercap-min",
&pcaps[i].pattrs[j]);
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
index 262cd6fac907..748c2b97fa53 100644
--- a/arch/powerpc/platforms/powernv/opal-xscom.c
+++ b/arch/powerpc/platforms/powernv/opal-xscom.c
@@ -165,6 +165,11 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
ent->chip = chip;
snprintf(ent->name, 16, "%08x", chip);
ent->path.data = (void *)kasprintf(GFP_KERNEL, "%pOF", dn);
+ if (!ent->path.data) {
+ kfree(ent);
+ return -ENOMEM;
+ }
+
ent->path.size = strlen((char *)ent->path.data);

dir = debugfs_create_dir(ent->name, root);
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index aa4042dcd6d4..4adca5b61dab 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -435,14 +435,15 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
}
}

- if (!lmb_found)
+ if (!lmb_found) {
+ pr_debug("Failed to look up LMB for drc index %x\n", drc_index);
rc = -EINVAL;
-
- if (rc)
+ } else if (rc) {
pr_debug("Failed to hot-remove memory at %llx\n",
lmb->base_addr);
- else
+ } else {
pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
+ }

return rc;
}
diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
index 32336e8a17cb..a393d5035c54 100644
--- a/arch/riscv/include/asm/sections.h
+++ b/arch/riscv/include/asm/sections.h
@@ -13,6 +13,7 @@ extern char _start_kernel[];
extern char __init_data_begin[], __init_data_end[];
extern char __init_text_begin[], __init_text_end[];
extern char __alt_start[], __alt_end[];
+extern char __exittext_begin[], __exittext_end[];

static inline bool is_va_kernel_text(uintptr_t va)
{
diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h
index d4ffc3c37649..b65bf6306f69 100644
--- a/arch/riscv/include/asm/xip_fixup.h
+++ b/arch/riscv/include/asm/xip_fixup.h
@@ -13,7 +13,7 @@
add \reg, \reg, t0
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
- la t1, __data_loc
+ la t0, __data_loc
REG_L t1, _xip_phys_offset
sub \reg, \reg, t1
add \reg, \reg, t0
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 7c651d55fcbd..df4f6fec5d17 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -440,7 +440,8 @@ void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULES_VADDR,
MODULES_END, GFP_KERNEL,
- PAGE_KERNEL, 0, NUMA_NO_NODE,
+ PAGE_KERNEL, VM_FLUSH_RESET_PERMS,
+ NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 13ee7bf589a1..37e87fdcf6a0 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -14,6 +14,7 @@
#include <asm/fixmap.h>
#include <asm/ftrace.h>
#include <asm/patch.h>
+#include <asm/sections.h>

struct patch_insn {
void *addr;
@@ -25,6 +26,14 @@ struct patch_insn {
int riscv_patch_in_stop_machine = false;

#ifdef CONFIG_MMU
+
+static inline bool is_kernel_exittext(uintptr_t addr)
+{
+ return system_state < SYSTEM_RUNNING &&
+ addr >= (uintptr_t)__exittext_begin &&
+ addr < (uintptr_t)__exittext_end;
+}
+
/*
* The fix_to_virt(, idx) needs a const value (not a dynamic variable of
* reg-a0) or BUILD_BUG_ON failed with "idx >= __end_of_fixed_addresses".
@@ -35,7 +44,7 @@ static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
uintptr_t uintaddr = (uintptr_t) addr;
struct page *page;

- if (core_kernel_text(uintaddr))
+ if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr))
page = phys_to_page(__pa_symbol(addr));
else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
page = vmalloc_to_page(addr);
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index 50767647fbc6..8c3daa1b0531 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -29,10 +29,12 @@ SECTIONS
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(PAGE_SIZE)
/* we have to discard exit text and such at runtime, not link time */
+ __exittext_begin = .;
.exit.text :
{
EXIT_TEXT
}
+ __exittext_end = .;

.text : {
_text = .;
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 492dd4b8f3d6..002ca58dd998 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -69,10 +69,12 @@ SECTIONS
__soc_builtin_dtb_table_end = .;
}
/* we have to discard exit text and such at runtime, not link time */
+ __exittext_begin = .;
.exit.text :
{
EXIT_TEXT
}
+ __exittext_end = .;

__init_text_end = .;
. = ALIGN(SECTION_ALIGN);
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 161d0b34c2cb..01398fee5cf8 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -5,6 +5,7 @@

#include <linux/pagewalk.h>
#include <linux/pgtable.h>
+#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/set_memory.h>
@@ -25,19 +26,6 @@ static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
return new_val;
}

-static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
- unsigned long next, struct mm_walk *walk)
-{
- pgd_t val = READ_ONCE(*pgd);
-
- if (pgd_leaf(val)) {
- val = __pgd(set_pageattr_masks(pgd_val(val), walk));
- set_pgd(pgd, val);
- }
-
- return 0;
-}
-
static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
@@ -96,7 +84,6 @@ static int pageattr_pte_hole(unsigned long addr, unsigned long next,
}

static const struct mm_walk_ops pageattr_ops = {
- .pgd_entry = pageattr_pgd_entry,
.p4d_entry = pageattr_p4d_entry,
.pud_entry = pageattr_pud_entry,
.pmd_entry = pageattr_pmd_entry,
@@ -105,12 +92,181 @@ static const struct mm_walk_ops pageattr_ops = {
.walk_lock = PGWALK_RDLOCK,
};

+#ifdef CONFIG_64BIT
+static int __split_linear_mapping_pmd(pud_t *pudp,
+ unsigned long vaddr, unsigned long end)
+{
+ pmd_t *pmdp;
+ unsigned long next;
+
+ pmdp = pmd_offset(pudp, vaddr);
+
+ do {
+ next = pmd_addr_end(vaddr, end);
+
+ if (next - vaddr >= PMD_SIZE &&
+ vaddr <= (vaddr & PMD_MASK) && end >= next)
+ continue;
+
+ if (pmd_leaf(*pmdp)) {
+ struct page *pte_page;
+ unsigned long pfn = _pmd_pfn(*pmdp);
+ pgprot_t prot = __pgprot(pmd_val(*pmdp) & ~_PAGE_PFN_MASK);
+ pte_t *ptep_new;
+ int i;
+
+ pte_page = alloc_page(GFP_KERNEL);
+ if (!pte_page)
+ return -ENOMEM;
+
+ ptep_new = (pte_t *)page_address(pte_page);
+ for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep_new)
+ set_pte(ptep_new, pfn_pte(pfn + i, prot));
+
+ smp_wmb();
+
+ set_pmd(pmdp, pfn_pmd(page_to_pfn(pte_page), PAGE_TABLE));
+ }
+ } while (pmdp++, vaddr = next, vaddr != end);
+
+ return 0;
+}
+
+static int __split_linear_mapping_pud(p4d_t *p4dp,
+ unsigned long vaddr, unsigned long end)
+{
+ pud_t *pudp;
+ unsigned long next;
+ int ret;
+
+ pudp = pud_offset(p4dp, vaddr);
+
+ do {
+ next = pud_addr_end(vaddr, end);
+
+ if (next - vaddr >= PUD_SIZE &&
+ vaddr <= (vaddr & PUD_MASK) && end >= next)
+ continue;
+
+ if (pud_leaf(*pudp)) {
+ struct page *pmd_page;
+ unsigned long pfn = _pud_pfn(*pudp);
+ pgprot_t prot = __pgprot(pud_val(*pudp) & ~_PAGE_PFN_MASK);
+ pmd_t *pmdp_new;
+ int i;
+
+ pmd_page = alloc_page(GFP_KERNEL);
+ if (!pmd_page)
+ return -ENOMEM;
+
+ pmdp_new = (pmd_t *)page_address(pmd_page);
+ for (i = 0; i < PTRS_PER_PMD; ++i, ++pmdp_new)
+ set_pmd(pmdp_new,
+ pfn_pmd(pfn + ((i * PMD_SIZE) >> PAGE_SHIFT), prot));
+
+ smp_wmb();
+
+ set_pud(pudp, pfn_pud(page_to_pfn(pmd_page), PAGE_TABLE));
+ }
+
+ ret = __split_linear_mapping_pmd(pudp, vaddr, next);
+ if (ret)
+ return ret;
+ } while (pudp++, vaddr = next, vaddr != end);
+
+ return 0;
+}
+
+static int __split_linear_mapping_p4d(pgd_t *pgdp,
+ unsigned long vaddr, unsigned long end)
+{
+ p4d_t *p4dp;
+ unsigned long next;
+ int ret;
+
+ p4dp = p4d_offset(pgdp, vaddr);
+
+ do {
+ next = p4d_addr_end(vaddr, end);
+
+ /*
+ * If [vaddr; end] contains [vaddr & P4D_MASK; next], we don't
+ * need to split, we'll change the protections on the whole P4D.
+ */
+ if (next - vaddr >= P4D_SIZE &&
+ vaddr <= (vaddr & P4D_MASK) && end >= next)
+ continue;
+
+ if (p4d_leaf(*p4dp)) {
+ struct page *pud_page;
+ unsigned long pfn = _p4d_pfn(*p4dp);
+ pgprot_t prot = __pgprot(p4d_val(*p4dp) & ~_PAGE_PFN_MASK);
+ pud_t *pudp_new;
+ int i;
+
+ pud_page = alloc_page(GFP_KERNEL);
+ if (!pud_page)
+ return -ENOMEM;
+
+ /*
+ * Fill the pud level with leaf puds that have the same
+ * protections as the leaf p4d.
+ */
+ pudp_new = (pud_t *)page_address(pud_page);
+ for (i = 0; i < PTRS_PER_PUD; ++i, ++pudp_new)
+ set_pud(pudp_new,
+ pfn_pud(pfn + ((i * PUD_SIZE) >> PAGE_SHIFT), prot));
+
+ /*
+ * Make sure the pud filling is not reordered with the
+ * p4d store which could result in seeing a partially
+ * filled pud level.
+ */
+ smp_wmb();
+
+ set_p4d(p4dp, pfn_p4d(page_to_pfn(pud_page), PAGE_TABLE));
+ }
+
+ ret = __split_linear_mapping_pud(p4dp, vaddr, next);
+ if (ret)
+ return ret;
+ } while (p4dp++, vaddr = next, vaddr != end);
+
+ return 0;
+}
+
+static int __split_linear_mapping_pgd(pgd_t *pgdp,
+ unsigned long vaddr,
+ unsigned long end)
+{
+ unsigned long next;
+ int ret;
+
+ do {
+ next = pgd_addr_end(vaddr, end);
+ /* We never use PGD mappings for the linear mapping */
+ ret = __split_linear_mapping_p4d(pgdp, vaddr, next);
+ if (ret)
+ return ret;
+ } while (pgdp++, vaddr = next, vaddr != end);
+
+ return 0;
+}
+
+static int split_linear_mapping(unsigned long start, unsigned long end)
+{
+ return __split_linear_mapping_pgd(pgd_offset_k(start), start, end);
+}
+#endif /* CONFIG_64BIT */
+
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
pgprot_t clear_mask)
{
int ret;
unsigned long start = addr;
unsigned long end = start + PAGE_SIZE * numpages;
+ unsigned long __maybe_unused lm_start;
+ unsigned long __maybe_unused lm_end;
struct pageattr_masks masks = {
.set_mask = set_mask,
.clear_mask = clear_mask
@@ -120,11 +276,72 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
return 0;

mmap_write_lock(&init_mm);
+
+#ifdef CONFIG_64BIT
+ /*
+ * We are about to change the permissions of a kernel mapping, we must
+ * apply the same changes to its linear mapping alias, which may imply
+ * splitting a huge mapping.
+ */
+
+ if (is_vmalloc_or_module_addr((void *)start)) {
+ struct vm_struct *area = NULL;
+ int i, page_start;
+
+ area = find_vm_area((void *)start);
+ page_start = (start - (unsigned long)area->addr) >> PAGE_SHIFT;
+
+ for (i = page_start; i < page_start + numpages; ++i) {
+ lm_start = (unsigned long)page_address(area->pages[i]);
+ lm_end = lm_start + PAGE_SIZE;
+
+ ret = split_linear_mapping(lm_start, lm_end);
+ if (ret)
+ goto unlock;
+
+ ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
+ &pageattr_ops, NULL, &masks);
+ if (ret)
+ goto unlock;
+ }
+ } else if (is_kernel_mapping(start) || is_linear_mapping(start)) {
+ if (is_kernel_mapping(start)) {
+ lm_start = (unsigned long)lm_alias(start);
+ lm_end = (unsigned long)lm_alias(end);
+ } else {
+ lm_start = start;
+ lm_end = end;
+ }
+
+ ret = split_linear_mapping(lm_start, lm_end);
+ if (ret)
+ goto unlock;
+
+ ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
+ &pageattr_ops, NULL, &masks);
+ if (ret)
+ goto unlock;
+ }
+
ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
&masks);
+
+unlock:
+ mmap_write_unlock(&init_mm);
+
+ /*
+ * We can't use flush_tlb_kernel_range() here as we may have split a
+ * hugepage that is larger than that, so let's flush everything.
+ */
+ flush_tlb_all();
+#else
+ ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
+ &masks);
+
mmap_write_unlock(&init_mm);

flush_tlb_kernel_range(start, end);
+#endif

return ret;
}
@@ -159,36 +376,14 @@ int set_memory_nx(unsigned long addr, int numpages)

int set_direct_map_invalid_noflush(struct page *page)
{
- int ret;
- unsigned long start = (unsigned long)page_address(page);
- unsigned long end = start + PAGE_SIZE;
- struct pageattr_masks masks = {
- .set_mask = __pgprot(0),
- .clear_mask = __pgprot(_PAGE_PRESENT)
- };
-
- mmap_read_lock(&init_mm);
- ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
- mmap_read_unlock(&init_mm);
-
- return ret;
+ return __set_memory((unsigned long)page_address(page), 1,
+ __pgprot(0), __pgprot(_PAGE_PRESENT));
}

int set_direct_map_default_noflush(struct page *page)
{
- int ret;
- unsigned long start = (unsigned long)page_address(page);
- unsigned long end = start + PAGE_SIZE;
- struct pageattr_masks masks = {
- .set_mask = PAGE_KERNEL,
- .clear_mask = __pgprot(0)
- };
-
- mmap_read_lock(&init_mm);
- ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
- mmap_read_unlock(&init_mm);
-
- return ret;
+ return __set_memory((unsigned long)page_address(page), 1,
+ PAGE_KERNEL, __pgprot(_PAGE_EXEC));
}

#ifdef CONFIG_DEBUG_PAGEALLOC
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index 287bb88f7698..2686bee800e3 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -11,6 +11,8 @@
/* I/O size constraints */
#define ZPCI_MAX_READ_SIZE 8
#define ZPCI_MAX_WRITE_SIZE 128
+#define ZPCI_BOUNDARY_SIZE (1 << 12)
+#define ZPCI_BOUNDARY_MASK (ZPCI_BOUNDARY_SIZE - 1)

/* I/O Map */
#define ZPCI_IOMAP_SHIFT 48
@@ -125,16 +127,18 @@ static inline int zpci_read_single(void *dst, const volatile void __iomem *src,
int zpci_write_block(volatile void __iomem *dst, const void *src,
unsigned long len);

-static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
+static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max)
{
- int count = len > max ? max : len, size = 1;
+ int offset = dst & ZPCI_BOUNDARY_MASK;
+ int size;

- while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
- dst = dst >> 1;
- src = src >> 1;
- size = size << 1;
- }
- return size;
+ size = min3(len, ZPCI_BOUNDARY_SIZE - offset, max);
+ if (IS_ALIGNED(src, 8) && IS_ALIGNED(dst, 8) && IS_ALIGNED(size, 8))
+ return size;
+
+ if (size >= 8)
+ return 8;
+ return rounddown_pow_of_two(size);
}

static inline int zpci_memcpy_fromio(void *dst,
@@ -144,9 +148,9 @@ static inline int zpci_memcpy_fromio(void *dst,
int size, rc = 0;

while (n > 0) {
- size = zpci_get_max_write_size((u64 __force) src,
- (u64) dst, n,
- ZPCI_MAX_READ_SIZE);
+ size = zpci_get_max_io_size((u64 __force) src,
+ (u64) dst, n,
+ ZPCI_MAX_READ_SIZE);
rc = zpci_read_single(dst, src, size);
if (rc)
break;
@@ -166,9 +170,9 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
return -EINVAL;

while (n > 0) {
- size = zpci_get_max_write_size((u64 __force) dst,
- (u64) src, n,
- ZPCI_MAX_WRITE_SIZE);
+ size = zpci_get_max_io_size((u64 __force) dst,
+ (u64) src, n,
+ ZPCI_MAX_WRITE_SIZE);
if (size > 8) /* main path */
rc = zpci_write_block(dst, src, size);
else
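
The rewritten zpci_get_max_io_size() clamps each chunk to three limits — the remaining length, the distance to the next 4K block boundary, and the per-instruction maximum — then uses the whole chunk only when source, destination and size are all 8-byte aligned, falling back to 8 bytes or to the next power of two below the chunk otherwise. A userspace sketch of just that size calculation; the helper macros are reimplemented locally rather than taken from kernel headers:

#include <stdio.h>
#include <stdint.h>

#define BOUNDARY_SIZE	(1 << 12)		/* 4K PCI block boundary */
#define BOUNDARY_MASK	(BOUNDARY_SIZE - 1)

static int min3(int a, int b, int c)
{
	int m = a < b ? a : b;
	return m < c ? m : c;
}

static int is_aligned(uint64_t x, unsigned int a)
{
	return (x & (a - 1)) == 0;
}

/* Largest power of two <= x, for x >= 1. */
static int rounddown_pow_of_two(int x)
{
	int p = 1;

	while (p * 2 <= x)
		p *= 2;
	return p;
}

static int max_io_size(uint64_t src, uint64_t dst, int len, int max)
{
	int offset = dst & BOUNDARY_MASK;
	int size = min3(len, BOUNDARY_SIZE - offset, max);

	if (is_aligned(src, 8) && is_aligned(dst, 8) && is_aligned((uint64_t)size, 8))
		return size;
	if (size >= 8)
		return 8;
	return rounddown_pow_of_two(size);
}

int main(void)
{
	/* Fully aligned, well inside a 4K block: the full 128-byte chunk. */
	printf("%d\n", max_io_size(0x1000, 0x2000, 4096, 128));	/* 128 */
	/* Destination 8 bytes before the boundary: the chunk stops at it. */
	printf("%d\n", max_io_size(0x1000, 0x2ff8, 4096, 128));	/* 8 */
	/* Unaligned 6-byte tail: next power of two below the chunk. */
	printf("%d\n", max_io_size(0x1001, 0x2001, 6, 128));		/* 4 */
	return 0;
}
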
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 588089332931..a90499c087f0 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -97,9 +97,9 @@ static inline int __memcpy_toio_inuser(void __iomem *dst,
return -EINVAL;

while (n > 0) {
- size = zpci_get_max_write_size((u64 __force) dst,
- (u64 __force) src, n,
- ZPCI_MAX_WRITE_SIZE);
+ size = zpci_get_max_io_size((u64 __force) dst,
+ (u64 __force) src, n,
+ ZPCI_MAX_WRITE_SIZE);
if (size > 8) /* main path */
rc = __pcistb_mio_inuser(dst, src, size, &status);
else
@@ -242,9 +242,9 @@ static inline int __memcpy_fromio_inuser(void __user *dst,
u8 status;

while (n > 0) {
- size = zpci_get_max_write_size((u64 __force) src,
- (u64 __force) dst, n,
- ZPCI_MAX_READ_SIZE);
+ size = zpci_get_max_io_size((u64 __force) src,
+ (u64 __force) dst, n,
+ ZPCI_MAX_READ_SIZE);
rc = __pcilg_mio_inuser(dst, src, size, &status);
if (rc)
break;
diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c
index ffe2ee8a0246..97a37c062997 100644
--- a/arch/um/drivers/virt-pci.c
+++ b/arch/um/drivers/virt-pci.c
@@ -971,7 +971,7 @@ static long um_pci_map_platform(unsigned long offset, size_t size,
*ops = &um_pci_device_bar_ops;
*priv = &um_pci_platform_device->resptr[0];

- return 0;
+ return offset;
}

static const struct logic_iomem_region_ops um_pci_platform_ops = {
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 8250f0f59c2b..49bc27ab26ad 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -5596,7 +5596,7 @@ static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, i
struct pci_dev *ubox = NULL;
struct pci_dev *dev = NULL;
u32 nid, gid;
- int i, idx, ret = -EPERM;
+ int i, idx, lgc_pkg, ret = -EPERM;
struct intel_uncore_topology *upi;
unsigned int devfn;

@@ -5614,8 +5614,13 @@ static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, i
for (i = 0; i < 8; i++) {
if (nid != GIDNIDMAP(gid, i))
continue;
+ lgc_pkg = topology_phys_to_logical_pkg(i);
+ if (lgc_pkg < 0) {
+ ret = -EPERM;
+ goto err;
+ }
for (idx = 0; idx < type->num_boxes; idx++) {
- upi = &type->topology[nid][idx];
+ upi = &type->topology[lgc_pkg][idx];
devfn = PCI_DEVFN(dev_link0 + idx, ICX_UPI_REGS_ADDR_FUNCTION);
dev = pci_get_domain_bus_and_slot(pci_domain_nr(ubox->bus),
ubox->bus->number,
@@ -5626,6 +5631,7 @@ static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, i
goto err;
}
}
+ break;
}
}
err:
diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h
index 6c98f4bb4228..058bc636356a 100644
--- a/arch/x86/include/asm/kvm-x86-pmu-ops.h
+++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h
@@ -22,7 +22,7 @@ KVM_X86_PMU_OP(get_msr)
KVM_X86_PMU_OP(set_msr)
KVM_X86_PMU_OP(refresh)
KVM_X86_PMU_OP(init)
-KVM_X86_PMU_OP(reset)
+KVM_X86_PMU_OP_OPTIONAL(reset)
KVM_X86_PMU_OP_OPTIONAL(deliver_pmi)
KVM_X86_PMU_OP_OPTIONAL(cleanup)

diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 778df05f8539..bae83810505b 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -115,8 +115,15 @@ static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned lo
}

__monitor((void *)&current_thread_info()->flags, 0, 0);
- if (!need_resched())
- __mwait(eax, ecx);
+
+ if (!need_resched()) {
+ if (ecx & 1) {
+ __mwait(eax, ecx);
+ } else {
+ __sti_mwait(eax, ecx);
+ raw_local_irq_disable();
+ }
+ }
}
current_clr_polling();
}
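
The mwait_idle_with_hints() change keys off bit 0 of ECX, the hint that makes MWAIT treat interrupts as break events even while they are masked. With the hint set, plain __mwait() is sufficient; with it clear, the wait is now bracketed by enabling interrupts (__sti_mwait()) and re-disabling them afterwards so an interrupt can still end the idle period. A tiny sketch of only that selection logic — the real primitives are inline assembly and are stubbed out here with prints:

#include <stdio.h>

#define MWAIT_ECX_INTERRUPT_BREAK	0x1	/* ECX bit 0 */

static void mwait_plain(void)        { puts("mwait (IRQs stay masked, break hint set)"); }
static void sti_mwait(void)          { puts("sti; mwait (IRQs enabled for the wait)"); }
static void local_irq_disable_stub(void) { puts("cli (re-mask IRQs after waking)"); }

/* Mirrors the fixed control flow: pick the wait primitive from the ECX hint. */
static void idle_wait(unsigned long ecx, int need_resched)
{
	if (need_resched)
		return;

	if (ecx & MWAIT_ECX_INTERRUPT_BREAK) {
		mwait_plain();
	} else {
		sti_mwait();
		local_irq_disable_stub();
	}
}

int main(void)
{
	idle_wait(MWAIT_ECX_INTERRUPT_BREAK, 0);	/* hint set */
	idle_wait(0, 0);				/* hint clear */
	return 0;
}
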
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 4d8d4bcf915d..72f0695c3dc1 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -746,6 +746,7 @@ static void check_hw_inj_possible(void)

wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), status);
rdmsrl_safe(mca_msr_reg(bank, MCA_STATUS), &status);
+ wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), 0);

if (!status) {
hw_injection_possible = false;
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index fb8f52149be9..f2fff625576d 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -24,8 +24,8 @@

static int kvmclock __initdata = 1;
static int kvmclock_vsyscall __initdata = 1;
-static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME;
-static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK;
+static int msr_kvm_system_time __ro_after_init;
+static int msr_kvm_wall_clock __ro_after_init;
static u64 kvm_sched_clock_offset __ro_after_init;

static int __init parse_no_kvmclock(char *arg)
@@ -195,7 +195,8 @@ static void kvm_setup_secondary_clock(void)

void kvmclock_disable(void)
{
- native_write_msr(msr_kvm_system_time, 0, 0);
+ if (msr_kvm_system_time)
+ native_write_msr(msr_kvm_system_time, 0, 0);
}

static void __init kvmclock_init_mem(void)
@@ -294,7 +295,10 @@ void __init kvmclock_init(void)
if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
- } else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
+ } else if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
+ msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
+ msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
+ } else {
return;
}

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 9ae07db6f0f6..dc8e8e907cfb 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -250,6 +250,24 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
return true;
}

+static void pmc_release_perf_event(struct kvm_pmc *pmc)
+{
+ if (pmc->perf_event) {
+ perf_event_release_kernel(pmc->perf_event);
+ pmc->perf_event = NULL;
+ pmc->current_config = 0;
+ pmc_to_pmu(pmc)->event_count--;
+ }
+}
+
+static void pmc_stop_counter(struct kvm_pmc *pmc)
+{
+ if (pmc->perf_event) {
+ pmc->counter = pmc_read_counter(pmc);
+ pmc_release_perf_event(pmc);
+ }
+}
+
static int filter_cmp(const void *pa, const void *pb, u64 mask)
{
u64 a = *(u64 *)pa & mask;
@@ -639,24 +657,53 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 0;
}

-/* refresh PMU settings. This function generally is called when underlying
- * settings are changed (such as changes of PMU CPUID by guest VMs), which
- * should rarely happen.
+void kvm_pmu_reset(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct kvm_pmc *pmc;
+ int i;
+
+ pmu->need_cleanup = false;
+
+ bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);
+
+ for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
+ pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
+ if (!pmc)
+ continue;
+
+ pmc_stop_counter(pmc);
+ pmc->counter = 0;
+
+ if (pmc_is_gp(pmc))
+ pmc->eventsel = 0;
+ }
+
+ pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
+
+ static_call_cond(kvm_x86_pmu_reset)(vcpu);
+}
+
+
+/*
+ * Refresh the PMU configuration for the vCPU, e.g. if userspace changes CPUID
+ * and/or PERF_CAPABILITIES.
*/
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
return;

+ /*
+ * Stop/release all existing counters/events before realizing the new
+ * vPMU model.
+ */
+ kvm_pmu_reset(vcpu);
+
bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX);
static_call(kvm_x86_pmu_refresh)(vcpu);
}

-void kvm_pmu_reset(struct kvm_vcpu *vcpu)
-{
- static_call(kvm_x86_pmu_reset)(vcpu);
-}
-
void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 1d64113de488..a46aa9b25150 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -80,24 +80,6 @@ static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
pmc->counter &= pmc_bitmask(pmc);
}

-static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
-{
- if (pmc->perf_event) {
- perf_event_release_kernel(pmc->perf_event);
- pmc->perf_event = NULL;
- pmc->current_config = 0;
- pmc_to_pmu(pmc)->event_count--;
- }
-}
-
-static inline void pmc_stop_counter(struct kvm_pmc *pmc)
-{
- if (pmc->perf_event) {
- pmc->counter = pmc_read_counter(pmc);
- pmc_release_perf_event(pmc);
- }
-}
-
static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
return pmc->type == KVM_PMC_GP;
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 3fea8c47679e..60891b9ce25f 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -247,18 +247,6 @@ static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}

-static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl)
-{
- /* Nested FLUSHBYASID is not supported yet. */
- switch(tlb_ctl) {
- case TLB_CONTROL_DO_NOTHING:
- case TLB_CONTROL_FLUSH_ALL_ASID:
- return true;
- default:
- return false;
- }
-}
-
static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
struct vmcb_ctrl_area_cached *control)
{
@@ -278,9 +266,6 @@ static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
IOPM_SIZE)))
return false;

- if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl)))
- return false;
-
if (CC((control->int_ctl & V_NMI_ENABLE_MASK) &&
!vmcb12_is_intercept(control, INTERCEPT_NMI))) {
return false;
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 373ff6a6687b..3fd47de14b38 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -233,21 +233,6 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu)
}
}

-static void amd_pmu_reset(struct kvm_vcpu *vcpu)
-{
- struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
- int i;
-
- for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
- struct kvm_pmc *pmc = &pmu->gp_counters[i];
-
- pmc_stop_counter(pmc);
- pmc->counter = pmc->prev_counter = pmc->eventsel = 0;
- }
-
- pmu->global_ctrl = pmu->global_status = 0;
-}
-
struct kvm_pmu_ops amd_pmu_ops __initdata = {
.hw_event_available = amd_hw_event_available,
.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
@@ -259,7 +244,6 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
.set_msr = amd_pmu_set_msr,
.refresh = amd_pmu_refresh,
.init = amd_pmu_init,
- .reset = amd_pmu_reset,
.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
.MAX_NR_GP_COUNTERS = KVM_AMD_PMC_MAX_GENERIC,
.MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 820d3e1f6b4f..90c1f7f07e53 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -632,26 +632,6 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
- struct kvm_pmc *pmc = NULL;
- int i;
-
- for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
- pmc = &pmu->gp_counters[i];
-
- pmc_stop_counter(pmc);
- pmc->counter = pmc->prev_counter = pmc->eventsel = 0;
- }
-
- for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
- pmc = &pmu->fixed_counters[i];
-
- pmc_stop_counter(pmc);
- pmc->counter = pmc->prev_counter = 0;
- }
-
- pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
-
intel_pmu_release_guest_lbr_event(vcpu);
}

diff --git a/arch/x86/lib/misc.c b/arch/x86/lib/misc.c
index 92cd8ecc3a2c..40b81c338ae5 100644
--- a/arch/x86/lib/misc.c
+++ b/arch/x86/lib/misc.c
@@ -8,7 +8,7 @@
*/
int num_digits(int val)
{
- int m = 10;
+ long long m = 10;
int d = 1;

if (val < 0) {
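
For reference on the num_digits() change above: once a value needs a tenth digit, the comparison accumulator has to reach 10^10, which does not fit in a 32-bit int. A small userspace sketch of the idea; the helper and harness below are illustrative, not the kernel code itself.

#include <stdio.h>
#include <limits.h>

static int num_digits_sketch(int val)
{
	long long m = 10;	/* 64-bit so m can reach 10^10 */
	int d = 1;

	if (val < 0) {
		d++;
		val = -val;
	}
	while (val >= m) {
		m *= 10;
		d++;
	}
	return d;
}

int main(void)
{
	/* INT_MAX has ten digits; the loop needs m == 10^10 to stop. */
	printf("%d has %d digits\n", INT_MAX, num_digits_sketch(INT_MAX));
	return 0;
}
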
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 4b3efaa82ab7..e9497ee0f854 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -525,6 +525,8 @@ static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
static bool __ref
pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int early)
{
+ struct resource *conflict;
+
if (!early && !acpi_disabled) {
if (is_mmconf_reserved(is_acpi_reserved, cfg, dev,
"ACPI motherboard resource"))
@@ -542,8 +544,17 @@ pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int e
&cfg->res);

if (is_mmconf_reserved(is_efi_mmio, cfg, dev,
- "EfiMemoryMappedIO"))
+ "EfiMemoryMappedIO")) {
+ conflict = insert_resource_conflict(&iomem_resource,
+ &cfg->res);
+ if (conflict)
+ pr_warn("MMCONFIG %pR conflicts with %s %pR\n",
+ &cfg->res, conflict->name, conflict);
+ else
+ pr_info("MMCONFIG %pR reserved to work around lack of ACPI motherboard _CRS\n",
+ &cfg->res);
return true;
+ }
}

/*
diff --git a/block/bio.c b/block/bio.c
index 816d412c06e9..5eba53ca953b 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1145,13 +1145,22 @@ EXPORT_SYMBOL(bio_add_folio);

void __bio_release_pages(struct bio *bio, bool mark_dirty)
{
- struct bvec_iter_all iter_all;
- struct bio_vec *bvec;
+ struct folio_iter fi;
+
+ bio_for_each_folio_all(fi, bio) {
+ struct page *page;
+ size_t done = 0;

- bio_for_each_segment_all(bvec, bio, iter_all) {
- if (mark_dirty && !PageCompound(bvec->bv_page))
- set_page_dirty_lock(bvec->bv_page);
- bio_release_page(bio, bvec->bv_page);
+ if (mark_dirty) {
+ folio_lock(fi.folio);
+ folio_mark_dirty(fi.folio);
+ folio_unlock(fi.folio);
+ }
+ page = folio_page(fi.folio, fi.offset / PAGE_SIZE);
+ do {
+ bio_release_page(bio, page++);
+ done += PAGE_SIZE;
+ } while (done < fi.length);
}
}
EXPORT_SYMBOL_GPL(__bio_release_pages);
@@ -1439,18 +1448,12 @@ EXPORT_SYMBOL(bio_free_pages);
* bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
* for performing direct-IO in BIOs.
*
- * The problem is that we cannot run set_page_dirty() from interrupt context
+ * The problem is that we cannot run folio_mark_dirty() from interrupt context
* because the required locks are not interrupt-safe. So what we can do is to
* mark the pages dirty _before_ performing IO. And in interrupt context,
* check that the pages are still dirty. If so, fine. If not, redirty them
* in process context.
*
- * We special-case compound pages here: normally this means reads into hugetlb
- * pages. The logic in here doesn't really work right for compound pages
- * because the VM does not uniformly chase down the head page in all cases.
- * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
- * handle them at all. So we skip compound pages here at an early stage.
- *
* Note that this code is very hard to test under normal circumstances because
* direct-io pins the pages with get_user_pages(). This makes
* is_page_cache_freeable return false, and the VM will not clean the pages.
@@ -1466,12 +1469,12 @@ EXPORT_SYMBOL(bio_free_pages);
*/
void bio_set_pages_dirty(struct bio *bio)
{
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;

- bio_for_each_segment_all(bvec, bio, iter_all) {
- if (!PageCompound(bvec->bv_page))
- set_page_dirty_lock(bvec->bv_page);
+ bio_for_each_folio_all(fi, bio) {
+ folio_lock(fi.folio);
+ folio_mark_dirty(fi.folio);
+ folio_unlock(fi.folio);
}
}
EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
@@ -1515,12 +1518,11 @@ static void bio_dirty_fn(struct work_struct *work)

void bio_check_pages_dirty(struct bio *bio)
{
- struct bio_vec *bvec;
+ struct folio_iter fi;
unsigned long flags;
- struct bvec_iter_all iter_all;

- bio_for_each_segment_all(bvec, bio, iter_all) {
- if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
+ bio_for_each_folio_all(fi, bio) {
+ if (!folio_test_dirty(fi.folio))
goto defer;
}

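Background on the block/bio.c hunks above: dirty handling now walks a bio at folio granularity, so the old PageCompound special cases disappear and a single iteration can cover several pages. A rough illustration of the iterator, wrapping the in-tree bio_for_each_folio_all() in a made-up helper:

#include <linux/bio.h>

/* Illustrative only: sum how many bytes of this bio live in each folio. */
static size_t bio_folio_bytes_example(struct bio *bio)
{
	struct folio_iter fi;
	size_t total = 0;

	bio_for_each_folio_all(fi, bio)
		total += fi.length;	/* fi.folio and fi.offset are also available */

	return total;
}
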
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index fd482439afbc..b927a4a0ad03 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -252,7 +252,8 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
if (blkcg == &blkcg_root)
return q->root_blkg;

- blkg = rcu_dereference(blkcg->blkg_hint);
+ blkg = rcu_dereference_check(blkcg->blkg_hint,
+ lockdep_is_held(&q->queue_lock));
if (blkg && blkg->q == q)
return blkg;

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 20ecd0ab616f..6041e17492ec 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2968,12 +2968,6 @@ void blk_mq_submit_bio(struct bio *bio)
blk_status_t ret;

bio = blk_queue_bounce(bio, q);
- if (bio_may_exceed_limits(bio, &q->limits)) {
- bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
- if (!bio)
- return;
- }
-
bio_set_ioprio(bio);

if (plug) {
@@ -2982,6 +2976,11 @@ void blk_mq_submit_bio(struct bio *bio)
rq = NULL;
}
if (rq) {
+ if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
+ bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+ if (!bio)
+ return;
+ }
if (!bio_integrity_prep(bio))
return;
if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
@@ -2992,6 +2991,11 @@ void blk_mq_submit_bio(struct bio *bio)
} else {
if (unlikely(bio_queue_enter(bio)))
return;
+ if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
+ bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+ if (!bio)
+ goto fail;
+ }
if (!bio_integrity_prep(bio))
goto fail;
}
diff --git a/block/genhd.c b/block/genhd.c
index cc32a0c704eb..f9b81be6c761 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -432,7 +432,9 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
DISK_MAX_PARTS);
disk->minors = DISK_MAX_PARTS;
}
- if (disk->first_minor + disk->minors > MINORMASK + 1)
+ if (disk->first_minor > MINORMASK ||
+ disk->minors > MINORMASK + 1 ||
+ disk->first_minor + disk->minors > MINORMASK + 1)
goto out_exit_elevator;
} else {
if (WARN_ON(disk->minors))
@@ -542,6 +544,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
kobject_put(disk->part0->bd_holder_dir);
out_del_block_link:
sysfs_remove_link(block_depr, dev_name(ddev));
+ pm_runtime_set_memalloc_noio(ddev, false);
out_device_del:
device_del(ddev);
out_free_ext_minor:
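
The extra comparisons in device_add_disk() guard against first_minor + minors overflowing a signed 32-bit int, which could let an out-of-range pair slip past the old single check. A userspace demonstration of the wraparound; MINORMASK mirrors the kernel's 20-bit minor space, the rest is made up.

#include <stdio.h>
#include <limits.h>

#define MINORMASK 0xfffffU

int main(void)
{
	int first_minor = INT_MAX;
	int minors = 16;
	/* Simulate the 32-bit wraparound without signed-overflow UB. */
	int sum = (int)((unsigned int)first_minor + (unsigned int)minors);

	printf("first_minor + minors wraps to %d\n", sum);
	printf("old check: %s\n",
	       sum > (int)(MINORMASK + 1) ? "rejected" : "accepted");
	printf("new check: %s\n",
	       first_minor > (int)MINORMASK ? "rejected" : "accepted");
	return 0;
}
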
diff --git a/block/ioctl.c b/block/ioctl.c
index d5f5cd61efd7..a74ef911e279 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -18,7 +18,7 @@ static int blkpg_do_ioctl(struct block_device *bdev,
{
struct gendisk *disk = bdev->bd_disk;
struct blkpg_partition p;
- long long start, length;
+ sector_t start, length;

if (disk->flags & GENHD_FL_NO_PART)
return -EINVAL;
@@ -35,14 +35,17 @@ static int blkpg_do_ioctl(struct block_device *bdev,
if (op == BLKPG_DEL_PARTITION)
return bdev_del_partition(disk, p.pno);

+ if (p.start < 0 || p.length <= 0 || p.start + p.length < 0)
+ return -EINVAL;
+ /* Check that the partition is aligned to the block size */
+ if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
+ return -EINVAL;
+
start = p.start >> SECTOR_SHIFT;
length = p.length >> SECTOR_SHIFT;

switch (op) {
case BLKPG_ADD_PARTITION:
- /* check if partition is aligned to blocksize */
- if (p.start & (bdev_logical_block_size(bdev) - 1))
- return -EINVAL;
return bdev_add_partition(disk, p.pno, start, length);
case BLKPG_RESIZE_PARTITION:
return bdev_resize_partition(disk, p.pno, start, length);
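
The blkpg_do_ioctl() change validates the user-supplied byte offsets before they are shifted down to sectors: negative or overflowing values are rejected, and both start and length must sit on a logical-block boundary. A userspace sketch of that check, with invented inputs; 4096 stands in for bdev_logical_block_size().

#include <stdio.h>
#include <stdbool.h>

static bool blkpg_args_valid(long long start, long long length,
			     unsigned int lbs)
{
	if (start < 0 || length <= 0 || start + length < 0)
		return false;
	/* both byte values must be logical-block aligned */
	if ((start | length) & (lbs - 1))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", blkpg_args_valid(-4096, 8192, 4096));	/* 0: negative start */
	printf("%d\n", blkpg_args_valid(4096, 1000, 4096));	/* 0: misaligned length */
	printf("%d\n", blkpg_args_valid(4096, 8192, 4096));	/* 1: accepted */
	return 0;
}
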
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index ea6fb8e89d06..68cc9290cabe 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -1116,9 +1116,13 @@ EXPORT_SYMBOL_GPL(af_alg_sendmsg);
void af_alg_free_resources(struct af_alg_async_req *areq)
{
struct sock *sk = areq->sk;
+ struct af_alg_ctx *ctx;

af_alg_free_areq_sgls(areq);
sock_kfree_s(sk, areq, areq->areqlen);
+
+ ctx = alg_sk(sk)->private;
+ ctx->inflight = false;
}
EXPORT_SYMBOL_GPL(af_alg_free_resources);

@@ -1188,11 +1192,19 @@ EXPORT_SYMBOL_GPL(af_alg_poll);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
unsigned int areqlen)
{
- struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
+ struct af_alg_ctx *ctx = alg_sk(sk)->private;
+ struct af_alg_async_req *areq;
+
+ /* Only one AIO request can be in flight. */
+ if (ctx->inflight)
+ return ERR_PTR(-EBUSY);

+ areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
if (unlikely(!areq))
return ERR_PTR(-ENOMEM);

+ ctx->inflight = true;
+
areq->areqlen = areqlen;
areq->sk = sk;
areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl;
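
The af_alg change serializes AIO per socket: af_alg_alloc_areq() now returns -EBUSY while a previous request is still in flight, and af_alg_free_resources() clears the flag again. A minimal sketch of that guard, assuming nothing beyond a per-context boolean; the names below are not the real af_alg types.

#include <linux/errno.h>
#include <linux/types.h>

struct demo_ctx {
	bool inflight;
};

static int demo_start_aio(struct demo_ctx *ctx)
{
	if (ctx->inflight)
		return -EBUSY;	/* previous request not completed yet */
	ctx->inflight = true;
	return 0;
}

static void demo_finish_aio(struct demo_ctx *ctx)
{
	ctx->inflight = false;
}
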
diff --git a/crypto/rsa.c b/crypto/rsa.c
index c79613cdce6e..b9cd11fb7d36 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -220,6 +220,8 @@ static int rsa_check_exponent_fips(MPI e)
}

e_max = mpi_alloc(0);
+ if (!e_max)
+ return -ENOMEM;
mpi_set_bit(e_max, 256);

if (mpi_cmp(e, e_max) >= 0) {
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 442a82c9de7d..b108a30a7600 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -117,6 +117,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
struct crypto_scomp *scomp = *tfm_ctx;
void **ctx = acomp_request_ctx(req);
struct scomp_scratch *scratch;
+ unsigned int dlen;
int ret;

if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
@@ -128,6 +129,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
req->dlen = SCOMP_SCRATCH_SIZE;

+ dlen = req->dlen;
+
scratch = raw_cpu_ptr(&scomp_scratch);
spin_lock(&scratch->lock);

@@ -145,6 +148,9 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
ret = -ENOMEM;
goto out;
}
+ } else if (req->dlen > dlen) {
+ ret = -ENOSPC;
+ goto out;
}
scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
1);
diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index 6a45a92344e9..a7f6c54c123e 100644
--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -682,7 +682,7 @@ static int sec_attest_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
if (!sec_attest_info)
return -ENOMEM;

- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
rc = -ENOMEM;
goto free_sec_attest_info;
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index e120a96e1eae..71e8d4e7a36c 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -145,9 +145,14 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
static u32 err_seq;

estatus = extlog_elog_entry_check(cpu, bank);
- if (estatus == NULL || (mce->kflags & MCE_HANDLED_CEC))
+ if (!estatus)
return NOTIFY_DONE;

+ if (mce->kflags & MCE_HANDLED_CEC) {
+ estatus->block_status = 0;
+ return NOTIFY_DONE;
+ }
+
memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN);
/* clear record status to enable BIOS to update it again */
estatus->block_status = 0;
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index c5598b6d5db8..794962c5c88e 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -105,7 +105,7 @@ static void lpit_update_residency(struct lpit_residency_info *info,
return;

info->frequency = lpit_native->counter_frequency ?
- lpit_native->counter_frequency : tsc_khz * 1000;
+ lpit_native->counter_frequency : mul_u32_u32(tsc_khz, 1000U);
if (!info->frequency)
info->frequency = 1;

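The mul_u32_u32() change avoids a 32-bit multiply overflow when the LPIT table supplies no counter frequency and tsc_khz * 1000 exceeds 2^32, i.e. a TSC above roughly 4.29 GHz. A userspace illustration with a hypothetical 5 GHz TSC:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t tsc_khz = 5000000;			/* hypothetical 5 GHz TSC */
	uint32_t wrapped = tsc_khz * 1000u;		/* 32-bit product wraps */
	uint64_t full = (uint64_t)tsc_khz * 1000u;	/* widened product */

	printf("32-bit: %u Hz, 64-bit: %llu Hz\n",
	       wrapped, (unsigned long long)full);
	return 0;
}
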
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 539e700de4d2..a052e0ab19e4 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -465,8 +465,9 @@ static int register_device_clock(struct acpi_device *adev,
if (!clk_name)
return -ENOMEM;
clk = clk_register_fractional_divider(NULL, clk_name, parent,
+ 0, prv_base, 1, 15, 16, 15,
CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
- prv_base, 1, 15, 16, 15, 0, NULL);
+ NULL);
parent = clk_name;

clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 35f071ad9532..27a6ae89f13a 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -1713,12 +1713,12 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
return;
count++;

- acpi_get_parent(device->dev->handle, &acpi_parent);
-
- pdev = acpi_get_pci_dev(acpi_parent);
- if (pdev) {
- parent = &pdev->dev;
- pci_dev_put(pdev);
+ if (ACPI_SUCCESS(acpi_get_parent(device->dev->handle, &acpi_parent))) {
+ pdev = acpi_get_pci_dev(acpi_parent);
+ if (pdev) {
+ parent = &pdev->dev;
+ pci_dev_put(pdev);
+ }
}

memset(&props, 0, sizeof(struct backlight_properties));
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 99b4e3355435..4d958a165da0 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -851,6 +851,7 @@ static int acpi_get_ref_args(struct fwnode_reference_args *args,
* @index: Index of the reference to return
* @num_args: Maximum number of arguments after each reference
* @args: Location to store the returned reference with optional arguments
+ * (may be NULL)
*
 * Find property with @name, verify that it is a package containing at least
* one object reference and if so, store the ACPI device object pointer to the
@@ -907,6 +908,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
if (!device)
return -EINVAL;

+ if (!args)
+ return 0;
+
args->fwnode = acpi_fwnode_handle(device);
args->nargs = 0;
return 0;
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 5e2505fa82ff..34c27223cb7d 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -271,7 +271,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
if (mm) {
mmap_write_unlock(mm);
- mmput(mm);
+ mmput_async(mm);
}
return 0;

@@ -304,7 +304,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
err_no_vma:
if (mm) {
mmap_write_unlock(mm);
- mmput(mm);
+ mmput_async(mm);
}
return vma ? -ENOMEM : -ESRCH;
}
@@ -344,8 +344,7 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
continue;
if (!buffer->async_transaction)
continue;
- total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
- + sizeof(struct binder_buffer);
+ total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
num_buffers++;
}

@@ -407,17 +406,17 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
alloc->pid, extra_buffers_size);
return ERR_PTR(-EINVAL);
}
- if (is_async &&
- alloc->free_async_space < size + sizeof(struct binder_buffer)) {
+
+ /* Pad 0-size buffers so they get assigned unique addresses */
+ size = max(size, sizeof(void *));
+
+ if (is_async && alloc->free_async_space < size) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd failed, no async space left\n",
alloc->pid, size);
return ERR_PTR(-ENOSPC);
}

- /* Pad 0-size buffers so they get assigned unique addresses */
- size = max(size, sizeof(void *));
-
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(!buffer->free);
@@ -519,7 +518,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
buffer->pid = pid;
buffer->oneway_spam_suspect = false;
if (is_async) {
- alloc->free_async_space -= size + sizeof(struct binder_buffer);
+ alloc->free_async_space -= size;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_alloc_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space);
@@ -657,8 +656,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

if (buffer->async_transaction) {
- alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
-
+ alloc->free_async_space += buffer_size;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_free_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space);
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 05d9df90f621..9cd489a57708 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -215,6 +215,7 @@ int class_register(const struct class *cls)
return 0;

err_out:
+ lockdep_unregister_key(key);
kfree(cp);
return error;
}
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 493d533f8375..4d588f4658c8 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -868,11 +868,15 @@ int __register_one_node(int nid)
{
int error;
int cpu;
+ struct node *node;

- node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
- if (!node_devices[nid])
+ node = kzalloc(sizeof(struct node), GFP_KERNEL);
+ if (!node)
return -ENOMEM;

+ INIT_LIST_HEAD(&node->access_list);
+ node_devices[nid] = node;
+
error = register_node(node_devices[nid], nid);

/* link cpu under this node */
@@ -881,7 +885,6 @@ int __register_one_node(int nid)
register_cpu_under_node(cpu, nid);
}

- INIT_LIST_HEAD(&node_devices[nid]->access_list);
node_init_caches(nid);

return error;
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 1886995a0b3a..079bd14bdedc 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -541,6 +541,9 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
if (nargs > NR_FWNODE_REFERENCE_ARGS)
return -EINVAL;

+ if (!args)
+ return 0;
+
args->fwnode = software_node_get(refnode);
args->nargs = nargs;

diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 9f2d412fc560..552f56a84a7e 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -165,39 +165,37 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
return get_size(lo->lo_offset, lo->lo_sizelimit, file);
}

+/*
+ * We support direct I/O only if lo_offset is aligned with the logical I/O size
+ * of backing device, and the logical block size of loop is bigger than that of
+ * the backing device.
+ */
+static bool lo_bdev_can_use_dio(struct loop_device *lo,
+ struct block_device *backing_bdev)
+{
+ unsigned short sb_bsize = bdev_logical_block_size(backing_bdev);
+
+ if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
+ return false;
+ if (lo->lo_offset & (sb_bsize - 1))
+ return false;
+ return true;
+}
+
static void __loop_update_dio(struct loop_device *lo, bool dio)
{
struct file *file = lo->lo_backing_file;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
- unsigned short sb_bsize = 0;
- unsigned dio_align = 0;
+ struct inode *inode = file->f_mapping->host;
+ struct block_device *backing_bdev = NULL;
bool use_dio;

- if (inode->i_sb->s_bdev) {
- sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
- dio_align = sb_bsize - 1;
- }
+ if (S_ISBLK(inode->i_mode))
+ backing_bdev = I_BDEV(inode);
+ else if (inode->i_sb->s_bdev)
+ backing_bdev = inode->i_sb->s_bdev;

- /*
- * We support direct I/O only if lo_offset is aligned with the
- * logical I/O size of backing device, and the logical block
- * size of loop is bigger than the backing device's.
- *
- * TODO: the above condition may be loosed in the future, and
- * direct I/O may be switched runtime at that time because most
- * of requests in sane applications should be PAGE_SIZE aligned
- */
- if (dio) {
- if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
- !(lo->lo_offset & dio_align) &&
- (file->f_mode & FMODE_CAN_ODIRECT))
- use_dio = true;
- else
- use_dio = false;
- } else {
- use_dio = false;
- }
+ use_dio = dio && (file->f_mode & FMODE_CAN_ODIRECT) &&
+ (!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev));

if (lo->use_dio == use_dio)
return;
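
The new lo_bdev_can_use_dio() helper keeps the two long-standing conditions for direct I/O on a loop device: the loop logical block size must be at least the backing device's, and lo_offset must be aligned to the backing block size. A userspace restatement with made-up sizes:

#include <stdio.h>
#include <stdbool.h>

static bool can_use_dio(unsigned int loop_lbs, unsigned int backing_lbs,
			unsigned long long lo_offset)
{
	if (loop_lbs < backing_lbs)
		return false;
	if (lo_offset & (backing_lbs - 1))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", can_use_dio(512, 4096, 0));	/* 0: loop block too small */
	printf("%d\n", can_use_dio(4096, 4096, 512));	/* 0: misaligned offset */
	printf("%d\n", can_use_dio(4096, 512, 4096));	/* 1: direct I/O possible */
	return 0;
}
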
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 968090935eb2..9544746de168 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -2165,10 +2165,8 @@ static int null_add_dev(struct nullb_device *dev)

blk_queue_logical_block_size(nullb->q, dev->blocksize);
blk_queue_physical_block_size(nullb->q, dev->blocksize);
- if (!dev->max_sectors)
- dev->max_sectors = queue_max_hw_sectors(nullb->q);
- dev->max_sectors = min(dev->max_sectors, BLK_DEF_MAX_SECTORS);
- blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
+ if (dev->max_sectors)
+ blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);

if (dev->virt_boundary)
blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
@@ -2268,12 +2266,6 @@ static int __init null_init(void)
g_bs = PAGE_SIZE;
}

- if (g_max_sectors > BLK_DEF_MAX_SECTORS) {
- pr_warn("invalid max sectors\n");
- pr_warn("defaults max sectors to %u\n", BLK_DEF_MAX_SECTORS);
- g_max_sectors = BLK_DEF_MAX_SECTORS;
- }
-
if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
pr_err("invalid home_node value\n");
g_home_node = NUMA_NO_NODE;
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index 935feab815d9..203a000a84e3 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -336,7 +336,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
return data;
}

-static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
+static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
{
struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
const unsigned char *p_left = data, *p_h4;
@@ -375,25 +375,20 @@ static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
bt_dev_err(bdev->hdev,
"Frame reassembly failed (%d)", err);
bdev->rx_skb = NULL;
- return err;
+ return;
}

sz_left -= sz_h4;
p_left += sz_h4;
}
-
- return 0;
}

static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
size_t count)
{
struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
- int err;

- err = btmtkuart_recv(bdev->hdev, data, count);
- if (err < 0)
- return err;
+ btmtkuart_recv(bdev->hdev, data, count);

bdev->hdev->stat.byte_rx += count;

diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index b7e66b7ac570..951fe3014a3f 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1276,11 +1276,10 @@ static int btnxpuart_receive_buf(struct serdev_device *serdev, const u8 *data,
if (IS_ERR(nxpdev->rx_skb)) {
int err = PTR_ERR(nxpdev->rx_skb);
/* Safe to ignore out-of-sync bootloader signatures */
- if (is_fw_downloading(nxpdev))
- return count;
- bt_dev_err(nxpdev->hdev, "Frame reassembly failed (%d)", err);
+ if (!is_fw_downloading(nxpdev))
+ bt_dev_err(nxpdev->hdev, "Frame reassembly failed (%d)", err);
nxpdev->rx_skb = NULL;
- return err;
+ return count;
}
if (!is_fw_downloading(nxpdev))
nxpdev->hdev->stat.byte_rx += count;
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 600881808982..582d5c166a75 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -71,45 +71,77 @@ static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
{
- struct mhi_ring_element event = {};
+ struct mhi_ring_element *event;
+ int ret;
+
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ if (!event)
+ return -ENOMEM;

- event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
- event.dword[0] = MHI_TRE_EV_DWORD0(code, len);
- event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
+ event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
+ event->dword[0] = MHI_TRE_EV_DWORD0(code, len);
+ event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);

- return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre));
+ ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre));
+ kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
+
+ return ret;
}

int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
{
- struct mhi_ring_element event = {};
+ struct mhi_ring_element *event;
+ int ret;
+
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ if (!event)
+ return -ENOMEM;
+
+ event->dword[0] = MHI_SC_EV_DWORD0(state);
+ event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);

- event.dword[0] = MHI_SC_EV_DWORD0(state);
- event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
+ ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
+ kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

- return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+ return ret;
}

int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
{
- struct mhi_ring_element event = {};
+ struct mhi_ring_element *event;
+ int ret;
+
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ if (!event)
+ return -ENOMEM;
+
+ event->dword[0] = MHI_EE_EV_DWORD0(exec_env);
+ event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);

- event.dword[0] = MHI_EE_EV_DWORD0(exec_env);
- event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
+ ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
+ kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

- return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+ return ret;
}

static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
{
struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
- struct mhi_ring_element event = {};
+ struct mhi_ring_element *event;
+ int ret;
+
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ if (!event)
+ return -ENOMEM;

- event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
- event.dword[0] = MHI_CC_EV_DWORD0(code);
- event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
+ event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
+ event->dword[0] = MHI_CC_EV_DWORD0(code);
+ event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);

- return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+ ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
+ kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
+
+ return ret;
}

static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
@@ -292,10 +324,9 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
struct device *dev = &mhi_cntrl->mhi_dev->dev;
size_t tr_len, read_offset, write_offset;
+ struct mhi_ep_buf_info buf_info = {};
struct mhi_ring_element *el;
bool tr_done = false;
- void *write_addr;
- u64 read_addr;
u32 buf_left;
int ret;

@@ -324,11 +355,13 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,

read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
write_offset = len - buf_left;
- read_addr = mhi_chan->tre_loc + read_offset;
- write_addr = result->buf_addr + write_offset;
+
+ buf_info.host_addr = mhi_chan->tre_loc + read_offset;
+ buf_info.dev_addr = result->buf_addr + write_offset;
+ buf_info.size = tr_len;

dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
- ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
if (ret < 0) {
dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
return ret;
@@ -419,7 +452,7 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
} else {
/* UL channel */
- result.buf_addr = kzalloc(len, GFP_KERNEL);
+ result.buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
if (!result.buf_addr)
return -ENOMEM;

@@ -427,7 +460,7 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem
ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
if (ret < 0) {
dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
- kfree(result.buf_addr);
+ kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
return ret;
}

@@ -439,7 +472,7 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem
/* Read until the ring becomes empty */
} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));

- kfree(result.buf_addr);
+ kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
}

return 0;
@@ -451,12 +484,11 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
struct device *dev = &mhi_chan->mhi_dev->dev;
+ struct mhi_ep_buf_info buf_info = {};
struct mhi_ring_element *el;
u32 buf_left, read_offset;
struct mhi_ep_ring *ring;
enum mhi_ev_ccs code;
- void *read_addr;
- u64 write_addr;
size_t tr_len;
u32 tre_len;
int ret;
@@ -485,11 +517,13 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)

tr_len = min(buf_left, tre_len);
read_offset = skb->len - buf_left;
- read_addr = skb->data + read_offset;
- write_addr = MHI_TRE_DATA_GET_PTR(el);
+
+ buf_info.dev_addr = skb->data + read_offset;
+ buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el);
+ buf_info.size = tr_len;

dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
- ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
+ ret = mhi_cntrl->write_to_host(mhi_cntrl, &buf_info);
if (ret < 0) {
dev_err(dev, "Error writing to the channel\n");
goto err_exit;
@@ -748,14 +782,14 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
if (ret) {
dev_err(dev, "Error updating write offset for ring\n");
mutex_unlock(&chan->lock);
- kfree(itr);
+ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
continue;
}

/* Sanity check to make sure there are elements in the ring */
if (ring->rd_offset == ring->wr_offset) {
mutex_unlock(&chan->lock);
- kfree(itr);
+ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
continue;
}

@@ -767,12 +801,12 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
dev_err(dev, "Error processing ring for channel (%u): %d\n",
ring->ch_id, ret);
mutex_unlock(&chan->lock);
- kfree(itr);
+ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
continue;
}

mutex_unlock(&chan->lock);
- kfree(itr);
+ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
}
}

@@ -828,7 +862,7 @@ static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned lon
u32 ch_id = ch_idx + i;

ring = &mhi_cntrl->mhi_chan[ch_id].ring;
- item = kzalloc(sizeof(*item), GFP_ATOMIC);
+ item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC);
if (!item)
return;

@@ -1375,6 +1409,28 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
goto err_free_ch;
}

+ mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
+ sizeof(struct mhi_ring_element), 0,
+ SLAB_CACHE_DMA, NULL);
+ if (!mhi_cntrl->ev_ring_el_cache) {
+ ret = -ENOMEM;
+ goto err_free_cmd;
+ }
+
+ mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
+ SLAB_CACHE_DMA, NULL);
+ if (!mhi_cntrl->tre_buf_cache) {
+ ret = -ENOMEM;
+ goto err_destroy_ev_ring_el_cache;
+ }
+
+ mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
+ sizeof(struct mhi_ep_ring_item), 0,
+ 0, NULL);
+ if (!mhi_cntrl->ring_item_cache) {
+ ret = -ENOMEM;
+ goto err_destroy_tre_buf_cache;
+ }
INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
@@ -1383,7 +1439,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
if (!mhi_cntrl->wq) {
ret = -ENOMEM;
- goto err_free_cmd;
+ goto err_destroy_ring_item_cache;
}

INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
@@ -1442,6 +1498,12 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
err_destroy_wq:
destroy_workqueue(mhi_cntrl->wq);
+err_destroy_ring_item_cache:
+ kmem_cache_destroy(mhi_cntrl->ring_item_cache);
+err_destroy_ev_ring_el_cache:
+ kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
+err_destroy_tre_buf_cache:
+ kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
err_free_cmd:
kfree(mhi_cntrl->mhi_cmd);
err_free_ch:
@@ -1463,6 +1525,9 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)

free_irq(mhi_cntrl->irq, mhi_cntrl);

+ kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
+ kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
+ kmem_cache_destroy(mhi_cntrl->ring_item_cache);
kfree(mhi_cntrl->mhi_cmd);
kfree(mhi_cntrl->mhi_chan);

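The MHI endpoint paths above now take ring elements, TRE buffers and ring items from dedicated kmem caches, with SLAB_CACHE_DMA for the buffers the controller DMAs to or from. The sketch below shows only that allocation pattern; demo_el, demo_cache and the functions around them are placeholders, not the MHI structures.

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>

struct demo_el {
	__le64 ptr;
	__le32 dword[2];
};

static struct kmem_cache *demo_cache;

static int demo_send(void)
{
	struct demo_el *el;

	el = kmem_cache_zalloc(demo_cache, GFP_KERNEL | GFP_DMA);
	if (!el)
		return -ENOMEM;
	/* ... fill the element and hand it to the transport ... */
	kmem_cache_free(demo_cache, el);
	return 0;
}

static int demo_init(void)
{
	demo_cache = kmem_cache_create("demo_el", sizeof(struct demo_el), 0,
				       SLAB_CACHE_DMA, NULL);
	return demo_cache ? 0 : -ENOMEM;
}

static void demo_exit(void)
{
	kmem_cache_destroy(demo_cache);
}
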
diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
index 115518ec76a4..c673d7200b3e 100644
--- a/drivers/bus/mhi/ep/ring.c
+++ b/drivers/bus/mhi/ep/ring.c
@@ -30,7 +30,8 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
{
struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- size_t start, copy_size;
+ struct mhi_ep_buf_info buf_info = {};
+ size_t start;
int ret;

/* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). */
@@ -43,30 +44,34 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)

start = ring->wr_offset;
if (start < end) {
- copy_size = (end - start) * sizeof(struct mhi_ring_element);
- ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
- (start * sizeof(struct mhi_ring_element)),
- &ring->ring_cache[start], copy_size);
+ buf_info.size = (end - start) * sizeof(struct mhi_ring_element);
+ buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
+ buf_info.dev_addr = &ring->ring_cache[start];
+
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
if (ret < 0)
return ret;
} else {
- copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
- ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
- (start * sizeof(struct mhi_ring_element)),
- &ring->ring_cache[start], copy_size);
+ buf_info.size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
+ buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
+ buf_info.dev_addr = &ring->ring_cache[start];
+
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
if (ret < 0)
return ret;

if (end) {
- ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase,
- &ring->ring_cache[0],
- end * sizeof(struct mhi_ring_element));
+ buf_info.host_addr = ring->rbase;
+ buf_info.dev_addr = &ring->ring_cache[0];
+ buf_info.size = end * sizeof(struct mhi_ring_element);
+
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
if (ret < 0)
return ret;
}
}

- dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size);
+ dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, buf_info.size);

return 0;
}
@@ -102,6 +107,7 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
{
struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_buf_info buf_info = {};
size_t old_offset = 0;
u32 num_free_elem;
__le64 rp;
@@ -133,12 +139,11 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));

- ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)),
- sizeof(*el));
- if (ret < 0)
- return ret;
+ buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
+ buf_info.dev_addr = el;
+ buf_info.size = sizeof(*el);

- return 0;
+ return mhi_cntrl->write_to_host(mhi_cntrl, &buf_info);
}

void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c
index 7d7b2cb75318..3b6ad2307a41 100644
--- a/drivers/clk/clk-renesas-pcie.c
+++ b/drivers/clk/clk-renesas-pcie.c
@@ -163,7 +163,7 @@ static u8 rs9_calc_dif(const struct rs9_driver_data *rs9, int idx)
enum rs9_model model = rs9->chip_info->model;

if (model == RENESAS_9FGV0241)
- return BIT(idx) + 1;
+ return BIT(idx + 1);
else if (model == RENESAS_9FGV0441)
return BIT(idx);

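On the two-output 9FGV0241 the per-output DIF control bits appear to start at bit 1, so the mask for output idx is BIT(idx + 1); the old BIT(idx) + 1 only coincides with that for idx 0. A two-line demonstration of how the expressions diverge:

#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	for (int idx = 0; idx < 2; idx++)
		printf("idx=%d: BIT(idx)+1=%u BIT(idx+1)=%u\n",
		       idx, BIT(idx) + 1, BIT(idx + 1));
	return 0;
}

For idx 0 both expressions give 2; for idx 1 they give 3 versus 4, so the old expression produced the wrong mask for the second output.
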
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 9599857842c7..2920fe2e5e8b 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -895,10 +895,8 @@ static int si5341_output_clk_set_rate(struct clk_hw *hw, unsigned long rate,
r[0] = r_div ? (r_div & 0xff) : 1;
r[1] = (r_div >> 8) & 0xff;
r[2] = (r_div >> 16) & 0xff;
- err = regmap_bulk_write(output->data->regmap,
+ return regmap_bulk_write(output->data->regmap,
SI5341_OUT_R_REG(output), r, 3);
-
- return 0;
}

static int si5341_output_reparent(struct clk_si5341_output *output, u8 index)
diff --git a/drivers/clk/clk-sp7021.c b/drivers/clk/clk-sp7021.c
index 01d3c4c7b0b2..7cb7d501d7a6 100644
--- a/drivers/clk/clk-sp7021.c
+++ b/drivers/clk/clk-sp7021.c
@@ -604,14 +604,14 @@ static int sp7021_clk_probe(struct platform_device *pdev)
int i;

clk_base = devm_platform_ioremap_resource(pdev, 0);
- if (!clk_base)
- return -ENXIO;
+ if (IS_ERR(clk_base))
+ return PTR_ERR(clk_base);
pll_base = devm_platform_ioremap_resource(pdev, 1);
- if (!pll_base)
- return -ENXIO;
+ if (IS_ERR(pll_base))
+ return PTR_ERR(pll_base);
sys_base = devm_platform_ioremap_resource(pdev, 2);
- if (!sys_base)
- return -ENXIO;
+ if (IS_ERR(sys_base))
+ return PTR_ERR(sys_base);

/* enable default clks */
for (i = 0; i < ARRAY_SIZE(sp_clken); i++)
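
devm_platform_ioremap_resource() reports failure through ERR_PTR(), never NULL, so the probe error paths have to use IS_ERR()/PTR_ERR() as the hunk above now does. The minimal shape of that pattern; demo_probe is a stand-in name, not the sp7021 driver.

#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>

static int demo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* NULL is never returned here */

	return 0;
}
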
diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c
index aefa19f3c2c5..0b8f0904b339 100644
--- a/drivers/clk/qcom/dispcc-sm8550.c
+++ b/drivers/clk/qcom/dispcc-sm8550.c
@@ -81,6 +81,10 @@ static const struct alpha_pll_config disp_cc_pll0_config = {
.config_ctl_val = 0x20485699,
.config_ctl_hi_val = 0x00182261,
.config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
.user_ctl_val = 0x00000000,
.user_ctl_hi_val = 0x00000005,
};
@@ -108,6 +112,10 @@ static const struct alpha_pll_config disp_cc_pll1_config = {
.config_ctl_val = 0x20485699,
.config_ctl_hi_val = 0x00182261,
.config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
.user_ctl_val = 0x00000000,
.user_ctl_hi_val = 0x00000005,
};
diff --git a/drivers/clk/qcom/gcc-sm8550.c b/drivers/clk/qcom/gcc-sm8550.c
index 586126c4dd90..b883dffe5f7a 100644
--- a/drivers/clk/qcom/gcc-sm8550.c
+++ b/drivers/clk/qcom/gcc-sm8550.c
@@ -401,7 +401,7 @@ static struct clk_rcg2 gcc_gp1_clk_src = {
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -416,7 +416,7 @@ static struct clk_rcg2 gcc_gp2_clk_src = {
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -431,7 +431,7 @@ static struct clk_rcg2 gcc_gp3_clk_src = {
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -451,7 +451,7 @@ static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
.parent_data = gcc_parent_data_2,
.num_parents = ARRAY_SIZE(gcc_parent_data_2),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -471,7 +471,7 @@ static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -486,7 +486,7 @@ static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
.parent_data = gcc_parent_data_2,
.num_parents = ARRAY_SIZE(gcc_parent_data_2),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -501,7 +501,7 @@ static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -521,7 +521,7 @@ static struct clk_rcg2 gcc_pdm2_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -536,7 +536,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s0_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -551,7 +551,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s1_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -566,7 +566,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s2_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -581,7 +581,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s3_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -596,7 +596,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s4_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -611,7 +611,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s5_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -626,7 +626,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s6_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -641,7 +641,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s7_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -656,7 +656,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s8_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -671,7 +671,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s9_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -700,7 +700,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};

static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@@ -717,7 +717,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};

static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@@ -750,7 +750,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};

static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@@ -767,7 +767,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};

static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@@ -784,7 +784,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};

static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@@ -801,7 +801,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};

static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@@ -818,7 +818,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};

static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
@@ -835,7 +835,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};

static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
@@ -852,7 +852,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};

static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
@@ -869,7 +869,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};

static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
@@ -886,7 +886,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};

static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
@@ -903,7 +903,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};

static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
@@ -920,7 +920,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};

static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
@@ -937,7 +937,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};

static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
@@ -975,7 +975,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
.parent_data = gcc_parent_data_8,
.num_parents = ARRAY_SIZE(gcc_parent_data_8),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};

static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
@@ -992,7 +992,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
};

static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
@@ -1025,7 +1025,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
.parent_data = gcc_parent_data_9,
.num_parents = ARRAY_SIZE(gcc_parent_data_9),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1048,7 +1048,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1071,7 +1071,7 @@ static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1093,7 +1093,7 @@ static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
.parent_data = gcc_parent_data_3,
.num_parents = ARRAY_SIZE(gcc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1114,7 +1114,7 @@ static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
.parent_data = gcc_parent_data_4,
.num_parents = ARRAY_SIZE(gcc_parent_data_4),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1136,7 +1136,7 @@ static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1159,7 +1159,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1174,7 +1174,7 @@ static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -1189,7 +1189,7 @@ static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
.parent_data = gcc_parent_data_2,
.num_parents = ARRAY_SIZE(gcc_parent_data_2),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};

@@ -2998,38 +2998,46 @@ static struct clk_branch gcc_video_axi1_clk = {

static struct gdsc pcie_0_gdsc = {
.gdscr = 0x6b004,
+ .collapse_ctrl = 0x52020,
+ .collapse_mask = BIT(0),
.pd = {
.name = "pcie_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = POLL_CFG_GDSCR,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};

static struct gdsc pcie_0_phy_gdsc = {
.gdscr = 0x6c000,
+ .collapse_ctrl = 0x52020,
+ .collapse_mask = BIT(3),
.pd = {
.name = "pcie_0_phy_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = POLL_CFG_GDSCR,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};

static struct gdsc pcie_1_gdsc = {
.gdscr = 0x8d004,
+ .collapse_ctrl = 0x52020,
+ .collapse_mask = BIT(1),
.pd = {
.name = "pcie_1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = POLL_CFG_GDSCR,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};

static struct gdsc pcie_1_phy_gdsc = {
.gdscr = 0x8e000,
+ .collapse_ctrl = 0x52020,
+ .collapse_mask = BIT(4),
.pd = {
.name = "pcie_1_phy_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = POLL_CFG_GDSCR,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};

static struct gdsc ufs_phy_gdsc = {
@@ -3038,7 +3046,7 @@ static struct gdsc ufs_phy_gdsc = {
.name = "ufs_phy_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = POLL_CFG_GDSCR,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};

static struct gdsc ufs_mem_phy_gdsc = {
@@ -3047,7 +3055,7 @@ static struct gdsc ufs_mem_phy_gdsc = {
.name = "ufs_mem_phy_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = POLL_CFG_GDSCR,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};

static struct gdsc usb30_prim_gdsc = {
@@ -3056,7 +3064,7 @@ static struct gdsc usb30_prim_gdsc = {
.name = "usb30_prim_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = POLL_CFG_GDSCR,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};

static struct gdsc usb3_phy_gdsc = {
@@ -3065,7 +3073,7 @@ static struct gdsc usb3_phy_gdsc = {
.name = "usb3_phy_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = POLL_CFG_GDSCR,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};

static struct clk_regmap *gcc_sm8550_clocks[] = {
diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c
index 8422fd047493..c89a5b59ddb7 100644
--- a/drivers/clk/qcom/gpucc-sm8150.c
+++ b/drivers/clk/qcom/gpucc-sm8150.c
@@ -37,8 +37,8 @@ static struct alpha_pll_config gpu_cc_pll1_config = {
.config_ctl_hi_val = 0x00002267,
.config_ctl_hi1_val = 0x00000024,
.test_ctl_val = 0x00000000,
- .test_ctl_hi_val = 0x00000002,
- .test_ctl_hi1_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x00000020,
.user_ctl_val = 0x00000000,
.user_ctl_hi_val = 0x00000805,
.user_ctl_hi1_val = 0x000000d0,
diff --git a/drivers/clk/qcom/videocc-sm8150.c b/drivers/clk/qcom/videocc-sm8150.c
index 1afdbe4a249d..52a9a453a143 100644
--- a/drivers/clk/qcom/videocc-sm8150.c
+++ b/drivers/clk/qcom/videocc-sm8150.c
@@ -33,6 +33,7 @@ static struct alpha_pll_config video_pll0_config = {
.config_ctl_val = 0x20485699,
.config_ctl_hi_val = 0x00002267,
.config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
.user_ctl_val = 0x00000000,
.user_ctl_hi_val = 0x00000805,
.user_ctl_hi1_val = 0x000000D0,
@@ -214,6 +215,10 @@ static const struct regmap_config video_cc_sm8150_regmap_config = {

static const struct qcom_reset_map video_cc_sm8150_resets[] = {
[VIDEO_CC_MVSC_CORE_CLK_BCR] = { 0x850, 2 },
+ [VIDEO_CC_INTERFACE_BCR] = { 0x8f0 },
+ [VIDEO_CC_MVS0_BCR] = { 0x870 },
+ [VIDEO_CC_MVS1_BCR] = { 0x8b0 },
+ [VIDEO_CC_MVSC_BCR] = { 0x810 },
};

static const struct qcom_cc_desc video_cc_sm8150_desc = {
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 3f01620e292b..75f9eca020ce 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -1105,41 +1105,33 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,

#define rcdev_to_priv(x) container_of(x, struct rzg2l_cpg_priv, rcdev)

-static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
- const struct rzg2l_cpg_info *info = priv->info;
- unsigned int reg = info->resets[id].off;
- u32 dis = BIT(info->resets[id].bit);
- u32 we = dis << 16;
-
- dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
-
- /* Reset module */
- writel(we, priv->base + CLK_RST_R(reg));
-
- /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
- udelay(35);
-
- /* Release module from reset state */
- writel(we | dis, priv->base + CLK_RST_R(reg));
-
- return 0;
-}
-
static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
const struct rzg2l_cpg_info *info = priv->info;
unsigned int reg = info->resets[id].off;
- u32 value = BIT(info->resets[id].bit) << 16;
+ u32 mask = BIT(info->resets[id].bit);
+ s8 monbit = info->resets[id].monbit;
+ u32 value = mask << 16;

dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

writel(value, priv->base + CLK_RST_R(reg));
- return 0;
+
+ if (info->has_clk_mon_regs) {
+ reg = CLK_MRST_R(reg);
+ } else if (monbit >= 0) {
+ reg = CPG_RST_MON;
+ mask = BIT(monbit);
+ } else {
+ /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
+ udelay(35);
+ return 0;
+ }
+
+ return readl_poll_timeout_atomic(priv->base + reg, value,
+ value & mask, 10, 200);
}

static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
@@ -1148,14 +1140,40 @@ static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
const struct rzg2l_cpg_info *info = priv->info;
unsigned int reg = info->resets[id].off;
- u32 dis = BIT(info->resets[id].bit);
- u32 value = (dis << 16) | dis;
+ u32 mask = BIT(info->resets[id].bit);
+ s8 monbit = info->resets[id].monbit;
+ u32 value = (mask << 16) | mask;

dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
CLK_RST_R(reg));

writel(value, priv->base + CLK_RST_R(reg));
- return 0;
+
+ if (info->has_clk_mon_regs) {
+ reg = CLK_MRST_R(reg);
+ } else if (monbit >= 0) {
+ reg = CPG_RST_MON;
+ mask = BIT(monbit);
+ } else {
+ /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
+ udelay(35);
+ return 0;
+ }
+
+ return readl_poll_timeout_atomic(priv->base + reg, value,
+ !(value & mask), 10, 200);
+}
+
+static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ int ret;
+
+ ret = rzg2l_cpg_assert(rcdev, id);
+ if (ret)
+ return ret;
+
+ return rzg2l_cpg_deassert(rcdev, id);
}

static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
@@ -1163,18 +1181,21 @@ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
{
struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
const struct rzg2l_cpg_info *info = priv->info;
- unsigned int reg = info->resets[id].off;
- u32 bitmask = BIT(info->resets[id].bit);
s8 monbit = info->resets[id].monbit;
+ unsigned int reg;
+ u32 bitmask;

if (info->has_clk_mon_regs) {
- return !!(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
+ reg = CLK_MRST_R(info->resets[id].off);
+ bitmask = BIT(info->resets[id].bit);
} else if (monbit >= 0) {
- u32 monbitmask = BIT(monbit);
-
- return !!(readl(priv->base + CPG_RST_MON) & monbitmask);
+ reg = CPG_RST_MON;
+ bitmask = BIT(monbit);
+ } else {
+ return -ENOTSUPP;
}
- return -ENOTSUPP;
+
+ return !!(readl(priv->base + reg) & bitmask);
}

static const struct reset_control_ops rzg2l_cpg_reset_ops = {
diff --git a/drivers/clk/zynqmp/clk-mux-zynqmp.c b/drivers/clk/zynqmp/clk-mux-zynqmp.c
index 60359333f26d..9b5d3050b742 100644
--- a/drivers/clk/zynqmp/clk-mux-zynqmp.c
+++ b/drivers/clk/zynqmp/clk-mux-zynqmp.c
@@ -89,7 +89,7 @@ static int zynqmp_clk_mux_set_parent(struct clk_hw *hw, u8 index)
static const struct clk_ops zynqmp_clk_mux_ops = {
.get_parent = zynqmp_clk_mux_get_parent,
.set_parent = zynqmp_clk_mux_set_parent,
- .determine_rate = __clk_mux_determine_rate,
+ .determine_rate = __clk_mux_determine_rate_closest,
};

static const struct clk_ops zynqmp_clk_mux_ro_ops = {
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index 33a3b2a22659..5a00487ae408 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -110,52 +110,6 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_UP_ULL(parent_rate, value);
}

-static void zynqmp_get_divider2_val(struct clk_hw *hw,
- unsigned long rate,
- struct zynqmp_clk_divider *divider,
- u32 *bestdiv)
-{
- int div1;
- int div2;
- long error = LONG_MAX;
- unsigned long div1_prate;
- struct clk_hw *div1_parent_hw;
- struct zynqmp_clk_divider *pdivider;
- struct clk_hw *div2_parent_hw = clk_hw_get_parent(hw);
-
- if (!div2_parent_hw)
- return;
-
- pdivider = to_zynqmp_clk_divider(div2_parent_hw);
- if (!pdivider)
- return;
-
- div1_parent_hw = clk_hw_get_parent(div2_parent_hw);
- if (!div1_parent_hw)
- return;
-
- div1_prate = clk_hw_get_rate(div1_parent_hw);
- *bestdiv = 1;
- for (div1 = 1; div1 <= pdivider->max_div;) {
- for (div2 = 1; div2 <= divider->max_div;) {
- long new_error = ((div1_prate / div1) / div2) - rate;
-
- if (abs(new_error) < abs(error)) {
- *bestdiv = div2;
- error = new_error;
- }
- if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
- div2 = div2 << 1;
- else
- div2++;
- }
- if (pdivider->flags & CLK_DIVIDER_POWER_OF_TWO)
- div1 = div1 << 1;
- else
- div1++;
- }
-}
-
/**
* zynqmp_clk_divider_round_rate() - Round rate of divider clock
* @hw: handle between common and hardware-specific interfaces
@@ -174,6 +128,7 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
u32 div_type = divider->div_type;
u32 bestdiv;
int ret;
+ u8 width;

/* if read only, just return current value */
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
@@ -193,23 +148,12 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
}

- bestdiv = zynqmp_divider_get_val(*prate, rate, divider->flags);
-
- /*
- * In case of two divisors, compute best divider values and return
- * divider2 value based on compute value. div1 will be automatically
- * set to optimum based on required total divider value.
- */
- if (div_type == TYPE_DIV2 &&
- (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
- zynqmp_get_divider2_val(hw, rate, divider, &bestdiv);
- }
+ width = fls(divider->max_div);

- if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
- bestdiv = rate % *prate ? 1 : bestdiv;
+ rate = divider_round_rate(hw, rate, prate, NULL, width, divider->flags);

- bestdiv = min_t(u32, bestdiv, divider->max_div);
- *prate = rate * bestdiv;
+ if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && (rate % *prate))
+ *prate = rate;

return rate;
}
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 5f60f6bd3386..56acf2617262 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -183,7 +183,7 @@ static inline u32 dmtimer_read(struct dmtimer *timer, u32 reg)
* dmtimer_write - write timer registers in posted and non-posted mode
* @timer: timer pointer over which write operation is to perform
* @reg: lowest byte holds the register offset
- * @value: data to write into the register
+ * @val: data to write into the register
*
* The posted mode bit is encoded in reg. Note that in posted mode, the write
* pending bit must be checked. Otherwise a write on a register which has a
@@ -949,7 +949,7 @@ static int omap_dm_timer_set_int_enable(struct omap_dm_timer *cookie,

/**
* omap_dm_timer_set_int_disable - disable timer interrupts
- * @timer: pointer to timer handle
+ * @cookie: pointer to timer cookie
* @mask: bit mask of interrupts to be disabled
*
* Disables the specified timer interrupts for a timer.
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index f34e6382a4c5..028df8a5f537 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -310,8 +310,11 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)

#ifdef CONFIG_COMMON_CLK
/* dummy clock provider as needed by OPP if clocks property is used */
- if (of_property_present(dev->of_node, "#clock-cells"))
- devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+ if (of_property_present(dev->of_node, "#clock-cells")) {
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "%s: registering clock provider failed\n", __func__);
+ }
#endif

ret = cpufreq_register_driver(&scmi_cpufreq_driver);
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
index e66df22f9695..d8515d5c0853 100644
--- a/drivers/cpuidle/cpuidle-haltpoll.c
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -25,13 +25,12 @@ MODULE_PARM_DESC(force, "Load unconditionally");
static struct cpuidle_device __percpu *haltpoll_cpuidle_devices;
static enum cpuhp_state haltpoll_hp_state;

-static int default_enter_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static __cpuidle int default_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
- if (current_clr_polling_and_test()) {
- local_irq_enable();
+ if (current_clr_polling_and_test())
return index;
- }
+
arch_cpu_idle();
return index;
}
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index aa4e1a500691..cb8e99936abb 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -179,8 +179,11 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,

wa->dma.address = dma_map_single(wa->dev, wa->address, len,
dir);
- if (dma_mapping_error(wa->dev, wa->dma.address))
+ if (dma_mapping_error(wa->dev, wa->dma.address)) {
+ kfree(wa->address);
+ wa->address = NULL;
return -ENOMEM;
+ }

wa->dma.length = len;
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 3dce35debf63..b97ce0ee7140 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -117,8 +117,6 @@
#define HPRE_DFX_COMMON2_LEN 0xE
#define HPRE_DFX_CORE_LEN 0x43

-#define HPRE_DEV_ALG_MAX_LEN 256
-
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -134,12 +132,7 @@ struct hpre_hw_error {
const char *msg;
};

-struct hpre_dev_alg {
- u32 alg_msk;
- const char *alg;
-};
-
-static const struct hpre_dev_alg hpre_dev_algs[] = {
+static const struct qm_dev_alg hpre_dev_algs[] = {
{
.alg_msk = BIT(0),
.alg = "rsa\n"
@@ -232,6 +225,20 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
};

+enum hpre_pre_store_cap_idx {
+ HPRE_CLUSTER_NUM_CAP_IDX = 0x0,
+ HPRE_CORE_ENABLE_BITMAP_CAP_IDX,
+ HPRE_DRV_ALG_BITMAP_CAP_IDX,
+ HPRE_DEV_ALG_BITMAP_CAP_IDX,
+};
+
+static const u32 hpre_pre_store_caps[] = {
+ HPRE_CLUSTER_NUM_CAP,
+ HPRE_CORE_ENABLE_BITMAP_CAP,
+ HPRE_DRV_ALG_BITMAP_CAP,
+ HPRE_DEV_ALG_BITMAP_CAP,
+};
+
static const struct hpre_hw_error hpre_hw_errors[] = {
{
.int_msk = BIT(0),
@@ -354,42 +361,13 @@ bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
{
u32 cap_val;

- cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DRV_ALG_BITMAP_CAP, qm->cap_ver);
+ cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val;
if (alg & cap_val)
return true;

return false;
}

-static int hpre_set_qm_algs(struct hisi_qm *qm)
-{
- struct device *dev = &qm->pdev->dev;
- char *algs, *ptr;
- u32 alg_msk;
- int i;
-
- if (!qm->use_sva)
- return 0;
-
- algs = devm_kzalloc(dev, HPRE_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
- if (!algs)
- return -ENOMEM;
-
- alg_msk = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DEV_ALG_BITMAP_CAP, qm->cap_ver);
-
- for (i = 0; i < ARRAY_SIZE(hpre_dev_algs); i++)
- if (alg_msk & hpre_dev_algs[i].alg_msk)
- strcat(algs, hpre_dev_algs[i].alg);
-
- ptr = strrchr(algs, '\n');
- if (ptr)
- *ptr = '\0';
-
- qm->uacce->algs = algs;
-
- return 0;
-}
-
static int hpre_diff_regs_show(struct seq_file *s, void *unused)
{
struct hisi_qm *qm = s->private;
@@ -459,16 +437,6 @@ static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

-static inline int hpre_cluster_num(struct hisi_qm *qm)
-{
- return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CLUSTER_NUM_CAP, qm->cap_ver);
-}
-
-static inline int hpre_cluster_core_mask(struct hisi_qm *qm)
-{
- return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CORE_ENABLE_BITMAP_CAP, qm->cap_ver);
-}
-
struct hisi_qp *hpre_create_qp(u8 type)
{
int node = cpu_to_node(smp_processor_id());
@@ -535,13 +503,15 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)

static int hpre_set_cluster(struct hisi_qm *qm)
{
- u32 cluster_core_mask = hpre_cluster_core_mask(qm);
- u8 clusters_num = hpre_cluster_num(qm);
struct device *dev = &qm->pdev->dev;
unsigned long offset;
+ u32 cluster_core_mask;
+ u8 clusters_num;
u32 val = 0;
int ret, i;

+ cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val;
+ clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
for (i = 0; i < clusters_num; i++) {
offset = i * HPRE_CLSTR_ADDR_INTRVL;

@@ -736,11 +706,12 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)

static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
- u8 clusters_num = hpre_cluster_num(qm);
unsigned long offset;
+ u8 clusters_num;
int i;

/* clear clusterX/cluster_ctrl */
+ clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
for (i = 0; i < clusters_num; i++) {
offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
@@ -1027,13 +998,14 @@ static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)

static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
- u8 clusters_num = hpre_cluster_num(qm);
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
+ u8 clusters_num;
int i, ret;

+ clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
for (i = 0; i < clusters_num; i++) {
ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
@@ -1138,8 +1110,37 @@ static void hpre_debugfs_exit(struct hisi_qm *qm)
debugfs_remove_recursive(qm->debug.debug_root);
}

+static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
+{
+ struct hisi_qm_cap_record *hpre_cap;
+ struct device *dev = &qm->pdev->dev;
+ size_t i, size;
+
+ size = ARRAY_SIZE(hpre_pre_store_caps);
+ hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
+ if (!hpre_cap)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i++) {
+ hpre_cap[i].type = hpre_pre_store_caps[i];
+ hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ hpre_pre_store_caps[i], qm->cap_ver);
+ }
+
+ if (hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val > HPRE_CLUSTERS_NUM_MAX) {
+ dev_err(dev, "Device cluster num %u is out of range for driver supports %d!\n",
+ hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val, HPRE_CLUSTERS_NUM_MAX);
+ return -EINVAL;
+ }
+
+ qm->cap_tables.dev_cap_table = hpre_cap;
+
+ return 0;
+}
+
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
+ u64 alg_msk;
int ret;

if (pdev->revision == QM_HW_V1) {
@@ -1170,7 +1171,16 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}

- ret = hpre_set_qm_algs(qm);
+ /* Fetch and save the value of capability registers */
+ ret = hpre_pre_store_cap_reg(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to pre-store capability registers!\n");
+ hisi_qm_uninit(qm);
+ return ret;
+ }
+
+ alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val;
+ ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
if (ret) {
pci_err(pdev, "Failed to set hpre algs!\n");
hisi_qm_uninit(qm);
@@ -1183,11 +1193,12 @@ static int hpre_show_last_regs_init(struct hisi_qm *qm)
{
int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
- u8 clusters_num = hpre_cluster_num(qm);
struct qm_debug *debug = &qm->debug;
void __iomem *io_base;
+ u8 clusters_num;
int i, j, idx;

+ clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
if (!debug->last_words)
@@ -1224,10 +1235,10 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
{
int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
- u8 clusters_num = hpre_cluster_num(qm);
struct qm_debug *debug = &qm->debug;
struct pci_dev *pdev = qm->pdev;
void __iomem *io_base;
+ u8 clusters_num;
int i, j, idx;
u32 val;

@@ -1242,6 +1253,7 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
hpre_com_dfx_regs[i].name, debug->last_words[i], val);
}

+ clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
for (i = 0; i < clusters_num; i++) {
io_base = qm->io_base + hpre_cluster_offsets[i];
for (j = 0; j < cluster_dfx_regs_num; j++) {
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index f1589eb3b46a..e889363ed978 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -228,6 +228,8 @@
#define QM_QOS_MAX_CIR_U 6
#define QM_AUTOSUSPEND_DELAY 3000

+#define QM_DEV_ALG_MAX_LEN 256
+
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
@@ -306,6 +308,13 @@ enum qm_basic_type {
QM_VF_IRQ_NUM_CAP,
};

+enum qm_pre_store_cap_idx {
+ QM_EQ_IRQ_TYPE_CAP_IDX = 0x0,
+ QM_AEQ_IRQ_TYPE_CAP_IDX,
+ QM_ABN_IRQ_TYPE_CAP_IDX,
+ QM_PF2VF_IRQ_TYPE_CAP_IDX,
+};
+
static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
{QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
{QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
@@ -335,6 +344,13 @@ static const struct hisi_qm_cap_info qm_basic_info[] = {
{QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
};

+static const u32 qm_pre_store_caps[] = {
+ QM_EQ_IRQ_TYPE_CAP,
+ QM_AEQ_IRQ_TYPE_CAP,
+ QM_ABN_IRQ_TYPE_CAP,
+ QM_PF2VF_IRQ_TYPE_CAP,
+};
+
struct qm_mailbox {
__le16 w0;
__le16 queue_num;
@@ -787,6 +803,40 @@ static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
*high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
}

+int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
+ u32 dev_algs_size)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *algs, *ptr;
+ int i;
+
+ if (!qm->uacce)
+ return 0;
+
+ if (dev_algs_size >= QM_DEV_ALG_MAX_LEN) {
+ dev_err(dev, "algs size %u is equal or larger than %d.\n",
+ dev_algs_size, QM_DEV_ALG_MAX_LEN);
+ return -EINVAL;
+ }
+
+ algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ if (!algs)
+ return -ENOMEM;
+
+ for (i = 0; i < dev_algs_size; i++)
+ if (alg_msk & dev_algs[i].alg_msk)
+ strcat(algs, dev_algs[i].alg);
+
+ ptr = strrchr(algs, '\n');
+ if (ptr) {
+ *ptr = '\0';
+ qm->uacce->algs = algs;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_set_algs);
+
static u32 qm_get_irq_num(struct hisi_qm *qm)
{
if (qm->fun_type == QM_HW_PF)
@@ -4903,7 +4953,7 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_VF)
return;

- val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
+ val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
return;

@@ -4920,7 +4970,7 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_VF)
return 0;

- val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
+ val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
return 0;

@@ -4937,7 +4987,7 @@ static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;

- val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
+ val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return;

@@ -4951,7 +5001,7 @@ static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;

- val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
+ val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return 0;

@@ -4968,7 +5018,7 @@ static void qm_unregister_aeq_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;

- val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
+ val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return;

@@ -4982,7 +5032,7 @@ static int qm_register_aeq_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;

- val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
+ val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return 0;

@@ -5000,7 +5050,7 @@ static void qm_unregister_eq_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;

- val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
+ val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return;

@@ -5014,7 +5064,7 @@ static int qm_register_eq_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;

- val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
+ val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return 0;

@@ -5102,7 +5152,29 @@ static int qm_get_qp_num(struct hisi_qm *qm)
return 0;
}

-static void qm_get_hw_caps(struct hisi_qm *qm)
+static int qm_pre_store_irq_type_caps(struct hisi_qm *qm)
+{
+ struct hisi_qm_cap_record *qm_cap;
+ struct pci_dev *pdev = qm->pdev;
+ size_t i, size;
+
+ size = ARRAY_SIZE(qm_pre_store_caps);
+ qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL);
+ if (!qm_cap)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i++) {
+ qm_cap[i].type = qm_pre_store_caps[i];
+ qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info,
+ qm_pre_store_caps[i], qm->cap_ver);
+ }
+
+ qm->cap_tables.qm_cap_table = qm_cap;
+
+ return 0;
+}
+
+static int qm_get_hw_caps(struct hisi_qm *qm)
{
const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
qm_cap_info_pf : qm_cap_info_vf;
@@ -5133,6 +5205,9 @@ static void qm_get_hw_caps(struct hisi_qm *qm)
if (val)
set_bit(cap_info[i].type, &qm->caps);
}
+
+ /* Fetch and save the value of irq type related capability registers */
+ return qm_pre_store_irq_type_caps(qm);
}

static int qm_get_pci_res(struct hisi_qm *qm)
@@ -5154,7 +5229,10 @@ static int qm_get_pci_res(struct hisi_qm *qm)
goto err_request_mem_regions;
}

- qm_get_hw_caps(qm);
+ ret = qm_get_hw_caps(qm);
+ if (ret)
+ goto err_ioremap;
+
if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
qm->db_interval = QM_QP_DB_INTERVAL;
qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 3e57fc04b377..410c83712e28 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -220,6 +220,13 @@ enum sec_cap_type {
SEC_CORE4_ALG_BITMAP_HIGH,
};

+enum sec_cap_reg_record_idx {
+ SEC_DRV_ALG_BITMAP_LOW_IDX = 0x0,
+ SEC_DRV_ALG_BITMAP_HIGH_IDX,
+ SEC_DEV_ALG_BITMAP_LOW_IDX,
+ SEC_DEV_ALG_BITMAP_HIGH_IDX,
+};
+
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
int sec_register_to_crypto(struct hisi_qm *qm);
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 074e50ef512c..c3a630cb27a6 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -2543,8 +2543,12 @@ static int sec_register_aead(u64 alg_mask)

int sec_register_to_crypto(struct hisi_qm *qm)
{
- u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
- int ret;
+ u64 alg_mask;
+ int ret = 0;
+
+ alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
+ SEC_DRV_ALG_BITMAP_LOW_IDX);
+

ret = sec_register_skcipher(alg_mask);
if (ret)
@@ -2559,7 +2563,10 @@ int sec_register_to_crypto(struct hisi_qm *qm)

void sec_unregister_from_crypto(struct hisi_qm *qm)
{
- u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
+ u64 alg_mask;
+
+ alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
+ SEC_DRV_ALG_BITMAP_LOW_IDX);

sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 62bd8936a915..bf02a6b2eed4 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -120,7 +120,6 @@
GENMASK_ULL(42, 25))
#define SEC_AEAD_BITMAP (GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \
GENMASK_ULL(45, 43))
-#define SEC_DEV_ALG_MAX_LEN 256

struct sec_hw_error {
u32 int_msk;
@@ -132,11 +131,6 @@ struct sec_dfx_item {
u32 offset;
};

-struct sec_dev_alg {
- u64 alg_msk;
- const char *algs;
-};
-
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;

@@ -173,15 +167,22 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
{SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
};

-static const struct sec_dev_alg sec_dev_algs[] = { {
+static const u32 sec_pre_store_caps[] = {
+ SEC_DRV_ALG_BITMAP_LOW,
+ SEC_DRV_ALG_BITMAP_HIGH,
+ SEC_DEV_ALG_BITMAP_LOW,
+ SEC_DEV_ALG_BITMAP_HIGH,
+};
+
+static const struct qm_dev_alg sec_dev_algs[] = { {
.alg_msk = SEC_CIPHER_BITMAP,
- .algs = "cipher\n",
+ .alg = "cipher\n",
}, {
.alg_msk = SEC_DIGEST_BITMAP,
- .algs = "digest\n",
+ .alg = "digest\n",
}, {
.alg_msk = SEC_AEAD_BITMAP,
- .algs = "aead\n",
+ .alg = "aead\n",
},
};

@@ -394,8 +395,8 @@ u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)
{
u32 cap_val_h, cap_val_l;

- cap_val_h = hisi_qm_get_hw_info(qm, sec_basic_info, high, qm->cap_ver);
- cap_val_l = hisi_qm_get_hw_info(qm, sec_basic_info, low, qm->cap_ver);
+ cap_val_h = qm->cap_tables.dev_cap_table[high].cap_val;
+ cap_val_l = qm->cap_tables.dev_cap_table[low].cap_val;

return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l;
}
@@ -1077,37 +1078,31 @@ static int sec_pf_probe_init(struct sec_dev *sec)
return ret;
}

-static int sec_set_qm_algs(struct hisi_qm *qm)
+static int sec_pre_store_cap_reg(struct hisi_qm *qm)
{
- struct device *dev = &qm->pdev->dev;
- char *algs, *ptr;
- u64 alg_mask;
- int i;
-
- if (!qm->use_sva)
- return 0;
+ struct hisi_qm_cap_record *sec_cap;
+ struct pci_dev *pdev = qm->pdev;
+ size_t i, size;

- algs = devm_kzalloc(dev, SEC_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
- if (!algs)
+ size = ARRAY_SIZE(sec_pre_store_caps);
+ sec_cap = devm_kzalloc(&pdev->dev, sizeof(*sec_cap) * size, GFP_KERNEL);
+ if (!sec_cap)
return -ENOMEM;

- alg_mask = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH, SEC_DEV_ALG_BITMAP_LOW);
-
- for (i = 0; i < ARRAY_SIZE(sec_dev_algs); i++)
- if (alg_mask & sec_dev_algs[i].alg_msk)
- strcat(algs, sec_dev_algs[i].algs);
-
- ptr = strrchr(algs, '\n');
- if (ptr)
- *ptr = '\0';
+ for (i = 0; i < size; i++) {
+ sec_cap[i].type = sec_pre_store_caps[i];
+ sec_cap[i].cap_val = hisi_qm_get_hw_info(qm, sec_basic_info,
+ sec_pre_store_caps[i], qm->cap_ver);
+ }

- qm->uacce->algs = algs;
+ qm->cap_tables.dev_cap_table = sec_cap;

return 0;
}

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
+ u64 alg_msk;
int ret;

qm->pdev = pdev;
@@ -1142,7 +1137,16 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}

- ret = sec_set_qm_algs(qm);
+ /* Fetch and save the value of capability registers */
+ ret = sec_pre_store_cap_reg(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
+ hisi_qm_uninit(qm);
+ return ret;
+ }
+
+ alg_msk = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH_IDX, SEC_DEV_ALG_BITMAP_LOW_IDX);
+ ret = hisi_qm_set_algs(qm, alg_msk, sec_dev_algs, ARRAY_SIZE(sec_dev_algs));
if (ret) {
pci_err(qm->pdev, "Failed to set sec algs!\n");
hisi_qm_uninit(qm);
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 84dbaeb07ea8..cd7ecb2180bf 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -73,7 +73,6 @@
#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
#define HZIP_WR_PORT BIT(11)

-#define HZIP_DEV_ALG_MAX_LEN 256
#define HZIP_ALG_ZLIB_BIT GENMASK(1, 0)
#define HZIP_ALG_GZIP_BIT GENMASK(3, 2)
#define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4)
@@ -106,6 +105,14 @@
#define HZIP_CLOCK_GATED_EN (HZIP_CORE_GATED_EN | \
HZIP_CORE_GATED_OOO_EN)

+/* zip comp high performance */
+#define HZIP_HIGH_PERF_OFFSET 0x301208
+
+enum {
+ HZIP_HIGH_COMP_RATE,
+ HZIP_HIGH_COMP_PERF,
+};
+
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;

@@ -119,23 +126,18 @@ struct zip_dfx_item {
u32 offset;
};

-struct zip_dev_alg {
- u32 alg_msk;
- const char *algs;
-};
-
-static const struct zip_dev_alg zip_dev_algs[] = { {
+static const struct qm_dev_alg zip_dev_algs[] = { {
.alg_msk = HZIP_ALG_ZLIB_BIT,
- .algs = "zlib\n",
+ .alg = "zlib\n",
}, {
.alg_msk = HZIP_ALG_GZIP_BIT,
- .algs = "gzip\n",
+ .alg = "gzip\n",
}, {
.alg_msk = HZIP_ALG_DEFLATE_BIT,
- .algs = "deflate\n",
+ .alg = "deflate\n",
}, {
.alg_msk = HZIP_ALG_LZ77_BIT,
- .algs = "lz77_zstd\n",
+ .alg = "lz77_zstd\n",
},
};

@@ -246,6 +248,26 @@ static struct hisi_qm_cap_info zip_basic_cap_info[] = {
{ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0}
};

+enum zip_pre_store_cap_idx {
+ ZIP_CORE_NUM_CAP_IDX = 0x0,
+ ZIP_CLUSTER_COMP_NUM_CAP_IDX,
+ ZIP_CLUSTER_DECOMP_NUM_CAP_IDX,
+ ZIP_DECOMP_ENABLE_BITMAP_IDX,
+ ZIP_COMP_ENABLE_BITMAP_IDX,
+ ZIP_DRV_ALG_BITMAP_IDX,
+ ZIP_DEV_ALG_BITMAP_IDX,
+};
+
+static const u32 zip_pre_store_caps[] = {
+ ZIP_CORE_NUM_CAP,
+ ZIP_CLUSTER_COMP_NUM_CAP,
+ ZIP_CLUSTER_DECOMP_NUM_CAP,
+ ZIP_DECOMP_ENABLE_BITMAP,
+ ZIP_COMP_ENABLE_BITMAP,
+ ZIP_DRV_ALG_BITMAP,
+ ZIP_DEV_ALG_BITMAP,
+};
+
enum {
HZIP_COMP_CORE0,
HZIP_COMP_CORE1,
@@ -351,6 +373,37 @@ static int hzip_diff_regs_show(struct seq_file *s, void *unused)
return 0;
}
DEFINE_SHOW_ATTRIBUTE(hzip_diff_regs);
+
+static int perf_mode_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+ u32 n;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret != 0 || (n != HZIP_HIGH_COMP_PERF &&
+ n != HZIP_HIGH_COMP_RATE))
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops zip_com_perf_ops = {
+ .set = perf_mode_set,
+ .get = param_get_int,
+};
+
+/*
+ * perf_mode = 0 means enable high compression rate mode,
+ * perf_mode = 1 means enable high compression performance mode.
+ * These two modes only apply to the compression direction.
+ */
+static u32 perf_mode = HZIP_HIGH_COMP_RATE;
+module_param_cb(perf_mode, &zip_com_perf_ops, &perf_mode, 0444);
+MODULE_PARM_DESC(perf_mode, "ZIP high perf mode 0(default), 1(enable)");
+
static const struct kernel_param_ops zip_uacce_mode_ops = {
.set = uacce_mode_set,
.get = param_get_int,
@@ -409,40 +462,33 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
{
u32 cap_val;

- cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DRV_ALG_BITMAP, qm->cap_ver);
+ cap_val = qm->cap_tables.dev_cap_table[ZIP_DRV_ALG_BITMAP_IDX].cap_val;
if ((alg & cap_val) == alg)
return true;

return false;
}

-static int hisi_zip_set_qm_algs(struct hisi_qm *qm)
+static int hisi_zip_set_high_perf(struct hisi_qm *qm)
{
- struct device *dev = &qm->pdev->dev;
- char *algs, *ptr;
- u32 alg_mask;
- int i;
-
- if (!qm->use_sva)
- return 0;
-
- algs = devm_kzalloc(dev, HZIP_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
- if (!algs)
- return -ENOMEM;
-
- alg_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DEV_ALG_BITMAP, qm->cap_ver);
-
- for (i = 0; i < ARRAY_SIZE(zip_dev_algs); i++)
- if (alg_mask & zip_dev_algs[i].alg_msk)
- strcat(algs, zip_dev_algs[i].algs);
-
- ptr = strrchr(algs, '\n');
- if (ptr)
- *ptr = '\0';
+ u32 val;
+ int ret;

- qm->uacce->algs = algs;
+ val = readl_relaxed(qm->io_base + HZIP_HIGH_PERF_OFFSET);
+ if (perf_mode == HZIP_HIGH_COMP_PERF)
+ val |= HZIP_HIGH_COMP_PERF;
+ else
+ val &= ~HZIP_HIGH_COMP_PERF;
+
+ /* Set perf mode */
+ writel(val, qm->io_base + HZIP_HIGH_PERF_OFFSET);
+ ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_HIGH_PERF_OFFSET,
+ val, val == perf_mode, HZIP_DELAY_1_US,
+ HZIP_POLL_TIMEOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to set perf mode\n");

- return 0;
+ return ret;
}

static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
@@ -541,10 +587,8 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
}

/* let's open all compression/decompression cores */
- dcomp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_DECOMP_ENABLE_BITMAP, qm->cap_ver);
- comp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_COMP_ENABLE_BITMAP, qm->cap_ver);
+ dcomp_bm = qm->cap_tables.dev_cap_table[ZIP_DECOMP_ENABLE_BITMAP_IDX].cap_val;
+ comp_bm = qm->cap_tables.dev_cap_table[ZIP_COMP_ENABLE_BITMAP_IDX].cap_val;
writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL);

/* enable sqc,cqc writeback */
@@ -771,9 +815,8 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
char buf[HZIP_BUF_SIZE];
int i;

- zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
- zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
- qm->cap_ver);
+ zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
+ zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;

for (i = 0; i < zip_core_num; i++) {
if (i < zip_comp_core_num)
@@ -915,7 +958,7 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
u32 zip_core_num;
int i, j, idx;

- zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+ zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;

debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num,
sizeof(unsigned int), GFP_KERNEL);
@@ -971,9 +1014,9 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
hzip_com_dfx_regs[i].name, debug->last_words[i], val);
}

- zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
- zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
- qm->cap_ver);
+ zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
+ zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
+
for (i = 0; i < zip_core_num; i++) {
if (i < zip_comp_core_num)
scnprintf(buf, sizeof(buf), "Comp_core-%d", i);
@@ -1114,6 +1157,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
if (ret)
return ret;

+ ret = hisi_zip_set_high_perf(qm);
+ if (ret)
+ return ret;
+
hisi_zip_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(qm);
@@ -1125,8 +1172,31 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
return ret;
}

+static int zip_pre_store_cap_reg(struct hisi_qm *qm)
+{
+ struct hisi_qm_cap_record *zip_cap;
+ struct pci_dev *pdev = qm->pdev;
+ size_t i, size;
+
+ size = ARRAY_SIZE(zip_pre_store_caps);
+ zip_cap = devm_kzalloc(&pdev->dev, sizeof(*zip_cap) * size, GFP_KERNEL);
+ if (!zip_cap)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i++) {
+ zip_cap[i].type = zip_pre_store_caps[i];
+ zip_cap[i].cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ zip_pre_store_caps[i], qm->cap_ver);
+ }
+
+ qm->cap_tables.dev_cap_table = zip_cap;
+
+ return 0;
+}
+
static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
+ u64 alg_msk;
int ret;

qm->pdev = pdev;
@@ -1162,7 +1232,16 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}

- ret = hisi_zip_set_qm_algs(qm);
+ /* Fetch and save the value of capability registers */
+ ret = zip_pre_store_cap_reg(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
+ hisi_qm_uninit(qm);
+ return ret;
+ }
+
+ alg_msk = qm->cap_tables.dev_cap_table[ZIP_DEV_ALG_BITMAP_IDX].cap_val;
+ ret = hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs));
if (ret) {
pci_err(qm->pdev, "Failed to set zip algs!\n");
hisi_qm_uninit(qm);
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 272c28b5a088..b83818634ae4 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -742,9 +742,9 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
max(totlen_src, totlen_dst));
return -EINVAL;
}
- if (sreq->nr_src > 0)
- dma_map_sg(priv->dev, src, sreq->nr_src,
- DMA_BIDIRECTIONAL);
+ if (sreq->nr_src > 0 &&
+ !dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL))
+ return -EIO;
} else {
if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
@@ -752,8 +752,9 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
return -EINVAL;
}

- if (sreq->nr_src > 0)
- dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
+ if (sreq->nr_src > 0 &&
+ !dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE))
+ return -EIO;

if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
@@ -762,9 +763,11 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
goto unmap;
}

- if (sreq->nr_dst > 0)
- dma_map_sg(priv->dev, dst, sreq->nr_dst,
- DMA_FROM_DEVICE);
+ if (sreq->nr_dst > 0 &&
+ !dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE)) {
+ ret = -EIO;
+ goto unmap;
+ }
}

memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 6238d34f8db2..94eb6f6afa25 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -1869,9 +1869,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
crypto_aead_set_flags(ctx->fallback.aead,
crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
- crypto_aead_setkey(ctx->fallback.aead, key, keylen);

- return 0;
+ return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
}

static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 62d93526920f..8e84dd98a273 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -43,7 +43,6 @@
#define FLAGS_MODE_MASK 0x000f
#define FLAGS_ENCRYPT BIT(0)
#define FLAGS_CBC BIT(1)
-#define FLAGS_NEW_KEY BIT(3)

#define SAHARA_HDR_BASE 0x00800000
#define SAHARA_HDR_SKHA_ALG_AES 0
@@ -141,8 +140,6 @@ struct sahara_hw_link {
};

struct sahara_ctx {
- unsigned long flags;
-
/* AES-specific context */
int keylen;
u8 key[AES_KEYSIZE_128];
@@ -151,6 +148,7 @@ struct sahara_ctx {

struct sahara_aes_reqctx {
unsigned long mode;
+ u8 iv_out[AES_BLOCK_SIZE];
struct skcipher_request fallback_req; // keep at the end
};

@@ -446,27 +444,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
int ret;
int i, j;
int idx = 0;
+ u32 len;

- /* Copy new key if necessary */
- if (ctx->flags & FLAGS_NEW_KEY) {
- memcpy(dev->key_base, ctx->key, ctx->keylen);
- ctx->flags &= ~FLAGS_NEW_KEY;
+ memcpy(dev->key_base, ctx->key, ctx->keylen);

- if (dev->flags & FLAGS_CBC) {
- dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
- dev->hw_desc[idx]->p1 = dev->iv_phys_base;
- } else {
- dev->hw_desc[idx]->len1 = 0;
- dev->hw_desc[idx]->p1 = 0;
- }
- dev->hw_desc[idx]->len2 = ctx->keylen;
- dev->hw_desc[idx]->p2 = dev->key_phys_base;
- dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
+ if (dev->flags & FLAGS_CBC) {
+ dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
+ dev->hw_desc[idx]->p1 = dev->iv_phys_base;
+ } else {
+ dev->hw_desc[idx]->len1 = 0;
+ dev->hw_desc[idx]->p1 = 0;
+ }
+ dev->hw_desc[idx]->len2 = ctx->keylen;
+ dev->hw_desc[idx]->p2 = dev->key_phys_base;
+ dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
+ dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

- dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
+ idx++;

- idx++;
- }

dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
if (dev->nb_in_sg < 0) {
@@ -488,24 +483,27 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
DMA_TO_DEVICE);
if (!ret) {
dev_err(dev->device, "couldn't map in sg\n");
- goto unmap_in;
+ return -EINVAL;
}
+
ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE);
if (!ret) {
dev_err(dev->device, "couldn't map out sg\n");
- goto unmap_out;
+ goto unmap_in;
}

/* Create input links */
dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
sg = dev->in_sg;
+ len = dev->total;
for (i = 0; i < dev->nb_in_sg; i++) {
- dev->hw_link[i]->len = sg->length;
+ dev->hw_link[i]->len = min(len, sg->length);
dev->hw_link[i]->p = sg->dma_address;
if (i == (dev->nb_in_sg - 1)) {
dev->hw_link[i]->next = 0;
} else {
+ len -= min(len, sg->length);
dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
sg = sg_next(sg);
}
@@ -514,12 +512,14 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
/* Create output links */
dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
sg = dev->out_sg;
+ len = dev->total;
for (j = i; j < dev->nb_out_sg + i; j++) {
- dev->hw_link[j]->len = sg->length;
+ dev->hw_link[j]->len = min(len, sg->length);
dev->hw_link[j]->p = sg->dma_address;
if (j == (dev->nb_out_sg + i - 1)) {
dev->hw_link[j]->next = 0;
} else {
+ len -= min(len, sg->length);
dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
sg = sg_next(sg);
}
@@ -538,9 +538,6 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)

return 0;

-unmap_out:
- dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
- DMA_FROM_DEVICE);
unmap_in:
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
@@ -548,8 +545,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
return -EINVAL;
}

+static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ /* Update IV buffer to contain the last ciphertext block */
+ if (rctx->mode & FLAGS_ENCRYPT) {
+ sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
+ ivsize, req->cryptlen - ivsize);
+ } else {
+ memcpy(req->iv, rctx->iv_out, ivsize);
+ }
+}
+
static int sahara_aes_process(struct skcipher_request *req)
{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct sahara_dev *dev = dev_ptr;
struct sahara_ctx *ctx;
struct sahara_aes_reqctx *rctx;
@@ -571,8 +584,17 @@ static int sahara_aes_process(struct skcipher_request *req)
rctx->mode &= FLAGS_MODE_MASK;
dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

- if ((dev->flags & FLAGS_CBC) && req->iv)
- memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128);
+ if ((dev->flags & FLAGS_CBC) && req->iv) {
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ memcpy(dev->iv_base, req->iv, ivsize);
+
+ if (!(dev->flags & FLAGS_ENCRYPT)) {
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ rctx->iv_out, ivsize,
+ req->cryptlen - ivsize);
+ }
+ }

/* assign new context to device */
dev->ctx = ctx;
@@ -585,16 +607,20 @@ static int sahara_aes_process(struct skcipher_request *req)

timeout = wait_for_completion_timeout(&dev->dma_completion,
msecs_to_jiffies(SAHARA_TIMEOUT_MS));
- if (!timeout) {
- dev_err(dev->device, "AES timeout\n");
- return -ETIMEDOUT;
- }

dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE);
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);

+ if (!timeout) {
+ dev_err(dev->device, "AES timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ if ((dev->flags & FLAGS_CBC) && req->iv)
+ sahara_aes_cbc_update_iv(req);
+
return 0;
}

@@ -608,7 +634,6 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
/* SAHARA only supports 128bit keys */
if (keylen == AES_KEYSIZE_128) {
memcpy(ctx->key, key, keylen);
- ctx->flags |= FLAGS_NEW_KEY;
return 0;
}

@@ -624,12 +649,40 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
return crypto_skcipher_setkey(ctx->fallback, key, keylen);
}

+static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode)
+{
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+
+ if (mode & FLAGS_ENCRYPT)
+ return crypto_skcipher_encrypt(&rctx->fallback_req);
+
+ return crypto_skcipher_decrypt(&rctx->fallback_req);
+}
+
static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
struct sahara_dev *dev = dev_ptr;
int err = 0;

+ if (!req->cryptlen)
+ return 0;
+
+ if (unlikely(ctx->keylen != AES_KEYSIZE_128))
+ return sahara_aes_fallback(req, mode);
+
dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

@@ -652,81 +705,21 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)

static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{
- struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
- struct sahara_ctx *ctx = crypto_skcipher_ctx(
- crypto_skcipher_reqtfm(req));
-
- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- skcipher_request_set_callback(&rctx->fallback_req,
- req->base.flags,
- req->base.complete,
- req->base.data);
- skcipher_request_set_crypt(&rctx->fallback_req, req->src,
- req->dst, req->cryptlen, req->iv);
- return crypto_skcipher_encrypt(&rctx->fallback_req);
- }
-
return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{
- struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
- struct sahara_ctx *ctx = crypto_skcipher_ctx(
- crypto_skcipher_reqtfm(req));
-
- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- skcipher_request_set_callback(&rctx->fallback_req,
- req->base.flags,
- req->base.complete,
- req->base.data);
- skcipher_request_set_crypt(&rctx->fallback_req, req->src,
- req->dst, req->cryptlen, req->iv);
- return crypto_skcipher_decrypt(&rctx->fallback_req);
- }
-
return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{
- struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
- struct sahara_ctx *ctx = crypto_skcipher_ctx(
- crypto_skcipher_reqtfm(req));
-
- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- skcipher_request_set_callback(&rctx->fallback_req,
- req->base.flags,
- req->base.complete,
- req->base.data);
- skcipher_request_set_crypt(&rctx->fallback_req, req->src,
- req->dst, req->cryptlen, req->iv);
- return crypto_skcipher_encrypt(&rctx->fallback_req);
- }
-
return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{
- struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
- struct sahara_ctx *ctx = crypto_skcipher_ctx(
- crypto_skcipher_reqtfm(req));
-
- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- skcipher_request_set_callback(&rctx->fallback_req,
- req->base.flags,
- req->base.complete,
- req->base.data);
- skcipher_request_set_crypt(&rctx->fallback_req, req->src,
- req->dst, req->cryptlen, req->iv);
- return crypto_skcipher_decrypt(&rctx->fallback_req);
- }
-
return sahara_aes_crypt(req, FLAGS_CBC);
}

@@ -783,6 +776,7 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
int start)
{
struct scatterlist *sg;
+ unsigned int len;
unsigned int i;
int ret;

@@ -804,12 +798,14 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
if (!ret)
return -EFAULT;

+ len = rctx->total;
for (i = start; i < dev->nb_in_sg + start; i++) {
- dev->hw_link[i]->len = sg->length;
+ dev->hw_link[i]->len = min(len, sg->length);
dev->hw_link[i]->p = sg->dma_address;
if (i == (dev->nb_in_sg + start - 1)) {
dev->hw_link[i]->next = 0;
} else {
+ len -= min(len, sg->length);
dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
sg = sg_next(sg);
}
@@ -890,24 +886,6 @@ static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
return 0;
}

-static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
-{
- if (!sg || !sg->length)
- return nbytes;
-
- while (nbytes && sg) {
- if (nbytes <= sg->length) {
- sg->length = nbytes;
- sg_mark_end(sg);
- break;
- }
- nbytes -= sg->length;
- sg = sg_next(sg);
- }
-
- return nbytes;
-}
-
static int sahara_sha_prepare_request(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -944,36 +922,20 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
hash_later, 0);
}

- /* nbytes should now be multiple of blocksize */
- req->nbytes = req->nbytes - hash_later;
-
- sahara_walk_and_recalc(req->src, req->nbytes);
-
+ rctx->total = len - hash_later;
/* have data from previous operation and current */
if (rctx->buf_cnt && req->nbytes) {
sg_init_table(rctx->in_sg_chain, 2);
sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
-
sg_chain(rctx->in_sg_chain, 2, req->src);
-
- rctx->total = req->nbytes + rctx->buf_cnt;
rctx->in_sg = rctx->in_sg_chain;
-
- req->src = rctx->in_sg_chain;
/* only data from previous operation */
} else if (rctx->buf_cnt) {
- if (req->src)
- rctx->in_sg = req->src;
- else
- rctx->in_sg = rctx->in_sg_chain;
- /* buf was copied into rembuf above */
+ rctx->in_sg = rctx->in_sg_chain;
sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
- rctx->total = rctx->buf_cnt;
/* no data from previous operation */
} else {
rctx->in_sg = req->src;
- rctx->total = req->nbytes;
- req->src = rctx->in_sg;
}

/* on next call, we only have the remaining data in the buffer */
@@ -994,7 +956,10 @@ static int sahara_sha_process(struct ahash_request *req)
return ret;

if (rctx->first) {
- sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
+ ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
+ if (ret)
+ return ret;
+
dev->hw_desc[0]->next = 0;
rctx->first = 0;
} else {
@@ -1002,7 +967,10 @@ static int sahara_sha_process(struct ahash_request *req)

sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
dev->hw_desc[0]->next = dev->hw_phys_desc[1];
- sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
+ ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
+ if (ret)
+ return ret;
+
dev->hw_desc[1]->next = 0;
}

@@ -1015,18 +983,19 @@ static int sahara_sha_process(struct ahash_request *req)

timeout = wait_for_completion_timeout(&dev->dma_completion,
msecs_to_jiffies(SAHARA_TIMEOUT_MS));
- if (!timeout) {
- dev_err(dev->device, "SHA timeout\n");
- return -ETIMEDOUT;
- }

if (rctx->sg_in_idx)
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);

+ if (!timeout) {
+ dev_err(dev->device, "SHA timeout\n");
+ return -ETIMEDOUT;
+ }
+
memcpy(rctx->context, dev->context_base, rctx->context_size);

- if (req->result)
+ if (req->result && rctx->last)
memcpy(req->result, rctx->context, rctx->digest_size);

return 0;
@@ -1170,8 +1139,7 @@ static int sahara_sha_import(struct ahash_request *req, const void *in)
static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct sahara_sha_reqctx) +
- SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
+ sizeof(struct sahara_sha_reqctx));

return 0;
}
diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c
index 08e974e0dd12..3a67ddc4d936 100644
--- a/drivers/crypto/starfive/jh7110-cryp.c
+++ b/drivers/crypto/starfive/jh7110-cryp.c
@@ -180,12 +180,8 @@ static int starfive_cryp_probe(struct platform_device *pdev)
spin_unlock(&dev_list.lock);

ret = starfive_dma_init(cryp);
- if (ret) {
- if (ret == -EPROBE_DEFER)
- goto err_probe_defer;
- else
- goto err_dma_init;
- }
+ if (ret)
+ goto err_dma_init;

/* Initialize crypto engine */
cryp->engine = crypto_engine_alloc_init(&pdev->dev, 1);
@@ -233,7 +229,7 @@ static int starfive_cryp_probe(struct platform_device *pdev)

tasklet_kill(&cryp->aes_done);
tasklet_kill(&cryp->hash_done);
-err_probe_defer:
+
return ret;
}

diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 154590e1f764..7059bbe5a2eb 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -10,6 +10,7 @@
#include <linux/virtio.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
+#include <linux/interrupt.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
@@ -28,6 +29,7 @@ struct data_queue {
char name[32];

struct crypto_engine *engine;
+ struct tasklet_struct done_task;
};

struct virtio_crypto {
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 43a0838d31ff..b909c6a2bf1c 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -72,27 +72,28 @@ int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterl
return 0;
}

-static void virtcrypto_dataq_callback(struct virtqueue *vq)
+static void virtcrypto_done_task(unsigned long data)
{
- struct virtio_crypto *vcrypto = vq->vdev->priv;
+ struct data_queue *data_vq = (struct data_queue *)data;
+ struct virtqueue *vq = data_vq->vq;
struct virtio_crypto_request *vc_req;
- unsigned long flags;
unsigned int len;
- unsigned int qid = vq->index;

- spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
do {
virtqueue_disable_cb(vq);
while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
- spin_unlock_irqrestore(
- &vcrypto->data_vq[qid].lock, flags);
if (vc_req->alg_cb)
vc_req->alg_cb(vc_req, len);
- spin_lock_irqsave(
- &vcrypto->data_vq[qid].lock, flags);
}
} while (!virtqueue_enable_cb(vq));
- spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
+}
+
+static void virtcrypto_dataq_callback(struct virtqueue *vq)
+{
+ struct virtio_crypto *vcrypto = vq->vdev->priv;
+ struct data_queue *dq = &vcrypto->data_vq[vq->index];
+
+ tasklet_schedule(&dq->done_task);
}

static int virtcrypto_find_vqs(struct virtio_crypto *vi)
@@ -150,6 +151,8 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
ret = -ENOMEM;
goto err_engine;
}
+ tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
+ (unsigned long)&vi->data_vq[i]);
}

kfree(names);
@@ -497,12 +500,15 @@ static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
static void virtcrypto_remove(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
+ int i;

dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

flush_work(&vcrypto->config_work);
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
+ for (i = 0; i < vcrypto->max_data_queues; i++)
+ tasklet_kill(&vcrypto->data_vq[i].done_task);
virtio_reset_device(vdev);
virtcrypto_free_unused_reqs(vcrypto);
virtcrypto_clear_crypto_engines(vcrypto);
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index f430280fa6bd..c67cc8c9d5cc 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -172,14 +172,10 @@ static ssize_t target_list_show(struct device *dev,
{
struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
ssize_t offset;
- unsigned int seq;
int rc;

- do {
- seq = read_seqbegin(&cxlsd->target_lock);
- rc = emit_target_list(cxlsd, buf);
- } while (read_seqretry(&cxlsd->target_lock, seq));
-
+ guard(rwsem_read)(&cxl_region_rwsem);
+ rc = emit_target_list(cxlsd, buf);
if (rc < 0)
return rc;
offset = rc;
@@ -1579,7 +1575,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
struct cxl_port *port, int *target_map)
{
- int i, rc = 0;
+ int i;

if (!target_map)
return 0;
@@ -1589,19 +1585,16 @@ static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
if (xa_empty(&port->dports))
return -EINVAL;

- write_seqlock(&cxlsd->target_lock);
- for (i = 0; i < cxlsd->nr_targets; i++) {
+ guard(rwsem_write)(&cxl_region_rwsem);
+ for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
struct cxl_dport *dport = find_dport(port, target_map[i]);

- if (!dport) {
- rc = -ENXIO;
- break;
- }
+ if (!dport)
+ return -ENXIO;
cxlsd->target[i] = dport;
}
- write_sequnlock(&cxlsd->target_lock);

- return rc;
+ return 0;
}

struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
@@ -1671,7 +1664,6 @@ static int cxl_switch_decoder_init(struct cxl_port *port,
return -EINVAL;

cxlsd->nr_targets = nr_targets;
- seqlock_init(&cxlsd->target_lock);
return cxl_decoder_init(port, &cxlsd->cxld);
}

diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index e7206367ec66..472bd510b5e2 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -397,7 +397,7 @@ static ssize_t interleave_ways_store(struct device *dev,
return rc;

/*
- * Even for x3, x9, and x12 interleaves the region interleave must be a
+ * Even for x3, x6, and x12 interleaves the region interleave must be a
* power of 2 multiple of the host bridge interleave.
*/
if (!is_power_of_2(val / cxld->interleave_ways) ||
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 6c6afda0e4c6..de2c250c894b 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -404,7 +404,6 @@ struct cxl_endpoint_decoder {
/**
* struct cxl_switch_decoder - Switch specific CXL HDM Decoder
* @cxld: base cxl_decoder object
- * @target_lock: coordinate coherent reads of the target list
* @nr_targets: number of elements in @target
* @target: active ordered target list in current decoder configuration
*
@@ -416,7 +415,6 @@ struct cxl_endpoint_decoder {
*/
struct cxl_switch_decoder {
struct cxl_decoder cxld;
- seqlock_t target_lock;
int nr_targets;
struct cxl_dport *target[];
};
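
In the CXL hunks above the per-decoder seqlock is replaced by holding cxl_region_rwsem via the scope-based guard() helper, so the early-return paths in decoder_populate_targets() need no explicit unlock calls. A minimal sketch of the pattern under made-up names (example_rwsem stands in for cxl_region_rwsem):

	#include <linux/cleanup.h>
	#include <linux/errno.h>
	#include <linux/rwsem.h>
	#include <linux/types.h>

	static DECLARE_RWSEM(example_rwsem);
	static bool example_ready;

	static int example_read_side(void)
	{
		/* takes down_read() now, up_read() runs when the scope ends */
		guard(rwsem_read)(&example_rwsem);

		if (!example_ready)
			return -ENXIO;	/* rwsem released automatically */

		return 0;		/* rwsem released automatically */
	}

guard(rwsem_write) works the same way on the writer side, which is what the target-list update above uses.
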
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index b9c5772da959..90d46e5c4ff0 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -1133,7 +1133,7 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
decode_register(other, OCX_OTHER_SIZE,
ocx_com_errors, ctx->reg_com_int);

- strncat(msg, other, OCX_MESSAGE_SIZE);
+ strlcat(msg, other, OCX_MESSAGE_SIZE);

for (lane = 0; lane < OCX_RX_LANES; lane++)
if (ctx->reg_com_int & BIT(lane)) {
@@ -1142,12 +1142,12 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
lane, ctx->reg_lane_int[lane],
lane, ctx->reg_lane_stat11[lane]);

- strncat(msg, other, OCX_MESSAGE_SIZE);
+ strlcat(msg, other, OCX_MESSAGE_SIZE);

decode_register(other, OCX_OTHER_SIZE,
ocx_lane_errors,
ctx->reg_lane_int[lane]);
- strncat(msg, other, OCX_MESSAGE_SIZE);
+ strlcat(msg, other, OCX_MESSAGE_SIZE);
}

if (ctx->reg_com_int & OCX_COM_INT_CE)
@@ -1217,7 +1217,7 @@ static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id)
decode_register(other, OCX_OTHER_SIZE,
ocx_com_link_errors, ctx->reg_com_link_int);

- strncat(msg, other, OCX_MESSAGE_SIZE);
+ strlcat(msg, other, OCX_MESSAGE_SIZE);

if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE)
edac_device_handle_ue(ocx->edac_dev, 0, 0, msg);
@@ -1896,7 +1896,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)

decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int);

- strncat(msg, other, L2C_MESSAGE_SIZE);
+ strlcat(msg, other, L2C_MESSAGE_SIZE);

if (ctx->reg_int & mask_ue)
edac_device_handle_ue(l2c->edac_dev, 0, 0, msg);
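
The thunderx_edac hunks above swap strncat() for strlcat() because strncat()'s size argument only caps the number of bytes appended, not the total length of the destination, so passing the buffer size can still overflow a partly filled buffer. A tiny illustration (buffer contents are made up):

	#include <linux/string.h>

	static void strlcat_demo(void)
	{
		char buf[8] = "abcdef";			/* 6 chars used in an 8-byte buffer */

		/* strncat(buf, "XYZ", sizeof(buf)) could write past buf[7] */
		strlcat(buf, "XYZ", sizeof(buf));	/* truncates safely to "abcdefX" */
	}
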
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 66c3846c9147..3b4c9355cb60 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -161,7 +161,7 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
struct resource *res;
- char debug_name[50] = "ti_sci_debug@";
+ char debug_name[50];

/* Debug region is optional */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -178,10 +178,10 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
/* Setup NULL termination */
info->debug_buffer[info->debug_region_size] = 0;

- info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
- sizeof(debug_name) -
- sizeof("ti_sci_debug@")),
- 0444, NULL, info, &ti_sci_debug_fops);
+ snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s",
+ dev_name(dev));
+ info->d = debugfs_create_file(debug_name, 0444, NULL, info,
+ &ti_sci_debug_fops);
if (IS_ERR(info->d))
return PTR_ERR(info->d);

diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c
index 7a3e1760fc5b..d5906d419b0a 100644
--- a/drivers/gpio/gpio-mlxbf3.c
+++ b/drivers/gpio/gpio-mlxbf3.c
@@ -215,6 +215,8 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
gs->gpio_clr_io + MLXBF_GPIO_FW_DATA_OUT_CLEAR,
gs->gpio_set_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_SET,
gs->gpio_clr_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_CLEAR, 0);
+ if (ret)
+ return dev_err_probe(dev, ret, "%s: bgpio_init() failed", __func__);

gc->request = gpiochip_generic_request;
gc->free = gpiochip_generic_free;
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index c7c5c19ebc66..12d853845bb8 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -817,7 +817,7 @@ static int __init gpiolib_sysfs_init(void)
* gpiochip_sysfs_register() acquires a mutex. This is unsafe
* and needs to be fixed.
*
- * Also it would be nice to use gpiochip_find() here so we
+ * Also it would be nice to use gpio_device_find() here so we
* can keep gpio_chips local to gpiolib.c, but the yield of
* gpio_lock prevents us from doing this.
*/
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 40a0022ea719..71492d213ef4 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1014,16 +1014,10 @@ void gpiochip_remove(struct gpio_chip *gc)
}
EXPORT_SYMBOL_GPL(gpiochip_remove);

-/**
- * gpiochip_find() - iterator for locating a specific gpio_chip
- * @data: data to pass to match function
- * @match: Callback function to check gpio_chip
+/*
+ * FIXME: This will be removed soon.
*
- * Similar to bus_find_device. It returns a reference to a gpio_chip as
- * determined by a user supplied @match callback. The callback should return
- * 0 if the device doesn't match and non-zero if it does. If the callback is
- * non-zero, this function will return to the caller and not iterate over any
- * more gpio_chips.
+ * This function is deprecated, don't use.
*/
struct gpio_chip *gpiochip_find(void *data,
int (*match)(struct gpio_chip *gc,
@@ -1031,21 +1025,62 @@ struct gpio_chip *gpiochip_find(void *data,
{
struct gpio_device *gdev;
struct gpio_chip *gc = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
- list_for_each_entry(gdev, &gpio_devices, list)
- if (gdev->chip && match(gdev->chip, data)) {
- gc = gdev->chip;
- break;
- }

- spin_unlock_irqrestore(&gpio_lock, flags);
+ gdev = gpio_device_find(data, match);
+ if (gdev) {
+ gc = gdev->chip;
+ gpio_device_put(gdev);
+ }

return gc;
}
EXPORT_SYMBOL_GPL(gpiochip_find);

+/**
+ * gpio_device_find() - find a specific GPIO device
+ * @data: data to pass to match function
+ * @match: Callback function to check gpio_chip
+ *
+ * Returns:
+ * New reference to struct gpio_device.
+ *
+ * Similar to bus_find_device(). It returns a reference to a gpio_device as
+ * determined by a user supplied @match callback. The callback should return
+ * 0 if the device doesn't match and non-zero if it does. If the callback
+ * returns non-zero, this function will return to the caller and not iterate
+ * over any more gpio_devices.
+ *
+ * The callback takes the GPIO chip structure as argument. During the execution
+ * of the callback function the chip is protected from being freed. TODO: This
+ * actually has yet to be implemented.
+ *
+ * If the function returns non-NULL, the returned reference must be freed by
+ * the caller using gpio_device_put().
+ */
+struct gpio_device *gpio_device_find(void *data,
+ int (*match)(struct gpio_chip *gc,
+ void *data))
+{
+ struct gpio_device *gdev;
+
+ /*
+ * Not yet but in the future the spinlock below will become a mutex.
+ * Annotate this function before anyone tries to use it in interrupt
+ * context like it happened with gpiochip_find().
+ */
+ might_sleep();
+
+ guard(spinlock_irqsave)(&gpio_lock);
+
+ list_for_each_entry(gdev, &gpio_devices, list) {
+ if (gdev->chip && match(gdev->chip, data))
+ return gpio_device_get(gdev);
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(gpio_device_find);
+
static int gpiochip_match_name(struct gpio_chip *gc, void *data)
{
const char *name = data;
@@ -1058,6 +1093,30 @@ static struct gpio_chip *find_chip_by_name(const char *name)
return gpiochip_find((void *)name, gpiochip_match_name);
}

+/**
+ * gpio_device_get() - Increase the reference count of this GPIO device
+ * @gdev: GPIO device to increase the refcount for
+ *
+ * Returns:
+ * Pointer to @gdev.
+ */
+struct gpio_device *gpio_device_get(struct gpio_device *gdev)
+{
+ return to_gpio_device(get_device(&gdev->dev));
+}
+EXPORT_SYMBOL_GPL(gpio_device_get);
+
+/**
+ * gpio_device_put() - Decrease the reference count of this GPIO device and
+ * possibly free all resources associated with it.
+ * @gdev: GPIO device to decrease the reference count for
+ */
+void gpio_device_put(struct gpio_device *gdev)
+{
+ put_device(&gdev->dev);
+}
+EXPORT_SYMBOL_GPL(gpio_device_put);
+
#ifdef CONFIG_GPIOLIB_IRQCHIP

/*
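
Given the kernel-doc added above, a possible usage sketch for the new gpio_device_find() API, assuming its declaration is exposed via <linux/gpio/driver.h> as in the upstream change; the chip label and function names here are made up, and the match callback mirrors the gpiochip_match_name() helper visible in the context above:

	#include <linux/gpio/driver.h>
	#include <linux/string.h>

	static int match_label(struct gpio_chip *gc, void *data)
	{
		/* non-zero means "match", as the kernel-doc describes */
		return gc->label && !strcmp(gc->label, data);
	}

	static void example_lookup(void)
	{
		struct gpio_device *gdev;

		gdev = gpio_device_find((void *)"my-gpio-chip", match_label);
		if (!gdev)
			return;

		/* ... use the device ... */

		gpio_device_put(gdev);	/* drop the reference taken by _find() */
	}
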
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index a0a67569300b..aa1926083689 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -86,16 +86,6 @@ static inline struct gpio_device *to_gpio_device(struct device *dev)
return container_of(dev, struct gpio_device, dev);
}

-static inline struct gpio_device *gpio_device_get(struct gpio_device *gdev)
-{
- return to_gpio_device(get_device(&gdev->dev));
-}
-
-static inline void gpio_device_put(struct gpio_device *gdev)
-{
- put_device(&gdev->dev);
-}
-
/* gpio suffixes used for ACPI and device tree lookup */
static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" };

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 3f2126f99923..418ff7cd662d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -755,7 +755,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
int r;

if (!adev->smc_rreg)
- return -EPERM;
+ return -EOPNOTSUPP;

if (size & 0x3 || *pos & 0x3)
return -EINVAL;
@@ -814,7 +814,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
int r;

if (!adev->smc_wreg)
- return -EPERM;
+ return -EOPNOTSUPP;

if (size & 0x3 || *pos & 0x3)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 2c35036e4ba2..635b58553583 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2197,6 +2197,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,

pci_wake_from_d3(pdev, TRUE);

+ pci_wake_from_d3(pdev, TRUE);
+
/*
* For runpm implemented via BACO, PMFW will handle the
* timing for BACO in and out:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d30dc0b718c7..58dab4f73a9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1026,7 +1026,12 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_GPU_AVG_POWER,
(void *)&ui32, &ui32_size)) {
- return -EINVAL;
+ /* fall back to input power for backwards compat */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
}
ui32 >>= 8;
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 62b205dac63a..6604a3f99c5e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -330,12 +330,6 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
pdd->gpuvm_limit =
pdd->dev->kfd->shared_resources.gpuvm_size - 1;

- /* dGPUs: the reserved space for kernel
- * before SVM
- */
- pdd->qpd.cwsr_base = SVM_CWSR_BASE;
- pdd->qpd.ib_base = SVM_IB_BASE;
-
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
}
@@ -345,18 +339,18 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
pdd->lds_base = MAKE_LDS_APP_BASE_V9();
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);

- pdd->gpuvm_base = PAGE_SIZE;
+ /* Raven needs SVM to support graphic handle, etc. Leave the small
+ * reserved space before SVM on Raven as well, even though we don't
+ * have to.
+ * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they
+ * are used in Thunk to reserve SVM.
+ */
+ pdd->gpuvm_base = SVM_USER_BASE;
pdd->gpuvm_limit =
pdd->dev->kfd->shared_resources.gpuvm_size - 1;

pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
-
- /*
- * Place TBA/TMA on opposite side of VM hole to prevent
- * stray faults from triggering SVM on these pages.
- */
- pdd->qpd.cwsr_base = pdd->dev->kfd->shared_resources.gpuvm_size;
}

int kfd_init_apertures(struct kfd_process *process)
@@ -413,6 +407,12 @@ int kfd_init_apertures(struct kfd_process *process)
return -EINVAL;
}
}
+
+ /* dGPUs: the reserved space for kernel
+ * before SVM
+ */
+ pdd->qpd.cwsr_base = SVM_CWSR_BASE;
+ pdd->qpd.ib_base = SVM_IB_BASE;
}

dev_dbg(kfd_device, "node id %u\n", id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 7d82c7da223a..659313648b20 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -1021,7 +1021,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
} else {
res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
if (IS_ERR(res))
- return -ENOMEM;
+ return PTR_ERR(res);
pgmap->range.start = res->start;
pgmap->range.end = res->end;
pgmap->type = MEMORY_DEVICE_PRIVATE;
@@ -1037,10 +1037,10 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
r = devm_memremap_pages(adev->dev, pgmap);
if (IS_ERR(r)) {
pr_err("failed to register HMM device memory\n");
- /* Disable SVM support capability */
- pgmap->type = 0;
if (pgmap->type == MEMORY_DEVICE_PRIVATE)
devm_release_mem_region(adev->dev, res->start, resource_size(res));
+ /* Disable SVM support capability */
+ pgmap->type = 0;
return PTR_ERR(r);
}

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index df7a5cdb8693..3287a3961395 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -971,7 +971,7 @@ struct kfd_process {
struct work_struct debug_event_workarea;

/* Tracks debug per-vmid request for debug flags */
- bool dbg_flags;
+ u32 dbg_flags;

atomic_t poison;
/* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index c8c75ff7cea8..6e75e8fa18be 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1342,10 +1342,11 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
num_cpu++;
}

+ if (list_empty(&kdev->io_link_props))
+ return -ENODATA;
+
gpu_link = list_first_entry(&kdev->io_link_props,
- struct kfd_iolink_properties, list);
- if (!gpu_link)
- return -ENOMEM;
+ struct kfd_iolink_properties, list);

for (i = 0; i < num_cpu; i++) {
/* CPU <--> GPU */
@@ -1423,15 +1424,17 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
peer->gpu->adev))
return ret;

+ if (list_empty(&kdev->io_link_props))
+ return -ENODATA;
+
iolink1 = list_first_entry(&kdev->io_link_props,
- struct kfd_iolink_properties, list);
- if (!iolink1)
- return -ENOMEM;
+ struct kfd_iolink_properties, list);
+
+ if (list_empty(&peer->io_link_props))
+ return -ENODATA;

iolink2 = list_first_entry(&peer->io_link_props,
- struct kfd_iolink_properties, list);
- if (!iolink2)
- return -ENOMEM;
+ struct kfd_iolink_properties, list);

props = kfd_alloc_struct(props);
if (!props)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5084833e3608..861b5e45e2a7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -6882,7 +6882,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
max_bpc);
bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
clock = adjusted_mode->clock;
- dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
+ dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
}

dm_new_connector_state->vcpi_slots =
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 28f5eb9ecbd3..10dd4cd6f59c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1636,7 +1636,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
} else {
/* check if mode could be supported within full_pbn */
bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
- pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
+ pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4);
if (pbn > full_pbn)
return DC_FAIL_BANDWIDTH_VALIDATE;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 90339c2dfd84..5a0b04518956 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -807,7 +807,7 @@ void dp_decide_lane_settings(
const struct link_training_settings *lt_settings,
const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+ union dpcd_training_lane *dpcd_lane_settings)
{
uint32_t lane;

diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
index 7d027bac8255..851bd17317a0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
@@ -111,7 +111,7 @@ void dp_decide_lane_settings(
const struct link_training_settings *lt_settings,
const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
+ union dpcd_training_lane *dpcd_lane_settings);

enum dc_dp_training_pattern decide_cr_training_pattern(
const struct dc_link_settings *link_settings);
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 5d28c951a319..5cb4725c773f 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -2735,10 +2735,8 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
- if (ps == NULL) {
- kfree(adev->pm.dpm.ps);
+ if (ps == NULL)
return -ENOMEM;
- }
adev->pm.dpm.ps[i].ps_priv = ps;
k = 0;
idx = (u8 *)&power_state->v2.clockInfoIndex[0];
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index 81fb4e5dd804..60377747bab4 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -272,10 +272,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
+ if (ret)
return ret;
- }
}
if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
@@ -283,10 +281,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
+ if (ret)
return ret;
- }
}
if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
@@ -294,10 +290,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
+ if (ret)
return ret;
- }
}
if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
@@ -305,10 +299,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
+ if (ret)
return ret;
- }
}
if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
@@ -339,10 +331,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
kcalloc(psl->ucNumEntries,
sizeof(struct amdgpu_phase_shedding_limits_entry),
GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
return -ENOMEM;
- }

entry = &psl->entries[0];
for (i = 0; i < psl->ucNumEntries; i++) {
@@ -383,10 +373,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
ATOM_PPLIB_CAC_Leakage_Record *entry;
u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries)
return -ENOMEM;
- }
entry = &cac_table->entries[0];
for (i = 0; i < cac_table->ucNumEntries; i++) {
if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
@@ -438,10 +426,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
@@ -493,10 +479,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
@@ -525,10 +509,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
sizeof(struct amdgpu_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
@@ -548,10 +530,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
le16_to_cpu(ext_hdr->usPPMTableOffset));
adev->pm.dpm.dyn_state.ppm_table =
kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.ppm_table) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.ppm_table)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
le16_to_cpu(ppm->usCpuCoreNumber);
@@ -583,10 +563,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
sizeof(struct amdgpu_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
return -ENOMEM;
- }
adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
@@ -606,10 +584,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
ATOM_PowerTune_Table *pt;
adev->pm.dpm.dyn_state.cac_tdp_table =
kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.cac_tdp_table)
return -ENOMEM;
- }
if (rev > 0) {
ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
(mode_info->atom_context->bios + data_offset +
@@ -645,10 +621,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
ret = amdgpu_parse_clk_voltage_dep_table(
&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
dep_table);
- if (ret) {
- kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
+ if (ret)
return ret;
- }
}
}

diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 02e69ccff3ba..f81e4bd48110 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7379,10 +7379,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
kcalloc(4,
sizeof(struct amdgpu_clock_voltage_dependency_entry),
GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries)
return -ENOMEM;
- }
+
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 11372fcc59c8..b1a8799e2dee 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -2974,6 +2974,8 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
result = smu7_get_evv_voltages(hwmgr);
if (result) {
pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
+ kfree(hwmgr->backend);
+ hwmgr->backend = NULL;
return -EINVAL;
}
} else {
@@ -3019,8 +3021,10 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
}

result = smu7_update_edc_leakage_table(hwmgr);
- if (result)
+ if (result) {
+ smu7_hwmgr_backend_fini(hwmgr);
return result;
+ }

return 0;
}
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
index 946212a95598..5e3b8edcf794 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
@@ -403,7 +403,8 @@ static int _cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp)

static int _cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type)
{
- int ret, tries = 3;
+ int ret = -EINVAL;
+ int tries = 3;
u32 i;

for (i = 0; i < tries; i++) {
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index b45bffab7c81..d941c3a0e611 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -2273,7 +2273,7 @@ static int tc_probe(struct i2c_client *client)
} else {
if (tc->hpd_pin < 0 || tc->hpd_pin > 1) {
dev_err(dev, "failed to parse HPD number\n");
- return ret;
+ return -EINVAL;
}
}

diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
index e0e015243a60..b588fea12502 100644
--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -179,7 +179,7 @@ static int tpd12s015_probe(struct platform_device *pdev)
return 0;
}

-static int __exit tpd12s015_remove(struct platform_device *pdev)
+static int tpd12s015_remove(struct platform_device *pdev)
{
struct tpd12s015_device *tpd = platform_get_drvdata(pdev);

@@ -197,7 +197,7 @@ MODULE_DEVICE_TABLE(of, tpd12s015_of_match);

static struct platform_driver tpd12s015_driver = {
.probe = tpd12s015_probe,
- .remove = __exit_p(tpd12s015_remove),
+ .remove = tpd12s015_remove,
.driver = {
.name = "tpd12s015",
.of_match_table = tpd12s015_of_match,
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 8c929ef72c72..6d169c83b062 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -4690,13 +4690,12 @@ EXPORT_SYMBOL(drm_dp_check_act_status);

/**
* drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
- * @clock: dot clock for the mode
- * @bpp: bpp for the mode.
- * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
+ * @clock: dot clock
+ * @bpp: bpp as .4 binary fixed point
*
* This uses the formula in the spec to calculate the PBN value for a mode.
*/
-int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
+int drm_dp_calc_pbn_mode(int clock, int bpp)
{
/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
@@ -4707,18 +4706,9 @@ int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
* peak_kbps *= (1006/1000)
* peak_kbps *= (64/54)
* peak_kbps *= 8 convert to bytes
- *
- * If the bpp is in units of 1/16, further divide by 16. Put this
- * factor in the numerator rather than the denominator to avoid
- * integer overflow
*/
-
- if (dsc)
- return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
- 8 * 54 * 1000 * 1000);
-
- return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
- 8 * 54 * 1000 * 1000);
+ return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006 >> 4),
+ 1000 * 8 * 54 * 1000);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
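
To make the reworked fixed-point math above concrete, a worked example (illustrative numbers, not from the patch): for a 148500 kHz dot clock at 24 bpp the callers now pass bpp << 4 = 384, and

	PBN = DIV_ROUND_UP_ULL(148500 * 384 * (64 * 1006 >> 4), 1000 * 8 * 54 * 1000)
	    = DIV_ROUND_UP_ULL(57024000 * 4024, 432000000)
	    = DIV_ROUND_UP_ULL(229464576000, 432000000)
	    = 532

which is the same value the old drm_dp_calc_pbn_mode(148500, 24, false) produced, since the >> 4 in the numerator exactly cancels the << 4 now applied by the callers updated elsewhere in this series.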

diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 3eda026ffac6..71bb8806dc5f 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -940,8 +940,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto err_minors;
}

- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_modeset_register_all(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_modeset_register_all(dev);
+ if (ret)
+ goto err_unload;
+ }

DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver->name, driver->major, driver->minor,
@@ -951,6 +954,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)

goto out_unlock;

+err_unload:
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
err_minors:
remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_ACCEL);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 77bd1313c808..f104bd7f8c2a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -109,8 +109,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
continue;

crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
- dsc ? bpp << 4 : bpp,
- dsc);
+ bpp << 4);

slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
connector->port,
@@ -941,7 +940,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return ret;

if (mode_rate > max_rate || mode->clock > max_dotclk ||
- drm_dp_calc_pbn_mode(mode->clock, min_bpp, false) > port->full_pbn) {
+ drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
*status = MODE_CLOCK_HIGH;
return 0;
}
diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
index 22b65f4a0e30..4beb3b4bd694 100644
--- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
+++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
@@ -342,21 +342,12 @@ static const struct drm_mode_config_helper_funcs imx_lcdc_mode_config_helpers =
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};

-static void imx_lcdc_release(struct drm_device *drm)
-{
- struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(drm);
-
- drm_kms_helper_poll_fini(drm);
- kfree(lcdc);
-}
-
DEFINE_DRM_GEM_DMA_FOPS(imx_lcdc_drm_fops);

static struct drm_driver imx_lcdc_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &imx_lcdc_drm_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
- .release = imx_lcdc_release,
.name = "imx-lcdc",
.desc = "i.MX LCDC driver",
.date = "20200716",
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
index e525a6b9e5b0..22f768d923d5 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_merge.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
@@ -103,7 +103,7 @@ void mtk_merge_stop_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt)
mtk_ddp_write(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CTRL);

- if (priv->async_clk)
+ if (!cmdq_pkt && priv->async_clk)
reset_control_reset(priv->reset_ctl);
}

diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 0e285df6577e..4052a3133b57 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2784,3 +2784,4 @@ MODULE_AUTHOR("Markus Schneider-Pargmann <msp@xxxxxxxxxxxx>");
MODULE_AUTHOR("Bo-Chen Chen <rex-bc.chen@xxxxxxxxxxxx>");
MODULE_DESCRIPTION("MediaTek DisplayPort Driver");
MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: phy_mtk_dp");
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 2f931e4e2b60..bc073a6b367e 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -957,20 +957,6 @@ static const struct mtk_dpi_conf mt8186_conf = {
.csc_enable_bit = CSC_ENABLE,
};

-static const struct mtk_dpi_conf mt8188_dpintf_conf = {
- .cal_factor = mt8195_dpintf_calculate_factor,
- .max_clock_khz = 600000,
- .output_fmts = mt8195_output_fmts,
- .num_output_fmts = ARRAY_SIZE(mt8195_output_fmts),
- .pixels_per_iter = 4,
- .input_2pixel = false,
- .dimension_mask = DPINTF_HPW_MASK,
- .hvsize_mask = DPINTF_HSIZE_MASK,
- .channel_swap_shift = DPINTF_CH_SWAP,
- .yuv422_en_bit = DPINTF_YUV422_EN,
- .csc_enable_bit = DPINTF_CSC_ENABLE,
-};
-
static const struct mtk_dpi_conf mt8192_conf = {
.cal_factor = mt8183_calculate_factor,
.reg_h_fre_con = 0xe0,
@@ -1094,7 +1080,7 @@ static const struct of_device_id mtk_dpi_of_ids[] = {
{ .compatible = "mediatek,mt8173-dpi", .data = &mt8173_conf },
{ .compatible = "mediatek,mt8183-dpi", .data = &mt8183_conf },
{ .compatible = "mediatek,mt8186-dpi", .data = &mt8186_conf },
- { .compatible = "mediatek,mt8188-dp-intf", .data = &mt8188_dpintf_conf },
+ { .compatible = "mediatek,mt8188-dp-intf", .data = &mt8195_dpintf_conf },
{ .compatible = "mediatek,mt8192-dpi", .data = &mt8192_conf },
{ .compatible = "mediatek,mt8195-dp-intf", .data = &mt8195_dpintf_conf },
{ /* sentinel */ },
diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
index c3adaeefd551..c7233d0ac210 100644
--- a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
@@ -246,8 +246,7 @@ int mtk_mdp_rdma_clk_enable(struct device *dev)
{
struct mtk_mdp_rdma *rdma = dev_get_drvdata(dev);

- clk_prepare_enable(rdma->clk);
- return 0;
+ return clk_prepare_enable(rdma->clk);
}

void mtk_mdp_rdma_clk_disable(struct device *dev)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index f2d9d34ed50f..b7b527e21dac 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -462,7 +462,7 @@ static const struct adreno_info gpulist[] = {
{ 190, 1 },
),
}, {
- .chip_ids = ADRENO_CHIP_IDS(0x06080000),
+ .chip_ids = ADRENO_CHIP_IDS(0x06080001),
.family = ADRENO_6XX_GEN2,
.revn = 680,
.fw = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index f3de21025ca7..c92fbf24fbac 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -377,6 +377,7 @@ static const struct dpu_perf_cfg sc8180x_perf_data = {
.min_llcc_ib = 800000,
.min_dram_ib = 800000,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc7180_qos_linear),
.entries = sc7180_qos_linear
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index 5f9b437b82a6..ee781037ada9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -32,7 +32,7 @@ static const struct dpu_mdp_cfg sm8250_mdp = {
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
- [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};

diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
index d030c08636b4..69d3f7e5e095 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -25,7 +25,7 @@ static const struct dpu_mdp_cfg sc7180_mdp = {
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
- [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};

diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
index 3b5061c4402a..9195cb996f44 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
@@ -25,7 +25,7 @@ static const struct dpu_mdp_cfg sc7280_mdp = {
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
- [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 8ce7586e2ddf..e238e4e8116c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@xxxxxxxxx>
@@ -125,7 +125,7 @@ static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state)
continue;

/* Calculate MISR over 1 frame */
- m->hw_lm->ops.setup_misr(m->hw_lm, true, 1);
+ m->hw_lm->ops.setup_misr(m->hw_lm);
}
}

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index d34e684a4178..b02aa2eb6c17 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013 Red Hat
* Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Author: Rob Clark <robdclark@xxxxxxxxx>
*/
@@ -255,7 +255,7 @@ void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
continue;

- phys->hw_intf->ops.setup_misr(phys->hw_intf, true, 1);
+ phys->hw_intf->ops.setup_misr(phys->hw_intf);
}
}

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 8ec6505d9e78..da071b1c02af 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/

@@ -318,9 +318,9 @@ static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
return DPU_REG_READ(c, INTF_LINE_COUNT);
}

-static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf, bool enable, u32 frame_count)
+static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf)
{
- dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, enable, frame_count);
+ dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, 0x1);
}

static int dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf, u32 *misr_value)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 77f80531782b..4e86108bee28 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/

@@ -94,7 +94,7 @@ struct dpu_hw_intf_ops {

void (*bind_pingpong_blk)(struct dpu_hw_intf *intf,
const enum dpu_pingpong pp);
- void (*setup_misr)(struct dpu_hw_intf *intf, bool enable, u32 frame_count);
+ void (*setup_misr)(struct dpu_hw_intf *intf);
int (*collect_misr)(struct dpu_hw_intf *intf, u32 *misr_value);

// Tearcheck on INTF since DPU 5.0.0
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index d1c3bd8379ea..a590c1f7465f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/

@@ -81,9 +81,9 @@ static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
}
}

-static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count)
+static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx)
{
- dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, enable, frame_count);
+ dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, 0x0);
}

static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 36992d046a53..98b77cda6547 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/

@@ -57,7 +58,7 @@ struct dpu_hw_lm_ops {
/**
* setup_misr: Enable/disable MISR
*/
- void (*setup_misr)(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count);
+ void (*setup_misr)(struct dpu_hw_mixer *ctx);

/**
* collect_misr: Read MISR signature
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index 9d2273fd2fed..6eee9f68ab4c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -481,9 +481,11 @@ void _dpu_hw_setup_qos_lut(struct dpu_hw_blk_reg_map *c, u32 offset,
cfg->danger_safe_en ? QOS_QOS_CTRL_DANGER_SAFE_EN : 0);
}

+/*
+ * note: Aside from encoders, input_sel should be set to 0x0 by default
+ */
void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
- u32 misr_ctrl_offset,
- bool enable, u32 frame_count)
+ u32 misr_ctrl_offset, u8 input_sel)
{
u32 config = 0;

@@ -492,15 +494,9 @@ void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
/* Clear old MISR value (in case it's read before a new value is calculated)*/
wmb();

- if (enable) {
- config = (frame_count & MISR_FRAME_COUNT_MASK) |
- MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK;
-
- DPU_REG_WRITE(c, misr_ctrl_offset, config);
- } else {
- DPU_REG_WRITE(c, misr_ctrl_offset, 0);
- }
-
+ config = MISR_FRAME_COUNT | MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK |
+ ((input_sel & 0xF) << 24);
+ DPU_REG_WRITE(c, misr_ctrl_offset, config);
}

int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 1f6079f47071..0aed54d7f6c9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/

@@ -13,7 +13,7 @@
#include "dpu_hw_catalog.h"

#define REG_MASK(n) ((BIT(n)) - 1)
-#define MISR_FRAME_COUNT_MASK 0xFF
+#define MISR_FRAME_COUNT 0x1
#define MISR_CTRL_ENABLE BIT(8)
#define MISR_CTRL_STATUS BIT(9)
#define MISR_CTRL_STATUS_CLEAR BIT(10)
@@ -358,9 +358,7 @@ void _dpu_hw_setup_qos_lut(struct dpu_hw_blk_reg_map *c, u32 offset,
const struct dpu_hw_qos_cfg *cfg);

void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
- u32 misr_ctrl_offset,
- bool enable,
- u32 frame_count);
+ u32 misr_ctrl_offset, u8 input_sel);

int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
u32 misr_ctrl_offset,
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 169f9de4a12a..3100957225a7 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -269,6 +269,7 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ unsigned long flags;

DBG("%s", mdp4_crtc->name);

@@ -281,6 +282,14 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
mdp4_disable(mdp4_kms);

+ if (crtc->state->event && !crtc->state->active) {
+ WARN_ON(mdp4_crtc->event);
+ spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags);
+ }
+
mdp4_crtc->enabled = false;
}

diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 05621e5e7d63..b6314bb66d2f 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -516,7 +516,9 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
struct device *dev = &phy->pdev->dev;
int ret;

- pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;

ret = clk_prepare_enable(phy->ahb_clk);
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 617162aac060..de8041c94de5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -966,8 +966,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
const int clock = crtc_state->adjusted_mode.clock;

asyh->or.bpc = connector->display_info.bpc;
- asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3,
- false);
+ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3 << 4);
}

mst_state = drm_atomic_get_mst_topology_state(state, &mstm->mgr);
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 5b71a5a5cd85..cdbc75e3d1f6 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -39,7 +39,7 @@ struct nv04_fence_priv {
static int
nv04_fence_emit(struct nouveau_fence *fence)
{
- struct nvif_push *push = fence->channel->chan.push;
+ struct nvif_push *push = unrcu_pointer(fence->channel)->chan.push;
int ret = PUSH_WAIT(push, 2);
if (ret == 0) {
PUSH_NVSQ(push, NV_SW, 0x0150, fence->base.seqno);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index afeeb7737552..e000577a95dd 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -69,7 +69,6 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
struct omap_drm_private *priv = dev->dev_private;
- bool fence_cookie = dma_fence_begin_signalling();

dispc_runtime_get(priv->dispc);

@@ -92,6 +91,8 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
omap_atomic_wait_for_completion(dev, old_state);

drm_atomic_helper_commit_planes(dev, old_state, 0);
+
+ drm_atomic_helper_commit_hw_done(old_state);
} else {
/*
* OMAP3 DSS seems to have issues with the work-around above,
@@ -101,11 +102,9 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_planes(dev, old_state, 0);

drm_atomic_helper_commit_modeset_enables(dev, old_state);
- }

- drm_atomic_helper_commit_hw_done(old_state);
-
- dma_fence_end_signalling(fence_cookie);
+ drm_atomic_helper_commit_hw_done(old_state);
+ }

/*
* Wait for completion of the page flips to ensure that old buffers
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index e7be15b68102..6de117232346 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -104,6 +104,8 @@ static int kd35t133_unprepare(struct drm_panel *panel)
return ret;
}

+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
regulator_disable(ctx->iovcc);
regulator_disable(ctx->vdd);

diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
index ad98dd9322b4..227937afe257 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
@@ -261,6 +261,8 @@ static int panel_nv3051d_unprepare(struct drm_panel *panel)

usleep_range(10000, 15000);

+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
regulator_disable(ctx->vdd);

return 0;
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 0459965e1b4f..036ac403ed21 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -288,7 +288,7 @@ static void st7701_init_sequence(struct st7701 *st7701)
FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVDD_MASK,
DIV_ROUND_CLOSEST(desc->avdd_mv - 6200, 200)) |
FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVCL_MASK,
- DIV_ROUND_CLOSEST(-4400 + desc->avcl_mv, 200)));
+ DIV_ROUND_CLOSEST(-4400 - desc->avcl_mv, 200)));

/* T2D = 0.2us * T2D[3:0] */
ST7701_DSI(st7701, DSI_CMD2_BK1_SPD1,
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index d28b99732dde..eca45b83e4e6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -71,7 +71,12 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
}

gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);
- gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL);
+
+ /* Only enable the interrupts we care about */
+ gpu_write(pfdev, GPU_INT_MASK,
+ GPU_IRQ_MASK_ERROR |
+ GPU_IRQ_PERFCNT_SAMPLE_COMPLETED |
+ GPU_IRQ_CLEAN_CACHES_COMPLETED);

return 0;
}
@@ -321,28 +326,38 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
pfdev->features.shader_present, pfdev->features.l2_present);
}

+static u64 panfrost_get_core_mask(struct panfrost_device *pfdev)
+{
+ u64 core_mask;
+
+ if (pfdev->features.l2_present == 1)
+ return U64_MAX;
+
+ /*
+ * Only support one core group now.
+ * ~(l2_present - 1) unsets all bits in l2_present except
+ * the bottom bit. (l2_present - 2) has all the bits in
+ * the first core group set. AND them together to generate
+ * a mask of cores in the first core group.
+ */
+ core_mask = ~(pfdev->features.l2_present - 1) &
+ (pfdev->features.l2_present - 2);
+ dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
+ hweight64(core_mask),
+ hweight64(pfdev->features.shader_present));
+
+ return core_mask;
+}
+
void panfrost_gpu_power_on(struct panfrost_device *pfdev)
{
int ret;
u32 val;
- u64 core_mask = U64_MAX;
+ u64 core_mask;

panfrost_gpu_init_quirks(pfdev);
+ core_mask = panfrost_get_core_mask(pfdev);

- if (pfdev->features.l2_present != 1) {
- /*
- * Only support one core group now.
- * ~(l2_present - 1) unsets all bits in l2_present except
- * the bottom bit. (l2_present - 2) has all the bits in
- * the first core group set. AND them together to generate
- * a mask of cores in the first core group.
- */
- core_mask = ~(pfdev->features.l2_present - 1) &
- (pfdev->features.l2_present - 2);
- dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
- hweight64(core_mask),
- hweight64(pfdev->features.shader_present));
- }
gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present & core_mask);
ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
val, val == (pfdev->features.l2_present & core_mask),
@@ -367,9 +382,26 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)

void panfrost_gpu_power_off(struct panfrost_device *pfdev)
{
- gpu_write(pfdev, TILER_PWROFF_LO, 0);
- gpu_write(pfdev, SHADER_PWROFF_LO, 0);
- gpu_write(pfdev, L2_PWROFF_LO, 0);
+ int ret;
+ u32 val;
+
+ gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present);
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO,
+ val, !val, 1, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "shader power transition timeout");
+
+ gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present);
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO,
+ val, !val, 1, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "tiler power transition timeout");
+
+ gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present);
+ ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO,
+ val, !val, 0, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "l2 power transition timeout");
}

int panfrost_gpu_init(struct panfrost_device *pfdev)
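
To make the core-mask arithmetic in panfrost_get_core_mask() above concrete, a worked example (the register value is illustrative): with two core groups and l2_present = 0x11 (L2 bits 0 and 4),

	~(0x11 - 1) & (0x11 - 2) = ~0x10 & 0x0f = 0x0f

so only shader cores 0-3, the first core group, remain in the mask; a GPU whose l2_present is exactly 1 takes the early U64_MAX return and keeps all cores powered.
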
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index affa9e0309b2..cfeca2694d5f 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2321,7 +2321,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
switch (prim_walk) {
case 1:
for (i = 0; i < track->num_arrays; i++) {
- size = track->arrays[i].esize * track->max_indx * 4;
+ size = track->arrays[i].esize * track->max_indx * 4UL;
if (track->arrays[i].robj == NULL) {
DRM_ERROR("(PW %u) Vertex array %u no buffer "
"bound\n", prim_walk, i);
@@ -2340,7 +2340,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
break;
case 2:
for (i = 0; i < track->num_arrays; i++) {
- size = track->arrays[i].esize * (nverts - 1) * 4;
+ size = track->arrays[i].esize * (nverts - 1) * 4UL;
if (track->arrays[i].robj == NULL) {
DRM_ERROR("(PW %u) Vertex array %u no buffer "
"bound\n", prim_walk, i);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 638f861af80f..6cf54a747749 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1275,7 +1275,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
tmp = (reg - CB_COLOR0_BASE) / 4;
- track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+ track->cb_color_bo_offset[tmp] = (u64)radeon_get_ib_value(p, idx) << 8;
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->cb_color_base_last[tmp] = ib[idx];
track->cb_color_bo[tmp] = reloc->robj;
@@ -1302,7 +1302,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- track->htile_offset = radeon_get_ib_value(p, idx) << 8;
+ track->htile_offset = (u64)radeon_get_ib_value(p, idx) << 8;
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->htile_bo = reloc->robj;
track->db_dirty = true;
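
The r600_cs.c hunks fix the shift-based variant of the same problem: the IB value is 32 bits wide, so value << 8 silently drops the top 8 bits unless the operand is widened to u64 first. A small sketch with an arbitrary offset value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ib_value = 0x01000000;			/* invented offset value */
	uint64_t lost = ib_value << 8;			/* shifted as 32-bit: wraps to 0 */
	uint64_t kept = (uint64_t)ib_value << 8;	/* widen first, then shift */

	/* Prints "lost: 0, kept: 0x100000000". */
	printf("lost: %#llx, kept: %#llx\n",
	       (unsigned long long)lost, (unsigned long long)kept);
	return 0;
}
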
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 901e75ec70ff..efd18c8d84c8 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -687,11 +687,16 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
if (radeon_crtc == NULL)
return;

+ radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
+ if (!radeon_crtc->flip_queue) {
+ kfree(radeon_crtc);
+ return;
+ }
+
drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);

drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
radeon_crtc->crtc_id = index;
- radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
rdev->mode_info.crtcs[index] = radeon_crtc;

if (rdev->family >= CHIP_BONAIRE) {
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 987cabbf1318..c38b4d5d6a14 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -1204,13 +1204,17 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
r = radeon_bo_create(rdev, pd_size, align, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
NULL, &vm->page_directory);
- if (r)
+ if (r) {
+ kfree(vm->page_tables);
+ vm->page_tables = NULL;
return r;
-
+ }
r = radeon_vm_clear_bo(rdev, vm->page_directory);
if (r) {
radeon_bo_unref(&vm->page_directory);
vm->page_directory = NULL;
+ kfree(vm->page_tables);
+ vm->page_tables = NULL;
return r;
}

diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index a91012447b56..85e9cba49cec 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3611,6 +3611,10 @@ static int si_cp_start(struct radeon_device *rdev)
for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
ring = &rdev->ring[i];
r = radeon_ring_lock(rdev, ring, 2);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }

/* clear the compute context state */
radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index f74f381af05f..d49c145db437 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1493,8 +1493,10 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
- if (!rdev->pm.power_state[i].clock_info)
+ if (!rdev->pm.power_state[i].clock_info) {
+ kfree(rdev->pm.dpm.ps);
return -EINVAL;
+ }
ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 08ea1c864cb2..ef1cc7bad20a 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1726,8 +1726,10 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
- if (!rdev->pm.power_state[i].clock_info)
+ if (!rdev->pm.power_state[i].clock_info) {
+ kfree(rdev->pm.dpm.ps);
return -EINVAL;
+ }
ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);
diff --git a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
index 545beea33e8c..e3c818dfc0e6 100644
--- a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
@@ -42,13 +42,13 @@ static const struct drm_dp_mst_calc_pbn_mode_test drm_dp_mst_calc_pbn_mode_cases
.clock = 332880,
.bpp = 24,
.dsc = true,
- .expected = 50
+ .expected = 1191
},
{
.clock = 324540,
.bpp = 24,
.dsc = true,
- .expected = 49
+ .expected = 1161
},
};

@@ -56,7 +56,7 @@ static void drm_test_dp_mst_calc_pbn_mode(struct kunit *test)
{
const struct drm_dp_mst_calc_pbn_mode_test *params = test->param_value;

- KUNIT_EXPECT_EQ(test, drm_dp_calc_pbn_mode(params->clock, params->bpp, params->dsc),
+ KUNIT_EXPECT_EQ(test, drm_dp_calc_pbn_mode(params->clock, params->bpp << 4),
params->expected);
}

diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index 9d9dee7abaef..98efbaf3b0c2 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -2702,18 +2702,69 @@ static void dispc_init_errata(struct dispc_device *dispc)
}
}

-static void dispc_softreset(struct dispc_device *dispc)
+static int dispc_softreset(struct dispc_device *dispc)
{
u32 val;
int ret = 0;

+ /* K2G display controller does not support soft reset */
+ if (dispc->feat->subrev == DISPC_K2G)
+ return 0;
+
/* Soft reset */
REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1);
/* Wait for reset to complete */
ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS,
val, val & 1, 100, 5000);
+ if (ret) {
+ dev_err(dispc->dev, "failed to reset dispc\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dispc_init_hw(struct dispc_device *dispc)
+{
+ struct device *dev = dispc->dev;
+ int ret;
+
+ ret = pm_runtime_set_active(dev);
+ if (ret) {
+ dev_err(dev, "Failed to set DSS PM to active\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dispc->fclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable DSS fclk\n");
+ goto err_runtime_suspend;
+ }
+
+ ret = dispc_softreset(dispc);
if (ret)
- dev_warn(dispc->dev, "failed to reset dispc\n");
+ goto err_clk_disable;
+
+ clk_disable_unprepare(dispc->fclk);
+ ret = pm_runtime_set_suspended(dev);
+ if (ret) {
+ dev_err(dev, "Failed to set DSS PM to suspended\n");
+ return ret;
+ }
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(dispc->fclk);
+
+err_runtime_suspend:
+ ret = pm_runtime_set_suspended(dev);
+ if (ret) {
+ dev_err(dev, "Failed to set DSS PM to suspended\n");
+ return ret;
+ }
+
+ return ret;
}

int dispc_init(struct tidss_device *tidss)
@@ -2777,10 +2828,6 @@ int dispc_init(struct tidss_device *tidss)
return r;
}

- /* K2G display controller does not support soft reset */
- if (feat->subrev != DISPC_K2G)
- dispc_softreset(dispc);
-
for (i = 0; i < dispc->feat->num_vps; i++) {
u32 gamma_size = dispc->feat->vp_feat.color.gamma_size;
u32 *gamma_table;
@@ -2829,6 +2876,10 @@ int dispc_init(struct tidss_device *tidss)
of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth",
&dispc->memory_bandwidth_limit);

+ r = dispc_init_hw(dispc);
+ if (r)
+ return r;
+
tidss->dispc = dispc;

return 0;
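
dispc_init_hw() uses the usual acquire-in-order / release-in-reverse goto ladder: runtime-PM state, then the functional clock, then the soft reset, unwinding only the steps that already succeeded. A plain C analogue of that shape (malloc/free stand in for the PM and clock calls; nothing here is TI-specific):

#include <stdio.h>
#include <stdlib.h>

/* Shape of dispc_init_hw(): acquire in order, release in reverse on error.
 * The "reset" step is just a flag so both paths can be exercised. */
static int init_hw(int reset_fails)
{
	char *pm_state, *fclk;

	pm_state = malloc(32);
	if (!pm_state)
		return -1;

	fclk = malloc(32);
	if (!fclk)
		goto err_drop_pm_state;

	if (reset_fails)
		goto err_disable_fclk;

	/* Success: the real code also drops the clock and PM state here and
	 * lets runtime PM bring them back on demand. */
	free(fclk);
	free(pm_state);
	return 0;

err_disable_fclk:
	free(fclk);
err_drop_pm_state:
	free(pm_state);
	return -1;
}

int main(void)
{
	printf("ok path: %d, reset-failure path: %d\n", init_hw(0), init_hw(1));
	return 0;
}
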
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index c979ad1af236..d096d8d2bc8f 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -4,8 +4,6 @@
* Author: Tomi Valkeinen <tomi.valkeinen@xxxxxx>
*/

-#include <linux/dma-fence.h>
-
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -25,7 +23,6 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *ddev = old_state->dev;
struct tidss_device *tidss = to_tidss(ddev);
- bool fence_cookie = dma_fence_begin_signalling();

dev_dbg(ddev->dev, "%s\n", __func__);

@@ -36,7 +33,6 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_modeset_enables(ddev, old_state);

drm_atomic_helper_commit_hw_done(old_state);
- dma_fence_end_signalling(fence_cookie);
drm_atomic_helper_wait_for_flip_done(ddev, old_state);

drm_atomic_helper_cleanup_planes(ddev, old_state);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 8ebd7134ee21..2f6eaac7f659 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -138,7 +138,7 @@ static int tilcdc_irq_install(struct drm_device *dev, unsigned int irq)
if (ret)
return ret;

- priv->irq_enabled = false;
+ priv->irq_enabled = true;

return 0;
}
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 2eba152e8b90..26e93a331a51 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -632,7 +632,7 @@ static int sensor_hub_probe(struct hid_device *hdev,
}
INIT_LIST_HEAD(&hdev->inputs);

- ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT | HID_CONNECT_DRIVER);
if (ret) {
hid_err(hdev, "hw start failed\n");
return ret;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 471db78dbbf0..8289ce763704 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2649,8 +2649,8 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
{
struct hid_data *hid_data = &wacom_wac->hid_data;
bool mt = wacom_wac->features.touch_max > 1;
- bool prox = hid_data->tipswitch &&
- report_touch_events(wacom_wac);
+ bool touch_down = hid_data->tipswitch && hid_data->confidence;
+ bool prox = touch_down && report_touch_events(wacom_wac);

if (touch_is_muted(wacom_wac)) {
if (!wacom_wac->shared->touch_down)
@@ -2700,24 +2700,6 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
}
}

-static bool wacom_wac_slot_is_active(struct input_dev *dev, int key)
-{
- struct input_mt *mt = dev->mt;
- struct input_mt_slot *s;
-
- if (!mt)
- return false;
-
- for (s = mt->slots; s != mt->slots + mt->num_slots; s++) {
- if (s->key == key &&
- input_mt_get_value(s, ABS_MT_TRACKING_ID) >= 0) {
- return true;
- }
- }
-
- return false;
-}
-
static void wacom_wac_finger_event(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage, __s32 value)
{
@@ -2768,14 +2750,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
}

if (usage->usage_index + 1 == field->report_count) {
- if (equivalent_usage == wacom_wac->hid_data.last_slot_field) {
- bool touch_removed = wacom_wac_slot_is_active(wacom_wac->touch_input,
- wacom_wac->hid_data.id) && !wacom_wac->hid_data.tipswitch;
-
- if (wacom_wac->hid_data.confidence || touch_removed) {
- wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
- }
- }
+ if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
+ wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
}
}

diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 127eb3805fac..c324cb3c97e2 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -216,8 +216,17 @@ static bool is_ack(struct s3c24xx_i2c *i2c)
int tries;

for (tries = 50; tries; --tries) {
- if (readl(i2c->regs + S3C2410_IICCON)
- & S3C2410_IICCON_IRQPEND) {
+ unsigned long tmp = readl(i2c->regs + S3C2410_IICCON);
+
+ if (!(tmp & S3C2410_IICCON_ACKEN)) {
+ /*
+ * Wait a bit for the bus to stabilize,
+ * delay estimated experimentally.
+ */
+ usleep_range(100, 200);
+ return true;
+ }
+ if (tmp & S3C2410_IICCON_IRQPEND) {
if (!(readl(i2c->regs + S3C2410_IICSTAT)
& S3C2410_IICSTAT_LASTBIT))
return true;
@@ -270,16 +279,6 @@ static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c,

stat |= S3C2410_IICSTAT_START;
writel(stat, i2c->regs + S3C2410_IICSTAT);
-
- if (i2c->quirks & QUIRK_POLL) {
- while ((i2c->msg_num != 0) && is_ack(i2c)) {
- i2c_s3c_irq_nextbyte(i2c, stat);
- stat = readl(i2c->regs + S3C2410_IICSTAT);
-
- if (stat & S3C2410_IICSTAT_ARBITR)
- dev_err(i2c->dev, "deal with arbitration loss\n");
- }
- }
}

static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret)
@@ -686,7 +685,7 @@ static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c)
static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
struct i2c_msg *msgs, int num)
{
- unsigned long timeout;
+ unsigned long timeout = 0;
int ret;

ret = s3c24xx_i2c_set_master(i2c);
@@ -706,16 +705,19 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
s3c24xx_i2c_message_start(i2c, msgs);

if (i2c->quirks & QUIRK_POLL) {
- ret = i2c->msg_idx;
+ while ((i2c->msg_num != 0) && is_ack(i2c)) {
+ unsigned long stat = readl(i2c->regs + S3C2410_IICSTAT);

- if (ret != num)
- dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
+ i2c_s3c_irq_nextbyte(i2c, stat);

- goto out;
+ stat = readl(i2c->regs + S3C2410_IICSTAT);
+ if (stat & S3C2410_IICSTAT_ARBITR)
+ dev_err(i2c->dev, "deal with arbitration loss\n");
+ }
+ } else {
+ timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
}

- timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
-
ret = i2c->msg_idx;

/*
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index ea5a6a14c553..45500d2d5b4b 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -131,11 +131,12 @@ static unsigned int mwait_substates __initdata;
#define MWAIT2flg(eax) ((eax & 0xFF) << 24)

static __always_inline int __intel_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+ struct cpuidle_driver *drv,
+ int index, bool irqoff)
{
struct cpuidle_state *state = &drv->states[index];
unsigned long eax = flg2MWAIT(state->flags);
- unsigned long ecx = 1; /* break on interrupt flag */
+ unsigned long ecx = 1*irqoff; /* break on interrupt flag */

mwait_idle_with_hints(eax, ecx);

@@ -159,19 +160,13 @@ static __always_inline int __intel_idle(struct cpuidle_device *dev,
static __cpuidle int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- return __intel_idle(dev, drv, index);
+ return __intel_idle(dev, drv, index, true);
}

static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- int ret;
-
- raw_local_irq_enable();
- ret = __intel_idle(dev, drv, index);
- raw_local_irq_disable();
-
- return ret;
+ return __intel_idle(dev, drv, index, false);
}

static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
@@ -184,7 +179,7 @@ static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
if (smt_active)
native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

- ret = __intel_idle(dev, drv, index);
+ ret = __intel_idle(dev, drv, index, true);

if (smt_active)
native_wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
@@ -196,7 +191,7 @@ static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
fpu_idle_fpregs();
- return __intel_idle(dev, drv, index);
+ return __intel_idle(dev, drv, index, true);
}

/**
diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c
index 8e252cde735b..0e5d3d2e9c98 100644
--- a/drivers/iio/adc/ad7091r-base.c
+++ b/drivers/iio/adc/ad7091r-base.c
@@ -174,8 +174,8 @@ static const struct iio_info ad7091r_info = {

static irqreturn_t ad7091r_event_handler(int irq, void *private)
{
- struct ad7091r_state *st = (struct ad7091r_state *) private;
- struct iio_dev *iio_dev = dev_get_drvdata(st->dev);
+ struct iio_dev *iio_dev = private;
+ struct ad7091r_state *st = iio_priv(iio_dev);
unsigned int i, read_val;
int ret;
s64 timestamp = iio_get_time_ns(iio_dev);
@@ -234,7 +234,7 @@ int ad7091r_probe(struct device *dev, const char *name,
if (irq) {
ret = devm_request_threaded_irq(dev, irq, NULL,
ad7091r_event_handler,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, st);
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, iio_dev);
if (ret)
return ret;
}
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index 39eccc28debe..f668313730cb 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -4,8 +4,9 @@
*
* Copyright 2012-2020 Analog Devices Inc.
*/
-
+#include <linux/cleanup.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -119,9 +120,11 @@ struct ad9467_state {
struct spi_device *spi;
struct clk *clk;
unsigned int output_mode;
+ unsigned int (*scales)[2];

struct gpio_desc *pwrdown_gpio;
- struct gpio_desc *reset_gpio;
+ /* ensure consistent state obtained on multiple related accesses */
+ struct mutex lock;
};

static int ad9467_spi_read(struct spi_device *spi, unsigned int reg)
@@ -162,10 +165,12 @@ static int ad9467_reg_access(struct adi_axi_adc_conv *conv, unsigned int reg,
int ret;

if (readval == NULL) {
+ guard(mutex)(&st->lock);
ret = ad9467_spi_write(spi, reg, writeval);
- ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
- AN877_ADC_TRANSFER_SYNC);
- return ret;
+ if (ret)
+ return ret;
+ return ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
+ AN877_ADC_TRANSFER_SYNC);
}

ret = ad9467_spi_read(spi, reg);
@@ -212,6 +217,7 @@ static void __ad9467_get_scale(struct adi_axi_adc_conv *conv, int index,
.channel = _chan, \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), \
.scan_index = _si, \
.scan_type = { \
.sign = _sign, \
@@ -273,10 +279,13 @@ static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
const struct ad9467_chip_info *info1 = to_ad9467_chip_info(info);
struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
unsigned int i, vref_val;
+ int ret;

- vref_val = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF);
+ ret = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF);
+ if (ret < 0)
+ return ret;

- vref_val &= info1->vref_mask;
+ vref_val = ret & info1->vref_mask;

for (i = 0; i < info->num_scales; i++) {
if (vref_val == info->scale_table[i][1])
@@ -297,6 +306,7 @@ static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
unsigned int scale_val[2];
unsigned int i;
+ int ret;

if (val != 0)
return -EINVAL;
@@ -306,11 +316,14 @@ static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
if (scale_val[0] != val || scale_val[1] != val2)
continue;

- ad9467_spi_write(st->spi, AN877_ADC_REG_VREF,
- info->scale_table[i][1]);
- ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
- AN877_ADC_TRANSFER_SYNC);
- return 0;
+ guard(mutex)(&st->lock);
+ ret = ad9467_spi_write(st->spi, AN877_ADC_REG_VREF,
+ info->scale_table[i][1]);
+ if (ret < 0)
+ return ret;
+
+ return ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
+ AN877_ADC_TRANSFER_SYNC);
}

return -EINVAL;
@@ -359,6 +372,26 @@ static int ad9467_write_raw(struct adi_axi_adc_conv *conv,
}
}

+static int ad9467_read_avail(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (const int *)st->scales;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* Values are stored in a 2D matrix */
+ *length = info->num_scales * 2;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
{
int ret;
@@ -371,6 +404,26 @@ static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
AN877_ADC_TRANSFER_SYNC);
}

+static int ad9467_scale_fill(struct adi_axi_adc_conv *conv)
+{
+ const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ unsigned int i, val1, val2;
+
+ st->scales = devm_kmalloc_array(&st->spi->dev, info->num_scales,
+ sizeof(*st->scales), GFP_KERNEL);
+ if (!st->scales)
+ return -ENOMEM;
+
+ for (i = 0; i < info->num_scales; i++) {
+ __ad9467_get_scale(conv, i, &val1, &val2);
+ st->scales[i][0] = val1;
+ st->scales[i][1] = val2;
+ }
+
+ return 0;
+}
+
static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
{
struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
@@ -378,6 +431,21 @@ static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
return ad9467_outputmode_set(st->spi, st->output_mode);
}

+static int ad9467_reset(struct device *dev)
+{
+ struct gpio_desc *gpio;
+
+ gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR_OR_NULL(gpio))
+ return PTR_ERR_OR_ZERO(gpio);
+
+ fsleep(1);
+ gpiod_set_value_cansleep(gpio, 0);
+ fsleep(10 * USEC_PER_MSEC);
+
+ return 0;
+}
+
static int ad9467_probe(struct spi_device *spi)
{
const struct ad9467_chip_info *info;
@@ -408,21 +476,16 @@ static int ad9467_probe(struct spi_device *spi)
if (IS_ERR(st->pwrdown_gpio))
return PTR_ERR(st->pwrdown_gpio);

- st->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
- GPIOD_OUT_LOW);
- if (IS_ERR(st->reset_gpio))
- return PTR_ERR(st->reset_gpio);
-
- if (st->reset_gpio) {
- udelay(1);
- ret = gpiod_direction_output(st->reset_gpio, 1);
- if (ret)
- return ret;
- mdelay(10);
- }
+ ret = ad9467_reset(&spi->dev);
+ if (ret)
+ return ret;

conv->chip_info = &info->axi_adc_info;

+ ret = ad9467_scale_fill(conv);
+ if (ret)
+ return ret;
+
id = ad9467_spi_read(spi, AN877_ADC_REG_CHIP_ID);
if (id != conv->chip_info->id) {
dev_err(&spi->dev, "Mismatch CHIP_ID, got 0x%X, expected 0x%X\n",
@@ -433,6 +496,7 @@ static int ad9467_probe(struct spi_device *spi)
conv->reg_access = ad9467_reg_access;
conv->write_raw = ad9467_write_raw;
conv->read_raw = ad9467_read_raw;
+ conv->read_avail = ad9467_read_avail;
conv->preenable_setup = ad9467_preenable_setup;

st->output_mode = info->default_output_mode |
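
ad9467_reg_access() and ad9467_set_scale() now rely on guard(mutex)(&st->lock) from the new <linux/cleanup.h> include, so st->lock is released automatically on every return path. The macro is built on the compiler's cleanup attribute; a userspace sketch of the same mechanism with a pthread mutex (illustrative only, not the kernel macro itself):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Runs automatically when the guard variable goes out of scope. */
static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* Rough userspace stand-in for guard(mutex)(&lock). */
#define scoped_guard_mutex(m) \
	pthread_mutex_t *scoped_guard \
		__attribute__((cleanup(unlock_cleanup), unused)) = \
		(pthread_mutex_lock(m), (m))

static int write_two_registers(int fail_first_write)
{
	scoped_guard_mutex(&lock);

	if (fail_first_write)
		return -1;	/* early return: the mutex is still released */

	return 0;		/* normal return: same */
}

int main(void)
{
	printf("%d %d\n", write_two_registers(1), write_two_registers(0));
	return 0;
}
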
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index aff0532a974a..ae83ada7f9f2 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -144,6 +144,20 @@ static int adi_axi_adc_write_raw(struct iio_dev *indio_dev,
return conv->write_raw(conv, chan, val, val2, mask);
}

+static int adi_axi_adc_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct adi_axi_adc_state *st = iio_priv(indio_dev);
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+
+ if (!conv->read_avail)
+ return -EOPNOTSUPP;
+
+ return conv->read_avail(conv, chan, vals, type, length, mask);
+}
+
static int adi_axi_adc_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
@@ -228,69 +242,11 @@ struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
}
EXPORT_SYMBOL_NS_GPL(devm_adi_axi_adc_conv_register, IIO_ADI_AXI);

-static ssize_t in_voltage_scale_available_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adi_axi_adc_state *st = iio_priv(indio_dev);
- struct adi_axi_adc_conv *conv = &st->client->conv;
- size_t len = 0;
- int i;
-
- for (i = 0; i < conv->chip_info->num_scales; i++) {
- const unsigned int *s = conv->chip_info->scale_table[i];
-
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "%u.%06u ", s[0], s[1]);
- }
- buf[len - 1] = '\n';
-
- return len;
-}
-
-static IIO_DEVICE_ATTR_RO(in_voltage_scale_available, 0);
-
-enum {
- ADI_AXI_ATTR_SCALE_AVAIL,
-};
-
-#define ADI_AXI_ATTR(_en_, _file_) \
- [ADI_AXI_ATTR_##_en_] = &iio_dev_attr_##_file_.dev_attr.attr
-
-static struct attribute *adi_axi_adc_attributes[] = {
- ADI_AXI_ATTR(SCALE_AVAIL, in_voltage_scale_available),
- NULL
-};
-
-static umode_t axi_adc_attr_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adi_axi_adc_state *st = iio_priv(indio_dev);
- struct adi_axi_adc_conv *conv = &st->client->conv;
-
- switch (n) {
- case ADI_AXI_ATTR_SCALE_AVAIL:
- if (!conv->chip_info->num_scales)
- return 0;
- return attr->mode;
- default:
- return attr->mode;
- }
-}
-
-static const struct attribute_group adi_axi_adc_attribute_group = {
- .attrs = adi_axi_adc_attributes,
- .is_visible = axi_adc_attr_is_visible,
-};
-
static const struct iio_info adi_axi_adc_info = {
.read_raw = &adi_axi_adc_read_raw,
.write_raw = &adi_axi_adc_write_raw,
- .attrs = &adi_axi_adc_attribute_group,
.update_scan_mode = &adi_axi_adc_update_scan_mode,
+ .read_avail = &adi_axi_adc_read_avail,
};

static const struct adi_axi_adc_core_info adi_axi_adc_10_0_a_info = {
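
With the sysfs attribute gone, ad9467_read_avail() hands the scale table to the IIO core as an IIO_AVAIL_LIST of IIO_VAL_INT_PLUS_MICRO pairs: st->scales is an array of [2] entries and *length is num_scales * 2 because the core consumes it as one flat int stream. A quick illustration of that flattening with invented scale values, rendered the same way the removed "%u.%06u" attribute did:

#include <stdio.h>

int main(void)
{
	/* Invented scale table: { integer part, micro part } per entry. */
	static const unsigned int scales[3][2] = {
		{ 0, 30517 },
		{ 0, 38146 },
		{ 0, 45776 },
	};
	const unsigned int *flat = &scales[0][0];
	unsigned int length = 3 * 2;	/* num_scales * 2, as in ad9467_read_avail() */
	unsigned int i;

	for (i = 0; i < length; i += 2)
		printf("%u.%06u ", flat[i], flat[i + 1]);
	printf("\n");
	return 0;
}
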
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 486d635b6e3a..3c62a0042da4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2693,6 +2693,10 @@ static int free_mr_alloc_res(struct hns_roce_dev *hr_dev)
return 0;

create_failed_qp:
+ for (i--; i >= 0; i--) {
+ hns_roce_v2_destroy_qp(&free_mr->rsv_qp[i]->ibqp, NULL);
+ kfree(free_mr->rsv_qp[i]);
+ }
hns_roce_destroy_cq(cq, NULL);
kfree(cq);

@@ -5634,7 +5638,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,

/* Resizing SRQs is not supported yet */
if (srq_attr_mask & IB_SRQ_MAX_WR)
- return -EINVAL;
+ return -EOPNOTSUPP;

if (srq_attr_mask & IB_SRQ_LIMIT) {
if (srq_attr->srq_limit > srq->wqe_cnt)
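
The new create_failed_qp path in free_mr_alloc_res() walks i back down so that only the QPs created before the failure are destroyed. The same partial-unwind shape in plain C, with heap allocations standing in for the QPs (hypothetical, nothing RDMA-specific):

#include <stdio.h>
#include <stdlib.h>

#define NR_RES 4

static int alloc_all(char *res[NR_RES], int fail_at)
{
	int i;

	for (i = 0; i < NR_RES; i++) {
		res[i] = (i == fail_at) ? NULL : malloc(16);
		if (!res[i])
			goto err_unwind;
	}
	return 0;

err_unwind:
	/* Free only what was actually allocated: entries 0 .. i-1. */
	for (i--; i >= 0; i--) {
		free(res[i]);
		res[i] = NULL;
	}
	return -1;
}

int main(void)
{
	char *res[NR_RES];
	int i;

	printf("fail at 2 -> %d\n", alloc_all(res, 2));
	printf("no failure -> %d\n", alloc_all(res, -1));

	for (i = 0; i < NR_RES; i++)	/* release the successful run */
		free(res[i]);
	return 0;
}
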
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 783e71852c50..bd1fe89ca205 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -150,7 +150,7 @@ int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
int ret;

if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
- return -EINVAL;
+ return -EOPNOTSUPP;

ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);
if (ret)
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index f330ce895d88..8fe0cef7e2be 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -635,7 +635,7 @@ void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)

int mthca_SYS_EN(struct mthca_dev *dev)
{
- u64 out;
+ u64 out = 0;
int ret;

ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D);
@@ -1955,7 +1955,7 @@ int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
u16 *hash)
{
- u64 imm;
+ u64 imm = 0;
int err;

err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index b54bc8865dae..1ab268b77096 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -382,7 +382,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
struct mthca_init_hca_param *init_hca,
u64 icm_size)
{
- u64 aux_pages;
+ u64 aux_pages = 0;
int err;

err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index dee8c97ff056..d967d5532459 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -317,12 +317,10 @@ struct iser_device {
*
* @mr: memory region
* @sig_mr: signature memory region
- * @mr_valid: is mr valid indicator
*/
struct iser_reg_resources {
struct ib_mr *mr;
struct ib_mr *sig_mr;
- u8 mr_valid:1;
};

/**
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 39ea73f69016..f5f090dc4f1e 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -581,7 +581,10 @@ static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
return -EINVAL;
}

- desc->rsc.mr_valid = 0;
+ if (desc->sig_protected)
+ desc->rsc.sig_mr->need_inval = false;
+ else
+ desc->rsc.mr->need_inval = false;

return 0;
}
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 29ae2c6a250a..6efcb79c8efe 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -264,7 +264,7 @@ static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task,

iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);

- if (rsc->mr_valid)
+ if (rsc->sig_mr->need_inval)
iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);

ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
@@ -288,7 +288,7 @@ static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
wr->access = IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE;
- rsc->mr_valid = 1;
+ rsc->sig_mr->need_inval = true;

sig_reg->sge.lkey = mr->lkey;
sig_reg->rkey = mr->rkey;
@@ -313,7 +313,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
struct ib_reg_wr *wr = &tx_desc->reg_wr;
int n;

- if (rsc->mr_valid)
+ if (rsc->mr->need_inval)
iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);

ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
@@ -336,7 +336,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ;

- rsc->mr_valid = 1;
+ rsc->mr->need_inval = true;

reg->sge.lkey = mr->lkey;
reg->rkey = mr->rkey;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 95b8eebf7e04..6801b70dc9e0 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -129,7 +129,6 @@ iser_create_fastreg_desc(struct iser_device *device,
goto err_alloc_mr_integrity;
}
}
- desc->rsc.mr_valid = 0;

return desc;

diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 786f00f6b7fd..13ef6284223d 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -791,9 +791,9 @@ static bool atkbd_is_portable_device(void)
* not work. So in this case simply assume a keyboard is connected to avoid
* confusing some laptop keyboards.
*
- * Skipping ATKBD_CMD_GETID ends up using a fake keyboard id. Using a fake id is
- * ok in translated mode, only atkbd_select_set() checks atkbd->id and in
- * translated mode that is a no-op.
+ * Skipping ATKBD_CMD_GETID ends up using a fake keyboard id. Using the standard
+ * 0xab83 id is ok in translated mode, only atkbd_select_set() checks atkbd->id
+ * and in translated mode that is a no-op.
*/
static bool atkbd_skip_getid(struct atkbd *atkbd)
{
@@ -811,6 +811,7 @@ static int atkbd_probe(struct atkbd *atkbd)
{
struct ps2dev *ps2dev = &atkbd->ps2dev;
unsigned char param[2];
+ bool skip_getid;

/*
* Some systems, where the bit-twiddling when testing the io-lines of the
@@ -832,7 +833,8 @@ static int atkbd_probe(struct atkbd *atkbd)
*/

param[0] = param[1] = 0xa5; /* initialize with invalid values */
- if (atkbd_skip_getid(atkbd) || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
+ skip_getid = atkbd_skip_getid(atkbd);
+ if (skip_getid || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {

/*
* If the get ID command was skipped or failed, we check if we can at least set
@@ -842,7 +844,7 @@ static int atkbd_probe(struct atkbd *atkbd)
param[0] = 0;
if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
return -1;
- atkbd->id = 0xabba;
+ atkbd->id = skip_getid ? 0xab83 : 0xabba;
return 0;
}

diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 7f52ac67495f..40503376d80c 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -243,6 +243,7 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,

static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,adreno" },
+ { .compatible = "qcom,adreno-gmu" },
{ .compatible = "qcom,mdp4" },
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,sc7180-mdss" },
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4b1a88f514c9..e5d087bd6da1 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
+#include <trace/events/swiotlb.h>

#include "dma-iommu.h"

@@ -1052,6 +1053,8 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
return DMA_MAPPING_ERROR;
}

+ trace_swiotlb_bounced(dev, phys, size);
+
aligned_size = iova_align(iovad, size);
phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
iova_mask(iovad), dir, attrs);
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 35ba090f3b5e..42cffb0ee5e2 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -260,7 +260,14 @@ void of_iommu_get_resv_regions(struct device *dev, struct list_head *list)
phys_addr_t iova;
size_t length;

+ if (of_dma_is_coherent(dev->of_node))
+ prot |= IOMMU_CACHE;
+
maps = of_translate_dma_region(np, maps, &iova, &length);
+ if (length == 0) {
+ dev_warn(dev, "Cannot reserve IOVA region of 0 size\n");
+ continue;
+ }
type = iommu_resv_region_get_type(dev, &phys, iova, length);

region = iommu_alloc_resv_region(iova, length, prot, type,
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index b92208eccdea..3132439f99e0 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -110,6 +110,7 @@ config LEDS_AW200XX
config LEDS_AW2013
tristate "LED support for Awinic AW2013"
depends on LEDS_CLASS && I2C && OF
+ select REGMAP_I2C
help
This option enables support for the AW2013 3-channel
LED driver.
diff --git a/drivers/leds/leds-aw200xx.c b/drivers/leds/leds-aw200xx.c
index 691a743cc9b0..5142efea2339 100644
--- a/drivers/leds/leds-aw200xx.c
+++ b/drivers/leds/leds-aw200xx.c
@@ -74,6 +74,10 @@
#define AW200XX_LED2REG(x, columns) \
((x) + (((x) / (columns)) * (AW200XX_DSIZE_COLUMNS_MAX - (columns))))

+/* DIM current configuration register on page 1 */
+#define AW200XX_REG_DIM_PAGE1(x, columns) \
+ AW200XX_REG(AW200XX_PAGE1, AW200XX_LED2REG(x, columns))
+
/*
* DIM current configuration register (page 4).
* The even address for current DIM configuration.
@@ -153,7 +157,8 @@ static ssize_t dim_store(struct device *dev, struct device_attribute *devattr,

if (dim >= 0) {
ret = regmap_write(chip->regmap,
- AW200XX_REG_DIM(led->num, columns), dim);
+ AW200XX_REG_DIM_PAGE1(led->num, columns),
+ dim);
if (ret)
goto out_unlock;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b2ef6af8376a..8c40c1c3959c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -494,6 +494,9 @@ static void md_end_flush(struct bio *bio)
rdev_dec_pending(rdev, mddev);

if (atomic_dec_and_test(&mddev->flush_pending)) {
+ /* The pair is percpu_ref_get() from md_flush_request() */
+ percpu_ref_put(&mddev->active_io);
+
/* The pre-request flush has finished */
queue_work(md_wq, &mddev->flush_work);
}
@@ -513,12 +516,8 @@ static void submit_flushes(struct work_struct *ws)
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags)) {
- /* Take two references, one is dropped
- * when request finishes, one after
- * we reclaim rcu_read_lock
- */
struct bio *bi;
- atomic_inc(&rdev->nr_pending);
+
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
bi = bio_alloc_bioset(rdev->bdev, 0,
@@ -529,7 +528,6 @@ static void submit_flushes(struct work_struct *ws)
atomic_inc(&mddev->flush_pending);
submit_bio(bi);
rcu_read_lock();
- rdev_dec_pending(rdev, mddev);
}
rcu_read_unlock();
if (atomic_dec_and_test(&mddev->flush_pending))
@@ -582,6 +580,18 @@ bool md_flush_request(struct mddev *mddev, struct bio *bio)
/* new request after previous flush is completed */
if (ktime_after(req_start, mddev->prev_flush_start)) {
WARN_ON(mddev->flush_bio);
+ /*
+ * Grab a reference to make sure mddev_suspend() will wait for
+ * this flush to be done.
+ *
+	 * md_flush_request() is called under md_handle_request() and
+ * 'active_io' is already grabbed, hence percpu_ref_is_zero()
+ * won't pass, percpu_ref_tryget_live() can't be used because
+ * percpu_ref_kill() can be called by mddev_suspend()
+ * concurrently.
+ */
+ WARN_ON(percpu_ref_is_zero(&mddev->active_io));
+ percpu_ref_get(&mddev->active_io);
mddev->flush_bio = bio;
bio = NULL;
}
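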
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 2aabac773fe7..911670273d1b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1983,12 +1983,12 @@ static void end_sync_write(struct bio *bio)
}

static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
- int sectors, struct page *page, int rw)
+ int sectors, struct page *page, blk_opf_t rw)
{
if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
/* success */
return 1;
- if (rw == WRITE) {
+ if (rw == REQ_OP_WRITE) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement,
&rdev->flags))
@@ -2105,7 +2105,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
rdev = conf->mirrors[d].rdev;
if (r1_sync_page_io(rdev, sect, s,
pages[idx],
- WRITE) == 0) {
+ REQ_OP_WRITE) == 0) {
r1_bio->bios[d]->bi_end_io = NULL;
rdev_dec_pending(rdev, mddev);
}
@@ -2120,7 +2120,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
rdev = conf->mirrors[d].rdev;
if (r1_sync_page_io(rdev, sect, s,
pages[idx],
- READ) != 0)
+ REQ_OP_READ) != 0)
atomic_add(s, &rdev->corrected_errors);
}
sectors -= s;
@@ -2332,7 +2332,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
r1_sync_page_io(rdev, sect, s,
- conf->tmppage, WRITE);
+ conf->tmppage, REQ_OP_WRITE);
rdev_dec_pending(rdev, mddev);
} else
rcu_read_unlock();
@@ -2349,7 +2349,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
if (r1_sync_page_io(rdev, sect, s,
- conf->tmppage, READ)) {
+ conf->tmppage, REQ_OP_READ)) {
atomic_add(s, &rdev->corrected_errors);
pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
mdname(mddev), s,
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4bbd2a247dc7..68d86dbecb4a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -36,6 +36,7 @@
*/

#include <linux/blkdev.h>
+#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
@@ -6842,7 +6843,18 @@ static void raid5d(struct md_thread *thread)
spin_unlock_irq(&conf->device_lock);
md_check_recovery(mddev);
spin_lock_irq(&conf->device_lock);
+
+ /*
+ * Waiting on MD_SB_CHANGE_PENDING below may deadlock
+ * seeing md_check_recovery() is needed to clear
+ * the flag when using mdmon.
+ */
+ continue;
}
+
+ wait_event_lock_irq(mddev->sb_wait,
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
+ conf->device_lock);
}
pr_debug("%d stripes handled\n", handled);

diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 305bb21d843c..49f0eb7d0b9d 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -104,6 +104,8 @@ static int dvb_device_open(struct inode *inode, struct file *file)
err = file->f_op->open(inode, file);
up_read(&minor_rwsem);
mutex_unlock(&dvbdev_mutex);
+ if (err)
+ dvb_device_put(dvbdev);
return err;
}
fail:
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index cf037b61b226..affaa36d3147 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1894,7 +1894,7 @@ static int m88ds3103_probe(struct i2c_client *client)
/* get frontend address */
ret = regmap_read(dev->regmap, 0x29, &utmp);
if (ret)
- goto err_kfree;
+ goto err_del_adapters;
dev->dt_addr = ((utmp & 0x80) == 0) ? 0x42 >> 1 : 0x40 >> 1;
dev_dbg(&client->dev, "dt addr is 0x%02x\n", dev->dt_addr);

@@ -1902,11 +1902,14 @@ static int m88ds3103_probe(struct i2c_client *client)
dev->dt_addr);
if (IS_ERR(dev->dt_client)) {
ret = PTR_ERR(dev->dt_client);
- goto err_kfree;
+ goto err_del_adapters;
}
}

return 0;
+
+err_del_adapters:
+ i2c_mux_del_adapters(dev->muxc);
err_kfree:
kfree(dev);
err:
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 09a193bb87df..49a3dd70ec0f 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -1536,13 +1536,11 @@ static void buf_cleanup(struct vb2_buffer *vb)

static int start_streaming(struct vb2_queue *q, unsigned int count)
{
- int ret = 1;
int seqnr = 0;
struct bttv_buffer *buf;
struct bttv *btv = vb2_get_drv_priv(q);

- ret = check_alloc_btres_lock(btv, RESOURCE_VIDEO_STREAM);
- if (ret == 0) {
+ if (!check_alloc_btres_lock(btv, RESOURCE_VIDEO_STREAM)) {
if (btv->field_count)
seqnr++;
while (!list_empty(&btv->capture)) {
@@ -1553,7 +1551,7 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
vb2_buffer_done(&buf->vbuf.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
- return !ret;
+ return -EBUSY;
}
if (!vb2_is_streaming(&btv->vbiq)) {
init_irqreg(btv);
@@ -2774,6 +2772,27 @@ bttv_irq_wakeup_vbi(struct bttv *btv, struct bttv_buffer *wakeup,
return;
wakeup->vbuf.vb2_buf.timestamp = ktime_get_ns();
wakeup->vbuf.sequence = btv->field_count >> 1;
+
+ /*
+ * Ugly hack for backwards compatibility.
+ * Some applications expect that the last 4 bytes of
+ * the VBI data contains the sequence number.
+ *
+ * This makes it possible to associate the VBI data
+ * with the video frame if you use read() to get the
+ * VBI data.
+ */
+ if (vb2_fileio_is_active(wakeup->vbuf.vb2_buf.vb2_queue)) {
+ u32 *vaddr = vb2_plane_vaddr(&wakeup->vbuf.vb2_buf, 0);
+ unsigned long size =
+ vb2_get_plane_payload(&wakeup->vbuf.vb2_buf, 0) / 4;
+
+ if (vaddr && size) {
+ vaddr += size - 1;
+ *vaddr = wakeup->vbuf.sequence;
+ }
+ }
+
vb2_buffer_done(&wakeup->vbuf.vb2_buf, state);
if (btv->field_count == 0)
btor(BT848_INT_VSYNC, BT848_INT_MASK);
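
The bttv compatibility hack stores the frame sequence number in the last 32-bit word of the VBI payload when file I/O emulation is active: size is the payload length in words, so vaddr += size - 1 lands on the final word. A standalone sketch of that pointer arithmetic on a dummy buffer:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t payload[16] = { 0 };			/* dummy 64-byte VBI plane */
	uint32_t *vaddr = payload;
	unsigned long size = sizeof(payload) / 4;	/* payload length in 32-bit words */
	uint32_t sequence = 42;				/* invented sequence number */

	vaddr += size - 1;				/* point at the last word */
	*vaddr = sequence;

	printf("last word now holds %u\n", payload[15]);
	return 0;
}
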
diff --git a/drivers/media/pci/bt8xx/bttv-vbi.c b/drivers/media/pci/bt8xx/bttv-vbi.c
index ab213e51ec95..e489a3acb4b9 100644
--- a/drivers/media/pci/bt8xx/bttv-vbi.c
+++ b/drivers/media/pci/bt8xx/bttv-vbi.c
@@ -123,14 +123,12 @@ static void buf_cleanup_vbi(struct vb2_buffer *vb)

static int start_streaming_vbi(struct vb2_queue *q, unsigned int count)
{
- int ret;
int seqnr = 0;
struct bttv_buffer *buf;
struct bttv *btv = vb2_get_drv_priv(q);

btv->framedrop = 0;
- ret = check_alloc_btres_lock(btv, RESOURCE_VBI);
- if (ret == 0) {
+ if (!check_alloc_btres_lock(btv, RESOURCE_VBI)) {
if (btv->field_count)
seqnr++;
while (!list_empty(&btv->vcapture)) {
@@ -141,13 +139,13 @@ static int start_streaming_vbi(struct vb2_queue *q, unsigned int count)
vb2_buffer_done(&buf->vbuf.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
- return !ret;
+ return -EBUSY;
}
if (!vb2_is_streaming(&btv->capq)) {
init_irqreg(btv);
btv->field_count = 0;
}
- return !ret;
+ return 0;
}

static void stop_streaming_vbi(struct vb2_queue *q)
diff --git a/drivers/media/platform/amphion/vpu_core.c b/drivers/media/platform/amphion/vpu_core.c
index 1af6fc9460d4..3a2030d02e45 100644
--- a/drivers/media/platform/amphion/vpu_core.c
+++ b/drivers/media/platform/amphion/vpu_core.c
@@ -642,7 +642,7 @@ static int vpu_core_probe(struct platform_device *pdev)
return -ENODEV;

core->type = core->res->type;
- core->id = of_alias_get_id(dev->of_node, "vpu_core");
+ core->id = of_alias_get_id(dev->of_node, "vpu-core");
if (core->id < 0) {
dev_err(dev, "can't get vpu core id\n");
return core->id;
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
index 7194f88edc0f..60425c99a2b8 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
@@ -1403,7 +1403,6 @@ static void mtk_jpeg_remove(struct platform_device *pdev)
{
struct mtk_jpeg_dev *jpeg = platform_get_drvdata(pdev);

- cancel_delayed_work_sync(&jpeg->job_timeout_work);
pm_runtime_disable(&pdev->dev);
video_unregister_device(jpeg->vdev);
v4l2_m2m_release(jpeg->m2m_dev);
diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
index 5f93712bf485..142ac7b73e14 100644
--- a/drivers/media/platform/nxp/imx-mipi-csis.c
+++ b/drivers/media/platform/nxp/imx-mipi-csis.c
@@ -1437,24 +1437,18 @@ static int mipi_csis_probe(struct platform_device *pdev)
/* Reset PHY and enable the clocks. */
mipi_csis_phy_reset(csis);

- ret = mipi_csis_clk_enable(csis);
- if (ret < 0) {
- dev_err(csis->dev, "failed to enable clocks: %d\n", ret);
- return ret;
- }
-
/* Now that the hardware is initialized, request the interrupt. */
ret = devm_request_irq(dev, irq, mipi_csis_irq_handler, 0,
dev_name(dev), csis);
if (ret) {
dev_err(dev, "Interrupt request failed\n");
- goto err_disable_clock;
+ return ret;
}

/* Initialize and register the subdev. */
ret = mipi_csis_subdev_init(csis);
if (ret < 0)
- goto err_disable_clock;
+ return ret;

platform_set_drvdata(pdev, &csis->sd);

@@ -1488,8 +1482,6 @@ static int mipi_csis_probe(struct platform_device *pdev)
v4l2_async_nf_unregister(&csis->notifier);
v4l2_async_nf_cleanup(&csis->notifier);
v4l2_async_unregister_subdev(&csis->sd);
-err_disable_clock:
- mipi_csis_clk_disable(csis);

return ret;
}
@@ -1504,9 +1496,10 @@ static void mipi_csis_remove(struct platform_device *pdev)
v4l2_async_nf_cleanup(&csis->notifier);
v4l2_async_unregister_subdev(&csis->sd);

+ if (!pm_runtime_enabled(&pdev->dev))
+ mipi_csis_runtime_suspend(&pdev->dev);
+
pm_runtime_disable(&pdev->dev);
- mipi_csis_runtime_suspend(&pdev->dev);
- mipi_csis_clk_disable(csis);
v4l2_subdev_cleanup(&csis->sd);
media_entity_cleanup(&csis->sd.entity);
pm_runtime_set_suspended(&pdev->dev);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index c41abd2833f1..894d5afaff4e 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -582,7 +582,7 @@ static int rkisp1_probe(struct platform_device *pdev)

ret = v4l2_device_register(rkisp1->dev, &rkisp1->v4l2_dev);
if (ret)
- goto err_pm_runtime_disable;
+ goto err_media_dev_cleanup;

ret = media_device_register(&rkisp1->media_dev);
if (ret) {
@@ -617,6 +617,8 @@ static int rkisp1_probe(struct platform_device *pdev)
media_device_unregister(&rkisp1->media_dev);
err_unreg_v4l2_dev:
v4l2_device_unregister(&rkisp1->v4l2_dev);
+err_media_dev_cleanup:
+ media_device_cleanup(&rkisp1->media_dev);
err_pm_runtime_disable:
pm_runtime_disable(&pdev->dev);
return ret;
@@ -637,6 +639,8 @@ static void rkisp1_remove(struct platform_device *pdev)
media_device_unregister(&rkisp1->media_dev);
v4l2_device_unregister(&rkisp1->v4l2_dev);

+ media_device_cleanup(&rkisp1->media_dev);
+
pm_runtime_disable(&pdev->dev);
}

diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
index 50ec24c753e9..1874c976081f 100644
--- a/drivers/media/platform/verisilicon/hantro_drv.c
+++ b/drivers/media/platform/verisilicon/hantro_drv.c
@@ -904,6 +904,8 @@ static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)

if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
vpu->encoder = func;
+ v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
+ v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
} else {
vpu->decoder = func;
v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
index b3ae037a50f6..db145519fc5d 100644
--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
@@ -785,6 +785,9 @@ const struct v4l2_ioctl_ops hantro_ioctl_ops = {
.vidioc_g_selection = vidioc_g_selection,
.vidioc_s_selection = vidioc_s_selection,

+ .vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd,
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd,
+
.vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
.vidioc_encoder_cmd = vidioc_encoder_cmd,
};
diff --git a/drivers/media/test-drivers/visl/visl-video.c b/drivers/media/test-drivers/visl/visl-video.c
index 7cac6a6456eb..9303a3e118d7 100644
--- a/drivers/media/test-drivers/visl/visl-video.c
+++ b/drivers/media/test-drivers/visl/visl-video.c
@@ -525,6 +525,9 @@ const struct v4l2_ioctl_ops visl_ioctl_ops = {
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,

+ .vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd,
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd,
+
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 727e6268567f..f1feccc28bf0 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -1024,6 +1024,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
if (!dev->video_mode.isoc_ctl.urb) {
dev_err(dev->dev,
"cannot alloc memory for usb buffers\n");
+ kfree(dma_q->p_left_data);
return -ENOMEM;
}

@@ -1033,6 +1034,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
dev_err(dev->dev,
"cannot allocate memory for usbtransfer\n");
kfree(dev->video_mode.isoc_ctl.urb);
+ kfree(dma_q->p_left_data);
return -ENOMEM;
}

diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c
index 14170a5d72b3..1764674de98b 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-context.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c
@@ -268,7 +268,8 @@ void pvr2_context_disconnect(struct pvr2_context *mp)
{
pvr2_hdw_disconnect(mp->hdw);
mp->disconnect_flag = !0;
- pvr2_context_notify(mp);
+ if (!pvr2_context_shutok())
+ pvr2_context_notify(mp);
}


diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index 091e8cf4114b..8cfd593d293d 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -880,7 +880,6 @@ void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
&asc->notifier->waiting_list);

v4l2_async_unbind_subdev_one(asc->notifier, asc);
- list_del(&asc->asc_subdev_entry);
}
}

diff --git a/drivers/mfd/cs42l43-sdw.c b/drivers/mfd/cs42l43-sdw.c
index 7392b3d2e6b9..4be4df9dd8cf 100644
--- a/drivers/mfd/cs42l43-sdw.c
+++ b/drivers/mfd/cs42l43-sdw.c
@@ -17,13 +17,12 @@

#include "cs42l43.h"

-enum cs42l43_sdw_ports {
- CS42L43_DMIC_DEC_ASP_PORT = 1,
- CS42L43_SPK_TX_PORT,
- CS42L43_SPDIF_HP_PORT,
- CS42L43_SPK_RX_PORT,
- CS42L43_ASP_PORT,
-};
+#define CS42L43_SDW_PORT(port, chans) { \
+ .num = port, \
+ .max_ch = chans, \
+ .type = SDW_DPN_FULL, \
+ .max_word = 24, \
+}

static const struct regmap_config cs42l43_sdw_regmap = {
.reg_bits = 32,
@@ -42,65 +41,48 @@ static const struct regmap_config cs42l43_sdw_regmap = {
.num_reg_defaults = ARRAY_SIZE(cs42l43_reg_default),
};

+static const struct sdw_dpn_prop cs42l43_src_port_props[] = {
+ CS42L43_SDW_PORT(1, 4),
+ CS42L43_SDW_PORT(2, 2),
+ CS42L43_SDW_PORT(3, 2),
+ CS42L43_SDW_PORT(4, 2),
+};
+
+static const struct sdw_dpn_prop cs42l43_sink_port_props[] = {
+ CS42L43_SDW_PORT(5, 2),
+ CS42L43_SDW_PORT(6, 2),
+ CS42L43_SDW_PORT(7, 2),
+};
+
static int cs42l43_read_prop(struct sdw_slave *sdw)
{
struct sdw_slave_prop *prop = &sdw->prop;
struct device *dev = &sdw->dev;
- struct sdw_dpn_prop *dpn;
- unsigned long addr;
- int nval;
int i;
- u32 bit;

prop->use_domain_irq = true;
prop->paging_support = true;
prop->wake_capable = true;
- prop->source_ports = BIT(CS42L43_DMIC_DEC_ASP_PORT) | BIT(CS42L43_SPK_TX_PORT);
- prop->sink_ports = BIT(CS42L43_SPDIF_HP_PORT) |
- BIT(CS42L43_SPK_RX_PORT) | BIT(CS42L43_ASP_PORT);
prop->quirks = SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY;
prop->scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY |
SDW_SCP_INT1_IMPL_DEF;

- nval = hweight32(prop->source_ports);
- prop->src_dpn_prop = devm_kcalloc(dev, nval, sizeof(*prop->src_dpn_prop),
- GFP_KERNEL);
+ for (i = 0; i < ARRAY_SIZE(cs42l43_src_port_props); i++)
+ prop->source_ports |= BIT(cs42l43_src_port_props[i].num);
+
+ prop->src_dpn_prop = devm_kmemdup(dev, cs42l43_src_port_props,
+ sizeof(cs42l43_src_port_props), GFP_KERNEL);
if (!prop->src_dpn_prop)
return -ENOMEM;

- i = 0;
- dpn = prop->src_dpn_prop;
- addr = prop->source_ports;
- for_each_set_bit(bit, &addr, 32) {
- dpn[i].num = bit;
- dpn[i].max_ch = 2;
- dpn[i].type = SDW_DPN_FULL;
- dpn[i].max_word = 24;
- i++;
- }
- /*
- * All ports are 2 channels max, except the first one,
- * CS42L43_DMIC_DEC_ASP_PORT.
- */
- dpn[CS42L43_DMIC_DEC_ASP_PORT].max_ch = 4;
+ for (i = 0; i < ARRAY_SIZE(cs42l43_sink_port_props); i++)
+ prop->sink_ports |= BIT(cs42l43_sink_port_props[i].num);

- nval = hweight32(prop->sink_ports);
- prop->sink_dpn_prop = devm_kcalloc(dev, nval, sizeof(*prop->sink_dpn_prop),
- GFP_KERNEL);
+ prop->sink_dpn_prop = devm_kmemdup(dev, cs42l43_sink_port_props,
+ sizeof(cs42l43_sink_port_props), GFP_KERNEL);
if (!prop->sink_dpn_prop)
return -ENOMEM;

- i = 0;
- dpn = prop->sink_dpn_prop;
- addr = prop->sink_ports;
- for_each_set_bit(bit, &addr, 32) {
- dpn[i].num = bit;
- dpn[i].max_ch = 2;
- dpn[i].type = SDW_DPN_FULL;
- dpn[i].max_word = 24;
- i++;
- }
-
return 0;
}
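
The cs42l43 rework turns the port description inside out: instead of deriving per-port properties from the source_ports/sink_ports bitmasks, the static tables are now authoritative and the bitmasks are accumulated from them with BIT(num). The derivation itself is simple; a standalone version using the same source port numbers as the table above:

#include <stdint.h>
#include <stdio.h>

struct port_prop {
	unsigned int num;
	unsigned int max_ch;
};

static const struct port_prop src_ports[] = {
	{ .num = 1, .max_ch = 4 },
	{ .num = 2, .max_ch = 2 },
	{ .num = 3, .max_ch = 2 },
	{ .num = 4, .max_ch = 2 },
};

int main(void)
{
	uint32_t source_ports = 0;
	unsigned int i;

	for (i = 0; i < sizeof(src_ports) / sizeof(src_ports[0]); i++)
		source_ports |= 1u << src_ports[i].num;

	printf("source_ports mask: %#x\n", source_ports);	/* prints 0x1e */
	return 0;
}
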

diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 9591b354072a..00e7b578bb3e 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -301,8 +301,8 @@ static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,

snprintf(name, sizeof(name), "%s-div", devname);
tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp),
+ 0, lpss->priv, 1, 15, 16, 15,
CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
- lpss->priv, 1, 15, 16, 15, 0,
NULL);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
diff --git a/drivers/mfd/rk8xx-core.c b/drivers/mfd/rk8xx-core.c
index 11a831e92da8..a577f950c632 100644
--- a/drivers/mfd/rk8xx-core.c
+++ b/drivers/mfd/rk8xx-core.c
@@ -53,76 +53,68 @@ static const struct resource rk817_charger_resources[] = {
};

static const struct mfd_cell rk805s[] = {
- { .name = "rk808-clkout", .id = PLATFORM_DEVID_NONE, },
- { .name = "rk808-regulator", .id = PLATFORM_DEVID_NONE, },
- { .name = "rk805-pinctrl", .id = PLATFORM_DEVID_NONE, },
+ { .name = "rk808-clkout", },
+ { .name = "rk808-regulator", },
+ { .name = "rk805-pinctrl", },
{
.name = "rk808-rtc",
.num_resources = ARRAY_SIZE(rtc_resources),
.resources = &rtc_resources[0],
- .id = PLATFORM_DEVID_NONE,
},
{ .name = "rk805-pwrkey",
.num_resources = ARRAY_SIZE(rk805_key_resources),
.resources = &rk805_key_resources[0],
- .id = PLATFORM_DEVID_NONE,
},
};

static const struct mfd_cell rk806s[] = {
- { .name = "rk805-pinctrl", .id = PLATFORM_DEVID_AUTO, },
- { .name = "rk808-regulator", .id = PLATFORM_DEVID_AUTO, },
+ { .name = "rk805-pinctrl", },
+ { .name = "rk808-regulator", },
{
.name = "rk805-pwrkey",
.resources = rk806_pwrkey_resources,
.num_resources = ARRAY_SIZE(rk806_pwrkey_resources),
- .id = PLATFORM_DEVID_AUTO,
},
};

static const struct mfd_cell rk808s[] = {
- { .name = "rk808-clkout", .id = PLATFORM_DEVID_NONE, },
- { .name = "rk808-regulator", .id = PLATFORM_DEVID_NONE, },
+ { .name = "rk808-clkout", },
+ { .name = "rk808-regulator", },
{
.name = "rk808-rtc",
.num_resources = ARRAY_SIZE(rtc_resources),
.resources = rtc_resources,
- .id = PLATFORM_DEVID_NONE,
},
};

static const struct mfd_cell rk817s[] = {
- { .name = "rk808-clkout", .id = PLATFORM_DEVID_NONE, },
- { .name = "rk808-regulator", .id = PLATFORM_DEVID_NONE, },
+ { .name = "rk808-clkout", },
+ { .name = "rk808-regulator", },
{
.name = "rk805-pwrkey",
.num_resources = ARRAY_SIZE(rk817_pwrkey_resources),
.resources = &rk817_pwrkey_resources[0],
- .id = PLATFORM_DEVID_NONE,
},
{
.name = "rk808-rtc",
.num_resources = ARRAY_SIZE(rk817_rtc_resources),
.resources = &rk817_rtc_resources[0],
- .id = PLATFORM_DEVID_NONE,
},
- { .name = "rk817-codec", .id = PLATFORM_DEVID_NONE, },
+ { .name = "rk817-codec", },
{
.name = "rk817-charger",
.num_resources = ARRAY_SIZE(rk817_charger_resources),
.resources = &rk817_charger_resources[0],
- .id = PLATFORM_DEVID_NONE,
},
};

static const struct mfd_cell rk818s[] = {
- { .name = "rk808-clkout", .id = PLATFORM_DEVID_NONE, },
- { .name = "rk808-regulator", .id = PLATFORM_DEVID_NONE, },
+ { .name = "rk808-clkout", },
+ { .name = "rk808-regulator", },
{
.name = "rk808-rtc",
.num_resources = ARRAY_SIZE(rtc_resources),
.resources = rtc_resources,
- .id = PLATFORM_DEVID_NONE,
},
};

@@ -680,7 +672,7 @@ int rk8xx_probe(struct device *dev, int variant, unsigned int irq, struct regmap
pre_init_reg[i].addr);
}

- ret = devm_mfd_add_devices(dev, 0, cells, nr_cells, NULL, 0,
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, cells, nr_cells, NULL, 0,
regmap_irq_get_domain(rk808->irq_data));
if (ret)
return dev_err_probe(dev, ret, "failed to add MFD devices\n");
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 57b29c325131..c9550368d9ea 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -105,6 +105,10 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
}

syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%pa", np, &res.start);
+ if (!syscon_config.name) {
+ ret = -ENOMEM;
+ goto err_regmap;
+ }
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
syscon_config.max_register = resource_size(&res) - reg_io_width;
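
Several hunks in this series (syscon, tps6594, vsc73xx) add the same missing check: a
formatted-allocation helper can return NULL, and the result must be tested before use.
A small userspace sketch of the pattern using asprintf(), with a goto-based error path
in the same spirit as the driver code (names and labels are illustrative):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int make_name(const char *base, unsigned long addr)
{
	char *name = NULL;
	int ret = 0;

	if (asprintf(&name, "%s@%lx", base, addr) < 0) {
		name = NULL;
		ret = -ENOMEM;		/* bail out instead of dereferencing NULL */
		goto err;
	}

	printf("registered %s\n", name);
err:
	free(name);
	return ret;
}

int main(void)
{
	return make_name("syscon", 0x10000000) ? 1 : 0;
}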
diff --git a/drivers/mfd/tps6594-core.c b/drivers/mfd/tps6594-core.c
index 0fb9c5cf213a..783ee59901e8 100644
--- a/drivers/mfd/tps6594-core.c
+++ b/drivers/mfd/tps6594-core.c
@@ -433,6 +433,9 @@ int tps6594_device_init(struct tps6594 *tps, bool enable_crc)
tps6594_irq_chip.name = devm_kasprintf(dev, GFP_KERNEL, "%s-%ld-0x%02x",
dev->driver->name, tps->chip_id, tps->reg);

+ if (!tps6594_irq_chip.name)
+ return -ENOMEM;
+
ret = devm_regmap_add_irq_chip(dev, tps->regmap, tps->irq, IRQF_SHARED | IRQF_ONESHOT,
0, &tps6594_irq_chip, &tps->irq_data);
if (ret)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 554e67103c1a..bc7e2ad37002 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -1018,14 +1018,15 @@ config MMC_SDHCI_XENON

config MMC_SDHCI_OMAP
tristate "TI SDHCI Controller Support"
+ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
depends on MMC_SDHCI_PLTFM && OF
select THERMAL
imply TI_SOC_THERMAL
select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE
help
This selects the Secure Digital Host Controller Interface (SDHCI)
- support present in TI's DRA7 SOCs. The controller supports
- SD/MMC/SDIO devices.
+ support present in TI's Keystone/OMAP2+/DRA7 SOCs. The controller
+ supports SD/MMC/SDIO devices.

If you have a controller with this interface, say Y or M here.

@@ -1033,14 +1034,15 @@ config MMC_SDHCI_OMAP

config MMC_SDHCI_AM654
tristate "Support for the SDHCI Controller in TI's AM654 SOCs"
+ depends on ARCH_K3 || COMPILE_TEST
depends on MMC_SDHCI_PLTFM && OF
select MMC_SDHCI_IO_ACCESSORS
select MMC_CQHCI
select REGMAP_MMIO
help
This selects the Secure Digital Host Controller Interface (SDHCI)
- support present in TI's AM654 SOCs. The controller supports
- SD/MMC/SDIO devices.
+ support present in TI's AM65x/AM64x/AM62x/J721E SOCs. The controller
+ supports SD/MMC/SDIO devices.

If you have a controller with this interface, say Y or M here.

diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index ff18636e0889..5bc32108ca03 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -463,7 +463,7 @@ static void blktrans_notify_add(struct mtd_info *mtd)
{
struct mtd_blktrans_ops *tr;

- if (mtd->type == MTD_ABSENT)
+ if (mtd->type == MTD_ABSENT || mtd->type == MTD_UBIVOLUME)
return;

list_for_each_entry(tr, &blktrans_majors, list)
@@ -503,7 +503,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
mutex_lock(&mtd_table_mutex);
list_add(&tr->list, &blktrans_majors);
mtd_for_each_device(mtd)
- if (mtd->type != MTD_ABSENT)
+ if (mtd->type != MTD_ABSENT && mtd->type != MTD_UBIVOLUME)
tr->add_mtd(tr, mtd);
mutex_unlock(&mtd_table_mutex);
return 0;
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index 20bb1e0cb5eb..f0e2318ce088 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -21,7 +21,7 @@

#define ERR_BYTE 0xFF /* Value returned for read
bytes when read failed */
-#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
+#define IFC_TIMEOUT_MSECS 1000 /* Maximum timeout to wait
for IFC NAND Machine */

struct fsl_ifc_ctrl;
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 2d20be6ffb7e..ddd087c2c3ed 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -11,7 +11,7 @@
#include <linux/net.h>
#include <linux/igmp.h>
#include <linux/workqueue.h>
-#include <net/sch_generic.h>
+#include <net/pkt_sched.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/udp.h>
@@ -80,11 +80,11 @@ static struct mld2_grec mldv2_zero_grec;

static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb)
{
- BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct qdisc_skb_cb) >
+ BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct tc_skb_cb) >
sizeof_field(struct sk_buff, cb));

return (struct amt_skb_cb *)((void *)skb->cb +
- sizeof(struct qdisc_skb_cb));
+ sizeof(struct tc_skb_cb));
}

static void __amt_source_gc_work(void)
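
The amt hunk updates a compile-time guard: the driver keeps its private data in skb->cb
behind another layer's control block, so the combined size must fit in the fixed cb
array. A standalone sketch of the same guard using static_assert, with made-up struct
names and an assumed 48-byte cb (the real structs are tc/qdisc specific):

#include <assert.h>
#include <stdio.h>

#define CB_SIZE 48	/* stand-in for sizeof(((struct sk_buff *)0)->cb) */

struct lower_cb { unsigned long data[4]; };	/* e.g. a tc_skb_cb-like block */
struct my_cb    { unsigned int flags; void *ctx; };

/* Fail at build time, not at runtime, if the two blocks stop fitting. */
static_assert(sizeof(struct lower_cb) + sizeof(struct my_cb) <= CB_SIZE,
	      "private cb does not fit behind the lower layer's cb");

int main(void)
{
	union {
		char raw[CB_SIZE];
		unsigned long align;	/* keep the buffer suitably aligned */
	} cb;
	struct my_cb *priv = (struct my_cb *)(cb.raw + sizeof(struct lower_cb));

	priv->flags = 1;
	printf("flags=%u, private cb at offset %zu of %d bytes\n",
	       priv->flags, sizeof(struct lower_cb), CB_SIZE);
	return 0;
}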
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 4f09e7438f3b..c99fb1bd4c25 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -1118,6 +1118,8 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)

vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x",
vsc->chipid);
+ if (!vsc->gc.label)
+ return -ENOMEM;
vsc->gc.ngpio = 4;
vsc->gc.owner = THIS_MODULE;
vsc->gc.parent = vsc->dev;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index 4728ba34b0e3..76218f1cb459 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -506,6 +506,7 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
rpm_t *rpm = rpmd;
u8 num_lmacs;
u32 fifo_len;
+ u16 max_lmac;

lmac_info = rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS);
/* LMACs are divided into two groups and each group
@@ -513,7 +514,11 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
* Group0 lmac_id range {0..3}
* Group1 lmac_id range {4..7}
*/
- fifo_len = rpm->mac_ops->fifo_len / 2;
+ max_lmac = (rpm_read(rpm, 0, CGX_CONST) >> 24) & 0xFF;
+ if (max_lmac > 4)
+ fifo_len = rpm->mac_ops->fifo_len / 2;
+ else
+ fifo_len = rpm->mac_ops->fifo_len;

if (lmac_id < 4) {
num_lmacs = hweight8(lmac_info & 0xF);
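
The RPM fix reads the LMAC count out of a constant register before deciding whether the
FIFO is really shared between two groups. A small arithmetic sketch of the field
extraction and the halving decision, assuming the 8-bit count sits at bit 24 as in the
hunk (register and FIFO values below are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cgx_const = 0x08000000;	/* example: max_lmac field = 8 */
	unsigned int fifo_len = 65536;		/* example total FIFO size */
	unsigned int max_lmac = (cgx_const >> 24) & 0xFF;

	/* Only split the FIFO when the LMACs really come in two groups. */
	if (max_lmac > 4)
		fifo_len /= 2;

	printf("max_lmac=%u per-group fifo=%u\n", max_lmac, fifo_len);
	return 0;
}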
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 694de9513b9f..aaf1faed4133 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -130,9 +130,15 @@ static int mlxbf_gige_open(struct net_device *netdev)
{
struct mlxbf_gige *priv = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
+ u64 control;
u64 int_en;
int err;

+ /* Perform general init of GigE block */
+ control = readq(priv->base + MLXBF_GIGE_CONTROL);
+ control |= MLXBF_GIGE_CONTROL_PORT_EN;
+ writeq(control, priv->base + MLXBF_GIGE_CONTROL);
+
err = mlxbf_gige_request_irqs(priv);
if (err)
return err;
@@ -147,14 +153,14 @@ static int mlxbf_gige_open(struct net_device *netdev)
*/
priv->valid_polarity = 0;

- err = mlxbf_gige_rx_init(priv);
+ phy_start(phydev);
+
+ err = mlxbf_gige_tx_init(priv);
if (err)
goto free_irqs;
- err = mlxbf_gige_tx_init(priv);
+ err = mlxbf_gige_rx_init(priv);
if (err)
- goto rx_deinit;
-
- phy_start(phydev);
+ goto tx_deinit;

netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll);
napi_enable(&priv->napi);
@@ -176,8 +182,8 @@ static int mlxbf_gige_open(struct net_device *netdev)

return 0;

-rx_deinit:
- mlxbf_gige_rx_deinit(priv);
+tx_deinit:
+ mlxbf_gige_tx_deinit(priv);

free_irqs:
mlxbf_gige_free_irqs(priv);
@@ -365,7 +371,6 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
void __iomem *plu_base;
void __iomem *base;
int addr, phy_irq;
- u64 control;
int err;

base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
@@ -380,11 +385,6 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
if (IS_ERR(plu_base))
return PTR_ERR(plu_base);

- /* Perform general init of GigE block */
- control = readq(base + MLXBF_GIGE_CONTROL);
- control |= MLXBF_GIGE_CONTROL_PORT_EN;
- writeq(control, base + MLXBF_GIGE_CONTROL);
-
netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
if (!netdev)
return -ENOMEM;
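
The mlxbf_gige changes move the port-enable write from probe() to open() and put TX
init ahead of RX init, with the unwind label renamed to match, so the error path tears
down exactly what has been set up, in reverse order. A minimal sketch of that
goto-unwind convention (all functions below are placeholders, not driver calls):

#include <stdio.h>

static int tx_init(void)	{ puts("tx init");   return 0; }
static void tx_deinit(void)	{ puts("tx deinit"); }
static int rx_init(void)	{ puts("rx init");   return -1; /* simulate failure */ }
static void free_irqs(void)	{ puts("free irqs"); }

static int open_device(void)
{
	int err;

	err = tx_init();
	if (err)
		goto err_free_irqs;
	err = rx_init();
	if (err)
		goto err_tx_deinit;	/* unwind only what succeeded, in reverse */

	return 0;

err_tx_deinit:
	tx_deinit();
err_free_irqs:
	free_irqs();
	return err;
}

int main(void)
{
	return open_device() ? 1 : 0;
}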
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
index 227d01cace3f..699984358493 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
@@ -142,6 +142,9 @@ int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN,
priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS);

+ writeq(ilog2(priv->rx_q_entries),
+ priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);
+
/* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
* indicate readiness to receive interrupts
*/
@@ -154,9 +157,6 @@ int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
data |= MLXBF_GIGE_RX_DMA_EN;
writeq(data, priv->base + MLXBF_GIGE_RX_DMA);

- writeq(ilog2(priv->rx_q_entries),
- priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);
-
return 0;

free_wqe_and_skb:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index 4c98950380d5..d231f4d2888b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -301,6 +301,7 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
unsigned long *p_index)
{
unsigned int num_rows, entry_size;
+ unsigned long index;

/* We only allow allocations of entire rows */
if (num_erps % erp_core->num_erp_banks != 0)
@@ -309,10 +310,11 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
entry_size = erp_core->erpt_entries_size[region_type];
num_rows = num_erps / erp_core->num_erp_banks;

- *p_index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
- if (*p_index == 0)
+ index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
+ if (!index)
return -ENOBUFS;
- *p_index -= MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+
+ *p_index = index - MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;

return 0;
}
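
gen_pool_alloc() returns 0 on failure, so a pool whose addresses start at 0 needs an
artificial offset to keep "0" unambiguous; the fix above also stops writing through the
output pointer on the failure path. A userspace sketch of that offset convention (the
allocator below is a stub, not genalloc):

#include <stdio.h>

#define GEN_OFFSET 1024UL	/* pool is registered shifted by this amount */

/* Stub allocator: returns 0 on failure, an offset-biased index otherwise. */
static unsigned long pool_alloc_stub(unsigned long size)
{
	static unsigned long next = GEN_OFFSET;
	unsigned long ret;

	if (size == 0 || size > 4096)
		return 0;	/* failure is always 0, never a valid index */
	ret = next;
	next += size;
	return ret;
}

static int table_alloc(unsigned long size, unsigned long *p_index)
{
	unsigned long index = pool_alloc_stub(size);

	if (!index)
		return -1;

	/* Only write the result on success; strip the bias for the caller. */
	*p_index = index - GEN_OFFSET;
	return 0;
}

int main(void)
{
	unsigned long idx;

	if (!table_alloc(128, &idx))
		printf("allocated at index %lu\n", idx);
	return 0;
}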
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index d50786b0a6ce..50ea1eff02b2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -681,13 +681,13 @@ static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
+ struct mlxsw_sp_acl_tcam *tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

ops->region_fini(mlxsw_sp, region->priv);
mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
- mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
- region->id);
+ mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
kfree(region);
}

@@ -1564,6 +1564,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
tcam->max_groups = max_groups;
tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_GROUP_SIZE);
+ tcam->max_group_size = min_t(unsigned int, tcam->max_group_size,
+ MLXSW_REG_PAGT_ACL_MAX_NUM);

err = ops->init(mlxsw_sp, tcam->priv, tcam);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index debd2c466f11..ae2fb9efbc50 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -11458,6 +11458,13 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_register_netevent_notifier;

+ mlxsw_sp->router->netdevice_nb.notifier_call =
+ mlxsw_sp_router_netdevice_event;
+ err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->netdevice_nb);
+ if (err)
+ goto err_register_netdev_notifier;
+
mlxsw_sp->router->nexthop_nb.notifier_call =
mlxsw_sp_nexthop_obj_event;
err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
@@ -11473,22 +11480,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_register_fib_notifier;

- mlxsw_sp->router->netdevice_nb.notifier_call =
- mlxsw_sp_router_netdevice_event;
- err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
- &mlxsw_sp->router->netdevice_nb);
- if (err)
- goto err_register_netdev_notifier;
-
return 0;

-err_register_netdev_notifier:
- unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
- &mlxsw_sp->router->fib_nb);
err_register_fib_notifier:
unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->nexthop_nb);
err_register_nexthop_notifier:
+ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &router->netdevice_nb);
+err_register_netdev_notifier:
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
@@ -11536,11 +11536,11 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_router *router = mlxsw_sp->router;

- unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
- &router->netdevice_nb);
unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &router->fib_nb);
unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
&router->nexthop_nb);
+ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &router->netdevice_nb);
unregister_netevent_notifier(&router->netevent_nb);
unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 39d24e07f306..5b69b9268c75 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -396,7 +396,7 @@ static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)

struct rtnl_link_ops rmnet_link_ops __read_mostly = {
.kind = "rmnet",
- .maxtype = __IFLA_RMNET_MAX,
+ .maxtype = IFLA_RMNET_MAX,
.priv_size = sizeof(struct rmnet_priv),
.setup = rmnet_vnd_setup,
.validate = rmnet_rtnl_validate,
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 3c2a6b23c202..8fec0dbbbe7b 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1949,7 +1949,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct ravb_tstamp_skb *ts_skb;
struct ravb_tx_desc *desc;
unsigned long flags;
- u32 dma_addr;
+ dma_addr_t dma_addr;
void *buffer;
u32 entry;
u32 len;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index cd7a9768de5f..b8c93b881a65 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -255,6 +255,7 @@ struct stmmac_priv {
u32 msg_enable;
int wolopts;
int wol_irq;
+ bool wol_irq_disabled;
int clk_csr;
struct timer_list eee_ctrl_timer;
int lpi_irq;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 6aa5c0556d22..69c8c2528524 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -311,8 +311,9 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);

- if (priv->hw->pcs & STMMAC_PCS_RGMII ||
- priv->hw->pcs & STMMAC_PCS_SGMII) {
+ if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
+ (priv->hw->pcs & STMMAC_PCS_RGMII ||
+ priv->hw->pcs & STMMAC_PCS_SGMII)) {
struct rgmii_adv adv;
u32 supported, advertising, lp_advertising;

@@ -397,8 +398,9 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);

- if (priv->hw->pcs & STMMAC_PCS_RGMII ||
- priv->hw->pcs & STMMAC_PCS_SGMII) {
+ if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
+ (priv->hw->pcs & STMMAC_PCS_RGMII ||
+ priv->hw->pcs & STMMAC_PCS_SGMII)) {
/* Only support ANE */
if (cmd->base.autoneg != AUTONEG_ENABLE)
return -EINVAL;
@@ -543,15 +545,12 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int start;
int q, stat;
- u64 *pos;
char *p;

- pos = data;
for (q = 0; q < tx_cnt; q++) {
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
struct stmmac_txq_stats snapshot;

- data = pos;
do {
start = u64_stats_fetch_begin(&txq_stats->syncp);
snapshot = *txq_stats;
@@ -559,17 +558,15 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)

p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n);
for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
- *data++ += (*(u64 *)p);
+ *data++ = (*(u64 *)p);
p += sizeof(u64);
}
}

- pos = data;
for (q = 0; q < rx_cnt; q++) {
struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
struct stmmac_rxq_stats snapshot;

- data = pos;
do {
start = u64_stats_fetch_begin(&rxq_stats->syncp);
snapshot = *rxq_stats;
@@ -577,7 +574,7 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)

p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n);
for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
- *data++ += (*(u64 *)p);
+ *data++ = (*(u64 *)p);
p += sizeof(u64);
}
}
@@ -825,10 +822,16 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts) {
pr_info("stmmac: wakeup enable\n");
device_set_wakeup_enable(priv->device, 1);
- enable_irq_wake(priv->wol_irq);
+ /* Avoid unbalanced enable_irq_wake calls */
+ if (priv->wol_irq_disabled)
+ enable_irq_wake(priv->wol_irq);
+ priv->wol_irq_disabled = false;
} else {
device_set_wakeup_enable(priv->device, 0);
- disable_irq_wake(priv->wol_irq);
+ /* Avoid unbalanced disable_irq_wake calls */
+ if (!priv->wol_irq_disabled)
+ disable_irq_wake(priv->wol_irq);
+ priv->wol_irq_disabled = true;
}

mutex_lock(&priv->lock);
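
The WoL hunk guards enable_irq_wake()/disable_irq_wake() with a bool (initialized to
true in the request-irq path, per the stmmac_main.c hunk below) so repeated ethtool WoL
changes cannot unbalance the wake-IRQ reference count. A minimal sketch of the guard,
with stub wake functions standing in for the IRQ API:

#include <stdbool.h>
#include <stdio.h>

static int wake_refcount;

static void enable_irq_wake_stub(void)  { wake_refcount++; }
static void disable_irq_wake_stub(void) { wake_refcount--; }

struct priv {
	bool wol_irq_disabled;	/* starts true: wake source not armed yet */
};

static void set_wol(struct priv *p, bool on)
{
	if (on) {
		if (p->wol_irq_disabled)	/* arm only once */
			enable_irq_wake_stub();
		p->wol_irq_disabled = false;
	} else {
		if (!p->wol_irq_disabled)	/* disarm only if armed */
			disable_irq_wake_stub();
		p->wol_irq_disabled = true;
	}
}

int main(void)
{
	struct priv p = { .wol_irq_disabled = true };

	set_wol(&p, true);
	set_wol(&p, true);	/* second enable is a no-op */
	set_wol(&p, false);
	set_wol(&p, false);	/* second disable is a no-op */
	printf("refcount=%d (balanced)\n", wake_refcount);
	return 0;
}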
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 69b9c71f0ede..1bfcf673b3ce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3549,6 +3549,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
/* Request the Wake IRQ in case of another line
* is used for WoL
*/
+ priv->wol_irq_disabled = true;
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
int_name = priv->int_name_wol;
sprintf(int_name, "%s:%s", dev->name, "wol");
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 24120605502f..c62b0f99f2bc 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -56,7 +56,7 @@
#define AM65_CPSW_MAX_PORTS 8

#define AM65_CPSW_MIN_PACKET_SIZE VLAN_ETH_ZLEN
-#define AM65_CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+#define AM65_CPSW_MAX_PACKET_SIZE 2024

#define AM65_CPSW_REG_CTL 0x004
#define AM65_CPSW_REG_STAT_PORT_EN 0x014
@@ -2167,7 +2167,8 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
eth_hw_addr_set(port->ndev, port->slave.mac_addr);

port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
- port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
+ port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE -
+ (VLAN_ETH_HLEN + ETH_FCS_LEN);
port->ndev->hw_features = NETIF_F_SG |
NETIF_F_RXCSUM |
NETIF_F_HW_CSUM |
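
The am65-cpsw hunk redefines the maximum as a hardware frame size and derives max_mtu
from it; with the usual 18-byte VLAN Ethernet header and 4-byte FCS the advertised MTU
becomes 2024 - 22 = 2002. A one-line check of that arithmetic (the macros are restated
here for the example, not included from kernel headers):

#include <stdio.h>

#define MAX_PACKET_SIZE	2024
#define VLAN_ETH_HLEN	18	/* dst + src + VLAN tag + ethertype */
#define ETH_FCS_LEN	4

int main(void)
{
	printf("max_mtu = %d\n", MAX_PACKET_SIZE - (VLAN_ETH_HLEN + ETH_FCS_LEN));
	return 0;
}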
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 2eac92f49631..d8ca82addfe1 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -369,6 +369,12 @@ static int nsim_init_netdevsim_vf(struct netdevsim *ns)
return err;
}

+static void nsim_exit_netdevsim(struct netdevsim *ns)
+{
+ nsim_udp_tunnels_info_destroy(ns->netdev);
+ mock_phc_destroy(ns->phc);
+}
+
struct netdevsim *
nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
{
@@ -417,8 +423,7 @@ void nsim_destroy(struct netdevsim *ns)
}
rtnl_unlock();
if (nsim_dev_port_is_pf(ns->nsim_dev_port))
- nsim_udp_tunnels_info_destroy(dev);
- mock_phc_destroy(ns->phc);
+ nsim_exit_netdevsim(ns);
free_netdev(dev);
}

diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 927d3d54658e..dfd5f8e78e29 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -3313,8 +3313,10 @@ static int lan8814_probe(struct phy_device *phydev)
#define LAN8841_ADC_CHANNEL_MASK 198
#define LAN8841_PTP_RX_PARSE_L2_ADDR_EN 370
#define LAN8841_PTP_RX_PARSE_IP_ADDR_EN 371
+#define LAN8841_PTP_RX_VERSION 374
#define LAN8841_PTP_TX_PARSE_L2_ADDR_EN 434
#define LAN8841_PTP_TX_PARSE_IP_ADDR_EN 435
+#define LAN8841_PTP_TX_VERSION 438
#define LAN8841_PTP_CMD_CTL 256
#define LAN8841_PTP_CMD_CTL_PTP_ENABLE BIT(2)
#define LAN8841_PTP_CMD_CTL_PTP_DISABLE BIT(1)
@@ -3358,6 +3360,12 @@ static int lan8841_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
LAN8841_PTP_RX_PARSE_IP_ADDR_EN, 0);

+ /* Disable checking for minorVersionPTP field */
+ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_RX_VERSION, 0xff00);
+ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_TX_VERSION, 0xff00);
+
/* 100BT Clause 40 improvement errata */
phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
LAN8841_ANALOG_CONTROL_1,
@@ -4820,6 +4828,7 @@ static struct phy_driver ksphy_driver[] = {
.flags = PHY_POLL_CABLE_TEST,
.driver_data = &ksz9131_type,
.probe = kszphy_probe,
+ .soft_reset = genphy_soft_reset,
.config_init = ksz9131_config_init,
.config_intr = kszphy_config_intr,
.config_aneg = ksz9131_config_aneg,
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 1215ebdf173a..ef11c138bf30 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -802,8 +802,8 @@ static int ath11k_core_get_rproc(struct ath11k_base *ab)

prproc = rproc_get_by_phandle(rproc_phandle);
if (!prproc) {
- ath11k_err(ab, "failed to get rproc\n");
- return -EINVAL;
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "failed to get rproc, deferring\n");
+ return -EPROBE_DEFER;
}
ab_ahb->tgt_rproc = prproc;

diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 31176897b746..e3120ab893f4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -1012,7 +1012,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
- IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK);
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK);
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[4] &=
~(IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index cf27f106d4d5..7057421e513b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1673,7 +1673,7 @@ static ssize_t _iwl_dbgfs_link_sta_##name##_write(struct file *file, \
char buf[buflen] = {}; \
size_t buf_size = min(count, sizeof(buf) - 1); \
\
- if (copy_from_user(buf, user_buf, sizeof(buf))) \
+ if (copy_from_user(buf, user_buf, buf_size)) \
return -EFAULT; \
\
return _iwl_dbgfs_link_sta_wrap_write(iwl_dbgfs_##name##_write, \
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 2ddb6f763a0b..1e58f0234293 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -269,17 +269,17 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
}
}

+ mvmvif->link[link_id]->phy_ctxt = phy_ctxt;
+
if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) {
mvmvif->link[link_id]->listen_lmac = true;
ret = iwl_mvm_esr_mode_active(mvm, vif);
if (ret) {
IWL_ERR(mvm, "failed to activate ESR mode (%d)\n", ret);
- return ret;
+ goto out;
}
}

- mvmvif->link[link_id]->phy_ctxt = phy_ctxt;
-
if (switching_chanctx) {
/* reactivate if we turned this off during channel switch */
if (vif->type == NL80211_IFTYPE_AP)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index a5b432bc9e2f..9c582e23ebba 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -99,17 +99,6 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
active_cnt = 2;
}

- /*
- * If the firmware requested it, then we know that it supports
- * getting zero for the values to indicate "use one, but pick
- * which one yourself", which means it can dynamically pick one
- * that e.g. has better RSSI.
- */
- if (mvm->fw_static_smps_request && active_cnt == 1 && idle_cnt == 1) {
- idle_cnt = 0;
- active_cnt = 0;
- }
-
*rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
PHY_RX_CHAIN_VALID_POS);
*rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 177a4628a913..6fdb2c38518e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -2236,7 +2236,7 @@ int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids)
WARN_ON(!iwl_mvm_has_new_tx_api(mvm));

if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0)
- cmd.flags |= CMD_WANT_SKB;
+ cmd.flags |= CMD_WANT_SKB | CMD_SEND_IN_RFKILL;

IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n",
sta_id, tids);
diff --git a/drivers/net/wireless/marvell/libertas/Kconfig b/drivers/net/wireless/marvell/libertas/Kconfig
index 6d62ab49aa8d..c7d02adb3eea 100644
--- a/drivers/net/wireless/marvell/libertas/Kconfig
+++ b/drivers/net/wireless/marvell/libertas/Kconfig
@@ -2,8 +2,6 @@
config LIBERTAS
tristate "Marvell 8xxx Libertas WLAN driver support"
depends on CFG80211
- select WIRELESS_EXT
- select WEXT_SPY
select LIB80211
select FW_LOADER
help
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index ba4e29713a8c..4389cf3f889f 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -2046,6 +2046,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,

mwifiex_set_sys_config_invalid_data(bss_cfg);

+ memcpy(bss_cfg->mac_addr, priv->curr_addr, ETH_ALEN);
+
if (params->beacon_interval)
bss_cfg->beacon_period = params->beacon_interval;
if (params->dtim_period)
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 8e6db904e5b2..62f3c9a52a1d 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -165,6 +165,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32)
#define TLV_TYPE_BSSID (PROPRIETARY_TLV_BASE_ID + 35)
#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
+#define TLV_TYPE_UAP_MAC_ADDRESS (PROPRIETARY_TLV_BASE_ID + 43)
#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44)
#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48)
diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
index 091e7ca79376..e8825f302de8 100644
--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
+++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
@@ -107,6 +107,7 @@ struct mwifiex_uap_bss_param {
u8 qos_info;
u8 power_constraint;
struct mwifiex_types_wmm_info wmm_info;
+ u8 mac_addr[ETH_ALEN];
};

enum {
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 774858cfe86f..e66ba0d156f8 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -331,6 +331,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
.can_dump_fw = false,
.can_auto_tdls = false,
.can_ext_scan = false,
+ .fw_ready_extra_delay = false,
};

static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -346,6 +347,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
.can_dump_fw = false,
.can_auto_tdls = false,
.can_ext_scan = true,
+ .fw_ready_extra_delay = false,
};

static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -361,6 +363,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
.can_dump_fw = false,
.can_auto_tdls = false,
.can_ext_scan = true,
+ .fw_ready_extra_delay = false,
};

static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -376,6 +379,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
.can_dump_fw = true,
.can_auto_tdls = false,
.can_ext_scan = true,
+ .fw_ready_extra_delay = false,
};

static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
@@ -392,6 +396,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
.fw_dump_enh = true,
.can_auto_tdls = false,
.can_ext_scan = true,
+ .fw_ready_extra_delay = false,
};

static const struct mwifiex_sdio_device mwifiex_sdio_sd8978 = {
@@ -408,6 +413,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8978 = {
.fw_dump_enh = true,
.can_auto_tdls = false,
.can_ext_scan = true,
+ .fw_ready_extra_delay = true,
};

static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
@@ -425,6 +431,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
.fw_dump_enh = true,
.can_auto_tdls = false,
.can_ext_scan = true,
+ .fw_ready_extra_delay = false,
};

static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
@@ -440,6 +447,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
.can_dump_fw = false,
.can_auto_tdls = true,
.can_ext_scan = true,
+ .fw_ready_extra_delay = false,
};

static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
@@ -456,6 +464,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
.fw_dump_enh = true,
.can_auto_tdls = true,
.can_ext_scan = true,
+ .fw_ready_extra_delay = false,
};

static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
@@ -471,6 +480,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
.can_dump_fw = false,
.can_auto_tdls = false,
.can_ext_scan = true,
+ .fw_ready_extra_delay = false,
};

static struct memory_type_mapping generic_mem_type_map[] = {
@@ -563,6 +573,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
card->fw_dump_enh = data->fw_dump_enh;
card->can_auto_tdls = data->can_auto_tdls;
card->can_ext_scan = data->can_ext_scan;
+ card->fw_ready_extra_delay = data->fw_ready_extra_delay;
INIT_WORK(&card->work, mwifiex_sdio_work);
}

@@ -766,8 +777,9 @@ mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
u32 poll_num)
{
+ struct sdio_mmc_card *card = adapter->card;
int ret = 0;
- u16 firmware_stat;
+ u16 firmware_stat = 0;
u32 tries;

for (tries = 0; tries < poll_num; tries++) {
@@ -783,6 +795,13 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
ret = -1;
}

+ if (card->fw_ready_extra_delay &&
+ firmware_stat == FIRMWARE_READY_SDIO)
+ /* firmware might pretend to be ready, when it's not.
+ * Wait a little bit more as a workaround.
+ */
+ msleep(100);
+
return ret;
}

diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index ae94c172310f..a5112cb35cdc 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -258,6 +258,7 @@ struct sdio_mmc_card {
bool fw_dump_enh;
bool can_auto_tdls;
bool can_ext_scan;
+ bool fw_ready_extra_delay;

struct mwifiex_sdio_mpa_tx mpa_tx;
struct mwifiex_sdio_mpa_rx mpa_rx;
@@ -281,6 +282,7 @@ struct mwifiex_sdio_device {
bool fw_dump_enh;
bool can_auto_tdls;
bool can_ext_scan;
+ bool fw_ready_extra_delay;
};

/*
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index e78a201cd150..491e36611909 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -468,6 +468,7 @@ void mwifiex_config_uap_11d(struct mwifiex_private *priv,
static int
mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
{
+ struct host_cmd_tlv_mac_addr *mac_tlv;
struct host_cmd_tlv_dtim_period *dtim_period;
struct host_cmd_tlv_beacon_period *beacon_period;
struct host_cmd_tlv_ssid *ssid;
@@ -487,6 +488,13 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
int i;
u16 cmd_size = *param_size;

+ mac_tlv = (struct host_cmd_tlv_mac_addr *)tlv;
+ mac_tlv->header.type = cpu_to_le16(TLV_TYPE_UAP_MAC_ADDRESS);
+ mac_tlv->header.len = cpu_to_le16(ETH_ALEN);
+ memcpy(mac_tlv->mac_addr, bss_cfg->mac_addr, ETH_ALEN);
+ cmd_size += sizeof(struct host_cmd_tlv_mac_addr);
+ tlv += sizeof(struct host_cmd_tlv_mac_addr);
+
if (bss_cfg->ssid.ssid_len) {
ssid = (struct host_cmd_tlv_ssid *)tlv;
ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
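
The uap_cmd hunk prepends a MAC-address TLV to the BSS configuration command: fill the
type and length, copy the value, then advance both the write pointer and the running
command size. A generic TLV-append sketch in the same spirit (struct layout and type
code are illustrative; endian conversion is omitted, assuming a little-endian host):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct tlv_hdr {
	uint16_t type;
	uint16_t len;
} __attribute__((packed));

static uint8_t *tlv_append(uint8_t *pos, uint16_t type,
			   const void *val, uint16_t len, uint16_t *cmd_size)
{
	struct tlv_hdr *hdr = (struct tlv_hdr *)pos;

	hdr->type = type;			/* real code uses cpu_to_le16() */
	hdr->len = len;
	memcpy(pos + sizeof(*hdr), val, len);

	*cmd_size += sizeof(*hdr) + len;	/* grow the command... */
	return pos + sizeof(*hdr) + len;	/* ...and move past this TLV */
}

int main(void)
{
	uint8_t buf[64], mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
	uint16_t cmd_size = 0;
	uint8_t *pos = buf;

	pos = tlv_append(pos, 0x012b /* illustrative type */, mac, sizeof(mac), &cmd_size);
	printf("cmd_size=%u, next offset=%ld\n", (unsigned)cmd_size, (long)(pos - buf));
	return 0;
}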
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 36564930aef1..1de3c734e136 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -67,7 +67,7 @@ static int mt76_get_of_epprom_from_mtd(struct mt76_dev *dev, void *eep, int offs
goto out_put_node;
}

- offset = be32_to_cpup(list);
+ offset += be32_to_cpup(list);
ret = mtd_read(mtd, offset, len, &retlen, eep);
put_mtd_device(mtd);
if (mtd_is_bitflip(ret))
@@ -106,7 +106,7 @@ static int mt76_get_of_epprom_from_mtd(struct mt76_dev *dev, void *eep, int offs
#endif
}

-static int mt76_get_of_epprom_from_nvmem(struct mt76_dev *dev, void *eep, int len)
+static int mt76_get_of_eeprom_from_nvmem(struct mt76_dev *dev, void *eep, int len)
{
struct device_node *np = dev->dev->of_node;
struct nvmem_cell *cell;
@@ -153,7 +153,7 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len)
if (!ret)
return 0;

- return mt76_get_of_epprom_from_nvmem(dev, eep, len);
+ return mt76_get_of_eeprom_from_nvmem(dev, eep, len);
}
EXPORT_SYMBOL_GPL(mt76_get_of_eeprom);

diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index dae5410d67e8..7f44736ca26f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -572,8 +572,7 @@ struct mt76_sdio {
struct mt76_worker txrx_worker;
struct mt76_worker status_worker;
struct mt76_worker net_worker;
-
- struct work_struct stat_work;
+ struct mt76_worker stat_worker;

u8 *xmit_buf;
u32 xmit_buf_sz;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index fc547a0031ea..67cedd2555f9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -204,8 +204,8 @@ static int mt7663s_suspend(struct device *dev)
mt76_worker_disable(&mdev->mt76.sdio.txrx_worker);
mt76_worker_disable(&mdev->mt76.sdio.status_worker);
mt76_worker_disable(&mdev->mt76.sdio.net_worker);
+ mt76_worker_disable(&mdev->mt76.sdio.stat_worker);

- cancel_work_sync(&mdev->mt76.sdio.stat_work);
clear_bit(MT76_READING_STATS, &mdev->mphy.state);

mt76_tx_status_check(&mdev->mt76, true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
index f3e56817d36e..adc26a222823 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -144,7 +144,8 @@ static inline bool
mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
{
u8 *eep = dev->mt76.eeprom.data;
- u8 val = eep[MT_EE_WIFI_CONF + 7];
+ u8 offs = is_mt7981(&dev->mt76) ? 8 : 7;
+ u8 val = eep[MT_EE_WIFI_CONF + offs];

if (band == NL80211_BAND_2GHZ)
return val & MT_EE_WIFI_CONF7_TSSI0_2G;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index d85105a43d70..3196f56cdf4a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -1047,8 +1047,9 @@ mt7915_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)

phy->mt76->antenna_mask = tx_ant;

- /* handle a variant of mt7916 which has 3T3R but nss2 on 5 GHz band */
- if (is_mt7916(&dev->mt76) && band && hweight8(tx_ant) == max_nss)
+ /* handle a variant of mt7916/mt7981 which has 3T3R but nss2 on 5 GHz band */
+ if ((is_mt7916(&dev->mt76) || is_mt7981(&dev->mt76)) &&
+ band && hweight8(tx_ant) == max_nss)
phy->mt76->chainmask = (dev->chainmask >> chainshift) << chainshift;
else
phy->mt76->chainmask = tx_ant << (chainshift * band);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index fc7ace638ce8..f4ad7219f94f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -742,7 +742,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,

res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
if (!res)
- return -ENOMEM;
+ return 0;

wed->wlan.platform_dev = plat_dev;
wed->wlan.bus_type = MTK_WED_BUS_AXI;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 90c93970acab..d1b1b8f767fc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -1136,22 +1136,27 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
u8 type[2];
u8 rsvd[64];
} __packed req = {
+ .ver = 1,
.idx = idx,
.env = env_cap,
.acpi_conf = mt792x_acpi_get_flags(&dev->phy),
};
int ret, valid_cnt = 0;
- u8 i, *pos;
+ u16 buf_len = 0;
+ u8 *pos;

if (!clc)
return 0;

+ buf_len = le16_to_cpu(clc->len) - sizeof(*clc);
pos = clc->data;
- for (i = 0; i < clc->nr_country; i++) {
+ while (buf_len > 16) {
struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;
u16 len = le16_to_cpu(rule->len);
+ u16 offset = len + sizeof(*rule);

- pos += len + sizeof(*rule);
+ pos += offset;
+ buf_len -= offset;
if (rule->alpha2[0] != alpha2[0] ||
rule->alpha2[1] != alpha2[1])
continue;
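
The mt7921 CLC fix stops trusting a record-count field and instead walks the buffer by
remaining length, so a malformed record length cannot run past the end of the firmware
data. A sketch of that bounded walk over length-prefixed records (record layout below
is simplified; assumes a little-endian host):

#include <stdio.h>
#include <stdint.h>

struct rec_hdr {
	uint8_t  tag[2];
	uint16_t len;	/* payload length following this header */
} __attribute__((packed));

static void walk(const uint8_t *buf, size_t buf_len)
{
	while (buf_len > sizeof(struct rec_hdr)) {
		const struct rec_hdr *r = (const struct rec_hdr *)buf;
		size_t step = sizeof(*r) + r->len;

		if (step > buf_len)	/* truncated or corrupt record: stop */
			break;

		printf("record %c%c, payload %u bytes\n",
		       r->tag[0], r->tag[1], (unsigned)r->len);
		buf += step;
		buf_len -= step;
	}
}

int main(void)
{
	uint8_t buf[16] = { 'U', 'S', 4, 0,  1, 2, 3, 4,
			    'D', 'E', 200, 0 };	/* second record claims too much */

	walk(buf, sizeof(buf));
	return 0;
}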
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index dc1beb76df3e..7591e54d2897 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -228,7 +228,7 @@ static int mt7921s_suspend(struct device *__dev)
mt76_txq_schedule_all(&dev->mphy);
mt76_worker_disable(&mdev->tx_worker);
mt76_worker_disable(&mdev->sdio.status_worker);
- cancel_work_sync(&mdev->sdio.stat_work);
+ mt76_worker_disable(&mdev->sdio.stat_worker);
clear_bit(MT76_READING_STATS, &dev->mphy.state);
mt76_tx_status_check(mdev, true);

@@ -260,6 +260,7 @@ static int mt7921s_suspend(struct device *__dev)
restore_worker:
mt76_worker_enable(&mdev->tx_worker);
mt76_worker_enable(&mdev->sdio.status_worker);
+ mt76_worker_enable(&mdev->sdio.stat_worker);

if (!pm->ds_enable)
mt76_connac_mcu_set_deep_sleep(mdev, false);
@@ -292,6 +293,7 @@ static int mt7921s_resume(struct device *__dev)
mt76_worker_enable(&mdev->sdio.txrx_worker);
mt76_worker_enable(&mdev->sdio.status_worker);
mt76_worker_enable(&mdev->sdio.net_worker);
+ mt76_worker_enable(&mdev->sdio.stat_worker);

/* restore previous ds setting */
if (!pm->ds_enable)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
index 8edd0291c128..389eb0903807 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
@@ -107,7 +107,7 @@ int mt7921s_mac_reset(struct mt792x_dev *dev)
mt76_worker_disable(&dev->mt76.sdio.txrx_worker);
mt76_worker_disable(&dev->mt76.sdio.status_worker);
mt76_worker_disable(&dev->mt76.sdio.net_worker);
- cancel_work_sync(&dev->mt76.sdio.stat_work);
+ mt76_worker_disable(&dev->mt76.sdio.stat_worker);

mt7921s_disable_irq(&dev->mt76);
mt7921s_wfsys_reset(dev);
@@ -115,6 +115,7 @@ int mt7921s_mac_reset(struct mt792x_dev *dev)
mt76_worker_enable(&dev->mt76.sdio.txrx_worker);
mt76_worker_enable(&dev->mt76.sdio.status_worker);
mt76_worker_enable(&dev->mt76.sdio.net_worker);
+ mt76_worker_enable(&dev->mt76.sdio.stat_worker);

dev->fw_assert = false;
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index c43839a20508..26d5675202ba 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -840,10 +840,10 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
struct mt76_vif *mvif;
u16 tx_count = 15;
u32 val;
- bool beacon = !!(changed & (BSS_CHANGED_BEACON |
- BSS_CHANGED_BEACON_ENABLED));
bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
BSS_CHANGED_FILS_DISCOVERY));
+ bool beacon = !!(changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);

mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL;
if (mvif) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
index e4b31228ba0d..dc8d0a30c707 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -221,7 +221,7 @@ struct bss_rate_tlv {
u8 short_preamble;
u8 bc_fixed_rate;
u8 mc_fixed_rate;
- u8 __rsv2[1];
+ u8 __rsv2[9];
} __packed;

struct bss_ra_tlv {
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 419723118ded..c52d550f0c32 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -481,21 +481,21 @@ static void mt76s_status_worker(struct mt76_worker *w)
if (dev->drv->tx_status_data && ndata_frames > 0 &&
!test_and_set_bit(MT76_READING_STATS, &dev->phy.state) &&
!test_bit(MT76_STATE_SUSPEND, &dev->phy.state))
- ieee80211_queue_work(dev->hw, &dev->sdio.stat_work);
+ mt76_worker_schedule(&sdio->stat_worker);
} while (nframes > 0);

if (resched)
mt76_worker_schedule(&dev->tx_worker);
}

-static void mt76s_tx_status_data(struct work_struct *work)
+static void mt76s_tx_status_data(struct mt76_worker *worker)
{
struct mt76_sdio *sdio;
struct mt76_dev *dev;
u8 update = 1;
u16 count = 0;

- sdio = container_of(work, struct mt76_sdio, stat_work);
+ sdio = container_of(worker, struct mt76_sdio, stat_worker);
dev = container_of(sdio, struct mt76_dev, sdio);

while (true) {
@@ -508,7 +508,7 @@ static void mt76s_tx_status_data(struct work_struct *work)
}

if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
- ieee80211_queue_work(dev->hw, &sdio->stat_work);
+ mt76_worker_schedule(&sdio->status_worker);
else
clear_bit(MT76_READING_STATS, &dev->phy.state);
}
@@ -600,8 +600,8 @@ void mt76s_deinit(struct mt76_dev *dev)
mt76_worker_teardown(&sdio->txrx_worker);
mt76_worker_teardown(&sdio->status_worker);
mt76_worker_teardown(&sdio->net_worker);
+ mt76_worker_teardown(&sdio->stat_worker);

- cancel_work_sync(&sdio->stat_work);
clear_bit(MT76_READING_STATS, &dev->phy.state);

mt76_tx_status_check(dev, true);
@@ -644,10 +644,14 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
if (err)
return err;

+ err = mt76_worker_setup(dev->hw, &sdio->stat_worker, mt76s_tx_status_data,
+ "sdio-sta");
+ if (err)
+ return err;
+
sched_set_fifo_low(sdio->status_worker.task);
sched_set_fifo_low(sdio->net_worker.task);
-
- INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
+ sched_set_fifo_low(sdio->stat_worker.task);

dev->queue_ops = &sdio_queue_ops;
dev->bus = bus_ops;
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
index 76d0a778636a..311676c1ece0 100644
--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
@@ -493,9 +493,12 @@ int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer,
void *context)
{
struct usb_device *udev = interface_to_usbdev(usb->ez_usb);
- struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
+ struct urb *urb;
int r;

+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
(void *)buffer, buffer_len, complete_fn, context);

diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 9886e719739b..b118df035243 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -164,21 +164,29 @@ static bool _rtl_pci_platform_switch_device_pci_aspm(
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

+ value &= PCI_EXP_LNKCTL_ASPMC;
+
if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
- value |= 0x40;
+ value |= PCI_EXP_LNKCTL_CCC;

- pci_write_config_byte(rtlpci->pdev, 0x80, value);
+ pcie_capability_clear_and_set_word(rtlpci->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC | value,
+ value);

return false;
}

-/*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/
-static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
+/* @value is PCI_EXP_LNKCTL_CLKREQ_EN or 0 to enable/disable clk request. */
+static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u16 value)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

- pci_write_config_byte(rtlpci->pdev, 0x81, value);
+ value &= PCI_EXP_LNKCTL_CLKREQ_EN;
+
+ pcie_capability_clear_and_set_word(rtlpci->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CLKREQ_EN,
+ value);

if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
udelay(100);
@@ -192,11 +200,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
- u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
/*Retrieve original configuration settings. */
u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
- u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
- pcibridge_linkctrlreg;
u16 aspmlevel = 0;
u8 tmp_u1b = 0;

@@ -221,16 +226,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
/*Set corresponding value. */
aspmlevel |= BIT(0) | BIT(1);
linkctrl_reg &= ~aspmlevel;
- pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));

_rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
- udelay(50);
-
- /*4 Disable Pci Bridge ASPM */
- pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
- pcibridge_linkctrlreg);
-
- udelay(50);
}

/*Enable RTL8192SE ASPM & Enable Pci Bridge ASPM for
@@ -245,9 +242,7 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
- u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
u16 aspmlevel;
- u8 u_pcibridge_aspmsetting;
u8 u_device_aspmsetting;

if (!ppsc->support_aspm)
@@ -259,25 +254,6 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
return;
}

- /*4 Enable Pci Bridge ASPM */
-
- u_pcibridge_aspmsetting =
- pcipriv->ndis_adapter.pcibridge_linkctrlreg |
- rtlpci->const_hostpci_aspm_setting;
-
- if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
- u_pcibridge_aspmsetting &= ~BIT(0);
-
- pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
- u_pcibridge_aspmsetting);
-
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "PlatformEnableASPM(): Write reg[%x] = %x\n",
- (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
- u_pcibridge_aspmsetting);
-
- udelay(50);
-
/*Get ASPM level (with/without Clock Req) */
aspmlevel = rtlpci->const_devicepci_aspm_setting;
u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;
@@ -291,7 +267,8 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)

if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
_rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
- RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
+ RT_RF_OFF_LEVL_CLK_REQ) ?
+ PCI_EXP_LNKCTL_CLKREQ_EN : 0);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
}
udelay(100);
@@ -358,22 +335,6 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
return tpriv != NULL;
}

-static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
-{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
- u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
- u8 linkctrl_reg;
- u8 num4bbytes;
-
- num4bbytes = (capabilityoffset + 0x10) / 4;
-
- /*Read Link Control Register */
- pci_read_config_byte(rtlpci->pdev, (num4bbytes << 2), &linkctrl_reg);
-
- pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
-}
-
static void rtl_pci_parse_configuration(struct pci_dev *pdev,
struct ieee80211_hw *hw)
{
@@ -2028,12 +1989,6 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
PCI_SLOT(bridge_pdev->devfn);
pcipriv->ndis_adapter.pcibridge_funcnum =
PCI_FUNC(bridge_pdev->devfn);
- pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
- pci_pcie_cap(bridge_pdev);
- pcipriv->ndis_adapter.num4bytes =
- (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
-
- rtl_pci_get_linkcontrol_field(hw);

if (pcipriv->ndis_adapter.pcibridge_vendor ==
PCI_BRIDGE_VENDOR_AMD) {
@@ -2050,13 +2005,11 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);

rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
- "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
+ "pci_bridge busnumber:devnumber:funcnumber:vendor:amd %d:%d:%d:%x:%x\n",
pcipriv->ndis_adapter.pcibridge_busnum,
pcipriv->ndis_adapter.pcibridge_devnum,
pcipriv->ndis_adapter.pcibridge_funcnum,
pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
- pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
- pcipriv->ndis_adapter.pcibridge_linkctrlreg,
pcipriv->ndis_adapter.amd_l1_patch);

rtl_pci_parse_configuration(pdev, hw);
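
The rtlwifi pci.c rework drops the hand-computed config-space offsets and bridge
register caching in favour of the PCIe capability accessor operating on named LNKCTL
bits. As I read the hunk, the accessor performs a read-modify-write that clears the
given bits and then sets the new value; a tiny sketch of that semantic with locally
defined bit names (not the kernel's PCI_EXP_* macros):

#include <stdio.h>
#include <stdint.h>

#define LNKCTL_ASPMC		0x0003	/* ASPM control field */
#define LNKCTL_CLKREQ_EN	0x0100

/* Clear the named bits, then set the new value (read-modify-write). */
static uint16_t clear_and_set(uint16_t reg, uint16_t clear, uint16_t set)
{
	return (reg & ~clear) | set;
}

int main(void)
{
	uint16_t lnkctl = 0x0142;	/* example register value */

	lnkctl = clear_and_set(lnkctl, LNKCTL_ASPMC, 0x2);	/* L1 only */
	lnkctl = clear_and_set(lnkctl, LNKCTL_CLKREQ_EN, 0);	/* drop CLKREQ */
	printf("LNKCTL = %#06x\n", lnkctl);
	return 0;
}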
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
index 866861626a0a..d6307197dfea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.h
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
@@ -236,11 +236,6 @@ struct mp_adapter {
u16 pcibridge_vendorid;
u16 pcibridge_deviceid;

- u8 num4bytes;
-
- u8 pcibridge_pciehdr_offset;
- u8 pcibridge_linkctrlreg;
-
bool amd_l1_patch;
};

diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index 12d0b3a87af7..0fab3a0c7d49 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -16,12 +16,6 @@ static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw,
static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset,
u32 data);
-static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i = ffs(bitmask);
-
- return i ? i - 1 : 32;
-}
static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw);
static bool _rtl88e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
static bool phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
@@ -51,7 +45,7 @@ u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;

rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
@@ -74,7 +68,7 @@ void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,

if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((originalvalue & (~bitmask)) | (data << bitshift));
}

@@ -99,7 +93,7 @@ u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,


original_value = _rtl88e_phy_rf_serial_read(hw, rfpath, regaddr);
- bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;

spin_unlock(&rtlpriv->locks.rf_lock);
@@ -127,7 +121,7 @@ void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
original_value = _rtl88e_phy_rf_serial_read(hw,
rfpath,
regaddr);
- bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data =
((original_value & (~bitmask)) |
(data << bitshift));
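
These rtlwifi hunks replace per-chip copies of the bit-shift helper with the shared
calculate_bit_shift(); the underlying operation is simply "find the lowest set bit of
the mask, then shift to extract or insert the field". A standalone sketch of that
read-modify-write pattern, using the same ffs-based convention the removed helpers had:

#include <stdio.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

static unsigned int bit_shift(uint32_t bitmask)
{
	int i = ffs(bitmask);

	return i ? i - 1 : 32;	/* same convention as the removed helpers */
}

static uint32_t field_get(uint32_t reg, uint32_t mask)
{
	return (reg & mask) >> bit_shift(mask);
}

static uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val << bit_shift(mask));
}

int main(void)
{
	uint32_t reg = 0x12345678;

	printf("field = %#x\n", field_get(reg, 0x0000ff00));		/* 0x56 */
	printf("reg   = %#x\n", field_set(reg, 0x0000ff00, 0xaa));	/* 0x1234aa78 */
	return 0;
}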
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
index 3d29c8dbb255..144ee780e1b6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
@@ -17,7 +17,7 @@ u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;

rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
@@ -40,7 +40,7 @@ void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,

if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((originalvalue & (~bitmask)) | (data << bitshift));
}

@@ -143,14 +143,6 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write);

-u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i = ffs(bitmask);
-
- return i ? i - 1 : 32;
-}
-EXPORT_SYMBOL(_rtl92c_phy_calculate_bit_shift);
-
static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
{
rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
index 75afa6253ad0..e64d377dfe9e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
@@ -196,7 +196,6 @@ bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
void rtl92c_phy_set_io(struct ieee80211_hw *hw);
void rtl92c_bb_block_on(struct ieee80211_hw *hw);
-u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
enum wireless_mode wirelessmode,
u8 txpwridx);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
index da54e51badd3..fa70a7d5539f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
@@ -39,7 +39,7 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
rfpath, regaddr);
}

- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;

spin_unlock(&rtlpriv->locks.rf_lock);
@@ -110,7 +110,7 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
original_value = _rtl92c_phy_rf_serial_read(hw,
rfpath,
regaddr);
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data =
((original_value & (~bitmask)) |
(data << bitshift));
@@ -122,7 +122,7 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
original_value = _rtl92c_phy_fw_rf_serial_read(hw,
rfpath,
regaddr);
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data =
((original_value & (~bitmask)) |
(data << bitshift));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
index 7582a162bd11..c7a0d4c776f0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
@@ -94,7 +94,6 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath,
u32 offset);
u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset);
-u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset, u32 data);
void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
index a8d9fe269f31..0b8cb7e61fd8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
@@ -32,7 +32,7 @@ u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
original_value = _rtl92c_phy_fw_rf_serial_read(hw,
rfpath, regaddr);
}
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
@@ -56,7 +56,7 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
original_value = _rtl92c_phy_rf_serial_read(hw,
rfpath,
regaddr);
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data =
((original_value & (~bitmask)) |
(data << bitshift));
@@ -67,7 +67,7 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
original_value = _rtl92c_phy_fw_rf_serial_read(hw,
rfpath,
regaddr);
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data =
((original_value & (~bitmask)) |
(data << bitshift));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index d18c092b6142..d835a27429f0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -169,13 +169,6 @@ static const u8 channel_all[59] = {
157, 159, 161, 163, 165
};

-static u32 _rtl92d_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i = ffs(bitmask);
-
- return i ? i - 1 : 32;
-}
-
u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -198,7 +191,7 @@ u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
} else {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
}
- bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
"BBR MASK=0x%x Addr[0x%x]=0x%x\n",
@@ -230,7 +223,7 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
dbi_direct);
else
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((originalvalue & (~bitmask)) | (data << bitshift));
}
if (rtlhal->during_mac1init_radioa || rtlhal->during_mac0init_radiob)
@@ -317,7 +310,7 @@ u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr);
- bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
spin_unlock(&rtlpriv->locks.rf_lock);
rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
@@ -343,7 +336,7 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl92d_phy_rf_serial_read(hw,
rfpath, regaddr);
- bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((original_value & (~bitmask)) |
(data << bitshift));
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index cc0bcaf13e96..73ef602bfb01 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -16,7 +16,6 @@ static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw,
static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset,
u32 data);
-static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask);
static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw);
static bool _rtl92ee_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
static bool phy_config_bb_with_hdr_file(struct ieee80211_hw *hw,
@@ -46,7 +45,7 @@ u32 rtl92ee_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;

rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
@@ -68,7 +67,7 @@ void rtl92ee_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,

if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((originalvalue & (~bitmask)) | (data << bitshift));
}

@@ -92,7 +91,7 @@ u32 rtl92ee_phy_query_rf_reg(struct ieee80211_hw *hw,
spin_lock(&rtlpriv->locks.rf_lock);

original_value = _rtl92ee_phy_rf_serial_read(hw , rfpath, regaddr);
- bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;

spin_unlock(&rtlpriv->locks.rf_lock);
@@ -119,7 +118,7 @@ void rtl92ee_phy_set_rf_reg(struct ieee80211_hw *hw,

if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl92ee_phy_rf_serial_read(hw, rfpath, addr);
- bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = (original_value & (~bitmask)) | (data << bitshift);
}

@@ -201,13 +200,6 @@ static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw,
pphyreg->rf3wire_offset, data_and_addr);
}

-static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i = ffs(bitmask);
-
- return i ? i - 1 : 32;
-}
-
bool rtl92ee_phy_mac_config(struct ieee80211_hw *hw)
{
return _rtl92ee_phy_config_mac_with_headerfile(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
index 09591a0b5a81..d9ef7e1da1db 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
@@ -14,13 +14,6 @@
#include "hw.h"
#include "table.h"

-static u32 _rtl92s_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i = ffs(bitmask);
-
- return i ? i - 1 : 32;
-}
-
u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -30,7 +23,7 @@ u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
regaddr, bitmask);

originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;

rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
@@ -52,7 +45,7 @@ void rtl92s_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,

if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((originalvalue & (~bitmask)) | (data << bitshift));
}

@@ -157,7 +150,7 @@ u32 rtl92s_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,

original_value = _rtl92s_phy_rf_serial_read(hw, rfpath, regaddr);

- bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;

spin_unlock(&rtlpriv->locks.rf_lock);
@@ -188,7 +181,7 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl92s_phy_rf_serial_read(hw, rfpath,
regaddr);
- bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((original_value & (~bitmask)) | (data << bitshift));
}

diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 5323ead30db0..fa1839d8ee55 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -29,9 +29,10 @@ static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
u32 data);
static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask)
{
- u32 i = ffs(bitmask);
+ if (WARN_ON_ONCE(!bitmask))
+ return 0;

- return i ? i - 1 : 32;
+ return __ffs(bitmask);
}
static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw);
/*static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);*/
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 2e7e04f91279..8cbf3fb38853 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -3080,4 +3080,11 @@ static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw,
return ieee80211_find_sta(mac->vif, mac_addr);
}

+static inline u32 calculate_bit_shift(u32 bitmask)
+{
+ if (WARN_ON_ONCE(!bitmask))
+ return 0;
+
+ return __ffs(bitmask);
+}
#endif
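
For reference, the per-driver _rtlXXXX_phy_calculate_bit_shift() copies removed above are replaced by this single inline helper; unlike the old ffs()-based versions it warns on and rejects an all-zero mask instead of returning a shift of 32. A minimal sketch (not part of the patch; field_get()/field_set() are made-up names) of how the register query/set paths use it:

	static inline u32 field_get(u32 regval, u32 bitmask)
	{
		/* extract the masked field, shifted down to bit 0 */
		return (regval & bitmask) >> calculate_bit_shift(bitmask);
	}

	static inline u32 field_set(u32 regval, u32 bitmask, u32 value)
	{
		/* replace the masked field with 'value' */
		return (regval & ~bitmask) | (value << calculate_bit_shift(bitmask));
	}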
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index a99b53d44267..d8d68f16014e 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -280,9 +280,9 @@ static void rtw_ops_configure_filter(struct ieee80211_hw *hw,

if (changed_flags & FIF_ALLMULTI) {
if (*new_flags & FIF_ALLMULTI)
- rtwdev->hal.rcr |= BIT_AM | BIT_AB;
+ rtwdev->hal.rcr |= BIT_AM;
else
- rtwdev->hal.rcr &= ~(BIT_AM | BIT_AB);
+ rtwdev->hal.rcr &= ~(BIT_AM);
}
if (changed_flags & FIF_FCSFAIL) {
if (*new_flags & FIF_FCSFAIL)
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index 2c1fb2dabd40..0cae5746f540 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -500,19 +500,40 @@ static u32 rtw_sdio_get_tx_addr(struct rtw_dev *rtwdev, size_t size,
static int rtw_sdio_read_port(struct rtw_dev *rtwdev, u8 *buf, size_t count)
{
struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
+ struct mmc_host *host = rtwsdio->sdio_func->card->host;
bool bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);
u32 rxaddr = rtwsdio->rx_addr++;
- int ret;
+ int ret = 0, err;
+ size_t bytes;

if (bus_claim)
sdio_claim_host(rtwsdio->sdio_func);

- ret = sdio_memcpy_fromio(rtwsdio->sdio_func, buf,
- RTW_SDIO_ADDR_RX_RX0FF_GEN(rxaddr), count);
- if (ret)
- rtw_warn(rtwdev,
- "Failed to read %zu byte(s) from SDIO port 0x%08x",
- count, rxaddr);
+ while (count > 0) {
+ bytes = min_t(size_t, host->max_req_size, count);
+
+ err = sdio_memcpy_fromio(rtwsdio->sdio_func, buf,
+ RTW_SDIO_ADDR_RX_RX0FF_GEN(rxaddr),
+ bytes);
+ if (err) {
+ rtw_warn(rtwdev,
+ "Failed to read %zu byte(s) from SDIO port 0x%08x: %d",
+ bytes, rxaddr, err);
+
+ /* Signal to the caller that reading did not work and
+ * that the data in the buffer is short/corrupted.
+ */
+ ret = err;
+
+ /* Don't stop here - instead drain the remaining data
+ * from the card's buffer, else the card will return
+ * corrupt data for the next rtw_sdio_read_port() call.
+ */
+ }
+
+ count -= bytes;
+ buf += bytes;
+ }

if (bus_claim)
sdio_release_host(rtwsdio->sdio_func);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 88f760a7cbc3..d7503aef599f 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -463,12 +463,25 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
}

for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
- shinfo->nr_frags++, gop++, nr_slots--) {
+ nr_slots--) {
+ if (unlikely(!txp->size)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->response_lock, flags);
+ make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
+ push_tx_responses(queue);
+ spin_unlock_irqrestore(&queue->response_lock, flags);
+ ++txp;
+ continue;
+ }
+
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
xenvif_tx_create_map_op(queue, pending_idx, txp,
txp == first ? extra_count : 0, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+ ++shinfo->nr_frags;
+ ++gop;

if (txp == first)
txp = txfrags;
@@ -481,20 +494,39 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
shinfo = skb_shinfo(nskb);
frags = shinfo->frags;

- for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
- shinfo->nr_frags++, txp++, gop++) {
+ for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
+ if (unlikely(!txp->size)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->response_lock, flags);
+ make_tx_response(queue, txp, 0,
+ XEN_NETIF_RSP_OKAY);
+ push_tx_responses(queue);
+ spin_unlock_irqrestore(&queue->response_lock,
+ flags);
+ continue;
+ }
+
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
gop);
frag_set_pending_idx(&frags[shinfo->nr_frags],
pending_idx);
+ ++shinfo->nr_frags;
+ ++gop;
}

- skb_shinfo(skb)->frag_list = nskb;
- } else if (nskb) {
+ if (shinfo->nr_frags) {
+ skb_shinfo(skb)->frag_list = nskb;
+ nskb = NULL;
+ }
+ }
+
+ if (nskb) {
/* A frag_list skb was allocated but it is no longer needed
- * because enough slots were converted to copy ops above.
+ * because enough slots were converted to copy ops above or some
+ * were empty.
*/
kfree_skb(nskb);
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 197fc2ecb164..a4f802790ca0 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -19,6 +19,7 @@
#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
+#define NVMET_TCP_MAXH2CDATA 0x400000 /* 16M arbitrary limit */

static int param_store_val(const char *str, int *val, int min, int max)
{
@@ -900,7 +901,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
icresp->hdr.pdo = 0;
icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
- icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
+ icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
icresp->cpda = 0;
if (queue->hdr_digest)
icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
@@ -953,6 +954,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
struct nvme_tcp_data_pdu *data = &queue->pdu.data;
struct nvmet_tcp_cmd *cmd;
+ unsigned int exp_data_len;

if (likely(queue->nr_cmds)) {
if (unlikely(data->ttag >= queue->nr_cmds)) {
@@ -971,12 +973,24 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
data->ttag, le32_to_cpu(data->data_offset),
cmd->rbytes_done);
/* FIXME: use path and transport errors */
- nvmet_req_complete(&cmd->req,
- NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+ nvmet_tcp_fatal_error(queue);
return -EPROTO;
}

+ exp_data_len = le32_to_cpu(data->hdr.plen) -
+ nvmet_tcp_hdgst_len(queue) -
+ nvmet_tcp_ddgst_len(queue) -
+ sizeof(*data);
+
cmd->pdu_len = le32_to_cpu(data->data_length);
+ if (unlikely(cmd->pdu_len != exp_data_len ||
+ cmd->pdu_len == 0 ||
+ cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
+ pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
+ /* FIXME: use proper transport errors */
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
cmd->pdu_recv = 0;
nvmet_tcp_build_pdu_iovec(cmd);
queue->cmd = cmd;
diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
index 6109b3806b12..974d99d47f51 100644
--- a/drivers/nvme/target/trace.h
+++ b/drivers/nvme/target/trace.h
@@ -53,8 +53,7 @@ static inline void __assign_req_name(char *name, struct nvmet_req *req)
return;
}

- strncpy(name, req->ns->device_path,
- min_t(size_t, DISK_NAME_LEN, strlen(req->ns->device_path)));
+ strscpy_pad(name, req->ns->device_path, DISK_NAME_LEN);
}
#endif

@@ -85,7 +84,7 @@ TRACE_EVENT(nvmet_req_init,
__entry->flags = cmd->common.flags;
__entry->nsid = le32_to_cpu(cmd->common.nsid);
__entry->metadata = le64_to_cpu(cmd->common.metadata);
- memcpy(__entry->cdw10, &cmd->common.cdw10,
+ memcpy(__entry->cdw10, &cmd->common.cdws,
sizeof(__entry->cdw10));
),
TP_printk("nvmet%s: %sqid=%d, cmdid=%u, nsid=%u, flags=%#x, "
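
For reference, the strncpy() removed above was bounded by strlen() of the source, so it never wrote a terminating NUL itself and relied on the destination being pre-zeroed; strscpy_pad() always NUL-terminates and zero-fills the rest of the buffer. A minimal sketch of the difference (not part of the patch; the buffer size and path are made up):

	char name[8];
	const char *path = "/dev/nvme0n1";

	/* old pattern: copies min(8, strlen(path)) bytes, writes no NUL */
	strncpy(name, path, min_t(size_t, sizeof(name), strlen(path)));

	/* new pattern: truncates to 7 chars + NUL, zero-pads the remainder */
	strscpy_pad(name, path, sizeof(name));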
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 8d93cb6ea9cd..b0ad8fc06e80 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1464,6 +1464,7 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
out_args->np = new;
of_node_put(cur);
cur = new;
+ new = NULL;
}
put:
of_node_put(cur);
diff --git a/drivers/of/unittest-data/tests-phandle.dtsi b/drivers/of/unittest-data/tests-phandle.dtsi
index d01f92f0f0db..554a996b2ef1 100644
--- a/drivers/of/unittest-data/tests-phandle.dtsi
+++ b/drivers/of/unittest-data/tests-phandle.dtsi
@@ -40,6 +40,13 @@ provider4: provider4 {
phandle-map-pass-thru = <0x0 0xf0>;
};

+ provider5: provider5 {
+ #phandle-cells = <2>;
+ phandle-map = <2 7 &provider4 2 3>;
+ phandle-map-mask = <0xff 0xf>;
+ phandle-map-pass-thru = <0x0 0xf0>;
+ };
+
consumer-a {
phandle-list = <&provider1 1>,
<&provider2 2 0>,
@@ -66,7 +73,8 @@ consumer-b {
<&provider4 4 0x100>,
<&provider4 0 0x61>,
<&provider0>,
- <&provider4 19 0x20>;
+ <&provider4 19 0x20>,
+ <&provider5 2 7>;
phandle-list-bad-phandle = <12345678 0 0>;
phandle-list-bad-args = <&provider2 1 0>,
<&provider4 0>;
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index ad2b7879cc67..f278def7ef03 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -456,6 +456,9 @@ static void __init of_unittest_parse_phandle_with_args(void)

unittest(passed, "index %i - data error on node %pOF rc=%i\n",
i, args.np, rc);
+
+ if (rc == 0)
+ of_node_put(args.np);
}

/* Check for missing list property */
@@ -545,8 +548,9 @@ static void __init of_unittest_parse_phandle_with_args(void)

static void __init of_unittest_parse_phandle_with_args_map(void)
{
- struct device_node *np, *p0, *p1, *p2, *p3;
+ struct device_node *np, *p[6] = {};
struct of_phandle_args args;
+ unsigned int prefs[6];
int i, rc;

np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-b");
@@ -555,34 +559,24 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
return;
}

- p0 = of_find_node_by_path("/testcase-data/phandle-tests/provider0");
- if (!p0) {
- pr_err("missing testcase data\n");
- return;
- }
-
- p1 = of_find_node_by_path("/testcase-data/phandle-tests/provider1");
- if (!p1) {
- pr_err("missing testcase data\n");
- return;
- }
-
- p2 = of_find_node_by_path("/testcase-data/phandle-tests/provider2");
- if (!p2) {
- pr_err("missing testcase data\n");
- return;
- }
-
- p3 = of_find_node_by_path("/testcase-data/phandle-tests/provider3");
- if (!p3) {
- pr_err("missing testcase data\n");
- return;
+ p[0] = of_find_node_by_path("/testcase-data/phandle-tests/provider0");
+ p[1] = of_find_node_by_path("/testcase-data/phandle-tests/provider1");
+ p[2] = of_find_node_by_path("/testcase-data/phandle-tests/provider2");
+ p[3] = of_find_node_by_path("/testcase-data/phandle-tests/provider3");
+ p[4] = of_find_node_by_path("/testcase-data/phandle-tests/provider4");
+ p[5] = of_find_node_by_path("/testcase-data/phandle-tests/provider5");
+ for (i = 0; i < ARRAY_SIZE(p); ++i) {
+ if (!p[i]) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+ prefs[i] = kref_read(&p[i]->kobj.kref);
}

rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells");
- unittest(rc == 7, "of_count_phandle_with_args() returned %i, expected 7\n", rc);
+ unittest(rc == 8, "of_count_phandle_with_args() returned %i, expected 8\n", rc);

- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 9; i++) {
bool passed = true;

memset(&args, 0, sizeof(args));
@@ -593,13 +587,13 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
switch (i) {
case 0:
passed &= !rc;
- passed &= (args.np == p1);
+ passed &= (args.np == p[1]);
passed &= (args.args_count == 1);
passed &= (args.args[0] == 1);
break;
case 1:
passed &= !rc;
- passed &= (args.np == p3);
+ passed &= (args.np == p[3]);
passed &= (args.args_count == 3);
passed &= (args.args[0] == 2);
passed &= (args.args[1] == 5);
@@ -610,28 +604,36 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
break;
case 3:
passed &= !rc;
- passed &= (args.np == p0);
+ passed &= (args.np == p[0]);
passed &= (args.args_count == 0);
break;
case 4:
passed &= !rc;
- passed &= (args.np == p1);
+ passed &= (args.np == p[1]);
passed &= (args.args_count == 1);
passed &= (args.args[0] == 3);
break;
case 5:
passed &= !rc;
- passed &= (args.np == p0);
+ passed &= (args.np == p[0]);
passed &= (args.args_count == 0);
break;
case 6:
passed &= !rc;
- passed &= (args.np == p2);
+ passed &= (args.np == p[2]);
passed &= (args.args_count == 2);
passed &= (args.args[0] == 15);
passed &= (args.args[1] == 0x20);
break;
case 7:
+ passed &= !rc;
+ passed &= (args.np == p[3]);
+ passed &= (args.args_count == 3);
+ passed &= (args.args[0] == 2);
+ passed &= (args.args[1] == 5);
+ passed &= (args.args[2] == 3);
+ break;
+ case 8:
passed &= (rc == -ENOENT);
break;
default:
@@ -640,6 +642,9 @@ static void __init of_unittest_parse_phandle_with_args_map(void)

unittest(passed, "index %i - data error on node %s rc=%i\n",
i, args.np->full_name, rc);
+
+ if (rc == 0)
+ of_node_put(args.np);
}

/* Check for missing list property */
@@ -686,6 +691,13 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
"OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1");

unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+
+ for (i = 0; i < ARRAY_SIZE(p); ++i) {
+ unittest(prefs[i] == kref_read(&p[i]->kobj.kref),
+ "provider%d: expected:%d got:%d\n",
+ i, prefs[i], kref_read(&p[i]->kobj.kref));
+ of_node_put(p[i]);
+ }
}

static void __init of_unittest_property_string(void)
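
For reference, the reworked test above now drops the reference taken by every successful of_parse_phandle_with_args_map() lookup and compares each provider node's refcount before and after the run, so a reference leaked inside the parser fails the unittest. The general shape of that check, as a sketch (not part of the patch):

	unsigned int before = kref_read(&np->kobj.kref);

	/* ... exercise the parser, of_node_put() every node it returned ... */

	unittest(kref_read(&np->kobj.kref) == before,
		 "node %pOF leaked a reference\n", np);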
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 0def919f89fa..cf3836561316 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -1218,7 +1218,16 @@ static int ks_pcie_probe(struct platform_device *pdev)
goto err_link;
}

+ /* Obtain references to the PHYs */
+ for (i = 0; i < num_lanes; i++)
+ phy_pm_runtime_get_sync(ks_pcie->phy[i]);
+
ret = ks_pcie_enable_phy(ks_pcie);
+
+ /* Release references to the PHYs */
+ for (i = 0; i < num_lanes; i++)
+ phy_pm_runtime_put_sync(ks_pcie->phy[i]);
+
if (ret) {
dev_err(dev, "failed to enable phy\n");
goto err_link;
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index f9182f8d552f..8d79dd0e1d60 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -598,6 +598,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
}

aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
+ msg_addr &= ~aligned_offset;
ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
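
For reference, dw_pcie_ep_map_addr() maps a window of page_size bytes, so the MSI-X message address handed to it needs to be page-aligned; the added line strips the in-page offset before mapping, and that offset is applied again when the doorbell value is written. A sketch of the intended split (not part of the patch; the final write is paraphrased from the surrounding upstream function and may differ in detail):

	aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
	msg_addr &= ~aligned_offset;		/* page-aligned base for the mapping */
	ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
				  epc->mem->window.page_size);
	if (ret)
		return ret;

	writel(msg_data, ep->msi_mem + aligned_offset);	/* lands at base + offset */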
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index e0e27645fdf4..975b3024fb08 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -245,35 +245,60 @@ static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
resource_size_t cpu_addr,
resource_size_t pci_addr,
resource_size_t size,
- unsigned long type, int num)
+ unsigned long type, int *num)
{
+ resource_size_t remaining = size;
+ resource_size_t table_size;
+ resource_size_t addr_align;
+ const char *range_type;
void __iomem *table;
u32 val;

- if (num >= PCIE_MAX_TRANS_TABLES) {
- dev_err(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
- (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
- return -ENODEV;
- }
+ while (remaining && (*num < PCIE_MAX_TRANS_TABLES)) {
+ /* Table size needs to be a power of 2 */
+ table_size = BIT(fls(remaining) - 1);
+
+ if (cpu_addr > 0) {
+ addr_align = BIT(ffs(cpu_addr) - 1);
+ table_size = min(table_size, addr_align);
+ }
+
+ /* Minimum size of translate table is 4KiB */
+ if (table_size < 0x1000) {
+ dev_err(pcie->dev, "illegal table size %#llx\n",
+ (unsigned long long)table_size);
+ return -EINVAL;
+ }

- table = pcie->base + PCIE_TRANS_TABLE_BASE_REG +
- num * PCIE_ATR_TLB_SET_OFFSET;
+ table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET;
+ writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table);
+ writel_relaxed(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
+ writel_relaxed(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
+ writel_relaxed(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);

- writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
- table);
- writel_relaxed(upper_32_bits(cpu_addr),
- table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
- writel_relaxed(lower_32_bits(pci_addr),
- table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
- writel_relaxed(upper_32_bits(pci_addr),
- table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
+ if (type == IORESOURCE_IO) {
+ val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
+ range_type = "IO";
+ } else {
+ val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
+ range_type = "MEM";
+ }

- if (type == IORESOURCE_IO)
- val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
- else
- val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
+ writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);

- writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
+ dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
+ range_type, *num, (unsigned long long)cpu_addr,
+ (unsigned long long)pci_addr, (unsigned long long)table_size);
+
+ cpu_addr += table_size;
+ pci_addr += table_size;
+ remaining -= table_size;
+ (*num)++;
+ }
+
+ if (remaining)
+ dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
+ (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);

return 0;
}
@@ -380,30 +405,20 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
resource_size_t cpu_addr;
resource_size_t pci_addr;
resource_size_t size;
- const char *range_type;

- if (type == IORESOURCE_IO) {
+ if (type == IORESOURCE_IO)
cpu_addr = pci_pio_to_address(res->start);
- range_type = "IO";
- } else if (type == IORESOURCE_MEM) {
+ else if (type == IORESOURCE_MEM)
cpu_addr = res->start;
- range_type = "MEM";
- } else {
+ else
continue;
- }

pci_addr = res->start - entry->offset;
size = resource_size(res);
err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
- type, table_index);
+ type, &table_index);
if (err)
return err;
-
- dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
- range_type, table_index, (unsigned long long)cpu_addr,
- (unsigned long long)pci_addr, (unsigned long long)size);
-
- table_index++;
}

return 0;
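
For reference, mtk_pcie_set_trans_table() now walks the range itself, programming one translation window per iteration: each window is the largest power of two that both fits the remaining size and respects the natural alignment of the current CPU address, with a 4 KiB minimum. A standalone userspace model of just that size calculation (not part of the patch; the example address, size and table limit are made up):

	/* Build with: cc -o atr-split atr-split.c */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t cpu_addr = 0x20300000, remaining = 0x1500000;
		int num = 0;

		while (remaining && num < 8) {
			/* largest power of two <= remaining size */
			uint64_t size = 1ULL << (63 - __builtin_clzll(remaining));
			/* ...capped by the alignment of the current CPU address */
			uint64_t align = cpu_addr ? (cpu_addr & -cpu_addr) : size;

			if (align < size)
				size = align;
			if (size < 0x1000)	/* hardware minimum: 4 KiB */
				break;

			printf("window[%d]: cpu=%#llx size=%#llx\n", num,
			       (unsigned long long)cpu_addr,
			       (unsigned long long)size);

			cpu_addr += size;
			remaining -= size;
			num++;
		}
		return 0;
	}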
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 66a8f73296fc..48372013f26d 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -617,12 +617,18 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
if (status & MSI_STATUS){
unsigned long imsi_status;

+ /*
+ * The interrupt status can be cleared even if the
+ * MSI status remains pending. As such, given the
+ * edge-triggered interrupt type, its status should
+ * be cleared before being dispatched to the
+ * handler of the underlying device.
+ */
+ writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM)
generic_handle_domain_irq(port->inner_domain, bit);
}
- /* Clear MSI interrupt status */
- writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
}
}

diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index b7b9d3e21f97..6dc918a8a023 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -209,28 +209,28 @@ static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
vector + 1);
}

-static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
- void *to, size_t size)
+static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
- size_t offset = get_align_offset(epf_mhi, from);
+ size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
void __iomem *tre_buf;
phys_addr_t tre_phys;
int ret;

mutex_lock(&epf_mhi->lock);

- ret = __pci_epf_mhi_alloc_map(mhi_cntrl, from, &tre_phys, &tre_buf,
- offset, size);
+ ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
+ &tre_buf, offset, buf_info->size);
if (ret) {
mutex_unlock(&epf_mhi->lock);
return ret;
}

- memcpy_fromio(to, tre_buf, size);
+ memcpy_fromio(buf_info->dev_addr, tre_buf, buf_info->size);

- __pci_epf_mhi_unmap_free(mhi_cntrl, from, tre_phys, tre_buf, offset,
- size);
+ __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
+ tre_buf, offset, buf_info->size);

mutex_unlock(&epf_mhi->lock);

@@ -238,27 +238,27 @@ static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
}

static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
- void *from, u64 to, size_t size)
+ struct mhi_ep_buf_info *buf_info)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
- size_t offset = get_align_offset(epf_mhi, to);
+ size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
void __iomem *tre_buf;
phys_addr_t tre_phys;
int ret;

mutex_lock(&epf_mhi->lock);

- ret = __pci_epf_mhi_alloc_map(mhi_cntrl, to, &tre_phys, &tre_buf,
- offset, size);
+ ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
+ &tre_buf, offset, buf_info->size);
if (ret) {
mutex_unlock(&epf_mhi->lock);
return ret;
}

- memcpy_toio(tre_buf, from, size);
+ memcpy_toio(tre_buf, buf_info->dev_addr, buf_info->size);

- __pci_epf_mhi_unmap_free(mhi_cntrl, to, tre_phys, tre_buf, offset,
- size);
+ __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
+ tre_buf, offset, buf_info->size);

mutex_unlock(&epf_mhi->lock);

@@ -270,8 +270,8 @@ static void pci_epf_mhi_dma_callback(void *param)
complete(param);
}

-static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
- void *to, size_t size)
+static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
@@ -284,13 +284,13 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
dma_addr_t dst_addr;
int ret;

- if (size < SZ_4K)
- return pci_epf_mhi_iatu_read(mhi_cntrl, from, to, size);
+ if (buf_info->size < SZ_4K)
+ return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info);

mutex_lock(&epf_mhi->lock);

config.direction = DMA_DEV_TO_MEM;
- config.src_addr = from;
+ config.src_addr = buf_info->host_addr;

ret = dmaengine_slave_config(chan, &config);
if (ret) {
@@ -298,14 +298,16 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
goto err_unlock;
}

- dst_addr = dma_map_single(dma_dev, to, size, DMA_FROM_DEVICE);
+ dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_FROM_DEVICE);
ret = dma_mapping_error(dma_dev, dst_addr);
if (ret) {
dev_err(dev, "Failed to map remote memory\n");
goto err_unlock;
}

- desc = dmaengine_prep_slave_single(chan, dst_addr, size, DMA_DEV_TO_MEM,
+ desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
+ DMA_DEV_TO_MEM,
DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(dev, "Failed to prepare DMA\n");
@@ -332,15 +334,15 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
}

err_unmap:
- dma_unmap_single(dma_dev, dst_addr, size, DMA_FROM_DEVICE);
+ dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
err_unlock:
mutex_unlock(&epf_mhi->lock);

return ret;
}

-static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
- u64 to, size_t size)
+static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
@@ -353,13 +355,13 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
dma_addr_t src_addr;
int ret;

- if (size < SZ_4K)
- return pci_epf_mhi_iatu_write(mhi_cntrl, from, to, size);
+ if (buf_info->size < SZ_4K)
+ return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info);

mutex_lock(&epf_mhi->lock);

config.direction = DMA_MEM_TO_DEV;
- config.dst_addr = to;
+ config.dst_addr = buf_info->host_addr;

ret = dmaengine_slave_config(chan, &config);
if (ret) {
@@ -367,14 +369,16 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
goto err_unlock;
}

- src_addr = dma_map_single(dma_dev, from, size, DMA_TO_DEVICE);
+ src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_TO_DEVICE);
ret = dma_mapping_error(dma_dev, src_addr);
if (ret) {
dev_err(dev, "Failed to map remote memory\n");
goto err_unlock;
}

- desc = dmaengine_prep_slave_single(chan, src_addr, size, DMA_MEM_TO_DEV,
+ desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
+ DMA_MEM_TO_DEV,
DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(dev, "Failed to prepare DMA\n");
@@ -401,7 +405,7 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
}

err_unmap:
- dma_unmap_single(dma_dev, src_addr, size, DMA_FROM_DEVICE);
+ dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
err_unlock:
mutex_unlock(&epf_mhi->lock);

diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index caae2d3e9d3e..6404b17d3aeb 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -811,7 +811,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
#define CMN_EVENT_HNF_OCC(_model, _name, _event) \
CMN_EVENT_HN_OCC(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNF_CLS(_model, _name, _event) \
- CMN_EVENT_HN_CLS(_model, hnf_##_name, CMN_TYPE_HNS, _event)
+ CMN_EVENT_HN_CLS(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNF_SNT(_model, _name, _event) \
CMN_EVENT_HN_SNT(_model, hnf_##_name, CMN_TYPE_HNF, _event)

diff --git a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
index 63da05e5831c..636fb79647c8 100644
--- a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
@@ -383,8 +383,8 @@ static struct attribute *hisi_uc_pmu_events_attr[] = {
HISI_PMU_EVENT_ATTR(cpu_rd, 0x10),
HISI_PMU_EVENT_ATTR(cpu_rd64, 0x17),
HISI_PMU_EVENT_ATTR(cpu_rs64, 0x19),
- HISI_PMU_EVENT_ATTR(cpu_mru, 0x1a),
- HISI_PMU_EVENT_ATTR(cycles, 0x9c),
+ HISI_PMU_EVENT_ATTR(cpu_mru, 0x1c),
+ HISI_PMU_EVENT_ATTR(cycles, 0x95),
HISI_PMU_EVENT_ATTR(spipe_hit, 0xb3),
HISI_PMU_EVENT_ATTR(hpipe_hit, 0xdb),
HISI_PMU_EVENT_ATTR(cring_rxdat_cnt, 0xfa),
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index c1f9e4471b28..343ab6a82c01 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -120,6 +120,8 @@ static void intel_vsec_dev_release(struct device *dev)
{
struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(dev);

+ xa_erase(&auxdev_array, intel_vsec_dev->id);
+
mutex_lock(&vsec_ida_lock);
ida_free(intel_vsec_dev->ida, intel_vsec_dev->auxdev.id);
mutex_unlock(&vsec_ida_lock);
@@ -135,19 +137,28 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
int ret, id;

- mutex_lock(&vsec_ida_lock);
- ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
- mutex_unlock(&vsec_ida_lock);
+ ret = xa_alloc(&auxdev_array, &intel_vsec_dev->id, intel_vsec_dev,
+ PMT_XA_LIMIT, GFP_KERNEL);
if (ret < 0) {
kfree(intel_vsec_dev->resource);
kfree(intel_vsec_dev);
return ret;
}

+ mutex_lock(&vsec_ida_lock);
+ id = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
+ mutex_unlock(&vsec_ida_lock);
+ if (id < 0) {
+ xa_erase(&auxdev_array, intel_vsec_dev->id);
+ kfree(intel_vsec_dev->resource);
+ kfree(intel_vsec_dev);
+ return id;
+ }
+
if (!parent)
parent = &pdev->dev;

- auxdev->id = ret;
+ auxdev->id = id;
auxdev->name = name;
auxdev->dev.parent = parent;
auxdev->dev.release = intel_vsec_dev_release;
@@ -169,12 +180,6 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
if (ret < 0)
return ret;

- /* Add auxdev to list */
- ret = xa_alloc(&auxdev_array, &id, intel_vsec_dev, PMT_XA_LIMIT,
- GFP_KERNEL);
- if (ret)
- return ret;
-
return 0;
}
EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, INTEL_VSEC);
diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h
index 0fd042c171ba..0a6201b4a0e9 100644
--- a/drivers/platform/x86/intel/vsec.h
+++ b/drivers/platform/x86/intel/vsec.h
@@ -45,6 +45,7 @@ struct intel_vsec_device {
struct ida *ida;
struct intel_vsec_platform_info *info;
int num_resources;
+ int id; /* xa */
void *priv_data;
size_t priv_data_size;
};
diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
index 82d3cd5ee2f9..c8368dae69c7 100644
--- a/drivers/power/supply/bq256xx_charger.c
+++ b/drivers/power/supply/bq256xx_charger.c
@@ -1574,13 +1574,16 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
wd_reg_val = i;
break;
}
- if (bq->watchdog_timer > bq256xx_watchdog_time[i] &&
+ if (i + 1 < BQ256XX_NUM_WD_VAL &&
+ bq->watchdog_timer > bq256xx_watchdog_time[i] &&
bq->watchdog_timer < bq256xx_watchdog_time[i + 1])
wd_reg_val = i;
}
ret = regmap_update_bits(bq->regmap, BQ256XX_CHARGER_CONTROL_1,
BQ256XX_WATCHDOG_MASK, wd_reg_val <<
BQ256XX_WDT_BIT_SHIFT);
+ if (ret)
+ return ret;

ret = power_supply_get_battery_info(bq->charger, &bat_info);
if (ret == -ENOMEM)
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index bb29e9ebd24a..99f3ccdc30a6 100644
--- a/drivers/power/supply/cw2015_battery.c
+++ b/drivers/power/supply/cw2015_battery.c
@@ -491,7 +491,7 @@ static int cw_battery_get_property(struct power_supply *psy,

case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
if (cw_battery_valid_time_to_empty(cw_bat))
- val->intval = cw_bat->time_to_empty;
+ val->intval = cw_bat->time_to_empty * 60;
else
val->intval = 0;
break;
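
For reference, the power-supply sysfs ABI exports time_to_empty_now in seconds while the CW2015 fuel gauge tracks remaining run time in minutes, hence the conversion above. A sketch with a made-up reading:

	/* 90 minutes reported by the gauge -> 5400 seconds exported to userspace */
	val->intval = cw_bat->time_to_empty * 60;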
diff --git a/drivers/power/supply/qcom_pmi8998_charger.c b/drivers/power/supply/qcom_pmi8998_charger.c
index 10f4dd0caca1..22c7c0e7c522 100644
--- a/drivers/power/supply/qcom_pmi8998_charger.c
+++ b/drivers/power/supply/qcom_pmi8998_charger.c
@@ -973,10 +973,14 @@ static int smb2_probe(struct platform_device *pdev)
supply_config.of_node = pdev->dev.of_node;

desc = devm_kzalloc(chip->dev, sizeof(smb2_psy_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
memcpy(desc, &smb2_psy_desc, sizeof(smb2_psy_desc));
desc->name =
devm_kasprintf(chip->dev, GFP_KERNEL, "%s-charger",
(const char *)device_get_match_data(chip->dev));
+ if (!desc->name)
+ return -ENOMEM;

chip->chg_psy =
devm_power_supply_register(chip->dev, desc, &supply_config);
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index dc66e3405bf5..0c8c63239adb 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -176,7 +176,7 @@ of_pwm_single_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
pwm->args.period = args->args[0];
pwm->args.polarity = PWM_POLARITY_NORMAL;

- if (args->args_count == 2 && args->args[2] & PWM_POLARITY_INVERTED)
+ if (args->args_count == 2 && args->args[1] & PWM_POLARITY_INVERTED)
pwm->args.polarity = PWM_POLARITY_INVERSED;

return pwm;
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index ef1293f2a897..7758d274a26c 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -60,9 +60,10 @@ static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
snprintf(name, sizeof(name), "timer%u", pwm->hwpwm);

clk = clk_get(chip->dev, name);
- if (IS_ERR(clk))
- return dev_err_probe(chip->dev, PTR_ERR(clk),
- "Failed to get clock\n");
+ if (IS_ERR(clk)) {
+ dev_err(chip->dev, "error %pe: Failed to get clock\n", clk);
+ return PTR_ERR(clk);
+ }

err = clk_prepare_enable(clk);
if (err < 0) {
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index 3d6be7749e23..dd2ee5d9ca06 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -579,32 +579,23 @@ static void stm32_pwm_detect_complementary(struct stm32_pwm *priv)
priv->have_complementary_output = (ccer != 0);
}

-static int stm32_pwm_detect_channels(struct stm32_pwm *priv)
+static unsigned int stm32_pwm_detect_channels(struct stm32_pwm *priv,
+ unsigned int *num_enabled)
{
- u32 ccer;
- int npwm = 0;
+ u32 ccer, ccer_backup;

/*
* If channels enable bits don't exist writing 1 will have no
* effect so we can detect and count them.
*/
+ regmap_read(priv->regmap, TIM_CCER, &ccer_backup);
regmap_set_bits(priv->regmap, TIM_CCER, TIM_CCER_CCXE);
regmap_read(priv->regmap, TIM_CCER, &ccer);
- regmap_clear_bits(priv->regmap, TIM_CCER, TIM_CCER_CCXE);
-
- if (ccer & TIM_CCER_CC1E)
- npwm++;
-
- if (ccer & TIM_CCER_CC2E)
- npwm++;
+ regmap_write(priv->regmap, TIM_CCER, ccer_backup);

- if (ccer & TIM_CCER_CC3E)
- npwm++;
+ *num_enabled = hweight32(ccer_backup & TIM_CCER_CCXE);

- if (ccer & TIM_CCER_CC4E)
- npwm++;
-
- return npwm;
+ return hweight32(ccer & TIM_CCER_CCXE);
}

static int stm32_pwm_probe(struct platform_device *pdev)
@@ -613,6 +604,8 @@ static int stm32_pwm_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent);
struct stm32_pwm *priv;
+ unsigned int num_enabled;
+ unsigned int i;
int ret;

priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -635,7 +628,11 @@ static int stm32_pwm_probe(struct platform_device *pdev)

priv->chip.dev = dev;
priv->chip.ops = &stm32pwm_ops;
- priv->chip.npwm = stm32_pwm_detect_channels(priv);
+ priv->chip.npwm = stm32_pwm_detect_channels(priv, &num_enabled);
+
+ /* Initialize clock refcount to number of enabled PWM channels. */
+ for (i = 0; i < num_enabled; i++)
+ clk_enable(priv->clk);

ret = devm_pwmchip_add(dev, &priv->chip);
if (ret < 0)
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 520f9152f3bf..d4ceca2d435e 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -2550,7 +2550,7 @@ bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
static void bfad_reset_sdev_bflags(struct bfad_im_port_s *im_port,
int lunmask_cfg)
{
- const u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;
+ const blist_flags_t scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;
struct bfad_itnim_s *itnim;
struct scsi_device *sdev;
unsigned long flags;
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index c4d9ed0d7d75..2619a2d4f5f1 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -52,9 +52,10 @@ int fnic_debugfs_init(void)
fc_trc_flag->fnic_trace = 2;
fc_trc_flag->fc_trace = 3;
fc_trc_flag->fc_clear = 4;
+ return 0;
}

- return 0;
+ return -ENOMEM;
}

/*
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 9472b9743aef..b155ac800979 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1565,12 +1565,12 @@ EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
{
if (!hisi_hba->hw->soft_reset)
- return -1;
+ return -ENOENT;

down(&hisi_hba->sem);
if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
up(&hisi_hba->sem);
- return -1;
+ return -EPERM;
}

if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
@@ -1641,7 +1641,10 @@ static int hisi_sas_abort_task(struct sas_task *task)
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);

- if (slot && task->task_proto & SAS_PROTOCOL_SSP) {
+ if (!slot)
+ goto out;
+
+ if (task->task_proto & SAS_PROTOCOL_SSP) {
u16 tag = slot->idx;
int rc2;

@@ -1688,7 +1691,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
rc = hisi_sas_softreset_ata_disk(device);
}
}
- } else if (slot && task->task_proto & SAS_PROTOCOL_SMP) {
+ } else if (task->task_proto & SAS_PROTOCOL_SMP) {
/* SMP */
u32 tag = slot->idx;
struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 089186fe1791..520fffc14282 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -3478,7 +3478,7 @@ static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba)
u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
int i;

- for (i = 0; i < debugfs_axi_reg.count; i++, databuf++)
+ for (i = 0; i < debugfs_global_reg.count; i++, databuf++)
*databuf = hisi_sas_read32(hisi_hba, 4 * i);
}

@@ -5098,6 +5098,7 @@ static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
{
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct Scsi_Host *shost = hisi_hba->shost;
struct device *dev = hisi_hba->dev;
int rc;

@@ -5106,6 +5107,10 @@ static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
rc = hw_init_v3_hw(hisi_hba);
if (rc) {
dev_err(dev, "FLR: hw init failed rc=%d\n", rc);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ scsi_unblock_requests(shost);
+ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
+ up(&hisi_hba->sem);
return;
}

@@ -5148,7 +5153,7 @@ static int _suspend_v3_hw(struct device *device)
}

if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
- return -1;
+ return -EPERM;

dev_warn(dev, "entering suspend state\n");

diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 08645a99ad6b..9dacbb8570c9 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -223,6 +223,22 @@ static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc,
return rval;
}

+ if (mrioc->unrecoverable) {
+ dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ if (mrioc->reset_in_progress) {
+ dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
+ return -EAGAIN;
+ }
+
+ if (mrioc->stop_bsgs) {
+ dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
+ return -EAGAIN;
+ }
+
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
&pel_enable, sizeof(pel_enable));
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 89ba015c5d7e..c7c75257425d 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -1047,8 +1047,9 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
list) {
if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
- tgtdev->host_exposed && tgtdev->starget &&
- tgtdev->starget->hostdata) {
+ tgtdev->is_hidden &&
+ tgtdev->host_exposed && tgtdev->starget &&
+ tgtdev->starget->hostdata) {
tgt_priv = tgtdev->starget->hostdata;
tgt_priv->dev_removed = 1;
atomic_set(&tgt_priv->block_io, 0);
@@ -1064,14 +1065,24 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
mpi3mr_tgtdev_put(tgtdev);
+ } else if (tgtdev->is_hidden & tgtdev->host_exposed) {
+ dprint_reset(mrioc, "hiding target device with perst_id(%d)\n",
+ tgtdev->perst_id);
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
}
}

tgtdev = NULL;
list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
- !tgtdev->is_hidden && !tgtdev->host_exposed)
- mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
+ !tgtdev->is_hidden) {
+ if (!tgtdev->host_exposed)
+ mpi3mr_report_tgtdev_to_host(mrioc,
+ tgtdev->perst_id);
+ else if (tgtdev->starget)
+ starget_for_each_device(tgtdev->starget,
+ (void *)tgtdev, mpi3mr_update_sdev);
+ }
}
}

diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index c61848595da0..e877aace1155 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -46,7 +46,7 @@
#define LLCC_TRP_STATUSn(n) (4 + n * SZ_4K)
#define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n)
#define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n)
-#define LLCC_TRP_ATTR2_CFGn(n) (0x21100 + SZ_8 * n)
+#define LLCC_TRP_ATTR2_CFGn(n) (0x21100 + SZ_4 * n)

#define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x21f00
#define LLCC_TRP_PCB_ACT 0x21f04
@@ -785,15 +785,15 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
u32 disable_cap_alloc, retain_pc;

disable_cap_alloc = config->dis_cap_alloc << config->slice_id;
- ret = regmap_write(drv_data->bcast_regmap,
- LLCC_TRP_SCID_DIS_CAP_ALLOC, disable_cap_alloc);
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_SCID_DIS_CAP_ALLOC,
+ BIT(config->slice_id), disable_cap_alloc);
if (ret)
return ret;

if (drv_data->version < LLCC_VERSION_4_1_0_0) {
retain_pc = config->retain_on_pc << config->slice_id;
- ret = regmap_write(drv_data->bcast_regmap,
- LLCC_TRP_PCB_ACT, retain_pc);
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_PCB_ACT,
+ BIT(config->slice_id), retain_pc);
if (ret)
return ret;
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index bcbf840cd41c..3ce0fd5df8e9 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -1165,9 +1165,10 @@ config SPI_ZYNQ_QSPI

config SPI_ZYNQMP_GQSPI
tristate "Xilinx ZynqMP GQSPI controller"
- depends on (SPI_MASTER && HAS_DMA) || COMPILE_TEST
+ depends on (SPI_MEM && HAS_DMA) || COMPILE_TEST
help
Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
+ This controller only supports SPI memory interface.

config SPI_AMD
tristate "AMD SPI controller"
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index b50db71ac4cc..2064dc4ea935 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1825,7 +1825,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (ddata->jh7110_clk_init) {
ret = cqspi_jh7110_clk_init(pdev, cqspi);
if (ret)
- goto probe_clk_failed;
+ goto probe_reset_failed;
}

if (of_device_is_compatible(pdev->dev.of_node,
@@ -1872,6 +1872,8 @@ static int cqspi_probe(struct platform_device *pdev)
probe_setup_failed:
cqspi_controller_enable(cqspi, 0);
probe_reset_failed:
+ if (cqspi->is_jh7110)
+ cqspi_jh7110_disable_clk(pdev, cqspi);
clk_disable_unprepare(cqspi->clk);
probe_clk_failed:
pm_runtime_put_sync(dev);
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index f0b630fe16c3..b341b6908df0 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -441,7 +441,6 @@ static void mcfqspi_remove(struct platform_device *pdev)
mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);

mcfqspi_cs_teardown(mcfqspi);
- clk_disable_unprepare(mcfqspi->clk);
}

#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index fb452bc78372..cfc3b1ddbd22 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -29,12 +29,15 @@

#include <asm/unaligned.h>

+#define SH_MSIOF_FLAG_FIXED_DTDL_200 BIT(0)
+
struct sh_msiof_chipdata {
u32 bits_per_word_mask;
u16 tx_fifo_size;
u16 rx_fifo_size;
u16 ctlr_flags;
u16 min_div_pow;
+ u32 flags;
};

struct sh_msiof_spi_priv {
@@ -1072,6 +1075,16 @@ static const struct sh_msiof_chipdata rcar_gen3_data = {
.min_div_pow = 1,
};

+static const struct sh_msiof_chipdata rcar_r8a7795_data = {
+ .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 64,
+ .ctlr_flags = SPI_CONTROLLER_MUST_TX,
+ .min_div_pow = 1,
+ .flags = SH_MSIOF_FLAG_FIXED_DTDL_200,
+};
+
static const struct of_device_id sh_msiof_match[] __maybe_unused = {
{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
{ .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data },
@@ -1082,6 +1095,7 @@ static const struct of_device_id sh_msiof_match[] __maybe_unused = {
{ .compatible = "renesas,msiof-r8a7793", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7794", .data = &rcar_gen2_data },
{ .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data },
+ { .compatible = "renesas,msiof-r8a7795", .data = &rcar_r8a7795_data },
{ .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data },
{ .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data },
{ .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen3_data },
@@ -1279,6 +1293,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
return -ENXIO;
}

+ if (chipdata->flags & SH_MSIOF_FLAG_FIXED_DTDL_200)
+ info->dtdl = 200;
+
if (info->mode == MSIOF_SPI_TARGET)
ctlr = spi_alloc_target(&pdev->dev,
sizeof(struct sh_msiof_spi_priv));
diff --git a/drivers/spmi/spmi-mtk-pmif.c b/drivers/spmi/spmi-mtk-pmif.c
index b3c991e1ea40..54c35f5535cb 100644
--- a/drivers/spmi/spmi-mtk-pmif.c
+++ b/drivers/spmi/spmi-mtk-pmif.c
@@ -50,6 +50,7 @@ struct pmif {
struct clk_bulk_data clks[PMIF_MAX_CLKS];
size_t nclks;
const struct pmif_data *data;
+ raw_spinlock_t lock;
};

static const char * const pmif_clock_names[] = {
@@ -314,6 +315,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
struct ch_reg *inf_reg;
int ret;
u32 data, cmd;
+ unsigned long flags;

/* Check for argument validation. */
if (sid & ~0xf) {
@@ -334,6 +336,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
else
return -EINVAL;

+ raw_spin_lock_irqsave(&arb->lock, flags);
/* Wait for Software Interface FSM state to be IDLE. */
inf_reg = &arb->chan;
ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
@@ -343,6 +346,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
/* set channel ready if the data has transferred */
if (pmif_is_fsm_vldclr(arb))
pmif_writel(arb, 1, inf_reg->ch_rdy);
+ raw_spin_unlock_irqrestore(&arb->lock, flags);
dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n");
return ret;
}
@@ -350,6 +354,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
/* Send the command. */
cmd = (opc << 30) | (sid << 24) | ((len - 1) << 16) | addr;
pmif_writel(arb, cmd, inf_reg->ch_send);
+ raw_spin_unlock_irqrestore(&arb->lock, flags);

/*
* Wait for Software Interface FSM state to be WFVLDCLR,
@@ -376,7 +381,8 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
struct pmif *arb = spmi_controller_get_drvdata(ctrl);
struct ch_reg *inf_reg;
int ret;
- u32 data, cmd;
+ u32 data, wdata, cmd;
+ unsigned long flags;

if (len > 4) {
dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but:%zu requested", len);
@@ -394,6 +400,10 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
else
return -EINVAL;

+ /* Set the write data. */
+ memcpy(&wdata, buf, len);
+
+ raw_spin_lock_irqsave(&arb->lock, flags);
/* Wait for Software Interface FSM state to be IDLE. */
inf_reg = &arb->chan;
ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
@@ -403,17 +413,17 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
/* set channel ready if the data has transferred */
if (pmif_is_fsm_vldclr(arb))
pmif_writel(arb, 1, inf_reg->ch_rdy);
+ raw_spin_unlock_irqrestore(&arb->lock, flags);
dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n");
return ret;
}

- /* Set the write data. */
- memcpy(&data, buf, len);
- pmif_writel(arb, data, inf_reg->wdata);
+ pmif_writel(arb, wdata, inf_reg->wdata);

/* Send the command. */
cmd = (opc << 30) | BIT(29) | (sid << 24) | ((len - 1) << 16) | addr;
pmif_writel(arb, cmd, inf_reg->ch_send);
+ raw_spin_unlock_irqrestore(&arb->lock, flags);

return 0;
}
@@ -488,6 +498,8 @@ static int mtk_spmi_probe(struct platform_device *pdev)
arb->chan.ch_send = PMIF_SWINF_0_ACC + chan_offset;
arb->chan.ch_rdy = PMIF_SWINF_0_VLD_CLR + chan_offset;

+ raw_spin_lock_init(&arb->lock);
+
platform_set_drvdata(pdev, ctrl);

err = spmi_controller_add(ctrl);
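
For reference, the spmi-mtk-pmif change above serializes the wait-for-IDLE poll and the command write under one raw spinlock, so concurrent read/write callers cannot interleave on the single software-interface channel. A minimal sketch of the pattern, assuming a kernel context (the struct and register offsets are illustrative, not taken from the driver):

#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/spinlock.h>

struct my_ctrl {
	void __iomem *base;
	raw_spinlock_t lock;	/* guards the IDLE-poll + command-write window */
};

static int my_send_cmd(struct my_ctrl *c, u32 cmd)
{
	unsigned long flags;
	u32 sta;
	int ret;

	raw_spin_lock_irqsave(&c->lock, flags);
	/* Busy-wait (atomic variant, safe under a raw spinlock) until the FSM is idle. */
	ret = readl_poll_timeout_atomic(c->base + 0x0, sta, sta == 0, 1, 10000);
	if (!ret)
		writel(cmd, c->base + 0x4);	/* issue the command while still holding the lock */
	raw_spin_unlock_irqrestore(&c->lock, flags);

	return ret;
}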
diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
index 84a41792cb4b..ac398b5a9736 100644
--- a/drivers/staging/media/rkvdec/rkvdec.c
+++ b/drivers/staging/media/rkvdec/rkvdec.c
@@ -461,6 +461,9 @@ static const struct v4l2_ioctl_ops rkvdec_ioctl_ops = {

.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd,
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd,
};

static int rkvdec_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 4d447520bab8..4e4cf6c34a77 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -332,11 +332,13 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
}

iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
- if (is_write)
+ if (is_write) {
+ file_start_write(fd);
ret = vfs_iter_write(fd, &iter, &pos, 0);
- else
+ file_end_write(fd);
+ } else {
ret = vfs_iter_read(fd, &iter, &pos, 0);
-
+ }
if (is_write) {
if (ret < 0 || ret != data_length) {
pr_err("%s() write returned %d\n", __func__, ret);
@@ -467,7 +469,9 @@ fd_execute_write_same(struct se_cmd *cmd)
}

iov_iter_bvec(&iter, ITER_SOURCE, bvec, nolb, len);
+ file_start_write(fd_dev->fd_file);
ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0);
+ file_end_write(fd_dev->fd_file);

kfree(bvec);
if (ret < 0 || ret != len) {
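
The target_core_file change above brackets vfs_iter_write() with file_start_write()/file_end_write(), since a caller issuing writes outside the normal ->write_iter() path has to take write/freeze protection itself. A minimal sketch of the pairing, assuming a kernel context (my_write is illustrative):

#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t my_write(struct file *filp, struct iov_iter *iter, loff_t *pos)
{
	ssize_t ret;

	file_start_write(filp);			/* pairs with file_end_write() below */
	ret = vfs_iter_write(filp, iter, pos, 0);
	file_end_write(filp);

	return ret;
}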
diff --git a/drivers/thermal/loongson2_thermal.c b/drivers/thermal/loongson2_thermal.c
index 133098dc0854..99ca0c7bc41c 100644
--- a/drivers/thermal/loongson2_thermal.c
+++ b/drivers/thermal/loongson2_thermal.c
@@ -127,7 +127,7 @@ static int loongson2_thermal_probe(struct platform_device *pdev)
if (!IS_ERR(tzd))
break;

- if (PTR_ERR(tzd) != ENODEV)
+ if (PTR_ERR(tzd) != -ENODEV)
continue;

return dev_err_probe(dev, PTR_ERR(tzd), "failed to register");
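
The loongson2_thermal fix above is the classic error-pointer pitfall: PTR_ERR() yields a negative errno, so it must be compared against -ENODEV, not ENODEV. A minimal sketch of the idiom (my_check is illustrative):

#include <linux/err.h>
#include <linux/errno.h>

static long my_check(void *obj)
{
	if (IS_ERR(obj)) {
		/* PTR_ERR() returns e.g. -ENODEV; compare with the negative constant. */
		if (PTR_ERR(obj) == -ENODEV)
			return 0;	/* "no device" treated as non-fatal here */
		return PTR_ERR(obj);
	}

	return 0;
}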
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index e6f3166a9208..2de524fb7be5 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1382,7 +1382,6 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
device_del(&tz->device);
release_device:
put_device(&tz->device);
- tz = NULL;
remove_id:
ida_free(&thermal_tz_ida, id);
free_tzp:
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index 15a2387a5b25..4f4502fb5454 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -119,6 +119,8 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)

/* get the clock - this also enables the HW */
data->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(data->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->clk), "could not get clk\n");

/* get the interrupt */
ret = platform_get_irq(pdev, 0);
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 077c3ba3539e..8385be846840 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -446,7 +446,7 @@ static int generic_rs485_config(struct uart_port *port, struct ktermios *termios
}

static const struct serial_rs485 generic_rs485_supported = {
- .flags = SER_RS485_ENABLED,
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND,
};

static const struct exar8250_platform exar8250_default_platform = {
@@ -490,7 +490,8 @@ static int iot2040_rs485_config(struct uart_port *port, struct ktermios *termios
}

static const struct serial_rs485 iot2040_rs485_supported = {
- .flags = SER_RS485_ENABLED | SER_RS485_RX_DURING_TX | SER_RS485_TERMINATE_BUS,
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
+ SER_RS485_RX_DURING_TX | SER_RS485_TERMINATE_BUS,
};

static const struct property_entry iot2040_gpio_properties[] = {
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 1122f37fe744..346167afe9e1 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1581,7 +1581,7 @@ static int omap8250_remove(struct platform_device *pdev)

err = pm_runtime_resume_and_get(&pdev->dev);
if (err)
- return err;
+ dev_err(&pdev->dev, "Failed to resume hardware\n");

up = serial8250_get_port(priv->line);
omap_8250_shutdown(&up->port);
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index d7658f380838..d3cb341f2c55 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -122,7 +122,7 @@ static void apbuart_tx_chars(struct uart_port *port)
{
u8 ch;

- uart_port_tx_limited(port, ch, port->fifosize >> 1,
+ uart_port_tx_limited(port, ch, port->fifosize,
true,
UART_PUT_CHAR(port, ch),
({}));
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 13cb78340709..cd36251ba1c0 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -421,13 +421,13 @@ static void imx_uart_stop_tx(struct uart_port *port)
ucr1 = imx_uart_readl(sport, UCR1);
imx_uart_writel(sport, ucr1 & ~UCR1_TRDYEN, UCR1);

+ ucr4 = imx_uart_readl(sport, UCR4);
usr2 = imx_uart_readl(sport, USR2);
- if (!(usr2 & USR2_TXDC)) {
+ if ((!(usr2 & USR2_TXDC)) && (ucr4 & UCR4_TCEN)) {
/* The shifter is still busy, so retry once TC triggers */
return;
}

- ucr4 = imx_uart_readl(sport, UCR4);
ucr4 &= ~UCR4_TCEN;
imx_uart_writel(sport, ucr4, UCR4);

@@ -2214,7 +2214,6 @@ static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t)
return HRTIMER_NORESTART;
}

-static const struct serial_rs485 imx_no_rs485 = {}; /* No RS485 if no RTS */
static const struct serial_rs485 imx_rs485_supported = {
.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
SER_RS485_RX_DURING_TX,
@@ -2298,8 +2297,6 @@ static int imx_uart_probe(struct platform_device *pdev)
/* RTS is required to control the RS485 transmitter */
if (sport->have_rtscts || sport->have_rtsgpio)
sport->port.rs485_supported = imx_rs485_supported;
- else
- sport->port.rs485_supported = imx_no_rs485;
sport->port.flags = UPF_BOOT_AUTOCONF;
timer_setup(&sport->timer, imx_uart_timeout, 0);

@@ -2326,19 +2323,13 @@ static int imx_uart_probe(struct platform_device *pdev)
/* For register access, we only need to enable the ipg clock. */
ret = clk_prepare_enable(sport->clk_ipg);
if (ret) {
- dev_err(&pdev->dev, "failed to enable per clk: %d\n", ret);
+ dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
return ret;
}

ret = uart_get_rs485_mode(&sport->port);
- if (ret) {
- clk_disable_unprepare(sport->clk_ipg);
- return ret;
- }
-
- if (sport->port.rs485.flags & SER_RS485_ENABLED &&
- (!sport->have_rtscts && !sport->have_rtsgpio))
- dev_err(&pdev->dev, "no RTS control, disabling rs485\n");
+ if (ret)
+ goto err_clk;

/*
* If using the i.MX UART RTS/CTS control then the RTS (CTS_B)
@@ -2418,8 +2409,6 @@ static int imx_uart_probe(struct platform_device *pdev)
imx_uart_writel(sport, ucr3, UCR3);
}

- clk_disable_unprepare(sport->clk_ipg);
-
hrtimer_init(&sport->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_init(&sport->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
sport->trigger_start_tx.function = imx_trigger_start_tx;
@@ -2435,7 +2424,7 @@ static int imx_uart_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to request rx irq: %d\n",
ret);
- return ret;
+ goto err_clk;
}

ret = devm_request_irq(&pdev->dev, txirq, imx_uart_txint, 0,
@@ -2443,7 +2432,7 @@ static int imx_uart_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to request tx irq: %d\n",
ret);
- return ret;
+ goto err_clk;
}

ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0,
@@ -2451,14 +2440,14 @@ static int imx_uart_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to request rts irq: %d\n",
ret);
- return ret;
+ goto err_clk;
}
} else {
ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0,
dev_name(&pdev->dev), sport);
if (ret) {
dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
- return ret;
+ goto err_clk;
}
}

@@ -2466,7 +2455,12 @@ static int imx_uart_probe(struct platform_device *pdev)

platform_set_drvdata(pdev, sport);

- return uart_add_one_port(&imx_uart_uart_driver, &sport->port);
+ ret = uart_add_one_port(&imx_uart_uart_driver, &sport->port);
+
+err_clk:
+ clk_disable_unprepare(sport->clk_ipg);
+
+ return ret;
}

static int imx_uart_remove(struct platform_device *pdev)
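
The imx.c rework above routes every failure after clk_prepare_enable() through a single err_clk label and lets the success path fall through it as well, so the ipg clock is always released once register access during probe is done. A minimal sketch of that shape, assuming a kernel context (my_probe/my_add_port are illustrative):

#include <linux/clk.h>
#include <linux/platform_device.h>

static int my_add_port(struct platform_device *pdev)
{
	return 0;	/* stub; stands in for the real registration step */
}

static int my_probe(struct platform_device *pdev, struct clk *ipg)
{
	int ret;

	/* The clock is only needed while probe touches registers. */
	ret = clk_prepare_enable(ipg);
	if (ret)
		return ret;

	ret = my_add_port(pdev);	/* success and failure share the exit below */

	clk_disable_unprepare(ipg);	/* balanced on every path */
	return ret;
}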
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 0ead88c5a19a..135a838f517a 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1483,6 +1483,13 @@ static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev)
return omap_up_info;
}

+static const struct serial_rs485 serial_omap_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int serial_omap_probe_rs485(struct uart_omap_port *up,
struct device *dev)
{
@@ -1497,6 +1504,9 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
if (!np)
return 0;

+ up->port.rs485_config = serial_omap_config_rs485;
+ up->port.rs485_supported = serial_omap_rs485_supported;
+
ret = uart_get_rs485_mode(&up->port);
if (ret)
return ret;
@@ -1531,13 +1541,6 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
return 0;
}

-static const struct serial_rs485 serial_omap_rs485_supported = {
- .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
- SER_RS485_RX_DURING_TX,
- .delay_rts_before_send = 1,
- .delay_rts_after_send = 1,
-};
-
static int serial_omap_probe(struct platform_device *pdev)
{
struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev);
@@ -1604,17 +1607,11 @@ static int serial_omap_probe(struct platform_device *pdev)
dev_info(up->port.dev, "no wakeirq for uart%d\n",
up->port.line);

- ret = serial_omap_probe_rs485(up, &pdev->dev);
- if (ret < 0)
- goto err_rs485;
-
sprintf(up->name, "OMAP UART%d", up->port.line);
up->port.mapbase = mem->start;
up->port.membase = base;
up->port.flags = omap_up_info->flags;
up->port.uartclk = omap_up_info->uartclk;
- up->port.rs485_config = serial_omap_config_rs485;
- up->port.rs485_supported = serial_omap_rs485_supported;
if (!up->port.uartclk) {
up->port.uartclk = DEFAULT_CLK_SPEED;
dev_warn(&pdev->dev,
@@ -1622,6 +1619,10 @@ static int serial_omap_probe(struct platform_device *pdev)
DEFAULT_CLK_SPEED);
}

+ ret = serial_omap_probe_rs485(up, &pdev->dev);
+ if (ret < 0)
+ goto err_rs485;
+
up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
up->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
cpu_latency_qos_add_request(&up->pm_qos_request, up->latency);
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 6a0a3208d090..6aeb821d9b1d 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -24,6 +24,7 @@
#include <linux/tty_flip.h>
#include <linux/spi/spi.h>
#include <linux/uaccess.h>
+#include <linux/units.h>
#include <uapi/linux/sched/types.h>

#define SC16IS7XX_NAME "sc16is7xx"
@@ -1714,9 +1715,12 @@ static int sc16is7xx_spi_probe(struct spi_device *spi)

/* Setup SPI bus */
spi->bits_per_word = 8;
- /* only supports mode 0 on SC16IS762 */
+ /* For all variants, only mode 0 is supported */
+ if ((spi->mode & SPI_MODE_X_MASK) != SPI_MODE_0)
+ return dev_err_probe(&spi->dev, -EINVAL, "Unsupported SPI mode\n");
+
spi->mode = spi->mode ? : SPI_MODE_0;
- spi->max_speed_hz = spi->max_speed_hz ? : 15000000;
+ spi->max_speed_hz = spi->max_speed_hz ? : 4 * HZ_PER_MHZ;
ret = spi_setup(spi);
if (ret)
return ret;
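
The sc16is7xx change above rejects any SPI mode other than mode 0 up front and switches the default clock rate to a unit macro. A minimal sketch of that probe-time validation, assuming a kernel context (my_spi_probe is illustrative):

#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/units.h>

static int my_spi_probe(struct spi_device *spi)
{
	/* Only SPI mode 0 is supported; bail out early with a probe error. */
	if ((spi->mode & SPI_MODE_X_MASK) != SPI_MODE_0)
		return dev_err_probe(&spi->dev, -EINVAL, "unsupported SPI mode\n");

	spi->bits_per_word = 8;
	spi->max_speed_hz = spi->max_speed_hz ?: 4 * HZ_PER_MHZ;

	return spi_setup(spi);
}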
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index f912f8bf1e63..18b49b1439a5 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1370,19 +1370,27 @@ static void uart_sanitize_serial_rs485(struct uart_port *port, struct serial_rs4
return;
}

+ rs485->flags &= supported_flags;
+
/* Pick sane settings if the user hasn't */
- if ((supported_flags & (SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND)) &&
- !(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+ if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
!(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
- dev_warn_ratelimited(port->dev,
- "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
- port->name, port->line);
- rs485->flags |= SER_RS485_RTS_ON_SEND;
- rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
- supported_flags |= SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND;
- }
+ if (supported_flags & SER_RS485_RTS_ON_SEND) {
+ rs485->flags |= SER_RS485_RTS_ON_SEND;
+ rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;

- rs485->flags &= supported_flags;
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
+ port->name, port->line);
+ } else {
+ rs485->flags |= SER_RS485_RTS_AFTER_SEND;
+ rs485->flags &= ~SER_RS485_RTS_ON_SEND;
+
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): invalid RTS setting, using RTS_AFTER_SEND instead\n",
+ port->name, port->line);
+ }
+ }

uart_sanitize_serial_rs485_delays(port, rs485);

@@ -1445,7 +1453,7 @@ static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
int ret;
unsigned long flags;

- if (!port->rs485_config)
+ if (!(port->rs485_supported.flags & SER_RS485_ENABLED))
return -ENOTTY;

if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
@@ -3568,6 +3576,9 @@ int uart_get_rs485_mode(struct uart_port *port)
int ret;
int rx_during_tx_gpio_flag;

+ if (!(port->rs485_supported.flags & SER_RS485_ENABLED))
+ return 0;
+
ret = device_property_read_u32_array(dev, "rs485-rts-delay",
rs485_delay, 2);
if (!ret) {
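
The serial_core sanitization above first masks the requested flags by what the port supports and only then forces exactly one of RTS_ON_SEND/RTS_AFTER_SEND, preferring whichever the hardware can do. A minimal sketch of that logic, assuming a kernel context (my_sanitize_rs485 is illustrative):

#include <linux/serial.h>
#include <linux/types.h>

static void my_sanitize_rs485(struct serial_rs485 *rs485, u32 supported)
{
	/* Drop anything the port cannot do before looking at RTS polarity. */
	rs485->flags &= supported;

	/* Exactly one of ON_SEND / AFTER_SEND must remain set. */
	if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
	    !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
		if (supported & SER_RS485_RTS_ON_SEND) {
			rs485->flags |= SER_RS485_RTS_ON_SEND;
			rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
		} else {
			rs485->flags |= SER_RS485_RTS_AFTER_SEND;
			rs485->flags &= ~SER_RS485_RTS_ON_SEND;
		}
	}
}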
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8a94e5a43c6d..493fc4742895 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2475,22 +2475,25 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
return 0;

if (tty->driver->flags & TTY_DRIVER_HARDWARE_BREAK)
- retval = tty->ops->break_ctl(tty, duration);
- else {
- /* Do the work ourselves */
- if (tty_write_lock(tty, false) < 0)
- return -EINTR;
- retval = tty->ops->break_ctl(tty, -1);
- if (retval)
- goto out;
- if (!signal_pending(current))
- msleep_interruptible(duration);
+ return tty->ops->break_ctl(tty, duration);
+
+ /* Do the work ourselves */
+ if (tty_write_lock(tty, false) < 0)
+ return -EINTR;
+
+ retval = tty->ops->break_ctl(tty, -1);
+ if (!retval) {
+ msleep_interruptible(duration);
retval = tty->ops->break_ctl(tty, 0);
-out:
- tty_write_unlock(tty);
- if (signal_pending(current))
- retval = -EINTR;
+ } else if (retval == -EOPNOTSUPP) {
+ /* some drivers can tell only dynamically */
+ retval = 0;
}
+ tty_write_unlock(tty);
+
+ if (signal_pending(current))
+ retval = -EINTR;
+
return retval;
}

diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index a885cc177cff..0971ae37f2a7 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -8540,7 +8540,6 @@ static int ufshcd_add_lus(struct ufs_hba *hba)

ufs_bsg_probe(hba);
scsi_scan_host(hba->host);
- pm_runtime_put_sync(hba->dev);

out:
return ret;
@@ -8808,15 +8807,15 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)

/* Probe and add UFS logical units */
ret = ufshcd_add_lus(hba);
+
out:
+ pm_runtime_put_sync(hba->dev);
/*
* If we failed to initialize the device or the device is not
* present, turn off the power/clocks etc.
*/
- if (ret) {
- pm_runtime_put_sync(hba->dev);
+ if (ret)
ufshcd_hba_exit(hba);
- }
}

static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index fc40726e13c2..0cbe14aca877 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -159,7 +159,7 @@ static int ufs_qcom_ice_program_key(struct ufs_hba *hba,
cap = hba->crypto_cap_array[cfg->crypto_cap_idx];
if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS ||
cap.key_size != UFS_CRYPTO_KEY_SIZE_256)
- return -EINVAL;
+ return -EOPNOTSUPP;

if (config_enable)
return qcom_ice_program_key(host->ice,
@@ -1675,7 +1675,7 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
if (!res->resource) {
dev_info(hba->dev, "Resource %s not provided\n", res->name);
if (i == RES_UFS)
- return -ENOMEM;
+ return -ENODEV;
continue;
} else if (i == RES_UFS) {
res_mem = res->resource;
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index 11a5b3437c32..d14001025700 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -1119,6 +1119,8 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
dma_addr_t trb_dma;
u32 togle_pcs = 1;
int sg_iter = 0;
+ int num_trb_req;
+ int trb_burst;
int num_trb;
int address;
u32 control;
@@ -1127,15 +1129,13 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
struct scatterlist *s = NULL;
bool sg_supported = !!(request->num_mapped_sgs);

+ num_trb_req = sg_supported ? request->num_mapped_sgs : 1;
+
+ /* ISO transfers require a TD per SOF; each TD consists of several TRBs */
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
- num_trb = priv_ep->interval;
+ num_trb = priv_ep->interval * num_trb_req;
else
- num_trb = sg_supported ? request->num_mapped_sgs : 1;
-
- if (num_trb > priv_ep->free_trbs) {
- priv_ep->flags |= EP_RING_FULL;
- return -ENOBUFS;
- }
+ num_trb = num_trb_req;

priv_req = to_cdns3_request(request);
address = priv_ep->endpoint.desc->bEndpointAddress;
@@ -1184,14 +1184,31 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,

link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
+
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
+ /*
+ * ISO requires that a LINK TRB be the first TRB of a TD.
+ * Fill the remaining TRB space with LINK TRBs to simplify the software processing logic.
+ */
+ while (priv_ep->enqueue) {
+ *trb = *link_trb;
+ trace_cdns3_prepare_trb(priv_ep, trb);
+
+ cdns3_ep_inc_enq(priv_ep);
+ trb = priv_ep->trb_pool + priv_ep->enqueue;
+ priv_req->trb = trb;
+ }
+ }
+ }
+
+ if (num_trb > priv_ep->free_trbs) {
+ priv_ep->flags |= EP_RING_FULL;
+ return -ENOBUFS;
}

if (priv_dev->dev_ver <= DEV_VER_V2)
togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);

- if (sg_supported)
- s = request->sg;
-
/* set incorrect Cycle Bit for first trb*/
control = priv_ep->pcs ? 0 : TRB_CYCLE;
trb->length = 0;
@@ -1209,6 +1226,9 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
do {
u32 length;

+ if (!(sg_iter % num_trb_req) && sg_supported)
+ s = request->sg;
+
/* fill TRB */
control |= TRB_TYPE(TRB_NORMAL);
if (sg_supported) {
@@ -1223,7 +1243,36 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
total_tdl += DIV_ROUND_UP(length,
priv_ep->endpoint.maxpacket);

- trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
+ trb_burst = priv_ep->trb_burst_size;
+
+ /*
+ * The DMA-crossing-a-4k-boundary problem is supposed to be fixed in DEV_VER_V2, but it
+ * is still seen on ISO transfers when scatter-gather is enabled.
+ *
+ * With scatter-gather enabled, a 1k packet size and a mult of 2, the data pattern looks
+ * like:
+ * [UVC Header(8B) ] [data(3k - 8)] ...
+ *
+ * The data received at offset 0xd000 is actually the 0xc000 data, with length 0x70. The
+ * error follows this pattern:
+ * 0xd000: wrong
+ * 0xe000: wrong
+ * 0xf000: correct
+ * 0x10000: wrong
+ * 0x11000: wrong
+ * 0x12000: correct
+ * ...
+ *
+ * It is still unclear why the error does not occur below 0xd000, since those transfers
+ * should also cross a 4k boundary. In any case, the code below fixes the problem.
+ *
+ * To avoid DMA crossing a 4k boundary on ISO transfers, reduce the burst length to 16.
+ */
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_dev->dev_ver <= DEV_VER_V2)
+ if (ALIGN_DOWN(trb->buffer, SZ_4K) !=
+ ALIGN_DOWN(trb->buffer + length, SZ_4K))
+ trb_burst = 16;
+
+ trb->length |= cpu_to_le32(TRB_BURST_LEN(trb_burst) |
TRB_LEN(length));
pcs = priv_ep->pcs ? TRB_CYCLE : 0;

@@ -1250,7 +1299,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
if (sg_supported) {
trb->control |= cpu_to_le32(TRB_ISP);
/* Don't set chain bit for last TRB */
- if (sg_iter < num_trb - 1)
+ if ((sg_iter % num_trb_req) < num_trb_req - 1)
trb->control |= cpu_to_le32(TRB_CHAIN);

s = sg_next(s);
@@ -1508,6 +1557,12 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,

/* The TRB was changed as link TRB, and the request was handled at ep_dequeue */
while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
+
+ /* ISO ep_traddr may stop at LINK TRB */
+ if (priv_ep->dequeue == cdns3_get_dma_pos(priv_dev, priv_ep) &&
+ priv_ep->type == USB_ENDPOINT_XFER_ISOC)
+ break;
+
trace_cdns3_complete_trb(priv_ep, trb);
cdns3_ep_inc_deq(priv_ep);
trb = priv_ep->trb_pool + priv_ep->dequeue;
@@ -1540,6 +1595,10 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
}

if (request_handled) {
+ /* TRBs are duplicated priv_ep->interval times for ISO IN */
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_ep->dir)
+ request->actual /= priv_ep->interval;
+
cdns3_gadget_giveback(priv_ep, priv_req, 0);
request_handled = false;
transfer_end = false;
@@ -2035,11 +2094,10 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
- u32 max_packet_size = 0;
- u8 maxburst = 0;
+ u32 max_packet_size = priv_ep->wMaxPacketSize;
+ u8 maxburst = priv_ep->bMaxBurst;
u32 ep_cfg = 0;
u8 buffering;
- u8 mult = 0;
int ret;

buffering = priv_dev->ep_buf_size - 1;
@@ -2061,8 +2119,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
break;
default:
ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
- mult = priv_dev->ep_iso_burst - 1;
- buffering = mult + 1;
+ buffering = (priv_ep->bMaxBurst + 1) * (priv_ep->mult + 1) - 1;
}

switch (priv_dev->gadget.speed) {
@@ -2073,17 +2130,8 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
max_packet_size = is_iso_ep ? 1024 : 512;
break;
case USB_SPEED_SUPER:
- /* It's limitation that driver assumes in driver. */
- mult = 0;
- max_packet_size = 1024;
- if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
- maxburst = priv_dev->ep_iso_burst - 1;
- buffering = (mult + 1) *
- (maxburst + 1);
-
- if (priv_ep->interval > 1)
- buffering++;
- } else {
+ if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
+ max_packet_size = 1024;
maxburst = priv_dev->ep_buf_size - 1;
}
break;
@@ -2112,7 +2160,6 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
if (priv_dev->dev_ver < DEV_VER_V2)
priv_ep->trb_burst_size = 16;

- mult = min_t(u8, mult, EP_CFG_MULT_MAX);
buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX);
maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);

@@ -2146,7 +2193,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
}

ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
- EP_CFG_MULT(mult) |
+ EP_CFG_MULT(priv_ep->mult) | /* must match EP setting */
EP_CFG_BUFFERING(buffering) |
EP_CFG_MAXBURST(maxburst);

@@ -2236,6 +2283,13 @@ usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
priv_ep->type = usb_endpoint_type(desc);
priv_ep->flags |= EP_CLAIMED;
priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
+ priv_ep->wMaxPacketSize = usb_endpoint_maxp(desc);
+ priv_ep->mult = USB_EP_MAXP_MULT(priv_ep->wMaxPacketSize);
+ priv_ep->wMaxPacketSize &= USB_ENDPOINT_MAXP_MASK;
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && comp_desc) {
+ priv_ep->mult = USB_SS_MULT(comp_desc->bmAttributes) - 1;
+ priv_ep->bMaxBurst = comp_desc->bMaxBurst;
+ }

spin_unlock_irqrestore(&priv_dev->lock, flags);
return &priv_ep->endpoint;
@@ -3019,22 +3073,40 @@ static int cdns3_gadget_check_config(struct usb_gadget *gadget)
struct cdns3_endpoint *priv_ep;
struct usb_ep *ep;
int n_in = 0;
+ int iso = 0;
+ int out = 1;
int total;
+ int n;

list_for_each_entry(ep, &gadget->ep_list, ep_list) {
priv_ep = ep_to_cdns3_ep(ep);
- if ((priv_ep->flags & EP_CLAIMED) && (ep->address & USB_DIR_IN))
- n_in++;
+ if (!(priv_ep->flags & EP_CLAIMED))
+ continue;
+
+ n = (priv_ep->mult + 1) * (priv_ep->bMaxBurst + 1);
+ if (ep->address & USB_DIR_IN) {
+ /*
+ * ISO transfer: DMA starts moving data when the ISO transfer begins and only
+ * transfers min(TD size, iso), so there is no benefit in allocating more
+ * internal memory than 'iso'.
+ */
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
+ iso += n;
+ else
+ n_in++;
+ } else {
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
+ out = max_t(int, out, n);
+ }
}

/* 2KB are reserved for EP0, 1KB for out*/
- total = 2 + n_in + 1;
+ total = 2 + n_in + out + iso;

if (total > priv_dev->onchip_buffers)
return -ENOMEM;

- priv_dev->ep_buf_size = priv_dev->ep_iso_burst =
- (priv_dev->onchip_buffers - 2) / (n_in + 1);
+ priv_dev->ep_buf_size = (priv_dev->onchip_buffers - 2 - iso) / (n_in + out);

return 0;
}
diff --git a/drivers/usb/cdns3/cdns3-gadget.h b/drivers/usb/cdns3/cdns3-gadget.h
index fbe4a8e3aa89..086a7bb83897 100644
--- a/drivers/usb/cdns3/cdns3-gadget.h
+++ b/drivers/usb/cdns3/cdns3-gadget.h
@@ -1168,6 +1168,9 @@ struct cdns3_endpoint {
u8 dir;
u8 num;
u8 type;
+ u8 mult;
+ u8 bMaxBurst;
+ u16 wMaxPacketSize;
int interval;

int free_trbs;
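
With mult, bMaxBurst and wMaxPacketSize now cached per endpoint, the cdns3 check_config() above budgets the on-chip buffer in KB as 2 (EP0) + non-ISO IN endpoints + the largest ISO OUT requirement + the sum of ISO IN requirements, where each ISO endpoint needs (mult + 1) * (maxburst + 1) units. A minimal sketch of that accounting (the helper names are illustrative):

/* One ISO endpoint needs (mult + 1) * (maxburst + 1) buffer units. */
static int my_iso_units(int mult, int maxburst)
{
	return (mult + 1) * (maxburst + 1);
}

static int my_onchip_buffers_needed(int n_bulk_in, int iso_in_units, int iso_out_units)
{
	int out = iso_out_units > 1 ? iso_out_units : 1;	/* at least 1 KB for OUT */

	return 2 + n_bulk_in + out + iso_in_units;		/* 2 KB reserved for EP0 */
}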
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 7ac39a281b8c..85e9c3ab66e9 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -523,6 +523,13 @@ static irqreturn_t ci_irq_handler(int irq, void *data)
u32 otgsc = 0;

if (ci->in_lpm) {
+ /*
+ * If a wakeup irq is already pending, just return and
+ * wait for the resume to finish first.
+ */
+ if (ci->wakeup_int)
+ return IRQ_HANDLED;
+
disable_irq_nosync(irq);
ci->wakeup_int = true;
pm_runtime_get(ci->dev);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index a1f4e1ead97f..0e7439dba8fe 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -916,6 +916,9 @@ static int acm_tty_break_ctl(struct tty_struct *tty, int state)
struct acm *acm = tty->driver_data;
int retval;

+ if (!(acm->ctrl_caps & USB_CDC_CAP_BRK))
+ return -EOPNOTSUPP;
+
retval = acm_send_break(acm, state ? 0xffff : 0);
if (retval < 0)
dev_dbg(&acm->control->dev,
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 8d5af9ccb602..b73b79fea281 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -277,48 +277,11 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
/*
* We're resetting only the device side because, if we're in host mode,
* XHCI driver will reset the host block. If dwc3 was configured for
- * host-only mode or current role is host, then we can return early.
+ * host-only mode, then we can return early.
*/
if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
return 0;

- /*
- * If the dr_mode is host and the dwc->current_dr_role is not the
- * corresponding DWC3_GCTL_PRTCAP_HOST, then the dwc3_core_init_mode
- * isn't executed yet. Ensure the phy is ready before the controller
- * updates the GCTL.PRTCAPDIR or other settings by soft-resetting
- * the phy.
- *
- * Note: GUSB3PIPECTL[n] and GUSB2PHYCFG[n] are port settings where n
- * is port index. If this is a multiport host, then we need to reset
- * all active ports.
- */
- if (dwc->dr_mode == USB_DR_MODE_HOST) {
- u32 usb3_port;
- u32 usb2_port;
-
- usb3_port = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
- usb3_port |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
- dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
-
- usb2_port = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- usb2_port |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
-
- /* Small delay for phy reset assertion */
- usleep_range(1000, 2000);
-
- usb3_port &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
- dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
-
- usb2_port &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
-
- /* Wait for clock synchronization */
- msleep(50);
- return 0;
- }
-
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg |= DWC3_DCTL_CSFTRST;
reg &= ~DWC3_DCTL_RUN_STOP;
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index b94243237293..6ae8a36f21cf 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -238,7 +238,10 @@ void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
struct dwc3_request *req;

req = next_request(&dep->pending_list);
- dwc3_gadget_giveback(dep, req, -ECONNRESET);
+ if (!dwc->connected)
+ dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ else
+ dwc3_gadget_giveback(dep, req, -ECONNRESET);
}

dwc->eps[0]->trb_enqueue = 0;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 858fe4c299b7..89de363ecf8b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2103,7 +2103,17 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,

list_for_each_entry(r, &dep->pending_list, list) {
if (r == req) {
- dwc3_gadget_giveback(dep, req, -ECONNRESET);
+ /*
+ * Explicitly check for EP0/1, as dequeue for those
+ * EPs needs to be handled differently. Control EP
+ * only deals with one USB req, and giveback will
+ * occur during dwc3_ep0_stall_and_restart(). EP0
+ * requests are never added to started_list.
+ */
+ if (dep->number > 1)
+ dwc3_gadget_giveback(dep, req, -ECONNRESET);
+ else
+ dwc3_ep0_reset_state(dwc);
goto out;
}
}
@@ -3973,6 +3983,13 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);

dwc3_ep0_reset_state(dwc);
+
+ /*
+ * Request PM idle to address the condition where the usage count
+ * is already decremented to zero but we are still waiting for the
+ * disconnect interrupt to set dwc->connected to FALSE.
+ */
+ pm_request_idle(dwc->dev);
}

static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index faa398109431..2e6bafb2a554 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -719,13 +719,29 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
}
uvc->enable_interrupt_ep = opts->enable_interrupt_ep;

- ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep);
+ /*
+ * The gadget_is_{super|dual}speed() API checks the UDC controller capability. The
+ * highest-speed endpoint descriptor should be passed down to the UDC controller so
+ * that the UDC driver can reserve enough resources in check_config(), especially
+ * mult and maxburst. A UDC driver such as cdns3 then knows it needs at least
+ * (mult + 1) * (maxburst + 1) * wMaxPacketSize of internal memory for this UVC
+ * function. This is the only straightforward way to resolve the UDC resource
+ * allocation issue in the current gadget framework.
+ */
+ if (gadget_is_superspeed(c->cdev->gadget))
+ ep = usb_ep_autoconfig_ss(cdev->gadget, &uvc_ss_streaming_ep,
+ &uvc_ss_streaming_comp);
+ else if (gadget_is_dualspeed(cdev->gadget))
+ ep = usb_ep_autoconfig(cdev->gadget, &uvc_hs_streaming_ep);
+ else
+ ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep);
+
if (!ep) {
uvcg_info(f, "Unable to allocate streaming EP\n");
goto error;
}
uvc->video.ep = ep;

+ uvc_fs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
uvc_hs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
uvc_ss_streaming_ep.bEndpointAddress = uvc->video.ep->address;

@@ -950,7 +966,8 @@ static void uvc_free(struct usb_function *f)
struct uvc_device *uvc = to_uvc(f);
struct f_uvc_opts *opts = container_of(f->fi, struct f_uvc_opts,
func_inst);
- config_item_put(&uvc->header->item);
+ if (!opts->header)
+ config_item_put(&uvc->header->item);
--opts->refcnt;
kfree(uvc);
}
@@ -1042,25 +1059,29 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
uvc->desc.hs_streaming = opts->hs_streaming;
uvc->desc.ss_streaming = opts->ss_streaming;

- streaming = config_group_find_item(&opts->func_inst.group, "streaming");
- if (!streaming)
- goto err_config;
-
- header = config_group_find_item(to_config_group(streaming), "header");
- config_item_put(streaming);
- if (!header)
- goto err_config;
-
- h = config_group_find_item(to_config_group(header), "h");
- config_item_put(header);
- if (!h)
- goto err_config;
-
- uvc->header = to_uvcg_streaming_header(h);
- if (!uvc->header->linked) {
- mutex_unlock(&opts->lock);
- kfree(uvc);
- return ERR_PTR(-EBUSY);
+ if (opts->header) {
+ uvc->header = opts->header;
+ } else {
+ streaming = config_group_find_item(&opts->func_inst.group, "streaming");
+ if (!streaming)
+ goto err_config;
+
+ header = config_group_find_item(to_config_group(streaming), "header");
+ config_item_put(streaming);
+ if (!header)
+ goto err_config;
+
+ h = config_group_find_item(to_config_group(header), "h");
+ config_item_put(header);
+ if (!h)
+ goto err_config;
+
+ uvc->header = to_uvcg_streaming_header(h);
+ if (!uvc->header->linked) {
+ mutex_unlock(&opts->lock);
+ kfree(uvc);
+ return ERR_PTR(-EBUSY);
+ }
}

uvc->desc.extension_units = &opts->extension_units;
diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
index 1ce58f61253c..3ac392cbb779 100644
--- a/drivers/usb/gadget/function/u_uvc.h
+++ b/drivers/usb/gadget/function/u_uvc.h
@@ -98,6 +98,12 @@ struct f_uvc_opts {
*/
struct mutex lock;
int refcnt;
+
+ /*
+ * Only used by the legacy gadget. It shall be NULL for configfs-composed
+ * gadgets, which is guaranteed by f_uvc's alloc_inst implementation using kzalloc.
+ */
+ struct uvcg_streaming_header *header;
};

#endif /* U_UVC_H */
diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c
index c06dd1af7a0c..c395438d3978 100644
--- a/drivers/usb/gadget/legacy/webcam.c
+++ b/drivers/usb/gadget/legacy/webcam.c
@@ -12,6 +12,7 @@
#include <linux/usb/video.h>

#include "u_uvc.h"
+#include "uvc_configfs.h"

USB_GADGET_COMPOSITE_OPTIONS();

@@ -84,8 +85,6 @@ static struct usb_device_descriptor webcam_device_descriptor = {
.bNumConfigurations = 0, /* dynamic */
};

-DECLARE_UVC_HEADER_DESCRIPTOR(1);
-
static const struct UVC_HEADER_DESCRIPTOR(1) uvc_control_header = {
.bLength = UVC_DT_HEADER_SIZE(1),
.bDescriptorType = USB_DT_CS_INTERFACE,
@@ -158,43 +157,112 @@ static const struct UVC_INPUT_HEADER_DESCRIPTOR(1, 2) uvc_input_header = {
.bmaControls[1][0] = 4,
};

-static const struct uvc_format_uncompressed uvc_format_yuv = {
- .bLength = UVC_DT_FORMAT_UNCOMPRESSED_SIZE,
- .bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubType = UVC_VS_FORMAT_UNCOMPRESSED,
- .bFormatIndex = 1,
- .bNumFrameDescriptors = 2,
- .guidFormat =
- { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00,
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71},
- .bBitsPerPixel = 16,
- .bDefaultFrameIndex = 1,
- .bAspectRatioX = 0,
- .bAspectRatioY = 0,
- .bmInterlaceFlags = 0,
- .bCopyProtect = 0,
+static const struct uvcg_color_matching uvcg_color_matching = {
+ .desc = {
+ .bLength = UVC_DT_COLOR_MATCHING_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_VS_COLORFORMAT,
+ .bColorPrimaries = 1,
+ .bTransferCharacteristics = 1,
+ .bMatrixCoefficients = 4,
+ },
+};
+
+static struct uvcg_uncompressed uvcg_format_yuv = {
+ .fmt = {
+ .type = UVCG_UNCOMPRESSED,
+ /* add to .frames and fill .num_frames at runtime */
+ .color_matching = (struct uvcg_color_matching *)&uvcg_color_matching,
+ },
+ .desc = {
+ .bLength = UVC_DT_FORMAT_UNCOMPRESSED_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_VS_FORMAT_UNCOMPRESSED,
+ .bFormatIndex = 1,
+ .bNumFrameDescriptors = 2,
+ .guidFormat = {
+ 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00,
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71
+ },
+ .bBitsPerPixel = 16,
+ .bDefaultFrameIndex = 1,
+ .bAspectRatioX = 0,
+ .bAspectRatioY = 0,
+ .bmInterlaceFlags = 0,
+ .bCopyProtect = 0,
+ },
+};
+
+static struct uvcg_format_ptr uvcg_format_ptr_yuv = {
+ .fmt = &uvcg_format_yuv.fmt,
};

DECLARE_UVC_FRAME_UNCOMPRESSED(1);
DECLARE_UVC_FRAME_UNCOMPRESSED(3);

+#define UVCG_WIDTH_360P 640
+#define UVCG_HEIGHT_360P 360
+#define UVCG_MIN_BITRATE_360P 18432000
+#define UVCG_MAX_BITRATE_360P 55296000
+#define UVCG_MAX_VIDEO_FB_SZ_360P 460800
+#define UVCG_FRM_INTERV_0_360P 666666
+#define UVCG_FRM_INTERV_1_360P 1000000
+#define UVCG_FRM_INTERV_2_360P 5000000
+#define UVCG_DEFAULT_FRM_INTERV_360P UVCG_FRM_INTERV_0_360P
+
static const struct UVC_FRAME_UNCOMPRESSED(3) uvc_frame_yuv_360p = {
.bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(3),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_FRAME_UNCOMPRESSED,
.bFrameIndex = 1,
.bmCapabilities = 0,
- .wWidth = cpu_to_le16(640),
- .wHeight = cpu_to_le16(360),
- .dwMinBitRate = cpu_to_le32(18432000),
- .dwMaxBitRate = cpu_to_le32(55296000),
- .dwMaxVideoFrameBufferSize = cpu_to_le32(460800),
- .dwDefaultFrameInterval = cpu_to_le32(666666),
+ .wWidth = cpu_to_le16(UVCG_WIDTH_360P),
+ .wHeight = cpu_to_le16(UVCG_HEIGHT_360P),
+ .dwMinBitRate = cpu_to_le32(UVCG_MIN_BITRATE_360P),
+ .dwMaxBitRate = cpu_to_le32(UVCG_MAX_BITRATE_360P),
+ .dwMaxVideoFrameBufferSize = cpu_to_le32(UVCG_MAX_VIDEO_FB_SZ_360P),
+ .dwDefaultFrameInterval = cpu_to_le32(UVCG_DEFAULT_FRM_INTERV_360P),
.bFrameIntervalType = 3,
- .dwFrameInterval[0] = cpu_to_le32(666666),
- .dwFrameInterval[1] = cpu_to_le32(1000000),
- .dwFrameInterval[2] = cpu_to_le32(5000000),
+ .dwFrameInterval[0] = cpu_to_le32(UVCG_FRM_INTERV_0_360P),
+ .dwFrameInterval[1] = cpu_to_le32(UVCG_FRM_INTERV_1_360P),
+ .dwFrameInterval[2] = cpu_to_le32(UVCG_FRM_INTERV_2_360P),
+};
+
+static u32 uvcg_frame_yuv_360p_dw_frame_interval[] = {
+ [0] = UVCG_FRM_INTERV_0_360P,
+ [1] = UVCG_FRM_INTERV_1_360P,
+ [2] = UVCG_FRM_INTERV_2_360P,
+};
+
+static const struct uvcg_frame uvcg_frame_yuv_360p = {
+ .fmt_type = UVCG_UNCOMPRESSED,
+ .frame = {
+ .b_length = UVC_DT_FRAME_UNCOMPRESSED_SIZE(3),
+ .b_descriptor_type = USB_DT_CS_INTERFACE,
+ .b_descriptor_subtype = UVC_VS_FRAME_UNCOMPRESSED,
+ .b_frame_index = 1,
+ .bm_capabilities = 0,
+ .w_width = UVCG_WIDTH_360P,
+ .w_height = UVCG_HEIGHT_360P,
+ .dw_min_bit_rate = UVCG_MIN_BITRATE_360P,
+ .dw_max_bit_rate = UVCG_MAX_BITRATE_360P,
+ .dw_max_video_frame_buffer_size = UVCG_MAX_VIDEO_FB_SZ_360P,
+ .dw_default_frame_interval = UVCG_DEFAULT_FRM_INTERV_360P,
+ .b_frame_interval_type = 3,
+ },
+ .dw_frame_interval = uvcg_frame_yuv_360p_dw_frame_interval,
+};
+
+static struct uvcg_frame_ptr uvcg_frame_ptr_yuv_360p = {
+ .frm = (struct uvcg_frame *)&uvcg_frame_yuv_360p,
};
+#define UVCG_WIDTH_720P 1280
+#define UVCG_HEIGHT_720P 720
+#define UVCG_MIN_BITRATE_720P 29491200
+#define UVCG_MAX_BITRATE_720P 29491200
+#define UVCG_MAX_VIDEO_FB_SZ_720P 1843200
+#define UVCG_FRM_INTERV_0_720P 5000000
+#define UVCG_DEFAULT_FRM_INTERV_720P UVCG_FRM_INTERV_0_720P

static const struct UVC_FRAME_UNCOMPRESSED(1) uvc_frame_yuv_720p = {
.bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(1),
@@ -202,28 +270,66 @@ static const struct UVC_FRAME_UNCOMPRESSED(1) uvc_frame_yuv_720p = {
.bDescriptorSubType = UVC_VS_FRAME_UNCOMPRESSED,
.bFrameIndex = 2,
.bmCapabilities = 0,
- .wWidth = cpu_to_le16(1280),
- .wHeight = cpu_to_le16(720),
- .dwMinBitRate = cpu_to_le32(29491200),
- .dwMaxBitRate = cpu_to_le32(29491200),
- .dwMaxVideoFrameBufferSize = cpu_to_le32(1843200),
- .dwDefaultFrameInterval = cpu_to_le32(5000000),
+ .wWidth = cpu_to_le16(UVCG_WIDTH_720P),
+ .wHeight = cpu_to_le16(UVCG_HEIGHT_720P),
+ .dwMinBitRate = cpu_to_le32(UVCG_MIN_BITRATE_720P),
+ .dwMaxBitRate = cpu_to_le32(UVCG_MAX_BITRATE_720P),
+ .dwMaxVideoFrameBufferSize = cpu_to_le32(UVCG_MAX_VIDEO_FB_SZ_720P),
+ .dwDefaultFrameInterval = cpu_to_le32(UVCG_DEFAULT_FRM_INTERV_720P),
.bFrameIntervalType = 1,
- .dwFrameInterval[0] = cpu_to_le32(5000000),
+ .dwFrameInterval[0] = cpu_to_le32(UVCG_FRM_INTERV_0_720P),
};

-static const struct uvc_format_mjpeg uvc_format_mjpg = {
- .bLength = UVC_DT_FORMAT_MJPEG_SIZE,
- .bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubType = UVC_VS_FORMAT_MJPEG,
- .bFormatIndex = 2,
- .bNumFrameDescriptors = 2,
- .bmFlags = 0,
- .bDefaultFrameIndex = 1,
- .bAspectRatioX = 0,
- .bAspectRatioY = 0,
- .bmInterlaceFlags = 0,
- .bCopyProtect = 0,
+static u32 uvcg_frame_yuv_720p_dw_frame_interval[] = {
+ [0] = UVCG_FRM_INTERV_0_720P,
+};
+
+static const struct uvcg_frame uvcg_frame_yuv_720p = {
+ .fmt_type = UVCG_UNCOMPRESSED,
+ .frame = {
+ .b_length = UVC_DT_FRAME_UNCOMPRESSED_SIZE(1),
+ .b_descriptor_type = USB_DT_CS_INTERFACE,
+ .b_descriptor_subtype = UVC_VS_FRAME_UNCOMPRESSED,
+ .b_frame_index = 2,
+ .bm_capabilities = 0,
+ .w_width = UVCG_WIDTH_720P,
+ .w_height = UVCG_HEIGHT_720P,
+ .dw_min_bit_rate = UVCG_MIN_BITRATE_720P,
+ .dw_max_bit_rate = UVCG_MAX_BITRATE_720P,
+ .dw_max_video_frame_buffer_size = UVCG_MAX_VIDEO_FB_SZ_720P,
+ .dw_default_frame_interval = UVCG_DEFAULT_FRM_INTERV_720P,
+ .b_frame_interval_type = 1,
+ },
+ .dw_frame_interval = uvcg_frame_yuv_720p_dw_frame_interval,
+};
+
+static struct uvcg_frame_ptr uvcg_frame_ptr_yuv_720p = {
+ .frm = (struct uvcg_frame *)&uvcg_frame_yuv_720p,
+};
+
+static struct uvcg_mjpeg uvcg_format_mjpeg = {
+ .fmt = {
+ .type = UVCG_MJPEG,
+ /* add to .frames and fill .num_frames at runtime */
+ .color_matching = (struct uvcg_color_matching *)&uvcg_color_matching,
+ },
+ .desc = {
+ .bLength = UVC_DT_FORMAT_MJPEG_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_VS_FORMAT_MJPEG,
+ .bFormatIndex = 2,
+ .bNumFrameDescriptors = 2,
+ .bmFlags = 0,
+ .bDefaultFrameIndex = 1,
+ .bAspectRatioX = 0,
+ .bAspectRatioY = 0,
+ .bmInterlaceFlags = 0,
+ .bCopyProtect = 0,
+ },
+};
+
+static struct uvcg_format_ptr uvcg_format_ptr_mjpeg = {
+ .fmt = &uvcg_format_mjpeg.fmt,
};

DECLARE_UVC_FRAME_MJPEG(1);
@@ -235,16 +341,45 @@ static const struct UVC_FRAME_MJPEG(3) uvc_frame_mjpg_360p = {
.bDescriptorSubType = UVC_VS_FRAME_MJPEG,
.bFrameIndex = 1,
.bmCapabilities = 0,
- .wWidth = cpu_to_le16(640),
- .wHeight = cpu_to_le16(360),
- .dwMinBitRate = cpu_to_le32(18432000),
- .dwMaxBitRate = cpu_to_le32(55296000),
- .dwMaxVideoFrameBufferSize = cpu_to_le32(460800),
- .dwDefaultFrameInterval = cpu_to_le32(666666),
+ .wWidth = cpu_to_le16(UVCG_WIDTH_360P),
+ .wHeight = cpu_to_le16(UVCG_HEIGHT_360P),
+ .dwMinBitRate = cpu_to_le32(UVCG_MIN_BITRATE_360P),
+ .dwMaxBitRate = cpu_to_le32(UVCG_MAX_BITRATE_360P),
+ .dwMaxVideoFrameBufferSize = cpu_to_le32(UVCG_MAX_VIDEO_FB_SZ_360P),
+ .dwDefaultFrameInterval = cpu_to_le32(UVCG_DEFAULT_FRM_INTERV_360P),
.bFrameIntervalType = 3,
- .dwFrameInterval[0] = cpu_to_le32(666666),
- .dwFrameInterval[1] = cpu_to_le32(1000000),
- .dwFrameInterval[2] = cpu_to_le32(5000000),
+ .dwFrameInterval[0] = cpu_to_le32(UVCG_FRM_INTERV_0_360P),
+ .dwFrameInterval[1] = cpu_to_le32(UVCG_FRM_INTERV_1_360P),
+ .dwFrameInterval[2] = cpu_to_le32(UVCG_FRM_INTERV_2_360P),
+};
+
+static u32 uvcg_frame_mjpeg_360p_dw_frame_interval[] = {
+ [0] = UVCG_FRM_INTERV_0_360P,
+ [1] = UVCG_FRM_INTERV_1_360P,
+ [2] = UVCG_FRM_INTERV_2_360P,
+};
+
+static const struct uvcg_frame uvcg_frame_mjpeg_360p = {
+ .fmt_type = UVCG_MJPEG,
+ .frame = {
+ .b_length = UVC_DT_FRAME_MJPEG_SIZE(3),
+ .b_descriptor_type = USB_DT_CS_INTERFACE,
+ .b_descriptor_subtype = UVC_VS_FRAME_MJPEG,
+ .b_frame_index = 1,
+ .bm_capabilities = 0,
+ .w_width = UVCG_WIDTH_360P,
+ .w_height = UVCG_HEIGHT_360P,
+ .dw_min_bit_rate = UVCG_MIN_BITRATE_360P,
+ .dw_max_bit_rate = UVCG_MAX_BITRATE_360P,
+ .dw_max_video_frame_buffer_size = UVCG_MAX_VIDEO_FB_SZ_360P,
+ .dw_default_frame_interval = UVCG_DEFAULT_FRM_INTERV_360P,
+ .b_frame_interval_type = 3,
+ },
+ .dw_frame_interval = uvcg_frame_mjpeg_360p_dw_frame_interval,
+};
+
+static struct uvcg_frame_ptr uvcg_frame_ptr_mjpeg_360p = {
+ .frm = (struct uvcg_frame *)&uvcg_frame_mjpeg_360p,
};

static const struct UVC_FRAME_MJPEG(1) uvc_frame_mjpg_720p = {
@@ -253,23 +388,44 @@ static const struct UVC_FRAME_MJPEG(1) uvc_frame_mjpg_720p = {
.bDescriptorSubType = UVC_VS_FRAME_MJPEG,
.bFrameIndex = 2,
.bmCapabilities = 0,
- .wWidth = cpu_to_le16(1280),
- .wHeight = cpu_to_le16(720),
- .dwMinBitRate = cpu_to_le32(29491200),
- .dwMaxBitRate = cpu_to_le32(29491200),
- .dwMaxVideoFrameBufferSize = cpu_to_le32(1843200),
- .dwDefaultFrameInterval = cpu_to_le32(5000000),
+ .wWidth = cpu_to_le16(UVCG_WIDTH_720P),
+ .wHeight = cpu_to_le16(UVCG_HEIGHT_720P),
+ .dwMinBitRate = cpu_to_le32(UVCG_MIN_BITRATE_720P),
+ .dwMaxBitRate = cpu_to_le32(UVCG_MAX_BITRATE_720P),
+ .dwMaxVideoFrameBufferSize = cpu_to_le32(UVCG_MAX_VIDEO_FB_SZ_720P),
+ .dwDefaultFrameInterval = cpu_to_le32(UVCG_DEFAULT_FRM_INTERV_720P),
.bFrameIntervalType = 1,
- .dwFrameInterval[0] = cpu_to_le32(5000000),
+ .dwFrameInterval[0] = cpu_to_le32(UVCG_FRM_INTERV_0_720P),
};

-static const struct uvc_color_matching_descriptor uvc_color_matching = {
- .bLength = UVC_DT_COLOR_MATCHING_SIZE,
- .bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubType = UVC_VS_COLORFORMAT,
- .bColorPrimaries = 1,
- .bTransferCharacteristics = 1,
- .bMatrixCoefficients = 4,
+static u32 uvcg_frame_mjpeg_720p_dw_frame_interval[] = {
+ [0] = UVCG_FRM_INTERV_0_720P,
+};
+
+static const struct uvcg_frame uvcg_frame_mjpeg_720p = {
+ .fmt_type = UVCG_MJPEG,
+ .frame = {
+ .b_length = UVC_DT_FRAME_MJPEG_SIZE(1),
+ .b_descriptor_type = USB_DT_CS_INTERFACE,
+ .b_descriptor_subtype = UVC_VS_FRAME_MJPEG,
+ .b_frame_index = 2,
+ .bm_capabilities = 0,
+ .w_width = UVCG_WIDTH_720P,
+ .w_height = UVCG_HEIGHT_720P,
+ .dw_min_bit_rate = UVCG_MIN_BITRATE_720P,
+ .dw_max_bit_rate = UVCG_MAX_BITRATE_720P,
+ .dw_max_video_frame_buffer_size = UVCG_MAX_VIDEO_FB_SZ_720P,
+ .dw_default_frame_interval = UVCG_DEFAULT_FRM_INTERV_720P,
+ .b_frame_interval_type = 1,
+ },
+ .dw_frame_interval = uvcg_frame_mjpeg_720p_dw_frame_interval,
+};
+
+static struct uvcg_frame_ptr uvcg_frame_ptr_mjpeg_720p = {
+ .frm = (struct uvcg_frame *)&uvcg_frame_mjpeg_720p,
+};
+
+static struct uvcg_streaming_header uvcg_streaming_header = {
};

static const struct uvc_descriptor_header * const uvc_fs_control_cls[] = {
@@ -290,40 +446,40 @@ static const struct uvc_descriptor_header * const uvc_ss_control_cls[] = {

static const struct uvc_descriptor_header * const uvc_fs_streaming_cls[] = {
(const struct uvc_descriptor_header *) &uvc_input_header,
- (const struct uvc_descriptor_header *) &uvc_format_yuv,
+ (const struct uvc_descriptor_header *) &uvcg_format_yuv.desc,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
- (const struct uvc_descriptor_header *) &uvc_color_matching,
- (const struct uvc_descriptor_header *) &uvc_format_mjpg,
+ (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
+ (const struct uvc_descriptor_header *) &uvcg_format_mjpeg.desc,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
- (const struct uvc_descriptor_header *) &uvc_color_matching,
+ (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
NULL,
};

static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = {
(const struct uvc_descriptor_header *) &uvc_input_header,
- (const struct uvc_descriptor_header *) &uvc_format_yuv,
+ (const struct uvc_descriptor_header *) &uvcg_format_yuv.desc,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
- (const struct uvc_descriptor_header *) &uvc_color_matching,
- (const struct uvc_descriptor_header *) &uvc_format_mjpg,
+ (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
+ (const struct uvc_descriptor_header *) &uvcg_format_mjpeg.desc,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
- (const struct uvc_descriptor_header *) &uvc_color_matching,
+ (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
NULL,
};

static const struct uvc_descriptor_header * const uvc_ss_streaming_cls[] = {
(const struct uvc_descriptor_header *) &uvc_input_header,
- (const struct uvc_descriptor_header *) &uvc_format_yuv,
+ (const struct uvc_descriptor_header *) &uvcg_format_yuv.desc,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
- (const struct uvc_descriptor_header *) &uvc_color_matching,
- (const struct uvc_descriptor_header *) &uvc_format_mjpg,
+ (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
+ (const struct uvc_descriptor_header *) &uvcg_format_mjpeg.desc,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
- (const struct uvc_descriptor_header *) &uvc_color_matching,
+ (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
NULL,
};

@@ -387,6 +543,23 @@ webcam_bind(struct usb_composite_dev *cdev)
uvc_opts->hs_streaming = uvc_hs_streaming_cls;
uvc_opts->ss_streaming = uvc_ss_streaming_cls;

+ INIT_LIST_HEAD(&uvcg_format_yuv.fmt.frames);
+ list_add_tail(&uvcg_frame_ptr_yuv_360p.entry, &uvcg_format_yuv.fmt.frames);
+ list_add_tail(&uvcg_frame_ptr_yuv_720p.entry, &uvcg_format_yuv.fmt.frames);
+ uvcg_format_yuv.fmt.num_frames = 2;
+
+ INIT_LIST_HEAD(&uvcg_format_mjpeg.fmt.frames);
+ list_add_tail(&uvcg_frame_ptr_mjpeg_360p.entry, &uvcg_format_mjpeg.fmt.frames);
+ list_add_tail(&uvcg_frame_ptr_mjpeg_720p.entry, &uvcg_format_mjpeg.fmt.frames);
+ uvcg_format_mjpeg.fmt.num_frames = 2;
+
+ INIT_LIST_HEAD(&uvcg_streaming_header.formats);
+ list_add_tail(&uvcg_format_ptr_yuv.entry, &uvcg_streaming_header.formats);
+ list_add_tail(&uvcg_format_ptr_mjpeg.entry, &uvcg_streaming_header.formats);
+ uvcg_streaming_header.num_fmt = 2;
+
+ uvc_opts->header = &uvcg_streaming_header;
+
/* Allocate string descriptor numbers ... note that string contents
* can be overridden by the composite_dev glue.
*/
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index bbdf1b0b7be1..3252e3d2d79c 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -7,6 +7,7 @@
* Chunfeng Yun <chunfeng.yun@xxxxxxxxxxxx>
*/

+#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -73,6 +74,9 @@
#define FRMCNT_LEV1_RANG (0x12b << 8)
#define FRMCNT_LEV1_RANG_MASK GENMASK(19, 8)

+#define HSCH_CFG1 0x960
+#define SCH3_RXFIFO_DEPTH_MASK GENMASK(21, 20)
+
#define SS_GEN2_EOF_CFG 0x990
#define SSG2EOF_OFFSET 0x3c

@@ -114,6 +118,8 @@
#define SSC_IP_SLEEP_EN BIT(4)
#define SSC_SPM_INT_EN BIT(1)

+#define SCH_FIFO_TO_KB(x) ((x) >> 10)
+
enum ssusb_uwk_vers {
SSUSB_UWK_V1 = 1,
SSUSB_UWK_V2,
@@ -165,6 +171,35 @@ static void xhci_mtk_set_frame_interval(struct xhci_hcd_mtk *mtk)
writel(value, hcd->regs + SS_GEN2_EOF_CFG);
}

+/*
+ * Workaround for a USB 3.2 Gen1 isoc RX hardware issue:
+ * the host sends out an unexpected ACK after the device finishes a burst
+ * transfer with a short packet.
+ */
+static void xhci_mtk_rxfifo_depth_set(struct xhci_hcd_mtk *mtk)
+{
+ struct usb_hcd *hcd = mtk->hcd;
+ u32 value;
+
+ if (!mtk->rxfifo_depth)
+ return;
+
+ value = readl(hcd->regs + HSCH_CFG1);
+ value &= ~SCH3_RXFIFO_DEPTH_MASK;
+ value |= FIELD_PREP(SCH3_RXFIFO_DEPTH_MASK,
+ SCH_FIFO_TO_KB(mtk->rxfifo_depth) - 1);
+ writel(value, hcd->regs + HSCH_CFG1);
+}
+
+static void xhci_mtk_init_quirk(struct xhci_hcd_mtk *mtk)
+{
+ /* workaround only for mt8195 */
+ xhci_mtk_set_frame_interval(mtk);
+
+ /* workaround for SoCs using SSUSB IP versions earlier than about IPM v1.6.0 */
+ xhci_mtk_rxfifo_depth_set(mtk);
+}
+
static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
{
struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
@@ -448,8 +483,7 @@ static int xhci_mtk_setup(struct usb_hcd *hcd)
if (ret)
return ret;

- /* workaround only for mt8195 */
- xhci_mtk_set_frame_interval(mtk);
+ xhci_mtk_init_quirk(mtk);
}

ret = xhci_gen_setup(hcd, xhci_mtk_quirks);
@@ -527,6 +561,8 @@ static int xhci_mtk_probe(struct platform_device *pdev)
of_property_read_u32(node, "mediatek,u2p-dis-msk",
&mtk->u2p_dis_msk);

+ of_property_read_u32(node, "rx-fifo-depth", &mtk->rxfifo_depth);
+
ret = usb_wakeup_of_property_parse(mtk, node);
if (ret) {
dev_err(dev, "failed to parse uwk property\n");
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index faaaf05e36ce..ac042077db8c 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -160,6 +160,8 @@ struct xhci_hcd_mtk {
struct regmap *uwk;
u32 uwk_reg_base;
u32 uwk_vers;
+ /* quirk */
+ u32 rxfifo_depth;
};

static inline struct xhci_hcd_mtk *hcd_to_mtk(struct usb_hcd *hcd)
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 9ca9305243fe..4e30de4db1c0 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1250,14 +1250,19 @@ static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf)
struct mon_reader_bin *rp = vmf->vma->vm_private_data;
unsigned long offset, chunk_idx;
struct page *pageptr;
+ unsigned long flags;

+ spin_lock_irqsave(&rp->b_lock, flags);
offset = vmf->pgoff << PAGE_SHIFT;
- if (offset >= rp->b_size)
+ if (offset >= rp->b_size) {
+ spin_unlock_irqrestore(&rp->b_lock, flags);
return VM_FAULT_SIGBUS;
+ }
chunk_idx = offset / CHUNK_SIZE;
pageptr = rp->b_vec[chunk_idx].pg;
get_page(pageptr);
vmf->page = pageptr;
+ spin_unlock_irqrestore(&rp->b_lock, flags);
return 0;
}

diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index acd46b72899e..920a32cd094d 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -388,8 +388,7 @@ static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect)

static bool mxs_phy_is_otg_host(struct mxs_phy *mxs_phy)
{
- return IS_ENABLED(CONFIG_USB_OTG) &&
- mxs_phy->phy.last_event == USB_EVENT_ID;
+ return mxs_phy->phy.last_event == USB_EVENT_ID;
}

static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 6e80fab11788..41382b1e2907 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -262,11 +262,13 @@ static void typec_altmode_put_partner(struct altmode *altmode)
{
struct altmode *partner = altmode->partner;
struct typec_altmode *adev;
+ struct typec_altmode *partner_adev;

if (!partner)
return;

adev = &altmode->adev;
+ partner_adev = &partner->adev;

if (is_typec_plug(adev->dev.parent)) {
struct typec_plug *plug = to_typec_plug(adev->dev.parent);
@@ -275,7 +277,7 @@ static void typec_altmode_put_partner(struct altmode *altmode)
} else {
partner->partner = NULL;
}
- put_device(&adev->dev);
+ put_device(&partner_adev->dev);
}

/**
diff --git a/drivers/vdpa/alibaba/eni_vdpa.c b/drivers/vdpa/alibaba/eni_vdpa.c
index 5a09a09cca70..cce3d1837104 100644
--- a/drivers/vdpa/alibaba/eni_vdpa.c
+++ b/drivers/vdpa/alibaba/eni_vdpa.c
@@ -497,7 +497,7 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!eni_vdpa->vring) {
ret = -ENOMEM;
ENI_ERR(pdev, "failed to allocate virtqueues\n");
- goto err;
+ goto err_remove_vp_legacy;
}

for (i = 0; i < eni_vdpa->queues; i++) {
@@ -509,11 +509,13 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = vdpa_register_device(&eni_vdpa->vdpa, eni_vdpa->queues);
if (ret) {
ENI_ERR(pdev, "failed to register to vdpa bus\n");
- goto err;
+ goto err_remove_vp_legacy;
}

return 0;

+err_remove_vp_legacy:
+ vp_legacy_remove(&eni_vdpa->ldev);
err:
put_device(&eni_vdpa->vdpa.dev);
return ret;
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index b2f9778c8366..4d27465c8f1a 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -694,6 +694,7 @@ static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *bu
size_t len, loff_t *pos)
{
struct hisi_acc_vf_migration_file *migf = filp->private_data;
+ u8 *vf_data = (u8 *)&migf->vf_data;
loff_t requested_length;
ssize_t done = 0;
int ret;
@@ -715,7 +716,7 @@ static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *bu
goto out_unlock;
}

- ret = copy_from_user(&migf->vf_data, buf, len);
+ ret = copy_from_user(vf_data + *pos, buf, len);
if (ret) {
done = -EFAULT;
goto out_unlock;
@@ -835,7 +836,9 @@ static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t

len = min_t(size_t, migf->total_length - *pos, len);
if (len) {
- ret = copy_to_user(buf, &migf->vf_data, len);
+ u8 *vf_data = (u8 *)&migf->vf_data;
+
+ ret = copy_to_user(buf, vf_data + *pos, len);
if (ret) {
done = -EFAULT;
goto out_unlock;
diff --git a/drivers/vfio/pci/pds/dirty.c b/drivers/vfio/pci/pds/dirty.c
index c937aa6f3954..27607d7b9030 100644
--- a/drivers/vfio/pci/pds/dirty.c
+++ b/drivers/vfio/pci/pds/dirty.c
@@ -478,8 +478,7 @@ static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
pds_vfio->vf_id, iova, length, pds_vfio->dirty.region_page_size,
pages, bitmap_size);

- if (!length || ((dirty->region_start + iova + length) >
- (dirty->region_start + dirty->region_size))) {
+ if (!length || ((iova - dirty->region_start + length) > dirty->region_size)) {
dev_err(dev, "Invalid iova 0x%lx and/or length 0x%lx to sync\n",
iova, length);
return -EINVAL;
@@ -496,7 +495,8 @@ static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
return -EINVAL;
}

- bmp_offset = DIV_ROUND_UP(iova / dirty->region_page_size, sizeof(u64));
+ bmp_offset = DIV_ROUND_UP((iova - dirty->region_start) /
+ dirty->region_page_size, sizeof(u64));

dev_dbg(dev,
"Syncing dirty bitmap, iova 0x%lx length 0x%lx, bmp_offset %llu bmp_bytes %llu\n",
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 817d377a3f36..61255855d490 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -438,6 +438,7 @@ static struct virtio_transport vhost_transport = {
.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
.notify_buffer_size = virtio_transport_notify_buffer_size,
+ .notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,

.read_skb = virtio_transport_read_skb,
},
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index 163d2c9f951c..f0600f6ca254 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -605,7 +605,7 @@ acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)

static const struct fb_ops acornfb_ops = {
.owner = THIS_MODULE,
- FB_IOMEM_DEFAULT_OPS,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = acornfb_check_var,
.fb_set_par = acornfb_set_par,
.fb_setcolreg = acornfb_setcolreg,
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 274f5d0fa247..1ae1d35a5942 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -132,11 +132,7 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
return 0;

inode_lock(inode);
- /* Kill off the delayed work */
- cancel_delayed_work_sync(&info->deferred_work);
-
- /* Run it immediately */
- schedule_delayed_work(&info->deferred_work, 0);
+ flush_delayed_work(&info->deferred_work);
inode_unlock(inode);

return 0;
@@ -317,7 +313,7 @@ static void fb_deferred_io_lastclose(struct fb_info *info)
struct page *page;
int i;

- cancel_delayed_work_sync(&info->deferred_work);
+ flush_delayed_work(&info->deferred_work);

/* clear out the mapping that we setup */
for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index 84201c9608d3..7042a43b81d8 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -42,6 +42,7 @@
#include <video/videomode.h>

#define PCR_TFT (1 << 31)
+#define PCR_COLOR (1 << 30)
#define PCR_BPIX_8 (3 << 25)
#define PCR_BPIX_12 (4 << 25)
#define PCR_BPIX_16 (5 << 25)
@@ -150,6 +151,12 @@ enum imxfb_type {
IMX21_FB,
};

+enum imxfb_panel_type {
+ PANEL_TYPE_MONOCHROME,
+ PANEL_TYPE_CSTN,
+ PANEL_TYPE_TFT,
+};
+
struct imxfb_info {
struct platform_device *pdev;
void __iomem *regs;
@@ -157,6 +164,7 @@ struct imxfb_info {
struct clk *clk_ahb;
struct clk *clk_per;
enum imxfb_type devtype;
+ enum imxfb_panel_type panel_type;
bool enabled;

/*
@@ -444,6 +452,13 @@ static int imxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
if (!is_imx1_fb(fbi) && imxfb_mode->aus_mode)
fbi->lauscr = LAUSCR_AUS_MODE;

+ if (imxfb_mode->pcr & PCR_TFT)
+ fbi->panel_type = PANEL_TYPE_TFT;
+ else if (imxfb_mode->pcr & PCR_COLOR)
+ fbi->panel_type = PANEL_TYPE_CSTN;
+ else
+ fbi->panel_type = PANEL_TYPE_MONOCHROME;
+
/*
* Copy the RGB parameters for this display
* from the machine specific parameters.
@@ -596,6 +611,7 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
{
struct imxfb_info *fbi = info->par;
u32 ymax_mask = is_imx1_fb(fbi) ? YMAX_MASK_IMX1 : YMAX_MASK_IMX21;
+ u8 left_margin_low;

pr_debug("var: xres=%d hslen=%d lm=%d rm=%d\n",
var->xres, var->hsync_len,
@@ -604,6 +620,13 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
var->yres, var->vsync_len,
var->upper_margin, var->lower_margin);

+ if (fbi->panel_type == PANEL_TYPE_TFT)
+ left_margin_low = 3;
+ else if (fbi->panel_type == PANEL_TYPE_CSTN)
+ left_margin_low = 2;
+ else
+ left_margin_low = 0;
+
#if DEBUG_VAR
if (var->xres < 16 || var->xres > 1024)
printk(KERN_ERR "%s: invalid xres %d\n",
@@ -611,7 +634,7 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
if (var->hsync_len < 1 || var->hsync_len > 64)
printk(KERN_ERR "%s: invalid hsync_len %d\n",
info->fix.id, var->hsync_len);
- if (var->left_margin < 3 || var->left_margin > 255)
+ if (var->left_margin < left_margin_low || var->left_margin > 255)
printk(KERN_ERR "%s: invalid left_margin %d\n",
info->fix.id, var->left_margin);
if (var->right_margin < 1 || var->right_margin > 255)
@@ -637,7 +660,7 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf

writel(HCR_H_WIDTH(var->hsync_len - 1) |
HCR_H_WAIT_1(var->right_margin - 1) |
- HCR_H_WAIT_2(var->left_margin - 3),
+ HCR_H_WAIT_2(var->left_margin - left_margin_low),
fbi->regs + LCDC_HCR);

writel(VCR_V_WIDTH(var->vsync_len) |
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 7a855289ff5e..bb001c5d7f17 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -42,6 +42,7 @@

#define SECS_TO_WDOG_TICKS(x) ((x) << 16)
#define WDOG_TICKS_TO_SECS(x) ((x) >> 16)
+#define WDOG_TICKS_TO_MSECS(x) ((x) * 1000 >> 16)

struct bcm2835_wdt {
void __iomem *base;
@@ -140,7 +141,7 @@ static struct watchdog_device bcm2835_wdt_wdd = {
.info = &bcm2835_wdt_info,
.ops = &bcm2835_wdt_ops,
.min_timeout = 1,
- .max_timeout = WDOG_TICKS_TO_SECS(PM_WDOG_TIME_SET),
+ .max_hw_heartbeat_ms = WDOG_TICKS_TO_MSECS(PM_WDOG_TIME_SET),
.timeout = WDOG_TICKS_TO_SECS(PM_WDOG_TIME_SET),
};

diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index f79f932bca14..79ed1626d8ea 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -178,7 +178,7 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
"3. OA Forward Progress Log\n"
"4. iLO Event Log";

- if (ilo5 && ulReason == NMI_UNKNOWN && !mynmi)
+ if (ulReason == NMI_UNKNOWN && !mynmi)
return NMI_DONE;

if (ilo5 && !pretimeout && !mynmi)
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index 8e1be7ba0103..9215793a1c81 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -77,6 +77,11 @@ static int rti_wdt_start(struct watchdog_device *wdd)
{
u32 timer_margin;
struct rti_wdt_device *wdt = watchdog_get_drvdata(wdd);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(wdd->parent);
+ if (ret)
+ return ret;

/* set timeout period */
timer_margin = (u64)wdd->timeout * wdt->freq;
@@ -343,6 +348,9 @@ static int rti_wdt_probe(struct platform_device *pdev)
if (last_ping)
watchdog_set_last_hw_keepalive(wdd, last_ping);

+ if (!watchdog_hw_running(wdd))
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;

err_iomap:
@@ -357,7 +365,10 @@ static void rti_wdt_remove(struct platform_device *pdev)
struct rti_wdt_device *wdt = platform_get_drvdata(pdev);

watchdog_unregister_device(&wdt->wdd);
- pm_runtime_put(&pdev->dev);
+
+ if (!pm_runtime_suspended(&pdev->dev))
+ pm_runtime_put(&pdev->dev);
+
pm_runtime_disable(&pdev->dev);
}

diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 15df74e11a59..e2bd266b1b5b 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -1073,6 +1073,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)

/* Fill in the data structures */
cdev_init(&wd_data->cdev, &watchdog_fops);
+ wd_data->cdev.owner = wdd->ops->owner;

/* Add the device */
err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
@@ -1087,8 +1088,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
return err;
}

- wd_data->cdev.owner = wdd->ops->owner;
-
/* Record time of most recent heartbeat as 'just before now'. */
wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);
watchdog_set_open_deadline(wd_data);
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index 94df854147d3..7249d70e1a43 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -7,6 +7,7 @@ config CEPH_FS
select CRYPTO_AES
select CRYPTO
select NETFS_SUPPORT
+ select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
default n
help
Choose Y or M here to include support for mounting the
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index c93359ceaae6..d2c035387595 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -748,7 +748,7 @@ static int table_open4(struct inode *inode, struct file *file)
struct seq_file *seq;
int ret;

- ret = seq_open(file, &format5_seq_ops);
+ ret = seq_open(file, &format4_seq_ops);
if (ret)
return ret;

diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 996271473609..d59d9670965c 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/statfs.h>
+#include <linux/printk.h>

#include "internal.h"

@@ -275,8 +276,19 @@ static int efivarfs_get_tree(struct fs_context *fc)
return get_tree_single(fc, efivarfs_fill_super);
}

+static int efivarfs_reconfigure(struct fs_context *fc)
+{
+ if (!efivar_supports_writes() && !(fc->sb_flags & SB_RDONLY)) {
+ pr_err("Firmware does not support SetVariableRT. Can not remount with rw\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct fs_context_operations efivarfs_context_ops = {
.get_tree = efivarfs_get_tree,
+ .reconfigure = efivarfs_reconfigure,
};

static int efivarfs_init_fs_context(struct fs_context *fc)
@@ -287,6 +299,8 @@ static int efivarfs_init_fs_context(struct fs_context *fc)

static void efivarfs_kill_sb(struct super_block *sb)
{
+ struct efivarfs_fs_info *sfi = sb->s_fs_info;
+
kill_litter_super(sb);

if (!efivar_is_available())
@@ -294,6 +308,7 @@ static void efivarfs_kill_sb(struct super_block *sb)

/* Remove all entries and destroy */
efivar_entry_iter(efivarfs_destroy, &efivarfs_list, NULL);
+ kfree(sfi);
}

static struct file_system_type efivarfs_type = {
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
index 349c3316ae6b..279933e007d2 100644
--- a/fs/erofs/compress.h
+++ b/fs/erofs/compress.h
@@ -21,6 +21,8 @@ struct z_erofs_decompress_req {
};

struct z_erofs_decompressor {
+ int (*config)(struct super_block *sb, struct erofs_super_block *dsb,
+ void *data, int size);
int (*decompress)(struct z_erofs_decompress_req *rq,
struct page **pagepool);
char *name;
@@ -92,6 +94,10 @@ int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
extern const struct z_erofs_decompressor erofs_decompressors[];

/* prototypes for specific algorithms */
+int z_erofs_load_lzma_config(struct super_block *sb,
+ struct erofs_super_block *dsb, void *data, int size);
+int z_erofs_load_deflate_config(struct super_block *sb,
+ struct erofs_super_block *dsb, void *data, int size);
int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
struct page **pagepool);
int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index 332ec5f74002..8d1f86487d6e 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -24,11 +24,11 @@ struct z_erofs_lz4_decompress_ctx {
unsigned int oend;
};

-int z_erofs_load_lz4_config(struct super_block *sb,
- struct erofs_super_block *dsb,
- struct z_erofs_lz4_cfgs *lz4, int size)
+static int z_erofs_load_lz4_config(struct super_block *sb,
+ struct erofs_super_block *dsb, void *data, int size)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct z_erofs_lz4_cfgs *lz4 = data;
u16 distance;

if (lz4) {
@@ -370,19 +370,75 @@ const struct z_erofs_decompressor erofs_decompressors[] = {
.name = "interlaced"
},
[Z_EROFS_COMPRESSION_LZ4] = {
+ .config = z_erofs_load_lz4_config,
.decompress = z_erofs_lz4_decompress,
.name = "lz4"
},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
[Z_EROFS_COMPRESSION_LZMA] = {
+ .config = z_erofs_load_lzma_config,
.decompress = z_erofs_lzma_decompress,
.name = "lzma"
},
#endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
[Z_EROFS_COMPRESSION_DEFLATE] = {
+ .config = z_erofs_load_deflate_config,
.decompress = z_erofs_deflate_decompress,
.name = "deflate"
},
#endif
};
+
+int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+ unsigned int algs, alg;
+ erofs_off_t offset;
+ int size, ret = 0;
+
+ if (!erofs_sb_has_compr_cfgs(sbi)) {
+ sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
+ return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
+ }
+
+ sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
+ if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
+ erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
+ sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
+ return -EOPNOTSUPP;
+ }
+
+ erofs_init_metabuf(&buf, sb);
+ offset = EROFS_SUPER_OFFSET + sbi->sb_size;
+ alg = 0;
+ for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
+ void *data;
+
+ if (!(algs & 1))
+ continue;
+
+ data = erofs_read_metadata(sb, &buf, &offset, &size);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ break;
+ }
+
+ if (alg >= ARRAY_SIZE(erofs_decompressors) ||
+ !erofs_decompressors[alg].config) {
+ erofs_err(sb, "algorithm %d isn't enabled on this kernel",
+ alg);
+ ret = -EOPNOTSUPP;
+ } else {
+ ret = erofs_decompressors[alg].config(sb,
+ dsb, data, size);
+ }
+
+ kfree(data);
+ if (ret)
+ break;
+ }
+ erofs_put_metabuf(&buf);
+ return ret;
+}
diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c
index 19e5bdeb30b6..0e1946a6bda5 100644
--- a/fs/erofs/decompressor_deflate.c
+++ b/fs/erofs/decompressor_deflate.c
@@ -77,9 +77,10 @@ int __init z_erofs_deflate_init(void)
}

int z_erofs_load_deflate_config(struct super_block *sb,
- struct erofs_super_block *dsb,
- struct z_erofs_deflate_cfgs *dfl, int size)
+ struct erofs_super_block *dsb, void *data, int size)
{
+ struct z_erofs_deflate_cfgs *dfl = data;
+
if (!dfl || size < sizeof(struct z_erofs_deflate_cfgs)) {
erofs_err(sb, "invalid deflate cfgs, size=%u", size);
return -EINVAL;
diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
index dee10d22ada9..ba4ec73f4aae 100644
--- a/fs/erofs/decompressor_lzma.c
+++ b/fs/erofs/decompressor_lzma.c
@@ -72,10 +72,10 @@ int __init z_erofs_lzma_init(void)
}

int z_erofs_load_lzma_config(struct super_block *sb,
- struct erofs_super_block *dsb,
- struct z_erofs_lzma_cfgs *lzma, int size)
+ struct erofs_super_block *dsb, void *data, int size)
{
static DEFINE_MUTEX(lzma_resize_mutex);
+ struct z_erofs_lzma_cfgs *lzma = data;
unsigned int dict_size, i;
struct z_erofs_lzma *strm, *head = NULL;
int err;
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 4ff88d0dd980..d8de61350dc0 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -469,9 +469,6 @@ int __init z_erofs_init_zip_subsystem(void);
void z_erofs_exit_zip_subsystem(void);
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
struct erofs_workgroup *egrp);
-int z_erofs_load_lz4_config(struct super_block *sb,
- struct erofs_super_block *dsb,
- struct z_erofs_lz4_cfgs *lz4, int len);
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
int flags);
void *erofs_get_pcpubuf(unsigned int requiredpages);
@@ -480,6 +477,7 @@ int erofs_pcpubuf_growsize(unsigned int nrpages);
void __init erofs_pcpubuf_init(void);
void erofs_pcpubuf_exit(void);
int erofs_init_managed_cache(struct super_block *sb);
+int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb);
#else
static inline void erofs_shrinker_register(struct super_block *sb) {}
static inline void erofs_shrinker_unregister(struct super_block *sb) {}
@@ -487,16 +485,6 @@ static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {}
static inline int z_erofs_init_zip_subsystem(void) { return 0; }
static inline void z_erofs_exit_zip_subsystem(void) {}
-static inline int z_erofs_load_lz4_config(struct super_block *sb,
- struct erofs_super_block *dsb,
- struct z_erofs_lz4_cfgs *lz4, int len)
-{
- if (lz4 || dsb->u1.lz4_max_distance) {
- erofs_err(sb, "lz4 algorithm isn't enabled");
- return -EINVAL;
- }
- return 0;
-}
static inline void erofs_pcpubuf_init(void) {}
static inline void erofs_pcpubuf_exit(void) {}
static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
@@ -505,41 +493,17 @@ static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
int __init z_erofs_lzma_init(void);
void z_erofs_lzma_exit(void);
-int z_erofs_load_lzma_config(struct super_block *sb,
- struct erofs_super_block *dsb,
- struct z_erofs_lzma_cfgs *lzma, int size);
#else
static inline int z_erofs_lzma_init(void) { return 0; }
static inline int z_erofs_lzma_exit(void) { return 0; }
-static inline int z_erofs_load_lzma_config(struct super_block *sb,
- struct erofs_super_block *dsb,
- struct z_erofs_lzma_cfgs *lzma, int size) {
- if (lzma) {
- erofs_err(sb, "lzma algorithm isn't enabled");
- return -EINVAL;
- }
- return 0;
-}
#endif /* !CONFIG_EROFS_FS_ZIP_LZMA */

#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
int __init z_erofs_deflate_init(void);
void z_erofs_deflate_exit(void);
-int z_erofs_load_deflate_config(struct super_block *sb,
- struct erofs_super_block *dsb,
- struct z_erofs_deflate_cfgs *dfl, int size);
#else
static inline int z_erofs_deflate_init(void) { return 0; }
static inline int z_erofs_deflate_exit(void) { return 0; }
-static inline int z_erofs_load_deflate_config(struct super_block *sb,
- struct erofs_super_block *dsb,
- struct z_erofs_deflate_cfgs *dfl, int size) {
- if (dfl) {
- erofs_err(sb, "deflate algorithm isn't enabled");
- return -EINVAL;
- }
- return 0;
-}
#endif /* !CONFIG_EROFS_FS_ZIP_DEFLATE */

#ifdef CONFIG_EROFS_FS_ONDEMAND
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 3700af9ee173..cc44fb2e001e 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -156,68 +156,15 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
return buffer;
}

-#ifdef CONFIG_EROFS_FS_ZIP
-static int erofs_load_compr_cfgs(struct super_block *sb,
- struct erofs_super_block *dsb)
+#ifndef CONFIG_EROFS_FS_ZIP
+static int z_erofs_parse_cfgs(struct super_block *sb,
+ struct erofs_super_block *dsb)
{
- struct erofs_sb_info *sbi = EROFS_SB(sb);
- struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
- unsigned int algs, alg;
- erofs_off_t offset;
- int size, ret = 0;
-
- sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
- if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
- erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
- sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
- return -EINVAL;
- }
-
- erofs_init_metabuf(&buf, sb);
- offset = EROFS_SUPER_OFFSET + sbi->sb_size;
- alg = 0;
- for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
- void *data;
-
- if (!(algs & 1))
- continue;
-
- data = erofs_read_metadata(sb, &buf, &offset, &size);
- if (IS_ERR(data)) {
- ret = PTR_ERR(data);
- break;
- }
+ if (!dsb->u1.available_compr_algs)
+ return 0;

- switch (alg) {
- case Z_EROFS_COMPRESSION_LZ4:
- ret = z_erofs_load_lz4_config(sb, dsb, data, size);
- break;
- case Z_EROFS_COMPRESSION_LZMA:
- ret = z_erofs_load_lzma_config(sb, dsb, data, size);
- break;
- case Z_EROFS_COMPRESSION_DEFLATE:
- ret = z_erofs_load_deflate_config(sb, dsb, data, size);
- break;
- default:
- DBG_BUGON(1);
- ret = -EFAULT;
- }
- kfree(data);
- if (ret)
- break;
- }
- erofs_put_metabuf(&buf);
- return ret;
-}
-#else
-static int erofs_load_compr_cfgs(struct super_block *sb,
- struct erofs_super_block *dsb)
-{
- if (dsb->u1.available_compr_algs) {
- erofs_err(sb, "try to load compressed fs when compression is disabled");
- return -EINVAL;
- }
- return 0;
+ erofs_err(sb, "compression disabled, unable to mount compressed EROFS");
+ return -EOPNOTSUPP;
}
#endif

@@ -406,10 +353,7 @@ static int erofs_read_superblock(struct super_block *sb)
}

/* parse on-disk compression configurations */
- if (erofs_sb_has_compr_cfgs(sbi))
- ret = erofs_load_compr_cfgs(sb, dsb);
- else
- ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
+ ret = z_erofs_parse_cfgs(sb, dsb);
if (ret < 0)
goto out;

diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index a7e6847f6f8f..a33cd6757f98 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -1309,12 +1309,11 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
put_page(page);
} else {
for (i = 0; i < pclusterpages; ++i) {
- page = pcl->compressed_bvecs[i].page;
+ /* consider shortlived pages added when decompressing */
+ page = be->compressed_pages[i];

if (erofs_page_is_managed(sbi, page))
continue;
-
- /* recycle all individual short-lived pages */
(void)z_erofs_put_shortlivedpage(be->pagepool, page);
WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
}
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 7b55111fd533..7a1a24ae4a2d 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -458,7 +458,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
.map = map,
};
int err = 0;
- unsigned int lclusterbits, endoff;
+ unsigned int lclusterbits, endoff, afmt;
unsigned long initial_lcn;
unsigned long long ofs, end;

@@ -547,17 +547,20 @@ static int z_erofs_do_map_blocks(struct inode *inode,
err = -EFSCORRUPTED;
goto unmap_out;
}
- if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
- map->m_algorithmformat =
- Z_EROFS_COMPRESSION_INTERLACED;
- else
- map->m_algorithmformat =
- Z_EROFS_COMPRESSION_SHIFTED;
- } else if (m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
- map->m_algorithmformat = vi->z_algorithmtype[1];
+ afmt = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER ?
+ Z_EROFS_COMPRESSION_INTERLACED :
+ Z_EROFS_COMPRESSION_SHIFTED;
} else {
- map->m_algorithmformat = vi->z_algorithmtype[0];
+ afmt = m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2 ?
+ vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
+ if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
+ erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
+ afmt, vi->nid);
+ err = -EFSCORRUPTED;
+ goto unmap_out;
+ }
}
+ map->m_algorithmformat = afmt;

if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
((flags & EROFS_GET_BLOCKS_READMORE) &&
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 1ac34eb49a0e..f5f33926acf8 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2568,9 +2568,6 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio)

page = fio->compressed_page ? fio->compressed_page : fio->page;

- /* wait for GCed page writeback via META_MAPPING */
- f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
-
if (fscrypt_inode_uses_inline_crypto(inode))
return 0;

@@ -2752,6 +2749,10 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
goto out_writepage;
}

+ /* wait for GCed page writeback via META_MAPPING */
+ if (fio->post_read)
+ f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
+
/*
* If current allocation needs SSR,
* it had better in-place writes for updated data.
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index a06f03d23762..a631d706e117 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -42,7 +42,7 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
vm_fault_t ret;

ret = filemap_fault(vmf);
- if (!ret)
+ if (ret & VM_FAULT_LOCKED)
f2fs_update_iostat(F2FS_I_SB(inode), inode,
APP_MAPPED_READ_IO, F2FS_BLKSIZE);

@@ -2818,6 +2818,11 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
goto out;
}

+ if (f2fs_compressed_file(src) || f2fs_compressed_file(dst)) {
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+
ret = -EINVAL;
if (pos_in + len > src->i_size || pos_in + len < pos_in)
goto out_unlock;
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 193b22a2d6bf..02d9c47797be 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -1106,7 +1106,7 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
}

if (old_dir_entry) {
- if (old_dir != new_dir && !whiteout)
+ if (old_dir != new_dir)
f2fs_set_link(old_inode, old_dir_entry,
old_dir_page, new_dir);
else
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 8b30f11f37b4..9e00932770d8 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -2751,11 +2751,11 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
f2fs_update_inode_page(inode);

/* 3: update and set xattr node page dirty */
- if (page)
+ if (page) {
memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
VALID_XATTR_BLOCK_SIZE);
-
- set_page_dirty(xpage);
+ set_page_dirty(xpage);
+ }
f2fs_put_page(xpage, 1);

return 0;
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index e197657db36b..e47cc917d118 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -660,11 +660,14 @@ static int __f2fs_setxattr(struct inode *inode, int index,
here = __find_xattr(base_addr, last_base_addr, NULL, index, len, name);
if (!here) {
if (!F2FS_I(inode)->i_xattr_nid) {
+ error = f2fs_recover_xattr_data(inode, NULL);
f2fs_notice(F2FS_I_SB(inode),
- "recover xattr in inode (%lu)", inode->i_ino);
- f2fs_recover_xattr_data(inode, NULL);
- kfree(base_addr);
- goto retry;
+ "recover xattr in inode (%lu), error(%d)",
+ inode->i_ino, error);
+ if (!error) {
+ kfree(base_addr);
+ goto retry;
+ }
}
f2fs_err(F2FS_I_SB(inode), "set inode (%lu) has corrupted xattr",
inode->i_ino);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 41d0232532a0..f689847bab40 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1493,7 +1493,8 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
LIST_HEAD(dispose);
int count;

- BUG_ON(test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
+ BUG_ON(!test_bit(SDF_NORECOVERY, &sdp->sd_flags) &&
+ test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));

spin_lock(&qd_lock);
list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 9308190895c8..307b952a41f8 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -2306,7 +2306,7 @@ void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
(unsigned long long)rgd->rd_addr, rgd->rd_flags,
rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
rgd->rd_requested, rgd->rd_reserved, rgd->rd_extfail_pt);
- if (rgd->rd_sbd->sd_args.ar_rgrplvb) {
+ if (rgd->rd_sbd->sd_args.ar_rgrplvb && rgd->rd_rgl) {
struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;

gfs2_print_dbg(seq, "%s L: f:%02x b:%u i:%u\n", fs_id_buf,
diff --git a/fs/namespace.c b/fs/namespace.c
index e157efc54023..bfc5cff0e196 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2873,7 +2873,12 @@ static int do_remount(struct path *path, int ms_flags, int sb_flags,
if (IS_ERR(fc))
return PTR_ERR(fc);

+ /*
+ * Indicate to the filesystem that the remount request is coming
+ * from the legacy mount system call.
+ */
fc->oldapi = true;
+
err = parse_monolithic_mount_data(fc, data);
if (!err) {
down_write(&sb->s_umount);
@@ -3322,6 +3327,12 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
if (IS_ERR(fc))
return PTR_ERR(fc);

+ /*
+ * Indicate to the filesystem that the mount request is coming
+ * from the legacy mount system call.
+ */
+ fc->oldapi = true;
+
if (subtype)
err = vfs_parse_fs_string(fc, "subtype",
subtype, strlen(subtype));
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 943aeea1eb16..6be13e0ec170 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -580,6 +580,8 @@ bl_find_get_deviceid(struct nfs_server *server,
nfs4_delete_deviceid(node->ld, node->nfs_client, id);
goto retry;
}
+
+ nfs4_put_deviceid_node(node);
return ERR_PTR(-ENODEV);
}

@@ -893,10 +895,9 @@ bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
}

if (pgio->pg_dreq == NULL)
- wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
- req->wb_index);
+ wb_size = pnfs_num_cont_bytes(pgio->pg_inode, req->wb_index);
else
- wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);
+ wb_size = nfs_dreq_bytes_left(pgio->pg_dreq, req_offset(req));

pnfs_generic_pg_init_write(pgio, req, wb_size);

diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index e6a51fd94fea..9fc5061d51b2 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2968,7 +2968,7 @@ static u64 nfs_access_login_time(const struct task_struct *task,
rcu_read_lock();
for (;;) {
parent = rcu_dereference(task->real_parent);
- pcred = rcu_dereference(parent->cred);
+ pcred = __task_cred(parent);
if (parent == task || cred_fscmp(pcred, cred) != 0)
break;
task = parent;
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index f6c74f424691..5918c67dae0d 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -205,9 +205,10 @@ static void nfs_direct_req_release(struct nfs_direct_req *dreq)
kref_put(&dreq->kref, nfs_direct_req_free);
}

-ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
+ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq, loff_t offset)
{
- return dreq->bytes_left;
+ loff_t start = offset - dreq->io_start;
+ return dreq->max_count - start;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 9c9cf764f600..b1fa81c9dff6 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -655,7 +655,7 @@ extern int nfs_sillyrename(struct inode *dir, struct dentry *dentry);
/* direct.c */
void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
struct nfs_direct_req *dreq);
-extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);
+extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq, loff_t offset);

/* nfs4proc.c */
extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 0ff913b4e9e0..e8b52e36906c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -170,6 +170,7 @@ static int nfs4_map_errors(int err)
case -NFS4ERR_RESOURCE:
case -NFS4ERR_LAYOUTTRYLATER:
case -NFS4ERR_RECALLCONFLICT:
+ case -NFS4ERR_RETURNCONFLICT:
return -EREMOTEIO;
case -NFS4ERR_WRONGSEC:
case -NFS4ERR_WRONG_CRED:
@@ -558,6 +559,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
case -NFS4ERR_GRACE:
case -NFS4ERR_LAYOUTTRYLATER:
case -NFS4ERR_RECALLCONFLICT:
+ case -NFS4ERR_RETURNCONFLICT:
exception->delay = 1;
return 0;

@@ -9662,6 +9664,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
status = -EBUSY;
break;
case -NFS4ERR_RECALLCONFLICT:
+ case -NFS4ERR_RETURNCONFLICT:
status = -ERECALLCONFLICT;
break;
case -NFS4ERR_DELEG_REVOKED:
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 84343aefbbd6..9084f156d67b 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -2729,7 +2729,8 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r
if (pgio->pg_dreq == NULL)
rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
else
- rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
+ rd_size = nfs_dreq_bytes_left(pgio->pg_dreq,
+ req_offset(req));

pgio->pg_lseg =
pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 650e437b55e6..f1848cdd6d34 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -190,7 +190,7 @@ static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
{
int numerr;
struct persistent_ram_buffer *buffer = prz->buffer;
- int ecc_blocks;
+ size_t ecc_blocks;
size_t ecc_total;

if (!ecc_info || !ecc_info->ecc_size)
diff --git a/fs/smb/server/asn1.c b/fs/smb/server/asn1.c
index 4a4b2b03ff33..b931a99ab9c8 100644
--- a/fs/smb/server/asn1.c
+++ b/fs/smb/server/asn1.c
@@ -214,10 +214,15 @@ static int ksmbd_neg_token_alloc(void *context, size_t hdrlen,
{
struct ksmbd_conn *conn = context;

+ if (!vlen)
+ return -EINVAL;
+
conn->mechToken = kmemdup_nul(value, vlen, GFP_KERNEL);
if (!conn->mechToken)
return -ENOMEM;

+ conn->mechTokenLen = (unsigned int)vlen;
+
return 0;
}

diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
index b6fa1e285c40..7977827c6541 100644
--- a/fs/smb/server/connection.c
+++ b/fs/smb/server/connection.c
@@ -415,13 +415,7 @@ static void stop_sessions(void)
again:
down_read(&conn_list_lock);
list_for_each_entry(conn, &conn_list, conns_list) {
- struct task_struct *task;
-
t = conn->transport;
- task = t->handler;
- if (task)
- ksmbd_debug(CONN, "Stop session handler %s/%d\n",
- task->comm, task_pid_nr(task));
ksmbd_conn_set_exiting(conn);
if (t->ops->shutdown) {
up_read(&conn_list_lock);
diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
index 3c005246a32e..0e04cf8b1d89 100644
--- a/fs/smb/server/connection.h
+++ b/fs/smb/server/connection.h
@@ -88,6 +88,7 @@ struct ksmbd_conn {
__u16 dialect;

char *mechToken;
+ unsigned int mechTokenLen;

struct ksmbd_conn_ops *conn_ops;

@@ -134,7 +135,6 @@ struct ksmbd_transport_ops {
struct ksmbd_transport {
struct ksmbd_conn *conn;
struct ksmbd_transport_ops *ops;
- struct task_struct *handler;
};

#define KSMBD_TCP_RECV_TIMEOUT (7 * HZ)
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index 562b180459a1..e0eb7cb2a525 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -1191,6 +1191,12 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
bool prev_op_has_lease;
__le32 prev_op_state = 0;

+ /* Only v2 leases handle the directory */
+ if (S_ISDIR(file_inode(fp->filp)->i_mode)) {
+ if (!lctx || lctx->version != 2)
+ return 0;
+ }
+
opinfo = alloc_opinfo(work, pid, tid);
if (!opinfo)
return -ENOMEM;
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 0e4d514a8c2b..297ed5c5ce8d 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -1414,7 +1414,10 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
char *name;
unsigned int name_off, name_len, secbuf_len;

- secbuf_len = le16_to_cpu(req->SecurityBufferLength);
+ if (conn->use_spnego && conn->mechToken)
+ secbuf_len = conn->mechTokenLen;
+ else
+ secbuf_len = le16_to_cpu(req->SecurityBufferLength);
if (secbuf_len < sizeof(struct authenticate_message)) {
ksmbd_debug(SMB, "blob len %d too small\n", secbuf_len);
return NULL;
@@ -1505,7 +1508,10 @@ static int ntlm_authenticate(struct ksmbd_work *work,
struct authenticate_message *authblob;

authblob = user_authblob(conn, req);
- sz = le16_to_cpu(req->SecurityBufferLength);
+ if (conn->use_spnego && conn->mechToken)
+ sz = conn->mechTokenLen;
+ else
+ sz = le16_to_cpu(req->SecurityBufferLength);
rc = ksmbd_decode_ntlmssp_auth_blob(authblob, sz, conn, sess);
if (rc) {
set_user_flag(sess->user, KSMBD_USER_FLAG_BAD_PASSWORD);
@@ -1778,8 +1784,7 @@ int smb2_sess_setup(struct ksmbd_work *work)

negblob_off = le16_to_cpu(req->SecurityBufferOffset);
negblob_len = le16_to_cpu(req->SecurityBufferLength);
- if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
- negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
+ if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer)) {
rc = -EINVAL;
goto out_err;
}
@@ -1788,8 +1793,15 @@ int smb2_sess_setup(struct ksmbd_work *work)
negblob_off);

if (decode_negotiation_token(conn, negblob, negblob_len) == 0) {
- if (conn->mechToken)
+ if (conn->mechToken) {
negblob = (struct negotiate_message *)conn->mechToken;
+ negblob_len = conn->mechTokenLen;
+ }
+ }
+
+ if (negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
+ rc = -EINVAL;
+ goto out_err;
}

if (server_conf.auth_mechs & conn->auth_mechs) {
diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
index 6691ae68af0c..7c98bf699772 100644
--- a/fs/smb/server/smb_common.c
+++ b/fs/smb/server/smb_common.c
@@ -158,8 +158,12 @@ int ksmbd_verify_smb_message(struct ksmbd_work *work)
*/
bool ksmbd_smb_request(struct ksmbd_conn *conn)
{
- __le32 *proto = (__le32 *)smb2_get_msg(conn->request_buf);
+ __le32 *proto;

+ if (conn->request_buf[0] != 0)
+ return false;
+
+ proto = (__le32 *)smb2_get_msg(conn->request_buf);
if (*proto == SMB2_COMPRESSION_TRANSFORM_ID) {
pr_err_ratelimited("smb2 compression not support yet");
return false;
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index c5629a68c8b7..8faa25c6e129 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -2039,6 +2039,7 @@ static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
{
struct smb_direct_transport *t;
+ struct task_struct *handler;
int ret;

if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
@@ -2056,11 +2057,11 @@ static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
if (ret)
goto out_err;

- KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
- KSMBD_TRANS(t)->conn, "ksmbd:r%u",
- smb_direct_port);
- if (IS_ERR(KSMBD_TRANS(t)->handler)) {
- ret = PTR_ERR(KSMBD_TRANS(t)->handler);
+ handler = kthread_run(ksmbd_conn_handler_loop,
+ KSMBD_TRANS(t)->conn, "ksmbd:r%u",
+ smb_direct_port);
+ if (IS_ERR(handler)) {
+ ret = PTR_ERR(handler);
pr_err("Can't start thread\n");
goto out_err;
}
diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
index eff7a1d793f0..9d4222154dcc 100644
--- a/fs/smb/server/transport_tcp.c
+++ b/fs/smb/server/transport_tcp.c
@@ -185,6 +185,7 @@ static int ksmbd_tcp_new_connection(struct socket *client_sk)
struct sockaddr *csin;
int rc = 0;
struct tcp_transport *t;
+ struct task_struct *handler;

t = alloc_transport(client_sk);
if (!t) {
@@ -199,13 +200,13 @@ static int ksmbd_tcp_new_connection(struct socket *client_sk)
goto out_error;
}

- KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
- KSMBD_TRANS(t)->conn,
- "ksmbd:%u",
- ksmbd_tcp_get_port(csin));
- if (IS_ERR(KSMBD_TRANS(t)->handler)) {
+ handler = kthread_run(ksmbd_conn_handler_loop,
+ KSMBD_TRANS(t)->conn,
+ "ksmbd:%u",
+ ksmbd_tcp_get_port(csin));
+ if (IS_ERR(handler)) {
pr_err("cannot start conn thread\n");
- rc = PTR_ERR(KSMBD_TRANS(t)->handler);
+ rc = PTR_ERR(handler);
free_transport(t);
}
return rc;
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index 3df9f59a544e..f27d66fdc00a 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -34,7 +34,7 @@ static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
*(u16 *)ptr = (new & 0xffffu);
break;
case 4: prev = *(u32 *)ptr;
- if (prev == (old & 0xffffffffffu))
+ if (prev == (old & 0xffffffffu))
*(u32 *)ptr = (new & 0xffffffffu);
break;
case 8: prev = *(u64 *)ptr;
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index ef8ce86b1f78..08b803a4fcde 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -136,6 +136,7 @@ struct af_alg_async_req {
* recvmsg is invoked.
* @init: True if metadata has been sent.
* @len: Length of memory allocated for this data structure.
+ * @inflight: Non-zero when AIO requests are in flight.
*/
struct af_alg_ctx {
struct list_head tsgl_list;
@@ -154,6 +155,8 @@ struct af_alg_ctx {
bool init;

unsigned int len;
+
+ unsigned int inflight;
};

int af_alg_register_type(const struct af_alg_type *type);
diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
index ed5c9660563c..8eeb6730ac6d 100644
--- a/include/drm/display/drm_dp_mst_helper.h
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -832,7 +832,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
int link_rate, int link_lane_count);

-int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
+int drm_dp_calc_pbn_mode(int clock, int bpp);

void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);

diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index c339fc85fd07..103ff57fc83c 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -192,7 +192,7 @@ struct drm_bridge_funcs {
* or &drm_encoder_helper_funcs.dpms hook.
*
* The bridge must assume that the display pipe (i.e. clocks and timing
- * singals) feeding it is no longer running when this callback is
+ * signals) feeding it is no longer running when this callback is
* called.
*
* The @post_disable callback is optional.
diff --git a/include/dt-bindings/clock/qcom,videocc-sm8150.h b/include/dt-bindings/clock/qcom,videocc-sm8150.h
index e24ee840cfdb..c557b78dc572 100644
--- a/include/dt-bindings/clock/qcom,videocc-sm8150.h
+++ b/include/dt-bindings/clock/qcom,videocc-sm8150.h
@@ -16,6 +16,10 @@

/* VIDEO_CC Resets */
#define VIDEO_CC_MVSC_CORE_CLK_BCR 0
+#define VIDEO_CC_INTERFACE_BCR 1
+#define VIDEO_CC_MVS0_BCR 2
+#define VIDEO_CC_MVS1_BCR 3
+#define VIDEO_CC_MVSC_BCR 4

/* VIDEO_CC GDSCRs */
#define VENUS_GDSC 0
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 41d417ee1349..0286bada25ce 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -286,6 +286,11 @@ static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
{
struct bio_vec *bvec = bio_first_bvec_all(bio) + i;

+ if (unlikely(i >= bio->bi_vcnt)) {
+ fi->folio = NULL;
+ return;
+ }
+
fi->folio = page_folio(bvec->bv_page);
fi->offset = bvec->bv_offset +
PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
@@ -303,10 +308,8 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
fi->offset = 0;
fi->length = min(folio_size(fi->folio), fi->_seg_count);
fi->_next = folio_next(fi->folio);
- } else if (fi->_i + 1 < bio->bi_vcnt) {
- bio_first_folio(fi, bio, fi->_i + 1);
} else {
- fi->folio = NULL;
+ bio_first_folio(fi, bio, fi->_i + 1);
}
}

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 75e039081f92..9b08d792fa95 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -106,7 +106,11 @@ struct bpf_map_ops {
/* funcs called by prog_array and perf_event_array map */
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
int fd);
- void (*map_fd_put_ptr)(void *ptr);
+ /* If need_defer is true, the implementation should guarantee that
+ * the to-be-put element is still alive before the bpf program, which
+ * may manipulate it, exits.
+ */
+ void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
u32 (*map_fd_sys_lookup_elem)(void *ptr);
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
@@ -271,7 +275,11 @@ struct bpf_map {
*/
atomic64_t refcnt ____cacheline_aligned;
atomic64_t usercnt;
- struct work_struct work;
+ /* rcu is used before freeing and work is only used during freeing */
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
struct mutex freeze_mutex;
atomic64_t writecnt;
/* 'Ownership' of program-containing map is claimed by the first program
@@ -287,6 +295,7 @@ struct bpf_map {
} owner;
bool bypass_spec_v1;
bool frozen; /* write-once; write-protected by freeze_mutex */
+ bool free_after_mult_rcu_gp;
s64 __percpu *elem_count;
};

diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
index d644bbb298af..bb1223b21308 100644
--- a/include/linux/bpf_mem_alloc.h
+++ b/include/linux/bpf_mem_alloc.h
@@ -11,6 +11,7 @@ struct bpf_mem_caches;
struct bpf_mem_alloc {
struct bpf_mem_caches __percpu *caches;
struct bpf_mem_cache __percpu *cache;
+ bool percpu;
struct work_struct work;
};

diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index ace3a4ce2fc9..1293c38ddb7f 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -448,8 +448,8 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
*/
#define clk_hw_register_fixed_rate_with_accuracy_parent_hw(dev, name, \
parent_hw, flags, fixed_rate, fixed_accuracy) \
- __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw) \
- NULL, NULL, (flags), (fixed_rate), \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (fixed_rate), \
(fixed_accuracy), 0, false)
/**
* clk_hw_register_fixed_rate_with_accuracy_parent_data - register fixed-rate
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 4f0c5d62c8f3..d6e38a500833 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -607,6 +607,12 @@ extern int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip
extern struct gpio_chip *gpiochip_find(void *data,
int (*match)(struct gpio_chip *gc, void *data));

+struct gpio_device *gpio_device_find(void *data,
+ int (*match)(struct gpio_chip *gc, void *data));
+
+struct gpio_device *gpio_device_get(struct gpio_device *gdev);
+void gpio_device_put(struct gpio_device *gdev);
+
bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset);
int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset);
void gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset);
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 7262c9993c39..5c4b3a68053f 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -160,6 +160,11 @@ enum qm_cap_bits {
QM_SUPPORT_RPM,
};

+struct qm_dev_alg {
+ u64 alg_msk;
+ const char *alg;
+};
+
struct dfx_diff_registers {
u32 *regs;
u32 reg_offset;
@@ -265,6 +270,16 @@ struct hisi_qm_cap_info {
u32 v3_val;
};

+struct hisi_qm_cap_record {
+ u32 type;
+ u32 cap_val;
+};
+
+struct hisi_qm_cap_tables {
+ struct hisi_qm_cap_record *qm_cap_table;
+ struct hisi_qm_cap_record *dev_cap_table;
+};
+
struct hisi_qm_list {
struct mutex lock;
struct list_head list;
@@ -352,7 +367,6 @@ struct hisi_qm {
struct work_struct rst_work;
struct work_struct cmd_process;

- const char *algs;
bool use_sva;

resource_size_t phys_base;
@@ -363,6 +377,8 @@ struct hisi_qm {
u32 mb_qos;
u32 type_rate;
struct qm_err_isolate isolate_data;
+
+ struct hisi_qm_cap_tables cap_tables;
};

struct hisi_qp_status {
@@ -536,6 +552,8 @@ void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
const struct hisi_qm_cap_info *info_table,
u32 index, bool is_read);
+int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
+ u32 dev_algs_size);

/* Used by VFIO ACC live migration driver */
struct pci_driver *hisi_sec_get_pf_driver(void);
diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h
index 52620e5b8052..b7904992d561 100644
--- a/include/linux/iio/adc/adi-axi-adc.h
+++ b/include/linux/iio/adc/adi-axi-adc.h
@@ -41,6 +41,7 @@ struct adi_axi_adc_chip_info {
* @reg_access IIO debugfs_reg_access hook for the client ADC
* @read_raw IIO read_raw hook for the client ADC
* @write_raw IIO write_raw hook for the client ADC
+ * @read_avail IIO read_avail hook for the client ADC
*/
struct adi_axi_adc_conv {
const struct adi_axi_adc_chip_info *chip_info;
@@ -54,6 +55,9 @@ struct adi_axi_adc_conv {
int (*write_raw)(struct adi_axi_adc_conv *conv,
struct iio_chan_spec const *chan,
int val, int val2, long mask);
+ int (*read_avail)(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ const int **val, int *type, int *length, long mask);
};

struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
index f198a8ac7ee7..96f3a133540d 100644
--- a/include/linux/mhi_ep.h
+++ b/include/linux/mhi_ep.h
@@ -49,6 +49,18 @@ struct mhi_ep_db_info {
u32 status;
};

+/**
+ * struct mhi_ep_buf_info - MHI Endpoint transfer buffer info
+ * @dev_addr: Address of the buffer in endpoint
+ * @host_addr: Address of the buffer in host
+ * @size: Size of the buffer
+ */
+struct mhi_ep_buf_info {
+ void *dev_addr;
+ u64 host_addr;
+ size_t size;
+};
+
/**
* struct mhi_ep_cntrl - MHI Endpoint controller structure
* @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
@@ -128,14 +140,17 @@ struct mhi_ep_cntrl {
struct work_struct reset_work;
struct work_struct cmd_ring_work;
struct work_struct ch_ring_work;
+ struct kmem_cache *ring_item_cache;
+ struct kmem_cache *ev_ring_el_cache;
+ struct kmem_cache *tre_buf_cache;

void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector);
int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr,
void __iomem **virt, size_t size);
void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
void __iomem *virt, size_t size);
- int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size);
- int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size);
+ int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);

enum mhi_state mhi_state;

diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index f980edfdd278..743475ca7e9d 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -42,7 +42,7 @@ static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
if (!nf_bridge)
return 0;

- return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
+ return nf_bridge->physinif;
}

static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
@@ -56,11 +56,11 @@ static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
}

static inline struct net_device *
-nf_bridge_get_physindev(const struct sk_buff *skb)
+nf_bridge_get_physindev(const struct sk_buff *skb, struct net *net)
{
const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

- return nf_bridge ? nf_bridge->physindev : NULL;
+ return nf_bridge ? dev_get_by_index_rcu(net, nf_bridge->physinif) : NULL;
}

static inline struct net_device *
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 1596b1205b8d..3af5f2998551 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -2101,14 +2101,14 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
(pci_resource_end((dev), (bar)) ? \
resource_size(pci_resource_n((dev), (bar))) : 0)

-#define __pci_dev_for_each_res0(dev, res, ...) \
- for (unsigned int __b = 0; \
- res = pci_resource_n(dev, __b), __b < PCI_NUM_RESOURCES; \
+#define __pci_dev_for_each_res0(dev, res, ...) \
+ for (unsigned int __b = 0; \
+ __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
__b++)

-#define __pci_dev_for_each_res1(dev, res, __b) \
- for (__b = 0; \
- res = pci_resource_n(dev, __b), __b < PCI_NUM_RESOURCES; \
+#define __pci_dev_for_each_res1(dev, res, __b) \
+ for (__b = 0; \
+ __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
__b++)

#define pci_dev_for_each_resource(dev, res, ...) \
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 5e5f920ade90..44aab5c0bd2c 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -303,6 +303,11 @@ static inline void rcu_lock_acquire(struct lockdep_map *map)
lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}

+static inline void rcu_try_lock_acquire(struct lockdep_map *map)
+{
+ lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_);
+}
+
static inline void rcu_lock_release(struct lockdep_map *map)
{
lock_release(map, _THIS_IP_);
@@ -317,6 +322,7 @@ int rcu_read_lock_any_held(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_lock_acquire(a) do { } while (0)
+# define rcu_try_lock_acquire(a) do { } while (0)
# define rcu_lock_release(a) do { } while (0)

static inline int rcu_read_lock_held(void)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 97bfef071255..ddfe86deb4e7 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -295,7 +295,7 @@ struct nf_bridge_info {
u8 bridged_dnat:1;
u8 sabotage_in_done:1;
__u16 frag_max_size;
- struct net_device *physindev;
+ int physinif;

/* always valid & non-NULL from FORWARD on, for physdev match */
struct net_device *physoutdev;
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 127ef3b2e607..236610e4a8fa 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -229,7 +229,7 @@ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp

srcu_check_nmi_safety(ssp, true);
retval = __srcu_read_lock_nmisafe(ssp);
- rcu_lock_acquire(&ssp->dep_map);
+ rcu_try_lock_acquire(&ssp->dep_map);
return retval;
}

diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 27cc1d464321..4dfa9b69ca8d 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -3,6 +3,8 @@
#define _LINUX_VIRTIO_NET_H

#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/udp.h>
#include <uapi/linux/tcp.h>
#include <uapi/linux/virtio_net.h>
@@ -49,6 +51,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
const struct virtio_net_hdr *hdr,
bool little_endian)
{
+ unsigned int nh_min_len = sizeof(struct iphdr);
unsigned int gso_type = 0;
unsigned int thlen = 0;
unsigned int p_off = 0;
@@ -65,6 +68,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
gso_type = SKB_GSO_TCPV6;
ip_proto = IPPROTO_TCP;
thlen = sizeof(struct tcphdr);
+ nh_min_len = sizeof(struct ipv6hdr);
break;
case VIRTIO_NET_HDR_GSO_UDP:
gso_type = SKB_GSO_UDP;
@@ -100,7 +104,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
if (!skb_partial_csum_set(skb, start, off))
return -EINVAL;

- p_off = skb_transport_offset(skb) + thlen;
+ nh_min_len = max_t(u32, nh_min_len, skb_transport_offset(skb));
+ p_off = nh_min_len + thlen;
if (!pskb_may_pull(skb, p_off))
return -EINVAL;
} else {
@@ -140,7 +145,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,

skb_set_transport_header(skb, keys.control.thoff);
} else if (gso_type) {
- p_off = thlen;
+ p_off = nh_min_len + thlen;
if (!pskb_may_pull(skb, p_off))
return -EINVAL;
}
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index c58453699ee9..fbf30721bac9 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -246,4 +246,5 @@ void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t read_actor);
+int virtio_transport_notify_set_rcvlowat(struct vsock_sock *vsk, int val);
#endif /* _LINUX_VIRTIO_VSOCK_H */
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index b01cf9ac2437..dc3cb16835b6 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -137,7 +137,6 @@ struct vsock_transport {
u64 (*stream_rcvhiwat)(struct vsock_sock *);
bool (*stream_is_active)(struct vsock_sock *);
bool (*stream_allow)(u32 cid, u32 port);
- int (*set_rcvlowat)(struct vsock_sock *vsk, int val);

/* SEQ_PACKET. */
ssize_t (*seqpacket_dequeue)(struct vsock_sock *vsk, struct msghdr *msg,
@@ -168,6 +167,7 @@ struct vsock_transport {
struct vsock_transport_send_notify_data *);
/* sk_lock held by the caller */
void (*notify_buffer_size)(struct vsock_sock *, u64 *);
+ int (*notify_set_rcvlowat)(struct vsock_sock *vsk, int val);

/* Shutdown. */
int (*shutdown)(struct vsock_sock *, int);
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 7a4be33bd07a..b83cfcf66664 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -957,7 +957,6 @@ void hci_inquiry_cache_flush(struct hci_dev *hdev);
/* ----- HCI Connections ----- */
enum {
HCI_CONN_AUTH_PEND,
- HCI_CONN_REAUTH_PEND,
HCI_CONN_ENCRYPT_PEND,
HCI_CONN_RSWITCH_PEND,
HCI_CONN_MODE_CHANGE_PEND,
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index d68b0a483431..8b8ed4e13d74 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -128,7 +128,7 @@ netdev_txq_completed_mb(struct netdev_queue *dev_queue,
netdev_txq_completed_mb(txq, pkts, bytes); \
\
_res = -1; \
- if (pkts && likely(get_desc > start_thrs)) { \
+ if (pkts && likely(get_desc >= start_thrs)) { \
_res = 1; \
if (unlikely(netif_tx_queue_stopped(txq)) && \
!(down_cond)) { \
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 0448700890f7..ada7acb91a1b 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4490,6 +4490,8 @@ union bpf_attr {
* long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
+ * Note: the user stack will only be populated if the *task* is
+ * the current task; all other tasks will return -EOPNOTSUPP.
* To achieve this, the helper needs *task*, which is a valid
* pointer to **struct task_struct**. To store the stacktrace, the
* bpf program provides *buf* with a nonnegative *size*.
@@ -4501,6 +4503,7 @@ union bpf_attr {
*
* **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack.
+ * The *task* must be the current task.
* **BPF_F_USER_BUILD_ID**
* Collect buildid+offset instead of ips for user stack,
* only valid if **BPF_F_USER_STACK** is also specified.
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 5dfd30b13f48..21d065a55ad8 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -510,7 +510,10 @@ struct file_system_type rootfs_fs_type = {

void __init init_rootfs(void)
{
- if (IS_ENABLED(CONFIG_TMPFS) && !saved_root_name[0] &&
- (!root_fs_names || strstr(root_fs_names, "tmpfs")))
- is_tmpfs = true;
+ if (IS_ENABLED(CONFIG_TMPFS)) {
+ if (!saved_root_name[0] && !root_fs_names)
+ is_tmpfs = true;
+ else if (root_fs_names && !!strstr(root_fs_names, "tmpfs"))
+ is_tmpfs = true;
+ }
}
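
Note the behavioural difference encoded by the rewritten condition: rootfstype=tmpfs now selects tmpfs for the initial rootfs even when root= names a device, whereas before any root= forced ramfs. A small userspace table of the predicate (a sketch mirroring the new logic, CONFIG_TMPFS assumed enabled):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Mirrors the rewritten init_rootfs() decision (CONFIG_TMPFS assumed on). */
static bool rootfs_is_tmpfs(const char *saved_root_name, const char *root_fs_names)
{
        if (!saved_root_name[0] && !root_fs_names)
                return true;
        if (root_fs_names && strstr(root_fs_names, "tmpfs"))
                return true;
        return false;
}

int main(void)
{
        printf("%d\n", rootfs_is_tmpfs("", NULL));              /* 1: no root=, no rootfstype= */
        printf("%d\n", rootfs_is_tmpfs("", "tmpfs"));           /* 1: rootfstype=tmpfs */
        printf("%d\n", rootfs_is_tmpfs("/dev/sda1", NULL));     /* 0: root= given, default ramfs */
        printf("%d\n", rootfs_is_tmpfs("/dev/sda1", "tmpfs"));  /* 1: now honoured (was 0 before) */
        return 0;
}
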
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 586ac65becfd..ea772a02c140 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1339,7 +1339,7 @@ static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
nr_tw = nr_tw_prev + 1;
/* Large enough to fail the nr_wait comparison below */
if (!(flags & IOU_F_TWQ_LAZY_WAKE))
- nr_tw = -1U;
+ nr_tw = INT_MAX;

req->nr_tw = nr_tw;
req->io_task_work.node.next = first;
@@ -1891,7 +1891,11 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
io_req_complete_defer(req);
else
io_req_complete_post(req, issue_flags);
- } else if (ret != IOU_ISSUE_SKIP_COMPLETE)
+
+ return 0;
+ }
+
+ if (ret != IOU_ISSUE_SKIP_COMPLETE)
return ret;

/* If the op doesn't have a file, we're not polling for it */
@@ -2626,8 +2630,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
__set_current_state(TASK_RUNNING);
atomic_set(&ctx->cq_wait_nr, 0);

- if (ret < 0)
- break;
/*
* Run task_work after scheduling and before io_should_wake().
* If we got woken because of task_work being processed, run it
@@ -2637,6 +2639,18 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
if (!llist_empty(&ctx->work_llist))
io_run_local_work(ctx);

+ /*
+ * Non-local task_work will be run on exit to userspace, but
+ * if we're using DEFER_TASKRUN, then we could have waited
+ * with a timeout for a number of requests. If the timeout
+ * hits, we could have some requests ready to process. Ensure
+ * this break is _after_ we have run task_work, to avoid
+ * deferring running potentially pending requests until the
+ * next time we wait for events.
+ */
+ if (ret < 0)
+ break;
+
check_cq = READ_ONCE(ctx->check_cq);
if (unlikely(check_cq)) {
/* let the caller flush overflows, retry */
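
The comment added above is about ordering only: pending task_work has to be drained before the loop acts on a timeout error, otherwise completions that are already available sit until the next wait. A toy standalone loop with the same shape (names and the simulated -ETIME are illustrative, not io_uring internals):

#include <errno.h>
#include <stdio.h>

static int pending_work = 1;    /* one completion arrives together with the timeout */
static int completions;

static int wait_for_events(void)
{
        return -ETIME;          /* pretend the wait timed out */
}

static void run_pending_task_work(void)
{
        completions += pending_work;    /* posting the deferred completions */
        pending_work = 0;
}

int main(void)
{
        for (;;) {
                int err = wait_for_events();

                /* drain task_work first, then honour the error */
                run_pending_task_work();
                if (err < 0)
                        break;
        }
        printf("completions visible on return: %d\n", completions);    /* 1, not 0 */
        return 0;
}
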
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 8f68d5ad4564..0a0c1c9db0f9 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -549,15 +549,19 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
struct iovec *iov;
int ret;

+ iorw->bytes_done = 0;
+ iorw->free_iovec = NULL;
+
/* submission path, ->uring_lock should already be taken */
ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
if (unlikely(ret < 0))
return ret;

- iorw->bytes_done = 0;
- iorw->free_iovec = iov;
- if (iov)
+ if (iov) {
+ iorw->free_iovec = iov;
req->flags |= REQ_F_NEED_CLEANUP;
+ }
+
return 0;
}

diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index c85ff9162a5c..9bfad7e96913 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -867,7 +867,7 @@ int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
}

if (old_ptr)
- map->ops->map_fd_put_ptr(old_ptr);
+ map->ops->map_fd_put_ptr(map, old_ptr, true);
return 0;
}

@@ -890,7 +890,7 @@ static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
}

if (old_ptr) {
- map->ops->map_fd_put_ptr(old_ptr);
+ map->ops->map_fd_put_ptr(map, old_ptr, true);
return 0;
} else {
return -ENOENT;
@@ -913,8 +913,9 @@ static void *prog_fd_array_get_ptr(struct bpf_map *map,
return prog;
}

-static void prog_fd_array_put_ptr(void *ptr)
+static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
+ /* bpf_prog is freed after one RCU or tasks trace grace period */
bpf_prog_put(ptr);
}

@@ -1201,8 +1202,9 @@ static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
return ee;
}

-static void perf_event_fd_array_put_ptr(void *ptr)
+static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
+ /* bpf_perf_event is freed after one RCU grace period */
bpf_event_entry_free_rcu(ptr);
}

@@ -1256,7 +1258,7 @@ static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
return cgroup_get_from_fd(fd);
}

-static void cgroup_fd_array_put_ptr(void *ptr)
+static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
/* cgroup_put free cgrp after a rcu grace period */
cgroup_put(ptr);
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index fd8d4b0addfc..5b9146fa825f 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -897,7 +897,7 @@ static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)

if (map->ops->map_fd_put_ptr) {
ptr = fd_htab_map_get_ptr(map, l);
- map->ops->map_fd_put_ptr(ptr);
+ map->ops->map_fd_put_ptr(map, ptr, true);
}
}

@@ -2484,7 +2484,7 @@ static void fd_htab_map_free(struct bpf_map *map)
hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
void *ptr = fd_htab_map_get_ptr(map, l);

- map->ops->map_fd_put_ptr(ptr);
+ map->ops->map_fd_put_ptr(map, ptr, false);
}
}

@@ -2525,7 +2525,7 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,

ret = htab_map_update_elem(map, key, &ptr, map_flags);
if (ret)
- map->ops->map_fd_put_ptr(ptr);
+ map->ops->map_fd_put_ptr(map, ptr, false);

return ret;
}
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 17c7e7782a1f..b32be680da6c 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -231,6 +231,9 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
struct lpm_trie_node *node, *found = NULL;
struct bpf_lpm_trie_key *key = _key;

+ if (key->prefixlen > trie->max_prefixlen)
+ return NULL;
+
/* Start walking the trie from the root node ... */

for (node = rcu_dereference_check(trie->root, rcu_read_lock_bh_held());
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index cd5eafaba97e..3248ff5d8161 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -127,12 +127,17 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
return inner_map;
}

-void bpf_map_fd_put_ptr(void *ptr)
+void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
- /* ptr->ops->map_free() has to go through one
- * rcu grace period by itself.
+ struct bpf_map *inner_map = ptr;
+
+ /* The inner map may still be used by both non-sleepable and sleepable
+ * bpf program, so free it after one RCU grace period and one tasks
+ * trace RCU grace period.
*/
- bpf_map_put(ptr);
+ if (need_defer)
+ WRITE_ONCE(inner_map->free_after_mult_rcu_gp, true);
+ bpf_map_put(inner_map);
}

u32 bpf_map_fd_sys_lookup_elem(void *ptr)
diff --git a/kernel/bpf/map_in_map.h b/kernel/bpf/map_in_map.h
index bcb7534afb3c..7d61602354de 100644
--- a/kernel/bpf/map_in_map.h
+++ b/kernel/bpf/map_in_map.h
@@ -13,7 +13,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd);
void bpf_map_meta_free(struct bpf_map *map_meta);
void *bpf_map_fd_get_ptr(struct bpf_map *map, struct file *map_file,
int ufd);
-void bpf_map_fd_put_ptr(void *ptr);
+void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer);
u32 bpf_map_fd_sys_lookup_elem(void *ptr);

#endif
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 956f80ee6f5c..85f9501ff6e6 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -486,31 +486,6 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
}

-static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
-{
- struct llist_node *first;
- unsigned int obj_size;
-
- /* For per-cpu allocator, the size of free objects in free list doesn't
- * match with unit_size and now there is no way to get the size of
- * per-cpu pointer saved in free object, so just skip the checking.
- */
- if (c->percpu_size)
- return 0;
-
- first = c->free_llist.first;
- if (!first)
- return 0;
-
- obj_size = ksize(first);
- if (obj_size != c->unit_size) {
- WARN_ONCE(1, "bpf_mem_cache[%u]: unexpected object size %u, expect %u\n",
- idx, obj_size, c->unit_size);
- return -EINVAL;
- }
- return 0;
-}
-
/* When size != 0 bpf_mem_cache for each cpu.
* This is typical bpf hash map use case when all elements have equal size.
*
@@ -521,10 +496,12 @@ static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
- int cpu, i, err, unit_size, percpu_size = 0;
struct bpf_mem_caches *cc, __percpu *pcc;
struct bpf_mem_cache *c, __percpu *pc;
struct obj_cgroup *objcg = NULL;
+ int cpu, i, unit_size, percpu_size = 0;
+
+ ma->percpu = percpu;

if (size) {
pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
@@ -562,7 +539,6 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
if (!pcc)
return -ENOMEM;
- err = 0;
#ifdef CONFIG_MEMCG_KMEM
objcg = get_obj_cgroup_from_current();
#endif
@@ -575,28 +551,12 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
c->tgt = c;

init_refill_work(c);
- /* Another bpf_mem_cache will be used when allocating
- * c->unit_size in bpf_mem_alloc(), so doesn't prefill
- * for the bpf_mem_cache because these free objects will
- * never be used.
- */
- if (i != bpf_mem_cache_idx(c->unit_size))
- continue;
prefill_mem_cache(c, cpu);
- err = check_obj_size(c, i);
- if (err)
- goto out;
}
}

-out:
ma->caches = pcc;
- /* refill_work is either zeroed or initialized, so it is safe to
- * call irq_work_sync().
- */
- if (err)
- bpf_mem_alloc_destroy(ma);
- return err;
+ return 0;
}

static void drain_mem_cache(struct bpf_mem_cache *c)
@@ -860,7 +820,7 @@ void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
void *ret;

if (!size)
- return ZERO_SIZE_PTR;
+ return NULL;

idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
if (idx < 0)
@@ -872,13 +832,15 @@ void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)

void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
{
+ struct bpf_mem_cache *c;
int idx;

if (!ptr)
return;

- idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
- if (idx < 0)
+ c = *(void **)(ptr - LLIST_NODE_SZ);
+ idx = bpf_mem_cache_idx(c->unit_size);
+ if (WARN_ON_ONCE(idx < 0))
return;

unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
@@ -886,13 +848,15 @@ void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)

void notrace bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
{
+ struct bpf_mem_cache *c;
int idx;

if (!ptr)
return;

- idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
- if (idx < 0)
+ c = *(void **)(ptr - LLIST_NODE_SZ);
+ idx = bpf_mem_cache_idx(c->unit_size);
+ if (WARN_ON_ONCE(idx < 0))
return;

unit_free_rcu(this_cpu_ptr(ma->caches)->cache + idx, ptr);
@@ -966,35 +930,3 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)

return !ret ? NULL : ret + LLIST_NODE_SZ;
}
-
-static __init int bpf_mem_cache_adjust_size(void)
-{
- unsigned int size;
-
- /* Adjusting the indexes in size_index() according to the object_size
- * of underlying slab cache, so bpf_mem_alloc() will select a
- * bpf_mem_cache with unit_size equal to the object_size of
- * the underlying slab cache.
- *
- * The maximal value of KMALLOC_MIN_SIZE and __kmalloc_minalign() is
- * 256-bytes, so only do adjustment for [8-bytes, 192-bytes].
- */
- for (size = 192; size >= 8; size -= 8) {
- unsigned int kmalloc_size, index;
-
- kmalloc_size = kmalloc_size_roundup(size);
- if (kmalloc_size == size)
- continue;
-
- if (kmalloc_size <= 192)
- index = size_index[(kmalloc_size - 1) / 8];
- else
- index = fls(kmalloc_size - 1) - 1;
- /* Only overwrite if necessary */
- if (size_index[(size - 1) / 8] != index)
- size_index[(size - 1) / 8] = index;
- }
-
- return 0;
-}
-subsys_initcall(bpf_mem_cache_adjust_size);
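
With check_obj_size() and the size_index adjustment gone, bpf_mem_free() above now recovers the owning cache from the pointer stored just in front of the object (ptr - LLIST_NODE_SZ) rather than inferring a cache index from ksize(). A minimal userspace sketch of that hidden-header layout (illustrative, nothing here is the kernel allocator):

#include <stdio.h>
#include <stdlib.h>

struct cache { size_t unit_size; };

/* Reserve one pointer-sized header in front of the object and record which
 * cache handed it out; free reads the header back instead of asking the heap
 * how big the allocation "really" is.
 */
static void *cache_alloc(struct cache *c)
{
        void **p = malloc(sizeof(void *) + c->unit_size);

        if (!p)
                return NULL;
        p[0] = c;                       /* hidden header: owning cache */
        return &p[1];                   /* object starts after the header */
}

static void cache_free(void *obj)
{
        void **hdr = (void **)obj - 1;
        struct cache *c = hdr[0];       /* mirrors: c = *(void **)(ptr - LLIST_NODE_SZ) */

        printf("returning %zu-byte object to its cache\n", c->unit_size);
        free(hdr);
}

int main(void)
{
        struct cache c64 = { .unit_size = 64 };
        void *obj = cache_alloc(&c64);

        if (obj)
                cache_free(obj);
        return 0;
}
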
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 458bb80b14d5..36775c4bc33f 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -388,6 +388,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
{
u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
bool user_build_id = flags & BPF_F_USER_BUILD_ID;
+ bool crosstask = task && task != current;
u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
bool user = flags & BPF_F_USER_STACK;
struct perf_callchain_entry *trace;
@@ -410,6 +411,14 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
if (task && user && !user_mode(regs))
goto err_fault;

+ /* get_perf_callchain does not support crosstask user stack walking
+ * but returns an empty stack instead of NULL.
+ */
+ if (crosstask && user) {
+ err = -EOPNOTSUPP;
+ goto clear;
+ }
+
num_elem = size / elem_size;
max_depth = num_elem + skip;
if (sysctl_perf_event_max_stack < max_depth)
@@ -421,7 +430,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
trace = get_callchain_entry_for_task(task, max_depth);
else
trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
- false, false);
+ crosstask, false);
if (unlikely(!trace))
goto err_fault;

diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index d77b2f8b9364..f61c53237c19 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -692,6 +692,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
{
struct bpf_map *map = container_of(work, struct bpf_map, work);
struct btf_record *rec = map->record;
+ struct btf *btf = map->btf;

security_bpf_map_free(map);
bpf_map_release_memcg(map);
@@ -707,6 +708,10 @@ static void bpf_map_free_deferred(struct work_struct *work)
* template bpf_map struct used during verification.
*/
btf_record_free(rec);
+ /* Delay freeing of btf for maps, as map_free callback may need
+ * struct_meta info which will be freed with btf_put().
+ */
+ btf_put(btf);
}

static void bpf_map_put_uref(struct bpf_map *map)
@@ -717,6 +722,28 @@ static void bpf_map_put_uref(struct bpf_map *map)
}
}

+static void bpf_map_free_in_work(struct bpf_map *map)
+{
+ INIT_WORK(&map->work, bpf_map_free_deferred);
+ /* Avoid spawning kworkers, since they all might contend
+ * for the same mutex like slab_mutex.
+ */
+ queue_work(system_unbound_wq, &map->work);
+}
+
+static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
+{
+ bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
+}
+
+static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
+{
+ if (rcu_trace_implies_rcu_gp())
+ bpf_map_free_rcu_gp(rcu);
+ else
+ call_rcu(rcu, bpf_map_free_rcu_gp);
+}
+
/* decrement map refcnt and schedule it for freeing via workqueue
* (underlying map implementation ops->map_free() might sleep)
*/
@@ -725,12 +752,11 @@ void bpf_map_put(struct bpf_map *map)
if (atomic64_dec_and_test(&map->refcnt)) {
/* bpf_map_free_id() must be called first */
bpf_map_free_id(map);
- btf_put(map->btf);
- INIT_WORK(&map->work, bpf_map_free_deferred);
- /* Avoid spawning kworkers, since they all might contend
- * for the same mutex like slab_mutex.
- */
- queue_work(system_unbound_wq, &map->work);
+
+ if (READ_ONCE(map->free_after_mult_rcu_gp))
+ call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
+ else
+ bpf_map_free_in_work(map);
}
}
EXPORT_SYMBOL_GPL(bpf_map_put);
@@ -3171,6 +3197,10 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
*
* - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
* was detached and is going for re-attachment.
+ *
+ * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf
+ * are NULL, then program was already attached and user did not provide
+ * tgt_prog_fd so we have no way to find out or create trampoline
*/
if (!prog->aux->dst_trampoline && !tgt_prog) {
/*
@@ -3184,6 +3214,11 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
err = -EINVAL;
goto out_unlock;
}
+ /* We can allow re-attach only if we have valid attach_btf. */
+ if (!prog->aux->attach_btf) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
btf_id = prog->aux->attach_btf_id;
key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 824531d4c262..97fd1766818b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1632,7 +1632,10 @@ static int resize_reference_state(struct bpf_func_state *state, size_t n)
return 0;
}

-static int grow_stack_state(struct bpf_func_state *state, int size)
+/* Possibly update state->allocated_stack to be at least size bytes. Also
+ * possibly update the function's high-water mark in its bpf_subprog_info.
+ */
+static int grow_stack_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int size)
{
size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;

@@ -1644,6 +1647,11 @@ static int grow_stack_state(struct bpf_func_state *state, int size)
return -ENOMEM;

state->allocated_stack = size;
+
+ /* update known max for given subprogram */
+ if (env->subprog_info[state->subprogno].stack_depth < size)
+ env->subprog_info[state->subprogno].stack_depth = size;
+
return 0;
}

@@ -4323,14 +4331,11 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
struct bpf_reg_state *reg = NULL;
u32 dst_reg = insn->dst_reg;

- err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
- if (err)
- return err;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
* so it's aligned access and [off, off + size) are within stack limits
*/
if (!env->allow_ptr_leaks &&
- state->stack[spi].slot_type[0] == STACK_SPILL &&
+ is_spilled_reg(&state->stack[spi]) &&
size != BPF_REG_SIZE) {
verbose(env, "attempt to corrupt spilled pointer on stack\n");
return -EACCES;
@@ -4481,10 +4486,6 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
(!value_reg && is_bpf_st_mem(insn) && insn->imm == 0))
writing_zero = true;

- err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
- if (err)
- return err;
-
for (i = min_off; i < max_off; i++) {
int spi;

@@ -5599,20 +5600,6 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
strict);
}

-static int update_stack_depth(struct bpf_verifier_env *env,
- const struct bpf_func_state *func,
- int off)
-{
- u16 stack = env->subprog_info[func->subprogno].stack_depth;
-
- if (stack >= -off)
- return 0;
-
- /* update known max for given subprogram */
- env->subprog_info[func->subprogno].stack_depth = -off;
- return 0;
-}
-
/* starting from main bpf function walk all instructions of the function
* and recursively walk all callees that given function can call.
* Ignore jump and exit insns.
@@ -6371,13 +6358,14 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
* The minimum valid offset is -MAX_BPF_STACK for writes, and
* -state->allocated_stack for reads.
*/
-static int check_stack_slot_within_bounds(int off,
- struct bpf_func_state *state,
- enum bpf_access_type t)
+static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
+ s64 off,
+ struct bpf_func_state *state,
+ enum bpf_access_type t)
{
int min_valid_off;

- if (t == BPF_WRITE)
+ if (t == BPF_WRITE || env->allow_uninit_stack)
min_valid_off = -MAX_BPF_STACK;
else
min_valid_off = -state->allocated_stack;
@@ -6400,7 +6388,7 @@ static int check_stack_access_within_bounds(
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = regs + regno;
struct bpf_func_state *state = func(env, reg);
- int min_off, max_off;
+ s64 min_off, max_off;
int err;
char *err_extra;

@@ -6413,11 +6401,8 @@ static int check_stack_access_within_bounds(
err_extra = " write to";

if (tnum_is_const(reg->var_off)) {
- min_off = reg->var_off.value + off;
- if (access_size > 0)
- max_off = min_off + access_size - 1;
- else
- max_off = min_off;
+ min_off = (s64)reg->var_off.value + off;
+ max_off = min_off + access_size;
} else {
if (reg->smax_value >= BPF_MAX_VAR_OFF ||
reg->smin_value <= -BPF_MAX_VAR_OFF) {
@@ -6426,15 +6411,12 @@ static int check_stack_access_within_bounds(
return -EACCES;
}
min_off = reg->smin_value + off;
- if (access_size > 0)
- max_off = reg->smax_value + off + access_size - 1;
- else
- max_off = min_off;
+ max_off = reg->smax_value + off + access_size;
}

- err = check_stack_slot_within_bounds(min_off, state, type);
- if (!err)
- err = check_stack_slot_within_bounds(max_off, state, type);
+ err = check_stack_slot_within_bounds(env, min_off, state, type);
+ if (!err && max_off > 0)
+ err = -EINVAL; /* out of stack access into non-negative offsets */

if (err) {
if (tnum_is_const(reg->var_off)) {
@@ -6447,8 +6429,10 @@ static int check_stack_access_within_bounds(
verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
err_extra, regno, tn_buf, access_size);
}
+ return err;
}
- return err;
+
+ return grow_stack_state(env, state, round_up(-min_off, BPF_REG_SIZE));
}

/* check whether memory at (regno + off) is accessible for t = (read | write)
@@ -6463,7 +6447,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = regs + regno;
- struct bpf_func_state *state;
int size, err = 0;

size = bpf_size_to_bytes(bpf_size);
@@ -6606,11 +6589,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (err)
return err;

- state = func(env, reg);
- err = update_stack_depth(env, state, off);
- if (err)
- return err;
-
if (t == BPF_READ)
err = check_stack_read(env, regno, off, size,
value_regno);
@@ -6805,7 +6783,8 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i

/* When register 'regno' is used to read the stack (either directly or through
* a helper function) make sure that it's within stack boundary and, depending
- * on the access type, that all elements of the stack are initialized.
+ * on the access type and privileges, that all elements of the stack are
+ * initialized.
*
* 'off' includes 'regno->off', but not its dynamic part (if any).
*
@@ -6913,8 +6892,11 @@ static int check_stack_range_initialized(

slot = -i - 1;
spi = slot / BPF_REG_SIZE;
- if (state->allocated_stack <= slot)
- goto err;
+ if (state->allocated_stack <= slot) {
+ verbose(env, "verifier bug: allocated_stack too small");
+ return -EFAULT;
+ }
+
stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
if (*stype == STACK_MISC)
goto mark;
@@ -6938,7 +6920,6 @@ static int check_stack_range_initialized(
goto mark;
}

-err:
if (tnum_is_const(reg->var_off)) {
verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
err_extra, regno, min_off, i - min_off, access_size);
@@ -6963,7 +6944,7 @@ static int check_stack_range_initialized(
* helper may write to the entire memory range.
*/
}
- return update_stack_depth(env, state, min_off);
+ return 0;
}

static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
@@ -9284,6 +9265,13 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
verbose(env, "R0 not a scalar value\n");
return -EACCES;
}
+
+ /* we are going to rely on register's precise value */
+ err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64);
+ err = err ?: mark_chain_precision(env, BPF_REG_0);
+ if (err)
+ return err;
+
if (!tnum_in(range, r0->var_off)) {
verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
return -EINVAL;
@@ -12099,6 +12087,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
}

switch (base_type(ptr_reg->type)) {
+ case PTR_TO_FLOW_KEYS:
+ if (known)
+ break;
+ fallthrough;
case CONST_PTR_TO_MAP:
/* smin_val represents the known value */
if (known && smin_val == 0 && opcode == BPF_ADD)
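
In check_stack_access_within_bounds() above, min_off/max_off become s64 and max_off is now the exclusive end of the access, so any access whose end rises above 0 (into non-negative frame offsets) is rejected outright, and grow_stack_state() only runs once the bounds check passed. Worked numbers for an ordinary 8-byte store at fp-8 (plain arithmetic, not verifier code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t var_off = 0, off = -8, access_size = 8;

        int64_t min_off = var_off + off;                /* -8 */
        int64_t old_max = min_off + access_size - 1;    /* -1: last byte touched */
        int64_t new_max = min_off + access_size;        /*  0: exclusive end */

        /* new rule: reject when the exclusive end rises above 0, i.e. the
         * access would run past fp-0 into non-negative stack offsets
         */
        printf("min=%lld old_max=%lld new_max=%lld ok=%d\n",
               (long long)min_off, (long long)old_max, (long long)new_max,
               new_max <= 0);
        return 0;
}
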
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 438b868cbfa9..35aa2e98a92a 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1349,8 +1349,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
/* PROMPT can only be set if we have MEM_READ permission. */
snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
raw_smp_processor_id());
- if (defcmd_in_progress)
- strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);

/*
* Fetch command from keyboard
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index c21abc77c53e..ff5683a57f77 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -132,8 +132,10 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,

void dma_release_coherent_memory(struct device *dev)
{
- if (dev)
+ if (dev) {
_dma_release_coherent_memory(dev->dma_mem);
+ dev->dma_mem = NULL;
+ }
}

static void *__dma_alloc_from_coherent(struct device *dev,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fa9fff0f9620..d336af9cba13 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3771,17 +3771,17 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
enqueue_load_avg(cfs_rq, se);
if (se->on_rq) {
update_load_add(&cfs_rq->load, se->load.weight);
- if (!curr) {
- /*
- * The entity's vruntime has been adjusted, so let's check
- * whether the rq-wide min_vruntime needs updated too. Since
- * the calculations above require stable min_vruntime rather
- * than up-to-date one, we do the update at the end of the
- * reweight process.
- */
+ if (!curr)
__enqueue_entity(cfs_rq, se);
- update_min_vruntime(cfs_rq);
- }
+
+ /*
+ * The entity's vruntime has been adjusted, so let's check
+ * whether the rq-wide min_vruntime needs updated too. Since
+ * the calculations above require stable min_vruntime rather
+ * than up-to-date one, we do the update at the end of the
+ * reweight process.
+ */
+ update_min_vruntime(cfs_rq);
}
}

diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 87015e9deacc..5cbd0cee83c0 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1547,13 +1547,18 @@ void tick_setup_sched_timer(void)
void tick_cancel_sched_timer(int cpu)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ ktime_t idle_sleeptime, iowait_sleeptime;

# ifdef CONFIG_HIGH_RES_TIMERS
if (ts->sched_timer.base)
hrtimer_cancel(&ts->sched_timer);
# endif

+ idle_sleeptime = ts->idle_sleeptime;
+ iowait_sleeptime = ts->iowait_sleeptime;
memset(ts, 0, sizeof(*ts));
+ ts->idle_sleeptime = idle_sleeptime;
+ ts->iowait_sleeptime = iowait_sleeptime;
}
#endif

diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 868008f56fec..1d76f3b014ae 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -41,6 +41,9 @@
#define bpf_event_rcu_dereference(p) \
rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

+#define MAX_UPROBE_MULTI_CNT (1U << 20)
+#define MAX_KPROBE_MULTI_CNT (1U << 20)
+
#ifdef CONFIG_MODULES
struct bpf_trace_module {
struct module *module;
@@ -2895,6 +2898,8 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
cnt = attr->link_create.kprobe_multi.cnt;
if (!cnt)
return -EINVAL;
+ if (cnt > MAX_KPROBE_MULTI_CNT)
+ return -E2BIG;

size = cnt * sizeof(*addrs);
addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
@@ -3198,6 +3203,8 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr

if (!upath || !uoffsets || !cnt)
return -EINVAL;
+ if (cnt > MAX_UPROBE_MULTI_CNT)
+ return -E2BIG;

uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
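
The new caps bound the attach-time allocations before cnt is multiplied by the element size. Rough arithmetic at the limit (the 8-byte element size is an assumption for the u64 address/cookie arrays, not taken from this hunk):

#include <stdio.h>

int main(void)
{
        unsigned long max_cnt = 1UL << 20;      /* MAX_{K,U}PROBE_MULTI_CNT */
        unsigned long elem = 8;                 /* assumed u64 address/cookie entry */

        printf("%lu bytes (%lu MiB) per array at the cap\n",
               max_cnt * elem, (max_cnt * elem) >> 20);
        return 0;
}
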
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
index 22c5c496a68f..35ddb329dad6 100644
--- a/lib/kunit/debugfs.c
+++ b/lib/kunit/debugfs.c
@@ -53,12 +53,14 @@ static void debugfs_print_result(struct seq_file *seq,
static int debugfs_print_results(struct seq_file *seq, void *v)
{
struct kunit_suite *suite = (struct kunit_suite *)seq->private;
- enum kunit_status success = kunit_suite_has_succeeded(suite);
+ enum kunit_status success;
struct kunit_case *test_case;

if (!suite)
return 0;

+ success = kunit_suite_has_succeeded(suite);
+
/* Print KTAP header so the debugfs log can be parsed as valid KTAP. */
seq_puts(seq, "KTAP version 1\n");
seq_puts(seq, "1..1\n");
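
The debugfs fix above is the usual "use before NULL check" shape: the suite was dereferenced in the initializer, ahead of the if (!suite) guard. A tiny standalone reproduction of the hazard and of the fixed ordering (names illustrative):

#include <stdio.h>

struct suite { int status; };

static int suite_status(const struct suite *s)
{
        return s->status;                       /* dereferences s */
}

static int print_results_buggy(const struct suite *s)
{
        int status = suite_status(s);           /* runs before the guard: NULL crashes here */

        if (!s)
                return 0;
        return status;
}

static int print_results_fixed(const struct suite *s)
{
        int status;

        if (!s)
                return 0;
        status = suite_status(s);               /* only reached for a valid pointer */
        return status;
}

int main(void)
{
        struct suite ok = { .status = 1 };

        printf("%d %d\n", print_results_buggy(&ok), print_results_fixed(&ok));
        printf("%d\n", print_results_fixed(NULL));
        /* print_results_buggy(NULL) would dereference a NULL pointer */
        return 0;
}
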
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index f3139c4c20fc..1fdf4b905365 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -2380,12 +2380,10 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
sizeof(cp), &cp);

- /* If we're already encrypted set the REAUTH_PEND flag,
- * otherwise set the ENCRYPT_PEND.
+ /* Set the ENCRYPT_PEND to trigger encryption after
+ * authentication.
*/
- if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
- set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
- else
+ if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
}

diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index 6b7741f6e95b..233453807b50 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -1046,10 +1046,12 @@ static int min_key_size_set(void *data, u64 val)
{
struct hci_dev *hdev = data;

- if (val > hdev->le_max_key_size || val < SMP_MIN_ENC_KEY_SIZE)
+ hci_dev_lock(hdev);
+ if (val > hdev->le_max_key_size || val < SMP_MIN_ENC_KEY_SIZE) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }

- hci_dev_lock(hdev);
hdev->le_min_key_size = val;
hci_dev_unlock(hdev);

@@ -1074,10 +1076,12 @@ static int max_key_size_set(void *data, u64 val)
{
struct hci_dev *hdev = data;

- if (val > SMP_MAX_ENC_KEY_SIZE || val < hdev->le_min_key_size)
+ hci_dev_lock(hdev);
+ if (val > SMP_MAX_ENC_KEY_SIZE || val < hdev->le_min_key_size) {
+ hci_dev_unlock(hdev);
return -EINVAL;
+ }

- hci_dev_lock(hdev);
hdev->le_max_key_size = val;
hci_dev_unlock(hdev);

diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index f7ebbbd30218..16e442773229 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -3500,14 +3500,8 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,

if (!ev->status) {
clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
-
- if (!hci_conn_ssp_enabled(conn) &&
- test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
- bt_dev_info(hdev, "re-auth of legacy device is not possible.");
- } else {
- set_bit(HCI_CONN_AUTH, &conn->flags);
- conn->sec_level = conn->pending_sec_level;
- }
+ set_bit(HCI_CONN_AUTH, &conn->flags);
+ conn->sec_level = conn->pending_sec_level;
} else {
if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
@@ -3516,7 +3510,6 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
}

clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
- clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

if (conn->state == BT_CONFIG) {
if (!ev->status && hci_conn_ssp_enabled(conn)) {
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 033034d68f1f..92dae4c4922c 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -279,8 +279,17 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_

if ((READ_ONCE(neigh->nud_state) & NUD_CONNECTED) &&
READ_ONCE(neigh->hh.hh_len)) {
+ struct net_device *br_indev;
+
+ br_indev = nf_bridge_get_physindev(skb, net);
+ if (!br_indev) {
+ neigh_release(neigh);
+ goto free_skb;
+ }
+
neigh_hh_bridge(&neigh->hh, skb);
- skb->dev = nf_bridge->physindev;
+ skb->dev = br_indev;
+
ret = br_handle_frame_finish(net, sk, skb);
} else {
/* the neighbour function below overwrites the complete
@@ -352,12 +361,18 @@ br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
*/
static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct net_device *dev = skb->dev;
+ struct net_device *dev = skb->dev, *br_indev;
struct iphdr *iph = ip_hdr(skb);
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
struct rtable *rt;
int err;

+ br_indev = nf_bridge_get_physindev(skb, net);
+ if (!br_indev) {
+ kfree_skb(skb);
+ return 0;
+ }
+
nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;

if (nf_bridge->pkt_otherhost) {
@@ -397,7 +412,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
} else {
if (skb_dst(skb)->dev == dev) {
bridged_dnat:
- skb->dev = nf_bridge->physindev;
+ skb->dev = br_indev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
br_nf_hook_thresh(NF_BR_PRE_ROUTING,
@@ -410,7 +425,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
skb->pkt_type = PACKET_HOST;
}
} else {
- rt = bridge_parent_rtable(nf_bridge->physindev);
+ rt = bridge_parent_rtable(br_indev);
if (!rt) {
kfree_skb(skb);
return 0;
@@ -419,7 +434,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
skb_dst_set_noref(skb, &rt->dst);
}

- skb->dev = nf_bridge->physindev;
+ skb->dev = br_indev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
@@ -456,7 +471,7 @@ struct net_device *setup_pre_routing(struct sk_buff *skb, const struct net *net)
}

nf_bridge->in_prerouting = 1;
- nf_bridge->physindev = skb->dev;
+ nf_bridge->physinif = skb->dev->ifindex;
skb->dev = brnf_get_logical_dev(skb, skb->dev, net);

if (skb->protocol == htons(ETH_P_8021Q))
@@ -553,7 +568,11 @@ static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff
if (skb->protocol == htons(ETH_P_IPV6))
nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;

- in = nf_bridge->physindev;
+ in = nf_bridge_get_physindev(skb, net);
+ if (!in) {
+ kfree_skb(skb);
+ return 0;
+ }
if (nf_bridge->pkt_otherhost) {
skb->pkt_type = PACKET_OTHERHOST;
nf_bridge->pkt_otherhost = false;
@@ -897,6 +916,13 @@ static unsigned int ip_sabotage_in(void *priv,
static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ struct net_device *br_indev;
+
+ br_indev = nf_bridge_get_physindev(skb, dev_net(skb->dev));
+ if (!br_indev) {
+ kfree_skb(skb);
+ return;
+ }

skb_pull(skb, ETH_HLEN);
nf_bridge->bridged_dnat = 0;
@@ -906,7 +932,7 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
nf_bridge->neigh_header,
ETH_HLEN - ETH_ALEN);
- skb->dev = nf_bridge->physindev;
+ skb->dev = br_indev;

nf_bridge->physoutdev = NULL;
br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
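
Switching nf_bridge from a cached struct net_device pointer to an ifindex (physinif) means the bridge input device is looked up again when needed and may legitimately be gone by then, which is why every caller above now handles a NULL from nf_bridge_get_physindev() by dropping the skb. The same store-an-index, resolve-late pattern as a userspace sketch (no kernel API, purely illustrative):

#include <stdio.h>
#include <stdlib.h>

struct device { int ifindex; const char *name; };

static struct device *devices[8];               /* stand-in for the per-netns device list */

static struct device *dev_lookup(int ifindex)
{
        for (int i = 0; i < 8; i++)
                if (devices[i] && devices[i]->ifindex == ifindex)
                        return devices[i];
        return NULL;                            /* the device may have been unregistered */
}

int main(void)
{
        struct device *port = malloc(sizeof(*port));

        *port = (struct device){ .ifindex = 4, .name = "eth0" };
        devices[0] = port;

        int physinif = port->ifindex;           /* remember the index, not the pointer */

        devices[0] = NULL;                      /* device unregisters while the skb waits */
        free(port);

        struct device *in = dev_lookup(physinif);
        if (!in) {
                puts("physindev gone, drop the packet");        /* the new kfree_skb() paths */
                return 0;
        }
        printf("deliver via %s\n", in->name);
        return 0;
}
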
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 550039dfc31a..ad268bd19d5b 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -102,9 +102,15 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
{
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
struct rtable *rt;
- struct net_device *dev = skb->dev;
+ struct net_device *dev = skb->dev, *br_indev;
const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();

+ br_indev = nf_bridge_get_physindev(skb, net);
+ if (!br_indev) {
+ kfree_skb(skb);
+ return 0;
+ }
+
nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;

if (nf_bridge->pkt_otherhost) {
@@ -122,7 +128,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
}

if (skb_dst(skb)->dev == dev) {
- skb->dev = nf_bridge->physindev;
+ skb->dev = br_indev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
br_nf_hook_thresh(NF_BR_PRE_ROUTING,
@@ -133,7 +139,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
skb->pkt_type = PACKET_HOST;
} else {
- rt = bridge_parent_rtable(nf_bridge->physindev);
+ rt = bridge_parent_rtable(br_indev);
if (!rt) {
kfree_skb(skb);
return 0;
@@ -142,7 +148,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
skb_dst_set_noref(skb, &rt->dst);
}

- skb->dev = nf_bridge->physindev;
+ skb->dev = br_indev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb,
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 53c377d054f0..fcf331a447ee 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2869,13 +2869,6 @@ static int do_setlink(const struct sk_buff *skb,
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
}

- if (tb[IFLA_MASTER]) {
- err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
- if (err)
- goto errout;
- status |= DO_SETLINK_MODIFIED;
- }
-
if (ifm->ifi_flags || ifm->ifi_change) {
err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
extack);
@@ -2883,6 +2876,13 @@ static int do_setlink(const struct sk_buff *skb,
goto errout;
}

+ if (tb[IFLA_MASTER]) {
+ err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
+ if (err)
+ goto errout;
+ status |= DO_SETLINK_MODIFIED;
+ }
+
if (tb[IFLA_CARRIER]) {
err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
if (err)
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index f18ca02aa95a..c42ddd85ff1f 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -104,7 +104,7 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
const struct dns_server_list_v1_header *v1;

/* It may be a server list. */
- if (datalen <= sizeof(*v1))
+ if (datalen < sizeof(*v1))
return -EINVAL;

v1 = (const struct dns_server_list_v1_header *)data;
diff --git a/net/ethtool/features.c b/net/ethtool/features.c
index a79af8c25a07..b6cb101d7f19 100644
--- a/net/ethtool/features.c
+++ b/net/ethtool/features.c
@@ -234,17 +234,20 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
dev = req_info.dev;

rtnl_lock();
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto out_rtnl;
ethnl_features_to_bitmap(old_active, dev->features);
ethnl_features_to_bitmap(old_wanted, dev->wanted_features);
ret = ethnl_parse_bitset(req_wanted, req_mask, NETDEV_FEATURE_COUNT,
tb[ETHTOOL_A_FEATURES_WANTED],
netdev_features_strings, info->extack);
if (ret < 0)
- goto out_rtnl;
+ goto out_ops;
if (ethnl_bitmap_to_features(req_mask) & ~NETIF_F_ETHTOOL_BITS) {
GENL_SET_ERR_MSG(info, "attempt to change non-ethtool features");
ret = -EINVAL;
- goto out_rtnl;
+ goto out_ops;
}

/* set req_wanted bits not in req_mask from old_wanted */
@@ -281,6 +284,8 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
if (mod)
netdev_features_change(dev);

+out_ops:
+ ethnl_ops_complete(dev);
out_rtnl:
rtnl_unlock();
ethnl_parse_header_dev_put(&req_info);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 2713c9b06c4c..b0a5de1303b5 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1632,6 +1632,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
#endif
return -EINVAL;
}
+EXPORT_SYMBOL(inet_recv_error);

int inet_gro_complete(struct sk_buff *skb, int nhoff)
{
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9e222a57bc2b..0063a237253b 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1025,6 +1025,10 @@ static int ipmr_cache_report(const struct mr_table *mrt,
struct sk_buff *skb;
int ret;

+ mroute_sk = rcu_dereference(mrt->mroute_sk);
+ if (!mroute_sk)
+ return -EINVAL;
+
if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE)
skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
else
@@ -1069,7 +1073,8 @@ static int ipmr_cache_report(const struct mr_table *mrt,
msg = (struct igmpmsg *)skb_network_header(skb);
msg->im_vif = vifi;
msg->im_vif_hi = vifi >> 8;
- skb_dst_set(skb, dst_clone(skb_dst(pkt)));
+ ipv4_pktinfo_prepare(mroute_sk, pkt);
+ memcpy(skb->cb, pkt->cb, sizeof(skb->cb));
/* Add our header */
igmp = skb_put(skb, sizeof(struct igmphdr));
igmp->type = assert;
@@ -1079,12 +1084,6 @@ static int ipmr_cache_report(const struct mr_table *mrt,
skb->transport_header = skb->network_header;
}

- mroute_sk = rcu_dereference(mrt->mroute_sk);
- if (!mroute_sk) {
- kfree_skb(skb);
- return -EINVAL;
- }
-
igmpmsg_netlink_event(mrt, skb);

/* Deliver to mrouted */
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index f33aeab9424f..fc761915c5f6 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -239,7 +239,6 @@ static int nf_reject_fill_skb_dst(struct sk_buff *skb_in)
void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
int hook)
{
- struct net_device *br_indev __maybe_unused;
struct sk_buff *nskb;
struct iphdr *niph;
const struct tcphdr *oth;
@@ -289,9 +288,13 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
* build the eth header using the original destination's MAC as the
* source, and send the RST packet directly.
*/
- br_indev = nf_bridge_get_physindev(oldskb);
- if (br_indev) {
+ if (nf_bridge_info_exists(oldskb)) {
struct ethhdr *oeth = eth_hdr(oldskb);
+ struct net_device *br_indev;
+
+ br_indev = nf_bridge_get_physindev(oldskb, net);
+ if (!br_indev)
+ goto free_nskb;

nskb->dev = br_indev;
niph->tot_len = htons(nskb->len);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index c3ff984b6354..9cb22a6ae1dc 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -805,7 +805,7 @@ void udp_flush_pending_frames(struct sock *sk)

if (up->pending) {
up->len = 0;
- up->pending = 0;
+ WRITE_ONCE(up->pending, 0);
ip_flush_pending_frames(sk);
}
}
@@ -993,7 +993,7 @@ int udp_push_pending_frames(struct sock *sk)

out:
up->len = 0;
- up->pending = 0;
+ WRITE_ONCE(up->pending, 0);
return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);
@@ -1069,7 +1069,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

fl4 = &inet->cork.fl.u.ip4;
- if (up->pending) {
+ if (READ_ONCE(up->pending)) {
/*
* There are pending frames.
* The socket lock must be held while it's corked.
@@ -1265,7 +1265,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl4->saddr = saddr;
fl4->fl4_dport = dport;
fl4->fl4_sport = inet->inet_sport;
- up->pending = AF_INET;
+ WRITE_ONCE(up->pending, AF_INET);

do_append_data:
up->len += ulen;
@@ -1277,7 +1277,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
else if (!corkreq)
err = udp_push_pending_frames(sk);
else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
- up->pending = 0;
+ WRITE_ONCE(up->pending, 0);
release_sock(sk);

out:
@@ -1315,7 +1315,7 @@ void udp_splice_eof(struct socket *sock)
struct sock *sk = sock->sk;
struct udp_sock *up = udp_sk(sk);

- if (!up->pending || udp_test_bit(CORK, sk))
+ if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
return;

lock_sock(sk);
@@ -3116,16 +3116,18 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
struct bpf_udp_iter_state *iter = seq->private;
struct udp_iter_state *state = &iter->state;
struct net *net = seq_file_net(seq);
+ int resume_bucket, resume_offset;
struct udp_table *udptable;
unsigned int batch_sks = 0;
bool resized = false;
struct sock *sk;

+ resume_bucket = state->bucket;
+ resume_offset = iter->offset;
+
/* The current batch is done, so advance the bucket. */
- if (iter->st_bucket_done) {
+ if (iter->st_bucket_done)
state->bucket++;
- iter->offset = 0;
- }

udptable = udp_get_table_seq(seq, net);

@@ -3145,19 +3147,19 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
for (; state->bucket <= udptable->mask; state->bucket++) {
struct udp_hslot *hslot2 = &udptable->hash2[state->bucket];

- if (hlist_empty(&hslot2->head)) {
- iter->offset = 0;
+ if (hlist_empty(&hslot2->head))
continue;
- }

+ iter->offset = 0;
spin_lock_bh(&hslot2->lock);
udp_portaddr_for_each_entry(sk, &hslot2->head) {
if (seq_sk_match(seq, sk)) {
/* Resume from the last iterated socket at the
* offset in the bucket before iterator was stopped.
*/
- if (iter->offset) {
- --iter->offset;
+ if (state->bucket == resume_bucket &&
+ iter->offset < resume_offset) {
+ ++iter->offset;
continue;
}
if (iter->end_sk < iter->max_sk) {
@@ -3171,9 +3173,6 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)

if (iter->end_sk)
break;
-
- /* Reset the current bucket's offset before moving to the next bucket. */
- iter->offset = 0;
}

/* All done: no batch made. */
@@ -3192,7 +3191,6 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
/* After allocating a larger batch, retry one more time to grab
* the whole bucket.
*/
- state->bucket--;
goto again;
}
done:
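
up->pending can be looked at without the socket lock (for example the early check in udp_splice_eof() above), so the plain stores and lockless loads are now annotated with WRITE_ONCE()/READ_ONCE(). The closest userspace spelling of that annotation pattern is a relaxed C11 atomic; a sketch of the idea, not of UDP itself:

#include <stdatomic.h>
#include <stdio.h>
#include <sys/socket.h>         /* AF_INET */

static _Atomic int pending;     /* stand-in for up->pending */

static void writer_locked(int state)
{
        /* kernel side: WRITE_ONCE(up->pending, state) under lock_sock() */
        atomic_store_explicit(&pending, state, memory_order_relaxed);
}

static int reader_lockless(void)
{
        /* kernel side: if (!READ_ONCE(up->pending)) return; */
        return atomic_load_explicit(&pending, memory_order_relaxed);
}

int main(void)
{
        writer_locked(AF_INET);
        printf("pending=%d\n", reader_lockless());
        writer_locked(0);
        printf("pending=%d\n", reader_lockless());
        return 0;
}
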
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 5e80e517f071..46c19bd48990 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -399,7 +399,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
unsigned int nhoff = raw - skb->data;
unsigned int off = nhoff + sizeof(*ipv6h);
- u8 next, nexthdr = ipv6h->nexthdr;
+ u8 nexthdr = ipv6h->nexthdr;

while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
struct ipv6_opt_hdr *hdr;
@@ -410,25 +410,25 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)

hdr = (struct ipv6_opt_hdr *)(skb->data + off);
if (nexthdr == NEXTHDR_FRAGMENT) {
- struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
- if (frag_hdr->frag_off)
- break;
optlen = 8;
} else if (nexthdr == NEXTHDR_AUTH) {
optlen = ipv6_authlen(hdr);
} else {
optlen = ipv6_optlen(hdr);
}
- /* cache hdr->nexthdr, since pskb_may_pull() might
- * invalidate hdr
- */
- next = hdr->nexthdr;
- if (nexthdr == NEXTHDR_DEST) {
- u16 i = 2;

- /* Remember : hdr is no longer valid at this point. */
- if (!pskb_may_pull(skb, off + optlen))
+ if (!pskb_may_pull(skb, off + optlen))
+ break;
+
+ hdr = (struct ipv6_opt_hdr *)(skb->data + off);
+ if (nexthdr == NEXTHDR_FRAGMENT) {
+ struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr;
+
+ if (frag_hdr->frag_off)
break;
+ }
+ if (nexthdr == NEXTHDR_DEST) {
+ u16 i = 2;

while (1) {
struct ipv6_tlv_tnl_enc_lim *tel;
@@ -449,7 +449,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
i++;
}
}
- nexthdr = next;
+ nexthdr = hdr->nexthdr;
off += optlen;
}
return 0;
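
The rewrite applies the rule the removed comment already hinted at: pskb_may_pull() may reallocate the skb head, so every pointer previously derived from skb->data (hdr, frag_hdr) has to be recomputed after a successful pull, which is why hdr is re-read from skb->data + off. The same invalidation rule in userspace terms, with realloc() as the stand-in (illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *buf = malloc(16);

        strcpy(buf, "header");

        char *hdr = buf;                        /* pointer derived from the old buffer */

        char *bigger = realloc(buf, 4096);      /* may move the data, like pskb_may_pull() */
        if (!bigger) {
                free(buf);
                return 1;
        }
        buf = bigger;

        /* 'hdr' may now dangle; re-derive it from the new base instead */
        hdr = buf;
        printf("%s\n", hdr);

        free(buf);
        return 0;
}
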
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 5ce25bcb9974..f948cf7bfc44 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2723,8 +2723,12 @@ void ipv6_mc_down(struct inet6_dev *idev)
synchronize_net();
mld_query_stop_work(idev);
mld_report_stop_work(idev);
+
+ mutex_lock(&idev->mc_lock);
mld_ifc_stop_work(idev);
mld_gq_stop_work(idev);
+ mutex_unlock(&idev->mc_lock);
+
mld_dad_stop_work(idev);
}

diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 58ccdb08c0fd..71d692728230 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -278,7 +278,6 @@ static int nf_reject6_fill_skb_dst(struct sk_buff *skb_in)
void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
int hook)
{
- struct net_device *br_indev __maybe_unused;
struct sk_buff *nskb;
struct tcphdr _otcph;
const struct tcphdr *otcph;
@@ -354,9 +353,15 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
* build the eth header using the original destination's MAC as the
* source, and send the RST packet directly.
*/
- br_indev = nf_bridge_get_physindev(oldskb);
- if (br_indev) {
+ if (nf_bridge_info_exists(oldskb)) {
struct ethhdr *oeth = eth_hdr(oldskb);
+ struct net_device *br_indev;
+
+ br_indev = nf_bridge_get_physindev(oldskb, net);
+ if (!br_indev) {
+ kfree_skb(nskb);
+ return;
+ }

nskb->dev = br_indev;
nskb->protocol = htons(ETH_P_IPV6);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f60ba4295435..f1170dcc21d9 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1134,7 +1134,7 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
udp_flush_pending_frames(sk);
else if (up->pending) {
up->len = 0;
- up->pending = 0;
+ WRITE_ONCE(up->pending, 0);
ip6_flush_pending_frames(sk);
}
}
@@ -1312,7 +1312,7 @@ static int udp_v6_push_pending_frames(struct sock *sk)
&inet_sk(sk)->cork.base);
out:
up->len = 0;
- up->pending = 0;
+ WRITE_ONCE(up->pending, 0);
return err;
}

@@ -1369,7 +1369,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
default:
return -EINVAL;
}
- } else if (!up->pending) {
+ } else if (!READ_ONCE(up->pending)) {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = &sk->sk_v6_daddr;
@@ -1400,8 +1400,8 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
return -EMSGSIZE;

getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
- if (up->pending) {
- if (up->pending == AF_INET)
+ if (READ_ONCE(up->pending)) {
+ if (READ_ONCE(up->pending) == AF_INET)
return udp_sendmsg(sk, msg, len);
/*
* There are pending frames.
@@ -1591,7 +1591,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
goto out;
}

- up->pending = AF_INET6;
+ WRITE_ONCE(up->pending, AF_INET6);

do_append_data:
if (ipc6.dontfrag < 0)
@@ -1605,7 +1605,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
else if (!corkreq)
err = udp_v6_push_pending_frames(sk);
else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
- up->pending = 0;
+ WRITE_ONCE(up->pending, 0);

if (err > 0)
err = np->recverr ? net_xmit_errno(err) : 0;
@@ -1646,7 +1646,7 @@ static void udpv6_splice_eof(struct socket *sock)
struct sock *sk = sock->sk;
struct udp_sock *up = udp_sk(sk);

- if (!up->pending || udp_test_bit(CORK, sk))
+ if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
return;

lock_sock(sk);
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index c53914012d01..d2527d189a79 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -123,8 +123,8 @@ static void mptcp_parse_option(const struct sk_buff *skb,
break;

case MPTCPOPT_MP_JOIN:
- mp_opt->suboptions |= OPTIONS_MPTCP_MPJ;
if (opsize == TCPOLEN_MPTCP_MPJ_SYN) {
+ mp_opt->suboptions |= OPTION_MPTCP_MPJ_SYN;
mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
mp_opt->join_id = *ptr++;
mp_opt->token = get_unaligned_be32(ptr);
@@ -135,6 +135,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->backup, mp_opt->join_id,
mp_opt->token, mp_opt->nonce);
} else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
+ mp_opt->suboptions |= OPTION_MPTCP_MPJ_SYNACK;
mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
mp_opt->join_id = *ptr++;
mp_opt->thmac = get_unaligned_be64(ptr);
@@ -145,11 +146,10 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->backup, mp_opt->join_id,
mp_opt->thmac, mp_opt->nonce);
} else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
+ mp_opt->suboptions |= OPTION_MPTCP_MPJ_ACK;
ptr += 2;
memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
pr_debug("MP_JOIN hmac");
- } else {
- mp_opt->suboptions &= ~OPTIONS_MPTCP_MPJ;
}
break;

diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 45b8115b363b..d3c5ecf8ddf5 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -157,8 +157,8 @@ static int subflow_check_req(struct request_sock *req,

mptcp_get_options(skb, &mp_opt);

- opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
- opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
+ opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYN);
+ opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYN);
if (opt_mp_capable) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

@@ -254,8 +254,8 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req,
subflow_init_req(req, sk_listener);
mptcp_get_options(skb, &mp_opt);

- opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
- opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
+ opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_ACK);
+ opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK);
if (opt_mp_capable && opt_mp_join)
return -EINVAL;

@@ -486,7 +486,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)

mptcp_get_options(skb, &mp_opt);
if (subflow->request_mptcp) {
- if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
+ if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) {
MPTCP_INC_STATS(sock_net(sk),
MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
mptcp_do_fallback(sk);
@@ -506,7 +506,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
} else if (subflow->request_join) {
u8 hmac[SHA256_DIGEST_SIZE];

- if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ)) {
+ if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYNACK)) {
subflow->reset_reason = MPTCP_RST_EMPTCP;
goto do_reset;
}
@@ -783,12 +783,13 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
* options.
*/
mptcp_get_options(skb, &mp_opt);
- if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC))
+ if (!(mp_opt.suboptions &
+ (OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_ACK)))
fallback = true;

} else if (subflow_req->mp_join) {
mptcp_get_options(skb, &mp_opt);
- if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) ||
+ if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
!subflow_hmac_valid(req, &mp_opt) ||
!mptcp_can_accept_new_subflow(subflow_req->msk)) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 03757e76bb6b..374412ed780b 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -105,8 +105,11 @@ enum {


struct ncsi_channel_version {
- u32 version; /* Supported BCD encoded NCSI version */
- u32 alpha2; /* Supported BCD encoded NCSI version */
+ u8 major; /* NCSI version major */
+ u8 minor; /* NCSI version minor */
+ u8 update; /* NCSI version update */
+ char alpha1; /* NCSI version alpha1 */
+ char alpha2; /* NCSI version alpha2 */
u8 fw_name[12]; /* Firmware name string */
u32 fw_version; /* Firmware version */
u16 pci_ids[4]; /* PCI identification */
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index a3a6753a1db7..2f872d064396 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -71,8 +71,8 @@ static int ncsi_write_channel_info(struct sk_buff *skb,
if (nc == nc->package->preferred_channel)
nla_put_flag(skb, NCSI_CHANNEL_ATTR_FORCED);

- nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MAJOR, nc->version.version);
- nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MINOR, nc->version.alpha2);
+ nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MAJOR, nc->version.major);
+ nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MINOR, nc->version.minor);
nla_put_string(skb, NCSI_CHANNEL_ATTR_VERSION_STR, nc->version.fw_name);

vid_nest = nla_nest_start_noflag(skb, NCSI_CHANNEL_ATTR_VLAN_LIST);
diff --git a/net/ncsi/ncsi-pkt.h b/net/ncsi/ncsi-pkt.h
index ba66c7dc3a21..c9d1da34dc4d 100644
--- a/net/ncsi/ncsi-pkt.h
+++ b/net/ncsi/ncsi-pkt.h
@@ -197,9 +197,12 @@ struct ncsi_rsp_gls_pkt {
/* Get Version ID */
struct ncsi_rsp_gvi_pkt {
struct ncsi_rsp_pkt_hdr rsp; /* Response header */
- __be32 ncsi_version; /* NCSI version */
+ unsigned char major; /* NCSI version major */
+ unsigned char minor; /* NCSI version minor */
+ unsigned char update; /* NCSI version update */
+ unsigned char alpha1; /* NCSI version alpha1 */
unsigned char reserved[3]; /* Reserved */
- unsigned char alpha2; /* NCSI version */
+ unsigned char alpha2; /* NCSI version alpha2 */
unsigned char fw_name[12]; /* f/w name string */
__be32 fw_version; /* f/w version */
__be16 pci_ids[4]; /* PCI IDs */
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 069c2659074b..480e80e3c283 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -19,6 +19,19 @@
#include "ncsi-pkt.h"
#include "ncsi-netlink.h"

+/* Nibbles within [0xA, 0xF] add zero "0" to the returned value.
+ * Optional fields (encoded as 0xFF) will default to zero.
+ */
+static u8 decode_bcd_u8(u8 x)
+{
+ int lo = x & 0xF;
+ int hi = x >> 4;
+
+ lo = lo < 0xA ? lo : 0;
+ hi = hi < 0xA ? hi : 0;
+ return lo + hi * 10;
+}
+
static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
unsigned short payload)
{
@@ -755,9 +768,18 @@ static int ncsi_rsp_handler_gvi(struct ncsi_request *nr)
if (!nc)
return -ENODEV;

- /* Update to channel's version info */
+ /* Update channel's version info
+ *
+ * Major, minor, and update fields are supposed to be
+ * unsigned integers encoded as packed BCD.
+ *
+ * Alpha1 and alpha2 are ISO/IEC 8859-1 characters.
+ */
ncv = &nc->version;
- ncv->version = ntohl(rsp->ncsi_version);
+ ncv->major = decode_bcd_u8(rsp->major);
+ ncv->minor = decode_bcd_u8(rsp->minor);
+ ncv->update = decode_bcd_u8(rsp->update);
+ ncv->alpha1 = rsp->alpha1;
ncv->alpha2 = rsp->alpha2;
memcpy(ncv->fw_name, rsp->fw_name, 12);
ncv->fw_version = ntohl(rsp->fw_version);
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 95aeb31c60e0..30a655e5c4fd 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -138,9 +138,9 @@ hash_netiface4_data_next(struct hash_netiface4_elem *next,
#include "ip_set_hash_gen.h"

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-static const char *get_physindev_name(const struct sk_buff *skb)
+static const char *get_physindev_name(const struct sk_buff *skb, struct net *net)
{
- struct net_device *dev = nf_bridge_get_physindev(skb);
+ struct net_device *dev = nf_bridge_get_physindev(skb, net);

return dev ? dev->name : NULL;
}
@@ -177,7 +177,7 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,

if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- const char *eiface = SRCDIR ? get_physindev_name(skb) :
+ const char *eiface = SRCDIR ? get_physindev_name(skb, xt_net(par)) :
get_physoutdev_name(skb);

if (!eiface)
@@ -395,7 +395,7 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,

if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- const char *eiface = SRCDIR ? get_physindev_name(skb) :
+ const char *eiface = SRCDIR ? get_physindev_name(skb, xt_net(par)) :
get_physoutdev_name(skb);

if (!eiface)
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 9193e109e6b3..65e0259178da 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -271,7 +271,7 @@ static inline bool decrement_ttl(struct netns_ipvs *ipvs,
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_TIME_EXCEED,
ICMPV6_EXC_HOPLIMIT, 0);
- __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);

return false;
}
@@ -286,7 +286,7 @@ static inline bool decrement_ttl(struct netns_ipvs *ipvs,
{
if (ip_hdr(skb)->ttl <= 1) {
/* Tell the sender its packet died... */
- __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
+ IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
return false;
}
diff --git a/net/netfilter/nf_log_syslog.c b/net/netfilter/nf_log_syslog.c
index c66689ad2b49..58402226045e 100644
--- a/net/netfilter/nf_log_syslog.c
+++ b/net/netfilter/nf_log_syslog.c
@@ -111,7 +111,8 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u8 pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
- const struct nf_loginfo *loginfo, const char *prefix)
+ const struct nf_loginfo *loginfo, const char *prefix,
+ struct net *net)
{
const struct net_device *physoutdev __maybe_unused;
const struct net_device *physindev __maybe_unused;
@@ -121,7 +122,7 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u8 pf,
in ? in->name : "",
out ? out->name : "");
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- physindev = nf_bridge_get_physindev(skb);
+ physindev = nf_bridge_get_physindev(skb, net);
if (physindev && in != physindev)
nf_log_buf_add(m, "PHYSIN=%s ", physindev->name);
physoutdev = nf_bridge_get_physoutdev(skb);
@@ -148,7 +149,7 @@ static void nf_log_arp_packet(struct net *net, u_int8_t pf,
loginfo = &default_loginfo;

nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
- prefix);
+ prefix, net);
dump_arp_packet(m, loginfo, skb, skb_network_offset(skb));

nf_log_buf_close(m);
@@ -845,7 +846,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf,
loginfo = &default_loginfo;

nf_log_dump_packet_common(m, pf, hooknum, skb, in,
- out, loginfo, prefix);
+ out, loginfo, prefix, net);

if (in)
dump_mac_header(m, loginfo, skb);
@@ -880,7 +881,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
loginfo = &default_loginfo;

nf_log_dump_packet_common(m, pf, hooknum, skb, in, out,
- loginfo, prefix);
+ loginfo, prefix, net);

if (in)
dump_mac_header(m, loginfo, skb);
@@ -916,7 +917,7 @@ static void nf_log_unknown_packet(struct net *net, u_int8_t pf,
loginfo = &default_loginfo;

nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
- prefix);
+ prefix, net);

dump_mac_header(m, loginfo, skb);

diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 63d1516816b1..e2f334f70281 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -82,11 +82,9 @@ static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
const struct sk_buff *skb = entry->skb;
- struct nf_bridge_info *nf_bridge;

- nf_bridge = nf_bridge_info_get(skb);
- if (nf_bridge) {
- entry->physin = nf_bridge_get_physindev(skb);
+ if (nf_bridge_info_exists(skb)) {
+ entry->physin = nf_bridge_get_physindev(skb, entry->state.net);
entry->physout = nf_bridge_get_physoutdev(skb);
} else {
entry->physin = NULL;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 5a14e1ab0b13..b28fbcb86e94 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2263,7 +2263,16 @@ static int nft_chain_parse_hook(struct net *net,
return -EOPNOTSUPP;
}

- type = basechain->type;
+ if (nla[NFTA_CHAIN_TYPE]) {
+ type = __nf_tables_chain_type_lookup(nla[NFTA_CHAIN_TYPE],
+ family);
+ if (!type) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TYPE]);
+ return -ENOENT;
+ }
+ } else {
+ type = basechain->type;
+ }
}

if (!try_module_get(type->owner)) {
@@ -4744,8 +4753,8 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
static int nft_set_desc_concat(struct nft_set_desc *desc,
const struct nlattr *nla)
{
+ u32 num_regs = 0, key_num_regs = 0;
struct nlattr *attr;
- u32 num_regs = 0;
int rem, err, i;

nla_for_each_nested(attr, nla, rem) {
@@ -4760,6 +4769,10 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
for (i = 0; i < desc->field_count; i++)
num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));

+ key_num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32));
+ if (key_num_regs != num_regs)
+ return -EINVAL;
+
if (num_regs > NFT_REG32_COUNT)
return -E2BIG;

@@ -4981,16 +4994,28 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
}

desc.policy = NFT_SET_POL_PERFORMANCE;
- if (nla[NFTA_SET_POLICY] != NULL)
+ if (nla[NFTA_SET_POLICY] != NULL) {
desc.policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
+ switch (desc.policy) {
+ case NFT_SET_POL_PERFORMANCE:
+ case NFT_SET_POL_MEMORY:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }

if (nla[NFTA_SET_DESC] != NULL) {
err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]);
if (err < 0)
return err;

- if (desc.field_count > 1 && !(flags & NFT_SET_CONCAT))
+ if (desc.field_count > 1) {
+ if (!(flags & NFT_SET_CONCAT))
+ return -EINVAL;
+ } else if (flags & NFT_SET_CONCAT) {
return -EINVAL;
+ }
} else if (flags & NFT_SET_CONCAT) {
return -EINVAL;
}
@@ -5641,7 +5666,7 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
struct nft_set_dump_args *args;

- if (nft_set_elem_expired(ext))
+ if (nft_set_elem_expired(ext) || nft_set_elem_is_dead(ext))
return 0;

args = container_of(iter, struct nft_set_dump_args, iter);
@@ -6422,7 +6447,7 @@ static int nft_setelem_catchall_deactivate(const struct net *net,

list_for_each_entry(catchall, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
- if (!nft_is_active(net, ext))
+ if (!nft_is_active_next(net, ext))
continue;

kfree(elem->priv);
@@ -10345,6 +10370,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nft_trans_destroy(trans);
break;
}
+ nft_trans_set(trans)->dead = 1;
list_del_rcu(&nft_trans_set(trans)->list);
break;
case NFT_MSG_DELSET:
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index f03f4d4d7d88..134e05d31061 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -508,7 +508,7 @@ __build_packet_message(struct nfnl_log_net *log,
htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
goto nla_put_failure;
} else {
- struct net_device *physindev;
+ int physinif;

/* Case 2: indev is bridge group, we need to look for
* physical device (when called from ipv4) */
@@ -516,10 +516,10 @@ __build_packet_message(struct nfnl_log_net *log,
htonl(indev->ifindex)))
goto nla_put_failure;

- physindev = nf_bridge_get_physindev(skb);
- if (physindev &&
+ physinif = nf_bridge_get_physinif(skb);
+ if (physinif &&
nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
- htonl(physindev->ifindex)))
+ htonl(physinif)))
goto nla_put_failure;
}
#endif
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index 145dc62c6247..79039afde34e 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -58,6 +58,7 @@ static inline bool nft_limit_eval(struct nft_limit_priv *priv, u64 cost)
static int nft_limit_init(struct nft_limit_priv *priv,
const struct nlattr * const tb[], bool pkts)
{
+ bool invert = false;
u64 unit, tokens;

if (tb[NFTA_LIMIT_RATE] == NULL ||
@@ -90,19 +91,23 @@ static int nft_limit_init(struct nft_limit_priv *priv,
priv->rate);
}

+ if (tb[NFTA_LIMIT_FLAGS]) {
+ u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));
+
+ if (flags & ~NFT_LIMIT_F_INV)
+ return -EOPNOTSUPP;
+
+ if (flags & NFT_LIMIT_F_INV)
+ invert = true;
+ }
+
priv->limit = kmalloc(sizeof(*priv->limit), GFP_KERNEL_ACCOUNT);
if (!priv->limit)
return -ENOMEM;

priv->limit->tokens = tokens;
priv->tokens_max = priv->limit->tokens;
-
- if (tb[NFTA_LIMIT_FLAGS]) {
- u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));
-
- if (flags & NFT_LIMIT_F_INV)
- priv->invert = true;
- }
+ priv->invert = invert;
priv->limit->last = ktime_get_ns();
spin_lock_init(&priv->limit->lock);

diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index ec6ed6fda96c..343e65f377d4 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -59,7 +59,7 @@ physdev_mt(const struct sk_buff *skb, struct xt_action_param *par)
(!!outdev ^ !(info->invert & XT_PHYSDEV_OP_BRIDGED)))
return false;

- physdev = nf_bridge_get_physindev(skb);
+ physdev = nf_bridge_get_physindev(skb, xt_net(par));
indev = physdev ? physdev->name : NULL;

if ((info->bitmask & XT_PHYSDEV_OP_ISIN &&
diff --git a/net/netlabel/netlabel_calipso.c b/net/netlabel/netlabel_calipso.c
index f1d5b8465217..a07c2216d28b 100644
--- a/net/netlabel/netlabel_calipso.c
+++ b/net/netlabel/netlabel_calipso.c
@@ -54,6 +54,28 @@ static const struct nla_policy calipso_genl_policy[NLBL_CALIPSO_A_MAX + 1] = {
[NLBL_CALIPSO_A_MTYPE] = { .type = NLA_U32 },
};

+static const struct netlbl_calipso_ops *calipso_ops;
+
+/**
+ * netlbl_calipso_ops_register - Register the CALIPSO operations
+ * @ops: ops to register
+ *
+ * Description:
+ * Register the CALIPSO packet engine operations.
+ *
+ */
+const struct netlbl_calipso_ops *
+netlbl_calipso_ops_register(const struct netlbl_calipso_ops *ops)
+{
+ return xchg(&calipso_ops, ops);
+}
+EXPORT_SYMBOL(netlbl_calipso_ops_register);
+
+static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void)
+{
+ return READ_ONCE(calipso_ops);
+}
+
/* NetLabel Command Handlers
*/
/**
@@ -96,15 +118,18 @@ static int netlbl_calipso_add_pass(struct genl_info *info,
*
*/
static int netlbl_calipso_add(struct sk_buff *skb, struct genl_info *info)
-
{
int ret_val = -EINVAL;
struct netlbl_audit audit_info;
+ const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get();

if (!info->attrs[NLBL_CALIPSO_A_DOI] ||
!info->attrs[NLBL_CALIPSO_A_MTYPE])
return -EINVAL;

+ if (!ops)
+ return -EOPNOTSUPP;
+
netlbl_netlink_auditinfo(&audit_info);
switch (nla_get_u32(info->attrs[NLBL_CALIPSO_A_MTYPE])) {
case CALIPSO_MAP_PASS:
@@ -363,28 +388,6 @@ int __init netlbl_calipso_genl_init(void)
return genl_register_family(&netlbl_calipso_gnl_family);
}

-static const struct netlbl_calipso_ops *calipso_ops;
-
-/**
- * netlbl_calipso_ops_register - Register the CALIPSO operations
- * @ops: ops to register
- *
- * Description:
- * Register the CALIPSO packet engine operations.
- *
- */
-const struct netlbl_calipso_ops *
-netlbl_calipso_ops_register(const struct netlbl_calipso_ops *ops)
-{
- return xchg(&calipso_ops, ops);
-}
-EXPORT_SYMBOL(netlbl_calipso_ops_register);
-
-static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void)
-{
- return READ_ONCE(calipso_ops);
-}
-
/**
* calipso_doi_add - Add a new DOI to the CALIPSO protocol engine
* @doi_def: the DOI structure
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index e8e14c6f904d..e8b43408136a 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -1076,6 +1076,7 @@ void rxrpc_send_version_request(struct rxrpc_local *local,
/*
* local_object.c
*/
+void rxrpc_local_dont_fragment(const struct rxrpc_local *local, bool set);
struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *, enum rxrpc_local_trace);
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *, enum rxrpc_local_trace);
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 773eecd1e979..f10b37c14772 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -545,8 +545,8 @@ void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
*/
static void rxrpc_cleanup_ring(struct rxrpc_call *call)
{
- skb_queue_purge(&call->recvmsg_queue);
- skb_queue_purge(&call->rx_oos_queue);
+ rxrpc_purge_queue(&call->recvmsg_queue);
+ rxrpc_purge_queue(&call->rx_oos_queue);
}

/*
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index c553a30e9c83..34d307368135 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -36,6 +36,17 @@ static void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, int err,
return ipv6_icmp_error(sk, skb, err, port, info, payload);
}

+/*
+ * Set or clear the Don't Fragment flag on a socket.
+ */
+void rxrpc_local_dont_fragment(const struct rxrpc_local *local, bool set)
+{
+ if (set)
+ ip_sock_set_mtu_discover(local->socket->sk, IP_PMTUDISC_DO);
+ else
+ ip_sock_set_mtu_discover(local->socket->sk, IP_PMTUDISC_DONT);
+}
+
/*
* Compare a local to an address. Return -ve, 0 or +ve to indicate less than,
* same or greater than.
@@ -203,7 +214,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
ip_sock_set_recverr(usk);

/* we want to set the don't fragment bit */
- ip_sock_set_mtu_discover(usk, IP_PMTUDISC_DO);
+ rxrpc_local_dont_fragment(local, true);

/* We want receive timestamps. */
sock_enable_timestamps(usk);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 5e53429c6922..a0906145e829 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -494,14 +494,12 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
switch (conn->local->srx.transport.family) {
case AF_INET6:
case AF_INET:
- ip_sock_set_mtu_discover(conn->local->socket->sk,
- IP_PMTUDISC_DONT);
+ rxrpc_local_dont_fragment(conn->local, false);
rxrpc_inc_stat(call->rxnet, stat_tx_data_send_frag);
ret = do_udp_sendmsg(conn->local->socket, &msg, len);
conn->peer->last_tx_at = ktime_get_seconds();

- ip_sock_set_mtu_discover(conn->local->socket->sk,
- IP_PMTUDISC_DO);
+ rxrpc_local_dont_fragment(conn->local, true);
break;

default:
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 1bf571a66e02..b52dedcebce0 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -724,7 +724,9 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
serial = atomic_inc_return(&conn->serial);
whdr.serial = htonl(serial);

+ rxrpc_local_dont_fragment(conn->local, false);
ret = kernel_sendmsg(conn->local->socket, &msg, iov, 3, len);
+ rxrpc_local_dont_fragment(conn->local, true);
if (ret < 0) {
trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
rxrpc_tx_point_rxkad_response);
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index dd710fb9f490..a7b3f60dd0a8 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -851,7 +851,6 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
if (err || !frag)
return err;

- skb_get(skb);
err = nf_ct_handle_fragments(net, skb, zone, family, &proto, &mru);
if (err)
return err;
@@ -995,12 +994,8 @@ TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
nh_ofs = skb_network_offset(skb);
skb_pull_rcsum(skb, nh_ofs);
err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
- if (err == -EINPROGRESS) {
- retval = TC_ACT_STOLEN;
- goto out_clear;
- }
if (err)
- goto drop;
+ goto out_frag;

err = nf_ct_skb_network_trim(skb, family);
if (err)
@@ -1087,6 +1082,11 @@ TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
qdisc_skb_cb(skb)->pkt_len = skb->len;
return retval;

+out_frag:
+ if (err != -EINPROGRESS)
+ tcf_action_inc_drop_qstats(&c->common);
+ return TC_ACT_CONSUMED;
+
drop:
tcf_action_inc_drop_qstats(&c->common);
return TC_ACT_SHOT;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 7f89e43154c0..6b9fcdb0952a 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2099,6 +2099,13 @@ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
pr_debug("%s: sk:%p, msghdr:%p, len:%zd, flags:0x%x, addr_len:%p)\n",
__func__, sk, msg, len, flags, addr_len);

+ if (unlikely(flags & MSG_ERRQUEUE))
+ return inet_recv_error(sk, msg, len, addr_len);
+
+ if (sk_can_busy_loop(sk) &&
+ skb_queue_empty_lockless(&sk->sk_receive_queue))
+ sk_busy_loop(sk, flags & MSG_DONTWAIT);
+
lock_sock(sk);

if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) &&
@@ -9043,12 +9050,6 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, int *err)
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;

- if (sk_can_busy_loop(sk)) {
- sk_busy_loop(sk, flags & MSG_DONTWAIT);
-
- if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
- continue;
- }

/* User doesn't want to wait. */
error = -EAGAIN;
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index 701250b305db..74ee2271251e 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -284,7 +284,7 @@ struct rpc_xprt *_xprt_switch_find_current_entry(struct list_head *head,
if (cur == pos)
found = true;
if (found && ((find_active && xprt_is_active(pos)) ||
- (!find_active && xprt_is_active(pos))))
+ (!find_active && !xprt_is_active(pos))))
return pos;
}
return NULL;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 27cc0f0a90e1..dba523cdc73d 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1052,7 +1052,11 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
if (ret < 0)
goto send_end;
tls_ctx->pending_open_record_frags = true;
- if (full_record || eor || sk_msg_full(msg_pl))
+
+ if (sk_msg_full(msg_pl))
+ full_record = true;
+
+ if (full_record || eor)
goto copied;
continue;
}
diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
index 7ea7c3a0d0d0..bd84785bf8d6 100644
--- a/net/unix/unix_bpf.c
+++ b/net/unix/unix_bpf.c
@@ -161,15 +161,30 @@ int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool r
{
struct sock *sk_pair;

+ /* Restore does not decrement the sk_pair reference yet because we must
+ * keep a reference to the socket until after an RCU grace period
+ * and any pending sends have completed.
+ */
if (restore) {
sk->sk_write_space = psock->saved_write_space;
sock_replace_proto(sk, psock->sk_proto);
return 0;
}

- sk_pair = unix_peer(sk);
- sock_hold(sk_pair);
- psock->sk_pair = sk_pair;
+ /* psock_update_sk_prot can be called multiple times if psock is
+ * added to multiple maps and/or slots in the same map. There is
+ * also an edge case where replacing a psock with itself can trigger
+ * an extra psock_update_sk_prot during the insert process. So it
+ * must be safe to do multiple calls. Here we need to ensure we don't
+ * increment the refcnt through sock_hold many times. There will only
+ * be a single matching destroy operation.
+ */
+ if (!psock->sk_pair) {
+ sk_pair = unix_peer(sk);
+ sock_hold(sk_pair);
+ psock->sk_pair = sk_pair;
+ }
+
unix_stream_bpf_check_needs_rebuild(psock->sk_proto);
sock_replace_proto(sk, &unix_stream_bpf_prot);
return 0;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index ccd8cefeea7b..4afb6a541cf3 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -2214,8 +2214,13 @@ static int vsock_set_rcvlowat(struct sock *sk, int val)

transport = vsk->transport;

- if (transport && transport->set_rcvlowat)
- return transport->set_rcvlowat(vsk, val);
+ if (transport && transport->notify_set_rcvlowat) {
+ int err;
+
+ err = transport->notify_set_rcvlowat(vsk, val);
+ if (err)
+ return err;
+ }

WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
return 0;
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 7cb1a9d2cdb4..e2157e387217 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -816,7 +816,7 @@ int hvs_notify_send_post_enqueue(struct vsock_sock *vsk, ssize_t written,
}

static
-int hvs_set_rcvlowat(struct vsock_sock *vsk, int val)
+int hvs_notify_set_rcvlowat(struct vsock_sock *vsk, int val)
{
return -EOPNOTSUPP;
}
@@ -856,7 +856,7 @@ static struct vsock_transport hvs_transport = {
.notify_send_pre_enqueue = hvs_notify_send_pre_enqueue,
.notify_send_post_enqueue = hvs_notify_send_post_enqueue,

- .set_rcvlowat = hvs_set_rcvlowat
+ .notify_set_rcvlowat = hvs_notify_set_rcvlowat
};

static bool hvs_check_transport(struct vsock_sock *vsk)
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index b80bf681327b..a64bf601b480 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -457,6 +457,7 @@ static struct virtio_transport virtio_transport = {
.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
.notify_buffer_size = virtio_transport_notify_buffer_size,
+ .notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,

.read_skb = virtio_transport_read_skb,
},
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 4084578b0b91..e87fd9480acd 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -398,6 +398,8 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
struct virtio_vsock_sock *vvs = vsk->trans;
size_t bytes, total = 0;
struct sk_buff *skb;
+ u32 fwd_cnt_delta;
+ bool low_rx_bytes;
int err = -EFAULT;
u32 free_space;

@@ -439,7 +441,10 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
}
}

- free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt);
+ fwd_cnt_delta = vvs->fwd_cnt - vvs->last_fwd_cnt;
+ free_space = vvs->buf_alloc - fwd_cnt_delta;
+ low_rx_bytes = (vvs->rx_bytes <
+ sock_rcvlowat(sk_vsock(vsk), 0, INT_MAX));

spin_unlock_bh(&vvs->rx_lock);

@@ -449,9 +454,11 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
* too high causes extra messages. Too low causes transmitter
* stalls. As stalls are in theory more expensive than extra
* messages, we set the limit to a high value. TODO: experiment
- * with different values.
+ * with different values. Also send credit update message when
+ * number of bytes in rx queue is not enough to wake up reader.
*/
- if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
+ if (fwd_cnt_delta &&
+ (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE || low_rx_bytes))
virtio_transport_send_credit_update(vsk);

return total;
@@ -1519,6 +1526,36 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
}
EXPORT_SYMBOL_GPL(virtio_transport_read_skb);

+int virtio_transport_notify_set_rcvlowat(struct vsock_sock *vsk, int val)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ bool send_update;
+
+ spin_lock_bh(&vvs->rx_lock);
+
+ /* If number of available bytes is less than new SO_RCVLOWAT value,
+ * kick sender to send more data, because sender may sleep in its
+ * 'send()' syscall waiting for enough space at our side. Also
+ * don't send credit update when peer already knows actual value -
+ * such transmission will be useless.
+ */
+ send_update = (vvs->rx_bytes < val) &&
+ (vvs->fwd_cnt != vvs->last_fwd_cnt);
+
+ spin_unlock_bh(&vvs->rx_lock);
+
+ if (send_update) {
+ int err;
+
+ err = virtio_transport_send_credit_update(vsk);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_set_rcvlowat);
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("common code for virtio vsock");
diff --git a/net/vmw_vsock/vsock_loopback.c b/net/vmw_vsock/vsock_loopback.c
index 5c6360df1f31..0ce65d0a4a44 100644
--- a/net/vmw_vsock/vsock_loopback.c
+++ b/net/vmw_vsock/vsock_loopback.c
@@ -90,6 +90,7 @@ static struct virtio_transport loopback_transport = {
.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
.notify_buffer_size = virtio_transport_notify_buffer_size,
+ .notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,

.read_skb = virtio_transport_read_skb,
},
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index e4cc6209c7b9..bd4dd75e446e 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -2569,10 +2569,12 @@ cfg80211_tbtt_info_for_mld_ap(const u8 *ie, size_t ielen, u8 mld_id, u8 link_id,
return false;
}

-static void cfg80211_parse_ml_sta_data(struct wiphy *wiphy,
- struct cfg80211_inform_single_bss_data *tx_data,
- struct cfg80211_bss *source_bss,
- gfp_t gfp)
+static void
+cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
+ struct cfg80211_inform_single_bss_data *tx_data,
+ struct cfg80211_bss *source_bss,
+ const struct element *elem,
+ gfp_t gfp)
{
struct cfg80211_inform_single_bss_data data = {
.drv_data = tx_data->drv_data,
@@ -2581,7 +2583,6 @@ static void cfg80211_parse_ml_sta_data(struct wiphy *wiphy,
.bss_source = BSS_SOURCE_STA_PROFILE,
};
struct ieee80211_multi_link_elem *ml_elem;
- const struct element *elem;
struct cfg80211_mle *mle;
u16 control;
u8 *new_ie;
@@ -2591,15 +2592,7 @@ static void cfg80211_parse_ml_sta_data(struct wiphy *wiphy,
const u8 *pos;
u8 i;

- if (!source_bss)
- return;
-
- if (tx_data->ftype != CFG80211_BSS_FTYPE_PRESP)
- return;
-
- elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK,
- tx_data->ie, tx_data->ielen);
- if (!elem || !ieee80211_mle_size_ok(elem->data + 1, elem->datalen - 1))
+ if (!ieee80211_mle_size_ok(elem->data + 1, elem->datalen - 1))
return;

ml_elem = (void *)elem->data + 1;
@@ -2625,8 +2618,11 @@ static void cfg80211_parse_ml_sta_data(struct wiphy *wiphy,
/* MLD capabilities and operations */
pos += 2;

- /* Not included when the (nontransmitted) AP is responding itself,
- * but defined to zero then (Draft P802.11be_D3.0, 9.4.2.170.2)
+ /*
+ * The MLD ID of the reporting AP is always zero. It is set if the AP
+ * is part of an MBSSID set and will be non-zero for ML Elements
+ * relating to a nontransmitted BSS (matching the Multi-BSSID Index,
+ * Draft P802.11be_D3.2, 35.3.4.2)
*/
if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_MLD_ID)) {
mld_id = *pos;
@@ -2731,6 +2727,25 @@ static void cfg80211_parse_ml_sta_data(struct wiphy *wiphy,
kfree(mle);
}

+static void cfg80211_parse_ml_sta_data(struct wiphy *wiphy,
+ struct cfg80211_inform_single_bss_data *tx_data,
+ struct cfg80211_bss *source_bss,
+ gfp_t gfp)
+{
+ const struct element *elem;
+
+ if (!source_bss)
+ return;
+
+ if (tx_data->ftype != CFG80211_BSS_FTYPE_PRESP)
+ return;
+
+ for_each_element_extid(elem, WLAN_EID_EXT_EHT_MULTI_LINK,
+ tx_data->ie, tx_data->ielen)
+ cfg80211_parse_ml_elem_sta_data(wiphy, tx_data, source_bss,
+ elem, gfp);
+}
+
struct cfg80211_bss *
cfg80211_inform_bss_data(struct wiphy *wiphy,
struct cfg80211_inform_bss *data,
diff --git a/rust/bindgen_parameters b/rust/bindgen_parameters
index 552d9a85925b..a721d466bee4 100644
--- a/rust/bindgen_parameters
+++ b/rust/bindgen_parameters
@@ -20,3 +20,7 @@

# `seccomp`'s comment gets understood as a doctest
--no-doc-comments
+
+# These functions use the `__preserve_most` calling convention, which neither bindgen
+# nor Rust currently understand, and which Clang currently declares to be unstable.
+--blocklist-function __list_.*_report
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index c87bccafff44..7182a8b821fb 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -41,6 +41,7 @@ void aa_free_str_table(struct aa_str_table *t)
kfree_sensitive(t->table[i]);
kfree_sensitive(t->table);
t->table = NULL;
+ t->size = 0;
}
}

diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 6fdab1b5ede5..366cdfd6a7ba 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -839,7 +839,6 @@ static int apparmor_task_kill(struct task_struct *target, struct kernel_siginfo
cl = aa_get_newest_cred_label(cred);
error = aa_may_signal(cred, cl, tc, tl, sig);
aa_put_label(cl);
- return error;
} else {
cl = __begin_current_label_crit_section();
error = aa_may_signal(current_cred(), cl, tc, tl, sig);
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index dbc83455d900..d92788da6704 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -478,6 +478,8 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_str_table *strs)
if (!table)
goto fail;

+ strs->table = table;
+ strs->size = size;
for (i = 0; i < size; i++) {
char *str;
int c, j, pos, size2 = aa_unpack_strdup(e, &str, NULL);
@@ -520,14 +522,11 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_str_table *strs)
goto fail;
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
goto fail;
-
- strs->table = table;
- strs->size = size;
}
return true;

fail:
- kfree_sensitive(table);
+ aa_free_str_table(strs);
e->pos = saved_pos;
return false;
}
@@ -826,6 +825,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)

tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
if (tmpns) {
+ if (!tmpname) {
+ info = "empty profile name";
+ goto fail;
+ }
*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
if (!*ns_name) {
info = "out of memory";
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 208503340721..aa0afc122f09 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4661,6 +4661,13 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
return -EINVAL;
addr4 = (struct sockaddr_in *)address;
if (family_sa == AF_UNSPEC) {
+ if (family == PF_INET6) {
+ /* Length check from inet6_bind_sk() */
+ if (addrlen < SIN6_LEN_RFC2133)
+ return -EINVAL;
+ /* Family check from __inet6_bind() */
+ goto err_af;
+ }
/* see __inet_bind(), we only want to allow
* AF_UNSPEC if the address is INADDR_ANY
*/
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 78cee53fee02..038db8902c9e 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2301,6 +2301,7 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
codec_dbg(codec, "hdmi: pcm_num set to %d\n", pcm_num);

for (idx = 0; idx < pcm_num; idx++) {
+ struct hdmi_spec_per_cvt *per_cvt;
struct hda_pcm *info;
struct hda_pcm_stream *pstr;

@@ -2316,6 +2317,11 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK];
pstr->substreams = 1;
pstr->ops = generic_ops;
+
+ per_cvt = get_cvt(spec, 0);
+ pstr->channels_min = per_cvt->channels_min;
+ pstr->channels_max = per_cvt->channels_max;
+
/* pcm number is less than pcm_rec array size */
if (spec->pcm_used >= ARRAY_SIZE(spec->pcm_rec))
break;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 576e0f265abd..375569d0864b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -9754,6 +9754,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f6, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+ SND_PCI_QUIRK(0x103c, 0x87fe, "HP Laptop 15s-fq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
@@ -9848,6 +9849,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c97, "HP ZBook", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
@@ -10114,6 +10116,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
+ SND_PCI_QUIRK(0x17aa, 0x334b, "Lenovo ThinkCentre M70 Gen5", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3801, "Lenovo Yoga9 14IAP7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 46705ec77b48..eb3aca16359c 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -718,7 +718,7 @@ static int ac97_fp_rec_volume_put(struct snd_kcontrol *ctl,
oldreg = oxygen_read_ac97(chip, 1, AC97_REC_GAIN);
newreg = oldreg & ~0x0707;
newreg = newreg | (value->value.integer.value[0] & 7);
- newreg = newreg | ((value->value.integer.value[0] & 7) << 8);
+ newreg = newreg | ((value->value.integer.value[1] & 7) << 8);
change = newreg != oldreg;
if (change)
oxygen_write_ac97(chip, 1, AC97_REC_GAIN, newreg);
diff --git a/sound/soc/amd/vangogh/acp5x-mach.c b/sound/soc/amd/vangogh/acp5x-mach.c
index eda464545866..2ccc95d57783 100644
--- a/sound/soc/amd/vangogh/acp5x-mach.c
+++ b/sound/soc/amd/vangogh/acp5x-mach.c
@@ -439,7 +439,15 @@ static const struct dmi_system_id acp5x_vg_quirk_table[] = {
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Valve"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
- }
+ },
+ .driver_data = (void *)&acp5x_8821_35l41_card,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Valve"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galileo"),
+ },
+ .driver_data = (void *)&acp5x_8821_98388_card,
},
{}
};
@@ -452,25 +460,15 @@ static int acp5x_probe(struct platform_device *pdev)
struct snd_soc_card *card;
int ret;

- card = (struct snd_soc_card *)device_get_match_data(dev);
- if (!card) {
- /*
- * This is normally the result of directly probing the driver
- * in pci-acp5x through platform_device_register_full(), which
- * is necessary for the CS35L41 variant, as it doesn't support
- * ACPI probing and relies on DMI quirks.
- */
- dmi_id = dmi_first_match(acp5x_vg_quirk_table);
- if (!dmi_id)
- return -ENODEV;
-
- card = &acp5x_8821_35l41_card;
- }
+ dmi_id = dmi_first_match(acp5x_vg_quirk_table);
+ if (!dmi_id || !dmi_id->driver_data)
+ return -ENODEV;

machine = devm_kzalloc(dev, sizeof(*machine), GFP_KERNEL);
if (!machine)
return -ENOMEM;

+ card = dmi_id->driver_data;
card->dev = dev;
platform_set_drvdata(pdev, card);
snd_soc_card_set_drvdata(card, machine);
@@ -482,17 +480,10 @@ static int acp5x_probe(struct platform_device *pdev)
return 0;
}

-static const struct acpi_device_id acp5x_acpi_match[] = {
- { "AMDI8821", (kernel_ulong_t)&acp5x_8821_98388_card },
- {},
-};
-MODULE_DEVICE_TABLE(acpi, acp5x_acpi_match);
-
static struct platform_driver acp5x_mach_driver = {
.driver = {
.name = DRV_NAME,
.pm = &snd_soc_pm_ops,
- .acpi_match_table = acp5x_acpi_match,
},
.probe = acp5x_probe,
};
diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c
index 9968c2e189e6..d25455f39566 100644
--- a/sound/soc/codecs/cs35l33.c
+++ b/sound/soc/codecs/cs35l33.c
@@ -22,13 +22,11 @@
#include <sound/soc-dapm.h>
#include <sound/initval.h>
#include <sound/tlv.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <sound/cs35l33.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/machine.h>
-#include <linux/of_gpio.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -1167,7 +1165,7 @@ static int cs35l33_i2c_probe(struct i2c_client *i2c_client)

/* We could issue !RST or skip it based on AMP topology */
cs35l33->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
- "reset-gpios", GPIOD_OUT_HIGH);
+ "reset", GPIOD_OUT_HIGH);
if (IS_ERR(cs35l33->reset_gpio)) {
dev_err(&i2c_client->dev, "%s ERROR: Can't get reset GPIO\n",
__func__);
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index a0d01d71d8b5..edcb85bd8ea7 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3854,14 +3854,6 @@ static const struct dmi_system_id dmi_platform_data[] = {
},
.driver_data = (void *)&ecs_ef20_platform_data,
},
- {
- .ident = "EF20EA",
- .callback = cht_rt5645_ef20_quirk_cb,
- .matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
- },
- .driver_data = (void *)&ecs_ef20_platform_data,
- },
{ }
};

diff --git a/sound/soc/codecs/tas2781-fmwlib.c b/sound/soc/codecs/tas2781-fmwlib.c
index 1dfac9b2fca2..61b05629a9a9 100644
--- a/sound/soc/codecs/tas2781-fmwlib.c
+++ b/sound/soc/codecs/tas2781-fmwlib.c
@@ -2012,6 +2012,7 @@ static int tasdevice_dspfw_ready(const struct firmware *fmw,
case 0x301:
case 0x302:
case 0x502:
+ case 0x503:
tas_priv->fw_parse_variable_header =
fw_parse_variable_header_kernel;
tas_priv->fw_parse_program_data =
diff --git a/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c b/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c
index 4360b9f5ff2c..890517eb63f6 100644
--- a/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c
+++ b/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c
@@ -168,6 +168,7 @@ int sof_sdw_rt_sdca_jack_exit(struct snd_soc_card *card, struct snd_soc_dai_link

device_remove_software_node(ctx->headset_codec_dev);
put_device(ctx->headset_codec_dev);
+ ctx->headset_codec_dev = NULL;

return 0;
}
diff --git a/sound/soc/intel/common/soc-acpi-intel-glk-match.c b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
index 387e73100884..8911c90bbaf6 100644
--- a/sound/soc/intel/common/soc-acpi-intel-glk-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
@@ -19,6 +19,11 @@ static const struct snd_soc_acpi_codecs glk_codecs = {
.codecs = {"MX98357A"}
};

+static const struct snd_soc_acpi_codecs glk_rt5682_rt5682s_hp = {
+ .num_codecs = 2,
+ .codecs = {"10EC5682", "RTL5682"},
+};
+
struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[] = {
{
.id = "INT343A",
@@ -35,20 +40,13 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[] = {
.sof_tplg_filename = "sof-glk-da7219.tplg",
},
{
- .id = "10EC5682",
+ .comp_ids = &glk_rt5682_rt5682s_hp,
.drv_name = "glk_rt5682_mx98357a",
.fw_filename = "intel/dsp_fw_glk.bin",
.machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &glk_codecs,
.sof_tplg_filename = "sof-glk-rt5682.tplg",
},
- {
- .id = "RTL5682",
- .drv_name = "glk_rt5682_max98357a",
- .machine_quirk = snd_soc_acpi_codec_list,
- .quirk_data = &glk_codecs,
- .sof_tplg_filename = "sof-glk-rt5682.tplg",
- },
{
.id = "10134242",
.drv_name = "glk_cs4242_mx98357a",
diff --git a/sound/soc/mediatek/common/mtk-dsp-sof-common.c b/sound/soc/mediatek/common/mtk-dsp-sof-common.c
index 6fef16306f74..21a9403b7e92 100644
--- a/sound/soc/mediatek/common/mtk-dsp-sof-common.c
+++ b/sound/soc/mediatek/common/mtk-dsp-sof-common.c
@@ -24,7 +24,7 @@ int mtk_sof_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_soc_dai_link *sof_dai_link = NULL;
const struct sof_conn_stream *conn = &sof_priv->conn_streams[i];

- if (strcmp(rtd->dai_link->name, conn->normal_link))
+ if (conn->normal_link && strcmp(rtd->dai_link->name, conn->normal_link))
continue;

for_each_card_rtds(card, runtime) {
diff --git a/sound/soc/sof/ipc4-loader.c b/sound/soc/sof/ipc4-loader.c
index eaa04762eb11..4d37e89b592a 100644
--- a/sound/soc/sof/ipc4-loader.c
+++ b/sound/soc/sof/ipc4-loader.c
@@ -479,13 +479,10 @@ void sof_ipc4_update_cpc_from_manifest(struct snd_sof_dev *sdev,
msg = "No CPC match in the firmware file's manifest";

no_cpc:
- dev_warn(sdev->dev, "%s (UUID: %pUL): %s (ibs/obs: %u/%u)\n",
- fw_module->man4_module_entry.name,
- &fw_module->man4_module_entry.uuid, msg, basecfg->ibs,
- basecfg->obs);
- dev_warn_once(sdev->dev, "Please try to update the firmware.\n");
- dev_warn_once(sdev->dev, "If the issue persists, file a bug at\n");
- dev_warn_once(sdev->dev, "https://github.com/thesofproject/sof/issues/\n";);
+ dev_dbg(sdev->dev, "%s (UUID: %pUL): %s (ibs/obs: %u/%u)\n",
+ fw_module->man4_module_entry.name,
+ &fw_module->man4_module_entry.uuid, msg, basecfg->ibs,
+ basecfg->obs);
}

const struct sof_ipc_fw_loader_ops ipc4_loader_ops = {
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
index 37ec671a2d76..7133ec13322b 100644
--- a/sound/soc/sof/topology.c
+++ b/sound/soc/sof/topology.c
@@ -1134,7 +1134,7 @@ static void sof_disconnect_dai_widget(struct snd_soc_component *scomp,
list_for_each_entry(rtd, &card->rtd_list, list) {
/* does stream match DAI link ? */
if (!rtd->dai_link->stream_name ||
- strcmp(sname, rtd->dai_link->stream_name))
+ !strstr(rtd->dai_link->stream_name, sname))
continue;

for_each_rtd_cpu_dais(rtd, i, cpu_dai)
diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
index d260be8cb6bc..c04cff722541 100644
--- a/sound/usb/mixer_scarlett_gen2.c
+++ b/sound/usb/mixer_scarlett_gen2.c
@@ -1337,9 +1337,11 @@ static void scarlett2_config_save(struct usb_mixer_interface *mixer)
{
__le32 req = cpu_to_le32(SCARLETT2_USB_CONFIG_SAVE);

- scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
- &req, sizeof(u32),
- NULL, 0);
+ int err = scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
+ &req, sizeof(u32),
+ NULL, 0);
+ if (err < 0)
+ usb_audio_err(mixer->chip, "config save failed: %d\n", err);
}

/* Delayed work to save config */
@@ -1388,7 +1390,10 @@ static int scarlett2_usb_set_config(
size = 1;
offset = config_item->offset;

- scarlett2_usb_get(mixer, offset, &tmp, 1);
+ err = scarlett2_usb_get(mixer, offset, &tmp, 1);
+ if (err < 0)
+ return err;
+
if (value)
tmp |= (1 << index);
else
@@ -1793,14 +1798,20 @@ static int scarlett2_sync_ctl_get(struct snd_kcontrol *kctl,
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
struct scarlett2_data *private = mixer->private_data;
+ int err = 0;

mutex_lock(&private->data_mutex);
- if (private->sync_updated)
- scarlett2_update_sync(mixer);
+
+ if (private->sync_updated) {
+ err = scarlett2_update_sync(mixer);
+ if (err < 0)
+ goto unlock;
+ }
ucontrol->value.enumerated.item[0] = private->sync;
- mutex_unlock(&private->data_mutex);

- return 0;
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
}

static const struct snd_kcontrol_new scarlett2_sync_ctl = {
@@ -1883,22 +1894,35 @@ static int scarlett2_master_volume_ctl_get(struct snd_kcontrol *kctl,
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
struct scarlett2_data *private = mixer->private_data;
+ int err = 0;

mutex_lock(&private->data_mutex);
- if (private->vol_updated)
- scarlett2_update_volumes(mixer);
- mutex_unlock(&private->data_mutex);

+ if (private->vol_updated) {
+ err = scarlett2_update_volumes(mixer);
+ if (err < 0)
+ goto unlock;
+ }
ucontrol->value.integer.value[0] = private->master_vol;
- return 0;
+
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
}

static int line_out_remap(struct scarlett2_data *private, int index)
{
const struct scarlett2_device_info *info = private->info;
+ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+ int line_out_count =
+ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];

if (!info->line_out_remap_enable)
return index;
+
+ if (index >= line_out_count)
+ return index;
+
return info->line_out_remap[index];
}

@@ -1909,14 +1933,20 @@ static int scarlett2_volume_ctl_get(struct snd_kcontrol *kctl,
struct usb_mixer_interface *mixer = elem->head.mixer;
struct scarlett2_data *private = mixer->private_data;
int index = line_out_remap(private, elem->control);
+ int err = 0;

mutex_lock(&private->data_mutex);
- if (private->vol_updated)
- scarlett2_update_volumes(mixer);
- mutex_unlock(&private->data_mutex);

+ if (private->vol_updated) {
+ err = scarlett2_update_volumes(mixer);
+ if (err < 0)
+ goto unlock;
+ }
ucontrol->value.integer.value[0] = private->vol[index];
- return 0;
+
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
}

static int scarlett2_volume_ctl_put(struct snd_kcontrol *kctl,
@@ -1983,14 +2013,20 @@ static int scarlett2_mute_ctl_get(struct snd_kcontrol *kctl,
struct usb_mixer_interface *mixer = elem->head.mixer;
struct scarlett2_data *private = mixer->private_data;
int index = line_out_remap(private, elem->control);
+ int err = 0;

mutex_lock(&private->data_mutex);
- if (private->vol_updated)
- scarlett2_update_volumes(mixer);
- mutex_unlock(&private->data_mutex);

+ if (private->vol_updated) {
+ err = scarlett2_update_volumes(mixer);
+ if (err < 0)
+ goto unlock;
+ }
ucontrol->value.integer.value[0] = private->mute_switch[index];
- return 0;
+
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
}

static int scarlett2_mute_ctl_put(struct snd_kcontrol *kctl,
@@ -2236,14 +2272,20 @@ static int scarlett2_level_enum_ctl_get(struct snd_kcontrol *kctl,
const struct scarlett2_device_info *info = private->info;

int index = elem->control + info->level_input_first;
+ int err = 0;

mutex_lock(&private->data_mutex);
- if (private->input_other_updated)
- scarlett2_update_input_other(mixer);
+
+ if (private->input_other_updated) {
+ err = scarlett2_update_input_other(mixer);
+ if (err < 0)
+ goto unlock;
+ }
ucontrol->value.enumerated.item[0] = private->level_switch[index];
- mutex_unlock(&private->data_mutex);

- return 0;
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
}

static int scarlett2_level_enum_ctl_put(struct snd_kcontrol *kctl,
@@ -2294,15 +2336,21 @@ static int scarlett2_pad_ctl_get(struct snd_kcontrol *kctl,
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
struct scarlett2_data *private = mixer->private_data;
+ int err = 0;

mutex_lock(&private->data_mutex);
- if (private->input_other_updated)
- scarlett2_update_input_other(mixer);
+
+ if (private->input_other_updated) {
+ err = scarlett2_update_input_other(mixer);
+ if (err < 0)
+ goto unlock;
+ }
ucontrol->value.integer.value[0] =
private->pad_switch[elem->control];
- mutex_unlock(&private->data_mutex);

- return 0;
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
}

static int scarlett2_pad_ctl_put(struct snd_kcontrol *kctl,
@@ -2352,14 +2400,20 @@ static int scarlett2_air_ctl_get(struct snd_kcontrol *kctl,
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
struct scarlett2_data *private = mixer->private_data;
+ int err = 0;

mutex_lock(&private->data_mutex);
- if (private->input_other_updated)
- scarlett2_update_input_other(mixer);
+
+ if (private->input_other_updated) {
+ err = scarlett2_update_input_other(mixer);
+ if (err < 0)
+ goto unlock;
+ }
ucontrol->value.integer.value[0] = private->air_switch[elem->control];
- mutex_unlock(&private->data_mutex);

- return 0;
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
}

static int scarlett2_air_ctl_put(struct snd_kcontrol *kctl,
@@ -2409,15 +2463,21 @@ static int scarlett2_phantom_ctl_get(struct snd_kcontrol *kctl,
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
struct scarlett2_data *private = mixer->private_data;
+ int err = 0;

mutex_lock(&private->data_mutex);
- if (private->input_other_updated)
- scarlett2_update_input_other(mixer);
+
+ if (private->input_other_updated) {
+ err = scarlett2_update_input_other(mixer);
+ if (err < 0)
+ goto unlock;
+ }
ucontrol->value.integer.value[0] =
private->phantom_switch[elem->control];
- mutex_unlock(&private->data_mutex);

- return 0;
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
}

static int scarlett2_phantom_ctl_put(struct snd_kcontrol *kctl,
@@ -2589,14 +2649,20 @@ static int scarlett2_direct_monitor_ctl_get(
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
struct scarlett2_data *private = elem->head.mixer->private_data;
+ int err = 0;

mutex_lock(&private->data_mutex);
- if (private->monitor_other_updated)
- scarlett2_update_monitor_other(mixer);
+
+ if (private->monitor_other_updated) {
+ err = scarlett2_update_monitor_other(mixer);
+ if (err < 0)
+ goto unlock;
+ }
ucontrol->value.enumerated.item[0] = private->direct_monitor_switch;
- mutex_unlock(&private->data_mutex);

- return 0;
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
}

static int scarlett2_direct_monitor_ctl_put(
@@ -2696,14 +2762,20 @@ static int scarlett2_speaker_switch_enum_ctl_get(
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
struct scarlett2_data *private = mixer->private_data;
+ int err = 0;

mutex_lock(&private->data_mutex);
- if (private->monitor_other_updated)
- scarlett2_update_monitor_other(mixer);
+
+ if (private->monitor_other_updated) {
+ err = scarlett2_update_monitor_other(mixer);
+ if (err < 0)
+ goto unlock;
+ }
ucontrol->value.enumerated.item[0] = private->speaker_switching_switch;
- mutex_unlock(&private->data_mutex);

- return 0;
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
}

/* when speaker switching gets enabled, switch the main/alt speakers
@@ -2851,14 +2923,20 @@ static int scarlett2_talkback_enum_ctl_get(
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
struct scarlett2_data *private = mixer->private_data;
+ int err = 0;

mutex_lock(&private->data_mutex);
- if (private->monitor_other_updated)
- scarlett2_update_monitor_other(mixer);
+
+ if (private->monitor_other_updated) {
+ err = scarlett2_update_monitor_other(mixer);
+ if (err < 0)
+ goto unlock;
+ }
ucontrol->value.enumerated.item[0] = private->talkback_switch;
- mutex_unlock(&private->data_mutex);

- return 0;
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
}

static int scarlett2_talkback_enum_ctl_put(
@@ -3006,14 +3084,20 @@ static int scarlett2_dim_mute_ctl_get(struct snd_kcontrol *kctl,
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
struct scarlett2_data *private = mixer->private_data;
+ int err = 0;

mutex_lock(&private->data_mutex);
- if (private->vol_updated)
- scarlett2_update_volumes(mixer);
- mutex_unlock(&private->data_mutex);

+ if (private->vol_updated) {
+ err = scarlett2_update_volumes(mixer);
+ if (err < 0)
+ goto unlock;
+ }
ucontrol->value.integer.value[0] = private->dim_mute[elem->control];
- return 0;
+
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
}

static int scarlett2_dim_mute_ctl_put(struct snd_kcontrol *kctl,
@@ -3277,7 +3361,8 @@ static int scarlett2_mixer_ctl_put(struct snd_kcontrol *kctl,
mutex_lock(&private->data_mutex);

oval = private->mix[index];
- val = ucontrol->value.integer.value[0];
+ val = clamp(ucontrol->value.integer.value[0],
+ 0L, (long)SCARLETT2_MIXER_MAX_VALUE);
num_mixer_in = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
mix_num = index / num_mixer_in;

@@ -3383,22 +3468,21 @@ static int scarlett2_mux_src_enum_ctl_get(struct snd_kcontrol *kctl,
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
struct scarlett2_data *private = mixer->private_data;
- const struct scarlett2_device_info *info = private->info;
- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
- int line_out_count =
- port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
- int index = elem->control;
-
- if (index < line_out_count)
- index = line_out_remap(private, index);
+ int index = line_out_remap(private, elem->control);
+ int err = 0;

mutex_lock(&private->data_mutex);
- if (private->mux_updated)
- scarlett2_usb_get_mux(mixer);
+
+ if (private->mux_updated) {
+ err = scarlett2_usb_get_mux(mixer);
+ if (err < 0)
+ goto unlock;
+ }
ucontrol->value.enumerated.item[0] = private->mux[index];
- mutex_unlock(&private->data_mutex);

- return 0;
+unlock:
+ mutex_unlock(&private->data_mutex);
+ return err;
}

static int scarlett2_mux_src_enum_ctl_put(struct snd_kcontrol *kctl,
@@ -3407,16 +3491,9 @@ static int scarlett2_mux_src_enum_ctl_put(struct snd_kcontrol *kctl,
struct usb_mixer_elem_info *elem = kctl->private_data;
struct usb_mixer_interface *mixer = elem->head.mixer;
struct scarlett2_data *private = mixer->private_data;
- const struct scarlett2_device_info *info = private->info;
- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
- int line_out_count =
- port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
- int index = elem->control;
+ int index = line_out_remap(private, elem->control);
int oval, val, err = 0;

- if (index < line_out_count)
- index = line_out_remap(private, index);
-
mutex_lock(&private->data_mutex);

oval = private->mux[index];
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 0448700890f7..ada7acb91a1b 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4490,6 +4490,8 @@ union bpf_attr {
* long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
+ * Note: the user stack will only be populated if the *task* is
+ * the current task; all other tasks will return -EOPNOTSUPP.
* To achieve this, the helper needs *task*, which is a valid
* pointer to **struct task_struct**. To store the stacktrace, the
* bpf program provides *buf* with a nonnegative *size*.
@@ -4501,6 +4503,7 @@ union bpf_attr {
*
* **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack.
+ * The *task* must be the current task.
* **BPF_F_USER_BUILD_ID**
* Collect buildid+offset instead of ips for user stack,
* only valid if **BPF_F_USER_STACK** is also specified.
diff --git a/tools/lib/api/io.h b/tools/lib/api/io.h
index 9fc429d2852d..f4a9328035bd 100644
--- a/tools/lib/api/io.h
+++ b/tools/lib/api/io.h
@@ -12,6 +12,7 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+#include <linux/types.h>

struct io {
/* File descriptor being read/written. */
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index a3af805a1d57..78c104922181 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -2695,15 +2695,19 @@ int cmd_stat(int argc, const char **argv)
*/
if (metrics) {
const char *pmu = parse_events_option_args.pmu_filter ?: "all";
+ int ret = metricgroup__parse_groups(evsel_list, pmu, metrics,
+ stat_config.metric_no_group,
+ stat_config.metric_no_merge,
+ stat_config.metric_no_threshold,
+ stat_config.user_requested_cpu_list,
+ stat_config.system_wide,
+ &stat_config.metric_events);

- metricgroup__parse_groups(evsel_list, pmu, metrics,
- stat_config.metric_no_group,
- stat_config.metric_no_merge,
- stat_config.metric_no_threshold,
- stat_config.user_requested_cpu_list,
- stat_config.system_wide,
- &stat_config.metric_events);
zfree(&metrics);
+ if (ret) {
+ status = ret;
+ goto out;
+ }
}

if (add_default_attributes())
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json
index 88b23b85e33c..879ff21e0b17 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json
@@ -110,7 +110,7 @@
{
"PublicDescription": "Flushes due to memory hazards",
"EventCode": "0x121",
- "EventName": "BPU_FLUSH_MEM_FAULT",
+ "EventName": "GPC_FLUSH_MEM_FAULT",
"BriefDescription": "Flushes due to memory hazards"
},
{
diff --git a/tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64 b/tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64
index fbb065842880..bed765450ca9 100644
--- a/tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64
+++ b/tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64
@@ -6,4 +6,4 @@ args = --no-bpf-event --user-regs=vg kill >/dev/null 2>&1
ret = 129
test_ret = true
arch = aarch64
-auxv = auxv["AT_HWCAP"] & 0x200000 == 0
+auxv = auxv["AT_HWCAP"] & 0x400000 == 0
diff --git a/tools/perf/tests/attr/test-record-user-regs-sve-aarch64 b/tools/perf/tests/attr/test-record-user-regs-sve-aarch64
index c598c803221d..a65113cd7311 100644
--- a/tools/perf/tests/attr/test-record-user-regs-sve-aarch64
+++ b/tools/perf/tests/attr/test-record-user-regs-sve-aarch64
@@ -6,7 +6,7 @@ args = --no-bpf-event --user-regs=vg kill >/dev/null 2>&1
ret = 1
test_ret = true
arch = aarch64
-auxv = auxv["AT_HWCAP"] & 0x200000 == 0x200000
+auxv = auxv["AT_HWCAP"] & 0x400000 == 0x400000
kernel_since = 6.1

[event:base-record]
diff --git a/tools/perf/tests/workloads/thloop.c b/tools/perf/tests/workloads/thloop.c
index af05269c2eb8..457b29f91c3e 100644
--- a/tools/perf/tests/workloads/thloop.c
+++ b/tools/perf/tests/workloads/thloop.c
@@ -7,7 +7,6 @@
#include "../tests.h"

static volatile sig_atomic_t done;
-static volatile unsigned count;

/* We want to check this symbol in perf report */
noinline void test_loop(void);
@@ -19,8 +18,7 @@ static void sighandler(int sig __maybe_unused)

noinline void test_loop(void)
{
- while (!done)
- __atomic_fetch_add(&count, 1, __ATOMIC_RELAXED);
+ while (!done);
}

static void *thfunc(void *arg)
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 38fcf3ba5749..b00b5a2634c3 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -542,9 +542,9 @@ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
return evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
}

-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
- struct perf_env *env,
- FILE *fp)
+void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ struct perf_env *env,
+ FILE *fp)
{
__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
@@ -560,7 +560,7 @@ void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
if (info->btf_id) {
struct btf_node *node;

- node = perf_env__find_btf(env, info->btf_id);
+ node = __perf_env__find_btf(env, info->btf_id);
if (node)
btf = btf__new((__u8 *)(node->data),
node->data_size);
diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
index 1bcbd4fb6c66..e2f0420905f5 100644
--- a/tools/perf/util/bpf-event.h
+++ b/tools/perf/util/bpf-event.h
@@ -33,9 +33,9 @@ struct btf_node {
int machine__process_bpf(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env);
-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
- struct perf_env *env,
- FILE *fp);
+void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ struct perf_env *env,
+ FILE *fp);
#else
static inline int machine__process_bpf(struct machine *machine __maybe_unused,
union perf_event *event __maybe_unused,
@@ -50,9 +50,9 @@ static inline int evlist__add_bpf_sb_event(struct evlist *evlist __maybe_unused,
return 0;
}

-static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
- struct perf_env *env __maybe_unused,
- FILE *fp __maybe_unused)
+static inline void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+ struct perf_env *env __maybe_unused,
+ FILE *fp __maybe_unused)
{

}
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index b9fb71ab7a73..106429155c2e 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -253,8 +253,8 @@ static struct call_path *call_path_from_sample(struct db_export *dbe,
*/
addr_location__init(&al);
al.sym = node->ms.sym;
- al.map = node->ms.map;
- al.maps = thread__maps(thread);
+ al.map = map__get(node->ms.map);
+ al.maps = maps__get(thread__maps(thread));
al.addr = node->ip;

if (al.map && !al.sym)
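
The two map__get()/maps__get() calls above take references that the later addr_location__exit() in this function is entitled to drop; without them the exit path releases references owned by the callchain node. A toy, self-contained sketch of that ownership rule follows; the struct and helpers are illustrative stand-ins, not perf's APIs.

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy refcounted object standing in for perf's map/maps. */
    struct obj {
        int refcnt;
    };

    static struct obj *obj_get(struct obj *o)
    {
        if (o)
            o->refcnt++;
        return o;
    }

    static void obj_put(struct obj *o)
    {
        if (o && --o->refcnt == 0) {
            printf("object freed\n");
            free(o);
        }
    }

    /* Like addr_location: whatever it holds is put on exit. */
    struct holder {
        struct obj *o;
    };

    static void holder_exit(struct holder *h)
    {
        obj_put(h->o);
        h->o = NULL;
    }

    int main(void)
    {
        struct obj *shared = calloc(1, sizeof(*shared));
        struct holder h = { .o = NULL };

        shared->refcnt = 1;     /* reference owned by the caller */
        h.o = obj_get(shared);  /* holder takes its own reference ... */
        holder_exit(&h);        /* ... so exit only drops that one */

        obj_put(shared);        /* the caller's put is what frees it */
        return 0;
    }
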
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index a164164001fb..d2c7b6e6eae5 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -22,13 +22,19 @@ struct perf_env perf_env;

void perf_env__insert_bpf_prog_info(struct perf_env *env,
struct bpf_prog_info_node *info_node)
+{
+ down_write(&env->bpf_progs.lock);
+ __perf_env__insert_bpf_prog_info(env, info_node);
+ up_write(&env->bpf_progs.lock);
+}
+
+void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
{
__u32 prog_id = info_node->info_linear->info.id;
struct bpf_prog_info_node *node;
struct rb_node *parent = NULL;
struct rb_node **p;

- down_write(&env->bpf_progs.lock);
p = &env->bpf_progs.infos.rb_node;

while (*p != NULL) {
@@ -40,15 +46,13 @@ void perf_env__insert_bpf_prog_info(struct perf_env *env,
p = &(*p)->rb_right;
} else {
pr_debug("duplicated bpf prog info %u\n", prog_id);
- goto out;
+ return;
}
}

rb_link_node(&info_node->rb_node, parent, p);
rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
env->bpf_progs.infos_cnt++;
-out:
- up_write(&env->bpf_progs.lock);
}

struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
@@ -77,14 +81,22 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
}

bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+{
+ bool ret;
+
+ down_write(&env->bpf_progs.lock);
+ ret = __perf_env__insert_btf(env, btf_node);
+ up_write(&env->bpf_progs.lock);
+ return ret;
+}
+
+bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
struct rb_node *parent = NULL;
__u32 btf_id = btf_node->id;
struct btf_node *node;
struct rb_node **p;
- bool ret = true;

- down_write(&env->bpf_progs.lock);
p = &env->bpf_progs.btfs.rb_node;

while (*p != NULL) {
@@ -96,25 +108,31 @@ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
p = &(*p)->rb_right;
} else {
pr_debug("duplicated btf %u\n", btf_id);
- ret = false;
- goto out;
+ return false;
}
}

rb_link_node(&btf_node->rb_node, parent, p);
rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
env->bpf_progs.btfs_cnt++;
-out:
- up_write(&env->bpf_progs.lock);
- return ret;
+ return true;
}

struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+{
+ struct btf_node *res;
+
+ down_read(&env->bpf_progs.lock);
+ res = __perf_env__find_btf(env, btf_id);
+ up_read(&env->bpf_progs.lock);
+ return res;
+}
+
+struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
struct btf_node *node = NULL;
struct rb_node *n;

- down_read(&env->bpf_progs.lock);
n = env->bpf_progs.btfs.rb_node;

while (n) {
@@ -124,13 +142,9 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
else if (btf_id > node->id)
n = n->rb_right;
else
- goto out;
+ return node;
}
- node = NULL;
-
-out:
- up_read(&env->bpf_progs.lock);
- return node;
+ return NULL;
}

/* purge data in bpf_progs.infos tree */
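
The reshuffling above follows the usual locked-wrapper convention: perf_env__insert_bpf_prog_info(), perf_env__insert_btf() and perf_env__find_btf() take env->bpf_progs.lock themselves, while the new __-prefixed variants assume the caller already holds it, as the header.c callers below do. A minimal sketch of the pattern, with a pthread rwlock standing in for the rw_semaphore and illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
    static int value;

    /* __-prefixed helpers: caller must already hold the lock. */
    static int __lookup(void)
    {
        return value;
    }

    static void __insert(int v)
    {
        value = v;
    }

    /* Public wrappers take and release the lock around the helpers. */
    static int lookup(void)
    {
        int v;

        pthread_rwlock_rdlock(&lock);
        v = __lookup();
        pthread_rwlock_unlock(&lock);
        return v;
    }

    static void insert(int v)
    {
        pthread_rwlock_wrlock(&lock);
        __insert(v);
        pthread_rwlock_unlock(&lock);
    }

    int main(void)
    {
        insert(42);
        printf("%d\n", lookup());
        return 0;
    }
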
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index 4566c51f2fd9..359eff51cb85 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -164,12 +164,16 @@ const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);

void perf_env__init(struct perf_env *env);
+void __perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node);
void perf_env__insert_bpf_prog_info(struct perf_env *env,
struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
__u32 prog_id);
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);

int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
index fefc72066c4e..ac17a3cb59dc 100644
--- a/tools/perf/util/genelf.c
+++ b/tools/perf/util/genelf.c
@@ -293,9 +293,9 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
*/
phdr = elf_newphdr(e, 1);
phdr[0].p_type = PT_LOAD;
- phdr[0].p_offset = 0;
- phdr[0].p_vaddr = 0;
- phdr[0].p_paddr = 0;
+ phdr[0].p_offset = GEN_ELF_TEXT_OFFSET;
+ phdr[0].p_vaddr = GEN_ELF_TEXT_OFFSET;
+ phdr[0].p_paddr = GEN_ELF_TEXT_OFFSET;
phdr[0].p_filesz = csize;
phdr[0].p_memsz = csize;
phdr[0].p_flags = PF_X | PF_R;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index d812e1e371a7..1482567e5ac1 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1444,7 +1444,9 @@ static int build_mem_topology(struct memory_node **nodesp, u64 *cntp)
nodes = new_nodes;
size += 4;
}
- ret = memory_node__read(&nodes[cnt++], idx);
+ ret = memory_node__read(&nodes[cnt], idx);
+ if (!ret)
+ cnt += 1;
}
out:
closedir(dir);
@@ -1847,8 +1849,8 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
node = rb_entry(next, struct bpf_prog_info_node, rb_node);
next = rb_next(&node->rb_node);

- bpf_event__print_bpf_prog_info(&node->info_linear->info,
- env, fp);
+ __bpf_event__print_bpf_prog_info(&node->info_linear->info,
+ env, fp);
}

up_read(&env->bpf_progs.lock);
@@ -3175,7 +3177,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
/* after reading from file, translate offset to address */
bpil_offs_to_addr(info_linear);
info_node->info_linear = info_linear;
- perf_env__insert_bpf_prog_info(env, info_node);
+ __perf_env__insert_bpf_prog_info(env, info_node);
}

up_write(&env->bpf_progs.lock);
@@ -3222,7 +3224,7 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
if (__do_read(ff, node->data, data_size))
goto out;

- perf_env__insert_btf(env, node);
+ __perf_env__insert_btf(env, node);
node = NULL;
}

@@ -4361,9 +4363,10 @@ size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
ret += fprintf(fp, "... ");

map = cpu_map__new_data(&ev->cpus.cpus);
- if (map)
+ if (map) {
ret += cpu_map__fprintf(map, fp);
- else
+ perf_cpu_map__put(map);
+ } else
ret += fprintf(fp, "failed to get cpus\n");
break;
default:
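
The build_mem_topology() change above only advances the node count when memory_node__read() succeeds, so a failed read no longer leaves an uninitialized slot in the array. A tiny self-contained sketch of that pattern; read_node() below is a made-up stand-in, not a perf API.

    #include <stdio.h>

    static int read_node(int idx, int *out)
    {
        if (idx == 2)           /* pretend node 2 is missing */
            return -1;
        *out = idx * 10;
        return 0;
    }

    int main(void)
    {
        int nodes[8];
        unsigned int cnt = 0;
        int idx;

        for (idx = 0; idx < 5; idx++) {
            int ret = read_node(idx, &nodes[cnt]);

            if (!ret)
                cnt += 1;       /* count only nodes actually read */
        }

        printf("read %u nodes\n", cnt);
        return 0;
    }
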
diff --git a/tools/perf/util/hisi-ptt.c b/tools/perf/util/hisi-ptt.c
index 45b614bb73bf..764d660d30e2 100644
--- a/tools/perf/util/hisi-ptt.c
+++ b/tools/perf/util/hisi-ptt.c
@@ -121,6 +121,7 @@ static int hisi_ptt_process_auxtrace_event(struct perf_session *session,
if (dump_trace)
hisi_ptt_dump_event(ptt, data, size);

+ free(data);
return 0;
}

diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index 954b235e12e5..3a2e3687878c 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -100,11 +100,14 @@ int perf_mem_events__parse(const char *str)
return -1;
}

-static bool perf_mem_event__supported(const char *mnt, char *sysfs_name)
+static bool perf_mem_event__supported(const char *mnt, struct perf_pmu *pmu,
+ struct perf_mem_event *e)
{
+ char sysfs_name[100];
char path[PATH_MAX];
struct stat st;

+ scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name);
scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name);
return !stat(path, &st);
}
@@ -120,7 +123,6 @@ int perf_mem_events__init(void)

for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
struct perf_mem_event *e = perf_mem_events__ptr(j);
- char sysfs_name[100];
struct perf_pmu *pmu = NULL;

/*
@@ -136,12 +138,12 @@ int perf_mem_events__init(void)
* of core PMU.
*/
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
- scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name);
- e->supported |= perf_mem_event__supported(mnt, sysfs_name);
+ e->supported |= perf_mem_event__supported(mnt, pmu, e);
+ if (e->supported) {
+ found = true;
+ break;
+ }
}
-
- if (e->supported)
- found = true;
}

return found ? 0 : -ENOENT;
@@ -167,13 +169,10 @@ static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
int idx)
{
const char *mnt = sysfs__mount();
- char sysfs_name[100];
struct perf_pmu *pmu = NULL;

while ((pmu = perf_pmus__scan(pmu)) != NULL) {
- scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name,
- pmu->name);
- if (!perf_mem_event__supported(mnt, sysfs_name)) {
+ if (!perf_mem_event__supported(mnt, pmu, e)) {
pr_err("failed: event '%s' not supported\n",
perf_mem_events__name(idx, pmu->name));
}
@@ -183,6 +182,7 @@ static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
char **rec_tmp, int *tmp_nr)
{
+ const char *mnt = sysfs__mount();
int i = *argv_nr, k = 0;
struct perf_mem_event *e;

@@ -211,6 +211,9 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
const char *s = perf_mem_events__name(j, pmu->name);

+ if (!perf_mem_event__supported(mnt, pmu, e))
+ continue;
+
rec_argv[i++] = "-e";
if (s) {
char *copy = strdup(s);
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 1c5c3eeba4cf..e31426167852 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -264,7 +264,7 @@ static void print_ll_miss(struct perf_stat_config *config,
static const double color_ratios[3] = {20.0, 10.0, 5.0};

print_ratio(config, evsel, aggr_idx, misses, out, STAT_LL_CACHE, color_ratios,
- "of all L1-icache accesses");
+ "of all LL-cache accesses");
}

static void print_dtlb_miss(struct perf_stat_config *config,
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 8554db3fc0d7..6013335a8dae 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -46,6 +46,7 @@ static int __report_module(struct addr_location *al, u64 ip,
{
Dwfl_Module *mod;
struct dso *dso = NULL;
+ Dwarf_Addr base;
/*
* Some callers will use al->sym, so we can't just use the
* cheaper thread__find_map() here.
@@ -58,13 +59,25 @@ static int __report_module(struct addr_location *al, u64 ip,
if (!dso)
return 0;

+ /*
+ * The generated JIT DSO files only map the code segment without
+ * ELF headers. Since the JIT code is typically packed into one
+ * memory segment, calculating the base address using pgoff can
+ * land in different code belonging to another DSO. So just use
+ * map->start directly to pick the correct one.
+ */
+ if (!strncmp(dso->long_name, "/tmp/jitted-", 12))
+ base = map__start(al->map);
+ else
+ base = map__start(al->map) - map__pgoff(al->map);
+
mod = dwfl_addrmodule(ui->dwfl, ip);
if (mod) {
Dwarf_Addr s;

dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
- if (s != map__start(al->map) - map__pgoff(al->map))
- mod = 0;
+ if (s != base)
+ mod = NULL;
}

if (!mod) {
@@ -72,14 +85,14 @@ static int __report_module(struct addr_location *al, u64 ip,

__symbol__join_symfs(filename, sizeof(filename), dso->long_name);
mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1,
- map__start(al->map) - map__pgoff(al->map), false);
+ base, false);
}
if (!mod) {
char filename[PATH_MAX];

if (dso__build_id_filename(dso, filename, sizeof(filename), false))
mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1,
- map__start(al->map) - map__pgoff(al->map), false);
+ base, false);
}

if (mod) {
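
A worked sketch of the base-address selection introduced above: for a regular DSO the ELF load base is the map start minus the mapping's file offset (pgoff), while perf's /tmp/jitted-* DSOs contain only the code bytes, so the map start itself is the base. The addresses below are made up for illustration.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static uint64_t pick_base(const char *long_name, uint64_t start,
                              uint64_t pgoff)
    {
        /* Same test as __report_module() above. */
        if (!strncmp(long_name, "/tmp/jitted-", 12))
            return start;
        return start - pgoff;
    }

    int main(void)
    {
        printf("regular DSO: base = 0x%llx\n",
               (unsigned long long)pick_base("/usr/lib/libc.so.6",
                                             0x7f0000003000ULL, 0x3000ULL));
        printf("jitted DSO:  base = 0x%llx\n",
               (unsigned long long)pick_base("/tmp/jitted-1234-0.so",
                                             0x7f0000100000ULL, 0x40ULL));
        return 0;
    }
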
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index c0641882fd2f..5e5c3395a499 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -327,7 +327,7 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui,

maps__for_each_entry(thread__maps(ui->thread), map_node) {
struct map *map = map_node->map;
- u64 start = map__start(map);
+ u64 start = map__start(map) - map__pgoff(map);

if (map__dso(map) == dso && start < base_addr)
base_addr = start;
diff --git a/tools/testing/selftests/alsa/conf.c b/tools/testing/selftests/alsa/conf.c
index 2f1685a3eae1..ff09038fdce6 100644
--- a/tools/testing/selftests/alsa/conf.c
+++ b/tools/testing/selftests/alsa/conf.c
@@ -186,7 +186,7 @@ static char *sysfs_get(const char *sysfs_root, const char *id)
close(fd);
if (len < 0)
ksft_exit_fail_msg("sysfs: unable to read value '%s': %s\n",
- path, errno);
+ path, strerror(errno));
while (len > 0 && path[len-1] == '\n')
len--;
path[len] = '\0';
diff --git a/tools/testing/selftests/alsa/mixer-test.c b/tools/testing/selftests/alsa/mixer-test.c
index 23df154fcdd7..df942149c6f6 100644
--- a/tools/testing/selftests/alsa/mixer-test.c
+++ b/tools/testing/selftests/alsa/mixer-test.c
@@ -166,7 +166,7 @@ static void find_controls(void)
err = snd_ctl_poll_descriptors(card_data->handle,
&card_data->pollfd, 1);
if (err != 1) {
- ksft_exit_fail_msg("snd_ctl_poll_descriptors() failed for %d\n",
+ ksft_exit_fail_msg("snd_ctl_poll_descriptors() failed for card %d: %d\n",
card, err);
}

@@ -319,7 +319,7 @@ static bool ctl_value_index_valid(struct ctl_data *ctl,
}

if (int64_val > snd_ctl_elem_info_get_max64(ctl->info)) {
- ksft_print_msg("%s.%d value %lld more than maximum %lld\n",
+ ksft_print_msg("%s.%d value %lld more than maximum %ld\n",
ctl->name, index, int64_val,
snd_ctl_elem_info_get_max(ctl->info));
return false;
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 1f02168103dd..f141e278b16f 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -334,6 +334,8 @@ static void test_task_stack(void)
do_dummy_read(skel->progs.dump_task_stack);
do_dummy_read(skel->progs.get_task_user_stacks);

+ ASSERT_EQ(skel->bss->num_user_stacks, 1, "num_user_stacks");
+
bpf_iter_task_stack__destroy(skel);
}

diff --git a/tools/testing/selftests/bpf/prog_tests/time_tai.c b/tools/testing/selftests/bpf/prog_tests/time_tai.c
index a31119823666..f45af1b0ef2c 100644
--- a/tools/testing/selftests/bpf/prog_tests/time_tai.c
+++ b/tools/testing/selftests/bpf/prog_tests/time_tai.c
@@ -56,7 +56,7 @@ void test_time_tai(void)
ASSERT_NEQ(ts2, 0, "tai_ts2");

/* TAI is moving forward only */
- ASSERT_GT(ts2, ts1, "tai_forward");
+ ASSERT_GE(ts2, ts1, "tai_forward");

/* Check for future */
ret = clock_gettime(CLOCK_TAI, &now_tai);
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
index f2b8167b72a8..442f4ca39fd7 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
@@ -35,6 +35,8 @@ int dump_task_stack(struct bpf_iter__task *ctx)
return 0;
}

+int num_user_stacks = 0;
+
SEC("iter/task")
int get_task_user_stacks(struct bpf_iter__task *ctx)
{
@@ -51,6 +53,9 @@ int get_task_user_stacks(struct bpf_iter__task *ctx)
if (res <= 0)
return 0;

+ /* Only one task, the current one, should succeed */
+ ++num_user_stacks;
+
buf_sz += res;

/* If the verifier doesn't refine bpf_get_task_stack res, and instead
diff --git a/tools/testing/selftests/bpf/progs/test_global_func16.c b/tools/testing/selftests/bpf/progs/test_global_func16.c
index e7206304632e..e3e64bc472cd 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func16.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func16.c
@@ -13,7 +13,7 @@ __noinline int foo(int (*arr)[10])
}

SEC("cgroup_skb/ingress")
-__failure __msg("invalid indirect read from stack")
+__success
int global_func16(struct __sk_buff *skb)
{
int array[10];
diff --git a/tools/testing/selftests/bpf/progs/verifier_basic_stack.c b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c
index 359df865a8f3..8d77cc5323d3 100644
--- a/tools/testing/selftests/bpf/progs/verifier_basic_stack.c
+++ b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c
@@ -27,8 +27,8 @@ __naked void stack_out_of_bounds(void)

SEC("socket")
__description("uninitialized stack1")
-__failure __msg("invalid indirect read from stack")
-__failure_unpriv
+__success __log_level(4) __msg("stack depth 8")
+__failure_unpriv __msg_unpriv("invalid indirect read from stack")
__naked void uninitialized_stack1(void)
{
asm volatile (" \
@@ -45,8 +45,8 @@ __naked void uninitialized_stack1(void)

SEC("socket")
__description("uninitialized stack2")
-__failure __msg("invalid read from stack")
-__failure_unpriv
+__success __log_level(4) __msg("stack depth 8")
+__failure_unpriv __msg_unpriv("invalid read from stack")
__naked void uninitialized_stack2(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
index b054f9c48143..589e8270de46 100644
--- a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
+++ b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
@@ -5,9 +5,10 @@
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

-SEC("cgroup/sysctl")
+SEC("socket")
__description("ARG_PTR_TO_LONG uninitialized")
-__failure __msg("invalid indirect read from stack R4 off -16+0 size 8")
+__success
+__failure_unpriv __msg_unpriv("invalid indirect read from stack R4 off -16+0 size 8")
__naked void arg_ptr_to_long_uninitialized(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_raw_stack.c b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c
index efbfc3a4ad6a..f67390224a9c 100644
--- a/tools/testing/selftests/bpf/progs/verifier_raw_stack.c
+++ b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c
@@ -5,9 +5,10 @@
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

-SEC("tc")
+SEC("socket")
__description("raw_stack: no skb_load_bytes")
-__failure __msg("invalid read from stack R6 off=-8 size=8")
+__success
+__failure_unpriv __msg_unpriv("invalid read from stack R6 off=-8 size=8")
__naked void stack_no_skb_load_bytes(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_var_off.c b/tools/testing/selftests/bpf/progs/verifier_var_off.c
index 83a90afba785..d1f23c1a7c5b 100644
--- a/tools/testing/selftests/bpf/progs/verifier_var_off.c
+++ b/tools/testing/selftests/bpf/progs/verifier_var_off.c
@@ -59,9 +59,10 @@ __naked void stack_read_priv_vs_unpriv(void)
" ::: __clobber_all);
}

-SEC("lwt_in")
+SEC("cgroup/skb")
__description("variable-offset stack read, uninitialized")
-__failure __msg("invalid variable-offset read from stack R2")
+__success
+__failure_unpriv __msg_unpriv("R2 variable stack access prohibited for !root")
__naked void variable_offset_stack_read_uninitialized(void)
{
asm volatile (" \
@@ -83,12 +84,55 @@ __naked void variable_offset_stack_read_uninitialized(void)

SEC("socket")
__description("variable-offset stack write, priv vs unpriv")
-__success __failure_unpriv
+__success
+/* Check that the maximum stack depth is correctly maintained according to the
+ * maximum possible variable offset.
+ */
+__log_level(4) __msg("stack depth 16")
+__failure_unpriv
/* Variable stack access is rejected for unprivileged.
*/
__msg_unpriv("R2 variable stack access prohibited for !root")
__retval(0)
__naked void stack_write_priv_vs_unpriv(void)
+{
+ asm volatile (" \
+ /* Get an unknown value */ \
+ r2 = *(u32*)(r1 + 0); \
+ /* Make it small and 8-byte aligned */ \
+ r2 &= 8; \
+ r2 -= 16; \
+ /* Add it to fp. We now have either fp-8 or \
+ * fp-16, but we don't know which \
+ */ \
+ r2 += r10; \
+ /* Dereference it for a stack write */ \
+ r0 = 0; \
+ *(u64*)(r2 + 0) = r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+/* Similar to the previous test, but this time also perform a read from the
+ * address written to with a variable offset. The read is allowed, showing that,
+ * after a variable-offset write, a privileged program can read the slots that
+ * were in the range of that write (even if the verifier doesn't actually know if
+ * the slot being read was really written to or not).
+ *
+ * Despite this test being mostly a superset, the previous test is also kept for
+ * the sake of it checking the stack depth in the case where there is no read.
+ */
+SEC("socket")
+__description("variable-offset stack write followed by read")
+__success
+/* Check that the maximum stack depth is correctly maintained according to the
+ * maximum possible variable offset.
+ */
+__log_level(4) __msg("stack depth 16")
+__failure_unpriv
+__msg_unpriv("R2 variable stack access prohibited for !root")
+__retval(0)
+__naked void stack_write_followed_by_read(void)
{
asm volatile (" \
/* Get an unknown value */ \
@@ -103,12 +147,7 @@ __naked void stack_write_priv_vs_unpriv(void)
/* Dereference it for a stack write */ \
r0 = 0; \
*(u64*)(r2 + 0) = r0; \
- /* Now read from the address we just wrote. This shows\
- * that, after a variable-offset write, a priviledged\
- * program can read the slots that were in the range of\
- * that write (even if the verifier doesn't actually know\
- * if the slot being read was really written to or not.\
- */ \
+ /* Now read from the address we just wrote. */ \
r3 = *(u64*)(r2 + 0); \
r0 = 0; \
exit; \
@@ -253,9 +292,10 @@ __naked void access_min_out_of_bound(void)
: __clobber_all);
}

-SEC("lwt_in")
+SEC("cgroup/skb")
__description("indirect variable-offset stack access, min_off < min_initialized")
-__failure __msg("invalid indirect read from stack R2 var_off")
+__success
+__failure_unpriv __msg_unpriv("R2 variable stack access prohibited for !root")
__naked void access_min_off_min_initialized(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
index 07d786329105..04fc2c6c79e8 100644
--- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
@@ -447,13 +447,13 @@ static __always_inline int tcp_lookup(void *ctx, struct header_pointers *hdr, bo
unsigned long status = ct->status;

bpf_ct_release(ct);
- if (status & IPS_CONFIRMED_BIT)
+ if (status & IPS_CONFIRMED)
return XDP_PASS;
} else if (ct_lookup_opts.error != -ENOENT) {
return XDP_ABORTED;
}

- /* error == -ENOENT || !(status & IPS_CONFIRMED_BIT) */
+ /* error == -ENOENT || !(status & IPS_CONFIRMED) */
return XDP_TX;
}
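
The fix above matters because ct->status stores flag masks, while the *_BIT names are bit numbers. A short self-contained sketch, with the two definitions mirroring include/uapi/linux/netfilter/nf_conntrack_common.h:

    #include <stdio.h>

    enum {
        IPS_CONFIRMED_BIT = 3,
        IPS_CONFIRMED = (1 << IPS_CONFIRMED_BIT),
    };

    int main(void)
    {
        unsigned long status = IPS_CONFIRMED;   /* a confirmed connection */

        /* Old test: masks with 3 (bits 0 and 1), so a confirmed-only
         * status yields 0 and is wrongly treated as unconfirmed.
         */
        printf("status & IPS_CONFIRMED_BIT = %lu\n",
               status & IPS_CONFIRMED_BIT);

        /* New test: masks with bit 3, which is what is actually set. */
        printf("status & IPS_CONFIRMED     = %lu\n",
               status & IPS_CONFIRMED);
        return 0;
    }
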

diff --git a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
index 319337bdcfc8..9a7b1106fda8 100644
--- a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
+++ b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
@@ -83,17 +83,6 @@
.result = REJECT,
.errstr = "!read_ok",
},
-{
- "Can't use cmpxchg on uninit memory",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid read from stack",
-},
{
"BPF_W cmpxchg should zero top 32 bits",
.insns = {
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 3d5cd51071f0..ab25a81fd3a1 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -1505,7 +1505,9 @@
.prog_type = BPF_PROG_TYPE_XDP,
.fixup_map_hash_8b = { 23 },
.result = REJECT,
- .errstr = "invalid read from stack R7 off=-16 size=8",
+ .errstr = "R0 invalid mem access 'scalar'",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "invalid read from stack R7 off=-16 size=8",
},
{
"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
diff --git a/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh b/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
index ad4c845a4ac7..b76bf5030952 100755
--- a/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
+++ b/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0

# Regression Test:
diff --git a/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh b/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
index 2330d37453f9..8c2619002147 100755
--- a/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
+++ b/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0

# Regression Test:
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
index 42ce602d8d49..e71d811656bb 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
@@ -120,6 +120,9 @@ h2_destroy()

switch_create()
{
+ local lanes_swp4
+ local pg1_size
+
# pools
# -----

@@ -229,7 +232,20 @@ switch_create()
dcb pfc set dev $swp4 prio-pfc all:off 1:on
# PG0 will get autoconfigured to Xoff, give PG1 arbitrarily 100K, which
# is (-2*MTU) about 80K of delay provision.
- dcb buffer set dev $swp4 buffer-size all:0 1:$_100KB
+ pg1_size=$_100KB
+
+ setup_wait_dev_with_timeout $swp4
+
+ lanes_swp4=$(ethtool $swp4 | grep 'Lanes:')
+ lanes_swp4=${lanes_swp4#*"Lanes: "}
+
+ # 8-lane ports use two buffers among which the configured buffer
+ # is split, so double the size to get twice (20K + 80K).
+ if [[ $lanes_swp4 -eq 8 ]]; then
+ pg1_size=$((pg1_size * 2))
+ fi
+
+ dcb buffer set dev $swp4 buffer-size all:0 1:$pg1_size

# bridges
# -------
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
index fb850e0ec837..616d3581419c 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
@@ -10,7 +10,8 @@ lib_dir=$(dirname $0)/../../../../net/forwarding
ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
multiple_masks_test ctcam_edge_cases_test delta_simple_test \
delta_two_masks_one_key_test delta_simple_rehash_test \
- bloom_simple_test bloom_complex_test bloom_delta_test"
+ bloom_simple_test bloom_complex_test bloom_delta_test \
+ max_erp_entries_test max_group_size_test"
NUM_NETIFS=2
source $lib_dir/lib.sh
source $lib_dir/tc_common.sh
@@ -983,6 +984,109 @@ bloom_delta_test()
log_test "bloom delta test ($tcflags)"
}

+max_erp_entries_test()
+{
+ # The number of eRP entries is limited. Once the maximum number of eRPs
+ # has been reached, filters cannot be added. This test verifies that
+ # when this limit is reached, insertion fails without crashing.
+
+ RET=0
+
+ local num_masks=32
+ local num_regions=15
+ local chain_failed
+ local mask_failed
+ local ret
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ for ((i=1; i < $num_regions; i++)); do
+ for ((j=$num_masks; j >= 0; j--)); do
+ tc filter add dev $h2 ingress chain $i protocol ip \
+ pref $i handle $j flower $tcflags \
+ dst_ip 192.1.0.0/$j &> /dev/null
+ ret=$?
+
+ if [ $ret -ne 0 ]; then
+ chain_failed=$i
+ mask_failed=$j
+ break 2
+ fi
+ done
+ done
+
+ # We expect to exceed the maximum number of eRP entries, so that
+ # insertion eventually fails. Otherwise, the test should be adjusted to
+ # add more filters.
+ check_fail $ret "expected to exceed number of eRP entries"
+
+ for ((; i >= 1; i--)); do
+ for ((j=0; j <= $num_masks; j++)); do
+ tc filter del dev $h2 ingress chain $i protocol ip \
+ pref $i handle $j flower &> /dev/null
+ done
+ done
+
+ log_test "max eRP entries test ($tcflags). " \
+ "max chain $chain_failed, mask $mask_failed"
+}
+
+max_group_size_test()
+{
+ # The number of ACLs in an ACL group is limited. Once the maximum
+ # number of ACLs has been reached, filters cannot be added. This test
+ # verifies that when this limit is reached, insertion fails without
+ # crashing.
+
+ RET=0
+
+ local num_acls=32
+ local max_size
+ local ret
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ for ((i=1; i < $num_acls; i++)); do
+ if [[ $(( i % 2 )) == 1 ]]; then
+ tc filter add dev $h2 ingress pref $i proto ipv4 \
+ flower $tcflags dst_ip 198.51.100.1/32 \
+ ip_proto tcp tcp_flags 0x01/0x01 \
+ action drop &> /dev/null
+ else
+ tc filter add dev $h2 ingress pref $i proto ipv6 \
+ flower $tcflags dst_ip 2001:db8:1::1/128 \
+ action drop &> /dev/null
+ fi
+
+ ret=$?
+ [[ $ret -ne 0 ]] && max_size=$((i - 1)) && break
+ done
+
+ # We expect to exceed the maximum number of ACLs in a group, so that
+ # insertion eventually fails. Otherwise, the test should be adjusted to
+ # add more filters.
+ check_fail $ret "expected to exceed number of ACLs in a group"
+
+ for ((; i >= 1; i--)); do
+ if [[ $(( i % 2 )) == 1 ]]; then
+ tc filter del dev $h2 ingress pref $i proto ipv4 \
+ flower $tcflags dst_ip 198.51.100.1/32 \
+ ip_proto tcp tcp_flags 0x01/0x01 \
+ action drop &> /dev/null
+ else
+ tc filter del dev $h2 ingress pref $i proto ipv6 \
+ flower $tcflags dst_ip 2001:db8:1::1/128 \
+ action drop &> /dev/null
+ fi
+ done
+
+ log_test "max ACL group size test ($tcflags). max size $max_size"
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
diff --git a/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh b/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
index c899b446acb6..327427ec10f5 100755
--- a/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
+++ b/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
@@ -150,7 +150,7 @@ arp_test_gratuitous() {
fi
# Supply arp_accept option to set up which sets it in sysctl
setup ${arp_accept}
- ip netns exec ${HOST_NS} arping -A -U ${HOST_ADDR} -c1 2>&1 >/dev/null
+ ip netns exec ${HOST_NS} arping -A -I ${HOST_INTF} -U ${HOST_ADDR} -c1 2>&1 >/dev/null

if verify_arp $1 $2; then
printf " TEST: %-60s [ OK ]\n" "${test_msg[*]}"
diff --git a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
index 51df5e305855..b52d59547fc5 100755
--- a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
+++ b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
@@ -209,12 +209,12 @@ validate_v6_exception()
echo "Route get"
ip -netns h0 -6 ro get ${dst}
echo "Searching for:"
- echo " ${dst} from :: via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
+ echo " ${dst}.* via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
echo
fi

ip -netns h0 -6 ro get ${dst} | \
- grep -q "${dst} from :: via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
+ grep -q "${dst}.* via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
rc=$?

log_test $rc 0 "IPv6: host 0 to host ${i}, mtu ${mtu}"
diff --git a/tools/testing/selftests/powerpc/math/fpu_preempt.c b/tools/testing/selftests/powerpc/math/fpu_preempt.c
index 5235bdc8c0b1..3e5b5663d244 100644
--- a/tools/testing/selftests/powerpc/math/fpu_preempt.c
+++ b/tools/testing/selftests/powerpc/math/fpu_preempt.c
@@ -37,19 +37,20 @@ __thread double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
int threads_starting;
int running;

-extern void preempt_fpu(double *darray, int *threads_starting, int *running);
+extern int preempt_fpu(double *darray, int *threads_starting, int *running);

void *preempt_fpu_c(void *p)
{
+ long rc;
int i;
+
srand(pthread_self());
for (i = 0; i < 21; i++)
darray[i] = rand();

- /* Test failed if it ever returns */
- preempt_fpu(darray, &threads_starting, &running);
+ rc = preempt_fpu(darray, &threads_starting, &running);

- return p;
+ return (void *)rc;
}

int test_preempt_fpu(void)
diff --git a/tools/testing/selftests/powerpc/math/vmx_preempt.c b/tools/testing/selftests/powerpc/math/vmx_preempt.c
index 6761d6ce30ec..6f7cf400c687 100644
--- a/tools/testing/selftests/powerpc/math/vmx_preempt.c
+++ b/tools/testing/selftests/powerpc/math/vmx_preempt.c
@@ -37,19 +37,21 @@ __thread vector int varray[] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10,11,12},
int threads_starting;
int running;

-extern void preempt_vmx(vector int *varray, int *threads_starting, int *running);
+extern int preempt_vmx(vector int *varray, int *threads_starting, int *running);

void *preempt_vmx_c(void *p)
{
int i, j;
+ long rc;
+
srand(pthread_self());
for (i = 0; i < 12; i++)
for (j = 0; j < 4; j++)
varray[i][j] = rand();

- /* Test fails if it ever returns */
- preempt_vmx(varray, &threads_starting, &running);
- return p;
+ rc = preempt_vmx(varray, &threads_starting, &running);
+
+ return (void *)rc;
}

int test_preempt_vmx(void)
diff --git a/tools/testing/selftests/sgx/Makefile b/tools/testing/selftests/sgx/Makefile
index 50aab6b57da3..01abe4969b0f 100644
--- a/tools/testing/selftests/sgx/Makefile
+++ b/tools/testing/selftests/sgx/Makefile
@@ -16,10 +16,10 @@ HOST_CFLAGS := -Wall -Werror -g $(INCLUDES) -fPIC -z noexecstack
ENCL_CFLAGS := -Wall -Werror -static -nostdlib -nostartfiles -fPIC \
-fno-stack-protector -mrdrnd $(INCLUDES)

+ifeq ($(CAN_BUILD_X86_64), 1)
TEST_CUSTOM_PROGS := $(OUTPUT)/test_sgx
TEST_FILES := $(OUTPUT)/test_encl.elf

-ifeq ($(CAN_BUILD_X86_64), 1)
all: $(TEST_CUSTOM_PROGS) $(OUTPUT)/test_encl.elf
endif

diff --git a/tools/testing/selftests/sgx/load.c b/tools/testing/selftests/sgx/load.c
index 94bdeac1cf04..c9f658e44de6 100644
--- a/tools/testing/selftests/sgx/load.c
+++ b/tools/testing/selftests/sgx/load.c
@@ -136,11 +136,11 @@ static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg)
*/
uint64_t encl_get_entry(struct encl *encl, const char *symbol)
{
+ Elf64_Sym *symtab = NULL;
+ char *sym_names = NULL;
Elf64_Shdr *sections;
- Elf64_Sym *symtab;
Elf64_Ehdr *ehdr;
- char *sym_names;
- int num_sym;
+ int num_sym = 0;
int i;

ehdr = encl->bin;
@@ -161,6 +161,9 @@ uint64_t encl_get_entry(struct encl *encl, const char *symbol)
}
}

+ if (!symtab || !sym_names)
+ return 0;
+
for (i = 0; i < num_sym; i++) {
Elf64_Sym *sym = &symtab[i];

diff --git a/tools/testing/selftests/sgx/sigstruct.c b/tools/testing/selftests/sgx/sigstruct.c
index a07896a46364..d73b29becf5b 100644
--- a/tools/testing/selftests/sgx/sigstruct.c
+++ b/tools/testing/selftests/sgx/sigstruct.c
@@ -318,9 +318,9 @@ bool encl_measure(struct encl *encl)
struct sgx_sigstruct *sigstruct = &encl->sigstruct;
struct sgx_sigstruct_payload payload;
uint8_t digest[SHA256_DIGEST_LENGTH];
+ EVP_MD_CTX *ctx = NULL;
unsigned int siglen;
RSA *key = NULL;
- EVP_MD_CTX *ctx;
int i;

memset(sigstruct, 0, sizeof(*sigstruct));
@@ -384,7 +384,8 @@ bool encl_measure(struct encl *encl)
return true;

err:
- EVP_MD_CTX_destroy(ctx);
+ if (ctx)
+ EVP_MD_CTX_destroy(ctx);
RSA_free(key);
return false;
}
diff --git a/tools/testing/selftests/sgx/test_encl.c b/tools/testing/selftests/sgx/test_encl.c
index c0d6397295e3..ae791df3e5a5 100644
--- a/tools/testing/selftests/sgx/test_encl.c
+++ b/tools/testing/selftests/sgx/test_encl.c
@@ -24,10 +24,11 @@ static void do_encl_emodpe(void *_op)
secinfo.flags = op->flags;

asm volatile(".byte 0x0f, 0x01, 0xd7"
- :
+ : /* no outputs */
: "a" (EMODPE),
"b" (&secinfo),
- "c" (op->epc_addr));
+ "c" (op->epc_addr)
+ : "memory" /* read from secinfo pointer */);
}

static void do_encl_eaccept(void *_op)
@@ -42,7 +43,8 @@ static void do_encl_eaccept(void *_op)
: "=a" (rax)
: "a" (EACCEPT),
"b" (&secinfo),
- "c" (op->epc_addr));
+ "c" (op->epc_addr)
+ : "memory" /* read from secinfo pointer */);

op->ret = rax;
}
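
For context on the new clobber, a minimal x86-64 user-space sketch of why an asm statement that dereferences a pointer passed only as a register input needs a "memory" clobber: without it the compiler may assume the asm touches no memory and reorder or drop the stores that fill the pointed-to buffer. The instruction and names below are illustrative, not the SGX leaf functions.

    #include <stdio.h>

    static long read_via_asm(const long *p)
    {
        long out;

        /* The asm reads *p, but the compiler only sees a register input,
         * so the "memory" clobber is what keeps earlier stores to *p
         * ordered before (and not optimized away around) this statement.
         */
        asm volatile("movq (%1), %0"
                     : "=r" (out)
                     : "r" (p)
                     : "memory");
        return out;
    }

    int main(void)
    {
        long secinfo_like = 0;

        secinfo_like = 42;      /* the store the asm depends on */
        printf("%ld\n", read_via_asm(&secinfo_like));
        return 0;
    }
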