Re: Linux 6.1.54

From: Greg Kroah-Hartman
Date: Tue Sep 19 2023 - 06:53:25 EST


diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
index 2524061836ac..34911ce5e4b5 100644
--- a/Documentation/admin-guide/cgroup-v1/memory.rst
+++ b/Documentation/admin-guide/cgroup-v1/memory.rst
@@ -91,8 +91,6 @@ Brief summary of control files.
memory.oom_control set/show oom controls.
memory.numa_stat show the number of memory usage per numa
node
- memory.kmem.limit_in_bytes This knob is deprecated and writing to
- it will return -ENOTSUPP.
memory.kmem.usage_in_bytes show current kernel memory allocation
memory.kmem.failcnt show the number of kernel memory usage
hits limits
diff --git a/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml
index 229af98b1d30..7cd88bc3a67d 100644
--- a/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml
+++ b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml
@@ -16,8 +16,6 @@ description: |
reads required input clock frequencies from the devicetree and acts as clock
provider for all clock consumers of PS clocks.

-select: false
-
properties:
compatible:
const: xlnx,versal-clk
diff --git a/Documentation/mm/multigen_lru.rst b/Documentation/mm/multigen_lru.rst
index d7062c6a8946..d8f721f98868 100644
--- a/Documentation/mm/multigen_lru.rst
+++ b/Documentation/mm/multigen_lru.rst
@@ -89,15 +89,15 @@ variables are monotonically increasing.

Generation numbers are truncated into ``order_base_2(MAX_NR_GENS+1)``
bits in order to fit into the gen counter in ``folio->flags``. Each
-truncated generation number is an index to ``lrugen->lists[]``. The
+truncated generation number is an index to ``lrugen->folios[]``. The
sliding window technique is used to track at least ``MIN_NR_GENS`` and
at most ``MAX_NR_GENS`` generations. The gen counter stores a value
within ``[1, MAX_NR_GENS]`` while a page is on one of
-``lrugen->lists[]``; otherwise it stores zero.
+``lrugen->folios[]``; otherwise it stores zero.

Each generation is divided into multiple tiers. A page accessed ``N``
times through file descriptors is in tier ``order_base_2(N)``. Unlike
-generations, tiers do not have dedicated ``lrugen->lists[]``. In
+generations, tiers do not have dedicated ``lrugen->folios[]``. In
contrast to moving across generations, which requires the LRU lock,
moving across tiers only involves atomic operations on
``folio->flags`` and therefore has a negligible cost. A feedback loop
@@ -127,7 +127,7 @@ page mapped by this PTE to ``(max_seq%MAX_NR_GENS)+1``.
Eviction
--------
The eviction consumes old generations. Given an ``lruvec``, it
-increments ``min_seq`` when ``lrugen->lists[]`` indexed by
+increments ``min_seq`` when ``lrugen->folios[]`` indexed by
``min_seq%MAX_NR_GENS`` becomes empty. To select a type and a tier to
evict from, it first compares ``min_seq[]`` to select the older type.
If both types are equally old, it selects the one whose first tier has
diff --git a/Makefile b/Makefile
index 35fc0d62898d..844afa653fdd 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
-SUBLEVEL = 53
+SUBLEVEL = 54
EXTRAVERSION =
NAME = Curry Ramen

@@ -1939,7 +1939,9 @@ quiet_cmd_depmod = DEPMOD $(MODLIB)

modules_install:
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
+ifndef modules_sign_only
$(call cmd,depmod)
+endif

else # CONFIG_MODULES

diff --git a/arch/arc/include/asm/atomic-llsc.h b/arch/arc/include/asm/atomic-llsc.h
index 1b0ffaeee16d..5258cb81a16b 100644
--- a/arch/arc/include/asm/atomic-llsc.h
+++ b/arch/arc/include/asm/atomic-llsc.h
@@ -18,7 +18,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v) \
: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
[i] "ir" (i) \
- : "cc"); \
+ : "cc", "memory"); \
} \

#define ATOMIC_OP_RETURN(op, asm_op) \
@@ -34,7 +34,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
: [val] "=&r" (val) \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
- : "cc"); \
+ : "cc", "memory"); \
\
return val; \
}
@@ -56,7 +56,7 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
[orig] "=&r" (orig) \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
- : "cc"); \
+ : "cc", "memory"); \
\
return orig; \
}
diff --git a/arch/arc/include/asm/atomic64-arcv2.h b/arch/arc/include/asm/atomic64-arcv2.h
index c5a8010fdc97..9089f34baac3 100644
--- a/arch/arc/include/asm/atomic64-arcv2.h
+++ b/arch/arc/include/asm/atomic64-arcv2.h
@@ -60,7 +60,7 @@ static inline void arch_atomic64_##op(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
- : "cc"); \
+ : "cc", "memory"); \
} \

#define ATOMIC64_OP_RETURN(op, op1, op2) \
@@ -77,7 +77,7 @@ static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: [val] "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
- : "cc"); /* memory clobber comes from smp_mb() */ \
+ : "cc", "memory"); \
\
return val; \
}
@@ -99,7 +99,7 @@ static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: "=&r"(orig), "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
- : "cc"); /* memory clobber comes from smp_mb() */ \
+ : "cc", "memory"); \
\
return orig; \
}
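
Note on the two ARC hunks above: adding "memory" to the clobber list turns each LLOCK/SCOND loop into a compiler barrier. Without it, GCC is free to keep v->counter cached in a register across the asm or to move unrelated loads and stores past it, since the asm only declares `val` as an output; the fully-relaxed variants have no smp_mb() wrapper to paper over that. A minimal sketch of the corrected pattern (simplified from the ATOMIC_OP() macro above, not the exact kernel code):

    static inline void sketch_atomic_add(int i, atomic_t *v)
    {
            unsigned int val;

            __asm__ __volatile__(
            "1:     llock  %[val], [%[ctr]]         \n"
            "       add    %[val], %[val], %[i]     \n"
            "       scond  %[val], [%[ctr]]         \n"
            "       bnz    1b                       \n"
            : [val] "=&r" (val)     /* early clobber, as above */
            : [ctr] "r" (&v->counter), [i] "ir" (i)
            : "cc", "memory");      /* "memory" = compiler barrier */
    }
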
diff --git a/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
index c4faff092380..fee53c25fc80 100644
--- a/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
@@ -100,7 +100,7 @@ phy0: ethernet-phy@7 {
rxc-skew-psec = <2400>;
txc-skew-psec = <2400>;
rxdv-skew-psec = <0>;
- txdv-skew-psec = <0>;
+ txen-skew-psec = <0>;
rxd0-skew-psec = <0>;
rxd1-skew-psec = <0>;
rxd2-skew-psec = <0>;
@@ -128,7 +128,7 @@ phy1: ethernet-phy@7 {
rxc-skew-psec = <2400>;
txc-skew-psec = <2400>;
rxdv-skew-psec = <0>;
- txdv-skew-psec = <0>;
+ txen-skew-psec = <0>;
rxd0-skew-psec = <0>;
rxd1-skew-psec = <0>;
rxd2-skew-psec = <0>;
diff --git a/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
index 78e6e2376b01..5ff66aa6f8ba 100644
--- a/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
@@ -77,7 +77,7 @@ phy0: ethernet-phy@7 {
rxc-skew-psec = <2400>;
txc-skew-psec = <2400>;
rxdv-skew-psec = <0>;
- txdv-skew-psec = <0>;
+ txen-skew-psec = <0>;
rxd0-skew-psec = <0>;
rxd1-skew-psec = <0>;
rxd2-skew-psec = <0>;
diff --git a/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi
index 2a0feb53f0dc..b62973bc2f0d 100644
--- a/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi
@@ -80,7 +80,7 @@ phy0: ethernet-phy@7 {
rxc-skew-psec = <2400>;
txc-skew-psec = <2400>;
rxdv-skew-psec = <0>;
- txdv-skew-psec = <0>;
+ txen-skew-psec = <0>;
rxd0-skew-psec = <0>;
rxd1-skew-psec = <0>;
rxd2-skew-psec = <0>;
@@ -107,7 +107,7 @@ phy1: ethernet-phy@7 {
rxc-skew-psec = <2400>;
txc-skew-psec = <2400>;
rxdv-skew-psec = <0>;
- txdv-skew-psec = <0>;
+ txen-skew-psec = <0>;
rxd0-skew-psec = <0>;
rxd1-skew-psec = <0>;
rxd2-skew-psec = <0>;
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 14134fd34ff7..0ce5f13eabb1 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1655,13 +1655,8 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
struct bpf_prog *p = l->link.prog;
int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);

- if (p->aux->sleepable) {
- enter_prog = (u64)__bpf_prog_enter_sleepable;
- exit_prog = (u64)__bpf_prog_exit_sleepable;
- } else {
- enter_prog = (u64)__bpf_prog_enter;
- exit_prog = (u64)__bpf_prog_exit;
- }
+ enter_prog = (u64)bpf_trampoline_enter(p);
+ exit_prog = (u64)bpf_trampoline_exit(p);

if (l->cookie == 0) {
/* if cookie is zero, one instruction is enough to store it */
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index fe64ad43ba88..ee8f47aef98b 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -308,8 +308,8 @@ ifdef CONFIG_64BIT
endif
endif

- ifeq ($(KBUILD_SYM32)$(call cc-option-yn,-msym32), yy)
- cflags-y += -msym32 -DKBUILD_64BIT_SYM32
+ ifeq ($(KBUILD_SYM32), y)
+ cflags-$(KBUILD_SYM32) += -msym32 -DKBUILD_64BIT_SYM32
else
ifeq ($(CONFIG_CPU_DADDI_WORKAROUNDS), y)
$(error CONFIG_CPU_DADDI_WORKAROUNDS unsupported without -msym32)
@@ -350,7 +350,7 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables

KBUILD_LDFLAGS += -m $(ld-emul)

-ifdef CONFIG_MIPS
+ifdef need-compiler
CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
egrep -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
diff --git a/arch/parisc/include/asm/led.h b/arch/parisc/include/asm/led.h
index 6de13d08a388..b70b9094fb7c 100644
--- a/arch/parisc/include/asm/led.h
+++ b/arch/parisc/include/asm/led.h
@@ -11,8 +11,8 @@
#define LED1 0x02
#define LED0 0x01 /* bottom (or furthest left) LED */

-#define LED_LAN_TX LED0 /* for LAN transmit activity */
-#define LED_LAN_RCV LED1 /* for LAN receive activity */
+#define LED_LAN_RCV LED0 /* for LAN receive activity */
+#define LED_LAN_TX LED1 /* for LAN transmit activity */
#define LED_DISK_IO LED2 /* for disk activity */
#define LED_HEARTBEAT LED3 /* heartbeat */

diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index c77b5f00a66a..d8f8dca4d796 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -530,7 +530,7 @@ static int __init ap325rxa_devices_setup(void)
device_initialize(&ap325rxa_ceu_device.dev);
dma_declare_coherent_memory(&ap325rxa_ceu_device.dev,
ceu_dma_membase, ceu_dma_membase,
- ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);

platform_device_add(&ap325rxa_ceu_device);

diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 674da7ebd8b7..7ec03d4a4edf 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -1454,15 +1454,13 @@ static int __init arch_setup(void)
device_initialize(&ecovec_ceu_devices[0]->dev);
dma_declare_coherent_memory(&ecovec_ceu_devices[0]->dev,
ceu0_dma_membase, ceu0_dma_membase,
- ceu0_dma_membase +
- CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(ecovec_ceu_devices[0]);

device_initialize(&ecovec_ceu_devices[1]->dev);
dma_declare_coherent_memory(&ecovec_ceu_devices[1]->dev,
ceu1_dma_membase, ceu1_dma_membase,
- ceu1_dma_membase +
- CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(ecovec_ceu_devices[1]);

gpiod_add_lookup_table(&cn12_power_gpiod_table);
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 20f4db778ed6..c6d556dfbbbe 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -603,7 +603,7 @@ static int __init kfr2r09_devices_setup(void)
device_initialize(&kfr2r09_ceu_device.dev);
dma_declare_coherent_memory(&kfr2r09_ceu_device.dev,
ceu_dma_membase, ceu_dma_membase,
- ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);

platform_device_add(&kfr2r09_ceu_device);

diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index f60061283c48..773ee767d0c4 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -604,7 +604,7 @@ static int __init migor_devices_setup(void)
device_initialize(&migor_ceu_device.dev);
dma_declare_coherent_memory(&migor_ceu_device.dev,
ceu_dma_membase, ceu_dma_membase,
- ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);

platform_device_add(&migor_ceu_device);

diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index b60a2626e18b..6495f9354065 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -940,15 +940,13 @@ static int __init devices_setup(void)
device_initialize(&ms7724se_ceu_devices[0]->dev);
dma_declare_coherent_memory(&ms7724se_ceu_devices[0]->dev,
ceu0_dma_membase, ceu0_dma_membase,
- ceu0_dma_membase +
- CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(ms7724se_ceu_devices[0]);

device_initialize(&ms7724se_ceu_devices[1]->dev);
dma_declare_coherent_memory(&ms7724se_ceu_devices[1]->dev,
ceu1_dma_membase, ceu1_dma_membase,
- ceu1_dma_membase +
- CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(ms7724se_ceu_devices[1]);

return platform_add_devices(ms7724se_devices,
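
All five SuperH board fixes in this series correct the same mistake: the last argument of dma_declare_coherent_memory() is the size of the region in bytes, not an inclusive end address, so passing `ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1` declared a wildly oversized region. For reference, the prototype being called is:

    int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                    dma_addr_t device_addr, size_t size);
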
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index 3b12e6b99412..6c2e3ff3cb28 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -101,12 +101,6 @@ static inline int cpu_has_svm(const char **msg)
return 0;
}

- if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) {
- if (msg)
- *msg = "can't execute cpuid_8000000a";
- return 0;
- }
-
if (!boot_cpu_has(X86_FEATURE_SVM)) {
if (msg)
*msg = "svm not available";
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index e910ec5a0cc0..d3e66740c7c6 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -810,6 +810,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
int ret = 0;
unsigned long flags;
struct amd_svm_iommu_ir *ir;
+ u64 entry;

/**
* In some cases, the existing irte is updated and re-set,
@@ -843,6 +844,18 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
ir->data = pi->ir_data;

spin_lock_irqsave(&svm->ir_list_lock, flags);
+
+ /*
+ * Update the target pCPU for IOMMU doorbells if the vCPU is running.
+ * If the vCPU is NOT running, i.e. is blocking or scheduled out, KVM
+ * will update the pCPU info when the vCPU is awakened and/or scheduled in.
+ * See also avic_vcpu_load().
+ */
+ entry = READ_ONCE(*(svm->avic_physical_id_cache));
+ if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
+ amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
+ true, pi->ir_data);
+
list_add(&ir->node, &svm->ir_list);
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
out:
@@ -1022,10 +1035,11 @@ static inline int
avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
{
int ret = 0;
- unsigned long flags;
struct amd_svm_iommu_ir *ir;
struct vcpu_svm *svm = to_svm(vcpu);

+ lockdep_assert_held(&svm->ir_list_lock);
+
if (!kvm_arch_has_assigned_device(vcpu->kvm))
return 0;

@@ -1033,19 +1047,15 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
* Here, we go through the per-vcpu ir_list to update all existing
* interrupt remapping table entry targeting this vcpu.
*/
- spin_lock_irqsave(&svm->ir_list_lock, flags);
-
if (list_empty(&svm->ir_list))
- goto out;
+ return 0;

list_for_each_entry(ir, &svm->ir_list, node) {
ret = amd_iommu_update_ga(cpu, r, ir->data);
if (ret)
- break;
+ return ret;
}
-out:
- spin_unlock_irqrestore(&svm->ir_list_lock, flags);
- return ret;
+ return 0;
}

void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -1053,6 +1063,7 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
u64 entry;
int h_physical_id = kvm_cpu_get_apicid(cpu);
struct vcpu_svm *svm = to_svm(vcpu);
+ unsigned long flags;

lockdep_assert_preemption_disabled();

@@ -1069,6 +1080,15 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (kvm_vcpu_is_blocking(vcpu))
return;

+ /*
+ * Grab the per-vCPU interrupt remapping lock even if the VM doesn't
+ * _currently_ have assigned devices, as that can change. Holding
+ * ir_list_lock ensures that either svm_ir_list_add() will consume
+ * up-to-date entry information, or that this task will wait until
+ * svm_ir_list_add() completes to set the new target pCPU.
+ */
+ spin_lock_irqsave(&svm->ir_list_lock, flags);
+
entry = READ_ONCE(*(svm->avic_physical_id_cache));

entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
@@ -1077,25 +1097,48 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
+
+ spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}

void avic_vcpu_put(struct kvm_vcpu *vcpu)
{
u64 entry;
struct vcpu_svm *svm = to_svm(vcpu);
+ unsigned long flags;

lockdep_assert_preemption_disabled();

+ /*
+ * Note, reading the Physical ID entry outside of ir_list_lock is safe
+ * as only the pCPU that has loaded (or is loading) the vCPU is allowed
+ * to modify the entry, and preemption is disabled. I.e. the vCPU
+ * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
+ * recursively.
+ */
entry = READ_ONCE(*(svm->avic_physical_id_cache));

/* Nothing to do if IsRunning == '0' due to vCPU blocking. */
if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
return;

+ /*
+ * Take and hold the per-vCPU interrupt remapping lock while updating
+ * the Physical ID entry even though the lock doesn't protect against
+ * multiple writers (see above). Holding ir_list_lock ensures that
+ * either svm_ir_list_add() will consume up-to-date entry information,
+ * or that this task will wait until svm_ir_list_add() completes to
+ * mark the vCPU as not running.
+ */
+ spin_lock_irqsave(&svm->ir_list_lock, flags);
+
avic_update_iommu_vcpu_affinity(vcpu, -1, 0);

entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+
+ spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+
}

void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
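
The AVIC hunks above close a race between IRTE creation and vCPU migration: an interrupt-remapping entry added by svm_ir_list_add() while the target vCPU was being loaded or put could be left with its doorbell aimed at a stale pCPU. Serializing everything on svm->ir_list_lock gives the following shape (a rough sketch of the protocol, not kernel code):

    /*
     * avic_vcpu_load()/avic_vcpu_put():    svm_ir_list_add():
     *   lock(ir_list_lock)                   lock(ir_list_lock)
     *   update physical-ID entry             entry = READ_ONCE(physical-ID)
     *   amd_iommu_update_ga() for each       if (entry & IS_RUNNING)
     *       IRTE already on ir_list              amd_iommu_update_ga(pCPU)
     *   unlock(ir_list_lock)                 list_add(ir, &svm->ir_list)
     *                                        unlock(ir_list_lock)
     *
     * Either the new IRTE is on the list when load/put walks it, or
     * svm_ir_list_add() observes the final entry and programs the
     * current pCPU itself.
     */
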
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 92268645a5fe..8053974af326 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -660,10 +660,9 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,

vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;

- if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) {
- WARN_ON(!svm->tsc_scaling_enabled);
+ if (svm->tsc_scaling_enabled &&
+ svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio)
nested_svm_update_tsc_ratio_msr(vcpu);
- }

vmcb02->control.int_ctl =
(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
@@ -1022,8 +1021,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
}

- if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) {
- WARN_ON(!svm->tsc_scaling_enabled);
+ if (kvm_caps.has_tsc_control &&
+ vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
}
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index e0437acb5cf7..d08d5e085649 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1723,7 +1723,7 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
* Note, the source is not required to have the same number of
* vCPUs as the destination when migrating a vanilla SEV VM.
*/
- src_vcpu = kvm_get_vcpu(dst_kvm, i);
+ src_vcpu = kvm_get_vcpu(src_kvm, i);
src_svm = to_svm(src_vcpu);

/*
@@ -2951,9 +2951,12 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
/*
* An SEV-ES guest requires a VMSA area that is separate from the
* VMCB page. Do not include the encryption mask on the VMSA physical
- * address since hardware will access it using the guest key.
+ * address since hardware will access it using the guest key. Note,
+ * the VMSA will be NULL if this vCPU is the destination for intrahost
+ * migration, and will be copied later.
*/
- svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
+ if (svm->sev_es.vmsa)
+ svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);

/* Can't intercept CR register access, HV can't modify CR registers */
svm_clr_intercept(svm, INTERCEPT_CR0_READ);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index a96f9a17e8b5..7e4d66be18ef 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -366,6 +366,8 @@ static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}
+static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+ void *insn, int insn_len);

static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
bool commit_side_effects)
@@ -386,6 +388,14 @@ static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
}

if (!svm->next_rip) {
+ /*
+ * FIXME: Drop this when kvm_emulate_instruction() does the
+ * right thing and treats "can't emulate" as outright failure
+ * for EMULTYPE_SKIP.
+ */
+ if (!svm_can_emulate_instruction(vcpu, EMULTYPE_SKIP, NULL, 0))
+ return 0;
+
if (unlikely(!commit_side_effects))
old_rflags = svm->vmcb->save.rflags;

@@ -4592,16 +4602,25 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
* and cannot be decrypted by KVM, i.e. KVM would read cyphertext and
* decode garbage.
*
- * Inject #UD if KVM reached this point without an instruction buffer.
- * In practice, this path should never be hit by a well-behaved guest,
- * e.g. KVM doesn't intercept #UD or #GP for SEV guests, but this path
- * is still theoretically reachable, e.g. via unaccelerated fault-like
- * AVIC access, and needs to be handled by KVM to avoid putting the
- * guest into an infinite loop. Injecting #UD is somewhat arbitrary,
- * but its the least awful option given lack of insight into the guest.
+ * If KVM is NOT trying to simply skip an instruction, inject #UD if
+ * KVM reached this point without an instruction buffer. In practice,
+ * this path should never be hit by a well-behaved guest, e.g. KVM
+ * doesn't intercept #UD or #GP for SEV guests, but this path is still
+ * theoretically reachable, e.g. via unaccelerated fault-like AVIC
+ * access, and needs to be handled by KVM to avoid putting the guest
+ * into an infinite loop. Injecting #UD is somewhat arbitrary, but
+ * it's the least awful option given lack of insight into the guest.
+ *
+ * If KVM is trying to skip an instruction, simply resume the guest.
+ * If a #NPF occurs while the guest is vectoring an INT3/INTO, then KVM
+ * will attempt to re-inject the INT3/INTO and skip the instruction.
+ * In that scenario, retrying the INT3/INTO and hoping the guest will
+ * make forward progress is the only option that has a chance of
+ * success (and in practice it will work the vast majority of the time).
*/
if (unlikely(!insn)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
+ if (!(emul_type & EMULTYPE_SKIP))
+ kvm_queue_exception(vcpu, UD_VECTOR);
return false;
}

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index db6053a22e86..5e680e039d0e 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1813,10 +1813,6 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
struct bpf_tramp_link *l, int stack_size,
int run_ctx_off, bool save_ret)
{
- void (*exit)(struct bpf_prog *prog, u64 start,
- struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_exit;
- u64 (*enter)(struct bpf_prog *prog,
- struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_enter;
u8 *prog = *pprog;
u8 *jmp_insn;
int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
@@ -1835,23 +1831,12 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
*/
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);

- if (p->aux->sleepable) {
- enter = __bpf_prog_enter_sleepable;
- exit = __bpf_prog_exit_sleepable;
- } else if (p->type == BPF_PROG_TYPE_STRUCT_OPS) {
- enter = __bpf_prog_enter_struct_ops;
- exit = __bpf_prog_exit_struct_ops;
- } else if (p->expected_attach_type == BPF_LSM_CGROUP) {
- enter = __bpf_prog_enter_lsm_cgroup;
- exit = __bpf_prog_exit_lsm_cgroup;
- }
-
/* arg1: mov rdi, progs[i] */
emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
/* arg2: lea rsi, [rbp - ctx_cookie_off] */
EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);

- if (emit_call(&prog, enter, prog))
+ if (emit_call(&prog, bpf_trampoline_enter(p), prog))
return -EINVAL;
/* remember prog start time returned by __bpf_prog_enter */
emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
@@ -1896,7 +1881,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
/* arg3: lea rdx, [rbp - run_ctx_off] */
EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
- if (emit_call(&prog, exit, prog))
+ if (emit_call(&prog, bpf_trampoline_exit(p), prog))
return -EINVAL;

*pprog = prog;
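
Both this x86 hunk and the arm64 hunk earlier replace open-coded selection of the trampoline enter/exit handlers with the bpf_trampoline_enter()/bpf_trampoline_exit() helpers, so every JIT picks the same pair. A sketch of the dispatch those helpers centralize, reconstructed from the branches deleted above (the real helpers live in kernel/bpf/trampoline.c and may order their checks differently):

    typedef u64 (*bpf_enter_fn_t)(struct bpf_prog *prog,
                                  struct bpf_tramp_run_ctx *run_ctx);

    static bpf_enter_fn_t sketch_trampoline_enter(const struct bpf_prog *p)
    {
            if (p->aux->sleepable)
                    return __bpf_prog_enter_sleepable;
            if (p->type == BPF_PROG_TYPE_STRUCT_OPS)
                    return __bpf_prog_enter_struct_ops;
            if (p->expected_attach_type == BPF_LSM_CGROUP)
                    return __bpf_prog_enter_lsm_cgroup;
            return __bpf_prog_enter;        /* default: non-sleepable */
    }
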
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index f1bc600c4ded..1007f8027857 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -697,11 +697,41 @@ static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
return true;
}

+static unsigned int calculate_io_allowed(u32 iops_limit,
+ unsigned long jiffy_elapsed)
+{
+ unsigned int io_allowed;
+ u64 tmp;
+
+ /*
+ * jiffy_elapsed should not be a big value as minimum iops can be
+ * 1 then at max jiffy elapsed should be equivalent of 1 second as we
+ * will allow dispatch after 1 second and after that slice should
+ * have been trimmed.
+ */
+
+ tmp = (u64)iops_limit * jiffy_elapsed;
+ do_div(tmp, HZ);
+
+ if (tmp > UINT_MAX)
+ io_allowed = UINT_MAX;
+ else
+ io_allowed = tmp;
+
+ return io_allowed;
+}
+
+static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
+{
+ return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
+}
+
/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
- unsigned long nr_slices, time_elapsed, io_trim;
- u64 bytes_trim, tmp;
+ unsigned long time_elapsed;
+ long long bytes_trim;
+ int io_trim;

BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

@@ -723,67 +753,38 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)

throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

- time_elapsed = jiffies - tg->slice_start[rw];
-
- nr_slices = time_elapsed / tg->td->throtl_slice;
-
- if (!nr_slices)
+ time_elapsed = rounddown(jiffies - tg->slice_start[rw],
+ tg->td->throtl_slice);
+ if (!time_elapsed)
return;
- tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
- do_div(tmp, HZ);
- bytes_trim = tmp;

- io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
- HZ;
-
- if (!bytes_trim && !io_trim)
+ bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
+ time_elapsed) +
+ tg->carryover_bytes[rw];
+ io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
+ tg->carryover_ios[rw];
+ if (bytes_trim <= 0 && io_trim <= 0)
return;

- if (tg->bytes_disp[rw] >= bytes_trim)
+ tg->carryover_bytes[rw] = 0;
+ if ((long long)tg->bytes_disp[rw] >= bytes_trim)
tg->bytes_disp[rw] -= bytes_trim;
else
tg->bytes_disp[rw] = 0;

- if (tg->io_disp[rw] >= io_trim)
+ tg->carryover_ios[rw] = 0;
+ if ((int)tg->io_disp[rw] >= io_trim)
tg->io_disp[rw] -= io_trim;
else
tg->io_disp[rw] = 0;

- tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
+ tg->slice_start[rw] += time_elapsed;

throtl_log(&tg->service_queue,
- "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
- rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
- tg->slice_start[rw], tg->slice_end[rw], jiffies);
-}
-
-static unsigned int calculate_io_allowed(u32 iops_limit,
- unsigned long jiffy_elapsed)
-{
- unsigned int io_allowed;
- u64 tmp;
-
- /*
- * jiffy_elapsed should not be a big value as minimum iops can be
- * 1 then at max jiffy elapsed should be equivalent of 1 second as we
- * will allow dispatch after 1 second and after that slice should
- * have been trimmed.
- */
-
- tmp = (u64)iops_limit * jiffy_elapsed;
- do_div(tmp, HZ);
-
- if (tmp > UINT_MAX)
- io_allowed = UINT_MAX;
- else
- io_allowed = tmp;
-
- return io_allowed;
-}
-
-static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
-{
- return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
+ "[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu",
+ rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
+ bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
+ jiffies);
}

static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
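
The blk-throttle rework moves calculate_io_allowed() and calculate_bytes_allowed() above throtl_trim_slice() so trimming uses exactly the allowance math used at dispatch time, and the trimmed amounts now absorb the signed carryover budget instead of silently discarding it. A worked example of the byte helper, assuming HZ = 1000, a 1 MiB/s limit and 250 jiffies elapsed:

    /* bytes_allowed = bps_limit * jiffy_elapsed / HZ, computed with the
     * overflow-safe 128-bit intermediate helper:
     */
    u64 bytes = mul_u64_u64_div_u64(1048576ULL, 250ULL, 1000ULL); /* 262144 */
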
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 53ab2306da00..1645335b8d2d 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -422,6 +422,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
{ PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
+ /* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
+ { PCI_VDEVICE(INTEL, 0x4b63), board_ahci_low_power }, /* Elkhart Lake AHCI */

/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 823c88622e34..ffdd12e722c9 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -123,8 +123,8 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
struct resource *base_res, *ctl_res, *irq_res;
struct ata_host *host;
struct ata_port *ap;
- void __iomem *base;
- int irq = 0;
+ void __iomem *base, *ctl_base;
+ int irq = 0, io_offset = 1, reg_shift = 2; /* Falcon defaults */

dev_info(&pdev->dev, "Atari Falcon and Q40/Q60 PATA controller\n");

@@ -165,26 +165,34 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;

- base = (void __iomem *)base_mem_res->start;
/* N.B. this assumes data_addr will be used for word-sized I/O only */
- ap->ioaddr.data_addr = base + 0 + 0 * 4;
- ap->ioaddr.error_addr = base + 1 + 1 * 4;
- ap->ioaddr.feature_addr = base + 1 + 1 * 4;
- ap->ioaddr.nsect_addr = base + 1 + 2 * 4;
- ap->ioaddr.lbal_addr = base + 1 + 3 * 4;
- ap->ioaddr.lbam_addr = base + 1 + 4 * 4;
- ap->ioaddr.lbah_addr = base + 1 + 5 * 4;
- ap->ioaddr.device_addr = base + 1 + 6 * 4;
- ap->ioaddr.status_addr = base + 1 + 7 * 4;
- ap->ioaddr.command_addr = base + 1 + 7 * 4;
-
- base = (void __iomem *)ctl_mem_res->start;
- ap->ioaddr.altstatus_addr = base + 1;
- ap->ioaddr.ctl_addr = base + 1;
-
- ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
- (unsigned long)base_mem_res->start,
- (unsigned long)ctl_mem_res->start);
+ ap->ioaddr.data_addr = (void __iomem *)base_mem_res->start;
+
+ if (base_res) { /* only Q40 has IO resources */
+ io_offset = 0x10000;
+ reg_shift = 0;
+ base = (void __iomem *)base_res->start;
+ ctl_base = (void __iomem *)ctl_res->start;
+ } else {
+ base = (void __iomem *)base_mem_res->start;
+ ctl_base = (void __iomem *)ctl_mem_res->start;
+ }
+
+ ap->ioaddr.error_addr = base + io_offset + (1 << reg_shift);
+ ap->ioaddr.feature_addr = base + io_offset + (1 << reg_shift);
+ ap->ioaddr.nsect_addr = base + io_offset + (2 << reg_shift);
+ ap->ioaddr.lbal_addr = base + io_offset + (3 << reg_shift);
+ ap->ioaddr.lbam_addr = base + io_offset + (4 << reg_shift);
+ ap->ioaddr.lbah_addr = base + io_offset + (5 << reg_shift);
+ ap->ioaddr.device_addr = base + io_offset + (6 << reg_shift);
+ ap->ioaddr.status_addr = base + io_offset + (7 << reg_shift);
+ ap->ioaddr.command_addr = base + io_offset + (7 << reg_shift);
+
+ ap->ioaddr.altstatus_addr = ctl_base + io_offset;
+ ap->ioaddr.ctl_addr = ctl_base + io_offset;
+
+ ata_port_desc(ap, "cmd %px ctl %px data %px",
+ base, ctl_base, ap->ioaddr.data_addr);

irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (irq_res && irq_res->start > 0) {
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 0117df0fe3c5..092ba6f87aa3 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -567,6 +567,7 @@ static struct platform_driver pata_ftide010_driver = {
};
module_platform_driver(pata_ftide010_driver);

+MODULE_DESCRIPTION("low level driver for Faraday Technology FTIDE010");
MODULE_AUTHOR("Linus Walleij <linus.walleij@xxxxxxxxxx>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index b729e9919bb0..c96fcf9ee3c0 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -428,6 +428,7 @@ static struct platform_driver gemini_sata_driver = {
};
module_platform_driver(gemini_sata_driver);

+MODULE_DESCRIPTION("low level driver for Cortina Systems Gemini SATA bridge");
MODULE_AUTHOR("Linus Walleij <linus.walleij@xxxxxxxxxx>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index e8cb914223cd..e9f38eba2f13 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1585,9 +1585,12 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
struct nullb_queue *nq = hctx->driver_data;
LIST_HEAD(list);
int nr = 0;
+ struct request *rq;

spin_lock(&nq->poll_lock);
list_splice_init(&nq->poll_list, &list);
+ list_for_each_entry(rq, &list, queuelist)
+ blk_mq_set_request_complete(rq);
spin_unlock(&nq->poll_lock);

while (!list_empty(&list)) {
@@ -1613,16 +1616,21 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);

- pr_info("rq %p timed out\n", rq);
-
if (hctx->type == HCTX_TYPE_POLL) {
struct nullb_queue *nq = hctx->driver_data;

spin_lock(&nq->poll_lock);
+ /* The request may have completed meanwhile. */
+ if (blk_mq_request_completed(rq)) {
+ spin_unlock(&nq->poll_lock);
+ return BLK_EH_DONE;
+ }
list_del_init(&rq->queuelist);
spin_unlock(&nq->poll_lock);
}

+ pr_info("rq %p timed out\n", rq);
+
/*
* If the device is marked as blocking (i.e. memory backed or zoned
* device), the submission path may be blocked waiting for resources
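
The two null_blk hunks close a poll-versus-timeout race: null_poll() now marks each request complete while still holding nq->poll_lock, and null_timeout_rq() re-checks under the same lock, so a request the poll path has already spliced off the list can no longer be completed a second time. The ordering the fix enforces, roughly:

    /*
     * null_poll():                          null_timeout_rq():
     *   lock(nq->poll_lock)
     *   splice poll_list -> local list
     *   blk_mq_set_request_complete(rq)
     *   unlock(nq->poll_lock)                 lock(nq->poll_lock)
     *   end each request                      if (blk_mq_request_completed(rq))
     *                                             return BLK_EH_DONE;  /* skip */
     */
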
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index 083459028a4b..8a4362d75fc4 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -470,6 +470,10 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)

/* Trigger MHI RESET so that the device will not access host memory */
if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
+ /* Skip MHI RESET if in RDDM state */
+ if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
+ goto skip_mhi_reset;
+
dev_dbg(dev, "Triggering MHI Reset in device\n");
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

@@ -495,6 +499,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
}
}

+skip_mhi_reset:
dev_dbg(dev,
"Waiting for all pending event ring processing to complete\n");
mhi_event = mhi_cntrl->mhi_event;
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index db0b774207d3..f45239e73c4c 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -775,12 +775,13 @@ static int crb_acpi_add(struct acpi_device *device)
FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
buf->header.length,
ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
crb_pluton = ACPI_ADD_PTR(struct tpm2_crb_pluton, buf, sizeof(*buf));
rc = crb_map_pluton(dev, priv, buf, crb_pluton);
if (rc)
- return rc;
+ goto out;
}

priv->sm = sm;
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index 1d0f79e9c346..416d582a9e8e 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -62,8 +62,6 @@ static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
PLL_1443X_RATE(519750000U, 173, 2, 2, 16384),
- PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
- PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
};

struct imx_pll14xx_clk imx_1443x_pll = {
@@ -137,11 +135,10 @@ static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rat
/*
* Fractional PLL constraints:
*
- * a) 6MHz <= prate <= 25MHz
- * b) 1 <= p <= 63 (1 <= p <= 4 prate = 24MHz)
- * c) 64 <= m <= 1023
- * d) 0 <= s <= 6
- * e) -32768 <= k <= 32767
+ * a) 1 <= p <= 63
+ * b) 64 <= m <= 1023
+ * c) 0 <= s <= 6
+ * d) -32768 <= k <= 32767
*
* fvco = (m * 65536 + k) * prate / (p * 65536)
*/
@@ -184,7 +181,7 @@ static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rat
}

/* Finally calculate best values */
- for (pdiv = 1; pdiv <= 7; pdiv++) {
+ for (pdiv = 1; pdiv <= 63; pdiv++) {
for (sdiv = 0; sdiv <= 6; sdiv++) {
/* calc mdiv = round(rate * pdiv * 2^sdiv) / prate) */
mdiv = DIV_ROUND_CLOSEST(rate * (pdiv << sdiv), prate);
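
With pdiv now allowed to span the full 1..63 range from constraint a), imx_pll14xx_calc_settings() can derive the two audio rates whose hard-coded entries are removed above. Checking the dropped 393216000 Hz entry against the formula, assuming prate = 24 MHz:

    /* rate = 393216000, pdiv = 2, sdiv = 3:
     *   mdiv = DIV_ROUND_CLOSEST(393216000 * (2 << 3), 24000000) = 262
     * with k making up the remainder (k = 9437 in the removed entry),
     * per fvco = (m * 65536 + k) * prate / (p * 65536).
     */
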
diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c
index 8a4ba7a19ed1..6f56bdbf0204 100644
--- a/drivers/clk/qcom/camcc-sc7180.c
+++ b/drivers/clk/qcom/camcc-sc7180.c
@@ -1664,7 +1664,7 @@ static int cam_cc_sc7180_probe(struct platform_device *pdev)
return ret;
}

- ret = pm_runtime_get(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret)
return ret;

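This and the similar Qualcomm clock hunks below (dispcc-sm8450, lpasscc-sc7280, mss-sc7180, q6sstop-qcs404, turingcc-qcs404) replace pm_runtime_get() with pm_runtime_resume_and_get(). pm_runtime_get() only queues an asynchronous resume and leaves the usage counter raised on failure; pm_runtime_resume_and_get() resumes synchronously and drops the reference on error, which is what a probe path about to touch the hardware needs:

    ret = pm_runtime_resume_and_get(&pdev->dev); /* ref dropped on error */
    if (ret)
            return ret;
    /* ... register clocks while the device is powered ... */
    pm_runtime_put(&pdev->dev);
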
diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c
index 0cd7ebe90301..64626f620a01 100644
--- a/drivers/clk/qcom/dispcc-sm8450.c
+++ b/drivers/clk/qcom/dispcc-sm8450.c
@@ -1783,8 +1783,10 @@ static int disp_cc_sm8450_probe(struct platform_device *pdev)
return ret;

regmap = qcom_cc_map(pdev, &disp_cc_sm8450_desc);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_put_rpm;
+ }

clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
@@ -1799,9 +1801,16 @@ static int disp_cc_sm8450_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0xe05c, BIT(0), BIT(0));

ret = qcom_cc_really_probe(pdev, &disp_cc_sm8450_desc, regmap);
+ if (ret)
+ goto err_put_rpm;

pm_runtime_put(&pdev->dev);

+ return 0;
+
+err_put_rpm:
+ pm_runtime_put_sync(&pdev->dev);
+
return ret;
}

diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c
index 8bed02a748ab..470a277603a9 100644
--- a/drivers/clk/qcom/gcc-mdm9615.c
+++ b/drivers/clk/qcom/gcc-mdm9615.c
@@ -58,7 +58,7 @@ static struct clk_regmap pll0_vote = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "pll0_vote",
- .parent_names = (const char *[]){ "pll8" },
+ .parent_names = (const char *[]){ "pll0" },
.num_parents = 1,
.ops = &clk_pll_vote_ops,
},
diff --git a/drivers/clk/qcom/lpasscc-sc7280.c b/drivers/clk/qcom/lpasscc-sc7280.c
index 8486d7135ab1..113146835902 100644
--- a/drivers/clk/qcom/lpasscc-sc7280.c
+++ b/drivers/clk/qcom/lpasscc-sc7280.c
@@ -115,9 +115,13 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
ret = pm_clk_add(&pdev->dev, "iface");
if (ret < 0) {
dev_err(&pdev->dev, "failed to acquire iface clock\n");
- goto destroy_pm_clk;
+ goto err_destroy_pm_clk;
}

+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ goto err_destroy_pm_clk;
+
if (!of_property_read_bool(pdev->dev.of_node, "qcom,adsp-pil-mode")) {
lpass_regmap_config.name = "qdsp6ss";
lpass_regmap_config.max_register = 0x3f;
@@ -125,7 +129,7 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)

ret = qcom_cc_probe_by_index(pdev, 0, desc);
if (ret)
- goto destroy_pm_clk;
+ goto err_put_rpm;
}

lpass_regmap_config.name = "top_cc";
@@ -134,11 +138,15 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)

ret = qcom_cc_probe_by_index(pdev, 1, desc);
if (ret)
- goto destroy_pm_clk;
+ goto err_put_rpm;
+
+ pm_runtime_put(&pdev->dev);

return 0;

-destroy_pm_clk:
+err_put_rpm:
+ pm_runtime_put_sync(&pdev->dev);
+err_destroy_pm_clk:
pm_clk_destroy(&pdev->dev);

disable_pm_runtime:
diff --git a/drivers/clk/qcom/mss-sc7180.c b/drivers/clk/qcom/mss-sc7180.c
index 5a1407440662..d106bc65470e 100644
--- a/drivers/clk/qcom/mss-sc7180.c
+++ b/drivers/clk/qcom/mss-sc7180.c
@@ -87,11 +87,22 @@ static int mss_sc7180_probe(struct platform_device *pdev)
return ret;
}

+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
ret = qcom_cc_probe(pdev, &mss_sc7180_desc);
if (ret < 0)
- return ret;
+ goto err_put_rpm;
+
+ pm_runtime_put(&pdev->dev);

return 0;
+
+err_put_rpm:
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
}

static const struct dev_pm_ops mss_sc7180_pm_ops = {
diff --git a/drivers/clk/qcom/q6sstop-qcs404.c b/drivers/clk/qcom/q6sstop-qcs404.c
index 780074e05841..26e2d63614ac 100644
--- a/drivers/clk/qcom/q6sstop-qcs404.c
+++ b/drivers/clk/qcom/q6sstop-qcs404.c
@@ -174,21 +174,32 @@ static int q6sstopcc_qcs404_probe(struct platform_device *pdev)
return ret;
}

+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
q6sstop_regmap_config.name = "q6sstop_tcsr";
desc = &tcsr_qcs404_desc;

ret = qcom_cc_probe_by_index(pdev, 1, desc);
if (ret)
- return ret;
+ goto err_put_rpm;

q6sstop_regmap_config.name = "q6sstop_cc";
desc = &q6sstop_qcs404_desc;

ret = qcom_cc_probe_by_index(pdev, 0, desc);
if (ret)
- return ret;
+ goto err_put_rpm;
+
+ pm_runtime_put(&pdev->dev);

return 0;
+
+err_put_rpm:
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
}

static const struct dev_pm_ops q6sstopcc_pm_ops = {
diff --git a/drivers/clk/qcom/turingcc-qcs404.c b/drivers/clk/qcom/turingcc-qcs404.c
index 43184459228f..2cd288d6c3e4 100644
--- a/drivers/clk/qcom/turingcc-qcs404.c
+++ b/drivers/clk/qcom/turingcc-qcs404.c
@@ -125,11 +125,22 @@ static int turingcc_probe(struct platform_device *pdev)
return ret;
}

+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
ret = qcom_cc_probe(pdev, &turingcc_desc);
if (ret < 0)
- return ret;
+ goto err_put_rpm;
+
+ pm_runtime_put(&pdev->dev);

return 0;
+
+err_put_rpm:
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
}

static const struct dev_pm_ops turingcc_pm_ops = {
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 933bb960490d..239c70ac120e 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -773,6 +773,13 @@ static __always_inline void set_next_event_mem(const int access, unsigned long e
u64 cnt;

ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+
+ /* Timer must be disabled before programming CVAL */
+ if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ }
+
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

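The arch timer hunk enforces the ordering its comment states: for the MMIO timer frames, CVAL may only be programmed while the timer is disabled. The sequence set_next_event_mem() now produces:

    /*
     * 1. if CTRL.ENABLE is set, clear it and write CTRL back
     * 2. program the new CVAL
     * 3. write CTRL with ENABLE set and IMASK cleared
     */
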
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index 476847a4916b..2967141f4e7b 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -9,6 +9,7 @@
* Copyright 2012 Javier Martin, Vista Silicon <javier.martin@xxxxxxxxxxxxxxxxx>
*/

+#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
@@ -145,8 +146,8 @@ struct rz_dmac {
#define CHCFG_REQD BIT(3)
#define CHCFG_SEL(bits) ((bits) & 0x07)
#define CHCFG_MEM_COPY (0x80400008)
-#define CHCFG_FILL_DDS(a) (((a) << 16) & GENMASK(19, 16))
-#define CHCFG_FILL_SDS(a) (((a) << 12) & GENMASK(15, 12))
+#define CHCFG_FILL_DDS_MASK GENMASK(19, 16)
+#define CHCFG_FILL_SDS_MASK GENMASK(15, 12)
#define CHCFG_FILL_TM(a) (((a) & BIT(5)) << 22)
#define CHCFG_FILL_AM(a) (((a) & GENMASK(4, 2)) << 6)
#define CHCFG_FILL_LVL(a) (((a) & BIT(1)) << 5)
@@ -609,13 +610,15 @@ static int rz_dmac_config(struct dma_chan *chan,
if (val == CHCFG_DS_INVALID)
return -EINVAL;

- channel->chcfg |= CHCFG_FILL_DDS(val);
+ channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);

val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
if (val == CHCFG_DS_INVALID)
return -EINVAL;

- channel->chcfg |= CHCFG_FILL_SDS(val);
+ channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);

return 0;
}
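
The DDS/SDS change matters when a channel is reconfigured: the old CHCFG_FILL_DDS()/CHCFG_FILL_SDS() macros only OR'ed the new width bits into chcfg, so bits from an earlier dmaengine_slave_config() call survived. Clearing the field and refilling it with FIELD_PREP() makes the update idempotent:

    channel->chcfg &= ~CHCFG_FILL_DDS_MASK;                 /* drop stale bits     */
    channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val); /* val into bits 19:16 */
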
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index dd6f9ae6fbe9..2fced451f0ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -38,6 +38,8 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
@@ -493,11 +495,29 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
return true;
}

+static int amdgpu_dirtyfb(struct drm_framebuffer *fb, struct drm_file *file,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips, unsigned int num_clips)
+{
+
+ if (file)
+ return -ENOSYS;
+
+ return drm_atomic_helper_dirtyfb(fb, file, flags, color, clips,
+ num_clips);
+}
+
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};

+static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .dirty = amdgpu_dirtyfb
+};
+
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags)
{
@@ -1100,7 +1120,11 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
if (ret)
goto err;

- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = drm_framebuffer_init(dev, &rfb->base,
+ &amdgpu_fb_funcs_atomic);
+ else
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);

if (ret)
goto err;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 3c50b3ff7954..cd6e99cf74a0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -1269,6 +1269,13 @@ void handle_cursor_update(struct drm_plane *plane,
attributes.rotation_angle = 0;
attributes.attribute_flags.value = 0;

+ /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
+ * legacy gamma setup.
+ */
+ if (crtc_state->cm_is_degamma_srgb &&
+ adev->dm.dc->caps.color.dpp.gamma_corr)
+ attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
+
attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

if (crtc_state->stream) {
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index b9effadfc4bb..89da06033332 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -82,3 +82,4 @@ DC_EDID += dc_edid_parser.o
AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 674ab6d9b31e..16c05a24ac7a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1977,12 +1977,12 @@ enum dc_status dc_commit_streams(struct dc *dc,
}
}

- /* Check for case where we are going from odm 2:1 to max
- * pipe scenario. For these cases, we will call
- * commit_minimal_transition_state() to exit out of odm 2:1
- * first before processing new streams
+ /* ODM Combine 2:1 power optimization is only applied for single stream
* scenario, it uses more pipes than needed to reduce power consumption.
+ * We need to switch off this feature to make room for new streams.
*/
- if (stream_count == dc->res_pool->pipe_count) {
+ if (stream_count > dc->current_state->stream_count &&
+ dc->current_state->stream_count == 1) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->next_odm_pipe)
@@ -3361,6 +3361,45 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
}
}

+static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+{
+/*
+ * This function calls HWSS to wait for any potentially double buffered
+ * operations to complete. It should be invoked as a preamble prior
+ * to full update programming before asserting any HW locks.
+ */
+ int pipe_idx;
+ int opp_inst;
+ int opp_count = dc->res_pool->pipe_count;
+ struct hubp *hubp;
+ int mpcc_inst;
+ const struct pipe_ctx *pipe_ctx;
+
+ for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
+ pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
+ pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
+
+ hubp = pipe_ctx->plane_res.hubp;
+ if (!hubp)
+ continue;
+
+ mpcc_inst = hubp->inst;
+ // MPCC inst is equal to pipe index in practice
+ for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+ if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
+ dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+ dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+ break;
+ }
+ }
+ }
+}
+
static void commit_planes_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
@@ -3378,24 +3417,9 @@ static void commit_planes_for_stream(struct dc *dc,
// dc->current_state anymore, so we have to cache it before we apply
// the new SubVP context
subvp_prev_use = false;
-
-
dc_z10_restore(dc);
-
- if (update_type == UPDATE_TYPE_FULL) {
- /* wait for all double-buffer activity to clear on all pipes */
- int pipe_idx;
-
- for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
-
- if (!pipe_ctx->stream)
- continue;
-
- if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
- pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
- }
- }
+ if (update_type == UPDATE_TYPE_FULL)
+ wait_for_outstanding_hw_updates(dc, context);

if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 8e9384094f6d..f2f55565e98a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -212,8 +212,9 @@ struct mpcc *mpc1_insert_plane(
/* check insert_above_mpcc exist in tree->opp_list */
struct mpcc *temp_mpcc = tree->opp_list;

- while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
- temp_mpcc = temp_mpcc->mpcc_bot;
+ if (temp_mpcc != insert_above_mpcc)
+ while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
+ temp_mpcc = temp_mpcc->mpcc_bot;
if (temp_mpcc == NULL)
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 4ef632864948..fbc188812ccc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1515,17 +1515,6 @@ static void dcn20_update_dchubp_dpp(
|| plane_state->update_flags.bits.global_alpha_change
|| plane_state->update_flags.bits.per_pixel_alpha_change) {
// MPCC inst is equal to pipe index in practice
- int mpcc_inst = hubp->inst;
- int opp_inst;
- int opp_count = dc->res_pool->pipe_count;
-
- for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
- if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
- dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
- dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
- break;
- }
- }
hws->funcs.update_mpcc(dc, pipe_ctx);
}

diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 0f39ab9dc5b4..9e74b6e8b573 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -338,7 +338,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
* - Delta for CEIL: delta_from_mid_point_in_us_1
* - Delta for FLOOR: delta_from_mid_point_in_us_2
*/
- if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) {
+ if (mid_point_frames_ceil &&
+ (last_render_time_in_us / mid_point_frames_ceil) <
+ in_out_vrr->min_duration_in_us) {
/* Check for out of range.
* If using CEIL produces a value that is out of range,
* then we are forced to use FLOOR.
@@ -385,8 +387,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Either we've calculated the number of frames to insert,
* or we need to insert min duration frames
*/
- if (last_render_time_in_us / frames_to_insert <
- in_out_vrr->min_duration_in_us){
+ if (frames_to_insert &&
+ (last_render_time_in_us / frames_to_insert) <
+ in_out_vrr->min_duration_in_us){
frames_to_insert -= (frames_to_insert > 1) ?
1 : 0;
}
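
Both freesync guards protect against an integer divide-by-zero: mid_point_frames_ceil and frames_to_insert can reach zero for extreme render times, and the quotient was previously taken unconditionally. The pattern applied in both hunks:

    /* Divide only when the frame count is nonzero. */
    if (frames_to_insert &&
        (last_render_time_in_us / frames_to_insert) < in_out_vrr->min_duration_in_us)
            frames_to_insert -= (frames_to_insert > 1) ? 1 : 0;
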
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 82fd3c8adee1..5b2c2b7745bf 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -291,7 +291,7 @@ static void ast_init_dram_reg(struct drm_device *dev)
;
} while (ast_read32(ast, 0x10100) != 0xa8);
} else {/* AST2100/1100 */
- if (ast->chip == AST2100 || ast->chip == 2200)
+ if (ast->chip == AST2100 || ast->chip == AST2200)
dram_reg_info = ast2100_dram_table_data;
else
dram_reg_info = ast1100_dram_table_data;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 6b5d4ea22b67..107f465a27b9 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -56,6 +56,7 @@ struct intel_breadcrumbs;

typedef u32 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
+#define VIRTUAL_ENGINES BIT(BITS_PER_TYPE(intel_engine_mask_t) - 1)

struct intel_hw_status_page {
struct list_head timelines;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 0ec07dad1dcf..fecdc7ea78eb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -5111,6 +5111,9 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,

ve->base.flags = I915_ENGINE_IS_VIRTUAL;

+ BUILD_BUG_ON(ilog2(VIRTUAL_ENGINES) < I915_NUM_ENGINES);
+ ve->base.mask = VIRTUAL_ENGINES;
+
intel_context_init(&ve->context, &ve->base);

for (n = 0; n < count; n++) {
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 80c60754a5c1..79bf1be68d8c 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1179,6 +1179,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
{
const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
kvm_pfn_t pfn;
+ int ret;

if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
return 0;
@@ -1188,7 +1189,13 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
if (is_error_noslot_pfn(pfn))
return -EINVAL;
- return PageTransHuge(pfn_to_page(pfn));
+
+ if (!pfn_valid(pfn))
+ return -EINVAL;
+
+ ret = PageTransHuge(pfn_to_page(pfn));
+ kvm_release_pfn_clean(pfn);
+ return ret;
}

static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
@@ -2880,24 +2887,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
ggtt_invalidate(gvt->gt);
}

-/**
- * intel_vgpu_reset_gtt - reset the all GTT related status
- * @vgpu: a vGPU
- *
- * This function is called from vfio core to reset reset all
- * GTT related status, including GGTT, PPGTT, scratch page.
- *
- */
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
-{
- /* Shadow pages are only created when there is no page
- * table tracking data, so remove page tracking data after
- * removing the shadow pages.
- */
- intel_vgpu_destroy_all_ppgtt_mm(vgpu);
- intel_vgpu_reset_ggtt(vgpu, true);
-}
-
/**
* intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
* @gvt: intel gvt device
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index a3b0f59ec8bd..4cb183e06e95 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -224,7 +224,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);

int intel_gvt_init_gtt(struct intel_gvt *gvt);
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
void intel_gvt_clean_gtt(struct intel_gvt *gvt);

struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 803cd2ad4deb..7ce126a01cbf 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -134,9 +134,7 @@ static void i915_fence_release(struct dma_fence *fence)
i915_sw_fence_fini(&rq->semaphore);

/*
- * Keep one request on each engine for reserved use under mempressure
- * do not use with virtual engines as this really is only needed for
- * kernel contexts.
+ * Keep one request on each engine for reserved use under mempressure.
*
* We do not hold a reference to the engine here and so have to be
* very careful in what rq->engine we poke. The virtual engine is
@@ -166,8 +164,7 @@ static void i915_fence_release(struct dma_fence *fence)
* know that if the rq->execution_mask is a single bit, rq->engine
* can be a physical engine with the exact corresponding mask.
*/
- if (!intel_engine_is_virtual(rq->engine) &&
- is_power_of_2(rq->execution_mask) &&
+ if (is_power_of_2(rq->execution_mask) &&
!cmpxchg(&rq->engine->request_pool, NULL, rq))
return;

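Taken together with the two i915 hunks above, GuC virtual engines now carry the reserved VIRTUAL_ENGINES bit in their mask (with the sibling masks ORed in later in that function), so a request that ran on one can never have a power-of-two execution_mask. That is why the explicit intel_engine_is_virtual() test could be dropped here. A standalone sketch of the invariant, assuming a 32-bit engine mask:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_TYPE(t)	(8 * sizeof(t))
#define VIRTUAL_ENGINES		(UINT32_C(1) << (BITS_PER_TYPE(uint32_t) - 1))

static bool is_power_of_2(uint32_t n)
{
	return n && !(n & (n - 1));
}

int main(void)
{
	uint32_t physical = UINT32_C(1) << 3;	/* one physical engine */
	uint32_t virt = VIRTUAL_ENGINES | 0x6;	/* reserved bit plus sibling bits */

	printf("physical single bit: %d, virtual: %d\n",
	       is_power_of_2(physical), is_power_of_2(virt));
	return 0;
}
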
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
index 3bcc9c0f2019..7ed2516b6de0 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -611,6 +611,14 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
writel(ctrl, mxsfb->base + LCDC_AS_CTRL);
}

+static void mxsfb_plane_overlay_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
+
+ writel(0, mxsfb->base + LCDC_AS_CTRL);
+}
+
static bool mxsfb_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
@@ -626,6 +634,7 @@ static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = {
static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = {
.atomic_check = mxsfb_plane_atomic_check,
.atomic_update = mxsfb_plane_overlay_atomic_update,
+ .atomic_disable = mxsfb_plane_overlay_atomic_disable,
};

static const struct drm_plane_funcs mxsfb_plane_funcs = {
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index da45215a933d..bc8c1e9a845f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -43,13 +43,9 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
struct virtio_gpu_fence *fence,
uint32_t ring_idx)
{
- struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct virtio_gpu_fence_event *e = NULL;
int ret;

- if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
- return 0;
-
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (!e)
return -ENOMEM;
@@ -121,6 +117,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct virtio_gpu_fence *out_fence;
+ bool drm_fence_event;
int ret;
uint32_t *bo_handles = NULL;
void __user *user_bo_handles = NULL;
@@ -216,15 +213,24 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
goto out_memdup;
}

- out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
- if(!out_fence) {
- ret = -ENOMEM;
- goto out_unresv;
- }
+ if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) &&
+ (vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
+ drm_fence_event = true;
+ else
+ drm_fence_event = false;

- ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
- if (ret)
- goto out_unresv;
+ if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
+ exbuf->num_bo_handles ||
+ drm_fence_event)
+ out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
+ else
+ out_fence = NULL;
+
+ if (drm_fence_event) {
+ ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+ if (ret)
+ goto out_unresv;
+ }

if (out_fence_fd >= 0) {
sync_file = sync_file_create(&out_fence->f);
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
index 9cf186362ae2..3f08cd4a5c28 100644
--- a/drivers/hwspinlock/qcom_hwspinlock.c
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -69,9 +69,18 @@ static const struct hwspinlock_ops qcom_hwspinlock_ops = {
.unlock = qcom_hwspinlock_unlock,
};

+static const struct regmap_config sfpb_mutex_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x100,
+ .fast_io = true,
+};
+
static const struct qcom_hwspinlock_of_data of_sfpb_mutex = {
.offset = 0x4,
.stride = 0x4,
+ .regmap_config = &sfpb_mutex_config,
};

static const struct regmap_config tcsr_msm8226_mutex_config = {
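
The sfpb mutex entry previously carried no regmap_config of its own, so the MMIO (non-syscon) probe path had nothing to build a regmap from. The added structure mirrors the existing tcsr config. As a reminder of what each field means (the annotations are editorial, the values are the ones added above):

#include <linux/regmap.h>

static const struct regmap_config sfpb_mutex_config_annotated = {
	.reg_bits	= 32,		/* register addresses are 32 bits wide */
	.reg_stride	= 4,		/* valid registers sit 4 bytes apart */
	.val_bits	= 32,		/* register values are 32 bits wide */
	.max_register	= 0x100,	/* highest accessible register offset */
	.fast_io	= true,		/* MMIO: guard with a spinlock, not a mutex */
};
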
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index afcdfbb002ff..9c1489c0dae1 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -148,7 +148,7 @@ static int tca6416_keys_open(struct input_dev *dev)
if (chip->use_polling)
schedule_delayed_work(&chip->dwork, msecs_to_jiffies(100));
else
- enable_irq(chip->irqnum);
+ enable_irq(chip->client->irq);

return 0;
}
@@ -160,7 +160,7 @@ static void tca6416_keys_close(struct input_dev *dev)
if (chip->use_polling)
cancel_delayed_work_sync(&chip->dwork);
else
- disable_irq(chip->irqnum);
+ disable_irq(chip->client->irq);
}

static int tca6416_setup_registers(struct tca6416_keypad_chip *chip)
@@ -266,12 +266,7 @@ static int tca6416_keypad_probe(struct i2c_client *client,
goto fail1;

if (!chip->use_polling) {
- if (pdata->irq_is_gpio)
- chip->irqnum = gpio_to_irq(client->irq);
- else
- chip->irqnum = client->irq;
-
- error = request_threaded_irq(chip->irqnum, NULL,
+ error = request_threaded_irq(client->irq, NULL,
tca6416_keys_isr,
IRQF_TRIGGER_FALLING |
IRQF_ONESHOT | IRQF_NO_AUTOEN,
@@ -279,7 +274,7 @@ static int tca6416_keypad_probe(struct i2c_client *client,
if (error) {
dev_dbg(&client->dev,
"Unable to claim irq %d; error %d\n",
- chip->irqnum, error);
+ client->irq, error);
goto fail1;
}
}
@@ -297,10 +292,8 @@ static int tca6416_keypad_probe(struct i2c_client *client,
return 0;

fail2:
- if (!chip->use_polling) {
- free_irq(chip->irqnum, chip);
- enable_irq(chip->irqnum);
- }
+ if (!chip->use_polling)
+ free_irq(client->irq, chip);
fail1:
input_free_device(input);
kfree(chip);
@@ -311,10 +304,8 @@ static void tca6416_keypad_remove(struct i2c_client *client)
{
struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);

- if (!chip->use_polling) {
- free_irq(chip->irqnum, chip);
- enable_irq(chip->irqnum);
- }
+ if (!chip->use_polling)
+ free_irq(client->irq, chip);

input_unregister_device(chip->input);
kfree(chip);
@@ -324,10 +315,9 @@ static void tca6416_keypad_remove(struct i2c_client *client)
static int tca6416_keypad_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);

if (device_may_wakeup(dev))
- enable_irq_wake(chip->irqnum);
+ enable_irq_wake(client->irq);

return 0;
}
@@ -335,10 +325,9 @@ static int tca6416_keypad_suspend(struct device *dev)
static int tca6416_keypad_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);

if (device_may_wakeup(dev))
- disable_irq_wake(chip->irqnum);
+ disable_irq_wake(client->irq);

return 0;
}
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
index e47ab6c1177f..f24b174c7266 100644
--- a/drivers/input/misc/iqs7222.c
+++ b/drivers/input/misc/iqs7222.c
@@ -1381,9 +1381,6 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
if (error)
return error;

- sys_setup &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
- sys_setup &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
-
for (i = 0; i < IQS7222_NUM_RETRIES; i++) {
/*
* Trigger ATI from streaming and normal-power modes so that
@@ -1561,8 +1558,11 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
return error;
}

- if (dir == READ)
+ if (dir == READ) {
+ iqs7222->sys_setup[0] &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
+ iqs7222->sys_setup[0] &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
return 0;
+ }

return iqs7222_ati_trigger(iqs7222);
}
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index 7e27acf6c0cc..f597a1bd5684 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -227,10 +227,8 @@ static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc,
ret = of_parse_phandle_with_args(client_dn, "mboxes",
"#mbox-cells", j, &curr_ph);
of_node_put(curr_ph.np);
- if (!ret && curr_ph.np == controller_dn) {
+ if (!ret && curr_ph.np == controller_dn)
ipcc->num_chans++;
- break;
- }
}
}

diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index d8418d7fcc37..39661e23d7d4 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -272,6 +272,7 @@ struct brcmnand_controller {
const unsigned int *page_sizes;
unsigned int page_size_shift;
unsigned int max_oob;
+ u32 ecc_level_shift;
u32 features;

/* for low-power standby/resume only */
@@ -596,6 +597,34 @@ enum {
INTFC_CTLR_READY = BIT(31),
};

+/***********************************************************************
+ * NAND ACC CONTROL bitfield
+ *
+ * Some bits have remained constant throughout hardware revision, while
+ * others have shifted around.
+ ***********************************************************************/
+
+/* Constant for all versions (where supported) */
+enum {
+ /* See BRCMNAND_HAS_CACHE_MODE */
+ ACC_CONTROL_CACHE_MODE = BIT(22),
+
+ /* See BRCMNAND_HAS_PREFETCH */
+ ACC_CONTROL_PREFETCH = BIT(23),
+
+ ACC_CONTROL_PAGE_HIT = BIT(24),
+ ACC_CONTROL_WR_PREEMPT = BIT(25),
+ ACC_CONTROL_PARTIAL_PAGE = BIT(26),
+ ACC_CONTROL_RD_ERASED = BIT(27),
+ ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
+ ACC_CONTROL_WR_ECC = BIT(30),
+ ACC_CONTROL_RD_ECC = BIT(31),
+};
+
+#define ACC_CONTROL_ECC_SHIFT 16
+/* Only for v7.2 */
+#define ACC_CONTROL_ECC_EXT_SHIFT 13
+
static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
{
#if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA)
@@ -737,6 +766,12 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
ctrl->features |= BRCMNAND_HAS_WP;

+	/* v7.2 has a different ECC level shift in the ACC register */
+ if (ctrl->nand_version == 0x0702)
+ ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT;
+ else
+ ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT;
+
return 0;
}

@@ -931,30 +966,6 @@ static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
return 0;
}

-/***********************************************************************
- * NAND ACC CONTROL bitfield
- *
- * Some bits have remained constant throughout hardware revision, while
- * others have shifted around.
- ***********************************************************************/
-
-/* Constant for all versions (where supported) */
-enum {
- /* See BRCMNAND_HAS_CACHE_MODE */
- ACC_CONTROL_CACHE_MODE = BIT(22),
-
- /* See BRCMNAND_HAS_PREFETCH */
- ACC_CONTROL_PREFETCH = BIT(23),
-
- ACC_CONTROL_PAGE_HIT = BIT(24),
- ACC_CONTROL_WR_PREEMPT = BIT(25),
- ACC_CONTROL_PARTIAL_PAGE = BIT(26),
- ACC_CONTROL_RD_ERASED = BIT(27),
- ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
- ACC_CONTROL_WR_ECC = BIT(30),
- ACC_CONTROL_RD_ECC = BIT(31),
-};
-
static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
{
if (ctrl->nand_version == 0x0702)
@@ -967,18 +978,15 @@ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
return GENMASK(4, 0);
}

-#define NAND_ACC_CONTROL_ECC_SHIFT 16
-#define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13
-
static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
{
u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;

- mask <<= NAND_ACC_CONTROL_ECC_SHIFT;
+ mask <<= ACC_CONTROL_ECC_SHIFT;

/* v7.2 includes additional ECC levels */
- if (ctrl->nand_version >= 0x0702)
- mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;
+ if (ctrl->nand_version == 0x0702)
+ mask |= 0x7 << ACC_CONTROL_ECC_EXT_SHIFT;

return mask;
}
@@ -992,8 +1000,8 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)

if (en) {
acc_control |= ecc_flags; /* enable RD/WR ECC */
- acc_control |= host->hwcfg.ecc_level
- << NAND_ACC_CONTROL_ECC_SHIFT;
+ acc_control &= ~brcmnand_ecc_level_mask(ctrl);
+ acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift;
} else {
acc_control &= ~ecc_flags; /* disable RD/WR ECC */
acc_control &= ~brcmnand_ecc_level_mask(ctrl);
@@ -1072,6 +1080,14 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
cpu_relax();
} while (time_after(limit, jiffies));

+ /*
+	 * do a final check after the timeout in case the CPU was busy and the
+	 * driver did not get enough time to perform the polling, to avoid false alarms
+ */
+ val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
+ if ((val & mask) == expected_val)
+ return 0;
+
dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
expected_val, val & mask);

@@ -1461,19 +1477,33 @@ static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
const u8 *oob, int sas, int sector_1k)
{
int tbytes = sas << sector_1k;
- int j;
+ int j, k = 0;
+ u32 last = 0xffffffff;
+ u8 *plast = (u8 *)&last;

/* Adjust OOB values for 1K sector size */
if (sector_1k && (i & 0x01))
tbytes = max(0, tbytes - (int)ctrl->max_oob);
tbytes = min_t(int, tbytes, ctrl->max_oob);

- for (j = 0; j < tbytes; j += 4)
+ /*
+	 * tbytes may not be a multiple of words. Make sure we don't read out of
+	 * the boundary and stop at the last word.
+ */
+ for (j = 0; (j + 3) < tbytes; j += 4)
oob_reg_write(ctrl, j,
(oob[j + 0] << 24) |
(oob[j + 1] << 16) |
(oob[j + 2] << 8) |
(oob[j + 3] << 0));
+
+	/* handle the remaining bytes */
+ while (j < tbytes)
+ plast[k++] = oob[j++];
+
+ if (tbytes & 0x3)
+ oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last));
+
return tbytes;
}

@@ -1592,7 +1622,17 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)

dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);

- BUG_ON(ctrl->cmd_pending != 0);
+ /*
+ * If we came here through _panic_write and there is a pending
+ * command, try to wait for it. If it times out, rather than
+ * hitting BUG_ON, just return so we don't crash while crashing.
+ */
+ if (oops_in_progress) {
+ if (ctrl->cmd_pending &&
+ bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
+ return;
+ } else
+ BUG_ON(ctrl->cmd_pending != 0);
ctrl->cmd_pending = cmd;

ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
@@ -2561,7 +2601,7 @@ static int brcmnand_set_cfg(struct brcmnand_host *host,
tmp &= ~brcmnand_ecc_level_mask(ctrl);
tmp &= ~brcmnand_spare_area_mask(ctrl);
if (ctrl->nand_version >= 0x0302) {
- tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
+ tmp |= cfg->ecc_level << ctrl->ecc_level_shift;
tmp |= cfg->spare_area_size;
}
nand_writereg(ctrl, acc_control_offs, tmp);
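
Two of the brcmnand changes above are worth unpacking. The ECC level shift is now cached per controller (v7.2 places the field at bit 13 instead of 16), and write_oob_to_regs() no longer reads past the OOB buffer when its length is not word-aligned: full words are packed big-endian, and any 1-3 trailing bytes are merged into a 0xff-filled word. A standalone sketch of the tail handling:

#include <stdint.h>
#include <stdio.h>

static uint32_t be32(const uint8_t *p)	/* models cpu_to_be32() on the word image */
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

static void write_oob_words(const uint8_t *oob, int tbytes)
{
	uint32_t last = 0xffffffff;
	uint8_t *plast = (uint8_t *)&last;
	int j, k = 0;

	for (j = 0; (j + 3) < tbytes; j += 4)	/* stop at the last full word */
		printf("reg[%d] = %08x\n", j,
		       ((uint32_t)oob[j] << 24) | ((uint32_t)oob[j + 1] << 16) |
		       ((uint32_t)oob[j + 2] << 8) | oob[j + 3]);

	while (j < tbytes)	/* 1-3 leftover bytes, if any */
		plast[k++] = oob[j++];

	if (tbytes & 0x3)
		printf("reg[%d] = %08x (tail)\n", tbytes & ~0x3, be32(plast));
}

int main(void)
{
	const uint8_t oob[6] = { 1, 2, 3, 4, 5, 6 };

	write_oob_words(oob, 6);	/* prints 01020304, then 0506ffff */
	return 0;
}
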
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index ffaa24055259..b7c775b615e8 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -120,8 +120,9 @@ static const struct flash_info winbond_nor_parts[] = {
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K) },
- { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256)
- NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 0, 0)
+ PARSE_SFDP
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &w25q256_fixups },
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index fb3cd4c78faa..06e0ebfe652a 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -132,6 +132,8 @@ struct sja1105_info {
int max_frame_mem;
int num_ports;
bool multiple_cascade_ports;
+ /* Every {port, TXQ} has its own CBS shaper */
+ bool fixed_cbs_mapping;
enum dsa_tag_protocol tag_proto;
const struct sja1105_dynamic_table_ops *dyn_ops;
const struct sja1105_table_ops *static_ops;
@@ -262,6 +264,8 @@ struct sja1105_private {
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
+ /* Serializes accesses to the FDB */
+ struct mutex fdb_lock;
/* PTP two-step TX timestamp ID, and its serialization lock */
spinlock_t ts_id_lock;
u8 ts_id;
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 7729d3f8b7f5..984c0e604e8d 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -1175,18 +1175,15 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {

static int
sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
- struct sja1105_dyn_cmd *cmd,
- const struct sja1105_dynamic_table_ops *ops)
+ const struct sja1105_dynamic_table_ops *ops,
+ void *entry, bool check_valident,
+ bool check_errors)
{
u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
+ struct sja1105_dyn_cmd cmd = {};
int rc;

- /* We don't _need_ to read the full entry, just the command area which
- * is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a
- * buffer that contains the full entry too. Additionally, our API
- * doesn't really know how many bytes into the buffer does the command
- * area really begin. So just read back the whole entry.
- */
+ /* Read back the whole entry + command structure. */
rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
ops->packed_size);
if (rc)
@@ -1195,11 +1192,25 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
/* Unpack the command structure, and return it to the caller in case it
* needs to perform further checks on it (VALIDENT).
*/
- memset(cmd, 0, sizeof(*cmd));
- ops->cmd_packing(packed_buf, cmd, UNPACK);
+ ops->cmd_packing(packed_buf, &cmd, UNPACK);

/* Hardware hasn't cleared VALID => still working on it */
- return cmd->valid ? -EAGAIN : 0;
+ if (cmd.valid)
+ return -EAGAIN;
+
+ if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY))
+ return -ENOENT;
+
+ if (check_errors && cmd.errors)
+ return -EINVAL;
+
+ /* Don't dereference possibly NULL pointer - maybe caller
+ * only wanted to see whether the entry existed or not.
+ */
+ if (entry)
+ ops->entry_packing(packed_buf, entry, UNPACK);
+
+ return 0;
}

/* Poll the dynamic config entry's control area until the hardware has
@@ -1208,16 +1219,19 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
*/
static int
sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
- struct sja1105_dyn_cmd *cmd,
- const struct sja1105_dynamic_table_ops *ops)
+ const struct sja1105_dynamic_table_ops *ops,
+ void *entry, bool check_valident,
+ bool check_errors)
{
- int rc;
-
- return read_poll_timeout(sja1105_dynamic_config_poll_valid,
- rc, rc != -EAGAIN,
- SJA1105_DYNAMIC_CONFIG_SLEEP_US,
- SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
- false, priv, cmd, ops);
+ int err, rc;
+
+ err = read_poll_timeout(sja1105_dynamic_config_poll_valid,
+ rc, rc != -EAGAIN,
+ SJA1105_DYNAMIC_CONFIG_SLEEP_US,
+ SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
+ false, priv, ops, entry, check_valident,
+ check_errors);
+ return err < 0 ? err : rc;
}

/* Provides read access to the settings through the dynamic interface
@@ -1286,25 +1300,14 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
- if (rc < 0) {
- mutex_unlock(&priv->dynamic_config_lock);
- return rc;
- }
-
- rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
- mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0)
- return rc;
+ goto out;

- if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
- return -ENOENT;
+ rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false);
+out:
+ mutex_unlock(&priv->dynamic_config_lock);

- /* Don't dereference possibly NULL pointer - maybe caller
- * only wanted to see whether the entry existed or not.
- */
- if (entry)
- ops->entry_packing(packed_buf, entry, UNPACK);
- return 0;
+ return rc;
}

int sja1105_dynamic_config_write(struct sja1105_private *priv,
@@ -1356,22 +1359,14 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
- if (rc < 0) {
- mutex_unlock(&priv->dynamic_config_lock);
- return rc;
- }
-
- rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
- mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0)
- return rc;
+ goto out;

- cmd = (struct sja1105_dyn_cmd) {0};
- ops->cmd_packing(packed_buf, &cmd, UNPACK);
- if (cmd.errors)
- return -EINVAL;
+ rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true);
+out:
+ mutex_unlock(&priv->dynamic_config_lock);

- return 0;
+ return rc;
}

static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
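
The refactor above moves all completion checks (VALID cleared, VALIDENT, ERRORS, entry unpacking) into the function polled by read_poll_timeout(), so the read and write paths share one code path and the mutex is always released through a single exit label. The polled function returns -EAGAIN while the hardware is busy; read_poll_timeout() retries until the return value changes or the timeout expires, and the wrapper then propagates either its own -ETIMEDOUT or the poll function's final code. A standalone sketch of the idiom, with iterations standing in for time:

#include <errno.h>
#include <stdio.h>

static int attempts;

static int poll_valid(void)
{
	return (++attempts < 3) ? -EAGAIN : 0;	/* busy twice, then complete */
}

static int wait_complete(int max_tries)
{
	int rc, i;

	for (i = 0; i < max_tries; i++) {
		rc = poll_valid();
		if (rc != -EAGAIN)	/* success or a hard error: stop polling */
			return rc;
	}
	return -ETIMEDOUT;	/* what read_poll_timeout() reports */
}

int main(void)
{
	printf("rc = %d after %d attempts\n", wait_complete(10), attempts);
	return 0;
}
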
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 947e8f7c0988..f1f1368e8146 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1805,6 +1805,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
+ int rc;

if (!vid) {
switch (db.type) {
@@ -1819,12 +1820,16 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
}
}

- return priv->info->fdb_add_cmd(ds, port, addr, vid);
+ mutex_lock(&priv->fdb_lock);
+ rc = priv->info->fdb_add_cmd(ds, port, addr, vid);
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
}

-static int sja1105_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
+static int __sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;

@@ -1844,6 +1849,20 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
return priv->info->fdb_del_cmd(ds, port, addr, vid);
}

+static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ mutex_lock(&priv->fdb_lock);
+ rc = __sja1105_fdb_del(ds, port, addr, vid, db);
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
+}
+
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
@@ -1875,13 +1894,14 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
if (!(l2_lookup.destports & BIT(port)))
continue;

- /* We need to hide the FDB entry for unknown multicast */
- if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
- l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
- continue;
-
u64_to_ether_addr(l2_lookup.macaddr, macaddr);

+		/* The hardware FDB is shared between fdb and mdb; "bridge fdb
+		 * show" only wants to see unicast entries
+ */
+ if (is_multicast_ether_addr(macaddr))
+ continue;
+
/* We need to hide the dsa_8021q VLANs from the user. */
if (vid_is_dsa_8021q(l2_lookup.vlanid))
l2_lookup.vlanid = 0;
@@ -1905,6 +1925,8 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
};
int i;

+ mutex_lock(&priv->fdb_lock);
+
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
struct sja1105_l2_lookup_entry l2_lookup = {0};
u8 macaddr[ETH_ALEN];
@@ -1918,7 +1940,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
if (rc) {
dev_err(ds->dev, "Failed to read FDB: %pe\n",
ERR_PTR(rc));
- return;
+ break;
}

if (!(l2_lookup.destports & BIT(port)))
@@ -1930,14 +1952,16 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)

u64_to_ether_addr(l2_lookup.macaddr, macaddr);

- rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
+ rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
if (rc) {
dev_err(ds->dev,
"Failed to delete FDB entry %pM vid %lld: %pe\n",
macaddr, l2_lookup.vlanid, ERR_PTR(rc));
- return;
+ break;
}
}
+
+ mutex_unlock(&priv->fdb_lock);
}

static int sja1105_mdb_add(struct dsa_switch *ds, int port,
@@ -2122,11 +2146,36 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
}

#define BYTES_PER_KBIT (1000LL / 8)
+/* Port 0 (the uC port) does not have CBS shapers */
+#define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
+
+static int sja1105_find_cbs_shaper(struct sja1105_private *priv,
+ int port, int prio)
+{
+ int i;
+
+ if (priv->info->fixed_cbs_mapping) {
+ i = SJA1110_FIXED_CBS(port, prio);
+ if (i >= 0 && i < priv->info->num_cbs_shapers)
+ return i;
+
+ return -1;
+ }
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++)
+ if (priv->cbs[i].port == port && priv->cbs[i].prio == prio)
+ return i;
+
+ return -1;
+}

static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
{
int i;

+ if (priv->info->fixed_cbs_mapping)
+ return -1;
+
for (i = 0; i < priv->info->num_cbs_shapers; i++)
if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
return i;
@@ -2157,14 +2206,20 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
struct sja1105_cbs_entry *cbs;
+ s64 port_transmit_rate_kbps;
int index;

if (!offload->enable)
return sja1105_delete_cbs_shaper(priv, port, offload->queue);

- index = sja1105_find_unused_cbs_shaper(priv);
- if (index < 0)
- return -ENOSPC;
+ /* The user may be replacing an existing shaper */
+ index = sja1105_find_cbs_shaper(priv, port, offload->queue);
+ if (index < 0) {
+ /* That isn't the case - see if we can allocate a new one */
+ index = sja1105_find_unused_cbs_shaper(priv);
+ if (index < 0)
+ return -ENOSPC;
+ }

cbs = &priv->cbs[index];
cbs->port = port;
@@ -2174,9 +2229,17 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
*/
cbs->credit_hi = offload->hicredit;
cbs->credit_lo = abs(offload->locredit);
- /* User space is in kbits/sec, hardware in bytes/sec */
- cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
- cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
+	/* User space is in kbits/sec, while the hardware is in bytes/sec times
+ * link speed. Since the given offload->sendslope is good only for the
+ * current link speed anyway, and user space is likely to reprogram it
+ * when that changes, don't even bother to track the port's link speed,
+ * but deduce the port transmit rate from idleslope - sendslope.
+ */
+ port_transmit_rate_kbps = offload->idleslope - offload->sendslope;
+ cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT,
+ port_transmit_rate_kbps);
+ cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT),
+ port_transmit_rate_kbps);
/* Convert the negative values from 64-bit 2's complement
* to 32-bit 2's complement (for the case of 0x80000000 whose
* negative is still negative).
@@ -2241,6 +2304,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
int rc, i;
s64 now;

+ mutex_lock(&priv->fdb_lock);
mutex_lock(&priv->mgmt_lock);

mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
@@ -2355,6 +2419,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
goto out;
out:
mutex_unlock(&priv->mgmt_lock);
+ mutex_unlock(&priv->fdb_lock);

return rc;
}
@@ -2924,7 +2989,9 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
{
struct sja1105_l2_lookup_entry *l2_lookup;
struct sja1105_table *table;
- int match;
+ int match, rc;
+
+ mutex_lock(&priv->fdb_lock);

table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
l2_lookup = table->entries;
@@ -2937,7 +3004,8 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
if (match == table->entry_count) {
NL_SET_ERR_MSG_MOD(extack,
"Could not find FDB entry for unknown multicast");
- return -ENOSPC;
+ rc = -ENOSPC;
+ goto out;
}

if (flags.val & BR_MCAST_FLOOD)
@@ -2945,10 +3013,13 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
else
l2_lookup[match].destports &= ~BIT(to);

- return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
- l2_lookup[match].index,
- &l2_lookup[match],
- true);
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup[match].index,
+ &l2_lookup[match], true);
+out:
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
}

static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
@@ -3318,6 +3389,7 @@ static int sja1105_probe(struct spi_device *spi)
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->dynamic_config_lock);
mutex_init(&priv->mgmt_lock);
+ mutex_init(&priv->fdb_lock);
spin_lock_init(&priv->ts_id_lock);

rc = sja1105_parse_dt(priv);
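
The CBS change above scales the tc-provided slopes by the port transmit rate, deduced as idleslope - sendslope, because the switch programs slopes as a fraction of link speed rather than in absolute bytes per second. A standalone arithmetic sketch for a hypothetical 1 Gb/s port with 30% reserved for the CBS class:

#include <stdio.h>
#include <stdlib.h>

#define BYTES_PER_KBIT	(1000LL / 8)

int main(void)
{
	long long idleslope = 300000;			/* kbit/s, from tc-cbs */
	long long sendslope = -700000;			/* kbit/s, from tc-cbs */
	long long rate_kbps = idleslope - sendslope;	/* 1,000,000 = link rate */

	long long idle = idleslope * BYTES_PER_KBIT / rate_kbps;
	long long send = llabs(sendslope * BYTES_PER_KBIT) / rate_kbps;

	/* prints 37 and 87: credits relative to link rate, not absolute bytes/s */
	printf("idle_slope=%lld send_slope=%lld\n", idle, send);
	return 0;
}
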
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index d3c9ad6d39d4..e6b61aef4127 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -781,6 +781,7 @@ const struct sja1105_info sja1110a_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -831,6 +832,7 @@ const struct sja1105_info sja1110b_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -881,6 +883,7 @@ const struct sja1105_info sja1110c_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -931,6 +934,7 @@ const struct sja1105_info sja1110d_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index ecce5f7a549f..ed2863ed6a5b 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -740,7 +740,7 @@ static int adin1110_broadcasts_filter(struct adin1110_port_priv *port_priv,
u32 port_rules = 0;
u8 mask[ETH_ALEN];

- memset(mask, 0xFF, ETH_ALEN);
+ eth_broadcast_addr(mask);

if (accept_broadcast && port_priv->state == BR_STATE_FORWARDING)
port_rules = adin1110_port_rules(port_priv, true, true);
@@ -761,7 +761,7 @@ static int adin1110_set_mac_address(struct net_device *netdev,
return -EADDRNOTAVAIL;

eth_hw_addr_set(netdev, dev_addr);
- memset(mask, 0xFF, ETH_ALEN);
+ eth_broadcast_addr(mask);

mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
port_rules = adin1110_port_rules(port_priv, true, false);
@@ -1251,7 +1251,7 @@ static int adin1110_port_set_blocking_state(struct adin1110_port_priv *port_priv
goto out;

/* Allow only BPDUs to be passed to the CPU */
- memset(mask, 0xFF, ETH_ALEN);
+ eth_broadcast_addr(mask);
port_rules = adin1110_port_rules(port_priv, true, false);
ret = adin1110_write_mac_address(port_priv, mac_slot, mac,
mask, port_rules);
@@ -1365,8 +1365,8 @@ static int adin1110_fdb_add(struct adin1110_port_priv *port_priv,
return -ENOMEM;

other_port = priv->ports[!port_priv->nr];
- port_rules = adin1110_port_rules(port_priv, false, true);
- memset(mask, 0xFF, ETH_ALEN);
+ port_rules = adin1110_port_rules(other_port, false, true);
+ eth_broadcast_addr(mask);

return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,
mask, port_rules);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 9c410f93a103..1aa578c1ca4a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -95,6 +95,8 @@
#define GEM_SA4B 0x00A0 /* Specific4 Bottom */
#define GEM_SA4T 0x00A4 /* Specific4 Top */
#define GEM_WOL 0x00b8 /* Wake on LAN */
+#define GEM_RXPTPUNI 0x00D4 /* PTP RX Unicast address */
+#define GEM_TXPTPUNI 0x00D8 /* PTP TX Unicast address */
#define GEM_EFTSH 0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
#define GEM_EFRSH 0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
#define GEM_PEFTSH 0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
@@ -245,6 +247,8 @@
#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
#define MACB_TZQ_SIZE 1
#define MACB_SRTSM_OFFSET 15 /* Store Receive Timestamp to Memory */
+#define MACB_PTPUNI_OFFSET 20 /* PTP Unicast packet enable */
+#define MACB_PTPUNI_SIZE 1
#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */
#define MACB_OSSMODE_SIZE 1
#define MACB_MIIONRGMII_OFFSET 28 /* MII Usage on RGMII Interface */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 5fb991835078..54b032a46b48 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -288,6 +288,11 @@ static void macb_set_hwaddr(struct macb *bp)
top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
macb_or_gem_writel(bp, SA1T, top);

+ if (gem_has_ptp(bp)) {
+ gem_writel(bp, RXPTPUNI, bottom);
+ gem_writel(bp, TXPTPUNI, bottom);
+ }
+
/* Clear unused address register sets */
macb_or_gem_writel(bp, SA2B, 0);
macb_or_gem_writel(bp, SA2T, 0);
@@ -700,8 +705,6 @@ static void macb_mac_link_up(struct phylink_config *config,
if (rx_pause)
ctrl |= MACB_BIT(PAE);

- macb_set_tx_clk(bp, speed);
-
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
* cleared the pipeline and control registers.
*/
@@ -721,8 +724,15 @@ static void macb_mac_link_up(struct phylink_config *config,

spin_unlock_irqrestore(&bp->lock, flags);

- /* Enable Rx and Tx */
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
+ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
+ macb_set_tx_clk(bp, speed);
+
+ /* Enable Rx and Tx; Enable PTP unicast */
+ ctrl = macb_readl(bp, NCR);
+ if (gem_has_ptp(bp))
+ ctrl |= MACB_BIT(PTPUNI);
+
+ macb_writel(bp, NCR, ctrl | MACB_BIT(RE) | MACB_BIT(TE));

netif_tx_wake_all_queues(ndev);
}
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 2e6461b0ea8b..a9409e3721ad 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -492,7 +492,10 @@ static int gve_rx_append_frags(struct napi_struct *napi,
if (!skb)
return -1;

- skb_shinfo(rx->ctx.skb_tail)->frag_list = skb;
+ if (rx->ctx.skb_tail == rx->ctx.skb_head)
+ skb_shinfo(rx->ctx.skb_head)->frag_list = skb;
+ else
+ rx->ctx.skb_tail->next = skb;
rx->ctx.skb_tail = skb;
num_frags = 0;
}
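
The gve fix corrects skb fragment chaining: only the head skb owns frag_list; once a tail exists, further fragments must be appended through the tail's next pointer, otherwise each new fragment overwrites the previous chain. A toy sketch of the pattern (simplified struct, not the real sk_buff):

#include <stdio.h>

struct skb {
	int id;
	struct skb *frag_list;	/* meaningful on the head only */
	struct skb *next;
};

static void append_frag(struct skb *head, struct skb **tail, struct skb *skb)
{
	if (*tail == head)
		head->frag_list = skb;	/* first fragment hangs off the head */
	else
		(*tail)->next = skb;	/* later ones chain via ->next */
	*tail = skb;
}

int main(void)
{
	struct skb head = { 0 }, f1 = { 1 }, f2 = { 2 };
	struct skb *tail = &head;

	append_frag(&head, &tail, &f1);
	append_frag(&head, &tail, &f2);

	for (struct skb *s = head.frag_list; s; s = s->next)
		printf("frag %d\n", s->id);	/* prints 1 then 2 */
	return 0;
}
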
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index fcb8b6dc5ab9..c693bb701ba3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -797,6 +797,7 @@ struct hnae3_tc_info {
u8 max_tc; /* Total number of TCs */
u8 num_tc; /* Total number of enabled TCs */
bool mqprio_active;
+ bool dcb_ets_active;
};

#define HNAE3_MAX_DSCP 64
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 69d1549e63a9..00eed9835cb5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1406,9 +1406,9 @@ int hns3_dbg_init(struct hnae3_handle *handle)
return 0;

out:
- mutex_destroy(&handle->dbgfs_lock);
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
+ mutex_destroy(&handle->dbgfs_lock);
return ret;
}

@@ -1416,6 +1416,9 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
{
u32 i;

+ debugfs_remove_recursive(handle->hnae3_dbgfs);
+ handle->hnae3_dbgfs = NULL;
+
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
if (handle->dbgfs_buf[i]) {
kvfree(handle->dbgfs_buf[i]);
@@ -1423,8 +1426,6 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
}

mutex_destroy(&handle->dbgfs_lock);
- debugfs_remove_recursive(handle->hnae3_dbgfs);
- handle->hnae3_dbgfs = NULL;
}

void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 61f833d61f58..8aae179554a8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2102,8 +2102,12 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
*/
if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
!ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
+		/* This smp_store_release() pairs with smp_load_acquire() in
+ * hns3_nic_reclaim_desc(). Ensure that the BD valid bit
+ * is updated.
+ */
+ smp_store_release(&ring->last_to_use, ring->next_to_use);
hns3_tx_push_bd(ring, num);
- WRITE_ONCE(ring->last_to_use, ring->next_to_use);
return;
}

@@ -2114,6 +2118,11 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
return;
}

+	/* This smp_store_release() pairs with smp_load_acquire() in
+ * hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
+ */
+ smp_store_release(&ring->last_to_use, ring->next_to_use);
+
if (ring->tqp->mem_base)
hns3_tx_mem_doorbell(ring);
else
@@ -2121,7 +2130,6 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);

ring->pending_buf = 0;
- WRITE_ONCE(ring->last_to_use, ring->next_to_use);
}

static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
@@ -3307,8 +3315,6 @@ static void hns3_set_default_feature(struct net_device *netdev)

netdev->priv_flags |= IFF_UNICAST_FLT;

- netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
-
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
@@ -3562,9 +3568,8 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
int *bytes, int *pkts, int budget)
{
- /* pair with ring->last_to_use update in hns3_tx_doorbell(),
- * smp_store_release() is not used in hns3_tx_doorbell() because
- * the doorbell operation already have the needed barrier operation.
+ /* This smp_load_acquire() pairs with smp_store_release() in
+ * hns3_tx_doorbell().
*/
int ltu = smp_load_acquire(&ring->last_to_use);
int ntc = ring->next_to_clean;
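
The hns3 ordering fix publishes last_to_use with smp_store_release() before ringing the doorbell, pairing with the smp_load_acquire() in the reclaim path: the reclaimer is then guaranteed to observe every descriptor write that preceded the release. A standalone C11 sketch of the pairing (stdatomic standing in for the kernel barriers):

#include <stdatomic.h>
#include <stdio.h>

static int bd_valid;			/* stands in for the BD valid bit */
static atomic_int last_to_use;

static void producer(void)
{
	bd_valid = 1;					/* write the descriptor */
	atomic_store_explicit(&last_to_use, 1,
			      memory_order_release);	/* then publish the index */
}

static void consumer(void)
{
	if (atomic_load_explicit(&last_to_use, memory_order_acquire) == 1)
		printf("bd_valid=%d (guaranteed to be 1)\n", bd_valid);
}

int main(void)
{
	producer();
	consumer();
	return 0;
}
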
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index cdf76fb58d45..e22835ae8a94 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -776,7 +776,9 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
hns3_get_ksettings(h, cmd);
break;
case HNAE3_MEDIA_TYPE_FIBER:
- if (module_type == HNAE3_MODULE_TYPE_CR)
+ if (module_type == HNAE3_MODULE_TYPE_UNKNOWN)
+ cmd->base.port = PORT_OTHER;
+ else if (module_type == HNAE3_MODULE_TYPE_CR)
cmd->base.port = PORT_DA;
else
cmd->base.port = PORT_FIBRE;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 09362823140d..2740f0d703e4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -251,7 +251,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
int ret;

if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
- hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+ h->kinfo.tc_info.mqprio_active)
return -EINVAL;

ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
@@ -267,10 +267,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
}

hclge_tm_schd_info_update(hdev, num_tc);
- if (num_tc > 1)
- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
- else
- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+ h->kinfo.tc_info.dcb_ets_active = num_tc > 1;

ret = hclge_ieee_ets_to_tm_info(hdev, ets);
if (ret)
@@ -463,7 +460,7 @@ static u8 hclge_getdcbx(struct hnae3_handle *h)
struct hclge_vport *vport = hclge_get_vport(h);
struct hclge_dev *hdev = vport->back;

- if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+ if (h->kinfo.tc_info.mqprio_active)
return 0;

return hdev->dcbx_cap;
@@ -587,7 +584,8 @@ static int hclge_setup_tc(struct hnae3_handle *h,
if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
return -EBUSY;

- if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
+ kinfo = &vport->nic.kinfo;
+ if (kinfo->tc_info.dcb_ets_active)
return -EINVAL;

ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
@@ -601,7 +599,6 @@ static int hclge_setup_tc(struct hnae3_handle *h,
if (ret)
return ret;

- kinfo = &vport->nic.kinfo;
memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
kinfo->tc_info.mqprio_active = tc > 0;
@@ -610,13 +607,6 @@ static int hclge_setup_tc(struct hnae3_handle *h,
if (ret)
goto err_out;

- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
-
- if (tc > 1)
- hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
- else
- hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
-
return hclge_notify_init_up(hdev);

err_out:
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 5cb8f1818e51..a1c59f4aae98 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -1517,7 +1517,7 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
struct hclge_desc desc[3];
int pos = 0;
int ret, i;
- u32 *req;
+ __le32 *req;

hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
@@ -1542,22 +1542,22 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
tcam_msg.loc);

/* tcam_data0 ~ tcam_data1 */
- req = (u32 *)req1->tcam_data;
+ req = (__le32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", *req++);
+ "%08x\n", le32_to_cpu(*req++));

/* tcam_data2 ~ tcam_data7 */
- req = (u32 *)req2->tcam_data;
+ req = (__le32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", *req++);
+ "%08x\n", le32_to_cpu(*req++));

/* tcam_data8 ~ tcam_data12 */
- req = (u32 *)req3->tcam_data;
+ req = (__le32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", *req++);
+ "%08x\n", le32_to_cpu(*req++));

return ret;
}
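
The TCAM dump fix treats the device-provided words as __le32 and converts with le32_to_cpu() before printing, instead of dereferencing them as host-order u32, which printed byte-swapped values on big-endian machines. A standalone sketch of the conversion:

#include <stdint.h>
#include <stdio.h>

static uint32_t le32_to_host(const uint8_t *p)	/* what le32_to_cpu() does */
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* 0x11223344 as a little-endian device stores it */
	const uint8_t tcam_data[4] = { 0x44, 0x33, 0x22, 0x11 };

	printf("%08x\n", le32_to_host(tcam_data));	/* 11223344 on any host */
	return 0;
}
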
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 84ecd8b9be48..884e45fb6b72 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -11132,6 +11132,7 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,

static void hclge_info_show(struct hclge_dev *hdev)
{
+ struct hnae3_handle *handle = &hdev->vport->nic;
struct device *dev = &hdev->pdev->dev;

dev_info(dev, "PF info begin:\n");
@@ -11148,9 +11149,9 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
- hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
+ handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
dev_info(dev, "MQPRIO %s\n",
- hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
+ handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
dev_info(dev, "Default tx spare buffer size: %u\n",
hdev->tx_spare_buf_size);

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 13f23d606e77..f6fef790e16c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -916,8 +916,6 @@ struct hclge_dev {

#define HCLGE_FLAG_MAIN BIT(0)
#define HCLGE_FLAG_DCB_CAPABLE BIT(1)
-#define HCLGE_FLAG_DCB_ENABLE BIT(2)
-#define HCLGE_FLAG_MQPRIO_ENABLE BIT(3)
u32 flag;

u32 pkt_buf_size; /* Total pf buf size for tx/rx */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 015b78144114..a2b759531cb7 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -34,11 +34,11 @@ struct igb_adapter;
/* TX/RX descriptor defines */
#define IGB_DEFAULT_TXD 256
#define IGB_DEFAULT_TX_WORK 128
-#define IGB_MIN_TXD 80
+#define IGB_MIN_TXD 64
#define IGB_MAX_TXD 4096

#define IGB_DEFAULT_RXD 256
-#define IGB_MIN_RXD 80
+#define IGB_MIN_RXD 64
#define IGB_MAX_RXD 4096

#define IGB_DEFAULT_ITR 3 /* dynamic */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d0ead18ec026..45ce4ed16146 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3877,8 +3877,9 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;

- /* Virtualization features not supported on i210 family. */
- if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+ /* Virtualization features not supported on i210 and 82580 family. */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) ||
+ (hw->mac.type == e1000_82580))
return;

/* Of the below we really only want the effect of getting
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 57d39ee00b58..7b83678ba83a 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -39,11 +39,11 @@ enum latency_range {
/* Tx/Rx descriptor defines */
#define IGBVF_DEFAULT_TXD 256
#define IGBVF_MAX_TXD 4096
-#define IGBVF_MIN_TXD 80
+#define IGBVF_MIN_TXD 64

#define IGBVF_DEFAULT_RXD 256
#define IGBVF_MAX_RXD 4096
-#define IGBVF_MIN_RXD 80
+#define IGBVF_MIN_RXD 64

#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index f83cbc4a1afa..d3b17aa1d1a8 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -354,11 +354,11 @@ static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
/* TX/RX descriptor defines */
#define IGC_DEFAULT_TXD 256
#define IGC_DEFAULT_TX_WORK 128
-#define IGC_MIN_TXD 80
+#define IGC_MIN_TXD 64
#define IGC_MAX_TXD 4096

#define IGC_DEFAULT_RXD 256
-#define IGC_MIN_RXD 80
+#define IGC_MIN_RXD 64
#define IGC_MAX_RXD 4096

/* Supported Rx Buffer Sizes */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index f8605f57bd06..75e1383263c1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -995,6 +995,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
+ u32 aflags = adapter->flags;
bool is_l2 = false;
u32 regval;

@@ -1012,20 +1013,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
tsync_rx_mtrl = 0;
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -1039,8 +1040,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
is_l2 = true;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_NTP_ALL:
@@ -1051,7 +1052,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
if (hw->mac.type >= ixgbe_mac_X550) {
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL;
config->rx_filter = HWTSTAMP_FILTER_ALL;
- adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+ aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
break;
}
fallthrough;
@@ -1062,8 +1063,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
* Delay_Req messages and hardware does not support
* timestamping all packets => return error
*/
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -1095,8 +1094,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
IXGBE_TSYNCRXCTL_TYPE_ALL |
IXGBE_TSYNCRXCTL_TSIP_UT_EN;
config->rx_filter = HWTSTAMP_FILTER_ALL;
- adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
+ aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+ aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
is_l2 = true;
break;
default:
@@ -1129,6 +1128,9 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,

IXGBE_WRITE_FLUSH(hw);

+ /* configure adapter flags only when HW is actually configured */
+ adapter->flags = aflags;
+
/* clear TX/RX time stamp registers, just to be sure */
ixgbe_ptp_clear_tx_timestamp(adapter);
IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
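
The ixgbe change stages the flag updates in a local aflags and commits them only after the hardware registers have actually been written, so an early -ERANGE return no longer leaves the software flags disagreeing with the hardware state. A standalone sketch of the staged-commit pattern (names are hypothetical):

#include <errno.h>
#include <stdio.h>

#define FLAG_RX_HWTSTAMP	(1u << 0)

static unsigned int adapter_flags;

static int set_timestamp_mode(int filter)
{
	unsigned int aflags = adapter_flags;	/* work on a copy */

	if (filter < 0)
		return -ERANGE;			/* bail out with no side effects */

	aflags |= FLAG_RX_HWTSTAMP;
	/* ... program the hardware here ... */
	adapter_flags = aflags;			/* commit only on success */
	return 0;
}

int main(void)
{
	printf("rc=%d flags=%#x\n", set_timestamp_mode(-1), adapter_flags);
	printf("rc=%d flags=%#x\n", set_timestamp_mode(1), adapter_flags);
	return 0;
}
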
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index b399bdb1ca36..f936640cca4e 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5578,6 +5578,11 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
break;
case ETHTOOL_GRXCLSRLALL:
for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
+ if (loc == info->rule_cnt) {
+ ret = -EMSGSIZE;
+ break;
+ }
+
if (port->rfs_rules[i])
rules[loc++] = i;
}
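
This hunk, like the mtk_eth_soc one further down, bounds the ETHTOOL_GRXCLSRLALL dump by the caller-supplied rule_cnt so the rule_locs array can no longer be overrun, returning -EMSGSIZE when the buffer is too small. A standalone sketch of the pattern:

#include <errno.h>
#include <stdio.h>

static int collect_rules(const int *active, int n, int *locs, int rule_cnt)
{
	int i, loc = 0;

	for (i = 0; i < n; i++) {
		if (loc == rule_cnt)
			return -EMSGSIZE;	/* caller's buffer is full */
		if (active[i])
			locs[loc++] = i;
	}
	return loc;
}

int main(void)
{
	const int active[5] = { 1, 0, 1, 1, 0 };
	int locs[2];

	printf("rc=%d\n", collect_rules(active, 5, locs, 2));	/* -EMSGSIZE */
	return 0;
}
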
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index c85e0180d96d..1f3a8cf42765 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -834,6 +834,21 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return 0;
}

+static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
+ u16 *smq, u16 *smq_mask)
+{
+ struct nix_cn10k_aq_enq_req *aq_req;
+
+ if (!is_rvu_otx2(rvu)) {
+ aq_req = (struct nix_cn10k_aq_enq_req *)req;
+ *smq = aq_req->sq.smq;
+ *smq_mask = aq_req->sq_mask.smq;
+ } else {
+ *smq = req->sq.smq;
+ *smq_mask = req->sq_mask.smq;
+ }
+}
+
static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
@@ -845,6 +860,7 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
struct rvu_block *block;
struct admin_queue *aq;
struct rvu_pfvf *pfvf;
+ u16 smq, smq_mask;
void *ctx, *mask;
bool ena;
u64 cfg;
@@ -916,13 +932,14 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
if (rc)
return rc;

+ nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
/* Check if SQ pointed SMQ belongs to this PF/VF or not */
if (req->ctype == NIX_AQ_CTYPE_SQ &&
((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
(req->op == NIX_AQ_INSTOP_WRITE &&
- req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
+ req->sq_mask.ena && req->sq.ena && smq_mask))) {
if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
- pcifunc, req->sq.smq))
+ pcifunc, smq))
return NIX_AF_ERR_AQ_ENQUEUE;
}

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 7e318133423a..0ac5ae16308f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2698,6 +2698,9 @@ static int mtk_hwlro_get_fdir_all(struct net_device *dev,
int i;

for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
if (mac->hwlro_ip[i]) {
rule_locs[cnt] = i;
cnt++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 72b61f66df37..cd15d36b1507 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -97,7 +97,6 @@ int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
else if (ip_version == 6) {
int ipv6_size = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);
- struct in6_addr zerov6 = {};

daddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
@@ -105,8 +104,8 @@ int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6);
memcpy(&tun_attr->dst_ip.v6, daddr, ipv6_size);
memcpy(&tun_attr->src_ip.v6, saddr, ipv6_size);
- if (!memcmp(&tun_attr->dst_ip.v6, &zerov6, sizeof(zerov6)) ||
- !memcmp(&tun_attr->src_ip.v6, &zerov6, sizeof(zerov6)))
+ if (ipv6_addr_any(&tun_attr->dst_ip.v6) ||
+ ipv6_addr_any(&tun_attr->src_ip.v6))
return 0;
}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 5e0f7d96aac5..d136360ac6a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -123,18 +123,32 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
return ret;
}

-static void irq_release(struct mlx5_irq *irq)
+/* mlx5_system_free_irq - Free an IRQ
+ * @irq: IRQ to free
+ *
+ * Free the IRQ and other resources such as rmap from the system.
+ * BUT doesn't free or remove reference from mlx5.
+ * This function is very important for the shutdown flow, where we need to
+ * clean up system resources but keep mlx5 objects alive,
+ * see mlx5_irq_table_free_irqs().
+ */
+static void mlx5_system_free_irq(struct mlx5_irq *irq)
{
- struct mlx5_irq_pool *pool = irq->pool;
-
- xa_erase(&pool->irqs, irq->index);
/* free_irq requires that affinity_hint and rmap will be cleared
* before calling it. This is why there is asymmetry with set_rmap
* which should be called after alloc_irq but before request_irq.
*/
irq_update_affinity_hint(irq->irqn, NULL);
- free_cpumask_var(irq->mask);
free_irq(irq->irqn, &irq->nh);
+}
+
+static void irq_release(struct mlx5_irq *irq)
+{
+ struct mlx5_irq_pool *pool = irq->pool;
+
+ xa_erase(&pool->irqs, irq->index);
+ mlx5_system_free_irq(irq);
+ free_cpumask_var(irq->mask);
kfree(irq);
}

@@ -597,7 +611,7 @@ static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool)
unsigned long index;

xa_for_each(&pool->irqs, index, irq)
- free_irq(irq->irqn, &irq->nh);
+ mlx5_system_free_irq(irq);
}

static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a07bcb2f5d2e..1559a4dafd41 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2692,9 +2692,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)

/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
- hrtimer_start(&tx_q->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
- HRTIMER_MODE_REL);
+ stmmac_tx_timer_arm(priv, queue);

__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

@@ -2975,9 +2973,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ u32 tx_coal_timer = priv->tx_coal_timer[queue];
+
+ if (!tx_coal_timer)
+ return;

hrtimer_start(&tx_q->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
+ STMMAC_COAL_TIMER(tx_coal_timer),
HRTIMER_MODE_REL);
}
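
Both changes funnel timer arming through stmmac_tx_timer_arm(), which now treats a zero per-queue coalesce timer as "disabled" and refuses to arm. Centralizing the policy in one helper keeps every call site, including the tx-clean path above, consistent. A sketch of the pattern, independent of the driver:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* Sketch: one arming helper so a "0 = disabled" policy holds everywhere. */
static void demo_tx_timer_arm(struct hrtimer *timer, u32 usecs)
{
	if (!usecs)		/* coalescing disabled: never arm */
		return;

	hrtimer_start(timer, ns_to_ktime((u64)usecs * NSEC_PER_USEC),
		      HRTIMER_MODE_REL);
}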

diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 059d610901d8..fc1458f96e17 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -2628,6 +2628,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
struct r8152 *tp = container_of(napi, struct r8152, napi);
int work_done;

+ if (!budget)
+ return 0;
+
work_done = rx_bottom(tp, budget);

if (work_done < budget) {
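
The early return implements the NAPI contract for a zero budget: callers such as netpoll may invoke poll() with budget == 0, in which case the driver must do no Rx processing and must not complete NAPI. A minimal sketch of a compliant poll callback (names hypothetical):

#include <linux/netdevice.h>

static int demo_rx_process(struct napi_struct *napi, int budget); /* hypothetical */

static int demo_poll(struct napi_struct *napi, int budget)
{
	int work_done;

	if (!budget)		/* netpoll: no Rx work, no completion */
		return 0;

	work_done = demo_rx_process(napi, budget);
	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}
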
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 727b9278b9fe..36c5a41f84e4 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -313,6 +313,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
struct veth_rq *rq = NULL;
+ int ret = NETDEV_TX_OK;
struct net_device *rcv;
int length = skb->len;
bool use_napi = false;
@@ -345,6 +346,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
drop:
atomic64_inc(&priv->dropped);
+ ret = NET_XMIT_DROP;
}

if (use_napi)
@@ -352,7 +354,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)

rcu_read_unlock();

- return NETDEV_TX_OK;
+ return ret;
}

static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
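
With this change a dropped frame is reported as NET_XMIT_DROP instead of an unconditional NETDEV_TX_OK, so upper layers such as qdiscs and tunnels can account the loss; the skb is still consumed either way (NETDEV_TX_BUSY, which requests a retry, stays out of the picture). A sketch of the return-code pattern:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool demo_deliver(struct sk_buff *skb); /* hypothetical */

/* Sketch: consume the skb either way, but report drops upward. */
static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_tx_t ret = NETDEV_TX_OK;

	if (!demo_deliver(skb)) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
	}

	return ret;
}
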
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 8bdc5e043831..3737c1021f88 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -56,8 +56,8 @@
static int led_type __read_mostly = -1;
static unsigned char lastleds; /* LED state from most recent update */
static unsigned int led_heartbeat __read_mostly = 1;
-static unsigned int led_diskio __read_mostly = 1;
-static unsigned int led_lanrxtx __read_mostly = 1;
+static unsigned int led_diskio __read_mostly;
+static unsigned int led_lanrxtx __read_mostly;
static char lcd_text[32] __read_mostly;
static char lcd_text_default[32] __read_mostly;
static int lcd_no_led_support __read_mostly = 0; /* KittyHawk doesn't support LED on its LCD */
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 0d6b5fab2f7e..de07b0837a04 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1698,7 +1698,6 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
struct intel_community_context *cctx;
struct intel_community *community;
struct device *dev = &pdev->dev;
- struct acpi_device *adev = ACPI_COMPANION(dev);
struct intel_pinctrl *pctrl;
acpi_status status;
unsigned int i;
@@ -1766,7 +1765,7 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
if (ret)
return ret;

- status = acpi_install_address_space_handler(adev->handle,
+ status = acpi_install_address_space_handler(ACPI_HANDLE(dev),
community->acpi_space_id,
chv_pinctrl_mmio_access_handler,
NULL, pctrl);
@@ -1783,7 +1782,7 @@ static int chv_pinctrl_remove(struct platform_device *pdev)
struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
const struct intel_community *community = &pctrl->communities[0];

- acpi_remove_address_space_handler(ACPI_COMPANION(&pdev->dev),
+ acpi_remove_address_space_handler(ACPI_HANDLE(&pdev->dev),
community->acpi_space_id,
chv_pinctrl_mmio_access_handler);
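
ACPI_HANDLE(dev) returns the device's acpi_handle directly and yields NULL when there is no ACPI companion, whereas the old ACPI_COMPANION(dev)->handle would dereference a NULL pointer; the remove path had additionally been passing a struct acpi_device * where an acpi_handle was expected. A minimal sketch of the NULL-safe lookup:

#include <linux/acpi.h>
#include <linux/device.h>

/* Sketch: fetch the handle safely; NULL means "no ACPI companion". */
static bool demo_has_acpi_handle(struct device *dev)
{
	return ACPI_HANDLE(dev) != NULL;
}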

diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
index 382793e73a60..30b50920b278 100644
--- a/drivers/platform/mellanox/Kconfig
+++ b/drivers/platform/mellanox/Kconfig
@@ -80,8 +80,8 @@ config MLXBF_PMC

config NVSW_SN2201
tristate "Nvidia SN2201 platform driver support"
- depends on HWMON
- depends on I2C
+ depends on HWMON && I2C
+ depends on ACPI || COMPILE_TEST
select REGMAP_I2C
help
This driver provides support for the Nvidia SN2201 platform.
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index be967d797c28..2d4bbe99959e 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -191,6 +191,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_smgen_events[] = {
};

static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
+ { 0x0, "DISABLE" },
{ 0xa0, "TPIO_DATA_BEAT" },
{ 0xa1, "TDMA_DATA_BEAT" },
{ 0xa2, "MAP_DATA_BEAT" },
@@ -214,6 +215,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
};

static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
+ { 0x0, "DISABLE" },
{ 0xa0, "TPIO_DATA_BEAT" },
{ 0xa1, "TDMA_DATA_BEAT" },
{ 0xa2, "MAP_DATA_BEAT" },
@@ -246,6 +248,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
};

static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
+ { 0x0, "DISABLE" },
{ 0x100, "ECC_SINGLE_ERROR_CNT" },
{ 0x104, "ECC_DOUBLE_ERROR_CNT" },
{ 0x114, "SERR_INJ" },
@@ -258,6 +261,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
};

static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
+ { 0x0, "DISABLE" },
{ 0xc0, "RXREQ_MSS" },
{ 0xc1, "RXDAT_MSS" },
{ 0xc2, "TXRSP_MSS" },
@@ -265,6 +269,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
};

static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
+ { 0x0, "DISABLE" },
{ 0x45, "HNF_REQUESTS" },
{ 0x46, "HNF_REJECTS" },
{ 0x47, "ALL_BUSY" },
@@ -323,6 +328,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
};

static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
+ { 0x0, "DISABLE" },
{ 0x12, "CDN_REQ" },
{ 0x13, "DDN_REQ" },
{ 0x14, "NDN_REQ" },
@@ -892,7 +898,7 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
uint64_t *result)
{
uint32_t perfcfg_offset, perfval_offset;
- uint64_t perfmon_cfg, perfevt, perfctl;
+ uint64_t perfmon_cfg, perfevt;

if (cnt_num >= pmc->block[blk_num].counters)
return -EINVAL;
@@ -904,25 +910,6 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
perfval_offset = perfcfg_offset +
pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE;

- /* Set counter in "read" mode */
- perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
- MLXBF_PMC_PERFCTL);
- perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
- perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
-
- if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
- MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
- return -EFAULT;
-
- /* Check if the counter is enabled */
-
- if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
- MLXBF_PMC_READ_REG_64, &perfctl))
- return -EFAULT;
-
- if (!FIELD_GET(MLXBF_PMC_PERFCTL_EN0, perfctl))
- return -EINVAL;
-
/* Set counter in "read" mode */
perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
MLXBF_PMC_PERFEVT);
@@ -1008,7 +995,7 @@ static ssize_t mlxbf_pmc_counter_show(struct device *dev,
} else
return -EINVAL;

- return sprintf(buf, "0x%llx\n", value);
+ return sysfs_emit(buf, "0x%llx\n", value);
}

/* Store function for "counter" sysfs files */
@@ -1078,13 +1065,13 @@ static ssize_t mlxbf_pmc_event_show(struct device *dev,

err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
if (err)
- return sprintf(buf, "No event being monitored\n");
+ return sysfs_emit(buf, "No event being monitored\n");

evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num);
if (!evt_name)
return -EINVAL;

- return sprintf(buf, "0x%llx: %s\n", evt_num, evt_name);
+ return sysfs_emit(buf, "0x%llx: %s\n", evt_num, evt_name);
}

/* Store function for "event" sysfs files */
@@ -1139,9 +1126,9 @@ static ssize_t mlxbf_pmc_event_list_show(struct device *dev,
return -EINVAL;

for (i = 0, buf[0] = '\0'; i < size; ++i) {
- len += sprintf(e_info, "0x%x: %s\n", events[i].evt_num,
- events[i].evt_name);
- if (len > PAGE_SIZE)
+ len += snprintf(e_info, sizeof(e_info), "0x%x: %s\n",
+ events[i].evt_num, events[i].evt_name);
+ if (len >= PAGE_SIZE)
break;
strcat(buf, e_info);
ret = len;
@@ -1168,7 +1155,7 @@ static ssize_t mlxbf_pmc_enable_show(struct device *dev,

value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);

- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}

/* Store function for "enable" sysfs files - only for l3cache */
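
sysfs_emit() caps output at PAGE_SIZE and sanity-checks that buf is the page-aligned buffer sysfs handed in, which is why it is preferred over raw sprintf() in show() callbacks; the event-list loop likewise moves to snprintf() with a >= PAGE_SIZE cutoff so the concatenated output can never overrun the page. A minimal sketch of a show() callback using the helper:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Sketch: emit a single value, bounded to one page by sysfs_emit(). */
static ssize_t demo_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "0x%x\n", 0xabcd);
}
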
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index d31fe7eed38d..a04ff89a7ec4 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -56,6 +56,7 @@ struct mlxbf_tmfifo;
* @vq: pointer to the virtio virtqueue
* @desc: current descriptor of the pending packet
* @desc_head: head descriptor of the pending packet
+ * @drop_desc: dummy desc for packet dropping
* @cur_len: processed length of the current descriptor
* @rem_len: remaining length of the pending packet
* @pkt_len: total length of the pending packet
@@ -72,6 +73,7 @@ struct mlxbf_tmfifo_vring {
struct virtqueue *vq;
struct vring_desc *desc;
struct vring_desc *desc_head;
+ struct vring_desc drop_desc;
int cur_len;
int rem_len;
u32 pkt_len;
@@ -83,6 +85,14 @@ struct mlxbf_tmfifo_vring {
struct mlxbf_tmfifo *fifo;
};

+/* Check whether vring is in drop mode. */
+#define IS_VRING_DROP(_r) ({ \
+ typeof(_r) (r) = (_r); \
+ (r->desc_head == &r->drop_desc ? true : false); })
+
+/* A stub length used to drop a maximum-length packet. */
+#define VRING_DROP_DESC_MAX_LEN GENMASK(15, 0)
+
/* Interrupt types. */
enum {
MLXBF_TM_RX_LWM_IRQ,
@@ -195,7 +205,7 @@ static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";

/* Maximum L2 header length. */
-#define MLXBF_TMFIFO_NET_L2_OVERHEAD 36
+#define MLXBF_TMFIFO_NET_L2_OVERHEAD (ETH_HLEN + VLAN_HLEN)

/* Supported virtio-net features. */
#define MLXBF_TMFIFO_NET_FEATURES \
@@ -243,6 +253,7 @@ static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
vring->align = SMP_CACHE_BYTES;
vring->index = i;
vring->vdev_id = tm_vdev->vdev.id.device;
+ vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
dev = &tm_vdev->vdev.dev;

size = vring_size(vring->num, vring->align);
@@ -348,7 +359,7 @@ static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
return len;
}

-static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring)
+static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
{
struct vring_desc *desc_head;
u32 len = 0;
@@ -577,19 +588,25 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,

if (vring->cur_len + sizeof(u64) <= len) {
/* The whole word. */
- if (is_rx)
- memcpy(addr + vring->cur_len, &data, sizeof(u64));
- else
- memcpy(&data, addr + vring->cur_len, sizeof(u64));
+ if (!IS_VRING_DROP(vring)) {
+ if (is_rx)
+ memcpy(addr + vring->cur_len, &data,
+ sizeof(u64));
+ else
+ memcpy(&data, addr + vring->cur_len,
+ sizeof(u64));
+ }
vring->cur_len += sizeof(u64);
} else {
/* Leftover bytes. */
- if (is_rx)
- memcpy(addr + vring->cur_len, &data,
- len - vring->cur_len);
- else
- memcpy(&data, addr + vring->cur_len,
- len - vring->cur_len);
+ if (!IS_VRING_DROP(vring)) {
+ if (is_rx)
+ memcpy(addr + vring->cur_len, &data,
+ len - vring->cur_len);
+ else
+ memcpy(&data, addr + vring->cur_len,
+ len - vring->cur_len);
+ }
vring->cur_len = len;
}

@@ -606,13 +623,14 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
* flag is set.
*/
static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
- struct vring_desc *desc,
+ struct vring_desc **desc,
bool is_rx, bool *vring_change)
{
struct mlxbf_tmfifo *fifo = vring->fifo;
struct virtio_net_config *config;
struct mlxbf_tmfifo_msg_hdr hdr;
int vdev_id, hdr_len;
+ bool drop_rx = false;

/* Read/Write packet header. */
if (is_rx) {
@@ -632,8 +650,8 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
if (ntohs(hdr.len) >
__virtio16_to_cpu(virtio_legacy_is_little_endian(),
config->mtu) +
- MLXBF_TMFIFO_NET_L2_OVERHEAD)
- return;
+ MLXBF_TMFIFO_NET_L2_OVERHEAD)
+ drop_rx = true;
} else {
vdev_id = VIRTIO_ID_CONSOLE;
hdr_len = 0;
@@ -648,16 +666,25 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,

if (!tm_dev2)
return;
- vring->desc = desc;
+ vring->desc = *desc;
vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
*vring_change = true;
}
+
+ if (drop_rx && !IS_VRING_DROP(vring)) {
+ if (vring->desc_head)
+ mlxbf_tmfifo_release_pkt(vring);
+ *desc = &vring->drop_desc;
+ vring->desc_head = *desc;
+ vring->desc = *desc;
+ }
+
vring->pkt_len = ntohs(hdr.len) + hdr_len;
} else {
/* Network virtio has an extra header. */
hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
sizeof(struct virtio_net_hdr) : 0;
- vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc);
+ vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
hdr.len = htons(vring->pkt_len - hdr_len);
@@ -690,15 +717,23 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
/* Get the descriptor of the next packet. */
if (!vring->desc) {
desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
- if (!desc)
- return false;
+ if (!desc) {
+ /* Drop the next Rx packet to avoid getting stuck. */
+ if (is_rx) {
+ desc = &vring->drop_desc;
+ vring->desc_head = desc;
+ vring->desc = desc;
+ } else {
+ return false;
+ }
+ }
} else {
desc = vring->desc;
}

/* Beginning of a packet. Start to Rx/Tx packet header. */
if (vring->pkt_len == 0) {
- mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change);
+ mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
(*avail)--;

/* Return if new packet is for another ring. */
@@ -724,17 +759,24 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
vring->rem_len -= len;

/* Get the next desc on the chain. */
- if (vring->rem_len > 0 &&
+ if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
idx = virtio16_to_cpu(vdev, desc->next);
desc = &vr->desc[idx];
goto mlxbf_tmfifo_desc_done;
}

- /* Done and release the pending packet. */
- mlxbf_tmfifo_release_pending_pkt(vring);
+ /* Done and release the packet. */
desc = NULL;
fifo->vring[is_rx] = NULL;
+ if (!IS_VRING_DROP(vring)) {
+ mlxbf_tmfifo_release_pkt(vring);
+ } else {
+ vring->pkt_len = 0;
+ vring->desc_head = NULL;
+ vring->desc = NULL;
+ return false;
+ }

/*
* Make sure the load/store are in order before
@@ -914,7 +956,7 @@ static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)

/* Release the pending packet. */
if (vring->desc)
- mlxbf_tmfifo_release_pending_pkt(vring);
+ mlxbf_tmfifo_release_pkt(vring);
vq = vring->vq;
if (vq) {
vring->vq = NULL;
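
When no Rx descriptor is available, or an incoming packet exceeds the MTU bound, the vring is parked on the dummy drop_desc: FIFO words are still consumed, which keeps the byte stream framed correctly, but nothing is copied into guest memory. A reduced sketch of the "consume but do not store" step (types hypothetical):

#include <linux/string.h>
#include <linux/types.h>

struct demo_vring {		/* hypothetical, for illustration */
	u8 *buf;
	int cur_len;
	bool dropping;
};

/* Sketch: always advance the cursor; only copy when not dropping. */
static void demo_consume_word(struct demo_vring *vr, u64 data)
{
	if (!vr->dropping)
		memcpy(vr->buf + vr->cur_len, &data, sizeof(data));

	vr->cur_len += sizeof(data);
}
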
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 2837b4ce8053..2826fc216d29 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -422,13 +422,14 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
struct atmel_tcb_pwm_chip *tcbpwm;
const struct atmel_tcb_config *config;
struct device_node *np = pdev->dev.of_node;
- struct regmap *regmap;
- struct clk *clk, *gclk = NULL;
- struct clk *slow_clk;
char clk_name[] = "t0_clk";
int err;
int channel;

+ tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
+ if (tcbpwm == NULL)
+ return -ENOMEM;
+
err = of_property_read_u32(np, "reg", &channel);
if (err < 0) {
dev_err(&pdev->dev,
@@ -437,49 +438,43 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
return err;
}

- regmap = syscon_node_to_regmap(np->parent);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
+ tcbpwm->regmap = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(tcbpwm->regmap))
+ return PTR_ERR(tcbpwm->regmap);

- slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
- if (IS_ERR(slow_clk))
- return PTR_ERR(slow_clk);
+ tcbpwm->slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
+ if (IS_ERR(tcbpwm->slow_clk))
+ return PTR_ERR(tcbpwm->slow_clk);

clk_name[1] += channel;
- clk = of_clk_get_by_name(np->parent, clk_name);
- if (IS_ERR(clk))
- clk = of_clk_get_by_name(np->parent, "t0_clk");
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ tcbpwm->clk = of_clk_get_by_name(np->parent, clk_name);
+ if (IS_ERR(tcbpwm->clk))
+ tcbpwm->clk = of_clk_get_by_name(np->parent, "t0_clk");
+ if (IS_ERR(tcbpwm->clk)) {
+ err = PTR_ERR(tcbpwm->clk);
+ goto err_slow_clk;
+ }

match = of_match_node(atmel_tcb_of_match, np->parent);
config = match->data;

if (config->has_gclk) {
- gclk = of_clk_get_by_name(np->parent, "gclk");
- if (IS_ERR(gclk))
- return PTR_ERR(gclk);
- }
-
- tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
- if (tcbpwm == NULL) {
- err = -ENOMEM;
- goto err_slow_clk;
+ tcbpwm->gclk = of_clk_get_by_name(np->parent, "gclk");
+ if (IS_ERR(tcbpwm->gclk)) {
+ err = PTR_ERR(tcbpwm->gclk);
+ goto err_clk;
+ }
}

tcbpwm->chip.dev = &pdev->dev;
tcbpwm->chip.ops = &atmel_tcb_pwm_ops;
tcbpwm->chip.npwm = NPWM;
tcbpwm->channel = channel;
- tcbpwm->regmap = regmap;
- tcbpwm->clk = clk;
- tcbpwm->gclk = gclk;
- tcbpwm->slow_clk = slow_clk;
tcbpwm->width = config->counter_width;

- err = clk_prepare_enable(slow_clk);
+ err = clk_prepare_enable(tcbpwm->slow_clk);
if (err)
- goto err_slow_clk;
+ goto err_gclk;

spin_lock_init(&tcbpwm->lock);

@@ -494,23 +489,28 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
err_disable_clk:
clk_disable_unprepare(tcbpwm->slow_clk);

+err_gclk:
+ clk_put(tcbpwm->gclk);
+
+err_clk:
+ clk_put(tcbpwm->clk);
+
err_slow_clk:
- clk_put(slow_clk);
+ clk_put(tcbpwm->slow_clk);

return err;
}

-static int atmel_tcb_pwm_remove(struct platform_device *pdev)
+static void atmel_tcb_pwm_remove(struct platform_device *pdev)
{
struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);

pwmchip_remove(&tcbpwm->chip);

clk_disable_unprepare(tcbpwm->slow_clk);
- clk_put(tcbpwm->slow_clk);
+ clk_put(tcbpwm->gclk);
clk_put(tcbpwm->clk);
-
- return 0;
+ clk_put(tcbpwm->slow_clk);
}

static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
@@ -564,7 +564,7 @@ static struct platform_driver atmel_tcb_pwm_driver = {
.pm = &atmel_tcb_pwm_pm_ops,
},
.probe = atmel_tcb_pwm_probe,
- .remove = atmel_tcb_pwm_remove,
+ .remove_new = atmel_tcb_pwm_remove,
};
module_platform_driver(atmel_tcb_pwm_driver);
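
Allocating tcbpwm up front lets each clock lookup store straight into the driver struct, and the error path now unwinds the clocks in reverse order of acquisition; .remove_new is the void-returning remove callback, reflecting that an error code from remove was always ignored by the core. A generic sketch of goto-based unwinding (all names hypothetical):

int demo_get_a(void), demo_get_b(void), demo_get_c(void);	/* hypothetical */
void demo_put_a(void), demo_put_b(void);

/* Sketch: acquire in order, release in reverse on failure. */
static int demo_probe(void)
{
	int err;

	err = demo_get_a();
	if (err)
		return err;

	err = demo_get_b();
	if (err)
		goto err_a;

	err = demo_get_c();
	if (err)
		goto err_b;

	return 0;

err_b:
	demo_put_b();
err_a:
	demo_put_a();
	return err;
}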

diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 86a0ea0f6955..806f0bb3ad6d 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -51,10 +51,10 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (duty_cycles > 255)
duty_cycles = 255;

- val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+ val = readl(lpc32xx->base);
val &= ~0xFFFF;
val |= (period_cycles << 8) | duty_cycles;
- writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+ writel(val, lpc32xx->base);

return 0;
}
@@ -69,9 +69,9 @@ static int lpc32xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
if (ret)
return ret;

- val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+ val = readl(lpc32xx->base);
val |= PWM_ENABLE;
- writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+ writel(val, lpc32xx->base);

return 0;
}
@@ -81,9 +81,9 @@ static void lpc32xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip);
u32 val;

- val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+ val = readl(lpc32xx->base);
val &= ~PWM_ENABLE;
- writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+ writel(val, lpc32xx->base);

clk_disable_unprepare(lpc32xx->clk);
}
@@ -141,9 +141,9 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
lpc32xx->chip.npwm = 1;

/* If PWM is disabled, configure the output to the default value */
- val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
+ val = readl(lpc32xx->base);
val &= ~PWM_PIN_LEVEL;
- writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
+ writel(val, lpc32xx->base);

ret = devm_pwmchip_add(&pdev->dev, &lpc32xx->chip);
if (ret < 0) {
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index f94b43ce9a65..28e34d155334 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -441,6 +441,7 @@ static int zcdn_create(const char *name)
ZCRYPT_NAME "_%d", (int)MINOR(devt));
nodename[sizeof(nodename) - 1] = '\0';
if (dev_set_name(&zcdndev->device, nodename)) {
+ kfree(zcdndev);
rc = -EINVAL;
goto unlockout;
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 64734d6e8ccb..07fbaa452d8a 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -3093,8 +3093,6 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_user, vha, 0x7082,
"Registered for DIF/DIX type 1 and 3 protection.\n");
- if (ql2xenabledif == 1)
- prot = SHOST_DIX_TYPE0_PROTECTION;
scsi_host_set_prot(vha->host,
prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index d7e8454304ce..ab637324262f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -18,7 +18,7 @@
* | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
- * | | | 0x302d,0x3033 |
+ * | | | 0x302e,0x3033 |
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 5a2f629d18e6..7d282906598f 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -458,6 +458,7 @@ static inline be_id_t port_id_to_be_id(port_id_t port_id)
}

struct tmf_arg {
+ struct list_head tmf_elem;
struct qla_qpair *qpair;
struct fc_port *fcport;
struct scsi_qla_host *vha;
@@ -2534,7 +2535,6 @@ enum rscn_addr_format {
typedef struct fc_port {
struct list_head list;
struct scsi_qla_host *vha;
- struct list_head tmf_pending;

unsigned int conf_compl_supported:1;
unsigned int deleted:2;
@@ -2555,9 +2555,6 @@ typedef struct fc_port {
unsigned int do_prli_nvme:1;

uint8_t nvme_flag;
- uint8_t active_tmf;
-#define MAX_ACTIVE_TMF 8
-
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
port_id_t d_id;
@@ -3743,6 +3740,16 @@ struct qla_fw_resources {
u16 pad;
};

+struct qla_fw_res {
+ u16 iocb_total;
+ u16 iocb_limit;
+ atomic_t iocb_used;
+
+ u16 exch_total;
+ u16 exch_limit;
+ atomic_t exch_used;
+};
+
#define QLA_IOCB_PCT_LIMIT 95

/*Queue pair data structure */
@@ -4370,7 +4377,6 @@ struct qla_hw_data {
uint8_t aen_mbx_count;
atomic_t num_pend_mbx_stage1;
atomic_t num_pend_mbx_stage2;
- atomic_t num_pend_mbx_stage3;
uint16_t frame_payload_size;

uint32_t login_retry_count;
@@ -4640,6 +4646,8 @@ struct qla_hw_data {
uint32_t flt_region_aux_img_status_sec;
};
uint8_t active_image;
+ uint8_t active_tmf;
+#define MAX_ACTIVE_TMF 8

/* Needed for BEACON */
uint16_t beacon_blink_led;
@@ -4654,6 +4662,8 @@ struct qla_hw_data {

struct qla_msix_entry *msix_entries;

+ struct list_head tmf_pending;
+ struct list_head tmf_active;
struct list_head vp_list; /* list of VP */
unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) /
sizeof(unsigned long)];
@@ -4782,6 +4792,7 @@ struct qla_hw_data {
spinlock_t sadb_lock; /* protects list */
struct els_reject elsrej;
u8 edif_post_stop_cnt_down;
+ struct qla_fw_res fwres ____cacheline_aligned;
};

#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 1925cc6897b6..f060e593685d 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -276,6 +276,16 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)

seq_printf(s, "estimate exchange used[%d] high water limit [%d] n",
exch_used, ha->base_qpair->fwres.exch_limit);
+
+ if (ql2xenforce_iocb_limit == 2) {
+ iocbs_used = atomic_read(&ha->fwres.iocb_used);
+ exch_used = atomic_read(&ha->fwres.exch_used);
+ seq_printf(s, " estimate iocb2 used [%d] high water limit [%d]\n",
+ iocbs_used, ha->fwres.iocb_limit);
+
+ seq_printf(s, " estimate exchange2 used[%d] high water limit [%d] \n",
+ exch_used, ha->fwres.exch_limit);
+ }
}

return 0;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 316122709b0e..2e4537f9e5b5 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -143,6 +143,7 @@ void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess);
void qla_edif_clear_appdata(struct scsi_qla_host *vha,
struct fc_port *fcport);
const char *sc_to_str(uint16_t cmd);
+void qla_adjust_iocb_limit(scsi_qla_host_t *vha);

/*
* Global Data in qla_os.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 30bbf33e3a6a..36abdb0de169 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -502,6 +502,7 @@ static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
struct fc_port *fcport = ea->fcport;
+ unsigned long flags;

ql_dbg(ql_dbg_disc, vha, 0x20d2,
"%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
@@ -516,9 +517,15 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
ql_dbg(ql_dbg_disc, vha, 0x2066,
"%s %8phC: adisc fail: post delete\n",
__func__, ea->fcport->port_name);
+
+ spin_lock_irqsave(&vha->work_lock, flags);
/* deleted = 0 & logout_on_delete = force fw cleanup */
- fcport->deleted = 0;
+ if (fcport->deleted == QLA_SESS_DELETED)
+ fcport->deleted = 0;
+
fcport->logout_on_delete = 1;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
qlt_schedule_sess_for_deletion(ea->fcport);
return;
}
@@ -1128,7 +1135,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
u16 *mb;

if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
- return rval;
+ goto done;

ql_dbg(ql_dbg_disc, vha, 0x20d9,
"Async-gnlist WWPN %8phC \n", fcport->port_name);
@@ -1182,8 +1189,9 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
done_free_sp:
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ fcport->flags &= ~(FCF_ASYNC_SENT);
done:
- fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
+ fcport->flags &= ~(FCF_ASYNC_ACTIVE);
return rval;
}

@@ -1440,7 +1448,6 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)

spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
ea->fcport->login_gen++;
- ea->fcport->deleted = 0;
ea->fcport->logout_on_delete = 1;

if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
@@ -1997,12 +2004,11 @@ qla2x00_tmf_iocb_timeout(void *data)
int rc, h;
unsigned long flags;

- if (sp->type == SRB_MARKER) {
- complete(&tmf->u.tmf.comp);
- return;
- }
+ if (sp->type == SRB_MARKER)
+ rc = QLA_FUNCTION_FAILED;
+ else
+ rc = qla24xx_async_abort_cmd(sp, false);

- rc = qla24xx_async_abort_cmd(sp, false);
if (rc) {
spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
@@ -2033,10 +2039,14 @@ static void qla_marker_sp_done(srb_t *sp, int res)
complete(&tmf->u.tmf.comp);
}

-#define START_SP_W_RETRIES(_sp, _rval) \
+#define START_SP_W_RETRIES(_sp, _rval, _chip_gen, _login_gen) \
{\
int cnt = 5; \
do { \
+ if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\
+ _rval = EINVAL; \
+ break; \
+ } \
_rval = qla2x00_start_sp(_sp); \
if (_rval == EAGAIN) \
msleep(1); \
@@ -2059,6 +2069,7 @@ qla26xx_marker(struct tmf_arg *arg)
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
fc_port_t *fcport = arg->fcport;
+ u32 chip_gen, login_gen;

if (TMF_NOT_READY(arg->fcport)) {
ql_dbg(ql_dbg_taskm, vha, 0x8039,
@@ -2068,6 +2079,9 @@ qla26xx_marker(struct tmf_arg *arg)
return QLA_SUSPENDED;
}

+ chip_gen = vha->hw->chip_reset;
+ login_gen = fcport->login_gen;
+
/* ref: INIT */
sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
if (!sp)
@@ -2085,7 +2099,7 @@ qla26xx_marker(struct tmf_arg *arg)
tm_iocb->u.tmf.loop_id = fcport->loop_id;
tm_iocb->u.tmf.vp_index = vha->vp_idx;

- START_SP_W_RETRIES(sp, rval);
+ START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);

ql_dbg(ql_dbg_taskm, vha, 0x8006,
"Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
@@ -2124,6 +2138,17 @@ static void qla2x00_tmf_sp_done(srb_t *sp, int res)
complete(&tmf->u.tmf.comp);
}

+static int qla_tmf_wait(struct tmf_arg *arg)
+{
+ /* There are only two types of error handling that reach here: LUN or target reset. */
+ if (arg->flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET | TCF_CLEAR_TASK_SET))
+ return qla2x00_eh_wait_for_pending_commands(arg->vha,
+ arg->fcport->d_id.b24, arg->lun, WAIT_LUN);
+ else
+ return qla2x00_eh_wait_for_pending_commands(arg->vha,
+ arg->fcport->d_id.b24, arg->lun, WAIT_TARGET);
+}
+
static int
__qla2x00_async_tm_cmd(struct tmf_arg *arg)
{
@@ -2131,8 +2156,9 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
struct srb_iocb *tm_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
-
fc_port_t *fcport = arg->fcport;
+ u32 chip_gen, login_gen;
+ u64 jif;

if (TMF_NOT_READY(arg->fcport)) {
ql_dbg(ql_dbg_taskm, vha, 0x8032,
@@ -2142,6 +2168,9 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
return QLA_SUSPENDED;
}

+ chip_gen = vha->hw->chip_reset;
+ login_gen = fcport->login_gen;
+
/* ref: INIT */
sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
if (!sp)
@@ -2159,7 +2188,7 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
tm_iocb->u.tmf.flags = arg->flags;
tm_iocb->u.tmf.lun = arg->lun;

- START_SP_W_RETRIES(sp, rval);
+ START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);

ql_dbg(ql_dbg_taskm, vha, 0x802f,
"Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n",
@@ -2177,8 +2206,26 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
"TM IOCB failed (%x).\n", rval);
}

- if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw))
- rval = qla26xx_marker(arg);
+ if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
+ jif = jiffies;
+ if (qla_tmf_wait(arg)) {
+ ql_log(ql_log_info, vha, 0x803e,
+ "Waited %u ms Nexus=%ld:%06x:%llu.\n",
+ jiffies_to_msecs(jiffies - jif), vha->host_no,
+ fcport->d_id.b24, arg->lun);
+ }
+
+ if (chip_gen == vha->hw->chip_reset && login_gen == fcport->login_gen) {
+ rval = qla26xx_marker(arg);
+ } else {
+ ql_log(ql_log_info, vha, 0x803e,
+ "Skip Marker due to disruption. Nexus=%ld:%06x:%llu.\n",
+ vha->host_no, fcport->d_id.b24, arg->lun);
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+ if (tm_iocb->u.tmf.data)
+ rval = tm_iocb->u.tmf.data;

done_free_sp:
/* ref: INIT */
@@ -2187,30 +2234,42 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
return rval;
}

-static void qla_put_tmf(fc_port_t *fcport)
+static void qla_put_tmf(struct tmf_arg *arg)
{
- struct scsi_qla_host *vha = fcport->vha;
+ struct scsi_qla_host *vha = arg->vha;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;

spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- fcport->active_tmf--;
+ ha->active_tmf--;
+ list_del(&arg->tmf_elem);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

static
-int qla_get_tmf(fc_port_t *fcport)
+int qla_get_tmf(struct tmf_arg *arg)
{
- struct scsi_qla_host *vha = fcport->vha;
+ struct scsi_qla_host *vha = arg->vha;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
+ fc_port_t *fcport = arg->fcport;
int rc = 0;
- LIST_HEAD(tmf_elem);
+ struct tmf_arg *t;

spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- list_add_tail(&tmf_elem, &fcport->tmf_pending);
+ list_for_each_entry(t, &ha->tmf_active, tmf_elem) {
+ if (t->fcport == arg->fcport && t->lun == arg->lun) {
+ /* reject duplicate TMF */
+ ql_log(ql_log_warn, vha, 0x802c,
+ "found duplicate TMF. Nexus=%ld:%06x:%llu.\n",
+ vha->host_no, fcport->d_id.b24, arg->lun);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return -EINVAL;
+ }
+ }

- while (fcport->active_tmf >= MAX_ACTIVE_TMF) {
+ list_add_tail(&arg->tmf_elem, &ha->tmf_pending);
+ while (ha->active_tmf >= MAX_ACTIVE_TMF) {
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

msleep(1);
@@ -2222,15 +2281,17 @@ int qla_get_tmf(fc_port_t *fcport)
rc = EIO;
break;
}
- if (fcport->active_tmf < MAX_ACTIVE_TMF &&
- list_is_first(&tmf_elem, &fcport->tmf_pending))
+ if (ha->active_tmf < MAX_ACTIVE_TMF &&
+ list_is_first(&arg->tmf_elem, &ha->tmf_pending))
break;
}

- list_del(&tmf_elem);
+ list_del(&arg->tmf_elem);

- if (!rc)
- fcport->active_tmf++;
+ if (!rc) {
+ ha->active_tmf++;
+ list_add_tail(&arg->tmf_elem, &ha->tmf_active);
+ }

spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

@@ -2242,9 +2303,8 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
uint32_t tag)
{
struct scsi_qla_host *vha = fcport->vha;
- struct qla_qpair *qpair;
struct tmf_arg a;
- int i, rval = QLA_SUCCESS;
+ int rval = QLA_SUCCESS;

if (TMF_NOT_READY(fcport))
return QLA_SUSPENDED;
@@ -2252,47 +2312,22 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
a.vha = fcport->vha;
a.fcport = fcport;
a.lun = lun;
+ a.flags = flags;
+ INIT_LIST_HEAD(&a.tmf_elem);
+
if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
a.modifier = MK_SYNC_ID_LUN;
-
- if (qla_get_tmf(fcport))
- return QLA_FUNCTION_FAILED;
} else {
a.modifier = MK_SYNC_ID;
}

- if (vha->hw->mqenable) {
- for (i = 0; i < vha->hw->num_qpairs; i++) {
- qpair = vha->hw->queue_pair_map[i];
- if (!qpair)
- continue;
-
- if (TMF_NOT_READY(fcport)) {
- ql_log(ql_log_warn, vha, 0x8026,
- "Unable to send TM due to disruption.\n");
- rval = QLA_SUSPENDED;
- break;
- }
-
- a.qpair = qpair;
- a.flags = flags|TCF_NOTMCMD_TO_TARGET;
- rval = __qla2x00_async_tm_cmd(&a);
- if (rval)
- break;
- }
- }
-
- if (rval)
- goto bailout;
+ if (qla_get_tmf(&a))
+ return QLA_FUNCTION_FAILED;

a.qpair = vha->hw->base_qpair;
- a.flags = flags;
rval = __qla2x00_async_tm_cmd(&a);

-bailout:
- if (a.modifier == MK_SYNC_ID_LUN)
- qla_put_tmf(fcport);
-
+ qla_put_tmf(&a);
return rval;
}

@@ -4148,39 +4183,61 @@ qla24xx_detect_sfp(scsi_qla_host_t *vha)
return ha->flags.lr_detected;
}

-void qla_init_iocb_limit(scsi_qla_host_t *vha)
+static void __qla_adjust_iocb_limit(struct qla_qpair *qpair)
{
- u16 i, num_qps;
- u32 limit;
- struct qla_hw_data *ha = vha->hw;
+ u8 num_qps;
+ u16 limit;
+ struct qla_hw_data *ha = qpair->vha->hw;

num_qps = ha->num_qpairs + 1;
limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;

- ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
- ha->base_qpair->fwres.iocbs_limit = limit;
- ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
- ha->base_qpair->fwres.iocbs_used = 0;
+ qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
+ qpair->fwres.iocbs_limit = limit;
+ qpair->fwres.iocbs_qp_limit = limit / num_qps;
+
+ qpair->fwres.exch_total = ha->orig_fw_xcb_count;
+ qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
+ QLA_IOCB_PCT_LIMIT) / 100;
+}
+
+void qla_init_iocb_limit(scsi_qla_host_t *vha)
+{
+ u8 i;
+ struct qla_hw_data *ha = vha->hw;

- ha->base_qpair->fwres.exch_total = ha->orig_fw_xcb_count;
- ha->base_qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
- QLA_IOCB_PCT_LIMIT) / 100;
+ __qla_adjust_iocb_limit(ha->base_qpair);
+ ha->base_qpair->fwres.iocbs_used = 0;
ha->base_qpair->fwres.exch_used = 0;

for (i = 0; i < ha->max_qpairs; i++) {
if (ha->queue_pair_map[i]) {
- ha->queue_pair_map[i]->fwres.iocbs_total =
- ha->orig_fw_iocb_count;
- ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
- ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
- limit / num_qps;
+ __qla_adjust_iocb_limit(ha->queue_pair_map[i]);
ha->queue_pair_map[i]->fwres.iocbs_used = 0;
- ha->queue_pair_map[i]->fwres.exch_total = ha->orig_fw_xcb_count;
- ha->queue_pair_map[i]->fwres.exch_limit =
- (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
ha->queue_pair_map[i]->fwres.exch_used = 0;
}
}
+
+ ha->fwres.iocb_total = ha->orig_fw_iocb_count;
+ ha->fwres.iocb_limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
+ ha->fwres.exch_total = ha->orig_fw_xcb_count;
+ ha->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
+
+ atomic_set(&ha->fwres.iocb_used, 0);
+ atomic_set(&ha->fwres.exch_used, 0);
+}
+
+void qla_adjust_iocb_limit(scsi_qla_host_t *vha)
+{
+ u8 i;
+ struct qla_hw_data *ha = vha->hw;
+
+ __qla_adjust_iocb_limit(ha->base_qpair);
+
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i])
+ __qla_adjust_iocb_limit(ha->queue_pair_map[i]);
+ }
}

/**
@@ -4778,15 +4835,16 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
if (ha->flags.edif_enabled)
mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD);

+ QLA_FW_STARTED(ha);
rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
if (rval) {
+ QLA_FW_STOPPED(ha);
ql_log(ql_log_fatal, vha, 0x00d2,
"Init Firmware **** FAILED ****.\n");
} else {
ql_dbg(ql_dbg_init, vha, 0x00d3,
"Init Firmware -- success.\n");
- QLA_FW_STARTED(ha);
vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
}

@@ -5528,7 +5586,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
- INIT_LIST_HEAD(&fcport->tmf_pending);

INIT_LIST_HEAD(&fcport->sess_cmd_list);
spin_lock_init(&fcport->sess_cmd_lock);
@@ -6116,6 +6173,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
+ unsigned long flags;
+
if (IS_SW_RESV_ADDR(fcport->d_id))
return;

@@ -6125,7 +6184,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
fcport->login_retry = vha->hw->login_retry_count;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+
+ spin_lock_irqsave(&vha->work_lock, flags);
fcport->deleted = 0;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
if (vha->hw->current_topology == ISP_CFG_NL)
fcport->logout_on_delete = 0;
else
@@ -7391,14 +7454,15 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
}

/* purge MBox commands */
- if (atomic_read(&ha->num_pend_mbx_stage3)) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) {
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
complete(&ha->mbx_intr_comp);
}
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);

i = 0;
- while (atomic_read(&ha->num_pend_mbx_stage3) ||
- atomic_read(&ha->num_pend_mbx_stage2) ||
+ while (atomic_read(&ha->num_pend_mbx_stage2) ||
atomic_read(&ha->num_pend_mbx_stage1)) {
msleep(20);
i++;
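
START_SP_W_RETRIES now snapshots the chip-reset and login generation counters before issuing the command and bails out if either has moved, a lock-free way to notice that an adapter reset or a relogin invalidated the request in flight; the same snapshots later gate whether the marker is sent. A reduced sketch of the generation-check idiom (types hypothetical):

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_ctx {		/* hypothetical, for illustration */
	u32 chip_reset;
	u32 login_gen;
};

static int demo_issue_cmd(struct demo_ctx *c);	/* hypothetical */

/* Sketch: refuse to start if the world changed since the snapshot. */
static int demo_start_checked(struct demo_ctx *c, u32 chip_gen, u32 login_gen)
{
	if (chip_gen != READ_ONCE(c->chip_reset) ||
	    login_gen != READ_ONCE(c->login_gen))
		return -EINVAL;

	return demo_issue_cmd(c);
}
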
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index a034699e58ae..a7b5d1114682 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -386,6 +386,7 @@ enum {
RESOURCE_IOCB = BIT_0,
RESOURCE_EXCH = BIT_1, /* exchange */
RESOURCE_FORCE = BIT_2,
+ RESOURCE_HA = BIT_3,
};

static inline int
@@ -393,7 +394,7 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
u16 iocbs_used, i;
u16 exch_used;
- struct qla_hw_data *ha = qp->vha->hw;
+ struct qla_hw_data *ha = qp->hw;

if (!ql2xenforce_iocb_limit) {
iores->res_type = RESOURCE_NONE;
@@ -428,15 +429,69 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
return -ENOSPC;
}
}
+
+ if (ql2xenforce_iocb_limit == 2) {
+ if ((iores->iocb_cnt + atomic_read(&ha->fwres.iocb_used)) >=
+ ha->fwres.iocb_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+
+ if (iores->res_type & RESOURCE_EXCH) {
+ if ((iores->exch_cnt + atomic_read(&ha->fwres.exch_used)) >=
+ ha->fwres.exch_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+ }
+ }
+
force:
qp->fwres.iocbs_used += iores->iocb_cnt;
qp->fwres.exch_used += iores->exch_cnt;
+ if (ql2xenforce_iocb_limit == 2) {
+ atomic_add(iores->iocb_cnt, &ha->fwres.iocb_used);
+ atomic_add(iores->exch_cnt, &ha->fwres.exch_used);
+ iores->res_type |= RESOURCE_HA;
+ }
return 0;
}

+/*
+ * decrement to zero. This routine will not decrement below zero
+ * @v: pointer of type atomic_t
+ * @amount: amount to decrement from v
+ */
+static void qla_atomic_dtz(atomic_t *v, int amount)
+{
+ int c, old, dec;
+
+ c = atomic_read(v);
+ for (;;) {
+ dec = c - amount;
+ if (unlikely(dec < 0))
+ dec = 0;
+
+ old = atomic_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+}
+
static inline void
qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
+ struct qla_hw_data *ha = qp->hw;
+
+ if (iores->res_type & RESOURCE_HA) {
+ if (iores->res_type & RESOURCE_IOCB)
+ qla_atomic_dtz(&ha->fwres.iocb_used, iores->iocb_cnt);
+
+ if (iores->res_type & RESOURCE_EXCH)
+ qla_atomic_dtz(&ha->fwres.exch_used, iores->exch_cnt);
+ }
+
if (iores->res_type & RESOURCE_IOCB) {
if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
qp->fwres.iocbs_used -= iores->iocb_cnt;
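
qla_atomic_dtz() is a cmpxchg loop that saturates at zero, so releasing more resource units than are recorded (e.g. after a reset re-initialized the counters) cannot wrap the count negative. The same loop written with atomic_try_cmpxchg(), the form current kernel code tends to prefer:

#include <linux/atomic.h>

/* Sketch: subtract 'amount' but clamp the result at zero. */
static void demo_atomic_dec_to_zero(atomic_t *v, int amount)
{
	int c = atomic_read(v);
	int dec;

	do {
		dec = c - amount;
		if (dec < 0)
			dec = 0;	/* clamp instead of wrapping */
	} while (!atomic_try_cmpxchg(v, &c, dec));
}
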
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c9a686f06d29..9e524d52dc86 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -3887,6 +3887,7 @@ qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
{
mrk->entry_type = MARKER_TYPE;
mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier;
+ mrk->handle = make_handle(sp->qpair->req->id, sp->handle);
if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) {
mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id);
int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index b41d604ca9bc..0111249cc877 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1121,8 +1121,12 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
unsigned long flags;
fc_port_t *fcport = NULL;

- if (!vha->hw->flags.fw_started)
+ if (!vha->hw->flags.fw_started) {
+ ql_log(ql_log_warn, vha, 0x50ff,
+ "Dropping AEN - %04x %04x %04x %04x.\n",
+ mb[0], mb[1], mb[2], mb[3]);
return;
+ }

/* Setup to process RIO completion. */
handle_cnt = 0;
@@ -2539,7 +2543,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
case CS_PORT_BUSY:
case CS_INCOMPLETE:
case CS_PORT_UNAVAILABLE:
- case CS_TIMEOUT:
case CS_RESET:
if (atomic_read(&fcport->state) == FCS_ONLINE) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 359595a64664..f794f4363a38 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -273,7 +273,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock, flags);

wait_time = jiffies;
- atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
ql_dbg(ql_dbg_mbx, vha, 0x117a,
@@ -290,7 +289,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
atomic_dec(&ha->num_pend_mbx_stage2);
- atomic_dec(&ha->num_pend_mbx_stage3);
rval = QLA_ABORTED;
goto premature_exit;
}
@@ -302,11 +300,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
atomic_dec(&ha->num_pend_mbx_stage2);
- atomic_dec(&ha->num_pend_mbx_stage3);
rval = QLA_ABORTED;
goto premature_exit;
}
- atomic_dec(&ha->num_pend_mbx_stage3);

if (time_after(jiffies, wait_time + 5 * HZ))
ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
@@ -2213,6 +2209,9 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
"Entered %s.\n", __func__);

+ if (!ha->flags.fw_started)
+ return QLA_FUNCTION_FAILED;
+
mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
mcp->out_mb = MBX_0;
if (IS_FWI2_CAPABLE(vha->hw))
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 57545d5e82b9..c9a6fc882a80 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -132,6 +132,7 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
"Failed to allocate qpair\n");
return -EINVAL;
}
+ qla_adjust_iocb_limit(vha);
}
*handle = qpair;

@@ -663,7 +664,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,

rval = qla2x00_start_nvme_mq(sp);
if (rval != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x212d,
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d,
"qla2x00_start_nvme_mq failed = %d\n", rval);
sp->priv = NULL;
priv->sp = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ed70eb884786..78f7cd16967f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -44,10 +44,11 @@ module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
"Set this to take full dump on MPI hang.");

-int ql2xenforce_iocb_limit = 1;
+int ql2xenforce_iocb_limit = 2;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
- "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
+ "Enforce IOCB throttling, to avoid FW congestion. (default: 2) "
+ "1: track usage per queue, 2: track usage per adapter");

/*
* CT6 CTX allocation cache
@@ -1478,8 +1479,9 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
goto eh_reset_failed;
}
err = 3;
- if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
- sdev->lun, WAIT_LUN) != QLA_SUCCESS) {
+ if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24,
+ cmd->device->lun,
+ WAIT_LUN) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800d,
"wait for pending cmds failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
@@ -1545,8 +1547,8 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
goto eh_reset_failed;
}
err = 3;
- if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
- 0, WAIT_TARGET) != QLA_SUCCESS) {
+ if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24, 0,
+ WAIT_TARGET) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800d,
"wait for pending cmds failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
@@ -2999,9 +3001,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->max_exchg = FW_MAX_EXCHANGES_CNT;
atomic_set(&ha->num_pend_mbx_stage1, 0);
atomic_set(&ha->num_pend_mbx_stage2, 0);
- atomic_set(&ha->num_pend_mbx_stage3, 0);
atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
+ INIT_LIST_HEAD(&ha->tmf_pending);
+ INIT_LIST_HEAD(&ha->tmf_active);

/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -3278,6 +3281,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_id = ha->max_fibre_devices;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;
+
+ if (ql2xenabledif && ql2xenabledif != 2) {
+ ql_log(ql_log_warn, base_vha, 0x302d,
+ "Invalid value for ql2xenabledif, resetting it to default (2)\n");
+ ql2xenabledif = 2;
+ }
+
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
host->max_cmd_len = 32;
else
@@ -3515,8 +3525,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
base_vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_init, base_vha, 0x00f1,
"Registering for DIF/DIX type 1 and 3 protection.\n");
- if (ql2xenabledif == 1)
- prot = SHOST_DIX_TYPE0_PROTECTION;
if (ql2xprotmask)
scsi_host_set_prot(host, ql2xprotmask);
else
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index bb754a950802..545473a0ffc8 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1085,10 +1085,6 @@ void qlt_free_session_done(struct work_struct *work)
(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
}

- spin_lock_irqsave(&vha->work_lock, flags);
- sess->flags &= ~FCF_ASYNC_SENT;
- spin_unlock_irqrestore(&vha->work_lock, flags);
-
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (sess->se_sess) {
sess->se_sess = NULL;
@@ -1098,7 +1094,6 @@ void qlt_free_session_done(struct work_struct *work)

qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
- sess->deleted = QLA_SESS_DELETED;

if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
vha->fcport_count--;
@@ -1150,10 +1145,15 @@ void qlt_free_session_done(struct work_struct *work)

sess->explicit_logout = 0;
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- sess->free_pending = 0;

qla2x00_dfs_remove_rport(vha, sess);

+ spin_lock_irqsave(&vha->work_lock, flags);
+ sess->flags &= ~FCF_ASYNC_SENT;
+ sess->deleted = QLA_SESS_DELETED;
+ sess->free_pending = 0;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
ql_dbg(ql_dbg_disc, vha, 0xf001,
"Unregistration of sess %p %8phC finished fcp_cnt %d\n",
sess, sess->port_name, vha->fcport_count);
@@ -1202,12 +1202,12 @@ void qlt_unreg_sess(struct fc_port *sess)
* management from being sent.
*/
sess->flags |= FCF_ASYNC_SENT;
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
spin_unlock_irqrestore(&sess->vha->work_lock, flags);

if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

- sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
sess->last_rscn_gen = sess->rscn_gen;
sess->last_login_gen = sess->login_gen;
diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
index b7158e3c3a0b..5c7161b18b72 100644
--- a/drivers/soc/qcom/qmi_encdec.c
+++ b/drivers/soc/qcom/qmi_encdec.c
@@ -534,8 +534,8 @@ static int qmi_decode_string_elem(const struct qmi_elem_info *ei_array,
decoded_bytes += rc;
}

- if (string_len > temp_ei->elem_len) {
- pr_err("%s: String len %d > Max Len %d\n",
+ if (string_len >= temp_ei->elem_len) {
+ pr_err("%s: String len %d >= Max Len %d\n",
__func__, string_len, temp_ei->elem_len);
return -ETOOSMALL;
} else if (string_len > tlv_len) {
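
The decoder NUL-terminates the string into a buffer of elem_len bytes, so a decoded length equal to elem_len already overflows by the terminator; hence the check tightens from > to >=. The invariant in isolation:

#include <linux/errno.h>
#include <linux/string.h>

/* Sketch: 'cap' bytes hold at most cap - 1 string bytes plus the NUL. */
static int demo_copy_string(char *dst, size_t cap,
			    const char *src, size_t len)
{
	if (len >= cap)
		return -ETOOSMALL;

	memcpy(dst, src, len);
	dst[len] = '\0';
	return 0;
}
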
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index 5c5c99f7979e..30ec5b684533 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -87,8 +87,7 @@ static int gpio_backlight_probe(struct platform_device *pdev)
/* Not booted with device tree or no phandle link to the node */
bl->props.power = def_value ? FB_BLANK_UNBLANK
: FB_BLANK_POWERDOWN;
- else if (gpiod_get_direction(gbl->gpiod) == 0 &&
- gpiod_get_value_cansleep(gbl->gpiod) == 0)
+ else if (gpiod_get_value_cansleep(gbl->gpiod) == 0)
bl->props.power = FB_BLANK_POWERDOWN;
else
bl->props.power = FB_BLANK_UNBLANK;
diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
index 305f1587bd89..8b2bc4adc50f 100644
--- a/drivers/video/fbdev/ep93xx-fb.c
+++ b/drivers/video/fbdev/ep93xx-fb.c
@@ -474,7 +474,6 @@ static int ep93xxfb_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;

- info->dev = &pdev->dev;
platform_set_drvdata(pdev, info);
fbi = info->par;
fbi->mach_info = mach_info;
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index 9b2173f765c8..fb7fae750181 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -203,3 +203,4 @@ module_platform_driver(mid_wdt_driver);
MODULE_AUTHOR("David Cohen <david.a.cohen@xxxxxxxxxxxxxxx>");
MODULE_DESCRIPTION("Watchdog Driver for Intel MID platform");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:intel_mid_wdt");
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 96369c44863a..64daae693afd 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2721,11 +2721,10 @@ int btrfs_validate_super(struct btrfs_fs_info *fs_info,
ret = -EINVAL;
}

- if (memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
- BTRFS_FSID_SIZE)) {
+ if (memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
btrfs_err(fs_info,
"superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
- fs_info->super_copy->fsid, fs_info->fs_devices->fsid);
+ sb->fsid, fs_info->fs_devices->fsid);
ret = -EINVAL;
}

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f2ee70c03f0d..0640ef59fe66 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3810,7 +3810,8 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
fs_info->data_reloc_bg == 0);

if (block_group->ro ||
- test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
+ (!ffe_ctl->for_data_reloc &&
+ test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))) {
ret = 1;
goto out;
}
@@ -3853,8 +3854,26 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
if (ffe_ctl->for_treelog && !fs_info->treelog_bg)
fs_info->treelog_bg = block_group->start;

- if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg)
- fs_info->data_reloc_bg = block_group->start;
+ if (ffe_ctl->for_data_reloc) {
+ if (!fs_info->data_reloc_bg)
+ fs_info->data_reloc_bg = block_group->start;
+ /*
+ * Do not allow allocations from this block group, unless it is
+ * for data relocation. Compared to increasing the ->ro, setting
+ * the ->zoned_data_reloc_ongoing flag still allows nocow
+ * writers to come in. See btrfs_inc_nocow_writers().
+ *
+ * We need to disable allocations here to avoid allocating a
+ * regular (non-relocation data) extent. With a mix of relocation
+ * extents and regular extents, we can dispatch WRITE commands
+ * (for relocation extents) and ZONE APPEND commands (for
+ * regular extents) at the same time to the same zone, which
+ * easily breaks the write pointer.
+ *
+ * Also, this flag prevents this block group from being zone finished.
+ */
+ set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
+ }

ffe_ctl->found_offset = start + block_group->alloc_offset;
block_group->alloc_offset += num_bytes;
@@ -3872,24 +3891,8 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
out:
if (ret && ffe_ctl->for_treelog)
fs_info->treelog_bg = 0;
- if (ret && ffe_ctl->for_data_reloc &&
- fs_info->data_reloc_bg == block_group->start) {
- /*
- * Do not allow further allocations from this block group.
- * Compared to increasing the ->ro, setting the
- * ->zoned_data_reloc_ongoing flag still allows nocow
- * writers to come in. See btrfs_inc_nocow_writers().
- *
- * We need to disable an allocation to avoid an allocation of
- * regular (non-relocation data) extent. With mix of relocation
- * extents and regular extents, we can dispatch WRITE commands
- * (for relocation extents) and ZONE APPEND commands (for
- * regular extents) at the same time to the same zone, which
- * easily break the write pointer.
- */
- set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
+ if (ret && ffe_ctl->for_data_reloc)
fs_info->data_reloc_bg = 0;
- }
spin_unlock(&fs_info->relocation_bg_lock);
spin_unlock(&fs_info->treelog_bg_lock);
spin_unlock(&block_group->lock);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 28bcba2e0590..222068bf8003 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3393,6 +3393,13 @@ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
btrfs_free_reserved_extent(fs_info,
ordered_extent->disk_bytenr,
ordered_extent->disk_num_bytes, 1);
+ /*
+ * Actually free the qgroup rsv which was released when
+ * the ordered extent was created.
+ */
+ btrfs_qgroup_free_refroot(fs_info, inode->root->root_key.objectid,
+ ordered_extent->qgroup_rsv,
+ BTRFS_QGROUP_RSV_DATA);
}
}

diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index d3591c7f166a..d2bdcc2cce49 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2985,9 +2985,6 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
if (!page)
return -ENOMEM;
}
- ret = set_page_extent_mapped(page);
- if (ret < 0)
- goto release_page;

if (PageReadahead(page))
page_cache_async_readahead(inode->i_mapping, ra, NULL,
@@ -3003,6 +3000,15 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
}
}

+ /*
+ * We could have lost page private when we dropped the lock to read the
+ * page above; make sure we call set_page_extent_mapped() here so we have
+ * any of the subpage blocksize machinery we need in place.
+ */
+ ret = set_page_extent_mapped(page);
+ if (ret < 0)
+ goto release_page;
+
page_start = page_offset(page);
page_end = page_start + PAGE_SIZE - 1;

diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index c7642c00a65d..2635fb4bffa0 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -404,11 +404,7 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
return 0;

used = btrfs_space_info_used(space_info, true);
- if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags) &&
- (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
- avail = 0;
- else
- avail = calc_available_free_space(fs_info, space_info, flush);
+ avail = calc_available_free_space(fs_info, space_info, flush);

if (used + bytes < writable_total_bytes(fs_info, space_info) + avail)
return 1;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 2b776fce1c0f..a55556759441 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -279,10 +279,11 @@ static noinline int join_transaction(struct btrfs_fs_info *fs_info,
spin_unlock(&fs_info->trans_lock);

/*
- * If we are ATTACH, we just want to catch the current transaction,
- * and commit it. If there is no transaction, just return ENOENT.
+ * If we are ATTACH or TRANS_JOIN_NOSTART, we just want to catch the
+ * current transaction, and commit it. If there is no transaction, just
+ * return ENOENT.
*/
- if (type == TRANS_ATTACH)
+ if (type == TRANS_ATTACH || type == TRANS_JOIN_NOSTART)
return -ENOENT;

/*
@@ -580,8 +581,13 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
u64 delayed_refs_bytes = 0;

qgroup_reserved = num_items * fs_info->nodesize;
- ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
- enforce_qgroups);
+ /*
+ * Use prealloc for now, as there might be a currently running
+ * transaction that could free this reserved space prematurely
+ * by committing.
+ */
+ ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserved,
+ enforce_qgroups, false);
if (ret)
return ERR_PTR(ret);

@@ -693,6 +699,14 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
h->reloc_reserved = reloc_reserved;
}

+ /*
+ * Now that we have found a transaction to be a part of, convert the
+ * qgroup reservation from prealloc to pertrans. A different transaction
+ * can't race in and free our pertrans out from under us.
+ */
+ if (qgroup_reserved)
+ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+
got_it:
if (!current->journal_info)
current->journal_info = h;
@@ -740,7 +754,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
num_bytes, NULL);
reserve_fail:
- btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
return ERR_PTR(ret);
}
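A sketch of the two-step reservation this transaction.c change introduces, on a deliberately simplified accounting model (these helpers are illustrative, not the btrfs qgroup API): reserve into a "prealloc" bucket that a concurrently committing transaction never frees, and only convert it to the per-transaction bucket once we have joined a transaction, so nobody can free it out from under us.

#include <stdio.h>

struct qgroup {
	long prealloc;	/* owned by the reserving task */
	long pertrans;	/* freed wholesale when a transaction commits */
};

static void reserve_prealloc(struct qgroup *qg, long bytes)
{
	qg->prealloc += bytes;
}

static void convert_to_pertrans(struct qgroup *qg, long bytes)
{
	qg->prealloc -= bytes;
	qg->pertrans += bytes;
}

static void free_prealloc(struct qgroup *qg, long bytes)	/* error path */
{
	qg->prealloc -= bytes;
}

int main(void)
{
	struct qgroup qg = { 0, 0 };
	long rsv = 3 * 16384;		/* num_items * nodesize, say */

	reserve_prealloc(&qg, rsv);	/* step 1: safe from commits */
	/* ... join or start a transaction here; on failure: free_prealloc() ... */
	convert_to_pertrans(&qg, rsv);	/* step 2: now tied to our transaction */
	printf("prealloc=%ld pertrans=%ld\n", qg.prealloc, qg.pertrans);
	return 0;
}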

diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 9bc7ac06c517..675dbed075d8 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -2009,6 +2009,10 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
* and block_group->meta_write_pointer for metadata.
*/
if (!fully_written) {
+ if (test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
+ spin_unlock(&block_group->lock);
+ return -EAGAIN;
+ }
spin_unlock(&block_group->lock);

ret = btrfs_inc_block_group_ro(block_group, false);
@@ -2037,7 +2041,9 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
return 0;
}

- if (block_group->reserved) {
+ if (block_group->reserved ||
+ test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
+ &block_group->runtime_flags)) {
spin_unlock(&block_group->lock);
btrfs_dec_block_group_ro(block_group);
return -EAGAIN;
@@ -2268,7 +2274,10 @@ void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logica

/* All relocation extents are written. */
if (block_group->start + block_group->alloc_offset == logical + length) {
- /* Now, release this block group for further allocations. */
+ /*
+ * Now, release this block group for further allocations and
+ * zone finish.
+ */
clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
&block_group->runtime_flags);
}
@@ -2292,7 +2301,8 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)

spin_lock(&block_group->lock);
if (block_group->reserved || block_group->alloc_offset == 0 ||
- (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)) {
+ (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) ||
+ test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
spin_unlock(&block_group->lock);
continue;
}
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 8e83b51e3c68..fbd0329cf254 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -910,11 +910,11 @@ unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
}

/*
- * This function returns the number of file system metadata clusters at
+ * This function returns the number of file system metadata blocks at
* the beginning of a block group, including the reserved gdt blocks.
*/
-static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
- ext4_group_t block_group)
+unsigned int ext4_num_base_meta_blocks(struct super_block *sb,
+ ext4_group_t block_group)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
unsigned num;
@@ -932,8 +932,15 @@ static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
} else { /* For META_BG_BLOCK_GROUPS */
num += ext4_bg_num_gdb(sb, block_group);
}
- return EXT4_NUM_B2C(sbi, num);
+ return num;
}
+
+static unsigned int ext4_num_base_meta_clusters(struct super_block *sb,
+ ext4_group_t block_group)
+{
+ return EXT4_NUM_B2C(EXT4_SB(sb), ext4_num_base_meta_blocks(sb, block_group));
+}
+
/**
* ext4_inode_to_goal_block - return a hint for block allocation
* @inode: inode for block allocation
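The ext4 split above separates the raw metadata block count from the cluster conversion. EXT4_NUM_B2C() is round-up division by the cluster ratio, a power of two; a small self-contained illustration (the cluster-bits value is made up):

#include <stdio.h>

static unsigned int num_b2c(unsigned int blocks, unsigned int cluster_bits)
{
	unsigned int per_cluster = 1U << cluster_bits;

	/* round up: any partial cluster still occupies a whole cluster */
	return (blocks + per_cluster - 1) >> cluster_bits;
}

int main(void)
{
	/* e.g. 2^4 = 16 blocks per cluster */
	printf("%u\n", num_b2c(1, 4));	/* -> 1 */
	printf("%u\n", num_b2c(16, 4));	/* -> 1 */
	printf("%u\n", num_b2c(17, 4));	/* -> 2 */
	return 0;
}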
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 5504f72bbbbe..6fe3c941b565 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -215,7 +215,6 @@ int ext4_setup_system_zone(struct super_block *sb)
struct ext4_system_blocks *system_blks;
struct ext4_group_desc *gdp;
ext4_group_t i;
- int flex_size = ext4_flex_bg_size(sbi);
int ret;

system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
@@ -223,12 +222,13 @@ int ext4_setup_system_zone(struct super_block *sb)
return -ENOMEM;

for (i=0; i < ngroups; i++) {
+ unsigned int meta_blks = ext4_num_base_meta_blocks(sb, i);
+
cond_resched();
- if (ext4_bg_has_super(sb, i) &&
- ((i < 5) || ((i % flex_size) == 0))) {
+ if (meta_blks != 0) {
ret = add_system_zone(system_blks,
ext4_group_first_block_no(sb, i),
- ext4_bg_num_gdb(sb, i) + 1, 0);
+ meta_blks, 0);
if (ret)
goto err;
}
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index e20ac0654b3f..453d4da5de52 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -33,6 +33,8 @@ int ext4_fname_setup_filename(struct inode *dir, const struct qstr *iname,

#if IS_ENABLED(CONFIG_UNICODE)
err = ext4_fname_setup_ci_filename(dir, iname, fname);
+ if (err)
+ ext4_fname_free_filename(fname);
#endif
return err;
}
@@ -51,6 +53,8 @@ int ext4_fname_prepare_lookup(struct inode *dir, struct dentry *dentry,

#if IS_ENABLED(CONFIG_UNICODE)
err = ext4_fname_setup_ci_filename(dir, &dentry->d_name, fname);
+ if (err)
+ ext4_fname_free_filename(fname);
#endif
return err;
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0ea3960cb83e..72abb8d6caf7 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -3096,6 +3096,8 @@ extern const char *ext4_decode_error(struct super_block *sb, int errno,
extern void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
ext4_group_t block_group,
unsigned int flags);
+extern unsigned int ext4_num_base_meta_blocks(struct super_block *sb,
+ ext4_group_t block_group);

extern __printf(7, 8)
void __ext4_error(struct super_block *, const char *, unsigned int, bool,
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index c2b7d0923894..37dca728ff96 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2160,15 +2160,6 @@ static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
return down_read_trylock(&sem->internal_rwsem);
}

-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
-{
- down_read_nested(&sem->internal_rwsem, subclass);
-}
-#else
-#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
-#endif
-
static inline void f2fs_up_read(struct f2fs_rwsem *sem)
{
up_read(&sem->internal_rwsem);
@@ -2179,6 +2170,21 @@ static inline void f2fs_down_write(struct f2fs_rwsem *sem)
down_write(&sem->internal_rwsem);
}

+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
+{
+ down_read_nested(&sem->internal_rwsem, subclass);
+}
+
+static inline void f2fs_down_write_nested(struct f2fs_rwsem *sem, int subclass)
+{
+ down_write_nested(&sem->internal_rwsem, subclass);
+}
+#else
+#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
+#define f2fs_down_write_nested(sem, subclass) f2fs_down_write(sem)
+#endif
+
static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
{
return down_write_trylock(&sem->internal_rwsem);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 7e867dff681d..8747eec3d0a3 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -642,7 +642,8 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
}

if (inode) {
- f2fs_down_write(&F2FS_I(inode)->i_sem);
+ f2fs_down_write_nested(&F2FS_I(inode)->i_sem,
+ SINGLE_DEPTH_NESTING);
page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
if (IS_ERR(page)) {
err = PTR_ERR(page);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index cbbf95b99541..16bf9d5c8d4f 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -204,6 +204,8 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
f2fs_i_size_write(inode, fi->original_i_size);
fi->original_i_size = 0;
}
+ /* avoid stale dirty inode during eviction */
+ sync_inode_metadata(inode, 0);
}

static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
index e8deaacf1832..f40bc51fa531 100644
--- a/fs/fuse/readdir.c
+++ b/fs/fuse/readdir.c
@@ -243,8 +243,16 @@ static int fuse_direntplus_link(struct file *file,
dput(dentry);
dentry = alias;
}
- if (IS_ERR(dentry))
+ if (IS_ERR(dentry)) {
+ if (!IS_ERR(inode)) {
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ spin_lock(&fi->lock);
+ fi->nlookup--;
+ spin_unlock(&fi->lock);
+ }
return PTR_ERR(dentry);
+ }
}
if (fc->readdirplus_auto)
set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 2f04c0ff7470..1e9fa26f04fe 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -182,13 +182,13 @@ static int gfs2_writepages(struct address_space *mapping,
int ret;

/*
- * Even if we didn't write any pages here, we might still be holding
+ * Even if we didn't write enough pages here, we might still be holding
* dirty pages in the ail. We forcibly flush the ail because we don't
* want balance_dirty_pages() to loop indefinitely trying to write out
* pages held in the ail that it can't find.
*/
ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
- if (ret == 0)
+ if (ret == 0 && wbc->nr_to_write > 0)
set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
return ret;
}
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 61323deb80bc..e021d5f50c23 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -1285,9 +1285,6 @@ static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);

- if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
- return 1;
-
return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
atomic_read(&sdp->sd_log_thresh2);
}
@@ -1304,7 +1301,6 @@ int gfs2_logd(void *data)
{
struct gfs2_sbd *sdp = data;
unsigned long t = 1;
- DEFINE_WAIT(wait);

while (!kthread_should_stop()) {

@@ -1329,7 +1325,9 @@ int gfs2_logd(void *data)
GFS2_LFC_LOGD_JFLUSH_REQD);
}

- if (gfs2_ail_flush_reqd(sdp)) {
+ if (test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
+ gfs2_ail_flush_reqd(sdp)) {
+ clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
gfs2_ail1_start(sdp);
gfs2_ail1_wait(sdp);
gfs2_ail1_empty(sdp, 0);
@@ -1341,17 +1339,12 @@ int gfs2_logd(void *data)

try_to_freeze();

- do {
- prepare_to_wait(&sdp->sd_logd_waitq, &wait,
- TASK_INTERRUPTIBLE);
- if (!gfs2_ail_flush_reqd(sdp) &&
- !gfs2_jrnl_flush_reqd(sdp) &&
- !kthread_should_stop())
- t = schedule_timeout(t);
- } while(t && !gfs2_ail_flush_reqd(sdp) &&
- !gfs2_jrnl_flush_reqd(sdp) &&
- !kthread_should_stop());
- finish_wait(&sdp->sd_logd_waitq, &wait);
+ t = wait_event_interruptible_timeout(sdp->sd_logd_waitq,
+ test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
+ gfs2_ail_flush_reqd(sdp) ||
+ gfs2_jrnl_flush_reqd(sdp) ||
+ kthread_should_stop(),
+ t);
}

return 0;
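A userspace analog of what the wait_event_interruptible_timeout() conversion buys, using POSIX condition variables rather than the kernel wait-queue API: the open-coded prepare_to_wait()/schedule() loop collapses into "sleep until the condition holds or the timeout expires", with the predicate re-checked under the lock on every wakeup. Build with -pthread; names are illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool flush_requested;

/* returns true if woken by the condition, false on timeout */
static bool wait_for_flush(int timeout_sec)
{
	struct timespec deadline;
	bool ok = true;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;

	pthread_mutex_lock(&lock);
	while (!flush_requested) {
		if (pthread_cond_timedwait(&cond, &lock, &deadline) != 0) {
			ok = flush_requested;	/* timed out */
			break;
		}
	}
	pthread_mutex_unlock(&lock);
	return ok;
}

static void *requester(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	flush_requested = true;		/* like set_bit(SDF_FORCE_AIL_FLUSH) */
	pthread_cond_signal(&cond);	/* like waking sd_logd_waitq */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, requester, NULL);
	printf("woken by condition: %d\n", wait_for_flush(5));
	pthread_join(t, NULL);
	return 0;
}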
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 9ec91017a7f3..f033ac807013 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -349,6 +349,8 @@ int jbd2_cleanup_journal_tail(journal_t *journal)

/* Checkpoint list management */

+enum shrink_type {SHRINK_DESTROY, SHRINK_BUSY_STOP, SHRINK_BUSY_SKIP};
+
/*
* journal_shrink_one_cp_list
*
@@ -360,7 +362,8 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
* Called with j_list_lock held.
*/
static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
- bool destroy, bool *released)
+ enum shrink_type type,
+ bool *released)
{
struct journal_head *last_jh;
struct journal_head *next_jh = jh;
@@ -376,12 +379,15 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
jh = next_jh;
next_jh = jh->b_cpnext;

- if (destroy) {
+ if (type == SHRINK_DESTROY) {
ret = __jbd2_journal_remove_checkpoint(jh);
} else {
ret = jbd2_journal_try_remove_checkpoint(jh);
- if (ret < 0)
- continue;
+ if (ret < 0) {
+ if (type == SHRINK_BUSY_SKIP)
+ continue;
+ break;
+ }
}

nr_freed++;
@@ -445,7 +451,7 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
tid = transaction->t_tid;

freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list,
- false, &released);
+ SHRINK_BUSY_SKIP, &released);
nr_freed += freed;
(*nr_to_scan) -= min(*nr_to_scan, freed);
if (*nr_to_scan == 0)
@@ -485,19 +491,21 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
{
transaction_t *transaction, *last_transaction, *next_transaction;
+ enum shrink_type type;
bool released;

transaction = journal->j_checkpoint_transactions;
if (!transaction)
return;

+ type = destroy ? SHRINK_DESTROY : SHRINK_BUSY_STOP;
last_transaction = transaction->t_cpprev;
next_transaction = transaction;
do {
transaction = next_transaction;
next_transaction = transaction->t_cpnext;
journal_shrink_one_cp_list(transaction->t_checkpoint_list,
- destroy, &released);
+ type, &released);
/*
* This function only frees up some memory if possible so we
* don't have an obligation to finish processing. Bail out if
@@ -631,6 +639,8 @@ int jbd2_journal_try_remove_checkpoint(struct journal_head *jh)
{
struct buffer_head *bh = jh2bh(jh);

+ if (jh->b_transaction)
+ return -EBUSY;
if (!trylock_buffer(bh))
return -EBUSY;
if (buffer_dirty(bh)) {
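The enum replaces a bool so the two non-destroy callers can get different busy-entry behavior. A self-contained sketch of the three policies on a plain array (names mirror the patch; this is not the jbd2 code): DESTROY removes entries unconditionally, BUSY_SKIP steps over busy entries, and BUSY_STOP gives up at the first busy one.

#include <stdbool.h>
#include <stdio.h>

enum shrink_type { SHRINK_DESTROY, SHRINK_BUSY_STOP, SHRINK_BUSY_SKIP };

static unsigned long shrink_list(const bool *busy, int n, enum shrink_type type)
{
	unsigned long nr_freed = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (type != SHRINK_DESTROY && busy[i]) {
			if (type == SHRINK_BUSY_SKIP)
				continue;	/* shrinker: keep scanning */
			break;			/* cleanup: stop at first busy entry */
		}
		nr_freed++;			/* "removed a checkpoint" */
	}
	return nr_freed;
}

int main(void)
{
	const bool busy[] = { false, true, false, false };
	const int n = sizeof(busy) / sizeof(busy[0]);

	printf("destroy:   %lu\n", shrink_list(busy, n, SHRINK_DESTROY));	/* 4 */
	printf("busy_skip: %lu\n", shrink_list(busy, n, SHRINK_BUSY_SKIP));	/* 3 */
	printf("busy_stop: %lu\n", shrink_list(busy, n, SHRINK_BUSY_STOP));	/* 1 */
	return 0;
}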
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 8286a9ec122f..357a3f7632e3 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -229,12 +229,8 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
/* Make sure we wrap around the log correctly! */
#define wrap(journal, var) \
do { \
- unsigned long _wrap_last = \
- jbd2_has_feature_fast_commit(journal) ? \
- (journal)->j_fc_last : (journal)->j_last; \
- \
- if (var >= _wrap_last) \
- var -= (_wrap_last - (journal)->j_first); \
+ if (var >= (journal)->j_last) \
+ var -= ((journal)->j_last - (journal)->j_first); \
} while (0)

static int fc_do_one_pass(journal_t *journal,
@@ -517,9 +513,7 @@ static int do_one_pass(journal_t *journal,
break;

jbd2_debug(2, "Scanning for sequence ID %u at %lu/%lu\n",
- next_commit_ID, next_log_block,
- jbd2_has_feature_fast_commit(journal) ?
- journal->j_fc_last : journal->j_last);
+ next_commit_ID, next_log_block, journal->j_last);

/* Skip over each chunk of the transaction looking
* either the next descriptor block or the final commit
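The wrap() macro restored above is circular-log arithmetic over [j_first, j_last): once a running block number reaches j_last it wraps back by the log's length. An illustrative demonstration with made-up bounds:

#include <stdio.h>

#define wrap(first, last, var)				\
	do {						\
		if ((var) >= (last))			\
			(var) -= ((last) - (first));	\
	} while (0)

int main(void)
{
	unsigned long j_first = 1, j_last = 100;
	unsigned long block = 98;
	int i;

	for (i = 0; i < 5; i++) {
		printf("%lu\n", block);	/* prints 98 99 1 2 3 */
		block++;
		wrap(j_first, j_last, block);
	}
	return 0;
}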
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index cf34d0c30945..3bb530d4bb5c 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -474,13 +474,31 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
return result;
}

+static void nfs_direct_add_page_head(struct list_head *list,
+ struct nfs_page *req)
+{
+ struct nfs_page *head = req->wb_head;
+
+ if (!list_empty(&head->wb_list) || !nfs_lock_request(head))
+ return;
+ if (!list_empty(&head->wb_list)) {
+ nfs_unlock_request(head);
+ return;
+ }
+ list_add(&head->wb_list, list);
+ kref_get(&head->wb_kref);
+ kref_get(&head->wb_kref);
+}
+
static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
{
struct nfs_page *req, *subreq;

list_for_each_entry(req, list, wb_list) {
- if (req->wb_head != req)
+ if (req->wb_head != req) {
+ nfs_direct_add_page_head(&req->wb_list, req);
continue;
+ }
subreq = req->wb_this_page;
if (subreq == req)
continue;
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index ddbbf4fcda86..178001c90156 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -154,7 +154,7 @@ nfs4_get_device_info(struct nfs_server *server,
set_bit(NFS_DEVICEID_NOCACHE, &d->flags);

out_free_pages:
- for (i = 0; i < max_pages; i++)
+ while (--i >= 0)
__free_page(pages[i]);
kfree(pages);
out_free_pdev:
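The "while (--i >= 0)" fix above is the classic partial-allocation unwind: on failure midway through a loop of allocations, free only the entries that actually succeeded, by walking back from the failure point instead of over the whole array. A minimal sketch (the simulated failure is illustrative):

#include <stdio.h>
#include <stdlib.h>

static void *try_alloc(int i)
{
	return i == 3 ? NULL : malloc(4096);	/* simulate failure at i == 3 */
}

int main(void)
{
	enum { MAX_PAGES = 8 };
	void *pages[MAX_PAGES];
	int i;

	for (i = 0; i < MAX_PAGES; i++) {
		pages[i] = try_alloc(i);
		if (!pages[i])
			break;
	}

	/*
	 * "for (i = 0; i < MAX_PAGES; i++) free(pages[i]);" would touch
	 * slots that were never allocated; walking back frees exactly
	 * pages[0..i-1], the ones that succeeded.
	 */
	while (--i >= 0)
		free(pages[i]);

	printf("unwound cleanly\n");
	return 0;
}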
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index bfc964b36c72..5a132c1e6f6c 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -218,7 +218,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
.tcon = tcon,
.path = path,
.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
- .desired_access = FILE_READ_ATTRIBUTES,
+ .desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES,
.disposition = FILE_OPEN,
.fid = pfid,
};
diff --git a/fs/smb/client/cifs_dfs_ref.c b/fs/smb/client/cifs_dfs_ref.c
index b0864da9ef43..020e71fe1454 100644
--- a/fs/smb/client/cifs_dfs_ref.c
+++ b/fs/smb/client/cifs_dfs_ref.c
@@ -258,61 +258,23 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
goto compose_mount_options_out;
}

-/**
- * cifs_dfs_do_mount - mounts specified path using DFS full path
- *
- * Always pass down @fullpath to smb3_do_mount() so we can use the root server
- * to perform failover in case we failed to connect to the first target in the
- * referral.
- *
- * @mntpt: directory entry for the path we are trying to automount
- * @cifs_sb: parent/root superblock
- * @fullpath: full path in UNC format
- */
-static struct vfsmount *cifs_dfs_do_mount(struct dentry *mntpt,
- struct cifs_sb_info *cifs_sb,
- const char *fullpath)
-{
- struct vfsmount *mnt;
- char *mountdata;
- char *devname;
-
- devname = kstrdup(fullpath, GFP_KERNEL);
- if (!devname)
- return ERR_PTR(-ENOMEM);
-
- convert_delimiter(devname, '/');
-
- /* TODO: change to call fs_context_for_mount(), fill in context directly, call fc_mount */
-
- /* See afs_mntpt_do_automount in fs/afs/mntpt.c for an example */
-
- /* strip first '\' from fullpath */
- mountdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
- fullpath + 1, NULL, NULL);
- if (IS_ERR(mountdata)) {
- kfree(devname);
- return (struct vfsmount *)mountdata;
- }
-
- mnt = vfs_submount(mntpt, &cifs_fs_type, devname, mountdata);
- kfree(mountdata);
- kfree(devname);
- return mnt;
-}
-
/*
* Create a vfsmount that we can automount
*/
-static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
+static struct vfsmount *cifs_dfs_do_automount(struct path *path)
{
+ int rc;
+ struct dentry *mntpt = path->dentry;
+ struct fs_context *fc;
struct cifs_sb_info *cifs_sb;
- void *page;
+ void *page = NULL;
+ struct smb3_fs_context *ctx, *cur_ctx;
+ struct smb3_fs_context tmp;
char *full_path;
struct vfsmount *mnt;

- cifs_dbg(FYI, "in %s\n", __func__);
- BUG_ON(IS_ROOT(mntpt));
+ if (IS_ROOT(mntpt))
+ return ERR_PTR(-ESTALE);

/*
* The MSDFS spec states that paths in DFS referral requests and
@@ -321,29 +283,47 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
* gives us the latter, so we must adjust the result.
*/
cifs_sb = CIFS_SB(mntpt->d_sb);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) {
- mnt = ERR_PTR(-EREMOTE);
- goto cdda_exit;
- }
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
+ return ERR_PTR(-EREMOTE);
+
+ cur_ctx = cifs_sb->ctx;
+
+ fc = fs_context_for_submount(path->mnt->mnt_sb->s_type, mntpt);
+ if (IS_ERR(fc))
+ return ERR_CAST(fc);
+
+ ctx = smb3_fc2context(fc);

page = alloc_dentry_path();
/* always use tree name prefix */
full_path = build_path_from_dentry_optional_prefix(mntpt, page, true);
if (IS_ERR(full_path)) {
mnt = ERR_CAST(full_path);
- goto free_full_path;
+ goto out;
}

- convert_delimiter(full_path, '\\');
+ convert_delimiter(full_path, '/');
cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);

- mnt = cifs_dfs_do_mount(mntpt, cifs_sb, full_path);
- cifs_dbg(FYI, "%s: cifs_dfs_do_mount:%s , mnt:%p\n", __func__, full_path + 1, mnt);
+ tmp = *cur_ctx;
+ tmp.source = full_path;
+ tmp.UNC = tmp.prepath = NULL;
+
+ rc = smb3_fs_context_dup(ctx, &tmp);
+ if (rc) {
+ mnt = ERR_PTR(rc);
+ goto out;
+ }
+
+ rc = smb3_parse_devname(full_path, ctx);
+ if (!rc)
+ mnt = fc_mount(fc);
+ else
+ mnt = ERR_PTR(rc);

-free_full_path:
+out:
+ put_fs_context(fc);
free_dentry_path(page);
-cdda_exit:
- cifs_dbg(FYI, "leaving %s\n" , __func__);
return mnt;
}

@@ -354,9 +334,9 @@ struct vfsmount *cifs_dfs_d_automount(struct path *path)
{
struct vfsmount *newmnt;

- cifs_dbg(FYI, "in %s\n", __func__);
+ cifs_dbg(FYI, "%s: %pd\n", __func__, path->dentry);

- newmnt = cifs_dfs_do_automount(path->dentry);
+ newmnt = cifs_dfs_do_automount(path);
if (IS_ERR(newmnt)) {
cifs_dbg(FYI, "leaving %s [automount failed]\n" , __func__);
return newmnt;
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 4a092cc5a393..03f34ec63e10 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -734,6 +734,7 @@ struct TCP_Server_Info {
*/
#define CIFS_SERVER_IS_CHAN(server) (!!(server)->primary_server)
struct TCP_Server_Info *primary_server;
+ __u16 channel_sequence_num; /* incremented on primary channel on each chan reconnect */

#ifdef CONFIG_CIFS_SWN_UPCALL
bool use_swn_dstaddr;
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 9cd282960c0b..57da4f23c1e4 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -1725,6 +1725,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
tcp_ses->session_estab = false;
tcp_ses->sequence_number = 0;
+ tcp_ses->channel_sequence_num = 0; /* only tracked for primary channel */
tcp_ses->reconnect_instance = 1;
tcp_ses->lstrp = jiffies;
tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
diff --git a/fs/smb/client/fscache.c b/fs/smb/client/fscache.c
index f6f3a6b75601..e73625b5d0cc 100644
--- a/fs/smb/client/fscache.c
+++ b/fs/smb/client/fscache.c
@@ -48,7 +48,7 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
sharename = extract_sharename(tcon->tree_name);
if (IS_ERR(sharename)) {
cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__);
- return -EINVAL;
+ return PTR_ERR(sharename);
}

slen = strlen(sharename);
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 6b020d80bb94..1387d5126f53 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -167,8 +167,17 @@ smb2_set_credits(struct TCP_Server_Info *server, const int val)

spin_lock(&server->req_lock);
server->credits = val;
- if (val == 1)
+ if (val == 1) {
server->reconnect_instance++;
+ /*
+		 * ChannelSequence is updated in the primary channel for all channels so
+		 * that it is consistent across SMB3 requests sent on any channel. See MS-SMB2 3.2.4.1 and 3.2.7.1
+ */
+ if (CIFS_SERVER_IS_CHAN(server))
+ server->primary_server->channel_sequence_num++;
+ else
+ server->channel_sequence_num++;
+ }
scredits = server->credits;
in_flight = server->in_flight;
spin_unlock(&server->req_lock);
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index ae17d78f6ba1..847d69d327c2 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -88,9 +88,20 @@ smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
const struct cifs_tcon *tcon,
struct TCP_Server_Info *server)
{
+ struct smb3_hdr_req *smb3_hdr;
shdr->ProtocolId = SMB2_PROTO_NUMBER;
shdr->StructureSize = cpu_to_le16(64);
shdr->Command = smb2_cmd;
+ if (server->dialect >= SMB30_PROT_ID) {
+ /* After reconnect SMB3 must set ChannelSequence on subsequent reqs */
+ smb3_hdr = (struct smb3_hdr_req *)shdr;
+ /* if primary channel is not set yet, use default channel for chan sequence num */
+ if (CIFS_SERVER_IS_CHAN(server))
+ smb3_hdr->ChannelSequence =
+ cpu_to_le16(server->primary_server->channel_sequence_num);
+ else
+ smb3_hdr->ChannelSequence = cpu_to_le16(server->channel_sequence_num);
+ }
if (server) {
spin_lock(&server->req_lock);
/* Request up to 10 credits but don't go over the limit. */
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index 7d605db3bb3b..9619015d78f2 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -153,6 +153,28 @@ struct smb2_hdr {
__u8 Signature[16];
} __packed;

+struct smb3_hdr_req {
+ __le32 ProtocolId; /* 0xFE 'S' 'M' 'B' */
+ __le16 StructureSize; /* 64 */
+ __le16 CreditCharge; /* MBZ */
+ __le16 ChannelSequence; /* See MS-SMB2 3.2.4.1 and 3.2.7.1 */
+ __le16 Reserved;
+ __le16 Command;
+ __le16 CreditRequest; /* CreditResponse */
+ __le32 Flags;
+ __le32 NextCommand;
+ __le64 MessageId;
+ union {
+ struct {
+ __le32 ProcessId;
+ __le32 TreeId;
+ } __packed SyncId;
+ __le64 AsyncId;
+ } __packed Id;
+ __le64 SessionId;
+ __u8 Signature[16];
+} __packed;
+
struct smb2_pdu {
struct smb2_hdr hdr;
__le16 StructureSize2; /* size of wct area (varies, request specific) */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8cef9ec3a89c..b3d3aa8437dc 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -862,22 +862,18 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *i
const struct btf_func_model *m, u32 flags,
struct bpf_tramp_links *tlinks,
void *orig_call);
-/* these two functions are called from generated trampoline */
-u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx);
-u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
-void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
- struct bpf_tramp_run_ctx *run_ctx);
-u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
- struct bpf_tramp_run_ctx *run_ctx);
-void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
- struct bpf_tramp_run_ctx *run_ctx);
-u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
- struct bpf_tramp_run_ctx *run_ctx);
-void notrace __bpf_prog_exit_struct_ops(struct bpf_prog *prog, u64 start,
- struct bpf_tramp_run_ctx *run_ctx);
+u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
+typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx);
+typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx);
+bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
+bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);

struct bpf_ksym {
unsigned long start;
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 0eb8f035b3d9..1a32baa78ce2 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -648,4 +648,17 @@ static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
prog->aux->dst_prog->type : prog->type;
}

+static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
+{
+ switch (resolve_prog_type(prog)) {
+ case BPF_PROG_TYPE_TRACING:
+ return prog->expected_attach_type != BPF_TRACE_ITER;
+ case BPF_PROG_TYPE_STRUCT_OPS:
+ case BPF_PROG_TYPE_LSM:
+ return false;
+ default:
+ return true;
+ }
+}
+
#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 37dfdcfcdd54..15d7529ac953 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -146,6 +146,7 @@ struct inet6_skb_parm {
#define IP6SKB_JUMBOGRAM 128
#define IP6SKB_SEG6 256
#define IP6SKB_FAKEJUMBO 512
+#define IP6SKB_MULTIPATH 1024
};

#if defined(CONFIG_NET_L3_MASTER_DEV)
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 1f7c33b2f5a3..e164facb0f36 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -38,9 +38,9 @@
#define PHY_ID_KSZ9477 0x00221631

/* struct phy_device dev_flags definitions */
-#define MICREL_PHY_50MHZ_CLK 0x00000001
-#define MICREL_PHY_FXEN 0x00000002
-#define MICREL_KSZ8_P1_ERRATA 0x00000003
+#define MICREL_PHY_50MHZ_CLK BIT(0)
+#define MICREL_PHY_FXEN BIT(1)
+#define MICREL_KSZ8_P1_ERRATA BIT(2)

#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
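The micrel change matters because 1, 2, 3 are not independent bits: 0x3 is 0x1 | 0x2, so code testing for the errata flag would also match when the other two flags happen to be set together. A quick demonstration:

#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	unsigned int old_clk = 0x1, old_fxen = 0x2, old_errata = 0x3;
	unsigned int flags = old_clk | old_fxen;	/* no errata requested */

	/* false positive: 0x3 == 0x1 | 0x2 */
	printf("old scheme sees errata: %d\n", (flags & old_errata) == old_errata);

	flags = BIT(0) | BIT(1);			/* clk + fxen only */
	printf("new scheme sees errata: %d\n", !!(flags & BIT(2)));
	return 0;
}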
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index e8ed225d8f7c..4ef6c09cc2ee 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -256,9 +256,9 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
lru_gen_update_size(lruvec, folio, -1, gen);
/* for folio_rotate_reclaimable() */
if (reclaiming)
- list_add_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
+ list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
else
- list_add(&folio->lru, &lrugen->lists[gen][type][zone]);
+ list_add(&folio->lru, &lrugen->folios[gen][type][zone]);

return true;
}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5f74891556f3..323ee36df683 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -312,7 +312,7 @@ enum lruvec_flags {
* They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
* offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
* corresponding generation. The gen counter in folio->flags stores gen+1 while
- * a page is on one of lrugen->lists[]. Otherwise it stores 0.
+ * a page is on one of lrugen->folios[]. Otherwise it stores 0.
*
* A page is added to the youngest generation on faulting. The aging needs to
* check the accessed bit at least twice before handing this page over to the
@@ -324,8 +324,8 @@ enum lruvec_flags {
* rest of generations, if they exist, are considered inactive. See
* lru_gen_is_active().
*
- * PG_active is always cleared while a page is on one of lrugen->lists[] so that
- * the aging needs not to worry about it. And it's set again when a page
+ * PG_active is always cleared while a page is on one of lrugen->folios[] so
+ * that the aging need not worry about it. And it's set again when a page
* considered active is isolated for non-reclaiming purposes, e.g., migration.
* See lru_gen_add_folio() and lru_gen_del_folio().
*
@@ -412,7 +412,7 @@ struct lru_gen_struct {
/* the birth time of each generation in jiffies */
unsigned long timestamps[MAX_NR_GENS];
/* the multi-gen LRU lists, lazily sorted on eviction */
- struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
/* the multi-gen LRU sizes, eventually consistent */
long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
/* the exponential moving average of refaulted */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index cc5ed2cf25f6..2feee144fc0e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -261,6 +261,14 @@
#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X) \
((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* For X bytes available in skb->head, what is the minimal
+ * allocation needed, knowing struct skb_shared_info needs
+ * to be aligned.
+ */
+#define SKB_HEAD_ALIGN(X) (SKB_DATA_ALIGN(X) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
#define SKB_MAX_ORDER(X, ORDER) \
SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
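A sketch of the SKB_HEAD_ALIGN() arithmetic added above: round the requested head size up to the cache-line size, then add room for a cache-aligned skb_shared_info. ALIGN() is the usual power-of-two round-up; the 64-byte line and the stand-in shinfo size below are illustrative.

#include <stdio.h>

#define SMP_CACHE_BYTES 64
#define SHINFO_SIZE 320		/* stand-in for sizeof(struct skb_shared_info) */

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define SKB_DATA_ALIGN(x) ALIGN((x), SMP_CACHE_BYTES)
#define SKB_HEAD_ALIGN(x) (SKB_DATA_ALIGN(x) + SKB_DATA_ALIGN(SHINFO_SIZE))

int main(void)
{
	/* 1500 rounds up to 1536, plus 320 (already aligned) = 1856 */
	printf("%lu\n", SKB_HEAD_ALIGN(1500));
	return 0;
}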
diff --git a/include/linux/tca6416_keypad.h b/include/linux/tca6416_keypad.h
index b0d36a9934cc..5cf6f6f82aa7 100644
--- a/include/linux/tca6416_keypad.h
+++ b/include/linux/tca6416_keypad.h
@@ -25,7 +25,6 @@ struct tca6416_keys_platform_data {
unsigned int rep:1; /* enable input subsystem auto repeat */
uint16_t pinmask;
uint16_t invert;
- int irq_is_gpio;
int use_polling; /* use polling if interrupt is not connected */
};
#endif
diff --git a/include/net/ip.h b/include/net/ip.h
index 1872f570abed..c286344628db 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -57,6 +57,7 @@ struct inet_skb_parm {
#define IPSKB_FRAG_PMTU BIT(6)
#define IPSKB_L3SLAVE BIT(7)
#define IPSKB_NOPOLICY BIT(8)
+#define IPSKB_MULTIPATH BIT(9)

u16 frag_max_size;
};
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 6268963d9599..fa4e6af382e2 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -472,13 +472,10 @@ void rt6_get_prefsrc(const struct rt6_info *rt, struct in6_addr *addr)
rcu_read_lock();

from = rcu_dereference(rt->from);
- if (from) {
+ if (from)
*addr = from->fib6_prefsrc.addr;
- } else {
- struct in6_addr in6_zero = {};
-
- *addr = in6_zero;
- }
+ else
+ *addr = in6addr_any;

rcu_read_unlock();
}
@@ -610,7 +607,10 @@ static inline bool fib6_rules_early_flow_dissect(struct net *net,
if (!net->ipv6.fib6_rules_require_fldissect)
return false;

- skb_flow_dissect_flow_keys(skb, flkeys, flag);
+ memset(flkeys, 0, sizeof(*flkeys));
+ __skb_flow_dissect(net, skb, &flow_keys_dissector,
+ flkeys, NULL, 0, 0, 0, flag);
+
fl6->fl6_sport = flkeys->ports.src;
fl6->fl6_dport = flkeys->ports.dst;
fl6->flowi6_proto = flkeys->basic.ip_proto;
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index a378eff827c7..f0c13864180e 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -418,7 +418,10 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net,
if (!net->ipv4.fib_rules_require_fldissect)
return false;

- skb_flow_dissect_flow_keys(skb, flkeys, flag);
+ memset(flkeys, 0, sizeof(*flkeys));
+ __skb_flow_dissect(net, skb, &flow_keys_dissector,
+ flkeys, NULL, 0, 0, 0, flag);
+
fl4->fl4_sport = flkeys->ports.src;
fl4->fl4_dport = flkeys->ports.dst;
fl4->flowi4_proto = flkeys->basic.ip_proto;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index fca357679816..bca80522f95c 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -473,15 +473,14 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
u64_stats_inc(&tstats->tx_packets);
u64_stats_update_end(&tstats->syncp);
put_cpu_ptr(tstats);
+ return;
+ }
+
+ if (pkt_len < 0) {
+ DEV_STATS_INC(dev, tx_errors);
+ DEV_STATS_INC(dev, tx_aborted_errors);
} else {
- struct net_device_stats *err_stats = &dev->stats;
-
- if (pkt_len < 0) {
- err_stats->tx_errors++;
- err_stats->tx_aborted_errors++;
- } else {
- err_stats->tx_dropped++;
- }
+ DEV_STATS_INC(dev, tx_dropped);
}
}

diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index e4ceef687c1c..517bdae78614 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -750,6 +750,11 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
cpu_to_be32(0x0000ffff))) == 0UL;
}

+static inline bool ipv6_addr_v4mapped_any(const struct in6_addr *a)
+{
+ return ipv6_addr_v4mapped(a) && ipv4_is_zeronet(a->s6_addr32[3]);
+}
+
static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a)
{
return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]);
@@ -1322,7 +1327,7 @@ static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
return 0;
}

-static inline int ip6_sock_set_addr_preferences(struct sock *sk, bool val)
+static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val)
{
int ret;

diff --git a/include/net/sock.h b/include/net/sock.h
index d1f936ed9755..fe695e8bfe28 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1049,6 +1049,12 @@ static inline void sk_wmem_queued_add(struct sock *sk, int val)
WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
}

+static inline void sk_forward_alloc_add(struct sock *sk, int val)
+{
+ /* Paired with lockless reads of sk->sk_forward_alloc */
+ WRITE_ONCE(sk->sk_forward_alloc, sk->sk_forward_alloc + val);
+}
+
void sk_stream_write_space(struct sock *sk);

/* OOB backlog add */
@@ -1401,7 +1407,7 @@ static inline int sk_forward_alloc_get(const struct sock *sk)
if (sk->sk_prot->forward_alloc_get)
return sk->sk_prot->forward_alloc_get(sk);
#endif
- return sk->sk_forward_alloc;
+ return READ_ONCE(sk->sk_forward_alloc);
}

static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
@@ -1697,14 +1703,14 @@ static inline void sk_mem_charge(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return;
- sk->sk_forward_alloc -= size;
+ sk_forward_alloc_add(sk, -size);
}

static inline void sk_mem_uncharge(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return;
- sk->sk_forward_alloc += size;
+ sk_forward_alloc_add(sk, size);
sk_mem_reclaim(sk);
}
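A userspace analog of the sk_forward_alloc annotations above, with C11 relaxed atomics standing in for WRITE_ONCE()/READ_ONCE(): the owner updates the field under the socket lock while other contexts read it locklessly, so both sides need accesses the compiler cannot tear or fuse. The struct here is illustrative, not the kernel's struct sock.

#include <stdatomic.h>
#include <stdio.h>

struct sock_lite {
	atomic_long forward_alloc;	/* stand-in for sk->sk_forward_alloc */
};

/* writer side: called with the socket lock held, like sk_forward_alloc_add() */
static void forward_alloc_add(struct sock_lite *sk, long val)
{
	long cur = atomic_load_explicit(&sk->forward_alloc, memory_order_relaxed);

	/* paired with the lockless read below; WRITE_ONCE() in the patch */
	atomic_store_explicit(&sk->forward_alloc, cur + val, memory_order_relaxed);
}

/* reader side: may run without the lock; READ_ONCE() in the patch */
static long forward_alloc_get(struct sock_lite *sk)
{
	return atomic_load_explicit(&sk->forward_alloc, memory_order_relaxed);
}

int main(void)
{
	struct sock_lite sk;

	atomic_init(&sk.forward_alloc, 0);
	forward_alloc_add(&sk, 4096);
	forward_alloc_add(&sk, -1024);
	printf("forward_alloc = %ld\n", forward_alloc_get(&sk));	/* 3072 */
	return 0;
}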

diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
index c2300c407f58..76297ecd4935 100644
--- a/include/trace/events/fib.h
+++ b/include/trace/events/fib.h
@@ -36,7 +36,6 @@ TRACE_EVENT(fib_table_lookup,
),

TP_fast_assign(
- struct in6_addr in6_zero = {};
struct net_device *dev;
struct in6_addr *in6;
__be32 *p32;
@@ -74,7 +73,7 @@ TRACE_EVENT(fib_table_lookup,
*p32 = nhc->nhc_gw.ipv4;

in6 = (struct in6_addr *)__entry->gw6;
- *in6 = in6_zero;
+ *in6 = in6addr_any;
} else if (nhc->nhc_gw_family == AF_INET6) {
p32 = (__be32 *) __entry->gw4;
*p32 = 0;
@@ -87,7 +86,7 @@ TRACE_EVENT(fib_table_lookup,
*p32 = 0;

in6 = (struct in6_addr *)__entry->gw6;
- *in6 = in6_zero;
+ *in6 = in6addr_any;
}
),

diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h
index 6e821eb79450..4d3e607b3cde 100644
--- a/include/trace/events/fib6.h
+++ b/include/trace/events/fib6.h
@@ -68,11 +68,8 @@ TRACE_EVENT(fib6_table_lookup,
strcpy(__entry->name, "-");
}
if (res->f6i == net->ipv6.fib6_null_entry) {
- struct in6_addr in6_zero = {};
-
in6 = (struct in6_addr *)__entry->gw;
- *in6 = in6_zero;
-
+ *in6 = in6addr_any;
} else if (res->nh) {
in6 = (struct in6_addr *)__entry->gw;
*in6 = res->nh->fib_nh_gw6;
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index 411bb2d1acd4..98ac9dbcec2f 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -181,6 +181,16 @@ static void io_worker_ref_put(struct io_wq *wq)
complete(&wq->worker_done);
}

+bool io_wq_worker_stopped(void)
+{
+ struct io_worker *worker = current->worker_private;
+
+ if (WARN_ON_ONCE(!io_wq_current_is_worker()))
+ return true;
+
+ return test_bit(IO_WQ_BIT_EXIT, &worker->wqe->wq->state);
+}
+
static void io_worker_cancel_cb(struct io_worker *worker)
{
struct io_wqe_acct *acct = io_wqe_get_acct(worker);
@@ -1340,13 +1350,16 @@ static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
return __io_wq_cpu_online(wq, cpu, false);
}

-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
+int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
{
int i;

+ if (!tctx || !tctx->io_wq)
+ return -EINVAL;
+
rcu_read_lock();
for_each_node(i) {
- struct io_wqe *wqe = wq->wqes[i];
+ struct io_wqe *wqe = tctx->io_wq->wqes[i];

if (mask)
cpumask_copy(wqe->cpu_mask, mask);
diff --git a/io_uring/io-wq.h b/io_uring/io-wq.h
index 31228426d192..2b2a6406dd8e 100644
--- a/io_uring/io-wq.h
+++ b/io_uring/io-wq.h
@@ -50,8 +50,9 @@ void io_wq_put_and_exit(struct io_wq *wq);
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);

-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
+int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);
+bool io_wq_worker_stopped(void);

static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 6d455e2428b9..f413ebed81ab 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1823,6 +1823,8 @@ void io_wq_submit_work(struct io_wq_work *work)
if (!needs_poll) {
if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
break;
+ if (io_wq_worker_stopped())
+ break;
cond_resched();
continue;
}
@@ -3833,16 +3835,28 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
return 0;
}

+static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
+ cpumask_var_t new_mask)
+{
+ int ret;
+
+ if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
+ ret = io_wq_cpu_affinity(current->io_uring, new_mask);
+ } else {
+ mutex_unlock(&ctx->uring_lock);
+ ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
+ mutex_lock(&ctx->uring_lock);
+ }
+
+ return ret;
+}
+
static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
void __user *arg, unsigned len)
{
- struct io_uring_task *tctx = current->io_uring;
cpumask_var_t new_mask;
int ret;

- if (!tctx || !tctx->io_wq)
- return -EINVAL;
-
if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
return -ENOMEM;

@@ -3863,19 +3877,14 @@ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
return -EFAULT;
}

- ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
+ ret = __io_register_iowq_aff(ctx, new_mask);
free_cpumask_var(new_mask);
return ret;
}

static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
{
- struct io_uring_task *tctx = current->io_uring;
-
- if (!tctx || !tctx->io_wq)
- return -EINVAL;
-
- return io_wq_cpu_affinity(tctx->io_wq, NULL);
+ return __io_register_iowq_aff(ctx, NULL);
}

static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
diff --git a/io_uring/net.c b/io_uring/net.c
index 2b44126a876e..7245218fdbe2 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1337,12 +1337,12 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
return IOU_OK;
}

- if (ret >= 0 &&
- io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
+ if (ret < 0)
+ return ret;
+ if (io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
goto retry;

- io_req_set_res(req, ret, 0);
- return (issue_flags & IO_URING_F_MULTISHOT) ? IOU_STOP_MULTISHOT : IOU_OK;
+ return -ECANCELED;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 869e1d2a4413..a4084acaff91 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -360,11 +360,12 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
if (ret == IOU_POLL_NO_ACTION)
return;

+ io_tw_lock(req->ctx, locked);
io_poll_remove_entries(req);
io_poll_tw_hash_eject(req, locked);

if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
- io_req_complete_post(req);
+ io_req_task_complete(req, locked);
else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
io_req_task_submit(req, locked);
else
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index 6ffa5cf1bbb8..7b6facf529b8 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -423,3 +423,20 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
io_sq_thread_finish(ctx);
return ret;
}
+
+__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
+ cpumask_var_t mask)
+{
+ struct io_sq_data *sqd = ctx->sq_data;
+ int ret = -EINVAL;
+
+ if (sqd) {
+ io_sq_thread_park(sqd);
+ /* Don't set affinity for a dying thread */
+ if (sqd->thread)
+ ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
+ io_sq_thread_unpark(sqd);
+ }
+
+ return ret;
+}
diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h
index 0c3fbcd1f583..36245f1afa5e 100644
--- a/io_uring/sqpoll.h
+++ b/io_uring/sqpoll.h
@@ -27,3 +27,4 @@ void io_sq_thread_park(struct io_sq_data *sqd);
void io_sq_thread_unpark(struct io_sq_data *sqd);
void io_put_sq_data(struct io_sq_data *sqd);
int io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
+int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0c44a716f0a2..0c8b7733573e 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -5135,14 +5135,15 @@ int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
}

run_ctx.bpf_cookie = 0;
- run_ctx.saved_run_ctx = NULL;
- if (!__bpf_prog_enter_sleepable(prog, &run_ctx)) {
+ if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
/* recursion detected */
+ __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
bpf_prog_put(prog);
return -EBUSY;
}
attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
- __bpf_prog_exit_sleepable(prog, 0 /* bpf_prog_run does runtime stats */, &run_ctx);
+ __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
+ &run_ctx);
bpf_prog_put(prog);
return 0;
#endif
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 30af8f66e17b..c4381dfcd6b0 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -874,7 +874,7 @@ static __always_inline u64 notrace bpf_prog_start_time(void)
* [2..MAX_U64] - execute bpf prog and record execution time.
* This is start time.
*/
-u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
+static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
__acquires(RCU)
{
rcu_read_lock();
@@ -911,7 +911,8 @@ static void notrace update_prog_stats(struct bpf_prog *prog,
}
}

-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx)
+static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx)
__releases(RCU)
{
bpf_reset_run_ctx(run_ctx->saved_run_ctx);
@@ -922,8 +923,8 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_
rcu_read_unlock();
}

-u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
- struct bpf_tramp_run_ctx *run_ctx)
+static u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx)
__acquires(RCU)
{
/* Runtime stats are exported via actual BPF_LSM_CGROUP
@@ -937,8 +938,8 @@ u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
return NO_START_TIME;
}

-void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
- struct bpf_tramp_run_ctx *run_ctx)
+static void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx)
__releases(RCU)
{
bpf_reset_run_ctx(run_ctx->saved_run_ctx);
@@ -947,35 +948,57 @@ void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
rcu_read_unlock();
}

-u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
+u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx)
{
rcu_read_lock_trace();
migrate_disable();
might_fault();

+ run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
+
if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
bpf_prog_inc_misses_counter(prog);
return 0;
}
+ return bpf_prog_start_time();
+}
+
+void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx)
+{
+ bpf_reset_run_ctx(run_ctx->saved_run_ctx);
+
+ update_prog_stats(prog, start);
+ this_cpu_dec(*(prog->active));
+ migrate_enable();
+ rcu_read_unlock_trace();
+}
+
+static u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx)
+{
+ rcu_read_lock_trace();
+ migrate_disable();
+ might_fault();

run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

return bpf_prog_start_time();
}

-void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
- struct bpf_tramp_run_ctx *run_ctx)
+static void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx)
{
bpf_reset_run_ctx(run_ctx->saved_run_ctx);

update_prog_stats(prog, start);
- this_cpu_dec(*(prog->active));
migrate_enable();
rcu_read_unlock_trace();
}

-u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
- struct bpf_tramp_run_ctx *run_ctx)
+static u64 notrace __bpf_prog_enter(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx)
__acquires(RCU)
{
rcu_read_lock();
@@ -986,8 +1009,8 @@ u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
return bpf_prog_start_time();
}

-void notrace __bpf_prog_exit_struct_ops(struct bpf_prog *prog, u64 start,
- struct bpf_tramp_run_ctx *run_ctx)
+static void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx)
__releases(RCU)
{
bpf_reset_run_ctx(run_ctx->saved_run_ctx);
@@ -1007,6 +1030,36 @@ void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
percpu_ref_put(&tr->pcref);
}

+bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog)
+{
+ bool sleepable = prog->aux->sleepable;
+
+ if (bpf_prog_check_recur(prog))
+ return sleepable ? __bpf_prog_enter_sleepable_recur :
+ __bpf_prog_enter_recur;
+
+ if (resolve_prog_type(prog) == BPF_PROG_TYPE_LSM &&
+ prog->expected_attach_type == BPF_LSM_CGROUP)
+ return __bpf_prog_enter_lsm_cgroup;
+
+ return sleepable ? __bpf_prog_enter_sleepable : __bpf_prog_enter;
+}
+
+bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog)
+{
+ bool sleepable = prog->aux->sleepable;
+
+ if (bpf_prog_check_recur(prog))
+ return sleepable ? __bpf_prog_exit_sleepable_recur :
+ __bpf_prog_exit_recur;
+
+ if (resolve_prog_type(prog) == BPF_PROG_TYPE_LSM &&
+ prog->expected_attach_type == BPF_LSM_CGROUP)
+ return __bpf_prog_exit_lsm_cgroup;
+
+ return sleepable ? __bpf_prog_exit_sleepable : __bpf_prog_exit;
+}
+
int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
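A userspace sketch of the dispatch scheme bpf_trampoline_enter()/bpf_trampoline_exit() implement above: the right enter/exit helper is picked once, from the program's properties, and handed to the trampoline as a function pointer, instead of branching on every invocation. Types and names below are illustrative, not the BPF API.

#include <stdbool.h>
#include <stdio.h>

struct prog {
	bool sleepable;
	bool check_recur;
};

typedef unsigned long long (*enter_t)(struct prog *p);

static unsigned long long enter_recur(struct prog *p)
{
	(void)p;
	printf("enter: recursion-protected\n");
	return 1;
}

static unsigned long long enter_sleepable_recur(struct prog *p)
{
	(void)p;
	printf("enter: sleepable + recursion-protected\n");
	return 1;
}

static unsigned long long enter_plain(struct prog *p)
{
	(void)p;
	printf("enter: plain\n");
	return 1;
}

static unsigned long long enter_sleepable(struct prog *p)
{
	(void)p;
	printf("enter: sleepable\n");
	return 1;
}

static enter_t pick_enter(struct prog *p)
{
	if (p->check_recur)
		return p->sleepable ? enter_sleepable_recur : enter_recur;
	return p->sleepable ? enter_sleepable : enter_plain;
}

int main(void)
{
	struct prog p = { .sleepable = true, .check_recur = false };
	enter_t enter = pick_enter(&p);	/* chosen once, at attach time */

	enter(&p);			/* called on every invocation */
	return 0;
}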
diff --git a/lib/idr.c b/lib/idr.c
index 7ecdfdb5309e..13f2758c2377 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(idr_alloc);
* @end: The maximum ID (exclusive).
* @gfp: Memory allocation flags.
*
- * Allocates an unused ID in the range specified by @nextid and @end. If
+ * Allocates an unused ID in the range specified by @start and @end. If
* @end is <= 0, it is treated as one larger than %INT_MAX. This allows
* callers to use @start + N as @end as long as N is within integer range.
* The search for an unused ID will start at the last ID allocated and will
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 184df6f701b4..a90bd265d73d 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -667,12 +667,13 @@ static int kunit_module_notify(struct notifier_block *nb, unsigned long val,

switch (val) {
case MODULE_STATE_LIVE:
- kunit_module_init(mod);
break;
case MODULE_STATE_GOING:
kunit_module_exit(mod);
break;
case MODULE_STATE_COMING:
+ kunit_module_init(mod);
+ break;
case MODULE_STATE_UNFORMED:
break;
}
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 60e1984c060f..0ae35223d773 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -93,7 +93,7 @@ static int __init test_pages(int *total_failures)
int failures = 0, num_tests = 0;
int i;

- for (i = 0; i < 10; i++)
+ for (i = 0; i <= MAX_ORDER; i++)
num_tests += do_alloc_pages_order(i, &failures);

REPORT_FAILURES_IN_FN();
diff --git a/lib/test_scanf.c b/lib/test_scanf.c
index b620cf7de503..a2707af2951a 100644
--- a/lib/test_scanf.c
+++ b/lib/test_scanf.c
@@ -606,7 +606,7 @@ static void __init numbers_slice(void)
#define test_number_prefix(T, str, scan_fmt, expect0, expect1, n_args, fn) \
do { \
const T expect[2] = { expect0, expect1 }; \
- T result[2] = {~expect[0], ~expect[1]}; \
+ T result[2] = { (T)~expect[0], (T)~expect[1] }; \
\
_test(fn, &expect, str, scan_fmt, n_args, &result[0], &result[1]); \
} while (0)
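The cast in test_number_prefix() works around integer promotion: applying ~ to a type narrower than int first promotes the operand, so ~expect[0] yields a full-width int that cannot be stored back in T without a truncation warning. A short demonstration:

#include <stdio.h>

int main(void)
{
	unsigned char expect = 0x12;

	int promoted = ~expect;					/* ~0x12 as int: 0xffffffed */
	unsigned char cast_back = (unsigned char)~expect;	/* 0xed */

	printf("~expect as int: 0x%x\n", (unsigned int)promoted);
	printf("(T)~expect:     0x%x\n", cast_back);
	return 0;
}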
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 4962dd1ba4a6..c04214055229 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -36,14 +36,22 @@ struct vmemmap_remap_walk {
struct list_head *vmemmap_pages;
};

-static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
+static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
pmd_t __pmd;
int i;
unsigned long addr = start;
- struct page *page = pmd_page(*pmd);
- pte_t *pgtable = pte_alloc_one_kernel(&init_mm);
+ struct page *head;
+ pte_t *pgtable;
+
+ spin_lock(&init_mm.page_table_lock);
+ head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL;
+ spin_unlock(&init_mm.page_table_lock);

+ if (!head)
+ return 0;
+
+ pgtable = pte_alloc_one_kernel(&init_mm);
if (!pgtable)
return -ENOMEM;

@@ -53,7 +61,7 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
pte_t entry, *pte;
pgprot_t pgprot = PAGE_KERNEL;

- entry = mk_pte(page + i, pgprot);
+ entry = mk_pte(head + i, pgprot);
pte = pte_offset_kernel(&__pmd, addr);
set_pte_at(&init_mm, addr, pte, entry);
}
@@ -65,8 +73,8 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
* be treated as independent small pages (as they can be freed
* individually).
*/
- if (!PageReserved(page))
- split_page(page, get_order(PMD_SIZE));
+ if (!PageReserved(head))
+ split_page(head, get_order(PMD_SIZE));

/* Make pte visible before pmd. See comment in pmd_install(). */
smp_wmb();
@@ -80,20 +88,6 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
return 0;
}

-static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
-{
- int leaf;
-
- spin_lock(&init_mm.page_table_lock);
- leaf = pmd_leaf(*pmd);
- spin_unlock(&init_mm.page_table_lock);
-
- if (!leaf)
- return 0;
-
- return __split_vmemmap_huge_pmd(pmd, start);
-}
-
static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end,
struct vmemmap_remap_walk *walk)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 67b6d8238b3e..0a403b241718 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3841,10 +3841,6 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
case _MEMSWAP:
ret = mem_cgroup_resize_max(memcg, nr_pages, true);
break;
- case _KMEM:
- /* kmem.limit_in_bytes is deprecated. */
- ret = -EOPNOTSUPP;
- break;
case _TCP:
ret = memcg_update_tcp_max(memcg, nr_pages);
break;
@@ -5055,12 +5051,6 @@ static struct cftype mem_cgroup_legacy_files[] = {
.seq_show = memcg_numa_stat_show,
},
#endif
- {
- .name = "kmem.limit_in_bytes",
- .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
- .write = mem_cgroup_write,
- .read_u64 = mem_cgroup_read_u64,
- },
{
.name = "kmem.usage_in_bytes",
.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d18296109aa7..93d6f27dd40b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4258,7 +4258,7 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)

/* prevent cold/hot inversion if force_scan is true */
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
- struct list_head *head = &lrugen->lists[old_gen][type][zone];
+ struct list_head *head = &lrugen->folios[old_gen][type][zone];

while (!list_empty(head)) {
struct folio *folio = lru_to_folio(head);
@@ -4269,7 +4269,7 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);

new_gen = folio_inc_gen(lruvec, folio, false);
- list_move_tail(&folio->lru, &lrugen->lists[new_gen][type][zone]);
+ list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]);

if (!--remaining)
return false;
@@ -4297,7 +4297,7 @@ static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
gen = lru_gen_from_seq(min_seq[type]);

for (zone = 0; zone < MAX_NR_ZONES; zone++) {
- if (!list_empty(&lrugen->lists[gen][type][zone]))
+ if (!list_empty(&lrugen->folios[gen][type][zone]))
goto next;
}

@@ -4331,6 +4331,7 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
int type, zone;
struct lru_gen_struct *lrugen = &lruvec->lrugen;

+restart:
spin_lock_irq(&lruvec->lru_lock);

VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
@@ -4341,11 +4342,12 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)

VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap));

- while (!inc_min_seq(lruvec, type, can_swap)) {
- spin_unlock_irq(&lruvec->lru_lock);
- cond_resched();
- spin_lock_irq(&lruvec->lru_lock);
- }
+ if (inc_min_seq(lruvec, type, can_swap))
+ continue;
+
+ spin_unlock_irq(&lruvec->lru_lock);
+ cond_resched();
+ goto restart;
}

/*
@@ -4728,7 +4730,8 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
* the eviction
******************************************************************************/

-static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx)
+static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc,
+ int tier_idx)
{
bool success;
int gen = folio_lru_gen(folio);
@@ -4762,7 +4765,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx)

/* promoted */
if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
- list_move(&folio->lru, &lrugen->lists[gen][type][zone]);
+ list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
}

@@ -4771,7 +4774,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx)
int hist = lru_hist_from_seq(lrugen->min_seq[type]);

gen = folio_inc_gen(lruvec, folio, false);
- list_move_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
+ list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);

WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
lrugen->protected[hist][type][tier - 1] + delta);
@@ -4779,11 +4782,18 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx)
return true;
}

+ /* ineligible */
+ if (zone > sc->reclaim_idx) {
+ gen = folio_inc_gen(lruvec, folio, false);
+ list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
+ return true;
+ }
+
/* waiting for writeback */
if (folio_test_locked(folio) || folio_test_writeback(folio) ||
(type == LRU_GEN_FILE && folio_test_dirty(folio))) {
gen = folio_inc_gen(lruvec, folio, true);
- list_move(&folio->lru, &lrugen->lists[gen][type][zone]);
+ list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
}

@@ -4831,7 +4841,8 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,
int type, int tier, struct list_head *list)
{
- int gen, zone;
+ int i;
+ int gen;
enum vm_event_item item;
int sorted = 0;
int scanned = 0;
@@ -4847,10 +4858,11 @@ static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,

gen = lru_gen_from_seq(lrugen->min_seq[type]);

- for (zone = sc->reclaim_idx; zone >= 0; zone--) {
+ for (i = MAX_NR_ZONES; i > 0; i--) {
LIST_HEAD(moved);
int skipped = 0;
- struct list_head *head = &lrugen->lists[gen][type][zone];
+ int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES;
+ struct list_head *head = &lrugen->folios[gen][type][zone];

while (!list_empty(head)) {
struct folio *folio = lru_to_folio(head);
@@ -4863,7 +4875,7 @@ static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,

scanned += delta;

- if (sort_folio(lruvec, folio, tier))
+ if (sort_folio(lruvec, folio, sc, tier))
sorted += delta;
else if (isolate_folio(lruvec, folio, sc)) {
list_add(&folio->lru, list);
@@ -5250,7 +5262,7 @@ static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
int gen, type, zone;

for_each_gen_type_zone(gen, type, zone) {
- if (!list_empty(&lrugen->lists[gen][type][zone]))
+ if (!list_empty(&lrugen->folios[gen][type][zone]))
return false;
}
}
@@ -5295,7 +5307,7 @@ static bool drain_evictable(struct lruvec *lruvec)
int remaining = MAX_LRU_BATCH;

for_each_gen_type_zone(gen, type, zone) {
- struct list_head *head = &lruvec->lrugen.lists[gen][type][zone];
+ struct list_head *head = &lruvec->lrugen.folios[gen][type][zone];

while (!list_empty(head)) {
bool success;
@@ -5832,7 +5844,7 @@ void lru_gen_init_lruvec(struct lruvec *lruvec)
lrugen->timestamps[i] = jiffies;

for_each_gen_type_zone(gen, type, zone)
- INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
+ INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]);

lruvec->mm_state.seq = MIN_NR_GENS;
init_waitqueue_head(&lruvec->mm_state.wait);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 007730412947..3288490590f2 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1738,8 +1738,7 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)

memset(&keys, 0, sizeof(keys));
__skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
- &keys, NULL, 0, 0, 0,
- FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+ &keys, NULL, 0, 0, 0, 0);

return __flow_hash_from_keys(&keys, &hashrnd);
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 24bf4aa222d2..8dca4a7ca4a1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -424,17 +424,26 @@ EXPORT_SYMBOL(napi_build_skb);
* may be used. Otherwise, the packet data may be discarded until enough
* memory is free
*/
-static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
+static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
bool *pfmemalloc)
{
- void *obj;
bool ret_pfmemalloc = false;
+ size_t obj_size;
+ void *obj;
+
+ obj_size = SKB_HEAD_ALIGN(*size);
+
+ obj_size = kmalloc_size_roundup(obj_size);
+ /* The following cast might truncate high-order bits of obj_size; this
+ * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
+ */
+ *size = (unsigned int)obj_size;

/*
* Try a regular allocation; when that fails and we're not entitled
* to the reserves, fail.
*/
- obj = kmalloc_node_track_caller(size,
+ obj = kmalloc_node_track_caller(obj_size,
flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
node);
if (obj || !(gfp_pfmemalloc_allowed(flags)))
@@ -442,7 +451,7 @@ static void *kmalloc_reserve(size_t size, gfp_t flags, int node,

/* Try again but now we are using pfmemalloc reserves */
ret_pfmemalloc = true;
- obj = kmalloc_node_track_caller(size, flags, node);
+ obj = kmalloc_node_track_caller(obj_size, flags, node);

out:
if (pfmemalloc)
@@ -479,7 +488,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
{
struct kmem_cache *cache;
struct sk_buff *skb;
- unsigned int osize;
bool pfmemalloc;
u8 *data;

@@ -504,18 +512,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
* aligned memory blocks, unless SLUB/SLAB debug is enabled.
* Both skb->head and skb_shared_info are cache line aligned.
*/
- size = SKB_DATA_ALIGN(size);
- size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- osize = kmalloc_size_roundup(size);
- data = kmalloc_reserve(osize, gfp_mask, node, &pfmemalloc);
+ data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
if (unlikely(!data))
goto nodata;
/* kmalloc_size_roundup() might give us more room than requested.
* Put skb_shared_info exactly at the end of allocated zone,
* to allow max possible filling before reallocation.
*/
- size = SKB_WITH_OVERHEAD(osize);
- prefetchw(data + size);
+ prefetchw(data + SKB_WITH_OVERHEAD(size));

/*
* Only clear those fields we need to clear, not those that we will
@@ -523,7 +527,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
* the tail pointer in struct sk_buff!
*/
memset(skb, 0, offsetof(struct sk_buff, tail));
- __build_skb_around(skb, data, osize);
+ __build_skb_around(skb, data, size);
skb->pfmemalloc = pfmemalloc;

if (flags & SKB_ALLOC_FCLONE) {
@@ -578,8 +582,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
goto skb_success;
}

- len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- len = SKB_DATA_ALIGN(len);
+ len = SKB_HEAD_ALIGN(len);

if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
@@ -678,8 +681,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
} else {
- len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- len = SKB_DATA_ALIGN(len);
+ len = SKB_HEAD_ALIGN(len);

data = page_frag_alloc(&nc->page, len, gfp_mask);
pfmemalloc = nc->page.pfmemalloc;
@@ -1837,10 +1839,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;

- size = SKB_DATA_ALIGN(size);
- size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- size = kmalloc_size_roundup(size);
- data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
+ data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
goto nodata;
size = SKB_WITH_OVERHEAD(size);
@@ -6204,10 +6203,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;

- size = SKB_DATA_ALIGN(size);
- size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- size = kmalloc_size_roundup(size);
- data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
+ data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
return -ENOMEM;
size = SKB_WITH_OVERHEAD(size);
@@ -6323,10 +6319,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;

- size = SKB_DATA_ALIGN(size);
- size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- size = kmalloc_size_roundup(size);
- data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
+ data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
return -ENOMEM;
size = SKB_WITH_OVERHEAD(size);
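
The skbuff.c hunks above move the SKB_HEAD_ALIGN() and
kmalloc_size_roundup() arithmetic into kmalloc_reserve() itself, which now
reports the size it actually allocated back through its pointer argument,
so the call sites stop duplicating that computation. A user-space sketch
of the round-up-and-report-back pattern (the 64-byte rounding below is an
illustrative stand-in for the kernel helpers):

#include <stdio.h>
#include <stdlib.h>

/* rounds the request up, allocates, and reports the usable size back */
static void *alloc_rounded(unsigned int *size)
{
    size_t rounded = (*size + 63) & ~(size_t)63;

    *size = (unsigned int)rounded;
    return malloc(rounded);
}

int main(void)
{
    unsigned int size = 100;
    void *p = alloc_rounded(&size);

    if (p)
        printf("asked for 100, got %u usable bytes\n", size);  /* 128 */
    free(p);
    return 0;
}
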
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 296e45b6c3c0..a5c1f67dc96e 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -611,12 +611,18 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
u32 off, u32 len, bool ingress)
{
+ int err = 0;
+
if (!ingress) {
if (!sock_writeable(psock->sk))
return -EAGAIN;
return skb_send_sock(psock->sk, skb, off, len);
}
- return sk_psock_skb_ingress(psock, skb, off, len);
+ skb_get(skb);
+ err = sk_psock_skb_ingress(psock, skb, off, len);
+ if (err < 0)
+ kfree_skb(skb);
+ return err;
}

static void sk_psock_skb_state(struct sk_psock *psock,
@@ -684,9 +690,7 @@ static void sk_psock_backlog(struct work_struct *work)
} while (len);

skb = skb_dequeue(&psock->ingress_skb);
- if (!ingress) {
- kfree_skb(skb);
- }
+ kfree_skb(skb);
}
end:
mutex_unlock(&psock->work_mutex);
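
The skmsg.c fix pins the skb with skb_get() before handing it to the
ingress path and drops that extra reference if ingress fails, which in
turn lets the backlog loop free its own dequeued reference
unconditionally. A toy, user-space sketch of the balanced get/put around
a consumer that only takes ownership on success (the plain-integer
refcount is an illustrative stand-in for the kernel's skb accounting):

#include <stdio.h>

struct buf { int refs; };

static void buf_get(struct buf *b) { b->refs++; }
static void buf_put(struct buf *b) { b->refs--; }

/* consumes the caller's reference on success, leaves it on failure */
static int consume(struct buf *b, int fail)
{
    if (fail)
        return -1;
    buf_put(b);
    return 0;
}

static int handle(struct buf *b, int fail)
{
    int err;

    buf_get(b);        /* mirrors skb_get() */
    err = consume(b, fail);
    if (err < 0)
        buf_put(b);    /* mirrors kfree_skb() on the error path */
    return err;
}

int main(void)
{
    struct buf b = { .refs = 1 };

    handle(&b, 0);
    handle(&b, 1);
    printf("refs=%d\n", b.refs);  /* 1 either way: no leak, no underflow */
    return 0;
}
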
diff --git a/net/core/sock.c b/net/core/sock.c
index fc475845c94d..e5858fa5d6d5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -761,7 +761,8 @@ bool sk_mc_loop(struct sock *sk)
return false;
if (!sk)
return true;
- switch (sk->sk_family) {
+ /* IPV6_ADDRFORM can change sk->sk_family under us. */
+ switch (READ_ONCE(sk->sk_family)) {
case AF_INET:
return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
@@ -1033,7 +1034,7 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
return -ENOMEM;
}
- sk->sk_forward_alloc += pages << PAGE_SHIFT;
+ sk_forward_alloc_add(sk, pages << PAGE_SHIFT);

WRITE_ONCE(sk->sk_reserved_mem,
sk->sk_reserved_mem + (pages << PAGE_SHIFT));
@@ -2689,9 +2690,9 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
break;
- if (sk->sk_shutdown & SEND_SHUTDOWN)
+ if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
break;
- if (sk->sk_err)
+ if (READ_ONCE(sk->sk_err))
break;
timeo = schedule_timeout(timeo);
}
@@ -2719,7 +2720,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
goto failure;

err = -EPIPE;
- if (sk->sk_shutdown & SEND_SHUTDOWN)
+ if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
goto failure;

if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
@@ -3081,10 +3082,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
int ret, amt = sk_mem_pages(size);

- sk->sk_forward_alloc += amt << PAGE_SHIFT;
+ sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
ret = __sk_mem_raise_allocated(sk, size, amt, kind);
if (!ret)
- sk->sk_forward_alloc -= amt << PAGE_SHIFT;
+ sk_forward_alloc_add(sk, -(amt << PAGE_SHIFT));
return ret;
}
EXPORT_SYMBOL(__sk_mem_schedule);
@@ -3116,7 +3117,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount)
void __sk_mem_reclaim(struct sock *sk, int amount)
{
amount >>= PAGE_SHIFT;
- sk->sk_forward_alloc -= amount << PAGE_SHIFT;
+ sk_forward_alloc_add(sk, -(amount << PAGE_SHIFT));
__sk_mem_reduce_allocated(sk, amount);
}
EXPORT_SYMBOL(__sk_mem_reclaim);
@@ -3714,7 +3715,7 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem)
mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
- mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
+ mem[SK_MEMINFO_FWD_ALLOC] = sk_forward_alloc_get(sk);
mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
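
Several sock.c hunks above annotate lockless accesses to sk_shutdown,
sk_err, sk_family and sk_forward_alloc with READ_ONCE()/WRITE_ONCE() so
the compiler can neither tear the access nor re-fetch the value under a
concurrent writer. A minimal sketch of the pattern; the volatile-cast
macros below are simplified stand-ins for the kernel's definitions:

#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

static int sk_err;  /* stands in for sk->sk_err */

static void writer(void)
{
    WRITE_ONCE(sk_err, 110);  /* e.g. ETIMEDOUT, set by another thread */
}

static int reader(void)
{
    return READ_ONCE(sk_err); /* single untorn load, no lock needed */
}

int main(void)
{
    writer();
    printf("err=%d\n", reader());
    return 0;
}
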
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 940c0e27be73..e31d1247b9f0 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -27,6 +27,7 @@
#include <linux/net.h>
#include <linux/pm_runtime.h>
#include <net/devlink.h>
+#include <net/ipv6.h>
#include <net/xdp_sock_drv.h>
#include <net/flow_offload.h>
#include <linux/ethtool_netlink.h>
@@ -3090,7 +3091,6 @@ struct ethtool_rx_flow_rule *
ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
{
const struct ethtool_rx_flow_spec *fs = input->fs;
- static struct in6_addr zero_addr = {};
struct ethtool_rx_flow_match *match;
struct ethtool_rx_flow_rule *flow;
struct flow_action_entry *act;
@@ -3196,20 +3196,20 @@ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)

v6_spec = &fs->h_u.tcp_ip6_spec;
v6_m_spec = &fs->m_u.tcp_ip6_spec;
- if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr))) {
+ if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6src)) {
memcpy(&match->key.ipv6.src, v6_spec->ip6src,
sizeof(match->key.ipv6.src));
memcpy(&match->mask.ipv6.src, v6_m_spec->ip6src,
sizeof(match->mask.ipv6.src));
}
- if (memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
+ if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6dst)) {
memcpy(&match->key.ipv6.dst, v6_spec->ip6dst,
sizeof(match->key.ipv6.dst));
memcpy(&match->mask.ipv6.dst, v6_m_spec->ip6dst,
sizeof(match->mask.ipv6.dst));
}
- if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr)) ||
- memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
+ if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6src) ||
+ !ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6dst)) {
match->dissector.used_keys |=
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] =
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 629daacc9607..b71dab630a87 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -594,6 +594,7 @@ static int fill_frame_info(struct hsr_frame_info *frame,
proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
/* FIXME: */
netdev_warn_once(skb->dev, "VLAN not yet supported");
+ return -EINVAL;
}

frame->is_from_san = false;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e8b9a9202fec..35d6e74be840 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -354,14 +354,14 @@ static void __inet_del_ifa(struct in_device *in_dev,
{
struct in_ifaddr *promote = NULL;
struct in_ifaddr *ifa, *ifa1;
- struct in_ifaddr *last_prim;
+ struct in_ifaddr __rcu **last_prim;
struct in_ifaddr *prev_prom = NULL;
int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);

ASSERT_RTNL();

ifa1 = rtnl_dereference(*ifap);
- last_prim = rtnl_dereference(in_dev->ifa_list);
+ last_prim = ifap;
if (in_dev->dead)
goto no_promotions;

@@ -375,7 +375,7 @@ static void __inet_del_ifa(struct in_device *in_dev,
while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
ifa1->ifa_scope <= ifa->ifa_scope)
- last_prim = ifa;
+ last_prim = &ifa->ifa_next;

if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
ifa1->ifa_mask != ifa->ifa_mask ||
@@ -439,9 +439,9 @@ static void __inet_del_ifa(struct in_device *in_dev,

rcu_assign_pointer(prev_prom->ifa_next, next_sec);

- last_sec = rtnl_dereference(last_prim->ifa_next);
+ last_sec = rtnl_dereference(*last_prim);
rcu_assign_pointer(promote->ifa_next, last_sec);
- rcu_assign_pointer(last_prim->ifa_next, promote);
+ rcu_assign_pointer(*last_prim, promote);
}

promote->ifa_flags &= ~IFA_F_SECONDARY;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 3bb890a40ed7..3b6e6bc80dc1 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -278,7 +278,8 @@ void fib_release_info(struct fib_info *fi)
hlist_del(&nexthop_nh->nh_hash);
} endfor_nexthops(fi)
}
- fi->fib_dead = 1;
+ /* Paired with READ_ONCE() from fib_table_lookup() */
+ WRITE_ONCE(fi->fib_dead, 1);
fib_info_put(fi);
}
spin_unlock_bh(&fib_info_lock);
@@ -1581,6 +1582,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
link_it:
ofi = fib_find_info(fi);
if (ofi) {
+ /* fib_table_lookup() should not see @fi yet. */
fi->fib_dead = 1;
free_fib_info(fi);
refcount_inc(&ofi->fib_treeref);
@@ -1619,6 +1621,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,

failure:
if (fi) {
+ /* fib_table_lookup() should not see @fi yet. */
fi->fib_dead = 1;
free_fib_info(fi);
}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 74d403dbd2b4..d13fb9e76b97 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1582,7 +1582,8 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
if (fa->fa_dscp &&
inet_dscp_to_dsfield(fa->fa_dscp) != flp->flowi4_tos)
continue;
- if (fi->fib_dead)
+ /* Paired with WRITE_ONCE() in fib_release_info() */
+ if (READ_ONCE(fi->fib_dead))
continue;
if (fa->fa_info->fib_scope < flp->flowi4_scope)
continue;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index c19b462662ad..d79de4b95186 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -795,43 +795,45 @@ static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
const struct net *net, unsigned short port,
int l3mdev, const struct sock *sk)
{
+ if (!net_eq(ib2_net(tb), net) || tb->port != port ||
+ tb->l3mdev != l3mdev)
+ return false;
+
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family != tb->family)
+ if (sk->sk_family != tb->family) {
+ if (sk->sk_family == AF_INET)
+ return ipv6_addr_v4mapped(&tb->v6_rcv_saddr) &&
+ tb->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
+
return false;
+ }

if (sk->sk_family == AF_INET6)
- return net_eq(ib2_net(tb), net) && tb->port == port &&
- tb->l3mdev == l3mdev &&
- ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
- else
+ return ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
#endif
- return net_eq(ib2_net(tb), net) && tb->port == port &&
- tb->l3mdev == l3mdev && tb->rcv_saddr == sk->sk_rcv_saddr;
+ return tb->rcv_saddr == sk->sk_rcv_saddr;
}

bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
unsigned short port, int l3mdev, const struct sock *sk)
{
-#if IS_ENABLED(CONFIG_IPV6)
- struct in6_addr addr_any = {};
+ if (!net_eq(ib2_net(tb), net) || tb->port != port ||
+ tb->l3mdev != l3mdev)
+ return false;

+#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family != tb->family) {
if (sk->sk_family == AF_INET)
- return net_eq(ib2_net(tb), net) && tb->port == port &&
- tb->l3mdev == l3mdev &&
- ipv6_addr_equal(&tb->v6_rcv_saddr, &addr_any);
+ return ipv6_addr_any(&tb->v6_rcv_saddr) ||
+ ipv6_addr_v4mapped_any(&tb->v6_rcv_saddr);

return false;
}

if (sk->sk_family == AF_INET6)
- return net_eq(ib2_net(tb), net) && tb->port == port &&
- tb->l3mdev == l3mdev &&
- ipv6_addr_equal(&tb->v6_rcv_saddr, &addr_any);
- else
+ return ipv6_addr_any(&tb->v6_rcv_saddr);
#endif
- return net_eq(ib2_net(tb), net) && tb->port == port &&
- tb->l3mdev == l3mdev && tb->rcv_saddr == 0;
+ return tb->rcv_saddr == 0;
}

/* The socket's bhash2 hashbucket spinlock must be held when this is called */
@@ -853,11 +855,10 @@ inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, in
{
struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
u32 hash;
-#if IS_ENABLED(CONFIG_IPV6)
- struct in6_addr addr_any = {};

+#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6)
- hash = ipv6_portaddr_hash(net, &addr_any, port);
+ hash = ipv6_portaddr_hash(net, &in6addr_any, port);
else
#endif
hash = ipv4_portaddr_hash(net, 0, port);
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index e880ce77322a..e7196ecffafc 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -584,7 +584,8 @@ static void ip_sublist_rcv_finish(struct list_head *head)
static struct sk_buff *ip_extract_route_hint(const struct net *net,
struct sk_buff *skb, int rt_type)
{
- if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST)
+ if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST ||
+ IPCB(skb)->flags & IPSKB_MULTIPATH)
return NULL;

return skb;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 51bd9a50a1d1..a04ffc128e22 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2146,6 +2146,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);

fib_select_multipath(res, h);
+ IPCB(skb)->flags |= IPSKB_MULTIPATH;
}
#endif

diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 26bd039f9296..dc3166e56169 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3380,7 +3380,7 @@ void sk_forced_mem_schedule(struct sock *sk, int size)
if (delta <= 0)
return;
amt = sk_mem_pages(delta);
- sk->sk_forward_alloc += amt << PAGE_SHIFT;
+ sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
sk_memory_allocated_add(sk, amt);

if (mem_cgroup_sockets_enabled && sk->sk_memcg)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 42c1f7d9a980..b2aa7777521f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1474,9 +1474,9 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
spin_lock(&sk_queue->lock);


- sk->sk_forward_alloc += size;
+ sk_forward_alloc_add(sk, size);
amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
- sk->sk_forward_alloc -= amt;
+ sk_forward_alloc_add(sk, -amt);

if (amt)
__sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);
@@ -1582,7 +1582,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
sk->sk_forward_alloc += delta;
}

- sk->sk_forward_alloc -= size;
+ sk_forward_alloc_add(sk, -size);

/* no need to setup a destructor, we will explicitly release the
* forward allocated memory on dequeue
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 48a6486951cd..83be84219824 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1368,7 +1368,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
* idev->desync_factor if it's larger
*/
cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
- max_desync_factor = min_t(__u32,
+ max_desync_factor = min_t(long,
idev->cnf.max_desync_factor,
cnf_temp_preferred_lft - regen_advance);

diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index d94041bb4287..b8378814532c 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -99,7 +99,8 @@ static bool ip6_can_use_hint(const struct sk_buff *skb,
static struct sk_buff *ip6_extract_route_hint(const struct net *net,
struct sk_buff *skb)
{
- if (fib6_routes_require_src(net) || fib6_has_custom_rules(net))
+ if (fib6_routes_require_src(net) || fib6_has_custom_rules(net) ||
+ IP6CB(skb)->flags & IP6SKB_MULTIPATH)
return NULL;

return skb;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 960ab43a49c4..93957b20fccc 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -425,6 +425,9 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
if (match->nh && have_oif_match && res->nh)
return;

+ if (skb)
+ IP6CB(skb)->flags |= IP6SKB_MULTIPATH;
+
/* We might have already computed the hash for ICMPv6 errors. In such
* case it will always be non-zero. Otherwise now is the time to do it.
*/
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 890a2423f559..65845c59c065 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1065,15 +1065,18 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
out_error:
kcm_push(kcm);

- if (copied && sock->type == SOCK_SEQPACKET) {
+ if (sock->type == SOCK_SEQPACKET) {
/* Wrote some bytes before encountering an
* error; return partial success.
*/
- goto partial_message;
- }
-
- if (head != kcm->seq_skb)
+ if (copied)
+ goto partial_message;
+ if (head != kcm->seq_skb)
+ kfree_skb(head);
+ } else {
kfree_skb(head);
+ kcm->seq_skb = NULL;
+ }

err = sk_stream_error(sk, msg->msg_flags, err);

@@ -1981,6 +1984,8 @@ static __net_exit void kcm_exit_net(struct net *net)
* that all multiplexors and psocks have been destroyed.
*/
WARN_ON(!list_empty(&knet->mux_list));
+
+ mutex_destroy(&knet->mutex);
}

static struct pernet_operations kcm_net_ops = {
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 61fefa1a82db..6dd880d6b051 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -131,9 +131,15 @@ static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
__kfree_skb(skb);
}

+static void mptcp_rmem_fwd_alloc_add(struct sock *sk, int size)
+{
+ WRITE_ONCE(mptcp_sk(sk)->rmem_fwd_alloc,
+ mptcp_sk(sk)->rmem_fwd_alloc + size);
+}
+
static void mptcp_rmem_charge(struct sock *sk, int size)
{
- mptcp_sk(sk)->rmem_fwd_alloc -= size;
+ mptcp_rmem_fwd_alloc_add(sk, -size);
}

static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
@@ -174,7 +180,7 @@ static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
static void __mptcp_rmem_reclaim(struct sock *sk, int amount)
{
amount >>= PAGE_SHIFT;
- mptcp_sk(sk)->rmem_fwd_alloc -= amount << PAGE_SHIFT;
+ mptcp_rmem_charge(sk, amount << PAGE_SHIFT);
__sk_mem_reduce_allocated(sk, amount);
}

@@ -183,7 +189,7 @@ static void mptcp_rmem_uncharge(struct sock *sk, int size)
struct mptcp_sock *msk = mptcp_sk(sk);
int reclaimable;

- msk->rmem_fwd_alloc += size;
+ mptcp_rmem_fwd_alloc_add(sk, size);
reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);

/* see sk_mem_uncharge() for the rationale behind the following schema */
@@ -338,7 +344,7 @@ static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV))
return false;

- msk->rmem_fwd_alloc += amount;
+ mptcp_rmem_fwd_alloc_add(sk, amount);
return true;
}

@@ -1802,7 +1808,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}

/* data successfully copied into the write queue */
- sk->sk_forward_alloc -= total_ts;
+ sk_forward_alloc_add(sk, -total_ts);
copied += psize;
dfrag->data_len += psize;
frag_truesize += psize;
@@ -3278,8 +3284,8 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
/* move all the rx fwd alloc into the sk_mem_reclaim_final in
* inet_sock_destruct() will dispose it
*/
- sk->sk_forward_alloc += msk->rmem_fwd_alloc;
- msk->rmem_fwd_alloc = 0;
+ sk_forward_alloc_add(sk, msk->rmem_fwd_alloc);
+ WRITE_ONCE(msk->rmem_fwd_alloc, 0);
mptcp_token_destroy(msk);
mptcp_pm_free_anno_list(msk);
mptcp_free_local_addr_list(msk);
@@ -3562,7 +3568,8 @@ static void mptcp_shutdown(struct sock *sk, int how)

static int mptcp_forward_alloc_get(const struct sock *sk)
{
- return sk->sk_forward_alloc + mptcp_sk(sk)->rmem_fwd_alloc;
+ return READ_ONCE(sk->sk_forward_alloc) +
+ READ_ONCE(mptcp_sk(sk)->rmem_fwd_alloc);
}

static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
index 8f1bfa6ccc2d..50723ba08289 100644
--- a/net/netfilter/nfnetlink_osf.c
+++ b/net/netfilter/nfnetlink_osf.c
@@ -315,6 +315,14 @@ static int nfnl_osf_add_callback(struct sk_buff *skb,

f = nla_data(osf_attrs[OSF_ATTR_FINGER]);

+ if (f->opt_num > ARRAY_SIZE(f->opt))
+ return -EINVAL;
+
+ if (!memchr(f->genre, 0, MAXGENRELEN) ||
+ !memchr(f->subtype, 0, MAXGENRELEN) ||
+ !memchr(f->version, 0, MAXGENRELEN))
+ return -EINVAL;
+
kf = kmalloc(sizeof(struct nf_osf_finger), GFP_KERNEL);
if (!kf)
return -ENOMEM;
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index c307c57a93e5..efb50c2b41f3 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -35,6 +35,14 @@ static unsigned int optlen(const u8 *opt, unsigned int offset)
return opt[offset + 1];
}

+static int nft_skb_copy_to_reg(const struct sk_buff *skb, int offset, u32 *dest, unsigned int len)
+{
+ if (len % NFT_REG32_SIZE)
+ dest[len / NFT_REG32_SIZE] = 0;
+
+ return skb_copy_bits(skb, offset, dest, len);
+}
+
static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
@@ -56,8 +64,7 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
}
offset += priv->offset;

- dest[priv->len / NFT_REG32_SIZE] = 0;
- if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
+ if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0)
goto err;
return;
err:
@@ -153,8 +160,7 @@ static void nft_exthdr_ipv4_eval(const struct nft_expr *expr,
}
offset += priv->offset;

- dest[priv->len / NFT_REG32_SIZE] = 0;
- if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
+ if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0)
goto err;
return;
err:
@@ -210,7 +216,8 @@ static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
if (priv->flags & NFT_EXTHDR_F_PRESENT) {
*dest = 1;
} else {
- dest[priv->len / NFT_REG32_SIZE] = 0;
+ if (priv->len % NFT_REG32_SIZE)
+ dest[priv->len / NFT_REG32_SIZE] = 0;
memcpy(dest, opt + offset, priv->len);
}

@@ -388,9 +395,8 @@ static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
offset + ntohs(sch->length) > pkt->skb->len)
break;

- dest[priv->len / NFT_REG32_SIZE] = 0;
- if (skb_copy_bits(pkt->skb, offset + priv->offset,
- dest, priv->len) < 0)
+ if (nft_skb_copy_to_reg(pkt->skb, offset + priv->offset,
+ dest, priv->len) < 0)
break;
return;
}
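
The new nft_skb_copy_to_reg() helper folds the register zero-padding and
the copy into one place, and only clears the trailing register word when
the copy length is not a multiple of NFT_REG32_SIZE. A user-space sketch
of why that padding matters (buffer contents are illustrative, and the
printed value assumes a little-endian machine):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REG32_SIZE 4

static void copy_to_regs(uint32_t *dest, const void *src, unsigned int len)
{
    if (len % REG32_SIZE)
        dest[len / REG32_SIZE] = 0;  /* clear the partially-filled word */
    memcpy(dest, src, len);
}

int main(void)
{
    uint32_t regs[2] = { 0xdeadbeef, 0xdeadbeef };  /* stale contents */
    uint8_t opt[5] = { 1, 2, 3, 4, 5 };

    copy_to_regs(regs, opt, sizeof(opt));  /* 5 bytes: partial last word */
    printf("reg1=%08x\n", regs[1]);  /* 00000005: the stale 0xdeadbeef is gone */
    return 0;
}
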
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index 591d87d5e5c0..68e6acd0f130 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -61,6 +61,7 @@ struct fq_pie_sched_data {
struct pie_params p_params;
u32 ecn_prob;
u32 flows_cnt;
+ u32 flows_cursor;
u32 quantum;
u32 memory_limit;
u32 new_flow_count;
@@ -375,22 +376,32 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
static void fq_pie_timer(struct timer_list *t)
{
struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
+ unsigned long next, tupdate;
struct Qdisc *sch = q->sch;
spinlock_t *root_lock; /* to lock qdisc for probability calculations */
- u32 idx;
+ int max_cnt, i;

rcu_read_lock();
root_lock = qdisc_lock(qdisc_root_sleeping(sch));
spin_lock(root_lock);

- for (idx = 0; idx < q->flows_cnt; idx++)
- pie_calculate_probability(&q->p_params, &q->flows[idx].vars,
- q->flows[idx].backlog);
-
- /* reset the timer to fire after 'tupdate' jiffies. */
- if (q->p_params.tupdate)
- mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate);
+ /* Limit this expensive loop to 2048 flows per round. */
+ max_cnt = min_t(int, q->flows_cnt - q->flows_cursor, 2048);
+ for (i = 0; i < max_cnt; i++) {
+ pie_calculate_probability(&q->p_params,
+ &q->flows[q->flows_cursor].vars,
+ q->flows[q->flows_cursor].backlog);
+ q->flows_cursor++;
+ }

+ tupdate = q->p_params.tupdate;
+ next = 0;
+ if (q->flows_cursor >= q->flows_cnt) {
+ q->flows_cursor = 0;
+ next = tupdate;
+ }
+ if (tupdate)
+ mod_timer(&q->adapt_timer, jiffies + next);
spin_unlock(root_lock);
rcu_read_unlock();
}
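
The fq_pie timer above now updates at most 2048 flows per firing, keeps a
cursor into the flow table across firings, and only re-arms with the full
tupdate period once a complete sweep has finished; partial sweeps re-arm
immediately. A stand-alone sketch of that amortization (the item count,
batch size and empty work() body are assumptions for illustration):

#include <stdio.h>

#define NR_ITEMS 5000
#define BATCH    2048

static unsigned int cursor;

static void work(unsigned int idx) { (void)idx; /* per-flow update */ }

/* returns the delay before the next firing; 0 means fire again at once */
static unsigned int timer_tick(unsigned int period)
{
    unsigned int n = NR_ITEMS - cursor;
    unsigned int i;

    if (n > BATCH)
        n = BATCH;
    for (i = 0; i < n; i++)
        work(cursor++);

    if (cursor >= NR_ITEMS) {
        cursor = 0;
        return period;  /* sweep complete: wait a full period */
    }
    return 0;           /* more flows left: run again immediately */
}

int main(void)
{
    printf("%u\n", timer_tick(100));  /* 0   (2048 of 5000 done) */
    printf("%u\n", timer_tick(100));  /* 0   (4096 of 5000 done) */
    printf("%u\n", timer_tick(100));  /* 100 (sweep finished)    */
    return 0;
}
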
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index ea8c4a7174bb..35f49edf63db 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -207,7 +207,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
.priv_size = sizeof(struct plug_sched_data),
.enqueue = plug_enqueue,
.dequeue = plug_dequeue,
- .peek = qdisc_peek_head,
+ .peek = qdisc_peek_dequeued,
.init = plug_init,
.change = plug_change,
.reset = qdisc_reset_queue,
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index e150d08f182d..ed01634af82c 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -973,10 +973,13 @@ static void qfq_update_eligible(struct qfq_sched *q)
}

/* Dequeue head packet of the head class in the DRR queue of the aggregate. */
-static void agg_dequeue(struct qfq_aggregate *agg,
- struct qfq_class *cl, unsigned int len)
+static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
+ struct qfq_class *cl, unsigned int len)
{
- qdisc_dequeue_peeked(cl->qdisc);
+ struct sk_buff *skb = qdisc_dequeue_peeked(cl->qdisc);
+
+ if (!skb)
+ return NULL;

cl->deficit -= (int) len;

@@ -986,6 +989,8 @@ static void agg_dequeue(struct qfq_aggregate *agg,
cl->deficit += agg->lmax;
list_move_tail(&cl->alist, &agg->active);
}
+
+ return skb;
}

static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
@@ -1131,11 +1136,18 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
if (!skb)
return NULL;

- qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
+
+ skb = agg_dequeue(in_serv_agg, cl, len);
+
+ if (!skb) {
+ sch->q.qlen++;
+ return NULL;
+ }
+
+ qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb);

- agg_dequeue(in_serv_agg, cl, len);
/* If lmax is lowered, through qfq_change_class, for a class
* owning pending packets with larger size than the new value
* of lmax, then the following condition may hold.
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index f13d6a34f32f..ec00ee75d59a 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -282,7 +282,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
assoc->init_retries, assoc->shutdown_retries,
assoc->rtx_data_chunks,
refcount_read(&sk->sk_wmem_alloc),
- sk->sk_wmem_queued,
+ READ_ONCE(sk->sk_wmem_queued),
sk->sk_sndbuf,
sk->sk_rcvbuf);
seq_printf(seq, "\n");
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index a11b0d903514..32e3669adf14 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -68,7 +68,7 @@
#include <net/sctp/stream_sched.h>

/* Forward declarations for internal helper functions. */
-static bool sctp_writeable(struct sock *sk);
+static bool sctp_writeable(const struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len);
@@ -139,7 +139,7 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)

refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
asoc->sndbuf_used += chunk->skb->truesize + sizeof(struct sctp_chunk);
- sk->sk_wmem_queued += chunk->skb->truesize + sizeof(struct sctp_chunk);
+ sk_wmem_queued_add(sk, chunk->skb->truesize + sizeof(struct sctp_chunk));
sk_mem_charge(sk, chunk->skb->truesize);
}

@@ -9139,7 +9139,7 @@ static void sctp_wfree(struct sk_buff *skb)
struct sock *sk = asoc->base.sk;

sk_mem_uncharge(sk, skb->truesize);
- sk->sk_wmem_queued -= skb->truesize + sizeof(struct sctp_chunk);
+ sk_wmem_queued_add(sk, -(skb->truesize + sizeof(struct sctp_chunk)));
asoc->sndbuf_used -= skb->truesize + sizeof(struct sctp_chunk);
WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk),
&sk->sk_wmem_alloc));
@@ -9292,9 +9292,9 @@ void sctp_write_space(struct sock *sk)
* UDP-style sockets or TCP-style sockets, this code should work.
* - Daisy
*/
-static bool sctp_writeable(struct sock *sk)
+static bool sctp_writeable(const struct sock *sk)
{
- return sk->sk_sndbuf > sk->sk_wmem_queued;
+ return READ_ONCE(sk->sk_sndbuf) > READ_ONCE(sk->sk_wmem_queued);
}

/* Wait for an association to go into ESTABLISHED state. If timeout is 0,
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index c676d92af7b7..64b6dd439938 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1650,6 +1650,7 @@ void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
{
struct smc_link_group *lgr, *n;

+ spin_lock_bh(&smc_lgr_list.lock);
list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
struct smc_link *link;

@@ -1665,6 +1666,7 @@ void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
if (link)
smc_llc_add_link_local(link);
}
+ spin_unlock_bh(&smc_lgr_list.lock);
}

/* link is down - switch connections to alternate link,
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 96b4545ea700..9be00ebbb234 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -802,7 +802,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
psock = sk_psock_get(sk);
if (!psock || !policy) {
err = tls_push_record(sk, flags, record_type);
- if (err && sk->sk_err == EBADMSG) {
+ if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
*copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk);
err = -sk->sk_err;
@@ -831,7 +831,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
switch (psock->eval) {
case __SK_PASS:
err = tls_push_record(sk, flags, record_type);
- if (err && sk->sk_err == EBADMSG) {
+ if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
*copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk);
err = -sk->sk_err;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ca31847a6c70..310952f4c68f 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -667,7 +667,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
* What the above comment does talk about? --ANK(980817)
*/

- if (unix_tot_inflight)
+ if (READ_ONCE(unix_tot_inflight))
unix_gc(); /* Garbage collect fds */
}

diff --git a/net/unix/scm.c b/net/unix/scm.c
index aa27a02478dc..e8e2a00bb0f5 100644
--- a/net/unix/scm.c
+++ b/net/unix/scm.c
@@ -63,7 +63,7 @@ void unix_inflight(struct user_struct *user, struct file *fp)
/* Paired with READ_ONCE() in wait_for_unix_gc() */
WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
}
- user->unix_inflight++;
+ WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
spin_unlock(&unix_gc_lock);
}

@@ -84,7 +84,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
/* Paired with READ_ONCE() in wait_for_unix_gc() */
WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
}
- user->unix_inflight--;
+ WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
spin_unlock(&unix_gc_lock);
}

@@ -98,7 +98,7 @@ static inline bool too_many_unix_fds(struct task_struct *p)
{
struct user_struct *user = current_user();

- if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
+ if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
return false;
}
diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
index c014217f5fa7..22b36c8143cf 100644
--- a/net/xdp/xsk_diag.c
+++ b/net/xdp/xsk_diag.c
@@ -111,6 +111,9 @@ static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
sock_diag_save_cookie(sk, msg->xdiag_cookie);

mutex_lock(&xs->mutex);
+ if (READ_ONCE(xs->state) == XSK_UNBOUND)
+ goto out_nlmsg_trim;
+
if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb))
goto out_nlmsg_trim;

diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
index 748da578b418..d1f5bcff4b62 100644
--- a/scripts/kconfig/preprocess.c
+++ b/scripts/kconfig/preprocess.c
@@ -396,6 +396,9 @@ static char *eval_clause(const char *str, size_t len, int argc, char *argv[])

p++;
}
+
+ if (new_argc >= FUNCTION_MAX_ARGS)
+ pperror("too many function arguments");
new_argv[new_argc++] = prev;

/*
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index 70392fd2fd29..f892cf8e37f0 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -51,7 +51,7 @@ $S Source: kernel-$__KERNELRELEASE.tar.gz
Provides: $PROVIDES
# $UTS_MACHINE as a fallback of _arch in case
# /usr/lib/rpm/platform/*/macros was not included.
- %define _arch %{?_arch:$UTS_MACHINE}
+ %{!?_arch: %define _arch $UTS_MACHINE}
%define __spec_install_post /usr/lib/rpm/brp-compress || :
%define debug_package %{nil}

diff --git a/sound/soc/tegra/tegra210_sfc.c b/sound/soc/tegra/tegra210_sfc.c
index 368f077e7bee..5d2125aa6122 100644
--- a/sound/soc/tegra/tegra210_sfc.c
+++ b/sound/soc/tegra/tegra210_sfc.c
@@ -2,7 +2,7 @@
//
// tegra210_sfc.c - Tegra210 SFC driver
//
-// Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
+// Copyright (c) 2021-2023 NVIDIA CORPORATION. All rights reserved.

#include <linux/clk.h>
#include <linux/device.h>
@@ -42,6 +42,7 @@ static const int tegra210_sfc_rates[TEGRA210_SFC_NUM_RATES] = {
32000,
44100,
48000,
+ 64000,
88200,
96000,
176400,
@@ -2857,6 +2858,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
coef_8to32,
coef_8to44,
coef_8to48,
+ UNSUPP_CONV,
coef_8to88,
coef_8to96,
UNSUPP_CONV,
@@ -2872,6 +2874,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
coef_11to32,
coef_11to44,
coef_11to48,
+ UNSUPP_CONV,
coef_11to88,
coef_11to96,
UNSUPP_CONV,
@@ -2887,6 +2890,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
coef_16to32,
coef_16to44,
coef_16to48,
+ UNSUPP_CONV,
coef_16to88,
coef_16to96,
coef_16to176,
@@ -2902,6 +2906,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
coef_22to32,
coef_22to44,
coef_22to48,
+ UNSUPP_CONV,
coef_22to88,
coef_22to96,
coef_22to176,
@@ -2917,6 +2922,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
coef_24to32,
coef_24to44,
coef_24to48,
+ UNSUPP_CONV,
coef_24to88,
coef_24to96,
coef_24to176,
@@ -2932,6 +2938,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
BYPASS_CONV,
coef_32to44,
coef_32to48,
+ UNSUPP_CONV,
coef_32to88,
coef_32to96,
coef_32to176,
@@ -2947,6 +2954,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
coef_44to32,
BYPASS_CONV,
coef_44to48,
+ UNSUPP_CONV,
coef_44to88,
coef_44to96,
coef_44to176,
@@ -2962,11 +2970,28 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
coef_48to32,
coef_48to44,
BYPASS_CONV,
+ UNSUPP_CONV,
coef_48to88,
coef_48to96,
coef_48to176,
coef_48to192,
},
+ /* Conversions from 64 kHz */
+ {
+ UNSUPP_CONV,
+ UNSUPP_CONV,
+ UNSUPP_CONV,
+ UNSUPP_CONV,
+ UNSUPP_CONV,
+ UNSUPP_CONV,
+ UNSUPP_CONV,
+ UNSUPP_CONV,
+ UNSUPP_CONV,
+ UNSUPP_CONV,
+ UNSUPP_CONV,
+ UNSUPP_CONV,
+ UNSUPP_CONV,
+ },
/* Conversions from 88.2 kHz */
{
coef_88to8,
@@ -2977,6 +3002,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
coef_88to32,
coef_88to44,
coef_88to48,
+ UNSUPP_CONV,
BYPASS_CONV,
coef_88to96,
coef_88to176,
@@ -2991,6 +3017,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
coef_96to32,
coef_96to44,
coef_96to48,
+ UNSUPP_CONV,
coef_96to88,
BYPASS_CONV,
coef_96to176,
@@ -3006,6 +3033,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
coef_176to32,
coef_176to44,
coef_176to48,
+ UNSUPP_CONV,
coef_176to88,
coef_176to96,
BYPASS_CONV,
@@ -3021,6 +3049,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
coef_192to32,
coef_192to44,
coef_192to48,
+ UNSUPP_CONV,
coef_192to88,
coef_192to96,
coef_192to176,
diff --git a/sound/soc/tegra/tegra210_sfc.h b/sound/soc/tegra/tegra210_sfc.h
index 5a6b66e297d8..a4c993d79403 100644
--- a/sound/soc/tegra/tegra210_sfc.h
+++ b/sound/soc/tegra/tegra210_sfc.h
@@ -2,7 +2,7 @@
/*
* tegra210_sfc.h - Definitions for Tegra210 SFC driver
*
- * Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2023 NVIDIA CORPORATION. All rights reserved.
*
*/

@@ -47,7 +47,7 @@
#define TEGRA210_SFC_EN_SHIFT 0
#define TEGRA210_SFC_EN (1 << TEGRA210_SFC_EN_SHIFT)

-#define TEGRA210_SFC_NUM_RATES 12
+#define TEGRA210_SFC_NUM_RATES 13

/* Fields in TEGRA210_SFC_COEF_RAM */
#define TEGRA210_SFC_COEF_RAM_EN BIT(0)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 4b3ff7687236..f9917848cdad 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1751,6 +1751,7 @@ int cmd_top(int argc, const char **argv)
top.session = perf_session__new(NULL, NULL);
if (IS_ERR(top.session)) {
status = PTR_ERR(top.session);
+ top.session = NULL;
goto out_delete_evlist;
}

diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 97b17f8941dc..93dab6423a04 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -2293,7 +2293,7 @@ static void syscall__exit(struct syscall *sc)
if (!sc)
return;

- free(sc->arg_fmt);
+ zfree(&sc->arg_fmt);
}

static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
@@ -3124,13 +3124,8 @@ static void evlist__free_syscall_tp_fields(struct evlist *evlist)
struct evsel *evsel;

evlist__for_each_entry(evlist, evsel) {
- struct evsel_trace *et = evsel->priv;
-
- if (!et || !evsel->tp_format || strcmp(evsel->tp_format->system, "syscalls"))
- continue;
-
- free(et->fmt);
- free(et);
+ evsel_trace__delete(evsel->priv);
+ evsel->priv = NULL;
}
}

@@ -4765,11 +4760,11 @@ static void trace__exit(struct trace *trace)
int i;

strlist__delete(trace->ev_qualifier);
- free(trace->ev_qualifier_ids.entries);
+ zfree(&trace->ev_qualifier_ids.entries);
if (trace->syscalls.table) {
for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
syscall__exit(&trace->syscalls.table[i]);
- free(trace->syscalls.table);
+ zfree(&trace->syscalls.table);
}
syscalltbl__delete(trace->sctbl);
zfree(&trace->perfconfig_events);
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/cache.json b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
index 605be14f441c..9cb929bb64af 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/cache.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
@@ -17,7 +17,7 @@
{
"EventCode": "0x34056",
"EventName": "PM_EXEC_STALL_LOAD_FINISH",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the NTF instruction merged with another load in the LMQ; cycles in which the NTF instruction is waiting for a data reload for a load miss, but the data comes back with a non-NTF instruction."
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the next-to-finish (NTF) instruction merged with another load in the LMQ; cycles in which the NTF instruction is waiting for a data reload for a load miss, but the data comes back with a non-NTF instruction."
},
{
"EventCode": "0x3006C",
@@ -27,7 +27,7 @@
{
"EventCode": "0x300F4",
"EventName": "PM_RUN_INST_CMPL_CONC",
- "BriefDescription": "PowerPC instructions completed by this thread when all threads in the core had the run-latch set."
+ "BriefDescription": "PowerPC instruction completed by this thread when all threads in the core had the run-latch set."
},
{
"EventCode": "0x4C016",
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json b/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
deleted file mode 100644
index 54acb55e2c8c..000000000000
--- a/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
+++ /dev/null
@@ -1,7 +0,0 @@
-[
- {
- "EventCode": "0x4016E",
- "EventName": "PM_THRESH_NOT_MET",
- "BriefDescription": "Threshold counter did not meet threshold."
- }
-]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
index 558f9530f54e..61e9e0222c87 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
@@ -7,7 +7,7 @@
{
"EventCode": "0x10006",
"EventName": "PM_DISP_STALL_HELD_OTHER_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any other reason."
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch for any other reason."
},
{
"EventCode": "0x10010",
@@ -32,12 +32,12 @@
{
"EventCode": "0x1D05E",
"EventName": "PM_DISP_STALL_HELD_HALT_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of power management."
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because of power management."
},
{
"EventCode": "0x1E050",
"EventName": "PM_DISP_STALL_HELD_STF_MAPPER_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the STF mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR."
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because the STF mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR."
},
{
"EventCode": "0x1F054",
@@ -67,7 +67,7 @@
{
"EventCode": "0x100F6",
"EventName": "PM_IERAT_MISS",
- "BriefDescription": "IERAT Reloaded to satisfy an IERAT miss. All page sizes are counted by this event."
+ "BriefDescription": "IERAT Reloaded to satisfy an IERAT miss. All page sizes are counted by this event. This event only counts instruction demand access."
},
{
"EventCode": "0x100F8",
@@ -77,7 +77,7 @@
{
"EventCode": "0x20006",
"EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
},
{
"EventCode": "0x20114",
@@ -102,7 +102,7 @@
{
"EventCode": "0x2D01A",
"EventName": "PM_DISP_STALL_IC_MISS",
- "BriefDescription": "Cycles when dispatch was stalled for this thread due to an Icache Miss."
+ "BriefDescription": "Cycles when dispatch was stalled for this thread due to an instruction cache miss."
},
{
"EventCode": "0x2E018",
@@ -112,7 +112,7 @@
{
"EventCode": "0x2E01A",
"EventName": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the XVFC mapper/SRB was full."
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because the XVFC mapper/SRB was full."
},
{
"EventCode": "0x2C142",
@@ -137,7 +137,7 @@
{
"EventCode": "0x30004",
"EventName": "PM_DISP_STALL_FLUSH",
- "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet NTC. PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
+ "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet next-to-complete (NTC). PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
},
{
"EventCode": "0x3000A",
@@ -157,7 +157,7 @@
{
"EventCode": "0x30018",
"EventName": "PM_DISP_STALL_HELD_SCOREBOARD_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch while waiting on the Scoreboard. This event combines VSCR and FPSCR together."
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch while waiting on the Scoreboard. This event combines VSCR and FPSCR together."
},
{
"EventCode": "0x30026",
@@ -182,7 +182,7 @@
{
"EventCode": "0x3D05C",
"EventName": "PM_DISP_STALL_HELD_RENAME_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR and XVFC."
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because the mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR and XVFC."
},
{
"EventCode": "0x3E052",
@@ -192,7 +192,7 @@
{
"EventCode": "0x3E054",
"EventName": "PM_LD_MISS_L1",
- "BriefDescription": "Load Missed L1, counted at execution time (can be greater than loads finished). LMQ merges are not included in this count. i.e. if a load instruction misses on an address that is already allocated on the LMQ, this event will not increment for that load). Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load."
+ "BriefDescription": "Load missed L1, counted at finish time. LMQ merges are not included in this count. i.e. if a load instruction misses on an address that is already allocated on the LMQ, this event will not increment for that load). Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load."
},
{
"EventCode": "0x301EA",
@@ -202,7 +202,7 @@
{
"EventCode": "0x300FA",
"EventName": "PM_INST_FROM_L3MISS",
- "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
+ "BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss."
},
{
"EventCode": "0x40006",
@@ -232,16 +232,16 @@
{
"EventCode": "0x4E01A",
"EventName": "PM_DISP_STALL_HELD_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any reason."
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch for any reason."
},
{
"EventCode": "0x4003C",
"EventName": "PM_DISP_STALL_HELD_SYNC_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
+ "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
},
{
"EventCode": "0x44056",
"EventName": "PM_VECTOR_ST_CMPL",
- "BriefDescription": "Vector store instructions completed."
+ "BriefDescription": "Vector store instruction completed."
}
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/marked.json b/tools/perf/pmu-events/arch/powerpc/power10/marked.json
index 58b5dfe3a273..f2436fc5537c 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/marked.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/marked.json
@@ -19,11 +19,6 @@
"EventName": "PM_MRK_BR_TAKEN_CMPL",
"BriefDescription": "Marked Branch Taken instruction completed."
},
- {
- "EventCode": "0x20112",
- "EventName": "PM_MRK_NTF_FIN",
- "BriefDescription": "The marked instruction became the oldest in the pipeline before it finished. It excludes instructions that finish at dispatch."
- },
{
"EventCode": "0x2C01C",
"EventName": "PM_EXEC_STALL_DMISS_OFF_CHIP",
@@ -62,17 +57,12 @@
{
"EventCode": "0x200FD",
"EventName": "PM_L1_ICACHE_MISS",
- "BriefDescription": "Demand iCache Miss."
- },
- {
- "EventCode": "0x30130",
- "EventName": "PM_MRK_INST_FIN",
- "BriefDescription": "marked instruction finished. Excludes instructions that finish at dispatch. Note that stores always finish twice since the address gets issued to the LSU and the data gets issued to the VSU."
+ "BriefDescription": "Demand instruction cache miss."
},
{
"EventCode": "0x34146",
"EventName": "PM_MRK_LD_CMPL",
- "BriefDescription": "Marked loads completed."
+ "BriefDescription": "Marked load instruction completed."
},
{
"EventCode": "0x3E158",
@@ -82,12 +72,12 @@
{
"EventCode": "0x3E15A",
"EventName": "PM_MRK_ST_FIN",
- "BriefDescription": "The marked instruction was a store of any kind."
+ "BriefDescription": "Marked store instruction finished."
},
{
"EventCode": "0x30068",
"EventName": "PM_L1_ICACHE_RELOADED_PREF",
- "BriefDescription": "Counts all Icache prefetch reloads ( includes demand turned into prefetch)."
+ "BriefDescription": "Counts all instruction cache prefetch reloads (includes demand turned into prefetch)."
},
{
"EventCode": "0x301E4",
@@ -102,12 +92,12 @@
{
"EventCode": "0x300FE",
"EventName": "PM_DATA_FROM_L3MISS",
- "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
+ "BriefDescription": "The processor's L1 data cache was reloaded from beyond the local core's L3 due to a demand miss."
},
{
"EventCode": "0x40012",
"EventName": "PM_L1_ICACHE_RELOADED_ALL",
- "BriefDescription": "Counts all Icache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
+ "BriefDescription": "Counts all instruction cache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
},
{
"EventCode": "0x40134",
@@ -117,22 +107,22 @@
{
"EventCode": "0x4505A",
"EventName": "PM_SP_FLOP_CMPL",
- "BriefDescription": "Single Precision floating point instructions completed."
+ "BriefDescription": "Single Precision floating point instruction completed."
},
{
"EventCode": "0x4D058",
"EventName": "PM_VECTOR_FLOP_CMPL",
- "BriefDescription": "Vector floating point instructions completed."
+ "BriefDescription": "Vector floating point instruction completed."
},
{
"EventCode": "0x4D05A",
"EventName": "PM_NON_MATH_FLOP_CMPL",
- "BriefDescription": "Non Math instructions completed."
+ "BriefDescription": "Non Math instruction completed."
},
{
"EventCode": "0x401E0",
"EventName": "PM_MRK_INST_CMPL",
- "BriefDescription": "marked instruction completed."
+ "BriefDescription": "Marked instruction completed."
},
{
"EventCode": "0x400FE",
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/memory.json b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
index 843b51f531e9..c4c10ca98cad 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/memory.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
@@ -47,7 +47,7 @@
{
"EventCode": "0x10062",
"EventName": "PM_LD_L3MISS_PEND_CYC",
- "BriefDescription": "Cycles L3 miss was pending for this thread."
+ "BriefDescription": "Cycles in which an L3 miss was pending for this thread."
},
{
"EventCode": "0x20010",
@@ -132,7 +132,7 @@
{
"EventCode": "0x300FC",
"EventName": "PM_DTLB_MISS",
- "BriefDescription": "The DPTEG required for the load/store instruction in execution was missing from the TLB. It includes pages of all sizes for demand and prefetch activity."
+ "BriefDescription": "The DPTEG required for the load/store instruction in execution was missing from the TLB. This event only counts for demand misses."
},
{
"EventCode": "0x4D02C",
@@ -142,7 +142,7 @@
{
"EventCode": "0x4003E",
"EventName": "PM_LD_CMPL",
- "BriefDescription": "Loads completed."
+ "BriefDescription": "Load instruction completed."
},
{
"EventCode": "0x4C040",
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/metrics.json b/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
index b57526fa44f2..6e76f65c314c 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
@@ -453,12 +453,6 @@
"MetricGroup": "General",
"MetricName": "LOADS_PER_INST"
},
- {
- "BriefDescription": "Average number of finished stores per completed instruction",
- "MetricExpr": "PM_ST_FIN / PM_RUN_INST_CMPL",
- "MetricGroup": "General",
- "MetricName": "STORES_PER_INST"
- },
{
"BriefDescription": "Percentage of demand loads that reloaded from beyond the L2 per completed instruction",
"MetricExpr": "PM_DATA_FROM_L2MISS / PM_RUN_INST_CMPL * 100",
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/others.json b/tools/perf/pmu-events/arch/powerpc/power10/others.json
index 7d0de1a2860b..36c5bbc64c3b 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/others.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/others.json
@@ -2,12 +2,12 @@
{
"EventCode": "0x10016",
"EventName": "PM_VSU0_ISSUE",
- "BriefDescription": "VSU instructions issued to VSU pipe 0."
+ "BriefDescription": "VSU instruction issued to VSU pipe 0."
},
{
"EventCode": "0x1001C",
"EventName": "PM_ULTRAVISOR_INST_CMPL",
- "BriefDescription": "PowerPC instructions that completed while the thread was in ultravisor state."
+ "BriefDescription": "PowerPC instruction completed while the thread was in ultravisor state."
},
{
"EventCode": "0x100F0",
@@ -17,23 +17,18 @@
{
"EventCode": "0x10134",
"EventName": "PM_MRK_ST_DONE_L2",
- "BriefDescription": "Marked stores completed in L2 (RC machine done)."
+ "BriefDescription": "Marked store completed in L2."
},
{
"EventCode": "0x1505E",
"EventName": "PM_LD_HIT_L1",
- "BriefDescription": "Loads that finished without experiencing an L1 miss."
+ "BriefDescription": "Load finished without experiencing an L1 miss."
},
{
"EventCode": "0x1F056",
"EventName": "PM_DISP_SS0_2_INSTR_CYC",
"BriefDescription": "Cycles in which Superslice 0 dispatches either 1 or 2 instructions."
},
- {
- "EventCode": "0x1F15C",
- "EventName": "PM_MRK_STCX_L2_CYC",
- "BriefDescription": "Cycles spent in the nest portion of a marked Stcx instruction. It starts counting when the operation starts to drain to the L2 and it stops counting when the instruction retires from the Instruction Completion Table (ICT) in the Instruction Sequencing Unit (ISU)."
- },
{
"EventCode": "0x10066",
"EventName": "PM_ADJUNCT_CYC",
@@ -42,7 +37,7 @@
{
"EventCode": "0x101E4",
"EventName": "PM_MRK_L1_ICACHE_MISS",
- "BriefDescription": "Marked Instruction suffered an icache Miss."
+ "BriefDescription": "Marked instruction suffered an instruction cache miss."
},
{
"EventCode": "0x101EA",
@@ -72,7 +67,7 @@
{
"EventCode": "0x2E010",
"EventName": "PM_ADJUNCT_INST_CMPL",
- "BriefDescription": "PowerPC instructions that completed while the thread is in Adjunct state."
+ "BriefDescription": "PowerPC instruction completed while the thread was in Adjunct state."
},
{
"EventCode": "0x2E014",
@@ -122,7 +117,7 @@
{
"EventCode": "0x201E4",
"EventName": "PM_MRK_DATA_FROM_L3MISS",
- "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked load."
+ "BriefDescription": "The processor's L1 data cache was reloaded from beyond the local core's L3 due to a demand miss for a marked instruction."
},
{
"EventCode": "0x201E8",
@@ -132,17 +127,17 @@
{
"EventCode": "0x200F2",
"EventName": "PM_INST_DISP",
- "BriefDescription": "PowerPC instructions dispatched."
+ "BriefDescription": "PowerPC instruction dispatched."
},
{
"EventCode": "0x30132",
"EventName": "PM_MRK_VSU_FIN",
- "BriefDescription": "VSU marked instructions finished. Excludes simple FX instructions issued to the Store Unit."
+ "BriefDescription": "VSU marked instruction finished. Excludes simple FX instructions issued to the Store Unit."
},
{
"EventCode": "0x30038",
"EventName": "PM_EXEC_STALL_DMISS_LMEM",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local memory, local OpenCapp cache, or local OpenCapp memory."
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local memory, local OpenCAPI cache, or local OpenCAPI memory."
},
{
"EventCode": "0x3F04A",
@@ -152,12 +147,12 @@
{
"EventCode": "0x3405A",
"EventName": "PM_PRIVILEGED_INST_CMPL",
- "BriefDescription": "PowerPC Instructions that completed while the thread is in Privileged state."
+ "BriefDescription": "PowerPC instruction completed while the thread was in Privileged state."
},
{
"EventCode": "0x3F150",
"EventName": "PM_MRK_ST_DRAIN_CYC",
- "BriefDescription": "cycles to drain st from core to L2."
+ "BriefDescription": "Cycles in which the marked store drained from the core to the L2."
},
{
"EventCode": "0x3F054",
@@ -182,7 +177,7 @@
{
"EventCode": "0x4001C",
"EventName": "PM_VSU_FIN",
- "BriefDescription": "VSU instructions finished."
+ "BriefDescription": "VSU instruction finished."
},
{
"EventCode": "0x4C01A",
@@ -197,7 +192,7 @@
{
"EventCode": "0x4D022",
"EventName": "PM_HYPERVISOR_INST_CMPL",
- "BriefDescription": "PowerPC instructions that completed while the thread is in hypervisor state."
+ "BriefDescription": "PowerPC instruction completed while the thread was in hypervisor state."
},
{
"EventCode": "0x4D026",
@@ -212,32 +207,32 @@
{
"EventCode": "0x40030",
"EventName": "PM_INST_FIN",
- "BriefDescription": "Instructions finished."
+ "BriefDescription": "Instruction finished."
},
{
"EventCode": "0x44146",
"EventName": "PM_MRK_STCX_CORE_CYC",
- "BriefDescription": "Cycles spent in the core portion of a marked Stcx instruction. It starts counting when the instruction is decoded and stops counting when it drains into the L2."
+ "BriefDescription": "Cycles spent in the core portion of a marked STCX instruction. It starts counting when the instruction is decoded and stops counting when it drains into the L2."
},
{
"EventCode": "0x44054",
"EventName": "PM_VECTOR_LD_CMPL",
- "BriefDescription": "Vector load instructions completed."
+ "BriefDescription": "Vector load instruction completed."
},
{
"EventCode": "0x45054",
"EventName": "PM_FMA_CMPL",
- "BriefDescription": "Two floating point instructions completed (FMA class of instructions: fmadd, fnmadd, fmsub, fnmsub). Scalar instructions only."
+ "BriefDescription": "Two floating point instruction completed (FMA class of instructions: fmadd, fnmadd, fmsub, fnmsub). Scalar instructions only."
},
{
"EventCode": "0x45056",
"EventName": "PM_SCALAR_FLOP_CMPL",
- "BriefDescription": "Scalar floating point instructions completed."
+ "BriefDescription": "Scalar floating point instruction completed."
},
{
"EventCode": "0x4505C",
"EventName": "PM_MATH_FLOP_CMPL",
- "BriefDescription": "Math floating point instructions completed."
+ "BriefDescription": "Math floating point instruction completed."
},
{
"EventCode": "0x4D05E",
@@ -252,21 +247,21 @@
{
"EventCode": "0x401E6",
"EventName": "PM_MRK_INST_FROM_L3MISS",
- "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked instruction."
+ "BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss for a marked instruction."
},
{
"EventCode": "0x401E8",
"EventName": "PM_MRK_DATA_FROM_L2MISS",
- "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss for a marked load."
+ "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss for a marked instruction."
},
{
"EventCode": "0x400F0",
"EventName": "PM_LD_DEMAND_MISS_L1_FIN",
- "BriefDescription": "Load Missed L1, counted at finish time."
+ "BriefDescription": "Load missed L1, counted at finish time."
},
{
"EventCode": "0x400FA",
"EventName": "PM_RUN_INST_CMPL",
- "BriefDescription": "Completed PowerPC instructions gated by the run latch."
+ "BriefDescription": "PowerPC instruction completed while the run latch is set."
}
]
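
For context, perf does not parse these power10 JSON files at run time: the jevents generator under tools/perf/pmu-events/ compiles them into C string tables linked into the perf binary. A rough sketch of what one of the entries above turns into, with approximated field and table names (not the exact generated ones):

struct pmu_event_entry {
	const char *name;	/* "EventName" */
	const char *event;	/* encoded "EventCode" */
	const char *desc;	/* "BriefDescription" */
};

static const struct pmu_event_entry power10_run_inst_cmpl = {
	.name  = "PM_RUN_INST_CMPL",
	.event = "event=0x400fa",
	.desc  = "PowerPC instruction completed while the run latch is set.",
};
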
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
index b8aded6045fa..799893c56f32 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
@@ -2,7 +2,7 @@
{
"EventCode": "0x100FE",
"EventName": "PM_INST_CMPL",
- "BriefDescription": "PowerPC instructions completed."
+ "BriefDescription": "PowerPC instruction completed."
},
{
"EventCode": "0x1000C",
@@ -12,7 +12,7 @@
{
"EventCode": "0x1000E",
"EventName": "PM_MMA_ISSUED",
- "BriefDescription": "MMA instructions issued."
+ "BriefDescription": "MMA instruction issued."
},
{
"EventCode": "0x10012",
@@ -107,7 +107,7 @@
{
"EventCode": "0x2D012",
"EventName": "PM_VSU1_ISSUE",
- "BriefDescription": "VSU instructions issued to VSU pipe 1."
+ "BriefDescription": "VSU instruction issued to VSU pipe 1."
},
{
"EventCode": "0x2D018",
@@ -122,7 +122,7 @@
{
"EventCode": "0x2E01E",
"EventName": "PM_EXEC_STALL_NTC_FLUSH",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children. This event will also count cycles when the previous NTF instruction is still completing and the new NTF instruction is stalled at dispatch."
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children. This event will also count cycles when the previous next-to-finish (NTF) instruction is still completing and the new NTF instruction is stalled at dispatch."
},
{
"EventCode": "0x2013C",
@@ -137,7 +137,7 @@
{
"EventCode": "0x201E2",
"EventName": "PM_MRK_LD_MISS_L1",
- "BriefDescription": "Marked DL1 Demand Miss counted at finish time."
+ "BriefDescription": "Marked demand data load miss counted at finish time."
},
{
"EventCode": "0x200F4",
@@ -172,7 +172,7 @@
{
"EventCode": "0x30028",
"EventName": "PM_CMPL_STALL_MEM_ECC",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for the non-speculative finish of either a stcx waiting for its result or a load waiting for non-critical sectors of data and ECC."
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for the non-speculative finish of either a STCX waiting for its result or a load waiting for non-critical sectors of data and ECC."
},
{
"EventCode": "0x30036",
@@ -187,17 +187,12 @@
{
"EventCode": "0x3F044",
"EventName": "PM_VSU2_ISSUE",
- "BriefDescription": "VSU instructions issued to VSU pipe 2."
+ "BriefDescription": "VSU instruction issued to VSU pipe 2."
},
{
"EventCode": "0x30058",
"EventName": "PM_TLBIE_FIN",
- "BriefDescription": "TLBIE instructions finished in the LSU. Two TLBIEs can finish each cycle. All will be counted."
- },
- {
- "EventCode": "0x3D058",
- "EventName": "PM_SCALAR_FSQRT_FDIV_ISSUE",
- "BriefDescription": "Scalar versions of four floating point operations: fdiv,fsqrt (xvdivdp, xvdivsp, xvsqrtdp, xvsqrtsp)."
+ "BriefDescription": "TLBIE instruction finished in the LSU. Two TLBIEs can finish each cycle. All will be counted."
},
{
"EventCode": "0x30066",
@@ -252,7 +247,7 @@
{
"EventCode": "0x4E012",
"EventName": "PM_EXEC_STALL_UNKNOWN",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline completed without an ntf_type pulse. The ntf_pulse was missed by the ISU because the NTF finishes and completions came too close together."
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline completed without an ntf_type pulse. The ntf_pulse was missed by the ISU because the next-to-finish (NTF) instruction finishes and completions came too close together."
},
{
"EventCode": "0x4D020",
@@ -267,12 +262,7 @@
{
"EventCode": "0x45058",
"EventName": "PM_IC_MISS_CMPL",
- "BriefDescription": "Non-speculative icache miss, counted at completion."
- },
- {
- "EventCode": "0x4D050",
- "EventName": "PM_VSU_NON_FLOP_CMPL",
- "BriefDescription": "Non-floating point VSU instructions completed."
+ "BriefDescription": "Non-speculative instruction cache miss, counted at completion."
},
{
"EventCode": "0x4D052",
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
index b5d1bd39cfb2..364fedbfb490 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
@@ -12,11 +12,11 @@
{
"EventCode": "0x45052",
"EventName": "PM_4FLOP_CMPL",
- "BriefDescription": "Four floating point instructions completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
+ "BriefDescription": "Four floating point instruction completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
},
{
"EventCode": "0x4D054",
"EventName": "PM_8FLOP_CMPL",
- "BriefDescription": "Four Double Precision vector instructions completed."
+ "BriefDescription": "Four Double Precision vector instruction completed."
}
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/translation.json b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
index db3766dca07c..961e2491e73f 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/translation.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
@@ -4,11 +4,6 @@
"EventName": "PM_MRK_START_PROBE_NOP_CMPL",
"BriefDescription": "Marked Start probe nop (AND R0,R0,R0) completed."
},
- {
- "EventCode": "0x20016",
- "EventName": "PM_ST_FIN",
- "BriefDescription": "Store finish count. Includes speculative activity."
- },
{
"EventCode": "0x20018",
"EventName": "PM_ST_FWD",
@@ -17,7 +12,7 @@
{
"EventCode": "0x2011C",
"EventName": "PM_MRK_NTF_CYC",
- "BriefDescription": "Cycles during which the marked instruction is the oldest in the pipeline (NTF or NTC)."
+ "BriefDescription": "Cycles in which the marked instruction is the oldest in the pipeline (next-to-finish or next-to-complete)."
},
{
"EventCode": "0x2E01C",
@@ -37,7 +32,7 @@
{
"EventCode": "0x200FE",
"EventName": "PM_DATA_FROM_L2MISS",
- "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss."
+ "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss."
},
{
"EventCode": "0x30010",
@@ -52,6 +47,6 @@
{
"EventCode": "0x4D05C",
"EventName": "PM_DPP_FLOP_CMPL",
- "BriefDescription": "Double-Precision or Quad-Precision instructions completed."
+ "BriefDescription": "Double-Precision or Quad-Precision instruction completed."
}
]
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index 13473aeba489..6bf24b85294c 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -22,10 +22,10 @@ compare_number()
}

# skip if --bpf-counters is not supported
-if ! perf stat --bpf-counters true > /dev/null 2>&1; then
+if ! perf stat -e cycles --bpf-counters true > /dev/null 2>&1; then
if [ "$1" = "-v" ]; then
echo "Skipping: --bpf-counters not supported"
- perf --no-pager stat --bpf-counters true || true
+ perf --no-pager stat -e cycles --bpf-counters true || true
fi
exit 2
fi
diff --git a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
index d724855d097c..e75d0780dc78 100755
--- a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
@@ -25,22 +25,22 @@ check_bpf_counter()
find_cgroups()
{
# try usual systemd slices first
- if [ -d /sys/fs/cgroup/system.slice -a -d /sys/fs/cgroup/user.slice ]; then
+ if [ -d /sys/fs/cgroup/system.slice ] && [ -d /sys/fs/cgroup/user.slice ]; then
test_cgroups="system.slice,user.slice"
return
fi

# try root and self cgroups
- local self_cgrp=$(grep perf_event /proc/self/cgroup | cut -d: -f3)
- if [ -z ${self_cgrp} ]; then
+ find_cgroups_self_cgrp=$(grep perf_event /proc/self/cgroup | cut -d: -f3)
+ if [ -z ${find_cgroups_self_cgrp} ]; then
# cgroup v2 doesn't specify perf_event
- self_cgrp=$(grep ^0: /proc/self/cgroup | cut -d: -f3)
+ find_cgroups_self_cgrp=$(grep ^0: /proc/self/cgroup | cut -d: -f3)
fi

- if [ -z ${self_cgrp} ]; then
+ if [ -z ${find_cgroups_self_cgrp} ]; then
test_cgroups="/"
else
- test_cgroups="/,${self_cgrp}"
+ test_cgroups="/,${find_cgroups_self_cgrp}"
fi
}

@@ -48,13 +48,11 @@ find_cgroups()
# Just check if it runs without failure and has non-zero results.
check_system_wide_counted()
{
- local output
-
- output=$(perf stat -a --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, sleep 1 2>&1)
- if echo ${output} | grep -q -F "<not "; then
+ check_system_wide_counted_output=$(perf stat -a --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, sleep 1 2>&1)
+ if echo ${check_system_wide_counted_output} | grep -q -F "<not "; then
echo "Some system-wide events are not counted"
if [ "${verbose}" = "1" ]; then
- echo ${output}
+ echo ${check_system_wide_counted_output}
fi
exit 1
fi
@@ -62,13 +60,11 @@ check_system_wide_counted()

check_cpu_list_counted()
{
- local output
-
- output=$(perf stat -C 1 --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, taskset -c 1 sleep 1 2>&1)
- if echo ${output} | grep -q -F "<not "; then
+ check_cpu_list_counted_output=$(perf stat -C 0,1 --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, taskset -c 1 sleep 1 2>&1)
+ if echo ${check_cpu_list_counted_output} | grep -q -F "<not "; then
echo "Some CPU events are not counted"
if [ "${verbose}" = "1" ]; then
- echo ${output}
+ echo ${check_cpu_list_counted_output}
fi
exit 1
fi
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index b72ee6822222..fd3e67d2c6bd 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -407,11 +407,6 @@ static bool hist_browser__selection_has_children(struct hist_browser *browser)
return container_of(ms, struct callchain_list, ms)->has_children;
}

-static bool hist_browser__he_selection_unfolded(struct hist_browser *browser)
-{
- return browser->he_selection ? browser->he_selection->unfolded : false;
-}
-
static bool hist_browser__selection_unfolded(struct hist_browser *browser)
{
struct hist_entry *he = browser->he_selection;
@@ -584,8 +579,8 @@ static int hierarchy_set_folding(struct hist_browser *hb, struct hist_entry *he,
return n;
}

-static void __hist_entry__set_folding(struct hist_entry *he,
- struct hist_browser *hb, bool unfold)
+static void hist_entry__set_folding(struct hist_entry *he,
+ struct hist_browser *hb, bool unfold)
{
hist_entry__init_have_children(he);
he->unfolded = unfold ? he->has_children : false;
@@ -603,34 +598,12 @@ static void __hist_entry__set_folding(struct hist_entry *he,
he->nr_rows = 0;
}

-static void hist_entry__set_folding(struct hist_entry *he,
- struct hist_browser *browser, bool unfold)
-{
- double percent;
-
- percent = hist_entry__get_percent_limit(he);
- if (he->filtered || percent < browser->min_pcnt)
- return;
-
- __hist_entry__set_folding(he, browser, unfold);
-
- if (!he->depth || unfold)
- browser->nr_hierarchy_entries++;
- if (he->leaf)
- browser->nr_callchain_rows += he->nr_rows;
- else if (unfold && !hist_entry__has_hierarchy_children(he, browser->min_pcnt)) {
- browser->nr_hierarchy_entries++;
- he->has_no_entry = true;
- he->nr_rows = 1;
- } else
- he->has_no_entry = false;
-}
-
static void
__hist_browser__set_folding(struct hist_browser *browser, bool unfold)
{
struct rb_node *nd;
struct hist_entry *he;
+ double percent;

nd = rb_first_cached(&browser->hists->entries);
while (nd) {
@@ -640,6 +613,21 @@ __hist_browser__set_folding(struct hist_browser *browser, bool unfold)
nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD);

hist_entry__set_folding(he, browser, unfold);
+
+ percent = hist_entry__get_percent_limit(he);
+ if (he->filtered || percent < browser->min_pcnt)
+ continue;
+
+ if (!he->depth || unfold)
+ browser->nr_hierarchy_entries++;
+ if (he->leaf)
+ browser->nr_callchain_rows += he->nr_rows;
+ else if (unfold && !hist_entry__has_hierarchy_children(he, browser->min_pcnt)) {
+ browser->nr_hierarchy_entries++;
+ he->has_no_entry = true;
+ he->nr_rows = 1;
+ } else
+ he->has_no_entry = false;
}
}

@@ -659,8 +647,10 @@ static void hist_browser__set_folding_selected(struct hist_browser *browser, boo
if (!browser->he_selection)
return;

- hist_entry__set_folding(browser->he_selection, browser, unfold);
- browser->b.nr_entries = hist_browser__nr_entries(browser);
+ if (unfold == browser->he_selection->unfolded)
+ return;
+
+ hist_browser__toggle_fold(browser);
}

static void ui_browser__warn_lost_events(struct ui_browser *browser)
@@ -732,8 +722,8 @@ static int hist_browser__handle_hotkey(struct hist_browser *browser, bool warn_l
hist_browser__set_folding(browser, true);
break;
case 'e':
- /* Expand the selected entry. */
- hist_browser__set_folding_selected(browser, !hist_browser__he_selection_unfolded(browser));
+ /* Toggle expand/collapse the selected entry. */
+ hist_browser__toggle_fold(browser);
break;
case 'H':
browser->show_headers = !browser->show_headers;
@@ -1779,7 +1769,7 @@ static void hists_browser__hierarchy_headers(struct hist_browser *browser)
hists_browser__scnprintf_hierarchy_headers(browser, headers,
sizeof(headers));

- ui_browser__gotorc(&browser->b, 0, 0);
+ ui_browser__gotorc_title(&browser->b, 0, 0);
ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
}
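
The hists.c change above makes hist_browser__set_folding_selected() a thin wrapper: if the selected entry is already in the requested fold state it returns early, and otherwise it defers to hist_browser__toggle_fold(), which already maintains the derived entry and row counts. A minimal sketch of that set-via-toggle pattern, with made-up names and a stand-in row calculation:

#include <stdbool.h>

struct entry {
	bool unfolded;
	int  nr_rows;			/* derived state owned by the toggle */
};

static void entry_toggle_fold(struct entry *e)
{
	e->unfolded = !e->unfolded;
	e->nr_rows = e->unfolded ? 3 : 0;	/* stand-in recalculation */
}

static void entry_set_folding(struct entry *e, bool unfold)
{
	if (unfold == e->unfolded)
		return;			/* already in the desired state */
	entry_toggle_fold(e);		/* bookkeeping lives in one place */
}
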
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index db475e44f42f..a9122ea3b44c 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1756,8 +1756,11 @@ static int symbol__disassemble_bpf(struct symbol *sym,
perf_exe(tpath, sizeof(tpath));

bfdf = bfd_openr(tpath, NULL);
- assert(bfdf);
- assert(bfd_check_format(bfdf, bfd_object));
+ if (bfdf == NULL)
+ abort();
+
+ if (!bfd_check_format(bfdf, bfd_object))
+ abort();

s = open_memstream(&buf, &buf_size);
if (!s) {
@@ -1805,7 +1808,8 @@ static int symbol__disassemble_bpf(struct symbol *sym,
#else
disassemble = disassembler(bfdf);
#endif
- assert(disassemble);
+ if (disassemble == NULL)
+ abort();

fflush(s);
do {
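
The annotate.c hunks replace assert() with explicit checks that abort() because assert() compiles to nothing when NDEBUG is defined, so a release build would skip the bfd_openr()/bfd_check_format() validation entirely and fail later in stranger ways. A small illustration of the difference; the helper and path here are hypothetical:

#include <stdio.h>
#include <stdlib.h>

static FILE *open_or_die(const char *path)
{
	FILE *f = fopen(path, "r");

	if (f == NULL)	/* unlike assert(f), this survives NDEBUG builds */
		abort();
	return f;
}

int main(void)
{
	fclose(open_or_die("/tmp/example.txt"));
	return 0;
}
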
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 98dfaf84bd13..9e2dce70b130 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -4331,7 +4331,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct evlist **pevlist)
{
- u32 i, ids, n_ids;
+ u32 i, n_ids;
+ u64 *ids;
struct evsel *evsel;
struct evlist *evlist = *pevlist;

@@ -4347,9 +4348,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,

evlist__add(evlist, evsel);

- ids = event->header.size;
- ids -= (void *)&event->attr.id - (void *)event;
- n_ids = ids / sizeof(u64);
+ n_ids = event->header.size - sizeof(event->header) - event->attr.attr.size;
+ n_ids = n_ids / sizeof(u64);
/*
* We don't have the cpu and thread maps on the header, so
* for allocating the perf_sample_id table we fake 1 cpu and
@@ -4358,8 +4358,9 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
return -ENOMEM;

+ ids = (void *)&event->attr.attr + event->attr.attr.size;
for (i = 0; i < n_ids; i++) {
- perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]);
+ perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, ids[i]);
}

return 0;
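
The header.c fix above matters because a PERF_RECORD_ATTR record carries the writer's perf_event_attr followed by a variable-length u64 id[] array, so the ids begin at the writer's self-reported attr.size, not at the reader's sizeof(struct perf_event_attr); the two differ when perf versions are mixed. A simplified model of the arithmetic, with stand-in types (the real ones live in include/uapi/linux/perf_event.h):

#include <stdint.h>

/* Stand-ins keeping only the fields the size arithmetic needs. */
struct rec_header { uint32_t type; uint16_t misc; uint16_t size; };
struct rec_attr   { uint32_t type; uint32_t size; /* ... */ };

/* Record layout: [header][attr, attr.size bytes][uint64_t ids[]] */
static uint64_t *record_ids(void *event, uint32_t *n_ids)
{
	struct rec_header *hdr = event;
	struct rec_attr *attr = (struct rec_attr *)(hdr + 1);

	*n_ids = (hdr->size - sizeof(*hdr) - attr->size) / sizeof(uint64_t);
	return (uint64_t *)((char *)attr + attr->size);	/* trust the writer */
}
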
diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh
index 294619ade49f..1333ab1eda70 100644
--- a/tools/testing/selftests/kselftest/runner.sh
+++ b/tools/testing/selftests/kselftest/runner.sh
@@ -35,7 +35,8 @@ tap_timeout()
{
# Make sure tests will time out if utility is available.
if [ -x /usr/bin/timeout ] ; then
- /usr/bin/timeout --foreground "$kselftest_timeout" $1
+ /usr/bin/timeout --foreground "$kselftest_timeout" \
+ /usr/bin/timeout "$kselftest_timeout" $1
else
$1
fi
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 05400462c779..aa646e0661f3 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -72,7 +72,7 @@ endef
run_tests: all
ifdef building_out_of_srctree
@if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
- rsync -aLq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
+ rsync -aq --copy-unsafe-links $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
fi
@if [ "X$(TEST_PROGS)" != "X" ]; then \
$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
@@ -86,7 +86,7 @@ endif

define INSTALL_SINGLE_RULE
$(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH))
- $(if $(INSTALL_LIST),rsync -aL $(INSTALL_LIST) $(INSTALL_PATH)/)
+ $(if $(INSTALL_LIST),rsync -a --copy-unsafe-links $(INSTALL_LIST) $(INSTALL_PATH)/)
endef

define INSTALL_RULE