Re: [PATCH v8 2/2] lib: checksum: Use aligned accesses for ip_fast_csum and csum_ipv6_magic tests

From: Helge Deller
Date: Fri Feb 16 2024 - 01:43:00 EST


On 2/16/24 06:25, Guenter Roeck wrote:
> On Fri, Feb 16, 2024 at 06:54:55AM +0100, Helge Deller wrote:
>> Can you please give a pointer to this test code?
>> I'm happy to try it on real hardware.
>
> See below.

The test case runs OK on a physical (PA-RISC) machine:

#### carry64 aligned, expect 1 -> 1
#### carry64 unaligned 4, expect 1 -> 1
#### carry64 unaligned 2, expect 1 -> 1
#### carry32 aligned, expect 1 -> 1
#### carry32 unaligned, expect 1 -> 1
#### carry64 aligned, expect 0 -> 0
#### carry64 unaligned 4, expect 0 -> 0
#### carry64 unaligned 2, expect 0 -> 0
#### carry32 aligned, expect 0 -> 0
#### carry32 unaligned, expect 0 -> 0
ok 6 test_bad_carry
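
For readers without PA-RISC background: each probe below sets up an add
that generates a carry only for the all-ones buffer, then performs a
second load from the same (possibly unaligned) address before reading
the carry bit back. On parisc an unaligned load traps into the kernel's
unaligned-access emulation, so the test checks that this emulation
preserves the PSW carry/borrow bits which the carry-chained checksum
assembly depends on. The carry arithmetic itself (not the trap path,
which plain C cannot reproduce) can be sketched in portable user-space
C, roughly as follows; this is illustrative only and not part of the
patch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors get_carry64() below: returns 1 iff the 64-bit add carries. */
static int carry64(const unsigned char *p)
{
	uint64_t sum = 0xffffffff, tmp, res;

	memcpy(&tmp, p, sizeof(tmp));	/* stands in for the ldd */
	return __builtin_add_overflow(sum, tmp, &res);
}

int main(void)
{
	unsigned char buf[16];

	memset(buf, 0xff, sizeof(buf));
	printf("expect 1 -> %d\n", carry64(&buf[2]));	/* unaligned */
	memset(buf, 0, sizeof(buf));
	printf("expect 0 -> %d\n", carry64(&buf[2]));
	return 0;
}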

Helge

---
From 0478f35f02224994e1d81e614b66219ab7539f7f Mon Sep 17 00:00:00 2001
From: Guenter Roeck <linux@xxxxxxxxxxxx>
Date: Wed, 14 Feb 2024 11:25:18 -0800
Subject: [PATCH] carry tests

Signed-off-by: Guenter Roeck <linux@xxxxxxxxxxxx>
---
lib/checksum_kunit.c | 76 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 76 insertions(+)

diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
index 72c313ba4c78..8f7925396e53 100644
--- a/lib/checksum_kunit.c
+++ b/lib/checksum_kunit.c
@@ -546,12 +546,88 @@ static void test_csum_ipv6_magic(struct kunit *test)
 #endif /* !CONFIG_NET */
 }
 
+#if defined(CONFIG_PARISC) && defined(CONFIG_64BIT)
+
+static inline int get_carry64(void *addr)
+{
+	int carry = 0;
+	unsigned long sum = 0xffffffff;
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"	add %0, %0, %0\n"	/* clear carry */
+"	ldd 0(%2), %3\n"	/* load from memory */
+"	add %1, %3, %1\n"	/* optionally generate carry */
+"	ldd 0(%2), %3\n"	/* load from memory again */
+"	add,dc %0, %0, %0\n"	/* return carry */
+	: "=r" (carry), "=r" (sum), "=r" (addr), "=r" (tmp)
+	: "0" (carry), "1" (sum), "2" (addr)
+	: "memory");
+
+	return carry;
+}
+
+static inline int get_carry32(void *addr)
+{
+	int carry = 0;
+	unsigned int sum = 0xffffffff;
+	unsigned int tmp;
+
+	__asm__ __volatile__ (
+"	add %0, %0, %0\n"	/* clear carry */
+"	ldw 0(%2), %3\n"	/* load from memory */
+"	add %1, %3, %1\n"	/* optionally generate carry */
+"	ldw 0(%2), %3\n"	/* load from memory again */
+"	addc %0, %0, %0\n"	/* return carry */
+	: "=r" (carry), "=r" (sum), "=r" (addr), "=r" (tmp)
+	: "0" (carry), "1" (sum), "2" (addr)
+	: "memory");
+
+	return carry;
+}
+
+static void test_bad_carry(struct kunit *test)
+{
+	int carry;
+
+	memset(tmp_buf, 0xff, sizeof(tmp_buf));
+	carry = get_carry64(&tmp_buf[0]);
+	pr_info("#### carry64 aligned, expect 1 -> %d\n", carry);
+	carry = get_carry64(&tmp_buf[4]);
+	pr_info("#### carry64 unaligned 4, expect 1 -> %d\n", carry);
+
+	carry = get_carry64(&tmp_buf[2]);
+	pr_info("#### carry64 unaligned 2, expect 1 -> %d\n", carry);
+
+	carry = get_carry32(&tmp_buf[0]);
+	pr_info("#### carry32 aligned, expect 1 -> %d\n", carry);
+	carry = get_carry32(&tmp_buf[2]);
+	pr_info("#### carry32 unaligned, expect 1 -> %d\n", carry);
+
+	memset(tmp_buf, 0, sizeof(tmp_buf));
+	carry = get_carry64(&tmp_buf[0]);
+	pr_info("#### carry64 aligned, expect 0 -> %d\n", carry);
+	carry = get_carry64(&tmp_buf[4]);
+	pr_info("#### carry64 unaligned 4, expect 0 -> %d\n", carry);
+	carry = get_carry64(&tmp_buf[2]);
+	pr_info("#### carry64 unaligned 2, expect 0 -> %d\n", carry);
+
+	carry = get_carry32(&tmp_buf[0]);
+	pr_info("#### carry32 aligned, expect 0 -> %d\n", carry);
+	carry = get_carry32(&tmp_buf[2]);
+	pr_info("#### carry32 unaligned, expect 0 -> %d\n", carry);
+}
+#else
+static void test_bad_carry(struct kunit *test) {}
+#endif /* CONFIG_PARISC && CONFIG_64BIT */
+
 static struct kunit_case __refdata checksum_test_cases[] = {
 	KUNIT_CASE(test_csum_fixed_random_inputs),
 	KUNIT_CASE(test_csum_all_carry_inputs),
 	KUNIT_CASE(test_csum_no_carry_inputs),
 	KUNIT_CASE(test_ip_fast_csum),
 	KUNIT_CASE(test_csum_ipv6_magic),
+	KUNIT_CASE(test_bad_carry),
 	{}
 };
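
Note: lib/checksum_kunit.c is built with CONFIG_CHECKSUM_KUNIT, but the
behaviour being probed is the PA-RISC unaligned-access trap, so the
output is only meaningful on parisc (real hardware, or full-system
emulation of it). If this test were kept beyond debugging, the pr_info()
dumps would presumably become KUnit assertions, along these lines
(hypothetical, not part of the patch above):

	KUNIT_EXPECT_EQ(test, 1, get_carry64(&tmp_buf[2]));	/* 0xff-filled buffer */
	KUNIT_EXPECT_EQ(test, 0, get_carry32(&tmp_buf[2]));	/* zeroed buffer */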