[PATCH] carry tests

From: Guenter Roeck
Date: Wed Feb 14 2024 - 14:25:18 EST

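Add a diagnostic test to check whether the carry flag survives a load from
memory on parisc. get_carry64() and get_carry32() clear the carry, generate
one with an addition (the test buffer is either filled with 0xff, so a carry
is expected, or zeroed, so none is), reload the value from an aligned or
unaligned address, and then report the resulting carry with add,dc
respectively addc. test_bad_carry() runs both helpers against aligned and
unaligned addresses and logs the observed and expected carry values with
pr_info().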

Signed-off-by: Guenter Roeck <linux@xxxxxxxxxxxx>
---
lib/checksum_kunit.c | 76 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 76 insertions(+)

diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
index 72c313ba4c78..8f7925396e53 100644
--- a/lib/checksum_kunit.c
+++ b/lib/checksum_kunit.c
@@ -546,12 +546,88 @@ static void test_csum_ipv6_magic(struct kunit *test)
#endif /* !CONFIG_NET */
}

+#if defined(CONFIG_PARISC) && defined(CONFIG_64BIT)
+
+static __inline__ int get_carry64(void *addr)
+{
+ int carry = 0;
+ unsigned long sum = 0xffffffff;
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+" add %0, %0, %0\n" /* clear carry */
+" ldd 0(%2), %3\n" /* load from memory */
+" add %1, %3, %1\n" /* optionally generate carry */
+" ldd 0(%2), %3\n" /* load from memory again */
+" add,dc %0, %0, %0\n" /* return carry */
+ : "=r" (carry), "=r" (sum), "=r" (addr), "=r" (tmp)
+ : "0" (carry), "1" (sum), "2" (addr)
+ : "memory");
+
+ return carry;
+}
+
+static __inline__ int get_carry32(void *addr)
+{
+ int carry = 0;
+ unsigned int sum = 0xffffffff;
+ unsigned int tmp;
+
+ __asm__ __volatile__ (
+" add %0, %0, %0\n" /* clear carry */
+" ldw 0(%2), %3\n" /* load from memory */
+" add %1, %3, %1\n" /* optionally generate carry */
+" ldw 0(%2), %3\n" /* load from memory again */
+" addc %0, %0, %0\n" /* return carry */
+ : "=r" (carry), "=r" (sum), "=r" (addr), "=r" (tmp)
+ : "0" (carry), "1" (sum), "2" (addr)
+ : "memory");
+
+ return carry;
+}
+
+static void test_bad_carry(struct kunit *test)
+{
+ int carry;
+
+ memset(tmp_buf, 0xff, sizeof(tmp_buf));
+ carry = get_carry64(&tmp_buf[0]);
+ pr_info("#### carry64 aligned, expect 1 -> %d\n", carry);
+ carry = get_carry64(&tmp_buf[4]);
+ pr_info("#### carry64 unaligned 4, expect 1 -> %d\n", carry);
+
+ carry = get_carry64(&tmp_buf[2]);
+ pr_info("#### carry64 unaligned 2, expect 1 -> %d\n", carry);
+
+ carry = get_carry32(&tmp_buf[0]);
+ pr_info("#### carry32 aligned, expect 1 -> %d\n", carry);
+ carry = get_carry32(&tmp_buf[2]);
+ pr_info("#### carry64 unaligned, expect 1 -> %d\n", carry);
+
+ memset(tmp_buf, 0, sizeof(tmp_buf));
+ carry = get_carry64(&tmp_buf[0]);
+ pr_info("#### carry64 aligned, expect 0 -> %d\n", carry);
+ carry = get_carry64(&tmp_buf[4]);
+ pr_info("#### carry64 unaligned 4, expect 0 -> %d\n", carry);
+ carry = get_carry64(&tmp_buf[2]);
+ pr_info("#### carry64 unaligned 2, expect 0 -> %d\n", carry);
+
+ carry = get_carry32(&tmp_buf[0]);
+ pr_info("#### carry32 aligned, expect 0 -> %d\n", carry);
+ carry = get_carry32(&tmp_buf[2]);
+ pr_info("#### carry32 unaligned, expect 0 -> %d\n", carry);
+}
+#else
+static void test_bad_carry(struct kunit *test) {}
+#endif /* CONFIG_PARISC && CONFIG_64BIT */
+
static struct kunit_case __refdata checksum_test_cases[] = {
KUNIT_CASE(test_csum_fixed_random_inputs),
KUNIT_CASE(test_csum_all_carry_inputs),
KUNIT_CASE(test_csum_no_carry_inputs),
KUNIT_CASE(test_ip_fast_csum),
KUNIT_CASE(test_csum_ipv6_magic),
+ KUNIT_CASE(test_bad_carry),
{}
};

--
2.39.2