[PATCH] make raid5 checksums preempt-safe, take two

From: Robert Love <rml@tech9.net>
Date: Thu Aug 29 2002 - 10:32:31 EST


Linus,

The raid5 xor checksums use MMX/SSE state and are therefore not
preempt-safe: a task preempted in the middle of a checksum can have
the FPU registers it is working in clobbered before it runs again.

The attached patch disables preemption in FPU_SAVE and XMMS_SAVE and
re-enables it in FPU_RESTORE and XMMS_RESTORE, preventing preemption
while in FP mode.
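
In effect, every FPU-using xor routine ends up bracketed like this.
A minimal sketch of the resulting ordering (not code from the patch;
fpu_save_state(), fpu_restore_state() and mmx_xor_loop() are
hypothetical stand-ins for the fsave/frstor and MMX asm in xor.h):

#include <linux/preempt.h>  /* preempt_disable() / preempt_enable() */

/* hypothetical stand-ins for the save/xor/restore asm in xor.h */
void fpu_save_state(void);
void fpu_restore_state(void);
void mmx_xor_loop(unsigned long bytes, unsigned long *p1,
                  unsigned long *p2);

static void xor_preempt_safe(unsigned long bytes,
                             unsigned long *p1, unsigned long *p2)
{
        preempt_disable();   /* no context switch while FPU state is live */
        fpu_save_state();    /* now safe to clobber the FPU/MMX registers */

        mmx_xor_loop(bytes, p1, p2);

        fpu_restore_state(); /* put the interrupted state back... */
        preempt_enable();    /* ...only then allow preemption again */
}

The ordering matters: preemption must already be off before the first
FPU register is touched, and must stay off until the saved state has
been fully restored.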

I fixed the silly errors in the previous patch; this ought to be right.

The patch is against 2.5.32-bk. Please apply.

        Robert Love

diff -urN linux-2.5.32/include/asm-i386/xor.h linux/include/asm-i386/xor.h
--- linux-2.5.32/include/asm-i386/xor.h Tue Aug 27 15:26:34 2002
+++ linux/include/asm-i386/xor.h Wed Aug 28 17:17:51 2002
@@ -20,6 +20,7 @@
 
 #define FPU_SAVE \
   do { \
+        preempt_disable(); \
         if (!test_thread_flag(TIF_USEDFPU)) \
                 __asm__ __volatile__ (" clts;\n"); \
         __asm__ __volatile__ ("fsave %0; fwait": "=m"(fpu_save[0])); \
@@ -30,6 +31,7 @@
         __asm__ __volatile__ ("frstor %0": : "m"(fpu_save[0])); \
         if (!test_thread_flag(TIF_USEDFPU)) \
                 stts(); \
+        preempt_enable(); \
   } while (0)
 
 #define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n"
@@ -542,7 +544,8 @@
  * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
  */
 
-#define XMMS_SAVE \
+#define XMMS_SAVE do { \
+        preempt_disable(); \
         __asm__ __volatile__ ( \
                 "movl %%cr0,%0 ;\n\t" \
                 "clts ;\n\t" \
@@ -552,9 +555,10 @@
                 "movups %%xmm3,0x30(%1) ;\n\t" \
                 : "=&r" (cr0) \
                 : "r" (xmm_save) \
- : "memory")
+ : "memory"); \
+} while(0)
 
-#define XMMS_RESTORE \
+#define XMMS_RESTORE do { \
         __asm__ __volatile__ ( \
                 "sfence ;\n\t" \
                 "movups (%1),%%xmm0 ;\n\t" \
@@ -564,7 +568,9 @@
                 "movl %0,%%cr0 ;\n\t" \
                 : \
                 : "r" (cr0), "r" (xmm_save) \
- : "memory")
+ : "memory"); \
+ preempt_enable(); \
+} while(0)
 
 #define ALIGN16 __attribute__((aligned(16)))
 
diff -urN linux-2.5.32/include/asm-x86_64/xor.h linux/include/asm-x86_64/xor.h
--- linux-2.5.32/include/asm-x86_64/xor.h Tue Aug 27 15:26:36 2002
+++ linux/include/asm-x86_64/xor.h Wed Aug 28 17:19:07 2002
@@ -37,8 +37,9 @@
 
 /* Doesn't use gcc to save the XMM registers, because there is no easy way to
    tell it to do a clts before the register saving. */
-#define XMMS_SAVE \
-        asm volatile ( \
+#define XMMS_SAVE do { \
+        preempt_disable(); \
+        asm volatile ( \
                 "movq %%cr0,%0 ;\n\t" \
                 "clts ;\n\t" \
                 "movups %%xmm0,(%1) ;\n\t" \
@@ -47,10 +48,11 @@
                 "movups %%xmm3,0x30(%1) ;\n\t" \
                 : "=r" (cr0) \
                 : "r" (xmm_save) \
- : "memory")
+ : "memory"); \
+} while(0)
 
-#define XMMS_RESTORE \
-        asm volatile ( \
+#define XMMS_RESTORE do { \
+        asm volatile ( \
                 "sfence ;\n\t" \
                 "movups (%1),%%xmm0 ;\n\t" \
                 "movups 0x10(%1),%%xmm1 ;\n\t" \
@@ -59,7 +61,9 @@
                 "movq %0,%%cr0 ;\n\t" \
                 : \
                 : "r" (cr0), "r" (xmm_save) \
- : "memory")
+ : "memory"); \
+ preempt_enable(); \
+} while(0)
 
 #define OFFS(x) "16*("#x")"
 #define PF_OFFS(x) "256+16*("#x")"
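
A note on the do { ... } while(0) wrapping above: XMMS_SAVE and
XMMS_RESTORE now expand to multiple statements, and the wrapping is
the usual idiom for keeping such a macro safe to use as a single
statement under an unbraced if. A minimal illustration (save_a(),
save_b() and need_save are hypothetical):

#define SAVE_UNSAFE     save_a(); save_b()
#define SAVE_SAFE       do { save_a(); save_b(); } while (0)

        if (need_save)
                SAVE_UNSAFE;    /* bug: only save_a() is conditional;
                                   save_b() always runs */

        if (need_save)
                SAVE_SAFE;      /* correct: the whole block is conditional */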
