[ANNOUNCE] 3.0.9-rt26

From: Steven Rostedt
Date: Tue Nov 22 2011 - 11:55:59 EST



Dear RT Folks,

I'm pleased to announce the 3.0.9-rt26 stable release.

You can get this release via the git tree at:

git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git

Head SHA1: 54b64aeec82d0079a8a469f4dca8fc5f8b5b8e1b


Or to build 3.0.9-rt26 directly, the following patches should be applied:

http://www.kernel.org/pub/linux/kernel/v3.0/linux-3.0.tar.xz

http://www.kernel.org/pub/linux/kernel/v3.0/patch-3.0.9.xz

http://www.kernel.org/pub/linux/kernel/projects/rt/3.0/patch-3.0.9-rt26.patch.xz


You can also build from 3.0.9-rt25 by applying the incremental patch:

http://www.kernel.org/pub/linux/kernel/projects/rt/3.0/incr/patch-3.0.9-rt25-rt26.patch.xz


The broken-out patches are available at:

http://www.kernel.org/pub/linux/kernel/projects/rt/3.0/patches-3.0.9-rt26.tar.xz

Enjoy,

-- Steve

Changes from 3.0.9-rt25:

---

John Kacur (2):
softirq: Export in_serving_softirq()
rcu: Fix macro substitution for synchronize_rcu_bh() on RT

Peter Zijlstra (1):
x86: crypto: Reduce preempt disabled regions

Steven Rostedt (2):
tracing: Show padding as unsigned short
Linux 3.0.9-rt26

Thomas Gleixner (1):
dm: Make rt aware

----
arch/x86/crypto/aesni-intel_glue.c | 24 +++++++++++++-----------
drivers/md/dm.c | 4 ++--
include/linux/rcutree.h | 2 +-
kernel/softirq.c | 1 +
kernel/trace/trace_events.c | 2 +-
localversion-rt | 2 +-
6 files changed, 19 insertions(+), 16 deletions(-)
---------------------------
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index feee8ff..81ec77c 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -288,14 +288,14 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK);
+ nbytes & AES_BLOCK_MASK);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- kernel_fpu_end();

return err;
}
@@ -312,14 +312,14 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- kernel_fpu_end();

return err;
}
@@ -358,14 +358,14 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- kernel_fpu_end();

return err;
}
@@ -382,14 +382,14 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- kernel_fpu_end();

return err;
}
@@ -444,18 +444,20 @@ static int ctr_crypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

- kernel_fpu_begin();
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ kernel_fpu_begin();
aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
if (walk.nbytes) {
+ kernel_fpu_begin();
ctr_crypt_final(ctx, &walk);
+ kernel_fpu_end();
err = blkcipher_walk_done(desc, &walk, 0);
}
- kernel_fpu_end();

return err;
}
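
A note on why the above matters on RT: kernel_fpu_begin() disables
preemption, so wrapping the entire blkcipher walk in a single
begin/end pair keeps preemption off for the duration of the whole
request. Moving the pair inside the loop bounds each preempt-off
region to one walk step. A minimal sketch of the pattern (the helper
names work_remaining(), process_one_chunk() and advance_walk() are
made up for illustration, they are not in-tree functions):

	/* Bound each preempt-off region to one unit of work. */
	while (work_remaining(&walk)) {
		kernel_fpu_begin();	/* disables preemption */
		process_one_chunk(&walk);	/* bounded, preempt-off */
		kernel_fpu_end();	/* preemption possible again */
		advance_walk(&walk);	/* may reschedule on RT */
	}
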
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 41abc6d..67207d6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1644,14 +1644,14 @@ static void dm_request_fn(struct request_queue *q)
if (map_request(ti, clone, md))
goto requeued;

- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
spin_lock(q->queue_lock);
}

goto out;

requeued:
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
spin_lock(q->queue_lock);

delay_and_out:
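
Background on this change: on PREEMPT_RT_FULL, spinlock_t is a
sleeping lock and taking q->queue_lock no longer disables interrupts,
so the assertion only holds on non-RT kernels. BUG_ON_NONRT()
expresses exactly that. Roughly, paraphrased from memory of the RT
patch (the exact config symbol may differ):

	#ifdef CONFIG_PREEMPT_RT_FULL
	# define BUG_ON_NONRT(c)	do { } while (0)
	#else
	# define BUG_ON_NONRT(c)	BUG_ON(c)
	#endif
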
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 7a25aea..3bf47dc 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -60,7 +60,7 @@ static inline void exit_rcu(void)
#ifndef CONFIG_PREEMPT_RT_FULL
extern void synchronize_rcu_bh(void);
#else
-# define synchronize_rcu_bh() synchronize_rcu()
+# define synchronize_rcu_bh synchronize_rcu
#endif
extern void synchronize_sched_expedited(void);
extern void synchronize_rcu_expedited(void);
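
The dropped parentheses are the whole fix: a function-like macro is
only expanded at call sites, so code that refers to
synchronize_rcu_bh without calling it (e.g. taking its address for a
function pointer) is left unsubstituted and fails to build on RT,
where no synchronize_rcu_bh function is declared. The object-like
form rewrites every use of the identifier. A contrived snippet to
show the difference:

	/* function-like: only "synchronize_rcu_bh()" is rewritten */
	#define synchronize_rcu_bh() synchronize_rcu()
	void (*sync)(void) = synchronize_rcu_bh;  /* not expanded: build failure on RT */

	/* object-like: every use of the identifier is rewritten */
	#define synchronize_rcu_bh synchronize_rcu
	void (*sync2)(void) = synchronize_rcu_bh; /* expands to synchronize_rcu */
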
diff --git a/kernel/softirq.c b/kernel/softirq.c
index e8756aa..026a283 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -447,6 +447,7 @@ int in_serving_softirq(void)
preempt_enable();
return res;
}
+EXPORT_SYMBOL(in_serving_softirq);

/*
* Called with bh and local interrupts disabled. For full RT cpu must
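
As the hunk above shows, in_serving_softirq() is an out-of-line
function on RT (softirqs run in threads, so the usual inline
preempt_count() test does not apply), and without the EXPORT_SYMBOL
any module calling it fails at load time with an unresolved symbol. A
contrived module snippet (my_log_context is a made-up name, shown
only to illustrate the linkage dependency):

	#include <linux/module.h>
	#include <linux/interrupt.h>

	static void my_log_context(void)
	{
		/* Resolves against the kernel's in_serving_softirq
		 * symbol; on RT this needs the export above. */
		if (in_serving_softirq())
			pr_info("called from softirq context\n");
	}
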
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 37d3ffb..c71844c 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -117,7 +117,7 @@ static int trace_define_common_fields(void)
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
__common_field(unsigned short, migrate_disable);
- __common_field(int, padding);
+ __common_field(unsigned short, padding);

return ret;
}
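
The common-field description is exported to user space via the
events' format files, so it has to match struct trace_entry exactly.
The RT patch pairs the two-byte migrate_disable field with a two-byte
padding field; describing that padding as a four-byte int made
parsers such as trace-cmd compute wrong field offsets. The intended
tail of the layout is roughly this (a sketch inferred from the fields
in the hunk above, so treat it as an assumption):

	struct trace_entry {
		/* ... type and flags fields precede these ... */
		unsigned char	preempt_count;
		int		pid;
		unsigned short	migrate_disable;
		unsigned short	padding;	/* 2 bytes, completing the slot */
	};
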
diff --git a/localversion-rt b/localversion-rt
index c5b71f9..2e9afd4 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt25
+-rt26
