[PATCH 4/4] Char: rio, fix sparse warnings

From: Jiri Slaby
Date: Fri Mar 28 2008 - 17:19:46 EST


Sparse warns about lock context imbalance in the rio driver: some paths leave a
spinlocked region without releasing the lock, others return or jump to cleanup
code without re-taking it. Unlock HostP->HostLock before the early 'continue'
in RIOApel(), re-acquire PortP->portSem after it is dropped around RIODelay()
in riotopen(), and take it before the 'goto close_end' error paths in
riotclose(), so every path exits with the lock state the surrounding code
expects.

Signed-off-by: Jiri Slaby <jirislaby@xxxxxxxxx>
---
 drivers/char/rio/riotable.c |    4 +++-
 drivers/char/rio/riotty.c   |    4 ++++
 2 files changed, 7 insertions(+), 1 deletions(-)
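
All four hunks enforce the same rule: a path that leaves a locked region early
has to restore the lock state the surrounding code expects, either by unlocking
before a 'continue'/'goto', or by re-taking the lock after a call that had to
run unlocked (RIODelay() sleeps). The warnings can be reproduced with sparse's
context checking, e.g. something like: make C=2 drivers/char/rio/

Below is a minimal userspace sketch of that pattern for reviewers. It is not
driver code: a pthread mutex stands in for rio_spin_lock_irqsave()/
rio_spin_unlock_irqrestore(), and the names (scan_devices, poll_device,
device_running) are invented for the example.

/*
 * Minimal sketch of the lock-balance rule fixed by this patch -- not
 * driver code.  A pthread mutex stands in for the rio spinlock helpers;
 * the device table and poll_device() are invented for the example.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NDEV 4

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static bool device_running[NDEV] = { true, false, true, true };

/* Stand-in for a call such as RIODelay() that may sleep, so the caller
 * must not hold the lock across it.  Returns 0 on success. */
static int poll_device(int i)
{
	return (i == 2) ? -1 : 0;
}

static void scan_devices(void)
{
	int i, ret;

	for (i = 0; i < NDEV; i++) {
		pthread_mutex_lock(&dev_lock);

		if (!device_running[i]) {
			/* Early exit: drop the lock first, the way RIOApel()
			 * now does before its 'continue'. */
			pthread_mutex_unlock(&dev_lock);
			continue;
		}

		/* Drop the lock around the blocking call ... */
		pthread_mutex_unlock(&dev_lock);
		ret = poll_device(i);
		/* ... and re-take it on every path afterwards, the way
		 * riotopen()/riotclose() now re-take portSem. */
		pthread_mutex_lock(&dev_lock);

		if (ret)
			printf("device %d: poll failed\n", i);

		pthread_mutex_unlock(&dev_lock);
	}
}

int main(void)
{
	scan_devices();
	return 0;
}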

diff --git a/drivers/char/rio/riotable.c b/drivers/char/rio/riotable.c
index dfce405..2b24488 100644
--- a/drivers/char/rio/riotable.c
+++ b/drivers/char/rio/riotable.c
@@ -424,8 +424,10 @@ int RIOApel(struct rio_info *p)
 
 		MapP = &p->RIOConnectTable[Next++];
 		MapP->HostUniqueNum = HostP->UniqueNum;
-		if ((HostP->Flags & RUN_STATE) != RC_RUNNING)
+		if ((HostP->Flags & RUN_STATE) != RC_RUNNING) {
+			rio_spin_unlock_irqrestore(&HostP->HostLock, flags);
 			continue;
+		}
 		MapP->RtaUniqueNum = 0;
 		MapP->ID = 0;
 		MapP->Flags = SLOT_IN_USE;
diff --git a/drivers/char/rio/riotty.c b/drivers/char/rio/riotty.c
index 047531f..c993548 100644
--- a/drivers/char/rio/riotty.c
+++ b/drivers/char/rio/riotty.c
@@ -319,6 +319,7 @@ int riotopen(struct tty_struct *tty, struct file *filp)
 			PortP->State |= RIO_WOPEN;
 			rio_spin_unlock_irqrestore(&PortP->portSem, flags);
 			if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
+				rio_spin_lock_irqsave(&PortP->portSem, flags);
 				/*
 				** ACTION: verify that this is a good thing
 				** to do here. -- ???
@@ -334,6 +335,7 @@ int riotopen(struct tty_struct *tty, struct file *filp)
 				func_exit();
 				return -EINTR;
 			}
+			rio_spin_lock_irqsave(&PortP->portSem, flags);
 		}
 		PortP->State &= ~RIO_WOPEN;
 	}
@@ -493,6 +495,7 @@ int riotclose(void *ptr)
 
 	if (RIOShortCommand(p, PortP, RIOC_CLOSE, 1, 0) == RIO_FAIL) {
 		RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
+		rio_spin_lock_irqsave(&PortP->portSem, flags);
 		goto close_end;
 	}
 
@@ -508,6 +511,7 @@ int riotclose(void *ptr)
 
 		if (p->RIOHalted) {
 			RIOClearUp(PortP);
+			rio_spin_lock_irqsave(&PortP->portSem, flags);
 			goto close_end;
 		}
 		if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
--
1.5.4.4
