/* slowpath.c: read-side fast paths for two spinlock-based R/W semaphore
 *	       implementations (DWH vs AA), with the slow paths left extern
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 */
#define __KERNEL__
/* header names assumed: a plausible set for the primitives used below
 * (spinlocks, lists, current, __u32, FASTCALL) */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/linkage.h>

/* DWH flavour: separate active and waiting counters guarded by a spinlock */
struct rw_semaphore_dwh {
	__u32		active;
	__u32		waiting;
	spinlock_t	wait_lock;
};

extern void FASTCALL(dwh_down_read(struct rw_semaphore_dwh *));
extern void FASTCALL(dwh_up_read(struct rw_semaphore_dwh *));
extern void FASTCALL(dwh_down_read_failed(struct rw_semaphore_dwh *, struct task_struct *));
extern void FASTCALL(dwh__rwsem_do_wake(struct rw_semaphore_dwh *));

/* AA flavour: a single signed count (negative => writer) plus a wait queue */
struct rw_semaphore_aa {
	spinlock_t		lock;
	int			count;
	struct list_head	wait;
};

#define RWSEM_WAITQUEUE_READ 0

extern void FASTCALL(aa_down_read(struct rw_semaphore_aa *));
extern void FASTCALL(aa_up_read(struct rw_semaphore_aa *));
extern void FASTCALL(aa_down_failed(struct rw_semaphore_aa *, int));
extern void FASTCALL(aa_rwsem_wake(struct rw_semaphore_aa *));

/*
 * grab the read lock, DWH style: grant it immediately unless someone is
 * already queued, in which case join the queue via the slow path (which
 * is assumed to drop and retake wait_lock around sleeping)
 */
void dwh_down_read(struct rw_semaphore_dwh *sem)
{
	struct task_struct *tsk = current;

	spin_lock(&sem->wait_lock);

	if (!sem->waiting) {
		/* nothing queued - grant the lock */
		sem->active++;
		spin_unlock(&sem->wait_lock);
		return;
	}

	sem->waiting++;
	dwh_down_read_failed(sem, tsk);
	spin_unlock(&sem->wait_lock);
}

/*
 * grab the read lock, AA style: a negative count means a writer holds the
 * semaphore, and a non-empty queue means others are waiting ahead of us
 */
void aa_down_read(struct rw_semaphore_aa *sem)
{
	spin_lock_irq(&sem->lock);
	if (sem->count < 0 || !list_empty(&sem->wait))
		goto slow_path;
	sem->count++;
out:
	spin_unlock_irq(&sem->lock);
	return;

slow_path:
	aa_down_failed(sem, RWSEM_WAITQUEUE_READ);
	goto out;
}

/*
 * release the read lock, DWH style: the last active reader wakes waiters
 */
void dwh_up_read(struct rw_semaphore_dwh *sem)
{
	spin_lock(&sem->wait_lock);
	if (--sem->active == 0 && sem->waiting)
		dwh__rwsem_do_wake(sem);
	spin_unlock(&sem->wait_lock);
}

/*
 * release the read lock, AA style: uses the IRQ-saving lock variant so it
 * is safe if called with interrupts already disabled
 */
void aa_up_read(struct rw_semaphore_aa *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	if (!--sem->count && !list_empty(&sem->wait))
		aa_rwsem_wake(sem);
	spin_unlock_irqrestore(&sem->lock, flags);
}
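
/*
 * Example initialisation and usage (not part of the original file): a
 * minimal sketch of how the two semaphore flavours might be set up and
 * exercised on the read side.  The helper names dwh_init_rwsem and
 * aa_init_rwsem are hypothetical; spin_lock_init() and INIT_LIST_HEAD()
 * are the standard kernel primitives for the embedded lock and queue.
 */
static inline void dwh_init_rwsem(struct rw_semaphore_dwh *sem)
{
	sem->active = 0;
	sem->waiting = 0;
	spin_lock_init(&sem->wait_lock);
}

static inline void aa_init_rwsem(struct rw_semaphore_aa *sem)
{
	spin_lock_init(&sem->lock);
	sem->count = 0;
	INIT_LIST_HEAD(&sem->wait);
}

#if 0	/* read-side usage sketch */
	struct rw_semaphore_dwh sem;

	dwh_init_rwsem(&sem);
	dwh_down_read(&sem);
	/* ... read-side critical section ... */
	dwh_up_read(&sem);
#endif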