Using async down for aio write, x86-64


 25-akpm/arch/x86_64/kernel/semaphore.c   |   23 ++++++++++++++++++-----
 25-akpm/arch/x86_64/kernel/x8664_ksyms.c |    2 +-
 25-akpm/arch/x86_64/lib/thunk.S          |    2 +-
 25-akpm/include/asm-x86_64/semaphore.h   |   27 ++++++++++++++++++---------
 4 files changed, 38 insertions(+), 16 deletions(-)

diff -puN arch/x86_64/kernel/semaphore.c~aio-11-down_wq-x86_64 arch/x86_64/kernel/semaphore.c
--- 25/arch/x86_64/kernel/semaphore.c~aio-11-down_wq-x86_64	Fri May 16 16:29:49 2003
+++ 25-akpm/arch/x86_64/kernel/semaphore.c	Fri May 16 16:29:49 2003
@@ -54,15 +54,20 @@ void __up(struct semaphore *sem)
 	wake_up(&sem->wait);
 }
 
-void __down(struct semaphore * sem)
+int __down_wq(struct semaphore * sem, wait_queue_t *wait)
 {
 	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
+	DECLARE_WAITQUEUE(local_wait, tsk);
 	unsigned long flags;
 
-	tsk->state = TASK_UNINTERRUPTIBLE;
+	if (is_sync_wait(wait))
+		tsk->state = TASK_UNINTERRUPTIBLE;
+	if (!wait) {
+		wait = &local_wait;
+	}
+
 	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
+	add_wait_queue_exclusive_locked(&sem->wait, wait);
 
 	sem->sleepers++;
 	for (;;) {
@@ -80,17 +85,25 @@ void __down(struct semaphore * sem)
 		sem->sleepers = 1;	/* us - see -1 above */
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
 
+		if (!is_sync_wait(wait))
+			return -EIOCBRETRY;
+
 		schedule();
 
 		spin_lock_irqsave(&sem->wait.lock, flags);
 		tsk->state = TASK_UNINTERRUPTIBLE;
 	}
-	remove_wait_queue_locked(&sem->wait, &wait);
+	if (is_sync_wait(wait) || !list_empty(&wait->task_list)) {
+		remove_wait_queue_locked(&sem->wait, wait);
+		INIT_LIST_HEAD(&wait->task_list);
+	}
 	wake_up_locked(&sem->wait);
 	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	tsk->state = TASK_RUNNING;
+	return 0;
 }
+
 
 int __down_interruptible(struct semaphore * sem)
 {
 	int retval = 0;
diff -puN arch/x86_64/kernel/x8664_ksyms.c~aio-11-down_wq-x86_64 arch/x86_64/kernel/x8664_ksyms.c
--- 25/arch/x86_64/kernel/x8664_ksyms.c~aio-11-down_wq-x86_64	Fri May 16 16:29:49 2003
+++ 25-akpm/arch/x86_64/kernel/x8664_ksyms.c	Fri May 16 16:29:49 2003
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(get_cmos_time);
 EXPORT_SYMBOL(__io_virt_debug);
 #endif
 
-EXPORT_SYMBOL_NOVERS(__down_failed);
+EXPORT_SYMBOL_NOVERS(__down_failed_wq);
 EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
 EXPORT_SYMBOL_NOVERS(__down_failed_trylock);
 EXPORT_SYMBOL_NOVERS(__up_wakeup);
diff -puN arch/x86_64/lib/thunk.S~aio-11-down_wq-x86_64 arch/x86_64/lib/thunk.S
--- 25/arch/x86_64/lib/thunk.S~aio-11-down_wq-x86_64	Fri May 16 16:29:49 2003
+++ 25-akpm/arch/x86_64/lib/thunk.S	Fri May 16 16:29:49 2003
@@ -38,7 +38,7 @@
 #endif
 	
 	thunk do_softirq_thunk,do_softirq
-	thunk __down_failed,__down
+	thunk __down_failed_wq,__down_wq
 	thunk_retrax __down_failed_interruptible,__down_interruptible
 	thunk_retrax __down_failed_trylock,__down_trylock
 	thunk __up_wakeup,__up
diff -puN include/asm-x86_64/semaphore.h~aio-11-down_wq-x86_64 include/asm-x86_64/semaphore.h
--- 25/include/asm-x86_64/semaphore.h~aio-11-down_wq-x86_64	Fri May 16 16:29:49 2003
+++ 25-akpm/include/asm-x86_64/semaphore.h	Fri May 16 16:29:49 2003
@@ -98,39 +98,48 @@ static inline void init_MUTEX_LOCKED (st
 	sema_init(sem, 0);
 }
 
-asmlinkage void __down_failed(void /* special register calling convention */);
+asmlinkage int __down_failed_wq(void /* special register calling convention */);
 asmlinkage int  __down_failed_interruptible(void  /* params in registers */);
 asmlinkage int  __down_failed_trylock(void  /* params in registers */);
 asmlinkage void __up_wakeup(void /* special register calling convention */);
 
-asmlinkage void __down(struct semaphore * sem);
+asmlinkage int __down_wq(struct semaphore * sem, wait_queue_t *wait);
 asmlinkage int  __down_interruptible(struct semaphore * sem);
 asmlinkage int  __down_trylock(struct semaphore * sem);
 asmlinkage void __up(struct semaphore * sem);
 
 /*
  * This is ugly, but we want the default case to fall through.
- * "__down_failed" is a special asm handler that calls the C
+ * "__down_failed_wq" is a special asm handler that calls the C
  * routine that actually waits. See arch/x86_64/kernel/semaphore.c
  */
-static inline void down(struct semaphore * sem)
+static inline int down_wq(struct semaphore * sem, wait_queue_t *wait)
 {
+	int result;
+
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
 
 	__asm__ __volatile__(
 		"# atomic down operation\n\t"
-		LOCK "decl %0\n\t"     /* --sem->count */
-		"js 2f\n"
+		LOCK "decl %1\n\t"     /* --sem->count */
+		"js 2f\n\t"
+		"xorl %0,%0\n"
 		"1:\n"
 		LOCK_SECTION_START("")
-		"2:\tcall __down_failed\n\t"
+		"2:\tcall __down_failed_wq\n\t"
 		"jmp 1b\n"
 		LOCK_SECTION_END
-		:"=m" (sem->count)
-		:"D" (sem)
+		:"=a" (result), "=m" (sem->count)
+		:"D" (sem), "S" (wait)
 		:"memory");
+	return result;
+}
+
+static inline void down(struct semaphore * sem)
+{
+	down_wq(sem, NULL);
 }
 
 /*
_
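
A note on the new calling convention (a reviewer's sketch, not part of the
patch): down_wq() is down() with an optional caller-supplied wait queue
entry.  Passing NULL keeps the old synchronous behaviour; __down_wq() falls
back to an on-stack entry and sleeps in TASK_UNINTERRUPTIBLE as before.
Passing an async entry (one for which is_sync_wait() is false, i.e. not
bound to a task) makes the acquire non-blocking: on contention the entry is
left queued on sem->wait and -EIOCBRETRY is returned, and the caller is
expected to retry when that entry is woken.  The inline asm now returns
that status in %eax ("=a" (result)), zeroed with xorl on the uncontended
fast path, which is why __down_failed_wq and down_wq() gain an int return.
A minimal sketch of an async caller, assuming the kiocb->ki_wait entry
introduced elsewhere in this aio-retry series:

	/*
	 * Hypothetical illustration only: my_aio_down() and the exact
	 * ki_wait wiring are assumptions, not part of this patch.
	 */
	static int my_aio_down(struct kiocb *iocb, struct semaphore *sem)
	{
		int ret;

		/*
		 * iocb->ki_wait is assumed to be an async wait_queue_t:
		 * its wakeup callback kicks the aio retry path instead of
		 * waking a task, so is_sync_wait(&iocb->ki_wait) is false.
		 */
		ret = down_wq(sem, &iocb->ki_wait);
		if (ret == -EIOCBRETRY)
			return ret;	/* entry still queued; retried on wakeup */

		/* ... semaphore held: do the work ... */
		up(sem);
		return 0;
	}

Synchronous callers are unchanged: down(sem) simply expands to
down_wq(sem, NULL).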