From: David Woodhouse

Debugging infrastructure to drop a stack trace if someone calls one of the
sleep_on() functions without lock_kernel() held.

---

 25-akpm/include/linux/smp_lock.h |    4 +++-
 25-akpm/kernel/sched.c           |   15 +++++++++++++++
 2 files changed, 18 insertions(+), 1 deletion(-)

diff -puN include/linux/smp_lock.h~sleep_on-needs_lock_kernel include/linux/smp_lock.h
--- 25/include/linux/smp_lock.h~sleep_on-needs_lock_kernel	Wed Feb 11 18:36:14 2004
+++ 25-akpm/include/linux/smp_lock.h	Wed Feb 11 18:36:14 2004
@@ -5,7 +5,9 @@
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+#define BKL_DEBUG /* For testing for sleep_on() abuse */
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) || defined(BKL_DEBUG)
 
 extern spinlock_t kernel_flag;
 
diff -puN kernel/sched.c~sleep_on-needs_lock_kernel kernel/sched.c
--- 25/kernel/sched.c~sleep_on-needs_lock_kernel	Wed Feb 11 18:36:14 2004
+++ 25-akpm/kernel/sched.c	Wed Feb 11 18:36:14 2004
@@ -2219,10 +2219,21 @@ EXPORT_SYMBOL(wait_for_completion);
 	__remove_wait_queue(q, &wait);			\
 	spin_unlock_irqrestore(&q->lock, flags);
 
+#define SLEEP_ON_BKLCHECK				\
+	if (unlikely(!kernel_locked()) &&		\
+	    sleep_on_bkl_warnings < 10) {		\
+		sleep_on_bkl_warnings++;		\
+		WARN_ON(1);				\
+	}
+
+static int sleep_on_bkl_warnings;
+
 void interruptible_sleep_on(wait_queue_head_t *q)
 {
 	SLEEP_ON_VAR
 
+	SLEEP_ON_BKLCHECK
+
 	current->state = TASK_INTERRUPTIBLE;
 
 	SLEEP_ON_HEAD
@@ -2236,6 +2247,8 @@ long interruptible_sleep_on_timeout(wait
 {
 	SLEEP_ON_VAR
 
+	SLEEP_ON_BKLCHECK
+
 	current->state = TASK_INTERRUPTIBLE;
 
 	SLEEP_ON_HEAD
@@ -2264,6 +2277,8 @@ long sleep_on_timeout(wait_queue_head_t
 {
 	SLEEP_ON_VAR
 
+	SLEEP_ON_BKLCHECK
+
 	current->state = TASK_UNINTERRUPTIBLE;
 
 	SLEEP_ON_HEAD
_
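
Not part of the patch itself, but as a rough illustration of what the new check
enforces: sleep_on() and its variants historically assume the caller already
holds the Big Kernel Lock, so the legacy calling pattern looks roughly like the
sketch below.  This is a minimal, hypothetical example; the wait queue my_wq
and the surrounding code are assumptions for illustration, not taken from the
patch.

	static DECLARE_WAIT_QUEUE_HEAD(my_wq);	/* hypothetical wait queue */

	lock_kernel();		/* take the BKL; without it the new
				 * SLEEP_ON_BKLCHECK emits a WARN_ON()
				 * stack trace (for the first ten hits) */
	sleep_on(&my_wq);	/* sleep until someone does wake_up(&my_wq) */
	unlock_kernel();	/* drop the BKL again */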