From: Nick Piggin

Similarly to the earlier change in load_balance, only lock the runqueue
in load_balance_newidle if the busiest queue found has nr_running > 1.
This will reduce the frequency of expensive remote runqueue lock
acquisitions in the schedule() path on some workloads.

Signed-off-by: Nick Piggin
Acked-by: Ingo Molnar
Signed-off-by: Andrew Morton
---

 kernel/sched.c |   17 ++++++++++-------
 1 files changed, 10 insertions(+), 7 deletions(-)

diff -puN kernel/sched.c~sched-less-newidle-locking kernel/sched.c
--- devel/kernel/sched.c~sched-less-newidle-locking	2005-08-30 18:45:57.000000000 -0700
+++ devel-akpm/kernel/sched.c	2005-08-30 18:45:57.000000000 -0700
@@ -2175,8 +2175,7 @@ static int load_balance(int this_cpu, ru
 	 */
 	double_lock_balance(this_rq, busiest);
 	nr_moved = move_tasks(this_rq, this_cpu, busiest,
-					imbalance, sd, idle,
-					&all_pinned);
+					imbalance, sd, idle, &all_pinned);
 	spin_unlock(&busiest->lock);

 	/* All tasks on this runqueue were pinned by CPU affinity */
@@ -2271,18 +2270,22 @@ static int load_balance_newidle(int this

 	BUG_ON(busiest == this_rq);

-	/* Attempt to move tasks */
-	double_lock_balance(this_rq, busiest);
-
 	schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
-	nr_moved = move_tasks(this_rq, this_cpu, busiest,
+
+	nr_moved = 0;
+	if (busiest->nr_running > 1) {
+		/* Attempt to move tasks */
+		double_lock_balance(this_rq, busiest);
+		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, NEWLY_IDLE, NULL);
+		spin_unlock(&busiest->lock);
+	}
+
 	if (!nr_moved)
 		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
 	else
 		sd->nr_balance_failed = 0;

-	spin_unlock(&busiest->lock);
 	return nr_moved;

 out_balanced:
_
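
For readers outside the kernel tree, the sketch below shows the same pattern
in stand-alone user-space C: read the remote queue's nr_running without its
lock, and only pay for the ordered double-lock when the snapshot suggests
there is something worth pulling.  All names here (struct run_queue,
double_lock, pull_one_task) are hypothetical illustrations, not kernel APIs,
and this is an assumption-laden sketch rather than the patched code itself.

#include <pthread.h>

struct run_queue {
	pthread_mutex_t lock;
	int nr_running;
	/* ... task list would live here ... */
};

/*
 * Lock both queues in a fixed (address) order to avoid ABBA deadlock,
 * loosely analogous to double_lock_balance().  Called with this_rq->lock
 * already held.
 */
void double_lock(struct run_queue *this_rq, struct run_queue *busiest)
{
	if (busiest < this_rq) {
		pthread_mutex_unlock(&this_rq->lock);
		pthread_mutex_lock(&busiest->lock);
		pthread_mutex_lock(&this_rq->lock);
	} else {
		pthread_mutex_lock(&busiest->lock);
	}
}

/* Called with this_rq->lock held, as load_balance_newidle() is. */
int pull_one_task(struct run_queue *this_rq, struct run_queue *busiest)
{
	int nr_moved = 0;

	/*
	 * Racy but safe check: if the unlocked snapshot shows at most one
	 * runnable task, skip the remote lock entirely.  Worst case a
	 * balancing opportunity is missed and a later pass picks it up.
	 */
	if (busiest->nr_running > 1) {
		double_lock(this_rq, busiest);
		/* Re-check under the lock before actually moving anything. */
		if (busiest->nr_running > 1) {
			busiest->nr_running--;
			this_rq->nr_running++;
			nr_moved = 1;
		}
		pthread_mutex_unlock(&busiest->lock);
	}
	return nr_moved;
}

The cheap unlocked test is what the patch adds: correctness does not depend
on it (move_tasks still runs under both locks), it only avoids taking the
remote lock when it would almost certainly find nothing to move.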