From: Anton Blanchard

Clean up our idle loop code:

- Remove a bunch of useless includes and make most functions static.

- There were places where we weren't disabling interrupts before checking
  need_resched and then calling the hypervisor to sleep our thread.  We
  might race with an IPI and end up missing a reschedule.  Disable
  interrupts around these regions to make them safe (see the sketch below).

- We forgot to turn off the polling flag when exiting the dedicated_idle
  idle loop.  This could have resulted in all manner of problems, as other
  cpus would avoid sending IPIs to force reschedules.

- Add a missing check for cpu_is_offline in the shared cpu idle loop.
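For reference, the interrupt-disable/re-check sequence the pSeries idle
loops below converge on is roughly the following (a simplified sketch of
the shared-processor case only; the real loops also manage the polling
flag, xIdle and offline cpus):

	while (1) {
		while (!need_resched()) {
			local_irq_disable();

			/*
			 * Re-check with interrupts disabled: an IPI that
			 * sets need_resched() after the outer test can no
			 * longer slip in between the test and the
			 * hypervisor call.
			 */
			if (!need_resched())
				cede_processor();	/* returns with irqs on */
			else
				local_irq_enable();
		}

		schedule();
	}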
Signed-off-by: Anton Blanchard
Signed-off-by: Andrew Morton
---

 25-akpm/arch/ppc64/kernel/idle.c |  124 ++++++++++++++++++---------------------
 1 files changed, 60 insertions(+), 64 deletions(-)

diff -puN arch/ppc64/kernel/idle.c~ppc64-clean-up-idle-loop-code arch/ppc64/kernel/idle.c
--- 25/arch/ppc64/kernel/idle.c~ppc64-clean-up-idle-loop-code	2004-09-11 16:30:05.638575144 -0700
+++ 25-akpm/arch/ppc64/kernel/idle.c	2004-09-11 16:30:05.643574384 -0700
@@ -16,28 +16,16 @@
  */
 
 #include 
-#include 
 #include 
 #include 
-#include 
 #include 
-#include 
-#include 
-#include 
-#include 
-#include 
 #include 
-#include 
-#include 
 #include 
-#include 
 #include 
 #include 
-#include 
 #include 
 #include 
@@ -45,11 +33,11 @@ extern long cede_processor(void);
 extern long poll_pending(void);
 extern void power4_idle(void);
 
-int (*idle_loop)(void);
+static int (*idle_loop)(void);
 
 #ifdef CONFIG_PPC_ISERIES
-unsigned long maxYieldTime = 0;
-unsigned long minYieldTime = 0xffffffffffffffffUL;
+static unsigned long maxYieldTime = 0;
+static unsigned long minYieldTime = 0xffffffffffffffffUL;
 
 static void yield_shared_processor(void)
 {
@@ -80,7 +68,7 @@ static void yield_shared_processor(void)
 	process_iSeries_events();
 }
 
-int iSeries_idle(void)
+static int iSeries_idle(void)
 {
 	struct paca_struct *lpaca;
 	long oldval;
@@ -91,13 +79,10 @@ int iSeries_idle(void)
 	CTRL = mfspr(CTRLF);
 	CTRL &= ~RUNLATCH;
 	mtspr(CTRLT, CTRL);
-#if 0
-	init_idle();
-#endif
 
 	lpaca = get_paca();
 
-	for (;;) {
+	while (1) {
 		if (lpaca->lppaca.xSharedProc) {
 			if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
 				process_iSeries_events();
@@ -125,11 +110,13 @@ int iSeries_idle(void)
 			schedule();
 	}
+	return 0;
 }
-#endif
 
-int default_idle(void)
+#else
+
+static int default_idle(void)
 {
 	long oldval;
 	unsigned int cpu = smp_processor_id();
@@ -164,8 +151,6 @@ int default_idle(void)
 	return 0;
 }
 
-#ifdef CONFIG_PPC_PSERIES
-
 DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
 
 int dedicated_idle(void)
@@ -179,8 +164,10 @@ int dedicated_idle(void)
 	ppaca = &paca[cpu ^ 1];
 
 	while (1) {
-		/* Indicate to the HV that we are idle. Now would be
-		 * a good time to find other work to dispatch. */
+		/*
+		 * Indicate to the HV that we are idle. Now would be
+		 * a good time to find other work to dispatch.
+		 */
 		lpaca->lppaca.xIdle = 1;
 
 		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
@@ -203,21 +190,17 @@ int dedicated_idle(void)
 			HMT_medium();
 
 			if (!(ppaca->lppaca.xIdle)) {
-				/* Indicate we are no longer polling for
-				 * work, and then clear need_resched. If
-				 * need_resched was 1, set it back to 1
-				 * and schedule work
+				local_irq_disable();
+
+				/*
+				 * We are about to sleep the thread
+				 * and so wont be polling any
+				 * more.
 				 */
 				clear_thread_flag(TIF_POLLING_NRFLAG);
-				oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
-				if(oldval == 1) {
-					set_need_resched();
-					break;
-				}
-
-				local_irq_disable();
-				/* SMT dynamic mode. Cede will result
+
+				/*
+				 * SMT dynamic mode. Cede will result
 				 * in this thread going dormant, if the
 				 * partner thread is still doing work.
 				 * Thread wakes up if partner goes idle,
@@ -225,15 +208,21 @@
 				 * occurs. Returning from the cede
 				 * enables external interrupts.
 				 */
-				cede_processor();
+				if (!need_resched())
+					cede_processor();
+				else
+					local_irq_enable();
 			} else {
-				/* Give the HV an opportunity at the
+				/*
+				 * Give the HV an opportunity at the
 				 * processor, since we are not doing
 				 * any work.
 				 */
 				poll_pending();
 			}
 		}
+
+		clear_thread_flag(TIF_POLLING_NRFLAG);
 	} else {
 		set_need_resched();
 	}
@@ -247,48 +236,49 @@ int dedicated_idle(void)
 	return 0;
 }
 
-int shared_idle(void)
+static int shared_idle(void)
 {
 	struct paca_struct *lpaca = get_paca();
+	unsigned int cpu = smp_processor_id();
 
 	while (1) {
-		if (cpu_is_offline(smp_processor_id()) &&
-		    system_state == SYSTEM_RUNNING)
-			cpu_die();
-
-		/* Indicate to the HV that we are idle. Now would be
-		 * a good time to find other work to dispatch. */
+		/*
+		 * Indicate to the HV that we are idle. Now would be
+		 * a good time to find other work to dispatch.
+		 */
 		lpaca->lppaca.xIdle = 1;
 
-		if (!need_resched()) {
-			local_irq_disable();
-
-			/*
+		while (!need_resched() && !cpu_is_offline(cpu)) {
+			local_irq_disable();
+
+			/*
 			 * Yield the processor to the hypervisor. We return if
 			 * an external interrupt occurs (which are driven prior
 			 * to returning here) or if a prod occurs from another
-			 * processor.  When returning here, external interrupts
+			 * processor. When returning here, external interrupts
 			 * are enabled.
+			 *
+			 * Check need_resched() again with interrupts disabled
+			 * to avoid a race.
 			 */
-			cede_processor();
+			if (!need_resched())
+				cede_processor();
+			else
+				local_irq_enable();
 		}
 
 		HMT_medium();
 		lpaca->lppaca.xIdle = 0;
 		schedule();
+		if (cpu_is_offline(smp_processor_id()) &&
+		    system_state == SYSTEM_RUNNING)
+			cpu_die();
 	}
 	return 0;
 }
 
-#endif
-
-int cpu_idle(void)
-{
-	idle_loop();
-	return 0;
-}
 
-int native_idle(void)
+static int powermac_idle(void)
 {
 	while(1) {
 		if (!need_resched())
@@ -298,6 +288,13 @@ int native_idle(void)
 	return 0;
 }
+#endif
+
+int cpu_idle(void)
+{
+	idle_loop();
+	return 0;
+}
 
 int idle_setup(void)
 {
@@ -318,8 +315,8 @@ int idle_setup(void)
 			idle_loop = default_idle;
 		}
 	} else if (systemcfg->platform == PLATFORM_POWERMAC) {
-		printk("idle = native_idle\n");
-		idle_loop = native_idle;
+		printk("idle = powermac_idle\n");
+		idle_loop = powermac_idle;
 	} else {
 		printk("idle_setup: unknown platform, use default_idle\n");
 		idle_loop = default_idle;
@@ -328,4 +325,3 @@ int idle_setup(void)
 
 	return 1;
 }
-
_