From: "Martin J. Bligh" , Nick Piggin arch_init_sched_domains is using cpu_callout_map (via cpu_possible) instead of cpu_online_map. That's really only intended for cpu bootstrap, and won't work properly if we called out to a cpu, but it failed to respond. The normal way is to use cpu_online_map for this stuff, and it even cleans up the existing code a bit. (it's just a case of s/all_cpus/cpu_online_map/ and removing the loop that builds all_cpus). I tested this out on the NUMA-Q, and it works fine. --- 25-akpm/arch/i386/kernel/smpboot.c | 38 ++++++++++--------------------------- 25-akpm/kernel/sched.c | 34 ++++++++------------------------- 2 files changed, 20 insertions(+), 52 deletions(-) diff -puN arch/i386/kernel/smpboot.c~sched-arch_init_sched_domains-fix arch/i386/kernel/smpboot.c --- 25/arch/i386/kernel/smpboot.c~sched-arch_init_sched_domains-fix Thu Jan 22 16:27:53 2004 +++ 25-akpm/arch/i386/kernel/smpboot.c Thu Jan 22 16:27:53 2004 @@ -1130,18 +1130,10 @@ static DEFINE_PER_CPU(struct sched_domai __init void arch_init_sched_domains(void) { int i; - cpumask_t all_cpus = CPU_MASK_NONE; struct sched_group *first_cpu = NULL, *last_cpu = NULL; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; - - cpu_set(i, all_cpus); - } - /* Set up domains */ - for_each_cpu_mask(i, all_cpus) { + for_each_cpu_mask(i, cpu_online_map) { struct sched_domain *cpu_domain = cpu_sched_domain(i); struct sched_domain *phys_domain = &per_cpu(phys_domains, i); struct sched_domain *node_domain = &per_cpu(node_domains, i); @@ -1156,11 +1148,11 @@ __init void arch_init_sched_domains(void phys_domain->flags |= SD_FLAG_IDLE; *node_domain = SD_NODE_INIT; - node_domain->span = all_cpus; + node_domain->span = cpu_online_map; } /* Set up CPU (sibling) groups */ - for_each_cpu_mask(i, all_cpus) { + for_each_cpu_mask(i, cpu_online_map) { struct sched_domain *cpu_domain = cpu_sched_domain(i); int j; first_cpu = last_cpu = NULL; @@ -1186,7 +1178,7 @@ __init void arch_init_sched_domains(void for (i = 0; i < MAX_NUMNODES; i++) { int j; cpumask_t nodemask; - cpus_and(nodemask, node_to_cpumask(i), all_cpus); + cpus_and(nodemask, node_to_cpumask(i), cpu_online_map); first_cpu = last_cpu = NULL; /* Set up physical groups */ @@ -1213,7 +1205,7 @@ __init void arch_init_sched_domains(void for (i = 0; i < MAX_NUMNODES; i++) { struct sched_group *cpu = &sched_group_nodes[i]; cpumask_t nodemask; - cpus_and(nodemask, node_to_cpumask(i), all_cpus); + cpus_and(nodemask, node_to_cpumask(i), cpu_online_map); if (cpus_empty(nodemask)) continue; @@ -1230,7 +1222,7 @@ __init void arch_init_sched_domains(void mb(); - for_each_cpu_mask(i, all_cpus) { + for_each_cpu_mask(i, cpu_online_map) { int node = cpu_to_node(i); struct sched_domain *cpu_domain = cpu_sched_domain(i); struct sched_domain *phys_domain = &per_cpu(phys_domains, i); @@ -1254,18 +1246,10 @@ static DEFINE_PER_CPU(struct sched_domai __init void arch_init_sched_domains(void) { int i; - cpumask_t all_cpus = CPU_MASK_NONE; struct sched_group *first_cpu = NULL, *last_cpu = NULL; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; - - cpu_set(i, all_cpus); - } - /* Set up domains */ - for_each_cpu_mask(i, all_cpus) { + for_each_cpu_mask(i, cpu_online_map) { struct sched_domain *cpu_domain = cpu_sched_domain(i); struct sched_domain *phys_domain = &per_cpu(phys_domains, i); @@ -1273,12 +1257,12 @@ __init void arch_init_sched_domains(void cpu_domain->span = cpu_sibling_map[i]; *phys_domain = SD_CPU_INIT; - phys_domain->span = all_cpus; + phys_domain->span 
---

 25-akpm/arch/i386/kernel/smpboot.c |   38 +++++++++++---------------------------
 25-akpm/kernel/sched.c             |   34 +++++++++-------------------------
 2 files changed, 20 insertions(+), 52 deletions(-)

diff -puN arch/i386/kernel/smpboot.c~sched-arch_init_sched_domains-fix arch/i386/kernel/smpboot.c
--- 25/arch/i386/kernel/smpboot.c~sched-arch_init_sched_domains-fix	Thu Jan 22 16:27:53 2004
+++ 25-akpm/arch/i386/kernel/smpboot.c	Thu Jan 22 16:27:53 2004
@@ -1130,18 +1130,10 @@ static DEFINE_PER_CPU(struct sched_domai
 __init void arch_init_sched_domains(void)
 {
 	int i;
-	cpumask_t all_cpus = CPU_MASK_NONE;
 	struct sched_group *first_cpu = NULL, *last_cpu = NULL;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
-
-		cpu_set(i, all_cpus);
-	}
-
 	/* Set up domains */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
 		struct sched_domain *node_domain = &per_cpu(node_domains, i);
@@ -1156,11 +1148,11 @@ __init void arch_init_sched_domains(void
 		phys_domain->flags |= SD_FLAG_IDLE;
 
 		*node_domain = SD_NODE_INIT;
-		node_domain->span = all_cpus;
+		node_domain->span = cpu_online_map;
 	}
 
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		int j;
 		first_cpu = last_cpu = NULL;
@@ -1186,7 +1178,7 @@ __init void arch_init_sched_domains(void
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		int j;
 		cpumask_t nodemask;
-		cpus_and(nodemask, node_to_cpumask(i), all_cpus);
+		cpus_and(nodemask, node_to_cpumask(i), cpu_online_map);
 		first_cpu = last_cpu = NULL;
 
 		/* Set up physical groups */
@@ -1213,7 +1205,7 @@ __init void arch_init_sched_domains(void
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		struct sched_group *cpu = &sched_group_nodes[i];
 		cpumask_t nodemask;
-		cpus_and(nodemask, node_to_cpumask(i), all_cpus);
+		cpus_and(nodemask, node_to_cpumask(i), cpu_online_map);
 
 		if (cpus_empty(nodemask))
 			continue;
@@ -1230,7 +1222,7 @@ __init void arch_init_sched_domains(void
 
 	mb();
 
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		int node = cpu_to_node(i);
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
@@ -1254,18 +1246,10 @@ static DEFINE_PER_CPU(struct sched_domai
 __init void arch_init_sched_domains(void)
 {
 	int i;
-	cpumask_t all_cpus = CPU_MASK_NONE;
 	struct sched_group *first_cpu = NULL, *last_cpu = NULL;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
-
-		cpu_set(i, all_cpus);
-	}
-
 	/* Set up domains */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
 
@@ -1273,12 +1257,12 @@ __init void arch_init_sched_domains(void
 		cpu_domain->span = cpu_sibling_map[i];
 
 		*phys_domain = SD_CPU_INIT;
-		phys_domain->span = all_cpus;
+		phys_domain->span = cpu_online_map;
 		phys_domain->flags |= SD_FLAG_IDLE;
 	}
 
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		int j;
 		first_cpu = last_cpu = NULL;
@@ -1303,7 +1287,7 @@ __init void arch_init_sched_domains(void
 	first_cpu = last_cpu = NULL;
 
 	/* Set up physical groups */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		struct sched_group *cpu = &sched_group_phys[i];
 
@@ -1321,7 +1305,7 @@ __init void arch_init_sched_domains(void
 	last_cpu->next = first_cpu;
 	mb();
 
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
 		struct sched_group *cpu_group = &sched_group_cpus[i];
diff -puN kernel/sched.c~sched-arch_init_sched_domains-fix kernel/sched.c
--- 25/kernel/sched.c~sched-arch_init_sched_domains-fix	Thu Jan 22 16:27:53 2004
+++ 25-akpm/kernel/sched.c	Thu Jan 22 16:27:53 2004
@@ -3233,28 +3233,20 @@ DEFINE_PER_CPU(struct sched_domain, node
 static void __init arch_init_sched_domains(void)
 {
 	int i;
-	cpumask_t all_cpus = CPU_MASK_NONE;
 	struct sched_group *first_node = NULL, *last_node = NULL;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
-
-		cpu_set(i, all_cpus);
-	}
-
 	/* Set up domains */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		int node = cpu_to_node(i);
 		cpumask_t nodemask = node_to_cpumask(node);
 		struct sched_domain *node_domain = &per_cpu(node_domains, i);
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 
 		*node_domain = SD_NODE_INIT;
-		node_domain->span = all_cpus;
+		node_domain->span = cpu_online_map;
 
 		*cpu_domain = SD_CPU_INIT;
-		cpus_and(cpu_domain->span, nodemask, all_cpus);
+		cpus_and(cpu_domain->span, nodemask, cpu_online_map);
 		cpu_domain->parent = node_domain;
 	}
 
@@ -3265,7 +3257,7 @@ static void __init arch_init_sched_domai
 		cpumask_t nodemask;
 		struct sched_group *node = &sched_group_nodes[i];
 
-		cpus_and(nodemask, node_to_cpumask(i), all_cpus);
+		cpus_and(nodemask, node_to_cpumask(i), cpu_online_map);
 		if (cpus_empty(nodemask))
 			continue;
 
@@ -3299,7 +3291,7 @@ static void __init arch_init_sched_domai
 	last_node->next = first_node;
 	mb();
 
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *node_domain = &per_cpu(node_domains, i);
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		node_domain->groups = &sched_group_nodes[cpu_to_node(i)];
@@ -3311,26 +3303,18 @@ static void __init arch_init_sched_domai
 static void __init arch_init_sched_domains(void)
 {
 	int i;
-	cpumask_t all_cpus = CPU_MASK_NONE;
 	struct sched_group *first_cpu = NULL, *last_cpu = NULL;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
-
-		cpu_set(i, all_cpus);
-	}
-
 	/* Set up domains */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 
 		*cpu_domain = SD_CPU_INIT;
-		cpu_domain->span = all_cpus;
+		cpu_domain->span = cpu_online_map;
 	}
 
 	/* Set up CPU groups */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_group *cpu = &sched_group_cpus[i];
 		cpu->cpumask = CPU_MASK_NONE;
 
@@ -3345,7 +3329,7 @@ static void __init arch_init_sched_domai
 	last_cpu->next = first_cpu;
 	mb();
 
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		cpu_domain->groups = &sched_group_cpus[i];
 	}
_