Signed-off-by: Andrew Morton
---

 arch/x86_64/kernel/setup.c |   21 +++++++++++++++++++++
 1 files changed, 21 insertions(+)

diff -puN arch/x86_64/kernel/setup.c~x86_64-intel-srat arch/x86_64/kernel/setup.c
--- devel/arch/x86_64/kernel/setup.c~x86_64-intel-srat	2005-09-07 20:10:23.000000000 -0700
+++ devel-akpm/arch/x86_64/kernel/setup.c	2005-09-07 20:10:23.000000000 -0700
@@ -942,6 +942,25 @@ static int __cpuinit intel_num_cpu_cores
 	return 1;
 }
 
+static void srat_detect_node(void)
+{
+#ifdef CONFIG_NUMA
+	unsigned apicid, node;
+	int cpu = smp_processor_id();
+
+	/* Don't do the funky fallback heuristics the AMD version employs
+	   for now. */
+	apicid = phys_proc_id[cpu];
+	node = apicid_to_node[apicid];
+	if (node == NUMA_NO_NODE)
+		node = 0;
+	cpu_to_node[cpu] = node;
+
+	if (acpi_numa > 0)
+		printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node);
+#endif
+}
+
 static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 {
 	/* Cache sizes */
@@ -960,6 +979,8 @@ static void __cpuinit init_intel(struct
 	if (c->x86 >= 15)
 		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
 	c->x86_num_cores = intel_num_cpu_cores(c);
+
+	srat_detect_node();
 }
 
 void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
_