===== include/asm-ia64/processor.h 1.62 vs edited =====
--- 1.62/include/asm-ia64/processor.h	2004-08-24 02:08:09 -07:00
+++ edited/include/asm-ia64/processor.h	2004-09-05 08:07:36 -07:00
@@ -335,6 +335,9 @@
 #define prepare_to_copy(tsk)	do { } while (0)
 
 #ifdef CONFIG_NUMA
+
+#define SD_NODES_PER_DOMAIN 4
+
 #define SD_NODE_INIT (struct sched_domain) {		\
 	.span			= CPU_MASK_NONE,	\
 	.parent			= NULL,			\
===== kernel/sched.c 1.342 vs edited =====
--- 1.342/kernel/sched.c	2004-08-26 23:30:30 -07:00
+++ edited/kernel/sched.c	2004-09-05 08:06:16 -07:00
@@ -4214,7 +4214,12 @@
 	unlock_cpu_hotplug();
 }
 
-#ifdef CONFIG_NUMA
+/*
+ * To enable disjoint top-level NUMA domains, define SD_NODES_PER_DOMAIN
+ * in arch code. That defines the number of nearby nodes in a node's top
+ * level scheduling domain.
+ */
+#if defined(CONFIG_NUMA) && defined(SD_NODES_PER_DOMAIN)
 /**
  * find_next_best_node - find the next node to include in a sched_domain
  * @node: node whose sched_domain we're building
@@ -4261,7 +4266,7 @@
  * should be one that prevents unnecessary balancing, but also spreads tasks
  * out optimally.
  */
-cpumask_t __init sched_domain_node_span(int node, int size)
+cpumask_t __init sched_domain_node_span(int node)
 {
 	int i;
 	cpumask_t span;
@@ -4270,7 +4275,7 @@
 	cpus_clear(span);
 	bitmap_zero(used_nodes, MAX_NUMNODES);
 
-	for (i = 0; i < size; i++) {
+	for (i = 0; i < SD_NODES_PER_DOMAIN; i++) {
 		int next_node = find_next_best_node(node, used_nodes);
 		cpumask_t nodemask;
 
@@ -4280,7 +4285,12 @@
 	return span;
 }
 
-#endif /* CONFIG_NUMA */
+#else /* CONFIG_NUMA && SD_NODES_PER_DOMAIN */
+cpumask_t __init sched_domain_node_span(int node)
+{
+	return cpu_possible_map;
+}
+#endif /* CONFIG_NUMA && SD_NODES_PER_DOMAIN */
 
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
@@ -4304,9 +4314,6 @@
 
 #ifdef CONFIG_NUMA
 
-/* Number of nearby nodes in a node's scheduling domain */
-#define SD_NODES_PER_DOMAIN 4
-
 static DEFINE_PER_CPU(struct sched_domain, node_domains);
 static struct sched_group sched_group_nodes[MAX_NUMNODES];
 __init static int cpu_to_node_group(int cpu)
@@ -4433,7 +4440,7 @@
 		group = cpu_to_node_group(i);
 		*sd = SD_NODE_INIT;
 		/* FIXME: should be multilevel, in arch code */
-		sd->span = sched_domain_node_span(i, SD_NODES_PER_DOMAIN);
+		sd->span = sched_domain_node_span(i);
 		cpus_and(sd->span, sd->span, cpu_default_map);
 		sd->groups = &sched_group_nodes[group];
 #endif
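
Purely for illustration, and not part of the patch: a small user-space sketch of the idea behind sched_domain_node_span() when SD_NODES_PER_DOMAIN is defined, namely greedily collecting the N closest not-yet-used nodes, starting from the node itself. The distance table, node count, and main() driver below are made up for the example; the kernel version operates on cpumasks and the platform's real node distance information rather than this toy matrix.

/*
 * Toy illustration of greedy top-level domain span construction:
 * starting from @node, repeatedly pick the closest node that has
 * not been used yet, SD_NODES_PER_DOMAIN times.
 */
#include <stdio.h>
#include <limits.h>

#define MAX_NUMNODES		8
#define SD_NODES_PER_DOMAIN	4

/* Hypothetical SLIT-style distances between 8 nodes (smaller = closer). */
static const int node_distance[MAX_NUMNODES][MAX_NUMNODES] = {
	{ 10, 20, 20, 30, 40, 40, 50, 50 },
	{ 20, 10, 30, 20, 40, 40, 50, 50 },
	{ 20, 30, 10, 20, 50, 50, 40, 40 },
	{ 30, 20, 20, 10, 50, 50, 40, 40 },
	{ 40, 40, 50, 50, 10, 20, 20, 30 },
	{ 40, 40, 50, 50, 20, 10, 30, 20 },
	{ 50, 50, 40, 40, 20, 30, 10, 20 },
	{ 50, 50, 40, 40, 30, 20, 20, 10 },
};

/* Pick the node closest to @node that is not yet marked used. */
static int find_next_best_node(int node, int *used)
{
	int i, best = -1, best_dist = INT_MAX;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (used[i])
			continue;
		if (node_distance[node][i] < best_dist) {
			best_dist = node_distance[node][i];
			best = i;
		}
	}
	if (best >= 0)
		used[best] = 1;
	return best;
}

int main(void)
{
	int node = 2, used[MAX_NUMNODES] = { 0 };
	int i;

	printf("top-level domain span for node %d:", node);
	for (i = 0; i < SD_NODES_PER_DOMAIN; i++) {
		int next = find_next_best_node(node, used);

		if (next < 0)
			break;
		printf(" %d", next);
	}
	printf("\n");
	return 0;
}

With the made-up table above this prints "top-level domain span for node 2: 2 0 3 1", i.e. the node itself plus its three nearest neighbours; with SD_NODES_PER_DOMAIN left undefined the patched kernel instead falls back to spanning cpu_possible_map, as the #else branch in the diff shows.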