Use anonymous union in mempolicy layer. There is this union v in struct mempolicy that causes the code to look ugly. Since we now support anonymous unions, let's replace it with an anonymous union. Signed-off-by: Christoph Lameter Index: linux-2.6.15-rc5-mm2/include/linux/mempolicy.h =================================================================== --- linux-2.6.15-rc5-mm2.orig/include/linux/mempolicy.h 2005-12-12 09:10:34.000000000 -0800 +++ linux-2.6.15-rc5-mm2/include/linux/mempolicy.h 2005-12-13 13:32:28.000000000 -0800 @@ -67,7 +67,7 @@ struct mempolicy { short preferred_node; /* preferred */ nodemask_t nodes; /* interleave */ /* undefined for default */ - } v; + }; nodemask_t cpuset_mems_allowed; /* mempolicy relative to these nodes */ }; Index: linux-2.6.15-rc5-mm2/mm/mempolicy.c =================================================================== --- linux-2.6.15-rc5-mm2.orig/mm/mempolicy.c 2005-12-12 15:17:57.000000000 -0800 +++ linux-2.6.15-rc5-mm2/mm/mempolicy.c 2005-12-13 13:32:28.000000000 -0800 @@ -160,16 +160,16 @@ static struct mempolicy *mpol_new(int mo atomic_set(&policy->refcnt, 1); switch (mode) { case MPOL_INTERLEAVE: - policy->v.nodes = *nodes; + policy->nodes = *nodes; break; case MPOL_PREFERRED: - policy->v.preferred_node = first_node(*nodes); - if (policy->v.preferred_node >= MAX_NUMNODES) - policy->v.preferred_node = -1; + policy->preferred_node = first_node(*nodes); + if (policy->preferred_node >= MAX_NUMNODES) + policy->preferred_node = -1; break; case MPOL_BIND: - policy->v.zonelist = bind_zonelist(nodes); - if (policy->v.zonelist == NULL) { + policy->zonelist = bind_zonelist(nodes); + if (policy->zonelist == NULL) { kmem_cache_free(policy_cache, policy); return ERR_PTR(-ENOMEM); } @@ -403,7 +403,7 @@ long do_set_mempolicy(int mode, nodemask mpol_free(current->mempolicy); current->mempolicy = new; if (new && new->policy == MPOL_INTERLEAVE) - current->il_next = first_node(new->v.nodes); + current->il_next = first_node(new->nodes); return 0; } 
@@ -415,21 +415,21 @@ static void get_zonemask(struct mempolic nodes_clear(*nodes); switch (p->policy) { case MPOL_BIND: - for (i = 0; p->v.zonelist->zones[i]; i++) - node_set(p->v.zonelist->zones[i]->zone_pgdat->node_id, + for (i = 0; p->zonelist->zones[i]; i++) + node_set(p->zonelist->zones[i]->zone_pgdat->node_id, *nodes); break; case MPOL_DEFAULT: break; case MPOL_INTERLEAVE: - *nodes = p->v.nodes; + *nodes = p->nodes; break; case MPOL_PREFERRED: /* or use current node instead of online map? */ - if (p->v.preferred_node < 0) + if (p->preferred_node < 0) *nodes = node_online_map; else - node_set(p->v.preferred_node, *nodes); + node_set(p->preferred_node, *nodes); break; default: BUG(); @@ -958,7 +958,7 @@ static struct zonelist *zonelist_policy( switch (policy->policy) { case MPOL_PREFERRED: - nd = policy->v.preferred_node; + nd = policy->preferred_node; if (nd < 0) nd = numa_node_id(); break; @@ -966,8 +966,8 @@ static struct zonelist *zonelist_policy( /* Lower zones don't get a policy applied */ /* Careful: current->mems_allowed might have moved */ if (gfp_zone(gfp) >= policy_zone) - if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist)) - return policy->v.zonelist; + if (cpuset_zonelist_valid_mems_allowed(policy->zonelist)) + return policy->zonelist; /*FALL THROUGH*/ case MPOL_INTERLEAVE: /* should not happen */ case MPOL_DEFAULT: @@ -987,9 +987,9 @@ static unsigned interleave_nodes(struct struct task_struct *me = current; nid = me->il_next; - next = next_node(nid, policy->v.nodes); + next = next_node(nid, policy->nodes); if (next >= MAX_NUMNODES) - next = first_node(policy->v.nodes); + next = first_node(policy->nodes); me->il_next = next; return nid; } @@ -1009,11 +1009,11 @@ unsigned slab_node(struct mempolicy *pol * Follow bind policy behavior and start allocation at the * first node. 
*/ - return policy->v.zonelist->zones[0]->zone_pgdat->node_id; + return policy->zonelist->zones[0]->zone_pgdat->node_id; case MPOL_PREFERRED: - if (policy->v.preferred_node >= 0) - return policy->v.preferred_node; + if (policy->preferred_node >= 0) + return policy->preferred_node; /* Fall through */ default: @@ -1025,14 +1025,14 @@ unsigned slab_node(struct mempolicy *pol static unsigned offset_il_node(struct mempolicy *pol, struct vm_area_struct *vma, unsigned long off) { - unsigned nnodes = nodes_weight(pol->v.nodes); + unsigned nnodes = nodes_weight(pol->nodes); unsigned target = (unsigned)off % nnodes; int c; int nid = -1; c = 0; do { - nid = next_node(nid, pol->v.nodes); + nid = next_node(nid, pol->nodes); c++; } while (c <= target); return nid; @@ -1177,13 +1177,13 @@ struct mempolicy *__mpol_copy(struct mem *new = *old; atomic_set(&new->refcnt, 1); if (new->policy == MPOL_BIND) { - int sz = ksize(old->v.zonelist); - new->v.zonelist = kmalloc(sz, SLAB_KERNEL); - if (!new->v.zonelist) { + int sz = ksize(old->zonelist); + new->zonelist = kmalloc(sz, SLAB_KERNEL); + if (!new->zonelist) { kmem_cache_free(policy_cache, new); return ERR_PTR(-ENOMEM); } - memcpy(new->v.zonelist, old->v.zonelist, sz); + memcpy(new->zonelist, old->zonelist, sz); } return new; } @@ -1199,15 +1199,15 @@ int __mpol_equal(struct mempolicy *a, st case MPOL_DEFAULT: return 1; case MPOL_INTERLEAVE: - return nodes_equal(a->v.nodes, b->v.nodes); + return nodes_equal(a->nodes, b->nodes); case MPOL_PREFERRED: - return a->v.preferred_node == b->v.preferred_node; + return a->preferred_node == b->preferred_node; case MPOL_BIND: { int i; - for (i = 0; a->v.zonelist->zones[i]; i++) - if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i]) + for (i = 0; a->zonelist->zones[i]; i++) + if (a->zonelist->zones[i] != b->zonelist->zones[i]) return 0; - return b->v.zonelist->zones[i] == NULL; + return b->zonelist->zones[i] == NULL; } default: BUG(); @@ -1221,7 +1221,7 @@ void __mpol_free(struct mempolicy *p) 
if (!atomic_dec_and_test(&p->refcnt)) return; if (p->policy == MPOL_BIND) - kfree(p->v.zonelist); + kfree(p->zonelist); p->policy = MPOL_DEFAULT; kmem_cache_free(policy_cache, p); } @@ -1390,7 +1390,7 @@ int mpol_set_shared_policy(struct shared PDprintk("set_shared_policy %lx sz %lu %d %lx\n", vma->vm_pgoff, sz, npol? npol->policy : -1, - npol ? nodes_addr(npol->v.nodes)[0] : -1); + npol ? nodes_addr(npol->nodes)[0] : -1); if (npol) { new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); @@ -1463,14 +1463,14 @@ void mpol_rebind_policy(struct mempolicy case MPOL_DEFAULT: break; case MPOL_INTERLEAVE: - nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask); - pol->v.nodes = tmp; + nodes_remap(tmp, pol->nodes, *mpolmask, *newmask); + pol->nodes = tmp; *mpolmask = *newmask; current->il_next = node_remap(current->il_next, *mpolmask, *newmask); break; case MPOL_PREFERRED: - pol->v.preferred_node = node_remap(pol->v.preferred_node, + pol->preferred_node = node_remap(pol->preferred_node, *mpolmask, *newmask); *mpolmask = *newmask; break; @@ -1480,7 +1480,7 @@ void mpol_rebind_policy(struct mempolicy struct zonelist *zonelist; nodes_clear(nodes); - for (z = pol->v.zonelist->zones; *z; z++) + for (z = pol->zonelist->zones; *z; z++) node_set((*z)->zone_pgdat->node_id, nodes); nodes_remap(tmp, nodes, *mpolmask, *newmask); nodes = tmp; @@ -1494,8 +1494,8 @@ void mpol_rebind_policy(struct mempolicy if (zonelist) { /* Good - got mem - substitute new zonelist */ - kfree(pol->v.zonelist); - pol->v.zonelist = zonelist; + kfree(pol->zonelist); + pol->zonelist = zonelist; } *mpolmask = *newmask; break; @@ -1558,7 +1558,7 @@ static inline int mpol_to_str(char *buff case MPOL_PREFERRED: nodes_clear(nodes); - node_set(pol->v.preferred_node, nodes); + node_set(pol->preferred_node, nodes); break; case MPOL_BIND: @@ -1566,7 +1566,7 @@ static inline int mpol_to_str(char *buff break; case MPOL_INTERLEAVE: - nodes = pol->v.nodes; + nodes = pol->nodes; break; default: