From: Alexander Nyberg <alexn@telia.com>

Add a few comments, and remove a macro that doesn't need to be a macro:
LIST3_INIT() becomes the inline function kmem_list3_init(), which gets
compiler type checking and lets list_lock be initialised with
spin_lock_init() instead of an SPIN_LOCK_UNLOCKED assignment.  Also,
struct array_cache isn't necessarily a per-cpu structure, so the
"Per cpu structures" comment on it is confusing and is removed.
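
For background, a minimal sketch of why the inline function is
preferable (struct foo, FOO_INIT and foo_init are made-up names used
only for illustration, not code from this patch): unlike a
do { } while (0) macro, an inline function is type-checked and visible
to debuggers, at no runtime cost.

	/* Illustrative only -- not from mm/slab.c. */
	struct foo { int count; };

	/* Macro version: 'p' is substituted textually, so any type
	 * with a 'count' member slips through, and the expanded body
	 * is invisible to a debugger. */
	#define FOO_INIT(p)	do { (p)->count = 0; } while (0)

	/* Inline function: 'p' must really be a struct foo *, and the
	 * compiler can still inline the body so no call is emitted. */
	static inline void foo_init(struct foo *p)
	{
		p->count = 0;
	}

Passing the wrong pointer type to foo_init() is a compile error, where
FOO_INIT() could have compiled silently.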

Signed-off-by: Alexander Nyberg <alexn@telia.com>
Cc: Christoph Lameter <christoph@lameter.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 mm/slab.c |   35 +++++++++++++++++------------------
 1 files changed, 17 insertions(+), 18 deletions(-)

diff -puN mm/slab.c~numa-slab-allocator-cleanups mm/slab.c
--- devel/mm/slab.c~numa-slab-allocator-cleanups	2005-07-16 14:27:36.000000000 -0700
+++ devel-akpm/mm/slab.c	2005-07-16 14:27:36.000000000 -0700
@@ -249,7 +249,6 @@ struct slab_rcu {
 /*
  * struct array_cache
  *
- * Per cpu structures
  * Purpose:
  * - LIFO ordering, to hand out cache-warm objects from _alloc
  * - reduce the number of linked list operations
@@ -294,8 +293,8 @@ struct kmem_list3 {
 	int		free_touched;
 	unsigned int 	free_limit;
 	spinlock_t      list_lock;
-	struct array_cache	*shared;
-	struct array_cache	**alien;
+	struct array_cache	*shared;	/* shared per node */
+	struct array_cache	**alien;	/* on other nodes */
 };
 
 /*
@@ -336,17 +335,17 @@ static inline int index_of(const size_t 
 #define INDEX_AC index_of(sizeof(struct arraycache_init))
 #define INDEX_L3 index_of(sizeof(struct kmem_list3))
 
-#define LIST3_INIT(parent) \
-	do {	\
-		INIT_LIST_HEAD(&(parent)->slabs_full);	\
-		INIT_LIST_HEAD(&(parent)->slabs_partial);	\
-		INIT_LIST_HEAD(&(parent)->slabs_free);	\
-		(parent)->shared = NULL; \
-		(parent)->alien = NULL; \
-		(parent)->list_lock = SPIN_LOCK_UNLOCKED;	\
-		(parent)->free_objects = 0;	\
-		(parent)->free_touched = 0;	\
-	} while (0)
+static inline void kmem_list3_init(struct kmem_list3 *parent)
+{
+	INIT_LIST_HEAD(&parent->slabs_full);
+	INIT_LIST_HEAD(&parent->slabs_partial);
+	INIT_LIST_HEAD(&parent->slabs_free);
+	parent->shared = NULL;
+	parent->alien = NULL;
+	spin_lock_init(&parent->list_lock);
+	parent->free_objects = 0;
+	parent->free_touched = 0;
+}
 
 #define MAKE_LIST(cachep, listp, slab, nodeid)	\
 	do {	\
@@ -859,7 +858,7 @@ static int __devinit cpuup_callback(stru
 				if (!(l3 = kmalloc_node(memsize,
 						GFP_KERNEL, node)))
 					goto bad;
-				LIST3_INIT(l3);
+				kmem_list3_init(l3);
 				l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
 				  ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
 
@@ -998,7 +997,7 @@ void __init kmem_cache_init(void)
 	int i;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
-		LIST3_INIT(&initkmem_list3[i]);
+		kmem_list3_init(&initkmem_list3[i]);
 		if (i < MAX_NUMNODES)
 			cache_cache.nodelists[i] = NULL;
 	}
@@ -1765,7 +1764,7 @@ next:
 						kmalloc_node(sizeof(struct kmem_list3),
 								GFP_KERNEL, node);
 					BUG_ON(!cachep->nodelists[node]);
-					LIST3_INIT(cachep->nodelists[node]);
+					kmem_list3_init(cachep->nodelists[node]);
 				}
 			}
 		}
@@ -3099,7 +3098,7 @@ static int alloc_kmemlist(kmem_cache_t *
 						GFP_KERNEL, node)))
 			goto fail;
 
-		LIST3_INIT(l3);
+		kmem_list3_init(l3);
 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
 			((unsigned long)cachep)%REAPTIMEOUT_LIST3;
 		l3->shared = new;
_