From: Alexander Nyberg <alexn@telia.com>

This patch stores the last 3 function addresses of the call chain that
allocated the object (it won't work on certain large caches, though;
those fall back to the existing single-address scheme).

Signed-off-by: Alexander Nyberg <alexn@telia.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 mm/slab.c |   99 ++++++++++++++++++++++++++++++++++++++++++++++----------------
 1 files changed, 74 insertions(+), 25 deletions(-)

diff -puN mm/slab.c~slab-leak-detector-give-longer-traces mm/slab.c
--- devel/mm/slab.c~slab-leak-detector-give-longer-traces	2005-08-06 15:35:04.000000000 -0700
+++ devel-akpm/mm/slab.c	2005-08-06 15:35:04.000000000 -0700
@@ -521,16 +521,28 @@ static unsigned long *dbg_redzone2(kmem_
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 	if (cachep->flags & SLAB_STORE_USER)
-		return (unsigned long*) (objp+cachep->objsize-2*BYTES_PER_WORD);
+		return (unsigned long*) (objp+cachep->objsize-4*BYTES_PER_WORD);
 	return (unsigned long*) (objp+cachep->objsize-BYTES_PER_WORD);
 }
 
-static void **dbg_userword(kmem_cache_t *cachep, void *objp)
+static void **dbg_userword3(kmem_cache_t *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
 	return (void**)(objp+cachep->objsize-BYTES_PER_WORD);
 }
 
+static void **dbg_userword2(kmem_cache_t *cachep, void *objp)
+{
+	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
+	return (void**)(objp+cachep->objsize-2*BYTES_PER_WORD);
+}
+
+static void **dbg_userword1(kmem_cache_t *cachep, void *objp)
+{
+	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
+	return (void**)(objp+cachep->objsize-3*BYTES_PER_WORD);
+}
+
 #else
 
 #define obj_dbghead(x)			0
@@ -1312,12 +1324,16 @@ static void print_objinfo(kmem_cache_t *
 	}
 
 	if (cachep->flags & SLAB_STORE_USER) {
-		printk(KERN_ERR "Last user: [<%p>]",
-				*dbg_userword(cachep, objp));
-		print_symbol("(%s)",
-				(unsigned long)*dbg_userword(cachep, objp));
+		printk(KERN_ERR "Last user:\n");
+		printk(KERN_ERR "[<%p>]", *dbg_userword1(cachep, objp));
+		print_symbol("(%s)", (unsigned long) *dbg_userword1(cachep, objp));
+		printk(KERN_ERR "[<%p>]", *dbg_userword2(cachep, objp));
+		print_symbol("(%s)", (unsigned long) *dbg_userword2(cachep, objp));
+		printk(KERN_ERR "[<%p>]", *dbg_userword3(cachep, objp));
+		print_symbol("(%s)", (unsigned long) *dbg_userword3(cachep, objp));
 		printk("\n");
 	}
+
 	realobj = (char*)objp+obj_dbghead(cachep);
 	size = obj_reallen(cachep);
 	for (i=0; i<size && lines;i+=16, lines--) {
@@ -1532,7 +1548,7 @@ kmem_cache_create (const char *name, siz
 	 * above the next power of two: caches with object sizes just above a
 	 * power of two have a significant amount of internal fragmentation.
 	 */
-	if ((size < 4096 || fls(size-1) == fls(size-1+3*BYTES_PER_WORD)))
+	if ((size < 4096 || fls(size-1) == fls(size-1+5*BYTES_PER_WORD)))
 		flags |= SLAB_RED_ZONE|SLAB_STORE_USER;
 	if (!(flags & SLAB_DESTROY_BY_RCU))
 		flags |= SLAB_POISON;
@@ -1612,7 +1628,7 @@ kmem_cache_create (const char *name, siz
 		 * object.
 		 */
 		align = BYTES_PER_WORD;
-		size += BYTES_PER_WORD;
+		size += 3 * BYTES_PER_WORD;
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size >= malloc_sizes[INDEX_L3+1].cs_size && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
@@ -2081,8 +2097,11 @@ static void cache_init_objs(kmem_cache_t
 		/* need to poison the objs? */
 		if (cachep->flags & SLAB_POISON)
 			poison_obj(cachep, objp, POISON_FREE);
-		if (cachep->flags & SLAB_STORE_USER)
-			*dbg_userword(cachep, objp) = NULL;
+		if (cachep->flags & SLAB_STORE_USER) {
+			*dbg_userword1(cachep, objp) = NULL;
+			*dbg_userword2(cachep, objp) = NULL;
+			*dbg_userword3(cachep, objp) = NULL;
+		}
 
 		if (cachep->flags & SLAB_RED_ZONE) {
 			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
@@ -2256,7 +2275,7 @@ static void kfree_debugcheck(const void 
 	}
 }
 
-static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
+static inline void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
 					void *caller)
 {
 	struct page *page;
@@ -2286,8 +2305,11 @@ static void *cache_free_debugcheck(kmem_
 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
 	}
-	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+	if (cachep->flags & SLAB_STORE_USER) {
+		*dbg_userword1(cachep, objp) = caller; /* address(0) */
+		*dbg_userword2(cachep, objp) = __builtin_return_address(1);
+		*dbg_userword3(cachep, objp) = __builtin_return_address(2);
+	}
 
 	objnr = (objp-slabp->s_mem)/cachep->objsize;
 
@@ -2462,7 +2484,7 @@ cache_alloc_debugcheck_before(kmem_cache
 }
 
 #if DEBUG
-static void *
+static inline void *
 cache_alloc_debugcheck_after(kmem_cache_t *cachep,
 			unsigned int __nocast flags, void *objp, void *caller)
 {
@@ -2479,8 +2501,11 @@ cache_alloc_debugcheck_after(kmem_cache_
 #endif
 		poison_obj(cachep, objp, POISON_INUSE);
 	}
-	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+	if (cachep->flags & SLAB_STORE_USER) {
+		*dbg_userword1(cachep, objp) = caller; /* address(0) */
+		*dbg_userword2(cachep, objp) = __builtin_return_address(1);
+		*dbg_userword3(cachep, objp) = __builtin_return_address(2);
+	}
 
 	if (cachep->flags & SLAB_RED_ZONE) {
 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
@@ -3529,6 +3554,37 @@ struct seq_operations slabinfo_op = {
 	.show	= s_show,
 };
 
+#if DEBUG
+#include <linux/nmi.h>
+static inline void dump_slab(kmem_cache_t *cachep, struct slab *slabp)
+{
+	int i;
+	int slab_user = cachep->flags & SLAB_STORE_USER;
+
+	for (i = 0; i < cachep->num; i++) {
+		if (slab_user) {
+			void *objp = slabp->s_mem + cachep->objsize * i;
+
+			printk("obj:%p [%p] ", objp, *dbg_userword1(cachep, objp));
+			print_symbol("<%s>", (unsigned long) *dbg_userword1(cachep, objp));
+			printk("\n");
+			printk("obj:%p [%p] ", objp, *dbg_userword2(cachep, objp));
+			print_symbol("<%s>", (unsigned long) *dbg_userword2(cachep, objp));
+			printk("\n");
+			printk("obj:%p [%p] ", objp, *dbg_userword3(cachep, objp));
+			print_symbol("<%s>", (unsigned long) *dbg_userword3(cachep, objp));
+			printk("\n");
+		} else {
+			unsigned long sym = slab_bufctl(slabp)[i];
+
+			printk("obj %p/%d: %p", slabp, i, (void *)sym);
+			print_symbol(" <%s>", sym);
+			printk("\n");
+		}
+	}
+}
+#endif
+
 static void do_dump_slabp(kmem_cache_t *cachep)
 {
 #if DEBUG
@@ -3542,16 +3598,9 @@ static void do_dump_slabp(kmem_cache_t *
 		spin_lock(&rl3->list_lock);
 
 		list_for_each(q, &rl3->slabs_full) {
-			int i;
 			struct slab *slabp = list_entry(q, struct slab, list);
-
-			for (i = 0; i < cachep->num; i++) {
-				unsigned long sym = slab_bufctl(slabp)[i];
-
-				printk("obj %p/%d: %p", slabp, i, (void *)sym);
-				print_symbol(" <%s>", sym);
-				printk("\n");
-			}
+			dump_slab(cachep, slabp);
+			touch_nmi_watchdog();
 		}
 		spin_unlock(&rl3->list_lock);
 	}
_