From: Manfred Spraul

Attached is a patch that fixes the fragmentation that Badri noticed with
kmem_cache_alloc_node.  kmem_cache_alloc_node tries to allocate memory from a
given node.  The current implementation contains two bugs:

- the node aware code was used even for !CONFIG_NUMA systems.  Fix: inline
  function that redefines kmem_cache_alloc_node as kmem_cache_alloc for
  !CONFIG_NUMA.

- the code always allocated a new slab for each new allocation.  This caused
  severe fragmentation.  Fix: walk the slabp lists and search for a matching
  page instead of allocating a new page.

- the patch also adds a new statistics field for node-local allocs.  They
  should be rare - the codepath is quite slow, especially compared to the
  normal kmem_cache_alloc.

Signed-Off-By: Manfred Spraul
Signed-off-by: Andrew Morton
---

 25-akpm/include/linux/slab.h |    7 ++
 25-akpm/mm/slab.c            |  113 ++++++++++++++++++++++++-------------------
 2 files changed, 72 insertions(+), 48 deletions(-)

diff -puN include/linux/slab.h~slab-reduce-fragmentation-due-to-kmem_cache_alloc_node include/linux/slab.h
--- 25/include/linux/slab.h~slab-reduce-fragmentation-due-to-kmem_cache_alloc_node	2004-10-09 20:36:33.459379008 -0700
+++ 25-akpm/include/linux/slab.h	2004-10-09 20:36:33.465378096 -0700
@@ -61,7 +61,14 @@ extern kmem_cache_t *kmem_cache_create(c
 extern int kmem_cache_destroy(kmem_cache_t *);
 extern int kmem_cache_shrink(kmem_cache_t *);
 extern void *kmem_cache_alloc(kmem_cache_t *, int);
+#ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node(kmem_cache_t *, int);
+#else
+static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int node)
+{
+	return kmem_cache_alloc(cachep, GFP_KERNEL);
+}
+#endif
 extern void kmem_cache_free(kmem_cache_t *, void *);
 extern unsigned int kmem_cache_size(kmem_cache_t *);
 
diff -puN mm/slab.c~slab-reduce-fragmentation-due-to-kmem_cache_alloc_node mm/slab.c
--- 25/mm/slab.c~slab-reduce-fragmentation-due-to-kmem_cache_alloc_node	2004-10-09 20:36:33.461378704 -0700
+++ 25-akpm/mm/slab.c	2004-10-09 20:36:33.468377640 -0700
@@ -327,6 +327,7 @@ struct kmem_cache_s {
 	unsigned long		reaped;
 	unsigned long		errors;
 	unsigned long		max_freeable;
+	unsigned long		node_allocs;
 	atomic_t		allochit;
 	atomic_t		allocmiss;
 	atomic_t		freehit;
@@ -361,6 +362,7 @@ struct kmem_cache_s {
 					(x)->high_mark = (x)->num_active; \
 				} while (0)
 #define	STATS_INC_ERR(x)	((x)->errors++)
+#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
 #define	STATS_SET_FREEABLE(x, i) \
 				do { if ((x)->max_freeable < i) \
 					(x)->max_freeable = i; \
@@ -378,6 +380,7 @@ struct kmem_cache_s {
 #define	STATS_INC_REAPED(x)	do { } while (0)
 #define	STATS_SET_HIGH(x)	do { } while (0)
 #define	STATS_INC_ERR(x)	do { } while (0)
+#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
 #define	STATS_SET_FREEABLE(x, i) \
 				do { } while (0)
 
@@ -1747,7 +1750,7 @@ static void set_slab_attr(kmem_cache_t *
  * Grow (by 1) the number of slabs within a cache.  This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int cache_grow (kmem_cache_t * cachep, int flags)
+static int cache_grow (kmem_cache_t * cachep, int flags, int nodeid)
 {
 	struct slab	*slabp;
 	void		*objp;
@@ -1798,7 +1801,7 @@ static int cache_grow (kmem_cache_t * ca
 
 
 	/* Get mem for the objs. */
-	if (!(objp = kmem_getpages(cachep, flags, -1)))
+	if (!(objp = kmem_getpages(cachep, flags, nodeid)))
 		goto failed;
 
 	/* Get slab management. */
@@ -2032,7 +2035,7 @@ alloc_done:
 
 	if (unlikely(!ac->avail)) {
 		int x;
-		x = cache_grow(cachep, flags);
+		x = cache_grow(cachep, flags, -1);
 
 		// cache_grow can reenable interrupts, then ac could change.
 		ac = ac_data(cachep);
@@ -2313,6 +2316,7 @@ out:
 	return 0;
 }
 
+#ifdef CONFIG_NUMA
 /**
  * kmem_cache_alloc_node - Allocate an object on the specified node
  * @cachep: The cache to allocate from.
@@ -2325,69 +2329,80 @@ out:
  */
 void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
 {
-	size_t offset;
+	int loop;
 	void *objp;
 	struct slab *slabp;
 	kmem_bufctl_t next;
 
-	/* The main algorithms are not node aware, thus we have to cheat:
-	 * We bypass all caches and allocate a new slab.
-	 * The following code is a streamlined copy of cache_grow().
-	 */
+	for (loop = 0;;loop++) {
+		struct list_head *q;
 
-	/* Get colour for the slab, and update the next value. */
-	spin_lock_irq(&cachep->spinlock);
-	offset = cachep->colour_next;
-	cachep->colour_next++;
-	if (cachep->colour_next >= cachep->colour)
-		cachep->colour_next = 0;
-	offset *= cachep->colour_off;
-	spin_unlock_irq(&cachep->spinlock);
-
-	/* Get mem for the objs. */
-	if (!(objp = kmem_getpages(cachep, GFP_KERNEL, nodeid)))
-		goto failed;
+		objp = NULL;
+		check_irq_on();
+		spin_lock_irq(&cachep->spinlock);
+		/* walk through all partial and empty slab and find one
+		 * from the right node */
+		list_for_each(q,&cachep->lists.slabs_partial) {
+			slabp = list_entry(q, struct slab, list);
+
+			if (page_to_nid(virt_to_page(slabp->s_mem)) == nodeid ||
+					loop > 2)
+				goto got_slabp;
+		}
+		list_for_each(q, &cachep->lists.slabs_free) {
+			slabp = list_entry(q, struct slab, list);
+
+			if (page_to_nid(virt_to_page(slabp->s_mem)) == nodeid ||
+					loop > 2)
+				goto got_slabp;
+		}
+		spin_unlock_irq(&cachep->spinlock);
 
-	/* Get slab management. */
-	if (!(slabp = alloc_slabmgmt(cachep, objp, offset, GFP_KERNEL)))
-		goto opps1;
+		local_irq_disable();
+		if (!cache_grow(cachep, GFP_KERNEL, nodeid)) {
+			local_irq_enable();
+			return NULL;
+		}
+		local_irq_enable();
+	}
+got_slabp:
+	/* found one: allocate object */
+	check_slabp(cachep, slabp);
+	check_spinlock_acquired(cachep);
 
-	set_slab_attr(cachep, slabp, objp);
-	cache_init_objs(cachep, slabp, SLAB_CTOR_CONSTRUCTOR);
+	STATS_INC_ALLOCED(cachep);
+	STATS_INC_ACTIVE(cachep);
+	STATS_SET_HIGH(cachep);
+	STATS_INC_NODEALLOCS(cachep);
 
-	/* The first object is ours: */
 	objp = slabp->s_mem + slabp->free*cachep->objsize;
+
 	slabp->inuse++;
 	next = slab_bufctl(slabp)[slabp->free];
 #if DEBUG
 	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
 #endif
 	slabp->free = next;
-
-	/* add the remaining objects into the cache */
-	spin_lock_irq(&cachep->spinlock);
 	check_slabp(cachep, slabp);
-	STATS_INC_GROWN(cachep);
-	/* Make slab active. */
-	if (slabp->free == BUFCTL_END) {
-		list_add_tail(&slabp->list, &(list3_data(cachep)->slabs_full));
-	} else {
-		list_add_tail(&slabp->list,
-				&(list3_data(cachep)->slabs_partial));
-		list3_data(cachep)->free_objects += cachep->num-1;
-	}
+
+	/* move slabp to correct slabp list: */
+	list_del(&slabp->list);
+	if (slabp->free == BUFCTL_END)
+		list_add(&slabp->list, &cachep->lists.slabs_full);
+	else
+		list_add(&slabp->list, &cachep->lists.slabs_partial);
+
+	list3_data(cachep)->free_objects--;
 	spin_unlock_irq(&cachep->spinlock);
+
 	objp = cache_alloc_debugcheck_after(cachep, GFP_KERNEL, objp,
 					__builtin_return_address(0));
 	return objp;
-opps1:
-	kmem_freepages(cachep, objp);
-failed:
-	return NULL;
-
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
+#endif
+
 
 /**
  * kmalloc - allocate memory
  * @size: how many bytes of memory are required.
@@ -2816,15 +2831,16 @@ static void *s_start(struct seq_file *m,
 	 * without _too_ many complaints.
 	 */
 #if STATS
-	seq_puts(m, "slabinfo - version: 2.0 (statistics)\n");
+	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
 #else
-	seq_puts(m, "slabinfo - version: 2.0\n");
+	seq_puts(m, "slabinfo - version: 2.1\n");
 #endif
 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#if STATS
-	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <freelimit>");
+	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped>"
+			" <error> <maxfreeable> <freelimit> <nodeallocs>");
 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
 #endif
 	seq_putc(m, '\n');
@@ -2915,10 +2931,11 @@ static int s_show(struct seq_file *m, vo
 		unsigned long errors = cachep->errors;
 		unsigned long max_freeable = cachep->max_freeable;
 		unsigned long free_limit = cachep->free_limit;
+		unsigned long node_allocs = cachep->node_allocs;
 
-		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu",
+		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu",
 				allocs, high, grown, reaped, errors,
-				max_freeable, free_limit);
+				max_freeable, free_limit, node_allocs);
 	}
 	/* cpu stats */
 	{
_