diff --git a/mm/slub.c b/mm/slub.c
index 5fbda3526e988c05a1c743a2850c62a202421f28..4cb9c0b39daf94e0dd461c29a9f5b03c0723b964 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2800,7 +2800,7 @@ static void *get_partial(struct kmem_cache *s, int node, struct partial_context
 		searchnode = numa_mem_id();
 
 	object = get_partial_node(s, get_node(s, searchnode), pc);
-	if (object || node != NUMA_NO_NODE)
+	if (object || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE)))
 		return object;
 
 	return get_any_partial(s, pc);
@@ -3497,6 +3497,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	struct slab *slab;
 	unsigned long flags;
 	struct partial_context pc;
+	bool try_thisnode = true;
 
 	stat(s, ALLOC_SLOWPATH);
 
@@ -3611,6 +3612,21 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 new_objects:
 
 	pc.flags = gfpflags;
+
+	/*
+	 * When a preferred node is indicated but no __GFP_THISNODE
+	 *
+	 * 1) try to get a partial slab from the target node only, by having
+	 *    __GFP_THISNODE in pc.flags for get_partial()
+	 * 2) if 1) failed, try to allocate a new slab from the target node with
+	 *    GFP_NOWAIT | __GFP_THISNODE opportunistically
+	 * 3) if 2) failed, retry with the original gfpflags, which will allow
+	 *    get_partial() to try partial lists of other nodes before potentially
+	 *    allocating a new page from other nodes
+	 */
+	if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
+		     && try_thisnode))
+		pc.flags = GFP_NOWAIT | __GFP_THISNODE;
+
 	pc.slab = &slab;
 	pc.orig_size = orig_size;
 	freelist = get_partial(s, node, &pc);
@@ -3636,10 +3652,15 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	}
 
 	slub_put_cpu_ptr(s->cpu_slab);
-	slab = new_slab(s, gfpflags, node);
+	slab = new_slab(s, pc.flags, node);
 	c = slub_get_cpu_ptr(s->cpu_slab);
 
 	if (unlikely(!slab)) {
+		if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
+		    && try_thisnode) {
+			try_thisnode = false;
+			goto new_objects;
+		}
 		slab_out_of_memory(s, gfpflags, node);
 		return NULL;
 	}
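
For readers following the control flow, the three numbered steps in the patch's comment reduce to a small retry loop around try_thisnode. The sketch below is an illustrative condensation only, not kernel code: numa_alloc_sketch(), get_partial_stub() and new_slab_stub() are hypothetical stand-ins for the real get_partial() and new_slab() paths, and cpu-slab handling, locking and statistics are omitted.

/*
 * Hypothetical condensation of the fallback order added by this patch.
 * First pass (steps 1 and 2): run with GFP_NOWAIT | __GFP_THISNODE so
 * that both the partial-list search and the new-slab allocation stay
 * node-local and fail fast rather than entering direct reclaim.
 * Second pass (step 3): clear try_thisnode and retry with the caller's
 * gfpflags, permitting other nodes' partial lists and remote pages.
 */
static void *numa_alloc_sketch(struct kmem_cache *s, gfp_t gfpflags, int node)
{
	bool try_thisnode = true;
	gfp_t flags;
	void *object;

retry:
	flags = gfpflags;
	if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE) && try_thisnode)
		flags = GFP_NOWAIT | __GFP_THISNODE;

	object = get_partial_stub(s, node, flags);	/* hypothetical stub */
	if (!object)
		object = new_slab_stub(s, flags, node);	/* hypothetical stub */

	if (!object && node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
	    && try_thisnode) {
		try_thisnode = false;	/* step 3: relax to original gfpflags */
		goto retry;
	}
	return object;
}

The use of GFP_NOWAIT for the opportunistic pass is what keeps step 2 cheap: if the preferred node is below its watermarks, the node-local attempt fails immediately instead of blocking in direct reclaim, and the retry with the caller's original gfpflags can still take a partial slab from another node (via the relaxed get_partial() check in the first hunk) before allocating a fresh page remotely.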