Remove __GFP_NOFAIL in kmem and retry internally.
As of 2.6.31 it's clear __GFP_NOFAIL should no longer be used and it
may disappear from the kernel at any time.  To handle this I have simply
added *_nofail wrappers in the kmem implementation which perform the
retry for non-atomic allocations.

From linux-2.6.31 mm/page_alloc.c:1166:

	/*
	 * __GFP_NOFAIL is not to be used in new code.
	 *
	 * All __GFP_NOFAIL callers should be fixed so that they
	 * properly detect and handle allocation failures.
	 *
	 * We most definitely don't want callers attempting to
	 * allocate greater than order-1 page units with
	 * __GFP_NOFAIL.
	 */
	WARN_ON_ONCE(order > 1);
This commit is contained in:
parent baf2979ed3
commit c89fdee4d3
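To make the caller-facing effect concrete, here is a minimal caller-side sketch. It is hypothetical consumer code, not part of this commit: my_obj_t, mo_state, and example_setup() are made-up names, and it assumes the SPL kmem interfaces defined in the diff below. With the wrappers in place, a KM_SLEEP allocation keeps the Solaris never-fails semantics because the wrapper retries internally, while KM_NOSLEEP maps to GFP_ATOMIC and must still be checked for NULL.

#include <sys/kmem.h>           /* SPL kmem_zalloc()/kmem_free(), KM_* flags */

/* Hypothetical object type used only for this illustration. */
typedef struct my_obj {
        int mo_state;
} my_obj_t;

static int
example_setup(my_obj_t **objp, int can_sleep)
{
        my_obj_t *obj;

        if (can_sleep) {
                /* Retried internally by kzalloc_nofail(); never returns NULL. */
                obj = kmem_zalloc(sizeof (my_obj_t), KM_SLEEP);
        } else {
                /* Atomic context: no retry is performed, failure is possible. */
                obj = kmem_zalloc(sizeof (my_obj_t), KM_NOSLEEP);
                if (obj == NULL)
                        return (-ENOMEM);
        }

        *objp = obj;
        return (0);
}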
@@ -49,7 +49,7 @@ extern "C" {
 /*
  * Memory allocation interfaces
  */
-#define KM_SLEEP                        (GFP_KERNEL | __GFP_NOFAIL)
+#define KM_SLEEP                        GFP_KERNEL
 #define KM_NOSLEEP                      GFP_ATOMIC
 #undef  KM_PANIC                        /* No linux analog */
 #define KM_PUSHPAGE                     (KM_SLEEP | __GFP_HIGH)
@ -63,6 +63,51 @@ extern "C" {
|
||||||
# define __GFP_ZERO 0x8000
|
# define __GFP_ZERO 0x8000
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* __GFP_NOFAIL looks like it will be removed from the kernel perhaps as
|
||||||
|
* early as 2.6.32. To avoid this issue when it occurs in upstream kernels
|
||||||
|
* we retry the allocation here as long as it is not __GFP_WAIT (GFP_ATOMIC).
|
||||||
|
* I would prefer the caller handle the failure case cleanly but we are
|
||||||
|
* trying to emulate Solaris and those are not the Solaris semantics.
|
||||||
|
*/
|
||||||
|
static inline void *
|
||||||
|
kmalloc_nofail(size_t size, gfp_t flags)
|
||||||
|
{
|
||||||
|
void *ptr;
|
||||||
|
|
||||||
|
do {
|
||||||
|
ptr = kmalloc(size, flags);
|
||||||
|
} while (ptr == NULL && (flags & __GFP_WAIT));
|
||||||
|
|
||||||
|
return ptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void *
|
||||||
|
kzalloc_nofail(size_t size, gfp_t flags)
|
||||||
|
{
|
||||||
|
void *ptr;
|
||||||
|
|
||||||
|
do {
|
||||||
|
ptr = kzalloc(size, flags);
|
||||||
|
} while (ptr == NULL && (flags & __GFP_WAIT));
|
||||||
|
|
||||||
|
return ptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef HAVE_KMALLOC_NODE
|
||||||
|
static inline void *
|
||||||
|
kmalloc_node_nofail(size_t size, gfp_t flags, int node)
|
||||||
|
{
|
||||||
|
void *ptr;
|
||||||
|
|
||||||
|
do {
|
||||||
|
ptr = kmalloc_node(size, flags, node);
|
||||||
|
} while (ptr == NULL && (flags & __GFP_WAIT));
|
||||||
|
|
||||||
|
return ptr;
|
||||||
|
}
|
||||||
|
#endif /* HAVE_KMALLOC_NODE */
|
||||||
|
|
||||||
#ifdef DEBUG_KMEM
|
#ifdef DEBUG_KMEM
|
||||||
|
|
||||||
extern atomic64_t kmem_alloc_used;
|
extern atomic64_t kmem_alloc_used;
|
||||||
|
@@ -125,16 +170,16 @@ extern void vmem_free_debug(void *ptr, size_t size);
 
 #else /* DEBUG_KMEM */
 
-# define kmem_alloc(size, flags)                kmalloc((size), (flags))
-# define kmem_zalloc(size, flags)               kzalloc((size), (flags))
+# define kmem_alloc(size, flags)                kmalloc_nofail((size), (flags))
+# define kmem_zalloc(size, flags)               kzalloc_nofail((size), (flags))
 # define kmem_free(ptr, size)                   ((void)(size), kfree(ptr))
 
 # ifdef HAVE_KMALLOC_NODE
 # define kmem_alloc_node(size, flags, node)                                  \
-        kmalloc_node((size), (flags), (node))
+        kmalloc_node_nofail((size), (flags), (node))
 # else
 # define kmem_alloc_node(size, flags, node)                                  \
-        kmalloc((size), (flags))
+        kmalloc_nofail((size), (flags))
 # endif
 
 # define vmem_alloc(size, flags)        __vmalloc((size), ((flags) | \
@@ -380,7 +380,7 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
         unsigned long irq_flags;
         ENTRY;
 
-        dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t),
+        dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
             flags & ~__GFP_ZERO);
 
         if (dptr == NULL) {
@@ -409,11 +409,11 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
         /* Use the correct allocator */
         if (node_alloc) {
                 ASSERT(!(flags & __GFP_ZERO));
-                ptr = kmalloc_node(size, flags, node);
+                ptr = kmalloc_node_nofail(size, flags, node);
         } else if (flags & __GFP_ZERO) {
-                ptr = kzalloc(size, flags & ~__GFP_ZERO);
+                ptr = kzalloc_nofail(size, flags & ~__GFP_ZERO);
         } else {
-                ptr = kmalloc(size, flags);
+                ptr = kmalloc_nofail(size, flags);
         }
 
         if (unlikely(ptr == NULL)) {
@@ -500,7 +500,7 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
 
         ASSERT(flags & KM_SLEEP);
 
-        dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t), flags);
+        dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t), flags);
         if (dptr == NULL) {
                 CWARN("vmem_alloc(%ld, 0x%x) debug failed\n",
                     sizeof(kmem_debug_t), flags);
@@ -614,11 +614,11 @@ kmem_alloc_debug(size_t size, int flags, const char *func, int line,
         /* Use the correct allocator */
         if (node_alloc) {
                 ASSERT(!(flags & __GFP_ZERO));
-                ptr = kmalloc_node(size, flags, node);
+                ptr = kmalloc_node_nofail(size, flags, node);
         } else if (flags & __GFP_ZERO) {
-                ptr = kzalloc(size, flags & (~__GFP_ZERO));
+                ptr = kzalloc_nofail(size, flags & (~__GFP_ZERO));
         } else {
-                ptr = kmalloc(size, flags);
+                ptr = kmalloc_nofail(size, flags);
         }
 
         if (ptr == NULL) {
@@ -1077,7 +1077,7 @@ spl_magazine_alloc(spl_kmem_cache_t *skc, int node)
                   sizeof(void *) * skc->skc_mag_size;
         ENTRY;
 
-        skm = kmem_alloc_node(size, GFP_KERNEL | __GFP_NOFAIL, node);
+        skm = kmem_alloc_node(size, KM_SLEEP, node);
         if (skm) {
                 skm->skm_magic = SKM_MAGIC;
                 skm->skm_avail = 0;
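For anyone who wants to exercise the retry rule outside the kernel, the following stand-alone user-space C sketch mimics the wrappers' logic: retry while the sleeping flag is set, give up immediately for atomic requests. It is entirely hypothetical; the fake_* names and FAKE_GFP_* flags are stand-ins invented here for __GFP_WAIT, GFP_KERNEL, GFP_ATOMIC, and kmalloc(), and the failure injection only simulates transient low-memory conditions.

#include <stdio.h>
#include <stdlib.h>

#define FAKE_GFP_WAIT   0x10            /* stand-in for __GFP_WAIT */
#define FAKE_GFP_KERNEL FAKE_GFP_WAIT   /* sleeping allocation */
#define FAKE_GFP_ATOMIC 0x00            /* atomic allocation, may fail */

static int fail_next_n = 3;             /* simulate transient allocation failures */

static void *
fake_kmalloc(size_t size)
{
        if (fail_next_n > 0) {
                fail_next_n--;
                return NULL;
        }
        return malloc(size);
}

/* Same retry rule as kmalloc_nofail(): loop only for sleeping allocations. */
static void *
fake_kmalloc_nofail(size_t size, int flags)
{
        void *ptr;

        do {
                ptr = fake_kmalloc(size);
        } while (ptr == NULL && (flags & FAKE_GFP_WAIT));

        return ptr;
}

int
main(void)
{
        /* Sleeping request: the wrapper retries past the injected failures. */
        void *a = fake_kmalloc_nofail(64, FAKE_GFP_KERNEL);

        /* Atomic request: re-arm failure injection; no retry, NULL comes back. */
        fail_next_n = 1;
        void *b = fake_kmalloc_nofail(64, FAKE_GFP_ATOMIC);

        printf("sleeping alloc: %p, atomic alloc: %p\n", a, b);
        free(a);
        free(b);        /* free(NULL) is a no-op */
        return 0;
}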