Aligned free for aligned alloc

The Windows port frees memory that was allocated aligned with a different
routine than regular allocations, so change the frees to be explicit about
which kind of allocation they release.

Reviewed-by: Richard Yao <richard.yao@alumni.stonybrook.edu>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Andrew Innes <andrew.c12@gmail.com>
Co-Authored-By: Jorgen Lundman <lundman@lundman.net>
Closes #14059
Authored by Andrew Innes on 2022-10-27 06:08:31 +08:00; committed by GitHub
parent 41133c9794
commit 07de86923b
3 changed files with 30 additions and 12 deletions
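
Background for the change: on Windows, memory obtained from _aligned_malloc() must be released with _aligned_free(); passing it to plain free() is not valid. On POSIX platforms, memory from posix_memalign() may be released with ordinary free(). A minimal sketch of that pairing follows; the demo_* helper names are hypothetical and only illustrate the convention the libspl shim has to respect, they are not part of this commit.

    #include <stdlib.h>
    #ifdef _WIN32
    #include <malloc.h>     /* _aligned_malloc() / _aligned_free() */
    #endif

    /* Hypothetical helpers illustrating the per-platform pairing. */
    static void *
    demo_alloc_aligned(size_t size, size_t align)
    {
    #ifdef _WIN32
        /* Must later be released with _aligned_free(). */
        return (_aligned_malloc(size, align));
    #else
        void *ptr = NULL;
        /* align must be a power of two multiple of sizeof (void *). */
        if (posix_memalign(&ptr, align, size) != 0)
            return (NULL);
        return (ptr);       /* May be released with plain free(). */
    #endif
    }

    static void
    demo_free_aligned(void *ptr)
    {
    #ifdef _WIN32
        _aligned_free(ptr);
    #else
        free(ptr);
    #endif
    }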

lib/libspl/include/umem.h

@@ -137,6 +137,21 @@ umem_free(const void *ptr, size_t size __maybe_unused)
         free((void *)ptr);
 }
 
+/*
+ * umem_free_aligned was added for supporting portability
+ * with non-POSIX platforms that require a different free
+ * to be used with aligned allocations.
+ */
+static inline void
+umem_free_aligned(void *ptr, size_t size __maybe_unused)
+{
+#ifndef _WIN32
+        free((void *)ptr);
+#else
+        _aligned_free(ptr);
+#endif
+}
+
 static inline void
 umem_nofail_callback(umem_nofail_callback_t *cb __maybe_unused)
 {}
@@ -196,7 +211,10 @@ umem_cache_free(umem_cache_t *cp, void *ptr)
         if (cp->cache_destructor)
                 cp->cache_destructor(ptr, cp->cache_private);
-        umem_free(ptr, cp->cache_bufsize);
+        if (cp->cache_align != 0)
+                umem_free_aligned(ptr, cp->cache_bufsize);
+        else
+                umem_free(ptr, cp->cache_bufsize);
 }
 
 static inline void
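
With umem_free_aligned() in place, buffers obtained through umem_alloc_aligned() should be released through it, so the Windows build ends up calling _aligned_free() while other platforms keep using free(). A hedged usage fragment mirroring the zpool_read_label_slow() change below (the buffer type and size are taken from that hunk and are only illustrative here):

    vdev_phys_t *label;

    /* Page-aligned allocation; returns NULL on failure. */
    label = (vdev_phys_t *)umem_alloc_aligned(sizeof (*label), PAGESIZE,
        UMEM_DEFAULT);
    if (label == NULL)
        return (-1);

    /* ... read and inspect the label ... */

    /* Pair the aligned allocation with the aligned free. */
    umem_free_aligned(label, sizeof (*label));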

lib/libzutil/zutil_import.c

@@ -934,7 +934,6 @@ zpool_read_label_slow(int fd, nvlist_t **config, int *num_labels)
         vdev_phys_t *label;
         nvlist_t *expected_config = NULL;
         uint64_t expected_guid = 0, size;
-        int error;
 
         *config = NULL;
@@ -942,8 +941,9 @@ zpool_read_label_slow(int fd, nvlist_t **config, int *num_labels)
                 return (0);
         size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
 
-        error = posix_memalign((void **)&label, PAGESIZE, sizeof (*label));
-        if (error)
+        label = (vdev_phys_t *)umem_alloc_aligned(sizeof (*label), PAGESIZE,
+            UMEM_DEFAULT);
+        if (label == NULL)
                 return (-1);
 
         for (l = 0; l < VDEV_LABELS; l++) {
@@ -992,7 +992,7 @@ zpool_read_label_slow(int fd, nvlist_t **config, int *num_labels)
         if (num_labels != NULL)
                 *num_labels = count;
 
-        free(label);
+        umem_free_aligned(label, sizeof (*label));
         *config = expected_config;
         return (0);
@@ -1023,9 +1023,9 @@ zpool_read_label(int fd, nvlist_t **config, int *num_labels)
                 return (0);
         size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
 
-        error = posix_memalign((void **)&labels, PAGESIZE,
-            VDEV_LABELS * sizeof (*labels));
-        if (error)
+        labels = (vdev_phys_t *)umem_alloc_aligned(
+            VDEV_LABELS * sizeof (*labels), PAGESIZE, UMEM_DEFAULT);
+        if (labels == NULL)
                 return (-1);
 
         memset(aiocbs, 0, sizeof (aiocbs));
@@ -1078,7 +1078,7 @@ zpool_read_label(int fd, nvlist_t **config, int *num_labels)
                 error = zpool_read_label_slow(fd, config, num_labels);
                 saved_errno = errno;
         }
-        free(labels);
+        umem_free_aligned(labels, VDEV_LABELS * sizeof (*labels));
         errno = saved_errno;
         return (error);
 }
@@ -1127,7 +1127,7 @@ zpool_read_label(int fd, nvlist_t **config, int *num_labels)
         if (num_labels != NULL)
                 *num_labels = count;
 
-        free(labels);
+        umem_free_aligned(labels, VDEV_LABELS * sizeof (*labels));
         *config = expected_config;
         return (0);
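
Beyond the aligned free, the allocation convention changes too: posix_memalign() returns an errno-style code and hands back the pointer through an out-parameter, while umem_alloc_aligned() returns the pointer directly and yields NULL on failure, which is why the checks above move from if (error) to if (label == NULL). Side by side, as taken from the hunks above:

    /* Before: POSIX-only, errno-style return, pointer via out-parameter. */
    int error = posix_memalign((void **)&label, PAGESIZE, sizeof (*label));
    if (error)
        return (-1);

    /* After: portable via libspl, pointer return, NULL on failure. */
    label = (vdev_phys_t *)umem_alloc_aligned(sizeof (*label), PAGESIZE,
        UMEM_DEFAULT);
    if (label == NULL)
        return (-1);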

module/os/linux/zfs/abd_os.c

@@ -598,7 +598,7 @@ abd_free_chunks(abd_t *abd)
         abd_for_each_sg(abd, sg, n, i) {
                 struct page *p = nth_page(sg_page(sg), 0);
-                umem_free(p, PAGESIZE);
+                umem_free_aligned(p, PAGESIZE);
         }
         abd_free_sg_table(abd);
 }
@@ -704,7 +704,7 @@ abd_free_zero_scatter(void)
         __free_page(abd_zero_page);
 #endif /* HAVE_ZERO_PAGE_GPL_ONLY */
 #else
-        umem_free(abd_zero_page, PAGESIZE);
+        umem_free_aligned(abd_zero_page, PAGESIZE);
 #endif /* _KERNEL */
 }