FreeBSD: Use a hash table for taskqid lookups

Previously a tqent could be recycled prematurely. Update the
code to use a hash table for lookups to resolve this.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Ryan Moeller <ryan@ixsystems.com>
Signed-off-by: Matt Macy <mmacy@FreeBSD.org>
Closes #10529
Matthew Macy, 2020-07-11 17:13:45 -07:00 (committed via GitHub)
parent 6f1db5f37e
commit 3933305eac
2 changed files with 129 additions and 53 deletions
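
Before the per-file diffs, here is a minimal user-space sketch of the scheme the patch adopts: entries are published in a hash table keyed by a monotonically increasing id and reference counted, so a stale taskqid simply misses the lookup instead of dereferencing memory that may already have been recycled. The sketch_* names, the pthread mutex, the fixed bucket count, and malloc/free are illustrative stand-ins for the kernel's sx locks, CK lists, hashinit() sizing, and uma zone; this is the idea only, not the code from the commit.

/* Hypothetical user-space sketch; the real kernel code is in the diff below. */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#define SKETCH_NBUCKETS	64	/* fixed size; the kernel sizes by CPU count */
#define SKETCH_HASH(id)	((id) & (SKETCH_NBUCKETS - 1))

typedef struct sketch_ent {
	uint64_t id;			/* key handed back to the caller */
	int refs;			/* one for the table, one per lookup */
	struct sketch_ent *next;	/* hash chain */
} sketch_ent_t;

static sketch_ent_t *sketch_tbl[SKETCH_NBUCKETS];
static pthread_mutex_t sketch_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t sketch_nextid = 1;	/* 0 is reserved as "no task" */

/* Assign a fresh id and publish the entry (the role of taskq_insert below). */
static uint64_t
sketch_insert(sketch_ent_t *ent)
{
	pthread_mutex_lock(&sketch_lock);
	ent->id = sketch_nextid++;
	ent->refs = 1;			/* reference held by the table */
	ent->next = sketch_tbl[SKETCH_HASH(ent->id)];
	sketch_tbl[SKETCH_HASH(ent->id)] = ent;
	pthread_mutex_unlock(&sketch_lock);
	return (ent->id);
}

/* Map an id back to an entry and take a reference; NULL if the id is stale. */
static sketch_ent_t *
sketch_lookup(uint64_t id)
{
	sketch_ent_t *ent;

	pthread_mutex_lock(&sketch_lock);
	for (ent = sketch_tbl[SKETCH_HASH(id)]; ent != NULL; ent = ent->next)
		if (ent->id == id)
			break;
	if (ent != NULL)
		ent->refs++;
	pthread_mutex_unlock(&sketch_lock);
	return (ent);
}

/*
 * Drop a reference; the memory is reused only after the table and every
 * outstanding lookup have let go, which is what prevents premature recycling.
 */
static void
sketch_release(sketch_ent_t *ent)
{
	int last;

	pthread_mutex_lock(&sketch_lock);
	last = (--ent->refs == 0);
	pthread_mutex_unlock(&sketch_lock);
	if (last)
		free(ent);
}

/* Unlink an entry and drop the table's reference (taskq_remove + taskq_free). */
static void
sketch_remove(sketch_ent_t *ent)
{
	sketch_ent_t **pp;

	pthread_mutex_lock(&sketch_lock);
	for (pp = &sketch_tbl[SKETCH_HASH(ent->id)]; *pp != NULL;
	    pp = &(*pp)->next) {
		if (*pp == ent) {
			*pp = ent->next;
			break;
		}
	}
	pthread_mutex_unlock(&sketch_lock);
	sketch_release(ent);
}

In the patch itself, taskq_insert(), taskq_lookup(), taskq_remove(), and taskq_free() play these roles, with tqent_rc as the reference count and tqent_registered marking whether the entry is still in the table.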

include/os/freebsd/spl/sys/taskq.h

@@ -30,6 +30,7 @@
 #include <sys/proc.h>
 #include <sys/taskqueue.h>
 #include <sys/thread.h>
+#include <sys/ck.h>
 #ifdef __cplusplus
 extern "C" {
@@ -37,26 +38,26 @@ extern "C" {
 #define TASKQ_NAMELEN 31
 struct taskqueue;
-struct taskq {
+typedef struct taskq {
 	struct taskqueue *tq_queue;
-};
+} taskq_t;
-typedef struct taskq taskq_t;
 typedef uintptr_t taskqid_t;
 typedef void (task_func_t)(void *);
 typedef struct taskq_ent {
 	struct task tqent_task;
+	struct timeout_task tqent_timeout_task;
 	task_func_t *tqent_func;
 	void *tqent_arg;
-	struct timeout_task tqent_timeout_task;
-	int tqent_type;
-	int tqent_gen;
+	taskqid_t tqent_id;
+	CK_LIST_ENTRY(taskq_ent) tqent_hash;
+	uint8_t tqent_type;
+	uint8_t tqent_registered;
+	uint8_t tqent_cancelled;
+	volatile uint32_t tqent_rc;
 } taskq_ent_t;
 struct proc;
 /*
  * Public flags for taskq_create(): bit range 0-15
*/

module/os/freebsd/spl/spl_taskq.c

@@ -38,6 +38,8 @@ __FBSDID("$FreeBSD$");
 #include <sys/taskqueue.h>
 #include <sys/taskq.h>
 #include <sys/zfs_context.h>
+#include <sys/ck.h>
+#include <sys/epoch.h>
 #include <vm/uma.h>
@@ -50,41 +52,40 @@ taskq_t *dynamic_taskq = NULL;
-extern int uma_align_cache;
-#define TQ_MASK uma_align_cache
-#define TQ_PTR_MASK ~uma_align_cache
+static MALLOC_DEFINE(M_TASKQ, "taskq", "taskq structures");
+static CK_LIST_HEAD(tqenthashhead, taskq_ent) *tqenthashtbl;
+static unsigned long tqenthash;
+static unsigned long tqenthashlock;
+static struct sx *tqenthashtbl_lock;
+static uint32_t tqidnext = 1;
+#define TQIDHASH(tqid) (&tqenthashtbl[(tqid) & tqenthash])
+#define TQIDHASHLOCK(tqid) (&tqenthashtbl_lock[((tqid) & tqenthashlock)])
 #define TIMEOUT_TASK 1
 #define NORMAL_TASK 2
-static int
-taskqent_init(void *mem, int size, int flags)
-{
-	bzero(mem, sizeof (taskq_ent_t));
-	return (0);
-}
-static int
-taskqent_ctor(void *mem, int size, void *arg, int flags)
-{
-	return (0);
-}
-static void
-taskqent_dtor(void *mem, int size, void *arg)
-{
-	taskq_ent_t *ent = mem;
-	ent->tqent_gen = (ent->tqent_gen + 1) & TQ_MASK;
-}
 static void
 system_taskq_init(void *arg)
 {
+	int i;
 	tsd_create(&taskq_tsd, NULL);
+	tqenthashtbl = hashinit(mp_ncpus * 8, M_TASKQ, &tqenthash);
+	tqenthashlock = (tqenthash + 1) / 8;
+	if (tqenthashlock > 0)
+		tqenthashlock--;
+	tqenthashtbl_lock =
+	    malloc(sizeof (*tqenthashtbl_lock) * (tqenthashlock + 1),
+	    M_TASKQ, M_WAITOK | M_ZERO);
+	for (i = 0; i < tqenthashlock + 1; i++)
+		sx_init_flags(&tqenthashtbl_lock[i], "tqenthash", SX_DUPOK);
+	tqidnext = 1;
 	taskq_zone = uma_zcreate("taskq_zone", sizeof (taskq_ent_t),
-	    taskqent_ctor, taskqent_dtor, taskqent_init, NULL,
-	    UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
+	    NULL, NULL, NULL, NULL,
+	    UMA_ALIGN_CACHE, 0);
 	system_taskq = taskq_create("system_taskq", mp_ncpus, minclsyspri,
 	    0, 0, 0);
 	system_delay_taskq = taskq_create("system_delay_taskq", mp_ncpus,
@@ -96,15 +97,65 @@ SYSINIT(system_taskq_init, SI_SUB_CONFIGURE, SI_ORDER_ANY, system_taskq_init,
 static void
 system_taskq_fini(void *arg)
 {
+	int i;
 	taskq_destroy(system_delay_taskq);
 	taskq_destroy(system_taskq);
 	uma_zdestroy(taskq_zone);
 	tsd_destroy(&taskq_tsd);
+	for (i = 0; i < tqenthashlock + 1; i++)
+		sx_destroy(&tqenthashtbl_lock[i]);
+	for (i = 0; i < tqenthash + 1; i++)
+		VERIFY(CK_LIST_EMPTY(&tqenthashtbl[i]));
+	free(tqenthashtbl_lock, M_TASKQ);
+	free(tqenthashtbl, M_TASKQ);
 }
 SYSUNINIT(system_taskq_fini, SI_SUB_CONFIGURE, SI_ORDER_ANY, system_taskq_fini,
     NULL);
+static taskq_ent_t *
+taskq_lookup(taskqid_t tqid)
+{
+	taskq_ent_t *ent = NULL;
+	sx_xlock(TQIDHASHLOCK(tqid));
+	CK_LIST_FOREACH(ent, TQIDHASH(tqid), tqent_hash) {
+		if (ent->tqent_id == tqid)
+			break;
+	}
+	if (ent != NULL)
+		refcount_acquire(&ent->tqent_rc);
+	sx_xunlock(TQIDHASHLOCK(tqid));
+	return (ent);
+}
+static taskqid_t
+taskq_insert(taskq_ent_t *ent)
+{
+	taskqid_t tqid = atomic_fetchadd_int(&tqidnext, 1);
+	ent->tqent_id = tqid;
+	ent->tqent_registered = B_TRUE;
+	sx_xlock(TQIDHASHLOCK(tqid));
+	CK_LIST_INSERT_HEAD(TQIDHASH(tqid), ent, tqent_hash);
+	sx_xunlock(TQIDHASHLOCK(tqid));
+	return (tqid);
+}
+static void
+taskq_remove(taskq_ent_t *ent)
+{
+	taskqid_t tqid = ent->tqent_id;
+	if (!ent->tqent_registered)
+		return;
+	sx_xlock(TQIDHASHLOCK(tqid));
+	CK_LIST_REMOVE(ent, tqent_hash);
+	sx_xunlock(TQIDHASHLOCK(tqid));
+	ent->tqent_registered = B_FALSE;
+}
 static void
 taskq_tsd_set(void *context)
 {
@@ -174,26 +225,44 @@ taskq_of_curthread(void)
 	return (tsd_get(taskq_tsd));
 }
+static void
+taskq_free(taskq_ent_t *task)
+{
+	taskq_remove(task);
+	if (refcount_release(&task->tqent_rc))
+		uma_zfree(taskq_zone, task);
+}
 int
 taskq_cancel_id(taskq_t *tq, taskqid_t tid)
 {
 	uint32_t pend;
 	int rc;
-	taskq_ent_t *ent = (void*)(tid & TQ_PTR_MASK);
+	taskq_ent_t *ent;
-	if (ent == NULL)
+	if (tid == 0)
 		return (0);
-	if ((tid & TQ_MASK) != ent->tqent_gen)
+	if ((ent = taskq_lookup(tid)) == NULL)
 		return (0);
+	ent->tqent_cancelled = B_TRUE;
 	if (ent->tqent_type == TIMEOUT_TASK) {
 		rc = taskqueue_cancel_timeout(tq->tq_queue,
 		    &ent->tqent_timeout_task, &pend);
 	} else
 		rc = taskqueue_cancel(tq->tq_queue, &ent->tqent_task, &pend);
-	if (rc == EBUSY)
-		taskq_wait_id(tq, tid);
-	else
-		uma_zfree(taskq_zone, ent);
+	if (rc == EBUSY) {
+		taskqueue_drain(tq->tq_queue, &ent->tqent_task);
+	} else if (pend) {
+		/*
+		 * Tasks normally free themselves when run, but here the task
+		 * was cancelled so it did not free itself.
+		 */
+		taskq_free(ent);
+	}
+	/* Free the extra reference we added with taskq_lookup. */
+	taskq_free(ent);
 	return (rc);
 }
@@ -202,8 +271,9 @@ taskq_run(void *arg, int pending __unused)
 {
 	taskq_ent_t *task = arg;
-	task->tqent_func(task->tqent_arg);
-	uma_zfree(taskq_zone, task);
+	if (!task->tqent_cancelled)
+		task->tqent_func(task->tqent_arg);
+	taskq_free(task);
 }
 taskqid_t
@@ -227,12 +297,12 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
 	task = uma_zalloc(taskq_zone, mflag);
 	if (task == NULL)
 		return (0);
-	tid = (uintptr_t)task;
-	MPASS((tid & TQ_MASK) == 0);
 	task->tqent_func = func;
 	task->tqent_arg = arg;
 	task->tqent_type = TIMEOUT_TASK;
-	tid |= task->tqent_gen;
+	task->tqent_cancelled = B_FALSE;
+	refcount_init(&task->tqent_rc, 1);
+	tid = taskq_insert(task);
 	TIMEOUT_TASK_INIT(tq->tq_queue, &task->tqent_timeout_task, 0,
 	    taskq_run, task);
@@ -261,15 +331,15 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
 	task = uma_zalloc(taskq_zone, mflag);
 	if (task == NULL)
 		return (0);
-	tid = (uintptr_t)task;
-	MPASS((tid & TQ_MASK) == 0);
+	refcount_init(&task->tqent_rc, 1);
 	task->tqent_func = func;
 	task->tqent_arg = arg;
+	task->tqent_cancelled = B_FALSE;
 	task->tqent_type = NORMAL_TASK;
+	tid = taskq_insert(task);
 	TASK_INIT(&task->tqent_task, prio, taskq_run, task);
-	tid |= task->tqent_gen;
 	taskqueue_enqueue(tq->tq_queue, &task->tqent_task);
 	VERIFY(tid);
 	return (tid);
 }
@@ -292,7 +362,9 @@ taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint32_t flags,
 	 * can go at the front of the queue.
 	 */
 	prio = !!(flags & TQ_FRONT);
+	task->tqent_cancelled = B_FALSE;
+	task->tqent_registered = B_FALSE;
+	task->tqent_id = 0;
 	task->tqent_func = func;
 	task->tqent_arg = arg;
@@ -309,12 +381,15 @@ taskq_wait(taskq_t *tq)
 void
 taskq_wait_id(taskq_t *tq, taskqid_t tid)
 {
-	taskq_ent_t *ent = (void*)(tid & TQ_PTR_MASK);
+	taskq_ent_t *ent;
-	if ((tid & TQ_MASK) != ent->tqent_gen)
+	if (tid == 0)
 		return;
+	if ((ent = taskq_lookup(tid)) == NULL)
+		return;
 	taskqueue_drain(tq->tq_queue, &ent->tqent_task);
+	taskq_free(ent);
 }
 void