module: icp: remove unused gswq, kcfpool, [as]req_cache, reqid_table, obsolete kstat
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
Closes #12901
commit a288428d83 (parent 1c17d2940c)
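Of the removed machinery, only the context kmem cache still has users
(kcf_new_ctx()/kcf_free_context()); the sync/async request caches, the
global software queue, the reqid table, the thread pool, and the kstat
were dead code in this port. As a rough sketch, reconstructed from the
hunks below rather than copied from the resulting file, the scheduler
setup and teardown plausibly reduce to:

    void
    kcf_sched_init(void)
    {
            /* Only the context cache is still needed (assumption). */
            kcf_context_cache = kmem_cache_create("kcf_context_cache",
                sizeof (struct kcf_context), 64, kcf_context_cache_constructor,
                kcf_context_cache_destructor, NULL, NULL, NULL, 0);
    }

    void
    kcf_sched_destroy(void)
    {
            if (kcf_context_cache)
                    kmem_cache_destroy(kcf_context_cache);
    }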
@@ -35,38 +35,10 @@
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/api.h>

static kcf_global_swq_t *gswq; /* Global queue */

/* Thread pool related variables */
static kcf_pool_t *kcfpool; /* Thread pool of kcfd LWPs */
static const int kcf_maxthreads = 2;
static const int kcf_minthreads = 1;

/* kmem caches used by the scheduler */
static kmem_cache_t *kcf_sreq_cache;
static kmem_cache_t *kcf_areq_cache;
static kmem_cache_t *kcf_context_cache;

/* Global request ID table */
static kcf_reqid_table_t *kcf_reqid_table[REQID_TABLES];

/* KCF stats. Not protected. */
static kcf_stats_t kcf_ksdata = {
        { "total threads in pool", KSTAT_DATA_UINT32},
        { "idle threads in pool", KSTAT_DATA_UINT32},
        { "min threads in pool", KSTAT_DATA_UINT32},
        { "max threads in pool", KSTAT_DATA_UINT32},
        { "requests in gswq", KSTAT_DATA_UINT32},
        { "max requests in gswq", KSTAT_DATA_UINT32},
        { "maxalloc for gwsq", KSTAT_DATA_UINT32}
};

static kstat_t *kcf_misc_kstat = NULL;
ulong_t kcf_swprov_hndl = 0;

static void kcfpool_alloc(void);
static int kcf_misc_kstat_update(kstat_t *ksp, int rw);

/*
 * Create a new context.
 */
@@ -84,13 +56,9 @@ kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,

        /* initialize the context for the consumer */
        kcf_ctx->kc_refcnt = 1;
        kcf_ctx->kc_req_chain_first = NULL;
        kcf_ctx->kc_req_chain_last = NULL;
        kcf_ctx->kc_secondctx = NULL;
        KCF_PROV_REFHOLD(pd);
        kcf_ctx->kc_prov_desc = pd;
        kcf_ctx->kc_sw_prov_desc = NULL;
        kcf_ctx->kc_mech = NULL;

        ctx = &kcf_ctx->kc_glbl_ctx;
        ctx->cc_provider = pd->pd_prov_handle;
@@ -118,12 +86,6 @@ kcf_free_context(kcf_context_t *kcf_ctx)
{
        kcf_provider_desc_t *pd = kcf_ctx->kc_prov_desc;
        crypto_ctx_t *gctx = &kcf_ctx->kc_glbl_ctx;
        kcf_context_t *kcf_secondctx = kcf_ctx->kc_secondctx;

        /* Release the second context, if any */

        if (kcf_secondctx != NULL)
                KCF_CONTEXT_REFRELE(kcf_secondctx);

        if (gctx->cc_provider_private != NULL) {
                mutex_enter(&pd->pd_lock);
@@ -154,77 +116,6 @@ kcf_free_context(kcf_context_t *kcf_ctx)
        kmem_cache_free(kcf_context_cache, kcf_ctx);
}

/*
 * Free the request after releasing all the holds.
 */
void
kcf_free_req(kcf_areq_node_t *areq)
{
        KCF_PROV_REFRELE(areq->an_provider);
        if (areq->an_context != NULL)
                KCF_CONTEXT_REFRELE(areq->an_context);

        if (areq->an_tried_plist != NULL)
                kcf_free_triedlist(areq->an_tried_plist);
        kmem_cache_free(kcf_areq_cache, areq);
}

/*
 * kmem_cache_alloc constructor for sync request structure.
 */
static int
kcf_sreq_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
        (void) cdrarg, (void) kmflags;
        kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;

        sreq->sn_type = CRYPTO_SYNCH;
        cv_init(&sreq->sn_cv, NULL, CV_DEFAULT, NULL);
        mutex_init(&sreq->sn_lock, NULL, MUTEX_DEFAULT, NULL);

        return (0);
}

static void
kcf_sreq_cache_destructor(void *buf, void *cdrarg)
{
        (void) cdrarg;
        kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;

        mutex_destroy(&sreq->sn_lock);
        cv_destroy(&sreq->sn_cv);
}

/*
 * kmem_cache_alloc constructor for async request structure.
 */
static int
kcf_areq_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
        (void) cdrarg, (void) kmflags;
        kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;

        areq->an_type = CRYPTO_ASYNCH;
        areq->an_refcnt = 0;
        mutex_init(&areq->an_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&areq->an_done, NULL, CV_DEFAULT, NULL);
        cv_init(&areq->an_turn_cv, NULL, CV_DEFAULT, NULL);

        return (0);
}

static void
kcf_areq_cache_destructor(void *buf, void *cdrarg)
{
        (void) cdrarg;
        kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;

        ASSERT(areq->an_refcnt == 0);
        mutex_destroy(&areq->an_lock);
        cv_destroy(&areq->an_done);
        cv_destroy(&areq->an_turn_cv);
}

/*
 * kmem_cache_alloc constructor for kcf_context structure.
 */
@@ -235,7 +126,6 @@ kcf_context_cache_constructor(void *buf, void *cdrarg, int kmflags)
        kcf_context_t *kctx = (kcf_context_t *)buf;

        kctx->kc_refcnt = 0;
        mutex_init(&kctx->kc_in_use_lock, NULL, MUTEX_DEFAULT, NULL);

        return (0);
}
@@ -247,46 +137,13 @@ kcf_context_cache_destructor(void *buf, void *cdrarg)
        kcf_context_t *kctx = (kcf_context_t *)buf;

        ASSERT(kctx->kc_refcnt == 0);
        mutex_destroy(&kctx->kc_in_use_lock);
}

void
kcf_sched_destroy(void)
{
        int i;

        if (kcf_misc_kstat)
                kstat_delete(kcf_misc_kstat);

        if (kcfpool) {
                mutex_destroy(&kcfpool->kp_thread_lock);
                cv_destroy(&kcfpool->kp_nothr_cv);
                mutex_destroy(&kcfpool->kp_user_lock);
                cv_destroy(&kcfpool->kp_user_cv);

                kmem_free(kcfpool, sizeof (kcf_pool_t));
        }

        for (i = 0; i < REQID_TABLES; i++) {
                if (kcf_reqid_table[i]) {
                        mutex_destroy(&(kcf_reqid_table[i]->rt_lock));
                        kmem_free(kcf_reqid_table[i],
                            sizeof (kcf_reqid_table_t));
                }
        }

        if (gswq) {
                mutex_destroy(&gswq->gs_lock);
                cv_destroy(&gswq->gs_cv);
                kmem_free(gswq, sizeof (kcf_global_swq_t));
        }

        if (kcf_context_cache)
                kmem_cache_destroy(kcf_context_cache);
        if (kcf_areq_cache)
                kmem_cache_destroy(kcf_areq_cache);
        if (kcf_sreq_cache)
                kmem_cache_destroy(kcf_sreq_cache);
}

/*
@@ -295,9 +152,6 @@ kcf_sched_destroy(void)
void
kcf_sched_init(void)
{
        int i;
        kcf_reqid_table_t *rt;

        /*
         * Create all the kmem caches needed by the framework. We set the
         * align argument to 64, to get a slab aligned to 64-byte as well as
@@ -305,98 +159,7 @@ kcf_sched_init(void)
         * This helps to avoid false sharing as this is the size of the
         * CPU cache line.
         */
        kcf_sreq_cache = kmem_cache_create("kcf_sreq_cache",
            sizeof (struct kcf_sreq_node), 64, kcf_sreq_cache_constructor,
            kcf_sreq_cache_destructor, NULL, NULL, NULL, 0);

        kcf_areq_cache = kmem_cache_create("kcf_areq_cache",
            sizeof (struct kcf_areq_node), 64, kcf_areq_cache_constructor,
            kcf_areq_cache_destructor, NULL, NULL, NULL, 0);

        kcf_context_cache = kmem_cache_create("kcf_context_cache",
            sizeof (struct kcf_context), 64, kcf_context_cache_constructor,
            kcf_context_cache_destructor, NULL, NULL, NULL, 0);

        gswq = kmem_alloc(sizeof (kcf_global_swq_t), KM_SLEEP);

        mutex_init(&gswq->gs_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&gswq->gs_cv, NULL, CV_DEFAULT, NULL);
        gswq->gs_njobs = 0;
        gswq->gs_maxjobs = kcf_maxthreads * CRYPTO_TASKQ_MAX;
        gswq->gs_first = gswq->gs_last = NULL;

        /* Initialize the global reqid table */
        for (i = 0; i < REQID_TABLES; i++) {
                rt = kmem_zalloc(sizeof (kcf_reqid_table_t), KM_SLEEP);
                kcf_reqid_table[i] = rt;
                mutex_init(&rt->rt_lock, NULL, MUTEX_DEFAULT, NULL);
                rt->rt_curid = i;
        }

        /* Allocate and initialize the thread pool */
        kcfpool_alloc();

        /* Create the kcf kstat */
        kcf_misc_kstat = kstat_create("kcf", 0, "framework_stats", "crypto",
            KSTAT_TYPE_NAMED, sizeof (kcf_stats_t) / sizeof (kstat_named_t),
            KSTAT_FLAG_VIRTUAL);

        if (kcf_misc_kstat != NULL) {
                kcf_misc_kstat->ks_data = &kcf_ksdata;
                kcf_misc_kstat->ks_update = kcf_misc_kstat_update;
                kstat_install(kcf_misc_kstat);
        }
}

/*
 * Allocate the thread pool and initialize all the fields.
 */
static void
kcfpool_alloc()
{
        kcfpool = kmem_alloc(sizeof (kcf_pool_t), KM_SLEEP);

        kcfpool->kp_threads = kcfpool->kp_idlethreads = 0;
        kcfpool->kp_blockedthreads = 0;
        kcfpool->kp_signal_create_thread = B_FALSE;
        kcfpool->kp_nthrs = 0;
        kcfpool->kp_user_waiting = B_FALSE;

        mutex_init(&kcfpool->kp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&kcfpool->kp_nothr_cv, NULL, CV_DEFAULT, NULL);

        mutex_init(&kcfpool->kp_user_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&kcfpool->kp_user_cv, NULL, CV_DEFAULT, NULL);
}

/*
 * Update kstats.
 */
static int
kcf_misc_kstat_update(kstat_t *ksp, int rw)
{
        uint_t tcnt;
        kcf_stats_t *ks_data;

        if (rw == KSTAT_WRITE)
                return (EACCES);

        ks_data = ksp->ks_data;

        ks_data->ks_thrs_in_pool.value.ui32 = kcfpool->kp_threads;
        /*
         * The failover thread is counted in kp_idlethreads in
         * some corner cases. This is done to avoid doing more checks
         * when submitting a request. We account for those cases below.
         */
        if ((tcnt = kcfpool->kp_idlethreads) == (kcfpool->kp_threads + 1))
                tcnt--;
        ks_data->ks_idle_thrs.value.ui32 = tcnt;
        ks_data->ks_minthrs.value.ui32 = kcf_minthreads;
        ks_data->ks_maxthrs.value.ui32 = kcf_maxthreads;
        ks_data->ks_swq_njobs.value.ui32 = gswq->gs_njobs;
        ks_data->ks_swq_maxjobs.value.ui32 = gswq->gs_maxjobs;
        ks_data->ks_swq_maxalloc.value.ui32 = CRYPTO_TASKQ_MAX;

        return (0);
}
@@ -54,17 +54,6 @@ typedef struct kcf_prov_stats {
        kstat_named_t ps_ops_busy_rval;
} kcf_prov_stats_t;

/* Various kcf stats. Not protected. */
typedef struct kcf_stats {
        kstat_named_t ks_thrs_in_pool;
        kstat_named_t ks_idle_thrs;
        kstat_named_t ks_minthrs;
        kstat_named_t ks_maxthrs;
        kstat_named_t ks_swq_njobs;
        kstat_named_t ks_swq_maxjobs;
        kstat_named_t ks_swq_maxalloc;
} kcf_stats_t;

/*
 * Keep all the information needed by the scheduler from
 * this provider.
@@ -41,21 +41,6 @@ extern "C" {
#include <sys/crypto/common.h>
#include <sys/crypto/ops_impl.h>

typedef void (kcf_func_t)(void *, int);

typedef enum kcf_req_status {
        REQ_ALLOCATED = 1,
        REQ_WAITING, /* At the framework level */
        REQ_INPROGRESS, /* At the provider level */
        REQ_DONE,
        REQ_CANCELED
} kcf_req_status_t;

typedef enum kcf_call_type {
        CRYPTO_SYNCH = 1,
        CRYPTO_ASYNCH
} kcf_call_type_t;

#define KCF_KMFLAG(crq) (((crq) == NULL) ? KM_SLEEP : KM_NOSLEEP)

/*
@@ -100,143 +85,6 @@ typedef struct kcf_prov_tried {
#define KCF_ATOMIC_INCR(x) atomic_add_32(&(x), 1)
#define KCF_ATOMIC_DECR(x) atomic_add_32(&(x), -1)

/*
 * Node structure for synchronous requests.
 */
typedef struct kcf_sreq_node {
        /* Should always be the first field in this structure */
        kcf_call_type_t sn_type;
        /*
         * sn_cv and sr_lock are used to wait for the
         * operation to complete. sn_lock also protects
         * the sn_state field.
         */
        kcondvar_t sn_cv;
        kmutex_t sn_lock;
        kcf_req_status_t sn_state;

        /*
         * Return value from the operation. This will be
         * one of the CRYPTO_* errors defined in common.h.
         */
        int sn_rv;

        /* Internal context for this request */
        struct kcf_context *sn_context;

        /* Provider handling this request */
        kcf_provider_desc_t *sn_provider;
} kcf_sreq_node_t;

/*
 * Node structure for asynchronous requests. A node can be on
 * on a chain of requests hanging of the internal context
 * structure and can be in the global provider queue.
 */
typedef struct kcf_areq_node {
        /* Should always be the first field in this structure */
        kcf_call_type_t an_type;

        /* an_lock protects the field an_state */
        kmutex_t an_lock;
        kcf_req_status_t an_state;
        crypto_call_req_t an_reqarg;

        /*
         * The next two fields should be NULL for operations that
         * don't need a context.
         */
        /* Internal context for this request */
        struct kcf_context *an_context;

        /* next in chain of requests for context */
        struct kcf_areq_node *an_ctxchain_next;

        kcondvar_t an_turn_cv;
        boolean_t an_is_my_turn;

        /* Next and previous nodes in the global queue. */
        struct kcf_areq_node *an_next;
        struct kcf_areq_node *an_prev;

        /* Provider handling this request */
        kcf_provider_desc_t *an_provider;
        kcf_prov_tried_t *an_tried_plist;

        struct kcf_areq_node *an_idnext; /* Next in ID hash */
        struct kcf_areq_node *an_idprev; /* Prev in ID hash */
        kcondvar_t an_done; /* Signal request completion */
        uint_t an_refcnt;
} kcf_areq_node_t;

#define KCF_AREQ_REFHOLD(areq) { \
        atomic_add_32(&(areq)->an_refcnt, 1); \
        ASSERT((areq)->an_refcnt != 0); \
}

#define KCF_AREQ_REFRELE(areq) { \
        ASSERT((areq)->an_refcnt != 0); \
        membar_exit(); \
        if (atomic_add_32_nv(&(areq)->an_refcnt, -1) == 0) \
                kcf_free_req(areq); \
}

#define GET_REQ_TYPE(arg) *((kcf_call_type_t *)(arg))

#define NOTIFY_CLIENT(areq, err) (*(areq)->an_reqarg.cr_callback_func)(\
        (areq)->an_reqarg.cr_callback_arg, err);

/*
 * The following are some what similar to macros in callo.h, which implement
 * callout tables.
 *
 * The lower four bits of the ID are used to encode the table ID to
 * index in to. The REQID_COUNTER_HIGH bit is used to avoid any check for
 * wrap around when generating ID. We assume that there won't be a request
 * which takes more time than 2^^(sizeof (long) - 5) other requests submitted
 * after it. This ensures there won't be any ID collision.
 */
#define REQID_COUNTER_HIGH (1UL << (8 * sizeof (long) - 1))
#define REQID_COUNTER_SHIFT 4
#define REQID_COUNTER_LOW (1 << REQID_COUNTER_SHIFT)
#define REQID_TABLES 16
#define REQID_TABLE_MASK (REQID_TABLES - 1)

#define REQID_BUCKETS 512
#define REQID_BUCKET_MASK (REQID_BUCKETS - 1)
#define REQID_HASH(id) (((id) >> REQID_COUNTER_SHIFT) & REQID_BUCKET_MASK)

#define GET_REQID(areq) (areq)->an_reqarg.cr_reqid
#define SET_REQID(areq, val) GET_REQID(areq) = val

/*
 * Hash table for async requests.
 */
typedef struct kcf_reqid_table {
        kmutex_t rt_lock;
        crypto_req_id_t rt_curid;
        kcf_areq_node_t *rt_idhash[REQID_BUCKETS];
} kcf_reqid_table_t;

/*
 * Global provider queue structure. Requests to be
 * handled by a provider and have the ALWAYS_QUEUE flag set
 * get queued here.
 */
typedef struct kcf_global_swq {
        /*
         * gs_cv and gs_lock are used to wait for new requests.
         * gs_lock protects the changes to the queue.
         */
        kcondvar_t gs_cv;
        kmutex_t gs_lock;
        uint_t gs_njobs;
        uint_t gs_maxjobs;
        kcf_areq_node_t *gs_first;
        kcf_areq_node_t *gs_last;
} kcf_global_swq_t;


/*
 * Internal representation of a canonical context. We contain crypto_ctx_t
 * structure in order to have just one memory allocation. The SPI
@@ -245,18 +93,8 @@ typedef struct kcf_global_swq {
typedef struct kcf_context {
        crypto_ctx_t kc_glbl_ctx;
        uint_t kc_refcnt;
        kmutex_t kc_in_use_lock;
        /*
         * kc_req_chain_first and kc_req_chain_last are used to chain
         * multiple async requests using the same context. They should be
         * NULL for sync requests.
         */
        kcf_areq_node_t *kc_req_chain_first;
        kcf_areq_node_t *kc_req_chain_last;
        kcf_provider_desc_t *kc_prov_desc; /* Prov. descriptor */
        kcf_provider_desc_t *kc_sw_prov_desc; /* Prov. descriptor */
        kcf_mech_entry_t *kc_mech;
        struct kcf_context *kc_secondctx; /* for dual contexts */
} kcf_context_t;

/*
@@ -310,53 +148,11 @@ typedef struct kcf_context {
 * A crypto_ctx_template_t is internally a pointer to this struct
 */
typedef struct kcf_ctx_template {
        crypto_kcf_provider_handle_t ct_prov_handle; /* provider handle */
        uint_t ct_generation; /* generation # */
        size_t ct_size; /* for freeing */
        crypto_spi_ctx_template_t ct_prov_tmpl; /* context template */
        /* from the provider */
} kcf_ctx_template_t;

/*
 * Structure for pool of threads working on the global queue.
 */
typedef struct kcf_pool {
        uint32_t kp_threads; /* Number of threads in pool */
        uint32_t kp_idlethreads; /* Idle threads in pool */
        uint32_t kp_blockedthreads; /* Blocked threads in pool */

        /*
         * cv & lock to monitor the condition when no threads
         * are around. In this case the failover thread kicks in.
         */
        kcondvar_t kp_nothr_cv;
        kmutex_t kp_thread_lock;

        /* Userspace thread creator variables. */
        boolean_t kp_signal_create_thread; /* Create requested flag */
        int kp_nthrs; /* # of threads to create */
        boolean_t kp_user_waiting; /* Thread waiting for work */

        /*
         * cv & lock for the condition where more threads need to be
         * created. kp_user_lock also protects the three fields above.
         */
        kcondvar_t kp_user_cv; /* Creator cond. variable */
        kmutex_t kp_user_lock; /* Creator lock */
} kcf_pool_t;


/*
 * The following values are based on the assumption that it would
 * take around eight cpus to load a hardware provider (This is true for
 * at least one product) and a kernel client may come from different
 * low-priority interrupt levels. The CRYPTO_TASKQ_MAX number is based on
 * a throughput of 1GB/s using 512-byte buffers. These are just
 * reasonable estimates and might need to change in future.
 */
#define CRYPTO_TASKQ_MAX 2 * 1024 * 1024


extern void kcf_free_triedlist(kcf_prov_tried_t *);
extern kcf_prov_tried_t *kcf_insert_triedlist(kcf_prov_tried_t **,
@@ -367,20 +163,8 @@ extern crypto_ctx_t *kcf_new_ctx(crypto_call_req_t *, kcf_provider_desc_t *,
    crypto_session_id_t);
extern void kcf_sched_destroy(void);
extern void kcf_sched_init(void);
extern void kcf_sched_start(void);
extern void kcf_free_context(kcf_context_t *);

extern int kcf_svc_wait(int *);
extern int kcf_svc_do_run(void);
extern int kcf_need_signature_verification(kcf_provider_desc_t *);
extern void kcf_verify_signature(void *);
extern struct modctl *kcf_get_modctl(crypto_provider_info_t *);
extern void verify_unverified_providers(void);
extern void kcf_free_req(kcf_areq_node_t *areq);
extern void crypto_bufcall_service(void);

extern void kcf_do_notify(kcf_provider_desc_t *, boolean_t);

#ifdef __cplusplus
}
#endif