module: icp: remove unused kcf_op_{group,type}, req_params, ...

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
Closes #12901
наб 2021-12-24 17:34:19 +01:00 committed by Brian Behlendorf
parent f3c3a6d47e
commit 3fd5ead75e
7 changed files with 0 additions and 897 deletions


@@ -133,222 +133,3 @@ kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
mutex_exit(&me->me_mutex);
return (pd);
}
/*
* Do the actual work of calling the provider routines.
*
* pd - Provider structure
* ctx - Context for this operation
* params - Parameters for this operation
* rhndl - Request handle to use for notification
*
* The return values are the same as that of the respective SPI.
*/
int
common_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
kcf_req_params_t *params, crypto_req_handle_t rhndl)
{
int err = CRYPTO_ARGUMENTS_BAD;
kcf_op_type_t optype;
optype = params->rp_optype;
switch (params->rp_opgrp) {
case KCF_OG_DIGEST: {
kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;
switch (optype) {
case KCF_OP_INIT:
/*
* We should do this only here and not in the KCF_WRAP_*
* macros, because we may want to try other providers
* when recovering from a failure.
*/
KCF_SET_PROVIDER_MECHNUM(dops->do_framework_mechtype,
pd, &dops->do_mech);
err = KCF_PROV_DIGEST_INIT(pd, ctx, &dops->do_mech,
rhndl);
break;
case KCF_OP_SINGLE:
err = KCF_PROV_DIGEST(pd, ctx, dops->do_data,
dops->do_digest, rhndl);
break;
case KCF_OP_UPDATE:
err = KCF_PROV_DIGEST_UPDATE(pd, ctx,
dops->do_data, rhndl);
break;
case KCF_OP_FINAL:
err = KCF_PROV_DIGEST_FINAL(pd, ctx,
dops->do_digest, rhndl);
break;
case KCF_OP_ATOMIC:
ASSERT(ctx == NULL);
KCF_SET_PROVIDER_MECHNUM(dops->do_framework_mechtype,
pd, &dops->do_mech);
err = KCF_PROV_DIGEST_ATOMIC(pd, dops->do_sid,
&dops->do_mech, dops->do_data, dops->do_digest,
rhndl);
break;
case KCF_OP_DIGEST_KEY:
err = KCF_PROV_DIGEST_KEY(pd, ctx, dops->do_digest_key,
rhndl);
break;
default:
break;
}
break;
}
case KCF_OG_MAC: {
kcf_mac_ops_params_t *mops = &params->rp_u.mac_params;
switch (optype) {
case KCF_OP_INIT:
KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype,
pd, &mops->mo_mech);
err = KCF_PROV_MAC_INIT(pd, ctx, &mops->mo_mech,
mops->mo_key, mops->mo_templ, rhndl);
break;
case KCF_OP_SINGLE:
err = KCF_PROV_MAC(pd, ctx, mops->mo_data,
mops->mo_mac, rhndl);
break;
case KCF_OP_UPDATE:
err = KCF_PROV_MAC_UPDATE(pd, ctx, mops->mo_data,
rhndl);
break;
case KCF_OP_FINAL:
err = KCF_PROV_MAC_FINAL(pd, ctx, mops->mo_mac, rhndl);
break;
case KCF_OP_ATOMIC:
ASSERT(ctx == NULL);
KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype,
pd, &mops->mo_mech);
err = KCF_PROV_MAC_ATOMIC(pd, mops->mo_sid,
&mops->mo_mech, mops->mo_key, mops->mo_data,
mops->mo_mac, mops->mo_templ, rhndl);
break;
case KCF_OP_MAC_VERIFY_ATOMIC:
ASSERT(ctx == NULL);
KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype,
pd, &mops->mo_mech);
err = KCF_PROV_MAC_VERIFY_ATOMIC(pd, mops->mo_sid,
&mops->mo_mech, mops->mo_key, mops->mo_data,
mops->mo_mac, mops->mo_templ, rhndl);
break;
default:
break;
}
break;
}
case KCF_OG_ENCRYPT: {
kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;
switch (optype) {
case KCF_OP_INIT:
KCF_SET_PROVIDER_MECHNUM(eops->eo_framework_mechtype,
pd, &eops->eo_mech);
err = KCF_PROV_ENCRYPT_INIT(pd, ctx, &eops->eo_mech,
eops->eo_key, eops->eo_templ, rhndl);
break;
case KCF_OP_SINGLE:
err = KCF_PROV_ENCRYPT(pd, ctx, eops->eo_plaintext,
eops->eo_ciphertext, rhndl);
break;
case KCF_OP_UPDATE:
err = KCF_PROV_ENCRYPT_UPDATE(pd, ctx,
eops->eo_plaintext, eops->eo_ciphertext, rhndl);
break;
case KCF_OP_FINAL:
err = KCF_PROV_ENCRYPT_FINAL(pd, ctx,
eops->eo_ciphertext, rhndl);
break;
case KCF_OP_ATOMIC:
ASSERT(ctx == NULL);
KCF_SET_PROVIDER_MECHNUM(eops->eo_framework_mechtype,
pd, &eops->eo_mech);
err = KCF_PROV_ENCRYPT_ATOMIC(pd, eops->eo_sid,
&eops->eo_mech, eops->eo_key, eops->eo_plaintext,
eops->eo_ciphertext, eops->eo_templ, rhndl);
break;
default:
break;
}
break;
}
case KCF_OG_DECRYPT: {
kcf_decrypt_ops_params_t *dcrops = &params->rp_u.decrypt_params;
switch (optype) {
case KCF_OP_INIT:
KCF_SET_PROVIDER_MECHNUM(dcrops->dop_framework_mechtype,
pd, &dcrops->dop_mech);
err = KCF_PROV_DECRYPT_INIT(pd, ctx, &dcrops->dop_mech,
dcrops->dop_key, dcrops->dop_templ, rhndl);
break;
case KCF_OP_SINGLE:
err = KCF_PROV_DECRYPT(pd, ctx, dcrops->dop_ciphertext,
dcrops->dop_plaintext, rhndl);
break;
case KCF_OP_UPDATE:
err = KCF_PROV_DECRYPT_UPDATE(pd, ctx,
dcrops->dop_ciphertext, dcrops->dop_plaintext,
rhndl);
break;
case KCF_OP_FINAL:
err = KCF_PROV_DECRYPT_FINAL(pd, ctx,
dcrops->dop_plaintext, rhndl);
break;
case KCF_OP_ATOMIC:
ASSERT(ctx == NULL);
KCF_SET_PROVIDER_MECHNUM(dcrops->dop_framework_mechtype,
pd, &dcrops->dop_mech);
err = KCF_PROV_DECRYPT_ATOMIC(pd, dcrops->dop_sid,
&dcrops->dop_mech, dcrops->dop_key,
dcrops->dop_ciphertext, dcrops->dop_plaintext,
dcrops->dop_templ, rhndl);
break;
default:
break;
}
break;
}
default:
break;
} /* end of switch(params->rp_opgrp) */
KCF_PROV_INCRSTATS(pd, err);
return (err);
}
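For reference, the dispatcher above was driven by the KCF_WRAP_*_OPS_PARAMS macros removed from ops_impl.h further down. A minimal sketch of the removed calling convention for a single-part digest, assuming a held provider descriptor pd, an initialized crypto_ctx_t ctx, a session id sid, a request handle rhndl, and populated crypto_data_t buffers (all names here are hypothetical):

	kcf_req_params_t params;
	int error;

	/* Bundle the operation group/type and the SPI arguments. */
	KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_SINGLE, sid,
	    NULL /* mech: already bound to ctx by the INIT step */,
	    NULL /* key: only used by KCF_OP_DIGEST_KEY */,
	    data, digest);

	/* Returns the SPI's return value, e.g. CRYPTO_SUCCESS. */
	error = common_submit_request(pd, ctx, &params, rhndl);

Note that only the init and atomic paths translate the framework mechanism number with KCF_SET_PROVIDER_MECHNUM, precisely so that a failed request can later be resubmitted to a different provider.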


@@ -64,10 +64,7 @@ static kcf_stats_t kcf_ksdata = {
static kstat_t *kcf_misc_kstat = NULL;
ulong_t kcf_swprov_hndl = 0;
static int kcf_disp_sw_request(kcf_areq_node_t *);
static int kcf_enqueue(kcf_areq_node_t *);
static void kcfpool_alloc(void);
static void kcf_reqid_delete(kcf_areq_node_t *areq);
static int kcf_misc_kstat_update(kstat_t *ksp, int rw);
/*
@@ -106,201 +103,6 @@ kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,
return (ctx);
}
/*
* Queue the request node and do one of the following:
* - If there is an idle thread signal it to run.
* - If there is no idle thread and max running threads is not
* reached, signal the creator thread for more threads.
*
* If the two conditions above are not met, we don't need to do
* anything. The request will be picked up by one of the
* worker threads when one becomes available.
*/
static int
kcf_disp_sw_request(kcf_areq_node_t *areq)
{
int err;
int cnt = 0;
if ((err = kcf_enqueue(areq)) != 0)
return (err);
if (kcfpool->kp_idlethreads > 0) {
/* Signal an idle thread to run */
mutex_enter(&gswq->gs_lock);
cv_signal(&gswq->gs_cv);
mutex_exit(&gswq->gs_lock);
return (CRYPTO_QUEUED);
}
/*
* We keep the number of running threads at kcf_minthreads
* to reduce gs_lock contention.
*/
cnt = kcf_minthreads -
(kcfpool->kp_threads - kcfpool->kp_blockedthreads);
if (cnt > 0) {
/*
* The following ensures the number of threads in pool
* does not exceed kcf_maxthreads.
*/
cnt = MIN(cnt, kcf_maxthreads - (int)kcfpool->kp_threads);
if (cnt > 0) {
/* Signal the creator thread for more threads */
mutex_enter(&kcfpool->kp_user_lock);
if (!kcfpool->kp_signal_create_thread) {
kcfpool->kp_signal_create_thread = B_TRUE;
kcfpool->kp_nthrs = cnt;
cv_signal(&kcfpool->kp_user_cv);
}
mutex_exit(&kcfpool->kp_user_lock);
}
}
return (CRYPTO_QUEUED);
}
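A worked example of the top-up arithmetic above, with hypothetical values: if kcf_minthreads = 4, kp_threads = 5, and kp_blockedthreads = 3, then only 5 - 3 = 2 threads are runnable and cnt = 4 - 2 = 2; with kcf_maxthreads = 6 the cap of 6 - 5 = 1 wins, so the creator thread is asked for exactly one more thread.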
/*
* This routine checks if a request can be retried on another
* provider. If true, mech1 is initialized to point to the mechanism
* structure. fg is initialized to the correct crypto_func_group_t bit flag.
* They are initialized by this routine, so that the caller can pass them to
* kcf_get_mech_provider() with no further change.
*
* We check that the request is for an init or atomic routine and that
* it is for one of the operation groups used from the k-api.
*/
static boolean_t
can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
crypto_func_group_t *fg)
{
kcf_req_params_t *params;
kcf_op_type_t optype;
params = &areq->an_params;
optype = params->rp_optype;
if (!(IS_INIT_OP(optype) || IS_ATOMIC_OP(optype)))
return (B_FALSE);
switch (params->rp_opgrp) {
case KCF_OG_DIGEST: {
kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;
dops->do_mech.cm_type = dops->do_framework_mechtype;
*mech1 = &dops->do_mech;
*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DIGEST :
CRYPTO_FG_DIGEST_ATOMIC;
break;
}
case KCF_OG_MAC: {
kcf_mac_ops_params_t *mops = &params->rp_u.mac_params;
mops->mo_mech.cm_type = mops->mo_framework_mechtype;
*mech1 = &mops->mo_mech;
*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC :
CRYPTO_FG_MAC_ATOMIC;
break;
}
case KCF_OG_ENCRYPT: {
kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;
eops->eo_mech.cm_type = eops->eo_framework_mechtype;
*mech1 = &eops->eo_mech;
*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT :
CRYPTO_FG_ENCRYPT_ATOMIC;
break;
}
case KCF_OG_DECRYPT: {
kcf_decrypt_ops_params_t *dcrops = &params->rp_u.decrypt_params;
dcrops->dop_mech.cm_type = dcrops->dop_framework_mechtype;
*mech1 = &dcrops->dop_mech;
*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DECRYPT :
CRYPTO_FG_DECRYPT_ATOMIC;
break;
}
default:
return (B_FALSE);
}
return (B_TRUE);
}
/*
* This routine is called when a request to a provider has failed
* with a recoverable error. This routine tries to find another provider
* and dispatches the request to the new provider, if one is available.
* We reuse the request structure.
*
* A return value of NULL from kcf_get_mech_provider() indicates
* we have tried the last provider.
*/
static int
kcf_resubmit_request(kcf_areq_node_t *areq)
{
int error = CRYPTO_FAILED;
kcf_context_t *ictx;
kcf_provider_desc_t *old_pd;
kcf_provider_desc_t *new_pd;
crypto_mechanism_t *mech1 = NULL;
crypto_func_group_t fg = 0;
if (!can_resubmit(areq, &mech1, &fg))
return (error);
old_pd = areq->an_provider;
/*
* Add old_pd to the list of providers already tried. We release
* the hold on old_pd (from the earlier kcf_get_mech_provider()) in
* kcf_free_triedlist().
*/
if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
KM_NOSLEEP) == NULL)
return (error);
new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
areq->an_tried_plist, fg);
if (new_pd == NULL)
return (error);
/*
* We reuse the old context by resetting provider-specific
* fields in it.
*/
if ((ictx = areq->an_context) != NULL) {
crypto_ctx_t *ctx;
ASSERT(old_pd == ictx->kc_prov_desc);
KCF_PROV_REFRELE(ictx->kc_prov_desc);
KCF_PROV_REFHOLD(new_pd);
ictx->kc_prov_desc = new_pd;
ctx = &ictx->kc_glbl_ctx;
ctx->cc_provider = new_pd->pd_prov_handle;
ctx->cc_session = new_pd->pd_sid;
ctx->cc_provider_private = NULL;
}
/* We reuse areq by resetting the provider and context fields. */
KCF_PROV_REFRELE(old_pd);
KCF_PROV_REFHOLD(new_pd);
areq->an_provider = new_pd;
mutex_enter(&areq->an_lock);
areq->an_state = REQ_WAITING;
mutex_exit(&areq->an_lock);
error = kcf_disp_sw_request(areq);
return (error);
}
/*
* We're done with this framework context, so free it. Note that freeing
* framework context (kcf_context) frees the global context (crypto_ctx).
@@ -367,45 +169,6 @@ kcf_free_req(kcf_areq_node_t *areq)
kmem_cache_free(kcf_areq_cache, areq);
}
/*
* Add the request node to the end of the global queue.
*
* The caller should not hold the queue lock. Returns 0 if the
* request is successfully queued. Returns CRYPTO_BUSY if the limit
* on the number of jobs is exceeded.
*/
static int
kcf_enqueue(kcf_areq_node_t *node)
{
kcf_areq_node_t *tnode;
mutex_enter(&gswq->gs_lock);
if (gswq->gs_njobs >= gswq->gs_maxjobs) {
mutex_exit(&gswq->gs_lock);
return (CRYPTO_BUSY);
}
if (gswq->gs_last == NULL) {
gswq->gs_first = gswq->gs_last = node;
} else {
ASSERT(gswq->gs_last->an_next == NULL);
tnode = gswq->gs_last;
tnode->an_next = node;
gswq->gs_last = node;
node->an_prev = tnode;
}
gswq->gs_njobs++;
/* an_lock not needed here as we hold gs_lock */
node->an_state = REQ_WAITING;
mutex_exit(&gswq->gs_lock);
return (0);
}
/*
* kmem_cache_alloc constructor for sync request structure.
*/
@@ -592,107 +355,6 @@ kcf_sched_init(void)
}
}
/*
* Signal the waiting sync client.
*/
void
kcf_sop_done(kcf_sreq_node_t *sreq, int error)
{
mutex_enter(&sreq->sn_lock);
sreq->sn_state = REQ_DONE;
sreq->sn_rv = error;
cv_signal(&sreq->sn_cv);
mutex_exit(&sreq->sn_lock);
}
/*
* Callback the async client with the operation status.
* We free the async request node and possibly the context.
* We also handle any chain of requests hanging off
* the context.
*/
void
kcf_aop_done(kcf_areq_node_t *areq, int error)
{
kcf_op_type_t optype;
boolean_t skip_notify = B_FALSE;
kcf_context_t *ictx;
kcf_areq_node_t *nextreq;
/*
* Handle recoverable errors. This has to be done first
* before doing anything else in this routine so that
* we do not change the state of the request.
*/
if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) {
/*
* We try another provider, if one is available. Else
* we continue with the failure notification to the
* client.
*/
if (kcf_resubmit_request(areq) == CRYPTO_QUEUED)
return;
}
mutex_enter(&areq->an_lock);
areq->an_state = REQ_DONE;
mutex_exit(&areq->an_lock);
optype = (&areq->an_params)->rp_optype;
if ((ictx = areq->an_context) != NULL) {
/*
* After a request is removed from the request queue, it
* still stays on a chain of requests hanging off its
* context structure. It needs to be removed from this
* chain at this point.
*/
mutex_enter(&ictx->kc_in_use_lock);
nextreq = areq->an_ctxchain_next;
if (nextreq != NULL) {
mutex_enter(&nextreq->an_lock);
nextreq->an_is_my_turn = B_TRUE;
cv_signal(&nextreq->an_turn_cv);
mutex_exit(&nextreq->an_lock);
}
ictx->kc_req_chain_first = nextreq;
if (nextreq == NULL)
ictx->kc_req_chain_last = NULL;
mutex_exit(&ictx->kc_in_use_lock);
if (IS_SINGLE_OP(optype) || IS_FINAL_OP(optype)) {
ASSERT(nextreq == NULL);
KCF_CONTEXT_REFRELE(ictx);
} else if (error != CRYPTO_SUCCESS && IS_INIT_OP(optype)) {
/*
* NOTE - We do not release the context in case of update
* operations. We require the consumer to free it explicitly,
* in case it wants to abandon an update operation. This is done
* as there may be mechanisms in ECB mode that can continue
* even if an operation on a block fails.
*/
KCF_CONTEXT_REFRELE(ictx);
}
}
/*
* If the CRYPTO_NOTIFY_OPDONE flag is set, we always notify.
* If this flag is clear, we skip the notification provided
* there are no errors. We check this flag only for init or
* update operations; it is ignored for single, final, or
* atomic operations.
*/
skip_notify = (IS_UPDATE_OP(optype) || IS_INIT_OP(optype)) &&
(error == CRYPTO_SUCCESS);
if (!skip_notify) {
NOTIFY_CLIENT(areq, error);
}
kcf_reqid_delete(areq);
KCF_AREQ_REFRELE(areq);
}
/*
* Allocate the thread pool and initialize all the fields.
*/
@@ -714,37 +376,6 @@ kcfpool_alloc()
cv_init(&kcfpool->kp_user_cv, NULL, CV_DEFAULT, NULL);
}
/*
* Delete the async request from the hash table.
*/
static void
kcf_reqid_delete(kcf_areq_node_t *areq)
{
int indx;
kcf_areq_node_t *nextp, *prevp;
crypto_req_id_t id = GET_REQID(areq);
kcf_reqid_table_t *rt;
rt = kcf_reqid_table[id & REQID_TABLE_MASK];
indx = REQID_HASH(id);
mutex_enter(&rt->rt_lock);
nextp = areq->an_idnext;
prevp = areq->an_idprev;
if (nextp != NULL)
nextp->an_idprev = prevp;
if (prevp != NULL)
prevp->an_idnext = nextp;
else
rt->rt_idhash[indx] = nextp;
SET_REQID(areq, 0);
cv_broadcast(&areq->an_done);
mutex_exit(&rt->rt_lock);
}
/*
* Update kstats.
*/


@@ -410,36 +410,6 @@ typedef struct crypto_minor {
KCF_PROV_DIGEST_OPS(pd)->digest_init(ctx, mech, req) : \
CRYPTO_NOT_SUPPORTED)
/*
* The _ (underscore) in _digest is needed to avoid replacing the
* function digest().
*/
#define KCF_PROV_DIGEST(pd, ctx, data, _digest, req) ( \
(KCF_PROV_DIGEST_OPS(pd) && KCF_PROV_DIGEST_OPS(pd)->digest) ? \
KCF_PROV_DIGEST_OPS(pd)->digest(ctx, data, _digest, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_DIGEST_UPDATE(pd, ctx, data, req) ( \
(KCF_PROV_DIGEST_OPS(pd) && KCF_PROV_DIGEST_OPS(pd)->digest_update) ? \
KCF_PROV_DIGEST_OPS(pd)->digest_update(ctx, data, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_DIGEST_KEY(pd, ctx, key, req) ( \
(KCF_PROV_DIGEST_OPS(pd) && KCF_PROV_DIGEST_OPS(pd)->digest_key) ? \
KCF_PROV_DIGEST_OPS(pd)->digest_key(ctx, key, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_DIGEST_FINAL(pd, ctx, digest, req) ( \
(KCF_PROV_DIGEST_OPS(pd) && KCF_PROV_DIGEST_OPS(pd)->digest_final) ? \
KCF_PROV_DIGEST_OPS(pd)->digest_final(ctx, digest, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_DIGEST_ATOMIC(pd, session, mech, data, digest, req) ( \
(KCF_PROV_DIGEST_OPS(pd) && KCF_PROV_DIGEST_OPS(pd)->digest_atomic) ? \
KCF_PROV_DIGEST_OPS(pd)->digest_atomic( \
(pd)->pd_prov_handle, session, mech, data, digest, req) : \
CRYPTO_NOT_SUPPORTED)
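Each of these wrappers is the same null-guarded dispatch idiom. Written out as a plain function (a sketch for illustration only, not part of the original source), the digest case is equivalent to:

	static int
	kcf_prov_digest(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
	    crypto_data_t *data, crypto_data_t *digest, crypto_req_handle_t req)
	{
		const crypto_digest_ops_t *ops = KCF_PROV_DIGEST_OPS(pd);

		/* Provider has no digest ops vector or no digest entry point. */
		if (ops == NULL || ops->digest == NULL)
			return (CRYPTO_NOT_SUPPORTED);
		return (ops->digest(ctx, data, digest, req));
	}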
/*
* Wrappers for crypto_cipher_ops(9S) entry points.
*/
@@ -450,22 +420,6 @@ typedef struct crypto_minor {
req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_ENCRYPT(pd, ctx, plaintext, ciphertext, req) ( \
(KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->encrypt) ? \
KCF_PROV_CIPHER_OPS(pd)->encrypt(ctx, plaintext, ciphertext, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_ENCRYPT_UPDATE(pd, ctx, plaintext, ciphertext, req) ( \
(KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->encrypt_update) ? \
KCF_PROV_CIPHER_OPS(pd)->encrypt_update(ctx, plaintext, \
ciphertext, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_ENCRYPT_FINAL(pd, ctx, ciphertext, req) ( \
(KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->encrypt_final) ? \
KCF_PROV_CIPHER_OPS(pd)->encrypt_final(ctx, ciphertext, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_ENCRYPT_ATOMIC(pd, session, mech, key, plaintext, ciphertext, \
template, req) ( \
(KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->encrypt_atomic) ? \
@@ -474,28 +428,6 @@ typedef struct crypto_minor {
template, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_DECRYPT_INIT(pd, ctx, mech, key, template, req) ( \
(KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->decrypt_init) ? \
KCF_PROV_CIPHER_OPS(pd)->decrypt_init(ctx, mech, key, template, \
req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_DECRYPT(pd, ctx, ciphertext, plaintext, req) ( \
(KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->decrypt) ? \
KCF_PROV_CIPHER_OPS(pd)->decrypt(ctx, ciphertext, plaintext, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_DECRYPT_UPDATE(pd, ctx, ciphertext, plaintext, req) ( \
(KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->decrypt_update) ? \
KCF_PROV_CIPHER_OPS(pd)->decrypt_update(ctx, ciphertext, \
plaintext, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_DECRYPT_FINAL(pd, ctx, plaintext, req) ( \
(KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->decrypt_final) ? \
KCF_PROV_CIPHER_OPS(pd)->decrypt_final(ctx, plaintext, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_DECRYPT_ATOMIC(pd, session, mech, key, ciphertext, plaintext, \
template, req) ( \
(KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->decrypt_atomic) ? \
@@ -517,11 +449,6 @@ typedef struct crypto_minor {
* The _ (underscore) in _mac is needed to avoid replacing the
* function mac().
*/
#define KCF_PROV_MAC(pd, ctx, data, _mac, req) ( \
(KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac) ? \
KCF_PROV_MAC_OPS(pd)->mac(ctx, data, _mac, req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_MAC_UPDATE(pd, ctx, data, req) ( \
(KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_update) ? \
KCF_PROV_MAC_OPS(pd)->mac_update(ctx, data, req) : \
@@ -540,14 +467,6 @@ typedef struct crypto_minor {
req) : \
CRYPTO_NOT_SUPPORTED)
#define KCF_PROV_MAC_VERIFY_ATOMIC(pd, session, mech, key, data, mac, \
template, req) ( \
(KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_verify_atomic) ? \
KCF_PROV_MAC_OPS(pd)->mac_verify_atomic( \
(pd)->pd_prov_handle, session, mech, key, data, mac, template, \
req) : \
CRYPTO_NOT_SUPPORTED)
/*
* Wrappers for crypto_ctx_ops(9S) entry points.
*/


@@ -26,218 +26,8 @@
#ifndef _SYS_CRYPTO_OPS_IMPL_H
#define _SYS_CRYPTO_OPS_IMPL_H
/*
* Scheduler internal structures.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/zfs_context.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/common.h>
/*
* The parameters needed for each function group are batched
* in one structure. This is much simpler than having a
* separate structure for each function.
*
* In some cases, a field is generically named to keep the
* structure small. The comments indicate these cases.
*/
typedef struct kcf_digest_ops_params {
crypto_session_id_t do_sid;
crypto_mech_type_t do_framework_mechtype;
crypto_mechanism_t do_mech;
crypto_data_t *do_data;
crypto_data_t *do_digest;
crypto_key_t *do_digest_key; /* Argument for digest_key() */
} kcf_digest_ops_params_t;
typedef struct kcf_mac_ops_params {
crypto_session_id_t mo_sid;
crypto_mech_type_t mo_framework_mechtype;
crypto_mechanism_t mo_mech;
crypto_key_t *mo_key;
crypto_data_t *mo_data;
crypto_data_t *mo_mac;
crypto_spi_ctx_template_t mo_templ;
} kcf_mac_ops_params_t;
typedef struct kcf_encrypt_ops_params {
crypto_session_id_t eo_sid;
crypto_mech_type_t eo_framework_mechtype;
crypto_mechanism_t eo_mech;
crypto_key_t *eo_key;
crypto_data_t *eo_plaintext;
crypto_data_t *eo_ciphertext;
crypto_spi_ctx_template_t eo_templ;
} kcf_encrypt_ops_params_t;
typedef struct kcf_decrypt_ops_params {
crypto_session_id_t dop_sid;
crypto_mech_type_t dop_framework_mechtype;
crypto_mechanism_t dop_mech;
crypto_key_t *dop_key;
crypto_data_t *dop_ciphertext;
crypto_data_t *dop_plaintext;
crypto_spi_ctx_template_t dop_templ;
} kcf_decrypt_ops_params_t;
/*
* The operation type within a function group.
*/
typedef enum kcf_op_type {
/* common ops for all mechanisms */
KCF_OP_INIT = 1,
KCF_OP_SINGLE, /* pkcs11 sense. So, INIT is already done */
KCF_OP_UPDATE,
KCF_OP_FINAL,
KCF_OP_ATOMIC,
/* digest_key op */
KCF_OP_DIGEST_KEY,
/* mac specific op */
KCF_OP_MAC_VERIFY_ATOMIC,
/* mac/cipher specific op */
KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC,
} kcf_op_type_t;
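These optypes follow the usual PKCS#11 call patterns. For a hypothetical multi-part MAC, for example, the request stream would be

	KCF_OP_INIT, then zero or more KCF_OP_UPDATEs, then KCF_OP_FINAL

whereas KCF_OP_SINGLE completes a previously-initialized operation in one call, and KCF_OP_ATOMIC performs the whole operation with no context at all (hence the ASSERT(ctx == NULL) in common_submit_request above).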
/*
* The operation groups that need wrapping of parameters. This is somewhat
* similar to the function group type in spi.h except that this also includes
* all the functions that don't have a mechanism.
*
* The wrapper macros should never take these enum values as an argument.
* Rather, they are assigned in the macro itself since they are known
* from the macro name.
*/
typedef enum kcf_op_group {
KCF_OG_DIGEST = 1,
KCF_OG_MAC,
KCF_OG_ENCRYPT,
KCF_OG_DECRYPT,
} kcf_op_group_t;
/*
* The kcf_op_type_t enum values used here should be only for those
* operations for which there is a k-api routine in sys/crypto/api.h.
*/
#define IS_INIT_OP(ftype) ((ftype) == KCF_OP_INIT)
#define IS_SINGLE_OP(ftype) ((ftype) == KCF_OP_SINGLE)
#define IS_UPDATE_OP(ftype) ((ftype) == KCF_OP_UPDATE)
#define IS_FINAL_OP(ftype) ((ftype) == KCF_OP_FINAL)
#define IS_ATOMIC_OP(ftype) ( \
(ftype) == KCF_OP_ATOMIC || (ftype) == KCF_OP_MAC_VERIFY_ATOMIC)
/*
* Keep the parameters associated with a request around.
* We need to pass them to the SPI.
*/
typedef struct kcf_req_params {
kcf_op_group_t rp_opgrp;
kcf_op_type_t rp_optype;
union {
kcf_digest_ops_params_t digest_params;
kcf_mac_ops_params_t mac_params;
kcf_encrypt_ops_params_t encrypt_params;
kcf_decrypt_ops_params_t decrypt_params;
} rp_u;
} kcf_req_params_t;
/*
* The ioctl/k-api code should bundle the parameters into a kcf_req_params_t
* structure before calling a scheduler routine. The following macros are
* available for that purpose.
*
* For the most part, the macro arguments closely correspond to the
* function parameters. In some cases, we use generic names. The comments
* for the structure should indicate these cases.
*/
#define KCF_WRAP_DIGEST_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
_data, _digest) { \
kcf_digest_ops_params_t *dops = &(req)->rp_u.digest_params; \
crypto_mechanism_t *mechp = _mech; \
\
(req)->rp_opgrp = KCF_OG_DIGEST; \
(req)->rp_optype = ftype; \
dops->do_sid = _sid; \
if (mechp != NULL) { \
dops->do_mech = *mechp; \
dops->do_framework_mechtype = mechp->cm_type; \
} \
dops->do_digest_key = _key; \
dops->do_data = _data; \
dops->do_digest = _digest; \
}
#define KCF_WRAP_MAC_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
_data, _mac, _templ) { \
kcf_mac_ops_params_t *mops = &(req)->rp_u.mac_params; \
crypto_mechanism_t *mechp = _mech; \
\
(req)->rp_opgrp = KCF_OG_MAC; \
(req)->rp_optype = ftype; \
mops->mo_sid = _sid; \
if (mechp != NULL) { \
mops->mo_mech = *mechp; \
mops->mo_framework_mechtype = mechp->cm_type; \
} \
mops->mo_key = _key; \
mops->mo_data = _data; \
mops->mo_mac = _mac; \
mops->mo_templ = _templ; \
}
#define KCF_WRAP_ENCRYPT_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
_plaintext, _ciphertext, _templ) { \
kcf_encrypt_ops_params_t *cops = &(req)->rp_u.encrypt_params; \
crypto_mechanism_t *mechp = _mech; \
\
(req)->rp_opgrp = KCF_OG_ENCRYPT; \
(req)->rp_optype = ftype; \
cops->eo_sid = _sid; \
if (mechp != NULL) { \
cops->eo_mech = *mechp; \
cops->eo_framework_mechtype = mechp->cm_type; \
} \
cops->eo_key = _key; \
cops->eo_plaintext = _plaintext; \
cops->eo_ciphertext = _ciphertext; \
cops->eo_templ = _templ; \
}
#define KCF_WRAP_DECRYPT_OPS_PARAMS(req, ftype, _sid, _mech, _key, \
_ciphertext, _plaintext, _templ) { \
kcf_decrypt_ops_params_t *cops = &(req)->rp_u.decrypt_params; \
crypto_mechanism_t *mechp = _mech; \
\
(req)->rp_opgrp = KCF_OG_DECRYPT; \
(req)->rp_optype = ftype; \
cops->dop_sid = _sid; \
if (mechp != NULL) { \
cops->dop_mech = *mechp; \
cops->dop_framework_mechtype = mechp->cm_type; \
} \
cops->dop_key = _key; \
cops->dop_ciphertext = _ciphertext; \
cops->dop_plaintext = _plaintext; \
cops->dop_templ = _templ; \
}
#define KCF_SET_PROVIDER_MECHNUM(fmtype, pd, mechp) \
(mechp)->cm_type = \
KCF_TO_PROV_MECHNUM(pd, fmtype);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_CRYPTO_OPS_IMPL_H */


@@ -121,12 +121,6 @@ typedef struct kcf_sreq_node {
*/
int sn_rv;
/*
* parameters to call the SPI with. This can be
* a pointer as we know the caller context/stack stays.
*/
struct kcf_req_params *sn_params;
/* Internal context for this request */
struct kcf_context *sn_context;
@@ -148,12 +142,6 @@ typedef struct kcf_areq_node {
kcf_req_status_t an_state;
crypto_call_req_t an_reqarg;
/*
* parameters to call the SPI with. We need to
* save the params since the caller stack can go away.
*/
struct kcf_req_params an_params;
/*
* The next two fields should be NULL for operations that
* don't need a context.
@@ -448,10 +436,6 @@ extern crypto_ctx_t *kcf_new_ctx(crypto_call_req_t *, kcf_provider_desc_t *,
extern void kcf_sched_destroy(void);
extern void kcf_sched_init(void);
extern void kcf_sched_start(void);
extern void kcf_sop_done(kcf_sreq_node_t *, int);
extern void kcf_aop_done(kcf_areq_node_t *, int);
extern int common_submit_request(kcf_provider_desc_t *,
crypto_ctx_t *, kcf_req_params_t *, crypto_req_handle_t);
extern void kcf_free_context(kcf_context_t *);
extern int kcf_svc_wait(int *);


@@ -121,7 +121,6 @@ static const crypto_digest_ops_t sha2_digest_ops = {
.digest_init = sha2_digest_init,
.digest = sha2_digest,
.digest_update = sha2_digest_update,
.digest_key = NULL,
.digest_final = sha2_digest_final,
.digest_atomic = sha2_digest_atomic
};


@@ -65,7 +65,6 @@ static const crypto_digest_ops_t skein_digest_ops = {
.digest_init = skein_digest_init,
.digest = skein_digest,
.digest_update = skein_update,
.digest_key = NULL,
.digest_final = skein_final,
.digest_atomic = skein_digest_atomic
};