module: icp: remove unused CRYPTO_ALWAYS_QUEUE
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
Closes #12901
parent 65a613b70d
commit eb1e09b7ec
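
With no in-tree caller ever setting CRYPTO_ALWAYS_QUEUE, the scheduler macro CHECK_FASTPATH(crq, pd), defined as ((crq) == NULL || !((crq)->cr_flag & CRYPTO_ALWAYS_QUEUE)), always evaluated to true, so the "fast path for SW providers" was the only path ever taken. This commit therefore drops the flag, the dead else branches that queued work through kcf_submit_request(), the async request-node machinery behind them, and the now-meaningless crypto_call_req_t argument of crypto_mac_update() and crypto_mac_final(). A minimal caller-side sketch of the API change (names follow the zio_crypt hunks below; setup of the MAC context ctx and the crypto_data_t cd is elided):

    /* Before: a NULL crypto_call_req_t already meant "run synchronously". */
    ret = crypto_mac_update(ctx, &cd, NULL);
    ret = crypto_mac_final(ctx, &cd, NULL);

    /* After: the argument is gone; the call is always synchronous. */
    ret = crypto_mac_update(ctx, &cd);
    ret = crypto_mac_final(ctx, &cd);
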
@@ -41,7 +41,6 @@ typedef void *crypto_ctx_template_t;
 typedef uint32_t crypto_call_flag_t;
 
 /* crypto_call_flag's values */
-#define CRYPTO_ALWAYS_QUEUE	0x00000001	/* ALWAYS queue the req. */
 #define CRYPTO_NOTIFY_OPDONE	0x00000002	/* Notify intermediate steps */
 #define CRYPTO_SKIP_REQID	0x00000004	/* Skip request ID generation */
 #define CRYPTO_RESTRICTED	0x00000008	/* cannot use restricted prov */

@@ -73,18 +72,10 @@ extern void crypto_destroy_ctx_template(crypto_ctx_template_t tmpl);
 extern int crypto_mac(crypto_mechanism_t *mech, crypto_data_t *data,
     crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac,
     crypto_call_req_t *cr);
-extern int crypto_mac_verify(crypto_mechanism_t *mech, crypto_data_t *data,
-    crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac,
-    crypto_call_req_t *cr);
 extern int crypto_mac_init(crypto_mechanism_t *mech, crypto_key_t *key,
     crypto_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *cr);
-extern int crypto_mac_init_prov(crypto_provider_t, crypto_session_id_t,
-    crypto_mechanism_t *, crypto_key_t *, crypto_ctx_template_t,
-    crypto_context_t *, crypto_call_req_t *);
-extern int crypto_mac_update(crypto_context_t ctx, crypto_data_t *data,
-    crypto_call_req_t *cr);
-extern int crypto_mac_final(crypto_context_t ctx, crypto_data_t *data,
-    crypto_call_req_t *cr);
+extern int crypto_mac_update(crypto_context_t ctx, crypto_data_t *data);
+extern int crypto_mac_final(crypto_context_t ctx, crypto_data_t *data);
 
 /*
  * Single and multi-part encryption operations.

@@ -75,7 +75,6 @@ crypto_encrypt(crypto_mechanism_t *mech, crypto_data_t *plaintext,
 {
     int error;
     kcf_mech_entry_t *me;
-    kcf_req_params_t params;
     kcf_provider_desc_t *pd;
     kcf_ctx_template_t *ctx_tmpl;
     crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;

@@ -93,21 +92,11 @@ retry:
     if (((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL))
         spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
 
-    /* The fast path for SW providers. */
-    if (CHECK_FASTPATH(crq, pd)) {
-        crypto_mechanism_t lmech;
-
-        lmech = *mech;
-        KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
-
-        error = KCF_PROV_ENCRYPT_ATOMIC(pd, pd->pd_sid, &lmech, key,
-            plaintext, ciphertext, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
-        KCF_PROV_INCRSTATS(pd, error);
-    } else {
-        KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, pd->pd_sid,
-            mech, key, plaintext, ciphertext, spi_ctx_tmpl);
-        error = kcf_submit_request(pd, NULL, crq, &params);
-    }
+    crypto_mechanism_t lmech = *mech;
+    KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
+    error = KCF_PROV_ENCRYPT_ATOMIC(pd, pd->pd_sid, &lmech, key,
+        plaintext, ciphertext, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
+    KCF_PROV_INCRSTATS(pd, error);
 
     if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
         IS_RECOVERABLE(error)) {

@@ -164,7 +153,6 @@ crypto_decrypt(crypto_mechanism_t *mech, crypto_data_t *ciphertext,
 {
     int error;
     kcf_mech_entry_t *me;
-    kcf_req_params_t params;
     kcf_provider_desc_t *pd;
     kcf_ctx_template_t *ctx_tmpl;
     crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;

@@ -182,21 +170,12 @@ retry:
     if (((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL))
         spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
 
-    /* The fast path for SW providers. */
-    if (CHECK_FASTPATH(crq, pd)) {
-        crypto_mechanism_t lmech;
-
-        lmech = *mech;
-        KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
-
-        error = KCF_PROV_DECRYPT_ATOMIC(pd, pd->pd_sid, &lmech, key,
-            ciphertext, plaintext, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
-        KCF_PROV_INCRSTATS(pd, error);
-    } else {
-        KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, pd->pd_sid,
-            mech, key, ciphertext, plaintext, spi_ctx_tmpl);
-        error = kcf_submit_request(pd, NULL, crq, &params);
-    }
+    crypto_mechanism_t lmech = *mech;
+    KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
+    error = KCF_PROV_DECRYPT_ATOMIC(pd, pd->pd_sid, &lmech, key,
+        ciphertext, plaintext, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
+    KCF_PROV_INCRSTATS(pd, error);
 
     if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
         IS_RECOVERABLE(error)) {

@@ -94,7 +94,6 @@ crypto_mac(crypto_mechanism_t *mech, crypto_data_t *data,
 {
     int error;
     kcf_mech_entry_t *me;
-    kcf_req_params_t params;
     kcf_provider_desc_t *pd;
     kcf_ctx_template_t *ctx_tmpl;
     crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;

@@ -112,85 +111,11 @@ retry:
     if (((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL))
         spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
 
-    /* The fast path for SW providers. */
-    if (CHECK_FASTPATH(crq, pd)) {
-        crypto_mechanism_t lmech;
-
-        lmech = *mech;
-        KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
-
-        error = KCF_PROV_MAC_ATOMIC(pd, pd->pd_sid, &lmech, key, data,
-            mac, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
-        KCF_PROV_INCRSTATS(pd, error);
-    } else {
-        KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_ATOMIC,
-            pd->pd_sid, mech, key, data, mac, spi_ctx_tmpl);
-
-        error = kcf_submit_request(pd, NULL, crq, &params);
-    }
-
-    if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
-        IS_RECOVERABLE(error)) {
-        /* Add pd to the linked list of providers tried. */
-        if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
-            goto retry;
-    }
-
-    if (list != NULL)
-        kcf_free_triedlist(list);
-
-    KCF_PROV_REFRELE(pd);
-    return (error);
-}
-
-/*
- * Single part operation to compute the MAC corresponding to the specified
- * 'data' and to verify that it matches the MAC specified by 'mac'.
- * The other arguments are the same as the function crypto_mac_prov().
- * Relies on the KCF scheduler to choose a provider.
- */
-int
-crypto_mac_verify(crypto_mechanism_t *mech, crypto_data_t *data,
-    crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac,
-    crypto_call_req_t *crq)
-{
-    int error;
-    kcf_mech_entry_t *me;
-    kcf_req_params_t params;
-    kcf_provider_desc_t *pd;
-    kcf_ctx_template_t *ctx_tmpl;
-    crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
-    kcf_prov_tried_t *list = NULL;
-
-retry:
-    /* The pd is returned held */
-    if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
-        list, CRYPTO_FG_MAC_ATOMIC, CHECK_RESTRICT(crq))) == NULL) {
-        if (list != NULL)
-            kcf_free_triedlist(list);
-        return (error);
-    }
-
-    if (((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL))
-        spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
-
-    /* The fast path for SW providers. */
-    if (CHECK_FASTPATH(crq, pd)) {
-        crypto_mechanism_t lmech;
-
-        lmech = *mech;
-        KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
-
-        error = KCF_PROV_MAC_VERIFY_ATOMIC(pd, pd->pd_sid, &lmech, key,
-            data, mac, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
-        KCF_PROV_INCRSTATS(pd, error);
-    } else {
-        KCF_WRAP_MAC_OPS_PARAMS(&params,
-            KCF_OP_MAC_VERIFY_ATOMIC, pd->pd_sid, mech,
-            key, data, mac, spi_ctx_tmpl);
-
-        error = kcf_submit_request(pd, NULL, crq, &params);
-    }
+    crypto_mechanism_t lmech = *mech;
+    KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
+    error = KCF_PROV_MAC_ATOMIC(pd, pd->pd_sid, &lmech, key, data,
+        mac, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
+    KCF_PROV_INCRSTATS(pd, error);
 
     if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
         IS_RECOVERABLE(error)) {

@@ -242,14 +167,13 @@ retry:
  * Returns:
  * See comment in the beginning of the file.
  */
-int
+static int
 crypto_mac_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
     crypto_mechanism_t *mech, crypto_key_t *key, crypto_spi_ctx_template_t tmpl,
     crypto_context_t *ctxp, crypto_call_req_t *crq)
 {
     int rv;
     crypto_ctx_t *ctx;
-    kcf_req_params_t params;
     kcf_provider_desc_t *pd = provider;
     kcf_provider_desc_t *real_provider = pd;
 

@@ -259,20 +183,11 @@ crypto_mac_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
     if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL)
         return (CRYPTO_HOST_MEMORY);
 
-    /* The fast path for SW providers. */
-    if (CHECK_FASTPATH(crq, pd)) {
-        crypto_mechanism_t lmech;
-
-        lmech = *mech;
-        KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech);
-        rv = KCF_PROV_MAC_INIT(real_provider, ctx, &lmech, key, tmpl,
-            KCF_SWFP_RHNDL(crq));
-        KCF_PROV_INCRSTATS(pd, rv);
-    } else {
-        KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_INIT, sid, mech, key,
-            NULL, NULL, tmpl);
-        rv = kcf_submit_request(real_provider, ctx, crq, &params);
-    }
+    crypto_mechanism_t lmech = *mech;
+    KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech);
+    rv = KCF_PROV_MAC_INIT(real_provider, ctx, &lmech, key, tmpl,
+        KCF_SWFP_RHNDL(crq));
+    KCF_PROV_INCRSTATS(pd, rv);
 
     if ((rv == CRYPTO_SUCCESS) || (rv == CRYPTO_QUEUED))
         *ctxp = (crypto_context_t)ctx;

@@ -342,11 +257,9 @@ retry:
  * Arguments:
  *	context: A crypto_context_t initialized by mac_init().
  *	data: The message part to be MAC'ed
- *	cr: crypto_call_req_t calling conditions and call back info.
  *
  * Description:
- *	Asynchronously submits a request for, or synchronously performs a
- *	part of a MAC operation.
+ *	Synchronously performs a part of a MAC operation.
  *
  * Context:
  *	Process or interrupt, according to the semantics dictated by the 'cr'.

@@ -355,14 +268,11 @@ retry:
  * See comment in the beginning of the file.
  */
 int
-crypto_mac_update(crypto_context_t context, crypto_data_t *data,
-    crypto_call_req_t *cr)
+crypto_mac_update(crypto_context_t context, crypto_data_t *data)
 {
     crypto_ctx_t *ctx = (crypto_ctx_t *)context;
     kcf_context_t *kcf_ctx;
     kcf_provider_desc_t *pd;
-    kcf_req_params_t params;
-    int rv;
 
     if ((ctx == NULL) ||
         ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||

@@ -370,16 +280,8 @@ crypto_mac_update(crypto_context_t context, crypto_data_t *data,
         return (CRYPTO_INVALID_CONTEXT);
     }
 
-    /* The fast path for SW providers. */
-    if (CHECK_FASTPATH(cr, pd)) {
-        rv = KCF_PROV_MAC_UPDATE(pd, ctx, data, NULL);
-        KCF_PROV_INCRSTATS(pd, rv);
-    } else {
-        KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_UPDATE,
-            ctx->cc_session, NULL, NULL, data, NULL, NULL);
-        rv = kcf_submit_request(pd, ctx, cr, &params);
-    }
-
+    int rv = KCF_PROV_MAC_UPDATE(pd, ctx, data, NULL);
+    KCF_PROV_INCRSTATS(pd, rv);
     return (rv);
 }
 

@@ -389,11 +291,9 @@ crypto_mac_update(crypto_context_t context, crypto_data_t *data,
  * Arguments:
  *	context: A crypto_context_t initialized by mac_init().
  *	mac: Storage for the message authentication code.
- *	cr: crypto_call_req_t calling conditions and call back info.
  *
  * Description:
- *	Asynchronously submits a request for, or synchronously performs a
- *	part of a message authentication operation.
+ *	Synchronously performs a part of a message authentication operation.
 *
 * Context:
 *	Process or interrupt, according to the semantics dictated by the 'cr'.

@@ -402,14 +302,11 @@ crypto_mac_update(crypto_context_t context, crypto_data_t *data,
  * See comment in the beginning of the file.
  */
 int
-crypto_mac_final(crypto_context_t context, crypto_data_t *mac,
-    crypto_call_req_t *cr)
+crypto_mac_final(crypto_context_t context, crypto_data_t *mac)
 {
     crypto_ctx_t *ctx = (crypto_ctx_t *)context;
     kcf_context_t *kcf_ctx;
     kcf_provider_desc_t *pd;
-    kcf_req_params_t params;
-    int rv;
 
     if ((ctx == NULL) ||
         ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||

@@ -417,15 +314,8 @@ crypto_mac_final(crypto_context_t context, crypto_data_t *mac,
         return (CRYPTO_INVALID_CONTEXT);
     }
 
-    /* The fast path for SW providers. */
-    if (CHECK_FASTPATH(cr, pd)) {
-        rv = KCF_PROV_MAC_FINAL(pd, ctx, mac, NULL);
-        KCF_PROV_INCRSTATS(pd, rv);
-    } else {
-        KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_FINAL,
-            ctx->cc_session, NULL, NULL, NULL, mac, NULL);
-        rv = kcf_submit_request(pd, ctx, cr, &params);
-    }
+    int rv = KCF_PROV_MAC_FINAL(pd, ctx, mac, NULL);
+    KCF_PROV_INCRSTATS(pd, rv);
 
     /* Release the hold done in kcf_new_ctx() during init step. */
     KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx);

@@ -434,8 +324,6 @@ crypto_mac_final(crypto_context_t context, crypto_data_t *mac,
 
 #if defined(_KERNEL)
 EXPORT_SYMBOL(crypto_mac);
-EXPORT_SYMBOL(crypto_mac_verify);
-EXPORT_SYMBOL(crypto_mac_init_prov);
 EXPORT_SYMBOL(crypto_mac_init);
 EXPORT_SYMBOL(crypto_mac_update);
 EXPORT_SYMBOL(crypto_mac_final);

@@ -68,7 +68,6 @@ static int kcf_disp_sw_request(kcf_areq_node_t *);
 static int kcf_enqueue(kcf_areq_node_t *);
 static void kcfpool_alloc(void);
 static void kcf_reqid_delete(kcf_areq_node_t *areq);
-static crypto_req_id_t kcf_reqid_insert(kcf_areq_node_t *areq);
 static int kcf_misc_kstat_update(kstat_t *ksp, int rw);
 
 /*

@@ -107,65 +106,6 @@ kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,
     return (ctx);
 }
 
-/*
- * Allocate a new async request node.
- *
- * ictx - Framework private context pointer
- * crq - Has callback function and argument. Should be non NULL.
- * req - The parameters to pass to the SPI
- */
-static kcf_areq_node_t *
-kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
-    crypto_call_req_t *crq, kcf_req_params_t *req)
-{
-    kcf_areq_node_t *arptr, *areq;
-
-    ASSERT(crq != NULL);
-    arptr = kmem_cache_alloc(kcf_areq_cache, KM_NOSLEEP);
-    if (arptr == NULL)
-        return (NULL);
-
-    arptr->an_state = REQ_ALLOCATED;
-    arptr->an_reqarg = *crq;
-    arptr->an_params = *req;
-    arptr->an_context = ictx;
-
-    arptr->an_next = arptr->an_prev = NULL;
-    KCF_PROV_REFHOLD(pd);
-    arptr->an_provider = pd;
-    arptr->an_tried_plist = NULL;
-    arptr->an_refcnt = 1;
-    arptr->an_idnext = arptr->an_idprev = NULL;
-
-    /*
-     * Requests for context-less operations do not use the
-     * fields - an_is_my_turn, and an_ctxchain_next.
-     */
-    if (ictx == NULL)
-        return (arptr);
-
-    KCF_CONTEXT_REFHOLD(ictx);
-    /*
-     * Chain this request to the context.
-     */
-    mutex_enter(&ictx->kc_in_use_lock);
-    arptr->an_ctxchain_next = NULL;
-    if ((areq = ictx->kc_req_chain_last) == NULL) {
-        arptr->an_is_my_turn = B_TRUE;
-        ictx->kc_req_chain_last =
-            ictx->kc_req_chain_first = arptr;
-    } else {
-        ASSERT(ictx->kc_req_chain_first != NULL);
-        arptr->an_is_my_turn = B_FALSE;
-        /* Insert the new request to the end of the chain. */
-        areq->an_ctxchain_next = arptr;
-        ictx->kc_req_chain_last = arptr;
-    }
-    mutex_exit(&ictx->kc_in_use_lock);
-
-    return (arptr);
-}
-
 /*
  * Queue the request node and do one of the following:
  * - If there is an idle thread signal it to run.

@@ -362,80 +302,6 @@ kcf_resubmit_request(kcf_areq_node_t *areq)
     return (error);
 }
 
-/*
- * Routine called by both ioctl and k-api. The consumer should
- * bundle the parameters into a kcf_req_params_t structure. A bunch
- * of macros are available in ops_impl.h for this bundling. They are:
- *
- *	KCF_WRAP_DIGEST_OPS_PARAMS()
- *	KCF_WRAP_MAC_OPS_PARAMS()
- *	KCF_WRAP_ENCRYPT_OPS_PARAMS()
- *	KCF_WRAP_DECRYPT_OPS_PARAMS() ... etc.
- *
- * It is the caller's responsibility to free the ctx argument when
- * appropriate. See the KCF_CONTEXT_COND_RELEASE macro for details.
- */
-int
-kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
-    crypto_call_req_t *crq, kcf_req_params_t *params)
-{
-    int error = CRYPTO_SUCCESS;
-    kcf_areq_node_t *areq;
-    kcf_context_t *kcf_ctx;
-
-    kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;
-
-    /* Synchronous */
-    if (crq == NULL) {
-        error = common_submit_request(pd, ctx, params,
-            KCF_RHNDL(KM_SLEEP));
-    } else { /* Asynchronous */
-        if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
-            /*
-             * This case has less overhead since there is
-             * no switching of context.
-             */
-            error = common_submit_request(pd, ctx, params,
-                KCF_RHNDL(KM_NOSLEEP));
-        } else {
-            /*
-             * CRYPTO_ALWAYS_QUEUE is set. We need to
-             * queue the request and return.
-             */
-            areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
-                params);
-            if (areq == NULL)
-                error = CRYPTO_HOST_MEMORY;
-            else {
-                if (!(crq->cr_flag
-                    & CRYPTO_SKIP_REQID)) {
-                    /*
-                     * Set the request handle. We have to
-                     * do this before dispatching the
-                     * request.
-                     */
-                    crq->cr_reqid = kcf_reqid_insert(areq);
-                }
-
-                error = kcf_disp_sw_request(areq);
-                /*
-                 * There is an error processing this
-                 * request. Remove the handle and
-                 * release the request structure.
-                 */
-                if (error != CRYPTO_QUEUED) {
-                    if (!(crq->cr_flag
-                        & CRYPTO_SKIP_REQID))
-                        kcf_reqid_delete(areq);
-                    KCF_AREQ_REFRELE(areq);
-                }
-            }
-        }
-    }
-
-    return (error);
-}
-
 /*
  * We're done with this framework context, so free it. Note that freeing
  * framework context (kcf_context) frees the global context (crypto_ctx).

@@ -852,40 +718,6 @@ kcfpool_alloc()
     cv_init(&kcfpool->kp_user_cv, NULL, CV_DEFAULT, NULL);
 }
 
-/*
- * Insert the async request in the hash table after assigning it
- * an ID. Returns the ID.
- *
- * The ID is used by the caller to pass as an argument to a
- * cancel_req() routine later.
- */
-static crypto_req_id_t
-kcf_reqid_insert(kcf_areq_node_t *areq)
-{
-    int indx;
-    crypto_req_id_t id;
-    kcf_areq_node_t *headp;
-    kcf_reqid_table_t *rt;
-
-    rt = kcf_reqid_table[CPU_SEQID_UNSTABLE & REQID_TABLE_MASK];
-
-    mutex_enter(&rt->rt_lock);
-
-    rt->rt_curid = id =
-        (rt->rt_curid - REQID_COUNTER_LOW) | REQID_COUNTER_HIGH;
-    SET_REQID(areq, id);
-    indx = REQID_HASH(id);
-    headp = areq->an_idnext = rt->rt_idhash[indx];
-    areq->an_idprev = NULL;
-    if (headp != NULL)
-        headp->an_idprev = areq;
-
-    rt->rt_idhash[indx] = areq;
-    mutex_exit(&rt->rt_lock);
-
-    return (id);
-}
-
 /*
  * Delete the async request from the hash table.
  */

@@ -59,9 +59,6 @@ typedef enum kcf_call_type {
 #define CHECK_RESTRICT(crq) (crq != NULL && \
     ((crq)->cr_flag & CRYPTO_RESTRICTED))
 
-#define CHECK_FASTPATH(crq, pd) ((crq) == NULL || \
-    !((crq)->cr_flag & CRYPTO_ALWAYS_QUEUE))
-
 #define KCF_KMFLAG(crq) (((crq) == NULL) ? KM_SLEEP : KM_NOSLEEP)
 
 /*

@@ -452,8 +449,6 @@ extern kcf_provider_desc_t *kcf_get_mech_provider(crypto_mech_type_t,
     boolean_t);
 extern crypto_ctx_t *kcf_new_ctx(crypto_call_req_t *, kcf_provider_desc_t *,
     crypto_session_id_t);
-extern int kcf_submit_request(kcf_provider_desc_t *, crypto_ctx_t *,
-    crypto_call_req_t *, kcf_req_params_t *);
 extern void kcf_sched_destroy(void);
 extern void kcf_sched_init(void);
 extern void kcf_sched_start(void);

@@ -1004,7 +1004,7 @@ zio_crypt_bp_do_hmac_updates(crypto_context_t ctx, uint64_t version,
     cd.cd_raw.iov_base = (char *)&bab;
     cd.cd_raw.iov_len = cd.cd_length;
 
-    ret = crypto_mac_update(ctx, &cd, NULL);
+    ret = crypto_mac_update(ctx, &cd);
     if (ret != CRYPTO_SUCCESS) {
         ret = SET_ERROR(EIO);
         goto error;

@@ -1075,7 +1075,7 @@ zio_crypt_do_dnode_hmac_updates(crypto_context_t ctx, uint64_t version,
     cd.cd_raw.iov_base = (char *)adnp;
     cd.cd_raw.iov_len = cd.cd_length;
 
-    ret = crypto_mac_update(ctx, &cd, NULL);
+    ret = crypto_mac_update(ctx, &cd);
     if (ret != CRYPTO_SUCCESS) {
         ret = SET_ERROR(EIO);
         goto error;

@@ -1160,7 +1160,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
     cd.cd_raw.iov_base = (char *)&intval;
     cd.cd_raw.iov_len = cd.cd_length;
 
-    ret = crypto_mac_update(ctx, &cd, NULL);
+    ret = crypto_mac_update(ctx, &cd);
     if (ret != CRYPTO_SUCCESS) {
         ret = SET_ERROR(EIO);
         goto error;

@@ -1178,7 +1178,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
     cd.cd_raw.iov_base = (char *)&intval;
     cd.cd_raw.iov_len = cd.cd_length;
 
-    ret = crypto_mac_update(ctx, &cd, NULL);
+    ret = crypto_mac_update(ctx, &cd);
     if (ret != CRYPTO_SUCCESS) {
         ret = SET_ERROR(EIO);
         goto error;

@@ -1195,7 +1195,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
     cd.cd_raw.iov_base = (char *)raw_portable_mac;
     cd.cd_raw.iov_len = cd.cd_length;
 
-    ret = crypto_mac_final(ctx, &cd, NULL);
+    ret = crypto_mac_final(ctx, &cd);
     if (ret != CRYPTO_SUCCESS) {
         ret = SET_ERROR(EIO);
         goto error;

@@ -1253,7 +1253,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
     cd.cd_raw.iov_base = (char *)&intval;
     cd.cd_raw.iov_len = cd.cd_length;
 
-    ret = crypto_mac_update(ctx, &cd, NULL);
+    ret = crypto_mac_update(ctx, &cd);
     if (ret != CRYPTO_SUCCESS) {
         ret = SET_ERROR(EIO);
         goto error;

@@ -1287,7 +1287,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
     cd.cd_raw.iov_base = (char *)raw_local_mac;
     cd.cd_raw.iov_len = cd.cd_length;
 
-    ret = crypto_mac_final(ctx, &cd, NULL);
+    ret = crypto_mac_final(ctx, &cd);
     if (ret != CRYPTO_SUCCESS) {
         ret = SET_ERROR(EIO);
         goto error;

@@ -114,15 +114,15 @@ hkdf_sha512_expand(uint8_t *extract_key, uint8_t *info, uint_t info_len,
     if (ret != CRYPTO_SUCCESS)
         return (SET_ERROR(EIO));
 
-    ret = crypto_mac_update(ctx, &T_cd, NULL);
+    ret = crypto_mac_update(ctx, &T_cd);
     if (ret != CRYPTO_SUCCESS)
         return (SET_ERROR(EIO));
 
-    ret = crypto_mac_update(ctx, &info_cd, NULL);
+    ret = crypto_mac_update(ctx, &info_cd);
     if (ret != CRYPTO_SUCCESS)
         return (SET_ERROR(EIO));
 
-    ret = crypto_mac_update(ctx, &c_cd, NULL);
+    ret = crypto_mac_update(ctx, &c_cd);
     if (ret != CRYPTO_SUCCESS)
         return (SET_ERROR(EIO));
 

@@ -130,7 +130,7 @@ hkdf_sha512_expand(uint8_t *extract_key, uint8_t *info, uint_t info_len,
     T_cd.cd_length = T_len;
     T_cd.cd_raw.iov_len = T_cd.cd_length;
 
-    ret = crypto_mac_final(ctx, &T_cd, NULL);
+    ret = crypto_mac_final(ctx, &T_cd);
     if (ret != CRYPTO_SUCCESS)
         return (SET_ERROR(EIO));