/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#ifndef _SYS_CRYPTO_SCHED_IMPL_H
#define	_SYS_CRYPTO_SCHED_IMPL_H

/*
 * Scheduler internal structures.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/types.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/door.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/common.h>
#include <sys/crypto/ops_impl.h>

typedef void (kcf_func_t)(void *, int);

typedef enum kcf_req_status {
	REQ_ALLOCATED = 1,
	REQ_WAITING,		/* At the framework level */
	REQ_INPROGRESS,		/* At the provider level */
	REQ_DONE,
	REQ_CANCELED
} kcf_req_status_t;

typedef enum kcf_call_type {
	CRYPTO_SYNCH = 1,
	CRYPTO_ASYNCH
} kcf_call_type_t;

#define	CHECK_FASTPATH(crq, pd) (((crq) == NULL ||	\
	!((crq)->cr_flag & CRYPTO_ALWAYS_QUEUE)) &&	\
	(pd)->pd_prov_type == CRYPTO_SW_PROVIDER)

#define	KCF_KMFLAG(crq)	(((crq) == NULL) ? KM_SLEEP : KM_NOSLEEP)

/*
 * The framework keeps an internal handle to use in the adaptive
 * asynchronous case. This is the case when a client has the
 * CRYPTO_ALWAYS_QUEUE bit clear and a software provider is used for
 * the request. The request is completed in the context of the calling
 * thread and kernel memory must be allocated with KM_NOSLEEP.
 *
 * The framework passes a pointer to the handle in the crypto_req_handle_t
 * argument when it calls the SPI of the software provider. The macros
 * KCF_RHNDL() and KCF_SWFP_RHNDL() are used to do this.
 *
 * When a provider asks the framework for the kmflag value via
 * crypto_kmflag(9S), we use the REQHNDL2_KMFLAG() macro.
 */
extern ulong_t kcf_swprov_hndl;
#define	KCF_RHNDL(kmflag) (((kmflag) == KM_SLEEP) ? NULL : &kcf_swprov_hndl)
#define	KCF_SWFP_RHNDL(crq) (((crq) == NULL) ? NULL : &kcf_swprov_hndl)
#define	REQHNDL2_KMFLAG(rhndl) \
	(((rhndl) == &kcf_swprov_hndl) ? KM_NOSLEEP : KM_SLEEP)
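
/*
 * Illustrative sketch (not the actual dispatch code) of how the
 * fastpath check and the handle macros above fit together when a
 * request targets a software provider. The helpers do_spi_call() and
 * queue_request() are hypothetical placeholders.
 *
 *	int
 *	submit_to_sw_provider(kcf_provider_desc_t *pd, crypto_call_req_t *crq)
 *	{
 *		if (CHECK_FASTPATH(crq, pd)) {
 *			// Complete in the caller's context. The SPI gets a
 *			// handle from which crypto_kmflag(9S) derives the
 *			// correct kmflag via REQHNDL2_KMFLAG().
 *			return (do_spi_call(pd, KCF_SWFP_RHNDL(crq)));
 *		}
 *		// Otherwise hand the request to the service threads.
 *		return (queue_request(pd, crq));
 *	}
 */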

/* Internal call_req flags. They start after the public ones in api.h */

#define	CRYPTO_SETDUAL	0x00001000	/* Set the 'cont' boolean before */
					/* submitting the request */
#define	KCF_ISDUALREQ(crq)	\
	(((crq) == NULL) ? B_FALSE : (crq->cr_flag & CRYPTO_SETDUAL))

typedef struct kcf_prov_tried {
	kcf_provider_desc_t	*pt_pd;
	struct kcf_prov_tried	*pt_next;
} kcf_prov_tried_t;

/* Must be different from KM_SLEEP and KM_NOSLEEP */
#define	KCF_HOLD_PROV	0x1000

#define	IS_FG_SUPPORTED(mdesc, fg)		\
	(((mdesc)->pm_mech_info.cm_func_group_mask & (fg)) != 0)

#define	IS_PROVIDER_TRIED(pd, tlist)		\
	(tlist != NULL && is_in_triedlist(pd, tlist))

#define	IS_RECOVERABLE(error)			\
	(error == CRYPTO_BUFFER_TOO_BIG ||	\
	error == CRYPTO_BUSY ||			\
	error == CRYPTO_DEVICE_ERROR ||		\
	error == CRYPTO_DEVICE_MEMORY ||	\
	error == CRYPTO_KEY_SIZE_RANGE ||	\
	error == CRYPTO_NO_PERMISSION)

#define	KCF_ATOMIC_INCR(x)	atomic_add_32(&(x), 1)
#define	KCF_ATOMIC_DECR(x)	atomic_add_32(&(x), -1)
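
/*
 * Illustrative sketch of how the tried-list helpers cooperate with
 * IS_RECOVERABLE(): when a provider fails with a recoverable error,
 * it is remembered and another provider is asked for. This is only an
 * outline of the pattern, not the scheduler's actual retry loop.
 *
 *	list = NULL;
 *	for (;;) {
 *		pd = kcf_get_mech_provider(mech_type, key, &me, &error,
 *		    list, fg, len);
 *		if (pd == NULL)
 *			break;			// no providers left to try
 *		error = ...submit the request to pd...;
 *		if (error == CRYPTO_SUCCESS || !IS_RECOVERABLE(error))
 *			break;
 *		// Remember the failed provider so it is not retried.
 *		(void) kcf_insert_triedlist(&list, pd, KM_SLEEP);
 *	}
 *	if (list != NULL)
 *		kcf_free_triedlist(list);
 */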

/*
 * Node structure for synchronous requests.
 */
typedef struct kcf_sreq_node {
	/* Should always be the first field in this structure */
	kcf_call_type_t		sn_type;
	/*
	 * sn_cv and sn_lock are used to wait for the
	 * operation to complete. sn_lock also protects
	 * the sn_state field.
	 */
	kcondvar_t		sn_cv;
	kmutex_t		sn_lock;
	kcf_req_status_t	sn_state;

	/*
	 * Return value from the operation. This will be
	 * one of the CRYPTO_* errors defined in common.h.
	 */
	int			sn_rv;

	/*
	 * parameters to call the SPI with. This can be a
	 * pointer because the caller's context/stack stays
	 * valid until the synchronous request completes.
	 */
	struct kcf_req_params	*sn_params;

	/* Internal context for this request */
	struct kcf_context	*sn_context;

	/* Provider handling this request */
	kcf_provider_desc_t	*sn_provider;

	kcf_prov_cpu_t		*sn_mp;
} kcf_sreq_node_t;
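
/*
 * Illustrative sketch of the synchronous wait protocol implied by the
 * fields above (the actual code lives in the scheduler; this is only
 * an outline): the submitting thread sleeps on sn_cv under sn_lock
 * until the request is marked REQ_DONE by kcf_sop_done().
 *
 *	mutex_enter(&sreq->sn_lock);
 *	while (sreq->sn_state < REQ_DONE)
 *		cv_wait(&sreq->sn_cv, &sreq->sn_lock);
 *	error = sreq->sn_rv;
 *	mutex_exit(&sreq->sn_lock);
 */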

/*
 * Node structure for asynchronous requests. A node can be on
 * a chain of requests hanging off the internal context
 * structure and can be in the global software provider queue.
 */
typedef struct kcf_areq_node {
	/* Should always be the first field in this structure */
	kcf_call_type_t		an_type;

	/* an_lock protects the field an_state */
	kmutex_t		an_lock;
	kcf_req_status_t	an_state;
	crypto_call_req_t	an_reqarg;

	/*
	 * parameters to call the SPI with. We need to
	 * save the params since the caller stack can go away.
	 */
	struct kcf_req_params	an_params;

	/*
	 * The next two fields should be NULL for operations that
	 * don't need a context.
	 */
	/* Internal context for this request */
	struct kcf_context	*an_context;

	/* next in chain of requests for context */
	struct kcf_areq_node	*an_ctxchain_next;

	kcondvar_t		an_turn_cv;
	boolean_t		an_is_my_turn;
	boolean_t		an_isdual;	/* for internal reuse */

	/*
	 * Next and previous nodes in the global software
	 * queue. These fields are NULL for a hardware
	 * provider since we use a taskq there.
	 */
	struct kcf_areq_node	*an_next;
	struct kcf_areq_node	*an_prev;

	/* Provider handling this request */
	kcf_provider_desc_t	*an_provider;
	kcf_prov_cpu_t		*an_mp;
	kcf_prov_tried_t	*an_tried_plist;

	struct kcf_areq_node	*an_idnext;	/* Next in ID hash */
	struct kcf_areq_node	*an_idprev;	/* Prev in ID hash */
	kcondvar_t		an_done;	/* Signal request completion */
	uint_t			an_refcnt;
} kcf_areq_node_t;

#define	KCF_AREQ_REFHOLD(areq) {		\
	atomic_add_32(&(areq)->an_refcnt, 1);	\
	ASSERT((areq)->an_refcnt != 0);		\
}

#define	KCF_AREQ_REFRELE(areq) {				\
	ASSERT((areq)->an_refcnt != 0);				\
	membar_exit();						\
	if (atomic_add_32_nv(&(areq)->an_refcnt, -1) == 0)	\
		kcf_free_req(areq);				\
}
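
/*
 * Illustrative usage sketch (not taken verbatim from the scheduler):
 * any code path that stashes an areq pointer, e.g. in the request ID
 * hash, takes a hold first and releases it when the pointer is
 * dropped. The last release frees the node via kcf_free_req().
 *
 *	KCF_AREQ_REFHOLD(areq);
 *	... make areq visible to another thread ...
 *	KCF_AREQ_REFRELE(areq);	// may free areq; do not touch it afterwards
 */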

#define	GET_REQ_TYPE(arg) *((kcf_call_type_t *)(arg))

#define	NOTIFY_CLIENT(areq, err) (*(areq)->an_reqarg.cr_callback_func)(\
	(areq)->an_reqarg.cr_callback_arg, err);

/* For internally generated call requests for dual operations */
typedef	struct kcf_call_req {
	crypto_call_req_t	kr_callreq;	/* external client call req */
	kcf_req_params_t	kr_params;	/* Params saved for next call */
	kcf_areq_node_t		*kr_areq;	/* Use this areq */
	off_t			kr_saveoffset;
	size_t			kr_savelen;
} kcf_dual_req_t;

/*
 * The following are somewhat similar to macros in callo.h, which implement
 * callout tables.
 *
 * The lower four bits of the ID are used to encode the table ID to
 * index into. The REQID_COUNTER_HIGH bit is used to avoid any check for
 * wraparound when generating an ID. We assume that no request remains
 * outstanding longer than it takes to submit 2^(8 * sizeof (long) - 5)
 * other requests after it. This ensures there won't be any ID collision.
 */
#define	REQID_COUNTER_HIGH	(1UL << (8 * sizeof (long) - 1))
#define	REQID_COUNTER_SHIFT	4
#define	REQID_COUNTER_LOW	(1 << REQID_COUNTER_SHIFT)
#define	REQID_TABLES		16
#define	REQID_TABLE_MASK	(REQID_TABLES - 1)

#define	REQID_BUCKETS		512
#define	REQID_BUCKET_MASK	(REQID_BUCKETS - 1)
#define	REQID_HASH(id)	(((id) >> REQID_COUNTER_SHIFT) & REQID_BUCKET_MASK)

#define	GET_REQID(areq) (areq)->an_reqarg.cr_reqid
#define	SET_REQID(areq, val)	GET_REQID(areq) = val
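
/*
 * Illustrative sketch of the ID layout implied by the macros above
 * (an outline only, not the ID-allocation code itself):
 *
 *	| REQID_COUNTER_HIGH |     counter bits      | low 4 bits |
 *	| avoids wrap check  | stepped per request   | table #    |
 *
 *	tbl_idx = id & REQID_TABLE_MASK;	// which kcf_reqid_table
 *	bucket  = REQID_HASH(id);		// bucket in rt_idhash[]
 *
 * A new ID is typically derived by stepping rt_curid in units of
 * REQID_COUNTER_LOW, which leaves the table index in the low four
 * bits untouched.
 */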

/*
 * Hash table for async requests.
 */
typedef struct kcf_reqid_table {
	kmutex_t		rt_lock;
	crypto_req_id_t		rt_curid;
	kcf_areq_node_t		*rt_idhash[REQID_BUCKETS];
} kcf_reqid_table_t;

/*
 * Global software provider queue structure. Requests that are to be
 * handled by a SW provider and have the CRYPTO_ALWAYS_QUEUE flag set
 * are queued here.
 */
typedef struct kcf_global_swq {
	/*
	 * gs_cv and gs_lock are used to wait for new requests.
	 * gs_lock protects the changes to the queue.
	 */
	kcondvar_t		gs_cv;
	kmutex_t		gs_lock;
	uint_t			gs_njobs;
	uint_t			gs_maxjobs;
	kcf_areq_node_t		*gs_first;
	kcf_areq_node_t		*gs_last;
} kcf_global_swq_t;
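
/*
 * Illustrative sketch of how a service thread waits on the global
 * software queue (the real worker loop also handles timeouts,
 * thread-pool accounting and request state; this only shows the
 * queueing discipline implied by the fields above):
 *
 *	mutex_enter(&gswq->gs_lock);
 *	while ((req = gswq->gs_first) == NULL)
 *		cv_wait(&gswq->gs_cv, &gswq->gs_lock);
 *	gswq->gs_first = req->an_next;
 *	if (gswq->gs_first == NULL)
 *		gswq->gs_last = NULL;
 *	else
 *		gswq->gs_first->an_prev = NULL;
 *	gswq->gs_njobs--;
 *	mutex_exit(&gswq->gs_lock);
 */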


/*
 * Internal representation of a canonical context. The crypto_ctx_t
 * structure is embedded here so that only one memory allocation is
 * needed. The SPI-visible field
 * ((crypto_ctx_t *)ctx)->cc_framework_private maps to this structure.
 */
typedef struct kcf_context {
	crypto_ctx_t		kc_glbl_ctx;
	uint_t			kc_refcnt;
	kmutex_t		kc_in_use_lock;
	/*
	 * kc_req_chain_first and kc_req_chain_last are used to chain
	 * multiple async requests using the same context. They should be
	 * NULL for sync requests.
	 */
	kcf_areq_node_t		*kc_req_chain_first;
	kcf_areq_node_t		*kc_req_chain_last;
	kcf_provider_desc_t	*kc_prov_desc;	/* Prov. descriptor */
	kcf_provider_desc_t	*kc_sw_prov_desc;	/* Prov. descriptor */
	kcf_mech_entry_t	*kc_mech;
	struct kcf_context	*kc_secondctx;	/* for dual contexts */
} kcf_context_t;
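
/*
 * Illustrative sketch of the mapping described above (hypothetical
 * helper, not part of the framework API): given the crypto_ctx_t a
 * provider sees, the framework-private kcf_context_t is reachable
 * through cc_framework_private, and the global context in turn is
 * the kc_glbl_ctx member.
 *
 *	static kcf_context_t *
 *	ctx2kcf_context(crypto_ctx_t *ctx)
 *	{
 *		return ((kcf_context_t *)ctx->cc_framework_private);
 *	}
 */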

/*
 * Bump up the reference count on the framework private context. A
 * global context or a request that references this structure should
 * do a hold.
 */
#define	KCF_CONTEXT_REFHOLD(ictx) {		\
	atomic_add_32(&(ictx)->kc_refcnt, 1);	\
	ASSERT((ictx)->kc_refcnt != 0);		\
}

/*
 * Decrement the reference count on the framework private context.
 * When the last reference is released, the framework private
 * context structure is freed along with the global context.
 */
#define	KCF_CONTEXT_REFRELE(ictx) {				\
	ASSERT((ictx)->kc_refcnt != 0);				\
	membar_exit();						\
	if (atomic_add_32_nv(&(ictx)->kc_refcnt, -1) == 0)	\
		kcf_free_context(ictx);				\
}

/*
 * Check if we can release the context now. In the CRYPTO_QUEUED case
 * we do not release it, since we can do so only after the provider
 * notifies us. In the CRYPTO_BUSY case, the client can retry the
 * request using the context, so we do not release the context either.
 *
 * This macro should be called only from the final routine in
 * an init/update/final sequence. We do not release the context for
 * update operations. We require the consumer to free it explicitly
 * if it wants to abandon the operation. This is done because there
 * may be mechanisms in ECB mode that can continue even if an
 * operation on a block fails.
 */
#define	KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx) {			\
	if (KCF_CONTEXT_DONE(rv))				\
		KCF_CONTEXT_REFRELE(kcf_ctx);			\
}
346
347/*
348 * This macro determines whether we're done with a context.
349 */
350#define	KCF_CONTEXT_DONE(rv)					\
351	((rv) != CRYPTO_QUEUED && (rv) != CRYPTO_BUSY &&	\
352	    (rv) != CRYPTO_BUFFER_TOO_SMALL)
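
/*
 * Illustrative sketch of the intended call site (an outline only): a
 * *_final() entry point submits the request and then conditionally
 * drops its hold on the context based on the return value.
 *
 *	error = kcf_submit_request(pd, ctx, crq, &params, B_FALSE);
 *	KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
 *	return (error);
 */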

/*
 * A crypto_ctx_template_t is internally a pointer to this struct
 */
typedef	struct kcf_ctx_template {
	crypto_kcf_provider_handle_t	ct_prov_handle;	/* provider handle */
	uint_t				ct_generation;	/* generation # */
	size_t				ct_size;	/* for freeing */
	crypto_spi_ctx_template_t	ct_prov_tmpl;	/* context template */
							/* from the SW prov */
} kcf_ctx_template_t;
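
/*
 * Illustrative sketch of the relationship stated above (an outline,
 * not framework code): the opaque template handed to consumers is
 * really the framework-private structure, and the provider-visible
 * template is pulled out of it before the SPI call.
 *
 *	kcf_ctx_template_t *ctx_tmpl = (kcf_ctx_template_t *)tmpl;
 *	crypto_spi_ctx_template_t spi_tmpl = ctx_tmpl->ct_prov_tmpl;
 */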

/*
 * Structure for pool of threads working on global software queue.
 */
typedef struct kcf_pool {
	uint32_t	kp_threads;		/* Number of threads in pool */
	uint32_t	kp_idlethreads;		/* Idle threads in pool */
	uint32_t	kp_blockedthreads;	/* Blocked threads in pool */

	/*
	 * cv & lock to monitor the condition when no threads
	 * are around. In this case the failover thread kicks in.
	 */
	kcondvar_t	kp_nothr_cv;
	kmutex_t	kp_thread_lock;

	/* Userspace thread creator variables. */
	boolean_t	kp_signal_create_thread; /* Create requested flag  */
	int		kp_nthrs;		/* # of threads to create */
	boolean_t	kp_user_waiting;	/* Thread waiting for work */

	/*
	 * cv & lock for the condition where more threads need to be
	 * created. kp_user_lock also protects the three fields above.
	 */
	kcondvar_t	kp_user_cv;		/* Creator cond. variable */
	kmutex_t	kp_user_lock;		/* Creator lock */
} kcf_pool_t;


/*
 * State of a crypto bufcall element.
 */
typedef enum cbuf_state {
	CBUF_FREE = 1,
	CBUF_WAITING,
	CBUF_RUNNING
} cbuf_state_t;

/*
 * Structure of a crypto bufcall element.
 */
typedef struct kcf_cbuf_elem {
	/*
	 * lock and cv to wait for CBUF_RUNNING to be done.
	 * kc_lock also protects kc_state.
	 */
	kmutex_t		kc_lock;
	kcondvar_t		kc_cv;
	cbuf_state_t		kc_state;

	struct kcf_cbuf_elem	*kc_next;
	struct kcf_cbuf_elem	*kc_prev;

	void			(*kc_func)(void *arg);
	void			*kc_arg;
} kcf_cbuf_elem_t;

/*
 * State of a notify element.
 */
typedef enum ntfy_elem_state {
	NTFY_WAITING = 1,
	NTFY_RUNNING
} ntfy_elem_state_t;

/*
 * Structure of a notify list element.
 */
typedef struct kcf_ntfy_elem {
	/*
	 * lock and cv to wait for NTFY_RUNNING to be done.
	 * kn_lock also protects kn_state.
	 */
	kmutex_t			kn_lock;
	kcondvar_t			kn_cv;
	ntfy_elem_state_t		kn_state;

	struct kcf_ntfy_elem		*kn_next;
	struct kcf_ntfy_elem		*kn_prev;

	crypto_notify_callback_t	kn_func;
	uint32_t			kn_event_mask;
} kcf_ntfy_elem_t;


/*
 * The following values are based on the assumption that it would
 * take around eight cpus to load a hardware provider (this is true for
 * at least one product) and that a kernel client may come from different
 * low-priority interrupt levels. We cache CYRPTO_TASKQ_MIN taskq
 * entries. The CRYPTO_TASKQ_MAX number is based on a throughput of
 * 1 GB/s using 512-byte buffers. These are just reasonable estimates and
 * might need to change in the future.
 */
#define	CRYPTO_TASKQ_THREADS	8
#define	CYRPTO_TASKQ_MIN	64
#define	CRYPTO_TASKQ_MAX	(2 * 1024 * 1024)
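
/*
 * For reference, the CRYPTO_TASKQ_MAX estimate above works out as
 * follows (back-of-the-envelope arithmetic, not a hard requirement):
 *
 *	1 GB/s / 512 bytes per request = 2,097,152 requests/s
 *				       = 2 * 1024 * 1024 taskq entries
 */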

extern int crypto_taskq_threads;
extern int crypto_taskq_minalloc;
extern int crypto_taskq_maxalloc;
extern kcf_global_swq_t *gswq;
extern int kcf_maxthreads;
extern int kcf_minthreads;

/* Door handle for talking to kcfd */
extern door_handle_t kcf_dh;
extern kmutex_t	 kcf_dh_lock;

/*
 * All pending crypto bufcalls are put on a list. cbuf_list_lock
 * protects changes to this list.
 */
extern kmutex_t cbuf_list_lock;
extern kcondvar_t cbuf_list_cv;

/*
 * All event subscribers are put on a list. kcf_notify_list_lock
 * protects changes to this list.
 */
extern kmutex_t ntfy_list_lock;
extern kcondvar_t ntfy_list_cv;

boolean_t kcf_get_next_logical_provider_member(kcf_provider_desc_t *,
    kcf_provider_desc_t *, kcf_provider_desc_t **);
extern int kcf_get_hardware_provider(crypto_mech_type_t, crypto_key_t *,
    crypto_mech_type_t, crypto_key_t *,
    kcf_provider_desc_t *, kcf_provider_desc_t **,
    crypto_func_group_t);
extern int kcf_get_hardware_provider_nomech(offset_t, offset_t,
    kcf_provider_desc_t *, kcf_provider_desc_t **);
extern void kcf_free_triedlist(kcf_prov_tried_t *);
extern kcf_prov_tried_t *kcf_insert_triedlist(kcf_prov_tried_t **,
    kcf_provider_desc_t *, int);
extern kcf_provider_desc_t *kcf_get_mech_provider(crypto_mech_type_t,
    crypto_key_t *, kcf_mech_entry_t **, int *, kcf_prov_tried_t *,
    crypto_func_group_t, size_t);
extern kcf_provider_desc_t *kcf_get_dual_provider(crypto_mechanism_t *,
    crypto_key_t *, crypto_mechanism_t *, crypto_key_t *,
    kcf_mech_entry_t **, crypto_mech_type_t *,
    crypto_mech_type_t *, int *, kcf_prov_tried_t *,
    crypto_func_group_t, crypto_func_group_t, size_t);
extern crypto_ctx_t *kcf_new_ctx(crypto_call_req_t *, kcf_provider_desc_t *,
    crypto_session_id_t);
extern int kcf_submit_request(kcf_provider_desc_t *, crypto_ctx_t *,
    crypto_call_req_t *, kcf_req_params_t *, boolean_t);
extern void kcf_sched_init(void);
extern void kcf_sched_start(void);
extern void kcf_sop_done(kcf_sreq_node_t *, int);
extern void kcf_aop_done(kcf_areq_node_t *, int);
extern int common_submit_request(kcf_provider_desc_t *,
    crypto_ctx_t *, kcf_req_params_t *, crypto_req_handle_t);
extern void kcf_free_context(kcf_context_t *);

extern int kcf_svc_wait(int *);
extern int kcf_svc_do_run(void);
extern int kcf_need_fips140_verification(kcf_provider_desc_t *);
extern int kcf_need_signature_verification(kcf_provider_desc_t *);
extern void kcf_verify_signature(void *);
extern struct modctl *kcf_get_modctl(crypto_provider_info_t *);
extern void verify_unverified_providers(void);
extern void kcf_free_req(kcf_areq_node_t *areq);
extern void crypto_bufcall_service(void);

extern void kcf_walk_ntfylist(uint32_t, void *);
extern void kcf_do_notify(kcf_provider_desc_t *, boolean_t);

extern kcf_dual_req_t *kcf_alloc_req(crypto_call_req_t *);
extern void kcf_next_req(void *, int);
extern void kcf_last_req(void *, int);

#ifdef __cplusplus
}
#endif

#endif /* _SYS_CRYPTO_SCHED_IMPL_H */