/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Unlike the md5 and sha1 kernel modules, which also provide a legacy
 * misc variant for direct calls to their Init/Update/Final routines,
 * the md4 module is built with a single modlinkage element:
 *
 * - a modlcrypto that allows the module to register with the Kernel
 *   Cryptographic Framework (KCF) as a software provider for the MD4
 *   mechanisms.
 */
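
/*
 * For illustration only (not part of this module): a kernel consumer
 * reaches this provider through the KCF consumer API rather than by
 * calling the entry points below directly.  A minimal sketch, assuming
 * the crypto_mech2id(9F)/crypto_digest(9F) consumer interfaces; `msg'
 * and `msglen' are placeholder names for the caller's input buffer:
 *
 *	crypto_mechanism_t mech;
 *	crypto_data_t in, out;
 *	uchar_t md[16];			// MD4 digest is 16 bytes
 *	int rv;
 *
 *	mech.cm_type = crypto_mech2id(SUN_CKM_MD4);
 *	mech.cm_param = NULL;
 *	mech.cm_param_len = 0;
 *
 *	in.cd_format = CRYPTO_DATA_RAW;
 *	in.cd_offset = 0;
 *	in.cd_length = msglen;
 *	in.cd_raw.iov_base = (caddr_t)msg;
 *	in.cd_raw.iov_len = msglen;
 *
 *	out.cd_format = CRYPTO_DATA_RAW;
 *	out.cd_offset = 0;
 *	out.cd_length = sizeof (md);
 *	out.cd_raw.iov_base = (caddr_t)md;
 *	out.cd_raw.iov_len = sizeof (md);
 *
 *	rv = crypto_digest(&mech, &in, &out, NULL);	// NULL: synchronous
 *
 * KCF dispatches such a request to md4_digest_atomic(), or to the
 * init/update/final entry points for multi-part operations, as defined
 * below.
 */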

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/modctl.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/spi.h>
#include <sys/sysmacros.h>
#include <sys/strsun.h>
#include <sys/note.h>
#include <sys/md4.h>

extern struct mod_ops mod_miscops;
extern struct mod_ops mod_cryptoops;

/*
 * Module linkage information for the kernel.
 */

static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	"MD4 Kernel SW Provider"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcrypto,
	NULL
};

/*
 * CSPI information (entry points, provider info, etc.)
 */

typedef enum md4_mech_type {
	MD4_MECH_INFO_TYPE,		/* SUN_CKM_MD4 */
} md4_mech_type_t;

#define	MD4_DIGEST_LENGTH	16	/* MD4 digest length in bytes */

/*
 * Context for MD4 mechanism.
 */
typedef struct md4_ctx {
	md4_mech_type_t		mc_mech_type;	/* type of context */
	MD4_CTX			mc_md4_ctx;	/* MD4 context */
} md4_ctx_t;

/*
 * Macros to access the MD4 contexts from a context passed
 * by KCF to one of the entry points.
 */

#define	PROV_MD4_CTX(ctx)	((md4_ctx_t *)(ctx)->cc_provider_private)

/*
 * Mechanism info structure passed to KCF during registration.
 */
static crypto_mech_info_t md4_mech_info_tab[] = {
	/* MD4 */
	{SUN_CKM_MD4, MD4_MECH_INFO_TYPE,
	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
};

static void md4_provider_status(crypto_provider_handle_t, uint_t *);

static crypto_control_ops_t md4_control_ops = {
	md4_provider_status
};

static int md4_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_req_handle_t);
static int md4_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int md4_digest_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int md4_digest_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int md4_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);

static crypto_digest_ops_t md4_digest_ops = {
	md4_digest_init,
	md4_digest,
	md4_digest_update,
	NULL,
	md4_digest_final,
	md4_digest_atomic
};

static crypto_ops_t md4_crypto_ops = {
	&md4_control_ops,
	&md4_digest_ops,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
};

static crypto_provider_info_t md4_prov_info = {
	CRYPTO_SPI_VERSION_1,
	"MD4 Software Provider",
	CRYPTO_SW_PROVIDER,
	{&modlinkage},
	NULL,
	&md4_crypto_ops,
	sizeof (md4_mech_info_tab)/sizeof (crypto_mech_info_t),
	md4_mech_info_tab
};

static crypto_kcf_provider_handle_t md4_prov_handle = NULL;

int
_init(void)
{
	int ret;

	if ((ret = mod_install(&modlinkage)) != 0)
		return (ret);

	/* Register with KCF.  If the registration fails, remove the module. */
	if (crypto_register_provider(&md4_prov_info, &md4_prov_handle)) {
		(void) mod_remove(&modlinkage);
		return (EACCES);
	}

	return (0);
}

int
_fini(void)
{
	/* Unregister from KCF if module is registered */
	if (md4_prov_handle != NULL) {
		if (crypto_unregister_provider(md4_prov_handle))
			return (EBUSY);

		md4_prov_handle = NULL;
	}

	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * KCF software provider control entry points.
 */
/* ARGSUSED */
static void
md4_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	*status = CRYPTO_PROVIDER_READY;
}

/*
 * KCF software provider digest entry points.
 */

static int
md4_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_req_handle_t req)
{
	if (mechanism->cm_type != MD4_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/*
	 * Allocate and initialize MD4 context.
	 */
	ctx->cc_provider_private = kmem_alloc(sizeof (md4_ctx_t),
	    crypto_kmflag(req));
	if (ctx->cc_provider_private == NULL)
		return (CRYPTO_HOST_MEMORY);

	PROV_MD4_CTX(ctx)->mc_mech_type = MD4_MECH_INFO_TYPE;
	MD4Init(&PROV_MD4_CTX(ctx)->mc_md4_ctx);

	return (CRYPTO_SUCCESS);
}

/*
 * Helper MD4 digest update function for uio data.
 */
static int
md4_digest_update_uio(MD4_CTX *md4_ctx, crypto_data_t *data)
{
	off_t offset = data->cd_offset;
	size_t length = data->cd_length;
	uint_t vec_idx;
	size_t cur_len;

	/* we support only kernel buffer */
	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Jump to the first iovec containing data to be
	 * digested.
	 */
	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len)
		;
	if (vec_idx == data->cd_uio->uio_iovcnt) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * Now do the digesting on the iovecs.
	 */
	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
		    offset, length);

		MD4Update(md4_ctx, data->cd_uio->uio_iov[vec_idx].iov_base +
		    offset, cur_len);

		length -= cur_len;
		vec_idx++;
		offset = 0;
	}

	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
		/*
		 * The end of the specified iovecs was reached but
		 * the length requested could not be processed, i.e.,
		 * the caller requested to digest more data than it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	return (CRYPTO_SUCCESS);
}

/*
 * Helper MD4 digest final function for uio data.
 * digest_len is the length of the desired digest. If digest_len
 * is smaller than the default MD4 digest length, the caller
 * must pass a scratch buffer, digest_scratch, which must
 * be at least MD4_DIGEST_LENGTH bytes.
 */
static int
md4_digest_final_uio(MD4_CTX *md4_ctx, crypto_data_t *digest,
    ulong_t digest_len, uchar_t *digest_scratch)
{
	off_t offset = digest->cd_offset;
	uint_t vec_idx;

	/* we support only kernel buffer */
	if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Jump to the first iovec that will be used to store
	 * the digest.
	 */
	for (vec_idx = 0; vec_idx < digest->cd_uio->uio_iovcnt &&
	    offset >= digest->cd_uio->uio_iov[vec_idx].iov_len;
	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len)
		;
	if (vec_idx == digest->cd_uio->uio_iovcnt) {
		/*
		 * The caller specified an offset that is
		 * larger than the total size of the buffers
		 * it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	if (offset + digest_len <=
	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
		/*
		 * The computed MD4 digest will fit in the current
		 * iovec.
		 */
		if (digest_len != MD4_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			MD4Final(digest_scratch, md4_ctx);
			bcopy(digest_scratch, (uchar_t *)digest->
			    cd_uio->uio_iov[vec_idx].iov_base + offset,
			    digest_len);
		} else {
			MD4Final((uchar_t *)digest->
			    cd_uio->uio_iov[vec_idx].iov_base + offset,
			    md4_ctx);
		}
	} else {
		/*
		 * The computed digest will cross one or more iovecs.
		 * This is bad performance-wise but we need to support it.
		 * Allocate a small scratch buffer on the stack and
		 * copy it piecemeal to the specified digest iovecs.
		 */
		uchar_t digest_tmp[MD4_DIGEST_LENGTH];
		off_t scratch_offset = 0;
		size_t length = digest_len;
		size_t cur_len;

		MD4Final(digest_tmp, md4_ctx);

		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
			    offset, length);
			bcopy(digest_tmp + scratch_offset,
			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
			    cur_len);

			length -= cur_len;
			vec_idx++;
			scratch_offset += cur_len;
			offset = 0;
		}

		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
			/*
			 * The end of the specified iovecs was reached but
			 * the length requested could not be processed, i.e.,
			 * the caller requested to digest more data than it
			 * provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
	}

	return (CRYPTO_SUCCESS);
}

/*
 * Helper MD4 digest update for mblk's.
 */
static int
md4_digest_update_mblk(MD4_CTX *md4_ctx, crypto_data_t *data)
{
	off_t offset = data->cd_offset;
	size_t length = data->cd_length;
	mblk_t *mp;
	size_t cur_len;

	/*
	 * Jump to the first mblk_t containing data to be digested.
	 */
	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
	    offset -= MBLKL(mp), mp = mp->b_cont)
		;
	if (mp == NULL) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * Now do the digesting on the mblk chain.
	 */
	while (mp != NULL && length > 0) {
		cur_len = MIN(MBLKL(mp) - offset, length);
		MD4Update(md4_ctx, mp->b_rptr + offset, cur_len);
		length -= cur_len;
		offset = 0;
		mp = mp->b_cont;
	}

	if (mp == NULL && length > 0) {
		/*
		 * The end of the mblk chain was reached but the length
		 * requested could not be processed, i.e., the caller
		 * requested to digest more data than it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	return (CRYPTO_SUCCESS);
}

/*
 * Helper MD4 digest final for mblk's.
 * digest_len is the length of the desired digest. If digest_len
 * is smaller than the default MD4 digest length, the caller
 * must pass a scratch buffer, digest_scratch, which must
 * be at least MD4_DIGEST_LENGTH bytes.
 */
static int
md4_digest_final_mblk(MD4_CTX *md4_ctx, crypto_data_t *digest,
    ulong_t digest_len, uchar_t *digest_scratch)
{
	off_t offset = digest->cd_offset;
	mblk_t *mp;

	/*
	 * Jump to the first mblk_t that will be used to store the digest.
	 */
	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
	    offset -= MBLKL(mp), mp = mp->b_cont)
		;
	if (mp == NULL) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	if (offset + digest_len <= MBLKL(mp)) {
		/*
		 * The computed MD4 digest will fit in the current mblk.
		 * Do the MD4Final() in-place.
		 */
		if (digest_len != MD4_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			MD4Final(digest_scratch, md4_ctx);
			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
		} else {
			MD4Final(mp->b_rptr + offset, md4_ctx);
		}
	} else {
		/*
		 * The computed digest will cross one or more mblks.
		 * This is bad performance-wise but we need to support it.
		 * Allocate a small scratch buffer on the stack and
		 * copy it piecemeal to the specified digest mblks.
		 */
		uchar_t digest_tmp[MD4_DIGEST_LENGTH];
		off_t scratch_offset = 0;
		size_t length = digest_len;
		size_t cur_len;

		MD4Final(digest_tmp, md4_ctx);

		while (mp != NULL && length > 0) {
			cur_len = MIN(MBLKL(mp) - offset, length);
			bcopy(digest_tmp + scratch_offset,
			    mp->b_rptr + offset, cur_len);

			length -= cur_len;
			mp = mp->b_cont;
			scratch_offset += cur_len;
			offset = 0;
		}

		if (mp == NULL && length > 0) {
			/*
			 * The end of the mblk chain was reached but
			 * the length requested could not be processed, i.e.,
			 * the caller requested to digest more data than it
			 * provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
	}

	return (CRYPTO_SUCCESS);
}
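
/*
 * For reference (not used by this module): the helpers above walk
 * scattered buffers described by a uio_t or an mblk_t chain.  A minimal,
 * hypothetical sketch of a two-segment CRYPTO_DATA_UIO descriptor that
 * those helpers would accept (note uio_segflg must be UIO_SYSSPACE, as
 * enforced above); `buf1', `buf2', `len1' and `len2' are placeholders:
 *
 *	iovec_t iov[2];
 *	uio_t uio;
 *	crypto_data_t cd;
 *
 *	iov[0].iov_base = (caddr_t)buf1;
 *	iov[0].iov_len = len1;
 *	iov[1].iov_base = (caddr_t)buf2;
 *	iov[1].iov_len = len2;
 *
 *	uio.uio_iov = iov;
 *	uio.uio_iovcnt = 2;
 *	uio.uio_segflg = UIO_SYSSPACE;
 *
 *	cd.cd_format = CRYPTO_DATA_UIO;
 *	cd.cd_offset = 0;
 *	cd.cd_length = len1 + len2;
 *	cd.cd_uio = &uio;
 *
 * cd_offset and cd_length select the byte range to digest across the
 * iovecs, which is exactly the walk md4_digest_update_uio() implements.
 */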

/* ARGSUSED */
static int
md4_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * If the output buffer is too small, just return the length
	 * needed to store the digest without destroying the context.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < MD4_DIGEST_LENGTH)) {
		digest->cd_length = MD4_DIGEST_LENGTH;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do the MD4 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Update(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_update_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_update_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* the update failed, free context and bail */
		kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
		ctx->cc_provider_private = NULL;
		digest->cd_length = 0;
		return (ret);
	}

	/*
	 * Do an MD4 final; this must be done separately since the digest
	 * data type can differ from the input data type.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_MD4_CTX(ctx)->mc_md4_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_final_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    digest, MD4_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_final_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    digest, MD4_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = MD4_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
	ctx->cc_provider_private = NULL;
	return (ret);
}

/* ARGSUSED */
static int
md4_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * Do the MD4 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Update(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_update_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_update_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	return (ret);
}

/* ARGSUSED */
static int
md4_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * If the output buffer is too small, just return the length
	 * needed to store the digest without destroying the context.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < MD4_DIGEST_LENGTH)) {
		digest->cd_length = MD4_DIGEST_LENGTH;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do an MD4 final.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_MD4_CTX(ctx)->mc_md4_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_final_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    digest, MD4_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_final_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    digest, MD4_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = MD4_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
	ctx->cc_provider_private = NULL;

	return (ret);
}

/* ARGSUSED */
static int
md4_digest_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	MD4_CTX md4_ctx;

	if (mechanism->cm_type != MD4_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/*
	 * Do the MD4 init.
	 */
	MD4Init(&md4_ctx);

	/*
	 * Do the MD4 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Update(&md4_ctx, data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_update_uio(&md4_ctx, data);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_update_mblk(&md4_ctx, data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* the update failed, bail */
		digest->cd_length = 0;
		return (ret);
	}

	/*
	 * Do an MD4 final; this must be done separately since the digest
	 * data type can differ from the input data type.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &md4_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_final_uio(&md4_ctx, digest,
		    MD4_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_final_mblk(&md4_ctx, digest,
		    MD4_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = MD4_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	return (ret);
}
