/*
 * Copyright (c) 2003, PADL Software Pty Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of PADL Software nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL PADL SOFTWARE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "gsskrb5_locl.h"

/*
 * Implementation of RFC 4121
 */

#define CFXSentByAcceptor	(1 << 0)
#define CFXSealed		(1 << 1)
#define CFXAcceptorSubkey	(1 << 2)

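/*
 * For reference, a sketch of the per-message token headers defined by
 * RFC 4121 (section 4.2.6) that this file produces and consumes; all
 * multi-octet fields are big-endian.  This matches the layout assumed
 * by gss_cfx_wrap_token_desc and gss_cfx_mic_token_desc below.
 *
 * Wrap token (16 octets):
 *   Octet 0..1   TOK_ID   0x05 0x04
 *   Octet 2      Flags    (CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey)
 *   Octet 3      Filler   0xFF
 *   Octet 4..5   EC       extra count (pad length, or checksum length)
 *   Octet 6..7   RRC      right rotation count
 *   Octet 8..15  SND_SEQ  64-bit sequence number
 *
 * MIC token (16 octets, followed by the checksum):
 *   Octet 0..1   TOK_ID   0x04 0x04
 *   Octet 2      Flags
 *   Octet 3..7   Filler   0xFF 0xFF 0xFF 0xFF 0xFF
 *   Octet 8..15  SND_SEQ  64-bit sequence number
 */
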
static void
log_broken_acceptor_server(void *ptr)
{
    _gss_mg_log(5, "cfx%s subkey when not expecting one", (char *)ptr);
}

static OM_uint32
verify_flags(struct gsskrb5_crypto *ctx, u_char token_flags, const char *token)
{
    if (token_flags & CFXSentByAcceptor) {
        if (ctx->flags & GK5C_ACCEPTOR) {
            _gss_mg_log(5, "cfx%s acceptor token set by initiator", token);
            return GSS_S_DEFECTIVE_TOKEN;
        }
    } else {
        if ((ctx->flags & GK5C_ACCEPTOR) == 0) {
            _gss_mg_log(5, "cfx%s !acceptor token set by acceptor", token);
            return GSS_S_DEFECTIVE_TOKEN;
        }
    }

    if (ctx->flags & GK5C_ACCEPTOR_SUBKEY) {
        if ((token_flags & CFXAcceptorSubkey) == 0) {
            _gss_mg_log(5, "cfx%s no subkey", token);
            return GSS_S_DEFECTIVE_TOKEN;
        }
    } else {
        if (token_flags & CFXAcceptorSubkey) {
            /*
             * XXX There are broken servers out there that set
             * CFXAcceptorSubkey even though they never established an
             * acceptor subkey.  Log this once and then ignore the
             * error.
             */
            static heim_base_once_t once = HEIM_BASE_ONCE_INIT;
            heim_base_once_f(&once, rk_UNCONST(token), log_broken_acceptor_server);
        }
    }

    return GSS_S_COMPLETE;
}

krb5_error_code
_gsskrb5cfx_wrap_length_cfx(krb5_context context,
                            struct gsskrb5_crypto *ctx,
                            int conf_req_flag,
                            size_t input_length,
                            size_t *output_length,
                            size_t *cksumsize,
                            uint16_t *padlength)
{
    krb5_error_code ret;
    krb5_cksumtype type;

    /* 16-byte header is always first */
    *output_length = sizeof(gss_cfx_wrap_token_desc);
    *padlength = 0;

    ret = krb5_crypto_get_checksum_type(context, ctx->crypto, &type);
    if (ret)
        return ret;

    ret = krb5_checksumsize(context, type, cksumsize);
    if (ret)
        return ret;

    if (conf_req_flag) {
        size_t padsize;

        /* Header is concatenated with data before encryption */
        input_length += sizeof(gss_cfx_wrap_token_desc);

        if (GK5C_IS_DCE_STYLE(ctx)) {
            ret = krb5_crypto_getblocksize(context, ctx->crypto, &padsize);
        } else {
            ret = krb5_crypto_getpadsize(context, ctx->crypto, &padsize);
        }
        if (ret) {
            return ret;
        }
        if (padsize > 1) {
            /* XXX check this */
            *padlength = padsize - (input_length % padsize);

            /* We add the pad ourselves (noted here for completeness only) */
            input_length += *padlength;
        }

        *output_length += krb5_get_wrapped_length(context,
                                                  ctx->crypto, input_length);
    } else {
        /* Checksum is concatenated with data */
        *output_length += input_length + *cksumsize;
    }

    assert(*output_length > input_length);

    return 0;
}

OM_uint32
_gssapi_wrap_size_cfx(OM_uint32 *minor_status,
                      struct gsskrb5_crypto *ctx,
                      krb5_context context,
                      int conf_req_flag,
                      gss_qop_t qop_req,
                      OM_uint32 req_output_size,
                      OM_uint32 *max_input_size)
{
    krb5_error_code ret;

    *minor_status = 0;
    *max_input_size = 0;

    /* 16-byte header is always first */
    if (req_output_size < 16)
        return 0;
    req_output_size -= 16;

    if (conf_req_flag) {
        size_t wrapped_size, sz;

        wrapped_size = req_output_size + 1;
        do {
            wrapped_size--;
            sz = krb5_get_wrapped_length(context,
                                         ctx->crypto, wrapped_size);
        } while (wrapped_size && sz > req_output_size);
        if (wrapped_size == 0)
            return 0;

        /* inner header */
        if (wrapped_size < 16)
            return 0;

        wrapped_size -= 16;

        *max_input_size = (OM_uint32)wrapped_size;
    } else {
        krb5_cksumtype type;
        size_t cksumsize;

        ret = krb5_crypto_get_checksum_type(context, ctx->crypto, &type);
        if (ret) {
            *minor_status = ret;
            return GSS_S_FAILURE;
        }

        ret = krb5_checksumsize(context, type, &cksumsize);
        if (ret) {
            *minor_status = ret;
            return GSS_S_FAILURE;
        }

        if (req_output_size < cksumsize)
            return 0;

        /* Checksum is concatenated with data */
        *max_input_size = (OM_uint32)(req_output_size - cksumsize);
    }

    return 0;
}

/*
 * Rotate "rrc" bytes to the front or back
 */

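/*
 * For example, with len = 6 and rrc = 2, rotating (unrotate == FALSE)
 * moves the last two bytes to the front:
 *
 *   "abcdef"  ->  "efabcd"
 *
 * and unrotating (unrotate == TRUE) reverses that.  This matches how
 * RFC 4121 uses RRC: the sender rotates the token body right by RRC
 * octets and the receiver rotates it back before processing.
 */
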
static krb5_error_code
rrc_rotate(void *data, size_t len, uint16_t rrc, krb5_boolean unrotate)
{
    u_char *tmp, buf[256];
    size_t left;

    if (len == 0)
        return 0;

    rrc %= len;

    if (rrc == 0)
        return 0;

    left = len - rrc;

    if (rrc <= sizeof(buf)) {
        tmp = buf;
    } else {
        tmp = malloc(rrc);
        if (tmp == NULL)
            return ENOMEM;
    }

    if (unrotate) {
        memcpy(tmp, data, rrc);
        memmove(data, (u_char *)data + rrc, left);
        memcpy((u_char *)data + left, tmp, rrc);
    } else {
        memcpy(tmp, (u_char *)data + left, rrc);
        memmove((u_char *)data + rrc, data, left);
        memcpy(data, tmp, rrc);
    }

    if (rrc > sizeof(buf))
        free(tmp);

    return 0;
}

static OM_uint32
_gk_allocate_buffer(OM_uint32 *minor_status, gss_iov_buffer_desc *buffer, size_t size)
{
    if (buffer->type & GSS_IOV_BUFFER_FLAG_ALLOCATED) {
        if (buffer->buffer.length == size)
            return GSS_S_COMPLETE;
        free(buffer->buffer.value);
    }

    buffer->buffer.value = malloc(size);
    if (buffer->buffer.value == NULL) {
        buffer->buffer.length = 0;
        *minor_status = ENOMEM;
        return GSS_S_FAILURE;
    }
    buffer->buffer.length = size;
    buffer->type |= GSS_IOV_BUFFER_FLAG_ALLOCATED;

    return GSS_S_COMPLETE;
}

OM_uint32
_gk_verify_buffers(OM_uint32 *minor_status,
                   struct gsskrb5_crypto *ctx,
                   const gss_iov_buffer_desc *header,
                   const gss_iov_buffer_desc *padding,
                   const gss_iov_buffer_desc *trailer)
{
    if (header == NULL) {
        *minor_status = EINVAL;
        return GSS_S_FAILURE;
    }

    if (GK5C_IS_DCE_STYLE(ctx)) {
        /*
         * In DCE style mode we reject having a padding or trailer buffer
         */
        if (padding) {
            *minor_status = EINVAL;
            return GSS_S_FAILURE;
        }
        if (trailer) {
            *minor_status = EINVAL;
            return GSS_S_FAILURE;
        }
    } else {
        /*
         * In non-DCE style mode we require having a padding buffer
         */
        if (padding == NULL) {
            *minor_status = EINVAL;
            return GSS_S_FAILURE;
        }
    }

    *minor_status = 0;
    return GSS_S_COMPLETE;
}

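/*
 * A typical caller of _gssapi_wrap_cfx_iov() supplies an IOV array of
 * HEADER | DATA | PADDING | TRAILER buffers (PADDING and TRAILER are
 * omitted in DCE style).  A minimal sketch of such a caller, assuming
 * the generic gss_wrap_iov() entry point and GSS_IOV_BUFFER_FLAG_ALLOCATE
 * so that the mechanism allocates the HEADER/PADDING/TRAILER buffers:
 *
 *   gss_iov_buffer_desc iov[4];
 *
 *   iov[0].type = GSS_IOV_BUFFER_TYPE_HEADER | GSS_IOV_BUFFER_FLAG_ALLOCATE;
 *   iov[1].type = GSS_IOV_BUFFER_TYPE_DATA;
 *   iov[1].buffer.value  = payload;
 *   iov[1].buffer.length = payload_len;
 *   iov[2].type = GSS_IOV_BUFFER_TYPE_PADDING | GSS_IOV_BUFFER_FLAG_ALLOCATE;
 *   iov[3].type = GSS_IOV_BUFFER_TYPE_TRAILER | GSS_IOV_BUFFER_FLAG_ALLOCATE;
 *
 *   major = gss_wrap_iov(&minor, context_handle, 1, GSS_C_QOP_DEFAULT,
 *                        &conf_state, iov, 4);
 *
 * The names payload, payload_len and context_handle are placeholders;
 * the snippet is illustrative only and not part of this file.
 */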
OM_uint32
_gssapi_wrap_cfx_iov(OM_uint32 *minor_status,
                     struct gsskrb5_crypto *ctx,
                     krb5_context context,
                     int conf_req_flag,
                     int *conf_state,
                     gss_iov_buffer_desc *iov,
                     int iov_count)
{
    OM_uint32 major_status, junk;
    gss_iov_buffer_desc *header, *trailer, *padding;
    size_t gsshsize, k5hsize;
    size_t gsstsize, k5tsize;
    size_t rrc = 0, ec = 0;
    int i;
    gss_cfx_wrap_token token;
    krb5_error_code ret;
    unsigned usage;
    krb5_crypto_iov *data = NULL;

    header = _gss_mg_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
    if (header == NULL) {
        *minor_status = EINVAL;
        return GSS_S_FAILURE;
    }

    padding = _gss_mg_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
    if (padding != NULL) {
        padding->buffer.length = 0;
    }

    trailer = _gss_mg_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);

    major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
    if (major_status != GSS_S_COMPLETE) {
        return major_status;
    }

    if (conf_req_flag) {
        size_t k5psize = 0;
        size_t k5pbase = 0;
        size_t k5bsize = 0;
        size_t size = 0;

        for (i = 0; i < iov_count; i++) {
            switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
            case GSS_IOV_BUFFER_TYPE_DATA:
                size += iov[i].buffer.length;
                break;
            default:
                break;
            }
        }

        size += sizeof(gss_cfx_wrap_token_desc);

        *minor_status = krb5_crypto_length(context, ctx->crypto,
                                           KRB5_CRYPTO_TYPE_HEADER,
                                           &k5hsize);
        if (*minor_status)
            return GSS_S_FAILURE;

        *minor_status = krb5_crypto_length(context, ctx->crypto,
                                           KRB5_CRYPTO_TYPE_TRAILER,
                                           &k5tsize);
        if (*minor_status)
            return GSS_S_FAILURE;

        *minor_status = krb5_crypto_length(context, ctx->crypto,
                                           KRB5_CRYPTO_TYPE_PADDING,
                                           &k5pbase);
        if (*minor_status)
            return GSS_S_FAILURE;

        if (k5pbase > 1) {
            k5psize = k5pbase - (size % k5pbase);
        } else {
            k5psize = 0;
        }

        if (k5psize == 0 && GK5C_IS_DCE_STYLE(ctx)) {
            *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
                                                     &k5bsize);
            if (*minor_status)
                return GSS_S_FAILURE;
            ec = k5bsize;
        } else {
            ec = k5psize;
        }

        gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
        gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
    } else {
        if (GK5C_IS_DCE_STYLE(ctx)) {
            *minor_status = EINVAL;
            return GSS_S_FAILURE;
        }

        k5hsize = 0;
        *minor_status = krb5_crypto_length(context, ctx->crypto,
                                           KRB5_CRYPTO_TYPE_CHECKSUM,
                                           &k5tsize);
        if (*minor_status)
            return GSS_S_FAILURE;

        gsshsize = sizeof(gss_cfx_wrap_token_desc);
        gsstsize = k5tsize;
    }

    /*
     * Size the GSS trailer; with no trailer buffer it is folded into
     * the header and later rotated into place via RRC.
     */

    if (trailer == NULL) {
        rrc = gsstsize;
        if (GK5C_IS_DCE_STYLE(ctx))
            rrc -= ec;
        gsshsize += gsstsize;
        gsstsize = 0;
    } else if (GSS_IOV_BUFFER_FLAGS(trailer->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
        major_status = _gk_allocate_buffer(minor_status, trailer, gsstsize);
        if (major_status)
            goto failure;
    } else if (trailer->buffer.length < gsstsize) {
        *minor_status = KRB5_BAD_MSIZE;
        major_status = GSS_S_FAILURE;
        goto failure;
    } else
        trailer->buffer.length = gsstsize;

    /*
     * Now the GSS header.
     */

    if (GSS_IOV_BUFFER_FLAGS(header->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
        major_status = _gk_allocate_buffer(minor_status, header, gsshsize);
        if (major_status != GSS_S_COMPLETE)
            goto failure;
    } else if (header->buffer.length < gsshsize) {
        *minor_status = KRB5_BAD_MSIZE;
        major_status = GSS_S_FAILURE;
        goto failure;
    } else
        header->buffer.length = gsshsize;

    token = (gss_cfx_wrap_token)header->buffer.value;

    token->TOK_ID[0] = 0x05;
    token->TOK_ID[1] = 0x04;
    token->Flags     = 0;
    token->Filler    = 0xFF;

    if (ctx->flags & GK5C_ACCEPTOR)
        token->Flags |= CFXSentByAcceptor;

    if (ctx->flags & GK5C_ACCEPTOR_SUBKEY)
        token->Flags |= CFXAcceptorSubkey;

    if (ctx->flags & GK5C_ACCEPTOR)
        usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
    else
        usage = KRB5_KU_USAGE_INITIATOR_SEAL;

    if (conf_req_flag) {
        /*
         * In Wrap tokens with confidentiality, the EC field is
         * used to encode the size (in bytes) of the random filler.
         */
        token->Flags |= CFXSealed;
        token->EC[0] = (ec >> 8) & 0xFF;
        token->EC[1] = (ec >> 0) & 0xFF;

    } else {
        /*
         * In Wrap tokens without confidentiality, the EC field is
         * used to encode the size (in bytes) of the trailing
         * checksum.
         *
         * This is not used in the checksum calculation itself,
         * because the checksum length could potentially vary
         * depending on the data length.
         */
        token->EC[0] = 0;
        token->EC[1] = 0;
    }

    /*
     * In Wrap tokens that provide for confidentiality, the RRC
     * field in the header contains the hex value 00 00 before
     * encryption.
     *
     * In Wrap tokens that do not provide for confidentiality,
     * both the EC and RRC fields in the appended checksum
     * contain the hex value 00 00 for the purpose of calculating
     * the checksum.
     */
    token->RRC[0] = 0;
    token->RRC[1] = 0;

    _gss_mg_encode_be_uint32(ctx->seqnumhi, &token->SND_SEQ[0]);
    _gss_mg_encode_be_uint32(ctx->seqnumlo, &token->SND_SEQ[4]);

    ctx->seqnumlo++;
    if (ctx->seqnumlo == 0)
        ctx->seqnumhi++;

    data = calloc(iov_count + 3, sizeof(data[0]));
    if (data == NULL) {
        *minor_status = ENOMEM;
        major_status = GSS_S_FAILURE;
        goto failure;
    }

    if (conf_req_flag) {
        /*
         * Plain packet:
         *
         *   {"header" | encrypt(plaintext-data | ec-padding | E"header")}
         *
         * Expanded, this is, with RRC = 0:
         *
         *   {"header" | krb5-header | plaintext-data | ec-padding | E"header" | krb5-trailer}
         *
         * In DCE-RPC mode == no trailer: RRC = gss "trailer" == length(ec-padding | E"header" | krb5-trailer)
         *
         *   {"header" | ec-padding | E"header" | krb5-trailer | krb5-header | plaintext-data}
         */

        i = 0;
        data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
        data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
        data[i].data.length = k5hsize;

        for (i = 1; i < iov_count + 1; i++) {
            switch (GSS_IOV_BUFFER_TYPE(iov[i - 1].type)) {
            case GSS_IOV_BUFFER_TYPE_DATA:
                data[i].flags = KRB5_CRYPTO_TYPE_DATA;
                break;
            case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
                data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
                break;
            default:
                data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
                break;
            }
            data[i].data.length = iov[i - 1].buffer.length;
            data[i].data.data = iov[i - 1].buffer.value;
        }

        /*
         * Any necessary padding is added here to ensure that the
         * encrypted token header is always at the end of the
         * ciphertext.
         */

        /*
         * The encrypted CFX header goes into the trailer (or after the
         * header if in DCE mode).  Copy the header into E"header".
         */
        data[i].flags = KRB5_CRYPTO_TYPE_DATA;
        if (trailer)
            data[i].data.data = trailer->buffer.value;
        else
            data[i].data.data = ((uint8_t *)header->buffer.value) + sizeof(*token);

        data[i].data.length = ec + sizeof(*token);
        memset(data[i].data.data, 0xFF, ec);
        memcpy(((uint8_t *)data[i].data.data) + ec, token, sizeof(*token));
        i++;

        /* The Kerberos trailer comes after the GSS trailer */
        data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
        data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
        data[i].data.length = k5tsize;
        i++;

        ret = krb5_encrypt_iov_ivec(context, ctx->crypto,
                                    usage, data, i, NULL);
        if (ret != 0) {
            *minor_status = ret;
            major_status = GSS_S_FAILURE;
            goto failure;
        }

        if (rrc) {
            token->RRC[0] = (rrc >> 8) & 0xFF;
            token->RRC[1] = (rrc >> 0) & 0xFF;
        }

    } else {
        /*
         * Plain packet:
         *
         *   {data | "header" | gss-trailer (krb5 checksum)}
         *
         * RRC != 0 is not used here.
         */

        for (i = 0; i < iov_count; i++) {
            switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
            case GSS_IOV_BUFFER_TYPE_DATA:
                data[i].flags = KRB5_CRYPTO_TYPE_DATA;
                break;
            case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
                data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
                break;
            default:
                data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
                break;
            }
            data[i].data.length = iov[i].buffer.length;
            data[i].data.data = iov[i].buffer.value;
        }

        data[i].flags = KRB5_CRYPTO_TYPE_DATA;
        data[i].data.data = header->buffer.value;
        data[i].data.length = sizeof(gss_cfx_wrap_token_desc);
        i++;

        data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
        if (trailer) {
            data[i].data.data = trailer->buffer.value;
        } else {
            data[i].data.data = (uint8_t *)header->buffer.value +
                sizeof(gss_cfx_wrap_token_desc);
        }
        data[i].data.length = k5tsize;
        i++;

        ret = krb5_create_checksum_iov(context, ctx->crypto,
                                       usage, data, i, NULL);
        if (ret) {
            *minor_status = ret;
            major_status = GSS_S_FAILURE;
            goto failure;
        }

        if (rrc) {
            token->RRC[0] = (rrc >> 8) & 0xFF;
            token->RRC[1] = (rrc >> 0) & 0xFF;
        }

        token->EC[0] =  (k5tsize >> 8) & 0xFF;
        token->EC[1] =  (k5tsize >> 0) & 0xFF;
    }

    if (conf_state != NULL)
        *conf_state = conf_req_flag;

    free(data);

    *minor_status = 0;
    return GSS_S_COMPLETE;

 failure:
    if (data)
        free(data);

    gss_release_iov_buffer(&junk, iov, iov_count);

    return major_status;
}

/* This is the slow path */
static OM_uint32
unrotate_iov(OM_uint32 *minor_status, size_t rrc, gss_iov_buffer_desc *iov, int iov_count)
{
    uint8_t *p, *q;
    size_t len = 0, skip;
    int i;

    for (i = 0; i < iov_count; i++)
        if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
            GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
            GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
            len += iov[i].buffer.length;

    p = malloc(len);
    if (p == NULL) {
        *minor_status = ENOMEM;
        return GSS_S_FAILURE;
    }
    q = p;

    /* copy up */

    for (i = 0; i < iov_count; i++) {
        if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
            GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
            GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
            {
                memcpy(q, iov[i].buffer.value, iov[i].buffer.length);
                q += iov[i].buffer.length;
            }
    }
    assert((size_t)(q - p) == len);

    /* unrotate first part */
    q = p + rrc;
    skip = rrc;
    for (i = 0; i < iov_count; i++) {
        if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
            GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
            GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
            {
                if (iov[i].buffer.length <= skip) {
                    skip -= iov[i].buffer.length;
                } else {
                    memcpy(((uint8_t *)iov[i].buffer.value) + skip, q, iov[i].buffer.length - skip);
                    q += iov[i].buffer.length - skip;
                    skip = 0;
                }
            }
    }
    /* copy trailer */
    q = p;
    skip = rrc;
    for (i = 0; i < iov_count; i++) {
        if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
            GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
            GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
            {
                memcpy(q, iov[i].buffer.value, min(iov[i].buffer.length, skip));
                if (iov[i].buffer.length > skip)
                    break;
                skip -= iov[i].buffer.length;
                q += iov[i].buffer.length;
            }
    }
    free(p);    /* release the scratch copy */
    return GSS_S_COMPLETE;
}

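/*
 * Unwrap counterpart of _gssapi_wrap_cfx_iov().  The caller passes the
 * buffer arrangement that the peer's wrap produced: a HEADER buffer
 * holding the 16-byte CFX token (plus, when there is no TRAILER
 * buffer, the rotated krb5 header/trailer and the encrypted copy of
 * the token), the DATA/SIGN_ONLY buffers, and optionally PADDING and
 * TRAILER.  A non-zero RRC combined with a separate TRAILER buffer is
 * handled via unrotate_iov() above.
 */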
OM_uint32
_gssapi_unwrap_cfx_iov(OM_uint32 *minor_status,
                       struct gsskrb5_crypto *ctx,
                       krb5_context context,
                       int *conf_state,
                       gss_qop_t *qop_state,
                       gss_iov_buffer_desc *iov,
                       int iov_count)
{
    OM_uint32 seq_number_lo, seq_number_hi, major_status, junk;
    gss_iov_buffer_desc *header, *trailer, *padding;
    gss_cfx_wrap_token token, ttoken;
    u_char token_flags;
    krb5_error_code ret;
    unsigned usage;
    uint16_t ec, rrc;
    krb5_crypto_iov *data = NULL;
    int i, j;

    *minor_status = 0;

    header = _gss_mg_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
    if (header == NULL) {
        *minor_status = EINVAL;
        return GSS_S_FAILURE;
    }

    /* The exact length is checked below; this is just a sanity check */
    if (header->buffer.length < sizeof(*token)) {
        _gss_mg_log(5, "cfxunwrap-iov token too short: %ld",
                    (unsigned long)header->buffer.length);
        return GSS_S_DEFECTIVE_TOKEN;
    }

    padding = _gss_mg_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
    if (padding != NULL && padding->buffer.length != 0) {
        *minor_status = EINVAL;
        return GSS_S_FAILURE;
    }

    trailer = _gss_mg_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);

    major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
    if (major_status != GSS_S_COMPLETE) {
        return major_status;
    }

    token = (gss_cfx_wrap_token)header->buffer.value;

    if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04)
        return GSS_S_DEFECTIVE_TOKEN;

    /* Ignore unknown flags */
    token_flags = token->Flags &
        (CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);

    ret = verify_flags(ctx, token_flags, "unwrap-iov");
    if (ret)
        return ret;

    if (token->Filler != 0xFF)
        return GSS_S_DEFECTIVE_TOKEN;

    if (conf_state != NULL)
        *conf_state = (token_flags & CFXSealed) ? 1 : 0;

    ec  = (token->EC[0]  << 8) | token->EC[1];
    rrc = (token->RRC[0] << 8) | token->RRC[1];

    /*
     * Check sequence number
     */
    _gss_mg_decode_be_uint32(&token->SND_SEQ[0], &seq_number_hi);
    _gss_mg_decode_be_uint32(&token->SND_SEQ[4], &seq_number_lo);
    if (seq_number_hi) {
        /* no support for 64-bit sequence numbers */
        *minor_status = ERANGE;
        return GSS_S_UNSEQ_TOKEN;
    }

    ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
    if (ret != 0) {
        *minor_status = 0;
        return ret;
    }

    /*
     * Decrypt and/or verify checksum
     */

    if (ctx->flags & GK5C_ACCEPTOR) {
        usage = KRB5_KU_USAGE_INITIATOR_SEAL;
    } else {
        usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
    }

    data = calloc(iov_count + 3, sizeof(data[0]));
    if (data == NULL) {
        *minor_status = ENOMEM;
        major_status = GSS_S_FAILURE;
        goto failure;
    }

    if (token_flags & CFXSealed) {
        size_t k5tsize, k5hsize;

        krb5_crypto_length(context, ctx->crypto,
                           KRB5_CRYPTO_TYPE_HEADER, &k5hsize);
        krb5_crypto_length(context, ctx->crypto,
                           KRB5_CRYPTO_TYPE_TRAILER, &k5tsize);

        /* Check RRC */
        if (trailer == NULL) {
            size_t gsstsize = k5tsize + sizeof(*token);
            size_t gsshsize = k5hsize + sizeof(*token);

            if (rrc != gsstsize) {
                major_status = GSS_S_DEFECTIVE_TOKEN;
                goto failure;
            }

            if (GK5C_IS_DCE_STYLE(ctx))
                gsstsize += ec;

            gsshsize += gsstsize;

            if (header->buffer.length != gsshsize) {
                major_status = GSS_S_DEFECTIVE_TOKEN;
                goto failure;
            }
        } else if (trailer->buffer.length != sizeof(*token) + k5tsize) {
            major_status = GSS_S_DEFECTIVE_TOKEN;
            goto failure;
        } else if (header->buffer.length != sizeof(*token) + k5hsize) {
            major_status = GSS_S_DEFECTIVE_TOKEN;
            goto failure;
        } else if (rrc != 0) {
            /* go through the slow path */
            major_status = unrotate_iov(minor_status, rrc, iov, iov_count);
            if (major_status)
                goto failure;
        }

        i = 0;
        data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
        data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
        data[i].data.length = k5hsize;
        i++;

        for (j = 0; j < iov_count; i++, j++) {
            switch (GSS_IOV_BUFFER_TYPE(iov[j].type)) {
            case GSS_IOV_BUFFER_TYPE_DATA:
                data[i].flags = KRB5_CRYPTO_TYPE_DATA;
                break;
            case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
                data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
                break;
            default:
                data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
                break;
            }
            data[i].data.length = iov[j].buffer.length;
            data[i].data.data = iov[j].buffer.value;
        }

        /*
         * The encrypted CFX header is in the trailer (or after the
         * header if in DCE mode).  Locate E"header".
         */
        data[i].flags = KRB5_CRYPTO_TYPE_DATA;
        if (trailer) {
            data[i].data.data = trailer->buffer.value;
        } else {
            data[i].data.data = ((uint8_t *)header->buffer.value) +
                header->buffer.length - k5hsize - k5tsize - ec - sizeof(*token);
        }

        data[i].data.length = ec + sizeof(*token);
        ttoken = (gss_cfx_wrap_token)(((uint8_t *)data[i].data.data) + ec);
        i++;

        /* The Kerberos trailer comes after the GSS trailer */
        data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
        data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
        data[i].data.length = k5tsize;
        i++;

        ret = krb5_decrypt_iov_ivec(context, ctx->crypto,
                                    usage, data, i, NULL);
        if (ret != 0) {
            *minor_status = ret;
            major_status = GSS_S_FAILURE;
            goto failure;
        }

        /* RRC is not protected; copy it over before comparing */
        ttoken->RRC[0] = token->RRC[0];
        ttoken->RRC[1] = token->RRC[1];

        /* Check the integrity of the header */
        if (ct_memcmp(ttoken, token, sizeof(*token)) != 0) {
            major_status = GSS_S_BAD_MIC;
            goto failure;
        }
    } else {
        size_t gsstsize = ec;
        size_t gsshsize = sizeof(*token);

        if (trailer == NULL) {
            /* Check RRC */
            if (rrc != gsstsize) {
                *minor_status = EINVAL;
                major_status = GSS_S_FAILURE;
                goto failure;
            }

            gsshsize += gsstsize;
            gsstsize = 0;
        } else if (trailer->buffer.length != gsstsize) {
            major_status = GSS_S_DEFECTIVE_TOKEN;
            goto failure;
        } else if (rrc != 0) {
            /* Check RRC */
            *minor_status = EINVAL;
            major_status = GSS_S_FAILURE;
            goto failure;
        }

        if (header->buffer.length != gsshsize) {
            major_status = GSS_S_DEFECTIVE_TOKEN;
            goto failure;
        }

        for (i = 0; i < iov_count; i++) {
            switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
            case GSS_IOV_BUFFER_TYPE_DATA:
                data[i].flags = KRB5_CRYPTO_TYPE_DATA;
                break;
            case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
                data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
                break;
            default:
                data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
                break;
            }
            data[i].data.length = iov[i].buffer.length;
            data[i].data.data = iov[i].buffer.value;
        }

        data[i].flags = KRB5_CRYPTO_TYPE_DATA;
        data[i].data.data = header->buffer.value;
        data[i].data.length = sizeof(*token);
        i++;

        data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
        if (trailer) {
            data[i].data.data = trailer->buffer.value;
        } else {
            data[i].data.data = (uint8_t *)header->buffer.value +
                sizeof(*token);
        }
        data[i].data.length = ec;
        i++;

        token = (gss_cfx_wrap_token)header->buffer.value;
        token->EC[0]  = 0;
        token->EC[1]  = 0;
        token->RRC[0] = 0;
        token->RRC[1] = 0;

        ret = krb5_verify_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
        if (ret) {
            *minor_status = ret;
            major_status = GSS_S_FAILURE;
            goto failure;
        }
    }

    if (qop_state != NULL) {
        *qop_state = GSS_C_QOP_DEFAULT;
    }

    free(data);

    *minor_status = 0;
    return GSS_S_COMPLETE;

 failure:
    if (data)
        free(data);

    gss_release_iov_buffer(&junk, iov, iov_count);

    return major_status;
}

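/*
 * Compute the buffer lengths that _gssapi_wrap_cfx_iov() will need for
 * a given set of DATA/SIGN_ONLY buffers, without performing any
 * encryption or checksumming.  On return, header->buffer.length (and
 * trailer->buffer.length, if a trailer was supplied) are filled in;
 * the padding length is always 0 because the pad is carried in the EC
 * filler inside the header or trailer.
 */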
OM_uint32
_gssapi_wrap_iov_length_cfx(OM_uint32 *minor_status,
                            struct gsskrb5_crypto *ctx,
                            krb5_context context,
                            int conf_req_flag,
                            gss_qop_t qop_req,
                            int *conf_state,
                            gss_iov_buffer_desc *iov,
                            int iov_count)
{
    OM_uint32 major_status;
    size_t size;
    int i;
    gss_iov_buffer_desc *header = NULL;
    gss_iov_buffer_desc *padding = NULL;
    gss_iov_buffer_desc *trailer = NULL;
    size_t gsshsize = 0;
    size_t gsstsize = 0;
    size_t k5hsize = 0;
    size_t k5tsize = 0;

    GSSAPI_KRB5_INIT (&context);
    *minor_status = 0;

    for (size = 0, i = 0; i < iov_count; i++) {
        switch(GSS_IOV_BUFFER_TYPE(iov[i].type)) {
        case GSS_IOV_BUFFER_TYPE_EMPTY:
            break;
        case GSS_IOV_BUFFER_TYPE_DATA:
            size += iov[i].buffer.length;
            break;
        case GSS_IOV_BUFFER_TYPE_HEADER:
            if (header != NULL) {
                *minor_status = 0;
                return GSS_S_FAILURE;
            }
            header = &iov[i];
            break;
        case GSS_IOV_BUFFER_TYPE_TRAILER:
            if (trailer != NULL) {
                *minor_status = 0;
                return GSS_S_FAILURE;
            }
            trailer = &iov[i];
            break;
        case GSS_IOV_BUFFER_TYPE_PADDING:
            if (padding != NULL) {
                *minor_status = 0;
                return GSS_S_FAILURE;
            }
            padding = &iov[i];
            break;
        case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
            break;
        default:
            *minor_status = EINVAL;
            return GSS_S_FAILURE;
        }
    }

    major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
    if (major_status != GSS_S_COMPLETE) {
        return major_status;
    }

    if (conf_req_flag) {
        size_t k5psize = 0;
        size_t k5pbase = 0;
        size_t k5bsize = 0;
        size_t ec = 0;

        size += sizeof(gss_cfx_wrap_token_desc);

        *minor_status = krb5_crypto_length(context, ctx->crypto,
                                           KRB5_CRYPTO_TYPE_HEADER,
                                           &k5hsize);
        if (*minor_status)
            return GSS_S_FAILURE;

        *minor_status = krb5_crypto_length(context, ctx->crypto,
                                           KRB5_CRYPTO_TYPE_TRAILER,
                                           &k5tsize);
        if (*minor_status)
            return GSS_S_FAILURE;

        *minor_status = krb5_crypto_length(context, ctx->crypto,
                                           KRB5_CRYPTO_TYPE_PADDING,
                                           &k5pbase);
        if (*minor_status)
            return GSS_S_FAILURE;

        if (k5pbase > 1) {
            k5psize = k5pbase - (size % k5pbase);
        } else {
            k5psize = 0;
        }

        if (k5psize == 0 && GK5C_IS_DCE_STYLE(ctx)) {
            *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
                                                     &k5bsize);
            if (*minor_status)
                return GSS_S_FAILURE;

            ec = k5bsize;
        } else {
            ec = k5psize;
        }

        gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
        gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
    } else {
        *minor_status = krb5_crypto_length(context, ctx->crypto,
                                           KRB5_CRYPTO_TYPE_CHECKSUM,
                                           &k5tsize);
        if (*minor_status)
            return GSS_S_FAILURE;

        gsshsize = sizeof(gss_cfx_wrap_token_desc);
        gsstsize = k5tsize;
    }

    if (trailer != NULL) {
        trailer->buffer.length = gsstsize;
    } else {
        gsshsize += gsstsize;
    }

    header->buffer.length = gsshsize;

    if (padding) {
        /* padding is done via EC and is contained in the header or trailer */
        padding->buffer.length = 0;
    }

    if (conf_state) {
        *conf_state = conf_req_flag;
    }

    return GSS_S_COMPLETE;
}

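/*
 * Non-IOV wrap: the whole token is built in a single allocated buffer.
 * With confidentiality the result is
 *
 *   {"header" | encrypt(plaintext | pad | "header")}
 *
 * and without confidentiality it is
 *
 *   {"header" | plaintext | checksum(plaintext | "header")}
 *
 * with the token body rotated right by RRC octets in DCE style (see
 * the comments below).
 */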
OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
                           struct gsskrb5_crypto *ctx,
                           krb5_context context,
                           int conf_req_flag,
                           const gss_buffer_t input_message_buffer,
                           int *conf_state,
                           gss_buffer_t output_message_buffer)
{
    gss_cfx_wrap_token token;
    krb5_error_code ret;
    unsigned usage;
    krb5_data cipher;
    size_t wrapped_len, cksumsize;
    uint16_t padlength, rrc = 0;
    u_char *p;

    ret = _gsskrb5cfx_wrap_length_cfx(context,
                                      ctx, conf_req_flag,
                                      input_message_buffer->length,
                                      &wrapped_len, &cksumsize, &padlength);
    if (ret != 0) {
        *minor_status = ret;
        return GSS_S_FAILURE;
    }

    /*
     * We would prefer to always rotate the encrypted token (if any) and
     * the checksum up into the header, i.e. RRC = token size plus
     * checksum size, but since pure Java Kerberos can't handle that, we
     * have to use RRC = 0 when not in DCE style.
     */
    if (GK5C_IS_DCE_STYLE(ctx))
        rrc = (conf_req_flag ? sizeof(*token) : 0) + (uint16_t)cksumsize;
    else
        rrc = 0;

    output_message_buffer->length = wrapped_len;
    output_message_buffer->value = malloc(output_message_buffer->length);
    if (output_message_buffer->value == NULL) {
        *minor_status = ENOMEM;
        return GSS_S_FAILURE;
    }

    p = output_message_buffer->value;
    token = (gss_cfx_wrap_token)p;
    token->TOK_ID[0] = 0x05;
    token->TOK_ID[1] = 0x04;
    token->Flags     = 0;
    token->Filler    = 0xFF;
    if (ctx->flags & GK5C_ACCEPTOR)
        token->Flags |= CFXSentByAcceptor;
    if (ctx->flags & GK5C_ACCEPTOR_SUBKEY)
        token->Flags |= CFXAcceptorSubkey;
    if (conf_req_flag) {
        /*
         * In Wrap tokens with confidentiality, the EC field is
         * used to encode the size (in bytes) of the random filler.
         */
        token->Flags |= CFXSealed;
        token->EC[0] = (padlength >> 8) & 0xFF;
        token->EC[1] = (padlength >> 0) & 0xFF;
    } else {
        /*
         * In Wrap tokens without confidentiality, the EC field is
         * used to encode the size (in bytes) of the trailing
         * checksum.
         *
         * This is not used in the checksum calculation itself,
         * because the checksum length could potentially vary
         * depending on the data length.
         */
        token->EC[0] = 0;
        token->EC[1] = 0;
    }

    /*
     * In Wrap tokens that provide for confidentiality, the RRC
     * field in the header contains the hex value 00 00 before
     * encryption.
     *
     * In Wrap tokens that do not provide for confidentiality,
     * both the EC and RRC fields in the appended checksum
     * contain the hex value 00 00 for the purpose of calculating
     * the checksum.
     */
    token->RRC[0] = 0;
    token->RRC[1] = 0;

    _gss_mg_encode_be_uint32(ctx->seqnumhi, &token->SND_SEQ[0]);
    _gss_mg_encode_be_uint32(ctx->seqnumlo, &token->SND_SEQ[4]);

    ctx->seqnumlo++;
    if (ctx->seqnumlo == 0)
        ctx->seqnumhi++;

    /*
     * If confidentiality is requested, the token header is
     * appended to the plaintext before encryption; the resulting
     * token is {"header" | encrypt(plaintext | pad | "header")}.
     *
     * If no confidentiality is requested, the checksum is
     * calculated over the plaintext concatenated with the
     * token header.
     */
    if (ctx->flags & GK5C_ACCEPTOR) {
        usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
    } else {
        usage = KRB5_KU_USAGE_INITIATOR_SEAL;
    }

    if (conf_req_flag) {
        /*
         * Any necessary padding is added here to ensure that the
         * encrypted token header is always at the end of the
         * ciphertext.
         *
         * The specification does not require that the padding
         * bytes are initialized.
         */
        p += sizeof(*token);
        memcpy(p, input_message_buffer->value, input_message_buffer->length);
        memset(p + input_message_buffer->length, 0xFF, padlength);
        memcpy(p + input_message_buffer->length + padlength,
               token, sizeof(*token));

        ret = krb5_encrypt(context, ctx->crypto,
                           usage, p,
                           input_message_buffer->length + padlength +
                           sizeof(*token),
                           &cipher);
        if (ret != 0) {
            *minor_status = ret;
            _gsskrb5_release_buffer(minor_status, output_message_buffer);
            return GSS_S_FAILURE;
        }
        assert(sizeof(*token) + cipher.length == wrapped_len);
        token->RRC[0] = (rrc >> 8) & 0xFF;
        token->RRC[1] = (rrc >> 0) & 0xFF;

        /*
         * This is really ugly, but needed for interoperability with
         * Windows DCERPC, which rotates by EC+RRC.
         */
        if (GK5C_IS_DCE_STYLE(ctx)) {
            ret = rrc_rotate(cipher.data, cipher.length, rrc+padlength, FALSE);
        } else {
            ret = rrc_rotate(cipher.data, cipher.length, rrc, FALSE);
        }
        if (ret != 0) {
            *minor_status = ret;
            _gsskrb5_release_buffer(minor_status, output_message_buffer);
            return GSS_S_FAILURE;
        }
        memcpy(p, cipher.data, cipher.length);
        krb5_data_free(&cipher);
    } else {
        char *buf;
        Checksum cksum;

        buf = malloc(input_message_buffer->length + sizeof(*token));
        if (buf == NULL) {
            *minor_status = ENOMEM;
            _gsskrb5_release_buffer(minor_status, output_message_buffer);
            return GSS_S_FAILURE;
        }
        memcpy(buf, input_message_buffer->value, input_message_buffer->length);
        memcpy(buf + input_message_buffer->length, token, sizeof(*token));

        ret = krb5_create_checksum(context, ctx->crypto,
                                   usage, 0, buf,
                                   input_message_buffer->length +
                                   sizeof(*token),
                                   &cksum);
        if (ret != 0) {
            *minor_status = ret;
            _gsskrb5_release_buffer(minor_status, output_message_buffer);
            free(buf);
            return GSS_S_FAILURE;
        }

        free(buf);

        assert(cksum.checksum.length == cksumsize);
        token->EC[0] =  (cksum.checksum.length >> 8) & 0xFF;
        token->EC[1] =  (cksum.checksum.length >> 0) & 0xFF;
        token->RRC[0] = (rrc >> 8) & 0xFF;
        token->RRC[1] = (rrc >> 0) & 0xFF;

        p += sizeof(*token);
        memcpy(p, input_message_buffer->value, input_message_buffer->length);
        memcpy(p + input_message_buffer->length,
               cksum.checksum.data, cksum.checksum.length);

        ret = rrc_rotate(p,
                         input_message_buffer->length + cksum.checksum.length, rrc, FALSE);
        if (ret != 0) {
            *minor_status = ret;
            _gsskrb5_release_buffer(minor_status, output_message_buffer);
            free_Checksum(&cksum);
            return GSS_S_FAILURE;
        }
        free_Checksum(&cksum);
    }

    if (conf_state != NULL) {
        *conf_state = conf_req_flag;
    }

    *minor_status = 0;
    return GSS_S_COMPLETE;
}

OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
                             struct gsskrb5_crypto *ctx,
                             krb5_context context,
                             const gss_buffer_t input_message_buffer,
                             gss_buffer_t output_message_buffer,
                             int *conf_state,
                             gss_qop_t *qop_state)
{
    gss_cfx_wrap_token_desc token, *tp;
    u_char token_flags;
    krb5_error_code ret;
    unsigned usage;
    krb5_data data;
    uint16_t ec, rrc;
    OM_uint32 seq_number_lo, seq_number_hi;
    size_t len;
    u_char *p;

    *minor_status = 0;

    if (input_message_buffer->length < sizeof(token)) {
        _gss_mg_log(5, "cfxunwrap token too short: %ld",
                    (unsigned long)input_message_buffer->length);
        return GSS_S_DEFECTIVE_TOKEN;
    }

    p = input_message_buffer->value;

    memcpy(&token, p, sizeof(token));

    if (token.TOK_ID[0] != 0x05 || token.TOK_ID[1] != 0x04) {
        _gss_mg_log(5, "cfxunwrap not a WRAP token: 0x%02x%02x",
                    token.TOK_ID[0], token.TOK_ID[1]);
        return GSS_S_DEFECTIVE_TOKEN;
    }

    /* Ignore unknown flags */
    token_flags = token.Flags &
        (CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);

    ret = verify_flags(ctx, token_flags, "unwrap");
    if (ret)
        return ret;

    if (token.Filler != 0xFF) {
        _gss_mg_log(5, "cfxunwrap filler bad: 0x%02x", token.Filler);
        return GSS_S_DEFECTIVE_TOKEN;
    }

    if (conf_state != NULL) {
        *conf_state = (token_flags & CFXSealed) ? 1 : 0;
    }

    ec  = (token.EC[0]  << 8) | token.EC[1];
    rrc = (token.RRC[0] << 8) | token.RRC[1];

    /*
     * Check sequence number
     */
    _gss_mg_decode_be_uint32(&token.SND_SEQ[0], &seq_number_hi);
    _gss_mg_decode_be_uint32(&token.SND_SEQ[4], &seq_number_lo);
    if (seq_number_hi) {
        /* no support for 64-bit sequence numbers */
        *minor_status = ERANGE;
        return GSS_S_UNSEQ_TOKEN;
    }

    ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
    if (ret != 0) {
        *minor_status = 0;
        _gsskrb5_release_buffer(minor_status, output_message_buffer);
        return ret;
    }

    /*
     * Decrypt and/or verify checksum
     */

    if (ctx->flags & GK5C_ACCEPTOR) {
        usage = KRB5_KU_USAGE_INITIATOR_SEAL;
    } else {
        usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
    }

    p += sizeof(token);
    len = input_message_buffer->length;
    len -= (p - (u_char *)input_message_buffer->value);

    if (token_flags & CFXSealed) {
        /*
         * This is really ugly, but needed for interoperability with
         * Windows DCERPC, which rotates by EC+RRC.
         */
        if (GK5C_IS_DCE_STYLE(ctx)) {
            *minor_status = rrc_rotate(p, len, rrc+ec, TRUE);
        } else {
            *minor_status = rrc_rotate(p, len, rrc, TRUE);
        }
        if (*minor_status != 0) {
            return GSS_S_FAILURE;
        }

        ret = krb5_decrypt(context, ctx->crypto, usage,
                           p, len, &data);
        if (ret != 0) {
            *minor_status = ret;
            return GSS_S_BAD_MIC;
        }

        /* Check that there is room for the pad and token header */
        if (data.length < ec + sizeof(token)) {
            krb5_data_free(&data);
            _gss_mg_log(5, "cfxunwrap pad and token not fitting: %lu",
                        (unsigned long)data.length);
            return GSS_S_DEFECTIVE_TOKEN;
        }
        p = data.data;
        p += data.length - sizeof(token);

        /* RRC is unprotected */
        memcpy(token.RRC, ((gss_cfx_wrap_token)p)->RRC,
               sizeof(((gss_cfx_wrap_token)p)->RRC));

        /* Check the integrity of the header */
        if (ct_memcmp(p, &token, sizeof(token)) != 0) {
            krb5_data_free(&data);
            return GSS_S_BAD_MIC;
        }

        output_message_buffer->value = data.data;
        output_message_buffer->length = data.length - ec - sizeof(token);
    } else {
        Checksum cksum;

        /* Rotate by RRC; XXX it is bogus to do this in place */
        *minor_status = rrc_rotate(p, len, rrc, TRUE);
        if (*minor_status != 0) {
            return GSS_S_FAILURE;
        }

        /* Determine checksum type */
        ret = krb5_crypto_get_checksum_type(context,
                                            ctx->crypto,
                                            &cksum.cksumtype);
        if (ret != 0) {
            *minor_status = ret;
            return GSS_S_FAILURE;
        }

        cksum.checksum.length = ec;

        /* Check we have at least as much data as the checksum */
        if (len < cksum.checksum.length) {
            *minor_status = ERANGE;
            return GSS_S_BAD_MIC;
        }

        /* Length now is of the plaintext only, no checksum */
        len -= cksum.checksum.length;
        cksum.checksum.data = p + len;

        output_message_buffer->length = len; /* for later */
        output_message_buffer->value = malloc(len + sizeof(token));
        if (output_message_buffer->value == NULL) {
            *minor_status = ENOMEM;
            return GSS_S_FAILURE;
        }

        /* Checksum is over (plaintext-data | "header") */
        memcpy(output_message_buffer->value, p, len);
        memcpy((u_char *)output_message_buffer->value + len,
               &token, sizeof(token));

        /* EC is not included in checksum calculation */
        tp = (gss_cfx_wrap_token)((u_char *)output_message_buffer->value +
                                  len);
        memset(tp->EC, 0, sizeof(tp->EC));
        memset(tp->RRC, 0, sizeof(tp->RRC));

        ret = krb5_verify_checksum(context, ctx->crypto,
                                   usage,
                                   output_message_buffer->value,
                                   len + sizeof(token),
                                   &cksum);
        if (ret != 0) {
            *minor_status = ret;
            _gsskrb5_release_buffer(minor_status, output_message_buffer);
            return GSS_S_BAD_MIC;
        }
    }

    if (qop_state != NULL) {
        *qop_state = GSS_C_QOP_DEFAULT;
    }

    *minor_status = 0;
    return GSS_S_COMPLETE;
}

OM_uint32 _gssapi_mic_cfx(OM_uint32 *minor_status,
                          struct gsskrb5_crypto *ctx,
                          krb5_context context,
                          gss_qop_t qop_req,
                          const gss_buffer_t message_buffer,
                          gss_buffer_t message_token)
{
    gss_cfx_mic_token_desc token;
    krb5_error_code ret;
    unsigned usage;
    Checksum cksum;
    u_char *buf;
    size_t len;

    len = message_buffer->length + sizeof(token);
    buf = malloc(len);
    if (buf == NULL) {
        *minor_status = ENOMEM;
        return GSS_S_FAILURE;
    }

    memset(&token, 0, sizeof(token));
    token.TOK_ID[0] = 0x04;
    token.TOK_ID[1] = 0x04;
    token.Flags = 0;
    if (ctx->flags & GK5C_ACCEPTOR)
        token.Flags |= CFXSentByAcceptor;
    if (ctx->flags & GK5C_ACCEPTOR_SUBKEY)
        token.Flags |= CFXAcceptorSubkey;
    memset(token.Filler, 0xFF, 5);

    _gss_mg_encode_be_uint32(ctx->seqnumhi, &token.SND_SEQ[0]);
    _gss_mg_encode_be_uint32(ctx->seqnumlo, &token.SND_SEQ[4]);

    memcpy(buf, message_buffer->value, message_buffer->length);
    memcpy(buf + message_buffer->length, &token, sizeof(token));

    ctx->seqnumlo++;
    if (ctx->seqnumlo == 0)
        ctx->seqnumhi++;

    if (ctx->flags & GK5C_ACCEPTOR) {
        usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
    } else {
        usage = KRB5_KU_USAGE_INITIATOR_SIGN;
    }

    ret = krb5_create_checksum(context, ctx->crypto,
                               usage, 0, buf, len, &cksum);
    if (ret != 0) {
        *minor_status = ret;
        free(buf);
        return GSS_S_FAILURE;
    }

    /* Determine MIC length */
    message_token->length = sizeof(token) + cksum.checksum.length;
    message_token->value = malloc(message_token->length);
    if (message_token->value == NULL) {
        *minor_status = ENOMEM;
        free_Checksum(&cksum);
        free(buf);
        return GSS_S_FAILURE;
    }

    /* Token is { "header" | get_mic(plaintext-data | "header") } */
    memcpy(message_token->value, &token, sizeof(token));
    memcpy((u_char *)message_token->value + sizeof(token),
           cksum.checksum.data, cksum.checksum.length);

    free_Checksum(&cksum);
    free(buf);

    *minor_status = 0;
    return GSS_S_COMPLETE;
}

OM_uint32 _gssapi_verify_mic_cfx(OM_uint32 *minor_status,
                                 struct gsskrb5_crypto *ctx,
                                 krb5_context context,
                                 const gss_buffer_t message_buffer,
                                 const gss_buffer_t token_buffer,
                                 gss_qop_t *qop_state)
{
    gss_cfx_mic_token_desc token;
    u_char token_flags;
    krb5_error_code ret;
    unsigned usage;
    OM_uint32 seq_number_lo, seq_number_hi;
    u_char *buf, *p;
    Checksum cksum;

    *minor_status = 0;

    if (token_buffer->length < sizeof(token)) {
        _gss_mg_log(5, "cfxverifymic token too short: %ld",
                    (unsigned long)token_buffer->length);
        return GSS_S_DEFECTIVE_TOKEN;
    }

    p = token_buffer->value;

    memcpy(&token, p, sizeof(token));

    if (token.TOK_ID[0] != 0x04 || token.TOK_ID[1] != 0x04) {
        _gss_mg_log(5, "cfxverifymic not a MIC token: 0x%02x%02x",
                    token.TOK_ID[0], token.TOK_ID[1]);
        return GSS_S_DEFECTIVE_TOKEN;
    }

    /* Ignore unknown flags */
    token_flags = token.Flags & (CFXSentByAcceptor | CFXAcceptorSubkey);

    ret = verify_flags(ctx, token_flags, "mic");
    if (ret)
        return ret;

    if (ct_memcmp(token.Filler, "\xff\xff\xff\xff\xff", 5) != 0) {
        _gss_mg_log(5, "cfxmic filler bad");
        return GSS_S_DEFECTIVE_TOKEN;
    }

    /*
     * Check sequence number
     */
    _gss_mg_decode_be_uint32(&token.SND_SEQ[0], &seq_number_hi);
    _gss_mg_decode_be_uint32(&token.SND_SEQ[4], &seq_number_lo);
    if (seq_number_hi) {
        *minor_status = ERANGE;
        return GSS_S_UNSEQ_TOKEN;
    }

    ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
    if (ret != 0) {
        *minor_status = 0;
        return ret;
    }

    /*
     * Verify checksum
     */
    ret = krb5_crypto_get_checksum_type(context, ctx->crypto,
                                        &cksum.cksumtype);
    if (ret != 0) {
        *minor_status = ret;
        return GSS_S_FAILURE;
    }

    cksum.checksum.data = p + sizeof(token);
    cksum.checksum.length = token_buffer->length - sizeof(token);

    if (ctx->flags & GK5C_ACCEPTOR) {
        usage = KRB5_KU_USAGE_INITIATOR_SIGN;
    } else {
        usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
    }

    buf = malloc(message_buffer->length + sizeof(token));
    if (buf == NULL) {
        *minor_status = ENOMEM;
        return GSS_S_FAILURE;
    }
    memcpy(buf, message_buffer->value, message_buffer->length);
    memcpy(buf + message_buffer->length, &token, sizeof(token));

    ret = krb5_verify_checksum(context, ctx->crypto,
                               usage,
                               buf,
                               message_buffer->length + sizeof(token),
                               &cksum);
    if (ret != 0) {
        *minor_status = ret;
        free(buf);
        return GSS_S_BAD_MIC;
    }

    free(buf);

    if (qop_state != NULL) {
        *qop_state = GSS_C_QOP_DEFAULT;
    }

    return GSS_S_COMPLETE;
}