/*
 * Copyright (c) 2003, PADL Software Pty Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of PADL Software nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL PADL SOFTWARE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "gsskrb5_locl.h"

/*
 * Implementation of RFC 4121
 */

#define CFXSentByAcceptor	(1 << 0)
#define CFXSealed		(1 << 1)
#define CFXAcceptorSubkey	(1 << 2)
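
/*
 * For reference (a summary, not normative): the RFC 4121 Wrap token
 * header manipulated below (gss_cfx_wrap_token) is 16 bytes on the wire:
 *
 *   TOK_ID  (2 bytes)  0x05 0x04
 *   Flags   (1 byte)   CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey
 *   Filler  (1 byte)   0xFF
 *   EC      (2 bytes)  "extra count", big-endian
 *   RRC     (2 bytes)  "right rotation count", big-endian
 *   SND_SEQ (8 bytes)  sequence number, big-endian
 */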

static void
log_broken_acceptor_server(void *ptr)
{
    _gss_mg_log(5, "cfx%s subkey when not expecting one", (char *)ptr);
}
48
49static OM_uint32
50verify_flags(struct gsskrb5_crypto *ctx, u_char token_flags, const char *token)
51{
52    if (token_flags & CFXSentByAcceptor) {
53	if (ctx->flags & GK5C_ACCEPTOR) {
54	    _gss_mg_log(5, "cfx%s acceptor token set by initiator", token);
55	    return GSS_S_DEFECTIVE_TOKEN;
56	}
57    } else {
58	if ((ctx->flags & GK5C_ACCEPTOR) == 0) {
59	    _gss_mg_log(5, "cfx%s !acceptor token set by acceptor", token);
60	    return GSS_S_DEFECTIVE_TOKEN;
61	}
62    }
63
64    if (ctx->flags & GK5C_ACCEPTOR_SUBKEY) {
65	if ((token_flags & CFXAcceptorSubkey) == 0) {
66	    _gss_mg_log(5, "cfx%s no subkey", token);
67	    return GSS_S_DEFECTIVE_TOKEN;
68	}
69    } else {
70	if (token_flags & CFXAcceptorSubkey) {
71	    /*
72	     * XXX there are broken servers out there that sets
73	     * CFXAcceptorSubkey even though that they didn't set an
74	     * acceptor subkey. Just log once and then ignore the
75	     * error.
76	     */
77	    static heim_base_once_t once = HEIM_BASE_ONCE_INIT;
78	    heim_base_once_f(&once, rk_UNCONST(token), log_broken_acceptor_server);
79	}
80    }
81
82    return GSS_S_COMPLETE;
83}

krb5_error_code
_gsskrb5cfx_wrap_length_cfx(krb5_context context,
			    struct gsskrb5_crypto *ctx,
			    int conf_req_flag,
			    size_t input_length,
			    size_t *output_length,
			    size_t *cksumsize,
			    uint16_t *padlength)
{
    krb5_error_code ret;
    krb5_cksumtype type;

    /* 16-byte header is always first */
    *output_length = sizeof(gss_cfx_wrap_token_desc);
    *padlength = 0;

    ret = krb5_crypto_get_checksum_type(context, ctx->crypto, &type);
    if (ret)
	return ret;

    ret = krb5_checksumsize(context, type, cksumsize);
    if (ret)
	return ret;

    if (conf_req_flag) {
	size_t padsize;

	/* Header is concatenated with data before encryption */
	input_length += sizeof(gss_cfx_wrap_token_desc);

	if (GK5C_IS_DCE_STYLE(ctx)) {
	    ret = krb5_crypto_getblocksize(context, ctx->crypto, &padsize);
	} else {
	    ret = krb5_crypto_getpadsize(context, ctx->crypto, &padsize);
	}
	if (ret) {
	    return ret;
	}
	if (padsize > 1) {
	    /* XXX check this */
	    *padlength = padsize - (input_length % padsize);

	    /* We add the pad ourselves (noted here for completeness only) */
	    input_length += *padlength;
	}

	*output_length += krb5_get_wrapped_length(context,
						  ctx->crypto, input_length);
    } else {
	/* Checksum is concatenated with data */
	*output_length += input_length + *cksumsize;
    }

    assert(*output_length > input_length);

    return 0;
}

OM_uint32
_gssapi_wrap_size_cfx(OM_uint32 *minor_status,
		      struct gsskrb5_crypto *ctx,
		      krb5_context context,
		      int conf_req_flag,
		      gss_qop_t qop_req,
		      OM_uint32 req_output_size,
		      OM_uint32 *max_input_size)
{
    krb5_error_code ret;

    *max_input_size = 0;

    /* 16-byte header is always first */
    if (req_output_size < 16)
	return 0;
    req_output_size -= 16;

    if (conf_req_flag) {
	size_t wrapped_size, sz;

	wrapped_size = req_output_size + 1;
	do {
	    wrapped_size--;
	    sz = krb5_get_wrapped_length(context,
					 ctx->crypto, wrapped_size);
	} while (wrapped_size && sz > req_output_size);
	if (wrapped_size == 0)
	    return 0;

	/* inner header */
	if (wrapped_size < 16)
	    return 0;

	wrapped_size -= 16;

	*max_input_size = (OM_uint32)wrapped_size;
    } else {
	krb5_cksumtype type;
	size_t cksumsize;

	ret = krb5_crypto_get_checksum_type(context, ctx->crypto, &type);
	if (ret)
	    return ret;

	ret = krb5_checksumsize(context, type, &cksumsize);
	if (ret)
	    return ret;

	if (req_output_size < cksumsize)
	    return 0;

	/* Checksum is concatenated with data */
	*max_input_size = (OM_uint32)(req_output_size - cksumsize);
    }

    return 0;
}

/*
 * Rotate "rrc" bytes to the front or back
 */
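
/*
 * For example (a sketch, not used by the code): with len = 6 and
 * rrc = 2, rotating "abcdef" forward yields "efabcd", and calling
 * rrc_rotate() again with unrotate = TRUE restores "abcdef".
 */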

static krb5_error_code
rrc_rotate(void *data, size_t len, uint16_t rrc, krb5_boolean unrotate)
{
    u_char *tmp, buf[256];
    size_t left;

    if (len == 0)
	return 0;

    rrc %= len;

    if (rrc == 0)
	return 0;

    left = len - rrc;

    if (rrc <= sizeof(buf)) {
	tmp = buf;
    } else {
	tmp = malloc(rrc);
	if (tmp == NULL)
	    return ENOMEM;
    }

    if (unrotate) {
	memcpy(tmp, data, rrc);
	memmove(data, (u_char *)data + rrc, left);
	memcpy((u_char *)data + left, tmp, rrc);
    } else {
	memcpy(tmp, (u_char *)data + left, rrc);
	memmove((u_char *)data + rrc, data, left);
	memcpy(data, tmp, rrc);
    }

    if (rrc > sizeof(buf))
	free(tmp);

    return 0;
}

static OM_uint32
_gk_allocate_buffer(OM_uint32 *minor_status, gss_iov_buffer_desc *buffer, size_t size)
{
    if (buffer->type & GSS_IOV_BUFFER_FLAG_ALLOCATED) {
	if (buffer->buffer.length == size)
	    return GSS_S_COMPLETE;
	free(buffer->buffer.value);
    }

    buffer->buffer.value = malloc(size);
    if (buffer->buffer.value == NULL) {
	buffer->buffer.length = 0;
	*minor_status = ENOMEM;
	return GSS_S_FAILURE;
    }
    buffer->buffer.length = size;
    buffer->type |= GSS_IOV_BUFFER_FLAG_ALLOCATED;

    return GSS_S_COMPLETE;
}


OM_uint32
_gk_verify_buffers(OM_uint32 *minor_status,
		   struct gsskrb5_crypto *ctx,
		   const gss_iov_buffer_desc *header,
		   const gss_iov_buffer_desc *padding,
		   const gss_iov_buffer_desc *trailer)
{
    if (header == NULL) {
	*minor_status = EINVAL;
	return GSS_S_FAILURE;
    }

    if (GK5C_IS_DCE_STYLE(ctx)) {
	/*
	 * In DCE style mode we reject having a padding or trailer buffer
	 */
	if (padding) {
	    *minor_status = EINVAL;
	    return GSS_S_FAILURE;
	}
	if (trailer) {
	    *minor_status = EINVAL;
	    return GSS_S_FAILURE;
	}
    } else {
	/*
	 * In non-DCE style mode we require having a padding buffer
	 */
	if (padding == NULL) {
	    *minor_status = EINVAL;
	    return GSS_S_FAILURE;
	}
    }

    *minor_status = 0;
    return GSS_S_COMPLETE;
}

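/*
 * A sketch of the IOV layout this function works with (see
 * _gk_verify_buffers above for what is actually enforced): a HEADER
 * buffer, one or more DATA and/or SIGN_ONLY buffers, a PADDING buffer
 * in non-DCE mode (always set to length 0, since CFX "padding" is
 * carried in the EC filler instead), and an optional TRAILER buffer.
 * When no trailer buffer is supplied, the trailing material is folded
 * into the header and its size is advertised via the RRC field.
 */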
OM_uint32
_gssapi_wrap_cfx_iov(OM_uint32 *minor_status,
		     struct gsskrb5_crypto *ctx,
		     krb5_context context,
		     int conf_req_flag,
		     int *conf_state,
		     gss_iov_buffer_desc *iov,
		     int iov_count)
{
    OM_uint32 major_status, junk;
    gss_iov_buffer_desc *header, *trailer, *padding;
    size_t gsshsize, k5hsize;
    size_t gsstsize, k5tsize;
    size_t rrc = 0, ec = 0;
    int i;
    gss_cfx_wrap_token token;
    krb5_error_code ret;
    unsigned usage;
    krb5_crypto_iov *data = NULL;

    header = _gss_mg_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
    if (header == NULL) {
	*minor_status = EINVAL;
	return GSS_S_FAILURE;
    }

    padding = _gss_mg_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
    if (padding != NULL) {
	padding->buffer.length = 0;
    }

    trailer = _gss_mg_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);

    major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
    if (major_status != GSS_S_COMPLETE) {
	return major_status;
    }

    if (conf_req_flag) {
	size_t k5psize = 0;
	size_t k5pbase = 0;
	size_t k5bsize = 0;
	size_t size = 0;

	for (i = 0; i < iov_count; i++) {
	    switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
	    case GSS_IOV_BUFFER_TYPE_DATA:
		size += iov[i].buffer.length;
		break;
	    default:
		break;
	    }
	}

	size += sizeof(gss_cfx_wrap_token_desc);

	*minor_status = krb5_crypto_length(context, ctx->crypto,
					   KRB5_CRYPTO_TYPE_HEADER,
					   &k5hsize);
	if (*minor_status)
	    return GSS_S_FAILURE;

	*minor_status = krb5_crypto_length(context, ctx->crypto,
					   KRB5_CRYPTO_TYPE_TRAILER,
					   &k5tsize);
	if (*minor_status)
	    return GSS_S_FAILURE;

	*minor_status = krb5_crypto_length(context, ctx->crypto,
					   KRB5_CRYPTO_TYPE_PADDING,
					   &k5pbase);
	if (*minor_status)
	    return GSS_S_FAILURE;

	if (k5pbase > 1) {
	    k5psize = k5pbase - (size % k5pbase);
	} else {
	    k5psize = 0;
	}

	if (k5psize == 0 && GK5C_IS_DCE_STYLE(ctx)) {
	    *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
						     &k5bsize);
	    if (*minor_status)
		return GSS_S_FAILURE;
	    ec = k5bsize;
	} else {
	    ec = k5psize;
	}

	gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
	gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
    } else {
	if (GK5C_IS_DCE_STYLE(ctx)) {
	    *minor_status = EINVAL;
	    return GSS_S_FAILURE;
	}

	k5hsize = 0;
	*minor_status = krb5_crypto_length(context, ctx->crypto,
					   KRB5_CRYPTO_TYPE_CHECKSUM,
					   &k5tsize);
	if (*minor_status)
	    return GSS_S_FAILURE;

	gsshsize = sizeof(gss_cfx_wrap_token_desc);
	gsstsize = k5tsize;
    }

    /*
     * Figure out where the trailing material goes: if the caller
     * supplied no trailer buffer, fold it into the header and set the
     * rotation count (RRC) accordingly.
     */

    if (trailer == NULL) {
	rrc = gsstsize;
	if (GK5C_IS_DCE_STYLE(ctx))
	    rrc -= ec;
	gsshsize += gsstsize;
	gsstsize = 0;
    } else if (GSS_IOV_BUFFER_FLAGS(trailer->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
	major_status = _gk_allocate_buffer(minor_status, trailer, gsstsize);
	if (major_status)
	    goto failure;
    } else if (trailer->buffer.length < gsstsize) {
	*minor_status = KRB5_BAD_MSIZE;
	major_status = GSS_S_FAILURE;
	goto failure;
    } else
	trailer->buffer.length = gsstsize;

    /*
     * Set up the header buffer and fill in the fixed token fields.
     */

    if (GSS_IOV_BUFFER_FLAGS(header->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
	major_status = _gk_allocate_buffer(minor_status, header, gsshsize);
	if (major_status != GSS_S_COMPLETE)
	    goto failure;
    } else if (header->buffer.length < gsshsize) {
	*minor_status = KRB5_BAD_MSIZE;
	major_status = GSS_S_FAILURE;
	goto failure;
    } else
	header->buffer.length = gsshsize;

    token = (gss_cfx_wrap_token)header->buffer.value;

    token->TOK_ID[0] = 0x05;
    token->TOK_ID[1] = 0x04;
    token->Flags     = 0;
    token->Filler    = 0xFF;

    if (ctx->flags & GK5C_ACCEPTOR)
	token->Flags |= CFXSentByAcceptor;

    if (ctx->flags & GK5C_ACCEPTOR_SUBKEY)
	token->Flags |= CFXAcceptorSubkey;

    if (ctx->flags & GK5C_ACCEPTOR)
	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
    else
	usage = KRB5_KU_USAGE_INITIATOR_SEAL;

    if (conf_req_flag) {
	/*
	 * In Wrap tokens with confidentiality, the EC field is
	 * used to encode the size (in bytes) of the random filler.
	 */
	token->Flags |= CFXSealed;
	token->EC[0] = (ec >> 8) & 0xFF;
	token->EC[1] = (ec >> 0) & 0xFF;

    } else {
	/*
	 * In Wrap tokens without confidentiality, the EC field is
	 * used to encode the size (in bytes) of the trailing
	 * checksum.
	 *
	 * This is not used in the checksum calculation itself,
	 * because the checksum length could potentially vary
	 * depending on the data length.
	 */
	token->EC[0] = 0;
	token->EC[1] = 0;
    }

    /*
     * In Wrap tokens that provide for confidentiality, the RRC
     * field in the header contains the hex value 00 00 before
     * encryption.
     *
     * In Wrap tokens that do not provide for confidentiality,
     * both the EC and RRC fields in the appended checksum
     * contain the hex value 00 00 for the purpose of calculating
     * the checksum.
     */
    token->RRC[0] = 0;
    token->RRC[1] = 0;

    _gss_mg_encode_be_uint32(ctx->seqnumhi, &token->SND_SEQ[0]);
    _gss_mg_encode_be_uint32(ctx->seqnumlo, &token->SND_SEQ[4]);

    ctx->seqnumlo++;
    if (ctx->seqnumlo == 0)
	ctx->seqnumhi++;

    data = calloc(iov_count + 3, sizeof(data[0]));
    if (data == NULL) {
	*minor_status = ENOMEM;
	major_status = GSS_S_FAILURE;
	goto failure;
    }

    if (conf_req_flag) {
	/*
	  plain packet:

	  {"header" | encrypt(plaintext-data | ec-padding | E"header")}

	  Expanded, this is with RRC = 0:

	  {"header" | krb5-header | plaintext-data | ec-padding | E"header" | krb5-trailer }

	  In DCE-RPC mode (== no trailer): RRC = gss "trailer" == length(ec-padding | E"header" | krb5-trailer)

	  {"header" | ec-padding | E"header" | krb5-trailer | krb5-header | plaintext-data }
	*/
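
	/*
	 * The krb5_crypto_iov array built below, in order:
	 *   [0]             KRB5_CRYPTO_TYPE_HEADER  - krb5 header, placed at the
	 *                                              end of the gss header buffer
	 *   [1..iov_count]  DATA / SIGN_ONLY / EMPTY - the caller's buffers
	 *   [iov_count+1]   DATA                     - ec filler bytes followed by a
	 *                                              copy of the gss token header
	 *   [iov_count+2]   KRB5_CRYPTO_TYPE_TRAILER - krb5 trailer
	 */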

	i = 0;
	data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
	data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
	data[i].data.length = k5hsize;

	for (i = 1; i < iov_count + 1; i++) {
	    switch (GSS_IOV_BUFFER_TYPE(iov[i - 1].type)) {
	    case GSS_IOV_BUFFER_TYPE_DATA:
		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
		break;
	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
		break;
	    default:
		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
		break;
	    }
	    data[i].data.length = iov[i - 1].buffer.length;
	    data[i].data.data = iov[i - 1].buffer.value;
	}

	/*
	 * Any necessary padding is added here to ensure that the
	 * encrypted token header is always at the end of the
	 * ciphertext.
	 */

	/* encrypted CFX header in trailer (or after the header if in
	   DCE mode). Copy in header into E"header"
	*/
	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
	if (trailer)
	    data[i].data.data = trailer->buffer.value;
	else
	    data[i].data.data = ((uint8_t *)header->buffer.value) + sizeof(*token);

	data[i].data.length = ec + sizeof(*token);
	memset(data[i].data.data, 0xFF, ec);
	memcpy(((uint8_t *)data[i].data.data) + ec, token, sizeof(*token));
	i++;

	/* Kerberos trailer comes after the gss trailer */
	data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
	data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
	data[i].data.length = k5tsize;
	i++;

	ret = krb5_encrypt_iov_ivec(context, ctx->crypto,
				    usage, data, i, NULL);
	if (ret != 0) {
	    *minor_status = ret;
	    major_status = GSS_S_FAILURE;
	    goto failure;
	}

	if (rrc) {
	    token->RRC[0] = (rrc >> 8) & 0xFF;
	    token->RRC[1] = (rrc >> 0) & 0xFF;
	}

    } else {
	/*
	  plain packet:

	  {data | "header" | gss-trailer (krb5 checksum)}

	  we don't do RRC != 0 here

	*/
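
	/*
	 * The krb5_crypto_iov array built below, in order:
	 *   [0..iov_count-1]  DATA / SIGN_ONLY / EMPTY  - the caller's buffers
	 *   [iov_count]       DATA                      - copy of the gss token header
	 *                                                 (EC and RRC still zero here)
	 *   [iov_count+1]     KRB5_CRYPTO_TYPE_CHECKSUM - written to the trailer, or
	 *                                                 just after the header
	 */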

	for (i = 0; i < iov_count; i++) {
	    switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
	    case GSS_IOV_BUFFER_TYPE_DATA:
		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
		break;
	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
		break;
	    default:
		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
		break;
	    }
	    data[i].data.length = iov[i].buffer.length;
	    data[i].data.data = iov[i].buffer.value;
	}

	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
	data[i].data.data = header->buffer.value;
	data[i].data.length = sizeof(gss_cfx_wrap_token_desc);
	i++;

	data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
	if (trailer) {
	    data[i].data.data = trailer->buffer.value;
	} else {
	    data[i].data.data = (uint8_t *)header->buffer.value +
		sizeof(gss_cfx_wrap_token_desc);
	}
	data[i].data.length = k5tsize;
	i++;

	ret = krb5_create_checksum_iov(context, ctx->crypto,
				       usage, data, i, NULL);
	if (ret) {
	    *minor_status = ret;
	    major_status = GSS_S_FAILURE;
	    goto failure;
	}

	if (rrc) {
	    token->RRC[0] = (rrc >> 8) & 0xFF;
	    token->RRC[1] = (rrc >> 0) & 0xFF;
	}

	token->EC[0] =  (k5tsize >> 8) & 0xFF;
	token->EC[1] =  (k5tsize >> 0) & 0xFF;
    }

    if (conf_state != NULL)
	*conf_state = conf_req_flag;

    free(data);

    *minor_status = 0;
    return GSS_S_COMPLETE;

 failure:
    if (data)
	free(data);

    gss_release_iov_buffer(&junk, iov, iov_count);

    return major_status;
}

/* This is slowpath */
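/*
 * Used by _gssapi_unwrap_cfx_iov() below when a sealed token arrives
 * with RRC != 0 but the caller supplied a separate trailer buffer: the
 * rotation is undone across the DATA, PADDING and TRAILER buffers via
 * a temporary contiguous copy.
 */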
static OM_uint32
unrotate_iov(OM_uint32 *minor_status, size_t rrc, gss_iov_buffer_desc *iov, int iov_count)
{
    uint8_t *p, *q;
    size_t len = 0, skip;
    int i;

    for (i = 0; i < iov_count; i++)
	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
	    len += iov[i].buffer.length;

    p = malloc(len);
    if (p == NULL) {
	*minor_status = ENOMEM;
	return GSS_S_FAILURE;
    }
    q = p;

    /* copy up */

    for (i = 0; i < iov_count; i++) {
	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
	    {
		memcpy(q, iov[i].buffer.value, iov[i].buffer.length);
		q += iov[i].buffer.length;
	    }
    }
    assert((size_t)(q - p) == len);

    /* unrotate first part */
    q = p + rrc;
    skip = rrc;
    for (i = 0; i < iov_count; i++) {
	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
	    {
		if (iov[i].buffer.length <= skip) {
		    skip -= iov[i].buffer.length;
		} else {
		    memcpy(((uint8_t *)iov[i].buffer.value) + skip, q, iov[i].buffer.length - skip);
		    q += iov[i].buffer.length - skip;
		    skip = 0;
		}
	    }
    }
    /* copy trailer */
    q = p;
    skip = rrc;
    for (i = 0; i < iov_count; i++) {
	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
	    {
		memcpy(q, iov[i].buffer.value, min(iov[i].buffer.length, skip));
		if (iov[i].buffer.length > skip)
		    break;
		skip -= iov[i].buffer.length;
		q += iov[i].buffer.length;
	    }
    }
    free(p);

    return GSS_S_COMPLETE;
}


OM_uint32
_gssapi_unwrap_cfx_iov(OM_uint32 *minor_status,
		       struct gsskrb5_crypto *ctx,
		       krb5_context context,
		       int *conf_state,
		       gss_qop_t *qop_state,
		       gss_iov_buffer_desc *iov,
		       int iov_count)
{
    OM_uint32 seq_number_lo, seq_number_hi, major_status, junk;
    gss_iov_buffer_desc *header, *trailer, *padding;
    gss_cfx_wrap_token token, ttoken;
    u_char token_flags;
    krb5_error_code ret;
    unsigned usage;
    uint16_t ec, rrc;
    krb5_crypto_iov *data = NULL;
    int i, j;

    *minor_status = 0;

    header = _gss_mg_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
    if (header == NULL) {
	*minor_status = EINVAL;
	return GSS_S_FAILURE;
    }

    /* we check the exact sizes below, this is just a sanity check */
    if (header->buffer.length < sizeof(*token)) {
	_gss_mg_log(5, "cfxunwrap-iov token too short: %ld",
		    (unsigned long)header->buffer.length);
	return GSS_S_DEFECTIVE_TOKEN;
    }

    padding = _gss_mg_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
    if (padding != NULL && padding->buffer.length != 0) {
	*minor_status = EINVAL;
	return GSS_S_FAILURE;
    }

    trailer = _gss_mg_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);

    major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
    if (major_status != GSS_S_COMPLETE) {
	return major_status;
    }

    token = (gss_cfx_wrap_token)header->buffer.value;

    if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04)
	return GSS_S_DEFECTIVE_TOKEN;

    /* Ignore unknown flags */
    token_flags = token->Flags &
	(CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);

    ret = verify_flags(ctx, token_flags, "unwrap-iov");
    if (ret)
	return ret;

    if (token->Filler != 0xFF)
	return GSS_S_DEFECTIVE_TOKEN;

    if (conf_state != NULL)
	*conf_state = (token_flags & CFXSealed) ? 1 : 0;

    ec  = (token->EC[0]  << 8) | token->EC[1];
    rrc = (token->RRC[0] << 8) | token->RRC[1];

    /*
     * Check sequence number
     */
    _gss_mg_decode_be_uint32(&token->SND_SEQ[0], &seq_number_hi);
    _gss_mg_decode_be_uint32(&token->SND_SEQ[4], &seq_number_lo);
    if (seq_number_hi) {
	/* no support for 64-bit sequence numbers */
	*minor_status = ERANGE;
	return GSS_S_UNSEQ_TOKEN;
    }

    ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
    if (ret != 0) {
	*minor_status = 0;
	return ret;
    }

    /*
     * Decrypt and/or verify checksum
     */

    if (ctx->flags & GK5C_ACCEPTOR) {
	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
    } else {
	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
    }

    data = calloc(iov_count + 3, sizeof(data[0]));
    if (data == NULL) {
	*minor_status = ENOMEM;
	major_status = GSS_S_FAILURE;
	goto failure;
    }

    if (token_flags & CFXSealed) {
	size_t k5tsize, k5hsize;

	krb5_crypto_length(context, ctx->crypto,
			   KRB5_CRYPTO_TYPE_HEADER, &k5hsize);
	krb5_crypto_length(context, ctx->crypto,
			   KRB5_CRYPTO_TYPE_TRAILER, &k5tsize);

	/* Check RRC */
	if (trailer == NULL) {
	    size_t gsstsize = k5tsize + sizeof(*token);
	    size_t gsshsize = k5hsize + sizeof(*token);

	    if (rrc != gsstsize) {
		major_status = GSS_S_DEFECTIVE_TOKEN;
		goto failure;
	    }

	    if (GK5C_IS_DCE_STYLE(ctx))
		gsstsize += ec;

	    gsshsize += gsstsize;

	    if (header->buffer.length != gsshsize) {
		major_status = GSS_S_DEFECTIVE_TOKEN;
		goto failure;
	    }
	} else if (trailer->buffer.length != sizeof(*token) + k5tsize) {
	    major_status = GSS_S_DEFECTIVE_TOKEN;
	    goto failure;
	} else if (header->buffer.length != sizeof(*token) + k5hsize) {
	    major_status = GSS_S_DEFECTIVE_TOKEN;
	    goto failure;
	} else if (rrc != 0) {
	    /* go through the slowpath */
	    major_status = unrotate_iov(minor_status, rrc, iov, iov_count);
	    if (major_status)
		goto failure;
	}

	i = 0;
	data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
	data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
	data[i].data.length = k5hsize;
	i++;

	for (j = 0; j < iov_count; i++, j++) {
	    switch (GSS_IOV_BUFFER_TYPE(iov[j].type)) {
	    case GSS_IOV_BUFFER_TYPE_DATA:
		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
		break;
	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
		break;
	    default:
		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
		break;
	    }
	    data[i].data.length = iov[j].buffer.length;
	    data[i].data.data = iov[j].buffer.value;
	}

	/* the encrypted CFX header is in the trailer (or after the
	   header if in DCE mode) */
	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
	if (trailer) {
	    data[i].data.data = trailer->buffer.value;
	} else {
	    data[i].data.data = ((uint8_t *)header->buffer.value) +
		header->buffer.length - k5hsize - k5tsize - ec - sizeof(*token);
	}

	data[i].data.length = ec + sizeof(*token);
	ttoken = (gss_cfx_wrap_token)(((uint8_t *)data[i].data.data) + ec);
	i++;

	/* Kerberos trailer comes after the gss trailer */
	data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
	data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
	data[i].data.length = k5tsize;
	i++;

	ret = krb5_decrypt_iov_ivec(context, ctx->crypto,
				    usage, data, i, NULL);
	if (ret != 0) {
	    *minor_status = ret;
	    major_status = GSS_S_FAILURE;
	    goto failure;
	}

	ttoken->RRC[0] = token->RRC[0];
	ttoken->RRC[1] = token->RRC[1];

	/* Check the integrity of the header */
	if (ct_memcmp(ttoken, token, sizeof(*token)) != 0) {
	    major_status = GSS_S_BAD_MIC;
	    goto failure;
	}
    } else {
	size_t gsstsize = ec;
	size_t gsshsize = sizeof(*token);

	if (trailer == NULL) {
	    /* Check RRC */
	    if (rrc != gsstsize) {
		*minor_status = EINVAL;
		major_status = GSS_S_FAILURE;
		goto failure;
	    }

	    gsshsize += gsstsize;
	    gsstsize = 0;
	} else if (trailer->buffer.length != gsstsize) {
	    major_status = GSS_S_DEFECTIVE_TOKEN;
	    goto failure;
	} else if (rrc != 0) {
	    /* Check RRC */
	    *minor_status = EINVAL;
	    major_status = GSS_S_FAILURE;
	    goto failure;
	}

	if (header->buffer.length != gsshsize) {
	    major_status = GSS_S_DEFECTIVE_TOKEN;
	    goto failure;
	}

	for (i = 0; i < iov_count; i++) {
	    switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
	    case GSS_IOV_BUFFER_TYPE_DATA:
		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
		break;
	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
		break;
	    default:
		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
		break;
	    }
	    data[i].data.length = iov[i].buffer.length;
	    data[i].data.data = iov[i].buffer.value;
	}

	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
	data[i].data.data = header->buffer.value;
	data[i].data.length = sizeof(*token);
	i++;

	data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
	if (trailer) {
	    data[i].data.data = trailer->buffer.value;
	} else {
	    data[i].data.data = (uint8_t *)header->buffer.value +
		sizeof(*token);
	}
	data[i].data.length = ec;
	i++;

	token = (gss_cfx_wrap_token)header->buffer.value;
	token->EC[0]  = 0;
	token->EC[1]  = 0;
	token->RRC[0] = 0;
	token->RRC[1] = 0;

	ret = krb5_verify_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
	if (ret) {
	    *minor_status = ret;
	    major_status = GSS_S_FAILURE;
	    goto failure;
	}
    }

    if (qop_state != NULL) {
	*qop_state = GSS_C_QOP_DEFAULT;
    }

    free(data);

    *minor_status = 0;
    return GSS_S_COMPLETE;

 failure:
    if (data)
	free(data);

    gss_release_iov_buffer(&junk, iov, iov_count);

    return major_status;
}

OM_uint32
_gssapi_wrap_iov_length_cfx(OM_uint32 *minor_status,
			    struct gsskrb5_crypto *ctx,
			    krb5_context context,
			    int conf_req_flag,
			    gss_qop_t qop_req,
			    int *conf_state,
			    gss_iov_buffer_desc *iov,
			    int iov_count)
{
    OM_uint32 major_status;
    size_t size;
    int i;
    gss_iov_buffer_desc *header = NULL;
    gss_iov_buffer_desc *padding = NULL;
    gss_iov_buffer_desc *trailer = NULL;
    size_t gsshsize = 0;
    size_t gsstsize = 0;
    size_t k5hsize = 0;
    size_t k5tsize = 0;

    GSSAPI_KRB5_INIT (&context);
    *minor_status = 0;

    for (size = 0, i = 0; i < iov_count; i++) {
	switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
	case GSS_IOV_BUFFER_TYPE_EMPTY:
	    break;
	case GSS_IOV_BUFFER_TYPE_DATA:
	    size += iov[i].buffer.length;
	    break;
	case GSS_IOV_BUFFER_TYPE_HEADER:
	    if (header != NULL) {
		*minor_status = 0;
		return GSS_S_FAILURE;
	    }
	    header = &iov[i];
	    break;
	case GSS_IOV_BUFFER_TYPE_TRAILER:
	    if (trailer != NULL) {
		*minor_status = 0;
		return GSS_S_FAILURE;
	    }
	    trailer = &iov[i];
	    break;
	case GSS_IOV_BUFFER_TYPE_PADDING:
	    if (padding != NULL) {
		*minor_status = 0;
		return GSS_S_FAILURE;
	    }
	    padding = &iov[i];
	    break;
	case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
	    break;
	default:
	    *minor_status = EINVAL;
	    return GSS_S_FAILURE;
	}
    }

    major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
    if (major_status != GSS_S_COMPLETE) {
	return major_status;
    }

    if (conf_req_flag) {
	size_t k5psize = 0;
	size_t k5pbase = 0;
	size_t k5bsize = 0;
	size_t ec = 0;

	size += sizeof(gss_cfx_wrap_token_desc);

	*minor_status = krb5_crypto_length(context, ctx->crypto,
					   KRB5_CRYPTO_TYPE_HEADER,
					   &k5hsize);
	if (*minor_status)
	    return GSS_S_FAILURE;

	*minor_status = krb5_crypto_length(context, ctx->crypto,
					   KRB5_CRYPTO_TYPE_TRAILER,
					   &k5tsize);
	if (*minor_status)
	    return GSS_S_FAILURE;

	*minor_status = krb5_crypto_length(context, ctx->crypto,
					   KRB5_CRYPTO_TYPE_PADDING,
					   &k5pbase);
	if (*minor_status)
	    return GSS_S_FAILURE;

	if (k5pbase > 1) {
	    k5psize = k5pbase - (size % k5pbase);
	} else {
	    k5psize = 0;
	}

	if (k5psize == 0 && GK5C_IS_DCE_STYLE(ctx)) {
	    *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
						     &k5bsize);
	    if (*minor_status)
		return GSS_S_FAILURE;

	    ec = k5bsize;
	} else {
	    ec = k5psize;
	}

	gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
	gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
    } else {
	*minor_status = krb5_crypto_length(context, ctx->crypto,
					   KRB5_CRYPTO_TYPE_CHECKSUM,
					   &k5tsize);
	if (*minor_status)
	    return GSS_S_FAILURE;

	gsshsize = sizeof(gss_cfx_wrap_token_desc);
	gsstsize = k5tsize;
    }

    if (trailer != NULL) {
	trailer->buffer.length = gsstsize;
    } else {
	gsshsize += gsstsize;
    }

    header->buffer.length = gsshsize;

    if (padding) {
	/* padding is done via EC and is contained in the header or trailer */
	padding->buffer.length = 0;
    }

    if (conf_state) {
	*conf_state = conf_req_flag;
    }

    return GSS_S_COMPLETE;
}

OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
			   struct gsskrb5_crypto *ctx,
			   krb5_context context,
			   int conf_req_flag,
			   const gss_buffer_t input_message_buffer,
			   int *conf_state,
			   gss_buffer_t output_message_buffer)
{
    gss_cfx_wrap_token token;
    krb5_error_code ret;
    unsigned usage;
    krb5_data cipher;
    size_t wrapped_len, cksumsize;
    uint16_t padlength, rrc = 0;
    u_char *p;

    ret = _gsskrb5cfx_wrap_length_cfx(context,
				      ctx, conf_req_flag,
				      input_message_buffer->length,
				      &wrapped_len, &cksumsize, &padlength);
    if (ret != 0) {
	*minor_status = ret;
	return GSS_S_FAILURE;
    }

    /*
     * We actually want to always rotate the encrypted token (if any) and
     * the checksum up to the header, i.e. by the checksum size plus the
     * token size, but since pure Java Kerberos can't handle that, we
     * have to use RRC = 0 in the non-DCE-style case.
     */
    if (GK5C_IS_DCE_STYLE(ctx))
	rrc = (conf_req_flag ? sizeof(*token) : 0) + (uint16_t)cksumsize;
    else
	rrc = 0;

    output_message_buffer->length = wrapped_len;
    output_message_buffer->value = malloc(output_message_buffer->length);
    if (output_message_buffer->value == NULL) {
	*minor_status = ENOMEM;
	return GSS_S_FAILURE;
    }

    p = output_message_buffer->value;
    token = (gss_cfx_wrap_token)p;
    token->TOK_ID[0] = 0x05;
    token->TOK_ID[1] = 0x04;
    token->Flags     = 0;
    token->Filler    = 0xFF;
    if (ctx->flags & GK5C_ACCEPTOR)
	token->Flags |= CFXSentByAcceptor;
    if (ctx->flags & GK5C_ACCEPTOR_SUBKEY)
	token->Flags |= CFXAcceptorSubkey;
    if (conf_req_flag) {
	/*
	 * In Wrap tokens with confidentiality, the EC field is
	 * used to encode the size (in bytes) of the random filler.
	 */
	token->Flags |= CFXSealed;
	token->EC[0] = (padlength >> 8) & 0xFF;
	token->EC[1] = (padlength >> 0) & 0xFF;
    } else {
	/*
	 * In Wrap tokens without confidentiality, the EC field is
	 * used to encode the size (in bytes) of the trailing
	 * checksum.
	 *
	 * This is not used in the checksum calculation itself,
	 * because the checksum length could potentially vary
	 * depending on the data length.
	 */
	token->EC[0] = 0;
	token->EC[1] = 0;
    }

    /*
     * In Wrap tokens that provide for confidentiality, the RRC
     * field in the header contains the hex value 00 00 before
     * encryption.
     *
     * In Wrap tokens that do not provide for confidentiality,
     * both the EC and RRC fields in the appended checksum
     * contain the hex value 00 00 for the purpose of calculating
     * the checksum.
     */
    token->RRC[0] = 0;
    token->RRC[1] = 0;

    _gss_mg_encode_be_uint32(ctx->seqnumhi, &token->SND_SEQ[0]);
    _gss_mg_encode_be_uint32(ctx->seqnumlo, &token->SND_SEQ[4]);

    ctx->seqnumlo++;
    if (ctx->seqnumlo == 0)
	ctx->seqnumhi++;

    /*
     * If confidentiality is requested, the token header is
     * appended to the plaintext before encryption; the resulting
     * token is {"header" | encrypt(plaintext | pad | "header")}.
     *
     * If no confidentiality is requested, the checksum is
     * calculated over the plaintext concatenated with the
     * token header.
     */
    if (ctx->flags & GK5C_ACCEPTOR) {
	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
    } else {
	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
    }

    if (conf_req_flag) {
	/*
	 * Any necessary padding is added here to ensure that the
	 * encrypted token header is always at the end of the
	 * ciphertext.
	 *
	 * The specification does not require that the padding
	 * bytes are initialized.
	 */
	p += sizeof(*token);
	memcpy(p, input_message_buffer->value, input_message_buffer->length);
	memset(p + input_message_buffer->length, 0xFF, padlength);
	memcpy(p + input_message_buffer->length + padlength,
	       token, sizeof(*token));

	ret = krb5_encrypt(context, ctx->crypto,
			   usage, p,
			   input_message_buffer->length + padlength +
			   sizeof(*token),
			   &cipher);
	if (ret != 0) {
	    *minor_status = ret;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    return GSS_S_FAILURE;
	}
	assert(sizeof(*token) + cipher.length == wrapped_len);
	token->RRC[0] = (rrc >> 8) & 0xFF;
	token->RRC[1] = (rrc >> 0) & 0xFF;

	/*
	 * this is really ugly, but needed against windows
	 * for DCERPC, as windows rotates by EC+RRC.
	 */
	if (GK5C_IS_DCE_STYLE(ctx)) {
	    ret = rrc_rotate(cipher.data, cipher.length, rrc+padlength, FALSE);
	} else {
	    ret = rrc_rotate(cipher.data, cipher.length, rrc, FALSE);
	}
	if (ret != 0) {
	    *minor_status = ret;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    return GSS_S_FAILURE;
	}
	memcpy(p, cipher.data, cipher.length);
	krb5_data_free(&cipher);
    } else {
	char *buf;
	Checksum cksum;

	buf = malloc(input_message_buffer->length + sizeof(*token));
	if (buf == NULL) {
	    *minor_status = ENOMEM;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    return GSS_S_FAILURE;
	}
	memcpy(buf, input_message_buffer->value, input_message_buffer->length);
	memcpy(buf + input_message_buffer->length, token, sizeof(*token));

	ret = krb5_create_checksum(context, ctx->crypto,
				   usage, 0, buf,
				   input_message_buffer->length +
				   sizeof(*token),
				   &cksum);
	if (ret != 0) {
	    *minor_status = ret;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    free(buf);
	    return GSS_S_FAILURE;
	}

	free(buf);

	assert(cksum.checksum.length == cksumsize);
	token->EC[0] =  (cksum.checksum.length >> 8) & 0xFF;
	token->EC[1] =  (cksum.checksum.length >> 0) & 0xFF;
	token->RRC[0] = (rrc >> 8) & 0xFF;
	token->RRC[1] = (rrc >> 0) & 0xFF;

	p += sizeof(*token);
	memcpy(p, input_message_buffer->value, input_message_buffer->length);
	memcpy(p + input_message_buffer->length,
	       cksum.checksum.data, cksum.checksum.length);

	ret = rrc_rotate(p,
			 input_message_buffer->length + cksum.checksum.length, rrc, FALSE);
	if (ret != 0) {
	    *minor_status = ret;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    free_Checksum(&cksum);
	    return GSS_S_FAILURE;
	}
	free_Checksum(&cksum);
    }

    if (conf_state != NULL) {
	*conf_state = conf_req_flag;
    }

    *minor_status = 0;
    return GSS_S_COMPLETE;
}

OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
			     struct gsskrb5_crypto *ctx,
			     krb5_context context,
			     const gss_buffer_t input_message_buffer,
			     gss_buffer_t output_message_buffer,
			     int *conf_state,
			     gss_qop_t *qop_state)
{
    gss_cfx_wrap_token_desc token, *tp;
    u_char token_flags;
    krb5_error_code ret;
    unsigned usage;
    krb5_data data;
    uint16_t ec, rrc;
    OM_uint32 seq_number_lo, seq_number_hi;
    size_t len;
    u_char *p;

    *minor_status = 0;

    if (input_message_buffer->length < sizeof(token)) {
	_gss_mg_log(5, "cfxunwrap token too short: %ld",
		    (unsigned long)input_message_buffer->length);
	return GSS_S_DEFECTIVE_TOKEN;
    }

    p = input_message_buffer->value;

    memcpy(&token, p, sizeof(token));

    if (token.TOK_ID[0] != 0x05 || token.TOK_ID[1] != 0x04) {
	_gss_mg_log(5, "cfxunwrap not a WRAP token: 0x%02x%02x",
		    token.TOK_ID[0], token.TOK_ID[1]);
	return GSS_S_DEFECTIVE_TOKEN;
    }

    /* Ignore unknown flags */
    token_flags = token.Flags &
	(CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);

    ret = verify_flags(ctx, token_flags, "unwrap");
    if (ret)
	return ret;

    if (token.Filler != 0xFF) {
	_gss_mg_log(5, "cfxunwrap filler bad: 0x%02x", token.Filler);
	return GSS_S_DEFECTIVE_TOKEN;
    }

    if (conf_state != NULL) {
	*conf_state = (token_flags & CFXSealed) ? 1 : 0;
    }

    ec  = (token.EC[0]  << 8) | token.EC[1];
    rrc = (token.RRC[0] << 8) | token.RRC[1];

    /*
     * Check sequence number
     */
    _gss_mg_decode_be_uint32(&token.SND_SEQ[0], &seq_number_hi);
    _gss_mg_decode_be_uint32(&token.SND_SEQ[4], &seq_number_lo);
    if (seq_number_hi) {
	/* no support for 64-bit sequence numbers */
	*minor_status = ERANGE;
	return GSS_S_UNSEQ_TOKEN;
    }

    ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
    if (ret != 0) {
	*minor_status = 0;
	_gsskrb5_release_buffer(minor_status, output_message_buffer);
	return ret;
    }

    /*
     * Decrypt and/or verify checksum
     */

    if (ctx->flags & GK5C_ACCEPTOR) {
	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
    } else {
	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
    }

    p += sizeof(token);
    len = input_message_buffer->length;
    len -= (p - (u_char *)input_message_buffer->value);

    if (token_flags & CFXSealed) {
	/*
	 * this is really ugly, but needed against windows
	 * for DCERPC, as windows rotates by EC+RRC.
	 */
	if (GK5C_IS_DCE_STYLE(ctx)) {
	    *minor_status = rrc_rotate(p, len, rrc+ec, TRUE);
	} else {
	    *minor_status = rrc_rotate(p, len, rrc, TRUE);
	}
	if (*minor_status != 0) {
	    return GSS_S_FAILURE;
	}

	ret = krb5_decrypt(context, ctx->crypto, usage,
			   p, len, &data);
	if (ret != 0) {
	    *minor_status = ret;
	    return GSS_S_BAD_MIC;
	}

	/* Check that there is room for the pad and token header */
	if (data.length < ec + sizeof(token)) {
	    krb5_data_free(&data);
	    _gss_mg_log(5, "cfxunwrap pad and token not fitting: %lu",
			(unsigned long)data.length);
	    return GSS_S_DEFECTIVE_TOKEN;
	}
	p = data.data;
	p += data.length - sizeof(token);

	/* RRC is unprotected */
	memcpy(token.RRC, ((gss_cfx_wrap_token)p)->RRC,
	       sizeof(((gss_cfx_wrap_token)p)->RRC));

	/* Check the integrity of the header */
	if (ct_memcmp(p, &token, sizeof(token)) != 0) {
	    krb5_data_free(&data);
	    return GSS_S_BAD_MIC;
	}

	output_message_buffer->value = data.data;
	output_message_buffer->length = data.length - ec - sizeof(token);
    } else {
	Checksum cksum;

	/* Rotate by RRC; bogus to do this in-place XXX */
	*minor_status = rrc_rotate(p, len, rrc, TRUE);
	if (*minor_status != 0) {
	    return GSS_S_FAILURE;
	}

	/* Determine checksum type */
	ret = krb5_crypto_get_checksum_type(context,
					    ctx->crypto,
					    &cksum.cksumtype);
	if (ret != 0) {
	    *minor_status = ret;
	    return GSS_S_FAILURE;
	}

	cksum.checksum.length = ec;

	/* Check we have at least as much data as the checksum */
	if (len < cksum.checksum.length) {
	    *minor_status = ERANGE;
	    return GSS_S_BAD_MIC;
	}

	/* Length now is of the plaintext only, no checksum */
	len -= cksum.checksum.length;
	cksum.checksum.data = p + len;

	output_message_buffer->length = len; /* for later */
	output_message_buffer->value = malloc(len + sizeof(token));
	if (output_message_buffer->value == NULL) {
	    *minor_status = ENOMEM;
	    return GSS_S_FAILURE;
	}

	/* Checksum is over (plaintext-data | "header") */
	memcpy(output_message_buffer->value, p, len);
	memcpy((u_char *)output_message_buffer->value + len,
	       &token, sizeof(token));

	/* EC is not included in checksum calculation */
	tp = (gss_cfx_wrap_token)((u_char *)output_message_buffer->value +
				  len);
	memset(tp->EC, 0, sizeof(tp->EC));
	memset(tp->RRC, 0, sizeof(tp->RRC));

	ret = krb5_verify_checksum(context, ctx->crypto,
				   usage,
				   output_message_buffer->value,
				   len + sizeof(token),
				   &cksum);
	if (ret != 0) {
	    *minor_status = ret;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    return GSS_S_BAD_MIC;
	}
    }

    if (qop_state != NULL) {
	*qop_state = GSS_C_QOP_DEFAULT;
    }

    *minor_status = 0;
    return GSS_S_COMPLETE;
}

OM_uint32 _gssapi_mic_cfx(OM_uint32 *minor_status,
			  struct gsskrb5_crypto *ctx,
			  krb5_context context,
			  gss_qop_t qop_req,
			  const gss_buffer_t message_buffer,
			  gss_buffer_t message_token)
{
    gss_cfx_mic_token_desc token;
    krb5_error_code ret;
    unsigned usage;
    Checksum cksum;
    u_char *buf;
    size_t len;

    len = message_buffer->length + sizeof(token);
    buf = malloc(len);
    if (buf == NULL) {
	*minor_status = ENOMEM;
	return GSS_S_FAILURE;
    }

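    /*
     * For reference: the RFC 4121 MIC token built here
     * (gss_cfx_mic_token_desc) is 16 bytes: TOK_ID (0x04 0x04), Flags,
     * five 0xFF filler bytes and an 8-byte big-endian SND_SEQ; the
     * checksum computed over (data | token header) is then appended
     * after it.
     */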
    memset(&token, 0, sizeof(token));
    token.TOK_ID[0] = 0x04;
    token.TOK_ID[1] = 0x04;
    token.Flags = 0;
    if (ctx->flags & GK5C_ACCEPTOR)
	token.Flags |= CFXSentByAcceptor;
    if (ctx->flags & GK5C_ACCEPTOR_SUBKEY)
	token.Flags |= CFXAcceptorSubkey;
    memset(token.Filler, 0xFF, 5);

    _gss_mg_encode_be_uint32(ctx->seqnumhi, &token.SND_SEQ[0]);
    _gss_mg_encode_be_uint32(ctx->seqnumlo, &token.SND_SEQ[4]);

    memcpy(buf, message_buffer->value, message_buffer->length);
    memcpy(buf + message_buffer->length, &token, sizeof(token));

    ctx->seqnumlo++;
    if (ctx->seqnumlo == 0)
	ctx->seqnumhi++;

    if (ctx->flags & GK5C_ACCEPTOR) {
	usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
    } else {
	usage = KRB5_KU_USAGE_INITIATOR_SIGN;
    }

    ret = krb5_create_checksum(context, ctx->crypto,
			       usage, 0, buf, len, &cksum);
    if (ret != 0) {
	*minor_status = ret;
	free(buf);
	return GSS_S_FAILURE;
    }

    /* Determine MIC length */
    message_token->length = sizeof(token) + cksum.checksum.length;
    message_token->value = malloc(message_token->length);
    if (message_token->value == NULL) {
	*minor_status = ENOMEM;
	free_Checksum(&cksum);
	free(buf);
	return GSS_S_FAILURE;
    }

    /* Token is { "header" | get_mic(plaintext-data | "header") } */
    memcpy(message_token->value, &token, sizeof(token));
    memcpy((u_char *)message_token->value + sizeof(token),
	   cksum.checksum.data, cksum.checksum.length);

    free_Checksum(&cksum);
    free(buf);

    *minor_status = 0;
    return GSS_S_COMPLETE;
}

OM_uint32 _gssapi_verify_mic_cfx(OM_uint32 *minor_status,
				 struct gsskrb5_crypto *ctx,
				 krb5_context context,
				 const gss_buffer_t message_buffer,
				 const gss_buffer_t token_buffer,
				 gss_qop_t *qop_state)
{
    gss_cfx_mic_token_desc token;
    u_char token_flags;
    krb5_error_code ret;
    unsigned usage;
    OM_uint32 seq_number_lo, seq_number_hi;
    u_char *buf, *p;
    Checksum cksum;

    *minor_status = 0;

    if (token_buffer->length < sizeof(token)) {
	_gss_mg_log(5, "cfxverifymic token too short: %ld",
		    (unsigned long)token_buffer->length);
	return GSS_S_DEFECTIVE_TOKEN;
    }

    p = token_buffer->value;

    memcpy(&token, p, sizeof(token));

    if (token.TOK_ID[0] != 0x04 || token.TOK_ID[1] != 0x04) {
	_gss_mg_log(5, "cfxverifymic not a MIC token: 0x%02x%02x",
		    token.TOK_ID[0], token.TOK_ID[1]);
	return GSS_S_DEFECTIVE_TOKEN;
    }

    /* Ignore unknown flags */
    token_flags = token.Flags & (CFXSentByAcceptor | CFXAcceptorSubkey);

    ret = verify_flags(ctx, token_flags, "mic");
    if (ret)
	return ret;

    if (ct_memcmp(token.Filler, "\xff\xff\xff\xff\xff", 5) != 0) {
	_gss_mg_log(5, "cfxmic filler bad");
	return GSS_S_DEFECTIVE_TOKEN;
    }

    /*
     * Check sequence number
     */
    _gss_mg_decode_be_uint32(&token.SND_SEQ[0], &seq_number_hi);
    _gss_mg_decode_be_uint32(&token.SND_SEQ[4], &seq_number_lo);
    if (seq_number_hi) {
	*minor_status = ERANGE;
	return GSS_S_UNSEQ_TOKEN;
    }

    ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
    if (ret != 0) {
	*minor_status = 0;
	return ret;
    }

    /*
     * Verify checksum
     */
    ret = krb5_crypto_get_checksum_type(context, ctx->crypto,
					&cksum.cksumtype);
    if (ret != 0) {
	*minor_status = ret;
	return GSS_S_FAILURE;
    }

    cksum.checksum.data = p + sizeof(token);
    cksum.checksum.length = token_buffer->length - sizeof(token);

    if (ctx->flags & GK5C_ACCEPTOR) {
	usage = KRB5_KU_USAGE_INITIATOR_SIGN;
    } else {
	usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
    }

    buf = malloc(message_buffer->length + sizeof(token));
    if (buf == NULL) {
	*minor_status = ENOMEM;
	return GSS_S_FAILURE;
    }
    memcpy(buf, message_buffer->value, message_buffer->length);
    memcpy(buf + message_buffer->length, &token, sizeof(token));

    ret = krb5_verify_checksum(context, ctx->crypto,
			       usage,
			       buf,
			       message_buffer->length + sizeof(token),
			       &cksum);
    if (ret != 0) {
	*minor_status = ret;
	free(buf);
	return GSS_S_BAD_MIC;
    }

    free(buf);

    if (qop_state != NULL) {
	*qop_state = GSS_C_QOP_DEFAULT;
    }

    return GSS_S_COMPLETE;
}