1/*
2 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/*************
30 * These functions implement RPCSEC_GSS security for the NFS client and server.
 * The code is specific to the use of Kerberos v5 as described in RFC 2203 and
 * RFC 2623, using either DES MAC MD5 or HMAC SHA1 DES3-KD protection.
33 *
34 * In contrast to the original AUTH_SYS authentication, RPCSEC_GSS is stateful.
35 * It requires the client and server negotiate a secure connection as part of a
36 * security context. The context state is maintained in client and server structures.
37 * On the client side, each user of an NFS mount is assigned their own context,
38 * identified by UID, on their first use of the mount, and it persists until the
39 * unmount or until the context is renewed.  Each user context has a corresponding
40 * server context which the server maintains until the client destroys it, or
41 * until the context expires.
42 *
43 * The client and server contexts are set up dynamically.  When a user attempts
44 * to send an NFS request, if there is no context for the user, then one is
45 * set up via an exchange of NFS null procedure calls as described in RFC 2203.
46 * During this exchange, the client and server pass a security token that is
47 * forwarded via Mach upcall to the gssd, which invokes the GSS-API to authenticate
48 * the user to the server (and vice-versa). The client and server also receive
49 * a unique session key that can be used to digitally sign the credentials and
50 * verifier or optionally to provide data integrity and/or privacy.
51 *
52 * Once the context is complete, the client and server enter a normal data
53 * exchange phase - beginning with the NFS request that prompted the context
54 * creation. During this phase, the client's RPC header contains an RPCSEC_GSS
55 * credential and verifier, and the server returns a verifier as well.
56 * For simple authentication, the verifier contains a signed checksum of the
57 * RPC header, including the credential.  The server's verifier has a signed
58 * checksum of the current sequence number.
59 *
60 * Each client call contains a sequence number that nominally increases by one
61 * on each request.  The sequence number is intended to prevent replay attacks.
62 * Since the protocol can be used over UDP, there is some allowance for
63 * out-of-sequence requests, so the server checks whether the sequence numbers
64 * are within a sequence "window". If a sequence number is outside the lower
65 * bound of the window, the server silently drops the request. This has some
66 * implications for retransmission. If a request needs to be retransmitted, the
67 * client must bump the sequence number even if the request XID is unchanged.
68 *
69 * When the NFS mount is unmounted, the client sends a "destroy" credential
70 * to delete the server's context for each user of the mount. Since it's
71 * possible for the client to crash or disconnect without sending the destroy
72 * message, the server has a thread that reaps contexts that have been idle
73 * too long.
74 */
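/*
 * For illustration only: a minimal sketch of the replay-window test described
 * above.  This is not the kernel's implementation (the authoritative
 * server-side logic is nfs_gss_svc_seqnum_valid() below); the function and
 * parameter names here are made up for the example.
 *
 *	static int
 *	seq_in_window(uint32_t seqmax, uint32_t seqwin, uint32_t seq)
 *	{
 *		if (seq > seqmax)
 *			return (1);	// ahead of the window: accept, window slides up
 *		if (seqmax - seq >= seqwin)
 *			return (0);	// below the lower bound: drop silently
 *		return (1);		// inside the window: accept, subject to the seen bit
 *	}
 */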
75
76#include <stdint.h>
77#include <sys/param.h>
78#include <sys/systm.h>
79#include <sys/proc.h>
80#include <sys/kauth.h>
81#include <sys/kernel.h>
82#include <sys/mount_internal.h>
83#include <sys/vnode.h>
84#include <sys/ubc.h>
85#include <sys/malloc.h>
86#include <sys/kpi_mbuf.h>
87#include <sys/ucred.h>
88
89#include <kern/host.h>
90#include <libkern/libkern.h>
91
92#include <mach/task.h>
93#include <mach/host_special_ports.h>
94#include <mach/host_priv.h>
95#include <mach/thread_act.h>
96#include <mach/mig_errors.h>
97#include <mach/vm_map.h>
98#include <vm/vm_map.h>
99#include <vm/vm_kern.h>
100#include <gssd/gssd_mach.h>
101
102#include <nfs/rpcv2.h>
103#include <nfs/nfsproto.h>
104#include <nfs/nfs.h>
105#include <nfs/nfsnode.h>
106#include <nfs/nfs_gss.h>
107#include <nfs/nfsmount.h>
108#include <nfs/xdr_subs.h>
109#include <nfs/nfsm_subs.h>
110#include <nfs/nfs_gss.h>
111
112#include "nfs_gss_crypto.h"
113
114#define NFS_GSS_MACH_MAX_RETRIES 3
115
116#define NFS_GSS_DBG(...) NFS_DBG(NFS_FAC_GSS, 7, ## __VA_ARGS__)
117#define NFS_GSS_ISDBG  (NFS_DEBUG_FACILITY &  NFS_FAC_GSS)
118
119typedef struct {
120	int type;
121	union {
122		MD5_DESCBC_CTX m_ctx;
123		HMAC_SHA1_DES3KD_CTX h_ctx;
124	};
125} GSS_DIGEST_CTX;
126
127#define MAX_DIGEST SHA_DIGEST_LENGTH
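/*
 * HASHLEN() bounds-checks ki->hash_len before it is used to size the
 * checksum buffers below: debug kernels panic on an out-of-range value,
 * release kernels log it and clamp to MAX_DIGEST.
 */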
128#ifdef NFS_KERNEL_DEBUG
129#define HASHLEN(ki)  (((ki)->hash_len > MAX_DIGEST) ? \
130		(panic("nfs_gss.c:%d ki->hash_len is invalid = %d\n", __LINE__, (ki)->hash_len), MAX_DIGEST) : (ki)->hash_len)
131#else
132#define HASHLEN(ki)  (((ki)->hash_len > MAX_DIGEST) ? \
133		(printf("nfs_gss.c:%d ki->hash_len is invalid = %d\n", __LINE__, (ki)->hash_len), MAX_DIGEST) : (ki)->hash_len)
134#endif
135
136#if NFSSERVER
137u_long nfs_gss_svc_ctx_hash;
138struct nfs_gss_svc_ctx_hashhead *nfs_gss_svc_ctx_hashtbl;
139lck_mtx_t *nfs_gss_svc_ctx_mutex;
140lck_grp_t *nfs_gss_svc_grp;
141uint32_t nfsrv_gss_context_ttl = GSS_CTX_EXPIRE;
142#define GSS_SVC_CTX_TTL ((uint64_t)max(2*GSS_CTX_PEND, nfsrv_gss_context_ttl) * NSEC_PER_SEC)
143#endif /* NFSSERVER */
144
145#if NFSCLIENT
146lck_grp_t *nfs_gss_clnt_grp;
147int nfs_single_des;
148#endif /* NFSCLIENT */
149
150/*
151 * These octet strings are used to encode/decode ASN.1 tokens
152 * in the RPCSEC_GSS verifiers.
153 */
154static u_char krb5_tokhead[] __attribute__((unused)) = { 0x60, 0x23 };
155       u_char krb5_mech[11] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 };
156static u_char krb5_mic[]  = { 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff };
157static u_char krb5_mic3[]  = { 0x01, 0x01, 0x04, 0x00, 0xff, 0xff, 0xff, 0xff };
158static u_char krb5_wrap[] = { 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff };
159static u_char krb5_wrap3[] = { 0x02, 0x01, 0x04, 0x00, 0x02, 0x00, 0xff, 0xff };
160static u_char iv0[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; // DES MAC Initialization Vector
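/*
 * In the token headers above, the first two octets are the RFC 1964 TOK_ID
 * (01 01 for MIC tokens, 02 01 for wrap tokens), the next two select the
 * signing algorithm (00 00 for DES MAC MD5, 04 00 for HMAC SHA1 DES3-KD),
 * and in wrap tokens the following two select the sealing algorithm
 * (00 00 for DES-CBC, 02 00 for DES3-KD).  The 0xff octets are fillers.
 */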
161
162#define ALG_MIC(ki) (((ki)->type == NFS_GSS_1DES) ? krb5_mic : krb5_mic3)
163#define ALG_WRAP(ki) (((ki)->type == NFS_GSS_1DES) ? krb5_wrap : krb5_wrap3)
164
165/*
166 * The size of the Kerberos v5 ASN.1 token
167 * in the verifier.
168 *
 * Note that the second octet of the krb5_tokhead (0x23) is a
 * DER-encoded size field of variable length.  If the size is
 * 128 bytes or greater, it takes two bytes; three bytes if
 * 256 or greater, and so on.  Since the MIC tokens are
 * separate from the data, their size is always the same: 35 bytes (0x23).
174 * However, the wrap token is different. Its size field includes the
175 * size of the token + the encrypted data that follows. So the size
176 * field may be two, three or four bytes.
177 */
178#define KRB5_SZ_TOKHEAD sizeof(krb5_tokhead)
179#define KRB5_SZ_MECH	sizeof(krb5_mech)
180#define KRB5_SZ_ALG	sizeof(krb5_mic) // 8 - same as krb5_wrap
181#define KRB5_SZ_SEQ	8
182#define KRB5_SZ_EXTRA	3  // a wrap token may be longer by up to this many octets
183#define KRB5_SZ_TOKEN_NOSUM	(KRB5_SZ_TOKHEAD + KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ)
184#define KRB5_SZ_TOKEN(cksumlen)		((cksumlen) + KRB5_SZ_TOKEN_NOSUM)
185#define KRB5_SZ_TOKMAX(cksumlen)	(KRB5_SZ_TOKEN(cksumlen) + KRB5_SZ_EXTRA)
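/*
 * For illustration, the DER definite-length encoding referred to above can
 * be produced as follows.  This is a stand-alone sketch, not the kernel's
 * code (the actual helpers are nfs_gss_der_length_size/put/get below), and
 * lengths of 65536 or more simply add further octets:
 *
 *	static int
 *	der_length_put(u_char *p, int len)
 *	{
 *		if (len < 128) {		// short form: 1 octet
 *			*p = len;
 *			return (1);
 *		} else if (len < 256) {		// 0x81 + 1 length octet
 *			*p++ = 0x81;
 *			*p = len;
 *			return (2);
 *		} else {			// 0x82 + 2 length octets (len < 65536)
 *			*p++ = 0x82;
 *			*p++ = (len >> 8) & 0xff;
 *			*p = len & 0xff;
 *			return (3);
 *		}
 *	}
 */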
186
187#if NFSCLIENT
188static int	nfs_gss_clnt_ctx_find(struct nfsreq *);
189static int	nfs_gss_clnt_ctx_failover(struct nfsreq *);
190static int	nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *);
191static int	nfs_gss_clnt_ctx_init_retry(struct nfsreq *, struct nfs_gss_clnt_ctx *);
192static int	nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *);
193static uint8_t	*nfs_gss_clnt_svcname(struct nfsmount *, gssd_nametype *, uint32_t *);
194static int	nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *);
195static void	nfs_gss_clnt_ctx_remove(struct nfsmount *, struct nfs_gss_clnt_ctx *);
196#endif /* NFSCLIENT */
197
198#if NFSSERVER
199static struct nfs_gss_svc_ctx *nfs_gss_svc_ctx_find(uint32_t);
200static void	nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *);
201static void	nfs_gss_svc_ctx_timer(void *, void *);
202static int	nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *);
203static int	nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t);
204#endif /* NFSSERVER */
205
206static void	host_release_special_port(mach_port_t);
207static mach_port_t host_copy_special_port(mach_port_t);
208static void	nfs_gss_mach_alloc_buffer(u_char *, uint32_t, vm_map_copy_t *);
209static int	nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *);
210static int	nfs_gss_token_get(gss_key_info *ki, u_char *, u_char *, int, uint32_t *, u_char *);
211static int	nfs_gss_token_put(gss_key_info *ki, u_char *, u_char *, int, int, u_char *);
212static int	nfs_gss_der_length_size(int);
213static void	nfs_gss_der_length_put(u_char **, int);
214static int	nfs_gss_der_length_get(u_char **);
215static int	nfs_gss_mchain_length(mbuf_t);
216static int	nfs_gss_append_chain(struct nfsm_chain *, mbuf_t);
217static void	nfs_gss_nfsm_chain(struct nfsm_chain *, mbuf_t);
218static void	nfs_gss_cksum_mchain(gss_key_info *, mbuf_t, u_char *, int, int, u_char *);
219static void	nfs_gss_cksum_chain(gss_key_info *, struct nfsm_chain *, u_char *, int, int, u_char *);
220static void	nfs_gss_cksum_rep(gss_key_info *, uint32_t, u_char *);
221static void	nfs_gss_encrypt_mchain(gss_key_info *, mbuf_t, int, int, int);
222static void	nfs_gss_encrypt_chain(gss_key_info *, struct nfsm_chain *, int, int, int);
223
224static void	gss_digest_Init(GSS_DIGEST_CTX *, gss_key_info *);
225static void	gss_digest_Update(GSS_DIGEST_CTX *, void *, size_t);
226static void	gss_digest_Final(GSS_DIGEST_CTX *, void *);
227static void	gss_des_crypt(gss_key_info *, des_cblock *, des_cblock *,
228				int32_t, des_cblock *, des_cblock *, int, int);
229static int	gss_key_init(gss_key_info *, uint32_t);
230
231#if NFSSERVER
232thread_call_t nfs_gss_svc_ctx_timer_call;
233int nfs_gss_timer_on = 0;
234uint32_t nfs_gss_ctx_count = 0;
235const uint32_t nfs_gss_ctx_max = GSS_SVC_MAXCONTEXTS;
236#endif /* NFSSERVER */
237
238/*
239 * Initialization when NFS starts
240 */
241void
242nfs_gss_init(void)
243{
244#if NFSCLIENT
245	nfs_gss_clnt_grp = lck_grp_alloc_init("rpcsec_gss_clnt", LCK_GRP_ATTR_NULL);
246#endif /* NFSCLIENT */
247
248#if NFSSERVER
249	nfs_gss_svc_grp  = lck_grp_alloc_init("rpcsec_gss_svc",  LCK_GRP_ATTR_NULL);
250
251	nfs_gss_svc_ctx_hashtbl = hashinit(SVC_CTX_HASHSZ, M_TEMP, &nfs_gss_svc_ctx_hash);
252	nfs_gss_svc_ctx_mutex = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
253
254	nfs_gss_svc_ctx_timer_call = thread_call_allocate(nfs_gss_svc_ctx_timer, NULL);
255#endif /* NFSSERVER */
256}
257
258#if NFSCLIENT
259
260/*
261 * Is it OK to fall back to using AUTH_SYS?
262 */
263static int
264nfs_gss_sysok(struct nfsreq *req)
265{
266	struct nfsmount *nmp = req->r_nmp;
267	int i;
268
269	if (req->r_wrongsec) /* Not OK if we're trying to handle a wrongsec error */
270		return (0);
271	if (!nmp->nm_sec.count) /* assume it's OK if we don't have a set of flavors */
272		return (1);
273	for (i=0; i < nmp->nm_sec.count; i++)
274		if (nmp->nm_sec.flavors[i] == RPCAUTH_SYS)
275			return (1);
276	return (0);
277}
278
279/*
280 * Find the context for a particular user.
281 *
282 * If the context doesn't already exist
283 * then create a new context for this user.
284 *
285 * Note that the code allows superuser (uid == 0)
286 * to adopt the context of another user.
287 *
288 * We'll match on the audit session ids, since those
 * processes will have access to the same credential cache.
290 */
291
292#define kauth_cred_getasid(cred) ((cred)->cr_audit.as_aia_p->ai_asid)
293#define kauth_cred_getauid(cred) ((cred)->cr_audit.as_aia_p->ai_auid)
294
295static int
296nfs_gss_clnt_ctx_cred_match(kauth_cred_t cred1, kauth_cred_t cred2)
297{
298	if (kauth_cred_getasid(cred1) == kauth_cred_getasid(cred2))
299		return (1);
300	return (0);
301}
302
303
304static int
305nfs_gss_clnt_ctx_find(struct nfsreq *req)
306{
307	struct nfsmount *nmp = req->r_nmp;
308	struct nfs_gss_clnt_ctx *cp;
309	int error = 0;
310
311	lck_mtx_lock(&nmp->nm_lock);
312	TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
313		if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, req->r_cred)) {
314			if (cp->gss_clnt_flags & GSS_CTX_INVAL)
315				continue;
316			nfs_gss_clnt_ctx_ref(req, cp);
317			lck_mtx_unlock(&nmp->nm_lock);
318			return (0);
319		}
320	}
321
322	if (kauth_cred_getuid(req->r_cred) == 0) {
323		/*
324		 * If superuser is trying to get access, then co-opt
325		 * the first valid context in the list.
326		 * XXX Ultimately, we need to allow superuser to
327		 * go ahead and attempt to set up its own context
		 * in case one has not been set up for it.
329		 */
330		TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
331			if (!(cp->gss_clnt_flags & GSS_CTX_INVAL)) {
332				nfs_gss_clnt_ctx_ref(req, cp);
333				lck_mtx_unlock(&nmp->nm_lock);
334				return (0);
335			}
336		}
337	}
338
339	/*
340	 * Not found - create a new context
341	 */
342
343	/*
344	 * If the thread is async, then it cannot get
345	 * kerberos creds and set up a proper context.
346	 * If no sec= mount option is given, attempt
347	 * to failover to sec=sys.
348	 */
349	if (req->r_thread == NULL) {
350		if (nfs_gss_sysok(req)) {
351			error = nfs_gss_clnt_ctx_failover(req);
352		} else {
353			printf("nfs_gss_clnt_ctx_find: no context for async\n");
354			error = NFSERR_EAUTH;
355		}
356
357		lck_mtx_unlock(&nmp->nm_lock);
358		return (error);
359	}
360
361	MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
362	if (cp == NULL) {
363		lck_mtx_unlock(&nmp->nm_lock);
364		return (ENOMEM);
365	}
366
367	cp->gss_clnt_cred = req->r_cred;
368	kauth_cred_ref(cp->gss_clnt_cred);
369	cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
370	cp->gss_clnt_thread = current_thread();
371	nfs_gss_clnt_ctx_ref(req, cp);
372	TAILQ_INSERT_TAIL(&nmp->nm_gsscl, cp, gss_clnt_entries);
373	lck_mtx_unlock(&nmp->nm_lock);
374
375	error = nfs_gss_clnt_ctx_init_retry(req, cp); // Initialize new context
376	if (error)
377		nfs_gss_clnt_ctx_unref(req);
378
379	/*
380	 * If we failed to set up a Kerberos context for this
381	 * user and no sec= mount option was given, but the
382	 * server indicated that it could support AUTH_SYS, then set
383	 * up a dummy context that allows this user to attempt
384	 * sec=sys calls.
385	 */
386	if (error && nfs_gss_sysok(req) &&
387	    (error != ENXIO) && (error != ETIMEDOUT)) {
388		lck_mtx_lock(&nmp->nm_lock);
389		error = nfs_gss_clnt_ctx_failover(req);
390		lck_mtx_unlock(&nmp->nm_lock);
391	}
392
393	return (error);
394}
395
396/*
397 * Set up a dummy context to allow the use of sec=sys
398 * for this user, if the server allows sec=sys.
399 * The context is valid for GSS_CLNT_SYS_VALID seconds,
400 * so that the user will periodically attempt to fail back
401 * and get a real credential.
402 *
403 * Assumes context list (nm_lock) is locked
404 */
405static int
406nfs_gss_clnt_ctx_failover(struct nfsreq *req)
407{
408	struct nfsmount *nmp = req->r_nmp;
409	struct nfs_gss_clnt_ctx *cp;
410	struct timeval now;
411
412	MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
413	if (cp == NULL)
414		return (ENOMEM);
415
416	cp->gss_clnt_service = RPCSEC_GSS_SVC_SYS;
417	cp->gss_clnt_cred = req->r_cred;
418	kauth_cred_ref(cp->gss_clnt_cred);
419	cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
420	microuptime(&now);
421	cp->gss_clnt_ctime = now.tv_sec;	// time stamp
422	nfs_gss_clnt_ctx_ref(req, cp);
423	TAILQ_INSERT_TAIL(&nmp->nm_gsscl, cp, gss_clnt_entries);
424
425	return (0);
426}
427
428/*
429 * Inserts an RPCSEC_GSS credential into an RPC header.
430 * After the credential is inserted, the code continues
431 * to build the verifier which contains a signed checksum
432 * of the RPC header.
433 */
434int
435nfs_gss_clnt_cred_put(struct nfsreq *req, struct nfsm_chain *nmc, mbuf_t args)
436{
437	struct nfs_gss_clnt_ctx *cp;
438	uint32_t seqnum = 0;
439	int error = 0;
440	int slpflag, recordmark = 0;
441	int start, len, offset = 0;
442	int pad, toklen;
443	struct nfsm_chain nmc_tmp;
444	struct gss_seq *gsp;
445	u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
446	u_char cksum[MAX_DIGEST];
447	struct timeval now;
448	gss_key_info *ki;
449
450	slpflag = (PZERO-1);
451	if (req->r_nmp) {
452		slpflag |= (NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
453		recordmark = (req->r_nmp->nm_sotype == SOCK_STREAM);
454	}
455retry:
456	if (req->r_gss_ctx == NULL) {
457		/*
458		 * Find the context for this user.
459		 * If no context is found, one will
460		 * be created.
461		 */
462		error = nfs_gss_clnt_ctx_find(req);
463		if (error)
464			return (error);
465	}
466	cp = req->r_gss_ctx;
467
468	/*
469	 * If it's a dummy context for a user that's using
470	 * a fallback to sec=sys, then just return an error
471	 * so rpchead can encode an RPCAUTH_UNIX cred.
472	 */
473	if (cp->gss_clnt_service == RPCSEC_GSS_SVC_SYS) {
474		/*
475		 * The dummy context is valid for just
476		 * GSS_CLNT_SYS_VALID seconds.  If the context
477		 * is older than this, mark it invalid and try
478		 * again to get a real one.
479		 */
480		lck_mtx_lock(cp->gss_clnt_mtx);
481		microuptime(&now);
482		if (now.tv_sec > cp->gss_clnt_ctime + GSS_CLNT_SYS_VALID) {
483			cp->gss_clnt_flags |= GSS_CTX_INVAL;
484			lck_mtx_unlock(cp->gss_clnt_mtx);
485			nfs_gss_clnt_ctx_unref(req);
486			goto retry;
487		}
488		lck_mtx_unlock(cp->gss_clnt_mtx);
489		return (ENEEDAUTH);
490	}
491
492	/*
493	 * If the context thread isn't null, then the context isn't
494	 * yet complete and is for the exclusive use of the thread
495	 * doing the context setup. Wait until the context thread
496	 * is null.
497	 */
498	lck_mtx_lock(cp->gss_clnt_mtx);
499	if (cp->gss_clnt_thread && cp->gss_clnt_thread != current_thread()) {
500		cp->gss_clnt_flags |= GSS_NEEDCTX;
501		msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "ctxwait", NULL);
502		slpflag &= ~PCATCH;
503		if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0)))
504			return (error);
505		nfs_gss_clnt_ctx_unref(req);
506		goto retry;
507	}
508	lck_mtx_unlock(cp->gss_clnt_mtx);
509
510	ki = &cp->gss_clnt_kinfo;
511	if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) {
512		/*
513		 * Get a sequence number for this request.
514		 * Check whether the oldest request in the window is complete.
515		 * If it's still pending, then wait until it's done before
516		 * we allocate a new sequence number and allow this request
517		 * to proceed.
518		 */
519		lck_mtx_lock(cp->gss_clnt_mtx);
520		while (win_getbit(cp->gss_clnt_seqbits,
521			((cp->gss_clnt_seqnum - cp->gss_clnt_seqwin) + 1) % cp->gss_clnt_seqwin)) {
522			cp->gss_clnt_flags |= GSS_NEEDSEQ;
523			msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "seqwin", NULL);
524			slpflag &= ~PCATCH;
525			if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
526				return (error);
527			}
528			lck_mtx_lock(cp->gss_clnt_mtx);
529			if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
				/* Renewed while we were waiting */
531				lck_mtx_unlock(cp->gss_clnt_mtx);
532				nfs_gss_clnt_ctx_unref(req);
533				goto retry;
534			}
535		}
536		seqnum = ++cp->gss_clnt_seqnum;
537		win_setbit(cp->gss_clnt_seqbits, seqnum % cp->gss_clnt_seqwin);
538		lck_mtx_unlock(cp->gss_clnt_mtx);
539
540		MALLOC(gsp, struct gss_seq *, sizeof(*gsp), M_TEMP, M_WAITOK|M_ZERO);
541		if (gsp == NULL)
542			return (ENOMEM);
543		gsp->gss_seqnum = seqnum;
544		SLIST_INSERT_HEAD(&req->r_gss_seqlist, gsp, gss_seqnext);
545	}
546
547	/* Insert the credential */
548	nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
549	nfsm_chain_add_32(error, nmc, 5 * NFSX_UNSIGNED + cp->gss_clnt_handle_len);
550	nfsm_chain_add_32(error, nmc, RPCSEC_GSS_VERS_1);
551	nfsm_chain_add_32(error, nmc, cp->gss_clnt_proc);
552	nfsm_chain_add_32(error, nmc, seqnum);
553	nfsm_chain_add_32(error, nmc, cp->gss_clnt_service);
554	nfsm_chain_add_32(error, nmc, cp->gss_clnt_handle_len);
555	if (cp->gss_clnt_handle_len > 0) {
		if (cp->gss_clnt_handle == NULL)
			return (EBADRPC);
558		nfsm_chain_add_opaque(error, nmc, cp->gss_clnt_handle, cp->gss_clnt_handle_len);
559	}
560	if (error)
		return (error);
562	/*
563	 * Now add the verifier
564	 */
565	if (cp->gss_clnt_proc == RPCSEC_GSS_INIT ||
566		cp->gss_clnt_proc == RPCSEC_GSS_CONTINUE_INIT) {
567		/*
568		 * If the context is still being created
569		 * then use a null verifier.
570		 */
571		nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);	// flavor
572		nfsm_chain_add_32(error, nmc, 0);		// length
573		nfsm_chain_build_done(error, nmc);
574		if (!error)
575			nfs_gss_append_chain(nmc, args);
576		return (error);
577	}
578
579	offset = recordmark ? NFSX_UNSIGNED : 0; // record mark
580	nfsm_chain_build_done(error, nmc);
581	nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), offset, 0, cksum);
582
583	toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 1, 0, cksum);
584	nfsm_chain_add_32(error, nmc, RPCSEC_GSS);	// flavor
585	nfsm_chain_add_32(error, nmc, toklen);		// length
586	nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
587	nfsm_chain_build_done(error, nmc);
588	if (error)
589		return (error);
590
591	/*
592	 * Now we may have to compute integrity or encrypt the call args
593	 * per RFC 2203 Section 5.3.2
594	 */
595	switch (cp->gss_clnt_service) {
596	case RPCSEC_GSS_SVC_NONE:
597		nfs_gss_append_chain(nmc, args);
598		break;
599	case RPCSEC_GSS_SVC_INTEGRITY:
600		len = nfs_gss_mchain_length(args);	// Find args length
601		req->r_gss_arglen = len;		// Stash the args len
602		len += NFSX_UNSIGNED;			// Add seqnum length
603		nfsm_chain_add_32(error, nmc, len);	// and insert it
604		start = nfsm_chain_offset(nmc);
605		nfsm_chain_add_32(error, nmc, seqnum);	// Insert seqnum
606		req->r_gss_argoff = nfsm_chain_offset(nmc); // Offset to args
607		nfsm_chain_build_done(error, nmc);
608		if (error)
609			return (error);
610		nfs_gss_append_chain(nmc, args);	// Append the args mbufs
611
612		/* Now compute a checksum over the seqnum + args */
613		nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, len, cksum);
614
615		/* Insert it into a token and append to the request */
616		toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 1, 0, cksum);
617		nfsm_chain_finish_mbuf(error, nmc);	// force checksum into new mbuf
618		nfsm_chain_add_32(error, nmc, toklen);
619		nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
620		nfsm_chain_build_done(error, nmc);
621		break;
622	case RPCSEC_GSS_SVC_PRIVACY:
623		/* Prepend a new mbuf with the confounder & sequence number */
624		nfsm_chain_build_alloc_init(error, &nmc_tmp, 3 * NFSX_UNSIGNED);
625		nfsm_chain_add_32(error, &nmc_tmp, random());	// confounder bytes 1-4
		nfsm_chain_add_32(error, &nmc_tmp, random());	// confounder bytes 5-8
627		nfsm_chain_add_32(error, &nmc_tmp, seqnum);
628		nfsm_chain_build_done(error, &nmc_tmp);
629		if (error)
630			return (error);
631		nfs_gss_append_chain(&nmc_tmp, args);		// Append the args mbufs
632
633		len = nfs_gss_mchain_length(args);		// Find args length
634		len += 3 * NFSX_UNSIGNED;			// add confounder & seqnum
635		req->r_gss_arglen = len;			// Stash length
636
637		/*
638		 * Append a pad trailer - per RFC 1964 section 1.2.2.3
639		 * Since XDR data is always 32-bit aligned, it
640		 * needs to be padded either by 4 bytes or 8 bytes.
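		 * RFC 1964 pads with N octets each of value N; since the XDR
		 * args are already 4-byte aligned, N here is either 4 or 8.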
641		 */
642		nfsm_chain_finish_mbuf(error, &nmc_tmp);	// force padding into new mbuf
643		if (len % 8 > 0) {
644			nfsm_chain_add_32(error, &nmc_tmp, 0x04040404);
645			len += NFSX_UNSIGNED;
646		} else {
647			nfsm_chain_add_32(error, &nmc_tmp, 0x08080808);
648			nfsm_chain_add_32(error, &nmc_tmp, 0x08080808);
649			len +=  2 * NFSX_UNSIGNED;
650		}
651		nfsm_chain_build_done(error, &nmc_tmp);
652
653		/* Now compute a checksum over the confounder + seqnum + args */
654		nfs_gss_cksum_chain(ki, &nmc_tmp, ALG_WRAP(ki), 0, len, cksum);
655
656		/* Insert it into a token */
657		toklen = nfs_gss_token_put(ki, ALG_WRAP(ki), tokbuf, 1, len, cksum);
658		nfsm_chain_add_32(error, nmc, toklen + len);	// token + args length
659		nfsm_chain_add_opaque_nopad(error, nmc, tokbuf, toklen);
660		req->r_gss_argoff = nfsm_chain_offset(nmc);	// Stash offset
661		nfsm_chain_build_done(error, nmc);
662		if (error)
663			return (error);
664		nfs_gss_append_chain(nmc, nmc_tmp.nmc_mhead);	// Append the args mbufs
665
666		/* Finally, encrypt the args */
667		nfs_gss_encrypt_chain(ki, &nmc_tmp, 0, len, DES_ENCRYPT);
668
669		/* Add null XDR pad if the ASN.1 token misaligned the data */
670		pad = nfsm_pad(toklen + len);
671		if (pad > 0) {
672			nfsm_chain_add_opaque_nopad(error, nmc, iv0, pad);
673			nfsm_chain_build_done(error, nmc);
674		}
675		break;
676	}
677
678	return (error);
679}
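/*
 * For reference, the credential body built above follows RFC 2203 section
 * 5.3.1: four 32-bit fields plus an XDR opaque handle, which is why its
 * length is 5 * NFSX_UNSIGNED + gss_clnt_handle_len (the fifth word being
 * the handle's own length field):
 *
 *	version    (4 bytes)   RPCSEC_GSS_VERS_1
 *	gss_proc   (4 bytes)   DATA, INIT, CONTINUE_INIT or DESTROY
 *	seq_num    (4 bytes)   per-request sequence number
 *	service    (4 bytes)   none, integrity or privacy
 *	handle     (4 + n)     opaque context handle returned by the server
 */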
680
681/*
682 * When receiving a reply, the client checks the verifier
683 * returned by the server. Check that the verifier is the
684 * correct type, then extract the sequence number checksum
685 * from the token in the credential and compare it with a
686 * computed checksum of the sequence number in the request
687 * that was sent.
688 */
689int
690nfs_gss_clnt_verf_get(
691	struct nfsreq *req,
692	struct nfsm_chain *nmc,
693	uint32_t verftype,
694	uint32_t verflen,
695	uint32_t *accepted_statusp)
696{
697	u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
698	u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
699	uint32_t seqnum = 0;
700	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
701	struct nfsm_chain nmc_tmp;
702	struct gss_seq *gsp;
703	uint32_t reslen, start, cksumlen, toklen;
704	int error = 0;
705	gss_key_info *ki = &cp->gss_clnt_kinfo;
706
707	reslen = cksumlen = 0;
708	*accepted_statusp = 0;
709
710	if (cp == NULL)
711		return (NFSERR_EAUTH);
712	/*
713	 * If it's not an RPCSEC_GSS verifier, then it has to
714	 * be a null verifier that resulted from either
715	 * a CONTINUE_NEEDED reply during context setup or
716	 * from the reply to an AUTH_UNIX call from a dummy
717	 * context that resulted from a fallback to sec=sys.
718	 */
719	if (verftype != RPCSEC_GSS) {
720		if (verftype != RPCAUTH_NULL)
721			return (NFSERR_EAUTH);
722		if (cp->gss_clnt_flags & GSS_CTX_COMPLETE &&
723			cp->gss_clnt_service != RPCSEC_GSS_SVC_SYS)
724			return (NFSERR_EAUTH);
725		if (verflen > 0)
726			nfsm_chain_adv(error, nmc, nfsm_rndup(verflen));
727		nfsm_chain_get_32(error, nmc, *accepted_statusp);
728		return (error);
729	}
730
731	/*
732	 * If we received an RPCSEC_GSS verifier but the
733	 * context isn't yet complete, then it must be
734	 * the context complete message from the server.
735	 * The verifier will contain an encrypted checksum
736	 * of the window but we don't have the session key
737	 * yet so we can't decrypt it. Stash the verifier
738	 * and check it later in nfs_gss_clnt_ctx_init() when
739	 * the context is complete.
740	 */
741	if (!(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) {
742		MALLOC(cp->gss_clnt_verf, u_char *, verflen, M_TEMP, M_WAITOK|M_ZERO);
743		if (cp->gss_clnt_verf == NULL)
744			return (ENOMEM);
745		nfsm_chain_get_opaque(error, nmc, verflen, cp->gss_clnt_verf);
746		nfsm_chain_get_32(error, nmc, *accepted_statusp);
747		return (error);
748	}
749
750	if (verflen != KRB5_SZ_TOKEN(ki->hash_len))
751		return (NFSERR_EAUTH);
752
753	/*
754	 * Get the 8 octet sequence number
755	 * checksum out of the verifier token.
756	 */
757	nfsm_chain_get_opaque(error, nmc, verflen, tokbuf);
758	if (error)
759		goto nfsmout;
760	error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 0, NULL, cksum1);
761	if (error)
762		goto nfsmout;
763
764	/*
765	 * Search the request sequence numbers for this reply, starting
766	 * with the most recent, looking for a checksum that matches
767	 * the one in the verifier returned by the server.
768	 */
769	SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
770		nfs_gss_cksum_rep(ki, gsp->gss_seqnum, cksum2);
771		if (bcmp(cksum1, cksum2, HASHLEN(ki)) == 0)
772			break;
773	}
774	if (gsp == NULL)
775		return (NFSERR_EAUTH);
776
777	/*
778	 * Get the RPC accepted status
779	 */
780	nfsm_chain_get_32(error, nmc, *accepted_statusp);
781	if (*accepted_statusp != RPC_SUCCESS)
782		return (0);
783
784	/*
785	 * Now we may have to check integrity or decrypt the results
786	 * per RFC 2203 Section 5.3.2
787	 */
788	switch (cp->gss_clnt_service) {
789	case RPCSEC_GSS_SVC_NONE:
790		/* nothing to do */
791		break;
792	case RPCSEC_GSS_SVC_INTEGRITY:
793		/*
794		 * Here's what we expect in the integrity results:
795		 *
796		 * - length of seq num + results (4 bytes)
797		 * - sequence number (4 bytes)
798		 * - results (variable bytes)
799		 * - length of checksum token (37)
800		 * - checksum of seqnum + results (37 bytes)
801		 */
802		nfsm_chain_get_32(error, nmc, reslen);		// length of results
803		if (reslen > NFS_MAXPACKET) {
804			error = EBADRPC;
805			goto nfsmout;
806		}
807
808		/* Compute a checksum over the sequence number + results */
809		start = nfsm_chain_offset(nmc);
810		nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, reslen, cksum1);
811
812		/*
813		 * Get the sequence number prepended to the results
814		 * and compare it against the list in the request.
815		 */
816		nfsm_chain_get_32(error, nmc, seqnum);
817		SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
818			if (seqnum == gsp->gss_seqnum)
819				break;
820		}
821		if (gsp == NULL) {
822			error = EBADRPC;
823			goto nfsmout;
824		}
825
826		/*
827		 * Advance to the end of the results and
828		 * fetch the checksum computed by the server.
829		 */
830		nmc_tmp = *nmc;
831		reslen -= NFSX_UNSIGNED;			// already skipped seqnum
832		nfsm_chain_adv(error, &nmc_tmp, reslen);	// skip over the results
833		nfsm_chain_get_32(error, &nmc_tmp, cksumlen);	// length of checksum
834		if (cksumlen != KRB5_SZ_TOKEN(ki->hash_len)) {
835			error = EBADRPC;
836			goto nfsmout;
837		}
838		nfsm_chain_get_opaque(error, &nmc_tmp, cksumlen, tokbuf);
839		if (error)
840			goto nfsmout;
841		error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 0, NULL, cksum2);
842		if (error)
843			goto nfsmout;
844
845		/* Verify that the checksums are the same */
846		if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
847			error = EBADRPC;
848			goto nfsmout;
849		}
850		break;
851	case RPCSEC_GSS_SVC_PRIVACY:
852		/*
853		 * Here's what we expect in the privacy results:
854		 *
855		 * - length of confounder + seq num + token + results
856		 * - wrap token (37-40 bytes)
857		 * - confounder (8 bytes)
858		 * - sequence number (4 bytes)
859		 * - results (encrypted)
860		 */
861		nfsm_chain_get_32(error, nmc, reslen);		// length of results
862		if (reslen > NFS_MAXPACKET) {
863			error = EBADRPC;
864			goto nfsmout;
865		}
866
867		/* Get the token that prepends the encrypted results */
868		nfsm_chain_get_opaque(error, nmc, KRB5_SZ_TOKMAX(ki->hash_len), tokbuf);
869		if (error)
870			goto nfsmout;
871		error = nfs_gss_token_get(ki, ALG_WRAP(ki), tokbuf, 0,
872			&toklen, cksum1);
873		if (error)
874			goto nfsmout;
875		nfsm_chain_reverse(nmc, nfsm_pad(toklen));
876		reslen -= toklen;				// size of confounder + seqnum + results
877
878		/* decrypt the confounder + sequence number + results */
879		start = nfsm_chain_offset(nmc);
880		nfs_gss_encrypt_chain(ki, nmc, start, reslen, DES_DECRYPT);
881
882		/* Compute a checksum over the confounder + sequence number + results */
883		nfs_gss_cksum_chain(ki, nmc, ALG_WRAP(ki), start, reslen, cksum2);
884
885		/* Verify that the checksums are the same */
886		if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
887			error = EBADRPC;
888			goto nfsmout;
889		}
890
891		nfsm_chain_adv(error, nmc, 8);	// skip over the confounder
892
893		/*
894		 * Get the sequence number prepended to the results
895		 * and compare it against the list in the request.
896		 */
897		nfsm_chain_get_32(error, nmc, seqnum);
898		SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
899			if (seqnum == gsp->gss_seqnum)
900				break;
901		}
902		if (gsp == NULL) {
903			error = EBADRPC;
904			goto nfsmout;
905		}
906
907		break;
908	}
909nfsmout:
910	return (error);
911}
912
913/*
914 * An RPCSEC_GSS request with no integrity or privacy consists
915 * of just the header mbufs followed by the arg mbufs.
916 *
 * However, integrity and privacy both add trailer mbufs to the args,
918 * which means we have to do some work to restore the arg mbuf
919 * chain to its previous state in case we need to retransmit.
920 *
921 * The location and length of the args is marked by two fields
922 * in the request structure: r_gss_argoff and r_gss_arglen,
923 * which are stashed when the NFS request is built.
924 */
925int
926nfs_gss_clnt_args_restore(struct nfsreq *req)
927{
928	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
929	struct nfsm_chain mchain, *nmc = &mchain;
930	int len, error = 0;
931
932	if (cp == NULL)
933		return (NFSERR_EAUTH);
934
935	if ((cp->gss_clnt_flags & GSS_CTX_COMPLETE) == 0)
936		return (ENEEDAUTH);
937
938	nfsm_chain_dissect_init(error, nmc, req->r_mhead);	// start at RPC header
939	nfsm_chain_adv(error, nmc, req->r_gss_argoff);		// advance to args
940	if (error)
941		return (error);
942
943	switch (cp->gss_clnt_service) {
944	case RPCSEC_GSS_SVC_NONE:
945		/* nothing to do */
946		break;
947	case RPCSEC_GSS_SVC_INTEGRITY:
948		/*
949		 * All we have to do here is remove the appended checksum mbufs.
950		 * We know that the checksum starts in a new mbuf beyond the end
951		 * of the args.
952		 */
953		nfsm_chain_adv(error, nmc, req->r_gss_arglen);	// adv to last args mbuf
954		if (error)
955			return (error);
956
957		mbuf_freem(mbuf_next(nmc->nmc_mcur));		// free the cksum mbuf
958		error = mbuf_setnext(nmc->nmc_mcur, NULL);
959		break;
960	case RPCSEC_GSS_SVC_PRIVACY:
961		/*
		 * The args are encrypted along with a prepended confounder and seqnum.
		 * First we decrypt the confounder, seqnum and args, then skip to the
964		 * final mbuf of the args.
965		 * The arglen includes 8 bytes of confounder and 4 bytes of seqnum.
966		 * Finally, we remove between 4 and 8 bytes of encryption padding
967		 * as well as any alignment padding in the trailing mbuf.
968		 */
969		len = req->r_gss_arglen;
970		len += len % 8 > 0 ? 4 : 8;			// add DES padding length
971		nfs_gss_encrypt_chain(&cp->gss_clnt_kinfo, nmc,
972					req->r_gss_argoff, len, DES_DECRYPT);
973		nfsm_chain_adv(error, nmc, req->r_gss_arglen);
974		if (error)
975			return (error);
976		mbuf_freem(mbuf_next(nmc->nmc_mcur));		// free the pad mbuf
977		error = mbuf_setnext(nmc->nmc_mcur, NULL);
978		break;
979	}
980
981	return (error);
982}
983
984/*
985 * This function sets up  a new context on the client.
986 * Context setup alternates upcalls to the gssd with NFS nullproc calls
987 * to the server.  Each of these calls exchanges an opaque token, obtained
988 * via the gssd's calls into the GSS-API on either the client or the server.
989 * This cycle of calls ends when the client's upcall to the gssd and the
990 * server's response both return GSS_S_COMPLETE.  At this point, the client
991 * should have its session key and a handle that it can use to refer to its
992 * new context on the server.
993 */
994static int
995nfs_gss_clnt_ctx_init(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
996{
997	struct nfsmount *nmp = req->r_nmp;
998	int client_complete = 0;
999	int server_complete = 0;
1000	u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
1001	int error = 0;
1002	struct timeval now;
1003	gss_key_info *ki = &cp->gss_clnt_kinfo;
1004
1005	/* Initialize a new client context */
1006
1007	cp->gss_clnt_svcname = nfs_gss_clnt_svcname(nmp, &cp->gss_clnt_svcnt, &cp->gss_clnt_svcnamlen);
1008	if (cp->gss_clnt_svcname == NULL) {
1009		error = NFSERR_EAUTH;
1010		goto nfsmout;
1011	}
1012
1013	cp->gss_clnt_proc = RPCSEC_GSS_INIT;
1014
1015	cp->gss_clnt_service =
1016		req->r_auth == RPCAUTH_KRB5  ? RPCSEC_GSS_SVC_NONE :
1017		req->r_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY :
1018		req->r_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0;
1019
1020	cp->gss_clnt_gssd_flags = (nfs_single_des ? GSSD_NFS_1DES : 0);
1021	/*
1022	 * Now loop around alternating gss_init_sec_context and
1023	 * gss_accept_sec_context upcalls to the gssd on the client
1024	 * and server side until the context is complete - or fails.
1025	 */
1026	for (;;) {
1027
1028retry:
1029		/* Upcall to the gss_init_sec_context in the gssd */
1030		error = nfs_gss_clnt_gssd_upcall(req, cp);
1031		if (error)
1032			goto nfsmout;
1033
1034		if (cp->gss_clnt_major == GSS_S_COMPLETE) {
1035			client_complete = 1;
1036			if (server_complete)
1037				break;
1038		} else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
1039			error = NFSERR_EAUTH;
1040			goto nfsmout;
1041		}
1042
1043		/*
1044		 * Pass the token to the server.
1045		 */
1046		error = nfs_gss_clnt_ctx_callserver(req, cp);
1047		if (error) {
1048			if (error == ENEEDAUTH && cp->gss_clnt_proc == RPCSEC_GSS_INIT &&
1049				(cp->gss_clnt_gssd_flags & (GSSD_RESTART | GSSD_NFS_1DES)) == 0) {
1050				NFS_GSS_DBG("Retrying with single DES for req %p\n", req);
1051				cp->gss_clnt_gssd_flags = (GSSD_RESTART | GSSD_NFS_1DES);
1052				if (cp->gss_clnt_token)
1053					FREE(cp->gss_clnt_token, M_TEMP);
1054				cp->gss_clnt_token = NULL;
1055				cp->gss_clnt_tokenlen = 0;
1056				goto retry;
1057			}
1058			// Reset flags, if error = ENEEDAUTH we will try 3des again
1059			cp->gss_clnt_gssd_flags = 0;
1060			goto nfsmout;
1061		}
1062		if (cp->gss_clnt_major == GSS_S_COMPLETE) {
1063			server_complete = 1;
1064			if (client_complete)
1065				break;
1066		} else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
1067			error = NFSERR_EAUTH;
1068			goto nfsmout;
1069		}
1070
1071		cp->gss_clnt_proc = RPCSEC_GSS_CONTINUE_INIT;
1072	}
1073
1074	/*
1075	 * The context is apparently established successfully
1076	 */
1077	lck_mtx_lock(cp->gss_clnt_mtx);
1078	cp->gss_clnt_flags |= GSS_CTX_COMPLETE;
1079	lck_mtx_unlock(cp->gss_clnt_mtx);
1080	cp->gss_clnt_proc = RPCSEC_GSS_DATA;
1081	microuptime(&now);
1082	cp->gss_clnt_ctime = now.tv_sec;	// time stamp
1083
1084
1085	/*
1086	 * Compute checksum of the server's window
1087	 */
1088	nfs_gss_cksum_rep(ki, cp->gss_clnt_seqwin, cksum1);
1089
1090	/*
1091	 * and see if it matches the one in the
1092	 * verifier the server returned.
1093	 */
1094	error = nfs_gss_token_get(ki, ALG_MIC(ki), cp->gss_clnt_verf, 0,
1095		NULL, cksum2);
1096	FREE(cp->gss_clnt_verf, M_TEMP);
1097	cp->gss_clnt_verf = NULL;
1098
1099	if (error || bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
1100		error = NFSERR_EAUTH;
1101		goto nfsmout;
1102	}
1103
1104	/*
1105	 * Set an initial sequence number somewhat randomized.
1106	 * Start small so we don't overflow GSS_MAXSEQ too quickly.
1107	 * Add the size of the sequence window so seqbits arithmetic
1108	 * doesn't go negative.
1109	 */
1110	cp->gss_clnt_seqnum = (random() & 0xffff) + cp->gss_clnt_seqwin;
1111
1112	/*
1113	 * Allocate a bitmap to keep track of which requests
1114	 * are pending within the sequence number window.
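	 * One bit per window slot: (gss_clnt_seqwin + 7) / 8 bytes, rounded
	 * up to a whole 4-byte XDR quantity by nfsm_rndup().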
1115	 */
1116	MALLOC(cp->gss_clnt_seqbits, uint32_t *,
1117		nfsm_rndup((cp->gss_clnt_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO);
1118	if (cp->gss_clnt_seqbits == NULL)
1119		error = NFSERR_EAUTH;
1120nfsmout:
1121 	/*
1122	 * If the error is ENEEDAUTH we're not done, so no need
1123	 * to wake up other threads again. This thread will retry in
1124	 * the find or renew routines.
1125	 */
1126	if (error == ENEEDAUTH)
1127		return (error);
1128
1129	/*
1130	 * If there's an error, just mark it as invalid.
1131	 * It will be removed when the reference count
1132	 * drops to zero.
1133	 */
1134	lck_mtx_lock(cp->gss_clnt_mtx);
1135	if (error)
1136		cp->gss_clnt_flags |= GSS_CTX_INVAL;
1137
1138	/*
1139	 * Wake any threads waiting to use the context
1140	 */
1141	cp->gss_clnt_thread = NULL;
1142	if (cp->gss_clnt_flags & GSS_NEEDCTX) {
1143		cp->gss_clnt_flags &= ~GSS_NEEDCTX;
1144		wakeup(cp);
1145	}
1146	lck_mtx_unlock(cp->gss_clnt_mtx);
1147
1148	return (error);
1149}
1150
1151/*
1152 * This function calls nfs_gss_clnt_ctx_init() to set up a new context.
1153 * But if there's a failure in trying to establish the context it keeps
1154 * retrying at progressively longer intervals in case the failure is
1155 * due to some transient condition.  For instance, the server might be
1156 * failing the context setup because directory services is not coming
1157 * up in a timely fashion.
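 *
 * The wait between attempts starts at NFS_TRYLATERDEL seconds and doubles
 * after each failed try, capping at 60 seconds; on a soft mount we give up
 * with ETIMEDOUT once the retry count exceeds nm_retry.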
1158 */
1159static int
1160nfs_gss_clnt_ctx_init_retry(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
1161{
1162	struct nfsmount *nmp = req->r_nmp;
1163	struct timeval now;
1164	time_t waituntil;
1165	int error, slpflag;
1166	int retries = 0;
1167	int timeo = NFS_TRYLATERDEL;
1168
1169	if (nmp == NULL) {
1170		error = ENXIO;
1171		goto bad;
1172	}
1173
1174	/* For an "intr" mount allow a signal to interrupt the retries */
1175	slpflag = (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
1176
1177	while ((error = nfs_gss_clnt_ctx_init(req, cp)) == ENEEDAUTH) {
1178		microuptime(&now);
1179		waituntil = now.tv_sec + timeo;
1180		while (now.tv_sec < waituntil) {
1181			tsleep(NULL, PSOCK | slpflag, "nfs_gss_clnt_ctx_init_retry", hz);
1182			slpflag = 0;
1183			error = nfs_sigintr(req->r_nmp, req, current_thread(), 0);
1184			if (error)
1185				goto bad;
1186			microuptime(&now);
1187		}
1188
1189		retries++;
1190		/* If it's a soft mount just give up after a while */
1191		if (NMFLAG(nmp, SOFT) && (retries > nmp->nm_retry)) {
1192			error = ETIMEDOUT;
1193			goto bad;
1194		}
1195		timeo *= 2;
1196		if (timeo > 60)
1197			timeo = 60;
1198	}
1199
1200	if (error == 0)
1201		return 0;	// success
1202bad:
1203	/*
1204	 * Give up on this context
1205	 */
1206	lck_mtx_lock(cp->gss_clnt_mtx);
1207	cp->gss_clnt_flags |= GSS_CTX_INVAL;
1208
1209	/*
1210	 * Wake any threads waiting to use the context
1211	 */
1212	cp->gss_clnt_thread = NULL;
1213	if (cp->gss_clnt_flags & GSS_NEEDCTX) {
1214		cp->gss_clnt_flags &= ~GSS_NEEDCTX;
1215		wakeup(cp);
1216	}
1217	lck_mtx_unlock(cp->gss_clnt_mtx);
1218
1219	return error;
1220}
1221
1222/*
1223 * Call the NFS server using a null procedure for context setup.
1224 * Even though it's a null procedure and nominally has no arguments
1225 * RFC 2203 requires that the GSS-API token be passed as an argument
1226 * and received as a reply.
1227 */
1228static int
1229nfs_gss_clnt_ctx_callserver(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
1230{
1231	struct nfsm_chain nmreq, nmrep;
1232	int error = 0, status;
1233	int sz;
1234
1235	if (!req->r_nmp)
1236		return (ENXIO);
1237	nfsm_chain_null(&nmreq);
1238	nfsm_chain_null(&nmrep);
1239	sz = NFSX_UNSIGNED + nfsm_rndup(cp->gss_clnt_tokenlen);
1240	nfsm_chain_build_alloc_init(error, &nmreq, sz);
1241	nfsm_chain_add_32(error, &nmreq, cp->gss_clnt_tokenlen);
1242	if (cp->gss_clnt_tokenlen > 0)
1243		nfsm_chain_add_opaque(error, &nmreq, cp->gss_clnt_token, cp->gss_clnt_tokenlen);
1244	nfsm_chain_build_done(error, &nmreq);
1245	if (error)
1246		goto nfsmout;
1247
1248	/* Call the server */
1249	error = nfs_request_gss(req->r_nmp->nm_mountp, &nmreq, req->r_thread, req->r_cred,
1250				(req->r_flags & R_OPTMASK), cp, &nmrep, &status);
1251	if (cp->gss_clnt_token != NULL) {
1252		FREE(cp->gss_clnt_token, M_TEMP);
1253		cp->gss_clnt_token = NULL;
1254	}
1255	if (!error)
1256		error = status;
1257	if (error)
1258		goto nfsmout;
1259
1260	/* Get the server's reply */
1261
1262	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_handle_len);
1263	if (cp->gss_clnt_handle != NULL) {
1264		FREE(cp->gss_clnt_handle, M_TEMP);
1265		cp->gss_clnt_handle = NULL;
1266	}
1267	if (cp->gss_clnt_handle_len > 0) {
1268		MALLOC(cp->gss_clnt_handle, u_char *, cp->gss_clnt_handle_len, M_TEMP, M_WAITOK);
1269		if (cp->gss_clnt_handle == NULL) {
1270			error = ENOMEM;
1271			goto nfsmout;
1272		}
1273		nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_handle_len, cp->gss_clnt_handle);
1274	}
1275	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_major);
1276	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_minor);
1277	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_seqwin);
1278	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_tokenlen);
1279	if (error)
1280		goto nfsmout;
1281	if (cp->gss_clnt_tokenlen > 0) {
1282		MALLOC(cp->gss_clnt_token, u_char *, cp->gss_clnt_tokenlen, M_TEMP, M_WAITOK);
1283		if (cp->gss_clnt_token == NULL) {
1284			error = ENOMEM;
1285			goto nfsmout;
1286		}
1287		nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_tokenlen, cp->gss_clnt_token);
1288	}
1289
1290	/*
1291	 * Make sure any unusual errors are expanded and logged by gssd
1292	 */
1293	if (cp->gss_clnt_major != GSS_S_COMPLETE &&
1294	    cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
1295		char who[] = "server";
1296		char unknown[] = "<unknown>";
1297
1298		(void) mach_gss_log_error(
1299			cp->gss_clnt_mport,
1300			!req->r_nmp ? unknown :
1301			vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname,
1302			kauth_cred_getuid(cp->gss_clnt_cred),
1303			who,
1304			cp->gss_clnt_major,
1305			cp->gss_clnt_minor);
1306	}
1307
1308nfsmout:
1309	nfsm_chain_cleanup(&nmreq);
1310	nfsm_chain_cleanup(&nmrep);
1311
1312	return (error);
1313}
1314
1315/*
 * We construct the service principal as a GSS hostbased service principal of
 * the form nfs@<server>, unless the server's principal was passed down in the
 * mount arguments. If the arguments don't specify the service principal, the
 * server name is extracted from the location passed in the mount arguments, if
 * available.  Otherwise we assume a format of <server>:<path> in the
 * mntfromname. We don't currently support URLs or other bizarre formats like
 * path@server. Mount_url will convert the NFS URL into <server>:<path> when
 * calling mount, so this works out well in practice.
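 *
 * For example, a mount whose mntfromname is "server.example.com:/export"
 * (a made-up name) yields the hostbased service principal
 * "nfs@server.example.com"; *len counts the "nfs@" prefix, the server name,
 * and the trailing NUL.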
1324 *
1325 */
1326
1327static uint8_t *
1328nfs_gss_clnt_svcname(struct nfsmount *nmp, gssd_nametype *nt, uint32_t *len)
1329{
1330	char *svcname, *d, *server;
1331	int lindx, sindx;
1332
1333	if (!nmp)
1334		return (NULL);
1335
1336	if (nmp->nm_sprinc) {
1337		*len = strlen(nmp->nm_sprinc) + 1;
1338		MALLOC(svcname, char *, *len, M_TEMP, M_WAITOK);
1339		*nt = GSSD_HOSTBASED;
1340		if (svcname == NULL)
1341			return (NULL);
1342		strlcpy(svcname, nmp->nm_sprinc, *len);
1343
1344		return ((uint8_t *)svcname);
1345	}
1346
1347	*nt = GSSD_HOSTBASED;
1348	if (nmp->nm_locations.nl_numlocs && !(NFS_GSS_ISDBG && (NFS_DEBUG_FLAGS & 0x1))) {
1349		lindx = nmp->nm_locations.nl_current.nli_loc;
1350		sindx = nmp->nm_locations.nl_current.nli_serv;
1351		server = nmp->nm_locations.nl_locations[lindx]->nl_servers[sindx]->ns_name;
1352		*len = (uint32_t)strlen(server);
1353	} else {
1354		/* Older binaries using older mount args end up here */
1355		server = vfs_statfs(nmp->nm_mountp)->f_mntfromname;
1356		NFS_GSS_DBG("nfs getting gss svcname from %s\n", server);
1357		d = strchr(server, ':');
1358		*len = (uint32_t)(d ? (d - server) : strlen(server));
1359	}
1360
1361	*len +=  5; /* "nfs@" plus null */
1362	MALLOC(svcname, char *, *len, M_TEMP, M_WAITOK);
1363	strlcpy(svcname, "nfs", *len);
1364	strlcat(svcname, "@", *len);
1365	strlcat(svcname, server, *len);
1366	NFS_GSS_DBG("nfs svcname = %s\n", svcname);
1367
1368	return ((uint8_t *)svcname);
1369}
1370
1371/*
1372 * Get a mach port to talk to gssd.
1373 * gssd lives in the root bootstrap, so we call gssd's lookup routine
1374 * to get a send right to talk to a new gssd instance that launchd has launched
1375 * based on the cred's uid and audit session id.
1376 */
1377
1378static mach_port_t
1379nfs_gss_clnt_get_upcall_port(kauth_cred_t credp)
1380{
1381	mach_port_t gssd_host_port, uc_port = IPC_PORT_NULL;
1382	kern_return_t kr;
1383	au_asid_t asid;
1384	uid_t uid;
1385
1386	kr = host_get_gssd_port(host_priv_self(), &gssd_host_port);
1387	if (kr != KERN_SUCCESS) {
1388		printf("nfs_gss_get_upcall_port: can't get gssd port, status %x (%d)\n", kr, kr);
1389		return (IPC_PORT_NULL);
1390	}
1391	if (!IPC_PORT_VALID(gssd_host_port)) {
1392		printf("nfs_gss_get_upcall_port: gssd port not valid\n");
1393		return (IPC_PORT_NULL);
1394	}
1395
1396	asid = kauth_cred_getasid(credp);
1397	uid = kauth_cred_getauid(credp);
1398	if (uid == AU_DEFAUDITID)
1399		uid = kauth_cred_getuid(credp);
1400	kr = mach_gss_lookup(gssd_host_port, uid, asid, &uc_port);
1401	if (kr != KERN_SUCCESS)
		printf("nfs_gss_clnt_get_upcall_port: mach_gss_lookup failed: status %x (%d)\n", kr, kr);
1403
1404	return (uc_port);
1405}
1406
1407/*
1408 * Make an upcall to the gssd using Mach RPC
1409 * The upcall is made using a host special port.
1410 * This allows launchd to fire up the gssd in the
1411 * user's session.  This is important, since gssd
1412 * must have access to the user's credential cache.
1413 */
1414static int
1415nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
1416{
1417	kern_return_t kr;
1418	gssd_byte_buffer okey = NULL;
1419	uint32_t skeylen = 0;
1420	int retry_cnt = 0;
1421	vm_map_copy_t itoken = NULL;
1422	gssd_byte_buffer otoken = NULL;
1423	mach_msg_type_number_t otokenlen;
1424	int error = 0;
1425	uint8_t *principal = NULL;
1426	uint32_t plen = 0;
1427	int32_t nt = GSSD_STRING_NAME;
1428	vm_map_copy_t pname = NULL;
1429	vm_map_copy_t svcname = NULL;
1430	char display_name[MAX_DISPLAY_STR] = "";
1431	uint32_t ret_flags;
1432	uint32_t nfs_1des = (cp->gss_clnt_gssd_flags & GSSD_NFS_1DES);
1433	struct nfsmount *nmp;
1434
1435	/*
1436	 * NFS currently only supports default principals or
1437	 * principals based on the uid of the caller, unless
1438	 * the principal to use for the mounting cred was specified
	 * in the mount arguments. If the realm to use was specified,
	 * then we will send that up as the principal; since the realm is
	 * preceded by an "@", gssd will try to select the default
	 * principal for that realm.
1443	 */
1444
1445	nmp = req->r_nmp;
1446	if (nmp == NULL || vfs_isforce(nmp->nm_mountp) || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)))
1447		return (ENXIO);
1448
1449	if (cp->gss_clnt_principal && cp->gss_clnt_prinlen) {
1450		principal = cp->gss_clnt_principal;
1451		plen = cp->gss_clnt_prinlen;
1452		nt = cp->gss_clnt_prinnt;
1453	} else if (nmp->nm_principal && IS_VALID_CRED(nmp->nm_mcred) && req->r_cred == nmp->nm_mcred) {
1454		plen = (uint32_t)strlen(nmp->nm_principal);
1455		MALLOC(principal, uint8_t *, plen, M_TEMP, M_WAITOK | M_ZERO);
1456		if (principal == NULL)
1457			return (ENOMEM);
1458		bcopy(nmp->nm_principal, principal, plen);
1459		cp->gss_clnt_prinnt = nt = GSSD_USER;
1460	}
1461	else if (nmp->nm_realm) {
1462		plen = (uint32_t)strlen(nmp->nm_realm);
1463		principal = (uint8_t *)nmp->nm_realm;
1464		nt = GSSD_USER;
1465	}
1466
1467	if (!IPC_PORT_VALID(cp->gss_clnt_mport)) {
1468		cp->gss_clnt_mport = nfs_gss_clnt_get_upcall_port(req->r_cred);
1469		if (cp->gss_clnt_mport == IPC_PORT_NULL)
1470			goto out;
1471	}
1472
1473	if (plen)
1474		nfs_gss_mach_alloc_buffer(principal, plen, &pname);
1475	if (cp->gss_clnt_svcnamlen)
1476		nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname);
1477	if (cp->gss_clnt_tokenlen)
1478		nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
1479
1480retry:
1481	kr = mach_gss_init_sec_context_v2(
1482		cp->gss_clnt_mport,
1483		GSSD_KRB5_MECH,
1484		(gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_clnt_tokenlen,
1485		kauth_cred_getuid(cp->gss_clnt_cred),
1486		nt,
1487		(gssd_byte_buffer)pname, (mach_msg_type_number_t) plen,
1488		cp->gss_clnt_svcnt,
1489		(gssd_byte_buffer)svcname, (mach_msg_type_number_t) cp->gss_clnt_svcnamlen,
1490		GSSD_MUTUAL_FLAG,
1491		&cp->gss_clnt_gssd_flags,
1492		&cp->gss_clnt_context,
1493		&cp->gss_clnt_cred_handle,
1494		&ret_flags,
1495		&okey,  (mach_msg_type_number_t *) &skeylen,
1496		&otoken, &otokenlen,
1497		cp->gss_clnt_display ? NULL : display_name,
1498		&cp->gss_clnt_major,
1499		&cp->gss_clnt_minor);
1500
1501	/* Should be cleared and set in gssd ? */
1502	cp->gss_clnt_gssd_flags &= ~GSSD_RESTART;
1503	cp->gss_clnt_gssd_flags |= nfs_1des;
1504
1505	if (kr != KERN_SUCCESS) {
1506		printf("nfs_gss_clnt_gssd_upcall: mach_gss_init_sec_context failed: %x (%d)\n", kr, kr);
1507		if (kr == MIG_SERVER_DIED && cp->gss_clnt_cred_handle == 0 &&
1508			retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES &&
1509			!vfs_isforce(nmp->nm_mountp) && (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) == 0) {
1510			if (plen)
1511				nfs_gss_mach_alloc_buffer(principal, plen, &pname);
1512			if (cp->gss_clnt_svcnamlen)
1513				nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname);
1514			if (cp->gss_clnt_tokenlen > 0)
1515				nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
1516			goto retry;
1517		}
1518
1519		host_release_special_port(cp->gss_clnt_mport);
1520		cp->gss_clnt_mport = IPC_PORT_NULL;
1521		goto out;
1522	}
1523
1524	if (cp->gss_clnt_display == NULL && *display_name != '\0') {
1525		int dlen = strnlen(display_name, MAX_DISPLAY_STR) + 1;  /* Add extra byte to include '\0' */
1526
1527		if (dlen < MAX_DISPLAY_STR) {
1528			MALLOC(cp->gss_clnt_display, char *, dlen, M_TEMP, M_WAITOK);
1529			if (cp->gss_clnt_display == NULL)
1530				goto skip;
1531			bcopy(display_name, cp->gss_clnt_display, dlen);
1532		} else {
1533			goto skip;
1534		}
1535	}
1536skip:
1537	/*
1538	 * Make sure any unusual errors are expanded and logged by gssd
1539	 *
1540	 * XXXX, we need to rethink this and just have gssd return a string for the major and minor codes.
1541	 */
1542	if (cp->gss_clnt_major != GSS_S_COMPLETE &&
1543	    cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
1544#define GETMAJERROR(x) (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK)
1545		char who[] = "client";
1546		uint32_t gss_error = GETMAJERROR(cp->gss_clnt_major);
1547
1548		(void) mach_gss_log_error(
1549			cp->gss_clnt_mport,
1550			vfs_statfs(nmp->nm_mountp)->f_mntfromname,
1551			kauth_cred_getuid(cp->gss_clnt_cred),
1552			who,
1553			cp->gss_clnt_major,
1554			cp->gss_clnt_minor);
1555		gss_error = gss_error ? gss_error : cp->gss_clnt_major;
1556		printf("NFS gssd auth failure mount %s for %s major = %d minor = %d\n",
1557		       vfs_statfs(nmp->nm_mountp)->f_mntfromname, cp->gss_clnt_display ? cp->gss_clnt_display : who,
1558		       gss_error, (int32_t)cp->gss_clnt_minor);
1559	}
1560
1561	if (skeylen > 0) {
1562		if (skeylen != SKEYLEN && skeylen != SKEYLEN3) {
1563			printf("nfs_gss_clnt_gssd_upcall: bad key length (%d)\n", skeylen);
1564			vm_map_copy_discard((vm_map_copy_t) okey);
1565			vm_map_copy_discard((vm_map_copy_t) otoken);
1566			goto out;
1567		}
1568		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen,
1569				cp->gss_clnt_kinfo.skey);
1570		if (error) {
1571			vm_map_copy_discard((vm_map_copy_t) otoken);
1572			goto out;
1573		}
1574
1575		error = gss_key_init(&cp->gss_clnt_kinfo, skeylen);
1576		if (error)
1577			goto out;
1578	}
1579
1580	/* Free context token used as input */
1581	if (cp->gss_clnt_token)
1582		FREE(cp->gss_clnt_token, M_TEMP);
1583	cp->gss_clnt_token = NULL;
1584	cp->gss_clnt_tokenlen = 0;
1585
1586	if (otokenlen > 0) {
1587		/* Set context token to gss output token */
1588		MALLOC(cp->gss_clnt_token, u_char *, otokenlen, M_TEMP, M_WAITOK);
1589		if (cp->gss_clnt_token == NULL) {
1590			printf("nfs_gss_clnt_gssd_upcall: could not allocate %d bytes\n", otokenlen);
1591			vm_map_copy_discard((vm_map_copy_t) otoken);
1592			return (ENOMEM);
1593		}
1594		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_clnt_token);
1595		if (error) {
1596			FREE(cp->gss_clnt_token, M_TEMP);
1597			cp->gss_clnt_token = NULL;
1598			return (NFSERR_EAUTH);
1599		}
1600		cp->gss_clnt_tokenlen = otokenlen;
1601	}
1602
1603	return (0);
1604
1605out:
1606	if (cp->gss_clnt_token)
1607		FREE(cp->gss_clnt_token, M_TEMP);
1608	cp->gss_clnt_token = NULL;
1609	cp->gss_clnt_tokenlen = 0;
1610
1611	return (NFSERR_EAUTH);
1612}
1613
1614/*
1615 * Invoked at the completion of an RPC call that uses an RPCSEC_GSS
1616 * credential. The sequence number window that the server returns
1617 * at context setup indicates the maximum number of client calls that
1618 * can be outstanding on a context. The client maintains a bitmap that
1619 * represents the server's window.  Each pending request has a bit set
1620 * in the window bitmap.  When a reply comes in or times out, we reset
1621 * the bit in the bitmap and if there are any other threads waiting for
1622 * a context slot we notify the waiting thread(s).
1623 *
1624 * Note that if a request is retransmitted, it will have a single XID
1625 * but it may be associated with multiple sequence numbers.  So we
1626 * may have to reset multiple sequence number bits in the window bitmap.
1627 */
1628void
1629nfs_gss_clnt_rpcdone(struct nfsreq *req)
1630{
1631	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
1632	struct gss_seq *gsp, *ngsp;
1633	int i = 0;
1634
1635	if (cp == NULL || !(cp->gss_clnt_flags & GSS_CTX_COMPLETE))
1636		return;	// no context - don't bother
1637	/*
1638	 * Reset the bit for this request in the
1639	 * sequence number window to indicate it's done.
1640	 * We do this even if the request timed out.
1641	 */
1642	lck_mtx_lock(cp->gss_clnt_mtx);
1643	gsp = SLIST_FIRST(&req->r_gss_seqlist);
1644	if (gsp && gsp->gss_seqnum > (cp->gss_clnt_seqnum - cp->gss_clnt_seqwin))
1645		win_resetbit(cp->gss_clnt_seqbits,
1646			gsp->gss_seqnum % cp->gss_clnt_seqwin);
1647
1648	/*
1649	 * Limit the seqnum list to GSS_CLNT_SEQLISTMAX entries
1650	 */
1651	SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp) {
1652		if (++i > GSS_CLNT_SEQLISTMAX) {
1653			SLIST_REMOVE(&req->r_gss_seqlist, gsp, gss_seq, gss_seqnext);
1654			FREE(gsp, M_TEMP);
1655		}
1656	}
1657
1658	/*
1659	 * If there's a thread waiting for
1660	 * the window to advance, wake it up.
1661	 */
1662	if (cp->gss_clnt_flags & GSS_NEEDSEQ) {
1663		cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
1664		wakeup(cp);
1665	}
1666	lck_mtx_unlock(cp->gss_clnt_mtx);
1667}
1668
1669/*
1670 * Create a reference to a context from a request
1671 * and bump the reference count
1672 */
1673void
1674nfs_gss_clnt_ctx_ref(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
1675{
1676	req->r_gss_ctx = cp;
1677
1678	lck_mtx_lock(cp->gss_clnt_mtx);
1679	cp->gss_clnt_refcnt++;
1680	lck_mtx_unlock(cp->gss_clnt_mtx);
1681}
1682
1683/*
1684 * Remove a context reference from a request
1685 * If the reference count drops to zero, and the
1686 * context is invalid, destroy the context
1687 */
1688void
1689nfs_gss_clnt_ctx_unref(struct nfsreq *req)
1690{
1691	struct nfsmount *nmp = req->r_nmp;
1692	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
1693
1694	if (cp == NULL)
1695		return;
1696
1697	req->r_gss_ctx = NULL;
1698
1699	lck_mtx_lock(cp->gss_clnt_mtx);
1700	if (--cp->gss_clnt_refcnt == 0
1701		&& cp->gss_clnt_flags & GSS_CTX_INVAL) {
1702		lck_mtx_unlock(cp->gss_clnt_mtx);
1703
1704		if (nmp)
1705			lck_mtx_lock(&nmp->nm_lock);
1706		nfs_gss_clnt_ctx_remove(nmp, cp);
1707		if (nmp)
1708			lck_mtx_unlock(&nmp->nm_lock);
1709
1710		return;
1711	}
1712	lck_mtx_unlock(cp->gss_clnt_mtx);
1713}
1714
1715/*
1716 * Remove a context
1717 */
1718static void
1719nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp)
1720{
1721	/*
1722	 * If dequeueing, assume nmp->nm_lock is held
1723	 */
1724	if (nmp != NULL)
1725		TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
1726
1727	host_release_special_port(cp->gss_clnt_mport);
1728
1729	if (cp->gss_clnt_mtx)
1730		lck_mtx_destroy(cp->gss_clnt_mtx, nfs_gss_clnt_grp);
1731	if (IS_VALID_CRED(cp->gss_clnt_cred))
1732		kauth_cred_unref(&cp->gss_clnt_cred);
1733	if (cp->gss_clnt_principal)
1734		FREE(cp->gss_clnt_principal, M_TEMP);
1735	if (cp->gss_clnt_display)
1736		FREE(cp->gss_clnt_display, M_TEMP);
1737	if (cp->gss_clnt_handle)
1738		FREE(cp->gss_clnt_handle, M_TEMP);
1739	if (cp->gss_clnt_seqbits)
1740		FREE(cp->gss_clnt_seqbits, M_TEMP);
1741	if (cp->gss_clnt_token)
1742		FREE(cp->gss_clnt_token, M_TEMP);
1743	if (cp->gss_clnt_svcname)
1744		FREE(cp->gss_clnt_svcname, M_TEMP);
1745	FREE(cp, M_TEMP);
1746}
1747
1748/*
 * The context for a user is no longer valid.
 * Mark the old context invalid, then create
 * and initialize a new one.
1752 */
1753int
1754nfs_gss_clnt_ctx_renew(struct nfsreq *req)
1755{
1756	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
1757	struct nfsmount *nmp = req->r_nmp;
1758	struct nfs_gss_clnt_ctx *ncp;
1759	int error = 0;
1760	kauth_cred_t saved_cred;
1761	mach_port_t saved_mport;
1762
1763	if (cp == NULL)
1764		return (0);
1765
1766	lck_mtx_lock(cp->gss_clnt_mtx);
1767	if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
1768		lck_mtx_unlock(cp->gss_clnt_mtx);
1769		nfs_gss_clnt_ctx_unref(req);
1770		return (0);	// already being renewed
1771	}
1772	saved_cred = cp->gss_clnt_cred;
1773	kauth_cred_ref(saved_cred);
1774	saved_mport = host_copy_special_port(cp->gss_clnt_mport);
1775
	/* Mark the old context invalid; it will be removed when its refcount drops to zero */
1777	cp->gss_clnt_flags |= GSS_CTX_INVAL;
1778
1779	/*
1780	 * If there's a thread waiting
1781	 * in the old context, wake it up.
1782	 */
1783	if (cp->gss_clnt_flags & (GSS_NEEDCTX | GSS_NEEDSEQ)) {
1784		cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
1785		wakeup(cp);
1786	}
1787	lck_mtx_unlock(cp->gss_clnt_mtx);
1788
1789	/*
1790	 * Create a new context
1791	 */
1792	MALLOC(ncp, struct nfs_gss_clnt_ctx *, sizeof(*ncp),
1793		M_TEMP, M_WAITOK|M_ZERO);
1794	if (ncp == NULL) {
1795		error = ENOMEM;
1796		goto out;
1797	}
1798
1799	ncp->gss_clnt_cred = saved_cred;
1800	kauth_cred_ref(ncp->gss_clnt_cred);
1801	ncp->gss_clnt_mport = host_copy_special_port(saved_mport); // re-use the gssd port
1802	ncp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
1803	ncp->gss_clnt_thread = current_thread();
1804	lck_mtx_lock(&nmp->nm_lock);
1805	TAILQ_INSERT_TAIL(&nmp->nm_gsscl, ncp, gss_clnt_entries);
1806	lck_mtx_unlock(&nmp->nm_lock);
1807
1808	/* Adjust reference counts to new and old context */
1809	nfs_gss_clnt_ctx_unref(req);
1810	nfs_gss_clnt_ctx_ref(req, ncp);
1811
1812	error = nfs_gss_clnt_ctx_init_retry(req, ncp); // Initialize new context
1813out:
1814	host_release_special_port(saved_mport);
1815	kauth_cred_unref(&saved_cred);
1816	if (error)
1817		nfs_gss_clnt_ctx_unref(req);
1818
1819	return (error);
1820}
1821
1822/*
1823 * Destroy all the contexts associated with a mount.
1824 * The contexts are also destroyed by the server.
1825 */
1826void
1827nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp)
1828{
1829	struct nfs_gss_clnt_ctx *cp;
1830	struct nfsm_chain nmreq, nmrep;
1831	int error, status;
1832	struct nfsreq req;
1833
1834	req.r_nmp = nmp;
1835
1836	for (;;) {
1837		lck_mtx_lock(&nmp->nm_lock);
1838		cp = TAILQ_FIRST(&nmp->nm_gsscl);
1839		if (cp) {
1840			lck_mtx_lock(cp->gss_clnt_mtx);
1841			cp->gss_clnt_refcnt++;
1842			lck_mtx_unlock(cp->gss_clnt_mtx);
1843			req.r_gss_ctx = cp;
1844		}
1845		lck_mtx_unlock(&nmp->nm_lock);
1846		if (cp == NULL)
1847			break;
1848
1849		/*
1850		 * Tell the server to destroy its context.
1851		 * But don't bother if it's a forced unmount
1852		 * or if it's a dummy sec=sys context.
1853		 */
1854		if (!(nmp->nm_state & NFSSTA_FORCE) && (cp->gss_clnt_service != RPCSEC_GSS_SVC_SYS)) {
1855			cp->gss_clnt_proc = RPCSEC_GSS_DESTROY;
1856
1857			error = 0;
1858			nfsm_chain_null(&nmreq);
1859			nfsm_chain_null(&nmrep);
1860			nfsm_chain_build_alloc_init(error, &nmreq, 0);
1861			nfsm_chain_build_done(error, &nmreq);
1862			if (!error)
1863				nfs_request_gss(nmp->nm_mountp, &nmreq,
1864					current_thread(), cp->gss_clnt_cred, 0, cp, &nmrep, &status);
1865			nfsm_chain_cleanup(&nmreq);
1866			nfsm_chain_cleanup(&nmrep);
1867		}
1868
1869		/*
1870		 * Mark the context invalid then drop
1871		 * the reference to remove it if its
1872		 * refcount is zero.
1873		 */
1874		lck_mtx_lock(cp->gss_clnt_mtx);
1875		cp->gss_clnt_flags |= GSS_CTX_INVAL;
1876		lck_mtx_unlock(cp->gss_clnt_mtx);
1877		nfs_gss_clnt_ctx_unref(&req);
1878	}
1879}
1880
1881/*
 * Destroy a mount's context for a given credential
1883 */
1884int
1885nfs_gss_clnt_ctx_destroy(struct nfsmount *nmp, kauth_cred_t cred)
1886{
1887	struct nfs_gss_clnt_ctx *cp;
1888	struct nfsreq req;
1889
1890	req.r_nmp = nmp;
1891
1892	lck_mtx_lock(&nmp->nm_lock);
1893	TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
1894		if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
1895			if (cp->gss_clnt_flags & GSS_CTX_INVAL)
1896				continue;
1897			lck_mtx_lock(cp->gss_clnt_mtx);
1898			cp->gss_clnt_refcnt++;
1899			cp->gss_clnt_flags |= GSS_CTX_INVAL;
1900			lck_mtx_unlock(cp->gss_clnt_mtx);
1901			req.r_gss_ctx = cp;
1902			break;
1903		}
1904	}
1905	lck_mtx_unlock(&nmp->nm_lock);
1906
1907	if (cp == NULL)
1908		return (ENOENT);
1909
1910	/*
1911	 * Drop the reference to remove it if its
1912	 * refcount is zero.
1913	 */
1914	nfs_gss_clnt_ctx_unref(&req);
1915
1916	return (0);
1917}
1918
1919
1920#endif /* NFSCLIENT */
1921
1922/*************
1923 *
1924 * Server functions
1925 */
1926
1927#if NFSSERVER
1928
1929/*
1930 * Find a server context based on a handle value received
1931 * in an RPCSEC_GSS credential.
1932 */
1933static struct nfs_gss_svc_ctx *
1934nfs_gss_svc_ctx_find(uint32_t handle)
1935{
1936	struct nfs_gss_svc_ctx_hashhead *head;
1937	struct nfs_gss_svc_ctx *cp;
1938	uint64_t timenow;
1939
1940	if (handle == 0)
1941		return (NULL);
1942
1943	head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(handle)];
1944	/*
1945	 * Don't return a context that is going to expire in GSS_CTX_PEND seconds
1946	 */
1947	clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, &timenow);
1948
1949	lck_mtx_lock(nfs_gss_svc_ctx_mutex);
1950
1951	LIST_FOREACH(cp, head, gss_svc_entries) {
1952		if (cp->gss_svc_handle == handle) {
1953			if (timenow > cp->gss_svc_incarnation + GSS_SVC_CTX_TTL) {
1954				/*
1955				 * Context has or is about to expire. Don't use.
1956				 * We'll return null and the client will have to create
1957				 * a new context.
1958				 */
1959				cp->gss_svc_handle = 0;
1960				/*
1961				 * Make sure though that we stay around for GSS_CTX_PEND seconds
1962				 * for other threads that might be using the context.
1963				 */
1964				cp->gss_svc_incarnation = timenow;
1965
1966				cp = NULL;
1967				break;
1968			}
1969			lck_mtx_lock(cp->gss_svc_mtx);
1970			cp->gss_svc_refcnt++;
1971			lck_mtx_unlock(cp->gss_svc_mtx);
1972			break;
1973		}
1974	}
1975
1976	lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
1977
1978	return (cp);
1979}
1980
1981/*
1982 * Insert a new server context into the hash table
1983 * and start the context reap thread if necessary.
1984 */
1985static void
1986nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp)
1987{
1988	struct nfs_gss_svc_ctx_hashhead *head;
1989	struct nfs_gss_svc_ctx *p;
1990
1991	lck_mtx_lock(nfs_gss_svc_ctx_mutex);
1992
1993	/*
1994	 * Give the client a random handle so that if we reboot
1995	 * it's unlikely the client will get a bad context match.
1996	 * Make sure it's not zero or already assigned.
1997	 */
1998retry:
1999	cp->gss_svc_handle = random();
2000	if (cp->gss_svc_handle == 0)
2001		goto retry;
2002	head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(cp->gss_svc_handle)];
2003	LIST_FOREACH(p, head, gss_svc_entries)
2004		if (p->gss_svc_handle == cp->gss_svc_handle)
2005			goto retry;
2006
2007	clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
2008		&cp->gss_svc_incarnation);
2009	LIST_INSERT_HEAD(head, cp, gss_svc_entries);
2010	nfs_gss_ctx_count++;
2011
2012	if (!nfs_gss_timer_on) {
2013		nfs_gss_timer_on = 1;
2014
2015		nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
2016			min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
2017	}
2018
2019	lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
2020}
2021
2022/*
2023 * This function is called via the kernel's callout
2024 * mechanism.  It runs only when there are
2025 * cached RPCSEC_GSS contexts.
2026 */
2027void
2028nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2)
2029{
2030	struct nfs_gss_svc_ctx *cp, *next;
2031	uint64_t timenow;
2032	int contexts = 0;
2033	int i;
2034
2035	lck_mtx_lock(nfs_gss_svc_ctx_mutex);
2036	clock_get_uptime(&timenow);
2037
2038	/*
2039	 * Scan all the hash chains
2040	 */
2041	for (i = 0; i < SVC_CTX_HASHSZ; i++) {
2042		/*
2043		 * For each hash chain, look for entries
2044		 * that haven't been used in a while.
2045		 */
2046		LIST_FOREACH_SAFE(cp, &nfs_gss_svc_ctx_hashtbl[i], gss_svc_entries, next) {
2047			contexts++;
2048			if (timenow > cp->gss_svc_incarnation +
2049				(cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0)
2050				&& cp->gss_svc_refcnt == 0) {
2051				/*
2052				 * A stale context - remove it
2053				 */
2054				LIST_REMOVE(cp, gss_svc_entries);
2055				if (cp->gss_svc_seqbits)
2056					FREE(cp->gss_svc_seqbits, M_TEMP);
2057				lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
2058				FREE(cp, M_TEMP);
2059				contexts--;
2060			}
2061		}
2062	}
2063
2064	nfs_gss_ctx_count = contexts;
2065
2066	/*
2067	 * If there are still some cached contexts left,
2068	 * set up another callout to check on them later.
2069	 */
2070	nfs_gss_timer_on = nfs_gss_ctx_count > 0;
2071	if (nfs_gss_timer_on)
2072		nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
2073			min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
2074
2075	lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
2076}
2077
2078/*
2079 * Here the server receives an RPCSEC_GSS credential in an
2080 * RPC call header.  First there's some checking to make sure
2081 * the credential is appropriate - whether the context is still
2082 * being set up, or is complete.  Then we use the handle to find
2083 * the server's context and validate the verifier, which contains
2084 * a signed checksum of the RPC header. If the verifier checks
2085 * out, we extract the user's UID and groups from the context
 * and use them to set up a UNIX credential for the user's request.
2087 */
2088int
2089nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
2090{
2091	uint32_t vers, proc, seqnum, service;
2092	uint32_t handle, handle_len;
2093	struct nfs_gss_svc_ctx *cp = NULL;
2094	uint32_t flavor = 0, verflen = 0;
2095	int error = 0;
2096	uint32_t arglen, start, toklen, cksumlen;
2097	u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
2098	u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
2099	struct nfsm_chain nmc_tmp;
2100	gss_key_info *ki;
2101
2102	vers = proc = seqnum = service = handle_len = 0;
2103	arglen = cksumlen = 0;
2104
2105	nfsm_chain_get_32(error, nmc, vers);
2106	if (vers != RPCSEC_GSS_VERS_1) {
2107		error = NFSERR_AUTHERR | AUTH_REJECTCRED;
2108		goto nfsmout;
2109	}
2110
2111	nfsm_chain_get_32(error, nmc, proc);
2112	nfsm_chain_get_32(error, nmc, seqnum);
2113	nfsm_chain_get_32(error, nmc, service);
2114	nfsm_chain_get_32(error, nmc, handle_len);
2115	if (error)
2116		goto nfsmout;
2117
2118	/*
2119	 * Make sure context setup/destroy is being done with a nullproc
2120	 */
2121	if (proc != RPCSEC_GSS_DATA && nd->nd_procnum != NFSPROC_NULL) {
2122		error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
2123		goto nfsmout;
2124	}
2125
2126	/*
2127	 * If the sequence number is greater than the max
2128	 * allowable, reject and have the client init a
2129	 * new context.
2130	 */
2131	if (seqnum > GSS_MAXSEQ) {
2132		error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
2133		goto nfsmout;
2134	}
2135
2136	nd->nd_sec =
2137		service == RPCSEC_GSS_SVC_NONE ?      RPCAUTH_KRB5 :
2138		service == RPCSEC_GSS_SVC_INTEGRITY ? RPCAUTH_KRB5I :
2139		service == RPCSEC_GSS_SVC_PRIVACY ?   RPCAUTH_KRB5P : 0;
2140
2141	if (proc == RPCSEC_GSS_INIT) {
2142		/*
2143		 * Limit the total number of contexts
2144		 */
2145		if (nfs_gss_ctx_count > nfs_gss_ctx_max) {
2146			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
2147			goto nfsmout;
2148		}
2149
2150		/*
2151		 * Set up a new context
2152		 */
2153		MALLOC(cp, struct nfs_gss_svc_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
2154		if (cp == NULL) {
2155			error = ENOMEM;
2156			goto nfsmout;
2157		}
2158		cp->gss_svc_mtx = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
2159		cp->gss_svc_refcnt = 1;
2160	} else {
2161
2162		/*
2163		 * Use the handle to find the context
2164		 */
2165		if (handle_len != sizeof(handle)) {
2166			error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
2167			goto nfsmout;
2168		}
2169		nfsm_chain_get_32(error, nmc, handle);
2170		if (error)
2171			goto nfsmout;
2172		cp = nfs_gss_svc_ctx_find(handle);
2173		if (cp == NULL) {
2174			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
2175			goto nfsmout;
2176		}
2177	}
2178
2179	cp->gss_svc_proc = proc;
2180	ki = &cp->gss_svc_kinfo;
2181
2182	if (proc == RPCSEC_GSS_DATA || proc == RPCSEC_GSS_DESTROY) {
2183		struct posix_cred temp_pcred;
2184
2185		if (cp->gss_svc_seqwin == 0) {
2186			/*
2187			 * Context isn't complete
2188			 */
2189			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
2190			goto nfsmout;
2191		}
2192
2193		if (!nfs_gss_svc_seqnum_valid(cp, seqnum)) {
2194			/*
2195			 * Sequence number is bad
2196			 */
2197			error = EINVAL;	// drop the request
2198			goto nfsmout;
2199		}
2200
2201		/* Now compute the client's call header checksum */
2202		nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), 0, 0, cksum1);
2203
2204		/*
2205		 * Validate the verifier.
2206		 * The verifier contains an encrypted checksum
2207		 * of the call header from the XID up to and
2208		 * including the credential.  We compute the
2209		 * checksum and compare it with what came in
2210		 * the verifier.
2211		 */
2212		nfsm_chain_get_32(error, nmc, flavor);
2213		nfsm_chain_get_32(error, nmc, verflen);
2214		if (error)
2215			goto nfsmout;
2216		if (flavor != RPCSEC_GSS || verflen != KRB5_SZ_TOKEN(ki->hash_len))
2217			error = NFSERR_AUTHERR | AUTH_BADVERF;
2218		nfsm_chain_get_opaque(error, nmc, verflen, tokbuf);
2219		if (error)
2220			goto nfsmout;
2221
2222		/* Get the checksum from the token inside the verifier */
2223		error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 1,
2224			NULL, cksum2);
2225		if (error)
2226			goto nfsmout;
2227
2228		if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
2229			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
2230			goto nfsmout;
2231		}
2232
2233		nd->nd_gss_seqnum = seqnum;
2234
2235		/*
2236		 * Set up the user's cred
2237		 */
2238		bzero(&temp_pcred, sizeof(temp_pcred));
2239		temp_pcred.cr_uid = cp->gss_svc_uid;
2240		bcopy(cp->gss_svc_gids, temp_pcred.cr_groups,
2241				sizeof(gid_t) * cp->gss_svc_ngroups);
2242		temp_pcred.cr_ngroups = cp->gss_svc_ngroups;
2243
2244		nd->nd_cr = posix_cred_create(&temp_pcred);
2245		if (nd->nd_cr == NULL) {
2246			error = ENOMEM;
2247			goto nfsmout;
2248		}
2249		clock_get_uptime(&cp->gss_svc_incarnation);
2250
2251		/*
2252		 * If the call arguments are integrity or privacy protected
2253		 * then we need to check them here.
2254		 */
2255		switch (service) {
2256		case RPCSEC_GSS_SVC_NONE:
2257			/* nothing to do */
2258			break;
2259		case RPCSEC_GSS_SVC_INTEGRITY:
2260			/*
2261			 * Here's what we expect in the integrity call args:
2262			 *
2263			 * - length of seq num + call args (4 bytes)
2264			 * - sequence number (4 bytes)
2265			 * - call args (variable bytes)
2266			 * - length of checksum token (37)
2267			 * - checksum of seqnum + call args (37 bytes)
2268			 */
2269			nfsm_chain_get_32(error, nmc, arglen);		// length of args
2270			if (arglen > NFS_MAXPACKET) {
2271				error = EBADRPC;
2272				goto nfsmout;
2273			}
2274
2275			/* Compute the checksum over the call args */
2276			start = nfsm_chain_offset(nmc);
2277			nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, arglen, cksum1);
2278
2279			/*
2280			 * Get the sequence number prepended to the args
2281			 * and compare it against the one sent in the
2282			 * call credential.
2283			 */
2284			nfsm_chain_get_32(error, nmc, seqnum);
2285			if (seqnum != nd->nd_gss_seqnum) {
2286				error = EBADRPC;			// returns as GARBAGEARGS
2287				goto nfsmout;
2288			}
2289
2290			/*
2291			 * Advance to the end of the args and
2292			 * fetch the checksum computed by the client.
2293			 */
2294			nmc_tmp = *nmc;
2295			arglen -= NFSX_UNSIGNED;			// skipped seqnum
2296			nfsm_chain_adv(error, &nmc_tmp, arglen);	// skip args
2297			nfsm_chain_get_32(error, &nmc_tmp, cksumlen);	// length of checksum
2298			if (cksumlen != KRB5_SZ_TOKEN(ki->hash_len)) {
2299				error = EBADRPC;
2300				goto nfsmout;
2301			}
2302			nfsm_chain_get_opaque(error, &nmc_tmp, cksumlen, tokbuf);
2303			if (error)
2304				goto nfsmout;
2305			error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 1,
2306				NULL, cksum2);
2307
2308			/* Verify that the checksums are the same */
2309			if (error || bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
2310				error = EBADRPC;
2311				goto nfsmout;
2312			}
2313			break;
2314		case RPCSEC_GSS_SVC_PRIVACY:
2315			/*
2316			 * Here's what we expect in the privacy call args:
2317			 *
2318			 * - length of confounder + seq num + token + call args
2319			 * - wrap token (37-40 bytes)
2320			 * - confounder (8 bytes)
2321			 * - sequence number (4 bytes)
2322			 * - call args (encrypted)
2323			 */
2324			nfsm_chain_get_32(error, nmc, arglen);		// length of args
2325			if (arglen > NFS_MAXPACKET) {
2326				error = EBADRPC;
2327				goto nfsmout;
2328			}
2329
2330			/* Get the token that prepends the encrypted args */
2331			nfsm_chain_get_opaque(error, nmc, KRB5_SZ_TOKMAX(ki->hash_len), tokbuf);
2332			if (error)
2333				goto nfsmout;
2334			error = nfs_gss_token_get(ki, ALG_WRAP(ki), tokbuf, 1,
2335							&toklen, cksum1);
2336			if (error)
2337				goto nfsmout;
2338			nfsm_chain_reverse(nmc, nfsm_pad(toklen));
2339
2340			/* decrypt the 8 byte confounder + seqnum + args */
2341			start = nfsm_chain_offset(nmc);
2342			arglen -= toklen;
2343			nfs_gss_encrypt_chain(ki, nmc, start, arglen, DES_DECRYPT);
2344
2345			/* Compute a checksum over the sequence number + results */
2346			nfs_gss_cksum_chain(ki, nmc, ALG_WRAP(ki), start, arglen, cksum2);
2347
2348			/* Verify that the checksums are the same */
2349			if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
2350				error = EBADRPC;
2351				goto nfsmout;
2352			}
2353
2354			/*
2355			 * Get the sequence number prepended to the args
2356			 * and compare it against the one sent in the
2357			 * call credential.
2358			 */
2359			nfsm_chain_adv(error, nmc, 8);			// skip over the confounder
2360			nfsm_chain_get_32(error, nmc, seqnum);
2361			if (seqnum != nd->nd_gss_seqnum) {
2362				error = EBADRPC;			// returns as GARBAGEARGS
2363				goto nfsmout;
2364			}
2365			break;
2366		}
2367	} else {
2368		/*
2369		 * If the proc is RPCSEC_GSS_INIT or RPCSEC_GSS_CONTINUE_INIT
2370		 * then we expect a null verifier.
2371		 */
2372		nfsm_chain_get_32(error, nmc, flavor);
2373		nfsm_chain_get_32(error, nmc, verflen);
2374		if (error || flavor != RPCAUTH_NULL || verflen > 0)
2375			error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
2376		if (error) {
2377			if (proc == RPCSEC_GSS_INIT) {
2378				lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
2379				FREE(cp, M_TEMP);
2380				cp = NULL;
2381			}
2382			goto nfsmout;
2383		}
2384	}
2385
2386	nd->nd_gss_context = cp;
2387	return 0;
2388nfsmout:
2389	if (cp)
2390		nfs_gss_svc_ctx_deref(cp);
2391	return (error);
2392}
2393
2394/*
2395 * Insert the server's verifier into the RPC reply header.
2396 * It contains a signed checksum of the sequence number that
2397 * was received in the RPC call.
2398 * Then go on to add integrity or privacy if necessary.
2399 */
2400int
2401nfs_gss_svc_verf_put(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
2402{
2403	struct nfs_gss_svc_ctx *cp;
2404	int error = 0;
2405	u_char tokbuf[KRB5_SZ_TOKEN(MAX_DIGEST)];
2406	int toklen;
2407	u_char cksum[MAX_DIGEST];
2408	gss_key_info *ki;
2409
2410	cp = nd->nd_gss_context;
2411	ki = &cp->gss_svc_kinfo;
2412
2413	if (cp->gss_svc_major != GSS_S_COMPLETE) {
2414		/*
2415		 * If the context isn't yet complete
2416		 * then return a null verifier.
2417		 */
2418		nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);
2419		nfsm_chain_add_32(error, nmc, 0);
2420		return (error);
2421	}
2422
2423	/*
2424	 * Compute checksum of the request seq number
2425	 * If it's the final reply of context setup
2426	 * then return the checksum of the context
2427	 * window size.
2428	 */
2429	if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
2430	    cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT)
2431		nfs_gss_cksum_rep(ki, cp->gss_svc_seqwin, cksum);
2432	else
2433		nfs_gss_cksum_rep(ki, nd->nd_gss_seqnum, cksum);
2434	/*
2435	 * Now wrap it in a token and add
2436	 * the verifier to the reply.
2437	 */
2438	toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 0, 0, cksum);
2439	nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
2440	nfsm_chain_add_32(error, nmc, toklen);
2441	nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
2442
2443	return (error);
2444}
2445
2446/*
2447 * The results aren't available yet, but if they need to be
2448 * checksummed for integrity protection or encrypted, then
 * we record the current mbuf as a split point here and insert
 * the request's sequence number, preceded by a random confounder
 * for privacy.
2451 * The rest of the work is done later by nfs_gss_svc_protect_reply()
2452 * when the results are available.
2453 */
2454int
2455nfs_gss_svc_prepare_reply(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
2456{
2457	struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
2458	int error = 0;
2459
2460	if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
2461	    cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT)
2462		return (0);
2463
2464	switch (nd->nd_sec) {
2465	case RPCAUTH_KRB5:
2466		/* Nothing to do */
2467		break;
2468	case RPCAUTH_KRB5I:
2469		nd->nd_gss_mb = nmc->nmc_mcur;			// record current mbuf
2470		nfsm_chain_finish_mbuf(error, nmc);		// split the chain here
2471		nfsm_chain_add_32(error, nmc, nd->nd_gss_seqnum); // req sequence number
2472		break;
2473	case RPCAUTH_KRB5P:
2474		nd->nd_gss_mb = nmc->nmc_mcur;			// record current mbuf
2475		nfsm_chain_finish_mbuf(error, nmc);		// split the chain here
2476		nfsm_chain_add_32(error, nmc, random());	// confounder bytes 1-4
2477		nfsm_chain_add_32(error, nmc, random());	// confounder bytes 5-8
2478		nfsm_chain_add_32(error, nmc, nd->nd_gss_seqnum); // req sequence number
2479		break;
2480	}
2481
2482	return (error);
2483}
2484
2485/*
2486 * The results are checksummed or encrypted for return to the client
2487 */
2488int
2489nfs_gss_svc_protect_reply(struct nfsrv_descript *nd, mbuf_t mrep)
2490{
2491	struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
2492	struct nfsm_chain nmrep_res, *nmc_res = &nmrep_res;
2493	struct nfsm_chain nmrep_pre, *nmc_pre = &nmrep_pre;
2494	mbuf_t mb, results;
2495	uint32_t reslen;
2496	u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
2497	int pad, toklen;
2498	u_char cksum[MAX_DIGEST];
2499	int error = 0;
2500	gss_key_info *ki = &cp->gss_svc_kinfo;
2501
2502	/*
2503	 * Using a reference to the mbuf where we previously split the reply
2504	 * mbuf chain, we split the mbuf chain argument into two mbuf chains,
 * one that allows us to prepend a length field or token (nmc_pre),
2506	 * and the second which holds just the results that we're going to
2507	 * checksum and/or encrypt.  When we're done, we join the chains back
2508	 * together.
2509	 */
2510	nfs_gss_nfsm_chain(nmc_res, mrep);		// set up the results chain
2511	mb = nd->nd_gss_mb;				// the mbuf where we split
2512	results = mbuf_next(mb);			// first mbuf in the results
2513	reslen = nfs_gss_mchain_length(results);	// length of results
2514	error = mbuf_setnext(mb, NULL);			// disconnect the chains
2515	if (error)
2516		return (error);
2517	nfs_gss_nfsm_chain(nmc_pre, mb);		// set up the prepend chain
2518
2519	if (nd->nd_sec == RPCAUTH_KRB5I) {
2520		nfsm_chain_add_32(error, nmc_pre, reslen);
2521		nfsm_chain_build_done(error, nmc_pre);
2522		if (error)
2523			return (error);
2524		nfs_gss_append_chain(nmc_pre, results);	// Append the results mbufs
2525
2526		/* Now compute the checksum over the results data */
2527		nfs_gss_cksum_mchain(ki, results, ALG_MIC(ki), 0, reslen, cksum);
2528
2529		/* Put it into a token and append to the request */
2530		toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 0, 0, cksum);
2531		nfsm_chain_add_32(error, nmc_res, toklen);
2532		nfsm_chain_add_opaque(error, nmc_res, tokbuf, toklen);
2533		nfsm_chain_build_done(error, nmc_res);
2534	} else {
2535		/* RPCAUTH_KRB5P */
2536		/*
2537		 * Append a pad trailer - per RFC 1964 section 1.2.2.3
2538		 * Since XDR data is always 32-bit aligned, it
2539		 * needs to be padded either by 4 bytes or 8 bytes.
2540		 */
2541		if (reslen % 8 > 0) {
2542			nfsm_chain_add_32(error, nmc_res, 0x04040404);
2543			reslen += NFSX_UNSIGNED;
2544		} else {
2545			nfsm_chain_add_32(error, nmc_res, 0x08080808);
2546			nfsm_chain_add_32(error, nmc_res, 0x08080808);
2547			reslen +=  2 * NFSX_UNSIGNED;
2548		}
2549		nfsm_chain_build_done(error, nmc_res);
2550
2551		/* Now compute the checksum over the results data */
2552		nfs_gss_cksum_mchain(ki, results, ALG_WRAP(ki), 0, reslen, cksum);
2553
2554		/* Put it into a token and insert in the reply */
2555		toklen = nfs_gss_token_put(ki, ALG_WRAP(ki), tokbuf, 0, reslen, cksum);
2556		nfsm_chain_add_32(error, nmc_pre, toklen + reslen);
2557		nfsm_chain_add_opaque_nopad(error, nmc_pre, tokbuf, toklen);
2558		nfsm_chain_build_done(error, nmc_pre);
2559		if (error)
2560			return (error);
2561		nfs_gss_append_chain(nmc_pre, results);	// Append the results mbufs
2562
2563		/* Encrypt the confounder + seqnum + results */
2564		nfs_gss_encrypt_mchain(ki, results, 0, reslen, DES_ENCRYPT);
2565
2566		/* Add null XDR pad if the ASN.1 token misaligned the data */
2567		pad = nfsm_pad(toklen + reslen);
2568		if (pad > 0) {
2569			nfsm_chain_add_opaque_nopad(error, nmc_pre, iv0, pad);
2570			nfsm_chain_build_done(error, nmc_pre);
2571		}
2572	}
2573
2574	return (error);
2575}
2576
2577/*
2578 * This function handles the context setup calls from the client.
2579 * Essentially, it implements the NFS null procedure calls when
2580 * an RPCSEC_GSS credential is used.
2581 * This is the context maintenance function.  It creates and
2582 * destroys server contexts at the whim of the client.
2583 * During context creation, it receives GSS-API tokens from the
2584 * client, passes them up to gssd, and returns a received token
2585 * back to the client in the null procedure reply.
2586 */
2587int
2588nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t *mrepp)
2589{
2590	struct nfs_gss_svc_ctx *cp = NULL;
2591	int error = 0;
2592	int autherr = 0;
2593	struct nfsm_chain *nmreq, nmrep;
2594	int sz;
2595
2596	nmreq = &nd->nd_nmreq;
2597	nfsm_chain_null(&nmrep);
2598	*mrepp = NULL;
2599	cp = nd->nd_gss_context;
2600	nd->nd_repstat = 0;
2601
2602	switch (cp->gss_svc_proc) {
2603	case RPCSEC_GSS_INIT:
2604		nfs_gss_svc_ctx_insert(cp);
2605		/* FALLTHRU */
2606
2607	case RPCSEC_GSS_CONTINUE_INIT:
2608		/* Get the token from the request */
2609		nfsm_chain_get_32(error, nmreq, cp->gss_svc_tokenlen);
2610		if (cp->gss_svc_tokenlen == 0) {
2611			autherr = RPCSEC_GSS_CREDPROBLEM;
2612			break;
2613		}
2614		MALLOC(cp->gss_svc_token, u_char *, cp->gss_svc_tokenlen, M_TEMP, M_WAITOK);
2615		if (cp->gss_svc_token == NULL) {
2616			autherr = RPCSEC_GSS_CREDPROBLEM;
2617			break;
2618		}
2619		nfsm_chain_get_opaque(error, nmreq, cp->gss_svc_tokenlen, cp->gss_svc_token);
2620
2621		/* Use the token in a gss_accept_sec_context upcall */
2622		error = nfs_gss_svc_gssd_upcall(cp);
2623		if (error) {
2624			autherr = RPCSEC_GSS_CREDPROBLEM;
2625			if (error == NFSERR_EAUTH)
2626				error = 0;
2627			break;
2628		}
2629
2630		/*
2631		 * If the context isn't complete, pass the new token
2632		 * back to the client for another round.
2633		 */
2634		if (cp->gss_svc_major != GSS_S_COMPLETE)
2635			break;
2636
2637		/*
2638		 * Now the server context is complete.
2639		 * Finish setup.
2640		 */
2641		clock_get_uptime(&cp->gss_svc_incarnation);
2642
2643		cp->gss_svc_seqwin = GSS_SVC_SEQWINDOW;
2644		MALLOC(cp->gss_svc_seqbits, uint32_t *,
2645			nfsm_rndup((cp->gss_svc_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO);
2646		if (cp->gss_svc_seqbits == NULL) {
2647			autherr = RPCSEC_GSS_CREDPROBLEM;
2648			break;
2649		}
2650		break;
2651
2652	case RPCSEC_GSS_DATA:
2653		/* Just a nullproc ping - do nothing */
2654		break;
2655
2656	case RPCSEC_GSS_DESTROY:
2657		/*
2658		 * Don't destroy the context immediately because
2659		 * other active requests might still be using it.
2660		 * Instead, schedule it for destruction after
2661		 * GSS_CTX_PEND time has elapsed.
2662		 */
2663		cp = nfs_gss_svc_ctx_find(cp->gss_svc_handle);
2664		if (cp != NULL) {
2665			cp->gss_svc_handle = 0;	// so it can't be found
2666			lck_mtx_lock(cp->gss_svc_mtx);
2667			clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
2668				&cp->gss_svc_incarnation);
2669			lck_mtx_unlock(cp->gss_svc_mtx);
2670		}
2671		break;
2672	default:
2673		autherr = RPCSEC_GSS_CREDPROBLEM;
2674		break;
2675	}
2676
2677	/* Now build the reply  */
2678
2679	if (nd->nd_repstat == 0)
2680		nd->nd_repstat = autherr ? (NFSERR_AUTHERR | autherr) : NFSERR_RETVOID;
2681	sz = 7 * NFSX_UNSIGNED + nfsm_rndup(cp->gss_svc_tokenlen); // size of results
2682	error = nfsrv_rephead(nd, slp, &nmrep, sz);
2683	*mrepp = nmrep.nmc_mhead;
2684	if (error || autherr)
2685		goto nfsmout;
2686
2687	if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
2688	    cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
2689		nfsm_chain_add_32(error, &nmrep, sizeof(cp->gss_svc_handle));
2690		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_handle);
2691
2692		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_major);
2693		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_minor);
2694		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_seqwin);
2695
2696		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_tokenlen);
2697		if (cp->gss_svc_token != NULL) {
2698			nfsm_chain_add_opaque(error, &nmrep, cp->gss_svc_token, cp->gss_svc_tokenlen);
2699			FREE(cp->gss_svc_token, M_TEMP);
2700			cp->gss_svc_token = NULL;
2701		}
2702	}
2703
2704nfsmout:
2705	if (autherr != 0) {
2706		nd->nd_gss_context = NULL;
2707		LIST_REMOVE(cp, gss_svc_entries);
2708		if (cp->gss_svc_seqbits != NULL)
2709			FREE(cp->gss_svc_seqbits, M_TEMP);
2710		if (cp->gss_svc_token != NULL)
2711			FREE(cp->gss_svc_token, M_TEMP);
2712		lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
2713		FREE(cp, M_TEMP);
2714	}
2715
2716	nfsm_chain_build_done(error, &nmrep);
2717	if (error) {
2718		nfsm_chain_cleanup(&nmrep);
2719		*mrepp = NULL;
2720	}
2721	return (error);
2722}
2723
2724/*
2725 * This is almost a mirror-image of the client side upcall.
2726 * It passes and receives a token, but invokes gss_accept_sec_context.
2727 * If it's the final call of the context setup, then gssd also returns
2728 * the session key and the user's UID.
2729 */
2730static int
2731nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *cp)
2732{
2733	kern_return_t kr;
2734	mach_port_t mp;
2735	int retry_cnt = 0;
2736	gssd_byte_buffer okey = NULL;
2737	uint32_t skeylen = 0;
2738	uint32_t ret_flags;
2739	vm_map_copy_t itoken = NULL;
2740	gssd_byte_buffer otoken = NULL;
2741	mach_msg_type_number_t otokenlen;
2742	int error = 0;
2743	char svcname[] = "nfs";
2744
2745	kr = host_get_gssd_port(host_priv_self(), &mp);
2746	if (kr != KERN_SUCCESS) {
2747		printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);
2748		goto out;
2749	}
2750	if (!IPC_PORT_VALID(mp)) {
2751		printf("nfs_gss_svc_gssd_upcall: gssd port not valid\n");
2752		goto out;
2753	}
2754
2755	if (cp->gss_svc_tokenlen > 0)
2756		nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
2757
2758retry:
2759	kr = mach_gss_accept_sec_context(
2760		mp,
2761		(gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen,
2762		svcname,
2763		0,
2764		&cp->gss_svc_context,
2765		&cp->gss_svc_cred_handle,
2766		&ret_flags,
2767		&cp->gss_svc_uid,
2768		cp->gss_svc_gids,
2769		&cp->gss_svc_ngroups,
2770		&okey, (mach_msg_type_number_t *) &skeylen,
2771		&otoken, &otokenlen,
2772		&cp->gss_svc_major,
2773		&cp->gss_svc_minor);
2774
2775	if (kr != KERN_SUCCESS) {
2776		printf("nfs_gss_svc_gssd_upcall failed: %x (%d)\n", kr, kr);
2777		if (kr == MIG_SERVER_DIED && cp->gss_svc_context == 0 &&
2778			retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) {
2779			if (cp->gss_svc_tokenlen > 0)
2780				nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
2781			goto retry;
2782		}
2783		host_release_special_port(mp);
2784		goto out;
2785	}
2786
2787	host_release_special_port(mp);
2788
2789	if (skeylen > 0) {
2790		if (skeylen != SKEYLEN && skeylen != SKEYLEN3) {
2791			printf("nfs_gss_svc_gssd_upcall: bad key length (%d)\n", skeylen);
2792			vm_map_copy_discard((vm_map_copy_t) okey);
2793			vm_map_copy_discard((vm_map_copy_t) otoken);
2794			goto out;
2795		}
2796		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen, cp->gss_svc_kinfo.skey);
2797		if (error) {
2798			vm_map_copy_discard((vm_map_copy_t) otoken);
2799			goto out;
2800		}
2801		error = gss_key_init(&cp->gss_svc_kinfo, skeylen);
2802		if (error)
2803			goto out;
2804
2805	}
2806
2807	/* Free context token used as input */
2808	if (cp->gss_svc_token)
2809		FREE(cp->gss_svc_token, M_TEMP);
2810	cp->gss_svc_token = NULL;
2811	cp->gss_svc_tokenlen = 0;
2812
2813	if (otokenlen > 0) {
2814		/* Set context token to gss output token */
2815		MALLOC(cp->gss_svc_token, u_char *, otokenlen, M_TEMP, M_WAITOK);
2816		if (cp->gss_svc_token == NULL) {
2817			printf("nfs_gss_svc_gssd_upcall: could not allocate %d bytes\n", otokenlen);
2818			vm_map_copy_discard((vm_map_copy_t) otoken);
2819			return (ENOMEM);
2820		}
2821		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_svc_token);
2822		if (error) {
2823			FREE(cp->gss_svc_token, M_TEMP);
2824			cp->gss_svc_token = NULL;
2825			return (NFSERR_EAUTH);
2826		}
2827		cp->gss_svc_tokenlen = otokenlen;
2828	}
2829
2830	return (0);
2831
2832out:
2833	FREE(cp->gss_svc_token, M_TEMP);
2834	cp->gss_svc_tokenlen = 0;
2835	cp->gss_svc_token = NULL;
2836
2837	return (NFSERR_EAUTH);
2838}
2839
2840/*
2841 * Validate the sequence number in the credential as described
2842 * in RFC 2203 Section 5.3.3.1
2843 *
2844 * Here the window of valid sequence numbers is represented by
2845 * a bitmap.  As each sequence number is received, its bit is
2846 * set in the bitmap.  An invalid sequence number lies below
2847 * the lower bound of the window, or is within the window but
2848 * has its bit already set.
2849 */
2850static int
2851nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *cp, uint32_t seq)
2852{
2853	uint32_t *bits = cp->gss_svc_seqbits;
2854	uint32_t win = cp->gss_svc_seqwin;
2855	uint32_t i;
2856
2857	lck_mtx_lock(cp->gss_svc_mtx);
2858
2859	/*
2860	 * If greater than the window upper bound,
2861	 * move the window up, and set the bit.
2862	 */
2863	if (seq > cp->gss_svc_seqmax) {
2864		if (seq - cp->gss_svc_seqmax > win)
2865			bzero(bits, nfsm_rndup((win + 7) / 8));
2866		else
2867			for (i = cp->gss_svc_seqmax + 1; i < seq; i++)
2868				win_resetbit(bits, i % win);
2869		win_setbit(bits, seq % win);
2870		cp->gss_svc_seqmax = seq;
2871		lck_mtx_unlock(cp->gss_svc_mtx);
2872		return (1);
2873	}
2874
2875	/*
2876	 * Invalid if below the lower bound of the window
2877	 */
2878	if (seq <= cp->gss_svc_seqmax - win) {
2879		lck_mtx_unlock(cp->gss_svc_mtx);
2880		return (0);
2881	}
2882
2883	/*
2884	 * In the window, invalid if the bit is already set
2885	 */
2886	if (win_getbit(bits, seq % win)) {
2887		lck_mtx_unlock(cp->gss_svc_mtx);
2888		return (0);
2889	}
2890	win_setbit(bits, seq % win);
2891	lck_mtx_unlock(cp->gss_svc_mtx);
2892	return (1);
2893}
2894
2895/*
2896 * Drop a reference to a context
2897 *
2898 * Note that it's OK for the context to exist
2899 * with a refcount of zero.  The refcount isn't
2900 * checked until we're about to reap an expired one.
2901 */
2902void
2903nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx *cp)
2904{
2905	lck_mtx_lock(cp->gss_svc_mtx);
2906	if (cp->gss_svc_refcnt > 0)
2907		cp->gss_svc_refcnt--;
2908	else
2909		printf("nfs_gss_ctx_deref: zero refcount\n");
2910	lck_mtx_unlock(cp->gss_svc_mtx);
2911}
2912
2913/*
2914 * Called at NFS server shutdown - destroy all contexts
2915 */
2916void
2917nfs_gss_svc_cleanup(void)
2918{
2919	struct nfs_gss_svc_ctx_hashhead *head;
2920	struct nfs_gss_svc_ctx *cp, *ncp;
2921	int i;
2922
2923	lck_mtx_lock(nfs_gss_svc_ctx_mutex);
2924
2925	/*
2926	 * Run through all the buckets
2927	 */
2928	for (i = 0; i < SVC_CTX_HASHSZ; i++) {
2929		/*
2930		 * Remove and free all entries in the bucket
2931		 */
2932		head = &nfs_gss_svc_ctx_hashtbl[i];
2933		LIST_FOREACH_SAFE(cp, head, gss_svc_entries, ncp) {
2934			LIST_REMOVE(cp, gss_svc_entries);
2935			if (cp->gss_svc_seqbits)
2936				FREE(cp->gss_svc_seqbits, M_TEMP);
2937			lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
2938			FREE(cp, M_TEMP);
2939		}
2940	}
2941
2942	lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
2943}
2944
2945#endif /* NFSSERVER */
2946
2947
2948/*************
2949 * The following functions are used by both client and server.
2950 */
2951
2952/*
2953 * Release a host special port that was obtained by host_get_special_port
2954 * or one of its macros (host_get_gssd_port in this case).
2955 * This really should be in a public kpi.
2956 */
2957
2958/* This should be in a public header if this routine is not */
2959extern void ipc_port_release_send(ipc_port_t);
2960extern ipc_port_t ipc_port_copy_send(ipc_port_t);
2961
2962static void
2963host_release_special_port(mach_port_t mp)
2964{
2965	if (IPC_PORT_VALID(mp))
2966		ipc_port_release_send(mp);
2967}
2968
2969static mach_port_t
2970host_copy_special_port(mach_port_t mp)
2971{
2972	return (ipc_port_copy_send(mp));
2973}
2974
2975/*
2976 * The token that is sent and received in the gssd upcall
2977 * has unbounded variable length.  Mach RPC does not pass
2978 * the token in-line.  Instead it uses page mapping to handle
2979 * these parameters.  This function allocates a VM buffer
2980 * to hold the token for an upcall and copies the token
2981 * (received from the client) into it.  The VM buffer is
2982 * marked with a src_destroy flag so that the upcall will
2983 * automatically de-allocate the buffer when the upcall is
2984 * complete.
2985 */
2986static void
2987nfs_gss_mach_alloc_buffer(u_char *buf, uint32_t buflen, vm_map_copy_t *addr)
2988{
2989	kern_return_t kr;
2990	vm_offset_t kmem_buf;
2991	vm_size_t tbuflen;
2992
2993	*addr = NULL;
2994	if (buf == NULL || buflen == 0)
2995		return;
2996
2997	tbuflen = vm_map_round_page(buflen,
2998				    vm_map_page_mask(ipc_kernel_map));
2999	kr = vm_allocate(ipc_kernel_map, &kmem_buf, tbuflen, VM_FLAGS_ANYWHERE);
3000	if (kr != 0) {
3001		printf("nfs_gss_mach_alloc_buffer: vm_allocate failed\n");
3002		return;
3003	}
3004
3005	kr = vm_map_wire(ipc_kernel_map,
3006			 vm_map_trunc_page(kmem_buf,
3007					   vm_map_page_mask(ipc_kernel_map)),
3008			 vm_map_round_page(kmem_buf + tbuflen,
3009					   vm_map_page_mask(ipc_kernel_map)),
3010		VM_PROT_READ|VM_PROT_WRITE, FALSE);
3011	if (kr != 0) {
3012		printf("nfs_gss_mach_alloc_buffer: vm_map_wire failed\n");
3013		return;
3014	}
3015
3016	bcopy(buf, (void *) kmem_buf, buflen);
3017	// Shouldn't need to bzero below since vm_allocate returns zeroed pages
3018	// bzero(kmem_buf + buflen, tbuflen - buflen);
3019
3020	kr = vm_map_unwire(ipc_kernel_map,
3021			   vm_map_trunc_page(kmem_buf,
3022					     vm_map_page_mask(ipc_kernel_map)),
3023			   vm_map_round_page(kmem_buf + tbuflen,
3024					     vm_map_page_mask(ipc_kernel_map)),
3025			   FALSE);
3026	if (kr != 0) {
3027		printf("nfs_gss_mach_alloc_buffer: vm_map_unwire failed\n");
3028		return;
3029	}
3030
3031	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t) kmem_buf,
3032		(vm_map_size_t) buflen, TRUE, addr);
3033	if (kr != 0) {
3034		printf("nfs_gss_mach_alloc_buffer: vm_map_copyin failed\n");
3035		return;
3036	}
3037}
3038
3039/*
3040 * Here we handle a token received from the gssd via an upcall.
 * The received token resides in an allocated VM buffer.
3042 * We copy the token out of this buffer to a chunk of malloc'ed
3043 * memory of the right size, then de-allocate the VM buffer.
3044 */
3045static int
3046nfs_gss_mach_vmcopyout(vm_map_copy_t in, uint32_t len, u_char *out)
3047{
3048	vm_map_offset_t map_data;
3049	vm_offset_t data;
3050	int error;
3051
3052	error = vm_map_copyout(ipc_kernel_map, &map_data, in);
3053	if (error)
3054		return (error);
3055
3056	data = CAST_DOWN(vm_offset_t, map_data);
3057	bcopy((void *) data, out, len);
3058	vm_deallocate(ipc_kernel_map, data, len);
3059
3060	return (0);
3061}
3062
3063/*
3064 * Encode an ASN.1 token to be wrapped in an RPCSEC_GSS verifier.
3065 * Returns the size of the token, since it contains a variable
3066 * length DER encoded size field.
3067 */
3068static int
3069nfs_gss_token_put(
3070	gss_key_info *ki,
3071	u_char *alg,
3072	u_char *p,
3073	int initiator,
3074	int datalen,
3075	u_char *cksum)
3076{
3077	static uint32_t seqnum = 0;
3078	u_char *psave = p;
3079	u_char plain[8];
3080	int toklen, i;
3081
3082	/*
	 * Fill in the token header: 2 octets.
	 * The first is 0x60, the ASN.1 tag for [APPLICATION 0] SEQUENCE,
	 * followed by the DER-encoded length of the token: 35 + 0 octets
	 * for a MIC token, or 35 + encrypted octets for a wrap token.
	 */
	*p++ = 0x60;
3089	toklen = KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ + HASHLEN(ki);
3090	nfs_gss_der_length_put(&p, toklen + datalen);
3091
3092	/*
3093	 * Fill in the DER encoded mech OID for Kerberos v5.
3094	 * This represents the Kerberos OID 1.2.840.113554.1.2.2
3095	 * described in RFC 2623, section 4.2
3096	 */
3097	bcopy(krb5_mech, p, sizeof(krb5_mech));
3098	p += sizeof(krb5_mech);
3099
3100	/*
	 * Now comes the token described in RFC 1964, section 1.2.1.
	 * Fill in the token ID, the integrity algorithm indicator
	 * (DES MAC MD5), and four filler octets.
3104	 * The alg string encodes the bytes to represent either
3105	 * a MIC token or a WRAP token for Kerberos.
3106	 */
3107	bcopy(alg, p, KRB5_SZ_ALG);
3108	p += KRB5_SZ_ALG;
3109
3110	/*
3111	 * Now encode the sequence number according to
3112	 * RFC 1964, section 1.2.1.2 which dictates 4 octets
3113	 * of sequence number followed by 4 bytes of direction
3114	 * indicator: 0x00 for initiator or 0xff for acceptor.
3115	 * We DES CBC encrypt the sequence number using the first
3116	 * 8 octets of the checksum field as an initialization
3117	 * vector.
3118	 * Note that this sequence number is not at all related
3119	 * to the RPCSEC_GSS protocol sequence number.  This
3120	 * number is private to the ASN.1 token.  The only
3121	 * requirement is that it not be repeated in case the
3122	 * server has replay detection on, which normally should
3123	 * not be the case, since RFC 2203 section 5.2.3 says that
3124	 * replay detection and sequence checking must be turned off.
3125	 */
3126	seqnum++;
3127	for (i = 0; i < 4; i++)
3128		plain[i] = (u_char) ((seqnum >> (i * 8)) & 0xff);
3129	for (i = 4; i < 8; i++)
3130		plain[i] = initiator ? 0x00 : 0xff;
3131	gss_des_crypt(ki, (des_cblock *) plain, (des_cblock *) p, 8,
3132			(des_cblock *) cksum, NULL, DES_ENCRYPT, KG_USAGE_SEQ);
3133	p += 8;
3134
3135	/*
3136	 * Finally, append the octets of the
3137	 * checksum of the alg + plaintext data.
3138	 * The plaintext could be an RPC call header,
3139	 * the window value, or a sequence number.
3140	 */
3141	bcopy(cksum, p, HASHLEN(ki));
3142	p += HASHLEN(ki);
3143
3144	return (p - psave);
3145}
3146
3147/*
3148 * Determine size of ASN.1 DER length
3149 */
3150static int
3151nfs_gss_der_length_size(int len)
3152{
3153	return
3154		len < (1 <<  7) ? 1 :
3155		len < (1 <<  8) ? 2 :
3156		len < (1 << 16) ? 3 :
3157		len < (1 << 24) ? 4 : 5;
3158}
3159
3160/*
3161 * Encode an ASN.1 DER length field
3162 */
3163static void
3164nfs_gss_der_length_put(u_char **pp, int len)
3165{
3166	int sz = nfs_gss_der_length_size(len);
3167	u_char *p = *pp;
3168
3169	if (sz == 1) {
3170		*p++ = (u_char) len;
3171	} else {
3172		*p++ = (u_char) ((sz-1) | 0x80);
3173		sz -= 1;
3174		while (sz--)
3175			*p++ = (u_char) ((len >> (sz * 8)) & 0xff);
3176	}
3177
3178	*pp = p;
3179}
3180
3181/*
3182 * Decode an ASN.1 DER length field
3183 */
3184static int
3185nfs_gss_der_length_get(u_char **pp)
3186{
3187	u_char *p = *pp;
3188	uint32_t flen, len = 0;
3189
3190	flen = *p & 0x7f;
3191
3192	if ((*p++ & 0x80) == 0)
3193		len = flen;
3194	else {
3195		if (flen > sizeof(uint32_t))
3196			return (-1);
3197		while (flen--)
3198			len = (len << 8) + *p++;
3199	}
3200	*pp = p;
3201	return (len);
3202}
3203
3204/*
3205 * Decode an ASN.1 token from an RPCSEC_GSS verifier.
3206 */
3207static int
3208nfs_gss_token_get(
3209	gss_key_info *ki,
3210	u_char *alg,
3211	u_char *p,
3212	int initiator,
3213	uint32_t *len,
3214	u_char *cksum)
3215{
3216	u_char d, plain[8];
3217	u_char *psave = p;
3218	int seqnum, i;
3219
3220	/*
3221	 * Check that we have a valid token header
3222	 */
3223	if (*p++ != 0x60)
3224		return (AUTH_BADCRED);
3225	(void) nfs_gss_der_length_get(&p);	// ignore the size
3226
3227	/*
3228	 * Check that we have the DER encoded Kerberos v5 mech OID
3229	 */
	if (bcmp(p, krb5_mech, sizeof(krb5_mech)) != 0)
3231		return (AUTH_BADCRED);
3232	p += sizeof(krb5_mech);
3233
3234	/*
3235	 * Now check the token ID, DES MAC MD5 algorithm
3236	 * indicator, and filler octets.
3237	 */
3238	if (bcmp(p, alg, KRB5_SZ_ALG) != 0)
3239		return (AUTH_BADCRED);
3240	p += KRB5_SZ_ALG;
3241
3242	/*
3243	 * Now decrypt the sequence number.
3244	 * Note that the gss decryption uses the first 8 octets
3245	 * of the checksum field as an initialization vector (p + 8).
3246	 * Per RFC 2203 section 5.2.2 we don't check the sequence number
3247	 * in the ASN.1 token because the RPCSEC_GSS protocol has its
3248	 * own sequence number described in section 5.3.3.1
3249	 */
3250	seqnum = 0;
3251	gss_des_crypt(ki, (des_cblock *)p, (des_cblock *) plain, 8,
3252			(des_cblock *) (p + 8), NULL, DES_DECRYPT, KG_USAGE_SEQ);
3253	p += 8;
3254	for (i = 0; i < 4; i++)
3255		seqnum |= plain[i] << (i * 8);
3256
3257	/*
3258	 * Make sure the direction
3259	 * indicator octets are correct.
3260	 */
3261	d = initiator ? 0x00 : 0xff;
3262	for (i = 4; i < 8; i++)
3263		if (plain[i] != d)
3264			return (AUTH_BADCRED);
3265
3266	/*
3267	 * Finally, get the checksum
3268	 */
3269	bcopy(p, cksum, HASHLEN(ki));
3270	p += HASHLEN(ki);
3271
3272	if (len != NULL)
3273		*len = p - psave;
3274
3275	return (0);
3276}
3277
3278/*
3279 * Return the number of bytes in an mbuf chain.
3280 */
3281static int
3282nfs_gss_mchain_length(mbuf_t mhead)
3283{
3284	mbuf_t mb;
3285	int len = 0;
3286
3287	for (mb = mhead; mb; mb = mbuf_next(mb))
3288		len += mbuf_len(mb);
3289
3290	return (len);
3291}
3292
3293/*
3294 * Append an args or results mbuf chain to the header chain
3295 */
3296static int
3297nfs_gss_append_chain(struct nfsm_chain *nmc, mbuf_t mc)
3298{
3299	int error = 0;
3300	mbuf_t mb, tail;
3301
3302	/* Connect the mbuf chains */
3303	error = mbuf_setnext(nmc->nmc_mcur, mc);
3304	if (error)
3305		return (error);
3306
3307	/* Find the last mbuf in the chain */
3308	tail = NULL;
3309	for (mb = mc; mb; mb = mbuf_next(mb))
3310		tail = mb;
3311
3312	nmc->nmc_mcur = tail;
3313	nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail);
3314	nmc->nmc_left = mbuf_trailingspace(tail);
3315
3316	return (0);
3317}
3318
3319/*
3320 * Convert an mbuf chain to an NFS mbuf chain
3321 */
3322static void
3323nfs_gss_nfsm_chain(struct nfsm_chain *nmc, mbuf_t mc)
3324{
3325	mbuf_t mb, tail;
3326
3327	/* Find the last mbuf in the chain */
3328	tail = NULL;
3329	for (mb = mc; mb; mb = mbuf_next(mb))
3330		tail = mb;
3331
3332	nmc->nmc_mhead = mc;
3333	nmc->nmc_mcur = tail;
3334	nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail);
3335	nmc->nmc_left = mbuf_trailingspace(tail);
3336	nmc->nmc_flags = 0;
3337}
3338
3339
3340/*
3341 * Compute a checksum over an mbuf chain.
 * Start building a digest at the given offset and keep going,
 * crossing mbuf boundaries as needed, until len bytes have been
 * digested.  For single DES the 16 byte MD5 digest is then
 * converted to an 8 byte DES CBC checksum; for 3DES an
 * HMAC-SHA1 digest is used instead.
3346 */
3347static void
3348nfs_gss_cksum_mchain(
3349	gss_key_info *ki,
3350	mbuf_t mhead,
3351	u_char *alg,
3352	int offset,
3353	int len,
3354	u_char *digest)
3355{
3356	mbuf_t mb;
3357	u_char *ptr;
3358	int left, bytes;
3359	GSS_DIGEST_CTX context;
3360
3361	gss_digest_Init(&context, ki);
3362
3363	/*
3364	 * Logically prepend the first 8 bytes of the algorithm
3365	 * field as required by RFC 1964, section 1.2.1.1
3366	 */
3367	gss_digest_Update(&context, alg, KRB5_SZ_ALG);
3368
3369	/*
3370	 * Move down the mbuf chain until we reach the given
3371	 * byte offset, then start MD5 on the mbuf data until
3372	 * we've done len bytes.
3373	 */
3374
3375	for (mb = mhead; mb && len > 0; mb = mbuf_next(mb)) {
3376		ptr  = mbuf_data(mb);
3377		left = mbuf_len(mb);
3378		if (offset >= left) {
3379			/* Offset not yet reached */
3380			offset -= left;
3381			continue;
3382		}
3383		/* At or beyond offset - checksum data */
3384		ptr += offset;
3385		left -= offset;
3386		offset = 0;
3387
3388		bytes = left < len ? left : len;
3389		if (bytes > 0)
3390			gss_digest_Update(&context, ptr, bytes);
3391		len -= bytes;
3392	}
3393
3394	gss_digest_Final(&context, digest);
3395}
3396
3397/*
3398 * Compute a checksum over an NFS mbuf chain.
 * Start building a digest at the given offset and keep going until
 * len bytes have been digested; if len is zero, digest up to the
 * current encode/decode offset.  See nfs_gss_cksum_mchain() above
 * for the details.
3403 */
3404static void
3405nfs_gss_cksum_chain(
3406	gss_key_info *ki,
3407	struct nfsm_chain *nmc,
3408	u_char *alg,
3409	int offset,
3410	int len,
3411	u_char *cksum)
3412{
3413	/*
3414	 * If the length parameter is zero, then we need
3415	 * to use the length from the offset to the current
3416	 * encode/decode offset.
3417	 */
3418	if (len == 0)
3419		len = nfsm_chain_offset(nmc) - offset;
3420
3421	return (nfs_gss_cksum_mchain(ki, nmc->nmc_mhead, alg, offset, len, cksum));
3422}
3423
3424/*
3425 * Compute a checksum of the sequence number (or sequence window)
3426 * of an RPCSEC_GSS reply.
3427 */
3428static void
3429nfs_gss_cksum_rep(gss_key_info *ki, uint32_t seqnum, u_char *cksum)
3430{
3431	GSS_DIGEST_CTX context;
3432	uint32_t val = htonl(seqnum);
3433
3434	gss_digest_Init(&context, ki);
3435
3436	/*
3437	 * Logically prepend the first 8 bytes of the MIC
3438	 * token as required by RFC 1964, section 1.2.1.1
3439	 */
3440	gss_digest_Update(&context, ALG_MIC(ki), KRB5_SZ_ALG);
3441
3442	/*
3443	 * Compute the digest of the seqnum in network order
3444	 */
3445	gss_digest_Update(&context, &val, 4);
3446	gss_digest_Final(&context, cksum);
3447}
3448
3449/*
3450 * Encrypt or decrypt data in an mbuf chain with des-cbc.
3451 */
3452static void
3453nfs_gss_encrypt_mchain(
3454	gss_key_info *ki,
3455	mbuf_t mhead,
3456	int offset,
3457	int len,
3458	int encrypt)
3459{
3460	mbuf_t mb, mbn;
3461	u_char *ptr, *nptr;
3462	u_char tmp[8], ivec[8];
3463	int left, left8, remain;
3464
3465
3466	bzero(ivec, 8);
3467
3468	/*
3469	 * Move down the mbuf chain until we reach the given
3470	 * byte offset, then start encrypting the mbuf data until
3471	 * we've done len bytes.
3472	 */
3473
3474	for (mb = mhead; mb && len > 0; mb = mbn) {
3475		mbn  = mbuf_next(mb);
3476		ptr  = mbuf_data(mb);
3477		left = mbuf_len(mb);
3478		if (offset >= left) {
3479			/* Offset not yet reached */
3480			offset -= left;
3481			continue;
3482		}
3483		/* At or beyond offset - encrypt data */
3484		ptr += offset;
3485		left -= offset;
3486		offset = 0;
3487
3488		/*
3489		 * DES or DES3 CBC has to encrypt 8 bytes at a time.
3490		 * If the number of bytes to be encrypted in this
3491		 * mbuf isn't some multiple of 8 bytes, encrypt all
3492		 * the 8 byte blocks, then combine the remaining
3493		 * bytes with enough from the next mbuf to make up
3494		 * an 8 byte block and encrypt that block separately,
3495		 * i.e. that block is split across two mbufs.
3496		 */
3497		remain = left % 8;
3498		left8 = left - remain;
3499		left = left8 < len ? left8 : len;
3500		if (left > 0) {
3501			gss_des_crypt(ki, (des_cblock *) ptr, (des_cblock *) ptr,
3502					left, &ivec, &ivec, encrypt, KG_USAGE_SEAL);
3503			len -= left;
3504		}
3505
3506		if (mbn && remain > 0 && len > 0) {
3507			nptr = mbuf_data(mbn);
3508			offset = 8 - remain;
3509			bcopy(ptr + left, tmp, remain);		// grab from this mbuf
3510			bcopy(nptr, tmp + remain, offset);	// grab from next mbuf
3511			gss_des_crypt(ki, (des_cblock *) tmp, (des_cblock *) tmp, 8,
3512					&ivec, &ivec, encrypt, KG_USAGE_SEAL);
3513			bcopy(tmp, ptr + left, remain);		// return to this mbuf
3514			bcopy(tmp + remain, nptr, offset);	// return to next mbuf
3515			len -= 8;
3516		}
3517	}
3518}
3519
3520/*
3521 * Encrypt or decrypt data in an NFS mbuf chain with des-cbc.
3522 */
3523static void
3524nfs_gss_encrypt_chain(
3525	gss_key_info *ki,
3526	struct nfsm_chain *nmc,
3527	int offset,
3528	int len,
3529	int encrypt)
3530{
3531	/*
3532	 * If the length parameter is zero, then we need
3533	 * to use the length from the offset to the current
3534	 * encode/decode offset.
3535	 */
3536	if (len == 0)
3537		len = nfsm_chain_offset(nmc) - offset;
3538
3539	nfs_gss_encrypt_mchain(ki, nmc->nmc_mhead, offset, len, encrypt);
3540}
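
/*
 * Typical usage (sketch; 'args_offset' is illustrative): once the
 * arguments have been encoded into 'nmc', that region can be crypted
 * in place, again with a zero length meaning "up to the current
 * encode/decode offset":
 *
 *	nfs_gss_encrypt_chain(ki, nmc, args_offset, 0, DES_ENCRYPT);
 *
 * The receiver recovers the data by making the same call with
 * DES_DECRYPT.
 */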
3541
3542/*
3543 * The routines that follow provide abstractions for doing digests and crypto.
3544 */
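
/*
 * The digest wrappers follow the usual Init/Update/Final pattern:
 * callers provide a GSS_DIGEST_CTX, feed it any number of Update
 * calls, and extract the result with Final, as nfs_gss_cksum_rep()
 * does above.  gss_des_crypt() picks the key schedule appropriate
 * to the key type and usage.
 */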
3545
3546static void
3547gss_digest_Init(GSS_DIGEST_CTX *ctx, gss_key_info *ki)
3548{
3549	ctx->type = ki->type;
3550	switch (ki->type) {
3551	case NFS_GSS_1DES:	MD5_DESCBC_Init(&ctx->m_ctx, &ki->ks_u.des.gss_sched);
3552				break;
3553	case NFS_GSS_3DES:	HMAC_SHA1_DES3KD_Init(&ctx->h_ctx, ki->ks_u.des3.ckey, 0);
3554				break;
3555	default:
3556			printf("gss_digest_Init: Unknown key info type %d\n", ki->type);
3557	}
3558}
3559
3560static void
3561gss_digest_Update(GSS_DIGEST_CTX *ctx, void *data, size_t len)
3562{
3563	switch (ctx->type) {
3564	case NFS_GSS_1DES:	MD5_DESCBC_Update(&ctx->m_ctx, data, len);
3565				break;
3566	case NFS_GSS_3DES:	HMAC_SHA1_DES3KD_Update(&ctx->h_ctx, data, len);
3567				break;
3568	}
3569}
3570
3571static void
3572gss_digest_Final(GSS_DIGEST_CTX *ctx, void *digest)
3573{
3574	switch (ctx->type) {
3575	case NFS_GSS_1DES:	MD5_DESCBC_Final(digest, &ctx->m_ctx);
3576				break;
3577	case NFS_GSS_3DES:	HMAC_SHA1_DES3KD_Final(digest, &ctx->h_ctx);
3578				break;
3579	}
3580}
3581
3582static void
3583gss_des_crypt(gss_key_info *ki, des_cblock *in, des_cblock *out,
3584		int32_t len, des_cblock *iv, des_cblock *retiv, int encrypt, int usage)
3585{
3586	switch (ki->type) {
3587	case NFS_GSS_1DES:
3588			{
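				/*
				 * Sealing (data privacy) uses the schedule for the
				 * derived encryption key Ke; all other usages use
				 * the session key schedule.
				 */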
3589				des_cbc_key_schedule *sched = ((usage == KG_USAGE_SEAL) ?
3590							&ki->ks_u.des.gss_sched_Ke :
3591							&ki->ks_u.des.gss_sched);
3592				des_cbc_encrypt(in, out, len, sched, iv, retiv, encrypt);
3593			}
3594			break;
3595	case NFS_GSS_3DES:
3597			des3_cbc_encrypt(in, out, len, &ki->ks_u.des3.gss_sched, iv, retiv, encrypt);
3598			break;
3599	}
3600}
3601
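/*
 * Set up the key schedules and checksum parameters for a session key.
 * The caller is expected to have copied the raw session key into
 * ki->skey; skeylen selects single DES (8 bytes) or triple DES
 * (24 bytes).
 */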
3602static int
3603gss_key_init(gss_key_info *ki, uint32_t skeylen)
3604{
3605	size_t i;
3606	int rc;
3607	des_cblock k[3];
3608
3609	ki->keybytes = skeylen;
3610	switch (skeylen) {
3611	case sizeof(des_cblock):
3612				ki->type = NFS_GSS_1DES;
3613				ki->hash_len = MD5_DESCBC_DIGEST_LENGTH;
3614				ki->ks_u.des.key = (des_cblock *)ki->skey;
3615				rc = des_cbc_key_sched(ki->ks_u.des.key, &ki->ks_u.des.gss_sched);
3616				if (rc)
3617					return (rc);
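				/*
				 * Derive the encryption ("seal") key Ke by XORing
				 * each byte of the session key with 0xf0, as
				 * described in RFC 1964, and build its schedule.
				 */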
3618				for (i = 0; i < ki->keybytes; i++)
3619					k[0][i] = 0xf0 ^ (*ki->ks_u.des.key)[i];
3620				rc = des_cbc_key_sched(&k[0], &ki->ks_u.des.gss_sched_Ke);
3621				break;
3622	case 3*sizeof(des_cblock):
3623				ki->type = NFS_GSS_3DES;
3624				ki->hash_len = SHA_DIGEST_LENGTH;
3625				ki->ks_u.des3.key = (des_cblock (*)[3])ki->skey;
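				/*
				 * Derive the key used for the HMAC-SHA1 integrity
				 * checksums, then build the DES3 CBC schedule for
				 * the session key itself.
				 */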
3626				des3_derive_key(*ki->ks_u.des3.key, ki->ks_u.des3.ckey,
3627						KEY_USAGE_DES3_SIGN, KEY_USAGE_LEN);
3628				rc = des3_cbc_key_sched(*ki->ks_u.des3.key, &ki->ks_u.des3.gss_sched);
3629				if (rc)
3630					return (rc);
3631				break;
3632	default:
3633				printf("gss_key_init: Invalid key length %u\n", skeylen);
3634				rc = EINVAL;
3635				break;
3636	}
3637
3638	return (rc);
3639}
3640
3641#if 0
3642#define DISPLAYLEN 16
3643#define MAXDISPLAYLEN 256
3644
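/*
 * Debug helper: dump up to MAXDISPLAYLEN bytes of a buffer as hex,
 * DISPLAYLEN bytes per line.
 */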
3645static void
3646hexdump(const char *msg, void *data, size_t len)
3647{
3648	size_t i, j;
3649	u_char *d = data;
3650	char *p, disbuf[3*DISPLAYLEN+1];
3651
3652	printf("NFS DEBUG %s len=%u:\n", msg, (uint32_t)len);
3653	if (len > MAXDISPLAYLEN)
3654		len = MAXDISPLAYLEN;
3655
3656	for (i = 0; i < len; i += DISPLAYLEN) {
3657		for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3)
3658			snprintf(p, 4, "%02x ", d[i + j]);
3659		printf("\t%s\n", disbuf);
3660	}
3661}
3662#endif
3663