1/*
2 * Copyright (c) 2007-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/*************
30 * These functions implement RPCSEC_GSS security for the NFS client and server.
31 * The code is specific to the use of Kerberos v5 and the use of DES MAC MD5
32 * protection as described in Internet RFC 2203 and 2623.
33 *
34 * In contrast to the original AUTH_SYS authentication, RPCSEC_GSS is stateful.
35 * It requires the client and server negotiate a secure connection as part of a
36 * security context. The context state is maintained in client and server structures.
37 * On the client side, each user of an NFS mount is assigned their own context,
38 * identified by UID, on their first use of the mount, and it persists until the
39 * unmount or until the context is renewed.  Each user context has a corresponding
40 * server context which the server maintains until the client destroys it, or
41 * until the context expires.
42 *
43 * The client and server contexts are set up dynamically.  When a user attempts
44 * to send an NFS request, if there is no context for the user, then one is
45 * set up via an exchange of NFS null procedure calls as described in RFC 2203.
46 * During this exchange, the client and server pass a security token that is
47 * forwarded via Mach upcall to the gssd, which invokes the GSS-API to authenticate
48 * the user to the server (and vice-versa). The client and server also receive
49 * a unique session key that can be used to digitally sign the credentials and
50 * verifier or optionally to provide data integrity and/or privacy.
51 *
52 * Once the context is complete, the client and server enter a normal data
53 * exchange phase - beginning with the NFS request that prompted the context
54 * creation. During this phase, the client's RPC header contains an RPCSEC_GSS
55 * credential and verifier, and the server returns a verifier as well.
56 * For simple authentication, the verifier contains a signed checksum of the
57 * RPC header, including the credential.  The server's verifier has a signed
58 * checksum of the current sequence number.
59 *
60 * Each client call contains a sequence number that nominally increases by one
61 * on each request.  The sequence number is intended to prevent replay attacks.
62 * Since the protocol can be used over UDP, there is some allowance for
63 * out-of-sequence requests, so the server checks whether the sequence numbers
64 * are within a sequence "window". If a sequence number is outside the lower
65 * bound of the window, the server silently drops the request. This has some
66 * implications for retransmission. If a request needs to be retransmitted, the
67 * client must bump the sequence number even if the request XID is unchanged.
68 *
69 * When the NFS mount is unmounted, the client sends a "destroy" credential
70 * to delete the server's context for each user of the mount. Since it's
71 * possible for the client to crash or disconnect without sending the destroy
72 * message, the server has a thread that reaps contexts that have been idle
73 * too long.
74 */
75
76#include <stdint.h>
77#include <sys/param.h>
78#include <sys/systm.h>
79#include <sys/proc.h>
80#include <sys/kauth.h>
81#include <sys/kernel.h>
82#include <sys/mount_internal.h>
83#include <sys/vnode.h>
84#include <sys/ubc.h>
85#include <sys/malloc.h>
86#include <sys/kpi_mbuf.h>
87#include <sys/ucred.h>
88
89#include <kern/host.h>
90#include <kern/task.h>
91#include <libkern/libkern.h>
92
93#include <mach/task.h>
94#include <mach/host_special_ports.h>
95#include <mach/host_priv.h>
96#include <mach/thread_act.h>
97#include <mach/mig_errors.h>
98#include <mach/vm_map.h>
99#include <vm/vm_map.h>
100#include <vm/vm_kern.h>
101#include <gssd/gssd_mach.h>
102
103#include <nfs/rpcv2.h>
104#include <nfs/nfsproto.h>
105#include <nfs/nfs.h>
106#include <nfs/nfsnode.h>
107#include <nfs/nfs_gss.h>
108#include <nfs/nfsmount.h>
109#include <nfs/xdr_subs.h>
110#include <nfs/nfsm_subs.h>
111#include <nfs/nfs_gss.h>
112
113#include "nfs_gss_crypto.h"
114
115#define NFS_GSS_MACH_MAX_RETRIES 3
116
117#define NFS_GSS_DBG(...) NFS_DBG(NFS_FAC_GSS, 7, ## __VA_ARGS__)
118#define NFS_GSS_ISDBG  (NFS_DEBUG_FACILITY &  NFS_FAC_GSS)
119
/*
 * Digest context used by gss_digest_Init/Update/Final.
 * "type" records which member of the anonymous union is active:
 * the MD5/DES-CBC state (single DES) or the HMAC-SHA1/DES3-KD
 * state (triple DES).
 */
typedef struct {
	int type;	/* discriminates the union below */
	union {
		MD5_DESCBC_CTX m_ctx;		/* single-DES digest state */
		HMAC_SHA1_DES3KD_CTX h_ctx;	/* triple-DES digest state */
	};
} GSS_DIGEST_CTX;
127
128#define MAX_DIGEST SHA_DIGEST_LENGTH
129#ifdef NFS_KERNEL_DEBUG
130#define HASHLEN(ki)  (((ki)->hash_len > MAX_DIGEST) ? \
131		(panic("nfs_gss.c:%d ki->hash_len is invalid = %d\n", __LINE__, (ki)->hash_len), MAX_DIGEST) : (ki)->hash_len)
132#else
133#define HASHLEN(ki)  (((ki)->hash_len > MAX_DIGEST) ? \
134		(printf("nfs_gss.c:%d ki->hash_len is invalid = %d\n", __LINE__, (ki)->hash_len), MAX_DIGEST) : (ki)->hash_len)
135#endif
136
137#if NFSSERVER
138u_long nfs_gss_svc_ctx_hash;
139struct nfs_gss_svc_ctx_hashhead *nfs_gss_svc_ctx_hashtbl;
140lck_mtx_t *nfs_gss_svc_ctx_mutex;
141lck_grp_t *nfs_gss_svc_grp;
142uint32_t nfsrv_gss_context_ttl = GSS_CTX_EXPIRE;
143#define GSS_SVC_CTX_TTL ((uint64_t)max(2*GSS_CTX_PEND, nfsrv_gss_context_ttl) * NSEC_PER_SEC)
144#endif /* NFSSERVER */
145
146#if NFSCLIENT
147lck_grp_t *nfs_gss_clnt_grp;
148int nfs_single_des;
149#endif /* NFSCLIENT */
150
151/*
152 * These octet strings are used to encode/decode ASN.1 tokens
153 * in the RPCSEC_GSS verifiers.
154 */
155static u_char krb5_tokhead[] __attribute__((unused)) = { 0x60, 0x23 };
156       u_char krb5_mech[11] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 };
157static u_char krb5_mic[]  = { 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff };
158static u_char krb5_mic3[]  = { 0x01, 0x01, 0x04, 0x00, 0xff, 0xff, 0xff, 0xff };
159static u_char krb5_wrap[] = { 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff };
160static u_char krb5_wrap3[] = { 0x02, 0x01, 0x04, 0x00, 0x02, 0x00, 0xff, 0xff };
161static u_char iv0[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; // DES MAC Initialization Vector
162
163#define ALG_MIC(ki) (((ki)->type == NFS_GSS_1DES) ? krb5_mic : krb5_mic3)
164#define ALG_WRAP(ki) (((ki)->type == NFS_GSS_1DES) ? krb5_wrap : krb5_wrap3)
165
166/*
167 * The size of the Kerberos v5 ASN.1 token
168 * in the verifier.
169 *
170 * Note that the second octet of the krb5_tokhead (0x23) is a
171 * DER-encoded size field that has variable length.  If the size
172 * is 128 bytes or greater, then it uses two bytes, three bytes
173 * if 65536 or greater, and so on.  Since the MIC tokens are
174 * separate from the data, the size is always the same: 35 bytes (0x23).
175 * However, the wrap token is different. Its size field includes the
176 * size of the token + the encrypted data that follows. So the size
177 * field may be two, three or four bytes.
178 */
179#define KRB5_SZ_TOKHEAD sizeof(krb5_tokhead)
180#define KRB5_SZ_MECH	sizeof(krb5_mech)
181#define KRB5_SZ_ALG	sizeof(krb5_mic) // 8 - same as krb5_wrap
182#define KRB5_SZ_SEQ	8
183#define KRB5_SZ_EXTRA	3  // a wrap token may be longer by up to this many octets
184#define KRB5_SZ_TOKEN_NOSUM	(KRB5_SZ_TOKHEAD + KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ)
185#define KRB5_SZ_TOKEN(cksumlen)		((cksumlen) + KRB5_SZ_TOKEN_NOSUM)
186#define KRB5_SZ_TOKMAX(cksumlen)	(KRB5_SZ_TOKEN(cksumlen) + KRB5_SZ_EXTRA)
187
188#if NFSCLIENT
189static int	nfs_gss_clnt_ctx_find(struct nfsreq *);
190static int	nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *);
191static int	nfs_gss_clnt_ctx_init_retry(struct nfsreq *, struct nfs_gss_clnt_ctx *);
192static int	nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *);
193static uint8_t	*nfs_gss_clnt_svcname(struct nfsmount *, gssd_nametype *, uint32_t *);
194static int	nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *);
195void	nfs_gss_clnt_ctx_neg_cache_enter(struct nfs_gss_clnt_ctx *, struct nfsmount *);
196static void	nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *);
197static void	nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *);
198static void	nfs_gss_clnt_log_error(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t, uint32_t);
199#endif /* NFSCLIENT */
200
201#if NFSSERVER
202static struct nfs_gss_svc_ctx *nfs_gss_svc_ctx_find(uint32_t);
203static void	nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *);
204static void	nfs_gss_svc_ctx_timer(void *, void *);
205static int	nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *);
206static int	nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t);
207#endif /* NFSSERVER */
208
209static void	host_release_special_port(mach_port_t);
210static mach_port_t host_copy_special_port(mach_port_t);
211static void	nfs_gss_mach_alloc_buffer(u_char *, uint32_t, vm_map_copy_t *);
212static int	nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *);
213static int	nfs_gss_token_get(gss_key_info *ki, u_char *, u_char *, int, uint32_t *, u_char *);
214static int	nfs_gss_token_put(gss_key_info *ki, u_char *, u_char *, int, int, u_char *);
215static int	nfs_gss_der_length_size(int);
216static void	nfs_gss_der_length_put(u_char **, int);
217static int	nfs_gss_der_length_get(u_char **);
218static int	nfs_gss_mchain_length(mbuf_t);
219static int	nfs_gss_append_chain(struct nfsm_chain *, mbuf_t);
220static void	nfs_gss_nfsm_chain(struct nfsm_chain *, mbuf_t);
221static void	nfs_gss_cksum_mchain(gss_key_info *, mbuf_t, u_char *, int, int, u_char *);
222static void	nfs_gss_cksum_chain(gss_key_info *, struct nfsm_chain *, u_char *, int, int, u_char *);
223static void	nfs_gss_cksum_rep(gss_key_info *, uint32_t, u_char *);
224static void	nfs_gss_encrypt_mchain(gss_key_info *, mbuf_t, int, int, int);
225static void	nfs_gss_encrypt_chain(gss_key_info *, struct nfsm_chain *, int, int, int);
226
227static void	gss_digest_Init(GSS_DIGEST_CTX *, gss_key_info *);
228static void	gss_digest_Update(GSS_DIGEST_CTX *, void *, size_t);
229static void	gss_digest_Final(GSS_DIGEST_CTX *, void *);
230static void	gss_des_crypt(gss_key_info *, des_cblock *, des_cblock *,
231				int32_t, des_cblock *, des_cblock *, int, int);
232static int	gss_key_init(gss_key_info *, uint32_t);
233
234#if NFSSERVER
235thread_call_t nfs_gss_svc_ctx_timer_call;
236int nfs_gss_timer_on = 0;
237uint32_t nfs_gss_ctx_count = 0;
238const uint32_t nfs_gss_ctx_max = GSS_SVC_MAXCONTEXTS;
239#endif /* NFSSERVER */
240
241/*
242 * Initialization when NFS starts
243 */
244void
245nfs_gss_init(void)
246{
247#if NFSCLIENT
248	nfs_gss_clnt_grp = lck_grp_alloc_init("rpcsec_gss_clnt", LCK_GRP_ATTR_NULL);
249#endif /* NFSCLIENT */
250
251#if NFSSERVER
252	nfs_gss_svc_grp  = lck_grp_alloc_init("rpcsec_gss_svc",  LCK_GRP_ATTR_NULL);
253
254	nfs_gss_svc_ctx_hashtbl = hashinit(SVC_CTX_HASHSZ, M_TEMP, &nfs_gss_svc_ctx_hash);
255	nfs_gss_svc_ctx_mutex = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
256
257	nfs_gss_svc_ctx_timer_call = thread_call_allocate(nfs_gss_svc_ctx_timer, NULL);
258#endif /* NFSSERVER */
259}
260
261#if NFSCLIENT
262
263/*
264 * Find the context for a particular user.
265 *
266 * If the context doesn't already exist
267 * then create a new context for this user.
268 *
269 * Note that the code allows superuser (uid == 0)
270 * to adopt the context of another user.
271 *
 * We'll match on the audit session ids, since those
 * processes will have access to the same credential cache.
274 */
275
276#define kauth_cred_getasid(cred) ((cred)->cr_audit.as_aia_p->ai_asid)
277#define kauth_cred_getauid(cred) ((cred)->cr_audit.as_aia_p->ai_auid)
278
279/*
280 * Debugging
281 */
282static void
283nfs_gss_clnt_ctx_dump(struct nfsmount *nmp)
284{
285	struct nfs_gss_clnt_ctx *cp;
286
287	lck_mtx_lock(&nmp->nm_lock);
288	NFS_GSS_DBG("Enter");
289	TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
290		lck_mtx_lock(cp->gss_clnt_mtx);
291		printf("context %d/%d: refcnt = %d, flags = %x\n",
292		       kauth_cred_getasid(cp->gss_clnt_cred),
293		       kauth_cred_getauid(cp->gss_clnt_cred),
294		       cp->gss_clnt_refcnt, cp->gss_clnt_flags);
295		lck_mtx_unlock(cp->gss_clnt_mtx);
296	}
297
298	TAILQ_FOREACH(cp, &nmp->nm_gssnccl, gss_clnt_entries) {
299		lck_mtx_lock(cp->gss_clnt_mtx);
300		printf("context %d/%d: refcnt = %d, flags = %x\n",
301		       kauth_cred_getasid(cp->gss_clnt_cred),
302		       kauth_cred_getauid(cp->gss_clnt_cred),
303		       cp->gss_clnt_refcnt, cp->gss_clnt_flags);
304		lck_mtx_unlock(cp->gss_clnt_mtx);
305	}
306	NFS_GSS_DBG("Exit");
307	lck_mtx_unlock(&nmp->nm_lock);
308}
309
310#define NFS_GSS_CLNT_CTX_DUMP(nmp)		\
311	do {		      \
312		if (NFS_GSS_ISDBG && (NFS_DEBUG_FLAGS & 0x2))	\
313			nfs_gss_clnt_ctx_dump((nmp));	\
314	} while (0)
315
316static int
317nfs_gss_clnt_ctx_cred_match(kauth_cred_t cred1, kauth_cred_t cred2)
318{
319	if (kauth_cred_getasid(cred1) == kauth_cred_getasid(cred2))
320		return (1);
321	return (0);
322}
323
324
/*
 * Find the RPCSEC_GSS context for the credential attached to req and
 * take a reference on it (nfs_gss_clnt_ctx_ref).  If no usable context
 * exists, allocate one (or recycle an expired negative-cache entry)
 * and initialize it via nfs_gss_clnt_ctx_init_retry().
 *
 * Lookup order:
 *   1. Active list (nm_gsscl), matching on audit session id.
 *   2. For superuser, co-opt the first valid context on the active
 *      list when no match was found.
 *   3. Negative cache (nm_gssnccl): an unexpired entry means context
 *      setup failed recently, so fail fast with NFSERR_EAUTH instead
 *      of upcalling gssd again.
 *
 * Returns 0 on success with req->r_gss_ctx referenced, ENOMEM, or
 * NFSERR_EAUTH.
 */
static int
nfs_gss_clnt_ctx_find(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;
	struct nfs_gss_clnt_ctx *cp;
	int error = 0;
	struct timeval now;

	microuptime(&now);
	lck_mtx_lock(&nmp->nm_lock);
	TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
		lck_mtx_lock(cp->gss_clnt_mtx);
		/* Skip contexts queued for destruction */
		if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
			NFS_GSS_DBG("Found destroyed context %d/%d. refcnt = %d continuing\n",
				    kauth_cred_getasid(cp->gss_clnt_cred),
				    kauth_cred_getauid(cp->gss_clnt_cred),
				    cp->gss_clnt_refcnt);
			lck_mtx_unlock(cp->gss_clnt_mtx);
			continue;
		}
		if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, req->r_cred)) {
			/* Move-to-front so frequently used contexts are found fast */
			if (nmp->nm_gsscl.tqh_first != cp) {
				TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
				TAILQ_INSERT_HEAD(&nmp->nm_gsscl, cp, gss_clnt_entries);
			}
			if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
			/*
			 * We haven't been moved to the neg cache list
			 * but we're about to be, finding an entry on
			 * the negative cache list will result in an
			 * NFSERR_EAUTH for GSS_NEG_CACHE_TO so we just
			 * return that now.
			 */
				lck_mtx_unlock(cp->gss_clnt_mtx);
				lck_mtx_unlock(&nmp->nm_lock);
				return (NFSERR_EAUTH);
			}
			lck_mtx_unlock(cp->gss_clnt_mtx);
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_gss_clnt_ctx_ref(req, cp);
			return (0);
		}
		lck_mtx_unlock(cp->gss_clnt_mtx);
	}

	if (kauth_cred_getuid(req->r_cred) == 0) {
		/*
		 * If superuser is trying to get access, then co-opt
		 * the first valid context in the list.
		 * XXX Ultimately, we need to allow superuser to
		 * go ahead and attempt to set up its own context
		 * in case one is set up for it.
		 */
		TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
			if (!(cp->gss_clnt_flags & (GSS_CTX_INVAL|GSS_CTX_DESTROY))) {
				nfs_gss_clnt_ctx_ref(req, cp);
				lck_mtx_unlock(&nmp->nm_lock);
				NFS_GSS_DBG("Root stole context %d/%d\n",
					    kauth_cred_getasid(cp->gss_clnt_cred), kauth_cred_getauid(cp->gss_clnt_cred));
				return (0);
			}
		}
	}

	/*
	 * Check negative context cache
	 * If found and the cache has not expired
	 * return NFSERR_EAUTH, else remove
	 * from the cache and try to create a new context
	 */
	TAILQ_FOREACH(cp, &nmp->nm_gssnccl, gss_clnt_entries) {
		lck_mtx_lock(cp->gss_clnt_mtx);
		if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
			NFS_GSS_DBG("Found destroyed context %d/%d. refcnt = %d continuing\n",
				    kauth_cred_getasid(cp->gss_clnt_cred),
				    kauth_cred_getauid(cp->gss_clnt_cred), cp->gss_clnt_refcnt);
			lck_mtx_unlock(cp->gss_clnt_mtx);
			continue;
		}
		if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, req->r_cred)) {
			/*
			 * If we're still being used and invalid or we're not expired
			 * just return and don't bother gssd again.
			 */
			if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec) {
				NFS_GSS_DBG("Context %d/%d (refcnt = %d) not expired returning EAUTH nctime = %ld now = %ld\n",
					    kauth_cred_getasid(cp->gss_clnt_cred),
					    kauth_cred_getauid(cp->gss_clnt_cred),
					    cp->gss_clnt_refcnt, cp->gss_clnt_nctime, now.tv_sec);
				lck_mtx_unlock(cp->gss_clnt_mtx);
				lck_mtx_unlock(&nmp->nm_lock);
				return (NFSERR_EAUTH);
			}
			if (cp->gss_clnt_refcnt && (cp->gss_clnt_flags & GSS_CTX_INVAL)) {
				NFS_GSS_DBG("Context %d/%d has expired but we still have %d references\n",
					    kauth_cred_getasid(cp->gss_clnt_cred),
					    kauth_cred_getauid(cp->gss_clnt_cred),
					    cp->gss_clnt_refcnt);
				lck_mtx_unlock(cp->gss_clnt_mtx);
				lck_mtx_unlock(&nmp->nm_lock);
				return (NFSERR_EAUTH);
			}
			/* Expired entry: pull it off the neg cache so it can be recycled */
			TAILQ_REMOVE(&nmp->nm_gssnccl, cp, gss_clnt_entries);
			lck_mtx_unlock(cp->gss_clnt_mtx);
			nmp->nm_ncentries--;
			break;
		}
		lck_mtx_unlock(cp->gss_clnt_mtx);
	}


	NFS_GSS_DBG("Context %d/%d %sfound in Neg Cache @  %ld\n",
		    kauth_cred_getasid(req->r_cred),
		    kauth_cred_getauid(req->r_cred),
		    cp == NULL ? "not " : "",
		    cp == NULL ? 0L : cp->gss_clnt_nctime);

	/*
	 * Not found - create a new context
	 */

	if (cp == NULL) {
		MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
		if (cp == NULL) {
			lck_mtx_unlock(&nmp->nm_lock);
			return (ENOMEM);
		}
		cp->gss_clnt_cred = req->r_cred;
		kauth_cred_ref(cp->gss_clnt_cred);
		cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
		cp->gss_clnt_ptime = now.tv_sec - GSS_PRINT_DELAY;
	} else {
		/* Recycle the expired negative-cache entry found above */
		nfs_gss_clnt_ctx_clean(cp);
	}

	/* Mark this thread as the one doing context setup (see cred_put wait) */
	cp->gss_clnt_thread = current_thread();
	nfs_gss_clnt_ctx_ref(req, cp);
	TAILQ_INSERT_HEAD(&nmp->nm_gsscl, cp, gss_clnt_entries);
	lck_mtx_unlock(&nmp->nm_lock);

	error = nfs_gss_clnt_ctx_init_retry(req, cp); // Initialize new context
	if (error)
		nfs_gss_clnt_ctx_unref(req);

	return (error);
}
471
472/*
473 * Inserts an RPCSEC_GSS credential into an RPC header.
474 * After the credential is inserted, the code continues
475 * to build the verifier which contains a signed checksum
476 * of the RPC header.
477 */
478int
479nfs_gss_clnt_cred_put(struct nfsreq *req, struct nfsm_chain *nmc, mbuf_t args)
480{
481	struct nfs_gss_clnt_ctx *cp;
482	uint32_t seqnum = 0;
483	int error = 0;
484	int slpflag, recordmark = 0;
485	int start, len, offset = 0;
486	int pad, toklen;
487	struct nfsm_chain nmc_tmp;
488	struct gss_seq *gsp;
489	u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
490	u_char cksum[MAX_DIGEST];
491	gss_key_info *ki;
492
493	slpflag = (PZERO-1);
494	if (req->r_nmp) {
495		slpflag |= (NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
496		recordmark = (req->r_nmp->nm_sotype == SOCK_STREAM);
497	}
498retry:
499	if (req->r_gss_ctx == NULL) {
500		/*
501		 * Find the context for this user.
502		 * If no context is found, one will
503		 * be created.
504		 */
505		error = nfs_gss_clnt_ctx_find(req);
506		if (error)
507			return (error);
508	}
509	cp = req->r_gss_ctx;
510
511	/*
512	 * If the context thread isn't null, then the context isn't
513	 * yet complete and is for the exclusive use of the thread
514	 * doing the context setup. Wait until the context thread
515	 * is null.
516	 */
517	lck_mtx_lock(cp->gss_clnt_mtx);
518	if (cp->gss_clnt_thread && cp->gss_clnt_thread != current_thread()) {
519		cp->gss_clnt_flags |= GSS_NEEDCTX;
520		msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "ctxwait", NULL);
521		slpflag &= ~PCATCH;
522		if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0)))
523			return (error);
524		nfs_gss_clnt_ctx_unref(req);
525		goto retry;
526	}
527	lck_mtx_unlock(cp->gss_clnt_mtx);
528
529	ki = &cp->gss_clnt_kinfo;
530	if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) {
531		/*
532		 * Get a sequence number for this request.
533		 * Check whether the oldest request in the window is complete.
534		 * If it's still pending, then wait until it's done before
535		 * we allocate a new sequence number and allow this request
536		 * to proceed.
537		 */
538		lck_mtx_lock(cp->gss_clnt_mtx);
539		while (win_getbit(cp->gss_clnt_seqbits,
540			((cp->gss_clnt_seqnum - cp->gss_clnt_seqwin) + 1) % cp->gss_clnt_seqwin)) {
541			cp->gss_clnt_flags |= GSS_NEEDSEQ;
542			msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "seqwin", NULL);
543			slpflag &= ~PCATCH;
544			if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
545				return (error);
546			}
547			lck_mtx_lock(cp->gss_clnt_mtx);
548			if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
549				/* Renewed while while we were waiting */
550				lck_mtx_unlock(cp->gss_clnt_mtx);
551				nfs_gss_clnt_ctx_unref(req);
552				goto retry;
553			}
554		}
555		seqnum = ++cp->gss_clnt_seqnum;
556		win_setbit(cp->gss_clnt_seqbits, seqnum % cp->gss_clnt_seqwin);
557		lck_mtx_unlock(cp->gss_clnt_mtx);
558
559		MALLOC(gsp, struct gss_seq *, sizeof(*gsp), M_TEMP, M_WAITOK|M_ZERO);
560		if (gsp == NULL)
561			return (ENOMEM);
562		gsp->gss_seqnum = seqnum;
563		SLIST_INSERT_HEAD(&req->r_gss_seqlist, gsp, gss_seqnext);
564	}
565
566	/* Insert the credential */
567	nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
568	nfsm_chain_add_32(error, nmc, 5 * NFSX_UNSIGNED + cp->gss_clnt_handle_len);
569	nfsm_chain_add_32(error, nmc, RPCSEC_GSS_VERS_1);
570	nfsm_chain_add_32(error, nmc, cp->gss_clnt_proc);
571	nfsm_chain_add_32(error, nmc, seqnum);
572	nfsm_chain_add_32(error, nmc, cp->gss_clnt_service);
573	nfsm_chain_add_32(error, nmc, cp->gss_clnt_handle_len);
574	if (cp->gss_clnt_handle_len > 0) {
575	   	if (cp->gss_clnt_handle == NULL)
576		  	return (EBADRPC);
577		nfsm_chain_add_opaque(error, nmc, cp->gss_clnt_handle, cp->gss_clnt_handle_len);
578	}
579	if (error)
580	    return(error);
581	/*
582	 * Now add the verifier
583	 */
584	if (cp->gss_clnt_proc == RPCSEC_GSS_INIT ||
585		cp->gss_clnt_proc == RPCSEC_GSS_CONTINUE_INIT) {
586		/*
587		 * If the context is still being created
588		 * then use a null verifier.
589		 */
590		nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);	// flavor
591		nfsm_chain_add_32(error, nmc, 0);		// length
592		nfsm_chain_build_done(error, nmc);
593		if (!error)
594			nfs_gss_append_chain(nmc, args);
595		return (error);
596	}
597
598	offset = recordmark ? NFSX_UNSIGNED : 0; // record mark
599	nfsm_chain_build_done(error, nmc);
600	nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), offset, 0, cksum);
601
602	toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 1, 0, cksum);
603	nfsm_chain_add_32(error, nmc, RPCSEC_GSS);	// flavor
604	nfsm_chain_add_32(error, nmc, toklen);		// length
605	nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
606	nfsm_chain_build_done(error, nmc);
607	if (error)
608		return (error);
609
610	/*
611	 * Now we may have to compute integrity or encrypt the call args
612	 * per RFC 2203 Section 5.3.2
613	 */
614	switch (cp->gss_clnt_service) {
615	case RPCSEC_GSS_SVC_NONE:
616		nfs_gss_append_chain(nmc, args);
617		break;
618	case RPCSEC_GSS_SVC_INTEGRITY:
619		len = nfs_gss_mchain_length(args);	// Find args length
620		req->r_gss_arglen = len;		// Stash the args len
621		len += NFSX_UNSIGNED;			// Add seqnum length
622		nfsm_chain_add_32(error, nmc, len);	// and insert it
623		start = nfsm_chain_offset(nmc);
624		nfsm_chain_add_32(error, nmc, seqnum);	// Insert seqnum
625		req->r_gss_argoff = nfsm_chain_offset(nmc); // Offset to args
626		nfsm_chain_build_done(error, nmc);
627		if (error)
628			return (error);
629		nfs_gss_append_chain(nmc, args);	// Append the args mbufs
630
631		/* Now compute a checksum over the seqnum + args */
632		nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, len, cksum);
633
634		/* Insert it into a token and append to the request */
635		toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 1, 0, cksum);
636		nfsm_chain_finish_mbuf(error, nmc);	// force checksum into new mbuf
637		nfsm_chain_add_32(error, nmc, toklen);
638		nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
639		nfsm_chain_build_done(error, nmc);
640		break;
641	case RPCSEC_GSS_SVC_PRIVACY:
642		/* Prepend a new mbuf with the confounder & sequence number */
643		nfsm_chain_build_alloc_init(error, &nmc_tmp, 3 * NFSX_UNSIGNED);
644		nfsm_chain_add_32(error, &nmc_tmp, random());	// confounder bytes 1-4
645		nfsm_chain_add_32(error, &nmc_tmp, random());	// confounder bytes 4-8
646		nfsm_chain_add_32(error, &nmc_tmp, seqnum);
647		nfsm_chain_build_done(error, &nmc_tmp);
648		if (error)
649			return (error);
650		nfs_gss_append_chain(&nmc_tmp, args);		// Append the args mbufs
651
652		len = nfs_gss_mchain_length(args);		// Find args length
653		len += 3 * NFSX_UNSIGNED;			// add confounder & seqnum
654		req->r_gss_arglen = len;			// Stash length
655
656		/*
657		 * Append a pad trailer - per RFC 1964 section 1.2.2.3
658		 * Since XDR data is always 32-bit aligned, it
659		 * needs to be padded either by 4 bytes or 8 bytes.
660		 */
661		nfsm_chain_finish_mbuf(error, &nmc_tmp);	// force padding into new mbuf
662		if (len % 8 > 0) {
663			nfsm_chain_add_32(error, &nmc_tmp, 0x04040404);
664			len += NFSX_UNSIGNED;
665		} else {
666			nfsm_chain_add_32(error, &nmc_tmp, 0x08080808);
667			nfsm_chain_add_32(error, &nmc_tmp, 0x08080808);
668			len +=  2 * NFSX_UNSIGNED;
669		}
670		nfsm_chain_build_done(error, &nmc_tmp);
671
672		/* Now compute a checksum over the confounder + seqnum + args */
673		nfs_gss_cksum_chain(ki, &nmc_tmp, ALG_WRAP(ki), 0, len, cksum);
674
675		/* Insert it into a token */
676		toklen = nfs_gss_token_put(ki, ALG_WRAP(ki), tokbuf, 1, len, cksum);
677		nfsm_chain_add_32(error, nmc, toklen + len);	// token + args length
678		nfsm_chain_add_opaque_nopad(error, nmc, tokbuf, toklen);
679		req->r_gss_argoff = nfsm_chain_offset(nmc);	// Stash offset
680		nfsm_chain_build_done(error, nmc);
681		if (error)
682			return (error);
683		nfs_gss_append_chain(nmc, nmc_tmp.nmc_mhead);	// Append the args mbufs
684
685		/* Finally, encrypt the args */
686		nfs_gss_encrypt_chain(ki, &nmc_tmp, 0, len, DES_ENCRYPT);
687
688		/* Add null XDR pad if the ASN.1 token misaligned the data */
689		pad = nfsm_pad(toklen + len);
690		if (pad > 0) {
691			nfsm_chain_add_opaque_nopad(error, nmc, iv0, pad);
692			nfsm_chain_build_done(error, nmc);
693		}
694		break;
695	}
696
697	return (error);
698}
699
700/*
701 * When receiving a reply, the client checks the verifier
702 * returned by the server. Check that the verifier is the
703 * correct type, then extract the sequence number checksum
704 * from the token in the credential and compare it with a
705 * computed checksum of the sequence number in the request
706 * that was sent.
707 */
708int
709nfs_gss_clnt_verf_get(
710	struct nfsreq *req,
711	struct nfsm_chain *nmc,
712	uint32_t verftype,
713	uint32_t verflen,
714	uint32_t *accepted_statusp)
715{
716	u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
717	u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
718	uint32_t seqnum = 0;
719	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
720	struct nfsm_chain nmc_tmp;
721	struct gss_seq *gsp;
722	uint32_t reslen, start, cksumlen, toklen;
723	int error = 0;
724	gss_key_info *ki = &cp->gss_clnt_kinfo;
725
726	reslen = cksumlen = 0;
727	*accepted_statusp = 0;
728
729	if (cp == NULL)
730		return (NFSERR_EAUTH);
731	/*
732	 * If it's not an RPCSEC_GSS verifier, then it has to
733	 * be a null verifier that resulted from either
734	 * a CONTINUE_NEEDED reply during context setup or
735	 * from the reply to an AUTH_UNIX call from a dummy
736	 * context that resulted from a fallback to sec=sys.
737	 */
738	if (verftype != RPCSEC_GSS) {
739		if (verftype != RPCAUTH_NULL)
740			return (NFSERR_EAUTH);
741		if (cp->gss_clnt_flags & GSS_CTX_COMPLETE)
742			return (NFSERR_EAUTH);
743		if (verflen > 0)
744			nfsm_chain_adv(error, nmc, nfsm_rndup(verflen));
745		nfsm_chain_get_32(error, nmc, *accepted_statusp);
746		return (error);
747	}
748
749	/*
750	 * If we received an RPCSEC_GSS verifier but the
751	 * context isn't yet complete, then it must be
752	 * the context complete message from the server.
753	 * The verifier will contain an encrypted checksum
754	 * of the window but we don't have the session key
755	 * yet so we can't decrypt it. Stash the verifier
756	 * and check it later in nfs_gss_clnt_ctx_init() when
757	 * the context is complete.
758	 */
759	if (!(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) {
760		MALLOC(cp->gss_clnt_verf, u_char *, verflen, M_TEMP, M_WAITOK|M_ZERO);
761		if (cp->gss_clnt_verf == NULL)
762			return (ENOMEM);
763		nfsm_chain_get_opaque(error, nmc, verflen, cp->gss_clnt_verf);
764		nfsm_chain_get_32(error, nmc, *accepted_statusp);
765		return (error);
766	}
767
768	if (verflen != KRB5_SZ_TOKEN(ki->hash_len))
769		return (NFSERR_EAUTH);
770
771	/*
772	 * Get the 8 octet sequence number
773	 * checksum out of the verifier token.
774	 */
775	nfsm_chain_get_opaque(error, nmc, verflen, tokbuf);
776	if (error)
777		goto nfsmout;
778	error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 0, NULL, cksum1);
779	if (error)
780		goto nfsmout;
781
782	/*
783	 * Search the request sequence numbers for this reply, starting
784	 * with the most recent, looking for a checksum that matches
785	 * the one in the verifier returned by the server.
786	 */
787	SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
788		nfs_gss_cksum_rep(ki, gsp->gss_seqnum, cksum2);
789		if (bcmp(cksum1, cksum2, HASHLEN(ki)) == 0)
790			break;
791	}
792	if (gsp == NULL)
793		return (NFSERR_EAUTH);
794
795	/*
796	 * Get the RPC accepted status
797	 */
798	nfsm_chain_get_32(error, nmc, *accepted_statusp);
799	if (*accepted_statusp != RPC_SUCCESS)
800		return (0);
801
802	/*
803	 * Now we may have to check integrity or decrypt the results
804	 * per RFC 2203 Section 5.3.2
805	 */
806	switch (cp->gss_clnt_service) {
807	case RPCSEC_GSS_SVC_NONE:
808		/* nothing to do */
809		break;
810	case RPCSEC_GSS_SVC_INTEGRITY:
811		/*
812		 * Here's what we expect in the integrity results:
813		 *
814		 * - length of seq num + results (4 bytes)
815		 * - sequence number (4 bytes)
816		 * - results (variable bytes)
817		 * - length of checksum token (37)
818		 * - checksum of seqnum + results (37 bytes)
819		 */
820		nfsm_chain_get_32(error, nmc, reslen);		// length of results
821		if (reslen > NFS_MAXPACKET) {
822			error = EBADRPC;
823			goto nfsmout;
824		}
825
826		/* Compute a checksum over the sequence number + results */
827		start = nfsm_chain_offset(nmc);
828		nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, reslen, cksum1);
829
830		/*
831		 * Get the sequence number prepended to the results
832		 * and compare it against the list in the request.
833		 */
834		nfsm_chain_get_32(error, nmc, seqnum);
835		SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
836			if (seqnum == gsp->gss_seqnum)
837				break;
838		}
839		if (gsp == NULL) {
840			error = EBADRPC;
841			goto nfsmout;
842		}
843
844		/*
845		 * Advance to the end of the results and
846		 * fetch the checksum computed by the server.
847		 */
848		nmc_tmp = *nmc;
849		reslen -= NFSX_UNSIGNED;			// already skipped seqnum
850		nfsm_chain_adv(error, &nmc_tmp, reslen);	// skip over the results
851		nfsm_chain_get_32(error, &nmc_tmp, cksumlen);	// length of checksum
852		if (cksumlen != KRB5_SZ_TOKEN(ki->hash_len)) {
853			error = EBADRPC;
854			goto nfsmout;
855		}
856		nfsm_chain_get_opaque(error, &nmc_tmp, cksumlen, tokbuf);
857		if (error)
858			goto nfsmout;
859		error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 0, NULL, cksum2);
860		if (error)
861			goto nfsmout;
862
863		/* Verify that the checksums are the same */
864		if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
865			error = EBADRPC;
866			goto nfsmout;
867		}
868		break;
869	case RPCSEC_GSS_SVC_PRIVACY:
870		/*
871		 * Here's what we expect in the privacy results:
872		 *
873		 * - length of confounder + seq num + token + results
874		 * - wrap token (37-40 bytes)
875		 * - confounder (8 bytes)
876		 * - sequence number (4 bytes)
877		 * - results (encrypted)
878		 */
879		nfsm_chain_get_32(error, nmc, reslen);		// length of results
880		if (reslen > NFS_MAXPACKET) {
881			error = EBADRPC;
882			goto nfsmout;
883		}
884
885		/* Get the token that prepends the encrypted results */
886		nfsm_chain_get_opaque(error, nmc, KRB5_SZ_TOKMAX(ki->hash_len), tokbuf);
887		if (error)
888			goto nfsmout;
889		error = nfs_gss_token_get(ki, ALG_WRAP(ki), tokbuf, 0,
890			&toklen, cksum1);
891		if (error)
892			goto nfsmout;
893		nfsm_chain_reverse(nmc, nfsm_pad(toklen));
894		reslen -= toklen;				// size of confounder + seqnum + results
895
896		/* decrypt the confounder + sequence number + results */
897		start = nfsm_chain_offset(nmc);
898		nfs_gss_encrypt_chain(ki, nmc, start, reslen, DES_DECRYPT);
899
900		/* Compute a checksum over the confounder + sequence number + results */
901		nfs_gss_cksum_chain(ki, nmc, ALG_WRAP(ki), start, reslen, cksum2);
902
903		/* Verify that the checksums are the same */
904		if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
905			error = EBADRPC;
906			goto nfsmout;
907		}
908
909		nfsm_chain_adv(error, nmc, 8);	// skip over the confounder
910
911		/*
912		 * Get the sequence number prepended to the results
913		 * and compare it against the list in the request.
914		 */
915		nfsm_chain_get_32(error, nmc, seqnum);
916		SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
917			if (seqnum == gsp->gss_seqnum)
918				break;
919		}
920		if (gsp == NULL) {
921			error = EBADRPC;
922			goto nfsmout;
923		}
924
925		break;
926	}
927nfsmout:
928	return (error);
929}
930
931/*
932 * An RPCSEC_GSS request with no integrity or privacy consists
933 * of just the header mbufs followed by the arg mbufs.
934 *
 * However, integrity or privacy both add trailer mbufs to the args,
936 * which means we have to do some work to restore the arg mbuf
937 * chain to its previous state in case we need to retransmit.
938 *
939 * The location and length of the args is marked by two fields
940 * in the request structure: r_gss_argoff and r_gss_arglen,
941 * which are stashed when the NFS request is built.
942 */
int
nfs_gss_clnt_args_restore(struct nfsreq *req)
{
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct nfsm_chain mchain, *nmc = &mchain;
	int len, error = 0;

	/* No GSS context attached to this request: nothing was wrapped */
	if (cp == NULL)
		return (NFSERR_EAUTH);

	/* Args are only wrapped once the security context is established */
	if ((cp->gss_clnt_flags & GSS_CTX_COMPLETE) == 0)
		return (ENEEDAUTH);

	nfsm_chain_dissect_init(error, nmc, req->r_mhead);	// start at RPC header
	nfsm_chain_adv(error, nmc, req->r_gss_argoff);		// advance to args
	if (error)
		return (error);

	switch (cp->gss_clnt_service) {
	case RPCSEC_GSS_SVC_NONE:
		/* nothing to do */
		break;
	case RPCSEC_GSS_SVC_INTEGRITY:
		/*
		 * All we have to do here is remove the appended checksum mbufs.
		 * We know that the checksum starts in a new mbuf beyond the end
		 * of the args.
		 */
		nfsm_chain_adv(error, nmc, req->r_gss_arglen);	// adv to last args mbuf
		if (error)
			return (error);

		/* Drop the trailing checksum mbuf and terminate the chain */
		mbuf_freem(mbuf_next(nmc->nmc_mcur));		// free the cksum mbuf
		error = mbuf_setnext(nmc->nmc_mcur, NULL);
		break;
	case RPCSEC_GSS_SVC_PRIVACY:
		/*
		 * The args are encrypted along with prepended confounders and seqnum.
		 * First we decrypt, the confounder, seqnum and args then skip to the
		 * final mbuf of the args.
		 * The arglen includes 8 bytes of confounder and 4 bytes of seqnum.
		 * Finally, we remove between 4 and 8 bytes of encryption padding
		 * as well as any alignment padding in the trailing mbuf.
		 */
		len = req->r_gss_arglen;
		len += len % 8 > 0 ? 4 : 8;			// add DES padding length
		nfs_gss_encrypt_chain(&cp->gss_clnt_kinfo, nmc,
					req->r_gss_argoff, len, DES_DECRYPT);
		nfsm_chain_adv(error, nmc, req->r_gss_arglen);
		if (error)
			return (error);
		/* Drop the trailing padding mbuf and terminate the chain */
		mbuf_freem(mbuf_next(nmc->nmc_mcur));		// free the pad mbuf
		error = mbuf_setnext(nmc->nmc_mcur, NULL);
		break;
	}

	return (error);
}
1001
1002/*
 * This function sets up a new context on the client.
1004 * Context setup alternates upcalls to the gssd with NFS nullproc calls
1005 * to the server.  Each of these calls exchanges an opaque token, obtained
1006 * via the gssd's calls into the GSS-API on either the client or the server.
1007 * This cycle of calls ends when the client's upcall to the gssd and the
1008 * server's response both return GSS_S_COMPLETE.  At this point, the client
1009 * should have its session key and a handle that it can use to refer to its
1010 * new context on the server.
1011 */
static int
nfs_gss_clnt_ctx_init(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
	struct nfsmount *nmp = req->r_nmp;
	int client_complete = 0;
	int server_complete = 0;
	u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
	int error = 0;
	gss_key_info *ki = &cp->gss_clnt_kinfo;

	/* Initialize a new client context */

	/*
	 * Determine the server's service principal for this mount
	 * if we don't already have one cached on the context.
	 */
	if (cp->gss_clnt_svcname == NULL) {
		cp->gss_clnt_svcname = nfs_gss_clnt_svcname(nmp, &cp->gss_clnt_svcnt, &cp->gss_clnt_svcnamlen);
		if (cp->gss_clnt_svcname == NULL) {
			error = NFSERR_EAUTH;
			goto nfsmout;
		}
	}

	cp->gss_clnt_proc = RPCSEC_GSS_INIT;

	/* Map the request's auth flavor to the RPCSEC_GSS service level */
	cp->gss_clnt_service =
		req->r_auth == RPCAUTH_KRB5  ? RPCSEC_GSS_SVC_NONE :
		req->r_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY :
		req->r_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0;

	cp->gss_clnt_gssd_flags = (nfs_single_des ? GSSD_NFS_1DES : 0);
	/*
	 * Now loop around alternating gss_init_sec_context and
	 * gss_accept_sec_context upcalls to the gssd on the client
	 * and server side until the context is complete - or fails.
	 */
	for (;;) {

retry:
		/* Upcall to the gss_init_sec_context in the gssd */
		error = nfs_gss_clnt_gssd_upcall(req, cp);
		if (error)
			goto nfsmout;

		if (cp->gss_clnt_major == GSS_S_COMPLETE) {
			client_complete = 1;
			if (server_complete)
				break;
		} else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
			/* Anything other than COMPLETE/CONTINUE is fatal */
			error = NFSERR_EAUTH;
			goto nfsmout;
		}

		/*
		 * Pass the token to the server.
		 */
		error = nfs_gss_clnt_ctx_callserver(req, cp);
		if (error) {
			/*
			 * If the very first exchange failed and we haven't
			 * already fallen back, restart the negotiation once
			 * with single DES.
			 */
			if (error == ENEEDAUTH && cp->gss_clnt_proc == RPCSEC_GSS_INIT &&
				(cp->gss_clnt_gssd_flags & (GSSD_RESTART | GSSD_NFS_1DES)) == 0) {
				NFS_GSS_DBG("Retrying with single DES for req %p\n", req);
				cp->gss_clnt_gssd_flags = (GSSD_RESTART | GSSD_NFS_1DES);
				if (cp->gss_clnt_token)
					FREE(cp->gss_clnt_token, M_TEMP);
				cp->gss_clnt_token = NULL;
				cp->gss_clnt_tokenlen = 0;
				goto retry;
			}
			// Reset flags, if error = ENEEDAUTH we will try 3des again
			cp->gss_clnt_gssd_flags = 0;
			goto nfsmout;
		}
		if (cp->gss_clnt_major == GSS_S_COMPLETE) {
			server_complete = 1;
			if (client_complete)
				break;
		}
		cp->gss_clnt_proc = RPCSEC_GSS_CONTINUE_INIT;
	}

	/*
	 * The context is apparently established successfully
	 */
	lck_mtx_lock(cp->gss_clnt_mtx);
	cp->gss_clnt_flags |= GSS_CTX_COMPLETE;
	lck_mtx_unlock(cp->gss_clnt_mtx);
	cp->gss_clnt_proc = RPCSEC_GSS_DATA;

	/*
	 * Compute checksum of the server's window
	 */
	nfs_gss_cksum_rep(ki, cp->gss_clnt_seqwin, cksum1);

	/*
	 * and see if it matches the one in the
	 * verifier the server returned.
	 */
	error = nfs_gss_token_get(ki, ALG_MIC(ki), cp->gss_clnt_verf, 0,
		NULL, cksum2);
	FREE(cp->gss_clnt_verf, M_TEMP);
	cp->gss_clnt_verf = NULL;

	/* A mismatch means the server's verifier was forged or corrupted */
	if (error || bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
		error = NFSERR_EAUTH;
		goto nfsmout;
	}

	/*
	 * Set an initial sequence number somewhat randomized.
	 * Start small so we don't overflow GSS_MAXSEQ too quickly.
	 * Add the size of the sequence window so seqbits arithmetic
	 * doesn't go negative.
	 */
	cp->gss_clnt_seqnum = (random() & 0xffff) + cp->gss_clnt_seqwin;

	/*
	 * Allocate a bitmap to keep track of which requests
	 * are pending within the sequence number window.
	 */
	MALLOC(cp->gss_clnt_seqbits, uint32_t *,
		nfsm_rndup((cp->gss_clnt_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO);
	if (cp->gss_clnt_seqbits == NULL)
		error = NFSERR_EAUTH;
nfsmout:
	/*
	 * If the error is ENEEDAUTH we're not done, so no need
	 * to wake up other threads again. This thread will retry in
	 * the find or renew routines.
	 */
	if (error == ENEEDAUTH)
		return (error);

	/*
	 * If there's an error, just mark it as invalid.
	 * It will be removed when the reference count
	 * drops to zero.
	 */
	lck_mtx_lock(cp->gss_clnt_mtx);
	if (error)
		cp->gss_clnt_flags |= GSS_CTX_INVAL;

	/*
	 * Wake any threads waiting to use the context
	 */
	cp->gss_clnt_thread = NULL;
	if (cp->gss_clnt_flags & GSS_NEEDCTX) {
		cp->gss_clnt_flags &= ~GSS_NEEDCTX;
		wakeup(cp);
	}
	lck_mtx_unlock(cp->gss_clnt_mtx);

	return (error);
}
1163
1164/*
1165 * This function calls nfs_gss_clnt_ctx_init() to set up a new context.
1166 * But if there's a failure in trying to establish the context it keeps
1167 * retrying at progressively longer intervals in case the failure is
1168 * due to some transient condition.  For instance, the server might be
1169 * failing the context setup because directory services is not coming
1170 * up in a timely fashion.
1171 */
static int
nfs_gss_clnt_ctx_init_retry(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
	struct nfsmount *nmp = req->r_nmp;
	struct timeval now;
	time_t waituntil;
	int error, slpflag;
	int retries = 0;
	int timeo = NFS_TRYLATERDEL;	/* initial delay; doubled each retry, capped at 60s */

	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
		goto bad;
	}

	/* For an "intr" mount allow a signal to interrupt the retries */
	slpflag = (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;

	/* ENEEDAUTH means setup failed transiently; back off and retry */
	while ((error = nfs_gss_clnt_ctx_init(req, cp)) == ENEEDAUTH) {
		microuptime(&now);
		waituntil = now.tv_sec + timeo;
		/* Sleep about a second at a time so we can notice signals */
		while (now.tv_sec < waituntil) {
			tsleep(NULL, PSOCK | slpflag, "nfs_gss_clnt_ctx_init_retry", hz);
			slpflag = 0;
			error = nfs_sigintr(req->r_nmp, req, current_thread(), 0);
			if (error)
				goto bad;
			microuptime(&now);
		}

		retries++;
		/* If it's a soft mount just give up after a while */
		if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (retries > nmp->nm_retry)) {
			error = ETIMEDOUT;
			goto bad;
		}
		/* Exponential backoff between attempts */
		timeo *= 2;
		if (timeo > 60)
			timeo = 60;
	}

	if (error == 0)
		return 0;	// success
bad:
	/*
	 * Give up on this context
	 */
	lck_mtx_lock(cp->gss_clnt_mtx);
	cp->gss_clnt_flags |= GSS_CTX_INVAL;

	/*
	 * Wake any threads waiting to use the context
	 */
	cp->gss_clnt_thread = NULL;
	if (cp->gss_clnt_flags & GSS_NEEDCTX) {
		cp->gss_clnt_flags &= ~GSS_NEEDCTX;
		wakeup(cp);
	}
	lck_mtx_unlock(cp->gss_clnt_mtx);

	return error;
}
1234
1235/*
1236 * Call the NFS server using a null procedure for context setup.
1237 * Even though it's a null procedure and nominally has no arguments
1238 * RFC 2203 requires that the GSS-API token be passed as an argument
1239 * and received as a reply.
1240 */
static int
nfs_gss_clnt_ctx_callserver(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
	struct nfsm_chain nmreq, nmrep;
	int error = 0, status;
	/* Save the codes from the previous exchange so logging can rate-limit */
	uint32_t major = cp->gss_clnt_major, minor = cp->gss_clnt_minor;
	int sz;

	if (nfs_mount_gone(req->r_nmp))
		return (ENXIO);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);
	sz = NFSX_UNSIGNED + nfsm_rndup(cp->gss_clnt_tokenlen);
	/* Marshal the context token as the null procedure's "argument" */
	nfsm_chain_build_alloc_init(error, &nmreq, sz);
	nfsm_chain_add_32(error, &nmreq, cp->gss_clnt_tokenlen);
	if (cp->gss_clnt_tokenlen > 0)
		nfsm_chain_add_opaque(error, &nmreq, cp->gss_clnt_token, cp->gss_clnt_tokenlen);
	nfsm_chain_build_done(error, &nmreq);
	if (error)
		goto nfsmout;

	/* Call the server */
	error = nfs_request_gss(req->r_nmp->nm_mountp, &nmreq, req->r_thread, req->r_cred,
				(req->r_flags & R_OPTMASK), cp, &nmrep, &status);
	/* The outbound token has been consumed; free it in either case */
	if (cp->gss_clnt_token != NULL) {
		FREE(cp->gss_clnt_token, M_TEMP);
		cp->gss_clnt_token = NULL;
	}
	if (!error)
		error = status;
	if (error)
		goto nfsmout;

	/* Get the server's reply */

	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_handle_len);
	/* Replace any previous context handle with the one just returned */
	if (cp->gss_clnt_handle != NULL) {
		FREE(cp->gss_clnt_handle, M_TEMP);
		cp->gss_clnt_handle = NULL;
	}
	if (cp->gss_clnt_handle_len > 0) {
		MALLOC(cp->gss_clnt_handle, u_char *, cp->gss_clnt_handle_len, M_TEMP, M_WAITOK);
		if (cp->gss_clnt_handle == NULL) {
			error = ENOMEM;
			goto nfsmout;
		}
		nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_handle_len, cp->gss_clnt_handle);
	}
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_major);
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_minor);
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_seqwin);
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_tokenlen);
	if (error)
		goto nfsmout;
	/* Copy out the server's reply token, if any, for the next round */
	if (cp->gss_clnt_tokenlen > 0) {
		MALLOC(cp->gss_clnt_token, u_char *, cp->gss_clnt_tokenlen, M_TEMP, M_WAITOK);
		if (cp->gss_clnt_token == NULL) {
			error = ENOMEM;
			goto nfsmout;
		}
		nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_tokenlen, cp->gss_clnt_token);
	}

	/*
	 * Make sure any unusual errors are expanded and logged by gssd
	 */
	if (cp->gss_clnt_major != GSS_S_COMPLETE &&
	    cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {

		printf("nfs_gss_clnt_ctx_callserver: gss_clnt_major = %d\n", cp->gss_clnt_major);
		nfs_gss_clnt_log_error(req, cp, major, minor);

	}

nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	return (error);
}
1321
1322/*
1323 * We construct the service principal as a gss hostbased service principal of
1324 * the form nfs@<server>, unless the servers principal was passed down in the
1325 * mount arguments. If the arguments don't specify the service principal, the
 * server name is extracted from the location passed in the mount argument if
1327 * available.  Otherwise assume a format of <server>:<path> in the
1328 * mntfromname. We don't currently support url's or other bizarre formats like
1329 * path@server. Mount_url will convert the nfs url into <server>:<path> when
1330 * calling mount, so this works out well in practice.
1331 *
1332 */
1333
1334static uint8_t *
1335nfs_gss_clnt_svcname(struct nfsmount *nmp, gssd_nametype *nt, uint32_t *len)
1336{
1337	char *svcname, *d, *server;
1338	int lindx, sindx;
1339
1340	if (nfs_mount_gone(nmp))
1341		return (NULL);
1342
1343	if (nmp->nm_sprinc) {
1344		*len = strlen(nmp->nm_sprinc) + 1;
1345		MALLOC(svcname, char *, *len, M_TEMP, M_WAITOK);
1346		*nt = GSSD_HOSTBASED;
1347		if (svcname == NULL)
1348			return (NULL);
1349		strlcpy(svcname, nmp->nm_sprinc, *len);
1350
1351		return ((uint8_t *)svcname);
1352	}
1353
1354	*nt = GSSD_HOSTBASED;
1355	if (nmp->nm_locations.nl_numlocs && !(NFS_GSS_ISDBG && (NFS_DEBUG_FLAGS & 0x1))) {
1356		lindx = nmp->nm_locations.nl_current.nli_loc;
1357		sindx = nmp->nm_locations.nl_current.nli_serv;
1358		server = nmp->nm_locations.nl_locations[lindx]->nl_servers[sindx]->ns_name;
1359		*len = (uint32_t)strlen(server);
1360	} else {
1361		/* Older binaries using older mount args end up here */
1362		server = vfs_statfs(nmp->nm_mountp)->f_mntfromname;
1363		NFS_GSS_DBG("nfs getting gss svcname from %s\n", server);
1364		d = strchr(server, ':');
1365		*len = (uint32_t)(d ? (d - server) : strlen(server));
1366	}
1367
1368	*len +=  5; /* "nfs@" plus null */
1369	MALLOC(svcname, char *, *len, M_TEMP, M_WAITOK);
1370	strlcpy(svcname, "nfs", *len);
1371	strlcat(svcname, "@", *len);
1372	strlcat(svcname, server, *len);
1373	NFS_GSS_DBG("nfs svcname = %s\n", svcname);
1374
1375	return ((uint8_t *)svcname);
1376}
1377
1378/*
1379 * Get a mach port to talk to gssd.
1380 * gssd lives in the root bootstrap, so we call gssd's lookup routine
1381 * to get a send right to talk to a new gssd instance that launchd has launched
1382 * based on the cred's uid and audit session id.
1383 */
1384
1385static mach_port_t
1386nfs_gss_clnt_get_upcall_port(kauth_cred_t credp)
1387{
1388	mach_port_t gssd_host_port, uc_port = IPC_PORT_NULL;
1389	kern_return_t kr;
1390	au_asid_t asid;
1391	uid_t uid;
1392
1393	kr = host_get_gssd_port(host_priv_self(), &gssd_host_port);
1394	if (kr != KERN_SUCCESS) {
1395		printf("nfs_gss_get_upcall_port: can't get gssd port, status %x (%d)\n", kr, kr);
1396		return (IPC_PORT_NULL);
1397	}
1398	if (!IPC_PORT_VALID(gssd_host_port)) {
1399		printf("nfs_gss_get_upcall_port: gssd port not valid\n");
1400		return (IPC_PORT_NULL);
1401	}
1402
1403	asid = kauth_cred_getasid(credp);
1404	uid = kauth_cred_getauid(credp);
1405	if (uid == AU_DEFAUDITID)
1406		uid = kauth_cred_getuid(credp);
1407	kr = mach_gss_lookup(gssd_host_port, uid, asid, &uc_port);
1408	if (kr != KERN_SUCCESS)
1409		printf("nfs_gss_clnt_get_upcall_port: mach_gssd_lookup failed: status %x (%d)\n", kr, kr);
1410
1411	return (uc_port);
1412}
1413
1414
1415static void
1416nfs_gss_clnt_log_error(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32_t major, uint32_t minor)
1417{
1418#define GETMAJERROR(x) (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK)
1419	struct nfsmount *nmp = req->r_nmp;
1420	char who[] = "client";
1421	uint32_t gss_error = GETMAJERROR(cp->gss_clnt_major);
1422	const char *procn = "unkown";
1423	proc_t proc;
1424	pid_t pid = -1;
1425	struct timeval now;
1426
1427	if (req->r_thread) {
1428		proc = (proc_t)get_bsdthreadtask_info(req->r_thread);
1429		if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK)))
1430			proc = NULL;
1431		if (proc) {
1432			if (*proc->p_comm)
1433				procn = proc->p_comm;
1434			pid = proc->p_pid;
1435		}
1436	} else {
1437		procn = "kernproc";
1438		pid = 0;
1439	}
1440
1441	microuptime(&now);
1442	if ((cp->gss_clnt_major != major || cp->gss_clnt_minor != minor ||
1443	     cp->gss_clnt_ptime + GSS_PRINT_DELAY < now.tv_sec) &&
1444	    (nmp->nm_state & NFSSTA_MOUNTED)) {
1445		/*
1446		 * Will let gssd do some logging in hopes that it can translate
1447		 * the minor code.
1448		 */
1449		if (cp->gss_clnt_minor && cp->gss_clnt_minor != minor) {
1450			(void) mach_gss_log_error(
1451				cp->gss_clnt_mport,
1452				vfs_statfs(nmp->nm_mountp)->f_mntfromname,
1453				kauth_cred_getuid(cp->gss_clnt_cred),
1454				who,
1455				cp->gss_clnt_major,
1456				cp->gss_clnt_minor);
1457		}
1458		gss_error = gss_error ? gss_error : cp->gss_clnt_major;
1459
1460		/*
1461		 *%%% It would be really nice to get the terminal from the proc or auditinfo_addr struct and print that here.
1462		 */
1463		printf("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n",
1464		       cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred),
1465		       procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor);
1466		cp->gss_clnt_ptime = now.tv_sec;
1467		switch (gss_error) {
1468		case 7: printf("NFS: gssd does not have credentials for session %d/%d, (kinit)?\n",
1469			       kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred));
1470			break;
1471		case 11: printf("NFS: gssd has expired credentals for session %d/%d, (kinit)?\n",
1472			       kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred));
1473			break;
1474		}
1475	} else {
1476		NFS_GSS_DBG("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n",
1477			    cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred),
1478			    procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor);
1479	}
1480}
1481
1482/*
1483 * Make an upcall to the gssd using Mach RPC
1484 * The upcall is made using a host special port.
1485 * This allows launchd to fire up the gssd in the
1486 * user's session.  This is important, since gssd
1487 * must have access to the user's credential cache.
1488 */
static int
nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
	kern_return_t kr;
	gssd_byte_buffer okey = NULL;
	uint32_t skeylen = 0;
	int retry_cnt = 0;
	vm_map_copy_t itoken = NULL;
	gssd_byte_buffer otoken = NULL;
	mach_msg_type_number_t otokenlen;
	int error = 0;
	uint8_t *principal = NULL;
	uint32_t plen = 0;
	int32_t nt = GSSD_STRING_NAME;
	vm_map_copy_t pname = NULL;
	vm_map_copy_t svcname = NULL;
	char display_name[MAX_DISPLAY_STR] = "";
	uint32_t ret_flags;
	/* Remember the 1DES flag so it can be reinstated after the upcall */
	uint32_t nfs_1des = (cp->gss_clnt_gssd_flags & GSSD_NFS_1DES);
	struct nfsmount *nmp;
	/* Previous major/minor codes, for rate-limited error logging */
	uint32_t major = cp->gss_clnt_major, minor = cp->gss_clnt_minor;

	/*
	 * NFS currently only supports default principals or
	 * principals based on the uid of the caller, unless
	 * the principal to use for the mounting cred was specified
	 * in the mount arguments. If the realm to use was specified
	 * then we will send that up as the principal; since the realm is
	 * preceded by an "@", gssd will try and select the default
	 * principal for that realm.
	 */

	nmp = req->r_nmp;
	if (nmp == NULL || vfs_isforce(nmp->nm_mountp) || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)))
		return (ENXIO);

	/* Pick the client principal: context's, mount's, or mount realm */
	if (cp->gss_clnt_principal && cp->gss_clnt_prinlen) {
		principal = cp->gss_clnt_principal;
		plen = cp->gss_clnt_prinlen;
		nt = cp->gss_clnt_prinnt;
	} else if (nmp->nm_principal && IS_VALID_CRED(nmp->nm_mcred) && req->r_cred == nmp->nm_mcred) {
		plen = (uint32_t)strlen(nmp->nm_principal);
		MALLOC(principal, uint8_t *, plen, M_TEMP, M_WAITOK | M_ZERO);
		if (principal == NULL)
			return (ENOMEM);
		bcopy(nmp->nm_principal, principal, plen);
		cp->gss_clnt_prinnt = nt = GSSD_USER;
	}
	else if (nmp->nm_realm) {
		plen = (uint32_t)strlen(nmp->nm_realm);
		principal = (uint8_t *)nmp->nm_realm;
		nt = GSSD_USER;
	}

	/* Get (or refresh) the mach port for the user's gssd instance */
	if (!IPC_PORT_VALID(cp->gss_clnt_mport)) {
		cp->gss_clnt_mport = nfs_gss_clnt_get_upcall_port(req->r_cred);
		if (cp->gss_clnt_mport == IPC_PORT_NULL)
			goto out;
	}

	/* Copy the buffers into the kernel map for the Mach message */
	if (plen)
		nfs_gss_mach_alloc_buffer(principal, plen, &pname);
	if (cp->gss_clnt_svcnamlen)
		nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname);
	if (cp->gss_clnt_tokenlen)
		nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);

retry:
	kr = mach_gss_init_sec_context_v2(
		cp->gss_clnt_mport,
		GSSD_KRB5_MECH,
		(gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_clnt_tokenlen,
		kauth_cred_getuid(cp->gss_clnt_cred),
		nt,
		(gssd_byte_buffer)pname, (mach_msg_type_number_t) plen,
		cp->gss_clnt_svcnt,
		(gssd_byte_buffer)svcname, (mach_msg_type_number_t) cp->gss_clnt_svcnamlen,
		GSSD_MUTUAL_FLAG,
		&cp->gss_clnt_gssd_flags,
		&cp->gss_clnt_context,
		&cp->gss_clnt_cred_handle,
		&ret_flags,
		&okey,  (mach_msg_type_number_t *) &skeylen,
		&otoken, &otokenlen,
		cp->gss_clnt_display ? NULL : display_name,
		&cp->gss_clnt_major,
		&cp->gss_clnt_minor);

	/* Should be cleared and set in gssd ? */
	cp->gss_clnt_gssd_flags &= ~GSSD_RESTART;
	cp->gss_clnt_gssd_flags |= nfs_1des;

	if (kr != KERN_SUCCESS) {
		printf("nfs_gss_clnt_gssd_upcall: mach_gss_init_sec_context failed: %x (%d)\n", kr, kr);
		/*
		 * If gssd died before establishing any state, re-send the
		 * message.  The out-of-line buffers were consumed by the
		 * failed send, so re-allocate them before retrying.
		 */
		if (kr == MIG_SERVER_DIED && cp->gss_clnt_cred_handle == 0 &&
			retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES &&
			!vfs_isforce(nmp->nm_mountp) && (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) == 0) {
			if (plen)
				nfs_gss_mach_alloc_buffer(principal, plen, &pname);
			if (cp->gss_clnt_svcnamlen)
				nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname);
			if (cp->gss_clnt_tokenlen > 0)
				nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
			goto retry;
		}

		host_release_special_port(cp->gss_clnt_mport);
		cp->gss_clnt_mport = IPC_PORT_NULL;
		goto out;
	}

	/* Save the display name gssd returned on the first successful upcall */
	if (cp->gss_clnt_display == NULL && *display_name != '\0') {
		int dlen = strnlen(display_name, MAX_DISPLAY_STR) + 1;  /* Add extra byte to include '\0' */

		if (dlen < MAX_DISPLAY_STR) {
			MALLOC(cp->gss_clnt_display, char *, dlen, M_TEMP, M_WAITOK);
			if (cp->gss_clnt_display == NULL)
				goto skip;
			bcopy(display_name, cp->gss_clnt_display, dlen);
		} else {
			goto skip;
		}
	}
skip:
	/*
	 * Make sure any unusual errors are expanded and logged by gssd
	 *
	 * XXXX, we need to rethink this and just have gssd return a string for the major and minor codes.
	 */
	if (cp->gss_clnt_major != GSS_S_COMPLETE &&
	    cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
		nfs_gss_clnt_log_error(req, cp, major, minor);
	}

	/* Copy out and install the session key, if gssd returned one */
	if (skeylen > 0) {
		if (skeylen != SKEYLEN && skeylen != SKEYLEN3) {
			printf("nfs_gss_clnt_gssd_upcall: bad key length (%d)\n", skeylen);
			vm_map_copy_discard((vm_map_copy_t) okey);
			vm_map_copy_discard((vm_map_copy_t) otoken);
			goto out;
		}
		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen,
				cp->gss_clnt_kinfo.skey);
		if (error) {
			vm_map_copy_discard((vm_map_copy_t) otoken);
			goto out;
		}

		error = gss_key_init(&cp->gss_clnt_kinfo, skeylen);
		if (error)
			goto out;
	}

	/* Free context token used as input */
	if (cp->gss_clnt_token)
		FREE(cp->gss_clnt_token, M_TEMP);
	cp->gss_clnt_token = NULL;
	cp->gss_clnt_tokenlen = 0;

	if (otokenlen > 0) {
		/* Set context token to gss output token */
		MALLOC(cp->gss_clnt_token, u_char *, otokenlen, M_TEMP, M_WAITOK);
		if (cp->gss_clnt_token == NULL) {
			printf("nfs_gss_clnt_gssd_upcall: could not allocate %d bytes\n", otokenlen);
			vm_map_copy_discard((vm_map_copy_t) otoken);
			return (ENOMEM);
		}
		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_clnt_token);
		if (error) {
			FREE(cp->gss_clnt_token, M_TEMP);
			cp->gss_clnt_token = NULL;
			return (NFSERR_EAUTH);
		}
		cp->gss_clnt_tokenlen = otokenlen;
	}

	return (0);

out:
	/* Error path: drop any pending input token and fail authentication */
	if (cp->gss_clnt_token)
		FREE(cp->gss_clnt_token, M_TEMP);
	cp->gss_clnt_token = NULL;
	cp->gss_clnt_tokenlen = 0;

	return (NFSERR_EAUTH);
}
1675
1676/*
1677 * Invoked at the completion of an RPC call that uses an RPCSEC_GSS
1678 * credential. The sequence number window that the server returns
1679 * at context setup indicates the maximum number of client calls that
1680 * can be outstanding on a context. The client maintains a bitmap that
1681 * represents the server's window.  Each pending request has a bit set
1682 * in the window bitmap.  When a reply comes in or times out, we reset
1683 * the bit in the bitmap and if there are any other threads waiting for
1684 * a context slot we notify the waiting thread(s).
1685 *
1686 * Note that if a request is retransmitted, it will have a single XID
1687 * but it may be associated with multiple sequence numbers.  So we
1688 * may have to reset multiple sequence number bits in the window bitmap.
1689 */
void
nfs_gss_clnt_rpcdone(struct nfsreq *req)
{
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct gss_seq *gsp, *ngsp;
	int i = 0;

	if (cp == NULL || !(cp->gss_clnt_flags & GSS_CTX_COMPLETE))
		return;	// no context - don't bother
	/*
	 * Reset the bit for this request in the
	 * sequence number window to indicate it's done.
	 * We do this even if the request timed out.
	 */
	lck_mtx_lock(cp->gss_clnt_mtx);
	gsp = SLIST_FIRST(&req->r_gss_seqlist);
	/* Only clear bits that still fall within the current window */
	if (gsp && gsp->gss_seqnum > (cp->gss_clnt_seqnum - cp->gss_clnt_seqwin))
		win_resetbit(cp->gss_clnt_seqbits,
			gsp->gss_seqnum % cp->gss_clnt_seqwin);

	/*
	 * Limit the seqnum list to GSS_CLNT_SEQLISTMAX entries
	 * (the list can grow when a request is retransmitted,
	 * since each retransmission gets its own sequence number)
	 */
	SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp) {
		if (++i > GSS_CLNT_SEQLISTMAX) {
			SLIST_REMOVE(&req->r_gss_seqlist, gsp, gss_seq, gss_seqnext);
			FREE(gsp, M_TEMP);
		}
	}

	/*
	 * If there's a thread waiting for
	 * the window to advance, wake it up.
	 */
	if (cp->gss_clnt_flags & GSS_NEEDSEQ) {
		cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
		wakeup(cp);
	}
	lck_mtx_unlock(cp->gss_clnt_mtx);
}
1730
1731/*
1732 * Create a reference to a context from a request
1733 * and bump the reference count
1734 */
1735void
1736nfs_gss_clnt_ctx_ref(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
1737{
1738	req->r_gss_ctx = cp;
1739
1740	lck_mtx_lock(cp->gss_clnt_mtx);
1741	cp->gss_clnt_refcnt++;
1742	lck_mtx_unlock(cp->gss_clnt_mtx);
1743}
1744
1745/*
1746 * Remove a context reference from a request
1747 * If the reference count drops to zero, and the
1748 * context is invalid, destroy the context
1749 */
void
nfs_gss_clnt_ctx_unref(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	int neg_cache = 0;	/* move context to the negative cache */
	int on_neg_cache = 0;	/* context currently lives on the neg cache list */
	int destroy = 0;	/* last ref on a context marked for destruction */

	if (cp == NULL)
		return;

	req->r_gss_ctx = NULL;

	lck_mtx_lock(cp->gss_clnt_mtx);
	if (--cp->gss_clnt_refcnt < 0)
		panic("Over release of gss context!\n");

	/* Decide the context's fate under its lock, act on it after unlocking */
	if (cp->gss_clnt_refcnt == 0 && (cp->gss_clnt_flags & GSS_CTX_DESTROY)) {
		destroy = 1;
		if (cp->gss_clnt_flags & GSS_CTX_NC)
			on_neg_cache = 1;
	} else if ((cp->gss_clnt_flags & (GSS_CTX_INVAL | GSS_CTX_NC)) == GSS_CTX_INVAL) {
		/* Invalid but not yet negative-cached: cache it below */
		neg_cache = 1;
	}
	lck_mtx_unlock(cp->gss_clnt_mtx);
	if (destroy) {
		/* Unlink from whichever mount list holds it, then tear it down */
		if (nmp) {
			lck_mtx_lock(&nmp->nm_lock);
			if (cp->gss_clnt_entries.tqe_next != NFSNOLIST) {
				if (on_neg_cache)
					TAILQ_REMOVE(&nmp->nm_gssnccl, cp, gss_clnt_entries);
				else
					TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}
		nfs_gss_clnt_ctx_destroy(cp);
	} else if (neg_cache)
		nfs_gss_clnt_ctx_neg_cache_enter(cp, nmp);
	NFS_GSS_CLNT_CTX_DUMP(nmp);
}
1792
1793/*
1794 * Enter the gss context associated with req on to the neg context
1795 * cache queue.
1796 */
1797void
1798nfs_gss_clnt_ctx_neg_cache_enter(struct nfs_gss_clnt_ctx *cp, struct nfsmount *nmp)
1799{
1800	struct nfs_gss_clnt_ctx *nccp, *tcp;
1801	struct timeval now;
1802	int reaped = 0;
1803
1804	if (nmp == NULL)
1805		return;
1806
1807	microuptime(&now);
1808	lck_mtx_lock(&nmp->nm_lock);
1809
1810	lck_mtx_lock(cp->gss_clnt_mtx);
1811	if (cp->gss_clnt_entries.tqe_next != NFSNOLIST)
1812		TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
1813
1814	cp->gss_clnt_flags |= GSS_CTX_NC;
1815	cp->gss_clnt_nctime = now.tv_sec;
1816	lck_mtx_unlock(cp->gss_clnt_mtx);
1817
1818	TAILQ_INSERT_TAIL(&nmp->nm_gssnccl, cp, gss_clnt_entries);
1819	nmp->nm_ncentries++;
1820
1821	NFS_GSS_DBG("Reaping contexts ncentries = %d\n", nmp->nm_ncentries);
1822	/* Try and reap old, unreferenced, expired contexts */
1823	TAILQ_FOREACH_SAFE(nccp, &nmp->nm_gssnccl, gss_clnt_entries, tcp) {
1824		int destroy = 0;
1825
1826		/* Keep up to GSS_MAX_NEG_CACHE_ENTRIES */
1827		if (nmp->nm_ncentries <= GSS_MAX_NEG_CACHE_ENTRIES)
1828			break;
1829		/* Contexts to young */
1830		if (nccp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec)
1831			break;
1832		/* Not referenced, remove it. */
1833		lck_mtx_lock(nccp->gss_clnt_mtx);
1834		if (nccp->gss_clnt_refcnt == 0) {
1835			TAILQ_REMOVE(&nmp->nm_gssnccl, nccp, gss_clnt_entries);
1836			reaped++;
1837			destroy = 1;
1838		}
1839		lck_mtx_unlock(nccp->gss_clnt_mtx);
1840		if (destroy)
1841			nfs_gss_clnt_ctx_destroy(nccp);
1842		nmp->nm_ncentries--;
1843	}
1844	NFS_GSS_DBG("Reaped %d contexts ncentries = %d\n", reaped, nmp->nm_ncentries);
1845	lck_mtx_unlock(&nmp->nm_lock);
1846}
1847
1848/*
1849 * Clean a context to be cached
1850 */
1851static void
1852nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *cp)
1853{
1854	cp->gss_clnt_flags = 0;
1855	if (cp->gss_clnt_handle) {
1856		FREE(cp->gss_clnt_handle, M_TEMP);
1857		cp->gss_clnt_handle = NULL;
1858	}
1859	if (cp->gss_clnt_seqbits) {
1860		FREE(cp->gss_clnt_seqbits, M_TEMP);
1861		cp->gss_clnt_seqbits = NULL;
1862	}
1863	if (cp->gss_clnt_token) {
1864		FREE(cp->gss_clnt_token, M_TEMP);
1865		cp->gss_clnt_token = NULL;
1866	}
1867	if (cp->gss_clnt_svcname) {
1868		FREE(cp->gss_clnt_svcname, M_TEMP);
1869		cp->gss_clnt_svcname = NULL;
1870	}
1871	cp->gss_clnt_flags = 0;
1872	cp->gss_clnt_seqwin = 0;
1873	cp->gss_clnt_seqnum = 0;
1874}
1875
1876/*
1877 * Remove a context
1878 */
1879static void
1880nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *cp)
1881{
1882	NFS_GSS_DBG("Destroying context %d/%d\n",
1883		    kauth_cred_getasid(cp->gss_clnt_cred),
1884		    kauth_cred_getauid(cp->gss_clnt_cred));
1885
1886	host_release_special_port(cp->gss_clnt_mport);
1887	cp->gss_clnt_mport = IPC_PORT_NULL;
1888
1889	if (cp->gss_clnt_mtx) {
1890		lck_mtx_destroy(cp->gss_clnt_mtx, nfs_gss_clnt_grp);
1891		cp->gss_clnt_mtx = (lck_mtx_t *)NULL;
1892	}
1893	if (IS_VALID_CRED(cp->gss_clnt_cred))
1894		kauth_cred_unref(&cp->gss_clnt_cred);
1895	cp->gss_clnt_entries.tqe_next = NFSNOLIST;
1896	cp->gss_clnt_entries.tqe_prev = NFSNOLIST;
1897	if (cp->gss_clnt_principal) {
1898		FREE(cp->gss_clnt_principal, M_TEMP);
1899		cp->gss_clnt_principal = NULL;
1900	}
1901	if (cp->gss_clnt_display) {
1902		FREE(cp->gss_clnt_display, M_TEMP);
1903		cp->gss_clnt_display = NULL;
1904	}
1905
1906	nfs_gss_clnt_ctx_clean(cp);
1907
1908	FREE(cp, M_TEMP);
1909}
1910
1911/*
1912 * The context for a user is invalid.
1913 * Mark the context as invalid, then
1914 * create a new context.
1915 */
1916int
1917nfs_gss_clnt_ctx_renew(struct nfsreq *req)
1918{
1919	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
1920	struct nfsmount *nmp = req->r_nmp;
1921	struct nfs_gss_clnt_ctx tmp;
1922	struct nfs_gss_clnt_ctx *ncp;
1923
1924	int error = 0;
1925
1926	if (cp == NULL)
1927		return (0);
1928
1929	lck_mtx_lock(cp->gss_clnt_mtx);
1930	if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
1931		lck_mtx_unlock(cp->gss_clnt_mtx);
1932		nfs_gss_clnt_ctx_unref(req);
1933		return (0);	// already being renewed
1934	}
1935
1936	bzero(&tmp, sizeof(tmp));
1937	tmp.gss_clnt_cred = cp->gss_clnt_cred;
1938	kauth_cred_ref(tmp.gss_clnt_cred);
1939	tmp.gss_clnt_mport = host_copy_special_port(cp->gss_clnt_mport);
1940	tmp.gss_clnt_principal = cp->gss_clnt_principal;
1941	cp->gss_clnt_principal = NULL;
1942	tmp.gss_clnt_prinlen = cp->gss_clnt_prinlen;
1943	tmp.gss_clnt_prinnt = cp->gss_clnt_prinnt;
1944	tmp.gss_clnt_major = cp->gss_clnt_major;
1945	tmp.gss_clnt_minor = cp->gss_clnt_minor;
1946	tmp.gss_clnt_ptime = cp->gss_clnt_ptime;
1947
1948	NFS_GSS_DBG("Renewing context %d/%d\n",
1949		    kauth_cred_getasid(tmp.gss_clnt_cred),
1950		    kauth_cred_getauid(tmp.gss_clnt_cred));
1951	cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
1952
1953	/*
1954	 * If there's a thread waiting
1955	 * in the old context, wake it up.
1956	 */
1957	if (cp->gss_clnt_flags & (GSS_NEEDCTX | GSS_NEEDSEQ)) {
1958		cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
1959		wakeup(cp);
1960	}
1961	lck_mtx_unlock(cp->gss_clnt_mtx);
1962
1963	/*
1964	 * Create a new context
1965	 */
1966	MALLOC(ncp, struct nfs_gss_clnt_ctx *, sizeof(*ncp),
1967		M_TEMP, M_WAITOK|M_ZERO);
1968	if (ncp == NULL) {
1969		error = ENOMEM;
1970		goto out;
1971	}
1972
1973	*ncp = tmp;
1974	ncp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
1975	ncp->gss_clnt_thread = current_thread();
1976
1977	lck_mtx_lock(&nmp->nm_lock);
1978	TAILQ_INSERT_TAIL(&nmp->nm_gsscl, ncp, gss_clnt_entries);
1979	lck_mtx_unlock(&nmp->nm_lock);
1980
1981	/* Adjust reference counts to new and old context */
1982	nfs_gss_clnt_ctx_unref(req);
1983	nfs_gss_clnt_ctx_ref(req, ncp);
1984
1985	error = nfs_gss_clnt_ctx_init_retry(req, ncp);
1986out:
1987	if (error)
1988		nfs_gss_clnt_ctx_unref(req);
1989	return (error);
1990}
1991
1992
1993/*
1994 * Destroy all the contexts associated with a mount.
1995 * The contexts are also destroyed by the server.
1996 */
1997void
1998nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp)
1999{
2000	struct nfs_gss_clnt_ctx *cp;
2001	struct nfsm_chain nmreq, nmrep;
2002	int error, status;
2003	struct nfsreq req;
2004	req.r_nmp = nmp;
2005
2006	if (!nmp)
2007		return;
2008
2009	for (;;) {
2010		lck_mtx_lock(&nmp->nm_lock);
2011		cp = TAILQ_FIRST(&nmp->nm_gsscl);
2012		if (cp == NULL) {
2013			lck_mtx_unlock(&nmp->nm_lock);
2014			goto remove_neg_cache;
2015		}
2016
2017		lck_mtx_lock(cp->gss_clnt_mtx);
2018		cp->gss_clnt_refcnt++;
2019		lck_mtx_unlock(cp->gss_clnt_mtx);
2020		req.r_gss_ctx = cp;
2021
2022		lck_mtx_unlock(&nmp->nm_lock);
2023
2024		/*
2025		 * Tell the server to destroy its context.
2026		 * But don't bother if it's a forced unmount.
2027		 */
2028		if (!nfs_mount_gone(nmp)) {
2029			cp->gss_clnt_proc = RPCSEC_GSS_DESTROY;
2030
2031			error = 0;
2032			nfsm_chain_null(&nmreq);
2033			nfsm_chain_null(&nmrep);
2034			nfsm_chain_build_alloc_init(error, &nmreq, 0);
2035			nfsm_chain_build_done(error, &nmreq);
2036			if (!error)
2037				nfs_request_gss(nmp->nm_mountp, &nmreq,
2038					current_thread(), cp->gss_clnt_cred, 0, cp, &nmrep, &status);
2039			nfsm_chain_cleanup(&nmreq);
2040			nfsm_chain_cleanup(&nmrep);
2041		}
2042
2043		/*
2044		 * Mark the context invalid then drop
2045		 * the reference to remove it if its
2046		 * refcount is zero.
2047		 */
2048		lck_mtx_lock(cp->gss_clnt_mtx);
2049		cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
2050		lck_mtx_unlock(cp->gss_clnt_mtx);
2051		nfs_gss_clnt_ctx_unref(&req);
2052	}
2053
2054	/* Now all the remaining contexts should be on the negative cache list */
2055remove_neg_cache:
2056	for (;;) {
2057		lck_mtx_lock(&nmp->nm_lock);
2058		cp = TAILQ_FIRST(&nmp->nm_gssnccl);
2059		if (cp == NULL) {
2060			lck_mtx_unlock(&nmp->nm_lock);
2061			return;
2062		}
2063		req.r_gss_ctx = cp;
2064		TAILQ_REMOVE(&nmp->nm_gssnccl, cp, gss_clnt_entries);
2065		cp->gss_clnt_entries.tqe_next = NFSNOLIST;
2066
2067		lck_mtx_lock(cp->gss_clnt_mtx);
2068		if (cp->gss_clnt_refcnt)
2069			NFS_GSS_DBG("Context %d/%d found with %d references\n",
2070				    kauth_cred_getasid(cp->gss_clnt_cred),
2071				    kauth_cred_getauid(cp->gss_clnt_cred),
2072				    cp->gss_clnt_refcnt);
2073		cp->gss_clnt_refcnt++;
2074		cp->gss_clnt_flags |= GSS_CTX_DESTROY;
2075		lck_mtx_unlock(cp->gss_clnt_mtx);
2076		lck_mtx_unlock(&nmp->nm_lock);
2077
2078		nfs_gss_clnt_ctx_unref(&req);
2079	}
2080	NFS_GSS_CLNT_CTX_DUMP(nmp);
2081}
2082
2083/*
2084 * Removes a mounts context for a credential
2085 */
int
nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred)
{
	struct nfs_gss_clnt_ctx *cp;
	struct nfsreq req;

	req.r_nmp = nmp;

	NFS_GSS_DBG("Enter\n");
	NFS_GSS_CLNT_CTX_DUMP(nmp);
	lck_mtx_lock(&nmp->nm_lock);
	/* Scan the active context list for a context matching the cred */
	TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
		lck_mtx_lock(cp->gss_clnt_mtx);
		if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
			/* Skip contexts already marked for destruction */
			if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
				NFS_GSS_DBG("Found destroyed context %d/%d. refcnt = %d continuing\n",
					    kauth_cred_getasid(cp->gss_clnt_cred),
					    kauth_cred_getauid(cp->gss_clnt_cred),
					    cp->gss_clnt_refcnt);
				lck_mtx_unlock(cp->gss_clnt_mtx);
				continue;
			}
			/* Take a reference and mark for destruction */
			cp->gss_clnt_refcnt++;
			cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
			lck_mtx_unlock(cp->gss_clnt_mtx);
			req.r_gss_ctx = cp;
			lck_mtx_unlock(&nmp->nm_lock);
			/*
			 * Drop the reference to remove it if its
			 * refcount is zero.
			 */
			NFS_GSS_DBG("Removed context %d/%d refcnt = %d\n",
				    kauth_cred_getasid(cp->gss_clnt_cred),
				    kauth_cred_getuid(cp->gss_clnt_cred),
				    cp->gss_clnt_refcnt);
			nfs_gss_clnt_ctx_unref(&req);
			return (0);
		}
		lck_mtx_unlock(cp->gss_clnt_mtx);
	}

	/* Not on the active list; try the negative cache list */
	TAILQ_FOREACH(cp, &nmp->nm_gssnccl, gss_clnt_entries) {
		lck_mtx_lock(cp->gss_clnt_mtx);
		if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
			if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
				NFS_GSS_DBG("Found destroyed context %d/%d refcnt = %d continuing\n",
					    kauth_cred_getasid(cp->gss_clnt_cred),
					    kauth_cred_getuid(cp->gss_clnt_cred),
					    cp->gss_clnt_refcnt);
				lck_mtx_unlock(cp->gss_clnt_mtx);
				continue;
			}
			cp->gss_clnt_refcnt++;
			cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
			lck_mtx_unlock(cp->gss_clnt_mtx);
			req.r_gss_ctx = cp;
			lck_mtx_unlock(&nmp->nm_lock);
			/*
			 * Drop the reference to remove it if its
			 * refcount is zero.
			 */
			NFS_GSS_DBG("Removed context from neg cache %d/%d refcnt = %d\n",
				    kauth_cred_getasid(cp->gss_clnt_cred),
				    kauth_cred_getuid(cp->gss_clnt_cred),
				    cp->gss_clnt_refcnt);
			nfs_gss_clnt_ctx_unref(&req);
			return (0);
		}
		lck_mtx_unlock(cp->gss_clnt_mtx);
	}

	lck_mtx_unlock(&nmp->nm_lock);

	/* No context found for this credential on either list */
	NFS_GSS_DBG("Returning ENOENT\n");
	return (ENOENT);
}
2162
2163
2164#endif /* NFSCLIENT */
2165
2166/*************
2167 *
2168 * Server functions
2169 */
2170
2171#if NFSSERVER
2172
2173/*
2174 * Find a server context based on a handle value received
2175 * in an RPCSEC_GSS credential.
2176 */
2177static struct nfs_gss_svc_ctx *
2178nfs_gss_svc_ctx_find(uint32_t handle)
2179{
2180	struct nfs_gss_svc_ctx_hashhead *head;
2181	struct nfs_gss_svc_ctx *cp;
2182	uint64_t timenow;
2183
2184	if (handle == 0)
2185		return (NULL);
2186
2187	head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(handle)];
2188	/*
2189	 * Don't return a context that is going to expire in GSS_CTX_PEND seconds
2190	 */
2191	clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, &timenow);
2192
2193	lck_mtx_lock(nfs_gss_svc_ctx_mutex);
2194
2195	LIST_FOREACH(cp, head, gss_svc_entries) {
2196		if (cp->gss_svc_handle == handle) {
2197			if (timenow > cp->gss_svc_incarnation + GSS_SVC_CTX_TTL) {
2198				/*
2199				 * Context has or is about to expire. Don't use.
2200				 * We'll return null and the client will have to create
2201				 * a new context.
2202				 */
2203				cp->gss_svc_handle = 0;
2204				/*
2205				 * Make sure though that we stay around for GSS_CTX_PEND seconds
2206				 * for other threads that might be using the context.
2207				 */
2208				cp->gss_svc_incarnation = timenow;
2209
2210				cp = NULL;
2211				break;
2212			}
2213			lck_mtx_lock(cp->gss_svc_mtx);
2214			cp->gss_svc_refcnt++;
2215			lck_mtx_unlock(cp->gss_svc_mtx);
2216			break;
2217		}
2218	}
2219
2220	lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
2221
2222	return (cp);
2223}
2224
2225/*
2226 * Insert a new server context into the hash table
2227 * and start the context reap thread if necessary.
2228 */
2229static void
2230nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp)
2231{
2232	struct nfs_gss_svc_ctx_hashhead *head;
2233	struct nfs_gss_svc_ctx *p;
2234
2235	lck_mtx_lock(nfs_gss_svc_ctx_mutex);
2236
2237	/*
2238	 * Give the client a random handle so that if we reboot
2239	 * it's unlikely the client will get a bad context match.
2240	 * Make sure it's not zero or already assigned.
2241	 */
2242retry:
2243	cp->gss_svc_handle = random();
2244	if (cp->gss_svc_handle == 0)
2245		goto retry;
2246	head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(cp->gss_svc_handle)];
2247	LIST_FOREACH(p, head, gss_svc_entries)
2248		if (p->gss_svc_handle == cp->gss_svc_handle)
2249			goto retry;
2250
2251	clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
2252		&cp->gss_svc_incarnation);
2253	LIST_INSERT_HEAD(head, cp, gss_svc_entries);
2254	nfs_gss_ctx_count++;
2255
2256	if (!nfs_gss_timer_on) {
2257		nfs_gss_timer_on = 1;
2258
2259		nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
2260			min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
2261	}
2262
2263	lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
2264}
2265
2266/*
2267 * This function is called via the kernel's callout
2268 * mechanism.  It runs only when there are
2269 * cached RPCSEC_GSS contexts.
2270 */
2271void
2272nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2)
2273{
2274	struct nfs_gss_svc_ctx *cp, *next;
2275	uint64_t timenow;
2276	int contexts = 0;
2277	int i;
2278
2279	lck_mtx_lock(nfs_gss_svc_ctx_mutex);
2280	clock_get_uptime(&timenow);
2281
2282	NFS_GSS_DBG("is running\n");
2283
2284	/*
2285	 * Scan all the hash chains
2286	 */
2287	for (i = 0; i < SVC_CTX_HASHSZ; i++) {
2288		/*
2289		 * For each hash chain, look for entries
2290		 * that haven't been used in a while.
2291		 */
2292		LIST_FOREACH_SAFE(cp, &nfs_gss_svc_ctx_hashtbl[i], gss_svc_entries, next) {
2293			contexts++;
2294			if (timenow > cp->gss_svc_incarnation +
2295				(cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0)
2296				&& cp->gss_svc_refcnt == 0) {
2297				/*
2298				 * A stale context - remove it
2299				 */
2300				LIST_REMOVE(cp, gss_svc_entries);
2301				NFS_GSS_DBG("Removing contex for %d\n", cp->gss_svc_uid);
2302				if (cp->gss_svc_seqbits)
2303					FREE(cp->gss_svc_seqbits, M_TEMP);
2304				lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
2305				FREE(cp, M_TEMP);
2306				contexts--;
2307			}
2308		}
2309	}
2310
2311	nfs_gss_ctx_count = contexts;
2312
2313	/*
2314	 * If there are still some cached contexts left,
2315	 * set up another callout to check on them later.
2316	 */
2317	nfs_gss_timer_on = nfs_gss_ctx_count > 0;
2318	if (nfs_gss_timer_on)
2319		nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
2320			min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
2321
2322	lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
2323}
2324
2325/*
2326 * Here the server receives an RPCSEC_GSS credential in an
2327 * RPC call header.  First there's some checking to make sure
2328 * the credential is appropriate - whether the context is still
2329 * being set up, or is complete.  Then we use the handle to find
2330 * the server's context and validate the verifier, which contains
2331 * a signed checksum of the RPC header. If the verifier checks
2332 * out, we extract the user's UID and groups from the context
2333 * and use it to set up a UNIX credential for the user's request.
2334 */
2335int
2336nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
2337{
2338	uint32_t vers, proc, seqnum, service;
2339	uint32_t handle, handle_len;
2340	struct nfs_gss_svc_ctx *cp = NULL;
2341	uint32_t flavor = 0, verflen = 0;
2342	int error = 0;
2343	uint32_t arglen, start, toklen, cksumlen;
2344	u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
2345	u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
2346	struct nfsm_chain nmc_tmp;
2347	gss_key_info *ki;
2348
2349	vers = proc = seqnum = service = handle_len = 0;
2350	arglen = cksumlen = 0;
2351
2352	nfsm_chain_get_32(error, nmc, vers);
2353	if (vers != RPCSEC_GSS_VERS_1) {
2354		error = NFSERR_AUTHERR | AUTH_REJECTCRED;
2355		goto nfsmout;
2356	}
2357
2358	nfsm_chain_get_32(error, nmc, proc);
2359	nfsm_chain_get_32(error, nmc, seqnum);
2360	nfsm_chain_get_32(error, nmc, service);
2361	nfsm_chain_get_32(error, nmc, handle_len);
2362	if (error)
2363		goto nfsmout;
2364
2365	/*
2366	 * Make sure context setup/destroy is being done with a nullproc
2367	 */
2368	if (proc != RPCSEC_GSS_DATA && nd->nd_procnum != NFSPROC_NULL) {
2369		error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
2370		goto nfsmout;
2371	}
2372
2373	/*
2374	 * If the sequence number is greater than the max
2375	 * allowable, reject and have the client init a
2376	 * new context.
2377	 */
2378	if (seqnum > GSS_MAXSEQ) {
2379		error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
2380		goto nfsmout;
2381	}
2382
2383	nd->nd_sec =
2384		service == RPCSEC_GSS_SVC_NONE ?      RPCAUTH_KRB5 :
2385		service == RPCSEC_GSS_SVC_INTEGRITY ? RPCAUTH_KRB5I :
2386		service == RPCSEC_GSS_SVC_PRIVACY ?   RPCAUTH_KRB5P : 0;
2387
2388	if (proc == RPCSEC_GSS_INIT) {
2389		/*
2390		 * Limit the total number of contexts
2391		 */
2392		if (nfs_gss_ctx_count > nfs_gss_ctx_max) {
2393			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
2394			goto nfsmout;
2395		}
2396
2397		/*
2398		 * Set up a new context
2399		 */
2400		MALLOC(cp, struct nfs_gss_svc_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
2401		if (cp == NULL) {
2402			error = ENOMEM;
2403			goto nfsmout;
2404		}
2405		cp->gss_svc_mtx = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
2406		cp->gss_svc_refcnt = 1;
2407	} else {
2408
2409		/*
2410		 * Use the handle to find the context
2411		 */
2412		if (handle_len != sizeof(handle)) {
2413			error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
2414			goto nfsmout;
2415		}
2416		nfsm_chain_get_32(error, nmc, handle);
2417		if (error)
2418			goto nfsmout;
2419		cp = nfs_gss_svc_ctx_find(handle);
2420		if (cp == NULL) {
2421			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
2422			goto nfsmout;
2423		}
2424	}
2425
2426	cp->gss_svc_proc = proc;
2427	ki = &cp->gss_svc_kinfo;
2428
2429	if (proc == RPCSEC_GSS_DATA || proc == RPCSEC_GSS_DESTROY) {
2430		struct posix_cred temp_pcred;
2431
2432		if (cp->gss_svc_seqwin == 0) {
2433			/*
2434			 * Context isn't complete
2435			 */
2436			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
2437			goto nfsmout;
2438		}
2439
2440		if (!nfs_gss_svc_seqnum_valid(cp, seqnum)) {
2441			/*
2442			 * Sequence number is bad
2443			 */
2444			error = EINVAL;	// drop the request
2445			goto nfsmout;
2446		}
2447
2448		/* Now compute the client's call header checksum */
2449		nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), 0, 0, cksum1);
2450
2451		/*
2452		 * Validate the verifier.
2453		 * The verifier contains an encrypted checksum
2454		 * of the call header from the XID up to and
2455		 * including the credential.  We compute the
2456		 * checksum and compare it with what came in
2457		 * the verifier.
2458		 */
2459		nfsm_chain_get_32(error, nmc, flavor);
2460		nfsm_chain_get_32(error, nmc, verflen);
2461		if (error)
2462			goto nfsmout;
2463		if (flavor != RPCSEC_GSS || verflen != KRB5_SZ_TOKEN(ki->hash_len))
2464			error = NFSERR_AUTHERR | AUTH_BADVERF;
2465		nfsm_chain_get_opaque(error, nmc, verflen, tokbuf);
2466		if (error)
2467			goto nfsmout;
2468
2469		/* Get the checksum from the token inside the verifier */
2470		error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 1,
2471			NULL, cksum2);
2472		if (error)
2473			goto nfsmout;
2474
2475		if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
2476			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
2477			goto nfsmout;
2478		}
2479
2480		nd->nd_gss_seqnum = seqnum;
2481
2482		/*
2483		 * Set up the user's cred
2484		 */
2485		bzero(&temp_pcred, sizeof(temp_pcred));
2486		temp_pcred.cr_uid = cp->gss_svc_uid;
2487		bcopy(cp->gss_svc_gids, temp_pcred.cr_groups,
2488				sizeof(gid_t) * cp->gss_svc_ngroups);
2489		temp_pcred.cr_ngroups = cp->gss_svc_ngroups;
2490
2491		nd->nd_cr = posix_cred_create(&temp_pcred);
2492		if (nd->nd_cr == NULL) {
2493			error = ENOMEM;
2494			goto nfsmout;
2495		}
2496		clock_get_uptime(&cp->gss_svc_incarnation);
2497
2498		/*
2499		 * If the call arguments are integrity or privacy protected
2500		 * then we need to check them here.
2501		 */
2502		switch (service) {
2503		case RPCSEC_GSS_SVC_NONE:
2504			/* nothing to do */
2505			break;
2506		case RPCSEC_GSS_SVC_INTEGRITY:
2507			/*
2508			 * Here's what we expect in the integrity call args:
2509			 *
2510			 * - length of seq num + call args (4 bytes)
2511			 * - sequence number (4 bytes)
2512			 * - call args (variable bytes)
2513			 * - length of checksum token (37)
2514			 * - checksum of seqnum + call args (37 bytes)
2515			 */
2516			nfsm_chain_get_32(error, nmc, arglen);		// length of args
2517			if (arglen > NFS_MAXPACKET) {
2518				error = EBADRPC;
2519				goto nfsmout;
2520			}
2521
2522			/* Compute the checksum over the call args */
2523			start = nfsm_chain_offset(nmc);
2524			nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, arglen, cksum1);
2525
2526			/*
2527			 * Get the sequence number prepended to the args
2528			 * and compare it against the one sent in the
2529			 * call credential.
2530			 */
2531			nfsm_chain_get_32(error, nmc, seqnum);
2532			if (seqnum != nd->nd_gss_seqnum) {
2533				error = EBADRPC;			// returns as GARBAGEARGS
2534				goto nfsmout;
2535			}
2536
2537			/*
2538			 * Advance to the end of the args and
2539			 * fetch the checksum computed by the client.
2540			 */
2541			nmc_tmp = *nmc;
2542			arglen -= NFSX_UNSIGNED;			// skipped seqnum
2543			nfsm_chain_adv(error, &nmc_tmp, arglen);	// skip args
2544			nfsm_chain_get_32(error, &nmc_tmp, cksumlen);	// length of checksum
2545			if (cksumlen != KRB5_SZ_TOKEN(ki->hash_len)) {
2546				error = EBADRPC;
2547				goto nfsmout;
2548			}
2549			nfsm_chain_get_opaque(error, &nmc_tmp, cksumlen, tokbuf);
2550			if (error)
2551				goto nfsmout;
2552			error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 1,
2553				NULL, cksum2);
2554
2555			/* Verify that the checksums are the same */
2556			if (error || bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
2557				error = EBADRPC;
2558				goto nfsmout;
2559			}
2560			break;
2561		case RPCSEC_GSS_SVC_PRIVACY:
2562			/*
2563			 * Here's what we expect in the privacy call args:
2564			 *
2565			 * - length of confounder + seq num + token + call args
2566			 * - wrap token (37-40 bytes)
2567			 * - confounder (8 bytes)
2568			 * - sequence number (4 bytes)
2569			 * - call args (encrypted)
2570			 */
2571			nfsm_chain_get_32(error, nmc, arglen);		// length of args
2572			if (arglen > NFS_MAXPACKET) {
2573				error = EBADRPC;
2574				goto nfsmout;
2575			}
2576
2577			/* Get the token that prepends the encrypted args */
2578			nfsm_chain_get_opaque(error, nmc, KRB5_SZ_TOKMAX(ki->hash_len), tokbuf);
2579			if (error)
2580				goto nfsmout;
2581			error = nfs_gss_token_get(ki, ALG_WRAP(ki), tokbuf, 1,
2582							&toklen, cksum1);
2583			if (error)
2584				goto nfsmout;
2585			nfsm_chain_reverse(nmc, nfsm_pad(toklen));
2586
2587			/* decrypt the 8 byte confounder + seqnum + args */
2588			start = nfsm_chain_offset(nmc);
2589			arglen -= toklen;
2590			nfs_gss_encrypt_chain(ki, nmc, start, arglen, DES_DECRYPT);
2591
2592			/* Compute a checksum over the sequence number + results */
2593			nfs_gss_cksum_chain(ki, nmc, ALG_WRAP(ki), start, arglen, cksum2);
2594
2595			/* Verify that the checksums are the same */
2596			if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
2597				error = EBADRPC;
2598				goto nfsmout;
2599			}
2600
2601			/*
2602			 * Get the sequence number prepended to the args
2603			 * and compare it against the one sent in the
2604			 * call credential.
2605			 */
2606			nfsm_chain_adv(error, nmc, 8);			// skip over the confounder
2607			nfsm_chain_get_32(error, nmc, seqnum);
2608			if (seqnum != nd->nd_gss_seqnum) {
2609				error = EBADRPC;			// returns as GARBAGEARGS
2610				goto nfsmout;
2611			}
2612			break;
2613		}
2614	} else {
2615		/*
2616		 * If the proc is RPCSEC_GSS_INIT or RPCSEC_GSS_CONTINUE_INIT
2617		 * then we expect a null verifier.
2618		 */
2619		nfsm_chain_get_32(error, nmc, flavor);
2620		nfsm_chain_get_32(error, nmc, verflen);
2621		if (error || flavor != RPCAUTH_NULL || verflen > 0)
2622			error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
2623		if (error) {
2624			if (proc == RPCSEC_GSS_INIT) {
2625				lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
2626				FREE(cp, M_TEMP);
2627				cp = NULL;
2628			}
2629			goto nfsmout;
2630		}
2631	}
2632
2633	nd->nd_gss_context = cp;
2634	return 0;
2635nfsmout:
2636	if (cp)
2637		nfs_gss_svc_ctx_deref(cp);
2638	return (error);
2639}
2640
2641/*
2642 * Insert the server's verifier into the RPC reply header.
2643 * It contains a signed checksum of the sequence number that
2644 * was received in the RPC call.
2645 * Then go on to add integrity or privacy if necessary.
2646 */
2647int
2648nfs_gss_svc_verf_put(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
2649{
2650	struct nfs_gss_svc_ctx *cp;
2651	int error = 0;
2652	u_char tokbuf[KRB5_SZ_TOKEN(MAX_DIGEST)];
2653	int toklen;
2654	u_char cksum[MAX_DIGEST];
2655	gss_key_info *ki;
2656
2657	cp = nd->nd_gss_context;
2658	ki = &cp->gss_svc_kinfo;
2659
2660	if (cp->gss_svc_major != GSS_S_COMPLETE) {
2661		/*
2662		 * If the context isn't yet complete
2663		 * then return a null verifier.
2664		 */
2665		nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);
2666		nfsm_chain_add_32(error, nmc, 0);
2667		return (error);
2668	}
2669
2670	/*
2671	 * Compute checksum of the request seq number
2672	 * If it's the final reply of context setup
2673	 * then return the checksum of the context
2674	 * window size.
2675	 */
2676	if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
2677	    cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT)
2678		nfs_gss_cksum_rep(ki, cp->gss_svc_seqwin, cksum);
2679	else
2680		nfs_gss_cksum_rep(ki, nd->nd_gss_seqnum, cksum);
2681	/*
2682	 * Now wrap it in a token and add
2683	 * the verifier to the reply.
2684	 */
2685	toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 0, 0, cksum);
2686	nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
2687	nfsm_chain_add_32(error, nmc, toklen);
2688	nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
2689
2690	return (error);
2691}
2692
2693/*
2694 * The results aren't available yet, but if they need to be
2695 * checksummed for integrity protection or encrypted, then
2696 * we can record the start offset here, insert a place-holder
2697 * for the results length, as well as the sequence number.
2698 * The rest of the work is done later by nfs_gss_svc_protect_reply()
2699 * when the results are available.
2700 */
2701int
2702nfs_gss_svc_prepare_reply(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
2703{
2704	struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
2705	int error = 0;
2706
2707	if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
2708	    cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT)
2709		return (0);
2710
2711	switch (nd->nd_sec) {
2712	case RPCAUTH_KRB5:
2713		/* Nothing to do */
2714		break;
2715	case RPCAUTH_KRB5I:
2716		nd->nd_gss_mb = nmc->nmc_mcur;			// record current mbuf
2717		nfsm_chain_finish_mbuf(error, nmc);		// split the chain here
2718		nfsm_chain_add_32(error, nmc, nd->nd_gss_seqnum); // req sequence number
2719		break;
2720	case RPCAUTH_KRB5P:
2721		nd->nd_gss_mb = nmc->nmc_mcur;			// record current mbuf
2722		nfsm_chain_finish_mbuf(error, nmc);		// split the chain here
2723		nfsm_chain_add_32(error, nmc, random());	// confounder bytes 1-4
2724		nfsm_chain_add_32(error, nmc, random());	// confounder bytes 5-8
2725		nfsm_chain_add_32(error, nmc, nd->nd_gss_seqnum); // req sequence number
2726		break;
2727	}
2728
2729	return (error);
2730}
2731
2732/*
2733 * The results are checksummed or encrypted for return to the client
2734 */
2735int
2736nfs_gss_svc_protect_reply(struct nfsrv_descript *nd, mbuf_t mrep)
2737{
2738	struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
2739	struct nfsm_chain nmrep_res, *nmc_res = &nmrep_res;
2740	struct nfsm_chain nmrep_pre, *nmc_pre = &nmrep_pre;
2741	mbuf_t mb, results;
2742	uint32_t reslen;
2743	u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
2744	int pad, toklen;
2745	u_char cksum[MAX_DIGEST];
2746	int error = 0;
2747	gss_key_info *ki = &cp->gss_svc_kinfo;
2748
2749	/*
2750	 * Using a reference to the mbuf where we previously split the reply
2751	 * mbuf chain, we split the mbuf chain argument into two mbuf chains,
2752	 * one that allows us to prepend a length field or token, (nmc_pre)
2753	 * and the second which holds just the results that we're going to
2754	 * checksum and/or encrypt.  When we're done, we join the chains back
2755	 * together.
2756	 */
2757	nfs_gss_nfsm_chain(nmc_res, mrep);		// set up the results chain
2758	mb = nd->nd_gss_mb;				// the mbuf where we split
2759	results = mbuf_next(mb);			// first mbuf in the results
2760	reslen = nfs_gss_mchain_length(results);	// length of results
2761	error = mbuf_setnext(mb, NULL);			// disconnect the chains
2762	if (error)
2763		return (error);
2764	nfs_gss_nfsm_chain(nmc_pre, mb);		// set up the prepend chain
2765
2766	if (nd->nd_sec == RPCAUTH_KRB5I) {
2767		nfsm_chain_add_32(error, nmc_pre, reslen);
2768		nfsm_chain_build_done(error, nmc_pre);
2769		if (error)
2770			return (error);
2771		nfs_gss_append_chain(nmc_pre, results);	// Append the results mbufs
2772
2773		/* Now compute the checksum over the results data */
2774		nfs_gss_cksum_mchain(ki, results, ALG_MIC(ki), 0, reslen, cksum);
2775
2776		/* Put it into a token and append to the request */
2777		toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 0, 0, cksum);
2778		nfsm_chain_add_32(error, nmc_res, toklen);
2779		nfsm_chain_add_opaque(error, nmc_res, tokbuf, toklen);
2780		nfsm_chain_build_done(error, nmc_res);
2781	} else {
2782		/* RPCAUTH_KRB5P */
2783		/*
2784		 * Append a pad trailer - per RFC 1964 section 1.2.2.3
2785		 * Since XDR data is always 32-bit aligned, it
2786		 * needs to be padded either by 4 bytes or 8 bytes.
2787		 */
2788		if (reslen % 8 > 0) {
2789			nfsm_chain_add_32(error, nmc_res, 0x04040404);
2790			reslen += NFSX_UNSIGNED;
2791		} else {
2792			nfsm_chain_add_32(error, nmc_res, 0x08080808);
2793			nfsm_chain_add_32(error, nmc_res, 0x08080808);
2794			reslen +=  2 * NFSX_UNSIGNED;
2795		}
2796		nfsm_chain_build_done(error, nmc_res);
2797
2798		/* Now compute the checksum over the results data */
2799		nfs_gss_cksum_mchain(ki, results, ALG_WRAP(ki), 0, reslen, cksum);
2800
2801		/* Put it into a token and insert in the reply */
2802		toklen = nfs_gss_token_put(ki, ALG_WRAP(ki), tokbuf, 0, reslen, cksum);
2803		nfsm_chain_add_32(error, nmc_pre, toklen + reslen);
2804		nfsm_chain_add_opaque_nopad(error, nmc_pre, tokbuf, toklen);
2805		nfsm_chain_build_done(error, nmc_pre);
2806		if (error)
2807			return (error);
2808		nfs_gss_append_chain(nmc_pre, results);	// Append the results mbufs
2809
2810		/* Encrypt the confounder + seqnum + results */
2811		nfs_gss_encrypt_mchain(ki, results, 0, reslen, DES_ENCRYPT);
2812
2813		/* Add null XDR pad if the ASN.1 token misaligned the data */
2814		pad = nfsm_pad(toklen + reslen);
2815		if (pad > 0) {
2816			nfsm_chain_add_opaque_nopad(error, nmc_pre, iv0, pad);
2817			nfsm_chain_build_done(error, nmc_pre);
2818		}
2819	}
2820
2821	return (error);
2822}
2823
2824/*
2825 * This function handles the context setup calls from the client.
2826 * Essentially, it implements the NFS null procedure calls when
2827 * an RPCSEC_GSS credential is used.
2828 * This is the context maintenance function.  It creates and
2829 * destroys server contexts at the whim of the client.
2830 * During context creation, it receives GSS-API tokens from the
2831 * client, passes them up to gssd, and returns a received token
2832 * back to the client in the null procedure reply.
2833 */
2834int
2835nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t *mrepp)
2836{
2837	struct nfs_gss_svc_ctx *cp = NULL;
2838	int error = 0;
2839	int autherr = 0;
2840	struct nfsm_chain *nmreq, nmrep;
2841	int sz;
2842
2843	nmreq = &nd->nd_nmreq;
2844	nfsm_chain_null(&nmrep);
2845	*mrepp = NULL;
2846	cp = nd->nd_gss_context;
2847	nd->nd_repstat = 0;
2848
2849	switch (cp->gss_svc_proc) {
2850	case RPCSEC_GSS_INIT:
2851		nfs_gss_svc_ctx_insert(cp);
2852		/* FALLTHRU */
2853
2854	case RPCSEC_GSS_CONTINUE_INIT:
2855		/* Get the token from the request */
2856		nfsm_chain_get_32(error, nmreq, cp->gss_svc_tokenlen);
2857		if (cp->gss_svc_tokenlen == 0) {
2858			autherr = RPCSEC_GSS_CREDPROBLEM;
2859			break;
2860		}
2861		MALLOC(cp->gss_svc_token, u_char *, cp->gss_svc_tokenlen, M_TEMP, M_WAITOK);
2862		if (cp->gss_svc_token == NULL) {
2863			autherr = RPCSEC_GSS_CREDPROBLEM;
2864			break;
2865		}
2866		nfsm_chain_get_opaque(error, nmreq, cp->gss_svc_tokenlen, cp->gss_svc_token);
2867
2868		/* Use the token in a gss_accept_sec_context upcall */
2869		error = nfs_gss_svc_gssd_upcall(cp);
2870		if (error) {
2871			autherr = RPCSEC_GSS_CREDPROBLEM;
2872			if (error == NFSERR_EAUTH)
2873				error = 0;
2874			break;
2875		}
2876
2877		/*
2878		 * If the context isn't complete, pass the new token
2879		 * back to the client for another round.
2880		 */
2881		if (cp->gss_svc_major != GSS_S_COMPLETE)
2882			break;
2883
2884		/*
2885		 * Now the server context is complete.
2886		 * Finish setup.
2887		 */
2888		clock_get_uptime(&cp->gss_svc_incarnation);
2889
2890		cp->gss_svc_seqwin = GSS_SVC_SEQWINDOW;
2891		MALLOC(cp->gss_svc_seqbits, uint32_t *,
2892			nfsm_rndup((cp->gss_svc_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO);
2893		if (cp->gss_svc_seqbits == NULL) {
2894			autherr = RPCSEC_GSS_CREDPROBLEM;
2895			break;
2896		}
2897		break;
2898
2899	case RPCSEC_GSS_DATA:
2900		/* Just a nullproc ping - do nothing */
2901		break;
2902
2903	case RPCSEC_GSS_DESTROY:
2904		/*
2905		 * Don't destroy the context immediately because
2906		 * other active requests might still be using it.
2907		 * Instead, schedule it for destruction after
2908		 * GSS_CTX_PEND time has elapsed.
2909		 */
2910		cp = nfs_gss_svc_ctx_find(cp->gss_svc_handle);
2911		if (cp != NULL) {
2912			cp->gss_svc_handle = 0;	// so it can't be found
2913			lck_mtx_lock(cp->gss_svc_mtx);
2914			clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
2915				&cp->gss_svc_incarnation);
2916			lck_mtx_unlock(cp->gss_svc_mtx);
2917		}
2918		break;
2919	default:
2920		autherr = RPCSEC_GSS_CREDPROBLEM;
2921		break;
2922	}
2923
2924	/* Now build the reply  */
2925
2926	if (nd->nd_repstat == 0)
2927		nd->nd_repstat = autherr ? (NFSERR_AUTHERR | autherr) : NFSERR_RETVOID;
2928	sz = 7 * NFSX_UNSIGNED + nfsm_rndup(cp->gss_svc_tokenlen); // size of results
2929	error = nfsrv_rephead(nd, slp, &nmrep, sz);
2930	*mrepp = nmrep.nmc_mhead;
2931	if (error || autherr)
2932		goto nfsmout;
2933
2934	if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
2935	    cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
2936		nfsm_chain_add_32(error, &nmrep, sizeof(cp->gss_svc_handle));
2937		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_handle);
2938
2939		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_major);
2940		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_minor);
2941		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_seqwin);
2942
2943		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_tokenlen);
2944		if (cp->gss_svc_token != NULL) {
2945			nfsm_chain_add_opaque(error, &nmrep, cp->gss_svc_token, cp->gss_svc_tokenlen);
2946			FREE(cp->gss_svc_token, M_TEMP);
2947			cp->gss_svc_token = NULL;
2948		}
2949	}
2950
2951nfsmout:
2952	if (autherr != 0) {
2953		nd->nd_gss_context = NULL;
2954		LIST_REMOVE(cp, gss_svc_entries);
2955		if (cp->gss_svc_seqbits != NULL)
2956			FREE(cp->gss_svc_seqbits, M_TEMP);
2957		if (cp->gss_svc_token != NULL)
2958			FREE(cp->gss_svc_token, M_TEMP);
2959		lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
2960		FREE(cp, M_TEMP);
2961	}
2962
2963	nfsm_chain_build_done(error, &nmrep);
2964	if (error) {
2965		nfsm_chain_cleanup(&nmrep);
2966		*mrepp = NULL;
2967	}
2968	return (error);
2969}
2970
2971/*
2972 * This is almost a mirror-image of the client side upcall.
2973 * It passes and receives a token, but invokes gss_accept_sec_context.
2974 * If it's the final call of the context setup, then gssd also returns
2975 * the session key and the user's UID.
2976 */
2977static int
2978nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *cp)
2979{
2980	kern_return_t kr;
2981	mach_port_t mp;
2982	int retry_cnt = 0;
2983	gssd_byte_buffer okey = NULL;
2984	uint32_t skeylen = 0;
2985	uint32_t ret_flags;
2986	vm_map_copy_t itoken = NULL;
2987	gssd_byte_buffer otoken = NULL;
2988	mach_msg_type_number_t otokenlen;
2989	int error = 0;
2990	char svcname[] = "nfs";
2991
2992	kr = host_get_gssd_port(host_priv_self(), &mp);
2993	if (kr != KERN_SUCCESS) {
2994		printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);
2995		goto out;
2996	}
2997	if (!IPC_PORT_VALID(mp)) {
2998		printf("nfs_gss_svc_gssd_upcall: gssd port not valid\n");
2999		goto out;
3000	}
3001
3002	if (cp->gss_svc_tokenlen > 0)
3003		nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
3004
3005retry:
3006	kr = mach_gss_accept_sec_context(
3007		mp,
3008		(gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen,
3009		svcname,
3010		0,
3011		&cp->gss_svc_context,
3012		&cp->gss_svc_cred_handle,
3013		&ret_flags,
3014		&cp->gss_svc_uid,
3015		cp->gss_svc_gids,
3016		&cp->gss_svc_ngroups,
3017		&okey, (mach_msg_type_number_t *) &skeylen,
3018		&otoken, &otokenlen,
3019		&cp->gss_svc_major,
3020		&cp->gss_svc_minor);
3021
3022	if (kr != KERN_SUCCESS) {
3023		printf("nfs_gss_svc_gssd_upcall failed: %x (%d)\n", kr, kr);
3024		if (kr == MIG_SERVER_DIED && cp->gss_svc_context == 0 &&
3025			retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) {
3026			if (cp->gss_svc_tokenlen > 0)
3027				nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
3028			goto retry;
3029		}
3030		host_release_special_port(mp);
3031		goto out;
3032	}
3033
3034	host_release_special_port(mp);
3035
3036	if (skeylen > 0) {
3037		if (skeylen != SKEYLEN && skeylen != SKEYLEN3) {
3038			printf("nfs_gss_svc_gssd_upcall: bad key length (%d)\n", skeylen);
3039			vm_map_copy_discard((vm_map_copy_t) okey);
3040			vm_map_copy_discard((vm_map_copy_t) otoken);
3041			goto out;
3042		}
3043		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen, cp->gss_svc_kinfo.skey);
3044		if (error) {
3045			vm_map_copy_discard((vm_map_copy_t) otoken);
3046			goto out;
3047		}
3048		error = gss_key_init(&cp->gss_svc_kinfo, skeylen);
3049		if (error)
3050			goto out;
3051
3052	}
3053
3054	/* Free context token used as input */
3055	if (cp->gss_svc_token)
3056		FREE(cp->gss_svc_token, M_TEMP);
3057	cp->gss_svc_token = NULL;
3058	cp->gss_svc_tokenlen = 0;
3059
3060	if (otokenlen > 0) {
3061		/* Set context token to gss output token */
3062		MALLOC(cp->gss_svc_token, u_char *, otokenlen, M_TEMP, M_WAITOK);
3063		if (cp->gss_svc_token == NULL) {
3064			printf("nfs_gss_svc_gssd_upcall: could not allocate %d bytes\n", otokenlen);
3065			vm_map_copy_discard((vm_map_copy_t) otoken);
3066			return (ENOMEM);
3067		}
3068		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_svc_token);
3069		if (error) {
3070			FREE(cp->gss_svc_token, M_TEMP);
3071			cp->gss_svc_token = NULL;
3072			return (NFSERR_EAUTH);
3073		}
3074		cp->gss_svc_tokenlen = otokenlen;
3075	}
3076
3077	return (0);
3078
3079out:
3080	FREE(cp->gss_svc_token, M_TEMP);
3081	cp->gss_svc_tokenlen = 0;
3082	cp->gss_svc_token = NULL;
3083
3084	return (NFSERR_EAUTH);
3085}
3086
3087/*
3088 * Validate the sequence number in the credential as described
3089 * in RFC 2203 Section 5.3.3.1
3090 *
3091 * Here the window of valid sequence numbers is represented by
3092 * a bitmap.  As each sequence number is received, its bit is
3093 * set in the bitmap.  An invalid sequence number lies below
3094 * the lower bound of the window, or is within the window but
3095 * has its bit already set.
3096 */
3097static int
3098nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *cp, uint32_t seq)
3099{
3100	uint32_t *bits = cp->gss_svc_seqbits;
3101	uint32_t win = cp->gss_svc_seqwin;
3102	uint32_t i;
3103
3104	lck_mtx_lock(cp->gss_svc_mtx);
3105
3106	/*
3107	 * If greater than the window upper bound,
3108	 * move the window up, and set the bit.
3109	 */
3110	if (seq > cp->gss_svc_seqmax) {
3111		if (seq - cp->gss_svc_seqmax > win)
3112			bzero(bits, nfsm_rndup((win + 7) / 8));
3113		else
3114			for (i = cp->gss_svc_seqmax + 1; i < seq; i++)
3115				win_resetbit(bits, i % win);
3116		win_setbit(bits, seq % win);
3117		cp->gss_svc_seqmax = seq;
3118		lck_mtx_unlock(cp->gss_svc_mtx);
3119		return (1);
3120	}
3121
3122	/*
3123	 * Invalid if below the lower bound of the window
3124	 */
3125	if (seq <= cp->gss_svc_seqmax - win) {
3126		lck_mtx_unlock(cp->gss_svc_mtx);
3127		return (0);
3128	}
3129
3130	/*
3131	 * In the window, invalid if the bit is already set
3132	 */
3133	if (win_getbit(bits, seq % win)) {
3134		lck_mtx_unlock(cp->gss_svc_mtx);
3135		return (0);
3136	}
3137	win_setbit(bits, seq % win);
3138	lck_mtx_unlock(cp->gss_svc_mtx);
3139	return (1);
3140}
3141
3142/*
3143 * Drop a reference to a context
3144 *
3145 * Note that it's OK for the context to exist
3146 * with a refcount of zero.  The refcount isn't
3147 * checked until we're about to reap an expired one.
3148 */
3149void
3150nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx *cp)
3151{
3152	lck_mtx_lock(cp->gss_svc_mtx);
3153	if (cp->gss_svc_refcnt > 0)
3154		cp->gss_svc_refcnt--;
3155	else
3156		printf("nfs_gss_ctx_deref: zero refcount\n");
3157	lck_mtx_unlock(cp->gss_svc_mtx);
3158}
3159
3160/*
3161 * Called at NFS server shutdown - destroy all contexts
3162 */
3163void
3164nfs_gss_svc_cleanup(void)
3165{
3166	struct nfs_gss_svc_ctx_hashhead *head;
3167	struct nfs_gss_svc_ctx *cp, *ncp;
3168	int i;
3169
3170	lck_mtx_lock(nfs_gss_svc_ctx_mutex);
3171
3172	/*
3173	 * Run through all the buckets
3174	 */
3175	for (i = 0; i < SVC_CTX_HASHSZ; i++) {
3176		/*
3177		 * Remove and free all entries in the bucket
3178		 */
3179		head = &nfs_gss_svc_ctx_hashtbl[i];
3180		LIST_FOREACH_SAFE(cp, head, gss_svc_entries, ncp) {
3181			LIST_REMOVE(cp, gss_svc_entries);
3182			if (cp->gss_svc_seqbits)
3183				FREE(cp->gss_svc_seqbits, M_TEMP);
3184			lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
3185			FREE(cp, M_TEMP);
3186		}
3187	}
3188
3189	lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
3190}
3191
3192#endif /* NFSSERVER */
3193
3194
3195/*************
3196 * The following functions are used by both client and server.
3197 */
3198
3199/*
3200 * Release a host special port that was obtained by host_get_special_port
3201 * or one of its macros (host_get_gssd_port in this case).
3202 * This really should be in a public kpi.
3203 */
3204
3205/* This should be in a public header if this routine is not */
3206extern void ipc_port_release_send(ipc_port_t);
3207extern ipc_port_t ipc_port_copy_send(ipc_port_t);
3208
3209static void
3210host_release_special_port(mach_port_t mp)
3211{
3212	if (IPC_PORT_VALID(mp))
3213		ipc_port_release_send(mp);
3214}
3215
3216static mach_port_t
3217host_copy_special_port(mach_port_t mp)
3218{
3219	return (ipc_port_copy_send(mp));
3220}
3221
3222/*
3223 * The token that is sent and received in the gssd upcall
3224 * has unbounded variable length.  Mach RPC does not pass
3225 * the token in-line.  Instead it uses page mapping to handle
3226 * these parameters.  This function allocates a VM buffer
3227 * to hold the token for an upcall and copies the token
3228 * (received from the client) into it.  The VM buffer is
3229 * marked with a src_destroy flag so that the upcall will
3230 * automatically de-allocate the buffer when the upcall is
3231 * complete.
3232 */
3233static void
3234nfs_gss_mach_alloc_buffer(u_char *buf, uint32_t buflen, vm_map_copy_t *addr)
3235{
3236	kern_return_t kr;
3237	vm_offset_t kmem_buf;
3238	vm_size_t tbuflen;
3239
3240	*addr = NULL;
3241	if (buf == NULL || buflen == 0)
3242		return;
3243
3244	tbuflen = vm_map_round_page(buflen,
3245				    vm_map_page_mask(ipc_kernel_map));
3246	kr = vm_allocate(ipc_kernel_map, &kmem_buf, tbuflen, VM_FLAGS_ANYWHERE);
3247	if (kr != 0) {
3248		printf("nfs_gss_mach_alloc_buffer: vm_allocate failed\n");
3249		return;
3250	}
3251
3252	kr = vm_map_wire(ipc_kernel_map,
3253			 vm_map_trunc_page(kmem_buf,
3254					   vm_map_page_mask(ipc_kernel_map)),
3255			 vm_map_round_page(kmem_buf + tbuflen,
3256					   vm_map_page_mask(ipc_kernel_map)),
3257		VM_PROT_READ|VM_PROT_WRITE, FALSE);
3258	if (kr != 0) {
3259		printf("nfs_gss_mach_alloc_buffer: vm_map_wire failed\n");
3260		return;
3261	}
3262
3263	bcopy(buf, (void *) kmem_buf, buflen);
3264	// Shouldn't need to bzero below since vm_allocate returns zeroed pages
3265	// bzero(kmem_buf + buflen, tbuflen - buflen);
3266
3267	kr = vm_map_unwire(ipc_kernel_map,
3268			   vm_map_trunc_page(kmem_buf,
3269					     vm_map_page_mask(ipc_kernel_map)),
3270			   vm_map_round_page(kmem_buf + tbuflen,
3271					     vm_map_page_mask(ipc_kernel_map)),
3272			   FALSE);
3273	if (kr != 0) {
3274		printf("nfs_gss_mach_alloc_buffer: vm_map_unwire failed\n");
3275		return;
3276	}
3277
3278	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t) kmem_buf,
3279		(vm_map_size_t) buflen, TRUE, addr);
3280	if (kr != 0) {
3281		printf("nfs_gss_mach_alloc_buffer: vm_map_copyin failed\n");
3282		return;
3283	}
3284}
3285
3286/*
3287 * Here we handle a token received from the gssd via an upcall.
3288 * The received token resides in an allocate VM buffer.
3289 * We copy the token out of this buffer to a chunk of malloc'ed
3290 * memory of the right size, then de-allocate the VM buffer.
3291 */
3292static int
3293nfs_gss_mach_vmcopyout(vm_map_copy_t in, uint32_t len, u_char *out)
3294{
3295	vm_map_offset_t map_data;
3296	vm_offset_t data;
3297	int error;
3298
3299	error = vm_map_copyout(ipc_kernel_map, &map_data, in);
3300	if (error)
3301		return (error);
3302
3303	data = CAST_DOWN(vm_offset_t, map_data);
3304	bcopy((void *) data, out, len);
3305	vm_deallocate(ipc_kernel_map, data, len);
3306
3307	return (0);
3308}
3309
3310/*
3311 * Encode an ASN.1 token to be wrapped in an RPCSEC_GSS verifier.
3312 * Returns the size of the token, since it contains a variable
3313 * length DER encoded size field.
3314 */
static int
nfs_gss_token_put(
	gss_key_info *ki,	/* key info: selects DES/3DES and HASHLEN */
	u_char *alg,		/* KRB5_SZ_ALG bytes identifying MIC or WRAP */
	u_char *p,		/* output buffer; caller must size it for the whole token */
	int initiator,		/* nonzero = initiator; selects the direction octets */
	int datalen,		/* length of wrapped data (0 for a MIC token) */
	u_char *cksum)		/* HASHLEN(ki) checksum bytes to embed */
{
	/*
	 * NOTE(review): seqnum is a file-static counter shared by all
	 * contexts and incremented without synchronization.  Per the
	 * comment further below, RFC 2203 5.2.3 says replay/sequence
	 * checking is off, so a duplicate from a race appears tolerable
	 * — confirm.
	 */
	static uint32_t seqnum = 0;
	u_char *psave = p;	/* remember the start to compute the total length */
	u_char plain[8];	/* seqnum + direction octets, encrypted in place below */
	int toklen, i;

	/*
	 * Fill in the token header: 2 octets.
	 * This is 0x06 - an ASN.1 tag for APPLICATION, 0, SEQUENCE
	 * followed by the length of the token: 35 + 0 octets for a
	 * MIC token, or 35 + encrypted octets for a wrap token;
	 */
	*p++ = 0x060;	/* 0x060 == 0x60: the ASN.1 [APPLICATION 0] constructed tag */
	toklen = KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ + HASHLEN(ki);
	nfs_gss_der_length_put(&p, toklen + datalen);

	/*
	 * Fill in the DER encoded mech OID for Kerberos v5.
	 * This represents the Kerberos OID 1.2.840.113554.1.2.2
	 * described in RFC 2623, section 4.2
	 */
	bcopy(krb5_mech, p, sizeof(krb5_mech));
	p += sizeof(krb5_mech);

	/*
	 * Now at the token described in RFC 1964, section 1.2.1
	 * Fill in the token ID, integrity algorithm indicator,
	 * for DES MAC MD5, and four filler octets.
	 * The alg string encodes the bytes to represent either
	 * a MIC token or a WRAP token for Kerberos.
	 */
	bcopy(alg, p, KRB5_SZ_ALG);
	p += KRB5_SZ_ALG;

	/*
	 * Now encode the sequence number according to
	 * RFC 1964, section 1.2.1.2 which dictates 4 octets
	 * of sequence number followed by 4 bytes of direction
	 * indicator: 0x00 for initiator or 0xff for acceptor.
	 * We DES CBC encrypt the sequence number using the first
	 * 8 octets of the checksum field as an initialization
	 * vector.
	 * Note that this sequence number is not at all related
	 * to the RPCSEC_GSS protocol sequence number.  This
	 * number is private to the ASN.1 token.  The only
	 * requirement is that it not be repeated in case the
	 * server has replay detection on, which normally should
	 * not be the case, since RFC 2203 section 5.2.3 says that
	 * replay detection and sequence checking must be turned off.
	 */
	seqnum++;
	/* Little-endian serialization of the 32-bit sequence number */
	for (i = 0; i < 4; i++)
		plain[i] = (u_char) ((seqnum >> (i * 8)) & 0xff);
	for (i = 4; i < 8; i++)
		plain[i] = initiator ? 0x00 : 0xff;
	gss_des_crypt(ki, (des_cblock *) plain, (des_cblock *) p, 8,
			(des_cblock *) cksum, NULL, DES_ENCRYPT, KG_USAGE_SEQ);
	p += 8;

	/*
	 * Finally, append the octets of the
	 * checksum of the alg + plaintext data.
	 * The plaintext could be an RPC call header,
	 * the window value, or a sequence number.
	 */
	bcopy(cksum, p, HASHLEN(ki));
	p += HASHLEN(ki);

	/* Total number of token bytes written */
	return (p - psave);
}
3393
3394/*
3395 * Determine size of ASN.1 DER length
3396 */
static int
nfs_gss_der_length_size(int len)
{
	/* Short form handles 0..127 in one octet */
	if (len < (1 << 7))
		return (1);
	/* Long form: one count octet plus 1..4 length octets */
	if (len < (1 << 8))
		return (2);
	if (len < (1 << 16))
		return (3);
	if (len < (1 << 24))
		return (4);
	return (5);
}

/*
 * Encode an ASN.1 DER length field
 */
static void
nfs_gss_der_length_put(u_char **pp, int len)
{
	int noctets = nfs_gss_der_length_size(len);
	u_char *out = *pp;

	if (noctets == 1) {
		/* Short form: the length itself, high bit clear */
		*out++ = (u_char) len;
	} else {
		/* Long form: count octet (high bit set), then big-endian length */
		noctets -= 1;
		*out++ = (u_char) (noctets | 0x80);
		while (noctets--)
			*out++ = (u_char) ((len >> (noctets * 8)) & 0xff);
	}

	*pp = out;
}
3427
3428/*
3429 * Decode an ASN.1 DER length field
3430 */
static int
nfs_gss_der_length_get(u_char **pp)
{
	u_char *cur = *pp;
	uint32_t noctets, result;

	if ((*cur & 0x80) == 0) {
		/* Short form: the low 7 bits are the length itself */
		result = *cur++ & 0x7f;
	} else {
		/* Long form: low 7 bits count the big-endian length octets */
		noctets = *cur++ & 0x7f;
		if (noctets > sizeof(uint32_t))
			return (-1);	/* too wide to represent; *pp left unchanged */
		result = 0;
		while (noctets--)
			result = (result << 8) + *cur++;
	}
	*pp = cur;
	return (result);
}
3450
3451/*
3452 * Decode an ASN.1 token from an RPCSEC_GSS verifier.
3453 */
3454static int
3455nfs_gss_token_get(
3456	gss_key_info *ki,
3457	u_char *alg,
3458	u_char *p,
3459	int initiator,
3460	uint32_t *len,
3461	u_char *cksum)
3462{
3463	u_char d, plain[8];
3464	u_char *psave = p;
3465	int seqnum, i;
3466
3467	/*
3468	 * Check that we have a valid token header
3469	 */
3470	if (*p++ != 0x60)
3471		return (AUTH_BADCRED);
3472	(void) nfs_gss_der_length_get(&p);	// ignore the size
3473
3474	/*
3475	 * Check that we have the DER encoded Kerberos v5 mech OID
3476	 */
3477	if (bcmp(p, krb5_mech, sizeof(krb5_mech) != 0))
3478		return (AUTH_BADCRED);
3479	p += sizeof(krb5_mech);
3480
3481	/*
3482	 * Now check the token ID, DES MAC MD5 algorithm
3483	 * indicator, and filler octets.
3484	 */
3485	if (bcmp(p, alg, KRB5_SZ_ALG) != 0)
3486		return (AUTH_BADCRED);
3487	p += KRB5_SZ_ALG;
3488
3489	/*
3490	 * Now decrypt the sequence number.
3491	 * Note that the gss decryption uses the first 8 octets
3492	 * of the checksum field as an initialization vector (p + 8).
3493	 * Per RFC 2203 section 5.2.2 we don't check the sequence number
3494	 * in the ASN.1 token because the RPCSEC_GSS protocol has its
3495	 * own sequence number described in section 5.3.3.1
3496	 */
3497	seqnum = 0;
3498	gss_des_crypt(ki, (des_cblock *)p, (des_cblock *) plain, 8,
3499			(des_cblock *) (p + 8), NULL, DES_DECRYPT, KG_USAGE_SEQ);
3500	p += 8;
3501	for (i = 0; i < 4; i++)
3502		seqnum |= plain[i] << (i * 8);
3503
3504	/*
3505	 * Make sure the direction
3506	 * indicator octets are correct.
3507	 */
3508	d = initiator ? 0x00 : 0xff;
3509	for (i = 4; i < 8; i++)
3510		if (plain[i] != d)
3511			return (AUTH_BADCRED);
3512
3513	/*
3514	 * Finally, get the checksum
3515	 */
3516	bcopy(p, cksum, HASHLEN(ki));
3517	p += HASHLEN(ki);
3518
3519	if (len != NULL)
3520		*len = p - psave;
3521
3522	return (0);
3523}
3524
3525/*
3526 * Return the number of bytes in an mbuf chain.
3527 */
3528static int
3529nfs_gss_mchain_length(mbuf_t mhead)
3530{
3531	mbuf_t mb;
3532	int len = 0;
3533
3534	for (mb = mhead; mb; mb = mbuf_next(mb))
3535		len += mbuf_len(mb);
3536
3537	return (len);
3538}
3539
3540/*
3541 * Append an args or results mbuf chain to the header chain
3542 */
3543static int
3544nfs_gss_append_chain(struct nfsm_chain *nmc, mbuf_t mc)
3545{
3546	int error = 0;
3547	mbuf_t mb, tail;
3548
3549	/* Connect the mbuf chains */
3550	error = mbuf_setnext(nmc->nmc_mcur, mc);
3551	if (error)
3552		return (error);
3553
3554	/* Find the last mbuf in the chain */
3555	tail = NULL;
3556	for (mb = mc; mb; mb = mbuf_next(mb))
3557		tail = mb;
3558
3559	nmc->nmc_mcur = tail;
3560	nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail);
3561	nmc->nmc_left = mbuf_trailingspace(tail);
3562
3563	return (0);
3564}
3565
3566/*
3567 * Convert an mbuf chain to an NFS mbuf chain
3568 */
3569static void
3570nfs_gss_nfsm_chain(struct nfsm_chain *nmc, mbuf_t mc)
3571{
3572	mbuf_t mb, tail;
3573
3574	/* Find the last mbuf in the chain */
3575	tail = NULL;
3576	for (mb = mc; mb; mb = mbuf_next(mb))
3577		tail = mb;
3578
3579	nmc->nmc_mhead = mc;
3580	nmc->nmc_mcur = tail;
3581	nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail);
3582	nmc->nmc_left = mbuf_trailingspace(tail);
3583	nmc->nmc_flags = 0;
3584}
3585
3586
3587/*
3588 * Compute a checksum over an mbuf chain.
3589 * Start building an MD5 digest at the given offset and keep
3590 * going until the end of data in the current mbuf is reached.
3591 * Then convert the 16 byte MD5 digest to an 8 byte DES CBC
3592 * checksum.
3593 */
static void
nfs_gss_cksum_mchain(
	gss_key_info *ki,	/* key info: selects the digest algorithm */
	mbuf_t mhead,		/* mbuf chain holding the data to checksum */
	u_char *alg,		/* KRB5_SZ_ALG algorithm bytes, logically prepended */
	int offset,		/* byte offset into the chain where data starts */
	int len,		/* number of data bytes to checksum */
	u_char *digest)		/* out: HASHLEN(ki) digest bytes */
{
	mbuf_t mb;
	u_char *ptr;
	int left, bytes;
	GSS_DIGEST_CTX context;

	gss_digest_Init(&context, ki);

	/*
	 * Logically prepend the first 8 bytes of the algorithm
	 * field as required by RFC 1964, section 1.2.1.1
	 */
	gss_digest_Update(&context, alg, KRB5_SZ_ALG);

	/*
	 * Move down the mbuf chain until we reach the given
	 * byte offset, then start MD5 on the mbuf data until
	 * we've done len bytes.
	 */

	for (mb = mhead; mb && len > 0; mb = mbuf_next(mb)) {
		ptr  = mbuf_data(mb);
		left = mbuf_len(mb);
		if (offset >= left) {
			/* Offset not yet reached */
			offset -= left;
			continue;
		}
		/* At or beyond offset - checksum data */
		/* offset goes to zero so later mbufs are digested from their start */
		ptr += offset;
		left -= offset;
		offset = 0;

		/* Digest the smaller of what's left in this mbuf and what remains of len */
		bytes = left < len ? left : len;
		if (bytes > 0)
			gss_digest_Update(&context, ptr, bytes);
		len -= bytes;
	}

	gss_digest_Final(&context, digest);
}
3643
3644/*
3645 * Compute a checksum over an NFS mbuf chain.
3646 * Start building an MD5 digest at the given offset and keep
3647 * going until the end of data in the current mbuf is reached.
3648 * Then convert the 16 byte MD5 digest to an 8 byte DES CBC
3649 * checksum.
3650 */
3651static void
3652nfs_gss_cksum_chain(
3653	gss_key_info *ki,
3654	struct nfsm_chain *nmc,
3655	u_char *alg,
3656	int offset,
3657	int len,
3658	u_char *cksum)
3659{
3660	/*
3661	 * If the length parameter is zero, then we need
3662	 * to use the length from the offset to the current
3663	 * encode/decode offset.
3664	 */
3665	if (len == 0)
3666		len = nfsm_chain_offset(nmc) - offset;
3667
3668	return (nfs_gss_cksum_mchain(ki, nmc->nmc_mhead, alg, offset, len, cksum));
3669}
3670
3671/*
3672 * Compute a checksum of the sequence number (or sequence window)
3673 * of an RPCSEC_GSS reply.
3674 */
3675static void
3676nfs_gss_cksum_rep(gss_key_info *ki, uint32_t seqnum, u_char *cksum)
3677{
3678	GSS_DIGEST_CTX context;
3679	uint32_t val = htonl(seqnum);
3680
3681	gss_digest_Init(&context, ki);
3682
3683	/*
3684	 * Logically prepend the first 8 bytes of the MIC
3685	 * token as required by RFC 1964, section 1.2.1.1
3686	 */
3687	gss_digest_Update(&context, ALG_MIC(ki), KRB5_SZ_ALG);
3688
3689	/*
3690	 * Compute the digest of the seqnum in network order
3691	 */
3692	gss_digest_Update(&context, &val, 4);
3693	gss_digest_Final(&context, cksum);
3694}
3695
3696/*
3697 * Encrypt or decrypt data in an mbuf chain with des-cbc.
3698 */
static void
nfs_gss_encrypt_mchain(
	gss_key_info *ki,	/* key info: selects DES or 3DES schedules */
	mbuf_t mhead,		/* mbuf chain holding the data to transform */
	int offset,		/* byte offset into the chain where data starts */
	int len,		/* number of bytes to encrypt/decrypt */
	int encrypt)		/* DES_ENCRYPT or DES_DECRYPT */
{
	mbuf_t mb, mbn;
	u_char *ptr, *nptr;
	u_char tmp[8], ivec[8];	/* tmp: staging for a block split across mbufs */
	int left, left8, remain;


	/* CBC chain starts from an all-zero IV */
	bzero(ivec, 8);

	/*
	 * Move down the mbuf chain until we reach the given
	 * byte offset, then start encrypting the mbuf data until
	 * we've done len bytes.
	 */

	for (mb = mhead; mb && len > 0; mb = mbn) {
		mbn  = mbuf_next(mb);
		ptr  = mbuf_data(mb);
		left = mbuf_len(mb);
		if (offset >= left) {
			/* Offset not yet reached */
			offset -= left;
			continue;
		}
		/* At or beyond offset - encrypt data */
		ptr += offset;
		left -= offset;
		offset = 0;

		/*
		 * DES or DES3 CBC has to encrypt 8 bytes at a time.
		 * If the number of bytes to be encrypted in this
		 * mbuf isn't some multiple of 8 bytes, encrypt all
		 * the 8 byte blocks, then combine the remaining
		 * bytes with enough from the next mbuf to make up
		 * an 8 byte block and encrypt that block separately,
		 * i.e. that block is split across two mbufs.
		 */
		remain = left % 8;
		left8 = left - remain;		/* largest multiple of 8 within this mbuf */
		left = left8 < len ? left8 : len;
		if (left > 0) {
			/* In-place crypt; ivec is threaded through for CBC chaining */
			gss_des_crypt(ki, (des_cblock *) ptr, (des_cblock *) ptr,
					left, &ivec, &ivec, encrypt, KG_USAGE_SEAL);
			len -= left;
		}

		if (mbn && remain > 0) {
			nptr = mbuf_data(mbn);
			/* offset carries into the next iteration so the bytes
			 * borrowed from the next mbuf aren't processed twice */
			offset = 8 - remain;
			bcopy(ptr + left, tmp, remain);		// grab from this mbuf
			bcopy(nptr, tmp + remain, offset);	// grab from next mbuf
			gss_des_crypt(ki, (des_cblock *) tmp, (des_cblock *) tmp, 8,
					&ivec, &ivec, encrypt, KG_USAGE_SEAL);
			bcopy(tmp, ptr + left, remain);		// return to this mbuf
			bcopy(tmp + remain, nptr, offset);	// return to next mbuf
			/* 8 bytes consumed: "remain" here plus "offset" from the next mbuf */
			len -= 8;
		}
	}
}
3766
3767/*
3768 * Encrypt or decrypt data in an NFS mbuf chain with des-cbc.
3769 */
3770static void
3771nfs_gss_encrypt_chain(
3772	gss_key_info *ki,
3773	struct nfsm_chain *nmc,
3774	int offset,
3775	int len,
3776	int encrypt)
3777{
3778	/*
3779	 * If the length parameter is zero, then we need
3780	 * to use the length from the offset to the current
3781	 * encode/decode offset.
3782	 */
3783	if (len == 0)
3784		len = nfsm_chain_offset(nmc) - offset;
3785
3786	return (nfs_gss_encrypt_mchain(ki, nmc->nmc_mhead, offset, len, encrypt));
3787}
3788
3789/*
3790 * The routines that follow provide abstractions for doing digests and crypto.
3791 */
3792
3793static void
3794gss_digest_Init(GSS_DIGEST_CTX *ctx, gss_key_info *ki)
3795{
3796	ctx->type = ki->type;
3797	switch (ki->type) {
3798	case NFS_GSS_1DES:	MD5_DESCBC_Init(&ctx->m_ctx, &ki->ks_u.des.gss_sched);
3799				break;
3800	case NFS_GSS_3DES:	HMAC_SHA1_DES3KD_Init(&ctx->h_ctx, ki->ks_u.des3.ckey, 0);
3801				break;
3802	default:
3803			printf("gss_digest_Init: Unknown key info type %d\n", ki->type);
3804	}
3805}
3806
3807static void
3808gss_digest_Update(GSS_DIGEST_CTX *ctx, void *data, size_t len)
3809{
3810	switch (ctx->type) {
3811	case NFS_GSS_1DES:	MD5_DESCBC_Update(&ctx->m_ctx, data, len);
3812				break;
3813	case NFS_GSS_3DES:	HMAC_SHA1_DES3KD_Update(&ctx->h_ctx, data, len);
3814				break;
3815	}
3816}
3817
3818static void
3819gss_digest_Final(GSS_DIGEST_CTX *ctx, void *digest)
3820{
3821	switch (ctx->type) {
3822	case NFS_GSS_1DES:	MD5_DESCBC_Final(digest, &ctx->m_ctx);
3823				break;
3824	case NFS_GSS_3DES:	HMAC_SHA1_DES3KD_Final(digest, &ctx->h_ctx);
3825				break;
3826	}
3827}
3828
3829static void
3830gss_des_crypt(gss_key_info *ki, des_cblock *in, des_cblock *out,
3831		int32_t len, des_cblock *iv, des_cblock *retiv, int encrypt, int usage)
3832{
3833	switch (ki->type) {
3834	case NFS_GSS_1DES:
3835			{
3836				des_cbc_key_schedule *sched = ((usage == KG_USAGE_SEAL) ?
3837							&ki->ks_u.des.gss_sched_Ke :
3838							&ki->ks_u.des.gss_sched);
3839				des_cbc_encrypt(in, out, len, sched, iv, retiv, encrypt);
3840			}
3841			break;
3842	case NFS_GSS_3DES:
3843
3844			des3_cbc_encrypt(in, out, len, &ki->ks_u.des3.gss_sched, iv, retiv, encrypt);
3845			break;
3846	}
3847}
3848
3849static int
3850gss_key_init(gss_key_info *ki, uint32_t skeylen)
3851{
3852	size_t i;
3853	int rc;
3854	des_cblock k[3];
3855
3856	ki->keybytes = skeylen;
3857	switch (skeylen) {
3858	case sizeof(des_cblock):
3859				ki->type = NFS_GSS_1DES;
3860				ki->hash_len = MD5_DESCBC_DIGEST_LENGTH;
3861				ki->ks_u.des.key = (des_cblock *)ki->skey;
3862				rc = des_cbc_key_sched(ki->ks_u.des.key, &ki->ks_u.des.gss_sched);
3863				if (rc)
3864					return (rc);
3865				for (i = 0; i < ki->keybytes; i++)
3866					k[0][i] = 0xf0 ^ (*ki->ks_u.des.key)[i];
3867				rc = des_cbc_key_sched(&k[0], &ki->ks_u.des.gss_sched_Ke);
3868				break;
3869	case 3*sizeof(des_cblock):
3870				ki->type = NFS_GSS_3DES;
3871				ki->hash_len = SHA_DIGEST_LENGTH;
3872				ki->ks_u.des3.key = (des_cblock (*)[3])ki->skey;
3873				des3_derive_key(*ki->ks_u.des3.key, ki->ks_u.des3.ckey,
3874						KEY_USAGE_DES3_SIGN, KEY_USAGE_LEN);
3875				rc = des3_cbc_key_sched(*ki->ks_u.des3.key, &ki->ks_u.des3.gss_sched);
3876				if (rc)
3877					return (rc);
3878				break;
3879	default:
3880				printf("gss_key_init: Invalid key length %d\n", skeylen);
3881				rc = EINVAL;
3882				break;
3883	}
3884
3885	return (rc);
3886}
3887
3888#if 0
3889#define DISPLAYLEN 16
3890#define MAXDISPLAYLEN 256
3891
3892static void
3893hexdump(const char *msg, void *data, size_t len)
3894{
3895	size_t i, j;
3896	u_char *d = data;
3897	char *p, disbuf[3*DISPLAYLEN+1];
3898
3899	printf("NFS DEBUG %s len=%d:\n", msg, (uint32_t)len);
3900	if (len > MAXDISPLAYLEN)
3901		len = MAXDISPLAYLEN;
3902
3903	for (i = 0; i < len; i += DISPLAYLEN) {
3904		for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3)
3905			snprintf(p, 4, "%02x ", d[i + j]);
3906		printf("\t%s\n", disbuf);
3907	}
3908}
3909#endif
3910