1/*
2 * Copyright (c) 2000-2001 Boris Popov
3 * All rights reserved.
4 *
5 * Portions Copyright (C) 2001 - 2013 Apple Inc. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *    This product includes software developed by Boris Popov.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 */
35
36#include <sys/param.h>
37#include <sys/systm.h>
38#include <sys/proc.h>
39#include <sys/kernel.h>
40#include <sys/malloc.h>
41#include <sys/kpi_mbuf.h>
42#include <sys/unistd.h>
43#include <sys/mount.h>
44#include <sys/vnode.h>
45
46#include <sys/kauth.h>
47
48#include <sys/smb_apple.h>
49
50#include <netsmb/smb.h>
51#include <netsmb/smb_2.h>
52#include <netsmb/smb_rq.h>
53#include <netsmb/smb_rq_2.h>
54#include <netsmb/smb_conn.h>
55#include <netsmb/smb_conn_2.h>
56#include <netsmb/smb_rq.h>
57#include <netsmb/smb_rq_2.h>
58#include <netsmb/smb_tran.h>
59#include <netsmb/smb_trantcp.h>
60#include <netsmb/smb_subr.h>
61#include <smbfs/smbfs.h>
62#include <netsmb/smb_packets_2.h>
63#include <smbclient/ntstatus.h>
64
65#include <IOKit/IOLib.h>
66#include <netsmb/smb_sleephandler.h>
67
68static int smb_iod_next;
69
70int smb_iod_sendall(struct smbiod *iod);
71
72/*
73 * Check to see if the share has a routine to handle going away, if so.
74 */
75static int isShareGoingAway(struct smb_share* share)
76{
77	int goingAway = FALSE;
78
79	lck_mtx_lock(&share->ss_shlock);
80	if (share->ss_going_away) {
81		goingAway = share->ss_going_away(share);
82	}
83	lck_mtx_unlock(&share->ss_shlock);
84	return goingAway;
85}
86
87static int smb_iod_check_timeout(struct timespec *starttime, int SecondsTillTimeout)
88{
89	struct timespec waittime, tsnow;
90
91	waittime.tv_sec = SecondsTillTimeout;
92	waittime.tv_nsec = 0;
93	timespecadd(&waittime, starttime);
94	nanouptime(&tsnow);
95	if (timespeccmp(&tsnow, &waittime, >))
96		return TRUE;
97	else return FALSE;
98}
99
100
101static __inline void
102smb_iod_rqprocessed(struct smb_rq *rqp, int error, int flags)
103{
104	SMBRQ_SLOCK(rqp);
105	rqp->sr_flags |= flags;
106	rqp->sr_lerror = error;
107	rqp->sr_rpgen++;
108	rqp->sr_state = SMBRQ_NOTIFIED;
109	if (rqp->sr_flags & SMBR_ASYNC) {
110		DBG_ASSERT(rqp->sr_callback);
111		rqp->sr_callback(rqp->sr_callback_args);
112	} else
113		wakeup(&rqp->sr_state);
114	SMBRQ_SUNLOCK(rqp);
115}
116
117/*
118 * Gets called from smb_iod_dead, smb_iod_negotiate and smb_iod_ssnsetup. This routine
119 * should never get called while we are in reconnect state. This routine just flushes
120 * any old messages left after a connection went down.
121 */
122static void smb_iod_invrq(struct smbiod *iod)
123{
124	struct smb_rq *rqp, *trqp;
125
126	/*
127	 * Invalidate all outstanding requests for this connection
128	 */
129	SMB_IOD_RQLOCK(iod);
130	TAILQ_FOREACH_SAFE(rqp, &iod->iod_rqlist, sr_link, trqp) {
131		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_DEAD);
132	}
133	SMB_IOD_RQUNLOCK(iod);
134}
135
136static void
137smb_iod_sockwakeup(struct smbiod *iod)
138{
139	/* note: called from socket upcall... */
140	iod->iod_workflag = 1;		/* new work to do */
141
142	wakeup(&(iod)->iod_flags);
143}
144
145static void
146smb_iod_closetran(struct smbiod *iod)
147{
148	struct smb_vc *vcp = iod->iod_vc;
149
150	if (vcp->vc_tdata == NULL)
151		return;
152	SMB_TRAN_DISCONNECT(vcp);
153	SMB_TRAN_DONE(vcp);
154}
155
156static void
157smb_iod_dead(struct smbiod *iod)
158{
159	struct smb_rq *rqp, *trqp;
160
161	iod->iod_state = SMBIOD_ST_DEAD;
162	smb_iod_closetran(iod);
163	smb_iod_invrq(iod);
164	SMB_IOD_RQLOCK(iod);
165	TAILQ_FOREACH_SAFE(rqp, &iod->iod_rqlist, sr_link, trqp) {
166		if (rqp->sr_share) {
167			lck_mtx_lock(&rqp->sr_share->ss_shlock);
168			if (rqp->sr_share->ss_dead)
169				rqp->sr_share->ss_dead(rqp->sr_share);
170			lck_mtx_unlock(&rqp->sr_share->ss_shlock);
171		}
172	}
173	SMB_IOD_RQUNLOCK(iod);
174}
175
176/*
177 * We lost the connection. Set the vc flag saying we need to do a reconnect and
178 * tell all the shares we are starting reconnect. At this point all non reconnect messages
179 * should block until the reconnect process is completed. This routine is always excuted
180 * from the main thread.
181 */
182static void smb_iod_start_reconnect(struct smbiod *iod)
183{
184	struct smb_share *share, *tshare;
185	struct smb_rq *rqp, *trqp;
186
187	/* This should never happen, but for testing lets leave it in */
188	if (iod->iod_flags & SMBIOD_START_RECONNECT) {
189		SMBWARNING("Already in start reconnect with %s\n", iod->iod_vc->vc_srvname);
190		return; /* Nothing to do here we are already in start reconnect mode */
191	}
192
193	/*
194	 * Only start a reconnect on an active sessions or when a reconnect failed because we
195	 * went to sleep. If we are in the middle of a connection then mark the connection
196	 * as dead and get out.
197	 */
198	switch (iod->iod_state) {
199	case SMBIOD_ST_VCACTIVE:	/* session established */
200	case SMBIOD_ST_RECONNECT:	/* betweeen reconnect attempts; sleep happened. */
201		break;
202	case SMBIOD_ST_NOTCONN:	/* no connect request was made */
203	case SMBIOD_ST_CONNECT:	/* a connect attempt is in progress */
204	case SMBIOD_ST_TRANACTIVE:	/* transport level is up */
205	case SMBIOD_ST_NEGOACTIVE:	/* completed negotiation */
206	case SMBIOD_ST_SSNSETUP:	/* started (a) session setup */
207	case SMBIOD_ST_DEAD:		/* connection broken, transport is down */
208		SMBDEBUG("%s: iod->iod_state = %x iod->iod_flags = 0x%x\n",
209				 iod->iod_vc->vc_srvname, iod->iod_state, iod->iod_flags);
210		if (!(iod->iod_flags & SMBIOD_RECONNECT)) {
211			smb_iod_dead(iod);
212			return;
213		}
214		break;
215	}
216
217	/* Set the flag saying we are starting reconnect. */
218	SMB_IOD_FLAGSLOCK(iod);
219	iod->iod_flags |= SMBIOD_START_RECONNECT;
220	SMB_IOD_FLAGSUNLOCK(iod);
221
222	/* Search through the request list and set them to the correct state */
223	SMB_IOD_RQLOCK(iod);
224	TAILQ_FOREACH_SAFE(rqp, &iod->iod_rqlist, sr_link, trqp) {
225		SMBRQ_SLOCK(rqp);
226
227		/* Clear any internal or async request out of the queue */
228		if (rqp->sr_flags & (SMBR_INTERNAL | SMBR_ASYNC)) {
229            /* pretend like it did not get sent to recover SMB 2/3 credits */
230            rqp->sr_extflags &= ~SMB2_REQ_SENT;
231
232			SMBRQ_SUNLOCK(rqp);
233			if (rqp->sr_flags & SMBR_ASYNC)
234				smb_iod_rqprocessed(rqp, ETIMEDOUT, 0);
235			else
236				smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_DEAD);
237		}
238        else {
239            /* If SMB 2/3 and soft mount, cancel all requests with ETIMEDOUT */
240            if ((rqp->sr_share) && (rqp->sr_share->ss_soft_timer) &&
241                (rqp->sr_extflags & SMB2_REQUEST)) {
242                /*
243                 * Pretend like it did not get sent to recover SMB 2/3 credits
244                 */
245                rqp->sr_extflags &= ~SMB2_REQ_SENT;
246
247                SMBRQ_SUNLOCK(rqp);
248
249                SMBDEBUG("Soft Mount timed out! cmd 0x%x message_id %lld \n",
250                         (UInt32) rqp->sr_cmd, rqp->sr_messageid);
251                smb_iod_rqprocessed(rqp, ETIMEDOUT, 0);
252            }
253            else {
254                /*
255                 * Let the upper layer know that this message was processed
256                 * while we were in reconnect mode. If they receive an error
257                 * they may want to handle this message differently.
258                 */
259                rqp->sr_flags |= SMBR_RECONNECTED;
260
261                /* If we have not received a reply set the state to reconnect */
262                if (rqp->sr_state != SMBRQ_NOTIFIED) {
263                    rqp->sr_extflags &= ~SMB2_REQ_SENT; /* clear the SMB 2/3 sent flag */
264                    rqp->sr_state = SMBRQ_RECONNECT; /* Wait for reconnect to complete */
265                    rqp->sr_flags |= SMBR_REXMIT;	/* Tell the upper layer this message was resent */
266                    rqp->sr_lerror = 0;		/* We are going to resend clear the error */
267                }
268
269                SMBRQ_SUNLOCK(rqp);
270            }
271		}
272	}
273	SMB_IOD_RQUNLOCK(iod);
274
275	/* We are already in reconnect, so we are done */
276	if (iod->iod_flags & SMBIOD_RECONNECT) {
277		goto done;
278	}
279	/* Set our flag saying we need to do a reconnect, but not until we finish the work in this routine. */
280	SMB_IOD_FLAGSLOCK(iod);
281	iod->iod_flags |= SMBIOD_RECONNECT;
282	SMB_IOD_FLAGSUNLOCK(iod);
283
284	/*
285	 * We have the vc list locked so the shares can't be remove and they can't
286	 * go away. If the share is not gone then mark that we are in reconnect mode.
287	 */
288	smb_vc_lock(iod->iod_vc);
289	SMBCO_FOREACH_SAFE(share, VCTOCP(iod->iod_vc), tshare) {
290		lck_mtx_lock(&share->ss_stlock);
291		if (!(share->ss_flags & SMBO_GONE)) {
292			share->ss_flags |= SMBS_RECONNECTING;
293		}
294		lck_mtx_unlock(&(share)->ss_stlock);
295	}
296	smb_vc_unlock(iod->iod_vc);
297done:
298	/* Ok now we can do the reconnect */
299	SMB_IOD_FLAGSLOCK(iod);
300	iod->iod_flags &= ~SMBIOD_START_RECONNECT;
301	SMB_IOD_FLAGSUNLOCK(iod);
302}
303
304static int
305smb_iod_negotiate(struct smbiod *iod, vfs_context_t user_context)
306{
307	struct smb_vc *vcp = iod->iod_vc;
308	int error;
309
310	SMBIODEBUG("%d\n", iod->iod_state);
311	switch(iod->iod_state) {
312	    case SMBIOD_ST_TRANACTIVE:
313	    case SMBIOD_ST_NEGOACTIVE:
314	    case SMBIOD_ST_SSNSETUP:
315            SMBERROR("smb_iod_negotiate is invalid now, state=%d\n", iod->iod_state);
316            return EINVAL;
317	    case SMBIOD_ST_VCACTIVE:
318            SMBERROR("smb_iod_negotiate called when connected\n");
319            return EISCONN;
320	    case SMBIOD_ST_DEAD:
321            return ENOTCONN;	/* XXX: last error code ? */
322	    default:
323            break;
324	}
325	iod->iod_state = SMBIOD_ST_CONNECT;
326	error = SMB_TRAN_CREATE(vcp);
327	if (error) {
328		goto errorOut;
329	}
330	SMBIODEBUG("tcreate\n");
331	/* We only bind when doing a NetBIOS connection */
332	if (vcp->vc_saddr->sa_family == AF_NETBIOS) {
333		error = SMB_TRAN_BIND(vcp, vcp->vc_laddr);
334		if (error) {
335			goto errorOut;
336		}
337		SMBIODEBUG("tbind\n");
338	}
339	SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, iod);
340	SMB_TRAN_SETPARAM(vcp, SMBTP_UPCALL, smb_iod_sockwakeup);
341	error = SMB_TRAN_CONNECT(vcp, vcp->vc_saddr);
342	if (error == 0) {
343		iod->iod_state = SMBIOD_ST_TRANACTIVE;
344		SMBIODEBUG("tconnect\n");
345		error = smb_smb_negotiate(vcp, user_context, FALSE, iod->iod_context);
346	}
347	if (error) {
348		goto errorOut;
349	}
350	iod->iod_state = SMBIOD_ST_NEGOACTIVE;
351	SMBIODEBUG("completed\n");
352	smb_iod_invrq(iod);
353	return 0;
354
355errorOut:
356	smb_iod_dead(iod);
357	return error;
358}
359
360static int
361smb_iod_ssnsetup(struct smbiod *iod, int inReconnect)
362{
363	struct smb_vc *vcp = iod->iod_vc;
364	int error;
365
366	SMBIODEBUG("%d\n", iod->iod_state);
367	switch(iod->iod_state) {
368	    case SMBIOD_ST_NEGOACTIVE:
369            break;
370	    case SMBIOD_ST_DEAD:
371            return ENOTCONN;	/* XXX: last error code ? */
372	    case SMBIOD_ST_VCACTIVE:
373            SMBERROR("smb_iod_ssnsetup called when connected\n");
374            return EISCONN;
375	    default:
376            SMBERROR("smb_iod_ssnsetup is invalid now, state=%d\n",
377                     iod->iod_state);
378		return EINVAL;
379	}
380	iod->iod_state = SMBIOD_ST_SSNSETUP;
381	error = smb_smb_ssnsetup(vcp, inReconnect, iod->iod_context);
382	if (error) {
383		/*
384		 * We no longer call smb_io_dead here, the vc could still be
385		 * alive. Allow for other attempt to authenticate on this same
386		 * circuit. If the connect went down let the call process
387		 * decide what to do with the circuit.
388		 *
389		 * Now all we do is reset the iod state back to what it was, but only if
390		 * it hasn't change from the time we came in here. If the connection goes
391		 * down(server dies) then we shouldn't change the state.
392		 */
393		if (iod->iod_state == SMBIOD_ST_SSNSETUP)
394			iod->iod_state = SMBIOD_ST_NEGOACTIVE;
395	} else {
396		iod->iod_state = SMBIOD_ST_VCACTIVE;
397		SMBIODEBUG("completed\n");
398		/* Don't flush the queue if we are in reconnect state. We need to resend those messages. */
399		if ((iod->iod_flags & SMBIOD_RECONNECT) != SMBIOD_RECONNECT)
400			smb_iod_invrq(iod);
401	}
402	return error;
403}
404
405static int
406smb_iod_disconnect(struct smbiod *iod)
407{
408	struct smb_vc *vcp = iod->iod_vc;
409
410	SMBIODEBUG("\n");
411	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
412		smb_smb_ssnclose(vcp, iod->iod_context);
413		iod->iod_state = SMBIOD_ST_TRANACTIVE;
414	}
415	vcp->vc_smbuid = SMB_UID_UNKNOWN;
416	smb_iod_closetran(iod);
417	iod->iod_state = SMBIOD_ST_NOTCONN;
418	return 0;
419}
420
421static int
422smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
423{
424	struct smb_vc *vcp = iod->iod_vc;
425	mbuf_t m, m2;
426	int error = 0;
427    uint32_t do_encrypt;
428    struct smb_rq *tmp_rqp;
429	struct mbchain *mbp;
430
431	SMBIODEBUG("iod_state = %d\n", iod->iod_state);
432	switch (iod->iod_state) {
433	    case SMBIOD_ST_NOTCONN:
434            smb_iod_rqprocessed(rqp, ENOTCONN, 0);
435            return 0;
436	    case SMBIOD_ST_DEAD:
437            /* This is what keeps the iod itself from sending more */
438            smb_iod_rqprocessed(rqp, ENOTCONN, 0);
439            return 0;
440	    case SMBIOD_ST_CONNECT:
441            return 0;
442	    case SMBIOD_ST_NEGOACTIVE:
443            SMBERROR("smb_iod_sendrq in unexpected state(%d)\n",
444                     iod->iod_state);
445	    default:
446            break;
447	}
448
449    if (rqp->sr_extflags & SMB2_REQUEST) {
450        /* filled in by smb2_rq_init_internal */
451    }
452    else {
453        /* Use to check for vc, can't have an iod without a vc */
454        *rqp->sr_rquid = htoles(vcp->vc_smbuid);
455
456        /* If the request has a share then it has a reference on it */
457        *rqp->sr_rqtid = htoles(rqp->sr_share ?
458                                rqp->sr_share->ss_tid : SMB_TID_UNKNOWN);
459    }
460
461    smb_rq_getrequest(rqp, &mbp);
462	mb_fixhdr(mbp);
463
464    /*
465     * NOTE:
466     *
467     * SMB 1 calls mbuf_copym to create a duplicate mbuf of sr_rq.mp_top
468     * to send. If a reconnect happens, then its easy to resend the exact same
469     * packet again by just duplicating sr_rq.mp_top again and sending it again.
470     *
471     * For SMB 2/3, the exact same packet can not be sent. After a reconnect
472     * the credits reset to 0 and the volatile part of the FID can also change.
473     * Thus, the entire packet has to be rebuilt and then resent. Thus, for
474     * SMB 2/3, we do not bother creating a duplicate of the mbuf before
475     * sending. This will allow SMB 2/3 to use fewer mbufs.
476     */
477
478    if (rqp->sr_extflags & SMB2_REQUEST) {
479        /*
480         * SMB 2/3
481         *
482         * Set the message_id right before we sent the request
483         *
484         * This is because Windows based servers will stop granting credits
485         * if the difference between the smallest available sequence number and
486         * the largest available sequence number exceeds 2 times the number
487         * of granted credits.  Refer to Secion 3.3.1.1 in SMB Version 2
488         * Protocol Specification.
489         */
490        smb2_rq_message_id_increment(rqp);
491
492        SMBSDEBUG("MessageID:%llu\n", rqp->sr_messageid);
493
494        /* Determine if outgoing request(s) must be encrypted */
495        do_encrypt = 0;
496
497        if (vcp->vc_flags & (SMBV_SMB30 | SMBV_SMB302)) {
498            /* Check if session is encrypted */
499            if (vcp->vc_sopt.sv_sessflags & SMB2_SESSION_FLAG_ENCRYPT_DATA) {
500                if (rqp->sr_command != SMB2_NEGOTIATE) {
501                    do_encrypt = 1;
502                }
503            } else if (rqp->sr_share != NULL) {
504                if ( (rqp->sr_command != SMB2_NEGOTIATE) &&
505                    (rqp->sr_command != SMB2_SESSION_SETUP) &&
506                    (rqp->sr_command != SMB2_TREE_CONNECT) &&
507                    (rqp->sr_share->ss_share_flags & SMB2_SHAREFLAG_ENCRYPT_DATA) ){
508                    do_encrypt = 1;
509                }
510            }
511        }
512
513        if ( !(do_encrypt) &&
514            ((vcp->vc_hflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) ||
515             ((rqp->sr_flags & SMBR_SIGNED)))) {
516            // Only sign if not encrypting
517            smb2_rq_sign(rqp);
518        }
519
520        if (rqp->sr_flags & SMBR_COMPOUND_RQ) {
521            /*
522             * Compound request to send. The first rqp has its sr_next_rq set to
523             * point to the next request to send and so on. The last request will
524             * have sr_next_rq set to NULL. The next_command fields should already
525             * be filled in with correct offsets. Have to copy all the requests
526             * into a single mbuf chain before sending it.
527             *
528             * ONLY the first rqp in the chain will have its sr_ fields updated.
529             */
530            DBG_ASSERT(rqp->sr_next_rqp != NULL);
531
532            /*
533             * Create the first chain
534             * Save current sr_rq.mp_top into "m", set sr_rq.mp_top to NULL,
535             * then send "m"
536             */
537            m = mb_detach(mbp);
538
539            /* Concatenate the other requests into the mbuf chain */
540            tmp_rqp = rqp->sr_next_rqp;
541            while (tmp_rqp != NULL) {
542                /* copy next request into new mbuf m2 */
543                smb_rq_getrequest(tmp_rqp, &mbp);
544                m2 = mb_detach(mbp);
545
546                /* concatenate m2 to m */
547                m = mbuf_concatenate(m, m2);
548
549                tmp_rqp = tmp_rqp->sr_next_rqp;
550            }
551
552            /* fix up the mbuf packet header */
553            m_fixhdr(m);
554        }
555        else {
556            /*
557             * Not a compound request
558             * Save current sr_rq.mp_top into "m", set sr_rq.mp_top to NULL,
559             * then send "m"
560             */
561            m = mb_detach(mbp);
562        }
563
564        if (do_encrypt) {
565            error = smb3_rq_encrypt(rqp, &m);
566            if (error) {
567                SMBERROR("SMB3 transform failed, error: %d\n", error);
568                smb_iod_rqprocessed(rqp, error, 0);
569                return (0);
570            }
571        }
572
573    }
574    else {
575        /*
576         * SMB 1
577         */
578        if (vcp->vc_hflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
579            smb_rq_sign(rqp);
580        }
581
582        SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
583        m_dumpm(rqp->sr_rq.mb_top);
584
585        /* SMB 1 always duplicates the sr_rq.mb_top and sends the dupe */
586        error = mbuf_copym(rqp->sr_rq.mb_top, 0, MBUF_COPYALL, MBUF_WAITOK, &m);
587        DBG_ASSERT(error == 0);
588    }
589
590    /* Record the current thread for VFS_CTL_NSTATUS */
591    SMB_IOD_RQLOCK(iod);
592    rqp->sr_threadId = thread_tid(current_thread());
593    SMB_IOD_RQUNLOCK(iod);
594
595
596    /* Call SMB_TRAN_SEND to send the mbufs in "m" */
597    error = rqp->sr_lerror = (error) ? error : SMB_TRAN_SEND(vcp, m);
598	if (error == 0) {
599        nanouptime(&rqp->sr_timesent);
600        iod->iod_lastrqsent = rqp->sr_timesent;
601        rqp->sr_state = SMBRQ_SENT;
602
603        /*
604         * For SMB 2/3, set flag indicating this request was sent. Used for
605         * keeping track of credits.
606         */
607        if (rqp->sr_flags & SMBR_COMPOUND_RQ) {
608            rqp->sr_extflags |= SMB2_REQ_SENT;
609            tmp_rqp = rqp->sr_next_rqp;
610            while (tmp_rqp != NULL) {
611                tmp_rqp->sr_extflags |= SMB2_REQ_SENT;
612                tmp_rqp = tmp_rqp->sr_next_rqp;
613            }
614        }
615        else {
616            rqp->sr_extflags |= SMB2_REQ_SENT;
617        }
618
619		return 0;
620	}
621
622	/* Did the connection go down, we may need to reconnect. */
623	if (SMB_TRAN_FATAL(vcp, error)) {
624		return ENOTCONN;
625    }
626	else if (error) {	/* Either the send failed or the mbuf_copym? */
627		SMBERROR("TRAN_SEND returned non-fatal error %d sr_cmd = 0x%x\n",
628                 error, rqp->sr_cmd);
629		error = EIO; /* Couldn't send not much else we can do */
630        smb_iod_rqprocessed(rqp, error, 0);
631	}
632	return 0;
633}
634
635/*
636 * Process incoming packets
637 */
638static int
639smb_iod_recvall(struct smbiod *iod)
640{
641	struct smb_vc *vcp = iod->iod_vc;
642	struct smb_rq *rqp, *trqp, *temp_rqp, *cmpd_rqp;
643	mbuf_t m;
644	u_char *hp;
645	uint16_t mid = 0;
646	uint16_t pidHigh = 0;
647	uint16_t pidLow = 0;
648	uint8_t cmd = 0;
649	int error;
650    struct smb2_header* smb2_hdr;
651    boolean_t smb2_packet;
652    uint64_t message_id = 0;
653    boolean_t smb1_allowed = true;
654	struct mdchain *mdp = NULL;
655    int skip_wakeup = 0;
656
657	switch (iod->iod_state) {
658	    case SMBIOD_ST_NOTCONN:
659	    case SMBIOD_ST_DEAD:
660	    case SMBIOD_ST_CONNECT:
661            return 0;
662	    default:
663            break;
664	}
665
666	for (;;) {
667        m = NULL;
668        smb2_packet = false;
669        cmpd_rqp = NULL;
670        temp_rqp = NULL;
671        skip_wakeup = 0;
672
673        /* this reads in the entire response packet based on the NetBIOS hdr */
674		error = SMB_TRAN_RECV(vcp, &m);
675		if (error == EWOULDBLOCK) {
676            break;
677        }
678		if (SMB_TRAN_FATAL(vcp, error)) {
679            SMBDEBUG("SMB_TRAN_FATAL failed %d\n", error);
680            smb_iod_start_reconnect(iod);
681            break;
682		}
683		if (error) {
684            SMBDEBUG("SMB_TRAN_FATAL failed %d\n", error);
685            break;
686        }
687		if (m == NULL) {
688			SMBDEBUG("tran return NULL without error\n");
689			continue;
690		}
691
692        /*
693         * It's possible the first mbuf in the chain
694         * has zero length, so we will do the pullup
695         * now, fixes <rdar://problem/17166274>.
696         *
697         * Note: Ideally we would simply pullup SMB2_HDRLEN bytes,
698         * here, but we still have to support SMB 1, which has
699         * messages less than 64 bytes (SMB2_HDRLEN). Once we
700         * remove SMB 1 support, we can change this pullup to
701         * SMB2_HDRLEN bytes, and remove the additional pullup
702         * in the "SMB 2/3 Response packet" block below.
703         */
704        if (mbuf_pullup(&m, SMB_HDRLEN))
705            continue;
706
707        /*
708         * Parse out enough of the response to be able to match it with an
709         * existing smb_rq in the queue.
710         */
711
712        /*
713         * For SMB 2/3, client sends out a SMB 1 Negotiate request, but the
714         * server replies with a SMB 2/3 Negotiate response that has no mid
715         * and a pid of 0.  Have to just match it to any Negotiate request
716         * waiting for a response.
717         */
718
719        m_dumpm(m);
720        hp = mbuf_data(m);
721
722		if (*hp == 0xfe) {
723            /*
724             * SMB 2/3 Response packet
725             */
726
727            /* Wait for entire header to be read in */
728            if (mbuf_pullup(&m, SMB2_HDRLEN))
729                continue;
730
731            hp = mbuf_data(m);
732
733            /* Verify SMB 2/3 signature */
734            if (bcmp(hp, SMB2_SIGNATURE, SMB2_SIGLEN) != 0) {
735                SMBERROR("dumping non SMB 2/3 packet\n");
736                mbuf_freem(m);
737                continue;
738            }
739
740            /* this response is an SMB 2/3 response */
741            smb2_packet = true;
742
743            /*
744             * Once using SMB 2/3, ignore any more SMB 1 responses
745             */
746            if (smb1_allowed)
747                smb1_allowed = false;
748
749            /*
750             * At this point we have the SMB 2/3 Header and packet data read in
751             * Get the Message ID so we can find the matching smb_rq
752             */
753            m_dumpm(m);
754            smb2_hdr = mbuf_data(m);
755
756            cmd = letohs(smb2_hdr->command);
757            message_id = letohq(smb2_hdr->message_id);
758            SMBSDEBUG("message_id %lld cmd = %d\n", letohq(message_id), cmd);
759		}
760        else {
761            /*
762             * SMB 1 Response packet
763             */
764
765            /*
766             * We don't need to call mbuf_pullup(&m, SMB_HDRLEN),
767             * since it was already done above.
768             */
769
770            hp = mbuf_data(m);
771
772            /* Verify SMB 1 signature */
773            if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
774                SMBERROR("dumping non SMB 1 packet\n");
775                mbuf_freem(m);
776                continue;
777            }
778
779            /* if no more SMB 1 packets allowed, then ignore this packet */
780            if (!smb1_allowed) {
781                SMBERROR("No more SMB 1 packets allowed, dumping request\n");
782                mbuf_freem(m);
783                continue;
784            }
785
786            /*
787             * At this point we have the SMB 1 Header and packet data read in
788             * Get the cmd, mid, pid so we can find the matching smb_rq
789             */
790            mid = SMB_HDRMID(hp);
791            cmd = SMB_HDRCMD(hp);
792            pidHigh = SMB_HDRPIDHIGH(hp);
793            pidLow = SMB_HDRPIDLOW(hp);
794            SMBSDEBUG("mid %04x cmd = 0x%x\n", (unsigned)mid, cmd);
795        }
796
797        /*
798         * Search queue of smb_rq to find a match
799         */
800        SMB_IOD_RQLOCK(iod);
801		nanouptime(&iod->iod_lastrecv);
802		TAILQ_FOREACH_SAFE(rqp, &iod->iod_rqlist, sr_link, trqp) {
803            if (smb2_packet) {
804                if (rqp->sr_messageid == message_id) {
805                    /*
806                     * Matched non compound rqp or matched first rqp in a
807                     * compound rqp.
808                     */
809
810                    /*
811                     * If sent compound req, and this is not an
812                     * Async/STATUS_PENDING reply, then we should have gotten
813                     * a compound response
814                     */
815                    if ((rqp->sr_flags & SMBR_COMPOUND_RQ) &&
816                        (smb2_hdr->next_command == 0) &&
817                        !((smb2_hdr->flags & SMBR_ASYNC) && (smb2_hdr->status == STATUS_PENDING))) {
818
819                        if (!(vcp->vc_misc_flags & SMBV_NON_COMPOUND_REPLIES)) {
820                            /*
821                             * <14227703> Some NetApp servers send back non
822                             * compound replies to compound requests. Sigh.
823                             */
824                            SMBWARNING("Non compound reply to compound req. message_id %lld, cmd %d\n", message_id, cmd);
825
826                            /* Once set, this remains set forever */
827                            vcp->vc_misc_flags |= SMBV_NON_COMPOUND_REPLIES;
828                        }
829
830                        /*
831                         * Must be first non compound reply to a compound
832                         * request, thus there must be more replies pending.
833                         */
834                        cmpd_rqp = rqp; /* save start of compound rqp */
835                    }
836                }
837                else {
838                    /*
839                     * Did not match non compound rqp or did not match
840                     * first rqp in a compound rqp
841                     */
842                    if (!(rqp->sr_flags & SMBR_COMPOUND_RQ) ||
843                        !(vcp->vc_misc_flags & SMBV_NON_COMPOUND_REPLIES)) {
844                        /*
845                         * Not a compound request or server supports compound
846                         * replies, go check next rqp
847                         */
848                        continue;
849                    }
850
851                    /*
852                     * <14227703> Server is using non compound replies, so have
853                     * to search each request chain to see if this reply matches
854                     * a middle or last request in the chain.
855                     */
856                    cmpd_rqp = rqp;     /* save start of compound rqp */
857
858                    temp_rqp = rqp->sr_next_rqp;
859                    while (temp_rqp != NULL) {
860                        if (temp_rqp->sr_messageid == message_id) {
861                            /* Matches a rqp in this chain */
862                            rqp = temp_rqp;
863                            break;
864                        }
865                        temp_rqp = temp_rqp->sr_next_rqp;
866                    }
867
868                    if (temp_rqp == NULL) {
869                        /* Not found in this compound rqp */
870                        cmpd_rqp = NULL;
871                        continue;
872                    }
873                }
874
875                /* Verify that found smb_rq is a SMB 2/3 request */
876                if (!(rqp->sr_extflags & SMB2_REQUEST) &&
877                    (cmd != SMB2_NEGOTIATE)) {
878                    SMBERROR("Found non SMB 2/3 request? message_id %lld, cmd %d\n", message_id, cmd);
879                }
880
881                rqp->sr_extflags |= SMB2_RESPONSE;
882            }
883            else {
884                /*
885                 * <12071582>
886                 * We now use the mid and the low pid as a single mid, this gives
887                 * us a larger mid and helps prevent finding the wrong item. So we
888                 * need to make sure the messages match up, so use the cmd to confirm
889                 * we have the correct message.
890                 *
891                 * NOTE: SMB 2/3 does not have this issue.
892                 */
893                if ((rqp->sr_mid != mid) ||
894                    (rqp->sr_cmd != cmd) ||
895                    (rqp->sr_pidHigh != pidHigh) ||
896                    (rqp->sr_pidLow != pidLow)) {
897                    continue;
898                }
899            }
900
901            /*
902             * Found a matching smb_rq
903             */
904
905            /* We received a packet on the vc, clear the not responsive flag */
906			SMB_IOD_FLAGSLOCK(iod);
907			iod->iod_flags &= ~SMBIOD_VC_NOTRESP;
908			SMB_IOD_FLAGSUNLOCK(iod);
909
910			if (rqp->sr_share) {
911				lck_mtx_lock(&rqp->sr_share->ss_shlock);
912				if (rqp->sr_share->ss_up)
913					rqp->sr_share->ss_up(rqp->sr_share, FALSE);
914				lck_mtx_unlock(&rqp->sr_share->ss_shlock);
915			}
916
917            if (smb2_packet) {
918                /*
919                 * Check for Async and STATUS_PENDING responses.
920                 * Ignore this response and wait for the real one
921                 * to arrive later
922                 */
923                if ((smb2_hdr->flags & SMBR_ASYNC) &&
924                    (smb2_hdr->status == STATUS_PENDING)) {
925                    rqp->sr_rspasyncid = letohq(smb2_hdr->async.async_id);
926                    rqp->sr_rspcreditsgranted = letohs(smb2_hdr->credit_reqrsp);
927
928                    /* Get granted credits from this response */
929                    smb2_rq_credit_increment(rqp);
930                    rqp = NULL;
931                    break;
932                }
933            }
934
935            /*
936             * For compound replies received,
937             * ONLY the first rqp in the chain will have ALL the reply data
938             * in its mbuf chains. Its up to the upper layers to parse out
939             * the extra SMB 2/3 headers and know how to parse the SMB 2/3 reply
940             * data.
941             *
942             * Note: an alternate way to do this would be to somehow split up
943             * each of the replies into the seperate rqp's.
944             *
945             * <14227703> Some NetApp servers do not support compound replies.
946             * Those replies are in each rqp in the chain.
947             */
948
949            SMBRQ_SLOCK(rqp);
950
951            smb_rq_getreply(rqp, &mdp);
952            if (rqp->sr_rp.md_top == NULL) {
953                md_initm(mdp, m);
954            }
955            else {
956                if (rqp->sr_flags & SMBR_MULTIPACKET) {
957                    md_append_record(mdp, m);
958                }
959                else {
960                    SMBRQ_SUNLOCK(rqp);
961                    SMBERROR("duplicate response %d (ignored)\n", mid);
962                    break;
963                }
964            }
965
966            /*
967             * <14227703> For servers that do not support compound replies,
968             * check to see if entire reply has arrived.
969             */
970            if (cmpd_rqp != NULL) {
971                temp_rqp = cmpd_rqp;
972                while (temp_rqp != NULL) {
973                    if (!(temp_rqp->sr_extflags & SMB2_RESPONSE)) {
974                        /* Still missing a reply */
975                        skip_wakeup = 1;
976                        break;
977                    }
978                    temp_rqp = temp_rqp->sr_next_rqp;
979                }
980            }
981
982            SMBRQ_SUNLOCK(rqp);
983
984            /* Wake up thread waiting for this response */
985            if (skip_wakeup == 0) {
986                if (cmpd_rqp != NULL) {
987                    /*
988                     * <14227703> Have to wake up the head of the compound
989                     * chain
990                     */
991                    smb_iod_rqprocessed(cmpd_rqp, 0, 0);
992                }
993                else {
994                    smb_iod_rqprocessed(rqp, 0, 0);
995                }
996            }
997
998            break;
999		}
1000		SMB_IOD_RQUNLOCK(iod);
1001
1002		if (rqp == NULL) {
1003            if (smb2_packet) {
1004                /* Is it a lease break? */
1005                if ((cmd == SMB2_OPLOCK_BREAK) &&
1006                    (smb2_hdr->message_id == 0xffffffffffffffff) &&
1007                    (smb2_hdr->sync.tree_id == 0) &&
1008                    (smb2_hdr->session_id == 0))
1009                {
1010                    (void) smb2_smb_parse_lease_break(iod, m);
1011                    continue;
1012                }
1013
1014                /* Ignore Echo or (Async and STATUS_PENDING) responses */
1015                if ((cmd != SMB2_ECHO) &&
1016                    !((smb2_hdr->flags & SMBR_ASYNC) && (smb2_hdr->status == STATUS_PENDING))) {
1017                    SMBWARNING("drop resp: message_id %lld, cmd %d status 0x%x\n",
1018                               smb2_hdr->message_id,
1019                               smb2_hdr->command,
1020                               smb2_hdr->status);
1021                }
1022            }
1023            else {
1024                /* Ignore ECHO and NTNotify dropped messages */
1025                if ((cmd != SMB_COM_ECHO) && (cmd != SMB_COM_NT_TRANSACT)) {
1026                    SMBWARNING("drop resp: mid %d, cmd %d\n", (unsigned)mid, cmd);
1027                }
1028            }
1029			mbuf_freem(m);
1030		}
1031	}
1032
1033	return 0;
1034}
1035
1036int
1037smb_iod_request(struct smbiod *iod, int event, void *ident)
1038{
1039	struct smbiod_event *evp;
1040	int error;
1041
1042	SMBIODEBUG("\n");
1043	SMB_MALLOC(evp, struct smbiod_event *, sizeof(*evp), M_SMBIOD, M_WAITOK | M_ZERO);
1044	evp->ev_type = event;
1045	evp->ev_ident = ident;
1046	SMB_IOD_EVLOCK(iod);
1047	STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
1048	if ((event & SMBIOD_EV_SYNC) == 0) {
1049		SMB_IOD_EVUNLOCK(iod);
1050		smb_iod_wakeup(iod);
1051		return 0;
1052	}
1053	smb_iod_wakeup(iod);
1054	msleep(evp, SMB_IOD_EVLOCKPTR(iod), PWAIT | PDROP, "iod-ev", 0);
1055	error = evp->ev_error;
1056	SMB_FREE(evp, M_SMBIOD);
1057	return error;
1058}
1059
1060/*
1061 * Place request in the queue.
1062 * Request from smbiod have a high priority.
1063 */
int
smb_iod_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;
	struct timespec ts;
    struct smb_rq *tmp_rqp;
    int return_error = 0;

	/*
	 * Internal requests (issued from the iod's own context, e.g. during
	 * negotiate/session setup/reconnect) bypass the normal queueing: they
	 * go to the head of the list and are sent synchronously right here.
	 */
	if (rqp->sr_context == iod->iod_context) {
		DBG_ASSERT((rqp->sr_flags & SMBR_ASYNC) != SMBR_ASYNC);
		rqp->sr_flags |= SMBR_INTERNAL;
		SMB_IOD_RQLOCK(iod);
		TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		for (;;) {
			/* A send failure here means the transport is gone */
			if (smb_iod_sendrq(iod, rqp) != 0) {
				smb_iod_start_reconnect(iod);
				break;
			}
			/*
			 * we don't need to lock state field here
			 */
			if (rqp->sr_state != SMBRQ_NOTSENT)
				break;
			/* Still not sent; nap for a second and retry */
			ts.tv_sec = 1;
			ts.tv_nsec = 0;
			msleep(&iod->iod_flags, 0, PWAIT, "90sndw", &ts);
		}
		if (rqp->sr_lerror)
			smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}

	switch (iod->iod_state) {
		case SMBIOD_ST_DEAD:
			/* Connection is dead; give the share's dead handler a chance */
			if (rqp->sr_share) {
				lck_mtx_lock(&rqp->sr_share->ss_shlock);
				if (rqp->sr_share->ss_dead)
					rqp->sr_share->ss_dead(rqp->sr_share);
				lck_mtx_unlock(&rqp->sr_share->ss_shlock);
			}
			/* FALLTHROUGH: dead connection is also not connected */
	    case SMBIOD_ST_NOTCONN:
			return ENOTCONN;

	    case SMBIOD_ST_TRANACTIVE:
	    case SMBIOD_ST_NEGOACTIVE:
	    case SMBIOD_ST_SSNSETUP:
			/* This can happen if we are doing a reconnect */
	    default:
            /*
             * If this is not an internal message and we are in reconnect then
             * check for soft mount timeouts.
             */
            if ((!(rqp->sr_flags & SMBR_INTERNAL)) && (iod->iod_flags & SMBIOD_RECONNECT) &&
                (rqp->sr_share) && (rqp->sr_share->ss_soft_timer)) {
                /* Its soft mounted, check to see if we should return ETIMEDOUT */
                if (rqp->sr_extflags & SMB2_REQUEST) {
                    /*
                     * If its SMB 2/3 and its not part of reconnect, return
                     * ETIMEDOUT.
                     */
                    if (!(rqp->sr_context == iod->iod_context)) {
                        SMBDEBUG("Soft Mount timed out! cmd = 0x%x message_id %lld \n",
                                 (UInt32) rqp->sr_cmd, rqp->sr_messageid);
                        return ETIMEDOUT;
                    }
                }
                else {
                    /* For SMB 1, see if soft mount timer has expired or not */
                    if (smb_iod_check_timeout(&iod->reconnectStartTime,
                                              rqp->sr_share->ss_soft_timer)) {
                        SMBDEBUG("Soft Mount timed out! cmd = 0x%x\n",
                                 (UInt32) rqp->sr_cmd);
                        return ETIMEDOUT;
                    }
                }
            }
			break;
	}

	SMB_IOD_RQLOCK(iod);

    if (!(rqp->sr_extflags & SMB2_REQUEST)) {
        if (vcp->vc_flags & SMBV_SMB2) {
            /*
             * Huh? Why are we trying to send SMB 1 request on SMB 2/3
             * connection. This is not allowed. Need to find the code path
             * that got to here and fix it.
             */
            SMBERROR("SMB 1 not allowed on SMB 2/3 connection. cmd = %x\n", rqp->sr_cmd);
            SMB_IOD_RQUNLOCK(iod);
            return ERPCMISMATCH;
        }

        /* SMB 1 Flow Control: block until a mux slot frees up */
        for (;;) {
            if (iod->iod_muxcnt < vcp->vc_maxmux)
                break;
            iod->iod_muxwant++;
            /* msleep atomically drops and re-takes the RQ lock */
            msleep(&iod->iod_muxwant, SMB_IOD_RQLOCKPTR(iod), PWAIT,
                   "iod-rq-mux", 0);
        }
    }
    else {
        /*
         * Check for SMB 2/3 request that got partially built when
         * reconnect occurred. Example, Create got built with SessionID 1,
         * QueryInfo was blocked waiting on credits and after reconnect, it
         * gets built with SessionID 2 and so does the Close. Then the
         * compound request gets sent with an invalid SessionID in the Create.
         *
         * Check sr_rqsessionid and make sure its correct for each rqp. If any
         * are old, then mark that rqp with Reconnect (so no credits are
         * recovered since they are from previous session) and return an error.
         */
        tmp_rqp = rqp;
        while (tmp_rqp != NULL) {
            if (tmp_rqp->sr_rqsessionid != rqp->sr_vc->vc_session_id) {
                tmp_rqp->sr_flags |= SMBR_RECONNECTED;
                return_error = 1;
            }
            tmp_rqp = tmp_rqp->sr_next_rqp;
        }

        if (return_error == 1) {
            /* EAGAIN tells the caller to rebuild the request from scratch */
            SMB_IOD_RQUNLOCK(iod);
            return (EAGAIN);
        }
    }

	/*
     * SMB 1
	 * Should be noted here Window 2003 and Samba don't seem to care about going
	 * over the maxmux count when doing notification messages. XPhome does for sure,
	 * they will actual break the connection. SMB 2/3 will solve this issue and some
	 * day I would like to see which server care and which don't. Should we do
	 * something special for Samba or Apple, since they don't care?
	 *
	 * So for now we never use more than two thirds, if vc_maxmux is less than
	 * three then don't allow any. Should never happen, but just to be safe.
	 */
	if (rqp->sr_flags & SMBR_ASYNC) {
        if (!(rqp->sr_extflags & SMB2_REQUEST)) {
            /* SMB 1 Flow Control: cap async notifies at 2/3 of maxmux */
            if (iod->iod_asynccnt >= ((vcp->vc_maxmux / 3) * 2)) {
                SMBWARNING("Max out on VC async notify request %d\n", iod->iod_asynccnt);
                SMB_IOD_RQUNLOCK(iod);
                return EWOULDBLOCK;
            }
        }

        /* Number of pending async requests */
		iod->iod_asynccnt++;
	} else if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		/* Sync request on an active VC: feed the I/O throttle accounting */
		if (vcp->throttle_info)
			throttle_info_update(vcp->throttle_info, 0);
	}

    /* Number of pending requests (sync and async) */
    iod->iod_muxcnt++;

	TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
	SMB_IOD_RQUNLOCK(iod);
	/* Tell the iod thread there is work to do */
	iod->iod_workflag = 1;
	smb_iod_wakeup(iod);
	return 0;
}
1232
1233int
1234smb_iod_removerq(struct smb_rq *rqp)
1235{
1236	struct smb_vc *vcp = rqp->sr_vc;
1237	struct smbiod *iod = vcp->vc_iod;
1238
1239	SMBIODEBUG("\n");
1240	SMB_IOD_RQLOCK(iod);
1241
1242	if (rqp->sr_flags & SMBR_INTERNAL) {
1243		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
1244		SMB_IOD_RQUNLOCK(iod);
1245		return 0;
1246	}
1247
1248	TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
1249
1250	if (rqp->sr_flags & SMBR_ASYNC) {
1251        /* decrement number of pending async requests */
1252		iod->iod_asynccnt--;
1253    }
1254
1255    /* Decrement number of pending requests (sync and async) */
1256    if (!(rqp->sr_flags & SMBR_RECONNECTED)) {
1257        /*
1258         * Reconnect resets muxcnt to 0, so any request that was in progress
1259         * at that time should not decrement muxcnt else it will go negative
1260         */
1261        iod->iod_muxcnt--;
1262    }
1263
1264    if (!(rqp->sr_extflags & SMB2_REQUEST)) {
1265        /* SMB 1 Flow Control */
1266        if (iod->iod_muxwant) {
1267            iod->iod_muxwant--;
1268            wakeup(&iod->iod_muxwant);
1269        }
1270    }
1271
1272	SMB_IOD_RQUNLOCK(iod);
1273	return 0;
1274}
1275
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smbiod *iod = rqp->sr_vc->vc_iod;
	int error;
	struct timespec ts;

	SMBIODEBUG("\n");
	/*
	 * Internal requests are serviced inline: pump the send and receive
	 * paths ourselves until the reply generation counter advances past
	 * the last reply we consumed.
	 */
	if (rqp->sr_flags & SMBR_INTERNAL) {
		for (;;) {
			smb_iod_sendall(iod);
			smb_iod_recvall(iod);
			/* sr_rpgen != sr_rplast means a new reply has arrived */
			if (rqp->sr_rpgen != rqp->sr_rplast)
				break;
			ts.tv_sec = 1;
			ts.tv_nsec = 0;
			msleep(&iod->iod_flags, 0, PWAIT, "90irq", &ts);
		}
		smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}

	/* Normal request: sleep until the iod thread posts a reply or error */
	SMBRQ_SLOCK(rqp);
	if (rqp->sr_rpgen == rqp->sr_rplast) {
        /*
         * First thing to note here is that the transaction messages can have
         * multiple replies. We have to watch out for this if a reconnect
         * happens. So if we sent the message and received at least one reply
         * make sure a reconnect hasn't happen in between. So we check for
         * SMBR_MULTIPACKET flag because it tells us this is a transaction
         * message, we also check for the SMBR_RECONNECTED flag because it
         * tells us that a reconnect happen and we also check to make sure the
         * SMBR_REXMIT flags isn't set because that would mean we resent the
         * whole message over. If the sr_rplast is set then we have received
         * at least one response, so there is not much we can do with this
         * transaction. So just treat it like a softmount happened and return
         * ETIMEDOUT.
         *
         * Make sure we didn't get reconnect while we were asleep waiting on the next response.
         */
		do {
			/* Wake every 15 seconds to re-check the reconnect condition */
			ts.tv_sec = 15;
			ts.tv_nsec = 0;
			msleep(&rqp->sr_state, SMBRQ_SLOCKPTR(rqp), PWAIT, "srs-rq", &ts);
			if ((rqp->sr_rplast) && (rqp->sr_rpgen == rqp->sr_rplast) &&
				 ((rqp->sr_flags & (SMBR_MULTIPACKET | SMBR_RECONNECTED | SMBR_REXMIT)) == (SMBR_MULTIPACKET | SMBR_RECONNECTED))) {
				SMBERROR("Reconnect in the middle of a transaction messages, just return ETIMEDOUT\n");
				rqp->sr_lerror = ETIMEDOUT;
			}
		} while ((rqp->sr_lerror == 0) && (rqp->sr_rpgen == rqp->sr_rplast));
	}
	/* Mark this reply generation as consumed */
	rqp->sr_rplast++;
	SMBRQ_SUNLOCK(rqp);
	error = rqp->sr_lerror;
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		/*
		 * If request should stay in the list, then reinsert it
		 * at the end of queue so other waiters have chance to concur
		 */
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
	} else
		smb_iod_removerq(rqp);
	return error;
}
1343
1344/*
1345 * Error out any outstanding requests on the VC that belong to the specified
1346 * share. The calling routine should hold a reference on this share before
1347 * calling this routine.
1348 */
1349void
1350smb_iod_errorout_share_request(struct smb_share *share, int error)
1351{
1352	struct smbiod *iod = SSTOVC(share)->vc_iod;
1353	struct smb_rq *rqp, *trqp;
1354
1355	/*
1356	 * Loop through the list of requests and error out all those that belong
1357	 * to the specified share.
1358	 */
1359	SMB_IOD_RQLOCK(iod);
1360	TAILQ_FOREACH_SAFE(rqp, &iod->iod_rqlist, sr_link, trqp) {
1361		if (rqp->sr_share && (rqp->sr_share == share) &&
1362			(rqp->sr_state != SMBRQ_NOTIFIED))
1363			smb_iod_rqprocessed(rqp, error, 0);
1364	}
1365	SMB_IOD_RQUNLOCK(iod);
1366}
1367
int
smb_iod_sendall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_rq *rqp, *trqp;
	struct timespec now, ts, uetimeout;
	int herror, echo, drop_req_lock;
	uint64_t oldest_message_id = 0;
	struct timespec oldest_timesent = {0, 0};
    uint32_t pending_reply = 0;
    uint32_t need_wakeup = 0;

	herror = 0;
	echo = 0;

	/*
	 * Loop through the list of requests and send them if possible
	 */
retry:
	SMB_IOD_RQLOCK(iod);
    drop_req_lock = 1;
	TAILQ_FOREACH_SAFE(rqp, &iod->iod_rqlist, sr_link, trqp) {
		if (iod->iod_state == SMBIOD_ST_DEAD) {
			/* VC is down, just time out any message on the list */
			smb_iod_rqprocessed(rqp, ETIMEDOUT, 0);
			continue;
		}

		/* If the share is going away then just timeout the request. */
		if ((rqp->sr_share) && (isShareGoingAway(rqp->sr_share))) {
			smb_iod_rqprocessed(rqp, ETIMEDOUT, 0);
			continue;
		}

        /* Are we currently in reconnect? */
		if ((iod->iod_flags & SMBIOD_RECONNECT) &&
            (!(rqp->sr_flags & SMBR_INTERNAL))) {
            /*
             * If SMB 2/3 and soft mounted, then cancel the request (unless
             * its a reconnect request) with ETIMEDOUT
             */
            if ((rqp->sr_share) && (rqp->sr_share->ss_soft_timer) &&
                (rqp->sr_extflags & SMB2_REQUEST) &&
                !(rqp->sr_context == iod->iod_context)) {
                /*
                 * Pretend like it did not get sent to recover SMB 2/3 credits
                 */
                rqp->sr_extflags &= ~SMB2_REQ_SENT;
				smb_iod_rqprocessed(rqp, ETIMEDOUT, 0);

                SMBDEBUG("Soft Mount timed out! cmd = 0x%x message_id %lld \n",
                         (UInt32) rqp->sr_cmd, rqp->sr_messageid);
                continue;
            }

			/* Async requests cannot wait out a reconnect; fail them now */
			if (rqp->sr_flags & SMBR_ASYNC) {
				smb_iod_rqprocessed(rqp, ETIMEDOUT, 0);
				continue;
			}

            /* Should never be in the sent state at this point */
            /*
             * NOTE(review): no trailing ';' on the next line — this relies on
             * the DBG_ASSERT macro expansion supplying its own terminator
             * (or expanding to nothing in release builds); confirm.
             */
			DBG_ASSERT(rqp->sr_state != SMBRQ_SENT)

			if (rqp->sr_state == SMBRQ_NOTSENT) {
                rqp->sr_extflags &= ~SMB2_REQ_SENT; /* clear the SMB 2/3 sent flag */
				rqp->sr_state = SMBRQ_RECONNECT;
            }

			/*
             * Tell the upper layer that any error may have been the result
             * of a reconnect.
             */
			rqp->sr_flags |= SMBR_RECONNECTED;
		}

		switch (rqp->sr_state) {
			case SMBRQ_RECONNECT:
				if (iod->iod_flags & SMBIOD_RECONNECT)
					break;		/* Nothing to do but wait for reconnect to end. */
				rqp->sr_state = SMBRQ_NOTSENT;
				/*
				 * Make sure this is not a bad server. If we reconnected more
				 * than MAX_SR_RECONNECT_CNT (5) times trying to send this
				 * message, then just give up and kill the mount.
				 */
				rqp->sr_reconnect_cnt += 1;
				if (rqp->sr_reconnect_cnt > MAX_SR_RECONNECT_CNT) {
					SMBERROR("Looks like we are in a reconnect loop with server %s, canceling the reconnect. (cmd = %x)\n",
						vcp->vc_srvname, rqp->sr_cmd);
					iod->iod_state = SMBIOD_ST_DEAD;
					smb_iod_rqprocessed(rqp, ETIMEDOUT, 0);
					continue;
				}

                /*
                 * Due to crediting issues, have to rebuild the entire request.
                 * Send the request back with error of EAGAIN to indicate that
                 * it needs to be rebuilt. The SMBR_RECONNECTED flag will also
                 * be set in rqp->sr_flags.
                 */
                if (rqp->sr_extflags & SMB2_REQUEST) {
					smb_iod_rqprocessed(rqp, EAGAIN, 0);
                    continue;
                }

                /* Fall through here and send it */
			case SMBRQ_NOTSENT:
				SMB_IOD_RQUNLOCK(iod);
                /* Indicate that we are not holding the lock */
                drop_req_lock = 0;
				herror = smb_iod_sendrq(iod, rqp);
                if (herror == 0)
                    /*
                     * We will need to go back and reacquire the request queue
                     * lock and start over; having dropped the lock before
                     * sending the request, the queue could now be in a
                     * completely different state.
                     */
                    goto retry;
				break;
			case SMBRQ_SENT:
                if (vcp->vc_flags & SMBV_SMB2) {
                    /*
                     * Keep track of oldest pending Message ID as this is used
                     * in crediting.
                     */
                    if (!(rqp->sr_flags & SMBR_ASYNC)) {
                        /*
                         * Ignore async requests as they can take an indefinite
                         * amount of time.
                         */
                        if (pending_reply == 0) {
                            /* first pending reply found */
                            pending_reply = 1;
                            oldest_message_id = rqp->sr_messageid;
                            oldest_timesent = rqp->sr_timesent;
                        }
                        else {
                            /* Keep whichever request was sent earliest */
                            if (timespeccmp(&oldest_timesent, &rqp->sr_timesent, >)) {
                                oldest_message_id = rqp->sr_messageid;
                                oldest_timesent = rqp->sr_timesent;
                            }
                        }
                    }
                }

				/*
				 * If this is an async call or a long-running request then it
				 * can't timeout so we are done.
				 */
				if ((rqp->sr_flags & SMBR_ASYNC) ||
					(rqp->sr_flags & SMBR_NO_TIMEOUT)) {
					break;
				}
				nanouptime(&now);
				if (rqp->sr_share) {
					/*
					 * If its been over vc_resp_wait_timeout since
                     * the last time we received a message from the server and
                     * its been over vc_resp_wait_timeout since we sent this
                     * message break the connection. Let the reconnect code
                     * handle breaking the connection and cleaning up.
                     *
                     * We check both the iod->iod_lastrecv and rqp->sr_timesent
                     * because when the client has no work to do, then no
                     * requests are sent and thus nothing received. Then
                     * iod_lastrecv could exceed the timeout by quite a bit. By
                     * checking both the iod_lastrecv and sr_timesent, we are
                     * only checking when we know we are actually doing work.
                     *
                     * The rqp->sr_timo field was intended to have variable time
                     * out lengths, but never implemented. This code handles
                     * time outs on a share. Negotiate, SessionSetup, Logout,
                     * etc, timeouts are handled below with the
                     * SMB_SEND_WAIT_TIMO check.
					 */
					ts = now;
					uetimeout.tv_sec = vcp->vc_resp_wait_timeout;
					uetimeout.tv_nsec = 0;
					timespecsub(&ts, &uetimeout);

					if (timespeccmp(&ts, &iod->iod_lastrecv, >) &&
                        timespeccmp(&ts, &rqp->sr_timesent, >)) {
						/* See if the connection went down */
						herror = ENOTCONN;
						break;
					}
				}

                /*
                 * Here is the state of things at this point.
                 * 1. We believe the connection is still up.
                 * 2. The server is still responsive.
                 * 3. We have a sent message that has not received a response yet.
                 *
				 * How long should we wait for a response. In theory forever,
                 * but what if we never get a response. Should we break the
                 * connection or just return an error. The old code would wait
                 * from 12 to 60 seconds depending on the smb message. This
                 * seems crazy to me, why should the type of smb message
                 * matter. I know some writes can take a long time, but if the
                 * server is busy couldn't that happen with any message. We now
                 * wait for 2 minutes, if time expires we time out the call and
                 * log a message to the system log.
                 */
				ts.tv_sec = SMB_SEND_WAIT_TIMO;
				ts.tv_nsec = 0;
				timespecadd(&ts, &rqp->sr_timesent);
				if (timespeccmp(&now, &ts, >)) {
                    if (rqp->sr_extflags & SMB2_REQUEST) {
                        /* pretend like it did not get sent to recover SMB 2/3 credits */
                        rqp->sr_extflags &= ~SMB2_REQ_SENT;

                        SMBERROR("Timed out waiting on the response for 0x%x message_id = %lld state 0x%x\n",
                                 (UInt32) rqp->sr_cmd, rqp->sr_messageid, (UInt32) rqp->sr_state);
                   }
                    else {
                        SMBERROR("Timed out waiting on the response for 0x%x mid = 0x%x state 0x%x\n",
                                 (UInt32) rqp->sr_cmd, rqp->sr_mid, (UInt32) rqp->sr_state);
                    }
					smb_iod_rqprocessed(rqp, ETIMEDOUT, 0);
				} else if (rqp->sr_cmd != SMB_COM_ECHO) {
					ts = now;
					uetimeout.tv_sec = SMBUETIMEOUT;
					uetimeout.tv_nsec = 0;
					timespecsub(&ts, &uetimeout);
					/*
                     * Its been SMBUETIMEOUT seconds since we sent this message
                     * send an echo ping
                     */
					if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
						echo++;
                    }
				}
				break;
		    default:
				break;
		}
		if (herror)
			break;
	}

    /* Only unlock if the send path above did not already drop the lock */
    if (drop_req_lock)
        SMB_IOD_RQUNLOCK(iod);

	if (herror == ENOTCONN) {
		smb_iod_start_reconnect(iod);
	}	/* no echo message while we are reconnecting */
	else if (echo && ((iod->iod_flags & SMBIOD_RECONNECT) != SMBIOD_RECONNECT)) {
		/*
		 * If the UE timeout has passed since last packet i/o, nudge connection with "echo".  If server
		 * responds iod_lastrecv gets set so we'll avoid bring the connection down.
		 */
		nanouptime(&ts);
		uetimeout.tv_sec = SMBUETIMEOUT;
		uetimeout.tv_nsec = 0;
		timespecsub(&ts, &uetimeout);
		if (timespeccmp(&ts, &iod->iod_lastrecv, >) &&
		    timespeccmp(&ts, &iod->iod_lastrqsent, >)) {
			(void)smb_smb_echo(vcp, SMBNOREPLYWAIT, 1, iod->iod_context);
        }
	}

    if (vcp->vc_flags & SMBV_SMB2) {
        /* Update oldest pending request Message ID */
        SMBC_CREDIT_LOCK(vcp);
        if (pending_reply == 0) {
            /* No pending reply found */
            vcp->vc_req_pending = 0;

            if (vcp->vc_oldest_message_id != 0) {
                vcp->vc_oldest_message_id = 0;
                need_wakeup = 1;
            }
        }
        else {
            /* A pending reply was found */
            vcp->vc_req_pending = 1;

            if (oldest_message_id != vcp->vc_oldest_message_id) {
                vcp->vc_oldest_message_id = oldest_message_id;
                need_wakeup = 1;
            }
        }

        /* Wake up any requests waiting for more credits */
        if ((need_wakeup == 1) && (vcp->vc_credits_wait)) {
            OSAddAtomic(-1, &vcp->vc_credits_wait);
            wakeup(&vcp->vc_credits_wait);
        }

        SMBC_CREDIT_UNLOCK(vcp);
    }

	return 0;
}
1663
1664/*
1665 * Count the number of active shares on the VC, handle a dead shares,
1666 * timeout share or notification of unresponsive shares.
1667 */
1668static int
1669smb_iod_check_for_active_shares(struct smb_vc *vcp)
1670{
1671	struct smbiod *	iod = vcp->vc_iod;
1672	struct smb_share *share, *tshare;
1673	int treecnt = 0;
1674
1675	smb_vc_lock(vcp);	/* lock the vc so we can search the list */
1676	SMBCO_FOREACH_SAFE(share, VCTOCP(vcp), tshare) {
1677		/*
1678		 * Since we have the vc lock we know the share can't be freed, but
1679		 * it could be going away (Tree Disconnect is in process). Take a
1680		 * reference so no one else can disconnect it out from underneath
1681		 * us.
1682		 */
1683		smb_share_ref(share);
1684		if (share->ss_flags & SMBO_GONE) {
1685			/* Skip any shares that are being disconnect */
1686			smb_share_rele(share, iod->iod_context);
1687			continue;
1688		}
1689		/* This share has a dead timeout value see if they want to disconnect */
1690		if (share->ss_dead_timer) {
1691			if (smb_iod_check_timeout(&iod->reconnectStartTime, share->ss_dead_timer)) {
1692				lck_mtx_lock(&share->ss_shlock);
1693				if (share->ss_dead) {
1694					share->ss_dead(share);
1695					lck_mtx_unlock(&share->ss_shlock);
1696				} else {
1697					lck_mtx_unlock(&share->ss_shlock);
1698					/* Shutdown all outstanding I/O requests on this share. */
1699					smb_iod_errorout_share_request(share, EPIPE);
1700				}
1701				smb_share_rele(share, iod->iod_context);
1702				continue;
1703			}
1704		}
1705		/* Check for soft mount timeouts */
1706		if ((share->ss_soft_timer) &&
1707			(smb_iod_check_timeout(&iod->reconnectStartTime, share->ss_soft_timer))) {
1708			smb_iod_errorout_share_request(share, ETIMEDOUT);
1709		}
1710
1711		lck_mtx_lock(&share->ss_shlock);
1712		if (share->ss_down) {
1713			treecnt += share->ss_down(share, (iod->iod_flags & SMBIOD_VC_NOTRESP));
1714		} else {
1715			treecnt++;
1716		}
1717		lck_mtx_unlock(&share->ss_shlock);
1718		smb_share_rele(share, iod->iod_context);
1719	}
1720	smb_vc_unlock(vcp);
1721	return treecnt;
1722
1723}
1724
1725/*
1726 * This is called from tcp_connect and smb_iod_reconnect. During a reconnect if the volume
1727 * goes away or someone tries to unmount it then we need to break out of the reconnect. We
1728 * may want to use this for normal connections in the future.
1729 */
int smb_iod_nb_intr(struct smb_vc *vcp)
{
	struct smbiod *	iod = vcp->vc_iod;

	/*
	 * If not in reconnect then see if the user applications wants to cancel the
	 * connection. Returns EINTR to abort the connect attempt, 0 to keep going.
	 */
	if ((iod->iod_flags & SMBIOD_RECONNECT) != SMBIOD_RECONNECT) {
		if (vcp->connect_flag && (*(vcp->connect_flag) & NSMBFL_CANCEL))
			return EINTR;
		else return 0;
	}
	/*
	 * We must be in reconnect, check to see if we are officially unresponsive.
	 * XXX - We should really rework this in the future, if the VC was having
	 * issues before we got here, we may want to have SMBIOD_VC_NOTRESP already
	 * set. See <rdar://problem/8124132>
	 */
	if (((iod->iod_flags & SMBIOD_VC_NOTRESP) == 0) &&
		(smb_iod_check_timeout(&iod->reconnectStartTime, NOTIFY_USER_TIMEOUT))) {
			SMB_IOD_FLAGSLOCK(iod);		/* Mark that the VC is not responsive. */
			iod->iod_flags |= SMBIOD_VC_NOTRESP;
			SMB_IOD_FLAGSUNLOCK(iod);
	}
	/* Don't keep reconnecting if there are no active shares */
	return (smb_iod_check_for_active_shares(vcp)) ? 0 : EINTR;
}
1758
1759/*
1760 * The connection went down for some reason. We need to try and reconnect. We need to
1761 * do a TCP connection and if on port 139 a NetBIOS connection. Any error from the negotiate,
1762 * setup, or tree connect message is fatal and will bring down the whole process. This routine
1763 * is run from the main thread, so any message comming down from the file system will block
1764 * until we are done.
1765 */
static void smb_iod_reconnect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	int tree_cnt = 0;		/* number of shares successfully reconnected */
	int error = 0;
	int sleepcnt = 0;		/* seconds to wait between tcp connect attempts */
	struct smb_share *share = NULL, *tshare;
	struct timespec waittime, sleeptime, tsnow;
	int ii;

	/* See if we can get a reference on this VC */
	if (smb_vc_reconnect_ref(iod->iod_vc, iod->iod_context)) {
		/* The vc is either gone or going away */
		iod->iod_flags &= ~SMBIOD_RECONNECT;
		iod->iod_workflag = 1;
		SMBERROR("The vc is going aways while we are in reconnect?\n");
		return;
	}

	SMBWARNING("Starting reconnect with %s\n", vcp->vc_srvname);
	SMB_TRAN_DISCONNECT(vcp); /* Make sure the connection is closed first */
	iod->iod_state = SMBIOD_ST_CONNECT;
	/* Start the reconnect timers */
	sleepcnt = 1;
	sleeptime.tv_sec = 1;
	sleeptime.tv_nsec = 0;
	nanouptime(&iod->reconnectStartTime);
	/* The number of seconds to wait on a reconnect */
	waittime.tv_sec = vcp->reconnect_wait_time;
	waittime.tv_nsec = 0;
	timespecadd(&waittime, &iod->reconnectStartTime);

	do {
		/*
		 * The tcp connect will cause smb_iod_nb_intr to be called every two
		 * seconds. So we always wait 2 seconds to see if the connection
		 * comes back quickly before attempting any other types of action.
		 */
        if (smb_iod_nb_intr(vcp) == EINTR) {
            error = EINTR;
            SMBDEBUG("Reconnect to %s was canceled\n", vcp->vc_srvname);
            goto exit;
        }

		error = SMB_TRAN_CONNECT(vcp, vcp->vc_saddr);
		if (error == EINTR)	{
			SMBDEBUG("Reconnect to %s was canceled\n", vcp->vc_srvname);
			goto exit;
		}

		DBG_ASSERT(vcp->vc_tdata != NULL);
		DBG_ASSERT(error != EISCONN);
		DBG_ASSERT(error != EINVAL);
		if (error) {
			/*
			 * Never sleep longer than 1 second at a time, but we can wait up
			 * to 5 seconds between tcp connections.
			 */
			for (ii= 1; ii <= sleepcnt; ii++) {
				msleep(&iod->iod_flags, 0, PWAIT, "smb_iod_reconnect", &sleeptime);

				if (smb_iod_nb_intr(vcp) == EINTR) {
					error = EINTR;
					SMBDEBUG("Reconnect to %s was canceled\n", vcp->vc_srvname);
					goto exit;
				}
			}
			/* Never wait more than 5 seconds between connection attempts */
			if (sleepcnt < SMB_MAX_SLEEP_CNT )
				sleepcnt++;
			SMBWARNING("Retrying connection to %s error = %d\n", vcp->vc_srvname,
					 error);
		}
		/*
		 * We went to sleep during the reconnect and we just woke up. Start the
		 * reconnect process over again. Reset our start time to now. Reset our
		 * wait time to be based on the current time. Reset the time we sleep between
		 * reconnect. Just act like we just entered this routine
		 */
		if (iod->reconnectStartTime.tv_sec < gWakeTime.tv_sec) {
			sleepcnt = 1;
			nanouptime(&iod->reconnectStartTime);
			/* The number of seconds to wait on a reconnect */
			waittime.tv_sec = vcp->reconnect_wait_time;
			waittime.tv_nsec = 0;
			timespecadd(&waittime, &iod->reconnectStartTime);
		}
		/*
		 * We now do the negotiate message inside the connect loop. This way if
		 * the negotiate message times out we can keep trying the connection. This
		 * solves the problem of getting disconnected from a server that is going
		 * down, but stays up long enough for us to do a tcp connection.
		 */
		if (!error) {
			/* Clear out the outstanding request counter, everything is going to get resent */
			iod->iod_muxcnt = 0;

			/* Reset the virtual circuit to a reconnect state */
			smb_vc_reset(vcp);

			/* Start the virtual circuit */
			iod->iod_state = SMBIOD_ST_TRANACTIVE;
			error = smb_smb_negotiate(vcp, NULL, TRUE, iod->iod_context);
			if ((error == ENOTCONN) || (error == ETIMEDOUT)) {
				SMBWARNING("The negotiate timed out to %s trying again: error = %d\n",
						   vcp->vc_srvname, error);
				SMB_TRAN_DISCONNECT(vcp); /* Make sure the connection is closed first */
				iod->iod_state = SMBIOD_ST_CONNECT;
			} else if (error) {
				SMBWARNING("The negotiate failed to %s with an error of %d\n",
						   vcp->vc_srvname, error);
				break;
			} else {
				SMBDEBUG("The negotiate succeeded to %s\n", vcp->vc_srvname);
				iod->iod_state = SMBIOD_ST_NEGOACTIVE;
				/*
				 * We now do the authentication inside the connect loop. This
				 * way if the authentication fails because we don't have
				 * credentials yet or the credentials have expired, then we can
				 * keep trying.
				 */
				error = smb_iod_ssnsetup(iod, TRUE);
				if (error)
					SMBWARNING("The authentication failed to %s with an error of %d\n",
							   vcp->vc_srvname, error);
				/* If the error isn't EAGAIN then nothing else to do here, we have success or failure */
				if (error != EAGAIN)
					break;

				/* Try four more times and see if the user has updated the Kerberos Creds */
				for (ii = 1; ii < SMB_MAX_SLEEP_CNT; ii++) {
					msleep(&iod->iod_flags, 0, PWAIT, "smb_iod_reconnect", &sleeptime);

                    if (smb_iod_nb_intr(vcp) == EINTR) {
                        error = EINTR;
                        SMBDEBUG("Reconnect to %s was canceled\n",
                                 vcp->vc_srvname);
                        goto exit;
                    }

                    error = smb_iod_ssnsetup(iod, TRUE);
					if (error)
						SMBWARNING("Retrying authentication count %d failed to %s with an error of %d\n",
								   ii, vcp->vc_srvname, error);
					if (error != EAGAIN)
						break;
				}
				/* If no error then we are done, otherwise break the connection and try again */
				if (error == 0)
					break;

				SMB_TRAN_DISCONNECT(vcp); /* Make sure the connection is closed first */
				iod->iod_state = SMBIOD_ST_CONNECT;
				error = EAUTH;
			}
		}
		nanouptime(&tsnow);
	} while (error && (timespeccmp(&waittime, &tsnow, >)));

	/* reconnect failed or we timed out, nothing left to do cancel the reconnect */
	if (error) {
		SMBWARNING("The connection failed to %s with an error of %d\n", vcp->vc_srvname, error);
		goto exit;
	}
	/*
	 * We now need to reconnect each share. Since the current code only has one share
	 * per virtual circuit there is no problem with locking the list down here. Need
	 * to look at this in the future. If at least one mount point succeeds then do not
	 * close the whole circuit.
	 * We do not wake up smbfs_smb_reopen_file, wait till the very end.
	 */
	tree_cnt = 0;
	smb_vc_lock(vcp);	/* lock the vc so we can search the list */
	SMBCO_FOREACH_SAFE(share, VCTOCP(vcp), tshare) {
		/*
		 * Since we have the vc lock we know the share can't be freed, but
		 * it could be going away (Tree Disconnect is in process). Take a
		 * reference so no one else can disconnect it out from underneath
		 * us.
		 */
		smb_share_ref(share);
		if (share->ss_flags & SMBO_GONE) {
			/* Skip any shares that are being disconnected */
			smb_share_rele(share, iod->iod_context);
			continue;
		}

		/* Always reconnect the tree, even if its not a mount point */
		error = smb_smb_treeconnect(share, iod->iod_context);
		if (error) {
			SMBERROR("Reconnect failed to share %s on server %s error = %d\n",
					 share->ss_name, vcp->vc_srvname, error);
			error = 0; /* reset the error, only used for logging */
		} else {
			tree_cnt++;
			lck_mtx_lock(&share->ss_shlock);
			if (share->ss_up) {
                /*
                 * Tell upper layers that reconnect has been done. Right now
                 * this marks all open files that they need to be reopened.
                 */
				share->ss_up(share, TRUE);
				SMBERROR("Reconnected share %s with server %s\n", share->ss_name, vcp->vc_srvname);
			} else {
				SMBWARNING("Reconnected share %s with server %s\n", share->ss_name, vcp->vc_srvname);
			}
			lck_mtx_unlock(&share->ss_shlock);
		}
		smb_share_rele(share, iod->iod_context);
	}
	smb_vc_unlock(vcp);
	/* If we have no shares on this connect then kill the whole virtual circuit. */
	if (!tree_cnt) {
		SMBWARNING("No mounted volumes in reconnect, closing connection to server %s\n",vcp->vc_srvname);
		error = ENOTCONN;
	}

exit:
	/*
	 * We only want to wake up the shares if we are not trying to do another
	 * reconnect. So if we have no error or the reconnect time is past the
	 * wake time, then wake up any volumes that are waiting
	 */
	if ((error == 0) || (iod->reconnectStartTime.tv_sec >= gWakeTime.tv_sec)) {
		smb_vc_lock(vcp);	/* lock the vc so we can search the list */
		SMBCO_FOREACH_SAFE(share, VCTOCP(vcp), tshare) {
			smb_share_ref(share);
			lck_mtx_lock(&share->ss_stlock);
			share->ss_flags &= ~SMBS_RECONNECTING;	/* Turn off reconnecting flag */
			lck_mtx_unlock(&share->ss_stlock);
			wakeup(&share->ss_flags);	/* Wakeup the volumes. */
			smb_share_rele(share, iod->iod_context);
		}
		smb_vc_unlock(vcp);
	}
	/*
	 * Remember we are the main thread, turning off the flag will start the process
	 * going only after we leave this routine.
	 */
	SMB_IOD_FLAGSLOCK(iod);
	iod->iod_flags &= ~SMBIOD_RECONNECT;
	SMB_IOD_FLAGSUNLOCK(iod);
	if (error)
		SMB_TRAN_DISCONNECT(vcp);

	smb_vc_reconnect_rel(vcp);	/* We are done release the reference */

	if (error) {
		if (iod->reconnectStartTime.tv_sec < gWakeTime.tv_sec) {
			/*
			 * We went to sleep after the connection, but before the reconnect
			 * completed. Start the whole process over now and see if we can
			 * reconnect.
			 */
			SMBWARNING("The reconnect failed because we went to sleep retrying! %d\n", error);
			iod->iod_state = SMBIOD_ST_RECONNECT;
			smb_iod_start_reconnect(iod); /* Retry the reconnect */
		}
        else {
			/* We failed; tell the user and have the volume unmounted */
			smb_iod_dead(iod);

            /*
             * Reconnect failed, but iod layer is all set now to deny any new
             * requests. Tell above layer that we now have a ton of credits to
             * allow any requests waiting for credits to error out.
             */
            smb2_rq_credit_start(vcp, kCREDIT_MAX_AMT);
		}
	}
    else {
        /* Reconnect worked, its now safe to start up crediting again */
        smb2_rq_credit_start(vcp, 0);
    }

	/* Tell the iod main loop there may be work queued while we were in here */
	iod->iod_workflag = 1;
}
2043
2044/*
2045 * "main" function for smbiod daemon
2046 */
static __inline void
smb_iod_main(struct smbiod *iod)
{
	struct smbiod_event *evp;

	SMBIODEBUG("\n");
	/*
	 * Check all interesting events: drain the event queue, then push any
	 * pending requests out and pull any pending replies in.
	 */
	for (;;) {
		SMB_IOD_EVLOCK(iod);
		evp = STAILQ_FIRST(&iod->iod_evlist);
		if (evp == NULL) {
			SMB_IOD_EVUNLOCK(iod);
			break;
		}
		else if (iod->iod_flags & SMBIOD_RECONNECT) {
			/* Ignore any events until reconnect is done */
		    SMB_IOD_EVUNLOCK(iod);
		    break;
		}
		STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
		evp->ev_type |= SMBIOD_EV_PROCESSING;
		SMB_IOD_EVUNLOCK(iod);
		switch (evp->ev_type & SMBIOD_EV_MASK) {
		    case SMBIOD_EV_NEGOTIATE:
				evp->ev_error = smb_iod_negotiate(iod, evp->ev_ident);
				break;
		    case SMBIOD_EV_SSNSETUP:
				evp->ev_error = smb_iod_ssnsetup(iod, FALSE);
				break;
		    case SMBIOD_EV_DISCONNECT:
				evp->ev_error = smb_iod_disconnect(iod);
				break;
		    case SMBIOD_EV_SHUTDOWN:
				/*
				 * Flags in iod_flags are only set within the iod,
				 * so we don't need the mutex to protect
				 * setting or clearing them, and SMBIOD_SHUTDOWN
				 * is only tested within the iod, so we don't
				 * need the mutex to protect against other
				 * threads testing it.
				 */
				iod->iod_flags |= SMBIOD_SHUTDOWN;
				break;
		    case SMBIOD_EV_NEWRQ:
				break;
		    case SMBIOD_EV_FORCE_RECONNECT:
                smb_iod_start_reconnect(iod);
				break;
			default:
				break;
		}
		if (evp->ev_type & SMBIOD_EV_SYNC) {
			/*
			 * Synchronous events are owned by the poster, which is
			 * sleeping on evp; wake it under the event lock and let
			 * it free the event.
			 */
			SMB_IOD_EVLOCK(iod);
			wakeup(evp);
			SMB_IOD_EVUNLOCK(iod);
		} else
			SMB_FREE(evp, M_SMBIOD);
	}
	smb_iod_sendall(iod);
	smb_iod_recvall(iod);
	return;
}
2111
static void smb_iod_thread(void *arg)
{
	struct smbiod *iod = arg;
	vfs_context_t      context;


	/*
	 * smb_iod_create() launches this thread with the iod's proc set to
	 * kernproc. The current vfs_context KPIs can only build a context from
	 * the current (or another) context, not from a proc. Since kernel
	 * threads run under the kernel task and kernproc, it is fine to create
	 * the context from the current thread here.
	 */

	context = iod->iod_context = vfs_context_create((vfs_context_t)0);

	SMB_IOD_FLAGSLOCK(iod);
	iod->iod_flags |= SMBIOD_RUNNING;
	SMB_IOD_FLAGSUNLOCK(iod);

	/*
	 * SMBIOD_SHUTDOWN is only set within the iod, so we don't need
	 * the mutex to protect testing it.
	 */
	while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
		iod->iod_workflag = 0;
		smb_iod_main(iod);
		if (iod->iod_flags & SMBIOD_SHUTDOWN)
			break;
		/* First see if we need to try a reconnect. If not see the VC is not responsive. */
		if ((iod->iod_flags & (SMBIOD_START_RECONNECT | SMBIOD_RECONNECT)) == SMBIOD_RECONNECT)
			smb_iod_reconnect(iod);
		/*
		 * In order to prevent a race here, this should really be locked
		 * with a mutex on which we would subsequently msleep, and
		 * which should be acquired before changing the flag.
		 * Or should this be another flag in iod_flags, using its
		 * mutex?
		 */
		if (iod->iod_workflag)
			continue;
		SMBIODEBUG("going to sleep for %ld secs %ld nsecs\n", iod->iod_sleeptimespec.tv_sec,
				iod->iod_sleeptimespec.tv_nsec);
		msleep(&iod->iod_flags, 0, PWAIT, "iod thread idle", &iod->iod_sleeptimespec);
	}

	/*
	 * Clear the running flag, and wake up anybody waiting for us to quit
	 * (smb_iod_destroy sleeps until SMBIOD_RUNNING goes away).
	 */
	SMB_IOD_FLAGSLOCK(iod);
	iod->iod_flags &= ~SMBIOD_RUNNING;
	wakeup(iod);
	SMB_IOD_FLAGSUNLOCK(iod);

	vfs_context_rele(context);
}
2168
2169int
2170smb_iod_create(struct smb_vc *vcp)
2171{
2172	struct smbiod	*iod;
2173	kern_return_t	result;
2174	thread_t		thread;
2175
2176	SMB_MALLOC(iod, struct smbiod *, sizeof(*iod), M_SMBIOD, M_WAITOK | M_ZERO);
2177	iod->iod_id = smb_iod_next++;
2178	iod->iod_state = SMBIOD_ST_NOTCONN;
2179	lck_mtx_init(&iod->iod_flagslock, iodflags_lck_group, iodflags_lck_attr);
2180	iod->iod_vc = vcp;
2181	iod->iod_sleeptimespec.tv_sec = SMBIOD_SLEEP_TIMO;
2182	iod->iod_sleeptimespec.tv_nsec = 0;
2183	nanouptime(&iod->iod_lastrqsent);
2184	vcp->vc_iod = iod;
2185	lck_mtx_init(&iod->iod_rqlock, iodrq_lck_group, iodrq_lck_attr);
2186	TAILQ_INIT(&iod->iod_rqlist);
2187	lck_mtx_init(&iod->iod_evlock, iodev_lck_group, iodev_lck_attr);
2188	STAILQ_INIT(&iod->iod_evlist);
2189	/*
2190	 * The IOCreateThread routine has been depricated. Just copied
2191	 * that code here
2192	 */
2193	result = kernel_thread_start((thread_continue_t)smb_iod_thread, iod, &thread);
2194	if (result != KERN_SUCCESS) {
2195		SMBERROR("can't start smbiod result = %d\n", result);
2196		SMB_FREE(iod, M_SMBIOD);
2197		return (ENOMEM);
2198	}
2199	thread_deallocate(thread);
2200	return (0);
2201}
2202
int
smb_iod_destroy(struct smbiod *iod)
{
	/*
	 * We don't post this synchronously, as that causes a wakeup
	 * when the SMBIOD_SHUTDOWN flag is set, but that happens
	 * before the iod actually terminates, and we have to wait
	 * until it terminates before we can free its locks and
	 * its data structure.
	 */
	smb_iod_request(iod, SMBIOD_EV_SHUTDOWN, NULL);

	/*
	 * Wait for the iod to exit. The iod thread clears SMBIOD_RUNNING and
	 * wakes us just before it returns; PDROP releases the flags lock
	 * while we sleep, so we retake it each pass.
	 */
	for (;;) {
		SMB_IOD_FLAGSLOCK(iod);
		if (!(iod->iod_flags & SMBIOD_RUNNING)) {
			SMB_IOD_FLAGSUNLOCK(iod);
			break;
		}
		msleep(iod, SMB_IOD_FLAGSLOCKPTR(iod), PWAIT | PDROP,
		    "iod-exit", 0);
	}
	/* The thread is gone; safe to tear down its locks and free it. */
	lck_mtx_destroy(&iod->iod_flagslock, iodflags_lck_group);
	lck_mtx_destroy(&iod->iod_rqlock, iodrq_lck_group);
	lck_mtx_destroy(&iod->iod_evlock, iodev_lck_group);
	SMB_FREE(iod, M_SMBIOD);
	return 0;
}
2233
/* Subsystem init hook; nothing to set up, present for module symmetry. */
int
smb_iod_init(void)
{
	return (0);
}
2239
/* Subsystem teardown hook; nothing to tear down, present for module symmetry. */
int
smb_iod_done(void)
{
	return (0);
}
2245
2246