smb_iod.c revision 95533
/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/netsmb/smb_iod.c 95533 2002-04-26 22:48:23Z mike $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>


#define	SMBIOD_SLEEP_TIMO	2
#define	SMBIOD_PING_TIMO	60	/* seconds */

#define	SMB_IOD_EVLOCKPTR(iod)	(&((iod)->iod_evlock))
#define	SMB_IOD_EVLOCK(iod)	smb_sl_lock(&((iod)->iod_evlock))
#define	SMB_IOD_EVUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_evlock))

#define	SMB_IOD_RQLOCKPTR(iod)	(&((iod)->iod_rqlock))
#define	SMB_IOD_RQLOCK(iod)	smb_sl_lock(&((iod)->iod_rqlock))
#define	SMB_IOD_RQUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_rqlock))

#define	smb_iod_wakeup(iod)	wakeup(&(iod)->iod_flags)


static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");

static int smb_iod_next;

static int  smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);

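/*
 * Mark a request as processed: record the error code, advance the
 * response generation and wake up anyone sleeping in smb_iod_waitrq().
 */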
static __inline void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{
	SMBRQ_SLOCK(rqp);
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	wakeup(&rqp->sr_state);
	SMBRQ_SUNLOCK(rqp);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection.
	 * smb_iod_rqprocessed() takes the request lock itself, so it
	 * must not be held here.
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, ENOTCONN);
	}
	SMB_IOD_RQUNLOCK(iod);
}

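/*
 * Tear down the transport connection, if any.  Safe to call more
 * than once since vc_tdata is cleared on the first pass.
 */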
static void
smb_iod_closetran(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;

	if (vcp->vc_tdata == NULL)
		return;
	SMB_TRAN_DISCONNECT(vcp, td);
	SMB_TRAN_DONE(vcp, td);
	vcp->vc_tdata = NULL;
}

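/*
 * Declare the connection dead: close the transport and fail all
 * outstanding requests with ENOTCONN.
 */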
static void
smb_iod_dead(struct smbiod *iod)
{
	iod->iod_state = SMBIOD_ST_DEAD;
	smb_iod_closetran(iod);
	smb_iod_invrq(iod);
}

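/*
 * (Re)establish the connection: create, bind (if a local address is
 * set) and connect the transport, then negotiate the SMB dialect and
 * set up the session.
 */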
static int
smb_iod_connect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	int error;

	SMBIODEBUG("%d\n", iod->iod_state);
	switch (iod->iod_state) {
	    case SMBIOD_ST_VCACTIVE:
		SMBERROR("called for already opened connection\n");
		return EISCONN;
	    case SMBIOD_ST_DEAD:
		return ENOTCONN;	/* XXX: last error code ? */
	    default:
		break;
	}
	vcp->vc_genid++;
	error = 0;
	itry {
		ithrow(SMB_TRAN_CREATE(vcp, td));
		SMBIODEBUG("tcreate\n");
		if (vcp->vc_laddr) {
			ithrow(SMB_TRAN_BIND(vcp, vcp->vc_laddr, td));
		}
		SMBIODEBUG("tbind\n");
		ithrow(SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, td));
		SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
		SMBIODEBUG("tconnect\n");
/*		vcp->vc_mid = 0;*/
		ithrow(smb_smb_negotiate(vcp, &iod->iod_scred));
		SMBIODEBUG("snegotiate\n");
		ithrow(smb_smb_ssnsetup(vcp, &iod->iod_scred));
		iod->iod_state = SMBIOD_ST_VCACTIVE;
		SMBIODEBUG("completed\n");
		smb_iod_invrq(iod);
	} icatch(error) {
		smb_iod_dead(iod);
	} ifinally {
	} iendtry;
	return error;
}

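/*
 * Close the session, if one is active, and shut down the transport,
 * leaving the iod in the not-connected state.
 */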
static int
smb_iod_disconnect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;

	SMBIODEBUG("\n");
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		smb_smb_ssnclose(vcp, &iod->iod_scred);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
	}
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
	smb_iod_closetran(iod);
	iod->iod_state = SMBIOD_ST_NOTCONN;
	return 0;
}

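/*
 * (Re)connect a share, first reconnecting the VC if it went dead.
 * Waiters sleeping on ss_vcgenid are woken when the attempt is done.
 */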
static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
	int error;

	if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
		if (iod->iod_state != SMBIOD_ST_DEAD)
			return ENOTCONN;
		iod->iod_state = SMBIOD_ST_RECONNECT;
		error = smb_iod_connect(iod);
		if (error)
			return error;
	}
	SMBIODEBUG("tree reconnect\n");
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags |= SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	error = smb_smb_treeconnect(ssp, &iod->iod_scred);
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags &= ~SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	wakeup(&ssp->ss_vcgenid);
	return error;
}

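/*
 * Try to transmit one request.  A return value of ENOTCONN means the
 * whole connection should be declared dead; per-request failures are
 * reported to the waiter through smb_iod_rqprocessed() instead.
 */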
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
	struct thread *td = iod->iod_td;
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_share *ssp = rqp->sr_share;
	struct mbuf *m;
	int error;

	SMBIODEBUG("iod_state = %d\n", iod->iod_state);
	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		smb_iod_rqprocessed(rqp, ENOTCONN);
		return 0;
	    case SMBIOD_ST_DEAD:
		iod->iod_state = SMBIOD_ST_RECONNECT;
		return 0;
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	if (rqp->sr_sendcnt == 0) {
#ifdef movedtoanotherplace
		if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
			return 0;
#endif
		*rqp->sr_rqtid = htoles(ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
		*rqp->sr_rquid = htoles(vcp ? vcp->vc_smbuid : 0);
		mb_fixhdr(&rqp->sr_rq);
	}
	if (rqp->sr_sendcnt++ > 5) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, rqp->sr_lerror);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return ENOTCONN;
	}
	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
	m_dumpm(rqp->sr_rq.mb_top);
	m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_TRYWAIT);
	error = rqp->sr_lerror = m ? SMB_TRAN_SEND(vcp, m, td) : ENOBUFS;
	if (error == 0) {
		getnanotime(&rqp->sr_timesent);
		iod->iod_lastrqsent = rqp->sr_timesent;
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		return 0;
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		return ENOTCONN;
	}
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR);
	return 0;
}

/*
 * Process incoming packets
 */
static int
smb_iod_recvall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	struct smb_rq *rqp;
	struct mbuf *m;
	u_char *hp;
	u_short mid;
	int error;

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
	    case SMBIOD_ST_DEAD:
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	for (;;) {
		m = NULL;
		error = SMB_TRAN_RECV(vcp, &m, td);
		if (error == EWOULDBLOCK)
			break;
		if (SMB_TRAN_FATAL(vcp, error)) {
			smb_iod_dead(iod);
			break;
		}
		if (error)
			break;
		if (m == NULL) {
			SMBERROR("tran returned NULL without error\n");
			error = EPIPE;
			continue;
		}
		m = m_pullup(m, SMB_HDRLEN);
		if (m == NULL)
			continue;	/* wait for a good packet */
		/*
		 * We now have a complete, but possibly invalid, SMB packet.
		 * Be careful while parsing it.
		 */
		m_dumpm(m);
		hp = mtod(m, u_char*);
		if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
			m_freem(m);
			continue;
		}
		mid = SMB_HDRMID(hp);
		SMBSDEBUG("mid %04x\n", (u_int)mid);
		SMB_IOD_RQLOCK(iod);
		TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
			if (rqp->sr_mid != mid)
				continue;
			SMBRQ_SLOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_SUNLOCK(rqp);
					SMBERROR("duplicate response %d (ignored)\n", mid);
					break;
				}
			}
			SMBRQ_SUNLOCK(rqp);
			smb_iod_rqprocessed(rqp, 0);
			break;
		}
		SMB_IOD_RQUNLOCK(iod);
		if (rqp == NULL) {
			SMBERROR("drop resp with mid %u\n", (u_int)mid);
/*			smb_printrqlist(vcp);*/
			m_freem(m);
		}
	}
	/*
	 * check for interrupts
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (smb_proc_intr(rqp->sr_cred->scr_td->td_proc)) {
			smb_iod_rqprocessed(rqp, EINTR);
		}
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

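/*
 * Post an event to the iod thread.  For SMBIOD_EV_SYNC events, sleep
 * until the event has been processed and return its result.
 */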
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
	struct smbiod_event *evp;
	int error;

	SMBIODEBUG("\n");
	evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
	evp->ev_type = event;
	evp->ev_ident = ident;
	SMB_IOD_EVLOCK(iod);
	STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
	if ((event & SMBIOD_EV_SYNC) == 0) {
		SMB_IOD_EVUNLOCK(iod);
		smb_iod_wakeup(iod);
		return 0;
	}
	smb_iod_wakeup(iod);
	msleep(evp, SMB_IOD_EVLOCKPTR(iod), PWAIT | PDROP, "90evw", 0);
	error = evp->ev_error;
	free(evp, M_SMBIOD);
	return error;
}


/*
 * Place request in the queue.
 * Requests from smbiod have a high priority.
 */
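/*
 * A rough sketch of how a regular (non-internal) request flows
 * through the iod; the actual callers live in smb_rq.c:
 *
 *	error = smb_iod_addrq(rqp);		queue it, wake the iod
 *	if (error == 0)
 *		error = smb_iod_waitrq(rqp);	sleep for the response;
 *						this also dequeues rqp
 *						unless SMBR_MULTIPACKET
 */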
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_cred->scr_td->td_proc == iod->iod_p) {
		rqp->sr_flags |= SMBR_INTERNAL;
		SMB_IOD_RQLOCK(iod);
		TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		for (;;) {
			if (smb_iod_sendrq(iod, rqp) != 0) {
				smb_iod_dead(iod);
				break;
			}
			/*
			 * we don't need to lock the state field here
			 */
			if (rqp->sr_state != SMBRQ_NOTSENT)
				break;
			tsleep(&iod->iod_flags, PWAIT, "90sndw", hz);
		}
		if (rqp->sr_lerror)
			smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		return ENOTCONN;
	    case SMBIOD_ST_DEAD:
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
		if (error)
			return error;
		return EXDEV;
	    default:
		break;
	}

	SMB_IOD_RQLOCK(iod);
	for (;;) {
		if (vcp->vc_maxmux == 0) {
			SMBERROR("maxmux == 0\n");
			break;
		}
		if (iod->iod_muxcnt < vcp->vc_maxmux)
			break;
		iod->iod_muxwant++;
		msleep(&iod->iod_muxwant, SMB_IOD_RQLOCKPTR(iod),
		    PWAIT, "90mux", 0);
	}
	iod->iod_muxcnt++;
	TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
	SMB_IOD_RQUNLOCK(iod);
	smb_iod_wakeup(iod);
	return 0;
}

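/*
 * Remove a request from the queue, waiting for a transient SMBR_XLOCK
 * (send in progress) to clear, and hand the freed mux slot to the
 * next waiter.
 */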
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		return 0;
	}
	SMB_IOD_RQLOCK(iod);
	while (rqp->sr_flags & SMBR_XLOCK) {
		rqp->sr_flags |= SMBR_XLOCKWANT;
		msleep(rqp, SMB_IOD_RQLOCKPTR(iod), PWAIT, "90xrm", 0);
	}
	TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
	iod->iod_muxcnt--;
	if (iod->iod_muxwant) {
		iod->iod_muxwant--;
		wakeup(&iod->iod_muxwant);
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

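/*
 * Wait for a response.  Internal requests are polled for here, since
 * the iod thread itself issued them; regular requests sleep until
 * smb_iod_rqprocessed() posts a new response generation.
 */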
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smbiod *iod = rqp->sr_vc->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		for (;;) {
			smb_iod_sendall(iod);
			smb_iod_recvall(iod);
			if (rqp->sr_rpgen != rqp->sr_rplast)
				break;
			tsleep(&iod->iod_flags, PWAIT, "90irq", hz);
		}
		smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}
	SMBRQ_SLOCK(rqp);
	if (rqp->sr_rpgen == rqp->sr_rplast)
		msleep(&rqp->sr_state, SMBRQ_SLOCKPTR(rqp), PWAIT, "90wrq", 0);
	rqp->sr_rplast++;
	SMBRQ_SUNLOCK(rqp);
	error = rqp->sr_lerror;
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		/*
		 * If the request should stay in the list, reinsert it
		 * at the end of the queue so other waiters get a chance
		 * to proceed.
		 */
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
	} else
		smb_iod_removerq(rqp);
	return error;
}

static int
smb_iod_sendall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_rq *rqp;
	struct timespec ts, tstimeout;
	int herror;

	herror = 0;
	/*
	 * Loop through the list of requests and send them if possible
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		switch (rqp->sr_state) {
		    case SMBRQ_NOTSENT:
			rqp->sr_flags |= SMBR_XLOCK;
			SMB_IOD_RQUNLOCK(iod);
			herror = smb_iod_sendrq(iod, rqp);
			SMB_IOD_RQLOCK(iod);
			rqp->sr_flags &= ~SMBR_XLOCK;
			if (rqp->sr_flags & SMBR_XLOCKWANT) {
				rqp->sr_flags &= ~SMBR_XLOCKWANT;
				wakeup(rqp);
			}
			break;
		    case SMBRQ_SENT:
			SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout);
			timespecadd(&tstimeout, &tstimeout);
			getnanotime(&ts);
			timespecsub(&ts, &tstimeout);
			if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
				smb_iod_rqprocessed(rqp, ETIMEDOUT);
			}
			break;
		    default:
			break;
		}
		if (herror)
			break;
	}
	SMB_IOD_RQUNLOCK(iod);
	if (herror == ENOTCONN)
		smb_iod_dead(iod);
	return 0;
}

/*
 * "main" function for smbiod daemon
 */
static __inline void
smb_iod_main(struct smbiod *iod)
{
/*	struct smb_vc *vcp = iod->iod_vc;*/
	struct smbiod_event *evp;
/*	struct timespec tsnow;*/
	int error;

	SMBIODEBUG("\n");
	error = 0;

	/*
	 * Check all interesting events
	 */
	for (;;) {
		SMB_IOD_EVLOCK(iod);
		evp = STAILQ_FIRST(&iod->iod_evlist);
		if (evp == NULL) {
			SMB_IOD_EVUNLOCK(iod);
			break;
		}
		STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
		evp->ev_type |= SMBIOD_EV_PROCESSING;
		SMB_IOD_EVUNLOCK(iod);
		switch (evp->ev_type & SMBIOD_EV_MASK) {
		    case SMBIOD_EV_CONNECT:
			iod->iod_state = SMBIOD_ST_RECONNECT;
			evp->ev_error = smb_iod_connect(iod);
			break;
		    case SMBIOD_EV_DISCONNECT:
			evp->ev_error = smb_iod_disconnect(iod);
			break;
		    case SMBIOD_EV_TREECONNECT:
			evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
			break;
		    case SMBIOD_EV_SHUTDOWN:
			iod->iod_flags |= SMBIOD_SHUTDOWN;
			break;
		    case SMBIOD_EV_NEWRQ:
			break;
		}
		if (evp->ev_type & SMBIOD_EV_SYNC) {
			SMB_IOD_EVLOCK(iod);
			wakeup(evp);
			SMB_IOD_EVUNLOCK(iod);
		} else
			free(evp, M_SMBIOD);
	}
#if 0
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		getnanotime(&tsnow);
		timespecsub(&tsnow, &iod->iod_pingtimo);
		if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
			smb_smb_echo(vcp, &iod->iod_scred);
		}
	}
#endif
	smb_iod_sendall(iod);
	smb_iod_recvall(iod);
	return;
}


static void
smb_iod_thread(void *arg)
{
	struct smbiod *iod = arg;

	mtx_lock(&Giant);
	/*
	 * Here we assume that the thread structure will stay the same
	 * for the entire life of the kthread (kproc, to be more precise).
	 */
	iod->iod_td = curthread;
	smb_makescred(&iod->iod_scred, iod->iod_td, NULL);
	while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
		smb_iod_main(iod);
		SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
/*		mtx_unlock(&Giant, MTX_DEF);*/
		if (iod->iod_flags & SMBIOD_SHUTDOWN)
			break;
		tsleep(&iod->iod_flags, PWAIT, "90idle", iod->iod_sleeptimo);
	}
/*	mtx_lock(&Giant, MTX_DEF);*/
	kthread_exit(0);
}

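/*
 * Allocate and initialize an iod for a new VC and start its kthread.
 */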
int
smb_iod_create(struct smb_vc *vcp)
{
	struct smbiod *iod;
	int error;

	iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
	iod->iod_id = smb_iod_next++;
	iod->iod_state = SMBIOD_ST_NOTCONN;
	iod->iod_vc = vcp;
	iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
	iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
	getnanotime(&iod->iod_lastrqsent);
	vcp->vc_iod = iod;
	smb_sl_init(&iod->iod_rqlock, "90rql");
	TAILQ_INIT(&iod->iod_rqlist);
	smb_sl_init(&iod->iod_evlock, "90evl");
	STAILQ_INIT(&iod->iod_evlist);
	error = kthread_create(smb_iod_thread, iod, &iod->iod_p,
	    RFNOWAIT, "smbiod%d", iod->iod_id);
	if (error) {
		SMBERROR("can't start smbiod: %d\n", error);
		free(iod, M_SMBIOD);
		return error;
	}
	return 0;
}

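/*
 * Synchronously shut down the iod thread and release its resources.
 */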
int
smb_iod_destroy(struct smbiod *iod)
{
	smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
	smb_sl_destroy(&iod->iod_rqlock);
	smb_sl_destroy(&iod->iod_evlock);
	free(iod, M_SMBIOD);
	return 0;
}

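/*
 * Subsystem init hook; nothing to set up yet.
 */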
int
smb_iod_init(void)
{
	return 0;
}

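/*
 * Subsystem teardown hook; nothing to release yet.
 */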
int
smb_iod_done(void)
{
	return 0;
}