/*-
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netsmb/smb_iod.c 206361 2010-04-07 16:50:38Z joel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>

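/*
 * How long the iod thread idles between polls, and how often an
 * otherwise idle connection would be pinged (both in seconds; the
 * ping code is currently compiled out below).
 */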
#define SMBIOD_SLEEP_TIMO	2
#define	SMBIOD_PING_TIMO	60	/* seconds */

#define	SMB_IOD_EVLOCKPTR(iod)	(&((iod)->iod_evlock))
#define	SMB_IOD_EVLOCK(iod)	smb_sl_lock(&((iod)->iod_evlock))
#define	SMB_IOD_EVUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_evlock))

#define	SMB_IOD_RQLOCKPTR(iod)	(&((iod)->iod_rqlock))
#define	SMB_IOD_RQLOCK(iod)	smb_sl_lock(&((iod)->iod_rqlock))
#define	SMB_IOD_RQUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_rqlock))

#define	smb_iod_wakeup(iod)	wakeup(&(iod)->iod_flags)

static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");

static int smb_iod_next;

static int  smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);

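/*
 * Mark a request as processed: record the error code, advance the
 * response generation and wake up anyone sleeping in smb_iod_waitrq().
 */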
static __inline void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{
	SMBRQ_SLOCK(rqp);
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	wakeup(&rqp->sr_state);
	SMBRQ_SUNLOCK(rqp);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, ENOTCONN);
	}
	SMB_IOD_RQUNLOCK(iod);
}

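/*
 * Tear down the transport connection, if one is attached to the VC.
 */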
static void
smb_iod_closetran(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;

	if (vcp->vc_tdata == NULL)
		return;
	SMB_TRAN_DISCONNECT(vcp, td);
	SMB_TRAN_DONE(vcp, td);
	vcp->vc_tdata = NULL;
}

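/*
 * Declare the connection dead: close the transport and fail all
 * outstanding requests with ENOTCONN.
 */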
static void
smb_iod_dead(struct smbiod *iod)
{
	iod->iod_state = SMBIOD_ST_DEAD;
	smb_iod_closetran(iod);
	smb_iod_invrq(iod);
}

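/*
 * (Re)establish the virtual circuit: create, bind and connect the
 * transport, then run the SMB NEGOTIATE and SESSION SETUP exchanges.
 */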
static int
smb_iod_connect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	int error;

	SMBIODEBUG("%d\n", iod->iod_state);
	switch(iod->iod_state) {
	    case SMBIOD_ST_VCACTIVE:
		SMBERROR("called for already opened connection\n");
		return EISCONN;
	    case SMBIOD_ST_DEAD:
		return ENOTCONN;	/* XXX: last error code ? */
	    default:
		break;
	}
	vcp->vc_genid++;
	error = 0;

	error = (int)SMB_TRAN_CREATE(vcp, td);
	if (error)
		goto fail;
	SMBIODEBUG("tcreate\n");
	if (vcp->vc_laddr) {
		error = (int)SMB_TRAN_BIND(vcp, vcp->vc_laddr, td);
		if (error)
			goto fail;
	}
	SMBIODEBUG("tbind\n");
	error = (int)SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, td);
	if (error)
		goto fail;
	SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
	iod->iod_state = SMBIOD_ST_TRANACTIVE;
	SMBIODEBUG("tconnect\n");
	/* vcp->vc_mid = 0;*/
	error = (int)smb_smb_negotiate(vcp, &iod->iod_scred);
	if (error)
		goto fail;
	SMBIODEBUG("snegotiate\n");
	error = (int)smb_smb_ssnsetup(vcp, &iod->iod_scred);
	if (error)
		goto fail;
	iod->iod_state = SMBIOD_ST_VCACTIVE;
	SMBIODEBUG("completed\n");
	smb_iod_invrq(iod);
	return (0);

 fail:
	smb_iod_dead(iod);
	return (error);
}

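/*
 * Log off the session (if active) and shut the transport down,
 * leaving the iod in the SMBIOD_ST_NOTCONN state.
 */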
static int
smb_iod_disconnect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;

	SMBIODEBUG("\n");
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		smb_smb_ssnclose(vcp, &iod->iod_scred);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
	}
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
	smb_iod_closetran(iod);
	iod->iod_state = SMBIOD_ST_NOTCONN;
	return 0;
}

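/*
 * Reconnect the VC if it has died, then redo the TREE CONNECT for the
 * given share and wake up anyone waiting on the share's VC generation.
 */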
static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
	int error;

	if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
		if (iod->iod_state != SMBIOD_ST_DEAD)
			return ENOTCONN;
		iod->iod_state = SMBIOD_ST_RECONNECT;
		error = smb_iod_connect(iod);
		if (error)
			return error;
	}
	SMBIODEBUG("tree reconnect\n");
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags |= SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	error = smb_smb_treeconnect(ssp, &iod->iod_scred);
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags &= ~SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	wakeup(&ssp->ss_vcgenid);
	return error;
}

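/*
 * Try to transmit a single request.  On the first attempt the TID and
 * UID fields are filled in and the packet is signed if required; after
 * several failed attempts the request is failed with SMBR_RESTART set.
 */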
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
	struct thread *td = iod->iod_td;
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_share *ssp = rqp->sr_share;
	struct mbuf *m;
	int error;

	SMBIODEBUG("iod_state = %d\n", iod->iod_state);
	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		smb_iod_rqprocessed(rqp, ENOTCONN);
		return 0;
	    case SMBIOD_ST_DEAD:
		iod->iod_state = SMBIOD_ST_RECONNECT;
		return 0;
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	if (rqp->sr_sendcnt == 0) {
#ifdef movedtoanotherplace
		if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
			return 0;
#endif
		le16enc(rqp->sr_rqtid, ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
		le16enc(rqp->sr_rquid, vcp ? vcp->vc_smbuid : 0);
		mb_fixhdr(&rqp->sr_rq);
		if (vcp->vc_hflags2 & SMB_FLAGS2_SECURITY_SIGNATURE)
			smb_rq_sign(rqp);
	}
	if (rqp->sr_sendcnt++ > 5) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, rqp->sr_lerror);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return ENOTCONN;
	}
	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
	m_dumpm(rqp->sr_rq.mb_top);
	m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_WAIT);
	error = rqp->sr_lerror = SMB_TRAN_SEND(vcp, m, td);
	if (error == 0) {
		getnanotime(&rqp->sr_timesent);
		iod->iod_lastrqsent = rqp->sr_timesent;
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		return 0;
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		return ENOTCONN;
	}
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR);
	return 0;
}

/*
 * Process incoming packets: drain the transport and hand each reply
 * to the pending request with the matching MID.
 */
static int
smb_iod_recvall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	struct smb_rq *rqp;
	struct mbuf *m;
	u_char *hp;
	u_short mid;
	int error;

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
	    case SMBIOD_ST_DEAD:
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	for (;;) {
		m = NULL;
		error = SMB_TRAN_RECV(vcp, &m, td);
		if (error == EWOULDBLOCK)
			break;
		if (SMB_TRAN_FATAL(vcp, error)) {
			smb_iod_dead(iod);
			break;
		}
		if (error)
			break;
		if (m == NULL) {
			SMBERROR("tran returned NULL without error\n");
			error = EPIPE;
			continue;
		}
		m = m_pullup(m, SMB_HDRLEN);
		if (m == NULL)
			continue;	/* wait for a good packet */
		/*
		 * Now we have an entire and possibly invalid SMB packet.
		 * Be careful while parsing it.
		 */
		m_dumpm(m);
		hp = mtod(m, u_char*);
		if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
			m_freem(m);
			continue;
		}
		mid = SMB_HDRMID(hp);
		SMBSDEBUG("mid %04x\n", (u_int)mid);
		SMB_IOD_RQLOCK(iod);
		TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
			if (rqp->sr_mid != mid)
				continue;
			SMBRQ_SLOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_SUNLOCK(rqp);
					SMBERROR("duplicate response %d (ignored)\n", mid);
					m_freem(m);	/* discard the duplicate */
					break;
				}
			}
			SMBRQ_SUNLOCK(rqp);
			smb_iod_rqprocessed(rqp, 0);
			break;
		}
		SMB_IOD_RQUNLOCK(iod);
		if (rqp == NULL) {
			SMBERROR("drop resp with mid %d\n", (u_int)mid);
/*			smb_printrqlist(vcp);*/
			m_freem(m);
		}
	}
	/*
	 * check for interrupts
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (smb_td_intr(rqp->sr_cred->scr_td)) {
			smb_iod_rqprocessed(rqp, EINTR);
		}
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

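/*
 * Post an event to the iod thread.  With SMBIOD_EV_SYNC the caller
 * sleeps until the iod has processed the event and collects its error.
 */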
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
	struct smbiod_event *evp;
	int error;

	SMBIODEBUG("\n");
	evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
	evp->ev_type = event;
	evp->ev_ident = ident;
	SMB_IOD_EVLOCK(iod);
	STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
	if ((event & SMBIOD_EV_SYNC) == 0) {
		SMB_IOD_EVUNLOCK(iod);
		smb_iod_wakeup(iod);
		return 0;
	}
	smb_iod_wakeup(iod);
	msleep(evp, SMB_IOD_EVLOCKPTR(iod), PWAIT | PDROP, "90evw", 0);
	error = evp->ev_error;
	free(evp, M_SMBIOD);
	return error;
}

/*
 * Place a request in the queue.  Requests from the smbiod itself
 * have a high priority.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_cred->scr_td != NULL &&
	    rqp->sr_cred->scr_td->td_proc == iod->iod_p) {
		rqp->sr_flags |= SMBR_INTERNAL;
		SMB_IOD_RQLOCK(iod);
		TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		for (;;) {
			if (smb_iod_sendrq(iod, rqp) != 0) {
				smb_iod_dead(iod);
				break;
			}
			/*
			 * we don't need to lock state field here
			 */
			if (rqp->sr_state != SMBRQ_NOTSENT)
				break;
			tsleep(&iod->iod_flags, PWAIT, "90sndw", hz);
		}
		if (rqp->sr_lerror)
			smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		return ENOTCONN;
	    case SMBIOD_ST_DEAD:
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
		if (error)
			return error;
		return EXDEV;
	    default:
		break;
	}

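	/*
	 * Throttle: wait for a mux slot so that no more than vc_maxmux
	 * requests are outstanding on the wire at once.
	 */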
	SMB_IOD_RQLOCK(iod);
	for (;;) {
		if (vcp->vc_maxmux == 0) {
			SMBERROR("maxmux == 0\n");
			break;
		}
		if (iod->iod_muxcnt < vcp->vc_maxmux)
			break;
		iod->iod_muxwant++;
		msleep(&iod->iod_muxwant, SMB_IOD_RQLOCKPTR(iod),
		    PWAIT, "90mux", 0);
	}
	iod->iod_muxcnt++;
	TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
	SMB_IOD_RQUNLOCK(iod);
	smb_iod_wakeup(iod);
	return 0;
}

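/*
 * Remove a request from the queue and release its mux slot, waiting
 * out any sender that still holds the request exclusively (SMBR_XLOCK).
 */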
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		return 0;
	}
	SMB_IOD_RQLOCK(iod);
	while (rqp->sr_flags & SMBR_XLOCK) {
		rqp->sr_flags |= SMBR_XLOCKWANT;
		msleep(rqp, SMB_IOD_RQLOCKPTR(iod), PWAIT, "90xrm", 0);
	}
	TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
	iod->iod_muxcnt--;
	if (iod->iod_muxwant) {
		iod->iod_muxwant--;
		wakeup(&iod->iod_muxwant);
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

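/*
 * Wait for a request to complete.  SMBR_INTERNAL requests are driven
 * synchronously by pumping send/receive from the caller; all others
 * sleep until the response generation counter advances.
 */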
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smbiod *iod = rqp->sr_vc->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		for (;;) {
			smb_iod_sendall(iod);
			smb_iod_recvall(iod);
			if (rqp->sr_rpgen != rqp->sr_rplast)
				break;
			tsleep(&iod->iod_flags, PWAIT, "90irq", hz);
		}
		smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}
	SMBRQ_SLOCK(rqp);
	if (rqp->sr_rpgen == rqp->sr_rplast)
		msleep(&rqp->sr_state, SMBRQ_SLOCKPTR(rqp), PWAIT, "90wrq", 0);
	rqp->sr_rplast++;
	SMBRQ_SUNLOCK(rqp);
	error = rqp->sr_lerror;
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		/*
		 * If the request should stay in the list, reinsert it
		 * at the end of the queue so other waiters get a chance
		 * to run.
		 */
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
	} else
		smb_iod_removerq(rqp);
	return error;
}

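/*
 * Walk the request list: transmit anything still unsent and time out
 * requests that have been on the wire longer than twice the transport
 * timeout.
 */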
static int
smb_iod_sendall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_rq *rqp;
	struct timespec ts, tstimeout;
	int herror;

	herror = 0;
	/*
	 * Loop through the list of requests and send them if possible
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		switch (rqp->sr_state) {
		    case SMBRQ_NOTSENT:
			rqp->sr_flags |= SMBR_XLOCK;
			SMB_IOD_RQUNLOCK(iod);
			herror = smb_iod_sendrq(iod, rqp);
			SMB_IOD_RQLOCK(iod);
			rqp->sr_flags &= ~SMBR_XLOCK;
			if (rqp->sr_flags & SMBR_XLOCKWANT) {
				rqp->sr_flags &= ~SMBR_XLOCKWANT;
				wakeup(rqp);
			}
			break;
		    case SMBRQ_SENT:
			SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout);
			timespecadd(&tstimeout, &tstimeout);
			getnanotime(&ts);
			timespecsub(&ts, &tstimeout);
			if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
				smb_iod_rqprocessed(rqp, ETIMEDOUT);
			}
			break;
		    default:
			break;
		}
		if (herror)
			break;
	}
	SMB_IOD_RQUNLOCK(iod);
	if (herror == ENOTCONN)
		smb_iod_dead(iod);
	return 0;
}

/*
 * "main" function for smbiod daemon
 */
static __inline void
smb_iod_main(struct smbiod *iod)
{
/*	struct smb_vc *vcp = iod->iod_vc;*/
	struct smbiod_event *evp;
/*	struct timespec tsnow;*/
	int error;

	SMBIODEBUG("\n");
	error = 0;

	/*
	 * Check all interesting events
	 */
	for (;;) {
		SMB_IOD_EVLOCK(iod);
		evp = STAILQ_FIRST(&iod->iod_evlist);
		if (evp == NULL) {
			SMB_IOD_EVUNLOCK(iod);
			break;
		}
		STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
		evp->ev_type |= SMBIOD_EV_PROCESSING;
		SMB_IOD_EVUNLOCK(iod);
		switch (evp->ev_type & SMBIOD_EV_MASK) {
		    case SMBIOD_EV_CONNECT:
			iod->iod_state = SMBIOD_ST_RECONNECT;
			evp->ev_error = smb_iod_connect(iod);
			break;
		    case SMBIOD_EV_DISCONNECT:
			evp->ev_error = smb_iod_disconnect(iod);
			break;
		    case SMBIOD_EV_TREECONNECT:
			evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
			break;
		    case SMBIOD_EV_SHUTDOWN:
			iod->iod_flags |= SMBIOD_SHUTDOWN;
			break;
		    case SMBIOD_EV_NEWRQ:
			break;
		}
		if (evp->ev_type & SMBIOD_EV_SYNC) {
			SMB_IOD_EVLOCK(iod);
			wakeup(evp);
			SMB_IOD_EVUNLOCK(iod);
		} else
			free(evp, M_SMBIOD);
	}
#if 0
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		getnanotime(&tsnow);
		timespecsub(&tsnow, &iod->iod_pingtimo);
		if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
			smb_smb_echo(vcp, &iod->iod_scred);
		}
	}
#endif
	smb_iod_sendall(iod);
	smb_iod_recvall(iod);
	return;
}

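/*
 * Body of the iod kernel process: runs under Giant and keeps calling
 * smb_iod_main() until SMBIOD_SHUTDOWN is set by a shutdown event.
 */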
static void
smb_iod_thread(void *arg)
{
	struct smbiod *iod = arg;

	mtx_lock(&Giant);

	/*
	 * Here we assume that the thread structure will be the same
	 * for the entire life of the kthread (kproc, to be more precise).
	 */
	iod->iod_td = curthread;
	smb_makescred(&iod->iod_scred, iod->iod_td, NULL);
	while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
		smb_iod_main(iod);
		SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
		if (iod->iod_flags & SMBIOD_SHUTDOWN)
			break;
		tsleep(&iod->iod_flags, PWAIT, "90idle", iod->iod_sleeptimo);
	}
	mtx_unlock(&Giant);
	kproc_exit(0);
}

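/*
 * Allocate and start an iod for a freshly created virtual circuit.
 */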
int
smb_iod_create(struct smb_vc *vcp)
{
	struct smbiod *iod;
	int error;

	iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
	iod->iod_id = smb_iod_next++;
	iod->iod_state = SMBIOD_ST_NOTCONN;
	iod->iod_vc = vcp;
	iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
	iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
	getnanotime(&iod->iod_lastrqsent);
	vcp->vc_iod = iod;
	smb_sl_init(&iod->iod_rqlock, "90rql");
	TAILQ_INIT(&iod->iod_rqlist);
	smb_sl_init(&iod->iod_evlock, "90evl");
	STAILQ_INIT(&iod->iod_evlist);
	error = kproc_create(smb_iod_thread, iod, &iod->iod_p,
	    RFNOWAIT, 0, "smbiod%d", iod->iod_id);
	if (error) {
		SMBERROR("can't start smbiod: %d\n", error);
		vcp->vc_iod = NULL;	/* don't leave a dangling pointer behind */
		free(iod, M_SMBIOD);
		return error;
	}
	return 0;
}

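/*
 * Synchronously shut the iod thread down, then release its locks
 * and memory.
 */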
int
smb_iod_destroy(struct smbiod *iod)
{
	smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
	smb_sl_destroy(&iod->iod_rqlock);
	smb_sl_destroy(&iod->iod_evlock);
	free(iod, M_SMBIOD);
	return 0;
}

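/*
 * Placeholder module init/teardown hooks; nothing to do at the moment.
 */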
int
smb_iod_init(void)
{
	return 0;
}

int
smb_iod_done(void)
{
	return 0;
}