/*
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/netsmb/smb_rq.c 88741 2001-12-31 19:29:43Z bp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/mbuf.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>

MALLOC_DEFINE(M_SMBRQ, "SMBRQ", "SMB request");

MODULE_DEPEND(netsmb, libmchain, 1, 1, 1);

static int  smb_rq_reply(struct smb_rq *rqp);
static int  smb_rq_enqueue(struct smb_rq *rqp);
static int  smb_rq_getenv(struct smb_connobj *layer,
		struct smb_vc **vcpp, struct smb_share **sspp);
static int  smb_rq_new(struct smb_rq *rqp, u_char cmd);
static int  smb_t2_reply(struct smb_t2rq *t2p);

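/*
 * Allocate and initialize a request for the given command.  The caller
 * must release the request with smb_rq_done().
 */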
int
smb_rq_alloc(struct smb_connobj *layer, u_char cmd, struct smb_cred *scred,
	struct smb_rq **rqpp)
{
	struct smb_rq *rqp;
	int error;

	MALLOC(rqp, struct smb_rq *, sizeof(*rqp), M_SMBRQ, M_WAITOK);
	if (rqp == NULL)
		return ENOMEM;
	error = smb_rq_init(rqp, layer, cmd, scred);
	rqp->sr_flags |= SMBR_ALLOCED;
	if (error) {
		smb_rq_done(rqp);
		return error;
	}
	*rqpp = rqp;
	return 0;
}

static char tzero[12];

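/*
 * Initialize a caller-provided request: resolve the VC and share from the
 * connection object, verify access rights, assign a MID and start building
 * the SMB header via smb_rq_new().
 */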
int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *layer, u_char cmd,
	struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof(*rqp));
	smb_sl_init(&rqp->sr_slock, "srslock");
	error = smb_rq_getenv(layer, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return error;
	error = smb_vc_access(rqp->sr_vc, scred, SMBM_EXEC);
	if (error)
		return error;
	if (rqp->sr_share) {
		error = smb_share_access(rqp->sr_share, scred, SMBM_EXEC);
		if (error)
			return error;
	}
	rqp->sr_cred = scred;
	rqp->sr_mid = smb_vc_nextmid(rqp->sr_vc);
	return smb_rq_new(rqp, cmd);
}

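/*
 * (Re)build the SMB header of the request: signature, command, a zeroed
 * status field, header flags and 12 zero bytes of extra header data.  The
 * TID and UID words are only reserved here; the I/O daemon fills them in
 * when the request is actually sent.
 */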
static int
smb_rq_new(struct smb_rq *rqp, u_char cmd)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct mbchain *mbp = &rqp->sr_rq;
	int error;

	rqp->sr_sendcnt = 0;
	mb_done(mbp);
	md_done(&rqp->sr_rp);
	error = mb_init(mbp);
	if (error)
		return error;
	mb_put_mem(mbp, SMB_SIGNATURE, SMB_SIGLEN, MB_MSYSTEM);
	mb_put_uint8(mbp, cmd);
	mb_put_uint32le(mbp, 0);		/* DosError */
	mb_put_uint8(mbp, vcp->vc_hflags);
	mb_put_uint16le(mbp, vcp->vc_hflags2);
	mb_put_mem(mbp, tzero, 12, MB_MSYSTEM);
	rqp->sr_rqtid = (u_int16_t*)mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, 1 /*scred->sc_p->p_pid & 0xffff*/);
	rqp->sr_rquid = (u_int16_t*)mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, rqp->sr_mid);
	return 0;
}

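/*
 * Release the resources held by a request and free it if it was
 * allocated by smb_rq_alloc().
 */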
void
smb_rq_done(struct smb_rq *rqp)
{
	mb_done(&rqp->sr_rq);
	md_done(&rqp->sr_rp);
	smb_sl_destroy(&rqp->sr_slock);
	if (rqp->sr_flags & SMBR_ALLOCED)
		free(rqp, M_SMBRQ);
}

/*
 * Simple request-reply exchange
 */
int
smb_rq_simple(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int error = EINVAL, i;

	for (i = 0; i < SMB_MAXRCN; i++) {
		rqp->sr_flags &= ~SMBR_RESTART;
		rqp->sr_timo = vcp->vc_timo;
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_rq_enqueue(rqp);
		if (error)
			return error;
		error = smb_rq_reply(rqp);
		if (error == 0)
			break;
		if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) != SMBR_RESTART)
			break;
	}
	return error;
}

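/*
 * Hand the request to the I/O daemon.  If the share is being reconnected,
 * wait for the reconnect to finish (or for a signal) and, if necessary,
 * issue a synchronous tree connect before queueing the request.
 */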
static int
smb_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_share *ssp = rqp->sr_share;
	int error;

	if (ssp == NULL || rqp->sr_cred == &rqp->sr_vc->vc_iod->iod_scred) {
		return smb_iod_addrq(rqp);
	}
	for (;;) {
		SMBS_ST_LOCK(ssp);
		if (ssp->ss_flags & SMBS_RECONNECTING) {
			msleep(&ssp->ss_vcgenid, SMBS_ST_LOCKPTR(ssp),
			    PWAIT | PDROP, "90trcn", hz);
			if (smb_proc_intr(rqp->sr_cred->scr_td->td_proc))
				return EINTR;
			continue;
		}
		if (smb_share_valid(ssp) || (ssp->ss_flags & SMBS_CONNECTED) == 0) {
			SMBS_ST_UNLOCK(ssp);
		} else {
			SMBS_ST_UNLOCK(ssp);
			error = smb_iod_request(rqp->sr_vc->vc_iod,
			    SMBIOD_EV_TREECONNECT | SMBIOD_EV_SYNC, ssp);
			if (error)
				return error;
		}
		error = smb_iod_addrq(rqp);
		if (error != EXDEV)
			break;
	}
	return error;
}

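/*
 * smb_rq_wstart() reserves the word-count byte of the request and
 * smb_rq_wend() fills it in from the number of parameter bytes added
 * in between (two bytes per word).
 */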
void
smb_rq_wstart(struct smb_rq *rqp)
{
	rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof(u_int8_t));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_wend(struct smb_rq *rqp)
{
	if (rqp->sr_wcount == NULL) {
		SMBERROR("no wcount\n");	/* actually panic */
		return;
	}
	if (rqp->sr_rq.mb_count & 1)
		SMBERROR("odd word count\n");
	*rqp->sr_wcount = rqp->sr_rq.mb_count / 2;
}

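/*
 * smb_rq_bstart() reserves the byte-count word of the request and
 * smb_rq_bend() fills it in (little-endian) from the number of data
 * bytes added in between.
 */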
void
smb_rq_bstart(struct smb_rq *rqp)
{
	rqp->sr_bcount = (u_short*)mb_reserve(&rqp->sr_rq, sizeof(u_short));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_bend(struct smb_rq *rqp)
{
	int bcnt;

	if (rqp->sr_bcount == NULL) {
		SMBERROR("no bcount\n");	/* actually panic */
		return;
	}
	bcnt = rqp->sr_rq.mb_count;
	if (bcnt > 0xffff)
		SMBERROR("byte count too large (%d)\n", bcnt);
	*rqp->sr_bcount = htoles(bcnt);
}

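/*
 * Return EINTR if the request was interrupted or the calling process
 * has a pending signal.
 */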
int
smb_rq_intr(struct smb_rq *rqp)
{
	struct proc *p = rqp->sr_cred->scr_td->td_proc;

	if (rqp->sr_flags & SMBR_INTR)
		return EINTR;
	return smb_proc_intr(p);
}

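/*
 * Accessors for the request (mbchain) and reply (mdchain) buffers.
 */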
int
smb_rq_getrequest(struct smb_rq *rqp, struct mbchain **mbpp)
{
	*mbpp = &rqp->sr_rq;
	return 0;
}

int
smb_rq_getreply(struct smb_rq *rqp, struct mdchain **mbpp)
{
	*mbpp = &rqp->sr_rp;
	return 0;
}

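/*
 * Resolve the VC and, when applicable, the share from a connection object.
 * Objects whose parent has already gone away are rejected as "zombies".
 */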
static int
smb_rq_getenv(struct smb_connobj *layer,
	struct smb_vc **vcpp, struct smb_share **sspp)
{
	struct smb_vc *vcp = NULL;
	struct smb_share *ssp = NULL;
	struct smb_connobj *cp;
	int error = 0;

	switch (layer->co_level) {
	    case SMBL_VC:
		vcp = CPTOVC(layer);
		if (layer->co_parent == NULL) {
			SMBERROR("zombie VC %s\n", vcp->vc_srvname);
			error = EINVAL;
			break;
		}
		break;
	    case SMBL_SHARE:
		ssp = CPTOSS(layer);
		cp = layer->co_parent;
		if (cp == NULL) {
			SMBERROR("zombie share %s\n", ssp->ss_name);
			error = EINVAL;
			break;
		}
		error = smb_rq_getenv(cp, &vcp, NULL);
		if (error)
			break;
		break;
	    default:
		SMBERROR("invalid layer %d passed\n", layer->co_level);
		error = EINVAL;
	}
	if (vcpp)
		*vcpp = vcp;
	if (sspp)
		*sspp = ssp;
	return error;
}

/*
 * Wait for a reply to the request
 */
static int
smb_rq_reply(struct smb_rq *rqp)
{
	struct mdchain *mdp = &rqp->sr_rp;
	u_int32_t tdw;
	u_int8_t tb;
	int error, rperror = 0;

	error = smb_iod_waitrq(rqp);
	if (error)
		return error;
	error = md_get_uint32(mdp, &tdw);
	if (error)
		return error;
	error = md_get_uint8(mdp, &tb);
	if (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_ERR_STATUS) {
		error = md_get_uint32le(mdp, &rqp->sr_error);
	} else {
		error = md_get_uint8(mdp, &rqp->sr_errclass);
		error = md_get_uint8(mdp, &tb);
		error = md_get_uint16le(mdp, &rqp->sr_serror);
		if (!error)
			rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
	}
	error = md_get_uint8(mdp, &rqp->sr_rpflags);
	error = md_get_uint16le(mdp, &rqp->sr_rpflags2);

	error = md_get_uint32(mdp, &tdw);
	error = md_get_uint32(mdp, &tdw);
	error = md_get_uint32(mdp, &tdw);

	error = md_get_uint16le(mdp, &rqp->sr_rptid);
	error = md_get_uint16le(mdp, &rqp->sr_rppid);
	error = md_get_uint16le(mdp, &rqp->sr_rpuid);
	error = md_get_uint16le(mdp, &rqp->sr_rpmid);

	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n",
	    rqp->sr_rpmid, rqp->sr_rppid, rqp->sr_rpuid, rqp->sr_rptid,
	    rqp->sr_errclass, rqp->sr_serror);
	return error ? error : rperror;
}


#define ALIGN4(a)	(((a) + 3) & ~3)

/*
 * TRANS2 request implementation
 */
int
smb_t2_alloc(struct smb_connobj *layer, u_short setup, struct smb_cred *scred,
	struct smb_t2rq **t2pp)
{
	struct smb_t2rq *t2p;
	int error;

	MALLOC(t2p, struct smb_t2rq *, sizeof(*t2p), M_SMBRQ, M_WAITOK);
	if (t2p == NULL)
		return ENOMEM;
	error = smb_t2_init(t2p, layer, setup, scred);
	t2p->t2_flags |= SMBT2_ALLOCED;
	if (error) {
		smb_t2_done(t2p);
		return error;
	}
	*t2pp = t2p;
	return 0;
}

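/*
 * Initialize a caller-provided TRANS2 request with a single setup word
 * and resolve the VC it will be issued on.
 */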
int
smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, u_short setup,
	struct smb_cred *scred)
{
	int error;

	bzero(t2p, sizeof(*t2p));
	t2p->t2_source = source;
	t2p->t2_setupcount = 1;
	t2p->t2_setupdata = t2p->t2_setup;
	t2p->t2_setup[0] = setup;
	t2p->t2_fid = 0xffff;
	t2p->t2_cred = scred;
	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
	if (error)
		return error;
	return 0;
}

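/*
 * Release the transmit and receive buffers of a TRANS2 request and free
 * it if it was allocated by smb_t2_alloc().
 */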
void
smb_t2_done(struct smb_t2rq *t2p)
{
	mb_done(&t2p->t2_tparam);
	mb_done(&t2p->t2_tdata);
	md_done(&t2p->t2_rparam);
	md_done(&t2p->t2_rdata);
	if (t2p->t2_flags & SMBT2_ALLOCED)
		free(t2p, M_SMBRQ);
}

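/*
 * Extract 'count' bytes starting at 'offset' from the reply mbuf chain
 * and append them to the given mdchain (the parameter or data section of
 * the TRANS2 response being assembled).
 */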
static int
smb_t2_placedata(struct mbuf *mtop, u_int16_t offset, u_int16_t count,
	struct mdchain *mdp)
{
	struct mbuf *m, *m0;
	int len;

	m0 = m_split(mtop, offset, M_TRYWAIT);
	if (m0 == NULL)
		return EBADRPC;
	for (len = 0, m = m0; m->m_next; m = m->m_next)
		len += m->m_len;
	len += m->m_len;
	m->m_len -= len - count;
	if (mdp->md_top == NULL) {
		md_initm(mdp, m0);
	} else
		m_cat(mdp->md_top, m0);
	return 0;
}

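/*
 * Collect the response(s) to a TRANS2 request.  Interim responses are
 * skipped; otherwise the parameter and data fragments of each response
 * are appended to t2_rparam/t2_rdata until the announced totals have
 * been received.
 */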
static int
smb_t2_reply(struct smb_t2rq *t2p)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = t2p->t2_rq;
	int error, totpgot, totdgot;
	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int16_t tmp, bc, dcount;
	u_int8_t wc;

	error = smb_rq_reply(rqp);
	if (error)
		return error;
	if ((t2p->t2_flags & SMBT2_ALLSENT) == 0) {
		/*
		 * this is an interim response, ignore it.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		return 0;
	}
	/*
	 * Now we have to get all subsequent responses.  The CIFS
	 * specification allows them to arrive out of order, which makes
	 * reassembly awkward.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		m_dumpm(mdp->md_top);
		if ((error = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 10) {
			error = ENOENT;
			break;
		}
		if ((error = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		md_get_uint16le(mdp, &tmp);
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBERROR("Can't handle disordered parameters %d:%d\n",
			    pdisp, totpgot);
			error = EINVAL;
			break;
		}
		if ((error = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBERROR("Can't handle disordered data\n");
			error = EINVAL;
			break;
		}
		md_get_uint8(mdp, &wc);
		md_get_uint8(mdp, NULL);
		tmp = wc;
		while (tmp--)
			md_get_uint16(mdp, NULL);
		if ((error = md_get_uint16le(mdp, &bc)) != 0)
			break;
/*		tmp = SMB_HDRLEN + 1 + 10 * 2 + 2 * wc + 2;*/
		if (dcount) {
			error = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &t2p->t2_rdata);
			if (error)
				break;
		}
		if (pcount) {
			error = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &t2p->t2_rparam);
			if (error)
				break;
		}
		totpgot += pcount;
		totdgot += dcount;
		if (totpgot >= totpcount && totdgot >= totdcount) {
			error = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		error = smb_rq_reply(rqp);
		if (error)
			break;
	}
	return error;
}

/*
 * Perform a full round of a TRANS2 request exchange
 */
static int
smb_t2_request_int(struct smb_t2rq *t2p)
{
	struct smb_vc *vcp = t2p->t2_vc;
	struct smb_cred *scred = t2p->t2_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbparam, mbdata;
	struct mbuf *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
	int error, doff, poff, txdcount, txpcount, nmlen;

	m = t2p->t2_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0xffff)		/* maxvalue for u_short */
			return EINVAL;
	} else
		totpcount = 0;
	m = t2p->t2_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0xffff)
			return EINVAL;
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
	    SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
	if (error)
		return error;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	t2p->t2_rq = rqp;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, totpcount);
	mb_put_uint16le(mbp, totdcount);
	mb_put_uint16le(mbp, t2p->t2_maxpcount);
	mb_put_uint16le(mbp, t2p->t2_maxdcount);
	mb_put_uint8(mbp, t2p->t2_maxscount);
	mb_put_uint8(mbp, 0);			/* reserved */
	mb_put_uint16le(mbp, 0);			/* flags */
	mb_put_uint32le(mbp, 0);			/* Timeout */
	mb_put_uint16le(mbp, 0);			/* reserved 2 */
	len = mb_fixhdr(mbp);
	/*
	 * The packet size is now known to be
	 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + strlen(name) + 1);
	 * decide which parts should go into the first request.
	 */
	nmlen = t2p->t_name ? strlen(t2p->t_name) : 0;
	len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmlen + 1);
	if (len + leftpcount > txmax) {
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint16le(mbp, txpcount);
	mb_put_uint16le(mbp, poff);
	mb_put_uint16le(mbp, txdcount);
	mb_put_uint16le(mbp, doff);
	mb_put_uint8(mbp, t2p->t2_setupcount);
	mb_put_uint8(mbp, 0);
	for (i = 0; i < t2p->t2_setupcount; i++)
		mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	/* TDUNICODE */
	if (t2p->t_name)
		mb_put_mem(mbp, t2p->t_name, nmlen, MB_MSYSTEM);
	mb_put_uint8(mbp, 0);	/* terminating zero */
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but that's it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount == 0 && leftdcount == 0)
		t2p->t2_flags |= SMBT2_ALLSENT;
	error = smb_t2_reply(t2p);
	if (error)
		goto bad;
	while (leftpcount || leftdcount) {
		error = smb_rq_new(rqp, t2p->t_name ?
		    SMB_COM_TRANSACTION_SECONDARY : SMB_COM_TRANSACTION2_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_uint16le(mbp, totpcount);
		mb_put_uint16le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * The packet size is now known to be ALIGN4(len + 7 * 2 + 2)
		 * for a TRANS2 request, and 2 bytes less for a TRANS one;
		 * decide which parts should go into this request.
		 */
		len = ALIGN4(len + 6 * 2 + 2);
		if (t2p->t_name == NULL)
			len += 2;
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint16le(mbp, txpcount);
		mb_put_uint16le(mbp, poff);
		mb_put_uint16le(mbp, totpcount - leftpcount);
		mb_put_uint16le(mbp, txdcount);
		mb_put_uint16le(mbp, doff);
		mb_put_uint16le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		if (t2p->t_name == NULL)
			mb_put_uint16le(mbp, t2p->t2_fid);
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		mb_put_uint8(mbp, 0);	/* name */
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_NEWRQ, NULL);
		if (error)
			goto bad;
	}	/* while left params or data */
	t2p->t2_flags |= SMBT2_ALLSENT;
	mdp = &t2p->t2_rdata;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
	mdp = &t2p->t2_rparam;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error) {
		if (rqp->sr_flags & SMBR_RESTART)
			t2p->t2_flags |= SMBT2_RESTART;
		md_done(&t2p->t2_rparam);
		md_done(&t2p->t2_rdata);
	}
	smb_rq_done(rqp);
	return error;
}

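/*
 * Issue a TRANS2 request, retrying up to SMB_MAXRCN times while the
 * exchange is marked for restart (e.g. after a reconnect).
 */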
int
smb_t2_request(struct smb_t2rq *t2p)
{
	int error = EINVAL, i;

	for (i = 0; i < SMB_MAXRCN; i++) {
		t2p->t2_flags &= ~SMBT2_RESTART;
		error = smb_t2_request_int(t2p);
		if (error == 0)
			break;
		if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) != SMBT2_RESTART)
			break;
	}
	return error;
}