/*
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netsmb/smb_rq.c 124087 2004-01-02 22:38:42Z tjr $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/mbuf.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>

MALLOC_DEFINE(M_SMBRQ, "SMBRQ", "SMB request");

MODULE_DEPEND(netsmb, libmchain, 1, 1, 1);

static int  smb_rq_reply(struct smb_rq *rqp);
static int  smb_rq_enqueue(struct smb_rq *rqp);
static int  smb_rq_getenv(struct smb_connobj *layer,
		struct smb_vc **vcpp, struct smb_share **sspp);
static int  smb_rq_new(struct smb_rq *rqp, u_char cmd);
static int  smb_t2_reply(struct smb_t2rq *t2p);

/*
 * Allocate and initialize a request structure.  On success the caller
 * owns the request and must release it with smb_rq_done().
 */
int
smb_rq_alloc(struct smb_connobj *layer, u_char cmd, struct smb_cred *scred,
	struct smb_rq **rqpp)
{
	struct smb_rq *rqp;
	int error;

	MALLOC(rqp, struct smb_rq *, sizeof(*rqp), M_SMBRQ, M_WAITOK);
	if (rqp == NULL)
		return ENOMEM;
	error = smb_rq_init(rqp, layer, cmd, scred);
	rqp->sr_flags |= SMBR_ALLOCED;
	if (error) {
		smb_rq_done(rqp);
		return error;
	}
	*rqpp = rqp;
	return 0;
}

static char tzero[12];		/* 12 zero bytes for the unsigned-header case */

int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *layer, u_char cmd,
	struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof(*rqp));
	smb_sl_init(&rqp->sr_slock, "srslock");
	error = smb_rq_getenv(layer, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return error;
	error = smb_vc_access(rqp->sr_vc, scred, SMBM_EXEC);
	if (error)
		return error;
	if (rqp->sr_share) {
		error = smb_share_access(rqp->sr_share, scred, SMBM_EXEC);
		if (error)
			return error;
	}
	rqp->sr_cred = scred;
	rqp->sr_mid = smb_vc_nextmid(rqp->sr_vc);
	return smb_rq_new(rqp, cmd);
}

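/*
 * Start (or restart) a request: reset the mbuf chains and marshal the
 * fixed SMB header.  As built below, the header consists of the 4-byte
 * signature "\377SMB", a 1-byte command, a 4-byte status (DosError),
 * 1 byte of flags and 2 bytes of flags2, followed by 12 bytes that are
 * either all zero or, when message signing is in use, PID-high, an
 * 8-byte security signature and 2 reserved bytes, and finally TID, PID,
 * UID and MID, 2 bytes each.  The TID and UID slots are only reserved
 * here (via mb_reserve()) and are filled in later, when the request is
 * actually sent.
 */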
static int
smb_rq_new(struct smb_rq *rqp, u_char cmd)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct mbchain *mbp = &rqp->sr_rq;
	int error;
	u_int16_t flags2;

	rqp->sr_sendcnt = 0;
	mb_done(mbp);
	md_done(&rqp->sr_rp);
	error = mb_init(mbp);
	if (error)
		return error;
	mb_put_mem(mbp, SMB_SIGNATURE, SMB_SIGLEN, MB_MSYSTEM);
	mb_put_uint8(mbp, cmd);
	mb_put_uint32le(mbp, 0);		/* DosError */
	mb_put_uint8(mbp, vcp->vc_hflags);
	flags2 = vcp->vc_hflags2;
	if (cmd == SMB_COM_TRANSACTION || cmd == SMB_COM_TRANSACTION_SECONDARY)
		flags2 &= ~SMB_FLAGS2_UNICODE;
	if (cmd == SMB_COM_NEGOTIATE)
		flags2 &= ~SMB_FLAGS2_SECURITY_SIGNATURE;
	mb_put_uint16le(mbp, flags2);
	if ((flags2 & SMB_FLAGS2_SECURITY_SIGNATURE) == 0) {
		mb_put_mem(mbp, tzero, 12, MB_MSYSTEM);
		rqp->sr_rqsig = NULL;
	} else {
		mb_put_uint16le(mbp, 0 /*scred->sc_p->p_pid >> 16*/);
		rqp->sr_rqsig = (u_int8_t *)mb_reserve(mbp, 8);
		mb_put_uint16le(mbp, 0);
	}
	rqp->sr_rqtid = (u_int16_t*)mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, 1 /*scred->sc_p->p_pid & 0xffff*/);
	rqp->sr_rquid = (u_int16_t*)mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, rqp->sr_mid);
	return 0;
}

void
smb_rq_done(struct smb_rq *rqp)
{
	mb_done(&rqp->sr_rq);
	md_done(&rqp->sr_rp);
	smb_sl_destroy(&rqp->sr_slock);
	if (rqp->sr_flags & SMBR_ALLOCED)
		free(rqp, M_SMBRQ);
}

/*
 * Simple request-reply exchange: queue the request, wait for the reply
 * and retry, up to SMB_MAXRCN times, as long as the request is marked
 * for restart.
 */
int
smb_rq_simple(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int error = EINVAL, i;

	for (i = 0; i < SMB_MAXRCN; i++) {
		rqp->sr_flags &= ~SMBR_RESTART;
		rqp->sr_timo = vcp->vc_timo;
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_rq_enqueue(rqp);
		if (error)
			return error;
		error = smb_rq_reply(rqp);
		if (error == 0)
			break;
		if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) != SMBR_RESTART)
			break;
	}
	return error;
}
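
/*
 * Illustrative sketch (not compiled in): how a typical consumer such as
 * smbfs drives the simple request path.  The command and the exact words
 * and bytes marshalled depend entirely on the request being sent;
 * SMB_COM_ECHO and the payload below are placeholders only, `ssp' and
 * `scred' are assumed to be a connected share and a credential prepared
 * by the caller (e.g. with smb_makescred()), and error handling is
 * omitted for brevity.
 *
 *	struct smb_rq *rqp;
 *	struct mbchain *mbp;
 *	struct mdchain *mdp;
 *	int error;
 *
 *	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_ECHO, scred, &rqp);
 *	if (error)
 *		return error;
 *	smb_rq_getrequest(rqp, &mbp);
 *	smb_rq_wstart(rqp);
 *	mb_put_uint16le(mbp, 1);		(parameter words)
 *	smb_rq_wend(rqp);
 *	smb_rq_bstart(rqp);
 *	mb_put_uint32le(mbp, 0);		(data bytes)
 *	smb_rq_bend(rqp);
 *	error = smb_rq_simple(rqp);
 *	if (error == 0) {
 *		smb_rq_getreply(rqp, &mdp);
 *		(md_get_*() the reply words and bytes here)
 *	}
 *	smb_rq_done(rqp);
 */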

/*
 * Hand the request over to the connection's iod.  Requests without a
 * share and requests issued by the iod itself are queued directly.
 * Otherwise, if the share is in the middle of a reconnect, wait for it
 * to finish (or for a signal); if the share is connected but no longer
 * valid, ask the iod to redo the tree connect first.  An EXDEV return
 * from smb_iod_addrq() makes us re-check the share state and try again.
 */
static int
smb_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_share *ssp = rqp->sr_share;
	int error;

	if (ssp == NULL || rqp->sr_cred == &rqp->sr_vc->vc_iod->iod_scred) {
		return smb_iod_addrq(rqp);
	}
	for (;;) {
		SMBS_ST_LOCK(ssp);
		if (ssp->ss_flags & SMBS_RECONNECTING) {
			msleep(&ssp->ss_vcgenid, SMBS_ST_LOCKPTR(ssp),
			    PWAIT | PDROP, "90trcn", hz);
			if (smb_td_intr(rqp->sr_cred->scr_td))
				return EINTR;
			continue;
		}
		if (smb_share_valid(ssp) || (ssp->ss_flags & SMBS_CONNECTED) == 0) {
			SMBS_ST_UNLOCK(ssp);
		} else {
			SMBS_ST_UNLOCK(ssp);
			error = smb_iod_request(rqp->sr_vc->vc_iod,
			    SMBIOD_EV_TREECONNECT | SMBIOD_EV_SYNC, ssp);
			if (error)
				return error;
		}
		error = smb_iod_addrq(rqp);
		if (error != EXDEV)
			break;
	}
	return error;
}

void
smb_rq_wstart(struct smb_rq *rqp)
{
	rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof(u_int8_t));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_wend(struct smb_rq *rqp)
{
	if (rqp->sr_wcount == NULL) {
		SMBERROR("no wcount\n");	/* actually panic */
		return;
	}
	if (rqp->sr_rq.mb_count & 1)
		SMBERROR("odd word count\n");
	*rqp->sr_wcount = rqp->sr_rq.mb_count / 2;
}

void
smb_rq_bstart(struct smb_rq *rqp)
{
	rqp->sr_bcount = (u_short*)mb_reserve(&rqp->sr_rq, sizeof(u_short));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_bend(struct smb_rq *rqp)
{
	int bcnt;

	if (rqp->sr_bcount == NULL) {
		SMBERROR("no bcount\n");	/* actually panic */
		return;
	}
	bcnt = rqp->sr_rq.mb_count;
	if (bcnt > 0xffff)
		SMBERROR("byte count too large (%d)\n", bcnt);
	*rqp->sr_bcount = htole16(bcnt);
}

int
smb_rq_intr(struct smb_rq *rqp)
{
	if (rqp->sr_flags & SMBR_INTR)
		return EINTR;
	return smb_td_intr(rqp->sr_cred->scr_td);
}

int
smb_rq_getrequest(struct smb_rq *rqp, struct mbchain **mbpp)
{
	*mbpp = &rqp->sr_rq;
	return 0;
}

int
smb_rq_getreply(struct smb_rq *rqp, struct mdchain **mbpp)
{
	*mbpp = &rqp->sr_rp;
	return 0;
}

static int
smb_rq_getenv(struct smb_connobj *layer,
	struct smb_vc **vcpp, struct smb_share **sspp)
{
	struct smb_vc *vcp = NULL;
	struct smb_share *ssp = NULL;
	struct smb_connobj *cp;
	int error = 0;

	switch (layer->co_level) {
	    case SMBL_VC:
		vcp = CPTOVC(layer);
		if (layer->co_parent == NULL) {
			SMBERROR("zombie VC %s\n", vcp->vc_srvname);
			error = EINVAL;
			break;
		}
		break;
	    case SMBL_SHARE:
		ssp = CPTOSS(layer);
		cp = layer->co_parent;
		if (cp == NULL) {
			SMBERROR("zombie share %s\n", ssp->ss_name);
			error = EINVAL;
			break;
		}
		error = smb_rq_getenv(cp, &vcp, NULL);
		if (error)
			break;
		break;
	    default:
		SMBERROR("invalid layer %d passed\n", layer->co_level);
		error = EINVAL;
	}
	if (vcpp)
		*vcpp = vcp;
	if (sspp)
		*sspp = ssp;
	return error;
}

/*
 * Wait for the reply to arrive, then pull apart the fixed part of the
 * response header: the error information (either a 32-bit status or a
 * DOS error class/code pair, which smb_maperror() turns into an errno
 * value), the reply flags and the TID/PID/UID/MID fields.
 */
static int
smb_rq_reply(struct smb_rq *rqp)
{
	struct mdchain *mdp = &rqp->sr_rp;
	u_int32_t tdw;
	u_int8_t tb;
	int error, rperror = 0;

	error = smb_iod_waitrq(rqp);
	if (error)
		return error;
	error = md_get_uint32(mdp, &tdw);
	if (error)
		return error;
	error = md_get_uint8(mdp, &tb);
	if (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_ERR_STATUS) {
		error = md_get_uint32le(mdp, &rqp->sr_error);
	} else {
		error = md_get_uint8(mdp, &rqp->sr_errclass);
		error = md_get_uint8(mdp, &tb);
		error = md_get_uint16le(mdp, &rqp->sr_serror);
		if (!error)
			rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
	}
	error = md_get_uint8(mdp, &rqp->sr_rpflags);
	error = md_get_uint16le(mdp, &rqp->sr_rpflags2);

	error = md_get_uint32(mdp, &tdw);
	error = md_get_uint32(mdp, &tdw);
	error = md_get_uint32(mdp, &tdw);

	error = md_get_uint16le(mdp, &rqp->sr_rptid);
	error = md_get_uint16le(mdp, &rqp->sr_rppid);
	error = md_get_uint16le(mdp, &rqp->sr_rpuid);
	error = md_get_uint16le(mdp, &rqp->sr_rpmid);

	if (error == 0 &&
	    (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_SECURITY_SIGNATURE))
		error = smb_rq_verify(rqp);

	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n",
	    rqp->sr_rpmid, rqp->sr_rppid, rqp->sr_rpuid, rqp->sr_rptid,
	    rqp->sr_errclass, rqp->sr_serror);
	return error ? error : rperror;
}


#define ALIGN4(a)	(((a) + 3) & ~3)	/* round up to a 4-byte boundary */

/*
 * TRANS2 request implementation
 */
/*
 * Allocate and initialize a TRANS2 request; release it with smb_t2_done().
 */
int
smb_t2_alloc(struct smb_connobj *layer, u_short setup, struct smb_cred *scred,
	struct smb_t2rq **t2pp)
{
	struct smb_t2rq *t2p;
	int error;

	MALLOC(t2p, struct smb_t2rq *, sizeof(*t2p), M_SMBRQ, M_WAITOK);
	if (t2p == NULL)
		return ENOMEM;
	error = smb_t2_init(t2p, layer, setup, scred);
	t2p->t2_flags |= SMBT2_ALLOCED;
	if (error) {
		smb_t2_done(t2p);
		return error;
	}
	*t2pp = t2p;
	return 0;
}

int
smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, u_short setup,
	struct smb_cred *scred)
{
	int error;

	bzero(t2p, sizeof(*t2p));
	t2p->t2_source = source;
	t2p->t2_setupcount = 1;
	t2p->t2_setupdata = t2p->t2_setup;
	t2p->t2_setup[0] = setup;
	t2p->t2_fid = 0xffff;
	t2p->t2_cred = scred;
	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
	if (error)
		return error;
	return 0;
}

void
smb_t2_done(struct smb_t2rq *t2p)
{
	mb_done(&t2p->t2_tparam);
	mb_done(&t2p->t2_tdata);
	md_done(&t2p->t2_rparam);
	md_done(&t2p->t2_rdata);
	if (t2p->t2_flags & SMBT2_ALLOCED)
		free(t2p, M_SMBRQ);
}

/*
 * Split the parameter or data section out of a reply packet: cut the mbuf
 * chain at `offset', trim the piece to `count' bytes and append it to the
 * given mdchain.
 */
static int
smb_t2_placedata(struct mbuf *mtop, u_int16_t offset, u_int16_t count,
	struct mdchain *mdp)
{
	struct mbuf *m, *m0;
	int len;

	m0 = m_split(mtop, offset, M_TRYWAIT);
	if (m0 == NULL)
		return EBADRPC;
	len = m_length(m0, &m);
	m->m_len -= len - count;
	if (mdp->md_top == NULL) {
		md_initm(mdp, m0);
	} else
		m_cat(mdp->md_top, m0);
	return 0;
}

static int
smb_t2_reply(struct smb_t2rq *t2p)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = t2p->t2_rq;
	int error, totpgot, totdgot;
	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int16_t tmp, bc, dcount;
	u_int8_t wc;

	error = smb_rq_reply(rqp);
	if (error)
		return error;
	if ((t2p->t2_flags & SMBT2_ALLSENT) == 0) {
		/*
		 * This is an interim response, ignore it.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		return 0;
	}
	/*
	 * Now collect all the remaining responses.  The CIFS specification
	 * allows them to arrive out of order, but anything disordered is
	 * rejected below.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		m_dumpm(mdp->md_top);
		if ((error = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 10) {
			error = ENOENT;
			break;
		}
		if ((error = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		md_get_uint16le(mdp, &tmp);
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBERROR("Can't handle disordered parameters %d:%d\n",
			    pdisp, totpgot);
			error = EINVAL;
			break;
		}
		if ((error = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBERROR("Can't handle disordered data\n");
			error = EINVAL;
			break;
		}
		md_get_uint8(mdp, &wc);		/* setup count */
		md_get_uint8(mdp, NULL);	/* reserved */
		tmp = wc;
		while (tmp--)			/* skip the setup words */
			md_get_uint16(mdp, NULL);
		if ((error = md_get_uint16le(mdp, &bc)) != 0)
			break;
/*		tmp = SMB_HDRLEN + 1 + 10 * 2 + 2 * wc + 2;*/
		if (dcount) {
			error = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &t2p->t2_rdata);
			if (error)
				break;
		}
		if (pcount) {
			error = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &t2p->t2_rparam);
			if (error)
				break;
		}
		totpgot += pcount;
		totdgot += dcount;
		if (totpgot >= totpcount && totdgot >= totdcount) {
			error = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		error = smb_rq_reply(rqp);
		if (error)
			break;
	}
	return error;
}

/*
 * Perform a full round of a TRANS2 (or TRANSACTION) exchange: build and
 * send the primary request, push any parameter/data bytes that did not
 * fit into secondary requests, then collect the reply.
 */
static int
smb_t2_request_int(struct smb_t2rq *t2p)
{
	struct smb_vc *vcp = t2p->t2_vc;
	struct smb_cred *scred = t2p->t2_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbparam, mbdata;
	struct mbuf *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
	int error, doff, poff, txdcount, txpcount, nmlen;

	m = t2p->t2_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0xffff)		/* maxvalue for u_short */
			return EINVAL;
	} else
		totpcount = 0;
	m = t2p->t2_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0xffff)
			return EINVAL;
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
	    SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
	if (error)
		return error;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	t2p->t2_rq = rqp;
	rqp->sr_t2 = t2p;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, totpcount);
	mb_put_uint16le(mbp, totdcount);
	mb_put_uint16le(mbp, t2p->t2_maxpcount);
	mb_put_uint16le(mbp, t2p->t2_maxdcount);
	mb_put_uint8(mbp, t2p->t2_maxscount);
	mb_put_uint8(mbp, 0);			/* reserved */
	mb_put_uint16le(mbp, 0);			/* flags */
	mb_put_uint32le(mbp, 0);			/* Timeout */
	mb_put_uint16le(mbp, 0);			/* reserved 2 */
	len = mb_fixhdr(mbp);
	/*
	 * The packet size is now known:
	 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + strlen(name) + 1).
	 * Decide which parts go into this first request.
	 */
	nmlen = t2p->t_name ? strlen(t2p->t_name) : 0;
	len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmlen + 1);
	if (len + leftpcount > txmax) {
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint16le(mbp, txpcount);
	mb_put_uint16le(mbp, poff);
	mb_put_uint16le(mbp, txdcount);
	mb_put_uint16le(mbp, doff);
	mb_put_uint8(mbp, t2p->t2_setupcount);
	mb_put_uint8(mbp, 0);
	for (i = 0; i < t2p->t2_setupcount; i++)
		mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	/* TDUNICODE */
	if (t2p->t_name)
		mb_put_mem(mbp, t2p->t_name, nmlen, MB_MSYSTEM);
	mb_put_uint8(mbp, 0);	/* terminating zero */
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but that's it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount == 0 && leftdcount == 0)
		t2p->t2_flags |= SMBT2_ALLSENT;
	error = smb_t2_reply(t2p);
	if (error)
		goto bad;
	while (leftpcount || leftdcount) {
		t2p->t2_flags |= SMBT2_SECONDARY;
		error = smb_rq_new(rqp, t2p->t_name ?
		    SMB_COM_TRANSACTION_SECONDARY : SMB_COM_TRANSACTION2_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_uint16le(mbp, totpcount);
		mb_put_uint16le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * The packet size is now known:
		 * ALIGN4(len + 7 * 2 + 2) for a TRANSACTION2 secondary and
		 * two bytes less for a TRANSACTION secondary.  Decide which
		 * parts go into this request.
		 */
		len = ALIGN4(len + 6 * 2 + 2);
		if (t2p->t_name == NULL)
			len += 2;
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint16le(mbp, txpcount);
		mb_put_uint16le(mbp, poff);
		mb_put_uint16le(mbp, totpcount - leftpcount);
		mb_put_uint16le(mbp, txdcount);
		mb_put_uint16le(mbp, doff);
		mb_put_uint16le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		if (t2p->t_name == NULL)
			mb_put_uint16le(mbp, t2p->t2_fid);
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		mb_put_uint8(mbp, 0);	/* name */
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_NEWRQ, NULL);
		if (error)
			goto bad;
	}	/* while left params or data */
	t2p->t2_flags |= SMBT2_ALLSENT;
	mdp = &t2p->t2_rdata;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
	mdp = &t2p->t2_rparam;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error) {
		/*
		 * Look at sr_flags before smb_rq_done() releases the request.
		 */
		if (rqp->sr_flags & SMBR_RESTART)
			t2p->t2_flags |= SMBT2_RESTART;
		md_done(&t2p->t2_rparam);
		md_done(&t2p->t2_rdata);
	}
	smb_rq_done(rqp);
	return error;
}

int
smb_t2_request(struct smb_t2rq *t2p)
{
	int error = EINVAL, i;

	for (i = 0; i < SMB_MAXRCN; i++) {
		t2p->t2_flags &= ~SMBT2_RESTART;
		error = smb_t2_request_int(t2p);
		if (error == 0)
			break;
		if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) != SMBT2_RESTART)
			break;
	}
	return error;
}

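/*
 * Illustrative sketch (not compiled in): how a consumer such as smbfs
 * typically drives a TRANS2 exchange.  The setup code, the marshalled
 * parameter bytes and the reply layout all depend on the particular
 * subcommand; SMB_TRANS2_QUERY_FS_INFORMATION below is only a
 * placeholder, `ssp' and `scred' are assumed to be a connected share and
 * a prepared credential, and error handling is omitted for brevity.
 *
 *	struct smb_t2rq *t2p;
 *	struct mbchain *mbp;
 *	struct mdchain *mdp;
 *	u_int16_t tmp;
 *	int error;
 *
 *	error = smb_t2_alloc(SSTOCP(ssp), SMB_TRANS2_QUERY_FS_INFORMATION,
 *	    scred, &t2p);
 *	if (error)
 *		return error;
 *	mbp = &t2p->t2_tparam;
 *	mb_init(mbp);
 *	mb_put_uint16le(mbp, ...);	(request parameters)
 *	t2p->t2_maxpcount = 2;		(how much the server may return)
 *	t2p->t2_maxdcount = 4096;
 *	error = smb_t2_request(t2p);
 *	if (error == 0) {
 *		mdp = &t2p->t2_rparam;	(reply parameters, if any)
 *		md_get_uint16le(mdp, &tmp);
 *		mdp = &t2p->t2_rdata;	(reply data, if any)
 *		...
 *	}
 *	smb_t2_done(t2p);
 */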