1/*
2 * Copyright (c) 2000-2001 Boris Popov
3 * All rights reserved.
4 *
5 * Portions Copyright (C) 2001 - 2012 Apple Inc. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *    This product includes software developed by Boris Popov.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 */
35#include <sys/param.h>
36#include <sys/kernel.h>
37#include <sys/systm.h>
38#include <sys/ioccom.h>
39#include <sys/malloc.h>
40#include <sys/uio.h>
41#include <sys/conf.h>
42#include <sys/kpi_mbuf.h>
43#include <sys/proc.h>
44#include <sys/fcntl.h>
45#include <sys/file.h>
46#include <sys/socket.h>
47#include <sys/select.h>
48#include <sys/sysctl.h>
49#include <sys/vnode.h>
50
51#include <sys/kauth.h>
52
53#include <net/if.h>
54#include <sys/smb_apple.h>
55#include <sys/smb_byte_order.h>
56#include <sys/mchain.h>
57
58#include <netsmb/smb.h>
59#include <netsmb/smb_2.h>
60#include <netsmb/smb_rq.h>
61#include <netsmb/smb_rq_2.h>
62#include <netsmb/smb_conn.h>
63#include <netsmb/smb_conn_2.h>
64#include <netsmb/smb_subr.h>
65#include <netsmb/smb_dev.h>
66#include <netsmb/smb_dev_2.h>
67#include <netsmb/smb_tran.h>
68
69/*
70 * Userland code loops through minor #s 0 to 1023, looking for one which opens.
71 * Intially we create minor 0 and leave it for anyone.  Minor zero will never
72 * actually get used - opening triggers creation of another (but private) minor,
73 * which userland code will get to and mark busy.
74 */
75#define SMBMINORS 1024
76struct smb_dev * smb_dtab[SMBMINORS];
77int smb_minor_hiwat = -1;
78#define SMB_GETDEV(dev)         (smb_dtab[minor(dev)])
79
static d_open_t	 nsmb_dev_open;
static d_close_t nsmb_dev_close;
static d_ioctl_t nsmb_dev_ioctl;

/*
 * Character-device switch table for /dev/nsmb*.  Only open, close and
 * ioctl are implemented; all other entry points are the eno_* stubs
 * (return ENODEV).  Entries are positional per struct cdevsw.
 */
static struct cdevsw nsmb_cdevsw = {
	nsmb_dev_open,
	nsmb_dev_close,
	eno_rdwrt,	/* d_read */
	eno_rdwrt,	/* d_write */
	nsmb_dev_ioctl,
	eno_stop,
	eno_reset,
	0,		/* struct tty ** d_ttys */
	eno_select,
	eno_mmap,
	eno_strat,
	eno_getc,
	eno_putc,
	0		/* d_type */
};
100
/* Major device number from cdevsw_add(); -1 until the driver is loaded. */
int	smb_major = -1;

/* Global lock protecting smb_dtab and open/close vs. ioctl (defined elsewhere). */
extern lck_rw_t  * dev_rw_lck;
extern lck_grp_t  * dev_lck_grp;
extern lck_attr_t * dev_lck_attr;

/* Count of currently-open minors and kext unload flag (defined elsewhere). */
extern int dev_open_cnt;
extern int unloadInProgress;
109
110static int
111nsmb_dev_open_nolock(dev_t dev, int oflags, int devtype, struct proc *p)
112{
113#pragma unused(oflags, devtype, p)
114	struct smb_dev *sdp;
115	kauth_cred_t cred = vfs_context_ucred(vfs_context_current());
116
117	sdp = SMB_GETDEV(dev);
118	if (sdp && (sdp->sd_flags & NSMBFL_OPEN))
119		return (EBUSY);
120	if (!sdp || minor(dev) == 0) {
121		int	avail_minor;
122
123		for (avail_minor = 1; avail_minor < SMBMINORS; avail_minor++)
124			if (!SMB_GETDEV(avail_minor))
125				break;
126		if (avail_minor >= SMBMINORS) {
127			SMBERROR("Too many minor devices, %d >= %d !", avail_minor, SMBMINORS);
128			return (ENOMEM);
129		}
130        SMB_MALLOC(sdp, struct smb_dev *, sizeof(*sdp), M_NSMBDEV, M_WAITOK);
131		bzero(sdp, sizeof(*sdp));
132		dev = makedev(smb_major, avail_minor);
133		sdp->sd_devfs = devfs_make_node(dev, DEVFS_CHAR,
134						kauth_cred_getuid(cred),
135						kauth_cred_getgid(cred),
136						0700, "nsmb%x", avail_minor);
137		if (!sdp->sd_devfs) {
138			SMBERROR("devfs_make_node failed %d\n", avail_minor);
139			SMB_FREE(sdp, M_NSMBDEV);
140			return (ENOMEM);
141		}
142		if (avail_minor > smb_minor_hiwat)
143			smb_minor_hiwat = avail_minor;
144		SMB_GETDEV(dev) = sdp;
145		return (EBUSY);
146	}
147	lck_rw_init(&sdp->sd_rwlock, dev_lck_grp, dev_lck_attr);
148	sdp->sd_flags |= NSMBFL_OPEN;
149	dev_open_cnt++;
150	return (0);
151}
152
153static int
154nsmb_dev_open(dev_t dev, int oflags, int devtype, struct proc *p)
155{
156    int error;
157
158    /* Just some sanity checks for debug purposes only */
159    DBG_ASSERT(sizeof(struct smbioc_negotiate) < SMB_MAX_IOC_SIZE);
160    DBG_ASSERT(sizeof(struct smbioc_setup) < SMB_MAX_IOC_SIZE);
161    DBG_ASSERT(sizeof(struct smbioc_share) < SMB_MAX_IOC_SIZE);
162    DBG_ASSERT(sizeof(struct smbioc_rq) < SMB_MAX_IOC_SIZE);
163    DBG_ASSERT(sizeof(struct smbioc_t2rq) < SMB_MAX_IOC_SIZE);
164    DBG_ASSERT(sizeof(struct smbioc_rw) < SMB_MAX_IOC_SIZE);
165
166    lck_rw_lock_exclusive(dev_rw_lck);
167
168    if (! unloadInProgress) {
169        error = nsmb_dev_open_nolock(dev, oflags, devtype, p);
170    }
171    else {
172        SMBERROR("We are being unloaded\n");
173        error = EBUSY;
174    }
175
176    lck_rw_unlock_exclusive(dev_rw_lck);
177    return (error);
178}
179
/*
 * cdevsw close entry point.  Tears down one nsmb minor device:
 * releases any share and VC references the device holds, removes the
 * devfs node, frees the smb_dev and clears its slot in smb_dtab.
 *
 * Lock order: dev_rw_lck (exclusive) is taken first to freeze the
 * device table, then sd_rwlock (exclusive) to wait out any ioctl that
 * is still running on this device.  Returns EBADF if the device is not
 * open, otherwise 0.
 */
static int
nsmb_dev_close(dev_t dev, int flag, int fmt, struct proc *p)
{
#pragma unused(flag, fmt, p)
	struct smb_dev *sdp;
	struct smb_vc *vcp;
	struct smb_share *share;
	vfs_context_t context;

 	lck_rw_lock_exclusive(dev_rw_lck);
 	sdp = SMB_GETDEV(dev);
	if ((sdp == NULL) || ((sdp->sd_flags & NSMBFL_OPEN) == 0)) {
		lck_rw_unlock_exclusive(dev_rw_lck);
		return (EBADF);
    }

	context = vfs_context_create((vfs_context_t)0);

	/* make sure any ioctls have finished before proceeding */
	lck_rw_lock_exclusive(&sdp->sd_rwlock);

	/* Drop the device's share reference, if any */
	share = sdp->sd_share;
	sdp->sd_share = NULL; /* Just to be extra careful */
	if (share != NULL) {
		smb_share_rele(share, context);
	}

	/* Drop the device's VC reference, if any */
	vcp = sdp->sd_vc;
	sdp->sd_vc = NULL; /* Just to be extra careful */
	if (vcp != NULL)
		smb_vc_rele(vcp, context);

	lck_rw_unlock_exclusive(&sdp->sd_rwlock);

	devfs_remove(sdp->sd_devfs); /* first disallow opens */

	vfs_context_rele(context);

	/* Clear the table slot before destroying/freeing the smb_dev */
	SMB_GETDEV(dev) = NULL;
	lck_rw_destroy(&sdp->sd_rwlock, dev_lck_grp);
	SMB_FREE(sdp, M_NSMBDEV);
	dev_open_cnt--;

	lck_rw_unlock_exclusive(dev_rw_lck);
	return (0);
}
226
/*
 * cdevsw ioctl entry point.  Dispatches all SMBIOC_*/SMB2IOC_* requests
 * for one nsmb minor device.
 *
 * Locking protocol (applies to every case below): take dev_rw_lck
 * shared to safely look up sdp in smb_dtab, then take sd_rwlock
 * (shared for read-only/transient operations, exclusive for operations
 * that change sd_vc/sd_share) and only then drop dev_rw_lck.  Holding
 * sd_rwlock keeps nsmb_dev_close() from tearing the device down while
 * an ioctl is in flight.  Every case is responsible for releasing
 * dev_rw_lck itself.
 */
static int nsmb_dev_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag,
						  struct proc *p)
{
#pragma unused(flag, p)
	struct smb_dev *sdp;
	struct smb_vc *vcp;
	struct smb_share *sharep;
	uint32_t error = 0;
	vfs_context_t context;

	/*
	 * We allow multiple ioctl calls, but never when opening, closing or
	 * getting the mount device. dev_rw_lck is used to keep the dev list
     * from changing as we get the sdp from the dev. Lock dev_rw_lck first,
     * then get the sdp and then get the lock on sd_rwlock. sd_rwlock is
     * held when an ioctl call is still in progress and keeps us from closing
     * the dev with the outstanding ioctl call.
	 */
	lck_rw_lock_shared(dev_rw_lck);
	sdp = SMB_GETDEV(dev);
	if ((sdp == NULL) || ((sdp->sd_flags & NSMBFL_OPEN) == 0)) {
		error = EBADF;
        lck_rw_unlock_shared(dev_rw_lck);
		goto exit;
	}

	context = vfs_context_create((vfs_context_t)0);

	/*
	 * %%% K64
	 * Need to keep checking to see if this gets corrected. The problem here
	 * is ioctl_cmd_t is uint32_t on K64 builds. The _IO defines use sizeof
	 * which returns a size_t. Hopefully either cmd will be changed to u_long
	 * or the _IO defines will have sizeof typed cast to uint32_t.
	 */
	switch (cmd) {
		case SMBIOC_FIND_VC:
		case SMBIOC_NEGOTIATE:
		{
			int searchOnly = (cmd == SMBIOC_FIND_VC) ? TRUE : FALSE;
			struct smbioc_negotiate * vspec = (struct smbioc_negotiate *)data;

			/* protect against anyone else playing with the smb dev structure */
			lck_rw_lock_exclusive(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

            /* Make sure the version matches */
			if (vspec->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (sdp->sd_vc || sdp->sd_share) {
				error = EISCONN;
			} else {
				error = smb_usr_negotiate(vspec, context, sdp, searchOnly);
			}

			lck_rw_unlock_exclusive(&sdp->sd_rwlock);
			break;
		}
		case SMBIOC_NTWRK_IDENTITY:
		{
			struct smbioc_ntwrk_identity * ntwrkID = (struct smbioc_ntwrk_identity *)data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Make sure the version matches */
			if (ntwrkID->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (!sdp->sd_vc) {
				error = ENOTCONN;
			} else {
				error = smb_usr_set_network_identity(sdp->sd_vc, ntwrkID);
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}
		case SMBIOC_SSNSETUP:
		{
			struct smbioc_setup * sspec = (struct smbioc_setup *)data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Make sure the version matches */
			if (sspec->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (sdp->sd_share) {
				error = EISCONN;
			} else if (!sdp->sd_vc) {
				error = ENOTCONN;
			} else {
				error = smb_sm_ssnsetup(sdp->sd_vc, sspec, context);
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}
		case SMBIOC_CONVERT_PATH:
		{
			struct smbioc_path_convert * dp = (struct smbioc_path_convert *)data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Make sure the version matches */
			if (dp->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (!sdp->sd_vc) {
				error = ENOTCONN;
			} else {
				/* Take the 32 bit world pointers and convert them to user_addr_t. */
				if (! vfs_context_is64bit (context)) {
					dp->ioc_kern_src = CAST_USER_ADDR_T(dp->ioc_src);
					dp->ioc_kern_dest = CAST_USER_ADDR_T(dp->ioc_dest);
				}
				if (!dp->ioc_kern_src || !dp->ioc_kern_dest) {
					error = EINVAL;
				} else if (((dp->ioc_direction & (LOCAL_TO_NETWORK | NETWORK_TO_LOCAL)) == 0) ||
					((dp->ioc_direction & LOCAL_TO_NETWORK) && (dp->ioc_direction & NETWORK_TO_LOCAL))) {
					/* Need to have one set and you can't have both set */
					error = EINVAL;
				} else if (dp->ioc_direction & LOCAL_TO_NETWORK) {
					error = smb_usr_convert_path_to_network(sdp->sd_vc, dp);
				} else {
					error = smb_usr_convert_network_to_path(sdp->sd_vc, dp);
				}
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}
		case SMBIOC_TCON:
		{
			struct smbioc_share * shspec = (struct smbioc_share *)data;

			/* protect against anyone else playing with the smb dev structure */
			lck_rw_lock_exclusive(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Make sure the version matches */
			if (shspec->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (sdp->sd_share) {
				error = EISCONN;
			} else  if (!sdp->sd_vc) {
				error = ENOTCONN;
			} else  {
				error = smb_sm_tcon(sdp->sd_vc, shspec, &sdp->sd_share, context);
			}

			lck_rw_unlock_exclusive(&sdp->sd_rwlock);
			break;
		}
		case SMBIOC_TDIS:
		{
			struct smbioc_share * shspec = (struct smbioc_share *)data;

			/* protect against anyone else playing with the smb dev structure */
			lck_rw_lock_exclusive(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Make sure the version match */
			if (shspec->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else  if (sdp->sd_share == NULL) {
				error = ENOTCONN;
			} else {
				smb_share_rele(sdp->sd_share, context);
				sdp->sd_share = NULL;
				error = 0;
			}

			lck_rw_unlock_exclusive(&sdp->sd_rwlock);
			break;
		}
		case SMBIOC_AUTH_INFO:
		{
			struct smbioc_auth_info * auth_info = (struct smbioc_auth_info *)data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			if (auth_info->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (!sdp->sd_vc) {
				error = ENOTCONN;
			} else {
				vcp = sdp->sd_vc;
				auth_info->ioc_client_nt = vcp->vc_gss.gss_client_nt;
				auth_info->ioc_target_nt = vcp->vc_gss.gss_target_nt;
				/*
				 * On input the client_size and target_size must be the max size
				 * of the buffer. On output we set them to the correct size or
				 * zero if the buffer is not big enough.
				 *
				 * NOTE(review): the comparisons below look inverted relative
				 * to this comment - a name that is >= the caller's buffer size
				 * is reported (and copied out) in full, while a name that fits
				 * reports size 0. Verify against the userland consumer before
				 * changing.
				 */
				if (vcp->vc_gss.gss_cpn_len >= auth_info->ioc_client_size) {
					auth_info->ioc_client_size = vcp->vc_gss.gss_cpn_len;
				} else {
					auth_info->ioc_client_size = 0;
				}
				if (vcp->vc_gss.gss_spn_len >= auth_info->ioc_target_size) {
					auth_info->ioc_target_size = vcp->vc_gss.gss_spn_len;
				} else {
					auth_info->ioc_target_size = 0;
				}
				if (vcp->vc_gss.gss_cpn && auth_info->ioc_client_size) {
					error = copyout(vcp->vc_gss.gss_cpn, auth_info->ioc_client_name,
									(size_t)auth_info->ioc_client_size);
					if (error) {
						lck_rw_unlock_shared(&sdp->sd_rwlock);
						break;
					}
				}
				if (vcp->vc_gss.gss_spn && auth_info->ioc_target_size) {
					error = copyout(vcp->vc_gss.gss_spn, auth_info->ioc_target_name,
									(size_t)auth_info->ioc_target_size);
				}
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}
		case SMBIOC_VC_PROPERTIES:
		{
			struct smbioc_vc_properties * properties = (struct smbioc_vc_properties *)data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			if (properties->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (!sdp->sd_vc) {
				error = ENOTCONN;
			} else {
				/* Snapshot the VC's negotiated properties for userland */
				vcp = sdp->sd_vc;
                properties->uid = vcp->vc_uid;
				properties->smb1_caps = vcp->vc_sopt.sv_caps;
                properties->smb2_caps = vcp->vc_sopt.sv_capabilities;
				properties->flags = vcp->vc_flags;
                properties->misc_flags = vcp->vc_misc_flags;
				properties->hflags = vcp->vc_hflags;
				properties->hflags2 = vcp->vc_hflags2;
				properties->txmax = vcp->vc_txmax;
				properties->rxmax = vcp->vc_rxmax;
                properties->wxmax = vcp->vc_wxmax;
                memset(properties->model_info, 0, (SMB_MAXFNAMELEN * 2));
                /* only when we are mac to mac */
                if ((vcp->vc_misc_flags & SMBV_OSX_SERVER) && vcp->vc_model_info) {
                    memcpy(properties->model_info, vcp->vc_model_info, strlen(vcp->vc_model_info));
                }
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}
		case SMBIOC_SHARE_PROPERTIES:
		{
			struct smbioc_share_properties * properties = (struct smbioc_share_properties *)data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			if (properties->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (!sdp->sd_vc || !sdp->sd_share) {
				error = ENOTCONN;
			} else {
				/* Snapshot the share's properties for userland */
				sharep = sdp->sd_share;
                properties->share_caps  = sharep->ss_share_caps;
                properties->share_flags = sharep->ss_share_flags;
				properties->share_type  = sharep->ss_share_type;
				properties->attributes  = sharep->ss_attributes;
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}
        case SMBIOC_GET_OS_LANMAN:
		{
			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			if (!sdp->sd_vc) {
				error = ENOTCONN;
			} else {
				/* Copy the server-advertised OS/LANMAN strings, if present */
				struct smbioc_os_lanman * OSLanman = (struct smbioc_os_lanman *)data;
				vcp = sdp->sd_vc;
				if (vcp->NativeOS)
					strlcpy(OSLanman->NativeOS, vcp->NativeOS, sizeof(OSLanman->NativeOS));
				if (vcp->NativeLANManager)
					strlcpy(OSLanman->NativeLANManager, vcp->NativeLANManager, sizeof(OSLanman->NativeLANManager));
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}
		case SMBIOC_SESSSTATE:
		{
			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Check to see if the VC is still up and running */
			if (sdp->sd_vc && (SMB_TRAN_FATAL(sdp->sd_vc, 0) == 0)) {
				*(uint16_t *)data = EISCONN;
			} else {
				*(uint16_t *)data = ENOTCONN;
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}
		case SMBIOC_CANCEL_SESSION:
		{
			/*
			 * The global device lock protects us here
			 *
			 * NOTE(review): dev_rw_lck is only held shared at this point, so
			 * this read-modify-write of sd_flags can race with a concurrent
			 * writer of the same field - confirm whether an exclusive lock
			 * (or atomic or-in) is required.
			 */
			sdp->sd_flags |= NSMBFL_CANCEL;

            lck_rw_unlock_shared(dev_rw_lck);
			break;
		}
		case SMBIOC_REQUEST:
		{
			struct smbioc_rq * dp = (struct smbioc_rq *)data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Make sure the version match */
			if (dp->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			}
			else if (sdp->sd_share == NULL) {
				error = ENOTCONN;
			} else {
				error = smb_usr_simplerequest(sdp->sd_share, dp, context);
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}
		case SMBIOC_T2RQ:
		{
			struct smbioc_t2rq * dp2 = (struct smbioc_t2rq *)data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Make sure the version match */
			if (dp2->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (sdp->sd_share == NULL) {
				error = ENOTCONN;
			} else {
				error = smb_usr_t2request(sdp->sd_share, dp2, context);
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}
		case SMBIOC_READ:
		case SMBIOC_WRITE:
		{
			struct smbioc_rw *rwrq = (struct smbioc_rw *)data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Make sure the version match */
			if (rwrq->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (sdp->sd_share == NULL) {
				error = ENOTCONN;
			} else {
				uio_t auio = NULL;

				/* Take the 32 bit world pointers and convert them to user_addr_t. */
				if (vfs_context_is64bit(context))
					auio = uio_create(1, rwrq->ioc_offset, UIO_USERSPACE64,
									  (cmd == SMBIOC_READ) ? UIO_READ : UIO_WRITE);
				else {
					rwrq->ioc_kern_base = CAST_USER_ADDR_T(rwrq->ioc_base);
					auio = uio_create(1, rwrq->ioc_offset, UIO_USERSPACE32,
									  (cmd == SMBIOC_READ) ? UIO_READ : UIO_WRITE);
				}
				if (auio) {
                    smbfh fh;
                    SMBFID fid = 0;

                    uio_addiov(auio, rwrq->ioc_kern_base, rwrq->ioc_cnt);
                    /* Convert the SMB1 file handle to little endian, then widen to a SMBFID */
                    fh = htoles(rwrq->ioc_fh);
                    fid = fh;
                    /* All calls from user maintain a reference on the share */
                    if (cmd == SMBIOC_READ) {
                        error = smb_smb_read(sdp->sd_share, fid, auio, context);
                    }
                    else {
                        int ioFlags = (rwrq->ioc_writeMode & WritethroughMode) ? IO_SYNC : 0;

                        error = smb_smb_write(sdp->sd_share, fid, auio, ioFlags, context);
                    }
                    /* Report back the number of bytes actually transferred */
                    rwrq->ioc_cnt -= (int32_t)uio_resid(auio);
                    uio_free(auio);
				}
                else {
					error = ENOMEM;
                }
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}
		case SMBIOC_FSCTL:
		{
			struct smbioc_fsctl * fsctl = (struct smbioc_fsctl *)data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Make sure the version match */
			if (fsctl->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (sdp->sd_share == NULL) {
				error = ENOTCONN;
			} else {
				error = smb_usr_fsctl(sdp->sd_share, fsctl, context);
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}

		case SMB2IOC_CHECK_DIR:
		{
			struct smb2ioc_check_dir * check_dir_ioc = (struct smb2ioc_check_dir *) data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Make sure the version match */
			if (check_dir_ioc->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (sdp->sd_share == NULL) {
				error = ENOTCONN;
			} else {
				error = smb_usr_check_dir(sdp->sd_share, sdp->sd_vc,
                                          check_dir_ioc, context);
                if (error) {
                    /*
                     * Note: On error, the ioctl code will NOT copy out the data
                     * structure back to user space.
                     *
                     * If ioc_ret_ntstatus is filled in, change the error to 0
                     * so that we can return the real NT error in user space.
                     * User space code is responsible for checking both error
                     * and ioc_ret_ntstatus for errors.
                     */
                    check_dir_ioc->ioc_ret_errno = error;
                    if (check_dir_ioc->ioc_ret_ntstatus & 0xC0000000) {
                        error = 0;
                    }
                }
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}

		case SMB2IOC_CLOSE:
		{
			struct smb2ioc_close * close_ioc = (struct smb2ioc_close *) data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Make sure the version match */
			if (close_ioc->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (sdp->sd_share == NULL) {
				error = ENOTCONN;
			} else {
				error = smb_usr_close(sdp->sd_share, close_ioc, context);
                if (error) {
                    /*
                     * Note: On error, the ioctl code will NOT copy out the data
                     * structure back to user space.
                     *
                     * If ioc_ret_ntstatus is filled in, change the error to 0
                     * so that we can return the real NT error in user space.
                     * User space code is responsible for checking both error
                     * and ioc_ret_ntstatus for errors.
                     */
                    if (close_ioc->ioc_ret_ntstatus & 0xC0000000) {
                        error = 0;
                    }
                }
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}

		case SMB2IOC_CREATE:
		{
			struct smb2ioc_create * create_ioc = (struct smb2ioc_create *) data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Make sure the version match */
			if (create_ioc->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (sdp->sd_share == NULL) {
				error = ENOTCONN;
			} else {
				error = smb_usr_create(sdp->sd_share, create_ioc, context);
                if (error) {
                    /*
                     * Note: On error, the ioctl code will NOT copy out the data
                     * structure back to user space.
                     *
                     * If ioc_ret_ntstatus is filled in, change the error to 0
                     * so that we can return the real NT error in user space.
                     * User space code is responsible for checking both error
                     * and ioc_ret_ntstatus for errors.
                     */
                    if (create_ioc->ioc_ret_ntstatus & 0xC0000000) {
                        error = 0;
                    }
                }
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}

		case SMB2IOC_GET_DFS_REFERRAL:
        {
			struct smb2ioc_get_dfs_referral * get_dfs_refer_ioc = (struct smb2ioc_get_dfs_referral *) data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Make sure the version match */
			if (get_dfs_refer_ioc->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (sdp->sd_share == NULL) {
				error = ENOTCONN;
			} else {
				error = smb_usr_get_dfs_referral(sdp->sd_share, sdp->sd_vc,
                                                 get_dfs_refer_ioc, context);
                if (error) {
                    /*
                     * Note: On error, the ioctl code will NOT copy out the data
                     * structure back to user space.
                     *
                     * If ioc_ret_ntstatus is filled in, change the error to 0
                     * so that we can return the real NT error in user space.
                     * User space code is responsible for checking both error
                     * and ioc_ret_ntstatus for errors.
                     */
                    if (get_dfs_refer_ioc->ioc_ret_ntstatus & 0xC0000000) {
                        error = 0;
                    }
                }
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
        }

		case SMB2IOC_IOCTL:
		{
			struct smb2ioc_ioctl * ioctl_ioc = (struct smb2ioc_ioctl *) data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Make sure the version match */
			if (ioctl_ioc->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (sdp->sd_share == NULL) {
				error = ENOTCONN;
			} else {
				error = smb_usr_ioctl(sdp->sd_share, sdp->sd_vc,
                                      ioctl_ioc, context);
                if (error) {
                    /*
                     * Note: On error, the ioctl code will NOT copy out the data
                     * structure back to user space.
                     *
                     * If ioc_ret_ntstatus is filled in, change the error to 0
                     * so that we can return the real NT error in user space.
                     * User space code is responsible for checking both error
                     * and ioc_ret_ntstatus for errors.
                     */
                    if (ioctl_ioc->ioc_ret_ntstatus & 0xC0000000) {
                        error = 0;
                    }
                }
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}

		case SMB2IOC_QUERY_DIR:
        {
			struct smb2ioc_query_dir *query_dir_ioc = (struct smb2ioc_query_dir *) data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Make sure the version match */
			if (query_dir_ioc->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (sdp->sd_share == NULL) {
				error = ENOTCONN;
			} else {
				error = smb_usr_query_dir(sdp->sd_share, query_dir_ioc,
                                          context);
                if (error) {
                    /*
                     * Note: On error, the ioctl code will NOT copy out the data
                     * structure back to user space.
                     *
                     * If ioc_ret_ntstatus is filled in, change the error to 0
                     * so that we can return the real NT error in user space.
                     * User space code is responsible for checking both error
                     * and ioc_ret_ntstatus for errors.
                     */
                    if (query_dir_ioc->ioc_ret_ntstatus & 0xC0000000) {
                        error = 0;
                    }
                }
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
        }

		case SMB2IOC_READ:
		case SMB2IOC_WRITE:
		{
			struct smb2ioc_rw *rw_ioc = (struct smb2ioc_rw *) data;

			lck_rw_lock_shared(&sdp->sd_rwlock);

            /* free global lock now since we now have sd_rwlock */
            lck_rw_unlock_shared(dev_rw_lck);

			/* Make sure the version match */
			if (rw_ioc->ioc_version != SMB_IOC_STRUCT_VERSION) {
				error = EINVAL;
			} else if (sdp->sd_share == NULL) {
				error = ENOTCONN;
			} else {
				error = smb_usr_read_write(sdp->sd_share, cmd, rw_ioc, context);
                if (error) {
                    /*
                     * Note: On error, the ioctl code will NOT copy out the data
                     * structure back to user space.
                     *
                     * If ioc_ret_ntstatus is filled in, change the error to 0
                     * so that we can return the real NT error in user space.
                     * User space code is responsible for checking both error
                     * and ioc_ret_ntstatus for errors.
                     */
                    if (rw_ioc->ioc_ret_ntstatus & 0xC0000000) {
                        error = 0;
                    }
                }
			}

			lck_rw_unlock_shared(&sdp->sd_rwlock);
			break;
		}

		default:
		{
			error = ENODEV;
            lck_rw_unlock_shared(dev_rw_lck);
			break;
		}
	}

	vfs_context_rele(context);
exit:
	return (error);
}
958
959
960static int nsmb_dev_load(module_t mod, int cmd, void *arg)
961{
962#pragma unused(mod, arg)
963	int error = 0;
964
965	lck_rw_lock_exclusive(dev_rw_lck);
966	switch (cmd) {
967	    case MOD_LOAD:
968			error = smb_sm_init();
969			if (error)
970				break;
971			error = smb_iod_init();
972			if (error) {
973				(void)smb_sm_done();
974				break;
975			}
976			if (smb_major == -1) {
977				dev_t dev;
978				struct smb_dev *sdp;
979
980				smb_major = cdevsw_add(-1, &nsmb_cdevsw);
981				if (smb_major == -1) {
982					error = EBUSY;
983					SMBERROR("smb: cdevsw_add");
984					(void)smb_iod_done();
985					(void)smb_sm_done();
986				}
987                SMB_MALLOC(sdp, struct smb_dev *, sizeof(*sdp), M_NSMBDEV, M_WAITOK);
988				bzero(sdp, sizeof(*sdp));
989				dev = makedev(smb_major, 0);
990				sdp->sd_devfs = devfs_make_node(dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "nsmb0");
991				if (!sdp->sd_devfs) {
992					error = ENOMEM;
993					SMBERROR("smb: devfs_make_node 0666");
994					(void)cdevsw_remove(smb_major, &nsmb_cdevsw);
995					SMB_FREE(sdp, M_NSMBDEV);
996					(void)smb_iod_done();
997					(void)smb_sm_done();
998				}
999				smb_minor_hiwat = 0;
1000				SMB_GETDEV(dev) = sdp;
1001			}
1002			SMBDEBUG("netsmb_dev: loaded\n");
1003			break;
1004	    case MOD_UNLOAD:
1005			smb_iod_done();
1006			error = smb_sm_done();
1007			if (error)
1008				break;
1009			if (smb_major != -1) {
1010				int m;
1011				struct smb_dev *sdp;
1012
1013				for (m = 0; m <= smb_minor_hiwat; m++)
1014					if ((sdp = SMB_GETDEV(m))) {
1015						SMB_GETDEV(m) = 0;
1016						if (sdp->sd_devfs)
1017							devfs_remove(sdp->sd_devfs);
1018						SMB_FREE(sdp, M_NSMBDEV);
1019					}
1020				smb_minor_hiwat = -1;
1021				smb_major = cdevsw_remove(smb_major, &nsmb_cdevsw);
1022				if (smb_major == -1)
1023					SMBERROR("smb: cdevsw_remove failed");
1024				smb_major = -1;
1025			}
1026			SMBDEBUG("netsmb_dev: unloaded\n");
1027			break;
1028	    default:
1029			error = EINVAL;
1030			break;
1031	}
1032	lck_rw_unlock_exclusive(dev_rw_lck);
1033	return (error);
1034}
1035
1036DEV_MODULE(dev_netsmb, nsmb_dev_load, 0);
1037
1038int
1039smb_dev2share(int fd, struct smb_share **outShare)
1040{
1041	vnode_t vp;
1042	struct smb_dev *sdp = NULL;
1043	struct smb_share *share;
1044	dev_t dev = NODEV;
1045	int error;
1046
1047	error = file_vnode_withvid(fd, &vp, NULL);
1048	if (error) {
1049		return (error);
1050	}
1051	lck_rw_lock_exclusive(dev_rw_lck);
1052	if (vp) {
1053		dev = vn_todev(vp);
1054	}
1055	if (dev != NODEV) {
1056		sdp = SMB_GETDEV(dev);
1057	}
1058	if (sdp == NULL) {
1059		error = EBADF;
1060		goto done;
1061	}
1062	/* over kill since we have the global device lock, but it looks cleaner */
1063	lck_rw_lock_exclusive(&sdp->sd_rwlock);
1064	share = sdp->sd_share;
1065	if (share == NULL) {
1066		lck_rw_unlock_exclusive(&sdp->sd_rwlock);
1067 		error = ENOTCONN;
1068		goto done;
1069	}
1070	/*
1071	 * The share is already referenced by the TCON ioctl
1072	 * We NULL to hand off share to caller (mount)
1073	 * This allows further ioctls against connection, for instance
1074	 * another tree connect and mount, in the automounter case
1075	 */
1076	sdp->sd_share = NULL;
1077	lck_rw_unlock_exclusive(&sdp->sd_rwlock);
1078	*outShare = share;
1079done:
1080	file_drop(fd);
1081	lck_rw_unlock_exclusive(dev_rw_lck);
1082	return (error);
1083}
1084
1085