1/* 2 * Copyright (c) 2000-2001 Boris Popov 3 * All rights reserved. 4 * 5 * Portions Copyright (C) 2001 - 2012 Apple Inc. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Boris Popov. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 */ 35 36/* 37 * Connection engine. 
38 */ 39 40#include <sys/sysctl.h> /* can't avoid that */ 41 42#include <sys/smb_apple.h> 43#include <sys/kauth.h> 44 45#include <netsmb/smb.h> 46#include <netsmb/smb_2.h> 47#include <netsmb/smb_subr.h> 48#include <netsmb/smb_conn.h> 49#include <netsmb/smb_dev.h> 50#include <netsmb/smb_tran.h> 51#include <netsmb/smb_trantcp.h> 52#include <netsmb/smb_gss.h> 53#include <netsmb/netbios.h> 54 55extern uint32_t smbfs_deadtimer; 56 57static struct smb_connobj smb_vclist; 58static int smb_vcnext = 1; /* next unique id for VC */ 59 60extern struct linker_set sysctl_net_smb; 61 62SYSCTL_DECL(_net_smb); 63 64SYSCTL_NODE(_net, OID_AUTO, smb, CTLFLAG_RW, NULL, "SMB protocol"); 65 66static void smb_co_put(struct smb_connobj *cp, vfs_context_t context); 67 68/* 69 * The smb_co_lock, smb_co_unlock, smb_co_ref, smb_co_rel and smb_co_put deal 70 * with the vclist, vc and shares. So the vclist owns the vc which owns the share. 71 * Currently the share owns nothing even though it does have some relationship 72 * with mount structure. 
 */
/*
 * Acquire the connection-object lock for the calling thread.
 *
 * The lock is recursive for the owning thread (co_lockcount tracks the
 * recursion depth). Returns EBUSY without blocking if the object is already
 * marked SMBO_GONE, and re-checks that flag after acquiring the mutex in case
 * the object started going away while we slept on the lock.
 */
static int smb_co_lock(struct smb_connobj *cp)
{
	
	if (cp->co_flags & SMBO_GONE)
		return EBUSY;
	if (cp->co_lockowner == current_thread()) {
		/* Recursive acquisition by the owner: just bump the count */
		cp->co_lockcount++;
	} else {
		lck_mtx_lock(cp->co_lock);
		/* We got the lock, but the VC is going away, so unlock it return EBUSY */
		if (cp->co_flags & SMBO_GONE) {
			lck_mtx_unlock(cp->co_lock);
			return EBUSY;
		}
		cp->co_lockowner = current_thread();
		cp->co_lockcount = 1;
	}
	return (0);
}

/*
 * Release one level of the recursive connection-object lock.
 *
 * Only the owning thread may unlock. When the recursion count drops to zero
 * the mutex is released and, under the interlock, any thread waiting in
 * smb_co_gone (SMBFS_CO_LOCK_WAIT) is woken so it can finish draining.
 */
static void smb_co_unlock(struct smb_connobj *cp)
{
	if (cp->co_lockowner && (cp->co_lockowner != current_thread())) {
		SMBERROR("not owner of lock");
	} else if (cp->co_lockcount && (--cp->co_lockcount == 0)) {
		cp->co_lockowner = NULL;
		lck_mtx_unlock(cp->co_lock);
		lck_mtx_lock(&(cp)->co_interlock);
		if (cp->co_lock_flags & SMBFS_CO_LOCK_WAIT){
			cp->co_lock_flags &= ~SMBFS_CO_LOCK_WAIT;
			lck_mtx_unlock(&(cp)->co_interlock);
			wakeup(&cp->co_lock);
		} else
			lck_mtx_unlock(&(cp)->co_interlock);
	}
}

/*
 * Common code for connection object
 *
 * Initializes the child list, both locks, and the lock-owner bookkeeping,
 * then takes an initial use count of 1 and returns with the object LOCKED
 * (callers must eventually smb_co_unlock it).
 */
static void
smb_co_init(struct smb_connobj *cp, int level, const char *objname, struct proc *p)
{
#pragma unused (objname, p)
	SLIST_INIT(&cp->co_children);
	lck_mtx_init(&cp->co_interlock, co_lck_group, co_lck_attr);
	cp->co_lock = lck_mtx_alloc_init(co_lck_group, co_lck_attr);
	cp->co_lock_flags = 0;
	cp->co_lockowner = 0;
	cp->co_lockcount = 0;
	
	cp->co_level = level;
	cp->co_usecount = 1;
	/* Freshly-initialized object can never be SMBO_GONE, so this cannot fail */
	KASSERT(smb_co_lock(cp) == 0,
			("smb_co_init: lock failed"));
}

/*
 * Tear down what smb_co_init set up: destroy the interlock, free the main
 * lock, and clear the lock bookkeeping. Does NOT free cp itself.
 */
static void smb_co_done(struct smb_connobj *cp)
{
	lck_mtx_destroy(&cp->co_interlock, co_lck_group);
	lck_mtx_free(cp->co_lock, co_lck_group);
	cp->co_lock = 0;
	cp->co_lock_flags = 0;
	cp->co_lockowner = 0;
	cp->co_lockcount = 0;
}

/*
 * Final teardown of a connection object once SMBO_GONE is set and the last
 * use count has been dropped. Waits for any outstanding lock holders, runs
 * the object's co_gone callback, detaches it from its parent (dropping the
 * parent reference taken in smb_co_addchild), and finally frees it via
 * its co_free callback.
 */
static void smb_co_gone(struct smb_connobj *cp, vfs_context_t context)
{
	struct smb_connobj *parent;
	
	/* Drain any locks that are still held */
	lck_mtx_lock(&(cp)->co_interlock);
	while (cp->co_lockcount > 0) {
		cp->co_lock_flags |= SMBFS_CO_LOCK_WAIT;
		msleep(&cp->co_lock, &(cp)->co_interlock, 0, 0, 0);
	}
	lck_mtx_unlock(&(cp)->co_interlock);
	/*
	 * The old code would take a smb_co_lock here. Since SMBO_GONE is set
	 * the smb_co_lock did nothing. So I removed that code.
	 */
	
	if (cp->co_gone)
		cp->co_gone(cp, context);
	parent = cp->co_parent;
	if (parent) {
		if (smb_co_lock(parent)) {
			SMBERROR("unable to lock level %d\n", parent->co_level);
		} else {
			SLIST_REMOVE(&parent->co_children, cp, smb_connobj,
						 co_next);
			/* Drop the parent reference taken by smb_co_addchild */
			smb_co_put(parent, context);
		}
	}
	if (cp->co_free)
		cp->co_free(cp);
}

/*
 * Drop a use count AND release the caller's lock on the object.
 * If this was the last use count, mark the object SMBO_GONE and destroy it
 * via smb_co_gone. Compare with smb_co_rele, which drops only a reference.
 */
static void smb_co_put(struct smb_connobj *cp, vfs_context_t context)
{
	
	lck_mtx_lock(&(cp)->co_interlock);
	if (cp->co_usecount > 1) {
		cp->co_usecount--;
	} else if (cp->co_usecount == 1) {
		cp->co_usecount--;
		cp->co_flags |= SMBO_GONE;
	} else {
		SMBERROR("negative usecount\n");
	}
	lck_mtx_unlock(&(cp)->co_interlock);
	smb_co_unlock(cp);
	if ((cp->co_flags & SMBO_GONE) == 0)
		return;
	
	smb_co_gone(cp, context);
}

/*
 * Take a reference on a connection object. Logs (but tolerates) the case
 * where the object is already being torn down.
 */
static void smb_co_ref(struct smb_connobj *cp)
{
	lck_mtx_lock(&(cp)->co_interlock);
	if (cp->co_flags & SMBO_GONE) {
		/*
		 * This can happen when we are doing a tree disconnect or a VC log off.
		 * In the future we could fix the tree disconnect by only taking a reference
		 * on the VC. Not sure what to do about the VC. If we could solve those
		 * two issues then we should make this a fatal error.
		 */
		SMBDEBUG("The object is in the gone state level = 0x%x\n",cp->co_level);
	}
	cp->co_usecount++;
	lck_mtx_unlock(&(cp)->co_interlock);
}

/*
 * Link child under parent, taking a reference on the parent that is
 * released by smb_co_gone when the child is destroyed.
 */
static void smb_co_addchild(struct smb_connobj *parent, struct smb_connobj *child)
{
	smb_co_ref(parent);
	SLIST_INSERT_HEAD(&parent->co_children, child, co_next);
	child->co_parent = parent;
}

/*
 * Drop a reference on a connection object (caller does NOT hold the object
 * lock, unlike smb_co_put). The thread that takes the count to zero sets
 * SMBO_GONE and performs teardown; if SMBO_GONE is already set another
 * thread is draining and we leave it alone.
 */
static void smb_co_rele(struct smb_connobj *cp, vfs_context_t context)
{
	lck_mtx_lock(&(cp)->co_interlock);
	if (cp->co_usecount > 1) {
		cp->co_usecount--;
		lck_mtx_unlock(&(cp)->co_interlock);
		return;
	}
	if (cp->co_usecount == 0) {
		SMBERROR("negative co_usecount for level %d\n", cp->co_level);
		lck_mtx_unlock(&(cp)->co_interlock);
		return;
	}
	cp->co_usecount--;
	if (cp->co_flags & SMBO_GONE) {
		lck_mtx_unlock(&(cp)->co_interlock);
		return; /* someone is already draining */
	}
	cp->co_flags |= SMBO_GONE;
	lck_mtx_unlock(&(cp)->co_interlock);
	
	smb_co_gone(cp, context);
}

/*
 * Allocate and return a copy of sa (sa->sa_len bytes). Returns NULL on
 * allocation failure; caller owns the result and frees it with M_SONAME.
 */
struct sockaddr *
smb_dup_sockaddr(struct sockaddr *sa, int canwait)
{
	struct sockaddr *sa2;
	
	SMB_MALLOC(sa2, struct sockaddr *, sa->sa_len, M_SONAME,
			   canwait ? M_WAITOK : M_NOWAIT);
	if (sa2)
		bcopy(sa, sa2, sa->sa_len);
	return (sa2);
}

/*
 * Module init: set up the global VC list head. smb_co_init returns with the
 * object locked, so drop that lock before returning.
 */
int smb_sm_init(void)
{
	smb_co_init(&smb_vclist, SMBL_VCLIST, "smb_vclist", current_proc());
	smb_co_unlock(&smb_vclist);
	return (0);
}

/*
 * Module teardown: refuse (EBUSY) while any VC still holds a reference on
 * the global list, otherwise destroy the list head's locks.
 */
int smb_sm_done(void)
{
	if (smb_vclist.co_usecount > 1) {
		SMBERROR("%d connections still active\n", smb_vclist.co_usecount - 1);
		return (EBUSY);
	}
	/* XXX Q4BP why are we not iterating on smb_vclist here with SMBCO_FOREACH? */
	smb_co_done(&smb_vclist);
	return (0);
}

static void smb_sm_lockvclist()
{
	/*
	 * The smb_vclist never goes away so there is no way for smb_co_lock
	 * to fail in this case.
	 */
	KASSERT((smb_co_lock(&smb_vclist) == 0), ("smb_sm_lockvclist: lock failed"));
}

/* Release the global VC list lock taken by smb_sm_lockvclist. */
static void smb_sm_unlockvclist()
{
	smb_co_unlock(&smb_vclist);
}

/*
 * This routine will reset the virtual circuit. When doing a reconnect we need to
 * keep some of the virtual circuit information around. We only reset the information
 * that is required to do the reconnect.
 */
void smb_vc_reset(struct smb_vc *vcp)
{
	/*
	 * If these three flags were set keep them for the reconnect. Clear out
	 * any other flags that may have been set in the original connection.
	 */
	vcp->vc_hflags2 &= (SMB_FLAGS2_EXT_SEC | SMB_FLAGS2_KNOWS_LONG_NAMES | SMB_FLAGS2_UNICODE);
	
	vcp->vc_mid = 0;
	vcp->vc_low_pid = 1;
	
	vcp->vc_message_id = 1;
	
	/* leave vc_misc_flags untouched as it has preferences flags */
	//vcp->vc_misc_flags = 0;
	
	/* Save previous sessID for reconnects SessionSetup request */
	vcp->vc_prev_session_id = vcp->vc_session_id;
	vcp->vc_session_id = 0;
	
	/* Give the reconnected circuit a fresh unique id */
	vcp->vc_number = smb_vcnext++;
	
	/* Reset the smb signing */
	smb_reset_sig(vcp);
}

/* Take a reference on the VC (wrapper over the connobj layer). */
void smb_vc_ref(struct smb_vc *vcp)
{
	smb_co_ref(VCTOCP(vcp));
}

/* Drop a reference on the VC; may trigger teardown via smb_co_rele. */
void smb_vc_rele(struct smb_vc *vcp, vfs_context_t context)
{
	smb_co_rele(VCTOCP(vcp), context);
}

/* Drop a reference AND the caller's lock on the VC. */
static void smb_vc_put(struct smb_vc *vcp, vfs_context_t context)
{
	smb_co_put(VCTOCP(vcp), context);
}

/* Lock the VC; returns EBUSY if it is being torn down. */
int smb_vc_lock(struct smb_vc *vcp)
{
	return smb_co_lock(VCTOCP(vcp));
}

/* Unlock the VC. */
void smb_vc_unlock(struct smb_vc *vcp)
{
	smb_co_unlock(VCTOCP(vcp));
}

/*
 * co_free callback for a VC: destroy the iod thread first (it may still
 * reference the fields below), then release every allocation the VC owns,
 * tear down the GSS state and throttle info, undo smb_co_init, and finally
 * free the VC structure itself.
 */
static void smb_vc_free(struct smb_connobj *cp)
{
	struct smb_vc *vcp = (struct smb_vc*)cp;
	
	smb_gss_rel_cred(vcp);
	
	/* Destroy the iod before freeing anything it might still touch */
	if (vcp->vc_iod)
		smb_iod_destroy(vcp->vc_iod);
	vcp->vc_iod = NULL;
	
	if (vcp->negotiate_token) {
		SMB_FREE(vcp->negotiate_token, M_SMBTEMP);
	}
	
	if (vcp->NativeOS) {
		SMB_FREE(vcp->NativeOS, M_SMBSTR);
	}
	
	if (vcp->NativeLANManager) {
		SMB_FREE(vcp->NativeLANManager, M_SMBSTR);
	}
	
	if (vcp->vc_username) {
		SMB_FREE(vcp->vc_username, M_SMBSTR);
	}
	
	if (vcp->vc_srvname) {
		SMB_FREE(vcp->vc_srvname, M_SMBSTR);
	}
	
	if (vcp->vc_localname) {
		SMB_FREE(vcp->vc_localname, M_SMBSTR);
	}
	
	if (vcp->vc_pass) {
		SMB_FREE(vcp->vc_pass, M_SMBSTR);
	}
	
	if (vcp->vc_domain) {
		SMB_FREE(vcp->vc_domain, M_SMBSTR);
	}
	
	if (vcp->vc_mackey) {
		SMB_FREE(vcp->vc_mackey, M_SMBTEMP);
	}
	
	if (vcp->vc_saddr) {
		SMB_FREE(vcp->vc_saddr, M_SONAME);
	}
	
	if (vcp->vc_laddr) {
		SMB_FREE(vcp->vc_laddr, M_SONAME);
	}
	
	smb_gss_destroy(&vcp->vc_gss);
	
	if (vcp->throttle_info)
		throttle_info_release(vcp->throttle_info);
	vcp->throttle_info = NULL;
	
	if (vcp->vc_model_info) {
		SMB_FREE(vcp->vc_model_info, M_SMBTEMP);
	}
	
	smb_co_done(VCTOCP(vcp));
	lck_mtx_destroy(&vcp->vc_stlock, vcst_lck_group);
	/* NOTE(review): vcp cannot be NULL here (dereferenced above); guard is redundant */
	if (vcp) {
		SMB_FREE(vcp, M_SMBCONN);
	}
}

/*
 * Force reconnect on vc
 *
 * Sends a synchronous FORCE_RECONNECT event to the VC's iod thread;
 * a no-op if the iod is already gone.
 */
int smb_vc_force_reconnect(struct smb_vc *vcp)
{
	if (vcp->vc_iod) {
		smb_iod_request(vcp->vc_iod, SMBIOD_EV_FORCE_RECONNECT | SMBIOD_EV_SYNC, NULL);
	}
	
	return (0);
}

/*
 * Destroy VC to server, invalidate shares linked with it.
 * Transport should be locked on entry.
 */
static int smb_vc_disconnect(struct smb_vc *vcp)
{
	if (vcp->vc_iod)
		smb_iod_request(vcp->vc_iod, SMBIOD_EV_DISCONNECT | SMBIOD_EV_SYNC, NULL);
	return (0);
}

/*
 * Called when use count of VC dropped to zero.
 * VC should be locked on enter with LK_DRAIN.
 */
static void smb_vc_gone(struct smb_connobj *cp, vfs_context_t context)
{
#pragma unused(context)
	struct smb_vc *vcp = (struct smb_vc*)cp;
	smb_vc_disconnect(vcp);
}

/*
 * Allocate and initialize a new VC from the user's negotiate ioctl.
 *
 * Takes ownership of saddr/laddr: on any failure after smb_co_init,
 * smb_vc_put tears the VC down and smb_vc_free releases them; on success
 * the VC keeps them. Returns with the VC locked, referenced, and inserted
 * in the global VC list; *vcpp holds the result.
 */
static int smb_vc_create(struct smbioc_negotiate *vcspec,
						 struct sockaddr *saddr, struct sockaddr *laddr,
						 vfs_context_t context, struct smb_vc **vcpp)
{
	struct smb_vc *vcp;
	int error = 0;
	
	/* Should never happen, but just to be safe */
	if (context == NULL) {
		return ENOTSUP;
	}
	SMB_MALLOC(vcp, struct smb_vc *, sizeof(*vcp), M_SMBCONN, M_WAITOK | M_ZERO);
	/* smb_co_init returns with the new VC locked and use count 1 */
	smb_co_init(VCTOCP(vcp), SMBL_VC, "smb_vc", vfs_context_proc(context));
	vcp->obj.co_free = smb_vc_free;
	vcp->obj.co_gone = smb_vc_gone;
	vcp->vc_number = smb_vcnext++;
	vcp->vc_timo = SMB_DEFRQTIMO;
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
	vcp->vc_tdesc = &smb_tran_nbtcp_desc;
	vcp->vc_seqno = 0;
	vcp->vc_mackey = NULL;
	vcp->vc_mackeylen = 0;
	vcp->vc_saddr = saddr;
	vcp->vc_laddr = laddr;
	/* Remove any user setable items */
	vcp->vc_flags &= ~SMBV_USER_LAND_MASK;
	/* Now add the users setable items */
	vcp->vc_flags |= (vcspec->ioc_userflags & SMBV_USER_LAND_MASK);
	
	/* Now add the throttle info */
	vcp->throttle_info = throttle_info_create();
#ifdef DEBUG_TURN_OFF_EXT_SEC
	vcp->vc_hflags2 = SMB_FLAGS2_KNOWS_LONG_NAMES;
#else // DEBUG_TURN_OFF_EXT_SEC
	vcp->vc_hflags2 = SMB_FLAGS2_KNOWS_LONG_NAMES | SMB_FLAGS2_EXT_SEC | SMB_FLAGS2_UNICODE;
#endif // DEBUG_TURN_OFF_EXT_SEC
	
	vcp->vc_uid = vcspec->ioc_ssn.ioc_owner;
	vcp->vc_gss.gss_asid = AU_ASSIGN_ASID;
	
	/* Amount of time to wait while reconnecting */
	vcp->reconnect_wait_time = vcspec->ioc_ssn.ioc_reconnect_wait_time;
	
	lck_mtx_init(&vcp->vc_credits_lock, vc_credits_lck_group, vc_credits_lck_attr);
	
	lck_mtx_init(&vcp->vc_stlock, vcst_lck_group, vcst_lck_attr);
	
	vcp->vc_srvname = smb_strndup(vcspec->ioc_ssn.ioc_srvname,
								  sizeof(vcspec->ioc_ssn.ioc_srvname));
	if (vcp->vc_srvname)
		vcp->vc_localname = smb_strndup(vcspec->ioc_ssn.ioc_localname, sizeof(vcspec->ioc_ssn.ioc_localname));
	if ((vcp->vc_srvname == NULL) || (vcp->vc_localname == NULL)) {
		error = ENOMEM;
	}
	
	vcp->vc_message_id = 1;
	vcp->vc_misc_flags = SMBV_HAS_FILEIDS;	/* assume File IDs supported */
	vcp->vc_server_caps = 0;
	vcp->vc_volume_caps = 0;
	vcp->vc_model_info = NULL;
	
	if (!error)
		error = smb_iod_create(vcp);
	if (error) {
		/* Drops lock + reference; smb_vc_free also releases saddr/laddr */
		smb_vc_put(vcp, context);
		return error;
	}
	*vcpp = vcp;
	
	/* is SMB1 or SMB2 only flags set? */
	if (vcspec->ioc_extra_flags & SMB_SMB1_ONLY) {
		vcp->vc_misc_flags |= SMBV_NEG_SMB1_ONLY;
	}
	else if (vcspec->ioc_extra_flags & SMB_SMB2_ONLY) {
		vcp->vc_misc_flags |= SMBV_NEG_SMB2_ONLY;
	}
	
	if (vcspec->ioc_extra_flags & SMB_SIGNING_REQUIRED) {
		vcp->vc_misc_flags |= SMBV_CLIENT_SIGNING_REQUIRED;
	}
	
	/* Save client Guid */
	memcpy(vcp->vc_client_guid, vcspec->ioc_client_guid, sizeof(vcp->vc_client_guid));
	
	smb_sm_lockvclist();
	smb_co_addchild(&smb_vclist, VCTOCP(vcp));
	smb_sm_unlockvclist();
	return 0;
}

/*
 * So we have three types of sockaddr structures, IPv4, IPv6 or NetBIOS.
 *
 * If both sa_family equal AF_NETBIOS then we can just compare the two sockaddr
 * structures.
 *
 * If neither sa_family equal AF_NETBIOS then we can just compare the two sockaddr
 * structures.
 *
 * If the search sa_family equal AF_NETBIOS and the vc sa_family doesn't then we
 * can just compare, since it's not going to match. We never support sharing
 * a AF_NETBIOS with a non AF_NETBIOS connection.
 *
 * Now that just leaves the cases were the VC is connected using AF_NETBIOS and
 * the search sockaddr is either IPv4 or IPv6. We need to compare using the real
 * sockaddr that is inside the AF_NETBIOS sockaddr_nb structure.
 */
/* Returns TRUE when saddr refers to the same endpoint as the VC's address. */
static int addressMatch(struct smb_vc *vcp, struct sockaddr *saddr)
{
	struct sockaddr *vc_saddr = vcp->vc_saddr;
	
	/* NetBIOS VC vs raw IPv4/IPv6 search: compare the embedded real address */
	if ((vc_saddr->sa_family == AF_NETBIOS) && (saddr->sa_family != AF_NETBIOS)) {
		vc_saddr = (struct sockaddr *)&((struct sockaddr_nb *)vcp->vc_saddr)->snb_addrin;
	}
	
	if ((vc_saddr->sa_len == saddr->sa_len) && (memcmp(vc_saddr, saddr, saddr->sa_len) == 0))
		return TRUE;
	
	return FALSE;
}

/*
 * On success the vc will have a reference taken and a lock.
 *
 * Only smb_sm_negotiate passes sockaddr, all other routines need to pass in a
 * vcp to search on.
 */
static int smb_sm_lookupint(struct sockaddr *sap, uid_t owner, char *username,
							uint32_t user_flags, struct smb_vc **vcpp)
{
	struct smb_vc *vcp, *tvcp;
	int error;
	
	
	DBG_ASSERT(vcpp);	/* Better have passed us a vcpp */
tryagain:
	smb_sm_lockvclist();
	error = ENOENT;
	SMBCO_FOREACH_SAFE(vcp, &smb_vclist, tvcp) {
		
		if (*vcpp && vcp != *vcpp)
			continue;
		else if (*vcpp) {
			/* Found a match, lock it, we are done. */
			error = smb_vc_lock(vcp);
			if (error != 0) {
				/* Can happen with bad servers */
				SMBDEBUG("smb_vc_lock returned error %d\n", error);
			}
			break;
		} else {
			/*
			 * We should only get in here from the negotiate routine. We better
			 * have a sock addr or thats a programming error.
			 */
			DBG_ASSERT(sap);
			
			/* Don't share a vcp that hasn't been authenticated yet */
			if ((vcp->vc_flags & SMBV_AUTH_DONE) != SMBV_AUTH_DONE) {
				continue;
			}
			
			/* The sock address structure needs to match. */
			if (!addressMatch(vcp, sap)) {
				continue;
			}
			
			/* Must be the same owner */
			if (vcp->vc_uid != owner) {
				continue;
			}
			
			/* Ok we have a lock on the vcp, any error needs to unlock it */
			error = smb_vc_lock(vcp);
			/*
			 * This VC is going away, but it is currently block on the lock we
			 * hold for smb_vclist. We need to unlock the list and allow the VC
			 * to be remove. This still may not be the VC we were looking for so
			 * start the search again.
			 */
			if (error) {
				smb_sm_unlockvclist();
				goto tryagain;
			}
			
			/*
			 * The VC must be active and not in reconnect, otherwise we should
			 * just skip this VC.
			 */
			if ((vcp->vc_iod->iod_state != SMBIOD_ST_VCACTIVE) ||
				(vcp->vc_iod->iod_flags & SMBIOD_RECONNECT)) {
				SMBWARNING("Skipping %s because its down or in reconnect: flags = 0x%x state = 0x%x\n",
						   vcp->vc_srvname, vcp->vc_iod->iod_flags, vcp->vc_iod->iod_state);
				smb_vc_unlock(vcp);
				error = ENOENT;
				continue;
			}
			
			/*
			 * If they ask for authentication then the VC needs to match that
			 * authentication or we need to keep looking. So here are the
			 * scenarios we need to deal with here.
			 *
			 * 1. If they are asking for a private guest access and the VC has
			 *    private guest access set then use this VC. If either is set,
			 *    but not both then don't reuse the VC.
			 * 2. If they are asking for a anonymous access and the VC has
			 *    anonymous access set then use this VC. If either is set,
			 *    but not both then don't reuse the VC.
			 * 3. They are requesting kerberos access. If the current VC isn't
			 *    using kerberos then don't reuse the vcp.
			 * 4. They are requesting guest access. If the current VC isn't
			 *    using guest then don't reuse the VC.
			 * 5. They are using user level security. The VC user name needs to
			 *    match the one passed in.
			 * 6. They don't care. Always use the authentication of this VC.
			 */
			if ((vcp->vc_flags & SMBV_SFS_ACCESS)) {
				/* We're guest no matter what the user says, just use this VC */
				error = 0;
				break;
			} else if ((user_flags & SMBV_PRIV_GUEST_ACCESS) || (vcp->vc_flags & SMBV_PRIV_GUEST_ACCESS)) {
				if ((user_flags & SMBV_PRIV_GUEST_ACCESS) && (vcp->vc_flags & SMBV_PRIV_GUEST_ACCESS)) {
					error = 0;
					break;
				} else {
					smb_vc_unlock(vcp);
					error = ENOENT;
					continue;
				}
			} else if ((user_flags & SMBV_ANONYMOUS_ACCESS) || (vcp->vc_flags & SMBV_ANONYMOUS_ACCESS)) {
				if ((user_flags & SMBV_ANONYMOUS_ACCESS) && (vcp->vc_flags & SMBV_ANONYMOUS_ACCESS)) {
					error = 0;
					break;
				} else {
					smb_vc_unlock(vcp);
					error = ENOENT;
					continue;
				}
			} else if (user_flags & SMBV_KERBEROS_ACCESS) {
				if (vcp->vc_flags & SMBV_KERBEROS_ACCESS) {
					error = 0;
					break;
				} else {
					smb_vc_unlock(vcp);
					error = ENOENT;
					continue;
				}
			} else if (user_flags & SMBV_GUEST_ACCESS) {
				if (vcp->vc_flags & SMBV_GUEST_ACCESS) {
					error = 0;
					break;
				} else {
					smb_vc_unlock(vcp);
					error = ENOENT;
					continue;
				}
			} else if (username && username[0]) {
				if (vcp->vc_username &&
					((strncmp(vcp->vc_username, username, SMB_MAXUSERNAMELEN + 1)) == 0)) {
					error = 0;
					break;
				} else {
					smb_vc_unlock(vcp);
					error = ENOENT;
					continue;
				}
			}
			error = 0;
			break;
		}
	}
	if (vcp && !error) {
		smb_vc_ref(vcp);
		*vcpp = vcp;
	}
	smb_sm_unlockvclist();
	return error;
}

/*
 * Negotiate ioctl entry point: find a shareable, already-authenticated VC
 * matching the address/owner, or create a new one and start the transport
 * negotiation. On success *vcpp is referenced and unlocked.
 */
int smb_sm_negotiate(struct smbioc_negotiate *vcspec, vfs_context_t context,
					 struct smb_vc **vcpp, struct smb_dev *sdp, int searchOnly)
{
	struct smb_vc *vcp = NULL;
	struct sockaddr *saddr = NULL, *laddr = NULL;
	int error;
	
	saddr = smb_memdupin(vcspec->ioc_kern_saddr, vcspec->ioc_saddr_len);
	if (saddr == NULL) {
		return ENOMEM;
	}
	
	*vcpp = vcp = NULL;
	
	if (vcspec->ioc_extra_flags &
		SMB_FORCE_NEW_SESSION) {
		error = ENOENT;	/* Force a new virtual circuit session */
	} else {
		error = smb_sm_lookupint(saddr, vcspec->ioc_ssn.ioc_owner, vcspec->ioc_user,
								 vcspec->ioc_userflags, &vcp);
	}
	
	if ((error == 0) || (searchOnly)) {
		/* Sharing an existing VC (or only searching): saddr is ours to free */
		SMB_FREE(saddr, M_SMBDATA);
		vcspec->ioc_extra_flags |= SMB_SHARING_VC;
	} else {
		/* NetBIOS connections require a local address */
		if (saddr->sa_family == AF_NETBIOS) {
			laddr = smb_memdupin(vcspec->ioc_kern_laddr, vcspec->ioc_laddr_len);
			if (laddr == NULL) {
				SMB_FREE(saddr, M_SMBDATA);
				return ENOMEM;
			}
		}
		/* If smb_vc_create fails it will clean up saddr and laddr */
		error = smb_vc_create(vcspec, saddr, laddr, context, &vcp);
		if (error == 0) {
			/* Flags used to cancel the connection */
			vcp->connect_flag = &sdp->sd_flags;
			error = smb_vc_negotiate(vcp, context);
			vcp->connect_flag = NULL;
			if (error) /* Remove the lock and reference */
				smb_vc_put(vcp, context);
		}
	}
	if ((error == 0) && (vcp)) {
		/*
		 * They don't want us to touch the home directory, remove the flag. This
		 * will prevent any shared sessions to touch the home directory when they
		 * shouldn't.
		 */
		if ((vcspec->ioc_userflags & SMBV_HOME_ACCESS_OK) != SMBV_HOME_ACCESS_OK) {
			vcp->vc_flags &= ~SMBV_HOME_ACCESS_OK;
		}
		*vcpp = vcp;
		/* Return the VC unlocked but still referenced */
		smb_vc_unlock(vcp);
	}
	return error;
}

/*
 * Session-setup ioctl: authenticate the VC with the credentials in sspec.
 * On authentication failure all credential state is scrubbed from the VC so
 * stale values can never leak into a later attempt.
 */
int smb_sm_ssnsetup(struct smb_vc *vcp, struct smbioc_setup *sspec,
					vfs_context_t context)
{
	int error;
	
	/*
	 * Call smb_sm_lookupint to verify that the vcp is still on the
	 * list. If not found then something really bad has happen. Log
	 * it and just return the error. If smb_sm_lookupint returns without
	 * an error then the vcp will be locked and a refcnt will be taken.
	 */
	error = smb_sm_lookupint(NULL, 0, NULL, 0, &vcp);
	if (error) {
		SMBERROR("The virtual circtuit was not found: error = %d\n", error);
		return error;
	}
	
	if ((vcp->vc_flags & SMBV_AUTH_DONE) == SMBV_AUTH_DONE)
		goto done;	/* Nothing more to do here */
	
	/* Remove any user setable items */
	vcp->vc_flags &= ~SMBV_USER_LAND_MASK;
	/* Now add the users setable items */
	vcp->vc_flags |= (sspec->ioc_userflags & SMBV_USER_LAND_MASK);
	/*
	 * Reset the username, password, domain, kerb client and service names. We
	 * never want to use any values left over from any previous calls.
	 */
	if (vcp->vc_username != NULL) {
		SMB_FREE(vcp->vc_username, M_SMBSTR);
	}
	if (vcp->vc_pass != NULL) {
		SMB_FREE(vcp->vc_pass, M_SMBSTR);
	}
	if (vcp->vc_domain != NULL) {
		SMB_FREE(vcp->vc_domain, M_SMBSTR);
	}
	if (vcp->vc_gss.gss_cpn != NULL) {
		SMB_FREE(vcp->vc_gss.gss_cpn, M_SMBSTR);
	}
	/*
	 * Freeing the SPN will make sure we never use the hint. Remember that the
	 * gss_spn contains the hint from the negotiate. We now require user
	 * land to send us a SPN, if we are going to use one.
	 */
	if (vcp->vc_gss.gss_spn != NULL) {
		SMB_FREE(vcp->vc_gss.gss_spn, M_SMBSTR);
	}
	vcp->vc_username = smb_strndup(sspec->ioc_user, sizeof(sspec->ioc_user));
	vcp->vc_pass = smb_strndup(sspec->ioc_password, sizeof(sspec->ioc_password));
	vcp->vc_domain = smb_strndup(sspec->ioc_domain, sizeof(sspec->ioc_domain));
	
	if ((vcp->vc_pass == NULL) || (vcp->vc_domain == NULL) ||
		(vcp->vc_username == NULL)) {
		error = ENOMEM;
		goto done;
	}
	
	/* GSS principal names are only set if we are doing kerberos or ntlmssp */
	if (sspec->ioc_gss_client_size) {
		vcp->vc_gss.gss_cpn = smb_memdupin(sspec->ioc_gss_client_name, sspec->ioc_gss_client_size);
	}
	vcp->vc_gss.gss_cpn_len = sspec->ioc_gss_client_size;
	vcp->vc_gss.gss_client_nt = sspec->ioc_gss_client_nt;
	
	if (sspec->ioc_gss_target_size) {
		vcp->vc_gss.gss_spn = smb_memdupin(sspec->ioc_gss_target_name, sspec->ioc_gss_target_size);
	}
	vcp->vc_gss.gss_spn_len = sspec->ioc_gss_target_size;
	vcp->vc_gss.gss_target_nt = sspec->ioc_gss_target_nt;
	if (!(sspec->ioc_userflags & SMBV_ANONYMOUS_ACCESS)) {
		SMB_LOG_AUTH("client size = %d client name type = %d\n",
					 sspec->ioc_gss_client_size, vcp->vc_gss.gss_client_nt);
		SMB_LOG_AUTH("taget size = %d target name type = %d\n",
					 sspec->ioc_gss_target_size, vcp->vc_gss.gss_target_nt);
	}
	
	error = smb_vc_ssnsetup(vcp);
	/* If no error then this virtual circuit has been authorized */
	if (error == 0) {
		smb_gss_ref_cred(vcp);
		vcp->vc_flags |= SMBV_AUTH_DONE;
	}
	
done:
	if (error) {
		/*
		 * Authorization failed, reset any authorization
		 * information. This includes removing guest access,
		 * user name, password and the domain name. We should
		 * not ever return these values after authorization
		 * fails.
		 */
		vcp->vc_flags &= ~(SMBV_GUEST_ACCESS | SMBV_PRIV_GUEST_ACCESS |
						   SMBV_KERBEROS_ACCESS | SMBV_ANONYMOUS_ACCESS);
		SMB_FREE(vcp->vc_username, M_SMBSTR);
		SMB_FREE(vcp->vc_pass, M_SMBSTR);
		SMB_FREE(vcp->vc_domain, M_SMBSTR);
		SMB_FREE(vcp->vc_gss.gss_cpn, M_SMBSTR);
		SMB_FREE(vcp->vc_gss.gss_spn, M_SMBSTR);
		vcp->vc_gss.gss_spn_len = 0;
		vcp->vc_gss.gss_cpn_len = 0;
	}
	
	/* Release the reference and lock that smb_sm_lookupint took on the vcp */
	smb_vc_put(vcp, context);
	return error;
}

/*
 * co_free callback for a share: release the share name, destroy its locks,
 * undo smb_co_init, and free the structure.
 */
static void smb_share_free(struct smb_connobj *cp)
{
	struct smb_share *share = (struct smb_share *)cp;
	
	SMB_FREE(share->ss_name, M_SMBSTR);
	lck_mtx_destroy(&share->ss_stlock, ssst_lck_group);
	lck_mtx_destroy(&share->ss_shlock, ssst_lck_group);
	lck_mtx_destroy(&share->ss_fid_lock, fid_lck_grp);
	smb_co_done(SSTOCP(share));
	SMB_FREE(share, M_SMBCONN);
}

/*
 * co_gone callback for a share: send the tree disconnect to the server
 * before the share is freed.
 */
static void smb_share_gone(struct smb_connobj *cp, vfs_context_t context)
{
	struct smb_share *share = (struct smb_share *)cp;
	
	DBG_ASSERT(share);
	DBG_ASSERT(SSTOVC(share));
	DBG_ASSERT(SSTOVC(share)->vc_iod);
	smb_smb_treedisconnect(share, context);
}

/* Take a reference on the share. */
void smb_share_ref(struct smb_share *share)
{
	smb_co_ref(SSTOCP(share));
}

/* Drop a reference on the share; may trigger tree disconnect and free. */
void smb_share_rele(struct smb_share *share, vfs_context_t context)
{
	smb_co_rele(SSTOCP(share), context);
}

/*
 * Allocate share structure and attach it to the given VC. The vcp
 * needs to be locked on entry. Share will be returned in unlocked state,
 * but will have a reference on it.
 */
static int
smb_share_create(struct smb_vc *vcp, struct smbioc_share *shspec,
				 struct smb_share **outShare, vfs_context_t context)
{
	struct smb_share *share;
	int i;
	
	/* Should never happen, but just to be safe */
	if (context == NULL)
		return ENOTSUP;
	
	SMB_MALLOC(share, struct smb_share *, sizeof(*share), M_SMBCONN, M_WAITOK | M_ZERO);
	if (share == NULL) {
		return ENOMEM;
	}
	share->ss_name = smb_strndup(shspec->ioc_share, sizeof(shspec->ioc_share));
	if (share->ss_name == NULL) {
		SMB_FREE(share, M_SMBCONN);
		return ENOMEM;
	}
	/* The smb_co_init routine locks the share and takes a reference */
	smb_co_init(SSTOCP(share), SMBL_SHARE, "smbss", vfs_context_proc(context));
	share->obj.co_free = smb_share_free;
	share->obj.co_gone = smb_share_gone;
	
	/* alloc FID mapping stuff */
	lck_mtx_init(&share->ss_fid_lock, fid_lck_grp, fid_lck_attr);
	for (i = 0; i < SMB_FID_TABLE_SIZE; i++) {
		LIST_INIT(&share->ss_fid_table[i].fid_list);
	}
	share->ss_fid_collisions = 0;
	share->ss_fid_inserted = 0;
	share->ss_fid_max_iter = 0;
	
	lck_mtx_init(&share->ss_shlock, ssst_lck_group, ssst_lck_attr);
	lck_mtx_init(&share->ss_stlock, ssst_lck_group, ssst_lck_attr);
	lck_mtx_lock(&share->ss_shlock);
	share->ss_mount = NULL;	/* Just to be safe clear it out */
	/* Set the default dead timer */
	share->ss_dead_timer = smbfs_deadtimer;
	lck_mtx_unlock(&share->ss_shlock);
	share->ss_tid = SMB_TID_UNKNOWN;
	share->ss_tree_id = SMB2_TID_UNKNOWN;
	
	/* unlock the share we no longer need the lock */
	smb_co_unlock(SSTOCP(share));
	/* Parent the share under the VC (takes a VC reference) */
	smb_co_addchild(VCTOCP(vcp), SSTOCP(share));
	*outShare = share;
	return (0);
}

/*
 * If we already have a connection on the share take a reference and return.
 * Otherwise create the share, add it to the vc list and then do a tree
 * connect.
 */
int smb_sm_tcon(struct smb_vc *vcp, struct smbioc_share *shspec,
				struct smb_share **shpp, vfs_context_t context)
{
	int error;
	
	*shpp = NULL;
	/*
	 * Call smb_sm_lookupint to verify that the vcp is still on the
	 * list. If not found then something really bad has happen. Log
	 * it and just return the error. If smb_sm_lookupint returns without
	 * an error then the vcp will be locked and a refcnt will be taken.
	 */
	error = smb_sm_lookupint(NULL, 0, NULL, 0, &vcp);
	if (error) {
		SMBERROR("The virtual circtuit was not found: error = %d\n", error);
		return error;
	}
	/* At this point we have a locked vcp create the share */
	error = smb_share_create(vcp, shspec, shpp, context);
	/*
	 * We hold a lock and reference on the vc. We are done with the vc lock
	 * so unlock the vc but hold on to the vc references.
	 */
	smb_vc_unlock(vcp);
	if (error == 0) {
		error = smb_smb_treeconnect(*shpp, context);
		if (error) {
			/* Let the share drain, so it can get removed */
			smb_share_rele(*shpp, context);
			*shpp = NULL; /* We failed reset it to NULL */
		}
	}
	if (*shpp && (error == 0)) {
		shspec->ioc_optionalSupport = (*shpp)->optionalSupport;
		/*
		 * ioc_fstype will always be 0 at this time because ss_fstype is filled
		 * in at mount time.
		 */
		shspec->ioc_fstype = (*shpp)->ss_fstype;
	}
	
	/* Release the reference that smb_sm_lookupint took on the vc */
	smb_vc_rele(vcp, context);
	return error;
}

/*
 * Access check for operations on the VC: allowed for guest VCs, for calls
 * with no context (strategy path), for the superuser, and for the VC owner.
 * Anyone else gets EACCES.
 */
int smb_vc_access(struct smb_vc *vcp, vfs_context_t context)
{
	if (SMBV_HAS_GUEST_ACCESS(vcp))
		return(0);
	
	/* The smbfs_vnop_strategy routine has no context, we always allow these */
	if (context == NULL) {
		return(0);
	}
	if ((vfs_context_suser(context) == 0) ||
		(kauth_cred_getuid(vfs_context_ucred(context)) == vcp->vc_uid))
		return (0);
	return (EACCES);
}

/* Run a synchronous protocol negotiate on the VC's iod thread. */
int smb_vc_negotiate(struct smb_vc *vcp, vfs_context_t context)
{
	return smb_iod_request(vcp->vc_iod,
						   SMBIOD_EV_NEGOTIATE | SMBIOD_EV_SYNC, context);
}

/* Run a synchronous session setup (authentication) on the VC's iod thread. */
int smb_vc_ssnsetup(struct smb_vc *vcp)
{
	return smb_iod_request(vcp->vc_iod,
						   SMBIOD_EV_SSNSETUP | SMBIOD_EV_SYNC, NULL);
}

static char smb_emptypass[] = "";

/* Return the VC password, or the empty string if none was set. */
const char * smb_vc_getpass(struct smb_vc *vcp)
{
	if (vcp->vc_pass)
		return vcp->vc_pass;
	return smb_emptypass;
}

/*
 * They are in share level security and the share requires
 * a password. Use the vcp password always. Only required for
 * Windows 98, should drop support someday.
 */
const char * smb_share_getpass(struct smb_share *share)
{
	DBG_ASSERT(SSTOVC(share));
	return smb_vc_getpass(SSTOVC(share));
}

/*
 * The reconnect code needs to get a reference on the vc. First make sure
 * this vc is still in the list and no one has released it yet. If smb_sm_lookupint
 * finds it we will have it locked and a reference on it. Next make sure it is
 * not being released.
 */
int smb_vc_reconnect_ref(struct smb_vc *vcp, vfs_context_t context)
{
	int error;
	
	error = smb_sm_lookupint(NULL, 0, NULL, 0, &vcp);
	if (error)
		return error;
	
	smb_vc_unlock(vcp);
	/*
	 * This vc is being released, just give up.
	 * NOTE(review): ss_flags is used here on a vc — presumably ss_flags and
	 * the vc equivalent both map to obj.co_flags, making this correct but
	 * misleadingly named; confirm against smb_conn.h.
	 */
	if (vcp->ss_flags & SMBO_GONE) {
		smb_vc_rele(vcp, context);
		error = ENOTCONN;
	}
	return error;
}

/*
 * Called from a thread that is not the main iod thread. Prevents us from
 * getting into a deadlock.
 */
static void smb_reconnect_rel_thread(void *arg)
{
	struct smbiod *iod = arg;
	
	/* We are done release the reference */
	smb_vc_rele(iod->iod_vc, iod->iod_context);
}

/*
 * The reconnect code takes a reference on the vc. So we need to release that
 * reference, but if we are the last reference the smb_vc_rele routine will
 * attempt to destroy the vc, which will then attempt to destroy the main iod
 * thread for the vc. The reconnect code is running under the main iod thread,
 * which means we can't destroy the thread from that thread without hanging. So
 * start a new thread to just release the reference and do any cleanup required.
 * This will be a short lived thread that just hangs around long enough to do the
 * work required to release the vc reference.
 */
void smb_vc_reconnect_rel(struct smb_vc *vcp)
{
	struct smbiod *iod = vcp->vc_iod;
	thread_t thread;
	int error;
	
	do {
		error = kernel_thread_start((thread_continue_t)smb_reconnect_rel_thread,
									iod, &thread);
		/*
		 * Never expect an error here, but just in case log it, sleep for one
		 * second and try again. Nothing else we can do at this point.
		 */
		if (error) {
			struct timespec ts;
			
			SMBERROR("Starting the reconnect vc release thread failed! %d\n",
					 error);
			ts.tv_sec = 1;
			ts.tv_nsec = 0;
			msleep(iod, NULL, PWAIT | PCATCH, "smb_vc_reconnect_rel", &ts);
		}
	} while (error);
	/* kernel_thread_start returns the thread with an extra reference; drop it */
	thread_deallocate(thread);
}