1/* 2 * Copyright (c) 2000, 2001 Boris Popov 3 * All rights reserved. 4 * 5 * Portions Copyright (C) 2001 - 2012 Apple Inc. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Boris Popov. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 */ 35 36#include <sys/types.h> 37#include <sys/smb_byte_order.h> 38#include <sys/smb_apple.h> 39 40#ifdef KERNEL 41#include <sys/kpi_mbuf.h> 42#include <netsmb/smb_subr.h> 43#else // KERNEL 44#include <netsmb/upi_mbuf.h> 45#include <netsmb/smb_lib.h> 46#endif // KERNEL 47 48#include <sys/mchain.h> 49 50/* 51 * Various helper functions 52 */ 53size_t m_fixhdr(mbuf_t m0) 54{ 55 mbuf_t m = m0; 56 size_t len = 0; 57 58 while (m) { 59 len += mbuf_len(m); 60 m = mbuf_next(m); 61 } 62 mbuf_pkthdr_setlen(m0, len); 63 return mbuf_pkthdr_len(m0); 64} 65 66#ifdef KERNEL 67 68/* 69 * There is no KPI call for m_cat. Josh gave me the following 70 * code to replace m_cat. 71 */ 72void 73mbuf_cat_internal(mbuf_t md_top, mbuf_t m0) 74{ 75 mbuf_t m; 76 77 for (m = md_top; mbuf_next(m) != NULL; m = mbuf_next(m)) 78 ; 79 mbuf_setnext(m, m0); 80} 81 82/* 83 * The only way to get min cluster size is to make a 84 * mbuf_stat call. We really only need to do this once 85 * since minclsize is a compile time option. 86 */ 87static uint32_t minclsize = 0; 88 89static uint32_t mbuf_minclsize() 90{ 91 struct mbuf_stat stats; 92 93 if (! minclsize) { 94 mbuf_stats(&stats); 95 minclsize = stats.minclsize; 96 } 97 return minclsize; 98} 99 100/* 101 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits 102 * best) and return a pointer to the top of the allocated chain. If m is 103 * non-null, then we assume that it is a single mbuf or an mbuf chain to 104 * which we want len bytes worth of mbufs and/or clusters attached, and so 105 * if we succeed in allocating it, we will just return a pointer to m. 106 * 107 * If we happen to fail at any point during the allocation, we will free 108 * up everything we have already allocated and return NULL. 
 *
 */
static
mbuf_t smb_mbuf_getm(mbuf_t m, size_t len, int how, int type)
{
	size_t mbuf_space;
	mbuf_t top, tail, mp = NULL, mtail = NULL;

	KASSERT((ssize_t)len >= 0, ("len is < 0 in smb_mbuf_getm"));

	/* Allocate the first mbuf; upgrade it to a cluster if len is large */
	if (mbuf_get(how, type, &mp))
		return (NULL);
	else if (len > mbuf_minclsize()) {
		/* Cluster attach failed (or no MBUF_EXT flag): free and bail */
		if ((mbuf_mclget(how, type, &mp)) ||
		    ((mbuf_flags(mp) & MBUF_EXT) == 0)) {
			mbuf_free(mp);
			return (NULL);
		}
	}
	mbuf_setlen(mp, 0);
	mbuf_space = mbuf_trailingspace(mp);
	/* len is a size_t so it can't go negative */
	if (mbuf_space > len)
		len = 0;		/* Done */
	else
		len -= mbuf_space;	/* Need another mbuf or more */

	/* Remember the tail of the caller's chain so we can splice later */
	if (m != NULL)
		for (mtail = m; mbuf_next(mtail) != NULL; mtail = mbuf_next(mtail));
	else
		m = mp;

	/* Keep allocating mbufs/clusters until len bytes of space exist */
	top = tail = mp;
	while (len > 0) {
		if (mbuf_get(how, type, &mp))
			goto failed;

		mbuf_setnext(tail, mp);
		tail = mp;
		if (len > mbuf_minclsize()) {
			if ((mbuf_mclget(how, type, &mp)) ||
			    ((mbuf_flags(mp) & MBUF_EXT) == 0))
				goto failed;
		}
		mbuf_setlen(mp, 0);
		mbuf_space = mbuf_trailingspace(mp);
		/* len is a size_t so it can't go negative */
		if (mbuf_space > len)
			len = 0;		/* Done */
		else
			len -= mbuf_space;	/* Need another mbuf or more */
	}

	/* Splice the freshly allocated chain onto the caller's chain, if any */
	if (mtail != NULL)
		mbuf_setnext(mtail, top);
	return (m);

failed:
	/* Free everything allocated here (top chain); caller's m is untouched */
	mbuf_freem(top);
	return (NULL);
}

#else // KERNEL

/*
 * We handle this routine differently in userland, than the kernel. See the
 * above kernel code for more details.
 */
static
mbuf_t smb_mbuf_getm(mbuf_t mbuf, size_t size, uint32_t how, uint32_t type)
{
	mbuf_t nm = NULL, mtail = NULL;

	/* Userland: a single cluster big enough for size bytes is sufficient */
	if (mbuf_getcluster( how, type, size, &nm))
		return NULL;
	if (mbuf != NULL) {
		/* Find the tail of the existing chain */
		for (mtail = mbuf; mbuf_next(mtail) != NULL; mtail = mbuf_next(mtail));
	} else {
		mbuf = nm;
	}

	/* Append the new cluster to the caller's chain, if there was one */
	if (mtail != NULL)
		mbuf_setnext(mtail, nm);
	return mbuf;
}

/*
 * Collapse the whole mbuf chain in mbp into one contiguous cluster mbuf.
 * Replaces mb_top/mb_cur with the new single mbuf on success.
 *
 * Returns 0 on success or an errno-style value from the mbuf KPI.
 */
int mb_pullup(mbchain_t mbp)
{
	mbuf_t nm;
	size_t size;
	int error;

	/* Its all in one mbuf, nothing to do here */
	if (mbuf_next(mbp->mb_top) == NULL) {
		return 0;
	}
	/* We have a chain, assume that we need to reallocate the buffer */
	size = mb_fixhdr(mbp);
	error = mbuf_getcluster(MBUF_WAITOK, MBUF_TYPE_DATA, size, &nm);
	if (error) {
		return error;
	}
	error = mbuf_copydata(mbp->mb_top, 0, size, mbuf_data(nm));
	if (error) {
		mbuf_freem(nm);
		return error;
	}
	mbuf_pkthdr_setlen(nm, size);
	mbuf_setlen(nm, size);
	/* Free the old chain and make the new mbuf both top and current */
	mbuf_freem(mbp->mb_top);
	mbp->mb_top = nm;
	mbp->mb_cur = nm;
	return 0;
}

/*
 * Return a buffer of size from the mbuf chain, the buffer must be contiguous
 * and fit in one mbuf. If not enough room in this mbuf create an mbuf that
 * has enough room and add it to the chain.
 *
 * Returns a pointer just past the current mbuf's data (where size bytes can
 * be written), or NULL if a cluster could not be allocated.  Does NOT
 * advance any counters; pair with mb_consume() after writing.
 */
void * mb_getbuffer(mbchain_t mbp, size_t size)
{
	while (mbp->mb_mleft < size) {
		mbuf_t nm;

		/* NOTE(review): requests mb_mleft+size, presumably to guarantee
		 * a contiguous span of at least size — confirm intent */
		if (mbuf_getcluster(MBUF_WAITOK, MBUF_TYPE_DATA, mbp->mb_mleft+size, &nm))
			return NULL;
		mbuf_setlen(nm, 0);
		mbuf_setnext(mbp->mb_cur, nm);
		mbp->mb_cur = nm;
		mbp->mb_mleft += mbuf_trailingspace(mbp->mb_cur);
	}
	return (void *)((uint8_t *)mbuf_data(mbp->mb_cur) + mbuf_len(mbp->mb_cur));
}

/*
 * Consume size number of bytes.
 * Advances the chain's bookkeeping after the caller wrote size bytes into
 * the space returned by mb_getbuffer(): shrinks the space left, grows the
 * running byte counters, and extends the current mbuf's length.
 */
void mb_consume(mbchain_t mbp, size_t size)
{
	mbp->mb_mleft -= size;
	mbp->mb_count += size;
	mbp->mb_len += size;
	mbuf_setlen(mbp->mb_cur, mbuf_len(mbp->mb_cur)+size);
}

#endif // KERNEL

/*
 * Routines for putting data into an mbuf chain
 */

/*
 * Initialize the mbchain structure around an existing mbuf m: zero all
 * fields, make m both top and current, and record its trailing space.
 */
static
void mb_initm(mbchain_t mbp, mbuf_t m)
{
	bzero(mbp, sizeof(*mbp));
	mbp->mb_top = mbp->mb_cur = m;
	mbp->mb_mleft = mbuf_trailingspace(m);
}

/*
 * Allocate a packet-header mbuf and initialize mbp around it.
 * Returns 0 on success or ENOBUFS if allocation fails.
 */
int
mb_init(mbchain_t mbp)
{
	mbuf_t m = NULL;

	/* mbuf_gethdr now initializes all of the fields */
	if (mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &m))
		return ENOBUFS;
	mb_initm(mbp, m);
	return 0;
}

/*
 * Free the whole chain owned by mbp, if any, and clear the top pointer.
 */
void
mb_done(mbchain_t mbp)
{
	if (mbp->mb_top) {
		mbuf_freem(mbp->mb_top);
		mbp->mb_top = NULL;
	}
}

/*
 * Detach and return the mbuf chain; ownership transfers to the caller,
 * who is now responsible for freeing it.
 */
mbuf_t mb_detach(mbchain_t mbp)
{
	mbuf_t m;

	m = mbp->mb_top;
	mbp->mb_top = NULL;
	return m;
}

/*
 * Recompute and return the total length of the chain (see m_fixhdr).
 */
size_t mb_fixhdr(mbchain_t mbp)
{
	return m_fixhdr(mbp->mb_top);
}

/*
 * Check if object of size 'size' fits the current position and
 * allocate new mbuf if not. Advance pointers and increase length of mbuf(s).
 * Return pointer to the object placeholder or NULL if any error occurred.
 * Note: size should be <= MLEN if in kernel code
 */
void * mb_reserve(mbchain_t mbp, size_t size)
{
	mbuf_t m, nm;
	caddr_t bpos;

	m = mbp->mb_cur;
	if (mbp->mb_mleft < size) {
		/* Not enough room in the current mbuf; allocate a fresh one */
		if (mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &nm))
			return NULL;
		/* This check was done up above before KPI code */
		if (size > mbuf_maxlen(nm)) {
			/* NOTE(review): %ld with a size_t argument — %zu would be
			 * the exact-match specifier; benign on LP64 but verify */
#ifdef KERNEL
			SMBERROR("mb_reserve: size = %ld\n", size);
#else // KERNEL
			smb_log_info("%s - mb_reserve: size = %ld, syserr = %s",
						 ASL_LEVEL_ERR, __FUNCTION__, size, strerror(EBADRPC));
#endif // KERNEL
			mbuf_freem(nm);
			return NULL;
		}
		/* Link the new mbuf in and make it the current one */
		mbuf_setnext(m, nm);
		mbp->mb_cur = nm;
		m = nm;
		mbuf_setlen(m, 0);
		mbp->mb_mleft = mbuf_trailingspace(m);
	}
	/* Account for the reserved bytes and hand back their start address */
	mbp->mb_mleft -= size;
	mbp->mb_count += size;
	mbp->mb_len += size;
	bpos = (caddr_t)((uint8_t *)mbuf_data(m) + mbuf_len(m));
	mbuf_setlen(m, mbuf_len(m)+size);
	return bpos;
}

/*
 * Userland starts at word count not the smb header, lucky for us
 * word count starts at an even boundary. We will need to relook at
 * this when doing SMB 2/3. Hopefully by then we will be using the whole
 * buffer for both userland and kernel.
 */
int mb_put_padbyte(mbchain_t mbp)
{
	uintptr_t dst;
	char x = 0;

	dst = (uintptr_t)((uint8_t *)mbuf_data(mbp->mb_cur) + mbuf_len(mbp->mb_cur));
	/* only add padding if address is odd */
	if (dst & 1) {
		return mb_put_mem(mbp, &x, 1, MB_MSYSTEM);
	} else {
		return 0;
	}
}

/* Append a single byte to the chain. */
int mb_put_uint8(mbchain_t mbp, uint8_t x)
{
	return mb_put_mem(mbp, (caddr_t)&x, sizeof(x), MB_MSYSTEM);
}

/* Append a 16-bit value in big-endian byte order. */
int mb_put_uint16be(mbchain_t mbp, uint16_t x)
{
	x = htobes(x);
	return mb_put_mem(mbp, (caddr_t)&x, sizeof(x), MB_MSYSTEM);
}

/* Append a 16-bit value in little-endian byte order. */
int mb_put_uint16le(mbchain_t mbp, uint16_t x)
{
	x = htoles(x);
	return mb_put_mem(mbp, (caddr_t)&x, sizeof(x), MB_MSYSTEM);
}

/* Append a 32-bit value in big-endian byte order. */
int mb_put_uint32be(mbchain_t mbp, uint32_t x)
{
	x = htobel(x);
	return mb_put_mem(mbp, (caddr_t)&x, sizeof(x), MB_MSYSTEM);
}

/* Append a 32-bit value in little-endian byte order. */
int mb_put_uint32le(mbchain_t mbp, uint32_t x)
{
	x = htolel(x);
	return mb_put_mem(mbp, (caddr_t)&x, sizeof(x), MB_MSYSTEM);
}

/* Append a 64-bit value in big-endian byte order. */
int mb_put_uint64be(mbchain_t mbp, uint64_t x)
{
	x = htobeq(x);
	return mb_put_mem(mbp, (caddr_t)&x, sizeof(x), MB_MSYSTEM);
}

/* Append a 64-bit value in little-endian byte order. */
int mb_put_uint64le(mbchain_t mbp, uint64_t x)
{
	x = htoleq(x);
	return mb_put_mem(mbp, (caddr_t)&x, sizeof(x), MB_MSYSTEM);
}

/*
 * Copy size bytes from source into the chain, growing it with
 * smb_mbuf_getm() as needed.  type selects the copy method:
 *   MB_MINLINE - byte-by-byte inline copy
 *   MB_MSYSTEM - bcopy
 *   MB_MZERO   - write zeros (source is ignored for data, still advanced)
 * Returns 0 on success or ENOBUFS if the chain could not be grown.
 */
int mb_put_mem(mbchain_t mbp, const char *source, size_t size, int type)
{
	mbuf_t m;
	caddr_t dst;
	const char * src;
	size_t mleft, count, cplen;

	m = mbp->mb_cur;
	mleft = mbp->mb_mleft;

	while (size > 0) {
		if (mleft == 0) {
			/* Current mbuf full; extend the chain if at its end */
			if (mbuf_next(m) == NULL) {
				m = smb_mbuf_getm(m, size, MBUF_WAITOK, MBUF_TYPE_DATA);
				if (m == NULL)
					return ENOBUFS;
			}
			m = mbuf_next(m);
			mleft = mbuf_trailingspace(m);
			continue;
		}
		/* Copy as much as fits in this mbuf */
		cplen = mleft > size ? size : mleft;
		dst = (caddr_t)((uint8_t *)mbuf_data(m) + mbuf_len(m));
		switch (type) {
			case MB_MINLINE:
				for (src = source, count = cplen; count; count--)
					*dst++ = *src++;
				break;
			case MB_MSYSTEM:
				bcopy(source, dst, cplen);
				break;
			case MB_MZERO:
				bzero(dst, cplen);
				break;
		}
		size -= cplen;
		source += cplen;
		mbuf_setlen(m, mbuf_len(m)+cplen);
		mleft -= cplen;
		mbp->mb_count += cplen;
		mbp->mb_len += cplen;
	}
	mbp->mb_cur = m;
	mbp->mb_mleft = mleft;
	return 0;
}

/*
 * Append an existing mbuf chain m to the end of mbp's chain and account
 * for its bytes.  Always returns 0.  Note: m must be non-NULL.
 */
int mb_put_mbuf(mbchain_t mbp, mbuf_t m)
{
	mbuf_setnext(mbp->mb_cur, m);
	while (m) {
		mbp->mb_count += mbuf_len(m);
		mbp->mb_len += mbuf_len(m);
		if (mbuf_next(m) == NULL)
			break;
		m = mbuf_next(m);
	}
	/* m is now the last mbuf of the appended chain */
	mbp->mb_mleft = mbuf_trailingspace(m);
	mbp->mb_cur = m;
	return 0;
}

#ifdef KERNEL
/*
 * copies a uio scatter/gather list to an mbuf chain.
 *
 * Copies up to size bytes (bounded by the uio's residual count) from
 * uiop into the chain, growing the chain as needed.  Returns 0 on
 * success, ENOBUFS on allocation failure, or the error from uiomove.
 */
int mb_put_uio(mbchain_t mbp, uio_t uiop, size_t size)
{
	int error;
	size_t mleft, cplen;
	void *dst;
	mbuf_t m;

	m = mbp->mb_cur;	/* Mbuf to start copying the data into */
	mleft = mbp->mb_mleft;	/* How much space is left in this mbuf */

	while ((size > 0) && (uio_resid(uiop))) {
		/* Do we need another mbuf, is this one full */
		if (mleft == 0) {
			if (mbuf_next(m) == NULL) {
				m = smb_mbuf_getm(m, size, MBUF_WAITOK, MBUF_TYPE_DATA);
				if (m == NULL)
					return ENOBUFS;
			}
			m = mbuf_next(m);
			mleft = mbuf_trailingspace(m);
			continue;
		}
		/* Get the amount of data to copy and a pointer to the mbuf location */
		cplen = mleft > size ? size : mleft;
		dst = (uint8_t *)mbuf_data(m) + mbuf_len(m);
		/* Copy the data into the mbuf */
		error = uiomove(dst, (int)cplen, uiop);
		if (error)
			return error;

		size -= cplen;
		mbuf_setlen(m, mbuf_len(m)+cplen);
		mbp->mb_count += cplen;
		mbp->mb_len += cplen;
		mleft -= cplen;
	}
	mbp->mb_cur = m;
	mbp->mb_mleft = mleft;
	return 0;
}

/*
 * Given a user land pointer place the data in a mbuf chain.
 *
 * Wraps bufp in a single-iovec uio (32- or 64-bit address space per the
 * vfs context) and feeds it to mb_put_uio.  Returns ENOMEM if the uio
 * cannot be created, otherwise mb_put_uio's result.
 */
int mb_put_user_mem(mbchain_t mbp, user_addr_t bufp, int size, off_t offset, vfs_context_t context)
{
	user_size_t nbyte = size;
	uio_t auio;
	int error;

	if (vfs_context_is64bit(context))
		auio = uio_create(1, offset, UIO_USERSPACE64, UIO_WRITE);
	else
		auio = uio_create(1, offset, UIO_USERSPACE32, UIO_WRITE);

	if (! auio )
		return ENOMEM;

	uio_addiov(auio, bufp, nbyte);
	error = mb_put_uio(mbp, auio, size);
	uio_free(auio);
	return error;
}
#endif // KERNEL

/*
 * Routines for fetching data from an mbuf chain
 */

/*
 * Initialize the mdchain parser mdp over an existing mbuf chain m:
 * position the read cursor at the start of m's data.
 */
void md_initm(mdchain_t mdp, mbuf_t m)
{
	bzero(mdp, sizeof(*mdp));
	mdp->md_top = mdp->md_cur = m;
	mdp->md_pos = mbuf_data(m);
	mdp->md_len = 0;
}

/*
 * Allocate a packet-header mbuf and initialize mdp around it.
 * Returns 0 on success or ENOBUFS if allocation fails.
 */
int md_init(mdchain_t mdp)
{
	mbuf_t m = NULL;

	/* mbuf_gethdr now initializes all of the fields */
	if (mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &m))
		return ENOBUFS;
	md_initm(mdp, m);
	return 0;
}

#ifndef KERNEL
/*
 * Userland-only: initialize mdp with a receive buffer of the given size —
 * a plain header mbuf when size fits in one page, otherwise a cluster.
 * Returns 0 on success or ENOBUFS on allocation failure.
 */
int md_init_rcvsize(mdchain_t mdp, size_t size)
{
	mbuf_t m = NULL;

	if (size <= (size_t)getpagesize()) {
		if (mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &m))
			return ENOBUFS;
	} else if ((mbuf_getcluster(MBUF_WAITOK, MBUF_TYPE_DATA, size, &m)) != 0)
		return ENOBUFS;
	md_initm(mdp, m);
	return 0;
}
#endif // KERNEL

/*
 * Copy the read position of mdp into shadow so the caller can parse ahead
 * without disturbing mdp.  The mbufs themselves are shared, not copied;
 * shadow must not be md_done()'d while mdp still owns the chain.
 */
void md_shadow_copy(const mdchain_t mdp, mdchain_t shadow)
{
	shadow->md_top = mdp->md_top;	/* head of mbufs chain */
	shadow->md_cur = mdp->md_cur;	/* current mbuf */
	shadow->md_pos = mdp->md_pos;	/* offset in the current mbuf */
}

/*
 * Free the whole chain owned by mdp, if any, and clear the top pointer.
 */
void md_done(mdchain_t mdp)
{
	if (mdp->md_top) {
		mbuf_freem(mdp->md_top);
		mdp->md_top = NULL;
	}
}

#ifdef KERNEL
/*
 * Append a separate mbuf chain. It is caller responsibility to prevent
 * multiple calls to fetch/record routines.
 *
 * Records are linked via the nextpkt pointer; top becomes the last record.
 */
void md_append_record(mdchain_t mdp, mbuf_t top)
{
	mbuf_t m;

	if (mdp->md_top == NULL) {
		md_initm(mdp, top);
		return;
	}
	m = mdp->md_top;
	while (mbuf_nextpkt(m))
		m = mbuf_nextpkt(m);
	mbuf_setnextpkt(m, top);
	mbuf_setnextpkt(top, NULL);
	return;
}

/*
 * Put next record in place of existing
 *
 * Frees the current record and re-initializes mdp on the next one.
 * Returns ENOENT when there is no current or no next record.
 */
int md_next_record(mdchain_t mdp)
{
	mbuf_t m;

	if (mdp->md_top == NULL)
		return ENOENT;
	m = mbuf_nextpkt(mdp->md_top);
	md_done(mdp);
	if (m == NULL)
		return ENOENT;
	md_initm(mdp, m);
	return 0;
}
#endif // KERNEL

/* Read one byte from the chain. */
int md_get_uint8(mdchain_t mdp, uint8_t *x)
{
	return md_get_mem(mdp, (caddr_t)x, 1, MB_MINLINE);
}

/* Read a 16-bit value in wire order (no byte swapping). */
int md_get_uint16(mdchain_t mdp, uint16_t *x)
{
	return md_get_mem(mdp, (caddr_t)x, 2, MB_MINLINE);
}

/* Read a little-endian 16-bit value, converting to host order. */
int md_get_uint16le(mdchain_t mdp, uint16_t *x)
{
	uint16_t v;
	int error = md_get_uint16(mdp, &v);

	if (x && (error == 0))
		*x = letohs(v);
	return error;
}

/* Read a big-endian 16-bit value, converting to host order. */
int md_get_uint16be(mdchain_t mdp, uint16_t *x)
{
	uint16_t v;
	int error = md_get_uint16(mdp, &v);

	if (x && (error == 0))
		*x = betohs(v);
	return error;
}

/* Read a 32-bit value in wire order (no byte swapping). */
int md_get_uint32(mdchain_t mdp, uint32_t *x)
{
	return md_get_mem(mdp, (caddr_t)x, 4, MB_MINLINE);
}

/* Read a big-endian 32-bit value, converting to host order. */
int md_get_uint32be(mdchain_t mdp, uint32_t *x)
{
	uint32_t v;
	int error;

	error = md_get_uint32(mdp, &v);
	if (x && (error == 0))
		*x = betohl(v);
	return error;
}

/* Read a little-endian 32-bit value, converting to host order. */
int md_get_uint32le(mdchain_t mdp, uint32_t *x)
{
	uint32_t v;
	int error;

	error = md_get_uint32(mdp, &v);
	if (x && (error == 0))
		*x = letohl(v);
	return error;
}

/* Read a 64-bit value in wire order (no byte swapping). */
int md_get_uint64(mdchain_t mdp, uint64_t *x)
{
	return md_get_mem(mdp, (caddr_t)x, 8, MB_MINLINE);
}

/* Read a big-endian 64-bit value, converting to host order. */
int md_get_uint64be(mdchain_t mdp, uint64_t *x)
{
	uint64_t v;
	int error;

	error = md_get_uint64(mdp, &v);
	if (x && (error == 0))
		*x = betohq(v);
	return error;
}

/* Read a little-endian 64-bit value, converting to host order. */
int md_get_uint64le(mdchain_t mdp, uint64_t *x)
{
	uint64_t v;
	int error;

	error = md_get_uint64(mdp, &v);
	if (x && (error == 0))
		*x = letohq(v);
	return error;
}

/*
 * Return the number of unread bytes remaining in the chain, counting
 * from the current read position (md_cur/md_pos) to the end.
 */
size_t md_get_size(mdchain_t mdp)
{
	mbuf_t m = mdp->md_cur;
	size_t start_pos = (size_t)mdp->md_pos;
	size_t len = 0;

	while (m) {
		if (start_pos) {
			/* Partial first mbuf: count from the read cursor only */
			len += (size_t)mbuf_data(m) + mbuf_len(m) - start_pos;
			start_pos = 0;	/* only care the first time through */
		} else {
			len += mbuf_len(m);
		}
		m = mbuf_next(m);
	}
	return len;
}

/*
 * This routine relies on the fact that we are looking for the length of a UTF16
 * string that must start on an even boundary.
 */
size_t md_get_utf16_strlen(mdchain_t mdp)
{
	mbuf_t m = mdp->md_cur;
	u_char *s = mdp->md_pos;	/* Points to the start of the utf16 string in the mbuf data */
	size_t size;
	size_t max_count, count, ii;
	uint16_t *ustr;

	size = 0;
	while (m) {
		/* Max amount of data we can scan in this mbuf */
		max_count = count = (size_t)mbuf_data(m) + mbuf_len(m) - (size_t)s;
		/* Trail byte in this mbuf ignore it for now */
		max_count &= ~1;
		/* Scan the mbuf counting the bytes */
		ustr = (uint16_t *)((void *)s);
		for (ii = 0; ii < max_count; ii += 2) {
			if (*ustr++ == 0) {
				/* Found the end we are done */
				goto done;
			}
			size += 2;
		}
		/* Get the next mbuf to scan */
		m = mbuf_next(m);
		if (m) {
			s = mbuf_data(m);
			/* Did the previous mbuf have an odd length */
			if (count & 1) {
				/* Check the last byte in that mbuf and the first byte in this one */
				if ((*((u_char *)ustr) == 0) && (*s == 0)) {
					/* Found the end we are done */
					goto done;
				}
				s += 1;
			}
		}
	}
done:
	/* Returns the byte length of the string, excluding the NUL terminator;
	 * if no terminator is found the remaining even-sized data is counted */
	return size;
}

/*
 * Copy size bytes out of the chain starting at the current read position.
 * target may be NULL to simply skip over the bytes.  type selects the
 * copy method (MB_MSYSTEM = bcopy, MB_MINLINE = byte loop).
 *
 * Returns 0 on success; EBADRPC if the chain runs out of data first
 * (some callers rely on this — see notify change).  On success md_len
 * is advanced by the full requested size.
 */
int md_get_mem(mdchain_t mdp, caddr_t target, size_t size, int type)
{
	size_t size_request = size;
	mbuf_t m = mdp->md_cur;
	size_t count;
	u_char *s;

	while (size > 0) {
		if (m == NULL) {
			/* Note some calls expect this to happen, see notify change */
#ifdef KERNEL
			SMBWARNING("WARNING: Incomplete copy original size = %ld size = %ld\n", size_request, size);
#else // KERNEL
			smb_log_info("%s - WARNING: Incomplete copy original size = %ld size = %ld, syserr = %s",
						 ASL_LEVEL_DEBUG, __FUNCTION__, size_request, size, strerror(EBADRPC));
#endif // KERNEL
			return EBADRPC;
		}
		s = mdp->md_pos;
		/* Bytes remaining in the current mbuf past the read cursor */
		count = (size_t)mbuf_data(m) + mbuf_len(m) - (size_t)s;
		if (count == 0) {
			/* Current mbuf exhausted; move to the next one */
			mdp->md_cur = m = mbuf_next(m);
			if (m)
				mdp->md_pos = mbuf_data(m);
			continue;
		}
		if (count > size)
			count = size;
		size -= count;
		mdp->md_pos += count;
		if (target == NULL)
			continue;
		switch (type) {
			case MB_MSYSTEM:
				bcopy(s, target, count);
				break;
			case MB_MINLINE:
				/* Inline copy advances target itself, so skip the
				 * post-switch adjustment below */
				while (count--)
					*target++ = *s++;
				continue;
		}
		target += count;
	}

	mdp->md_len += size_request;
	return 0;
}

#ifdef KERNEL
/*
 * Clone the next size bytes of the chain into a new mbuf chain *ret
 * (shared storage via mbuf_copym), then advance the read cursor past
 * them.  Returns 0 on success or EBADRPC if the copy fails.
 */
int md_get_mbuf(mdchain_t mdp, size_t size, mbuf_t *ret)
{
	mbuf_t m = mdp->md_cur, rm;
	size_t offset = (size_t)mdp->md_pos - (size_t)mbuf_data(m);

	if (mbuf_copym(m, offset, size, MBUF_WAITOK, &rm))
		return EBADRPC;
	/* Skip over the copied bytes (NULL target = discard) */
	md_get_mem(mdp, NULL, size, MB_MZERO);
	*ret = rm;
	return 0;
}

/*
 * Copy up to size bytes (bounded by the uio's residual count) from the
 * chain into the uio.  Returns 0 on success, EBADRPC if the chain runs
 * out of data, or the error from uiomove.
 */
int md_get_uio(mdchain_t mdp, uio_t uiop, int32_t size)
{
	size_t size_request = size;
	int32_t count;
	int error;
	mbuf_t m = mdp->md_cur;
	uint8_t *src;

	/* Read in the data into the uio */
	while ((size > 0) && (uio_resid(uiop))) {

		if (m == NULL) {
			SMBERROR("UIO incomplete copy\n");
			return EBADRPC;
		}
		/* Get a pointer to the mbuf data */
		src = mdp->md_pos;
		count = (int)((uint8_t *)mbuf_data(m) + mbuf_len(m) - src);
		if (count == 0) {
			mdp->md_cur = m = mbuf_next(m);
			if (m)
				mdp->md_pos = mbuf_data(m);
			continue;
		}
		if (count > size)
			count = size;
		size -= count;
		mdp->md_pos += count;
		error = uiomove((void *)src, count, uiop);
		if (error)
			return error;
	}

	mdp->md_len += size_request;
	return 0;
}


/*
 * Given a user land pointer place the data in a mbuf chain.
 *
 * Wraps bufp in a single-iovec uio (32- or 64-bit address space per the
 * vfs context) and drains the chain into it via md_get_uio.  Returns
 * ENOMEM if the uio cannot be created, otherwise md_get_uio's result.
 */
int md_get_user_mem(mdchain_t mdp, user_addr_t bufp, int size, off_t offset,
					vfs_context_t context)
{
	user_size_t nbyte = size;
	uio_t auio;
	int error;

	if (vfs_context_is64bit(context))
		auio = uio_create(1, offset, UIO_USERSPACE64, UIO_READ);
	else
		auio = uio_create(1, offset, UIO_USERSPACE32, UIO_READ);

	if (! auio )
		return ENOMEM;

	uio_addiov(auio, bufp, nbyte);
	error = md_get_uio(mdp, auio, size);
	uio_free(auio);
	return error;
}
#endif // KERNEL