/* uipc_socket2.c — revision 1.13 */
/* $NetBSD: uipc_socket2.c,v 1.13 1996/09/07 12:41:05 mycroft Exp $ */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>

/*
 * Primitive routines for operating on sockets and socket buffers
 */

/* strings for sleep message: */
char	netio[] = "netio";
char	netcon[] = "netcon";
char	netcls[] = "netcls";

u_long	sb_max = SB_MAX;		/* patchable */

/*
 * Procedures to manipulate state flags of socket
 * and do appropriate wakeups.  Normal sequence from the
 * active (originating) side is that soisconnecting() is
 * called during processing of connect() call,
 * resulting in an eventual call to soisconnected() if/when the
 * connection is established.  When the connection is torn down
 * soisdisconnecting() is called during processing of disconnect() call,
 * and soisdisconnected() is called when the connection to the peer
 * is totally severed.  The semantics of these routines are such that
 * connectionless protocols can call soisconnected() and soisdisconnected()
 * only, bypassing the in-progress calls when setting up a ``connection''
 * takes no time.
 *
 * From the passive side, a socket is created with
 * two queues of sockets: so_q0 for connections in progress
 * and so_q for connections already made and awaiting user acceptance.
 * As a protocol is preparing incoming connections, it creates a socket
 * structure queued on so_q0 by calling sonewconn().  When the connection
 * is established, soisconnected() is called, and transfers the
 * socket structure to so_q, making it available to accept().
 *
 * If a socket is closed with sockets on either
 * so_q0 or so_q, these sockets are dropped.
 *
 * If higher level protocols are implemented in
 * the kernel, the wakeups done here will sometimes
 * cause software-interrupt process scheduling.
 */

/*
 * Mark the socket as having a connection attempt in progress:
 * clear any stale connected/disconnecting state and set SS_ISCONNECTING.
 */
void
soisconnecting(so)
	register struct socket *so;
{

	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;
}

/*
 * Mark the socket as connected.  If the socket sits on a listening
 * socket's incomplete queue (so_head set and soqremque() finds it on
 * so_q0), move it to the completed queue so_q and wake up anyone
 * blocked in accept() on the head socket; otherwise wake up sleepers
 * and both buffers on the socket itself.
 */
void
soisconnected(so)
	register struct socket *so;
{
	register struct socket *head = so->so_head;

	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;
	if (head && soqremque(so, 0)) {
		soqinsque(head, so, 1);
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
	} else {
		wakeup((caddr_t)&so->so_timeo);
		sorwakeup(so);
		sowwakeup(so);
	}
}

/*
 * Note that the connection is being torn down: no further data may be
 * sent or received.  Wake up sleepers on so_timeo and both buffers so
 * they observe the state change.
 */
void
soisdisconnecting(so)
	register struct socket *so;
{

	so->so_state &= ~SS_ISCONNECTING;
	so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

/*
 * Note that the connection to the peer is fully severed.  Clears all
 * connection-in-progress/connected state and wakes up sleepers as in
 * soisdisconnecting().
 */
void
soisdisconnected(so)
	register struct socket *so;
{

	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn is called.  If the
 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED.
 *
 * Currently, sonewconn() is defined as sonewconn1() in socketvar.h
 * to catch calls that are missing the (new) second parameter.
 *
 * Returns the new socket, or NULL if the listen queue is full
 * (more than 3/2 of so_qlimit entries pending), if no memory is
 * available, or if the protocol's PRU_ATTACH request fails.
 */
struct socket *
sonewconn1(head, connstatus)
	register struct socket *head;
	int connstatus;
{
	register struct socket *so;
	/* nonzero connstatus => queue on so_q (completed) else so_q0 */
	int soqueue = connstatus ? 1 : 0;

	if (head->so_qlen + head->so_q0len > 3 * head->so_qlimit / 2)
		return ((struct socket *)0);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_DONTWAIT);
	if (so == NULL)
		return ((struct socket *)0);
	bzero((caddr_t)so, sizeof(*so));
	/* Inherit type, options (minus SO_ACCEPTCONN), and limits from head. */
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	so->so_pgid = head->so_pgid;
	(void) soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat);
	soqinsque(head, so, soqueue);
	if ((*so->so_proto->pr_usrreq)(so, PRU_ATTACH,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0,
	    (struct proc *)0)) {
		/* Attach failed: unlink and discard the half-built socket. */
		(void) soqremque(so, soqueue);
		(void) free((caddr_t)so, M_SOCKET);
		return ((struct socket *)0);
	}
	if (connstatus) {
		/* Already connected/confirming: notify accept()ers on head. */
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
		so->so_state |= connstatus;
	}
	return (so);
}

/*
 * Insert socket so at the tail of head's connection queue:
 * the in-progress queue so_q0 if q == 0, the completed queue so_q
 * otherwise.  Also sets so->so_head and bumps the queue length.
 */
void
soqinsque(head, so, q)
	register struct socket *head, *so;
	int q;
{

	register struct socket **prev;
	so->so_head = head;
	if (q == 0) {
		head->so_q0len++;
		so->so_q0 = 0;
		/* walk to the tail of the singly-linked so_q0 list */
		for (prev = &(head->so_q0); *prev; )
			prev = &((*prev)->so_q0);
	} else {
		head->so_qlen++;
		so->so_q = 0;
		for (prev = &(head->so_q); *prev; )
			prev = &((*prev)->so_q);
	}
	*prev = so;
}

/*
 * Remove socket so from its head's connection queue (so_q if q != 0,
 * so_q0 otherwise).  Returns 1 if the socket was found and unlinked,
 * 0 if it was not on that queue.
 */
int
soqremque(so, q)
	register struct socket *so;
	int q;
{
	register struct socket *head, *prev, *next;

	head = so->so_head;
	prev = head;
	for (;;) {
		next = q ? prev->so_q : prev->so_q0;
		if (next == so)
			break;
		if (next == 0)
			return (0);
		prev = next;
	}
	if (q == 0) {
		prev->so_q0 = next->so_q0;
		head->so_q0len--;
	} else {
		prev->so_q = next->so_q;
		head->so_qlen--;
	}
	next->so_q0 = next->so_q = 0;
	next->so_head = 0;
	return (1);
}

/*
 * Socantsendmore indicates that no more data will be sent on the
 * socket; it would normally be applied to a socket when the user
 * informs the system that no more data is to be sent, by the protocol
 * code (in case PRU_SHUTDOWN).  Socantrcvmore indicates that no more data
 * will be received, and will normally be applied to the socket by a
 * protocol when it detects that the peer will send no more data.
 * Data queued for reading in the socket may yet be read.
 */

void
socantsendmore(so)
	struct socket *so;
{

	so->so_state |= SS_CANTSENDMORE;
	sowwakeup(so);
}

void
socantrcvmore(so)
	struct socket *so;
{

	so->so_state |= SS_CANTRCVMORE;
	sorwakeup(so);
}

/*
 * Wait for data to arrive at/drain from a socket buffer.
 * Sleeps on sb_cc at PSOCK priority (interruptible unless SB_NOINTR
 * is set), bounded by sb_timeo; returns the tsleep() result.
 */
int
sbwait(sb)
	struct sockbuf *sb;
{

	sb->sb_flags |= SB_WAIT;
	return (tsleep((caddr_t)&sb->sb_cc,
	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, netio,
	    sb->sb_timeo));
}

/*
 * Lock a sockbuf already known to be locked;
 * return any error returned from sleep (EINTR).
 *
 * (Sleeps with SB_WANT set while another holder has SB_LOCK, then
 * acquires SB_LOCK itself.)
 */
int
sb_lock(sb)
	register struct sockbuf *sb;
{
	int error;

	while (sb->sb_flags & SB_LOCK) {
		sb->sb_flags |= SB_WANT;
		error = tsleep((caddr_t)&sb->sb_flags,
		    (sb->sb_flags & SB_NOINTR) ?
		    PSOCK : PSOCK|PCATCH, netio, 0);
		if (error)
			return (error);
	}
	sb->sb_flags |= SB_LOCK;
	return (0);
}

/*
 * Wakeup processes waiting on a socket buffer.
 * Do asynchronous notification via SIGIO
 * if the socket has the SS_ASYNC flag set.
 */
void
sowakeup(so, sb)
	register struct socket *so;
	register struct sockbuf *sb;
{
	struct proc *p;

	selwakeup(&sb->sb_sel);
	sb->sb_flags &= ~SB_SEL;
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup((caddr_t)&sb->sb_cc);
	}
	if (so->so_state & SS_ASYNC) {
		/* negative so_pgid names a process group, positive a pid */
		if (so->so_pgid < 0)
			gsignal(-so->so_pgid, SIGIO);
		else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
			psignal(p, SIGIO);
	}
}

/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and
 * one for receiving data.  Each buffer contains a queue of mbufs,
 * information about the number of mbufs and amount of data in the
 * queue, and other fields allowing poll() statements and notification
 * on data availability to be implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.
 * Each record is a list of mbufs chained together with the m_next
 * field.  Records are chained together with the m_nextpkt field.  The upper
 * level routine soreceive() expects the following conventions to be
 * observed when placing information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's
 *    name, then a record containing that name must be present before
 *    any associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really
 *    just additional data associated with the message), and there are
 *    ``rights'' to be received, then a record containing this data
 *    should be present (mbuf's must be of type MT_CONTROL).
 * 3. If a name or rights record exists, then it must be followed by
 *    a data record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space to the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space
 * should be released by calling sbrelease() when the socket is destroyed.
 */

/*
 * Reserve send and receive space for a socket and establish low-water
 * marks.  Returns 0 on success, ENOBUFS if either reservation fails
 * (backing out the send-side reservation in that case).
 */
int
soreserve(so, sndcc, rcvcc)
	register struct socket *so;
	u_long sndcc, rcvcc;
{

	if (sbreserve(&so->so_snd, sndcc) == 0)
		goto bad;
	if (sbreserve(&so->so_rcv, rcvcc) == 0)
		goto bad2;
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	return (0);
bad2:
	sbrelease(&so->so_snd);
bad:
	return (ENOBUFS);
}

/*
 * Allot mbufs to a sockbuf.
 * Attempt to scale mbmax so that mbcnt doesn't become limiting
 * if buffering efficiency is near the normal case.
 * Returns 1 on success, 0 if cc exceeds the system-wide limit.
 */
int
sbreserve(sb, cc)
	struct sockbuf *sb;
	u_long cc;
{

	if (cc > sb_max * MCLBYTES / (MSIZE + MCLBYTES))
		return (0);
	sb->sb_hiwat = cc;
	sb->sb_mbmax = min(cc * 2, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
void
sbrelease(sb)
	struct sockbuf *sb;
{

	sbflush(sb);
	sb->sb_hiwat = sb->sb_mbmax = 0;
}

/*
 * Routines to add and remove
 * data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to
 * append new mbufs to a socket buffer, after checking that adequate
 * space is available, comparing the function sbspace() with the amount
 * of data to be added.  sbappendrecord() differs from sbappend() in
 * that data supplied is treated as the beginning of a new record.
 * To place a sender's address, optional access rights, and data in a
 * socket receive buffer, sbappendaddr() should be used.  To place
 * access rights and data in a socket receive buffer, sbappendrights()
 * should be used.  In either case, the new data begins a new record.
 * Note that unlike sbappend() and sbappendrecord(), these routines check
 * for the caller that there will be enough space to store the data.
 * Each fails if there is not enough space, or if it cannot find mbufs
 * to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data
 * awaiting acknowledgement.  Data is normally copied from a socket
 * send buffer in a protocol with m_copy for output to a peer,
 * and then removing the data from the socket buffer with sbdrop()
 * or sbdroprecord() when the data is acknowledged by the peer.
 */

/*
 * Append mbuf chain m to the last record in the
 * socket buffer sb.  The additional space associated
 * the mbuf chain is recorded in sb.  Empty mbufs are
 * discarded and mbufs are compacted where possible.
 */
void
sbappend(sb, m)
	struct sockbuf *sb;
	struct mbuf *m;
{
	register struct mbuf *n;

	if (m == 0)
		return;
	if ((n = sb->sb_mb) != NULL) {
		/* find the last record, then scan it for an M_EOR mbuf */
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		do {
			if (n->m_flags & M_EOR) {
				/* XXX last record already ended; start a new record */
				sbappendrecord(sb, m);
				return;
			}
		} while (n->m_next && (n = n->m_next));
	}
	sbcompress(sb, m, n);
}

#ifdef SOCKBUF_DEBUG
/*
 * Consistency check for a sockbuf: recompute byte and mbuf-space
 * counts over the chain and panic if they disagree with the cached
 * sb_cc/sb_mbcnt, or if a record boundary (m_nextpkt) is found.
 */
void
sbcheck(sb)
	register struct sockbuf *sb;
{
	register struct mbuf *m;
	register int len = 0, mbcnt = 0;

	for (m = sb->sb_mb; m; m = m->m_next) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		if (m->m_nextpkt)
			panic("sbcheck nextpkt");
	}
	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
		printf("cc %d != %d || mbcnt %d != %d\n", len, sb->sb_cc,
		    mbcnt, sb->sb_mbcnt);
		panic("sbcheck");
	}
}
#endif

/*
 * As above, except the mbuf chain
 * begins a new record.
 */
void
sbappendrecord(sb, m0)
	register struct sockbuf *sb;
	register struct mbuf *m0;
{
	register struct mbuf *m;

	if (m0 == 0)
		return;
	if ((m = sb->sb_mb) != NULL)
		while (m->m_nextpkt)
			m = m->m_nextpkt;
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	if (m)
		m->m_nextpkt = m0;
	else
		sb->sb_mb = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		/* move M_EOR to the remainder of the chain before compressing */
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
}

/*
 * As above except that OOB data
 * is inserted at the beginning of the sockbuf,
 * but after any other OOB data.
 */
void
sbinsertoob(sb, m0)
	register struct sockbuf *sb;
	register struct mbuf *m0;
{
	register struct mbuf *m;
	register struct mbuf **mp;

	if (m0 == 0)
		return;
	/* skip past leading OOB records (and control preceding their data) */
	for (mp = &sb->sb_mb; (m = *mp) != NULL; mp = &((*mp)->m_nextpkt)) {
	    again:
		switch (m->m_type) {

		case MT_OOBDATA:
			continue;		/* WANT next train */

		case MT_CONTROL:
			if ((m = m->m_next) != NULL)
				goto again;	/* inspect THIS train further */
		}
		break;
	}
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	m0->m_nextpkt = *mp;
	*mp = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
}

/*
 * Append address and data, and optionally, control (ancillary) data
 * to the receive queue of a socket.  If present,
 * m0 must include a packet header with total length.
 * Returns 0 if no space in sockbuf or insufficient mbufs.
 */
int
sbappendaddr(sb, asa, m0, control)
	register struct sockbuf *sb;
	struct sockaddr *asa;
	struct mbuf *m0, *control;
{
	register struct mbuf *m, *n;
	int space = asa->sa_len;

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr");
	if (m0)
		space += m0->m_pkthdr.len;
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		if (n->m_next == 0)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(sb))
		return (0);
	if (asa->sa_len > MLEN)
		return (0);
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	m->m_len = asa->sa_len;
	bcopy((caddr_t)asa, mtod(m, caddr_t), asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	/* record layout is name mbuf -> control chain -> data chain */
	for (n = m; n; n = n->m_next)
		sballoc(sb, n);
	if ((n = sb->sb_mb) != NULL) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		n->m_nextpkt = m;
	} else
		sb->sb_mb = m;
	return (1);
}

/*
 * Append control data (which must be non-null, else panic) and
 * optional data m0 as a new record on sb's receive queue.
 * Returns 0 if there is insufficient space, 1 on success.
 */
int
sbappendcontrol(sb, m0, control)
	struct sockbuf *sb;
	struct mbuf *m0, *control;
{
	register struct mbuf *m, *n;
	int space = 0;

	if (control == 0)
		panic("sbappendcontrol");
	for (m = control; ; m = m->m_next) {
		space += m->m_len;
		if (m->m_next == 0)
			break;
	}
	n = m;			/* save pointer to last control buffer */
	for (m = m0; m; m = m->m_next)
		space += m->m_len;
	if (space > sbspace(sb))
		return (0);
	n->m_next = m0;			/* concatenate data to control */
	for (m = control; m; m = m->m_next)
		sballoc(sb, m);
	if ((n = sb->sb_mb) != NULL) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		n->m_nextpkt = control;
	} else
		sb->sb_mb = control;
	return (1);
}

/*
 * Compress mbuf chain m into the socket
 * buffer sb following mbuf n.  If n
 * is null, the buffer is presumed empty.
 *
 * Zero-length mbufs are freed; small mbufs are copied into the tail
 * mbuf n when it has room (no M_EXT/M_EOR and same type); otherwise
 * the mbuf is linked in.  Any accumulated M_EOR flag is reapplied to
 * the final mbuf.
 */
void
sbcompress(sb, m, n)
	register struct sockbuf *sb;
	register struct mbuf *m, *n;
{
	register int eor = 0;
	register struct mbuf *o;

	while (m) {
		eor |= m->m_flags & M_EOR;
		if (m->m_len == 0 &&
		    (eor == 0 ||
		     (((o = m->m_next) || (o = n)) &&
		      o->m_type == m->m_type))) {
			/* drop an empty mbuf unless it must carry M_EOR */
			m = m_free(m);
			continue;
		}
		if (n && (n->m_flags & (M_EXT | M_EOR)) == 0 &&
		    (n->m_data + n->m_len + m->m_len) < &n->m_dat[MLEN] &&
		    n->m_type == m->m_type) {
			/* coalesce: copy m's data into the tail mbuf n */
			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_cc += m->m_len;
			m = m_free(m);
			continue;
		}
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = 0;
	}
	if (eor) {
		if (n)
			n->m_flags |= eor;
		else
			printf("semi-panic: sbcompress\n");
	}
}

/*
 * Free all mbufs in a sockbuf.
 * Check that all resources are reclaimed.
 */
void
sbflush(sb)
	register struct sockbuf *sb;
{

	if (sb->sb_flags & SB_LOCK)
		panic("sbflush");
	while (sb->sb_mbcnt)
		sbdrop(sb, (int)sb->sb_cc);
	if (sb->sb_cc || sb->sb_mb)
		panic("sbflush 2");
}

/*
 * Drop data from (the front of) a sockbuf.
 * Removes len bytes, freeing emptied mbufs and crossing record
 * boundaries as needed (panics if len exceeds the buffered data).
 */
void
sbdrop(sb, len)
	register struct sockbuf *sb;
	register int len;
{
	register struct mbuf *m, *mn;
	struct mbuf *next;

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	while (len > 0) {
		if (m == 0) {
			if (next == 0)
				panic("sbdrop");
			/* current record exhausted; advance to the next one */
			m = next;
			next = m->m_nextpkt;
			continue;
		}
		if (m->m_len > len) {
			/* partial drop within this mbuf */
			m->m_len -= len;
			m->m_data += len;
			sb->sb_cc -= len;
			break;
		}
		len -= m->m_len;
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	/* discard any now-empty mbufs at the front of the record */
	while (m && m->m_len == 0) {
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
}

/*
 * Drop a record off the front of a sockbuf
 * and move the next record to the front.
 */
void
sbdroprecord(sb)
	register struct sockbuf *sb;
{
	register struct mbuf *m, *mn;

	m = sb->sb_mb;
	if (m) {
		sb->sb_mb = m->m_nextpkt;
		do {
			sbfree(sb, m);
			MFREE(m, mn);
		} while ((m = mn) != NULL);
	}
}