/* uipc_socket2.c revision 1.52 */
/*	$NetBSD: uipc_socket2.c,v 1.52 2003/06/28 14:21:58 darrenr Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.2 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uipc_socket2.c,v 1.52 2003/06/28 14:21:58 darrenr Exp $");

#include "opt_mbuftrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>

/*
 * Primitive routines for operating on sockets and socket buffers
 */

/* strings for sleep message: (wmesg arguments passed to tsleep()) */
const char netcon[] = "netcon";
const char netcls[] = "netcls";
const char netio[] = "netio";
const char netlck[] = "netlck";

/*
 * Procedures to manipulate state flags of socket
 * and do appropriate wakeups.  Normal sequence from the
 * active (originating) side is that soisconnecting() is
 * called during processing of connect() call,
 * resulting in an eventual call to soisconnected() if/when the
 * connection is established.  When the connection is torn down
 * soisdisconnecting() is called during processing of disconnect() call,
 * and soisdisconnected() is called when the connection to the peer
 * is totally severed.  The semantics of these routines are such that
 * connectionless protocols can call soisconnected() and soisdisconnected()
 * only, bypassing the in-progress calls when setting up a ``connection''
 * takes no time.
 *
 * From the passive side, a socket is created with
 * two queues of sockets: so_q0 for connections in progress
 * and so_q for connections already made and awaiting user acceptance.
 * As a protocol is preparing incoming connections, it creates a socket
 * structure queued on so_q0 by calling sonewconn().  When the connection
 * is established, soisconnected() is called, and transfers the
 * socket structure to so_q, making it available to accept().
 *
 * If a socket is closed with sockets on either
 * so_q0 or so_q, these sockets are dropped.
 *
 * If higher level protocols are implemented in
 * the kernel, the wakeups done here will sometimes
 * cause software-interrupt process scheduling.
 */

/*
 * Mark the socket as having a connect() in progress.  Clears any
 * stale connected/disconnecting state first.
 */
void
soisconnecting(struct socket *so)
{

	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;
}

/*
 * Mark the socket as connected.  If it is a partial connection sitting
 * on a listening socket's so_q0, move it to the completed queue so_q
 * and wake up the listener (so accept() can return it); otherwise wake
 * up anyone blocked on the socket itself, for both read and write.
 */
void
soisconnected(struct socket *so)
{
	struct socket *head;

	head = so->so_head;
	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;
	if (head && soqremque(so, 0)) {
		/* Was on so_q0: promote to so_q and notify the listener. */
		soqinsque(head, so, 1);
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
	} else {
		wakeup((caddr_t)&so->so_timeo);
		sorwakeup(so);
		sowwakeup(so);
	}
}

/*
 * Mark the socket as disconnecting; no further data may be sent or
 * received, so wake up all sleepers in both directions.
 */
void
soisdisconnecting(struct socket *so)
{

	so->so_state &= ~SS_ISCONNECTING;
	so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

/*
 * Mark the socket as fully disconnected from its peer and wake up
 * all sleepers; queued receive data may still be read by the user.
 */
void
soisdisconnected(struct socket *so)
{

	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn is called.  If the
 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED.
 *
 * Currently, sonewconn() is defined as sonewconn1() in socketvar.h
 * to catch calls that are missing the (new) second parameter.
 */
struct socket *
sonewconn1(struct socket *head, int connstatus)
{
	struct socket *so;
	int soqueue;

	/* connstatus != 0 means the connection is already complete. */
	soqueue = connstatus ? 1 : 0;
	/* Enforce the listen backlog (3/2 fudge covers partial conns). */
	if (head->so_qlen + head->so_q0len > 3 * head->so_qlimit / 2)
		return ((struct socket *)0);
	so = pool_get(&socket_pool, PR_NOWAIT);
	if (so == NULL)
		return (NULL);
	memset((caddr_t)so, 0, sizeof(*so));
	/* Inherit the listening socket's settings. */
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	so->so_pgid = head->so_pgid;
	so->so_send = head->so_send;
	so->so_receive = head->so_receive;
	so->so_uid = head->so_uid;
#ifdef MBUFTRACE
	so->so_mowner = head->so_mowner;
	so->so_rcv.sb_mowner = head->so_rcv.sb_mowner;
	so->so_snd.sb_mowner = head->so_snd.sb_mowner;
#endif
	/* NOTE(review): soreserve() failure is ignored here — upstream style. */
	(void) soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat);
	soqinsque(head, so, soqueue);
	if ((*so->so_proto->pr_usrreq)(so, PRU_ATTACH,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0,
	    (struct lwp *)0)) {
		/* Protocol attach failed: unlink and free the new socket. */
		(void) soqremque(so, soqueue);
		pool_put(&socket_pool, so);
		return (NULL);
	}
	if (connstatus) {
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
		so->so_state |= connstatus;
	}
	return (so);
}

/*
 * Insert socket "so" on queue q of the listening socket "head":
 * q == 0 selects so_q0 (incomplete connections), otherwise so_q
 * (completed connections awaiting accept()).
 */
void
soqinsque(struct socket *head, struct socket *so, int q)
{

#ifdef DIAGNOSTIC
	if (so->so_onq != NULL)
		panic("soqinsque");
#endif

	so->so_head = head;
	if (q == 0) {
		head->so_q0len++;
		so->so_onq = &head->so_q0;
	} else {
		head->so_qlen++;
		so->so_onq = &head->so_q;
	}
	TAILQ_INSERT_TAIL(so->so_onq, so, so_qe);
}

/*
 * Remove socket "so" from queue q (0 = so_q0, else so_q) of its
 * listening socket.  Returns 0 if "so" is not on the indicated
 * queue, 1 if it was removed.
 */
int
soqremque(struct socket *so, int q)
{
	struct socket *head;

	head = so->so_head;
	if (q == 0) {
		if (so->so_onq != &head->so_q0)
			return (0);
		head->so_q0len--;
	} else {
		if (so->so_onq != &head->so_q)
			return (0);
		head->so_qlen--;
	}
	TAILQ_REMOVE(so->so_onq, so, so_qe);
	so->so_onq = NULL;
	so->so_head = NULL;
	return (1);
}

/*
 * Socantsendmore indicates that no more data will be sent on the
 * socket; it would normally be applied to a socket when the user
 * informs the system that no more data is to be sent, by the protocol
 * code (in case PRU_SHUTDOWN).  Socantrcvmore indicates that no more data
 * will be received, and will normally be applied to the socket by a
 * protocol when it detects that the peer will send no more data.
 * Data queued for reading in the socket may yet be read.
 */

void
socantsendmore(struct socket *so)
{

	so->so_state |= SS_CANTSENDMORE;
	sowwakeup(so);
}

void
socantrcvmore(struct socket *so)
{

	so->so_state |= SS_CANTRCVMORE;
	sorwakeup(so);
}

/*
 * Wait for data to arrive at/drain from a socket buffer.
 * Returns whatever tsleep() returns (0, EINTR, EWOULDBLOCK...).
 */
int
sbwait(struct sockbuf *sb)
{

	sb->sb_flags |= SB_WAIT;
	return (tsleep((caddr_t)&sb->sb_cc,
	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, netio,
	    sb->sb_timeo));
}

/*
 * Lock a sockbuf already known to be locked;
 * return any error returned from sleep (EINTR).
 * Sleeps until SB_LOCK is clear, then takes it.
 */
int
sb_lock(struct sockbuf *sb)
{
	int error;

	while (sb->sb_flags & SB_LOCK) {
		sb->sb_flags |= SB_WANT;
		error = tsleep((caddr_t)&sb->sb_flags,
		    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK|PCATCH,
		    netlck, 0);
		if (error)
			return (error);
	}
	sb->sb_flags |= SB_LOCK;
	return (0);
}

/*
 * Wakeup processes waiting on a socket buffer.
 * Do asynchronous notification via SIGIO
 * if the socket buffer has the SB_ASYNC flag set.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
	struct proc *p;

	/* Notify poll()/select() waiters first. */
	selnotify(&sb->sb_sel, 0);
	sb->sb_flags &= ~SB_SEL;
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup((caddr_t)&sb->sb_cc);
	}
	if (sb->sb_flags & SB_ASYNC) {
		/* so_pgid < 0 names a process group, > 0 a single pid. */
		if (so->so_pgid < 0)
			gsignal(-so->so_pgid, SIGIO);
		else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
			psignal(p, SIGIO);
	}
	if (sb->sb_flags & SB_UPCALL)
		(*so->so_upcall)(so, so->so_upcallarg, M_DONTWAIT);
}

/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and
 * one for receiving data.  Each buffer contains a queue of mbufs,
 * information about the number of mbufs and amount of data in the
 * queue, and other fields allowing poll() statements and notification
 * on data availability to be implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.
 * Each record is a list of mbufs chained together with the m_next
 * field.  Records are chained together with the m_nextpkt field.  The upper
 * level routine soreceive() expects the following conventions to be
 * observed when placing information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's
 *    name, then a record containing that name must be present before
 *    any associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really
 *    just additional data associated with the message), and there are
 *    ``rights'' to be received, then a record containing this data
 *    should be present (mbuf's must be of type MT_CONTROL).
 * 3. If a name or rights record exists, then it must be followed by
 *    a data record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space to the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space
 * should be released by calling sbrelease() when the socket is destroyed.
 */

/*
 * Reserve send (sndcc) and receive (rcvcc) buffer space for a socket
 * and establish default low-water marks.  Returns 0 on success or
 * ENOBUFS if either reservation is refused (nothing is left reserved).
 */
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
{

	if (sbreserve(&so->so_snd, sndcc) == 0)
		goto bad;
	if (sbreserve(&so->so_rcv, rcvcc) == 0)
		goto bad2;
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	return (0);
 bad2:
	/* Undo the send-side reservation before failing. */
	sbrelease(&so->so_snd);
 bad:
	return (ENOBUFS);
}

/*
 * Allot mbufs to a sockbuf.
 * Attempt to scale mbmax so that mbcnt doesn't become limiting
 * if buffering efficiency is near the normal case.
 * Returns 1 on success, 0 if cc is zero or exceeds the system limit.
 */
int
sbreserve(struct sockbuf *sb, u_long cc)
{

	if (cc == 0 ||
	    (u_quad_t) cc > (u_quad_t) sb_max * MCLBYTES / (MSIZE + MCLBYTES))
		return (0);
	sb->sb_hiwat = cc;
	sb->sb_mbmax = min(cc * 2, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
void
sbrelease(struct sockbuf *sb)
{

	sbflush(sb);
	sb->sb_hiwat = sb->sb_mbmax = 0;
}

/*
 * Routines to add and remove
 * data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to
 * append new mbufs to a socket buffer, after checking that adequate
 * space is available, comparing the function sbspace() with the amount
 * of data to be added.  sbappendrecord() differs from sbappend() in
 * that data supplied is treated as the beginning of a new record.
 * To place a sender's address, optional access rights, and data in a
 * socket receive buffer, sbappendaddr() should be used.  To place
 * access rights and data in a socket receive buffer, sbappendrights()
 * should be used.  In either case, the new data begins a new record.
 * Note that unlike sbappend() and sbappendrecord(), these routines check
 * for the caller that there will be enough space to store the data.
 * Each fails if there is not enough space, or if it cannot find mbufs
 * to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data
 * awaiting acknowledgement.  Data is normally copied from a socket
 * send buffer in a protocol with m_copy for output to a peer,
 * and then removing the data from the socket buffer with sbdrop()
 * or sbdroprecord() when the data is acknowledged by the peer.
 */

#ifdef SOCKBUF_DEBUG
/*
 * Consistency check: sb_lastrecord must point at the final record on
 * the sb_mb packet chain; panic (after dumping the chain) if not.
 */
void
sblastrecordchk(struct sockbuf *sb, const char *where)
{
	struct mbuf *m = sb->sb_mb;

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	if (m != sb->sb_lastrecord) {
		printf("sblastrecordchk: sb_mb %p sb_lastrecord %p last %p\n",
		    sb->sb_mb, sb->sb_lastrecord, m);
		printf("packet chain:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
			printf("\t%p\n", m);
		panic("sblastrecordchk from %s", where);
	}
}

/*
 * Consistency check: sb_mbtail must point at the last mbuf of the
 * last record; panic (after dumping the buffer) if not.
 */
void
sblastmbufchk(struct sockbuf *sb, const char *where)
{
	struct mbuf *m = sb->sb_mb;
	struct mbuf *n;

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	while (m && m->m_next)
		m = m->m_next;

	if (m != sb->sb_mbtail) {
		printf("sblastmbufchk: sb_mb %p sb_mbtail %p last %p\n",
		    sb->sb_mb, sb->sb_mbtail, m);
		printf("packet tree:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
			printf("\t");
			for (n = m; n != NULL; n = n->m_next)
				printf("%p ", n);
			printf("\n");
		}
		panic("sblastmbufchk from %s", where);
	}
}
#endif /* SOCKBUF_DEBUG */

/*
 * Link record m0 onto the tail of sb's record chain and update
 * sb_lastrecord (and sb_mb if the buffer was empty).
 */
#define	SBLINKRECORD(sb, m0)						\
do {									\
	if ((sb)->sb_lastrecord != NULL)				\
		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
	else								\
		(sb)->sb_mb = (m0);					\
	(sb)->sb_lastrecord = (m0);					\
} while (/*CONSTCOND*/0)

/*
 * Append mbuf chain m to the last record in the
 * socket buffer sb.  The additional space associated
 * the mbuf chain is recorded in sb.  Empty mbufs are
 * discarded and mbufs are compacted where possible.
 */
void
sbappend(struct sockbuf *sb, struct mbuf *m)
{
	struct mbuf *n;

	if (m == 0)
		return;

#ifdef MBUFTRACE
	m_claim(m, sb->sb_mowner);
#endif

	SBLASTRECORDCHK(sb, "sbappend 1");

	if ((n = sb->sb_lastrecord) != NULL) {
		/*
		 * XXX Would like to simply use sb_mbtail here, but
		 * XXX I need to verify that I won't miss an EOR that
		 * XXX way.
		 */
		do {
			if (n->m_flags & M_EOR) {
				/* Last record is terminated: start a new one. */
				sbappendrecord(sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	} else {
		/*
		 * If this is the first record in the socket buffer, it's
		 * also the last record.
		 */
		sb->sb_lastrecord = m;
	}
	sbcompress(sb, m, n);
	SBLASTRECORDCHK(sb, "sbappend 2");
}

/*
 * This version of sbappend() should only be used when the caller
 * absolutely knows that there will never be more than one record
 * in the socket buffer, that is, a stream protocol (such as TCP).
 */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m)
{

	KDASSERT(m->m_nextpkt == NULL);
	KASSERT(sb->sb_mb == sb->sb_lastrecord);

	SBLASTMBUFCHK(sb, __func__);

#ifdef MBUFTRACE
	m_claim(m, sb->sb_mowner);
#endif

	/* Append directly at the tail; there is only one record. */
	sbcompress(sb, m, sb->sb_mbtail);

	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb, __func__);
}

#ifdef SOCKBUF_DEBUG
/*
 * Verify that sb_cc and sb_mbcnt agree with the actual mbuf chain;
 * panic if they do not.
 */
void
sbcheck(struct sockbuf *sb)
{
	struct mbuf *m;
	u_long len, mbcnt;

	len = 0;
	mbcnt = 0;
	for (m = sb->sb_mb; m; m = m->m_next) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		if (m->m_nextpkt)
			panic("sbcheck nextpkt");
	}
	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
		printf("cc %lu != %lu || mbcnt %lu != %lu\n", len, sb->sb_cc,
		    mbcnt, sb->sb_mbcnt);
		panic("sbcheck");
	}
}
#endif

/*
 * As above, except the mbuf chain
 * begins a new record.
 */
void
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m;

	if (m0 == 0)
		return;

#ifdef MBUFTRACE
	m_claim(m0, sb->sb_mowner);
#endif
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	SBLASTRECORDCHK(sb, "sbappendrecord 1");
	SBLINKRECORD(sb, m0);
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		/* Move the end-of-record mark to the last mbuf of the chain. */
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
	SBLASTRECORDCHK(sb, "sbappendrecord 2");
}

/*
 * As above except that OOB data
 * is inserted at the beginning of the sockbuf,
 * but after any other OOB data.
 */
void
sbinsertoob(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m, **mp;

	if (m0 == 0)
		return;

	SBLASTRECORDCHK(sb, "sbinsertoob 1");

	/* Skip past any existing OOB records (and their control mbufs). */
	for (mp = &sb->sb_mb; (m = *mp) != NULL; mp = &((*mp)->m_nextpkt)) {
	    again:
		switch (m->m_type) {

		case MT_OOBDATA:
			continue;		/* WANT next train */

		case MT_CONTROL:
			if ((m = m->m_next) != NULL)
				goto again;	/* inspect THIS train further */
		}
		break;
	}
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	m0->m_nextpkt = *mp;
	if (*mp == NULL) {
		/* m0 is actually the new tail */
		sb->sb_lastrecord = m0;
	}
	*mp = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		/* Move the end-of-record mark to the last mbuf of the chain. */
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
	SBLASTRECORDCHK(sb, "sbinsertoob 2");
}

/*
 * Append address and data, and optionally, control (ancillary) data
 * to the receive queue of a socket.  If present,
 * m0 must include a packet header with total length.
 * Returns 0 if no space in sockbuf or insufficient mbufs.
 */
int
sbappendaddr(struct sockbuf *sb, struct sockaddr *asa, struct mbuf *m0,
	struct mbuf *control)
{
	struct mbuf *m, *n, *nlast;
	int space, len;

	/* Total space needed: address + data + control. */
	space = asa->sa_len;

	if (m0 != NULL) {
		if ((m0->m_flags & M_PKTHDR) == 0)
			panic("sbappendaddr");
		space += m0->m_pkthdr.len;
#ifdef MBUFTRACE
		m_claim(m0, sb->sb_mowner);
#endif
	}
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		MCLAIM(n, sb->sb_mowner);
		if (n->m_next == 0)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(sb))
		return (0);
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	MCLAIM(m, sb->sb_mowner);
	/*
	 * XXX avoid 'comparison always true' warning which isn't easily
	 * avoided.
	 */
	len = asa->sa_len;
	if (len > MLEN) {
		/* Address too large for a plain mbuf: attach external storage. */
		MEXTMALLOC(m, asa->sa_len, M_NOWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return (0);
		}
	}
	m->m_len = asa->sa_len;
	memcpy(mtod(m, caddr_t), (caddr_t)asa, asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;

	SBLASTRECORDCHK(sb, "sbappendaddr 1");

	/* Account for every mbuf in the new record (address+control+data). */
	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(sb, n);
	sballoc(sb, n);
	nlast = n;
	SBLINKRECORD(sb, m);

	sb->sb_mbtail = nlast;
	SBLASTMBUFCHK(sb, "sbappendaddr");

	SBLASTRECORDCHK(sb, "sbappendaddr 2");

	return (1);
}

/*
 * Append control (ancillary) data followed by data m0 as a new record.
 * Returns 0 if there is no space in the sockbuf, 1 on success.
 */
int
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *m, *mlast, *n;
	int space;

	space = 0;
	if (control == 0)
		panic("sbappendcontrol");
	for (m = control; ; m = m->m_next) {
		space += m->m_len;
		MCLAIM(m, sb->sb_mowner);
		if (m->m_next == 0)
			break;
	}
	n = m;			/* save pointer to last control buffer */
	for (m = m0; m; m = m->m_next) {
		MCLAIM(m, sb->sb_mowner);
		space += m->m_len;
	}

	if (space > sbspace(sb))
		return (0);
	n->m_next = m0;			/* concatenate data to control */

	SBLASTRECORDCHK(sb, "sbappendcontrol 1");

	for (m = control; m->m_next != NULL; m = m->m_next)
		sballoc(sb, m);
	sballoc(sb, m);
	mlast = m;
	SBLINKRECORD(sb, control);

	sb->sb_mbtail = mlast;
	SBLASTMBUFCHK(sb, "sbappendcontrol");

	SBLASTRECORDCHK(sb, "sbappendcontrol 2");

	return (1);
}

/*
 * Compress mbuf chain m into the socket
 * buffer sb following mbuf n.  If n
 * is null, the buffer is presumed empty.
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
	int eor;
	struct mbuf *o;

	eor = 0;
	while (m) {
		eor |= m->m_flags & M_EOR;
		/* Discard empty mbufs unless they carry the only EOR mark. */
		if (m->m_len == 0 &&
		    (eor == 0 ||
		    (((o = m->m_next) || (o = n)) &&
		    o->m_type == m->m_type))) {
			if (sb->sb_lastrecord == m)
				sb->sb_lastrecord = m->m_next;
			m = m_free(m);
			continue;
		}
		/* Coalesce small mbufs into the tail of the previous one. */
		if (n && (n->m_flags & M_EOR) == 0 &&
		    /* M_TRAILINGSPACE() checks buffer writeability */
		    m->m_len <= MCLBYTES / 4 && /* XXX Don't copy too much */
		    m->m_len <= M_TRAILINGSPACE(n) &&
		    n->m_type == m->m_type) {
			memcpy(mtod(n, caddr_t) + n->m_len, mtod(m, caddr_t),
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_cc += m->m_len;
			m = m_free(m);
			continue;
		}
		/* Otherwise link the mbuf in as the new tail. */
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sb->sb_mbtail = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = 0;
	}
	if (eor) {
		/* Restore the EOR mark on whatever ended up last. */
		if (n)
			n->m_flags |= eor;
		else
			printf("semi-panic: sbcompress\n");
	}
	SBLASTMBUFCHK(sb, __func__);
}

/*
 * Free all mbufs in a sockbuf.
 * Check that all resources are reclaimed.
 */
void
sbflush(struct sockbuf *sb)
{

	KASSERT((sb->sb_flags & SB_LOCK) == 0);

	/*
	 * NOTE(review): termination relies on sbdrop(sb_cc) always reducing
	 * sb_mbcnt; if sb_cc is 0 while sb_mbcnt is not, this would spin —
	 * presumably ruled out by sockbuf accounting invariants (confirm).
	 */
	while (sb->sb_mbcnt)
		sbdrop(sb, (int)sb->sb_cc);

	KASSERT(sb->sb_cc == 0);
	KASSERT(sb->sb_mb == NULL);
	KASSERT(sb->sb_mbtail == NULL);
	KASSERT(sb->sb_lastrecord == NULL);
}

/*
 * Drop data from (the front of) a sockbuf.
 */
void
sbdrop(struct sockbuf *sb, int len)
{
	struct mbuf *m, *mn, *next;

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	while (len > 0) {
		if (m == 0) {
			/* Current record exhausted: advance to the next one. */
			if (next == 0)
				panic("sbdrop");
			m = next;
			next = m->m_nextpkt;
			continue;
		}
		if (m->m_len > len) {
			/* Partial drop: trim the front of this mbuf. */
			m->m_len -= len;
			m->m_data += len;
			sb->sb_cc -= len;
			break;
		}
		len -= m->m_len;
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	/* Also free any now-empty mbufs at the head. */
	while (m && m->m_len == 0) {
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
	/*
	 * First part is an inline SB_EMPTY_FIXUP().  Second part
	 * makes sure sb_lastrecord is up-to-date if we dropped
	 * part of the last record.
	 */
	m = sb->sb_mb;
	if (m == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (m->m_nextpkt == NULL)
		sb->sb_lastrecord = m;
}

/*
 * Drop a record off the front of a sockbuf
 * and move the next record to the front.
 */
void
sbdroprecord(struct sockbuf *sb)
{
	struct mbuf *m, *mn;

	m = sb->sb_mb;
	if (m) {
		sb->sb_mb = m->m_nextpkt;
		do {
			sbfree(sb, m);
			MFREE(m, mn);
		} while ((m = mn) != NULL);
	}
	SB_EMPTY_FIXUP(sb);
}

/*
 * Create a "control" mbuf containing the specified data
 * with the specified type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{
	struct cmsghdr *cp;
	struct mbuf *m;

	/* The whole cmsg must fit in a single (possibly clustered) mbuf. */
	if (CMSG_SPACE(size) > MCLBYTES) {
		printf("sbcreatecontrol: message too large %d\n", size);
		return NULL;
	}

	if ((m = m_get(M_DONTWAIT, MT_CONTROL)) == NULL)
		return ((struct mbuf *) NULL);
	if (CMSG_SPACE(size) > MLEN) {
		/* Needs more than a plain mbuf: attach a cluster. */
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return NULL;
		}
	}
	cp = mtod(m, struct cmsghdr *);
	/* Fill in payload first, then the cmsghdr fields. */
	memcpy(CMSG_DATA(cp), p, size);
	m->m_len = CMSG_SPACE(size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return (m);
}