135} 136 137static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET }; 138 139static int 140raw_append(struct inpcb *last, struct ip *ip, struct mbuf *n) 141{ 142 int policyfail = 0; 143 144 INP_LOCK_ASSERT(last); 145 146#if defined(IPSEC) || defined(FAST_IPSEC) 147 /* check AH/ESP integrity. */ 148 if (ipsec4_in_reject(n, last)) { 149 policyfail = 1; 150#ifdef IPSEC 151 ipsecstat.in_polvio++; 152#endif /*IPSEC*/ 153 /* do not inject data to pcb */ 154 } 155#endif /*IPSEC || FAST_IPSEC*/ 156#ifdef MAC 157 if (!policyfail && mac_check_inpcb_deliver(last, n) != 0) 158 policyfail = 1; 159#endif 160 /* Check the minimum TTL for socket. */ 161 if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl) 162 policyfail = 1; 163 if (!policyfail) { 164 struct mbuf *opts = NULL; 165 struct socket *so; 166 167 so = last->inp_socket; 168 if ((last->inp_flags & INP_CONTROLOPTS) || 169 (so->so_options & (SO_TIMESTAMP | SO_BINTIME))) 170 ip_savecontrol(last, &opts, ip, n); 171 SOCKBUF_LOCK(&so->so_rcv); 172 if (sbappendaddr_locked(&so->so_rcv, 173 (struct sockaddr *)&ripsrc, n, opts) == 0) { 174 /* should notify about lost packet */ 175 m_freem(n); 176 if (opts) 177 m_freem(opts); 178 SOCKBUF_UNLOCK(&so->so_rcv); 179 } else 180 sorwakeup_locked(so); 181 } else 182 m_freem(n); 183 return policyfail; 184} 185 186/* 187 * Setup generic address and protocol structures 188 * for raw_input routine, then pass them along with 189 * mbuf chain. 
190 */ 191void 192rip_input(struct mbuf *m, int off) 193{ 194 struct ip *ip = mtod(m, struct ip *); 195 int proto = ip->ip_p; 196 struct inpcb *inp, *last; 197 198 INP_INFO_RLOCK(&ripcbinfo); 199 ripsrc.sin_addr = ip->ip_src; 200 last = NULL; 201 LIST_FOREACH(inp, &ripcb, inp_list) { 202 INP_LOCK(inp); 203 if (inp->inp_ip_p && inp->inp_ip_p != proto) { 204 docontinue: 205 INP_UNLOCK(inp); 206 continue; 207 } 208#ifdef INET6 209 if ((inp->inp_vflag & INP_IPV4) == 0) 210 goto docontinue; 211#endif 212 if (inp->inp_laddr.s_addr && 213 inp->inp_laddr.s_addr != ip->ip_dst.s_addr) 214 goto docontinue; 215 if (inp->inp_faddr.s_addr && 216 inp->inp_faddr.s_addr != ip->ip_src.s_addr) 217 goto docontinue; 218 if (jailed(inp->inp_socket->so_cred)) 219 if (htonl(prison_getip(inp->inp_socket->so_cred)) != 220 ip->ip_dst.s_addr) 221 goto docontinue; 222 if (last) { 223 struct mbuf *n; 224 225 n = m_copy(m, 0, (int)M_COPYALL); 226 if (n != NULL) 227 (void) raw_append(last, ip, n); 228 /* XXX count dropped packet */ 229 INP_UNLOCK(last); 230 } 231 last = inp; 232 } 233 if (last != NULL) { 234 if (raw_append(last, ip, m) != 0) 235 ipstat.ips_delivered--; 236 INP_UNLOCK(last); 237 } else { 238 m_freem(m); 239 ipstat.ips_noproto++; 240 ipstat.ips_delivered--; 241 } 242 INP_INFO_RUNLOCK(&ripcbinfo); 243} 244 245/* 246 * Generate IP header and pass packet to ip_output. 247 * Tack on options user may have setup with control call. 248 */ 249int 250rip_output(struct mbuf *m, struct socket *so, u_long dst) 251{ 252 struct ip *ip; 253 int error; 254 struct inpcb *inp = sotoinpcb(so); 255 int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) | 256 IP_ALLOWBROADCAST; 257 258 /* 259 * If the user handed us a complete IP packet, use it. 260 * Otherwise, allocate an mbuf for a header and fill it in. 
261 */ 262 if ((inp->inp_flags & INP_HDRINCL) == 0) { 263 if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) { 264 m_freem(m); 265 return(EMSGSIZE); 266 } 267 M_PREPEND(m, sizeof(struct ip), M_DONTWAIT); 268 if (m == NULL) 269 return(ENOBUFS); 270 271 INP_LOCK(inp); 272 ip = mtod(m, struct ip *); 273 ip->ip_tos = inp->inp_ip_tos; 274 if (inp->inp_flags & INP_DONTFRAG) 275 ip->ip_off = IP_DF; 276 else 277 ip->ip_off = 0; 278 ip->ip_p = inp->inp_ip_p; 279 ip->ip_len = m->m_pkthdr.len; 280 if (jailed(inp->inp_socket->so_cred)) 281 ip->ip_src.s_addr = 282 htonl(prison_getip(inp->inp_socket->so_cred)); 283 else 284 ip->ip_src = inp->inp_laddr; 285 ip->ip_dst.s_addr = dst; 286 ip->ip_ttl = inp->inp_ip_ttl; 287 } else { 288 if (m->m_pkthdr.len > IP_MAXPACKET) { 289 m_freem(m); 290 return(EMSGSIZE); 291 } 292 INP_LOCK(inp); 293 ip = mtod(m, struct ip *); 294 if (jailed(inp->inp_socket->so_cred)) { 295 if (ip->ip_src.s_addr != 296 htonl(prison_getip(inp->inp_socket->so_cred))) { 297 INP_UNLOCK(inp); 298 m_freem(m); 299 return (EPERM); 300 } 301 } 302 /* don't allow both user specified and setsockopt options, 303 and don't allow packet length sizes that will crash */ 304 if (((ip->ip_hl != (sizeof (*ip) >> 2)) 305 && inp->inp_options) 306 || (ip->ip_len > m->m_pkthdr.len) 307 || (ip->ip_len < (ip->ip_hl << 2))) { 308 INP_UNLOCK(inp); 309 m_freem(m); 310 return EINVAL; 311 } 312 if (ip->ip_id == 0) 313 ip->ip_id = ip_newid(); 314 /* XXX prevent ip_output from overwriting header fields */ 315 flags |= IP_RAWOUTPUT; 316 ipstat.ips_rawout++; 317 } 318 319 if (inp->inp_vflag & INP_ONESBCAST) 320 flags |= IP_SENDONES; 321 322#ifdef MAC 323 mac_create_mbuf_from_inpcb(inp, m); 324#endif 325 326 error = ip_output(m, inp->inp_options, NULL, flags, 327 inp->inp_moptions, inp); 328 INP_UNLOCK(inp); 329 return error; 330} 331 332/* 333 * Raw IP socket option processing. 
334 * 335 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could 336 * only be created by a privileged process, and as such, socket option 337 * operations to manage system properties on any raw socket were allowed to 338 * take place without explicit additional access control checks. However, 339 * raw sockets can now also be created in jail(), and therefore explicit 340 * checks are now required. Likewise, raw sockets can be used by a process 341 * after it gives up privilege, so some caution is required. For options 342 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be 343 * performed in ip_ctloutput() and therefore no check occurs here. 344 * Unilaterally checking suser() here breaks normal IP socket option 345 * operations on raw sockets. 346 * 347 * When adding new socket options here, make sure to add access control 348 * checks here as necessary. 349 */ 350int 351rip_ctloutput(struct socket *so, struct sockopt *sopt) 352{ 353 struct inpcb *inp = sotoinpcb(so); 354 int error, optval; 355 356 if (sopt->sopt_level != IPPROTO_IP) 357 return (EINVAL); 358 359 error = 0; 360 switch (sopt->sopt_dir) { 361 case SOPT_GET: 362 switch (sopt->sopt_name) { 363 case IP_HDRINCL: 364 optval = inp->inp_flags & INP_HDRINCL; 365 error = sooptcopyout(sopt, &optval, sizeof optval); 366 break; 367 368 case IP_FW_ADD: /* ADD actually returns the body... 
*/ 369 case IP_FW_GET: 370 case IP_FW_TABLE_GETSIZE: 371 case IP_FW_TABLE_LIST: 372 error = suser(curthread); 373 if (error != 0) 374 return (error); 375 if (ip_fw_ctl_ptr != NULL) 376 error = ip_fw_ctl_ptr(sopt); 377 else 378 error = ENOPROTOOPT; 379 break; 380 381 case IP_DUMMYNET_GET: 382 error = suser(curthread); 383 if (error != 0) 384 return (error); 385 if (ip_dn_ctl_ptr != NULL) 386 error = ip_dn_ctl_ptr(sopt); 387 else 388 error = ENOPROTOOPT; 389 break ; 390 391 case MRT_INIT: 392 case MRT_DONE: 393 case MRT_ADD_VIF: 394 case MRT_DEL_VIF: 395 case MRT_ADD_MFC: 396 case MRT_DEL_MFC: 397 case MRT_VERSION: 398 case MRT_ASSERT: 399 case MRT_API_SUPPORT: 400 case MRT_API_CONFIG: 401 case MRT_ADD_BW_UPCALL: 402 case MRT_DEL_BW_UPCALL: 403 error = suser(curthread); 404 if (error != 0) 405 return (error); 406 error = ip_mrouter_get ? ip_mrouter_get(so, sopt) : 407 EOPNOTSUPP; 408 break; 409 410 default: 411 error = ip_ctloutput(so, sopt); 412 break; 413 } 414 break; 415 416 case SOPT_SET: 417 switch (sopt->sopt_name) { 418 case IP_HDRINCL: 419 error = sooptcopyin(sopt, &optval, sizeof optval, 420 sizeof optval); 421 if (error) 422 break; 423 if (optval) 424 inp->inp_flags |= INP_HDRINCL; 425 else 426 inp->inp_flags &= ~INP_HDRINCL; 427 break; 428 429 case IP_FW_ADD: 430 case IP_FW_DEL: 431 case IP_FW_FLUSH: 432 case IP_FW_ZERO: 433 case IP_FW_RESETLOG: 434 case IP_FW_TABLE_ADD: 435 case IP_FW_TABLE_DEL: 436 case IP_FW_TABLE_FLUSH: 437 error = suser(curthread); 438 if (error != 0) 439 return (error); 440 if (ip_fw_ctl_ptr != NULL) 441 error = ip_fw_ctl_ptr(sopt); 442 else 443 error = ENOPROTOOPT; 444 break; 445 446 case IP_DUMMYNET_CONFIGURE: 447 case IP_DUMMYNET_DEL: 448 case IP_DUMMYNET_FLUSH: 449 error = suser(curthread); 450 if (error != 0) 451 return (error); 452 if (ip_dn_ctl_ptr != NULL) 453 error = ip_dn_ctl_ptr(sopt); 454 else 455 error = ENOPROTOOPT ; 456 break ; 457 458 case IP_RSVP_ON: 459 error = suser(curthread); 460 if (error != 0) 461 return 
(error); 462 error = ip_rsvp_init(so); 463 break; 464 465 case IP_RSVP_OFF: 466 error = suser(curthread); 467 if (error != 0) 468 return (error); 469 error = ip_rsvp_done(); 470 break; 471 472 case IP_RSVP_VIF_ON: 473 case IP_RSVP_VIF_OFF: 474 error = suser(curthread); 475 if (error != 0) 476 return (error); 477 error = ip_rsvp_vif ? 478 ip_rsvp_vif(so, sopt) : EINVAL; 479 break; 480 481 case MRT_INIT: 482 case MRT_DONE: 483 case MRT_ADD_VIF: 484 case MRT_DEL_VIF: 485 case MRT_ADD_MFC: 486 case MRT_DEL_MFC: 487 case MRT_VERSION: 488 case MRT_ASSERT: 489 case MRT_API_SUPPORT: 490 case MRT_API_CONFIG: 491 case MRT_ADD_BW_UPCALL: 492 case MRT_DEL_BW_UPCALL: 493 error = suser(curthread); 494 if (error != 0) 495 return (error); 496 error = ip_mrouter_set ? ip_mrouter_set(so, sopt) : 497 EOPNOTSUPP; 498 break; 499 500 default: 501 error = ip_ctloutput(so, sopt); 502 break; 503 } 504 break; 505 } 506 507 return (error); 508} 509 510/* 511 * This function exists solely to receive the PRC_IFDOWN messages which 512 * are sent by if_down(). It looks for an ifaddr whose ifa_addr is sa, 513 * and calls in_ifadown() to remove all routes corresponding to that address. 514 * It also receives the PRC_IFUP messages from if_up() and reinstalls the 515 * interface routes. 516 */ 517void 518rip_ctlinput(int cmd, struct sockaddr *sa, void *vip) 519{ 520 struct in_ifaddr *ia; 521 struct ifnet *ifp; 522 int err; 523 int flags; 524 525 switch (cmd) { 526 case PRC_IFDOWN: 527 TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) { 528 if (ia->ia_ifa.ifa_addr == sa 529 && (ia->ia_flags & IFA_ROUTE)) { 530 /* 531 * in_ifscrub kills the interface route. 532 */ 533 in_ifscrub(ia->ia_ifp, ia); 534 /* 535 * in_ifadown gets rid of all the rest of 536 * the routes. This is not quite the right 537 * thing to do, but at least if we are running 538 * a routing process they will come back. 
539 */ 540 in_ifadown(&ia->ia_ifa, 0); 541 break; 542 } 543 } 544 break; 545 546 case PRC_IFUP: 547 TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) { 548 if (ia->ia_ifa.ifa_addr == sa) 549 break; 550 } 551 if (ia == 0 || (ia->ia_flags & IFA_ROUTE)) 552 return; 553 flags = RTF_UP; 554 ifp = ia->ia_ifa.ifa_ifp; 555 556 if ((ifp->if_flags & IFF_LOOPBACK) 557 || (ifp->if_flags & IFF_POINTOPOINT)) 558 flags |= RTF_HOST; 559 560 err = rtinit(&ia->ia_ifa, RTM_ADD, flags); 561 if (err == 0) 562 ia->ia_flags |= IFA_ROUTE; 563 break; 564 } 565} 566 567u_long rip_sendspace = RIPSNDQ; 568u_long rip_recvspace = RIPRCVQ; 569 570SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW, 571 &rip_sendspace, 0, "Maximum outgoing raw IP datagram size"); 572SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW, 573 &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams"); 574 575static int 576rip_attach(struct socket *so, int proto, struct thread *td) 577{ 578 struct inpcb *inp; 579 int error; 580 581 inp = sotoinpcb(so); 582 KASSERT(inp == NULL, ("rip_attach: inp != NULL")); 583 if (jailed(td->td_ucred) && !jail_allow_raw_sockets) 584 return (EPERM); 585 if ((error = suser_cred(td->td_ucred, SUSER_ALLOWJAIL)) != 0) 586 return error; 587 if (proto >= IPPROTO_MAX || proto < 0) 588 return EPROTONOSUPPORT; 589 error = soreserve(so, rip_sendspace, rip_recvspace); 590 if (error) 591 return error; 592 INP_INFO_WLOCK(&ripcbinfo); 593 error = in_pcballoc(so, &ripcbinfo, "rawinp"); 594 if (error) { 595 INP_INFO_WUNLOCK(&ripcbinfo); 596 return error; 597 } 598 inp = (struct inpcb *)so->so_pcb; 599 INP_LOCK(inp); 600 INP_INFO_WUNLOCK(&ripcbinfo); 601 inp->inp_vflag |= INP_IPV4; 602 inp->inp_ip_p = proto; 603 inp->inp_ip_ttl = ip_defttl; 604 INP_UNLOCK(inp); 605 return 0; 606} 607 608static void 609rip_pcbdetach(struct socket *so, struct inpcb *inp) 610{ 611 612 INP_INFO_WLOCK_ASSERT(&ripcbinfo); 613 INP_LOCK_ASSERT(inp); 614 615 if (so == ip_mrouter && ip_mrouter_done) 616 
ip_mrouter_done(); 617 if (ip_rsvp_force_done) 618 ip_rsvp_force_done(so); 619 if (so == ip_rsvpd) 620 ip_rsvp_done(); 621 in_pcbdetach(inp); 622 in_pcbfree(inp); 623} 624 625static void 626rip_detach(struct socket *so) 627{ 628 struct inpcb *inp; 629 630 inp = sotoinpcb(so); 631 KASSERT(inp != NULL, ("rip_detach: inp == NULL")); 632 INP_INFO_WLOCK(&ripcbinfo); 633 INP_LOCK(inp); 634 rip_pcbdetach(so, inp); 635 INP_INFO_WUNLOCK(&ripcbinfo); 636} 637 638static void 639rip_abort(struct socket *so) 640{ 641 struct inpcb *inp; 642 643 inp = sotoinpcb(so); 644 KASSERT(inp != NULL, ("rip_abort: inp == NULL")); 645 INP_INFO_WLOCK(&ripcbinfo); 646 INP_LOCK(inp); 647 soisdisconnected(so); 648 rip_pcbdetach(so, inp); 649 INP_INFO_WUNLOCK(&ripcbinfo); 650} 651 652static int 653rip_disconnect(struct socket *so) 654{ 655 if ((so->so_state & SS_ISCONNECTED) == 0) 656 return ENOTCONN; 657 rip_abort(so); 658 return (0); 659} 660 661static int 662rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td) 663{ 664 struct sockaddr_in *addr = (struct sockaddr_in *)nam; 665 struct inpcb *inp; 666 667 if (nam->sa_len != sizeof(*addr)) 668 return EINVAL; 669 670 if (jailed(td->td_ucred)) { 671 if (addr->sin_addr.s_addr == INADDR_ANY) 672 addr->sin_addr.s_addr = 673 htonl(prison_getip(td->td_ucred)); 674 if (htonl(prison_getip(td->td_ucred)) != addr->sin_addr.s_addr) 675 return (EADDRNOTAVAIL); 676 } 677 678 if (TAILQ_EMPTY(&ifnet) || 679 (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) || 680 (addr->sin_addr.s_addr && 681 ifa_ifwithaddr((struct sockaddr *)addr) == 0)) 682 return EADDRNOTAVAIL; 683 684 inp = sotoinpcb(so); 685 KASSERT(inp != NULL, ("rip_bind: inp == NULL")); 686 INP_INFO_WLOCK(&ripcbinfo); 687 INP_LOCK(inp); 688 inp->inp_laddr = addr->sin_addr; 689 INP_UNLOCK(inp); 690 INP_INFO_WUNLOCK(&ripcbinfo); 691 return 0; 692} 693 694static int 695rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td) 696{ 697 struct sockaddr_in *addr = 
(struct sockaddr_in *)nam; 698 struct inpcb *inp; 699 700 if (nam->sa_len != sizeof(*addr)) 701 return EINVAL; 702 if (TAILQ_EMPTY(&ifnet)) 703 return EADDRNOTAVAIL; 704 if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) 705 return EAFNOSUPPORT; 706 707 inp = sotoinpcb(so); 708 KASSERT(inp != NULL, ("rip_connect: inp == NULL")); 709 INP_INFO_WLOCK(&ripcbinfo); 710 INP_LOCK(inp); 711 inp->inp_faddr = addr->sin_addr; 712 soisconnected(so); 713 INP_UNLOCK(inp); 714 INP_INFO_WUNLOCK(&ripcbinfo); 715 return 0; 716} 717 718static int 719rip_shutdown(struct socket *so) 720{ 721 struct inpcb *inp; 722 723 inp = sotoinpcb(so); 724 KASSERT(inp != NULL, ("rip_shutdown: inp == NULL")); 725 INP_LOCK(inp); 726 socantsendmore(so); 727 INP_UNLOCK(inp); 728 return 0; 729} 730 731static int 732rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, 733 struct mbuf *control, struct thread *td) 734{ 735 struct inpcb *inp; 736 u_long dst; 737 738 inp = sotoinpcb(so); 739 KASSERT(inp != NULL, ("rip_send: inp == NULL")); 740 /* 741 * Note: 'dst' reads below are unlocked. 742 */ 743 if (so->so_state & SS_ISCONNECTED) { 744 if (nam) { 745 m_freem(m); 746 return EISCONN; 747 } 748 dst = inp->inp_faddr.s_addr; /* Unlocked read. */ 749 } else { 750 if (nam == NULL) { 751 m_freem(m); 752 return ENOTCONN; 753 } 754 dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr; 755 } 756 return rip_output(m, so, dst); 757} 758 759static int 760rip_pcblist(SYSCTL_HANDLER_ARGS) 761{ 762 int error, i, n; 763 struct inpcb *inp, **inp_list; 764 inp_gen_t gencnt; 765 struct xinpgen xig; 766 767 /* 768 * The process of preparing the TCB list is too time-consuming and 769 * resource-intensive to repeat twice on every request. 
770 */ 771 if (req->oldptr == 0) { 772 n = ripcbinfo.ipi_count; 773 req->oldidx = 2 * (sizeof xig) 774 + (n + n/8) * sizeof(struct xinpcb); 775 return 0; 776 } 777 778 if (req->newptr != 0) 779 return EPERM; 780 781 /* 782 * OK, now we're committed to doing something. 783 */ 784 INP_INFO_RLOCK(&ripcbinfo); 785 gencnt = ripcbinfo.ipi_gencnt; 786 n = ripcbinfo.ipi_count; 787 INP_INFO_RUNLOCK(&ripcbinfo); 788 789 xig.xig_len = sizeof xig; 790 xig.xig_count = n; 791 xig.xig_gen = gencnt; 792 xig.xig_sogen = so_gencnt; 793 error = SYSCTL_OUT(req, &xig, sizeof xig); 794 if (error) 795 return error; 796 797 inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK); 798 if (inp_list == 0) 799 return ENOMEM; 800 801 INP_INFO_RLOCK(&ripcbinfo); 802 for (inp = LIST_FIRST(ripcbinfo.listhead), i = 0; inp && i < n; 803 inp = LIST_NEXT(inp, inp_list)) { 804 INP_LOCK(inp); 805 if (inp->inp_gencnt <= gencnt && 806 cr_canseesocket(req->td->td_ucred, inp->inp_socket) == 0) { 807 /* XXX held references? */ 808 inp_list[i++] = inp; 809 } 810 INP_UNLOCK(inp); 811 } 812 INP_INFO_RUNLOCK(&ripcbinfo); 813 n = i; 814 815 error = 0; 816 for (i = 0; i < n; i++) { 817 inp = inp_list[i]; 818 if (inp->inp_gencnt <= gencnt) { 819 struct xinpcb xi; 820 bzero(&xi, sizeof(xi)); 821 xi.xi_len = sizeof xi; 822 /* XXX should avoid extra copy */ 823 bcopy(inp, &xi.xi_inp, sizeof *inp); 824 if (inp->inp_socket) 825 sotoxsocket(inp->inp_socket, &xi.xi_socket); 826 error = SYSCTL_OUT(req, &xi, sizeof xi); 827 } 828 } 829 if (!error) { 830 /* 831 * Give the user an updated idea of our state. 832 * If the generation differs from what we told 833 * her before, she knows that something happened 834 * while we were processing this request, and it 835 * might be necessary to retry. 
836 */ 837 INP_INFO_RLOCK(&ripcbinfo); 838 xig.xig_gen = ripcbinfo.ipi_gencnt; 839 xig.xig_sogen = so_gencnt; 840 xig.xig_count = ripcbinfo.ipi_count; 841 INP_INFO_RUNLOCK(&ripcbinfo); 842 error = SYSCTL_OUT(req, &xig, sizeof xig); 843 } 844 free(inp_list, M_TEMP); 845 return error; 846} 847 848/* 849 * This is the wrapper function for in_setsockaddr. We just pass down 850 * the pcbinfo for in_setpeeraddr to lock. 851 */ 852static int 853rip_sockaddr(struct socket *so, struct sockaddr **nam) 854{ 855 return (in_setsockaddr(so, nam, &ripcbinfo)); 856} 857 858/* 859 * This is the wrapper function for in_setpeeraddr. We just pass down 860 * the pcbinfo for in_setpeeraddr to lock. 861 */ 862static int 863rip_peeraddr(struct socket *so, struct sockaddr **nam) 864{ 865 return (in_setpeeraddr(so, nam, &ripcbinfo)); 866} 867 868 869SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD, 0, 0, 870 rip_pcblist, "S,xinpcb", "List of active raw IP sockets"); 871 872struct pr_usrreqs rip_usrreqs = { 873 .pru_abort = rip_abort, 874 .pru_attach = rip_attach, 875 .pru_bind = rip_bind, 876 .pru_connect = rip_connect, 877 .pru_control = in_control, 878 .pru_detach = rip_detach, 879 .pru_disconnect = rip_disconnect, 880 .pru_peeraddr = rip_peeraddr, 881 .pru_send = rip_send, 882 .pru_shutdown = rip_shutdown, 883 .pru_sockaddr = rip_sockaddr, 884 .pru_sosetlabel = in_pcbsosetlabel 885};
| 144} 145 146static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET }; 147 148static int 149raw_append(struct inpcb *last, struct ip *ip, struct mbuf *n) 150{ 151 int policyfail = 0; 152 153 INP_LOCK_ASSERT(last); 154 155#if defined(IPSEC) || defined(FAST_IPSEC) 156 /* check AH/ESP integrity. */ 157 if (ipsec4_in_reject(n, last)) { 158 policyfail = 1; 159#ifdef IPSEC 160 ipsecstat.in_polvio++; 161#endif /*IPSEC*/ 162 /* do not inject data to pcb */ 163 } 164#endif /*IPSEC || FAST_IPSEC*/ 165#ifdef MAC 166 if (!policyfail && mac_check_inpcb_deliver(last, n) != 0) 167 policyfail = 1; 168#endif 169 /* Check the minimum TTL for socket. */ 170 if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl) 171 policyfail = 1; 172 if (!policyfail) { 173 struct mbuf *opts = NULL; 174 struct socket *so; 175 176 so = last->inp_socket; 177 if ((last->inp_flags & INP_CONTROLOPTS) || 178 (so->so_options & (SO_TIMESTAMP | SO_BINTIME))) 179 ip_savecontrol(last, &opts, ip, n); 180 SOCKBUF_LOCK(&so->so_rcv); 181 if (sbappendaddr_locked(&so->so_rcv, 182 (struct sockaddr *)&ripsrc, n, opts) == 0) { 183 /* should notify about lost packet */ 184 m_freem(n); 185 if (opts) 186 m_freem(opts); 187 SOCKBUF_UNLOCK(&so->so_rcv); 188 } else 189 sorwakeup_locked(so); 190 } else 191 m_freem(n); 192 return policyfail; 193} 194 195/* 196 * Setup generic address and protocol structures 197 * for raw_input routine, then pass them along with 198 * mbuf chain. 
199 */ 200void 201rip_input(struct mbuf *m, int off) 202{ 203 struct ip *ip = mtod(m, struct ip *); 204 int proto = ip->ip_p; 205 struct inpcb *inp, *last; 206 207 INP_INFO_RLOCK(&ripcbinfo); 208 ripsrc.sin_addr = ip->ip_src; 209 last = NULL; 210 LIST_FOREACH(inp, &ripcb, inp_list) { 211 INP_LOCK(inp); 212 if (inp->inp_ip_p && inp->inp_ip_p != proto) { 213 docontinue: 214 INP_UNLOCK(inp); 215 continue; 216 } 217#ifdef INET6 218 if ((inp->inp_vflag & INP_IPV4) == 0) 219 goto docontinue; 220#endif 221 if (inp->inp_laddr.s_addr && 222 inp->inp_laddr.s_addr != ip->ip_dst.s_addr) 223 goto docontinue; 224 if (inp->inp_faddr.s_addr && 225 inp->inp_faddr.s_addr != ip->ip_src.s_addr) 226 goto docontinue; 227 if (jailed(inp->inp_socket->so_cred)) 228 if (htonl(prison_getip(inp->inp_socket->so_cred)) != 229 ip->ip_dst.s_addr) 230 goto docontinue; 231 if (last) { 232 struct mbuf *n; 233 234 n = m_copy(m, 0, (int)M_COPYALL); 235 if (n != NULL) 236 (void) raw_append(last, ip, n); 237 /* XXX count dropped packet */ 238 INP_UNLOCK(last); 239 } 240 last = inp; 241 } 242 if (last != NULL) { 243 if (raw_append(last, ip, m) != 0) 244 ipstat.ips_delivered--; 245 INP_UNLOCK(last); 246 } else { 247 m_freem(m); 248 ipstat.ips_noproto++; 249 ipstat.ips_delivered--; 250 } 251 INP_INFO_RUNLOCK(&ripcbinfo); 252} 253 254/* 255 * Generate IP header and pass packet to ip_output. 256 * Tack on options user may have setup with control call. 257 */ 258int 259rip_output(struct mbuf *m, struct socket *so, u_long dst) 260{ 261 struct ip *ip; 262 int error; 263 struct inpcb *inp = sotoinpcb(so); 264 int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) | 265 IP_ALLOWBROADCAST; 266 267 /* 268 * If the user handed us a complete IP packet, use it. 269 * Otherwise, allocate an mbuf for a header and fill it in. 
270 */ 271 if ((inp->inp_flags & INP_HDRINCL) == 0) { 272 if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) { 273 m_freem(m); 274 return(EMSGSIZE); 275 } 276 M_PREPEND(m, sizeof(struct ip), M_DONTWAIT); 277 if (m == NULL) 278 return(ENOBUFS); 279 280 INP_LOCK(inp); 281 ip = mtod(m, struct ip *); 282 ip->ip_tos = inp->inp_ip_tos; 283 if (inp->inp_flags & INP_DONTFRAG) 284 ip->ip_off = IP_DF; 285 else 286 ip->ip_off = 0; 287 ip->ip_p = inp->inp_ip_p; 288 ip->ip_len = m->m_pkthdr.len; 289 if (jailed(inp->inp_socket->so_cred)) 290 ip->ip_src.s_addr = 291 htonl(prison_getip(inp->inp_socket->so_cred)); 292 else 293 ip->ip_src = inp->inp_laddr; 294 ip->ip_dst.s_addr = dst; 295 ip->ip_ttl = inp->inp_ip_ttl; 296 } else { 297 if (m->m_pkthdr.len > IP_MAXPACKET) { 298 m_freem(m); 299 return(EMSGSIZE); 300 } 301 INP_LOCK(inp); 302 ip = mtod(m, struct ip *); 303 if (jailed(inp->inp_socket->so_cred)) { 304 if (ip->ip_src.s_addr != 305 htonl(prison_getip(inp->inp_socket->so_cred))) { 306 INP_UNLOCK(inp); 307 m_freem(m); 308 return (EPERM); 309 } 310 } 311 /* don't allow both user specified and setsockopt options, 312 and don't allow packet length sizes that will crash */ 313 if (((ip->ip_hl != (sizeof (*ip) >> 2)) 314 && inp->inp_options) 315 || (ip->ip_len > m->m_pkthdr.len) 316 || (ip->ip_len < (ip->ip_hl << 2))) { 317 INP_UNLOCK(inp); 318 m_freem(m); 319 return EINVAL; 320 } 321 if (ip->ip_id == 0) 322 ip->ip_id = ip_newid(); 323 /* XXX prevent ip_output from overwriting header fields */ 324 flags |= IP_RAWOUTPUT; 325 ipstat.ips_rawout++; 326 } 327 328 if (inp->inp_vflag & INP_ONESBCAST) 329 flags |= IP_SENDONES; 330 331#ifdef MAC 332 mac_create_mbuf_from_inpcb(inp, m); 333#endif 334 335 error = ip_output(m, inp->inp_options, NULL, flags, 336 inp->inp_moptions, inp); 337 INP_UNLOCK(inp); 338 return error; 339} 340 341/* 342 * Raw IP socket option processing. 
343 * 344 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could 345 * only be created by a privileged process, and as such, socket option 346 * operations to manage system properties on any raw socket were allowed to 347 * take place without explicit additional access control checks. However, 348 * raw sockets can now also be created in jail(), and therefore explicit 349 * checks are now required. Likewise, raw sockets can be used by a process 350 * after it gives up privilege, so some caution is required. For options 351 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be 352 * performed in ip_ctloutput() and therefore no check occurs here. 353 * Unilaterally checking suser() here breaks normal IP socket option 354 * operations on raw sockets. 355 * 356 * When adding new socket options here, make sure to add access control 357 * checks here as necessary. 358 */ 359int 360rip_ctloutput(struct socket *so, struct sockopt *sopt) 361{ 362 struct inpcb *inp = sotoinpcb(so); 363 int error, optval; 364 365 if (sopt->sopt_level != IPPROTO_IP) 366 return (EINVAL); 367 368 error = 0; 369 switch (sopt->sopt_dir) { 370 case SOPT_GET: 371 switch (sopt->sopt_name) { 372 case IP_HDRINCL: 373 optval = inp->inp_flags & INP_HDRINCL; 374 error = sooptcopyout(sopt, &optval, sizeof optval); 375 break; 376 377 case IP_FW_ADD: /* ADD actually returns the body... 
*/ 378 case IP_FW_GET: 379 case IP_FW_TABLE_GETSIZE: 380 case IP_FW_TABLE_LIST: 381 error = suser(curthread); 382 if (error != 0) 383 return (error); 384 if (ip_fw_ctl_ptr != NULL) 385 error = ip_fw_ctl_ptr(sopt); 386 else 387 error = ENOPROTOOPT; 388 break; 389 390 case IP_DUMMYNET_GET: 391 error = suser(curthread); 392 if (error != 0) 393 return (error); 394 if (ip_dn_ctl_ptr != NULL) 395 error = ip_dn_ctl_ptr(sopt); 396 else 397 error = ENOPROTOOPT; 398 break ; 399 400 case MRT_INIT: 401 case MRT_DONE: 402 case MRT_ADD_VIF: 403 case MRT_DEL_VIF: 404 case MRT_ADD_MFC: 405 case MRT_DEL_MFC: 406 case MRT_VERSION: 407 case MRT_ASSERT: 408 case MRT_API_SUPPORT: 409 case MRT_API_CONFIG: 410 case MRT_ADD_BW_UPCALL: 411 case MRT_DEL_BW_UPCALL: 412 error = suser(curthread); 413 if (error != 0) 414 return (error); 415 error = ip_mrouter_get ? ip_mrouter_get(so, sopt) : 416 EOPNOTSUPP; 417 break; 418 419 default: 420 error = ip_ctloutput(so, sopt); 421 break; 422 } 423 break; 424 425 case SOPT_SET: 426 switch (sopt->sopt_name) { 427 case IP_HDRINCL: 428 error = sooptcopyin(sopt, &optval, sizeof optval, 429 sizeof optval); 430 if (error) 431 break; 432 if (optval) 433 inp->inp_flags |= INP_HDRINCL; 434 else 435 inp->inp_flags &= ~INP_HDRINCL; 436 break; 437 438 case IP_FW_ADD: 439 case IP_FW_DEL: 440 case IP_FW_FLUSH: 441 case IP_FW_ZERO: 442 case IP_FW_RESETLOG: 443 case IP_FW_TABLE_ADD: 444 case IP_FW_TABLE_DEL: 445 case IP_FW_TABLE_FLUSH: 446 error = suser(curthread); 447 if (error != 0) 448 return (error); 449 if (ip_fw_ctl_ptr != NULL) 450 error = ip_fw_ctl_ptr(sopt); 451 else 452 error = ENOPROTOOPT; 453 break; 454 455 case IP_DUMMYNET_CONFIGURE: 456 case IP_DUMMYNET_DEL: 457 case IP_DUMMYNET_FLUSH: 458 error = suser(curthread); 459 if (error != 0) 460 return (error); 461 if (ip_dn_ctl_ptr != NULL) 462 error = ip_dn_ctl_ptr(sopt); 463 else 464 error = ENOPROTOOPT ; 465 break ; 466 467 case IP_RSVP_ON: 468 error = suser(curthread); 469 if (error != 0) 470 return 
(error); 471 error = ip_rsvp_init(so); 472 break; 473 474 case IP_RSVP_OFF: 475 error = suser(curthread); 476 if (error != 0) 477 return (error); 478 error = ip_rsvp_done(); 479 break; 480 481 case IP_RSVP_VIF_ON: 482 case IP_RSVP_VIF_OFF: 483 error = suser(curthread); 484 if (error != 0) 485 return (error); 486 error = ip_rsvp_vif ? 487 ip_rsvp_vif(so, sopt) : EINVAL; 488 break; 489 490 case MRT_INIT: 491 case MRT_DONE: 492 case MRT_ADD_VIF: 493 case MRT_DEL_VIF: 494 case MRT_ADD_MFC: 495 case MRT_DEL_MFC: 496 case MRT_VERSION: 497 case MRT_ASSERT: 498 case MRT_API_SUPPORT: 499 case MRT_API_CONFIG: 500 case MRT_ADD_BW_UPCALL: 501 case MRT_DEL_BW_UPCALL: 502 error = suser(curthread); 503 if (error != 0) 504 return (error); 505 error = ip_mrouter_set ? ip_mrouter_set(so, sopt) : 506 EOPNOTSUPP; 507 break; 508 509 default: 510 error = ip_ctloutput(so, sopt); 511 break; 512 } 513 break; 514 } 515 516 return (error); 517} 518 519/* 520 * This function exists solely to receive the PRC_IFDOWN messages which 521 * are sent by if_down(). It looks for an ifaddr whose ifa_addr is sa, 522 * and calls in_ifadown() to remove all routes corresponding to that address. 523 * It also receives the PRC_IFUP messages from if_up() and reinstalls the 524 * interface routes. 525 */ 526void 527rip_ctlinput(int cmd, struct sockaddr *sa, void *vip) 528{ 529 struct in_ifaddr *ia; 530 struct ifnet *ifp; 531 int err; 532 int flags; 533 534 switch (cmd) { 535 case PRC_IFDOWN: 536 TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) { 537 if (ia->ia_ifa.ifa_addr == sa 538 && (ia->ia_flags & IFA_ROUTE)) { 539 /* 540 * in_ifscrub kills the interface route. 541 */ 542 in_ifscrub(ia->ia_ifp, ia); 543 /* 544 * in_ifadown gets rid of all the rest of 545 * the routes. This is not quite the right 546 * thing to do, but at least if we are running 547 * a routing process they will come back. 
 */
			in_ifadown(&ia->ia_ifa, 0);
			break;
		}
	}
	break;

	case PRC_IFUP:
		/* Locate the ifaddr this notification refers to. */
		TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		/* Nothing to do if unknown or its route is already plumbed. */
		if (ia == 0 || (ia->ia_flags & IFA_ROUTE))
			return;
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		/* Loopback and p2p interfaces get host routes, not net routes. */
		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		/* Re-install the interface route; record success on the addr. */
		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;
		break;
	}
}

/* Default send/receive socket-buffer reservations for raw IP sockets. */
u_long	rip_sendspace = RIPSNDQ;
u_long	rip_recvspace = RIPRCVQ;

SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");

/*
 * Attach a new raw IP socket: privilege-check the caller, reserve socket
 * buffer space, and allocate/initialize an IPv4 inpcb bound to 'proto'.
 * Raw sockets require superuser, and are refused inside a jail unless
 * jail_allow_raw_sockets is set.
 */
static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));
	if (jailed(td->td_ucred) && !jail_allow_raw_sockets)
		return (EPERM);
	if ((error = suser_cred(td->td_ucred, SUSER_ALLOWJAIL)) != 0)
		return error;
	if (proto >= IPPROTO_MAX || proto < 0)
		return EPROTONOSUPPORT;
	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return error;
	/* pcbinfo write lock covers allocation onto the global ripcb list. */
	INP_INFO_WLOCK(&ripcbinfo);
	error = in_pcballoc(so, &ripcbinfo, "rawinp");
	if (error) {
		INP_INFO_WUNLOCK(&ripcbinfo);
		return error;
	}
	inp = (struct inpcb *)so->so_pcb;
	/* Take the pcb lock before dropping the info lock so init is atomic. */
	INP_LOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = ip_defttl;
	INP_UNLOCK(inp);
	return 0;
}

/*
 * Common pcb teardown for detach/abort: release any multicast-routing or
 * RSVP state registered against this socket, then detach and free the pcb.
 * Caller must hold the pcbinfo write lock and the pcb lock (asserted).
 */
static void
rip_pcbdetach(struct socket *so, struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(&ripcbinfo);
	INP_LOCK_ASSERT(inp);

	if (so == ip_mrouter && ip_mrouter_done)
		ip_mrouter_done();
	if (ip_rsvp_force_done)
		ip_rsvp_force_done(so);
	if (so == ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	in_pcbfree(inp);
}

/*
 * Detach the socket from its pcb.  Takes the locks rip_pcbdetach asserts;
 * in_pcbfree releases the pcb lock, so only the info lock is dropped here.
 */
static void
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
	INP_INFO_WLOCK(&ripcbinfo);
	INP_LOCK(inp);
	rip_pcbdetach(so, inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
}

/*
 * Abort: mark the socket disconnected and tear down the pcb.
 */
static void
rip_abort(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));
	INP_INFO_WLOCK(&ripcbinfo);
	INP_LOCK(inp);
	soisdisconnected(so);
	rip_pcbdetach(so, inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
}

/*
 * Disconnect a connected raw socket.
 *
 * NOTE(review): this delegates to rip_abort(), which destroys the pcb
 * entirely rather than merely clearing the foreign address — confirm that
 * full teardown (not just un-connect) is the intended semantic here.
 */
static int
rip_disconnect(struct socket *so)
{
	if ((so->so_state & SS_ISCONNECTED) == 0)
		return ENOTCONN;
	rip_abort(so);
	return (0);
}

/*
 * Bind a local address.  Jailed processes are coerced onto their prison
 * address; otherwise the address must belong to a configured interface.
 */
static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return EINVAL;

	if (jailed(td->td_ucred)) {
		/* INADDR_ANY becomes the prison IP; anything else must match it. */
		if (addr->sin_addr.s_addr == INADDR_ANY)
			addr->sin_addr.s_addr =
			    htonl(prison_getip(td->td_ucred));
		if (htonl(prison_getip(td->td_ucred)) != addr->sin_addr.s_addr)
			return (EADDRNOTAVAIL);
	}

	/* A non-wildcard address must be locally configured. */
	if (TAILQ_EMPTY(&ifnet) ||
	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
	    (addr->sin_addr.s_addr &&
	     ifa_ifwithaddr((struct sockaddr *)addr) == 0))
		return EADDRNOTAVAIL;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));
	INP_INFO_WLOCK(&ripcbinfo);
	INP_LOCK(inp);
	inp->inp_laddr = addr->sin_addr;
	INP_UNLOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
	return 0;
}

/*
 * Connect: record the foreign address in the pcb and mark the socket
 * connected.  No handshake is involved for raw IP.
 */
static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return EINVAL;
	if (TAILQ_EMPTY(&ifnet))
		return EADDRNOTAVAIL;
	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
		return EAFNOSUPPORT;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));
	INP_INFO_WLOCK(&ripcbinfo);
	INP_LOCK(inp);
	inp->inp_faddr = addr->sin_addr;
	soisconnected(so);
	INP_UNLOCK(inp);
	INP_INFO_WUNLOCK(&ripcbinfo);
	return 0;
}

/*
 * Shutdown: disallow further sends; receives are unaffected.
 */
static int
rip_shutdown(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));
	INP_LOCK(inp);
	socantsendmore(so);
	INP_UNLOCK(inp);
	return 0;
}

/*
 * Send a datagram: resolve the destination (connected address, or the
 * sockaddr supplied with this send) and hand off to rip_output().
 * Frees 'm' on the EISCONN/ENOTCONN error paths.
 */
static int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct thread *td)
{
	struct inpcb *inp;
	u_long dst;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_send: inp == NULL"));
	/*
	 * Note: 'dst' reads below are unlocked.
	 */
	if (so->so_state & SS_ISCONNECTED) {
		/* Connected sockets may not specify a destination. */
		if (nam) {
			m_freem(m);
			return EISCONN;
		}
		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
	} else {
		/* Unconnected sockets must supply one. */
		if (nam == NULL) {
			m_freem(m);
			return ENOTCONN;
		}
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	return rip_output(m, so, dst);
}

/*
 * sysctl handler exporting the list of raw IP pcbs as xinpcb records,
 * bracketed by xinpgen generation markers so userland can detect changes
 * that occurred while the list was being copied out.
 */
static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		/* Size-probe only: report a generous estimate (count + 12.5%). */
		n = ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n/8) * sizeof(struct xinpcb);
		return 0;
	}

	/* Read-only sysctl; writes are refused. */
	if (req->newptr != 0)
		return EPERM;

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_RLOCK(&ripcbinfo);
	gencnt = ripcbinfo.ipi_gencnt;
	n = ripcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&ripcbinfo);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return error;

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return ENOMEM;

	/*
	 * Snapshot pointers to the pcbs the caller is permitted to see,
	 * skipping any created after the generation count was sampled.
	 */
	INP_INFO_RLOCK(&ripcbinfo);
	for (inp = LIST_FIRST(ripcbinfo.listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_LOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseesocket(req->td->td_ucred, inp->inp_socket) == 0) {
			/* XXX held references? */
			inp_list[i++] = inp;
		}
		INP_UNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&ripcbinfo);
	n = i;

	/* Copy each snapshotted pcb out as an xinpcb record. */
	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;
			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			bcopy(inp, &xi.xi_inp, sizeof *inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		INP_INFO_RLOCK(&ripcbinfo);
		xig.xig_gen = ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = ripcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&ripcbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return error;
}

/*
 * This is the wrapper function for in_setsockaddr.  We just pass down
 * the pcbinfo for in_setsockaddr to lock.
 */
static int
rip_sockaddr(struct socket *so, struct sockaddr **nam)
{
	return (in_setsockaddr(so, nam, &ripcbinfo));
}

/*
 * This is the wrapper function for in_setpeeraddr.  We just pass down
 * the pcbinfo for in_setpeeraddr to lock.
 */
static int
rip_peeraddr(struct socket *so, struct sockaddr **nam)
{
	return (in_setpeeraddr(so, nam, &ripcbinfo));
}


SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD, 0, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");

/* Protocol user-request switch for raw IPv4 sockets. */
struct pr_usrreqs rip_usrreqs = {
	.pru_abort =		rip_abort,
	.pru_attach =		rip_attach,
	.pru_bind =		rip_bind,
	.pru_connect =		rip_connect,
	.pru_control =		in_control,
	.pru_detach =		rip_detach,
	.pru_disconnect =	rip_disconnect,
	.pru_peeraddr =		rip_peeraddr,
	.pru_send =		rip_send,
	.pru_shutdown =		rip_shutdown,
	.pru_sockaddr =		rip_sockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel
};
|