
static volatile unsigned int siftr_exit_pkt_manager_thread = 0;
static unsigned int siftr_enabled = 0;
static unsigned int siftr_pkts_per_log = 1;
static unsigned int siftr_generate_hashes = 0;
/* static unsigned int siftr_binary_log = 0; */
static char siftr_logfile[PATH_MAX] = "/var/log/siftr.log";
static u_long siftr_hashmask;
STAILQ_HEAD(pkthead, pkt_node) pkt_queue = STAILQ_HEAD_INITIALIZER(pkt_queue);
LIST_HEAD(listhead, flow_hash_node) *counter_hash;
static int wait_for_pkt;
static struct alq *siftr_alq = NULL;
static struct mtx siftr_pkt_queue_mtx;
static struct mtx siftr_pkt_mgr_mtx;
static struct thread *siftr_pkt_manager_thr = NULL;
/*
 * pfil.h defines PFIL_IN as 1 and PFIL_OUT as 2,
 * which we use as an index into this array.
 */
static char direction[3] = {'\0', 'i', 'o'};

/* Required function prototypes. */
static int siftr_sysctl_enabled_handler(SYSCTL_HANDLER_ARGS);
static int siftr_sysctl_logfile_name_handler(SYSCTL_HANDLER_ARGS);


/* Declare the net.inet.siftr sysctl tree and populate it. */

SYSCTL_DECL(_net_inet_siftr);

SYSCTL_NODE(_net_inet, OID_AUTO, siftr, CTLFLAG_RW, NULL,
    "siftr related settings");

SYSCTL_PROC(_net_inet_siftr, OID_AUTO, enabled, CTLTYPE_UINT|CTLFLAG_RW,
    &siftr_enabled, 0, &siftr_sysctl_enabled_handler, "IU",
    "switch siftr module operations on/off");

SYSCTL_PROC(_net_inet_siftr, OID_AUTO, logfile, CTLTYPE_STRING|CTLFLAG_RW,
    &siftr_logfile, sizeof(siftr_logfile), &siftr_sysctl_logfile_name_handler,
    "A", "file to save siftr log messages to");

SYSCTL_UINT(_net_inet_siftr, OID_AUTO, ppl, CTLFLAG_RW,
    &siftr_pkts_per_log, 1,
    "number of packets between generating a log message");

SYSCTL_UINT(_net_inet_siftr, OID_AUTO, genhashes, CTLFLAG_RW,
    &siftr_generate_hashes, 0,
    "enable packet hash generation");

/* XXX: TODO
SYSCTL_UINT(_net_inet_siftr, OID_AUTO, binary, CTLFLAG_RW,
    &siftr_binary_log, 0,
    "write log files in binary instead of ascii");
*/


/* Begin functions. */

/*
 * Process a pkt_node from the queue: update the flow's entry in the counter
 * hash table and, when enough packets have been seen for the flow, format a
 * log message and hand it to the ALQ for writing to the log file.
 */
static void
siftr_process_pkt(struct pkt_node *pkt_node)
{
	struct flow_hash_node *hash_node;
	struct listhead *counter_list;
	struct siftr_stats *ss;
	struct ale *log_buf;
	uint8_t key[FLOW_KEY_LEN];
	uint8_t found_match, key_offset;

	hash_node = NULL;
	ss = DPCPU_PTR(ss);
	found_match = 0;
	key_offset = 1;

	/*
	 * Create the key that will be used to create a hash index
	 * into our hash table. Our key consists of:
	 * ipversion, localip, localport, foreignip, foreignport
	 */
	key[0] = pkt_node->ipver;
	memcpy(key + key_offset, &pkt_node->ip_laddr,
	    sizeof(pkt_node->ip_laddr));
	key_offset += sizeof(pkt_node->ip_laddr);
	memcpy(key + key_offset, &pkt_node->tcp_localport,
	    sizeof(pkt_node->tcp_localport));
	key_offset += sizeof(pkt_node->tcp_localport);
	memcpy(key + key_offset, &pkt_node->ip_faddr,
	    sizeof(pkt_node->ip_faddr));
	key_offset += sizeof(pkt_node->ip_faddr);
	memcpy(key + key_offset, &pkt_node->tcp_foreignport,
	    sizeof(pkt_node->tcp_foreignport));

	counter_list = counter_hash +
	    (hash32_buf(key, sizeof(key), 0) & siftr_hashmask);

	/*
	 * If the list is not empty i.e. the hash index has
	 * been used by another flow previously.
	 */
	if (LIST_FIRST(counter_list) != NULL) {
		/*
		 * Loop through the hash nodes in the list.
		 * There should normally only be 1 hash node in the list,
		 * except if there have been collisions at the hash index
		 * computed by hash32_buf().
		 */
		LIST_FOREACH(hash_node, counter_list, nodes) {
			/*
			 * Check if the key for the pkt we are currently
			 * processing is the same as the key stored in the
			 * hash node we are currently processing.
			 * If they are the same, then we've found the
			 * hash node that stores the counter for the flow
			 * the pkt belongs to.
			 */
			if (memcmp(hash_node->key, key, sizeof(key)) == 0) {
				found_match = 1;
				break;
			}
		}
	}

	/* If this flow hash hasn't been seen before or we have a collision. */
	if (hash_node == NULL || !found_match) {
		/* Create a new hash node to store the flow's counter. */
		hash_node = malloc(sizeof(struct flow_hash_node),
		    M_SIFTR_HASHNODE, M_WAITOK);

		if (hash_node != NULL) {
			/* Initialise our new hash node list entry. */
			hash_node->counter = 0;
			memcpy(hash_node->key, key, sizeof(key));
			LIST_INSERT_HEAD(counter_list, hash_node, nodes);
		} else {
			/* Malloc failed. */
			if (pkt_node->direction == PFIL_IN)
				ss->nskip_in_malloc++;
			else
				ss->nskip_out_malloc++;

			return;
		}
	} else if (siftr_pkts_per_log > 1) {
		/*
		 * Taking the remainder of the counter divided
		 * by the current value of siftr_pkts_per_log
		 * and storing that in counter provides a neat
		 * way to modulate the frequency of log
		 * messages being written to the log file.
		 */
		hash_node->counter = (hash_node->counter + 1) %
		    siftr_pkts_per_log;

		/*
		 * If we have not seen enough packets since the last time
		 * we wrote a log message for this connection, return.
		 */
		if (hash_node->counter > 0)
			return;
	}

	log_buf = alq_getn(siftr_alq, MAX_LOG_MSG_LEN, ALQ_WAITOK);

	if (log_buf == NULL)
		return; /* Should only happen if the ALQ is shutting down. */

#ifdef SIFTR_IPV6
	pkt_node->ip_laddr[3] = ntohl(pkt_node->ip_laddr[3]);
	pkt_node->ip_faddr[3] = ntohl(pkt_node->ip_faddr[3]);

	if (pkt_node->ipver == INP_IPV6) { /* IPv6 packet */
		pkt_node->ip_laddr[0] = ntohl(pkt_node->ip_laddr[0]);
		pkt_node->ip_laddr[1] = ntohl(pkt_node->ip_laddr[1]);
		pkt_node->ip_laddr[2] = ntohl(pkt_node->ip_laddr[2]);
		pkt_node->ip_faddr[0] = ntohl(pkt_node->ip_faddr[0]);
		pkt_node->ip_faddr[1] = ntohl(pkt_node->ip_faddr[1]);
		pkt_node->ip_faddr[2] = ntohl(pkt_node->ip_faddr[2]);

		/* Construct an IPv6 log message. */
		log_buf->ae_bytesused = snprintf(log_buf->ae_data,
		    MAX_LOG_MSG_LEN,
		    "%c,0x%08x,%jd.%06ld,%x:%x:%x:%x:%x:%x:%x:%x,%u,%x:%x:%x:"
		    "%x:%x:%x:%x:%x,%u,%ld,%ld,%ld,%ld,%ld,%u,%u,%u,%u,%u,%u,"
		    "%u,%d,%u,%u,%u,%u,%u,%u\n",
		    direction[pkt_node->direction],
		    pkt_node->hash,
		    (intmax_t)pkt_node->tval.tv_sec,
		    pkt_node->tval.tv_usec,
		    UPPER_SHORT(pkt_node->ip_laddr[0]),
		    LOWER_SHORT(pkt_node->ip_laddr[0]),
		    UPPER_SHORT(pkt_node->ip_laddr[1]),
		    LOWER_SHORT(pkt_node->ip_laddr[1]),
		    UPPER_SHORT(pkt_node->ip_laddr[2]),
		    LOWER_SHORT(pkt_node->ip_laddr[2]),
		    UPPER_SHORT(pkt_node->ip_laddr[3]),
		    LOWER_SHORT(pkt_node->ip_laddr[3]),
		    ntohs(pkt_node->tcp_localport),
		    UPPER_SHORT(pkt_node->ip_faddr[0]),
		    LOWER_SHORT(pkt_node->ip_faddr[0]),
		    UPPER_SHORT(pkt_node->ip_faddr[1]),
		    LOWER_SHORT(pkt_node->ip_faddr[1]),
		    UPPER_SHORT(pkt_node->ip_faddr[2]),
		    LOWER_SHORT(pkt_node->ip_faddr[2]),
		    UPPER_SHORT(pkt_node->ip_faddr[3]),
		    LOWER_SHORT(pkt_node->ip_faddr[3]),
		    ntohs(pkt_node->tcp_foreignport),
		    pkt_node->snd_ssthresh,
		    pkt_node->snd_cwnd,
		    pkt_node->snd_bwnd,
		    pkt_node->snd_wnd,
		    pkt_node->rcv_wnd,
		    pkt_node->snd_scale,
		    pkt_node->rcv_scale,
		    pkt_node->conn_state,
		    pkt_node->max_seg_size,
		    pkt_node->smoothed_rtt,
		    pkt_node->sack_enabled,
		    pkt_node->flags,
		    pkt_node->rxt_length,
		    pkt_node->snd_buf_hiwater,
		    pkt_node->snd_buf_cc,
		    pkt_node->rcv_buf_hiwater,
		    pkt_node->rcv_buf_cc,
		    pkt_node->sent_inflight_bytes,
		    pkt_node->t_segqlen);
	} else { /* IPv4 packet */
		pkt_node->ip_laddr[0] = FIRST_OCTET(pkt_node->ip_laddr[3]);
		pkt_node->ip_laddr[1] = SECOND_OCTET(pkt_node->ip_laddr[3]);
		pkt_node->ip_laddr[2] = THIRD_OCTET(pkt_node->ip_laddr[3]);
		pkt_node->ip_laddr[3] = FOURTH_OCTET(pkt_node->ip_laddr[3]);
		pkt_node->ip_faddr[0] = FIRST_OCTET(pkt_node->ip_faddr[3]);
		pkt_node->ip_faddr[1] = SECOND_OCTET(pkt_node->ip_faddr[3]);
		pkt_node->ip_faddr[2] = THIRD_OCTET(pkt_node->ip_faddr[3]);
		pkt_node->ip_faddr[3] = FOURTH_OCTET(pkt_node->ip_faddr[3]);
#endif /* SIFTR_IPV6 */

	/* Construct an IPv4 log message. */
	log_buf->ae_bytesused = snprintf(log_buf->ae_data,
	    MAX_LOG_MSG_LEN,
	    "%c,0x%08x,%jd.%06ld,%u.%u.%u.%u,%u,%u.%u.%u.%u,%u,%ld,%ld,"
	    "%ld,%ld,%ld,%u,%u,%u,%u,%u,%u,%u,%d,%u,%u,%u,%u,%u,%u\n",
	    direction[pkt_node->direction],
	    pkt_node->hash,
	    (intmax_t)pkt_node->tval.tv_sec,
	    pkt_node->tval.tv_usec,
	    pkt_node->ip_laddr[0],
	    pkt_node->ip_laddr[1],
	    pkt_node->ip_laddr[2],
	    pkt_node->ip_laddr[3],
	    ntohs(pkt_node->tcp_localport),
	    pkt_node->ip_faddr[0],
	    pkt_node->ip_faddr[1],
	    pkt_node->ip_faddr[2],
	    pkt_node->ip_faddr[3],
	    ntohs(pkt_node->tcp_foreignport),
	    pkt_node->snd_ssthresh,
	    pkt_node->snd_cwnd,
	    pkt_node->snd_bwnd,
	    pkt_node->snd_wnd,
	    pkt_node->rcv_wnd,
	    pkt_node->snd_scale,
	    pkt_node->rcv_scale,
	    pkt_node->conn_state,
	    pkt_node->max_seg_size,
	    pkt_node->smoothed_rtt,
	    pkt_node->sack_enabled,
	    pkt_node->flags,
	    pkt_node->rxt_length,
	    pkt_node->snd_buf_hiwater,
	    pkt_node->snd_buf_cc,
	    pkt_node->rcv_buf_hiwater,
	    pkt_node->rcv_buf_cc,
	    pkt_node->sent_inflight_bytes,
	    pkt_node->t_segqlen);
#ifdef SIFTR_IPV6
	}
#endif

	alq_post_flags(siftr_alq, log_buf, 0);
}


static void
siftr_pkt_manager_thread(void *arg)
{
	STAILQ_HEAD(pkthead, pkt_node) tmp_pkt_queue =
	    STAILQ_HEAD_INITIALIZER(tmp_pkt_queue);
	struct pkt_node *pkt_node, *pkt_node_temp;
	uint8_t draining;

	draining = 2;

	mtx_lock(&siftr_pkt_mgr_mtx);

	/* draining == 0 when queue has been flushed and it's safe to exit. */
	while (draining) {
		/*
		 * Sleep until we are signalled to wake because thread has
		 * been told to exit or until 1 tick has passed.
		 */
		mtx_sleep(&wait_for_pkt, &siftr_pkt_mgr_mtx, PWAIT, "pktwait",
		    1);

		/* Gain exclusive access to the pkt_node queue. */
		mtx_lock(&siftr_pkt_queue_mtx);

		/*
		 * Move pkt_queue to tmp_pkt_queue, which leaves
		 * pkt_queue empty and ready to receive more pkt_nodes.
		 */
		STAILQ_CONCAT(&tmp_pkt_queue, &pkt_queue);

		/*
		 * We've finished making changes to the list. Unlock it
		 * so the pfil hooks can continue queuing pkt_nodes.
		 */
		mtx_unlock(&siftr_pkt_queue_mtx);

		/*
		 * We can't hold a mutex whilst calling siftr_process_pkt
		 * because ALQ might sleep waiting for buffer space.
		 */
		mtx_unlock(&siftr_pkt_mgr_mtx);

		/* Flush all pkt_nodes to the log file. */
		STAILQ_FOREACH_SAFE(pkt_node, &tmp_pkt_queue, nodes,
		    pkt_node_temp) {
			siftr_process_pkt(pkt_node);
			STAILQ_REMOVE_HEAD(&tmp_pkt_queue, nodes);
			free(pkt_node, M_SIFTR_PKTNODE);
		}

		KASSERT(STAILQ_EMPTY(&tmp_pkt_queue),
		    ("SIFTR tmp_pkt_queue not empty after flush"));

		mtx_lock(&siftr_pkt_mgr_mtx);

		/*
		 * If siftr_exit_pkt_manager_thread gets set during the window
		 * where we are draining the tmp_pkt_queue above, there might
		 * still be pkts in pkt_queue that need to be drained.
		 * Allow one further iteration to occur after
		 * siftr_exit_pkt_manager_thread has been set to ensure
		 * pkt_queue is completely empty before we kill the thread.
		 *
		 * siftr_exit_pkt_manager_thread is set only after the pfil
		 * hooks have been removed, so only 1 extra iteration
		 * is needed to drain the queue.
		 */
		if (siftr_exit_pkt_manager_thread)
			draining--;
	}

	mtx_unlock(&siftr_pkt_mgr_mtx);

	/* Calls wakeup on this thread's struct thread ptr. */
	kthread_exit();
}


static uint32_t
hash_pkt(struct mbuf *m, uint32_t offset)
{
	uint32_t hash;

	hash = 0;

	while (m != NULL && offset > m->m_len) {
		/*
		 * The IP packet payload does not start in this mbuf, so
		 * need to figure out which mbuf it starts in and what offset
		 * into the mbuf's data region the payload starts at.
		 */
		offset -= m->m_len;
		m = m->m_next;
	}

	while (m != NULL) {
		/* Ensure there is data in the mbuf */
		if ((m->m_len - offset) > 0)
			hash = hash32_buf(m->m_data + offset,
			    m->m_len - offset, hash);

		m = m->m_next;
		offset = 0;
	}

	return (hash);
}


/*
 * Check if a given mbuf has the SIFTR mbuf tag. If it does, log the fact that
 * it's a reinjected packet and return. If it doesn't, tag the mbuf and return.
 * Return value >0 means the caller should skip processing this mbuf.
 */
static inline int
siftr_chkreinject(struct mbuf *m, int dir, struct siftr_stats *ss)
{
	if (m_tag_locate(m, PACKET_COOKIE_SIFTR, PACKET_TAG_SIFTR, NULL)
	    != NULL) {
		if (dir == PFIL_IN)
			ss->nskip_in_dejavu++;
		else
			ss->nskip_out_dejavu++;

		return (1);
	} else {
		struct m_tag *tag = m_tag_alloc(PACKET_COOKIE_SIFTR,
		    PACKET_TAG_SIFTR, 0, M_NOWAIT);
		if (tag == NULL) {
			if (dir == PFIL_IN)
				ss->nskip_in_malloc++;
			else
				ss->nskip_out_malloc++;

			return (1);
		}

		m_tag_prepend(m, tag);
	}

	return (0);
}


/*
 * Look up an inpcb for a packet. Return the inpcb pointer if found, or NULL
 * otherwise.
 */
static inline struct inpcb *
siftr_findinpcb(int ipver, struct ip *ip, struct mbuf *m, uint16_t sport,
    uint16_t dport, int dir, struct siftr_stats *ss)
{
	struct inpcb *inp;

	/* We need the tcbinfo lock. */
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	INP_INFO_RLOCK(&V_tcbinfo);

	if (dir == PFIL_IN)
		inp = (ipver == INP_IPV4 ?
		    in_pcblookup_hash(&V_tcbinfo, ip->ip_src, sport, ip->ip_dst,
		    dport, 0, m->m_pkthdr.rcvif)
		    :
#ifdef SIFTR_IPV6
		    in6_pcblookup_hash(&V_tcbinfo,
		    &((struct ip6_hdr *)ip)->ip6_src, sport,
		    &((struct ip6_hdr *)ip)->ip6_dst, dport, 0,
		    m->m_pkthdr.rcvif)
#else
		    NULL
#endif
		    );

	else
		inp = (ipver == INP_IPV4 ?
		    in_pcblookup_hash(&V_tcbinfo, ip->ip_dst, dport, ip->ip_src,
		    sport, 0, m->m_pkthdr.rcvif)
		    :
#ifdef SIFTR_IPV6
		    in6_pcblookup_hash(&V_tcbinfo,
		    &((struct ip6_hdr *)ip)->ip6_dst, dport,
		    &((struct ip6_hdr *)ip)->ip6_src, sport, 0,
		    m->m_pkthdr.rcvif)
#else
		    NULL
#endif
		    );

	/* If we can't find the inpcb, bail. */
	if (inp == NULL) {
		if (dir == PFIL_IN)
			ss->nskip_in_inpcb++;
		else
			ss->nskip_out_inpcb++;
	} else {
		/* Acquire the inpcb lock. */
		INP_UNLOCK_ASSERT(inp);
		INP_RLOCK(inp);
	}
	INP_INFO_RUNLOCK(&V_tcbinfo);

	return (inp);
}


/*
 * Copy the connection state we are interested in out of the inpcb/tcpcb and
 * socket buffers into the pkt_node that will be queued for logging.
 */
static inline void
siftr_siftdata(struct pkt_node *pn, struct inpcb *inp, struct tcpcb *tp,
    int ipver, int dir, int inp_locally_locked)
{
#ifdef SIFTR_IPV6
	if (ipver == INP_IPV4) {
		pn->ip_laddr[3] = inp->inp_laddr.s_addr;
		pn->ip_faddr[3] = inp->inp_faddr.s_addr;
#else
	*((uint32_t *)pn->ip_laddr) = inp->inp_laddr.s_addr;
	*((uint32_t *)pn->ip_faddr) = inp->inp_faddr.s_addr;
#endif
#ifdef SIFTR_IPV6
	} else {
		pn->ip_laddr[0] = inp->in6p_laddr.s6_addr32[0];
		pn->ip_laddr[1] = inp->in6p_laddr.s6_addr32[1];
		pn->ip_laddr[2] = inp->in6p_laddr.s6_addr32[2];
		pn->ip_laddr[3] = inp->in6p_laddr.s6_addr32[3];
		pn->ip_faddr[0] = inp->in6p_faddr.s6_addr32[0];
		pn->ip_faddr[1] = inp->in6p_faddr.s6_addr32[1];
		pn->ip_faddr[2] = inp->in6p_faddr.s6_addr32[2];
		pn->ip_faddr[3] = inp->in6p_faddr.s6_addr32[3];
	}
#endif
	pn->tcp_localport = inp->inp_lport;
	pn->tcp_foreignport = inp->inp_fport;
	pn->snd_cwnd = tp->snd_cwnd;
	pn->snd_wnd = tp->snd_wnd;
	pn->rcv_wnd = tp->rcv_wnd;
	pn->snd_bwnd = 0; /* Unused, kept for compat. */
	pn->snd_ssthresh = tp->snd_ssthresh;
	pn->snd_scale = tp->snd_scale;
	pn->rcv_scale = tp->rcv_scale;
	pn->conn_state = tp->t_state;
	pn->max_seg_size = tp->t_maxseg;
	pn->smoothed_rtt = tp->t_srtt;
	pn->sack_enabled = (tp->t_flags & TF_SACK_PERMIT) != 0;
	pn->flags = tp->t_flags;
	pn->rxt_length = tp->t_rxtcur;
	pn->snd_buf_hiwater = inp->inp_socket->so_snd.sb_hiwat;
	pn->snd_buf_cc = inp->inp_socket->so_snd.sb_cc;
	pn->rcv_buf_hiwater = inp->inp_socket->so_rcv.sb_hiwat;
	pn->rcv_buf_cc = inp->inp_socket->so_rcv.sb_cc;
	pn->sent_inflight_bytes = tp->snd_max - tp->snd_una;
	pn->t_segqlen = tp->t_segqlen;

	/* We've finished accessing the tcb so release the lock. */
	if (inp_locally_locked)
		INP_RUNLOCK(inp);

	pn->ipver = ipver;
	pn->direction = dir;

	/*
	 * Significantly more accurate than using getmicrotime(), but slower!
	 * Gives true microsecond resolution at the expense of a hit to
	 * maximum pps throughput processing when SIFTR is loaded and enabled.
	 */
	microtime(&pn->tval);
}


/*
 * pfil hook that is called for each IPv4 packet making its way through the
 * stack in either direction.
 * The pfil subsystem holds a non-sleepable mutex somewhere when
 * calling our hook function, so we can't sleep at all.
 * It's very important to use the M_NOWAIT flag with all function calls
 * that support it so that they won't sleep, otherwise you get a panic.
 */
static int
siftr_chkpkt(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	struct pkt_node *pn;
	struct ip *ip;
	struct tcphdr *th;
	struct tcpcb *tp;
	struct siftr_stats *ss;
	unsigned int ip_hl;
	int inp_locally_locked;

	inp_locally_locked = 0;
	ss = DPCPU_PTR(ss);

	/*
	 * m_pullup is not required here because ip_{input|output}
	 * already do the heavy lifting for us.
	 */

	ip = mtod(*m, struct ip *);

	/* Only continue processing if the packet is TCP. */
	if (ip->ip_p != IPPROTO_TCP)
		goto ret;

	/*
	 * If a kernel subsystem reinjects packets into the stack, our pfil
	 * hook will be called multiple times for the same packet.
	 * Make sure we only process unique packets.
	 */
	if (siftr_chkreinject(*m, dir, ss))
		goto ret;

	if (dir == PFIL_IN)
		ss->n_in++;
	else
		ss->n_out++;

	/*
	 * Create a tcphdr struct starting at the correct offset
	 * in the IP packet. ip->ip_hl gives the ip header length
	 * in 4-byte words, so multiply it to get the size in bytes.
	 */
	ip_hl = (ip->ip_hl << 2);
	th = (struct tcphdr *)((caddr_t)ip + ip_hl);

	/*
	 * If the pfil hooks don't provide a pointer to the
	 * inpcb, we need to find it ourselves and lock it.
	 */
	if (!inp) {
		/* Find the corresponding inpcb for this pkt. */
		inp = siftr_findinpcb(INP_IPV4, ip, *m, th->th_sport,
		    th->th_dport, dir, ss);

		if (inp == NULL)
			goto ret;
		else
			inp_locally_locked = 1;
	}

	INP_LOCK_ASSERT(inp);

	/* Find the TCP control block that corresponds with this packet. */
	tp = intotcpcb(inp);

	/*
	 * If we can't find the TCP control block (happens occasionally for a
	 * packet sent during the shutdown phase of a TCP connection),
	 * or we're in the timewait state, bail.
	 */
	if (tp == NULL || inp->inp_flags & INP_TIMEWAIT) {
		if (dir == PFIL_IN)
			ss->nskip_in_tcpcb++;
		else
			ss->nskip_out_tcpcb++;

		goto inp_unlock;
	}

	pn = malloc(sizeof(struct pkt_node), M_SIFTR_PKTNODE, M_NOWAIT|M_ZERO);

	if (pn == NULL) {
		if (dir == PFIL_IN)
			ss->nskip_in_malloc++;
		else
			ss->nskip_out_malloc++;

		goto inp_unlock;
	}

	siftr_siftdata(pn, inp, tp, INP_IPV4, dir, inp_locally_locked);

	if (siftr_generate_hashes) {
		if ((*m)->m_pkthdr.csum_flags & CSUM_TCP) {
			/*
			 * For outbound packets, the TCP checksum isn't
			 * calculated yet. This is a problem for our packet
			 * hashing as the receiver will calc a different hash
			 * to ours if we don't include the correct TCP checksum
			 * in the bytes being hashed. To work around this
			 * problem, we manually calc the TCP checksum here in
			 * software. We unset the CSUM_TCP flag so the lower
			 * layers don't recalc it.
			 */
			(*m)->m_pkthdr.csum_flags &= ~CSUM_TCP;

			/*
			 * Calculate the TCP checksum in software and assign
			 * to correct TCP header field, which will follow the
			 * packet mbuf down the stack. The trick here is that
			 * tcp_output() sets th->th_sum to the checksum of the
			 * pseudo header for us already. Because of the nature
			 * of the checksumming algorithm, we can sum over the
			 * entire IP payload (i.e. TCP header and data), which
			 * will include the already calculated pseudo header
			 * checksum, thus giving us the complete TCP checksum.
			 *
			 * To put it in simple terms, if checksum(1,2,3,4)=10,
			 * then checksum(1,2,3,4,5) == checksum(10,5).
			 * This property is what allows us to "cheat" and
			 * checksum only the IP payload which has the TCP
			 * th_sum field populated with the pseudo header's
			 * checksum, and not need to futz around checksumming
			 * pseudo header bytes and TCP header/data in one hit.
			 * Refer to RFC 1071 for more info.
			 *
			 * NB: in_cksum_skip(struct mbuf *m, int len, int skip)
			 * in_cksum_skip 2nd argument is NOT the number of
			 * bytes to read from the mbuf at "skip" bytes offset
			 * from the start of the mbuf (very counter intuitive!).
			 * The number of bytes to read is calculated internally
			 * by the function as len-skip i.e. to sum over the IP
			 * payload (TCP header + data) bytes, it is INCORRECT
			 * to call the function like this:
			 * in_cksum_skip(at, ip->ip_len - offset, offset)
			 * Rather, it should be called like this:
			 * in_cksum_skip(at, ip->ip_len, offset)
			 * which means read "ip->ip_len - offset" bytes from
			 * the mbuf cluster "at" at offset "offset" bytes from
			 * the beginning of the "at" mbuf's data pointer.
			 */
			th->th_sum = in_cksum_skip(*m, ip->ip_len, ip_hl);
		}

		/*
		 * XXX: Having to calculate the checksum in software and then
		 * hash over all bytes is really inefficient. Would be nice to
		 * find a way to create the hash and checksum in the same pass
		 * over the bytes.
		 */
		pn->hash = hash_pkt(*m, ip_hl);
	}

	mtx_lock(&siftr_pkt_queue_mtx);
	STAILQ_INSERT_TAIL(&pkt_queue, pn, nodes);
	mtx_unlock(&siftr_pkt_queue_mtx);
	goto ret;

inp_unlock:
	if (inp_locally_locked)
		INP_RUNLOCK(inp);

ret:
	/* Returning 0 ensures pfil will not discard the pkt. */
	return (0);
}


#ifdef SIFTR_IPV6
static int
siftr_chkpkt6(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	struct pkt_node *pn;
	struct ip6_hdr *ip6;
	struct tcphdr *th;
	struct tcpcb *tp;
	struct siftr_stats *ss;
	unsigned int ip6_hl;
	int inp_locally_locked;

	inp_locally_locked = 0;
	ss = DPCPU_PTR(ss);

	/*
	 * m_pullup is not required here because ip6_{input|output}
	 * already do the heavy lifting for us.
	 */

	ip6 = mtod(*m, struct ip6_hdr *);

	/*
	 * Only continue processing if the packet is TCP.
	 * XXX: We should follow the next header fields
	 * as shown on Pg 6 of RFC 2460, but right now we'll
	 * only check pkts that have no extension headers.
	 */
	if (ip6->ip6_nxt != IPPROTO_TCP)
		goto ret6;

	/*
	 * If a kernel subsystem reinjects packets into the stack, our pfil
	 * hook will be called multiple times for the same packet.
	 * Make sure we only process unique packets.
	 */
	if (siftr_chkreinject(*m, dir, ss))
		goto ret6;

	if (dir == PFIL_IN)
		ss->n_in++;
	else
		ss->n_out++;

	ip6_hl = sizeof(struct ip6_hdr);

	/*
	 * Create a tcphdr struct starting at the correct offset in the IPv6
	 * packet. The IPv6 header is a fixed sizeof(struct ip6_hdr) bytes
	 * long here, because packets with extension headers were skipped
	 * above.
	 */
	th = (struct tcphdr *)((caddr_t)ip6 + ip6_hl);

	/*
	 * If the pfil hooks don't provide a pointer to the
	 * inpcb, we need to find it ourselves and lock it.
	 */
	if (!inp) {
		/* Find the corresponding inpcb for this pkt. */
		inp = siftr_findinpcb(INP_IPV6, (struct ip *)ip6, *m,
		    th->th_sport, th->th_dport, dir, ss);

		if (inp == NULL)
			goto ret6;
		else
			inp_locally_locked = 1;
	}

	/* Find the TCP control block that corresponds with this packet. */
	tp = intotcpcb(inp);

	/*
	 * If we can't find the TCP control block (happens occasionally for a
	 * packet sent during the shutdown phase of a TCP connection),
	 * or we're in the timewait state, bail.
	 */
	if (tp == NULL || inp->inp_flags & INP_TIMEWAIT) {
		if (dir == PFIL_IN)
			ss->nskip_in_tcpcb++;
		else
			ss->nskip_out_tcpcb++;

		goto inp_unlock6;
	}

	pn = malloc(sizeof(struct pkt_node), M_SIFTR_PKTNODE, M_NOWAIT|M_ZERO);

	if (pn == NULL) {
		if (dir == PFIL_IN)
			ss->nskip_in_malloc++;
		else
			ss->nskip_out_malloc++;

		goto inp_unlock6;
	}

	siftr_siftdata(pn, inp, tp, INP_IPV6, dir, inp_locally_locked);

	/* XXX: Figure out how to generate hashes for IPv6 packets. */

	mtx_lock(&siftr_pkt_queue_mtx);
	STAILQ_INSERT_TAIL(&pkt_queue, pn, nodes);
	mtx_unlock(&siftr_pkt_queue_mtx);
	goto ret6;

inp_unlock6:
	if (inp_locally_locked)
		INP_RUNLOCK(inp);

ret6:
	/* Returning 0 ensures pfil will not discard the pkt. */
	return (0);
}
#endif /* #ifdef SIFTR_IPV6 */


/*
 * Hook or unhook (depending on "action") SIFTR's pfil handlers for IPv4
 * (and IPv6 if compiled in) in every virtual network stack.
 */
static int
siftr_pfil(int action)
{
	struct pfil_head *pfh_inet;
#ifdef SIFTR_IPV6
	struct pfil_head *pfh_inet6;
#endif
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
#ifdef SIFTR_IPV6
		pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
#endif

		if (action == HOOK) {
			pfil_add_hook(siftr_chkpkt, NULL,
			    PFIL_IN | PFIL_OUT | PFIL_WAITOK, pfh_inet);
#ifdef SIFTR_IPV6
			pfil_add_hook(siftr_chkpkt6, NULL,
			    PFIL_IN | PFIL_OUT | PFIL_WAITOK, pfh_inet6);
#endif
		} else if (action == UNHOOK) {
			pfil_remove_hook(siftr_chkpkt, NULL,
			    PFIL_IN | PFIL_OUT | PFIL_WAITOK, pfh_inet);
#ifdef SIFTR_IPV6
			pfil_remove_hook(siftr_chkpkt6, NULL,
			    PFIL_IN | PFIL_OUT | PFIL_WAITOK, pfh_inet6);
#endif
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();

	return (0);
}


static int
siftr_sysctl_logfile_name_handler(SYSCTL_HANDLER_ARGS)
{
	struct alq *new_alq;
	int error;

	if (req->newptr == NULL)
		goto skip;

	/* If old filename and new filename are different. */
	if (strncmp(siftr_logfile, (char *)req->newptr, PATH_MAX)) {

		error = alq_open(&new_alq, req->newptr, curthread->td_ucred,
		    SIFTR_LOG_FILE_MODE, SIFTR_ALQ_BUFLEN, 0);

		/* Bail if unable to create new alq. */
		if (error)
			return (1);

		/*
		 * If disabled, siftr_alq == NULL so we simply close
		 * the alq as we've proved it can be opened.
		 * If enabled, close the existing alq and switch the old
		 * for the new.
		 */
		if (siftr_alq == NULL)
			alq_close(new_alq);
		else {
			alq_close(siftr_alq);
			siftr_alq = new_alq;
		}
	}

skip:
	return (sysctl_handle_string(oidp, arg1, arg2, req));
}


/*
 * Enable or disable SIFTR. Enabling opens the ALQ log file, starts the packet
 * manager thread and installs the pfil hooks; disabling reverses this and
 * writes a summary of the statistics and flows seen to the log file.
 */
static int
siftr_manage_ops(uint8_t action)
{
	struct siftr_stats totalss;
	struct timeval tval;
	struct flow_hash_node *counter, *tmp_counter;
	struct sbuf *s;
	int i, key_index, ret, error;
	uint32_t bytes_to_write, total_skipped_pkts;
	uint16_t lport, fport;
	uint8_t *key, ipver;

#ifdef SIFTR_IPV6
	uint32_t laddr[4];
	uint32_t faddr[4];
#else
	uint8_t laddr[4];
	uint8_t faddr[4];
#endif

	error = 0;
	total_skipped_pkts = 0;

	/* Init an autosizing sbuf that initially holds 200 chars. */
	if ((s = sbuf_new(NULL, NULL, 200, SBUF_AUTOEXTEND)) == NULL)
		return (-1);

	if (action == SIFTR_ENABLE) {
		/*
		 * Create our alq
		 * XXX: We should abort if alq_open fails!
		 */
		alq_open(&siftr_alq, siftr_logfile, curthread->td_ucred,
		    SIFTR_LOG_FILE_MODE, SIFTR_ALQ_BUFLEN, 0);

		STAILQ_INIT(&pkt_queue);

		DPCPU_ZERO(ss);

		siftr_exit_pkt_manager_thread = 0;

		ret = kthread_add(&siftr_pkt_manager_thread, NULL, NULL,
		    &siftr_pkt_manager_thr, RFNOWAIT, 0,
		    "siftr_pkt_manager_thr");

		siftr_pfil(HOOK);

		microtime(&tval);

		sbuf_printf(s,
		    "enable_time_secs=%jd\tenable_time_usecs=%06ld\t"
		    "siftrver=%s\thz=%u\ttcp_rtt_scale=%u\tsysname=%s\t"
		    "sysver=%u\tipmode=%u\n",
		    (intmax_t)tval.tv_sec, tval.tv_usec, MODVERSION_STR, hz,
		    TCP_RTT_SCALE, SYS_NAME, __FreeBSD_version, SIFTR_IPMODE);

		sbuf_finish(s);
		alq_writen(siftr_alq, sbuf_data(s), sbuf_len(s), ALQ_WAITOK);

	} else if (action == SIFTR_DISABLE && siftr_pkt_manager_thr != NULL) {
		/*
		 * Remove the pfil hook functions. All threads currently in
		 * the hook functions are allowed to exit before siftr_pfil()
		 * returns.
		 */
		siftr_pfil(UNHOOK);

		/* This will block until the pkt manager thread unlocks it. */
		mtx_lock(&siftr_pkt_mgr_mtx);

		/* Tell the pkt manager thread that it should exit now. */
		siftr_exit_pkt_manager_thread = 1;

		/*
		 * Wake the pkt_manager thread so it realises that
		 * siftr_exit_pkt_manager_thread == 1 and exits gracefully.
		 * The wakeup won't be delivered until we unlock
		 * siftr_pkt_mgr_mtx so this isn't racy.
		 */
		wakeup(&wait_for_pkt);

		/* Wait for the pkt_manager thread to exit. */
		mtx_sleep(siftr_pkt_manager_thr, &siftr_pkt_mgr_mtx, PWAIT,
		    "thrwait", 0);

		siftr_pkt_manager_thr = NULL;
		mtx_unlock(&siftr_pkt_mgr_mtx);

		totalss.n_in = DPCPU_VARSUM(ss, n_in);
		totalss.n_out = DPCPU_VARSUM(ss, n_out);
		totalss.nskip_in_malloc = DPCPU_VARSUM(ss, nskip_in_malloc);
		totalss.nskip_out_malloc = DPCPU_VARSUM(ss, nskip_out_malloc);
		totalss.nskip_in_mtx = DPCPU_VARSUM(ss, nskip_in_mtx);
		totalss.nskip_out_mtx = DPCPU_VARSUM(ss, nskip_out_mtx);
		totalss.nskip_in_tcpcb = DPCPU_VARSUM(ss, nskip_in_tcpcb);
		totalss.nskip_out_tcpcb = DPCPU_VARSUM(ss, nskip_out_tcpcb);
		totalss.nskip_in_inpcb = DPCPU_VARSUM(ss, nskip_in_inpcb);
		totalss.nskip_out_inpcb = DPCPU_VARSUM(ss, nskip_out_inpcb);

		total_skipped_pkts = totalss.nskip_in_malloc +
		    totalss.nskip_out_malloc + totalss.nskip_in_mtx +
		    totalss.nskip_out_mtx + totalss.nskip_in_tcpcb +
		    totalss.nskip_out_tcpcb + totalss.nskip_in_inpcb +
		    totalss.nskip_out_inpcb;

		microtime(&tval);

		sbuf_printf(s,
		    "disable_time_secs=%jd\tdisable_time_usecs=%06ld\t"
		    "num_inbound_tcp_pkts=%ju\tnum_outbound_tcp_pkts=%ju\t"
		    "total_tcp_pkts=%ju\tnum_inbound_skipped_pkts_malloc=%u\t"
		    "num_outbound_skipped_pkts_malloc=%u\t"
		    "num_inbound_skipped_pkts_mtx=%u\t"
		    "num_outbound_skipped_pkts_mtx=%u\t"
		    "num_inbound_skipped_pkts_tcpcb=%u\t"
		    "num_outbound_skipped_pkts_tcpcb=%u\t"
		    "num_inbound_skipped_pkts_inpcb=%u\t"
		    "num_outbound_skipped_pkts_inpcb=%u\t"
		    "total_skipped_tcp_pkts=%u\tflow_list=",
		    (intmax_t)tval.tv_sec,
		    tval.tv_usec,
		    (uintmax_t)totalss.n_in,
		    (uintmax_t)totalss.n_out,
		    (uintmax_t)(totalss.n_in + totalss.n_out),
		    totalss.nskip_in_malloc,
		    totalss.nskip_out_malloc,
		    totalss.nskip_in_mtx,
		    totalss.nskip_out_mtx,
		    totalss.nskip_in_tcpcb,
		    totalss.nskip_out_tcpcb,
		    totalss.nskip_in_inpcb,
		    totalss.nskip_out_inpcb,
		    total_skipped_pkts);

		/*
		 * Iterate over the flow hash, printing a summary of each
		 * flow seen and freeing any malloc'd memory.
		 * The hash consists of an array of LISTs (man 3 queue).
		 */
		for (i = 0; i <= siftr_hashmask; i++) {
			LIST_FOREACH_SAFE(counter, counter_hash + i, nodes,
			    tmp_counter) {
				key = counter->key;
				key_index = 1;

				ipver = key[0];

				memcpy(laddr, key + key_index, sizeof(laddr));
				key_index += sizeof(laddr);
				memcpy(&lport, key + key_index, sizeof(lport));
				key_index += sizeof(lport);
				memcpy(faddr, key + key_index, sizeof(faddr));
				key_index += sizeof(faddr);
				memcpy(&fport, key + key_index, sizeof(fport));

#ifdef SIFTR_IPV6
				laddr[3] = ntohl(laddr[3]);
				faddr[3] = ntohl(faddr[3]);

				if (ipver == INP_IPV6) {
					laddr[0] = ntohl(laddr[0]);
					laddr[1] = ntohl(laddr[1]);
					laddr[2] = ntohl(laddr[2]);
					faddr[0] = ntohl(faddr[0]);
					faddr[1] = ntohl(faddr[1]);
					faddr[2] = ntohl(faddr[2]);

					sbuf_printf(s,
					    "%x:%x:%x:%x:%x:%x:%x:%x;%u-"
					    "%x:%x:%x:%x:%x:%x:%x:%x;%u,",
					    UPPER_SHORT(laddr[0]),
					    LOWER_SHORT(laddr[0]),
					    UPPER_SHORT(laddr[1]),
					    LOWER_SHORT(laddr[1]),
					    UPPER_SHORT(laddr[2]),
					    LOWER_SHORT(laddr[2]),
					    UPPER_SHORT(laddr[3]),
					    LOWER_SHORT(laddr[3]),
					    ntohs(lport),
					    UPPER_SHORT(faddr[0]),
					    LOWER_SHORT(faddr[0]),
					    UPPER_SHORT(faddr[1]),
					    LOWER_SHORT(faddr[1]),
					    UPPER_SHORT(faddr[2]),
					    LOWER_SHORT(faddr[2]),
					    UPPER_SHORT(faddr[3]),
					    LOWER_SHORT(faddr[3]),
					    ntohs(fport));
				} else {
					laddr[0] = FIRST_OCTET(laddr[3]);
					laddr[1] = SECOND_OCTET(laddr[3]);
					laddr[2] = THIRD_OCTET(laddr[3]);
					laddr[3] = FOURTH_OCTET(laddr[3]);
					faddr[0] = FIRST_OCTET(faddr[3]);
					faddr[1] = SECOND_OCTET(faddr[3]);
					faddr[2] = THIRD_OCTET(faddr[3]);
					faddr[3] = FOURTH_OCTET(faddr[3]);
#endif
				sbuf_printf(s,
				    "%u.%u.%u.%u;%u-%u.%u.%u.%u;%u,",
				    laddr[0],
				    laddr[1],
				    laddr[2],
				    laddr[3],
				    ntohs(lport),
				    faddr[0],
				    faddr[1],
				    faddr[2],
				    faddr[3],
				    ntohs(fport));
#ifdef SIFTR_IPV6
				}
#endif

				free(counter, M_SIFTR_HASHNODE);
			}

			LIST_INIT(counter_hash + i);
		}

		sbuf_printf(s, "\n");
		sbuf_finish(s);

		i = 0;
		do {
			bytes_to_write = min(SIFTR_ALQ_BUFLEN, sbuf_len(s)-i);
			alq_writen(siftr_alq, sbuf_data(s)+i, bytes_to_write,
			    ALQ_WAITOK);
			i += bytes_to_write;
		} while (i < sbuf_len(s));

		alq_close(siftr_alq);
		siftr_alq = NULL;
	}

	sbuf_delete(s);

	/*
	 * XXX: Should be using ret to check if any functions fail
	 * and set error appropriately
	 */

	return (error);
}


static int
siftr_sysctl_enabled_handler(SYSCTL_HANDLER_ARGS)
{
	if (req->newptr == NULL)
		goto skip;

	/* If the value passed in isn't 0 or 1, return an error. */
	if (CAST_PTR_INT(req->newptr) != 0 && CAST_PTR_INT(req->newptr) != 1)
		return (1);

	/* If we are changing state (0 to 1 or 1 to 0). */
	if (CAST_PTR_INT(req->newptr) != siftr_enabled)
		if (siftr_manage_ops(CAST_PTR_INT(req->newptr))) {
			siftr_manage_ops(SIFTR_DISABLE);
			return (1);
		}

skip:
	return (sysctl_handle_int(oidp, arg1, arg2, req));
}


static void
siftr_shutdown_handler(void *arg)
{
	siftr_manage_ops(SIFTR_DISABLE);
}


/*
 * Module is being unloaded or machine is shutting down. Take care of cleanup.
 */
static int
deinit_siftr(void)
{
	/* Cleanup. */
	siftr_manage_ops(SIFTR_DISABLE);
	hashdestroy(counter_hash, M_SIFTR, siftr_hashmask);
	mtx_destroy(&siftr_pkt_queue_mtx);
	mtx_destroy(&siftr_pkt_mgr_mtx);

	return (0);
}


/*
 * Module has just been loaded into the kernel.
 */
static int
init_siftr(void)
{
	EVENTHANDLER_REGISTER(shutdown_pre_sync, siftr_shutdown_handler, NULL,
	    SHUTDOWN_PRI_FIRST);

	/* Initialise our flow counter hash table. */
	counter_hash = hashinit(SIFTR_EXPECTED_MAX_TCP_FLOWS, M_SIFTR,
	    &siftr_hashmask);

	mtx_init(&siftr_pkt_queue_mtx, "siftr_pkt_queue_mtx", NULL, MTX_DEF);
	mtx_init(&siftr_pkt_mgr_mtx, "siftr_pkt_mgr_mtx", NULL, MTX_DEF);

	/* Print message to the user's current terminal. */
	uprintf("\nStatistical Information For TCP Research (SIFTR) %s\n"
	    " http://caia.swin.edu.au/urp/newtcp\n\n",
	    MODVERSION_STR);

	return (0);
}


/*
 * This is the function that is called to load and unload the module.
 * When the module is loaded, this function is called once with
 * "what" == MOD_LOAD.
 * When the module is unloaded, this function is called twice with
 * "what" == MOD_QUIESCE first, followed by "what" == MOD_UNLOAD second.
 * When the system is shut down, e.g. via CTRL-ALT-DEL or the shutdown command,
 * this function is called once with "what" == MOD_SHUTDOWN.
 * In the shutdown case, the handler isn't called until the very end
 * of the shutdown sequence, i.e. after the disks have been synced.
 */
static int
siftr_load_handler(module_t mod, int what, void *arg)
{
	int ret;

	switch (what) {
	case MOD_LOAD:
		ret = init_siftr();
		break;

	case MOD_QUIESCE:
	case MOD_SHUTDOWN:
		ret = deinit_siftr();
		break;

	case MOD_UNLOAD:
		ret = 0;
		break;

	default:
		ret = EINVAL;
		break;
	}

	return (ret);
}


static moduledata_t siftr_mod = {
	.name = "siftr",
	.evhand = siftr_load_handler,
};

/*
 * Param 1: name of the kernel module
 * Param 2: moduledata_t struct containing info about the kernel module
 *          and the execution entry point for the module
 * Param 3: From sysinit_sub_id enumeration in /usr/include/sys/kernel.h
 *          Defines the module initialisation order
 * Param 4: From sysinit_elem_order enumeration in /usr/include/sys/kernel.h
 *          Defines the initialisation order of this kld relative to others
 *          within the same subsystem as defined by param 3
 */
DECLARE_MODULE(siftr, siftr_mod, SI_SUB_SMP, SI_ORDER_ANY);
MODULE_DEPEND(siftr, alq, 1, 1, 1);
MODULE_VERSION(siftr, MODVERSION);
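
/*
 * Illustrative usage sketch (documentation only, not required by the code):
 * assuming the module builds and installs as siftr.ko, the sysctl knobs
 * declared near the top of this file are typically driven from a root shell
 * along these lines:
 *
 *   kldload siftr
 *   sysctl net.inet.siftr.logfile=/var/log/siftr.log  # optional; the default
 *   sysctl net.inet.siftr.ppl=1                       # log every packet seen
 *   sysctl net.inet.siftr.genhashes=1                 # optional packet hashes
 *   sysctl net.inet.siftr.enabled=1                   # hook pfil, start logging
 *   ...
 *   sysctl net.inet.siftr.enabled=0                   # unhook, write summary
 *   kldunload siftr
 */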
| 264 265static volatile unsigned int siftr_exit_pkt_manager_thread = 0; 266static unsigned int siftr_enabled = 0; 267static unsigned int siftr_pkts_per_log = 1; 268static unsigned int siftr_generate_hashes = 0; 269/* static unsigned int siftr_binary_log = 0; */ 270static char siftr_logfile[PATH_MAX] = "/var/log/siftr.log"; 271static u_long siftr_hashmask; 272STAILQ_HEAD(pkthead, pkt_node) pkt_queue = STAILQ_HEAD_INITIALIZER(pkt_queue); 273LIST_HEAD(listhead, flow_hash_node) *counter_hash; 274static int wait_for_pkt; 275static struct alq *siftr_alq = NULL; 276static struct mtx siftr_pkt_queue_mtx; 277static struct mtx siftr_pkt_mgr_mtx; 278static struct thread *siftr_pkt_manager_thr = NULL; 279/* 280 * pfil.h defines PFIL_IN as 1 and PFIL_OUT as 2, 281 * which we use as an index into this array. 282 */ 283static char direction[3] = {'\0', 'i','o'}; 284 285/* Required function prototypes. */ 286static int siftr_sysctl_enabled_handler(SYSCTL_HANDLER_ARGS); 287static int siftr_sysctl_logfile_name_handler(SYSCTL_HANDLER_ARGS); 288 289 290/* Declare the net.inet.siftr sysctl tree and populate it. */ 291 292SYSCTL_DECL(_net_inet_siftr); 293 294SYSCTL_NODE(_net_inet, OID_AUTO, siftr, CTLFLAG_RW, NULL, 295 "siftr related settings"); 296 297SYSCTL_PROC(_net_inet_siftr, OID_AUTO, enabled, CTLTYPE_UINT|CTLFLAG_RW, 298 &siftr_enabled, 0, &siftr_sysctl_enabled_handler, "IU", 299 "switch siftr module operations on/off"); 300 301SYSCTL_PROC(_net_inet_siftr, OID_AUTO, logfile, CTLTYPE_STRING|CTLFLAG_RW, 302 &siftr_logfile, sizeof(siftr_logfile), &siftr_sysctl_logfile_name_handler, 303 "A", "file to save siftr log messages to"); 304 305SYSCTL_UINT(_net_inet_siftr, OID_AUTO, ppl, CTLFLAG_RW, 306 &siftr_pkts_per_log, 1, 307 "number of packets between generating a log message"); 308 309SYSCTL_UINT(_net_inet_siftr, OID_AUTO, genhashes, CTLFLAG_RW, 310 &siftr_generate_hashes, 0, 311 "enable packet hash generation"); 312 313/* XXX: TODO 314SYSCTL_UINT(_net_inet_siftr, OID_AUTO, binary, CTLFLAG_RW, 315 &siftr_binary_log, 0, 316 "write log files in binary instead of ascii"); 317*/ 318 319 320/* Begin functions. */ 321 322static void 323siftr_process_pkt(struct pkt_node * pkt_node) 324{ 325 struct flow_hash_node *hash_node; 326 struct listhead *counter_list; 327 struct siftr_stats *ss; 328 struct ale *log_buf; 329 uint8_t key[FLOW_KEY_LEN]; 330 uint8_t found_match, key_offset; 331 332 hash_node = NULL; 333 ss = DPCPU_PTR(ss); 334 found_match = 0; 335 key_offset = 1; 336 337 /* 338 * Create the key that will be used to create a hash index 339 * into our hash table. Our key consists of: 340 * ipversion, localip, localport, foreignip, foreignport 341 */ 342 key[0] = pkt_node->ipver; 343 memcpy(key + key_offset, &pkt_node->ip_laddr, 344 sizeof(pkt_node->ip_laddr)); 345 key_offset += sizeof(pkt_node->ip_laddr); 346 memcpy(key + key_offset, &pkt_node->tcp_localport, 347 sizeof(pkt_node->tcp_localport)); 348 key_offset += sizeof(pkt_node->tcp_localport); 349 memcpy(key + key_offset, &pkt_node->ip_faddr, 350 sizeof(pkt_node->ip_faddr)); 351 key_offset += sizeof(pkt_node->ip_faddr); 352 memcpy(key + key_offset, &pkt_node->tcp_foreignport, 353 sizeof(pkt_node->tcp_foreignport)); 354 355 counter_list = counter_hash + 356 (hash32_buf(key, sizeof(key), 0) & siftr_hashmask); 357 358 /* 359 * If the list is not empty i.e. the hash index has 360 * been used by another flow previously. 361 */ 362 if (LIST_FIRST(counter_list) != NULL) { 363 /* 364 * Loop through the hash nodes in the list. 
365 * There should normally only be 1 hash node in the list, 366 * except if there have been collisions at the hash index 367 * computed by hash32_buf(). 368 */ 369 LIST_FOREACH(hash_node, counter_list, nodes) { 370 /* 371 * Check if the key for the pkt we are currently 372 * processing is the same as the key stored in the 373 * hash node we are currently processing. 374 * If they are the same, then we've found the 375 * hash node that stores the counter for the flow 376 * the pkt belongs to. 377 */ 378 if (memcmp(hash_node->key, key, sizeof(key)) == 0) { 379 found_match = 1; 380 break; 381 } 382 } 383 } 384 385 /* If this flow hash hasn't been seen before or we have a collision. */ 386 if (hash_node == NULL || !found_match) { 387 /* Create a new hash node to store the flow's counter. */ 388 hash_node = malloc(sizeof(struct flow_hash_node), 389 M_SIFTR_HASHNODE, M_WAITOK); 390 391 if (hash_node != NULL) { 392 /* Initialise our new hash node list entry. */ 393 hash_node->counter = 0; 394 memcpy(hash_node->key, key, sizeof(key)); 395 LIST_INSERT_HEAD(counter_list, hash_node, nodes); 396 } else { 397 /* Malloc failed. */ 398 if (pkt_node->direction == PFIL_IN) 399 ss->nskip_in_malloc++; 400 else 401 ss->nskip_out_malloc++; 402 403 return; 404 } 405 } else if (siftr_pkts_per_log > 1) { 406 /* 407 * Taking the remainder of the counter divided 408 * by the current value of siftr_pkts_per_log 409 * and storing that in counter provides a neat 410 * way to modulate the frequency of log 411 * messages being written to the log file. 412 */ 413 hash_node->counter = (hash_node->counter + 1) % 414 siftr_pkts_per_log; 415 416 /* 417 * If we have not seen enough packets since the last time 418 * we wrote a log message for this connection, return. 419 */ 420 if (hash_node->counter > 0) 421 return; 422 } 423 424 log_buf = alq_getn(siftr_alq, MAX_LOG_MSG_LEN, ALQ_WAITOK); 425 426 if (log_buf == NULL) 427 return; /* Should only happen if the ALQ is shutting down. */ 428 429#ifdef SIFTR_IPV6 430 pkt_node->ip_laddr[3] = ntohl(pkt_node->ip_laddr[3]); 431 pkt_node->ip_faddr[3] = ntohl(pkt_node->ip_faddr[3]); 432 433 if (pkt_node->ipver == INP_IPV6) { /* IPv6 packet */ 434 pkt_node->ip_laddr[0] = ntohl(pkt_node->ip_laddr[0]); 435 pkt_node->ip_laddr[1] = ntohl(pkt_node->ip_laddr[1]); 436 pkt_node->ip_laddr[2] = ntohl(pkt_node->ip_laddr[2]); 437 pkt_node->ip_faddr[0] = ntohl(pkt_node->ip_faddr[0]); 438 pkt_node->ip_faddr[1] = ntohl(pkt_node->ip_faddr[1]); 439 pkt_node->ip_faddr[2] = ntohl(pkt_node->ip_faddr[2]); 440 441 /* Construct an IPv6 log message. 
*/ 442 log_buf->ae_bytesused = snprintf(log_buf->ae_data, 443 MAX_LOG_MSG_LEN, 444 "%c,0x%08x,%zd.%06ld,%x:%x:%x:%x:%x:%x:%x:%x,%u,%x:%x:%x:" 445 "%x:%x:%x:%x:%x,%u,%ld,%ld,%ld,%ld,%ld,%u,%u,%u,%u,%u,%u," 446 "%u,%d,%u,%u,%u,%u,%u,%u\n", 447 direction[pkt_node->direction], 448 pkt_node->hash, 449 pkt_node->tval.tv_sec, 450 pkt_node->tval.tv_usec, 451 UPPER_SHORT(pkt_node->ip_laddr[0]), 452 LOWER_SHORT(pkt_node->ip_laddr[0]), 453 UPPER_SHORT(pkt_node->ip_laddr[1]), 454 LOWER_SHORT(pkt_node->ip_laddr[1]), 455 UPPER_SHORT(pkt_node->ip_laddr[2]), 456 LOWER_SHORT(pkt_node->ip_laddr[2]), 457 UPPER_SHORT(pkt_node->ip_laddr[3]), 458 LOWER_SHORT(pkt_node->ip_laddr[3]), 459 ntohs(pkt_node->tcp_localport), 460 UPPER_SHORT(pkt_node->ip_faddr[0]), 461 LOWER_SHORT(pkt_node->ip_faddr[0]), 462 UPPER_SHORT(pkt_node->ip_faddr[1]), 463 LOWER_SHORT(pkt_node->ip_faddr[1]), 464 UPPER_SHORT(pkt_node->ip_faddr[2]), 465 LOWER_SHORT(pkt_node->ip_faddr[2]), 466 UPPER_SHORT(pkt_node->ip_faddr[3]), 467 LOWER_SHORT(pkt_node->ip_faddr[3]), 468 ntohs(pkt_node->tcp_foreignport), 469 pkt_node->snd_ssthresh, 470 pkt_node->snd_cwnd, 471 pkt_node->snd_bwnd, 472 pkt_node->snd_wnd, 473 pkt_node->rcv_wnd, 474 pkt_node->snd_scale, 475 pkt_node->rcv_scale, 476 pkt_node->conn_state, 477 pkt_node->max_seg_size, 478 pkt_node->smoothed_rtt, 479 pkt_node->sack_enabled, 480 pkt_node->flags, 481 pkt_node->rxt_length, 482 pkt_node->snd_buf_hiwater, 483 pkt_node->snd_buf_cc, 484 pkt_node->rcv_buf_hiwater, 485 pkt_node->rcv_buf_cc, 486 pkt_node->sent_inflight_bytes, 487 pkt_node->t_segqlen); 488 } else { /* IPv4 packet */ 489 pkt_node->ip_laddr[0] = FIRST_OCTET(pkt_node->ip_laddr[3]); 490 pkt_node->ip_laddr[1] = SECOND_OCTET(pkt_node->ip_laddr[3]); 491 pkt_node->ip_laddr[2] = THIRD_OCTET(pkt_node->ip_laddr[3]); 492 pkt_node->ip_laddr[3] = FOURTH_OCTET(pkt_node->ip_laddr[3]); 493 pkt_node->ip_faddr[0] = FIRST_OCTET(pkt_node->ip_faddr[3]); 494 pkt_node->ip_faddr[1] = SECOND_OCTET(pkt_node->ip_faddr[3]); 495 pkt_node->ip_faddr[2] = THIRD_OCTET(pkt_node->ip_faddr[3]); 496 pkt_node->ip_faddr[3] = FOURTH_OCTET(pkt_node->ip_faddr[3]); 497#endif /* SIFTR_IPV6 */ 498 499 /* Construct an IPv4 log message. 
*/ 500 log_buf->ae_bytesused = snprintf(log_buf->ae_data, 501 MAX_LOG_MSG_LEN, 502 "%c,0x%08x,%jd.%06ld,%u.%u.%u.%u,%u,%u.%u.%u.%u,%u,%ld,%ld," 503 "%ld,%ld,%ld,%u,%u,%u,%u,%u,%u,%u,%d,%u,%u,%u,%u,%u,%u\n", 504 direction[pkt_node->direction], 505 pkt_node->hash, 506 (intmax_t)pkt_node->tval.tv_sec, 507 pkt_node->tval.tv_usec, 508 pkt_node->ip_laddr[0], 509 pkt_node->ip_laddr[1], 510 pkt_node->ip_laddr[2], 511 pkt_node->ip_laddr[3], 512 ntohs(pkt_node->tcp_localport), 513 pkt_node->ip_faddr[0], 514 pkt_node->ip_faddr[1], 515 pkt_node->ip_faddr[2], 516 pkt_node->ip_faddr[3], 517 ntohs(pkt_node->tcp_foreignport), 518 pkt_node->snd_ssthresh, 519 pkt_node->snd_cwnd, 520 pkt_node->snd_bwnd, 521 pkt_node->snd_wnd, 522 pkt_node->rcv_wnd, 523 pkt_node->snd_scale, 524 pkt_node->rcv_scale, 525 pkt_node->conn_state, 526 pkt_node->max_seg_size, 527 pkt_node->smoothed_rtt, 528 pkt_node->sack_enabled, 529 pkt_node->flags, 530 pkt_node->rxt_length, 531 pkt_node->snd_buf_hiwater, 532 pkt_node->snd_buf_cc, 533 pkt_node->rcv_buf_hiwater, 534 pkt_node->rcv_buf_cc, 535 pkt_node->sent_inflight_bytes, 536 pkt_node->t_segqlen); 537#ifdef SIFTR_IPV6 538 } 539#endif 540 541 alq_post_flags(siftr_alq, log_buf, 0); 542} 543 544 545static void 546siftr_pkt_manager_thread(void *arg) 547{ 548 STAILQ_HEAD(pkthead, pkt_node) tmp_pkt_queue = 549 STAILQ_HEAD_INITIALIZER(tmp_pkt_queue); 550 struct pkt_node *pkt_node, *pkt_node_temp; 551 uint8_t draining; 552 553 draining = 2; 554 555 mtx_lock(&siftr_pkt_mgr_mtx); 556 557 /* draining == 0 when queue has been flushed and it's safe to exit. */ 558 while (draining) { 559 /* 560 * Sleep until we are signalled to wake because thread has 561 * been told to exit or until 1 tick has passed. 562 */ 563 mtx_sleep(&wait_for_pkt, &siftr_pkt_mgr_mtx, PWAIT, "pktwait", 564 1); 565 566 /* Gain exclusive access to the pkt_node queue. */ 567 mtx_lock(&siftr_pkt_queue_mtx); 568 569 /* 570 * Move pkt_queue to tmp_pkt_queue, which leaves 571 * pkt_queue empty and ready to receive more pkt_nodes. 572 */ 573 STAILQ_CONCAT(&tmp_pkt_queue, &pkt_queue); 574 575 /* 576 * We've finished making changes to the list. Unlock it 577 * so the pfil hooks can continue queuing pkt_nodes. 578 */ 579 mtx_unlock(&siftr_pkt_queue_mtx); 580 581 /* 582 * We can't hold a mutex whilst calling siftr_process_pkt 583 * because ALQ might sleep waiting for buffer space. 584 */ 585 mtx_unlock(&siftr_pkt_mgr_mtx); 586 587 /* Flush all pkt_nodes to the log file. */ 588 STAILQ_FOREACH_SAFE(pkt_node, &tmp_pkt_queue, nodes, 589 pkt_node_temp) { 590 siftr_process_pkt(pkt_node); 591 STAILQ_REMOVE_HEAD(&tmp_pkt_queue, nodes); 592 free(pkt_node, M_SIFTR_PKTNODE); 593 } 594 595 KASSERT(STAILQ_EMPTY(&tmp_pkt_queue), 596 ("SIFTR tmp_pkt_queue not empty after flush")); 597 598 mtx_lock(&siftr_pkt_mgr_mtx); 599 600 /* 601 * If siftr_exit_pkt_manager_thread gets set during the window 602 * where we are draining the tmp_pkt_queue above, there might 603 * still be pkts in pkt_queue that need to be drained. 604 * Allow one further iteration to occur after 605 * siftr_exit_pkt_manager_thread has been set to ensure 606 * pkt_queue is completely empty before we kill the thread. 607 * 608 * siftr_exit_pkt_manager_thread is set only after the pfil 609 * hooks have been removed, so only 1 extra iteration 610 * is needed to drain the queue. 611 */ 612 if (siftr_exit_pkt_manager_thread) 613 draining--; 614 } 615 616 mtx_unlock(&siftr_pkt_mgr_mtx); 617 618 /* Calls wakeup on this thread's struct thread ptr. 
*/ 619 kthread_exit(); 620} 621 622 623static uint32_t 624hash_pkt(struct mbuf *m, uint32_t offset) 625{ 626 uint32_t hash; 627 628 hash = 0; 629 630 while (m != NULL && offset > m->m_len) { 631 /* 632 * The IP packet payload does not start in this mbuf, so 633 * need to figure out which mbuf it starts in and what offset 634 * into the mbuf's data region the payload starts at. 635 */ 636 offset -= m->m_len; 637 m = m->m_next; 638 } 639 640 while (m != NULL) { 641 /* Ensure there is data in the mbuf */ 642 if ((m->m_len - offset) > 0) 643 hash = hash32_buf(m->m_data + offset, 644 m->m_len - offset, hash); 645 646 m = m->m_next; 647 offset = 0; 648 } 649 650 return (hash); 651} 652 653 654/* 655 * Check if a given mbuf has the SIFTR mbuf tag. If it does, log the fact that 656 * it's a reinjected packet and return. If it doesn't, tag the mbuf and return. 657 * Return value >0 means the caller should skip processing this mbuf. 658 */ 659static inline int 660siftr_chkreinject(struct mbuf *m, int dir, struct siftr_stats *ss) 661{ 662 if (m_tag_locate(m, PACKET_COOKIE_SIFTR, PACKET_TAG_SIFTR, NULL) 663 != NULL) { 664 if (dir == PFIL_IN) 665 ss->nskip_in_dejavu++; 666 else 667 ss->nskip_out_dejavu++; 668 669 return (1); 670 } else { 671 struct m_tag *tag = m_tag_alloc(PACKET_COOKIE_SIFTR, 672 PACKET_TAG_SIFTR, 0, M_NOWAIT); 673 if (tag == NULL) { 674 if (dir == PFIL_IN) 675 ss->nskip_in_malloc++; 676 else 677 ss->nskip_out_malloc++; 678 679 return (1); 680 } 681 682 m_tag_prepend(m, tag); 683 } 684 685 return (0); 686} 687 688 689/* 690 * Look up an inpcb for a packet. Return the inpcb pointer if found, or NULL 691 * otherwise. 692 */ 693static inline struct inpcb * 694siftr_findinpcb(int ipver, struct ip *ip, struct mbuf *m, uint16_t sport, 695 uint16_t dport, int dir, struct siftr_stats *ss) 696{ 697 struct inpcb *inp; 698 699 /* We need the tcbinfo lock. */ 700 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 701 INP_INFO_RLOCK(&V_tcbinfo); 702 703 if (dir == PFIL_IN) 704 inp = (ipver == INP_IPV4 ? 705 in_pcblookup_hash(&V_tcbinfo, ip->ip_src, sport, ip->ip_dst, 706 dport, 0, m->m_pkthdr.rcvif) 707 : 708#ifdef SIFTR_IPV6 709 in6_pcblookup_hash(&V_tcbinfo, 710 &((struct ip6_hdr *)ip)->ip6_src, sport, 711 &((struct ip6_hdr *)ip)->ip6_dst, dport, 0, 712 m->m_pkthdr.rcvif) 713#else 714 NULL 715#endif 716 ); 717 718 else 719 inp = (ipver == INP_IPV4 ? 720 in_pcblookup_hash(&V_tcbinfo, ip->ip_dst, dport, ip->ip_src, 721 sport, 0, m->m_pkthdr.rcvif) 722 : 723#ifdef SIFTR_IPV6 724 in6_pcblookup_hash(&V_tcbinfo, 725 &((struct ip6_hdr *)ip)->ip6_dst, dport, 726 &((struct ip6_hdr *)ip)->ip6_src, sport, 0, 727 m->m_pkthdr.rcvif) 728#else 729 NULL 730#endif 731 ); 732 733 /* If we can't find the inpcb, bail. */ 734 if (inp == NULL) { 735 if (dir == PFIL_IN) 736 ss->nskip_in_inpcb++; 737 else 738 ss->nskip_out_inpcb++; 739 } else { 740 /* Acquire the inpcb lock. 
*/ 741 INP_UNLOCK_ASSERT(inp); 742 INP_RLOCK(inp); 743 } 744 INP_INFO_RUNLOCK(&V_tcbinfo); 745 746 return (inp); 747} 748 749 750static inline void 751siftr_siftdata(struct pkt_node *pn, struct inpcb *inp, struct tcpcb *tp, 752 int ipver, int dir, int inp_locally_locked) 753{ 754#ifdef SIFTR_IPV6 755 if (ipver == INP_IPV4) { 756 pn->ip_laddr[3] = inp->inp_laddr.s_addr; 757 pn->ip_faddr[3] = inp->inp_faddr.s_addr; 758#else 759 *((uint32_t *)pn->ip_laddr) = inp->inp_laddr.s_addr; 760 *((uint32_t *)pn->ip_faddr) = inp->inp_faddr.s_addr; 761#endif 762#ifdef SIFTR_IPV6 763 } else { 764 pn->ip_laddr[0] = inp->in6p_laddr.s6_addr32[0]; 765 pn->ip_laddr[1] = inp->in6p_laddr.s6_addr32[1]; 766 pn->ip_laddr[2] = inp->in6p_laddr.s6_addr32[2]; 767 pn->ip_laddr[3] = inp->in6p_laddr.s6_addr32[3]; 768 pn->ip_faddr[0] = inp->in6p_faddr.s6_addr32[0]; 769 pn->ip_faddr[1] = inp->in6p_faddr.s6_addr32[1]; 770 pn->ip_faddr[2] = inp->in6p_faddr.s6_addr32[2]; 771 pn->ip_faddr[3] = inp->in6p_faddr.s6_addr32[3]; 772 } 773#endif 774 pn->tcp_localport = inp->inp_lport; 775 pn->tcp_foreignport = inp->inp_fport; 776 pn->snd_cwnd = tp->snd_cwnd; 777 pn->snd_wnd = tp->snd_wnd; 778 pn->rcv_wnd = tp->rcv_wnd; 779 pn->snd_bwnd = 0; /* Unused, kept for compat. */ 780 pn->snd_ssthresh = tp->snd_ssthresh; 781 pn->snd_scale = tp->snd_scale; 782 pn->rcv_scale = tp->rcv_scale; 783 pn->conn_state = tp->t_state; 784 pn->max_seg_size = tp->t_maxseg; 785 pn->smoothed_rtt = tp->t_srtt; 786 pn->sack_enabled = (tp->t_flags & TF_SACK_PERMIT) != 0; 787 pn->flags = tp->t_flags; 788 pn->rxt_length = tp->t_rxtcur; 789 pn->snd_buf_hiwater = inp->inp_socket->so_snd.sb_hiwat; 790 pn->snd_buf_cc = inp->inp_socket->so_snd.sb_cc; 791 pn->rcv_buf_hiwater = inp->inp_socket->so_rcv.sb_hiwat; 792 pn->rcv_buf_cc = inp->inp_socket->so_rcv.sb_cc; 793 pn->sent_inflight_bytes = tp->snd_max - tp->snd_una; 794 pn->t_segqlen = tp->t_segqlen; 795 796 /* We've finished accessing the tcb so release the lock. */ 797 if (inp_locally_locked) 798 INP_RUNLOCK(inp); 799 800 pn->ipver = ipver; 801 pn->direction = dir; 802 803 /* 804 * Significantly more accurate than using getmicrotime(), but slower! 805 * Gives true microsecond resolution at the expense of a hit to 806 * maximum pps throughput processing when SIFTR is loaded and enabled. 807 */ 808 microtime(&pn->tval); 809} 810 811 812/* 813 * pfil hook that is called for each IPv4 packet making its way through the 814 * stack in either direction. 815 * The pfil subsystem holds a non-sleepable mutex somewhere when 816 * calling our hook function, so we can't sleep at all. 817 * It's very important to use the M_NOWAIT flag with all function calls 818 * that support it so that they won't sleep, otherwise you get a panic. 819 */ 820static int 821siftr_chkpkt(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, 822 struct inpcb *inp) 823{ 824 struct pkt_node *pn; 825 struct ip *ip; 826 struct tcphdr *th; 827 struct tcpcb *tp; 828 struct siftr_stats *ss; 829 unsigned int ip_hl; 830 int inp_locally_locked; 831 832 inp_locally_locked = 0; 833 ss = DPCPU_PTR(ss); 834 835 /* 836 * m_pullup is not required here because ip_{input|output} 837 * already do the heavy lifting for us. 838 */ 839 840 ip = mtod(*m, struct ip *); 841 842 /* Only continue processing if the packet is TCP. */ 843 if (ip->ip_p != IPPROTO_TCP) 844 goto ret; 845 846 /* 847 * If a kernel subsystem reinjects packets into the stack, our pfil 848 * hook will be called multiple times for the same packet. 849 * Make sure we only process unique packets. 
850 */ 851 if (siftr_chkreinject(*m, dir, ss)) 852 goto ret; 853 854 if (dir == PFIL_IN) 855 ss->n_in++; 856 else 857 ss->n_out++; 858 859 /* 860 * Create a tcphdr struct starting at the correct offset 861 * in the IP packet. ip->ip_hl gives the ip header length 862 * in 4-byte words, so multiply it to get the size in bytes. 863 */ 864 ip_hl = (ip->ip_hl << 2); 865 th = (struct tcphdr *)((caddr_t)ip + ip_hl); 866 867 /* 868 * If the pfil hooks don't provide a pointer to the 869 * inpcb, we need to find it ourselves and lock it. 870 */ 871 if (!inp) { 872 /* Find the corresponding inpcb for this pkt. */ 873 inp = siftr_findinpcb(INP_IPV4, ip, *m, th->th_sport, 874 th->th_dport, dir, ss); 875 876 if (inp == NULL) 877 goto ret; 878 else 879 inp_locally_locked = 1; 880 } 881 882 INP_LOCK_ASSERT(inp); 883 884 /* Find the TCP control block that corresponds with this packet */ 885 tp = intotcpcb(inp); 886 887 /* 888 * If we can't find the TCP control block (happens occasionaly for a 889 * packet sent during the shutdown phase of a TCP connection), 890 * or we're in the timewait state, bail 891 */ 892 if (tp == NULL || inp->inp_flags & INP_TIMEWAIT) { 893 if (dir == PFIL_IN) 894 ss->nskip_in_tcpcb++; 895 else 896 ss->nskip_out_tcpcb++; 897 898 goto inp_unlock; 899 } 900 901 pn = malloc(sizeof(struct pkt_node), M_SIFTR_PKTNODE, M_NOWAIT|M_ZERO); 902 903 if (pn == NULL) { 904 if (dir == PFIL_IN) 905 ss->nskip_in_malloc++; 906 else 907 ss->nskip_out_malloc++; 908 909 goto inp_unlock; 910 } 911 912 siftr_siftdata(pn, inp, tp, INP_IPV4, dir, inp_locally_locked); 913 914 if (siftr_generate_hashes) { 915 if ((*m)->m_pkthdr.csum_flags & CSUM_TCP) { 916 /* 917 * For outbound packets, the TCP checksum isn't 918 * calculated yet. This is a problem for our packet 919 * hashing as the receiver will calc a different hash 920 * to ours if we don't include the correct TCP checksum 921 * in the bytes being hashed. To work around this 922 * problem, we manually calc the TCP checksum here in 923 * software. We unset the CSUM_TCP flag so the lower 924 * layers don't recalc it. 925 */ 926 (*m)->m_pkthdr.csum_flags &= ~CSUM_TCP; 927 928 /* 929 * Calculate the TCP checksum in software and assign 930 * to correct TCP header field, which will follow the 931 * packet mbuf down the stack. The trick here is that 932 * tcp_output() sets th->th_sum to the checksum of the 933 * pseudo header for us already. Because of the nature 934 * of the checksumming algorithm, we can sum over the 935 * entire IP payload (i.e. TCP header and data), which 936 * will include the already calculated pseduo header 937 * checksum, thus giving us the complete TCP checksum. 938 * 939 * To put it in simple terms, if checksum(1,2,3,4)=10, 940 * then checksum(1,2,3,4,5) == checksum(10,5). 941 * This property is what allows us to "cheat" and 942 * checksum only the IP payload which has the TCP 943 * th_sum field populated with the pseudo header's 944 * checksum, and not need to futz around checksumming 945 * pseudo header bytes and TCP header/data in one hit. 946 * Refer to RFC 1071 for more info. 947 * 948 * NB: in_cksum_skip(struct mbuf *m, int len, int skip) 949 * in_cksum_skip 2nd argument is NOT the number of 950 * bytes to read from the mbuf at "skip" bytes offset 951 * from the start of the mbuf (very counter intuitive!). 952 * The number of bytes to read is calculated internally 953 * by the function as len-skip i.e. 
to sum over the IP 954 * payload (TCP header + data) bytes, it is INCORRECT 955 * to call the function like this: 956 * in_cksum_skip(at, ip->ip_len - offset, offset) 957 * Rather, it should be called like this: 958 * in_cksum_skip(at, ip->ip_len, offset) 959 * which means read "ip->ip_len - offset" bytes from 960 * the mbuf cluster "at" at offset "offset" bytes from 961 * the beginning of the "at" mbuf's data pointer. 962 */ 963 th->th_sum = in_cksum_skip(*m, ip->ip_len, ip_hl); 964 } 965 966 /* 967 * XXX: Having to calculate the checksum in software and then 968 * hash over all bytes is really inefficient. Would be nice to 969 * find a way to create the hash and checksum in the same pass 970 * over the bytes. 971 */ 972 pn->hash = hash_pkt(*m, ip_hl); 973 } 974 975 mtx_lock(&siftr_pkt_queue_mtx); 976 STAILQ_INSERT_TAIL(&pkt_queue, pn, nodes); 977 mtx_unlock(&siftr_pkt_queue_mtx); 978 goto ret; 979 980inp_unlock: 981 if (inp_locally_locked) 982 INP_RUNLOCK(inp); 983 984ret: 985 /* Returning 0 ensures pfil will not discard the pkt */ 986 return (0); 987} 988 989 990#ifdef SIFTR_IPV6 991static int 992siftr_chkpkt6(void *arg, struct mbuf **m, struct ifnet *ifp, int dir, 993 struct inpcb *inp) 994{ 995 struct pkt_node *pn; 996 struct ip6_hdr *ip6; 997 struct tcphdr *th; 998 struct tcpcb *tp; 999 struct siftr_stats *ss; 1000 unsigned int ip6_hl; 1001 int inp_locally_locked; 1002 1003 inp_locally_locked = 0; 1004 ss = DPCPU_PTR(ss); 1005 1006 /* 1007 * m_pullup is not required here because ip6_{input|output} 1008 * already do the heavy lifting for us. 1009 */ 1010 1011 ip6 = mtod(*m, struct ip6_hdr *); 1012 1013 /* 1014 * Only continue processing if the packet is TCP 1015 * XXX: We should follow the next header fields 1016 * as shown on Pg 6 RFC 2460, but right now we'll 1017 * only check pkts that have no extension headers. 1018 */ 1019 if (ip6->ip6_nxt != IPPROTO_TCP) 1020 goto ret6; 1021 1022 /* 1023 * If a kernel subsystem reinjects packets into the stack, our pfil 1024 * hook will be called multiple times for the same packet. 1025 * Make sure we only process unique packets. 1026 */ 1027 if (siftr_chkreinject(*m, dir, ss)) 1028 goto ret6; 1029 1030 if (dir == PFIL_IN) 1031 ss->n_in++; 1032 else 1033 ss->n_out++; 1034 1035 ip6_hl = sizeof(struct ip6_hdr); 1036 1037 /* 1038 * Create a tcphdr struct starting at the correct offset 1039 * in the ipv6 packet. ip->ip_hl gives the ip header length 1040 * in 4-byte words, so multiply it to get the size in bytes. 1041 */ 1042 th = (struct tcphdr *)((caddr_t)ip6 + ip6_hl); 1043 1044 /* 1045 * For inbound packets, the pfil hooks don't provide a pointer to the 1046 * inpcb, so we need to find it ourselves and lock it. 1047 */ 1048 if (!inp) { 1049 /* Find the corresponding inpcb for this pkt. */ 1050 inp = siftr_findinpcb(INP_IPV6, (struct ip *)ip6, *m, 1051 th->th_sport, th->th_dport, dir, ss); 1052 1053 if (inp == NULL) 1054 goto ret6; 1055 else 1056 inp_locally_locked = 1; 1057 } 1058 1059 /* Find the TCP control block that corresponds with this packet. */ 1060 tp = intotcpcb(inp); 1061 1062 /* 1063 * If we can't find the TCP control block (happens occasionaly for a 1064 * packet sent during the shutdown phase of a TCP connection), 1065 * or we're in the timewait state, bail. 
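 * In the TIMEWAIT case the inpcb no longer points at a full tcpcb (the
 * connection is represented by a compressed timewait structure instead),
 * so there is no useful per-connection state left for us to sift.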
1066 */ 1067 if (tp == NULL || inp->inp_flags & INP_TIMEWAIT) { 1068 if (dir == PFIL_IN) 1069 ss->nskip_in_tcpcb++; 1070 else 1071 ss->nskip_out_tcpcb++; 1072 1073 goto inp_unlock6; 1074 } 1075 1076 pn = malloc(sizeof(struct pkt_node), M_SIFTR_PKTNODE, M_NOWAIT|M_ZERO); 1077 1078 if (pn == NULL) { 1079 if (dir == PFIL_IN) 1080 ss->nskip_in_malloc++; 1081 else 1082 ss->nskip_out_malloc++; 1083 1084 goto inp_unlock6; 1085 } 1086 1087 siftr_siftdata(pn, inp, tp, INP_IPV6, dir, inp_locally_locked); 1088 1089 /* XXX: Figure out how to generate hashes for IPv6 packets. */ 1090 1091 mtx_lock(&siftr_pkt_queue_mtx); 1092 STAILQ_INSERT_TAIL(&pkt_queue, pn, nodes); 1093 mtx_unlock(&siftr_pkt_queue_mtx); 1094 goto ret6; 1095 1096inp_unlock6: 1097 if (inp_locally_locked) 1098 INP_RUNLOCK(inp); 1099 1100ret6: 1101 /* Returning 0 ensures pfil will not discard the pkt. */ 1102 return (0); 1103} 1104#endif /* #ifdef SIFTR_IPV6 */ 1105 1106 1107static int 1108siftr_pfil(int action) 1109{ 1110 struct pfil_head *pfh_inet; 1111#ifdef SIFTR_IPV6 1112 struct pfil_head *pfh_inet6; 1113#endif 1114 VNET_ITERATOR_DECL(vnet_iter); 1115 1116 VNET_LIST_RLOCK(); 1117 VNET_FOREACH(vnet_iter) { 1118 CURVNET_SET(vnet_iter); 1119 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET); 1120#ifdef SIFTR_IPV6 1121 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6); 1122#endif 1123 1124 if (action == HOOK) { 1125 pfil_add_hook(siftr_chkpkt, NULL, 1126 PFIL_IN | PFIL_OUT | PFIL_WAITOK, pfh_inet); 1127#ifdef SIFTR_IPV6 1128 pfil_add_hook(siftr_chkpkt6, NULL, 1129 PFIL_IN | PFIL_OUT | PFIL_WAITOK, pfh_inet6); 1130#endif 1131 } else if (action == UNHOOK) { 1132 pfil_remove_hook(siftr_chkpkt, NULL, 1133 PFIL_IN | PFIL_OUT | PFIL_WAITOK, pfh_inet); 1134#ifdef SIFTR_IPV6 1135 pfil_remove_hook(siftr_chkpkt6, NULL, 1136 PFIL_IN | PFIL_OUT | PFIL_WAITOK, pfh_inet6); 1137#endif 1138 } 1139 CURVNET_RESTORE(); 1140 } 1141 VNET_LIST_RUNLOCK(); 1142 1143 return (0); 1144} 1145 1146 1147static int 1148siftr_sysctl_logfile_name_handler(SYSCTL_HANDLER_ARGS) 1149{ 1150 struct alq *new_alq; 1151 int error; 1152 1153 if (req->newptr == NULL) 1154 goto skip; 1155 1156 /* If old filename and new filename are different. */ 1157 if (strncmp(siftr_logfile, (char *)req->newptr, PATH_MAX)) { 1158 1159 error = alq_open(&new_alq, req->newptr, curthread->td_ucred, 1160 SIFTR_LOG_FILE_MODE, SIFTR_ALQ_BUFLEN, 0); 1161 1162 /* Bail if unable to create new alq. */ 1163 if (error) 1164 return (1); 1165 1166 /* 1167 * If disabled, siftr_alq == NULL so we simply close 1168 * the alq as we've proved it can be opened. 1169 * If enabled, close the existing alq and switch the old 1170 * for the new. 1171 */ 1172 if (siftr_alq == NULL) 1173 alq_close(new_alq); 1174 else { 1175 alq_close(siftr_alq); 1176 siftr_alq = new_alq; 1177 } 1178 } 1179 1180skip: 1181 return (sysctl_handle_string(oidp, arg1, arg2, req)); 1182} 1183 1184 1185static int 1186siftr_manage_ops(uint8_t action) 1187{ 1188 struct siftr_stats totalss; 1189 struct timeval tval; 1190 struct flow_hash_node *counter, *tmp_counter; 1191 struct sbuf *s; 1192 int i, key_index, ret, error; 1193 uint32_t bytes_to_write, total_skipped_pkts; 1194 uint16_t lport, fport; 1195 uint8_t *key, ipver; 1196 1197#ifdef SIFTR_IPV6 1198 uint32_t laddr[4]; 1199 uint32_t faddr[4]; 1200#else 1201 uint8_t laddr[4]; 1202 uint8_t faddr[4]; 1203#endif 1204 1205 error = 0; 1206 total_skipped_pkts = 0; 1207 1208 /* Init an autosizing sbuf that initially holds 200 chars. 
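 * SBUF_AUTOEXTEND means the buffer is reallocated as needed when
 * sbuf_printf() appends data, so 200 bytes is only the initial size;
 * sbuf_new() returns NULL here only if that first allocation fails.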
*/ 1209 if ((s = sbuf_new(NULL, NULL, 200, SBUF_AUTOEXTEND)) == NULL) 1210 return (-1); 1211 1212 if (action == SIFTR_ENABLE) { 1213 /* 1214 * Create our alq 1215 * XXX: We should abort if alq_open fails! 1216 */ 1217 alq_open(&siftr_alq, siftr_logfile, curthread->td_ucred, 1218 SIFTR_LOG_FILE_MODE, SIFTR_ALQ_BUFLEN, 0); 1219 1220 STAILQ_INIT(&pkt_queue); 1221 1222 DPCPU_ZERO(ss); 1223 1224 siftr_exit_pkt_manager_thread = 0; 1225 1226 ret = kthread_add(&siftr_pkt_manager_thread, NULL, NULL, 1227 &siftr_pkt_manager_thr, RFNOWAIT, 0, 1228 "siftr_pkt_manager_thr"); 1229 1230 siftr_pfil(HOOK); 1231 1232 microtime(&tval); 1233 1234 sbuf_printf(s, 1235 "enable_time_secs=%jd\tenable_time_usecs=%06ld\t" 1236 "siftrver=%s\thz=%u\ttcp_rtt_scale=%u\tsysname=%s\t" 1237 "sysver=%u\tipmode=%u\n", 1238 (intmax_t)tval.tv_sec, tval.tv_usec, MODVERSION_STR, hz, 1239 TCP_RTT_SCALE, SYS_NAME, __FreeBSD_version, SIFTR_IPMODE); 1240 1241 sbuf_finish(s); 1242 alq_writen(siftr_alq, sbuf_data(s), sbuf_len(s), ALQ_WAITOK); 1243 1244 } else if (action == SIFTR_DISABLE && siftr_pkt_manager_thr != NULL) { 1245 /* 1246 * Remove the pfil hook functions. All threads currently in 1247 * the hook functions are allowed to exit before siftr_pfil() 1248 * returns. 1249 */ 1250 siftr_pfil(UNHOOK); 1251 1252 /* This will block until the pkt manager thread unlocks it. */ 1253 mtx_lock(&siftr_pkt_mgr_mtx); 1254 1255 /* Tell the pkt manager thread that it should exit now. */ 1256 siftr_exit_pkt_manager_thread = 1; 1257 1258 /* 1259 * Wake the pkt_manager thread so it realises that 1260 * siftr_exit_pkt_manager_thread == 1 and exits gracefully. 1261 * The wakeup won't be delivered until we unlock 1262 * siftr_pkt_mgr_mtx so this isn't racy. 1263 */ 1264 wakeup(&wait_for_pkt); 1265 1266 /* Wait for the pkt_manager thread to exit. 
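 * mtx_sleep() atomically drops siftr_pkt_mgr_mtx while sleeping and
 * reacquires it before returning, which is what allows the manager thread
 * to take the mutex, notice the exit flag and terminate.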
*/ 1267 mtx_sleep(siftr_pkt_manager_thr, &siftr_pkt_mgr_mtx, PWAIT, 1268 "thrwait", 0); 1269 1270 siftr_pkt_manager_thr = NULL; 1271 mtx_unlock(&siftr_pkt_mgr_mtx); 1272 1273 totalss.n_in = DPCPU_VARSUM(ss, n_in); 1274 totalss.n_out = DPCPU_VARSUM(ss, n_out); 1275 totalss.nskip_in_malloc = DPCPU_VARSUM(ss, nskip_in_malloc); 1276 totalss.nskip_out_malloc = DPCPU_VARSUM(ss, nskip_out_malloc); 1277 totalss.nskip_in_mtx = DPCPU_VARSUM(ss, nskip_in_mtx); 1278 totalss.nskip_out_mtx = DPCPU_VARSUM(ss, nskip_out_mtx); 1279 totalss.nskip_in_tcpcb = DPCPU_VARSUM(ss, nskip_in_tcpcb); 1280 totalss.nskip_out_tcpcb = DPCPU_VARSUM(ss, nskip_out_tcpcb); 1281 totalss.nskip_in_inpcb = DPCPU_VARSUM(ss, nskip_in_inpcb); 1282 totalss.nskip_out_inpcb = DPCPU_VARSUM(ss, nskip_out_inpcb); 1283 1284 total_skipped_pkts = totalss.nskip_in_malloc + 1285 totalss.nskip_out_malloc + totalss.nskip_in_mtx + 1286 totalss.nskip_out_mtx + totalss.nskip_in_tcpcb + 1287 totalss.nskip_out_tcpcb + totalss.nskip_in_inpcb + 1288 totalss.nskip_out_inpcb; 1289 1290 microtime(&tval); 1291 1292 sbuf_printf(s, 1293 "disable_time_secs=%jd\tdisable_time_usecs=%06ld\t" 1294 "num_inbound_tcp_pkts=%ju\tnum_outbound_tcp_pkts=%ju\t" 1295 "total_tcp_pkts=%ju\tnum_inbound_skipped_pkts_malloc=%u\t" 1296 "num_outbound_skipped_pkts_malloc=%u\t" 1297 "num_inbound_skipped_pkts_mtx=%u\t" 1298 "num_outbound_skipped_pkts_mtx=%u\t" 1299 "num_inbound_skipped_pkts_tcpcb=%u\t" 1300 "num_outbound_skipped_pkts_tcpcb=%u\t" 1301 "num_inbound_skipped_pkts_inpcb=%u\t" 1302 "num_outbound_skipped_pkts_inpcb=%u\t" 1303 "total_skipped_tcp_pkts=%u\tflow_list=", 1304 (intmax_t)tval.tv_sec, 1305 tval.tv_usec, 1306 (uintmax_t)totalss.n_in, 1307 (uintmax_t)totalss.n_out, 1308 (uintmax_t)(totalss.n_in + totalss.n_out), 1309 totalss.nskip_in_malloc, 1310 totalss.nskip_out_malloc, 1311 totalss.nskip_in_mtx, 1312 totalss.nskip_out_mtx, 1313 totalss.nskip_in_tcpcb, 1314 totalss.nskip_out_tcpcb, 1315 totalss.nskip_in_inpcb, 1316 totalss.nskip_out_inpcb, 1317 total_skipped_pkts); 1318 1319 /* 1320 * Iterate over the flow hash, printing a summary of each 1321 * flow seen and freeing any malloc'd memory. 1322 * The hash consists of an array of LISTs (man 3 queue). 
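 * Each node's key is unpacked below in the same order it was packed: a one
 * byte IP version, then local address, local port, foreign address and
 * foreign port. An IPv4 flow ends up in the log as, for example,
 * "10.0.0.1;5001-10.0.0.2;80," (purely illustrative values).
 * XXX: hashinit() sets siftr_hashmask to the bucket count minus one, so
 * the "<" bound on the loop below never visits the final bucket
 * (index siftr_hashmask).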
1323 */ 1324 for (i = 0; i < siftr_hashmask; i++) { 1325 LIST_FOREACH_SAFE(counter, counter_hash + i, nodes, 1326 tmp_counter) { 1327 key = counter->key; 1328 key_index = 1; 1329 1330 ipver = key[0]; 1331 1332 memcpy(laddr, key + key_index, sizeof(laddr)); 1333 key_index += sizeof(laddr); 1334 memcpy(&lport, key + key_index, sizeof(lport)); 1335 key_index += sizeof(lport); 1336 memcpy(faddr, key + key_index, sizeof(faddr)); 1337 key_index += sizeof(faddr); 1338 memcpy(&fport, key + key_index, sizeof(fport)); 1339 1340#ifdef SIFTR_IPV6 1341 laddr[3] = ntohl(laddr[3]); 1342 faddr[3] = ntohl(faddr[3]); 1343 1344 if (ipver == INP_IPV6) { 1345 laddr[0] = ntohl(laddr[0]); 1346 laddr[1] = ntohl(laddr[1]); 1347 laddr[2] = ntohl(laddr[2]); 1348 faddr[0] = ntohl(faddr[0]); 1349 faddr[1] = ntohl(faddr[1]); 1350 faddr[2] = ntohl(faddr[2]); 1351 1352 sbuf_printf(s, 1353 "%x:%x:%x:%x:%x:%x:%x:%x;%u-" 1354 "%x:%x:%x:%x:%x:%x:%x:%x;%u,", 1355 UPPER_SHORT(laddr[0]), 1356 LOWER_SHORT(laddr[0]), 1357 UPPER_SHORT(laddr[1]), 1358 LOWER_SHORT(laddr[1]), 1359 UPPER_SHORT(laddr[2]), 1360 LOWER_SHORT(laddr[2]), 1361 UPPER_SHORT(laddr[3]), 1362 LOWER_SHORT(laddr[3]), 1363 ntohs(lport), 1364 UPPER_SHORT(faddr[0]), 1365 LOWER_SHORT(faddr[0]), 1366 UPPER_SHORT(faddr[1]), 1367 LOWER_SHORT(faddr[1]), 1368 UPPER_SHORT(faddr[2]), 1369 LOWER_SHORT(faddr[2]), 1370 UPPER_SHORT(faddr[3]), 1371 LOWER_SHORT(faddr[3]), 1372 ntohs(fport)); 1373 } else { 1374 laddr[0] = FIRST_OCTET(laddr[3]); 1375 laddr[1] = SECOND_OCTET(laddr[3]); 1376 laddr[2] = THIRD_OCTET(laddr[3]); 1377 laddr[3] = FOURTH_OCTET(laddr[3]); 1378 faddr[0] = FIRST_OCTET(faddr[3]); 1379 faddr[1] = SECOND_OCTET(faddr[3]); 1380 faddr[2] = THIRD_OCTET(faddr[3]); 1381 faddr[3] = FOURTH_OCTET(faddr[3]); 1382#endif 1383 sbuf_printf(s, 1384 "%u.%u.%u.%u;%u-%u.%u.%u.%u;%u,", 1385 laddr[0], 1386 laddr[1], 1387 laddr[2], 1388 laddr[3], 1389 ntohs(lport), 1390 faddr[0], 1391 faddr[1], 1392 faddr[2], 1393 faddr[3], 1394 ntohs(fport)); 1395#ifdef SIFTR_IPV6 1396 } 1397#endif 1398 1399 free(counter, M_SIFTR_HASHNODE); 1400 } 1401 1402 LIST_INIT(counter_hash + i); 1403 } 1404 1405 sbuf_printf(s, "\n"); 1406 sbuf_finish(s); 1407 1408 i = 0; 1409 do { 1410 bytes_to_write = min(SIFTR_ALQ_BUFLEN, sbuf_len(s)-i); 1411 alq_writen(siftr_alq, sbuf_data(s)+i, bytes_to_write, ALQ_WAITOK); 1412 i += bytes_to_write; 1413 } while (i < sbuf_len(s)); 1414 1415 alq_close(siftr_alq); 1416 siftr_alq = NULL; 1417 } 1418 1419 sbuf_delete(s); 1420 1421 /* 1422 * XXX: Should be using ret to check if any functions fail 1423 * and set error appropriately 1424 */ 1425 1426 return (error); 1427} 1428 1429 1430static int 1431siftr_sysctl_enabled_handler(SYSCTL_HANDLER_ARGS) 1432{ 1433 if (req->newptr == NULL) 1434 goto skip; 1435 1436 /* If the value passed in isn't 0 or 1, return an error. */ 1437 if (CAST_PTR_INT(req->newptr) != 0 && CAST_PTR_INT(req->newptr) != 1) 1438 return (1); 1439 1440 /* If we are changing state (0 to 1 or 1 to 0). */ 1441 if (CAST_PTR_INT(req->newptr) != siftr_enabled ) 1442 if (siftr_manage_ops(CAST_PTR_INT(req->newptr))) { 1443 siftr_manage_ops(SIFTR_DISABLE); 1444 return (1); 1445 } 1446 1447skip: 1448 return (sysctl_handle_int(oidp, arg1, arg2, req)); 1449} 1450 1451 1452static void 1453siftr_shutdown_handler(void *arg) 1454{ 1455 siftr_manage_ops(SIFTR_DISABLE); 1456} 1457 1458 1459/* 1460 * Module is being unloaded or machine is shutting down. Take care of cleanup. 1461 */ 1462static int 1463deinit_siftr(void) 1464{ 1465 /* Cleanup. 
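 * Disabling first removes the pfil hooks and waits for the packet manager
 * thread to exit (effectively a no-op if SIFTR was never enabled), so the
 * hash table and mutexes are only destroyed once nothing can still be
 * using them.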
*/ 1466 siftr_manage_ops(SIFTR_DISABLE); 1467 hashdestroy(counter_hash, M_SIFTR, siftr_hashmask); 1468 mtx_destroy(&siftr_pkt_queue_mtx); 1469 mtx_destroy(&siftr_pkt_mgr_mtx); 1470 1471 return (0); 1472} 1473 1474 1475/* 1476 * Module has just been loaded into the kernel. 1477 */ 1478static int 1479init_siftr(void) 1480{ 1481 EVENTHANDLER_REGISTER(shutdown_pre_sync, siftr_shutdown_handler, NULL, 1482 SHUTDOWN_PRI_FIRST); 1483 1484 /* Initialise our flow counter hash table. */ 1485 counter_hash = hashinit(SIFTR_EXPECTED_MAX_TCP_FLOWS, M_SIFTR, 1486 &siftr_hashmask); 1487 1488 mtx_init(&siftr_pkt_queue_mtx, "siftr_pkt_queue_mtx", NULL, MTX_DEF); 1489 mtx_init(&siftr_pkt_mgr_mtx, "siftr_pkt_mgr_mtx", NULL, MTX_DEF); 1490 1491 /* Print message to the user's current terminal. */ 1492 uprintf("\nStatistical Information For TCP Research (SIFTR) %s\n" 1493 " http://caia.swin.edu.au/urp/newtcp\n\n", 1494 MODVERSION_STR); 1495 1496 return (0); 1497} 1498 1499 1500/* 1501 * This is the function that is called to load and unload the module. 1502 * When the module is loaded, this function is called once with 1503 * "what" == MOD_LOAD 1504 * When the module is unloaded, this function is called twice with 1505 * "what" = MOD_QUIESCE first, followed by "what" = MOD_UNLOAD second 1506 * When the system is shut down e.g. CTRL-ALT-DEL or using the shutdown command, 1507 * this function is called once with "what" = MOD_SHUTDOWN 1508 * When the system is shut down, the handler isn't called until the very end 1509 * of the shutdown sequence i.e. after the disks have been synced. 1510 */ 1511static int 1512siftr_load_handler(module_t mod, int what, void *arg) 1513{ 1514 int ret; 1515 1516 switch (what) { 1517 case MOD_LOAD: 1518 ret = init_siftr(); 1519 break; 1520 1521 case MOD_QUIESCE: 1522 case MOD_SHUTDOWN: 1523 ret = deinit_siftr(); 1524 break; 1525 1526 case MOD_UNLOAD: 1527 ret = 0; 1528 break; 1529 1530 default: 1531 ret = EINVAL; 1532 break; 1533 } 1534 1535 return (ret); 1536} 1537 1538 1539static moduledata_t siftr_mod = { 1540 .name = "siftr", 1541 .evhand = siftr_load_handler, 1542}; 1543 1544/* 1545 * Param 1: name of the kernel module 1546 * Param 2: moduledata_t struct containing info about the kernel module 1547 * and the execution entry point for the module 1548 * Param 3: From sysinit_sub_id enumeration in /usr/include/sys/kernel.h 1549 * Defines the module initialisation order 1550 * Param 4: From sysinit_elem_order enumeration in /usr/include/sys/kernel.h 1551 * Defines the initialisation order of this kld relative to others 1552 * within the same subsystem as defined by param 3 1553 */ 1554DECLARE_MODULE(siftr, siftr_mod, SI_SUB_SMP, SI_ORDER_ANY); 1555MODULE_DEPEND(siftr, alq, 1, 1, 1); 1556MODULE_VERSION(siftr, MODVERSION);
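/*
 * Illustrative usage sketch (not part of the module itself): once the kld
 * has been loaded (e.g. with kldload), SIFTR is controlled entirely via the
 * sysctl handlers defined above. A minimal userland program along the
 * following lines could point the log at a file and switch logging on. The
 * log file name and the absence of any tear-down are assumptions made
 * purely for this example; the OID names correspond to the handlers above.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *	#include <string.h>
 *
 *	int
 *	main(void)
 *	{
 *		char logfile[] = "/var/log/siftr_example.log";
 *		unsigned int on = 1;
 *
 *		// Tell SIFTR where to write its log before enabling it.
 *		if (sysctlbyname("net.inet.siftr.logfile", NULL, NULL,
 *		    logfile, strlen(logfile) + 1) == -1)
 *			err(1, "sysctl net.inet.siftr.logfile");
 *
 *		// Writing 1 to the enabled OID hooks pfil and starts logging.
 *		if (sysctlbyname("net.inet.siftr.enabled", NULL, NULL,
 *		    &on, sizeof(on)) == -1)
 *			err(1, "sysctl net.inet.siftr.enabled");
 *
 *		return (0);
 *	}
 */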