ip_dummynet.c: deleted lines are from revision 125952, added lines from revision 126239
1/*
2 * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
3 * Portions Copyright (c) 2000 Akamba Corp.
4 * All rights reserved
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:

--- 10 unchanged lines hidden ---

19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: head/sys/netinet/ip_dummynet.c 125952 2004-02-18 00:04:52Z mlaier $
27 * $FreeBSD: head/sys/netinet/ip_dummynet.c 126239 2004-02-25 19:55:29Z mlaier $
28 */
29
30#define DUMMYNET_DEBUG
31
32/*
33 * This module implements IP dummynet, a bandwidth limiter/delay emulator
34 * used in conjunction with the ipfw package.
35 * Description of the data structures used is in ip_dummynet.h

--- 364 unchanged lines hidden ---

400 bzero(h, sizeof(*h) );
401}
402
403/*
404 * --- end of heap management functions ---
405 */
406
407/*
408 * Return the mbuf tag holding the dummynet state. As an optimization
409 * this is assumed to be the first tag on the list. If this turns out
410 * to be wrong, we'll need to search the list.
411 */
412static struct dn_pkt_tag *
413dn_tag_get(struct mbuf *m)
414{
415 struct m_tag *mtag = m_tag_first(m);
416 KASSERT(mtag != NULL &&
417 mtag->m_tag_cookie == MTAG_ABI_COMPAT &&
418 mtag->m_tag_id == PACKET_TAG_DUMMYNET,
419 ("packet on dummynet queue w/o dummynet tag!"));
420 return (struct dn_pkt_tag *)(mtag+1);
421}
422
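
The comment above treats the first-tag lookup as an optimization and anticipates a list search if the assumption ever breaks. A minimal sketch of that fallback, assuming the stock m_tag_locate() KPI; dn_tag_find() is a hypothetical name, not part of this change:

static struct dn_pkt_tag *
dn_tag_find(struct mbuf *m)	/* hypothetical fallback, illustration only */
{
	struct m_tag *mtag = m_tag_first(m);

	if (mtag == NULL || mtag->m_tag_cookie != MTAG_ABI_COMPAT ||
	    mtag->m_tag_id != PACKET_TAG_DUMMYNET)
		mtag = m_tag_locate(m, MTAG_ABI_COMPAT,
		    PACKET_TAG_DUMMYNET, NULL);
	/* the tag payload sits immediately after the m_tag header, as above */
	return (mtag == NULL ? NULL : (struct dn_pkt_tag *)(mtag + 1));
}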
423/*
408 * Scheduler functions:
409 *
410 * transmit_event() is called when the delay-line needs to enter
411 * the scheduler, either because of existing pkts getting ready,
412 * or new packets entering the queue. The event handled is the delivery
413 * time of the packet.
414 *
415 * ready_event() does something similar with fixed-rate queues, and the

--- 4 unchanged lines hidden ---

420 *
421 * In all cases, we make sure that the data structures are consistent
422 * before passing pkts out, because this might trigger recursive
423 * invocations of the procedures.
424 */
425static void
426transmit_event(struct dn_pipe *pipe)
427{
424 * Scheduler functions:
425 *
426 * transmit_event() is called when the delay-line needs to enter
427 * the scheduler, either because of existing pkts getting ready,
428 * or new packets entering the queue. The event handled is the delivery
429 * time of the packet.
430 *
431 * ready_event() does something similar with fixed-rate queues, and the

--- 4 unchanged lines hidden ---

436 *
437 * In all cases, we make sure that the data structures are consistent
438 * before passing pkts out, because this might trigger recursive
439 * invocations of the procedures.
440 */
441static void
442transmit_event(struct dn_pipe *pipe)
443{
428 struct dn_pkt *pkt ;
444 struct mbuf *m ;
445 struct dn_pkt_tag *pkt ;
429
430 DUMMYNET_LOCK_ASSERT();
431
446
447 DUMMYNET_LOCK_ASSERT();
448
432 while ( (pkt = pipe->head) && DN_KEY_LEQ(pkt->output_time, curr_time) ) {
449 while ( (m = pipe->head) ) {
450 pkt = dn_tag_get(m);
451 if ( !DN_KEY_LEQ(pkt->output_time, curr_time) )
452 break;
433 /*
434 * first unlink, then call procedures, since ip_input() can invoke
435 * ip_output() and vice versa, thus causing nested calls
436 */
453 /*
454 * first unlink, then call procedures, since ip_input() can invoke
455 * ip_output() and vice versa, thus causing nested calls
456 */
437 pipe->head = DN_NEXT(pkt) ;
457 pipe->head = m->m_nextpkt ;
458 m->m_nextpkt = NULL;
438
439 /* XXX: drop the lock for now to avoid LOR's */
440 DUMMYNET_UNLOCK();
459
460 /* XXX: drop the lock for now to avoid LOR's */
461 DUMMYNET_UNLOCK();
441 /*
442 * The actual mbuf is preceded by a struct dn_pkt, resembling an mbuf
443 * (NOT A REAL one, just a small block of malloc'ed memory) with
444 * m_type = MT_TAG, m_flags = PACKET_TAG_DUMMYNET
445 * dn_m (m_next) = actual mbuf to be processed by ip_input/output
446 * and some other fields.
447 * The block IS FREED HERE because it contains parameters passed
448 * to the called routine.
449 */
450 switch (pkt->dn_dir) {
451 case DN_TO_IP_OUT:
462 switch (pkt->dn_dir) {
463 case DN_TO_IP_OUT:
452 (void)ip_output((struct mbuf *)pkt, NULL, NULL, 0, NULL, NULL);
453 rt_unref (pkt->ro.ro_rt, __func__) ;
464 (void)ip_output(m, NULL, NULL, pkt->flags, NULL, NULL);
454 break ;
455
456 case DN_TO_IP_IN :
465 break ;
466
467 case DN_TO_IP_IN :
457 ip_input((struct mbuf *)pkt) ;
468 ip_input(m) ;
458 break ;
459
460 case DN_TO_BDG_FWD :
469 break ;
470
471 case DN_TO_BDG_FWD :
461 if (!BDG_LOADED) {
472 /*
473 * The bridge requires/assumes the Ethernet header is
474 * contiguous in the first mbuf header. Ensure this is true.
475 */
476 if (BDG_LOADED) {
477 if (m->m_len < ETHER_HDR_LEN &&
478 (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
479 printf("dummynet/bridge: pullup fail, dropping pkt\n");
480 break;
481 }
482 m = bdg_forward_ptr(m, pkt->ifp);
483 } else {
462 /* somebody unloaded the bridge module. Drop pkt */
463 /* XXX rate limit */
464 printf("dummynet: dropping bridged packet trapped in pipe\n");
484 /* somebody unloaded the bridge module. Drop pkt */
485 /* XXX rate limit */
486 printf("dummynet: dropping bridged packet trapped in pipe\n");
465 m_freem(pkt->dn_m);
466 break;
467 } /* fallthrough */
468 case DN_TO_ETH_DEMUX:
469 {
470 struct mbuf *m = (struct mbuf *)pkt ;
487 }
488 if (m)
489 m_freem(m);
490 break;
471
491
472 if (pkt->dn_m->m_len < ETHER_HDR_LEN &&
473 (pkt->dn_m = m_pullup(pkt->dn_m, ETHER_HDR_LEN)) == NULL) {
474 printf("dummynet/bridge: pullup fail, dropping pkt\n");
475 break;
476 }
477 /*
478 * bdg_forward() wants a pointer to the pseudo-mbuf-header, but
479 * on return it will supply the pointer to the actual packet
480 * (originally pkt->dn_m, but could be something else now) if
481 * it has not consumed it.
482 */
483 if (pkt->dn_dir == DN_TO_BDG_FWD) {
484 m = bdg_forward_ptr(m, pkt->ifp);
485 if (m)
486 m_freem(m);
487 } else
488 ether_demux(NULL, m); /* which consumes the mbuf */
492 case DN_TO_ETH_DEMUX:
493 /*
494 * The Ethernet code assumes the Ethernet header is
495 * contiguous in the first mbuf header. Ensure this is true.
496 */
497 if (m->m_len < ETHER_HDR_LEN &&
498 (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
499 printf("dummynet/ether: pullup fail, dropping pkt\n");
500 break;
489 }
501 }
502 ether_demux(m->m_pkthdr.rcvif, m); /* which consumes the mbuf */
490 break ;
503 break ;
504
491 case DN_TO_ETH_OUT:
505 case DN_TO_ETH_OUT:
492 ether_output_frame(pkt->ifp, (struct mbuf *)pkt);
506 ether_output_frame(pkt->ifp, m);
493 break;
494
495 default:
496 printf("dummynet: bad switch %d!\n", pkt->dn_dir);
507 break;
508
509 default:
510 printf("dummynet: bad switch %d!\n", pkt->dn_dir);
497 m_freem(pkt->dn_m);
511 m_freem(m);
498 break ;
499 }
512 break ;
513 }
500 free(pkt, M_DUMMYNET);
501 DUMMYNET_LOCK();
502 }
503 /* if there are leftover packets, put into the heap for next event */
514 DUMMYNET_LOCK();
515 }
516 /* if there are leftover packets, put into the heap for next event */
504 if ( (pkt = pipe->head) )
505 heap_insert(&extract_heap, pkt->output_time, pipe ) ;
506 /* XXX should check errors on heap_insert, by draining the
507 * whole pipe p and hoping in the future we are more successful
508 */
517 if ( (m = pipe->head) ) {
518 pkt = dn_tag_get(m) ;
519 /* XXX should check errors on heap_insert, by draining the
520 * whole pipe p and hoping in the future we are more successful
521 */
522 heap_insert(&extract_heap, pkt->output_time, pipe ) ;
523 }
509}
510
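
Why re-keying extract_heap on the new head is sufficient, a recap derived from the code (not text from the original source):

/*
 * move_pkt() below stamps each packet with output_time = curr_time +
 * p->delay as it enters the delay line, and curr_time never decreases,
 * so the line stays sorted by deadline and the head always carries the
 * earliest output_time.  Re-inserting the pipe into extract_heap keyed
 * on the head therefore schedules exactly the next delivery.
 */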
511/*
512 * the following macro computes how many ticks we have to wait
513 * before being able to transmit a packet. The credit is taken from
514 * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
515 */
524}
525
526/*
527 * the following macro computes how many ticks we have to wait
528 * before being able to transmit a packet. The credit is taken from
529 * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
530 */
516#define SET_TICKS(pkt, q, p) \
517 (pkt->dn_m->m_pkthdr.len*8*hz - (q)->numbytes + p->bandwidth - 1 ) / \
531#define SET_TICKS(_m, q, p) \
532 ((_m)->m_pkthdr.len*8*hz - (q)->numbytes + p->bandwidth - 1 ) / \
518 p->bandwidth ;
519
520/*
521 * extract pkt from queue, compute output time (could be now)
522 * and put into delay line (p_queue)
523 */
524static void
533 p->bandwidth ;
534
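
The macro is a ceiling division: ticks = ceil((len*8*hz - credit) / bandwidth). A worked instance with assumed values (hz = 1000, a 1 Mbit/s pipe, a 1500-byte packet, no accumulated credit):

/*
 * Worked example, values assumed for illustration only:
 *   hz = 1000, p->bandwidth = 1000000 bit/s,
 *   m_pkthdr.len = 1500, q->numbytes = 0
 *
 *   len*8*hz = 1500 * 8 * 1000 = 12000000
 *   ticks    = (12000000 - 0 + 1000000 - 1) / 1000000 = 12
 *
 * i.e. 12 ms, matching the 1500*8/10^6 s serialization time at
 * 1 Mbit/s; the "+ p->bandwidth - 1" makes the division round up.
 */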
535/*
536 * extract pkt from queue, compute output time (could be now)
537 * and put into delay line (p_queue)
538 */
539static void
525move_pkt(struct dn_pkt *pkt, struct dn_flow_queue *q,
540move_pkt(struct mbuf *pkt, struct dn_flow_queue *q,
526 struct dn_pipe *p, int len)
527{
541 struct dn_pipe *p, int len)
542{
528 q->head = DN_NEXT(pkt) ;
543 struct dn_pkt_tag *dt = dn_tag_get(pkt);
544
545 q->head = pkt->m_nextpkt ;
529 q->len-- ;
530 q->len_bytes -= len ;
531
546 q->len-- ;
547 q->len_bytes -= len ;
548
532 pkt->output_time = curr_time + p->delay ;
549 dt->output_time = curr_time + p->delay ;
533
534 if (p->head == NULL)
535 p->head = pkt;
536 else
550
551 if (p->head == NULL)
552 p->head = pkt;
553 else
537 DN_NEXT(p->tail) = pkt;
554 p->tail->m_nextpkt = pkt;
538 p->tail = pkt;
555 p->tail = pkt;
539 DN_NEXT(p->tail) = NULL;
556 p->tail->m_nextpkt = NULL;
540}
541
542/*
543 * ready_event() is invoked every time the queue must enter the
544 * scheduler, either because the first packet arrives, or because
545 * a previously scheduled event fired.
546 * On invocation, drain as many pkts as possible (could be 0) and then
547 * if there are leftover packets reinsert the pkt in the scheduler.
548 */
549static void
550ready_event(struct dn_flow_queue *q)
551{
557}
558
559/*
560 * ready_event() is invoked every time the queue must enter the
561 * scheduler, either because the first packet arrives, or because
562 * a previously scheduled event fired.
563 * On invocation, drain as many pkts as possible (could be 0) and then
564 * if there are leftover packets reinsert the pkt in the scheduler.
565 */
566static void
567ready_event(struct dn_flow_queue *q)
568{
552 struct dn_pkt *pkt;
569 struct mbuf *pkt;
553 struct dn_pipe *p = q->fs->pipe ;
554 int p_was_empty ;
555
556 DUMMYNET_LOCK_ASSERT();
557
558 if (p == NULL) {
559 printf("dummynet: ready_event- pipe is gone\n");
560 return ;

--- 5 unchanged lines hidden ---

566 * Account for the bw accumulated since last scheduling, then
567 * drain as many pkts as allowed by q->numbytes and move to
568 * the delay line (in p) computing output time.
569 * bandwidth==0 (no limit) means we can drain the whole queue,
570 * setting len_scaled = 0 does the job.
571 */
572 q->numbytes += ( curr_time - q->sched_time ) * p->bandwidth;
573 while ( (pkt = q->head) != NULL ) {
570 struct dn_pipe *p = q->fs->pipe ;
571 int p_was_empty ;
572
573 DUMMYNET_LOCK_ASSERT();
574
575 if (p == NULL) {
576 printf("dummynet: ready_event- pipe is gone\n");
577 return ;

--- 5 unchanged lines hidden ---

583 * Account for the bw accumulated since last scheduling, then
584 * drain as many pkts as allowed by q->numbytes and move to
585 * the delay line (in p) computing output time.
586 * bandwidth==0 (no limit) means we can drain the whole queue,
587 * setting len_scaled = 0 does the job.
588 */
589 q->numbytes += ( curr_time - q->sched_time ) * p->bandwidth;
590 while ( (pkt = q->head) != NULL ) {
574 int len = pkt->dn_m->m_pkthdr.len;
591 int len = pkt->m_pkthdr.len;
575 int len_scaled = p->bandwidth ? len*8*hz : 0 ;
576 if (len_scaled > q->numbytes )
577 break ;
578 q->numbytes -= len_scaled ;
579 move_pkt(pkt, q, p, len);
580 }
581 /*
582 * If we have more packets queued, schedule next ready event

--- 51 unchanged lines hidden ---

634
635 /*
636 * While we have backlogged traffic AND credit, we need to do
637 * something on the queue.
638 */
639 while ( p->numbytes >=0 && (sch->elements>0 || neh->elements >0) ) {
640 if (sch->elements > 0) { /* have some eligible pkts to send out */
641 struct dn_flow_queue *q = sch->p[0].object ;
592 int len_scaled = p->bandwidth ? len*8*hz : 0 ;
593 if (len_scaled > q->numbytes )
594 break ;
595 q->numbytes -= len_scaled ;
596 move_pkt(pkt, q, p, len);
597 }
598 /*
599 * If we have more packets queued, schedule next ready event

--- 51 unchanged lines hidden ---

651
652 /*
653 * While we have backlogged traffic AND credit, we need to do
654 * something on the queue.
655 */
656 while ( p->numbytes >=0 && (sch->elements>0 || neh->elements >0) ) {
657 if (sch->elements > 0) { /* have some eligible pkts to send out */
658 struct dn_flow_queue *q = sch->p[0].object ;
642 struct dn_pkt *pkt = q->head;
659 struct mbuf *pkt = q->head;
643 struct dn_flow_set *fs = q->fs;
660 struct dn_flow_set *fs = q->fs;
644 u_int64_t len = pkt->dn_m->m_pkthdr.len;
661 u_int64_t len = pkt->m_pkthdr.len;
645 int len_scaled = p->bandwidth ? len*8*hz : 0 ;
646
647 heap_extract(sch, NULL); /* remove queue from heap */
648 p->numbytes -= len_scaled ;
649 move_pkt(pkt, q, p, len);
650
651 p->V += (len<<MY_M) / p->sum ; /* update V */
652 q->S = q->F ; /* update start time */
653 if (q->len == 0) { /* Flow not backlogged any more */
654 fs->backlogged-- ;
655 heap_insert(&(p->idle_heap), q->F, q);
656 } else { /* still backlogged */
657 /*
658 * update F and position in backlogged queue, then
659 * put flow in not_eligible_heap (we will fix this later).
660 */
662 int len_scaled = p->bandwidth ? len*8*hz : 0 ;
663
664 heap_extract(sch, NULL); /* remove queue from heap */
665 p->numbytes -= len_scaled ;
666 move_pkt(pkt, q, p, len);
667
668 p->V += (len<<MY_M) / p->sum ; /* update V */
669 q->S = q->F ; /* update start time */
670 if (q->len == 0) { /* Flow not backlogged any more */
671 fs->backlogged-- ;
672 heap_insert(&(p->idle_heap), q->F, q);
673 } else { /* still backlogged */
674 /*
675 * update F and position in backlogged queue, then
676 * put flow in not_eligible_heap (we will fix this later).
677 */
661 len = (q->head)->dn_m->m_pkthdr.len;
678 len = (q->head)->m_pkthdr.len;
662 q->F += (len<<MY_M)/(u_int64_t) fs->weight ;
663 if (DN_KEY_LEQ(q->S, p->V))
664 heap_insert(neh, q->S, q);
665 else
666 heap_insert(sch, q->F, q);
667 }
668 }
669 /*

--- 38 unchanged lines hidden ---

708 * If we are under credit, schedule the next ready event.
709 * Also fix the delivery time of the last packet.
710 */
711 if (p->if_name[0]==0 && p->numbytes < 0) { /* this implies bandwidth >0 */
712 dn_key t=0 ; /* number of ticks i have to wait */
713
714 if (p->bandwidth > 0)
715 t = ( p->bandwidth -1 - p->numbytes) / p->bandwidth ;
679 q->F += (len<<MY_M)/(u_int64_t) fs->weight ;
680 if (DN_KEY_LEQ(q->S, p->V))
681 heap_insert(neh, q->S, q);
682 else
683 heap_insert(sch, q->F, q);
684 }
685 }
686 /*

--- 38 unchanged lines hidden (view full) ---

725 * If we are under credit, schedule the next ready event.
726 * Also fix the delivery time of the last packet.
727 */
728 if (p->if_name[0]==0 && p->numbytes < 0) { /* this implies bandwidth >0 */
729 dn_key t=0 ; /* number of ticks i have to wait */
730
731 if (p->bandwidth > 0)
732 t = ( p->bandwidth -1 - p->numbytes) / p->bandwidth ;
716 p->tail->output_time += t ;
733 dn_tag_get(p->tail)->output_time += t ;
717 p->sched_time = curr_time ;
718 heap_insert(&wfq_ready_heap, curr_time + t, (void *)p);
719 /* XXX should check errors on heap_insert, and drain the whole
720 * queue on error hoping next time we are luckier.
721 */
722 }
723 /*
724 * If the delay line was empty call transmit_event(p) now.

--- 386 unchanged lines hidden ---

1111 * dst destination address, only used by ip_output
1112 * rule matching rule, in case of multiple passes
1113 * flags flags from the caller, only used in ip_output
1114 *
1115 */
1116static int
1117dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
1118{
734 p->sched_time = curr_time ;
735 heap_insert(&wfq_ready_heap, curr_time + t, (void *)p);
736 /* XXX should check errors on heap_insert, and drain the whole
737 * queue on error hoping next time we are luckier.
738 */
739 }
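
The expression above is again a ceiling division, t = ceil(-p->numbytes / p->bandwidth); a worked instance with assumed numbers:

/*
 * Worked example, values assumed: p->bandwidth = 1000000 bit/s and
 * p->numbytes = -5000000 after the drain loop, so
 *
 *   t = (1000000 - 1 - (-5000000)) / 1000000 = 5999999 / 1000000 = 5
 *
 * i.e. five ticks to pay back the borrowed credit before the pipe may
 * transmit again.
 */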
740 /*
741 * If the delay line was empty call transmit_event(p) now.

--- 386 unchanged lines hidden ---

1128 * dst destination address, only used by ip_output
1129 * rule matching rule, in case of multiple passes
1130 * flags flags from the caller, only used in ip_output
1131 *
1132 */
1133static int
1134dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
1135{
1119 struct dn_pkt *pkt;
1136 struct dn_pkt_tag *pkt;
1137 struct m_tag *mtag;
1120 struct dn_flow_set *fs;
1121 struct dn_pipe *pipe ;
1122 u_int64_t len = m->m_pkthdr.len ;
1123 struct dn_flow_queue *q = NULL ;
1124 int is_pipe;
1125#if IPFW2
1126 ipfw_insn *cmd = fwa->rule->cmd + fwa->rule->act_ofs;
1138 struct dn_flow_set *fs;
1139 struct dn_pipe *pipe ;
1140 u_int64_t len = m->m_pkthdr.len ;
1141 struct dn_flow_queue *q = NULL ;
1142 int is_pipe;
1143#if IPFW2
1144 ipfw_insn *cmd = fwa->rule->cmd + fwa->rule->act_ofs;
1145#endif
1127
1146
1147 KASSERT(m->m_nextpkt == NULL,
1148 ("dummynet_io: mbuf queue passed to dummynet"));
1149
1150#if IPFW2
1128 if (cmd->opcode == O_LOG)
1129 cmd += F_LEN(cmd);
1130 is_pipe = (cmd->opcode == O_PIPE);
1131#else
1132 is_pipe = (fwa->rule->fw_flg & IP_FW_F_COMMAND) == IP_FW_F_PIPE;
1133#endif
1134
1135 pipe_nr &= 0xffff ;

--- 34 unchanged lines hidden ---

1170 } else {
1171 if (q->len >= fs->qsize)
1172 goto dropit ; /* queue count overflow */
1173 }
1174 if ( fs->flags_fs & DN_IS_RED && red_drops(fs, q, len) )
1175 goto dropit ;
1176
1177 /* XXX expensive to zero, see if we can remove it */
1151 if (cmd->opcode == O_LOG)
1152 cmd += F_LEN(cmd);
1153 is_pipe = (cmd->opcode == O_PIPE);
1154#else
1155 is_pipe = (fwa->rule->fw_flg & IP_FW_F_COMMAND) == IP_FW_F_PIPE;
1156#endif
1157
1158 pipe_nr &= 0xffff ;

--- 34 unchanged lines hidden ---

1193 } else {
1194 if (q->len >= fs->qsize)
1195 goto dropit ; /* queue count overflow */
1196 }
1197 if ( fs->flags_fs & DN_IS_RED && red_drops(fs, q, len) )
1198 goto dropit ;
1199
1200 /* XXX expensive to zero, see if we can remove it */
1178 pkt = (struct dn_pkt *)malloc(sizeof (*pkt), M_DUMMYNET, M_NOWAIT|M_ZERO);
1179 if ( pkt == NULL )
1201 mtag = m_tag_get(PACKET_TAG_DUMMYNET,
1202 sizeof(struct dn_pkt_tag), M_NOWAIT|M_ZERO);
1203 if ( mtag == NULL )
1180 goto dropit ; /* cannot allocate packet header */
1204 goto dropit ; /* cannot allocate packet header */
1205 m_tag_prepend(m, mtag); /* attach to mbuf chain */
1206
1207 pkt = (struct dn_pkt_tag *)(mtag+1);
1181 /* ok, i can handle the pkt now... */
1182 /* build and enqueue packet + parameters */
1208 /* ok, i can handle the pkt now... */
1209 /* build and enqueue packet + parameters */
1183 pkt->hdr.mh_type = MT_TAG;
1184 pkt->hdr.mh_flags = PACKET_TAG_DUMMYNET;
1185 pkt->rule = fwa->rule ;
1210 pkt->rule = fwa->rule ;
1186 DN_NEXT(pkt) = NULL;
1187 pkt->dn_m = m;
1188 pkt->dn_dir = dir ;
1189
1190 pkt->ifp = fwa->oif;
1191 if (dir == DN_TO_IP_OUT) {
1192 /*
1193 * We need to copy *ro because for ICMP pkts (and maybe others)
1194 * the caller passed a pointer into the stack; dst might also be
1195 * a pointer into *ro so it needs to be updated.

--- 5 unchanged lines hidden ---

1201 RT_UNLOCK(pkt->ro.ro_rt);
1202 }
1203 if (fwa->dst == (struct sockaddr_in *)&fwa->ro->ro_dst) /* dst points into ro */
1204 fwa->dst = (struct sockaddr_in *)&(pkt->ro.ro_dst) ;
1205 pkt->dn_dst = fwa->dst;
1206 pkt->flags = fwa->flags;
1207 }
1208 if (q->head == NULL)
1211 pkt->dn_dir = dir ;
1212
1213 pkt->ifp = fwa->oif;
1214 if (dir == DN_TO_IP_OUT) {
1215 /*
1216 * We need to copy *ro because for ICMP pkts (and maybe others)
1217 * the caller passed a pointer into the stack; dst might also be
1218 * a pointer into *ro so it needs to be updated.

--- 5 unchanged lines hidden ---

1224 RT_UNLOCK(pkt->ro.ro_rt);
1225 }
1226 if (fwa->dst == (struct sockaddr_in *)&fwa->ro->ro_dst) /* dst points into ro */
1227 fwa->dst = (struct sockaddr_in *)&(pkt->ro.ro_dst) ;
1228 pkt->dn_dst = fwa->dst;
1229 pkt->flags = fwa->flags;
1230 }
1231 if (q->head == NULL)
1209 q->head = pkt;
1232 q->head = m;
1210 else
1233 else
1211 DN_NEXT(q->tail) = pkt;
1212 q->tail = pkt;
1234 q->tail->m_nextpkt = m;
1235 q->tail = m;
1213 q->len++;
1214 q->len_bytes += len ;
1215
1236 q->len++;
1237 q->len_bytes += len ;
1238
1216 if ( q->head != pkt ) /* flow was not idle, we are done */
1239 if ( q->head != m ) /* flow was not idle, we are done */
1217 goto done;
1218 /*
1219 * If we reach this point the flow was previously idle, so we need
1220 * to schedule it. This involves different actions for fixed-rate or
1221 * WF2Q queues.
1222 */
1223 if (is_pipe) {
1224 /*
1225 * Fixed-rate queue: just insert into the ready_heap.
1226 */
1227 dn_key t = 0 ;
1228 if (pipe->bandwidth)
1240 goto done;
1241 /*
1242 * If we reach this point the flow was previously idle, so we need
1243 * to schedule it. This involves different actions for fixed-rate or
1244 * WF2Q queues.
1245 */
1246 if (is_pipe) {
1247 /*
1248 * Fixed-rate queue: just insert into the ready_heap.
1249 */
1250 dn_key t = 0 ;
1251 if (pipe->bandwidth)
1229 t = SET_TICKS(pkt, q, pipe);
1252 t = SET_TICKS(m, q, pipe);
1230 q->sched_time = curr_time ;
1231 if (t == 0) /* must process it now */
1232 ready_event( q );
1233 else
1234 heap_insert(&ready_heap, curr_time + t , q );
1235 } else {
1236 /*
1237 * WF2Q. First, compute start time S: if the flow was idle (S=F+1)

--- 57 unchanged lines hidden ---

1295 m_freem(m);
1296 return ( (fs && (fs->flags_fs & DN_NOERROR)) ? 0 : ENOBUFS);
1297}
1298
1299/*
1300 * Below, the rt_unref is only needed when (pkt->dn_dir == DN_TO_IP_OUT)
1301 * Doing this would probably save us the initial bzero of dn_pkt
1302 */
1253 q->sched_time = curr_time ;
1254 if (t == 0) /* must process it now */
1255 ready_event( q );
1256 else
1257 heap_insert(&ready_heap, curr_time + t , q );
1258 } else {
1259 /*
1260 * WF2Q. First, compute start time S: if the flow was idle (S=F+1)

--- 57 unchanged lines hidden ---

1318 m_freem(m);
1319 return ( (fs && (fs->flags_fs & DN_NOERROR)) ? 0 : ENOBUFS);
1320}
1321
1322/*
1323 * Below, the rt_unref is only needed when (pkt->dn_dir == DN_TO_IP_OUT)
1324 * Doing this would probably save us the initial bzero of dn_pkt
1325 */
1303#define DN_FREE_PKT(pkt) { \
1304 struct dn_pkt *n = pkt ; \
1305 rt_unref ( n->ro.ro_rt, __func__ ) ; \
1306 m_freem(n->dn_m); \
1307 pkt = DN_NEXT(n) ; \
1308 free(n, M_DUMMYNET) ; }
1326#define DN_FREE_PKT(_m) do { \
1327 rt_unref(dn_tag_get(_m)->ro.ro_rt, __func__); \
1328 m_freem(_m); \
1329} while (0)
1309
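
The rewritten DN_FREE_PKT wraps its two statements in do { ... } while (0); a short note on why, with a hypothetical call site:

/*
 * do { ... } while (0) lets a multi-statement macro act as a single
 * statement.  With the old bare-brace form, hypothetical code such as
 *
 *	if (drop)
 *		DN_FREE_PKT(m);
 *	else
 *		deliver(m);
 *
 * would not compile: the ';' after the expanded '}' closes the if and
 * leaves the else dangling.  The do/while form absorbs that ';'.
 */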
1310/*
1311 * Dispose all packets and flow_queues on a flow_set.
1312 * If all=1, also remove red lookup table and other storage,
1313 * including the descriptor itself.
1314 * For the one in dn_pipe MUST also cleanup ready_heap...
1315 */
1316static void
1317purge_flow_set(struct dn_flow_set *fs, int all)
1318{
1330
1331/*
1332 * Dispose all packets and flow_queues on a flow_set.
1333 * If all=1, also remove red lookup table and other storage,
1334 * including the descriptor itself.
1335 * For the one in dn_pipe MUST also cleanup ready_heap...
1336 */
1337static void
1338purge_flow_set(struct dn_flow_set *fs, int all)
1339{
1319 struct dn_pkt *pkt ;
1320 struct dn_flow_queue *q, *qn ;
1321 int i ;
1322
1323 DUMMYNET_LOCK_ASSERT();
1324
1325 for (i = 0 ; i <= fs->rq_size ; i++ ) {
1326 for (q = fs->rq[i] ; q ; q = qn ) {
1340 struct dn_flow_queue *q, *qn ;
1341 int i ;
1342
1343 DUMMYNET_LOCK_ASSERT();
1344
1345 for (i = 0 ; i <= fs->rq_size ; i++ ) {
1346 for (q = fs->rq[i] ; q ; q = qn ) {
1327 for (pkt = q->head ; pkt ; )
1328 DN_FREE_PKT(pkt) ;
1347 struct mbuf *m, *mnext;
1348
1349 mnext = q->head;
1350 while ((m = mnext) != NULL) {
1351 mnext = m->m_nextpkt;
1352 DN_FREE_PKT(m);
1353 }
1329 qn = q->next ;
1330 free(q, M_DUMMYNET);
1331 }
1332 fs->rq[i] = NULL ;
1333 }
1334 fs->rq_elements = 0 ;
1335 if (all) {
1336 /* RED - free lookup table */

--- 10 unchanged lines hidden ---

1347/*
1348 * Dispose all packets queued on a pipe (not a flow_set).
1349 * Also free all resources associated to a pipe, which is about
1350 * to be deleted.
1351 */
1352static void
1353purge_pipe(struct dn_pipe *pipe)
1354{
1354 qn = q->next ;
1355 free(q, M_DUMMYNET);
1356 }
1357 fs->rq[i] = NULL ;
1358 }
1359 fs->rq_elements = 0 ;
1360 if (all) {
1361 /* RED - free lookup table */

--- 10 unchanged lines hidden ---

1372/*
1373 * Dispose all packets queued on a pipe (not a flow_set).
1374 * Also free all resources associated to a pipe, which is about
1375 * to be deleted.
1376 */
1377static void
1378purge_pipe(struct dn_pipe *pipe)
1379{
1355 struct dn_pkt *pkt ;
1380 struct mbuf *m, *mnext;
1356
1357 purge_flow_set( &(pipe->fs), 1 );
1358
1381
1382 purge_flow_set( &(pipe->fs), 1 );
1383
1359 for (pkt = pipe->head ; pkt ; )
1360 DN_FREE_PKT(pkt) ;
1384 mnext = pipe->head;
1385 while ((m = mnext) != NULL) {
1386 mnext = m->m_nextpkt;
1387 DN_FREE_PKT(m);
1388 }
1361
1362 heap_free( &(pipe->scheduler_heap) );
1363 heap_free( &(pipe->not_eligible_heap) );
1364 heap_free( &(pipe->idle_heap) );
1365}
1366
1367/*
1368 * Delete all pipes and heaps returning memory. Must also

--- 38 unchanged lines hidden ---

1407
1408
1409extern struct ip_fw *ip_fw_default_rule ;
1410static void
1411dn_rule_delete_fs(struct dn_flow_set *fs, void *r)
1412{
1413 int i ;
1414 struct dn_flow_queue *q ;
1389
1390 heap_free( &(pipe->scheduler_heap) );
1391 heap_free( &(pipe->not_eligible_heap) );
1392 heap_free( &(pipe->idle_heap) );
1393}
1394
1395/*
1396 * Delete all pipes and heaps returning memory. Must also

--- 38 unchanged lines hidden ---

1435
1436
1437extern struct ip_fw *ip_fw_default_rule ;
1438static void
1439dn_rule_delete_fs(struct dn_flow_set *fs, void *r)
1440{
1441 int i ;
1442 struct dn_flow_queue *q ;
1415 struct dn_pkt *pkt ;
1443 struct mbuf *m ;
1416
1417 for (i = 0 ; i <= fs->rq_size ; i++) /* last one is ovflow */
1418 for (q = fs->rq[i] ; q ; q = q->next )
1444
1445 for (i = 0 ; i <= fs->rq_size ; i++) /* last one is ovflow */
1446 for (q = fs->rq[i] ; q ; q = q->next )
1419 for (pkt = q->head ; pkt ; pkt = DN_NEXT(pkt) )
1447 for (m = q->head ; m ; m = m->m_nextpkt ) {
1448 struct dn_pkt_tag *pkt = dn_tag_get(m) ;
1420 if (pkt->rule == r)
1421 pkt->rule = ip_fw_default_rule ;
1449 if (pkt->rule == r)
1450 pkt->rule = ip_fw_default_rule ;
1451 }
1422}
1423/*
1424 * when a firewall rule is deleted, scan all queues and reset the rule
1425 * pointer of matching packets to the default rule.
1426 */
1427void
1428dn_rule_delete(void *r)
1429{
1430 struct dn_pipe *p ;
1452}
1453/*
1454 * when a firewall rule is deleted, scan all queues and remove the flow-id
1455 * from packets matching this rule.
1456 */
1457void
1458dn_rule_delete(void *r)
1459{
1460 struct dn_pipe *p ;
1431 struct dn_pkt *pkt ;
1432 struct dn_flow_set *fs ;
1461 struct dn_flow_set *fs ;
1462 struct dn_pkt_tag *pkt ;
1463 struct mbuf *m ;
1433
1434 DUMMYNET_LOCK();
1435 /*
1436 * If the rule references a queue (dn_flow_set), then scan
1437 * the flow set, otherwise scan pipes. Should do either, but doing
1438 * both does not harm.
1439 */
1440 for ( fs = all_flow_sets ; fs ; fs = fs->next )
1441 dn_rule_delete_fs(fs, r);
1442 for ( p = all_pipes ; p ; p = p->next ) {
1443 fs = &(p->fs) ;
1444 dn_rule_delete_fs(fs, r);
1464
1465 DUMMYNET_LOCK();
1466 /*
1467 * If the rule references a queue (dn_flow_set), then scan
1468 * the flow set, otherwise scan pipes. Should do either, but doing
1469 * both does not harm.
1470 */
1471 for ( fs = all_flow_sets ; fs ; fs = fs->next )
1472 dn_rule_delete_fs(fs, r);
1473 for ( p = all_pipes ; p ; p = p->next ) {
1474 fs = &(p->fs) ;
1475 dn_rule_delete_fs(fs, r);
1445 for (pkt = p->head ; pkt ; pkt = DN_NEXT(pkt) )
1476 for (m = p->head ; m ; m = m->m_nextpkt ) {
1477 pkt = dn_tag_get(m) ;
1446 if (pkt->rule == r)
1447 pkt->rule = ip_fw_default_rule ;
1478 if (pkt->rule == r)
1479 pkt->rule = ip_fw_default_rule ;
1480 }
1448 }
1449 DUMMYNET_UNLOCK();
1450}
1451
1452/*
1453 * setup RED parameters
1454 */
1455static int

--- 257 unchanged lines hidden ---

1713/*
1714 * drain all queues. Called in case of severe mbuf shortage.
1715 */
1716void
1717dummynet_drain()
1718{
1719 struct dn_flow_set *fs;
1720 struct dn_pipe *p;
1481 }
1482 DUMMYNET_UNLOCK();
1483}
1484
1485/*
1486 * setup RED parameters
1487 */
1488static int

--- 257 unchanged lines hidden ---

1746/*
1747 * drain all queues. Called in case of severe mbuf shortage.
1748 */
1749void
1750dummynet_drain()
1751{
1752 struct dn_flow_set *fs;
1753 struct dn_pipe *p;
1721 struct dn_pkt *pkt;
1754 struct mbuf *m, *mnext;
1722
1723 DUMMYNET_LOCK_ASSERT();
1724
1725 heap_free(&ready_heap);
1726 heap_free(&wfq_ready_heap);
1727 heap_free(&extract_heap);
1728 /* remove all references to this pipe from flow_sets */
1729 for (fs = all_flow_sets; fs; fs= fs->next )
1730 purge_flow_set(fs, 0);
1731
1732 for (p = all_pipes; p; p= p->next ) {
1733 purge_flow_set(&(p->fs), 0);
1755
1756 DUMMYNET_LOCK_ASSERT();
1757
1758 heap_free(&ready_heap);
1759 heap_free(&wfq_ready_heap);
1760 heap_free(&extract_heap);
1761 /* remove all references to this pipe from flow_sets */
1762 for (fs = all_flow_sets; fs; fs= fs->next )
1763 purge_flow_set(fs, 0);
1764
1765 for (p = all_pipes; p; p= p->next ) {
1766 purge_flow_set(&(p->fs), 0);
1734 for (pkt = p->head ; pkt ; )
1735 DN_FREE_PKT(pkt) ;
1767
1768 mnext = p->head;
1769 while ((m = mnext) != NULL) {
1770 mnext = m->m_nextpkt;
1771 DN_FREE_PKT(m);
1772 }
1736 p->head = p->tail = NULL ;
1737 }
1738}
1739
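
purge_flow_set(), purge_pipe() and dummynet_drain() all drain m_nextpkt chains with the same save-then-free walk; a standalone sketch of the idiom (the helper name is hypothetical, not in the source):

static void
dn_free_pkt_chain(struct mbuf *head)	/* hypothetical helper, illustration only */
{
	struct mbuf *m, *mnext;

	mnext = head;
	while ((m = mnext) != NULL) {
		mnext = m->m_nextpkt;	/* read the link before freeing */
		DN_FREE_PKT(m);		/* rt_unref + m_freem, as above */
	}
}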
1740/*
1741 * Fully delete a pipe or a queue, cleaning up associated info.
1742 */
1743static int

--- 307 unchanged lines hidden ---
1773 p->head = p->tail = NULL ;
1774 }
1775}
1776
1777/*
1778 * Fully delete a pipe or a queue, cleaning up associated info.
1779 */
1780static int

--- 307 unchanged lines hidden ---