Lines Matching refs:rs

295 rl_add_syctl_entries(struct sysctl_oid *rl_sysctl_root, struct tcp_rate_set *rs)
300 if (rs->rs_flags & RS_INTF_NO_SUP) {
301 SYSCTL_ADD_S32(&rs->sysctl_ctx,
304 &rs->rs_disable, 0,
307 SYSCTL_ADD_S32(&rs->sysctl_ctx,
310 &rs->rs_disable, 0,
313 SYSCTL_ADD_S32(&rs->sysctl_ctx,
316 &rs->rs_min_seg, 0,
318 SYSCTL_ADD_U64(&rs->sysctl_ctx,
321 &rs->rs_flow_limit, 0,
323 SYSCTL_ADD_S32(&rs->sysctl_ctx,
326 &rs->rs_highest_valid, 0,
328 SYSCTL_ADD_S32(&rs->sysctl_ctx,
331 &rs->rs_lowest_valid, 0,
333 SYSCTL_ADD_S32(&rs->sysctl_ctx,
336 &rs->rs_flags, 0,
338 SYSCTL_ADD_S32(&rs->sysctl_ctx,
341 &rs->rs_rate_cnt, 0,
343 SYSCTL_ADD_U64(&rs->sysctl_ctx,
346 &rs->rs_flows_using, 0,
349 if (rs->rs_rlt && rs->rs_rate_cnt > 0) {
355 rl_rates = SYSCTL_ADD_NODE(&rs->sysctl_ctx,
361 for (i = 0; i < rs->rs_rate_cnt; i++) {
363 rl_rate_num = SYSCTL_ADD_NODE(&rs->sysctl_ctx,
369 SYSCTL_ADD_U32(&rs->sysctl_ctx,
372 &rs->rs_rlt[i].flags, 0,
374 SYSCTL_ADD_U32(&rs->sysctl_ctx,
377 &rs->rs_rlt[i].time_between, 0,
379 SYSCTL_ADD_LONG(&rs->sysctl_ctx,
382 &rs->rs_rlt[i].rate,
384 SYSCTL_ADD_LONG(&rs->sysctl_ctx,
387 &rs->rs_rlt[i].using,
389 SYSCTL_ADD_LONG(&rs->sysctl_ctx,
392 &rs->rs_rlt[i].rs_num_enobufs,
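
The loop above exports one numbered child node per hardware rate, each carrying its flags, time_between, rate, using, and rs_num_enobufs counters. A minimal userland sketch of that numbered-node scheme, with made-up sample rates and plain printf standing in for the SYSCTL_ADD machinery:

#include <stdio.h>
#include <stdint.h>

struct rate_ent {
	uint64_t rate;          /* bytes per second */
	uint32_t time_between;  /* pacing gap, microseconds */
	uint32_t flags;
};

int
main(void)
{
	/* Illustrative rates; the kernel's table comes from the driver. */
	struct rate_ent tbl[] = {
		{ 12500, 8000, 1 }, { 125000, 800, 1 }, { 1250000, 80, 1 },
	};
	char namebuf[32];
	int i;

	for (i = 0; i < 3; i++) {
		/* Each rate gets a node named after its index, as the
		 * per-rate SYSCTL_ADD_NODE() loop above does. */
		snprintf(namebuf, sizeof(namebuf), "%d", i);
		printf("rate.%s: rate=%llu time_between=%u flags=%#x\n",
		    namebuf, (unsigned long long)tbl[i].rate,
		    (unsigned)tbl[i].time_between, (unsigned)tbl[i].flags);
	}
	return (0);
}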
403 struct tcp_rate_set *rs;
406 rs = __containerof(ctx, struct tcp_rate_set, rs_epoch_ctx);
409 rs->rs_flags &= ~RS_FUNERAL_SCHD;
426 do_free_rs = (rs->rs_flows_using == 0);
431 sysctl_ctx_free(&rs->sysctl_ctx);
432 free(rs->rs_rlt, M_TCPPACE);
433 free(rs, M_TCPPACE);
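
rs_destroy() is handed only the embedded epoch context, so it must step back to the enclosing tcp_rate_set; __containerof() does that with offsetof arithmetic. A portable, runnable model (struct names here are stand-ins, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define containerof(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct epoch_ctx { int dummy; };

struct rate_set {
	int flows_using;
	struct epoch_ctx epoch_ctx; /* embedded, like rs_epoch_ctx */
};

int
main(void)
{
	struct rate_set set = { .flows_using = 7 };
	struct epoch_ctx *ctx = &set.epoch_ctx;
	/* The callback receives only ctx; recover the whole set. */
	struct rate_set *rs = containerof(ctx, struct rate_set, epoch_ctx);

	printf("flows_using=%d (expect 7)\n", rs->flows_using);
	return (0);
}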
438 rs_defer_destroy(struct tcp_rate_set *rs)
444 if (rs->rs_flags & RS_FUNERAL_SCHD)
450 rs->rs_flags |= RS_FUNERAL_SCHD;
451 NET_EPOCH_CALL(rs_destroy, &rs->rs_epoch_ctx);
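
rs_defer_destroy() guards the NET_EPOCH_CALL() with RS_FUNERAL_SCHD so the funeral is scheduled at most once; rs_destroy() clears the flag again when it runs. A sketch of that schedule-once guard, with the epoch queue modeled as a single pending slot (flag value is a placeholder):

#include <stdio.h>

#define FUNERAL_SCHD 0x01 /* placeholder for RS_FUNERAL_SCHD */

struct rate_set { unsigned flags; };

static struct rate_set *pending; /* stands in for the epoch queue */

static void
destroy_cb(struct rate_set *rs)
{
	rs->flags &= ~FUNERAL_SCHD; /* mirrors rs_destroy() */
	printf("destroy ran once\n");
}

static void
defer_destroy(struct rate_set *rs)
{
	if (rs->flags & FUNERAL_SCHD)
		return;            /* funeral already scheduled */
	rs->flags |= FUNERAL_SCHD; /* mark before queueing */
	pending = rs;              /* stands in for NET_EPOCH_CALL() */
}

int
main(void)
{
	struct rate_set rs = { 0 };

	defer_destroy(&rs);
	defer_destroy(&rs);          /* no-op: already scheduled */
	if (pending != NULL)
		destroy_cb(pending); /* the "epoch" fires later, once */
	return (0);
}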
490 populate_canned_table(struct tcp_rate_set *rs, const uint64_t *rate_table_act)
503 for (i = 0, at_low = 0, at_high = RS_NEXT_ORDER_GROUP; i < rs->rs_rate_cnt; i++) {
504 rs->rs_rlt[i].flags = 0;
505 rs->rs_rlt[i].time_between = 0;
509 rs->rs_rlt[i].rate = rate_table_act[at_low];
514 rs->rs_rlt[i].rate = rate_table_act[at_high];
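
populate_canned_table() interleaves two independently ordered runs of the driver's rate table into one ascending table, walking a low cursor and a high cursor and retiring each run as it empties. A runnable sketch of the merge; the group boundary and sample rates are illustrative stand-ins for RS_NEXT_ORDER_GROUP, MAX_HDWR_RATES, and the real table:

#include <stdint.h>
#include <stdio.h>

#define NEXT_ORDER_GROUP 3 /* start of the second sorted run */
#define TOTAL_RATES      6

int
main(void)
{
	const uint64_t act[TOTAL_RATES] = {
		/* low group */  12500, 62500, 250000,
		/* high group */ 125000, 1250000, 12500000,
	};
	uint64_t out[TOTAL_RATES];
	int at_low = 0, at_high = NEXT_ORDER_GROUP;
	int low_done = 0, high_done = 0;

	for (int i = 0; i < TOTAL_RATES; i++) {
		if (!low_done &&
		    (high_done || act[at_low] < act[at_high])) {
			out[i] = act[at_low++];        /* take from low run */
			if (at_low == NEXT_ORDER_GROUP)
				low_done = 1;
		} else {
			out[i] = act[at_high++];       /* take from high run */
			if (at_high == TOTAL_RATES)
				high_done = 1;
		}
	}
	for (int i = 0; i < TOTAL_RATES; i++)
		printf("%d: %llu\n", i, (unsigned long long)out[i]);
	return (0);
}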
525 struct tcp_rate_set *rs;
548 rs = malloc(sizeof(struct tcp_rate_set), M_TCPPACE, M_NOWAIT | M_ZERO);
549 if (rs == NULL) {
563 memset(rs, 0, sizeof(struct tcp_rate_set));
564 rs->rs_ifp = ifp;
565 rs->rs_if_dunit = ifp->if_dunit;
566 rs->rs_flags = RS_INTF_NO_SUP;
567 rs->rs_disable = 1;
569 sysctl_ctx_init(&rs->sysctl_ctx);
570 rl_sysctl_root = SYSCTL_ADD_NODE(&rs->sysctl_ctx,
573 rs->rs_ifp->if_xname,
576 rl_add_syctl_entries(rl_sysctl_root, rs);
579 CK_LIST_INSERT_HEAD(&int_rs, rs, next);
582 return (rs);
584 memset(rs, 0, sizeof(struct tcp_rate_set));
585 rs->rs_ifp = ifp;
586 rs->rs_if_dunit = ifp->if_dunit;
587 rs->rs_flags = RS_IS_DEFF;
589 sysctl_ctx_init(&rs->sysctl_ctx);
590 rl_sysctl_root = SYSCTL_ADD_NODE(&rs->sysctl_ctx,
593 rs->rs_ifp->if_xname,
596 rl_add_syctl_entries(rl_sysctl_root, rs);
599 CK_LIST_INSERT_HEAD(&int_rs, rs, next);
602 return (rs);
605 rs->rs_ifp = ifp;
606 rs->rs_if_dunit = ifp->if_dunit;
607 rs->rs_rate_cnt = rl.number_of_rates;
608 rs->rs_min_seg = rl.min_segment_burst;
609 rs->rs_highest_valid = 0;
610 rs->rs_flow_limit = rl.max_flows;
611 rs->rs_flags = RS_IS_INTF | RS_NO_PRE;
612 rs->rs_disable = 0;
616 rs->rs_ifp = ifp;
617 rs->rs_if_dunit = ifp->if_dunit;
618 rs->rs_rate_cnt = rl.number_of_rates;
619 rs->rs_min_seg = rl.min_segment_burst;
620 rs->rs_disable = 0;
621 rs->rs_flow_limit = rl.max_flows;
623 if ((rs->rs_rate_cnt > MAX_HDWR_RATES) &&
624 (rs->rs_rate_cnt < ALL_HARDWARE_RATES)) {
629 rs->rs_rate_cnt = MAX_HDWR_RATES;
631 if (rs->rs_rate_cnt <= RS_ORDERED_COUNT)
632 rs->rs_flags = RS_IS_INTF;
634 rs->rs_flags = RS_IS_INTF | RS_INT_TBL;
635 if (rs->rs_rate_cnt >= ALL_HARDWARE_RATES)
636 rs->rs_rate_cnt = ALL_HARDWARE_RATES;
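
The driver-reported rate count is first capped when it falls awkwardly between the two supported table sizes, then decides the table type: small counts keep the driver's ordered table as-is (RS_IS_INTF), larger ones switch to the internal table (RS_INT_TBL), and anything at or above the full canned size is clamped to it. A sketch of that policy with placeholder constants standing in for RS_ORDERED_COUNT, MAX_HDWR_RATES, and ALL_HARDWARE_RATES (the free() just below is the bail-out path when setup cannot proceed):

#include <stdio.h>

#define ORDERED_COUNT  16   /* small tables are used as-is */
#define MAX_RATES      64   /* cap for odd in-between sizes */
#define ALL_RATES      1004 /* full canned-table threshold */

enum table_kind { PLAIN, INTERNAL };

static enum table_kind
classify(int *cnt)
{
	if (*cnt > MAX_RATES && *cnt < ALL_RATES) {
		/* Odd size: too many to keep ordered, too few for
		 * the full internal table -- cap it. */
		*cnt = MAX_RATES;
	}
	if (*cnt <= ORDERED_COUNT)
		return (PLAIN);       /* RS_IS_INTF */
	if (*cnt >= ALL_RATES)
		*cnt = ALL_RATES;     /* never exceed the canned table */
	return (INTERNAL);            /* RS_IS_INTF | RS_INT_TBL */
}

int
main(void)
{
	int sizes[] = { 8, 100, 2000 };

	for (int i = 0; i < 3; i++) {
		int n = sizes[i];
		enum table_kind k = classify(&n);
		printf("%d -> cnt=%d kind=%s\n", sizes[i], n,
		    k == PLAIN ? "plain" : "internal");
	}
	return (0);
}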
638 free(rs, M_TCPPACE);
641 sz = sizeof(struct tcp_hwrate_limit_table) * rs->rs_rate_cnt;
642 rs->rs_rlt = malloc(sz, M_TCPPACE, M_NOWAIT);
643 if (rs->rs_rlt == NULL) {
647 free(rs, M_TCPPACE);
650 if (rs->rs_rate_cnt >= ALL_HARDWARE_RATES) {
657 rs->rs_rlt[0].rate = 12500; /* 100 kbit/s */
658 rs->rs_rlt[1].rate = 25000; /* 200 kbit/s */
659 rs->rs_rlt[2].rate = 62500; /* 500 kbit/s */
664 rs->rs_rlt[i].rate = rat;
667 rs->rs_rlt[(ALL_HARDWARE_RATES-1)].rate = 1250000000; /* 10 Gbit/s */
668 } else if (rs->rs_flags & RS_INT_TBL) {
670 populate_canned_table(rs, rate_table_act);
676 for (i = 0; i < rs->rs_rate_cnt; i++) {
677 rs->rs_rlt[i].rate = rate_table_act[i];
678 rs->rs_rlt[i].time_between = 0;
679 rs->rs_rlt[i].flags = 0;
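
The first branch above builds the full canned table: rates are stored in bytes per second (12500 B/s == 100 kbit/s), with three sub-megabit slots, then one-megabit (125000 B/s) steps, and the last slot pinned to 10 Gbit/s. A runnable sketch of that construction; the size constant is a placeholder for ALL_HARDWARE_RATES:

#include <stdint.h>
#include <stdio.h>

#define ALL_RATES 1004 /* placeholder for ALL_HARDWARE_RATES */

int
main(void)
{
	static uint64_t rate[ALL_RATES];
	uint64_t rat;
	int i;

	rate[0] = 12500;  /* 100 kbit/s */
	rate[1] = 25000;  /* 200 kbit/s */
	rate[2] = 62500;  /* 500 kbit/s */
	/* 125000 B/s == 1 Mbit/s; fill 1 Mbit up to ~1 Gbit in
	 * 1 Mbit steps, one slot per megabit. */
	for (i = 3, rat = 125000; i < (ALL_RATES - 1); i++) {
		rate[i] = rat;
		rat += 125000;
	}
	rate[ALL_RATES - 1] = 1250000000; /* 10 Gbit/s */

	printf("slot 3 = %llu B/s (1 Mbit)\n", (unsigned long long)rate[3]);
	printf("slot %d = %llu B/s\n", ALL_RATES - 1,
	    (unsigned long long)rate[ALL_RATES - 1]);
	return (0);
}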
682 for (i = (rs->rs_rate_cnt - 1); i >= 0; i--) {
688 rs->rs_rlt[i].ptbl = rs;
689 rs->rs_rlt[i].tag = NULL;
690 rs->rs_rlt[i].using = 0;
691 rs->rs_rlt[i].rs_num_enobufs = 0;
696 res = lentim / rs->rs_rlt[i].rate;
698 rs->rs_rlt[i].time_between = res;
700 rs->rs_rlt[i].time_between = 1;
701 if (rs->rs_flags & RS_NO_PRE) {
702 rs->rs_rlt[i].flags = HDWRPACE_INITED;
703 rs->rs_lowest_valid = i;
710 rs->rs_rlt[i].rate, i);
722 rs->rs_rlt[i].rate,
723 &rs->rs_rlt[i].tag);
726 if (i == (rs->rs_rate_cnt - 1)) {
731 free(rs->rs_rlt, M_TCPPACE);
741 rs->rs_rlt[i].flags = HDWRPACE_INITED | HDWRPACE_TAGPRESENT;
742 rs->rs_lowest_valid = i;
747 if (rs->rs_rlt[(rs->rs_rate_cnt - 1)].flags & HDWRPACE_INITED)
748 rs->rs_highest_valid = rs->rs_rate_cnt - 1;
750 free(rs->rs_rlt, M_TCPPACE);
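
In the per-rate loop above, time_between is the pacing gap for one minimum-size burst at the slot's rate, apparently in microseconds, floored at 1 so a fast rate never rounds down to back-to-back sends. A sketch of the arithmetic; the burst size used is an illustrative stand-in for what the kernel derives from rs_min_seg and the segment size:

#include <stdint.h>
#include <stdio.h>

static uint32_t
pacing_gap_usec(uint64_t burst_bytes, uint64_t rate_Bps)
{
	uint64_t lentim, res;

	lentim = burst_bytes * 1000000; /* byte-microseconds per second */
	res = lentim / rate_Bps;        /* time one burst occupies the wire */
	return (res > 0 ? (uint32_t)res : 1); /* never allow a zero gap */
}

int
main(void)
{
	/* 4 x 1448-byte segments at 1 Mbit/s (125000 B/s): ~46 ms. */
	printf("%u usec\n", pacing_gap_usec(4 * 1448, 125000));
	/* The same burst at 10 Gbit/s needs only ~4 usec. */
	printf("%u usec\n", pacing_gap_usec(4 * 1448, 1250000000));
	return (0);
}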
754 sysctl_ctx_init(&rs->sysctl_ctx);
755 rl_sysctl_root = SYSCTL_ADD_NODE(&rs->sysctl_ctx,
758 rs->rs_ifp->if_xname,
761 rl_add_syctl_entries(rl_sysctl_root, rs);
764 CK_LIST_INSERT_HEAD(&int_rs, rs, next);
767 return (rs);
775 tcp_int_find_suitable_rate(const volatile struct tcp_rate_set *rs,
785 (rs->rs_lowest_valid <= 2)) {
791 for (i = rs->rs_lowest_valid; i < 3; i++) {
792 if (bytes_per_sec <= rs->rs_rlt[i].rate) {
793 rte = &rs->rs_rlt[i];
795 } else if (rs->rs_rlt[i].flags & HDWRPACE_INITED) {
796 arte = &rs->rs_rlt[i];
798 previous_rate = rs->rs_rlt[i].rate;
802 (rs->rs_rlt[(ALL_HARDWARE_RATES-1)].flags & HDWRPACE_INITED)) {
808 rte = &rs->rs_rlt[(ALL_HARDWARE_RATES-1)];
810 arte = &rs->rs_rlt[(ALL_HARDWARE_RATES-1)];
811 previous_rate = rs->rs_rlt[(ALL_HARDWARE_RATES-2)].rate;
829 if ((ind_calc >= rs->rs_lowest_valid) &&
830 (ind_calc <= rs->rs_highest_valid)) {
831 rte = &rs->rs_rlt[ind_calc];
833 previous_rate = rs->rs_rlt[(ind_calc-1)].rate;
837 (rs->rs_lowest_valid <= 2)) {
838 for (i = rs->rs_lowest_valid; i < 3; i++) {
839 if (bytes_per_sec == rs->rs_rlt[i].rate) {
840 rte = &rs->rs_rlt[i];
845 (rs->rs_rlt[(ALL_HARDWARE_RATES-1)].flags & HDWRPACE_INITED)) {
847 if (bytes_per_sec == rs->rs_rlt[(ALL_HARDWARE_RATES-1)].rate) {
849 rte = &rs->rs_rlt[(ALL_HARDWARE_RATES-1)];
861 if (rs->rs_rlt[ind_calc].flags & HDWRPACE_INITED)
862 rte = &rs->rs_rlt[ind_calc];
868 (rs->rs_lowest_valid <= 2)) {
869 arte = &rs->rs_rlt[3]; /* set alternate to 1Meg */
870 for (i = 2; i >= rs->rs_lowest_valid; i--) {
871 if (bytes_per_sec < rs->rs_rlt[i].rate) {
872 rte = &rs->rs_rlt[i];
874 previous_rate = rs->rs_rlt[(i-1)].rate;
878 (bytes_per_sec == rs->rs_rlt[i].rate)) {
879 rte = &rs->rs_rlt[i];
881 previous_rate = rs->rs_rlt[(i-1)].rate;
885 arte = &rs->rs_rlt[i]; /* new alternate */
889 if ((bytes_per_sec < rs->rs_rlt[(ALL_HARDWARE_RATES-1)].rate) &&
890 (rs->rs_rlt[(ALL_HARDWARE_RATES-1)].flags & HDWRPACE_INITED)) {
892 rte = &rs->rs_rlt[(ALL_HARDWARE_RATES-1)];
894 (bytes_per_sec == rs->rs_rlt[(ALL_HARDWARE_RATES-1)].rate) &&
895 (rs->rs_rlt[(ALL_HARDWARE_RATES-1)].flags & HDWRPACE_INITED)) {
897 rte = &rs->rs_rlt[(ALL_HARDWARE_RATES-1)];
898 } else if (rs->rs_rlt[(ALL_HARDWARE_RATES-1)].flags & HDWRPACE_INITED) {
900 arte = &rs->rs_rlt[(ALL_HARDWARE_RATES-1)];
902 previous_rate = rs->rs_rlt[(ALL_HARDWARE_RATES-2)].rate;
912 rte = &rs->rs_rlt[ind_calc];
914 previous_rate = rs->rs_rlt[(ind_calc-1)].rate;
924 if (rs->rs_rlt[ind_calc].flags & HDWRPACE_INITED) {
925 rte = &rs->rs_rlt[ind_calc];
927 previous_rate = rs->rs_rlt[(ind_calc-1)].rate;
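
Because the internal table's slots are exactly one megabit apart from slot 3 upward, tcp_int_find_suitable_rate() can turn a requested rate into an index arithmetically instead of scanning; only the three sub-megabit slots need a loop. A sketch of that mapping with illustrative constants and rounding (the kernel picks the slot above or below depending on the caller's flags):

#include <stdint.h>
#include <stdio.h>

#define ONE_MEGABIT 125000ULL /* bytes per second */
#define ALL_RATES   1004      /* placeholder for ALL_HARDWARE_RATES */

static int
rate_to_index(uint64_t bps)
{
	int ind;

	if (bps < ONE_MEGABIT)
		return (-1); /* caller scans the three sub-Mbit slots */
	/* slot 3 == 1 Mbit, slot 4 == 2 Mbit, ... */
	ind = (int)(bps / ONE_MEGABIT) + 2;
	if (ind >= (ALL_RATES - 1))
		ind = ALL_RATES - 2; /* last evenly spaced slot */
	return (ind);
}

int
main(void)
{
	printf("1 Mbit  -> slot %d\n", rate_to_index(125000));
	printf("10 Mbit -> slot %d\n", rate_to_index(1250000));
	printf("1 Gbit  -> slot %d\n", rate_to_index(125000000));
	return (0);
}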
948 tcp_find_suitable_rate(const volatile struct tcp_rate_set *rs, uint64_t bytes_per_sec, uint32_t flags, uint64_t *lower_rate)
964 if ((rs->rs_flags & RS_INT_TBL) &&
965 (rs->rs_rate_cnt >= ALL_HARDWARE_RATES)) {
972 return (tcp_int_find_suitable_rate(rs, bytes_per_sec, flags, lower_rate));
981 for (i = rs->rs_lowest_valid, matched = 0; i <= rs->rs_highest_valid; i++) {
983 (bytes_per_sec == rs->rs_rlt[i].rate)) {
984 rte = &rs->rs_rlt[i];
990 (bytes_per_sec <= rs->rs_rlt[i].rate)) {
991 rte = &rs->rs_rlt[i];
997 previous_rate = rs->rs_rlt[i].rate;
998 if (bytes_per_sec > rs->rs_rlt[i].rate)
1005 rte = &rs->rs_rlt[rs->rs_lowest_valid];
1013 for (i = rs->rs_highest_valid, matched = 0; i >= rs->rs_lowest_valid; i--) {
1014 if (rs->rs_rlt[i].rate > bytes_per_sec) {
1016 rte = &rs->rs_rlt[i];
1019 (bytes_per_sec == rs->rs_rlt[i].rate)) {
1022 rte = &rs->rs_rlt[i];
1031 if (rs->rs_rlt[i].rate < bytes_per_sec) {
1037 *lower_rate = rs->rs_rlt[i].rate;
1044 rte = &rs->rs_rlt[rs->rs_highest_valid];
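
For driver-supplied tables, tcp_find_suitable_rate() falls back to a linear walk of the valid span: the upward scan takes the first slot at or above the request and remembers the rate just below it so the caller can report *lower_rate. A simplified, runnable version of that upward scan (flag handling and the downward LT scan omitted):

#include <stdint.h>
#include <stdio.h>

static int
find_rate(const uint64_t *tbl, int lowest, int highest,
    uint64_t bps, uint64_t *lower_rate)
{
	uint64_t previous = 0;

	for (int i = lowest; i <= highest; i++) {
		if (bps <= tbl[i]) {
			if (lower_rate != NULL)
				*lower_rate = previous; /* next step down */
			return (i);
		}
		previous = tbl[i];
	}
	return (-1); /* request above every table entry */
}

int
main(void)
{
	const uint64_t tbl[] = { 12500, 125000, 1250000, 12500000 };
	uint64_t lower = 0;
	int i = find_rate(tbl, 0, 3, 200000, &lower);

	printf("slot %d, lower_rate %llu\n", i, (unsigned long long)lower);
	return (0);
}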
1128 struct tcp_rate_set *rs;
1130 CK_LIST_FOREACH(rs, &int_rs, next) {
1131 if ((rs->rs_ifp == ifp) &&
1132 (rs->rs_if_dunit == ifp->if_dunit)) {
1134 return (rs);
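
find_rs_for_ifp() matches on both the ifp pointer and its dunit, presumably so a recycled ifnet that now names a different unit cannot match a stale entry. A plain-C model of the lookup, with CK_LIST replaced by an ordinary singly linked list:

#include <stdio.h>

struct ifnet { int if_dunit; };

struct rate_set {
	struct ifnet *rs_ifp;
	int rs_if_dunit;
	struct rate_set *next;
};

static struct rate_set *
find_rs_for_ifp(struct rate_set *head, struct ifnet *ifp)
{
	for (struct rate_set *rs = head; rs != NULL; rs = rs->next) {
		/* Both the pointer and the unit number must agree. */
		if (rs->rs_ifp == ifp && rs->rs_if_dunit == ifp->if_dunit)
			return (rs);
	}
	return (NULL);
}

int
main(void)
{
	struct ifnet a = { 0 };
	struct rate_set r1 = { &a, 0, NULL };

	printf("%s\n", find_rs_for_ifp(&r1, &a) ? "found" : "missing");
	return (0);
}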
1148 * So why is rs volatile? This is to defeat a compiler bug in which the
1150 * compiler becomes convinced that rs can never be NULL (which is not true).
1151 * Convinced of that, it optimizes away the (rs == NULL) check below.
1154 volatile struct tcp_rate_set *rs;
1161 rs = find_rs_for_ifp(ifp);
1162 if ((rs == NULL) ||
1163 (rs->rs_flags & RS_INTF_NO_SUP) ||
1164 (rs->rs_flags & RS_IS_DEAD)) {
1182 if ((rs == NULL) || (rs->rs_disable != 0)) {
1188 if (rs->rs_flags & RS_IS_DEFF) {
1194 if (rs->rs_disable && error)
1205 if (rs->rs_flow_limit &&
1206 ((rs->rs_flows_using + 1) > rs->rs_flow_limit)) {
1212 rte = tcp_find_suitable_rate(rs, bytes_per_sec, flags, lower_rate);
1226 ("Setup rate has no snd_tag inp:%p rte:%p rate:%llu rs:%p",
1227 inp, rte, (unsigned long long)rte->rate, rs));
1238 atomic_add_64(&rs->rs_flows_using, 1);
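
Before a tag is handed out, the optional per-interface flow cap is checked and rs_flows_using is bumped with atomic_add_64(). A userland sketch using C11 atomics; note that the check and the add are two separate steps here, just as in the fragments above:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct rate_set {
	uint64_t flow_limit;          /* 0 == unlimited */
	_Atomic uint64_t flows_using;
};

static int
take_flow_ref(struct rate_set *rs)
{
	if (rs->flow_limit != 0 &&
	    atomic_load(&rs->flows_using) + 1 > rs->flow_limit)
		return (-1); /* over the cap: no hardware rate for you */
	atomic_fetch_add(&rs->flows_using, 1); /* take a flow reference */
	return (0);
}

int
main(void)
{
	struct rate_set rs = { .flow_limit = 1 };

	printf("first:  %d\n", take_flow_ref(&rs)); /* 0: granted */
	printf("second: %d\n", take_flow_ref(&rs)); /* -1: at the cap */
	return (0);
}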
1248 struct tcp_rate_set *rs;
1261 rs = find_rs_for_ifp(ifp);
1262 if (rs) {
1276 struct tcp_rate_set *rs;
1282 rs = find_rs_for_ifp(ifp);
1283 if (rs) {
1284 CK_LIST_REMOVE(rs, next);
1286 rs->rs_flags |= RS_IS_DEAD;
1287 for (i = 0; i < rs->rs_rate_cnt; i++) {
1288 if (rs->rs_rlt[i].flags & HDWRPACE_TAGPRESENT) {
1289 in_pcbdetach_tag(rs->rs_rlt[i].tag);
1290 rs->rs_rlt[i].tag = NULL;
1292 rs->rs_rlt[i].flags = HDWRPACE_IFPDEPARTED;
1294 if (rs->rs_flows_using == 0)
1295 rs_defer_destroy(rs);
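
The departure path above tears the set down in a careful order: unlink it so no new lookups find it, mark it dead, drop every hardware tag, and defer destruction only if no flow still holds a reference (otherwise the last releaser triggers it). A sketch of that ordering with stand-in names and flag values:

#include <stdint.h>
#include <stdio.h>

#define IS_DEAD     0x01 /* placeholder for RS_IS_DEAD */
#define IFPDEPARTED 0x02 /* placeholder for HDWRPACE_IFPDEPARTED */

struct rate_ent { void *tag; unsigned flags; };

struct rate_set {
	unsigned flags;
	uint64_t flows_using;
	int rate_cnt;
	struct rate_ent *rlt;
};

static void
on_ifnet_departure(struct rate_set *rs)
{
	/* (list unlink happens first, under the list lock) */
	rs->flags |= IS_DEAD;
	for (int i = 0; i < rs->rate_cnt; i++) {
		rs->rlt[i].tag = NULL; /* stands in for in_pcbdetach_tag() */
		rs->rlt[i].flags = IFPDEPARTED;
	}
	if (rs->flows_using == 0)
		printf("no users: defer destroy now\n");
}

int
main(void)
{
	struct rate_ent ents[2] = { { (void *)1, 0 }, { (void *)1, 0 } };
	struct rate_set rs = { 0, 0, 2, ents };

	on_ifnet_departure(&rs);
	return (0);
}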
1304 struct tcp_rate_set *rs, *nrs;
1310 CK_LIST_FOREACH_SAFE(rs, &int_rs, next, nrs) {
1311 CK_LIST_REMOVE(rs, next);
1313 rs->rs_flags |= RS_IS_DEAD;
1314 for (i = 0; i < rs->rs_rate_cnt; i++) {
1315 if (rs->rs_rlt[i].flags & HDWRPACE_TAGPRESENT) {
1316 in_pcbdetach_tag(rs->rs_rlt[i].tag);
1317 rs->rs_rlt[i].tag = NULL;
1319 rs->rs_rlt[i].flags = HDWRPACE_IFPDEPARTED;
1321 if (rs->rs_flows_using == 0)
1322 rs_defer_destroy(rs);
1399 const struct tcp_rate_set *rs;
1451 rs = crte->ptbl;
1452 if ((rs->rs_flags & RS_IS_DEAD) ||
1461 nrte = tcp_find_suitable_rate(rs, bytes_per_sec, flags, lower_rate);
1489 lrs = __DECONST(struct tcp_rate_set *, rs);
1527 struct tcp_rate_set *rs;
1538 rs = __DECONST(struct tcp_rate_set *, crs);
1540 pre = atomic_fetchadd_64(&rs->rs_flows_using, -1);
1549 if (rs->rs_flags & RS_IS_DEAD)
1550 rs_defer_destroy(rs);
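
On release, atomic_fetchadd_64(..., -1) returns the pre-decrement count, so pre == 1 identifies the last user, who is responsible for triggering the deferred destroy when the set is already marked RS_IS_DEAD. A C11-atomics sketch of that last-reference detection:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RS_IS_DEAD 0x01 /* placeholder flag value */

struct rate_set {
	unsigned flags;
	_Atomic uint64_t flows_using;
};

static void
release_flow(struct rate_set *rs)
{
	/* fetch_sub returns the value *before* the decrement. */
	uint64_t pre = atomic_fetch_sub(&rs->flows_using, 1);

	if (pre == 1 && (rs->flags & RS_IS_DEAD))
		printf("last user of a dead set: defer destroy\n");
}

int
main(void)
{
	struct rate_set rs = { .flags = RS_IS_DEAD };

	atomic_fetch_add(&rs.flows_using, 1); /* one active flow */
	release_flow(&rs);                    /* prints: last user ... */
	return (0);
}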
1740 struct tcp_rate_set *rs;
1745 rs = find_rs_for_ifp(ifp);
1746 if (rs == NULL) {
1749 } else if (rs->rs_flags & RS_IS_DEFF) {
1762 rate_ret = rs->rs_rlt[rs->rs_highest_valid].rate;