pfctl_altq.c: revision 126354 -> 126355
1/* $FreeBSD: head/contrib/pf/pfctl/pfctl_altq.c 126355 2004-02-28 17:32:53Z mlaier $ */
2/* $OpenBSD: pfctl_altq.c,v 1.77 2003/08/22 21:50:34 david Exp $ */
3
4/*
5 * Copyright (c) 2002
6 * Sony Computer Science Laboratories Inc.
7 * Copyright (c) 2002, 2003 Henning Brauer <henning@openbsd.org>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22#include <sys/types.h>
23#include <sys/ioctl.h>
24#include <sys/socket.h>
25#if !defined(__FreeBSD__)
26#include <sys/limits.h>
27#endif
28
29#include <net/if.h>
30#include <netinet/in.h>
31#include <net/pfvar.h>
32
33#include <err.h>
34#include <errno.h>
35#include <math.h>
36#include <stdio.h>
37#include <stdlib.h>
38#include <string.h>
39#include <unistd.h>
40
41#include <altq/altq.h>
42#include <altq/altq_cbq.h>
43#include <altq/altq_priq.h>
44#include <altq/altq_hfsc.h>
45
46#include "pfctl_parser.h"
47#include "pfctl.h"
48
49#define is_sc_null(sc) (((sc) == NULL) || ((sc)->m1 == 0 && (sc)->m2 == 0))
50
51TAILQ_HEAD(altqs, pf_altq) altqs = TAILQ_HEAD_INITIALIZER(altqs);
52LIST_HEAD(gen_sc, segment) rtsc, lssc;
53
54struct pf_altq *qname_to_pfaltq(const char *, const char *);
55u_int32_t qname_to_qid(const char *);
56
57static int eval_pfqueue_cbq(struct pfctl *, struct pf_altq *);
58static int cbq_compute_idletime(struct pfctl *, struct pf_altq *);
59static int check_commit_cbq(int, int, struct pf_altq *);
60static int print_cbq_opts(const struct pf_altq *);
61
62static int eval_pfqueue_priq(struct pfctl *, struct pf_altq *);
63static int check_commit_priq(int, int, struct pf_altq *);
64static int print_priq_opts(const struct pf_altq *);
65
66static int eval_pfqueue_hfsc(struct pfctl *, struct pf_altq *);
67static int check_commit_hfsc(int, int, struct pf_altq *);
68static int print_hfsc_opts(const struct pf_altq *,
69 const struct node_queue_opt *);
70
71static void gsc_add_sc(struct gen_sc *, struct service_curve *);
72static int is_gsc_under_sc(struct gen_sc *,
73 struct service_curve *);
74static void gsc_destroy(struct gen_sc *);
75static struct segment *gsc_getentry(struct gen_sc *, double);
76static int gsc_add_seg(struct gen_sc *, double, double, double,
77 double);
78static double sc_x2y(struct service_curve *, double);
79
80#if defined(__FreeBSD__)
81u_int32_t getifspeed(int, char *);
82#else
83u_int32_t getifspeed(char *);
84#endif
85u_long getifmtu(char *);
86int eval_queue_opts(struct pf_altq *, struct node_queue_opt *,
87 u_int32_t);
88u_int32_t eval_bwspec(struct node_queue_bw *, u_int32_t);
89void print_hfsc_sc(const char *, u_int, u_int, u_int,
90 const struct node_hfsc_sc *);
91
92static u_int32_t max_qid = 1;
93
94void
95pfaltq_store(struct pf_altq *a)
96{
97 struct pf_altq *altq;
98
99 if ((altq = malloc(sizeof(*altq))) == NULL)
100 err(1, "malloc");
101 memcpy(altq, a, sizeof(struct pf_altq));
102 TAILQ_INSERT_TAIL(&altqs, altq, entries);
103}
104
105void
106pfaltq_free(struct pf_altq *a)
107{
108 struct pf_altq *altq;
109
110 TAILQ_FOREACH(altq, &altqs, entries) {
111 if (strncmp(a->ifname, altq->ifname, IFNAMSIZ) == 0 &&
112 strncmp(a->qname, altq->qname, PF_QNAME_SIZE) == 0) {
113 TAILQ_REMOVE(&altqs, altq, entries);
114 free(altq);
115 return;
116 }
117 }
118}
119
120struct pf_altq *
121pfaltq_lookup(const char *ifname)
122{
123 struct pf_altq *altq;
124
125 TAILQ_FOREACH(altq, &altqs, entries) {
126 if (strncmp(ifname, altq->ifname, IFNAMSIZ) == 0 &&
127 altq->qname[0] == 0)
128 return (altq);
129 }
130 return (NULL);
131}
132
133struct pf_altq *
134qname_to_pfaltq(const char *qname, const char *ifname)
135{
136 struct pf_altq *altq;
137
138 TAILQ_FOREACH(altq, &altqs, entries) {
139 if (strncmp(ifname, altq->ifname, IFNAMSIZ) == 0 &&
140 strncmp(qname, altq->qname, PF_QNAME_SIZE) == 0)
141 return (altq);
142 }
143 return (NULL);
144}
145
146u_int32_t
147qname_to_qid(const char *qname)
148{
149 struct pf_altq *altq;
150
151 /*
152 * We guarantee that same named queues on different interfaces
153 * have the same qid, so we do NOT need to limit matching on
154 * one interface!
155 */
156
157 TAILQ_FOREACH(altq, &altqs, entries) {
158 if (strncmp(qname, altq->qname, PF_QNAME_SIZE) == 0)
159 return (altq->qid);
160 }
161 return (0);
162}
163
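/*
 * print_altq() and print_queue() below emit pf.conf-style text.
 * Illustrative output only (interface name and numbers are invented):
 *   altq on fxp0 cbq bandwidth 10Mb tbrsize 12000
 *   queue  std bandwidth 50% priority 3 cbq( default )
 */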
164void
165print_altq(const struct pf_altq *a, unsigned level, struct node_queue_bw *bw,
166 struct node_queue_opt *qopts)
167{
168 if (a->qname[0] != NULL) {
169 print_queue(a, level, bw, 0, qopts);
170 return;
171 }
172
173 printf("altq on %s ", a->ifname);
174
175 switch(a->scheduler) {
176 case ALTQT_CBQ:
177 if (!print_cbq_opts(a))
178 printf("cbq ");
179 break;
180 case ALTQT_PRIQ:
181 if (!print_priq_opts(a))
182 printf("priq ");
183 break;
184 case ALTQT_HFSC:
185 if (!print_hfsc_opts(a, qopts))
186 printf("hfsc ");
187 break;
188 }
189
190 if (bw != NULL && bw->bw_percent > 0) {
191 if (bw->bw_percent < 100)
192 printf("bandwidth %u%% ", bw->bw_percent);
193 } else
194 printf("bandwidth %s ", rate2str((double)a->ifbandwidth));
195
196 if (a->qlimit != DEFAULT_QLIMIT)
197 printf("qlimit %u ", a->qlimit);
198 printf("tbrsize %u ", a->tbrsize);
199}
200
201void
202print_queue(const struct pf_altq *a, unsigned level, struct node_queue_bw *bw,
203 int print_interface, struct node_queue_opt *qopts)
204{
205 unsigned i;
206
207 printf("queue ");
208 for (i = 0; i < level; ++i)
209 printf(" ");
210 printf("%s ", a->qname);
211 if (print_interface)
212 printf("on %s ", a->ifname);
213 if (a->scheduler == ALTQT_CBQ || a->scheduler == ALTQT_HFSC) {
214 if (bw != NULL && bw->bw_percent > 0) {
215 if (bw->bw_percent < 100)
216 printf("bandwidth %u%% ", bw->bw_percent);
217 } else
218 printf("bandwidth %s ", rate2str((double)a->bandwidth));
219 }
220 if (a->priority != DEFAULT_PRIORITY)
221 printf("priority %u ", a->priority);
222 if (a->qlimit != DEFAULT_QLIMIT)
223 printf("qlimit %u ", a->qlimit);
224 switch (a->scheduler) {
225 case ALTQT_CBQ:
226 print_cbq_opts(a);
227 break;
228 case ALTQT_PRIQ:
229 print_priq_opts(a);
230 break;
231 case ALTQT_HFSC:
232 print_hfsc_opts(a, qopts);
233 break;
234 }
235}
236
237/*
238 * eval_pfaltq computes the discipline parameters.
239 */
240int
241eval_pfaltq(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
242 struct node_queue_opt *opts)
243{
244 u_int rate, size, errors = 0;
245
246 if (bw->bw_absolute > 0)
247 pa->ifbandwidth = bw->bw_absolute;
248 else
249#if defined(__FreeBSD__)
250 if ((rate = getifspeed(pf->dev, pa->ifname)) == 0) {
251#else
252 if ((rate = getifspeed(pa->ifname)) == 0) {
253#endif
254 fprintf(stderr, "cannot determine interface bandwidth "
255 "for %s, specify an absolute bandwidth\n",
256 pa->ifname);
257 errors++;
258 } else if ((pa->ifbandwidth = eval_bwspec(bw, rate)) == 0)
259 pa->ifbandwidth = rate;
260
261 errors += eval_queue_opts(pa, opts, pa->ifbandwidth);
262
263 /* if tbrsize is not specified, use heuristics */
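	/*
	 * The bucket is sized to a small number of MTU-sized packets,
	 * scaled by interface speed: 1 MTU up to 1Mbps, 4 up to 10Mbps,
	 * 8 up to 200Mbps, 24 above that.
	 */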
264 if (pa->tbrsize == 0) {
265 rate = pa->ifbandwidth;
266 if (rate <= 1 * 1000 * 1000)
267 size = 1;
268 else if (rate <= 10 * 1000 * 1000)
269 size = 4;
270 else if (rate <= 200 * 1000 * 1000)
271 size = 8;
272 else
273 size = 24;
274 size = size * getifmtu(pa->ifname);
275 pa->tbrsize = size;
276 }
277 return (errors);
278}
279
280/*
281 * check_commit_altq does consistency check for each interface
282 */
283int
284check_commit_altq(int dev, int opts)
285{
286 struct pf_altq *altq;
287 int error = 0;
288
289 /* call the discipline check for each interface. */
290 TAILQ_FOREACH(altq, &altqs, entries) {
291 if (altq->qname[0] == 0) {
292 switch (altq->scheduler) {
293 case ALTQT_CBQ:
294 error = check_commit_cbq(dev, opts, altq);
295 break;
296 case ALTQT_PRIQ:
297 error = check_commit_priq(dev, opts, altq);
298 break;
299 case ALTQT_HFSC:
300 error = check_commit_hfsc(dev, opts, altq);
301 break;
302 default:
303 break;
304 }
305 }
306 }
307 return (error);
308}
309
310/*
311 * eval_pfqueue computes the queue parameters.
312 */
313int
314eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
315 struct node_queue_opt *opts)
316{
317 /* should be merged with expand_queue */
318 struct pf_altq *if_pa, *parent;
319 int error = 0;
320
321 /* find the corresponding interface and copy fields used by queues */
322 if ((if_pa = pfaltq_lookup(pa->ifname)) == NULL) {
323 fprintf(stderr, "altq not defined on %s\n", pa->ifname);
324 return (1);
325 }
326 pa->scheduler = if_pa->scheduler;
327 pa->ifbandwidth = if_pa->ifbandwidth;
328
329 if (qname_to_pfaltq(pa->qname, pa->ifname) != NULL) {
330 fprintf(stderr, "queue %s already exists on interface %s\n",
331 pa->qname, pa->ifname);
332 return (1);
333 }
334 pa->qid = qname_to_qid(pa->qname);
335
336 parent = NULL;
337 if (pa->parent[0] != 0) {
338 parent = qname_to_pfaltq(pa->parent, pa->ifname);
339 if (parent == NULL) {
340 fprintf(stderr, "parent %s not found for %s\n",
341 pa->parent, pa->qname);
342 return (1);
343 }
344 pa->parent_qid = parent->qid;
345 }
346 if (pa->qlimit == 0)
347 pa->qlimit = DEFAULT_QLIMIT;
348
349 if (pa->scheduler == ALTQT_CBQ || pa->scheduler == ALTQT_HFSC) {
350 if ((pa->bandwidth = eval_bwspec(bw,
351 parent == NULL ? 0 : parent->bandwidth)) == 0) {
352 fprintf(stderr, "bandwidth for %s invalid (%d / %d)\n",
353 pa->qname, bw->bw_absolute, bw->bw_percent);
354 return (1);
355 }
356
357 if (pa->bandwidth > pa->ifbandwidth) {
358 fprintf(stderr, "bandwidth for %s higher than "
359 "interface\n", pa->qname);
360 return (1);
361 }
362 if (parent != NULL && pa->bandwidth > parent->bandwidth) {
363 fprintf(stderr, "bandwidth for %s higher than parent\n",
364 pa->qname);
365 return (1);
366 }
367 }
368
369 if (eval_queue_opts(pa, opts, parent == NULL? 0 : parent->bandwidth))
370 return (1);
371
372 switch (pa->scheduler) {
373 case ALTQT_CBQ:
374 error = eval_pfqueue_cbq(pf, pa);
375 break;
376 case ALTQT_PRIQ:
377 error = eval_pfqueue_priq(pf, pa);
378 break;
379 case ALTQT_HFSC:
380 error = eval_pfqueue_hfsc(pf, pa);
381 break;
382 default:
383 break;
384 }
385 return (error);
386}
387
388/*
389 * CBQ support functions
390 */
391#define RM_FILTER_GAIN 5 /* log2 of gain, e.g., 5 => 31/32 */
392#define RM_NS_PER_SEC (1000000000)
393
394static int
395eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa)
396{
397 struct cbq_opts *opts;
398 u_int ifmtu;
399
400 if (pa->priority >= CBQ_MAXPRI) {
401 warnx("priority out of range: max %d", CBQ_MAXPRI - 1);
402 return (-1);
403 }
404
405 ifmtu = getifmtu(pa->ifname);
406 opts = &pa->pq_u.cbq_opts;
407
408 if (opts->pktsize == 0) { /* use default */
409 opts->pktsize = ifmtu;
410 if (opts->pktsize > MCLBYTES) /* do what TCP does */
411 opts->pktsize &= ~MCLBYTES;
412 } else if (opts->pktsize > ifmtu)
413 opts->pktsize = ifmtu;
414 if (opts->maxpktsize == 0) /* use default */
415 opts->maxpktsize = ifmtu;
416 else if (opts->maxpktsize > ifmtu)
417 opts->pktsize = ifmtu;
418
419 if (opts->pktsize > opts->maxpktsize)
420 opts->pktsize = opts->maxpktsize;
421
422 if (pa->parent[0] == 0)
423 opts->flags |= (CBQCLF_ROOTCLASS | CBQCLF_WRR);
424 else if (pa->qid == 0 && (opts->flags & CBQCLF_DEFCLASS) == 0)
425 pa->qid = ++max_qid;
426
427 cbq_compute_idletime(pf, pa);
428 return (0);
429}
430
431/*
432 * compute ns_per_byte, maxidle, minidle, and offtime
433 */
434static int
435cbq_compute_idletime(struct pfctl *pf, struct pf_altq *pa)
436{
437 struct cbq_opts *opts;
438 double maxidle_s, maxidle, minidle;
439 double offtime, nsPerByte, ifnsPerByte, ptime, cptime;
440 double z, g, f, gton, gtom;
441 u_int minburst, maxburst;
442
443 opts = &pa->pq_u.cbq_opts;
444 ifnsPerByte = (1.0 / (double)pa->ifbandwidth) * RM_NS_PER_SEC * 8;
445 minburst = opts->minburst;
446 maxburst = opts->maxburst;
447
448 if (pa->bandwidth == 0)
449 f = 0.0001; /* small enough? */
450 else
451 f = ((double) pa->bandwidth / (double) pa->ifbandwidth);
452
453 nsPerByte = ifnsPerByte / f;
454 ptime = (double)opts->pktsize * ifnsPerByte;
455 cptime = ptime * (1.0 - f) / f;
456
457 if (nsPerByte * (double)opts->maxpktsize > (double)INT_MAX) {
458 /*
459 * this causes integer overflow in kernel!
460 * (bandwidth < 6Kbps when max_pkt_size=1500)
461 */
462 if (pa->bandwidth != 0 && (pf->opts & PF_OPT_QUIET) == 0)
463 warnx("queue bandwidth must be larger than %s",
464 rate2str(ifnsPerByte * (double)opts->maxpktsize /
465 (double)INT_MAX * (double)pa->ifbandwidth));
466 fprintf(stderr, "cbq: queue %s is too slow!\n",
467 pa->qname);
468 nsPerByte = (double)(INT_MAX / opts->maxpktsize);
469 }
470
471 if (maxburst == 0) { /* use default */
472 if (cptime > 10.0 * 1000000)
473 maxburst = 4;
474 else
475 maxburst = 16;
476 }
477 if (minburst == 0) /* use default */
478 minburst = 2;
479 if (minburst > maxburst)
480 minburst = maxburst;
481
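	/*
	 * g = 1 - 1/2^RM_FILTER_GAIN (31/32 here) is the decay factor of
	 * CBQ's exponentially weighted idle-time average; gton = g^maxburst
	 * and gtom = g^(minburst-1) are the residual weights after bursts
	 * of that many back-to-back packets.  maxidle, offtime and minidle
	 * are rescaled further below into the kernel's fixed-point form.
	 */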
482 z = (double)(1 << RM_FILTER_GAIN);
483 g = (1.0 - 1.0 / z);
484 gton = pow(g, (double)maxburst);
485 gtom = pow(g, (double)(minburst-1));
486 maxidle = ((1.0 / f - 1.0) * ((1.0 - gton) / gton));
487 maxidle_s = (1.0 - g);
488 if (maxidle > maxidle_s)
489 maxidle = ptime * maxidle;
490 else
491 maxidle = ptime * maxidle_s;
492 if (minburst)
493 offtime = cptime * (1.0 + 1.0/(1.0 - g) * (1.0 - gtom) / gtom);
494 else
495 offtime = cptime;
496 minidle = -((double)opts->maxpktsize * (double)nsPerByte);
497
498 /* scale parameters */
499 maxidle = ((maxidle * 8.0) / nsPerByte) * pow(2.0, (double)RM_FILTER_GAIN);
500 offtime = (offtime * 8.0) / nsPerByte * pow(2.0, (double)RM_FILTER_GAIN);
501 minidle = ((minidle * 8.0) / nsPerByte) * pow(2.0, (double)RM_FILTER_GAIN);
502
503 maxidle = maxidle / 1000.0;
504 offtime = offtime / 1000.0;
505 minidle = minidle / 1000.0;
506
507 opts->minburst = minburst;
508 opts->maxburst = maxburst;
509 opts->ns_per_byte = (u_int) nsPerByte;
510 opts->maxidle = (u_int) fabs(maxidle);
511 opts->minidle = (int)minidle;
512 opts->offtime = (u_int) fabs(offtime);
513
514 return (0);
515}
516
517static int
518check_commit_cbq(int dev, int opts, struct pf_altq *pa)
519{
520 struct pf_altq *altq;
521 int root_class, default_class;
522 int error = 0;
523
524 /*
525 * check if cbq has one root queue and one default queue
526 * for this interface
527 */
528 root_class = default_class = 0;
529 TAILQ_FOREACH(altq, &altqs, entries) {
530 if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
531 continue;
532 if (altq->qname[0] == 0) /* this is for interface */
533 continue;
534 if (altq->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS)
535 root_class++;
536 if (altq->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS)
537 default_class++;
538 }
539 if (root_class != 1) {
540 warnx("should have one root queue on %s", pa->ifname);
541 error++;
542 }
543 if (default_class != 1) {
544 warnx("should have one default queue on %s", pa->ifname);
545 error++;
546 }
547 return (error);
548}
549
550static int
551print_cbq_opts(const struct pf_altq *a)
552{
553 const struct cbq_opts *opts;
554
555 opts = &a->pq_u.cbq_opts;
556 if (opts->flags) {
557 printf("cbq(");
558 if (opts->flags & CBQCLF_RED)
559 printf(" red");
560 if (opts->flags & CBQCLF_ECN)
561 printf(" ecn");
562 if (opts->flags & CBQCLF_RIO)
563 printf(" rio");
564 if (opts->flags & CBQCLF_CLEARDSCP)
565 printf(" cleardscp");
566 if (opts->flags & CBQCLF_FLOWVALVE)
567 printf(" flowvalve");
568 if (opts->flags & CBQCLF_BORROW)
569 printf(" borrow");
570 if (opts->flags & CBQCLF_WRR)
571 printf(" wrr");
572 if (opts->flags & CBQCLF_EFFICIENT)
573 printf(" efficient");
574 if (opts->flags & CBQCLF_ROOTCLASS)
575 printf(" root");
576 if (opts->flags & CBQCLF_DEFCLASS)
577 printf(" default");
578 printf(" ) ");
579
580 return (1);
581 } else
582 return (0);
583}
584
585/*
586 * PRIQ support functions
587 */
588static int
589eval_pfqueue_priq(struct pfctl *pf, struct pf_altq *pa)
590{
591 struct pf_altq *altq;
592
593 if (pa->priority >= PRIQ_MAXPRI) {
594 warnx("priority out of range: max %d", PRIQ_MAXPRI - 1);
595 return (-1);
596 }
597 /* the priority should be unique for the interface */
598 TAILQ_FOREACH(altq, &altqs, entries) {
599 if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) == 0 &&
600 altq->qname[0] != 0 && altq->priority == pa->priority) {
601 warnx("%s and %s have the same priority",
602 altq->qname, pa->qname);
603 return (-1);
604 }
605 }
606
607 if (pa->qid == 0)
608 pa->qid = ++max_qid;
609
610 return (0);
611}
612
613static int
614check_commit_priq(int dev, int opts, struct pf_altq *pa)
615{
616 struct pf_altq *altq;
617 int default_class;
618 int error = 0;
619
620 /*
621 * check if priq has one default class for this interface
622 */
623 default_class = 0;
624 TAILQ_FOREACH(altq, &altqs, entries) {
625 if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
626 continue;
627 if (altq->qname[0] == 0) /* this is for interface */
628 continue;
629 if (altq->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS)
630 default_class++;
631 }
632 if (default_class != 1) {
633 warnx("should have one default queue on %s", pa->ifname);
634 error++;
635 }
636 return (error);
637}
638
639static int
640print_priq_opts(const struct pf_altq *a)
641{
642 const struct priq_opts *opts;
643
644 opts = &a->pq_u.priq_opts;
645
646 if (opts->flags) {
647 printf("priq(");
648 if (opts->flags & PRCF_RED)
649 printf(" red");
650 if (opts->flags & PRCF_ECN)
651 printf(" ecn");
652 if (opts->flags & PRCF_RIO)
653 printf(" rio");
654 if (opts->flags & PRCF_CLEARDSCP)
655 printf(" cleardscp");
656 if (opts->flags & PRCF_DEFAULTCLASS)
657 printf(" default");
658 printf(" ) ");
659
660 return (1);
661 } else
662 return (0);
663}
664
665/*
666 * HFSC support functions
667 */
668static int
669eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa)
670{
671 struct pf_altq *altq, *parent;
672 struct hfsc_opts *opts;
673 struct service_curve sc;
674
675 opts = &pa->pq_u.hfsc_opts;
676
677 if (pa->parent[0] == 0) {
678 /* root queue */
679 pa->qid = HFSC_ROOTCLASS_HANDLE;
680 opts->lssc_m1 = pa->ifbandwidth;
681 opts->lssc_m2 = pa->ifbandwidth;
682 opts->lssc_d = 0;
683 return (0);
684 } else if (pa->qid == 0)
685 pa->qid = ++max_qid;
686
687 LIST_INIT(&rtsc);
688 LIST_INIT(&lssc);
689
690 /* if link_share is not specified, use bandwidth */
691 if (opts->lssc_m2 == 0)
692 opts->lssc_m2 = pa->bandwidth;
693
694 if ((opts->rtsc_m1 > 0 && opts->rtsc_m2 == 0) ||
695 (opts->lssc_m1 > 0 && opts->lssc_m2 == 0) ||
696 (opts->ulsc_m1 > 0 && opts->ulsc_m2 == 0)) {
697 warnx("m2 is zero for %s", pa->qname);
698 return (-1);
699 }
700
701 if ((opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0) ||
702 (opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0) ||
703 (opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0)) {
704 warnx("m1 must be zero for convex curve: %s", pa->qname);
705 return (-1);
706 }
707
708 /*
709 * admission control:
710 * for the real-time service curve, the sum of the service curves
711 * should not exceed 80% of the interface bandwidth. 20% is reserved
712 * not to over-commit the actual interface bandwidth.
713 * for the link-sharing service curve, the sum of the child service
714 * curve should not exceed the parent service curve.
715 * for the upper-limit service curve, the assigned bandwidth should
716 * be smaller than the interface bandwidth, and the upper-limit should
717 * be larger than the real-time service curve when both are defined.
718 */
719 parent = qname_to_pfaltq(pa->parent, pa->ifname);
720 if (parent == NULL)
721 errx(1, "parent %s not found for %s", pa->parent, pa->qname);
722
723 TAILQ_FOREACH(altq, &altqs, entries) {
724 if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
725 continue;
726 if (altq->qname[0] == 0) /* this is for interface */
727 continue;
728
729 /* if the class has a real-time service curve, add it. */
730 if (opts->rtsc_m2 != 0 && altq->pq_u.hfsc_opts.rtsc_m2 != 0) {
731 sc.m1 = altq->pq_u.hfsc_opts.rtsc_m1;
732 sc.d = altq->pq_u.hfsc_opts.rtsc_d;
733 sc.m2 = altq->pq_u.hfsc_opts.rtsc_m2;
734 gsc_add_sc(&rtsc, &sc);
735 }
736
737 if (strncmp(altq->parent, pa->parent, PF_QNAME_SIZE) != 0)
738 continue;
739
740 /* if the class has a link-sharing service curve, add it. */
741 if (opts->lssc_m2 != 0 && altq->pq_u.hfsc_opts.lssc_m2 != 0) {
742 sc.m1 = altq->pq_u.hfsc_opts.lssc_m1;
743 sc.d = altq->pq_u.hfsc_opts.lssc_d;
744 sc.m2 = altq->pq_u.hfsc_opts.lssc_m2;
745 gsc_add_sc(&lssc, &sc);
746 }
747 }
748
749 /* check the real-time service curve. reserve 20% of interface bw */
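	/*
	 * sc is a flat curve at 80% of the interface bandwidth (m1 = 0,
	 * d = 0); the summed real-time curves collected in rtsc above
	 * must stay under it.
	 */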
750 if (opts->rtsc_m2 != 0) {
751 sc.m1 = 0;
752 sc.d = 0;
753 sc.m2 = pa->ifbandwidth / 100 * 80;
754 if (!is_gsc_under_sc(&rtsc, &sc)) {
755 warnx("real-time sc exceeds the interface bandwidth");
756 goto err_ret;
757 }
758 }
759
760 /* check the link-sharing service curve. */
761 if (opts->lssc_m2 != 0) {
762 sc.m1 = parent->pq_u.hfsc_opts.lssc_m1;
763 sc.d = parent->pq_u.hfsc_opts.lssc_d;
764 sc.m2 = parent->pq_u.hfsc_opts.lssc_m2;
765 if (!is_gsc_under_sc(&lssc, &sc)) {
766 warnx("link-sharing sc exceeds parent's sc");
767 goto err_ret;
768 }
769 }
770
771 /* check the upper-limit service curve. */
772 if (opts->ulsc_m2 != 0) {
773 if (opts->ulsc_m1 > pa->ifbandwidth ||
774 opts->ulsc_m2 > pa->ifbandwidth) {
775 warnx("upper-limit larger than interface bandwidth");
776 goto err_ret;
777 }
778 if (opts->rtsc_m2 != 0 && opts->rtsc_m2 > opts->ulsc_m2) {
779 warnx("upper-limit sc smaller than real-time sc");
780 goto err_ret;
781 }
782 }
783
784 gsc_destroy(&rtsc);
785 gsc_destroy(&lssc);
786
787 return (0);
788
789err_ret:
790 gsc_destroy(&rtsc);
791 gsc_destroy(&lssc);
792 return (-1);
793}
794
795static int
796check_commit_hfsc(int dev, int opts, struct pf_altq *pa)
797{
798 struct pf_altq *altq, *def = NULL;
799 int default_class;
800 int error = 0;
801
802 /* check if hfsc has one default queue for this interface */
803 default_class = 0;
804 TAILQ_FOREACH(altq, &altqs, entries) {
805 if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
806 continue;
807 if (altq->qname[0] == 0) /* this is for interface */
808 continue;
809 if (altq->parent[0] == 0) /* dummy root */
810 continue;
811 if (altq->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) {
812 default_class++;
813 def = altq;
814 }
815 }
816 if (default_class != 1) {
817 warnx("should have one default queue on %s", pa->ifname);
818 return (1);
819 }
820 /* make sure the default queue is a leaf */
821 TAILQ_FOREACH(altq, &altqs, entries) {
822 if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
823 continue;
824 if (altq->qname[0] == 0) /* this is for interface */
825 continue;
826 if (strncmp(altq->parent, def->qname, PF_QNAME_SIZE) == 0) {
827 warnx("default queue is not a leaf");
828 error++;
829 }
830 }
831 return (error);
832}
833
834static int
835print_hfsc_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
836{
837 const struct hfsc_opts *opts;
838 const struct node_hfsc_sc *rtsc, *lssc, *ulsc;
839
840 opts = &a->pq_u.hfsc_opts;
841 if (qopts == NULL)
842 rtsc = lssc = ulsc = NULL;
843 else {
844 rtsc = &qopts->data.hfsc_opts.realtime;
845 lssc = &qopts->data.hfsc_opts.linkshare;
846 ulsc = &qopts->data.hfsc_opts.upperlimit;
847 }
848
849 if (opts->flags || opts->rtsc_m2 != 0 || opts->ulsc_m2 != 0 ||
850 (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
851 opts->lssc_d != 0))) {
852 printf("hfsc(");
853 if (opts->flags & HFCF_RED)
854 printf(" red");
855 if (opts->flags & HFCF_ECN)
856 printf(" ecn");
857 if (opts->flags & HFCF_RIO)
858 printf(" rio");
859 if (opts->flags & HFCF_CLEARDSCP)
860 printf(" cleardscp");
861 if (opts->flags & HFCF_DEFAULTCLASS)
862 printf(" default");
863 if (opts->rtsc_m2 != 0)
864 print_hfsc_sc("realtime", opts->rtsc_m1, opts->rtsc_d,
865 opts->rtsc_m2, rtsc);
866 if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
867 opts->lssc_d != 0))
868 print_hfsc_sc("linkshare", opts->lssc_m1, opts->lssc_d,
869 opts->lssc_m2, lssc);
870 if (opts->ulsc_m2 != 0)
871 print_hfsc_sc("upperlimit", opts->ulsc_m1, opts->ulsc_d,
872 opts->ulsc_m2, ulsc);
873 printf(" ) ");
874
875 return (1);
876 } else
877 return (0);
878}
879
880/*
881 * admission control using generalized service curve
882 */
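/*
 * A gen_sc is a list of segments kept sorted by x: each segment starts
 * at (x, y) and runs for d along the x axis with slope m.  The list is
 * terminated by a sentinel entry at x = INFINITY (see gsc_getentry()).
 */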
883#if defined(__FreeBSD__)
884#if defined(INFINITY)
885#undef INFINITY
886#endif
887#define INFINITY HUGE_VAL /* positive infinity defined in <math.h> */
888#else
889#define INFINITY HUGE_VAL /* positive infinity defined in <math.h> */
890#endif
891
892/* add a new service curve to a generalized service curve */
893static void
894gsc_add_sc(struct gen_sc *gsc, struct service_curve *sc)
895{
896 if (is_sc_null(sc))
897 return;
898 if (sc->d != 0)
899 gsc_add_seg(gsc, 0.0, 0.0, (double)sc->d, (double)sc->m1);
900 gsc_add_seg(gsc, (double)sc->d, 0.0, INFINITY, (double)sc->m2);
901}
902
903/*
904 * check whether all points of a generalized service curve have
905 * their y-coordinates no larger than a given two-piece linear
906 * service curve.
907 */
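/*
 * Both curves are piecewise linear, so checking gsc's breakpoints, the
 * point x = sc->d, and the final slopes is sufficient.
 */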
908static int
909is_gsc_under_sc(struct gen_sc *gsc, struct service_curve *sc)
910{
911 struct segment *s, *last, *end;
912 double y;
913
914 if (is_sc_null(sc)) {
915 if (LIST_EMPTY(gsc))
916 return (1);
917 LIST_FOREACH(s, gsc, _next) {
918 if (s->m != 0)
919 return (0);
920 }
921 return (1);
922 }
923 /*
924 * gsc has a dummy entry at the end with x = INFINITY.
925 * loop through up to this dummy entry.
926 */
927 end = gsc_getentry(gsc, INFINITY);
928 if (end == NULL)
929 return (1);
930 last = NULL;
931 for (s = LIST_FIRST(gsc); s != end; s = LIST_NEXT(s, _next)) {
932 if (s->y > sc_x2y(sc, s->x))
933 return (0);
934 last = s;
935 }
936 /* last now holds the real last segment */
937 if (last == NULL)
938 return (1);
939 if (last->m > sc->m2)
940 return (0);
941 if (last->x < sc->d && last->m > sc->m1) {
942 y = last->y + (sc->d - last->x) * last->m;
943 if (y > sc_x2y(sc, sc->d))
944 return (0);
945 }
946 return (1);
947}
948
949static void
950gsc_destroy(struct gen_sc *gsc)
951{
952 struct segment *s;
953
954 while ((s = LIST_FIRST(gsc)) != NULL) {
955 LIST_REMOVE(s, _next);
956 free(s);
957 }
958}
959
960/*
961 * return a segment entry starting at x.
962 * if gsc has no entry starting at x, a new entry is created at x.
963 */
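/*
 * The list stays sorted by x; a new entry splits its predecessor in two
 * and inherits that segment's slope, so inserting an entry does not
 * change the shape of the curve.
 */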
964static struct segment *
965gsc_getentry(struct gen_sc *gsc, double x)
966{
967 struct segment *new, *prev, *s;
968
969 prev = NULL;
970 LIST_FOREACH(s, gsc, _next) {
971 if (s->x == x)
972 return (s); /* matching entry found */
973 else if (s->x < x)
974 prev = s;
975 else
976 break;
977 }
978
979 /* we have to create a new entry */
980 if ((new = calloc(1, sizeof(struct segment))) == NULL)
981 return (NULL);
982
983 new->x = x;
984 if (x == INFINITY || s == NULL)
985 new->d = 0;
986 else if (s->x == INFINITY)
987 new->d = INFINITY;
988 else
989 new->d = s->x - x;
990 if (prev == NULL) {
991 /* insert the new entry at the head of the list */
992 new->y = 0;
993 new->m = 0;
994 LIST_INSERT_HEAD(gsc, new, _next);
995 } else {
996 /*
997 * the start point intersects with the segment pointed by
998 * prev. divide prev into 2 segments
999 */
1000 if (x == INFINITY) {
1001 prev->d = INFINITY;
1002 if (prev->m == 0)
1003 new->y = prev->y;
1004 else
1005 new->y = INFINITY;
1006 } else {
1007 prev->d = x - prev->x;
1008 new->y = prev->d * prev->m + prev->y;
1009 }
1010 new->m = prev->m;
1011 LIST_INSERT_AFTER(prev, new, _next);
1012 }
1013 return (new);
1014}
1015
1016/* add a segment to a generalized service curve */
1017static int
1018gsc_add_seg(struct gen_sc *gsc, double x, double y, double d, double m)
1019{
1020 struct segment *start, *end, *s;
1021 double x2;
1022
1023 if (d == INFINITY)
1024 x2 = INFINITY;
1025 else
1026 x2 = x + d;
1027 start = gsc_getentry(gsc, x);
1028 end = gsc_getentry(gsc, x2);
1029 if (start == NULL || end == NULL)
1030 return (-1);
1031
1032 for (s = start; s != end; s = LIST_NEXT(s, _next)) {
1033 s->m += m;
1034 s->y += y + (s->x - x) * m;
1035 }
1036
1037 end = gsc_getentry(gsc, INFINITY);
1038 for (; s != end; s = LIST_NEXT(s, _next)) {
1039 s->y += m * d;
1040 }
1041
1042 return (0);
1043}
1044
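A short worked trace of the two loops above, with made-up numbers:

/*
 * Worked trace (hypothetical numbers), starting from an empty gsc:
 *
 *	gsc_add_seg(gsc, 0, 0, 10, 2);
 *	    segments: [0,10) m=2 y(0)=0, [10,inf) m=0 y(10)=20
 *	    (the second loop adds m*d = 2*10 to every segment past x2)
 *	gsc_add_seg(gsc, 0, 0, INFINITY, 1);
 *	    segments: [0,10) m=3 y(0)=0, [10,inf) m=1 y(10)=30
 *
 * i.e. the generalized curve is now y = 3x up to x = 10 and
 * y = 30 + (x - 10) from there on.
 */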
1045/* get y-projection of a service curve */
1046static double
1047sc_x2y(struct service_curve *sc, double x)
1048{
1049 double y;
1050
1051 if (x <= (double)sc->d)
1052 /* y belongs to the 1st segment */
1053 y = x * (double)sc->m1;
1054 else
1055 /* y belongs to the 2nd segment */
1056 y = (double)sc->d * (double)sc->m1
1057 + (x - (double)sc->d) * (double)sc->m2;
1058 return (y);
1059}
1060
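A worked example of the two-piece formula, with hypothetical values (x in the
same units as sc->d):

/*
 * Example: sc = { m1 = 2000000, d = 50, m2 = 1000000 }
 *
 *	sc_x2y(sc, 20) = 20 * 2000000                  = 40000000
 *	sc_x2y(sc, 80) = 50 * 2000000 + 30 * 1000000   = 130000000
 */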
1061/*
1062 * misc utilities
1063 */
1064#define R2S_BUFS 8
1065#define RATESTR_MAX 16
1066
1067char *
1068rate2str(double rate)
1069{
1070 char *buf;
1071 static char r2sbuf[R2S_BUFS][RATESTR_MAX]; /* ring buffer */
1072 static int idx = 0;
1073 int i;
1074 static const char unit[] = " KMG";
1075
1076 buf = r2sbuf[idx++];
1077 if (idx == R2S_BUFS)
1078 idx = 0;
1079
1080 for (i = 0; rate >= 1000 && i <= 3; i++)
1081 rate /= 1000;
1082
1083 if ((int)(rate * 100) % 100)
1084 snprintf(buf, RATESTR_MAX, "%.2f%cb", rate, unit[i]);
1085 else
1086 snprintf(buf, RATESTR_MAX, "%d%cb", (int)rate, unit[i]);
1087
1088 return (buf);
1089}
1090
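Some sample conversions; the eight static buffers exist so that several results
can be used in a single printf() call without overwriting each other:

/*
 * Sample results:
 *
 *	rate2str(250.0)       ->  "250 b"	(unit[0] is a space)
 *	rate2str(1000.0)      ->  "1Kb"
 *	rate2str(1536000.0)   ->  "1.54Mb"
 *	rate2str(2000000.0)   ->  "2Mb"
 */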
1091#if defined(__FreeBSD__)
1092/*
1093 * XXX
1094 * FreeBSD does not have SIOCGIFDATA.
1095 * To emulate it, the DIOCGIFSPEED ioctl was added to pf.
1096 */
1097u_int32_t
1098getifspeed(int pfdev, char *ifname)
1099{
1100 struct pf_ifspeed io;
1101
1102 bzero(&io, sizeof io);
1103 if (strlcpy(io.ifname, ifname, IFNAMSIZ) >=
1104 sizeof(io.ifname))
1105 errx(1, "getifspeed: strlcpy");
1106 if (ioctl(pfdev, DIOCGIFSPEED, &io) == -1)
1107 err(1, "DIOCGIFSPEED");
1108 return ((u_int32_t)io.baudrate);
1109}
1110#else
1111u_int32_t
1112getifspeed(char *ifname)
1113{
1114 int s;
1115 struct ifreq ifr;
1116 struct if_data ifrdat;
1117
1118 if ((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
1119 err(1, "socket");
1120 if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
1121 sizeof(ifr.ifr_name))
1122 errx(1, "getifspeed: strlcpy");
1123 ifr.ifr_data = (caddr_t)&ifrdat;
1124 if (ioctl(s, SIOCGIFDATA, (caddr_t)&ifr) == -1)
1125 err(1, "SIOCGIFDATA");
1126 if (shutdown(s, SHUT_RDWR) == -1)
1127 err(1, "shutdown");
1128 if (close(s))
1129 err(1, "close");
1130 return ((u_int32_t)ifrdat.ifi_baudrate);
1131}
1132#endif
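A minimal sketch of calling the FreeBSD variant; opening /dev/pf with O_RDWR
(via <fcntl.h>) and the interface name "em0" are illustrative assumptions, not
part of this file:

/*
 *	int pfdev;
 *
 *	if ((pfdev = open("/dev/pf", O_RDWR)) == -1)
 *		err(1, "open(/dev/pf)");
 *	printf("em0 speed: %s\n",
 *	    rate2str((double)getifspeed(pfdev, "em0")));
 *
 * The OpenBSD variant in the #else branch above takes only the interface
 * name and queries SIOCGIFDATA on a scratch socket instead.
 */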
1133
1134u_long
1135getifmtu(char *ifname)
1136{
1137 int s;
1138 struct ifreq ifr;
1139
1140 if ((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
1141 err(1, "socket");
1142 if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
1143 sizeof(ifr.ifr_name))
1144 errx(1, "getifmtu: strlcpy");
1145 if (ioctl(s, SIOCGIFMTU, (caddr_t)&ifr) == -1)
1146 err(1, "SIOCGIFMTU");
1147 if (shutdown(s, SHUT_RDWR) == -1)
1148 err(1, "shutdown");
1149 if (close(s))
1150 err(1, "close");
1151 if (ifr.ifr_mtu > 0)
1152 return (ifr.ifr_mtu);
1153 else {
1154 warnx("could not get mtu for %s, assuming 1500", ifname);
1155 return (1500);
1156 }
1157}
1158
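A hypothetical call, for completeness:

/*
 *	u_long mtu = getifmtu("em0");
 *
 * returns the interface MTU, or 1500 with a warning if the SIOCGIFMTU
 * result is not positive.
 */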
1159int
1160eval_queue_opts(struct pf_altq *pa, struct node_queue_opt *opts,
1161 u_int32_t ref_bw)
1162{
1163 int errors = 0;
1164
1165 switch (pa->scheduler) {
1166 case ALTQT_CBQ:
1167 pa->pq_u.cbq_opts = opts->data.cbq_opts;
1168 break;
1169 case ALTQT_PRIQ:
1170 pa->pq_u.priq_opts = opts->data.priq_opts;
1171 break;
1172 case ALTQT_HFSC:
1173 pa->pq_u.hfsc_opts.flags = opts->data.hfsc_opts.flags;
1174 if (opts->data.hfsc_opts.linkshare.used) {
1175 pa->pq_u.hfsc_opts.lssc_m1 =
1176 eval_bwspec(&opts->data.hfsc_opts.linkshare.m1,
1177 ref_bw);
1178 pa->pq_u.hfsc_opts.lssc_m2 =
1179 eval_bwspec(&opts->data.hfsc_opts.linkshare.m2,
1180 ref_bw);
1181 pa->pq_u.hfsc_opts.lssc_d =
1182 opts->data.hfsc_opts.linkshare.d;
1183 }
1184 if (opts->data.hfsc_opts.realtime.used) {
1185 pa->pq_u.hfsc_opts.rtsc_m1 =
1186 eval_bwspec(&opts->data.hfsc_opts.realtime.m1,
1187 ref_bw);
1188 pa->pq_u.hfsc_opts.rtsc_m2 =
1189 eval_bwspec(&opts->data.hfsc_opts.realtime.m2,
1190 ref_bw);
1191 pa->pq_u.hfsc_opts.rtsc_d =
1192 opts->data.hfsc_opts.realtime.d;
1193 }
1194 if (opts->data.hfsc_opts.upperlimit.used) {
1195 pa->pq_u.hfsc_opts.ulsc_m1 =
1196 eval_bwspec(&opts->data.hfsc_opts.upperlimit.m1,
1197 ref_bw);
1198 pa->pq_u.hfsc_opts.ulsc_m2 =
1199 eval_bwspec(&opts->data.hfsc_opts.upperlimit.m2,
1200 ref_bw);
1201 pa->pq_u.hfsc_opts.ulsc_d =
1202 opts->data.hfsc_opts.upperlimit.d;
1203 }
1204 break;
1205 default:
1206 warnx("eval_queue_opts: unknown scheduler type %u",
1207 opts->qtype);
1208 errors++;
1209 break;
1210 }
1211
1212 return (errors);
1213}
1214
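A worked example of the HFSC branch above, assuming hypothetical parser output:

/*
 * Example (hypothetical numbers): with ref_bw = 10000000 and a parsed
 * linkshare of { used = 1, m2.bw_percent = 50, d = 0 }, the HFSC branch
 * stores
 *
 *	pa->pq_u.hfsc_opts.lssc_m2 = eval_bwspec(...) = 10000000 / 100 * 50
 *	                           = 5000000
 *	pa->pq_u.hfsc_opts.lssc_d  = 0
 */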
1215u_int32_t
1216eval_bwspec(struct node_queue_bw *bw, u_int32_t ref_bw)
1217{
1218 if (bw->bw_absolute > 0)
1219 return (bw->bw_absolute);
1220
1221 if (bw->bw_percent > 0)
1222 return (ref_bw / 100 * bw->bw_percent);
1223
1224 return (0);
1225}
1226
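Dividing ref_bw by 100 before multiplying keeps the intermediate result within
a u_int32_t on fast links, at the cost of truncating ref_bw to a multiple of
100; for example:

/*
 * Example: ref_bw = 1000000000 (a 1Gb link), bw_percent = 25
 *
 *	1000000000 / 100 * 25 = 250000000	fits in 32 bits
 *	1000000000 * 25       = 25000000000	would overflow a u_int32_t
 */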
1227void
1228print_hfsc_sc(const char *scname, u_int m1, u_int d, u_int m2,
1229 const struct node_hfsc_sc *sc)
1230{
1231 printf(" %s", scname);
1232
1233 if (d != 0) {
1234 printf("(");
1235 if (sc != NULL && sc->m1.bw_percent > 0)
1236 printf("%u%%", sc->m1.bw_percent);
1237 else
1238 printf("%s", rate2str((double)m1));
1239 printf(" %u", d);
1240 }
1241
1242 if (sc != NULL && sc->m2.bw_percent > 0)
1243 printf(" %u%%", sc->m2.bw_percent);
1244 else
1245 printf(" %s", rate2str((double)m2));
1246
1247 if (d != 0)
1248 printf(")");
1249}
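Sample output of the printer above, with hypothetical arguments:

/*
 *	print_hfsc_sc("linkshare", 2000000, 50, 1000000, NULL)
 *	    -> " linkshare(2Mb 50 1Mb)"
 *	print_hfsc_sc("realtime", 0, 0, 500000, NULL)
 *	    -> " realtime 500Kb"
 */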