/* ifq.h — FreeBSD revision 266974 */
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: @(#)if.h	8.1 (Berkeley) 6/10/93
 * $FreeBSD: head/sys/net/ifq.h 266974 2014-06-02 17:54:39Z marcel $
 */

#ifndef	_NET_IFQ_H_
#define	_NET_IFQ_H_

#ifdef _KERNEL
#include <sys/mbuf.h>		/* ifqueue only? */
#include <sys/buf_ring.h>
#include <net/vnet.h>
#endif /* _KERNEL */
#include <sys/lock.h>		/* XXX */
#include <sys/mutex.h>		/* struct ifqueue */

#define	IF_DUNIT_NONE	-1

#include <altq/if_altq.h>

/*
 * Structure defining a queue for a network interface.
 *
 * Packets are chained through their m_nextpkt field; ifq_head/ifq_tail
 * delimit the chain.  ifq_mtx protects all fields; the _IF_*() macros
 * below assume the caller holds it, the IF_*() wrappers take it.
 */
struct ifqueue {
	struct	mbuf *ifq_head;		/* first packet, or NULL if empty */
	struct	mbuf *ifq_tail;		/* last packet, or NULL if empty */
	int	ifq_len;		/* current number of queued packets */
	int	ifq_maxlen;		/* limit enforced by _IF_QFULL() */
	int	ifq_drops;		/* drop statistics counter */
	struct	mtx ifq_mtx;		/* protects all of the above */
};

#ifdef _KERNEL
/*
 * Output queues (ifp->if_snd) and slow device input queues (*ifp->if_slowq)
 * are queues of messages stored on ifqueue structures
 * (defined above).  Entries are added to and deleted from these structures
 * by these macros.
 *
 * Naming convention: a leading underscore (_IF_*) means "lock already held
 * by the caller"; the corresponding IF_* macro wraps the same operation in
 * IF_LOCK()/IF_UNLOCK().
 */
#define	IF_LOCK(ifq)		mtx_lock(&(ifq)->ifq_mtx)
#define	IF_UNLOCK(ifq)		mtx_unlock(&(ifq)->ifq_mtx)
#define	IF_LOCK_ASSERT(ifq)	mtx_assert(&(ifq)->ifq_mtx, MA_OWNED)
#define	_IF_QFULL(ifq)		((ifq)->ifq_len >= (ifq)->ifq_maxlen)
#define	_IF_DROP(ifq)		((ifq)->ifq_drops++)
#define	_IF_QLEN(ifq)		((ifq)->ifq_len)

/*
 * Append mbuf 'm' to the tail of the queue.  Caller holds the queue lock.
 * NOTE(review): 'm' is used unparenthesized in several expansions below;
 * callers always pass a plain variable, but macro hygiene would want (m).
 */
#define	_IF_ENQUEUE(ifq, m) do {				\
	(m)->m_nextpkt = NULL;					\
	if ((ifq)->ifq_tail == NULL)				\
		(ifq)->ifq_head = m;				\
	else							\
		(ifq)->ifq_tail->m_nextpkt = m;			\
	(ifq)->ifq_tail = m;					\
	(ifq)->ifq_len++;					\
} while (0)

#define	IF_ENQUEUE(ifq, m) do {					\
	IF_LOCK(ifq);						\
	_IF_ENQUEUE(ifq, m);					\
	IF_UNLOCK(ifq);						\
} while (0)

/* Insert mbuf 'm' at the head of the queue.  Caller holds the queue lock. */
#define	_IF_PREPEND(ifq, m) do {				\
	(m)->m_nextpkt = (ifq)->ifq_head;			\
	if ((ifq)->ifq_tail == NULL)				\
		(ifq)->ifq_tail = (m);				\
	(ifq)->ifq_head = (m);					\
	(ifq)->ifq_len++;					\
} while (0)

#define	IF_PREPEND(ifq, m) do {					\
	IF_LOCK(ifq);						\
	_IF_PREPEND(ifq, m);					\
	IF_UNLOCK(ifq);						\
} while (0)

/*
 * Unlink the head packet into 'm' ('m' is NULL if the queue is empty).
 * Caller holds the queue lock.
 */
#define	_IF_DEQUEUE(ifq, m) do {				\
	(m) = (ifq)->ifq_head;					\
	if (m) {						\
		if (((ifq)->ifq_head = (m)->m_nextpkt) == NULL)	\
			(ifq)->ifq_tail = NULL;			\
		(m)->m_nextpkt = NULL;				\
		(ifq)->ifq_len--;				\
	}							\
} while (0)

#define	IF_DEQUEUE(ifq, m) do {					\
	IF_LOCK(ifq);						\
	_IF_DEQUEUE(ifq, m);					\
	IF_UNLOCK(ifq);						\
} while (0)

/*
 * Detach the whole packet chain into 'm' and leave the queue empty.
 * The packets remain linked through m_nextpkt.  Caller holds the lock.
 */
#define	_IF_DEQUEUE_ALL(ifq, m) do {				\
	(m) = (ifq)->ifq_head;					\
	(ifq)->ifq_head = (ifq)->ifq_tail = NULL;		\
	(ifq)->ifq_len = 0;					\
} while (0)

#define	IF_DEQUEUE_ALL(ifq, m) do {				\
	IF_LOCK(ifq);						\
	_IF_DEQUEUE_ALL(ifq, m);				\
	IF_UNLOCK(ifq);						\
} while (0)

/* Peek at the head packet without removing it (no locking in IF_POLL). */
#define	_IF_POLL(ifq, m)	((m) = (ifq)->ifq_head)
#define	IF_POLL(ifq, m)		_IF_POLL(ifq, m)

/* Dequeue and free every packet on the queue.  Caller holds the lock. */
#define	_IF_DRAIN(ifq) do {					\
	struct mbuf *m;						\
	for (;;) {						\
		_IF_DEQUEUE(ifq, m);				\
		if (m == NULL)					\
			break;					\
		m_freem(m);					\
	}							\
} while (0)

#define	IF_DRAIN(ifq) do {					\
	IF_LOCK(ifq);						\
	_IF_DRAIN(ifq);						\
	IF_UNLOCK(ifq);						\
} while(0)

int	if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp,
	    int adjust);
#define	IF_HANDOFF(ifq, m, ifp)			\
	if_handoff((struct ifqueue *)ifq, m, ifp, 0)
#define	IF_HANDOFF_ADJ(ifq, m, ifp, adj)	\
	if_handoff((struct ifqueue *)ifq, m, ifp, adj)

void	if_start(struct ifnet *);

/*
 * The IFQ_* macros below route the operation through ALTQ when it is
 * enabled on the queue, and fall back to the plain _IF_* list operations
 * otherwise.  They take the queue lock themselves unless named *_NOLOCK.
 */

/* Enqueue 'm', setting 'err' to 0 or ENOBUFS; 'm' is freed on failure. */
#define	IFQ_ENQUEUE(ifq, m, err)				\
do {								\
	IF_LOCK(ifq);						\
	if (ALTQ_IS_ENABLED(ifq))				\
		ALTQ_ENQUEUE(ifq, m, NULL, err);		\
	else {							\
		if (_IF_QFULL(ifq)) {				\
			m_freem(m);				\
			(err) = ENOBUFS;			\
		} else {					\
			_IF_ENQUEUE(ifq, m);			\
			(err) = 0;				\
		}						\
	}							\
	if (err)						\
		(ifq)->ifq_drops++;				\
	IF_UNLOCK(ifq);						\
} while (0)

/* Dequeue via token bucket regulator, ALTQ, or the plain list, in that order. */
#define	IFQ_DEQUEUE_NOLOCK(ifq, m)				\
do {								\
	if (TBR_IS_ENABLED(ifq))				\
		(m) = tbr_dequeue_ptr(ifq, ALTDQ_REMOVE);	\
	else if (ALTQ_IS_ENABLED(ifq))				\
		ALTQ_DEQUEUE(ifq, m);				\
	else							\
		_IF_DEQUEUE(ifq, m);				\
} while (0)

#define	IFQ_DEQUEUE(ifq, m)					\
do {								\
	IF_LOCK(ifq);						\
	IFQ_DEQUEUE_NOLOCK(ifq, m);				\
	IF_UNLOCK(ifq);						\
} while (0)

/* Non-destructive peek; same dispatch order as IFQ_DEQUEUE_NOLOCK. */
#define	IFQ_POLL_NOLOCK(ifq, m)					\
do {								\
	if (TBR_IS_ENABLED(ifq))				\
		(m) = tbr_dequeue_ptr(ifq, ALTDQ_POLL);		\
	else if (ALTQ_IS_ENABLED(ifq))				\
		ALTQ_POLL(ifq, m);				\
	else							\
		_IF_POLL(ifq, m);				\
} while (0)

#define	IFQ_POLL(ifq, m)					\
do {								\
	IF_LOCK(ifq);						\
	IFQ_POLL_NOLOCK(ifq, m);				\
	IF_UNLOCK(ifq);						\
} while (0)

/* Discard all queued packets (ALTQ purge or plain drain). */
#define	IFQ_PURGE_NOLOCK(ifq)					\
do {								\
	if (ALTQ_IS_ENABLED(ifq)) {				\
		ALTQ_PURGE(ifq);				\
	} else							\
		_IF_DRAIN(ifq);					\
} while (0)

#define	IFQ_PURGE(ifq)						\
do {								\
	IF_LOCK(ifq);						\
	IFQ_PURGE_NOLOCK(ifq);					\
	IF_UNLOCK(ifq);						\
} while (0)

#define	IFQ_SET_READY(ifq)					\
	do { ((ifq)->altq_flags |= ALTQF_READY); } while (0)

#define	IFQ_LOCK(ifq)			IF_LOCK(ifq)
#define	IFQ_UNLOCK(ifq)			IF_UNLOCK(ifq)
#define	IFQ_LOCK_ASSERT(ifq)		IF_LOCK_ASSERT(ifq)
#define	IFQ_IS_EMPTY(ifq)		((ifq)->ifq_len == 0)
#define	IFQ_INC_LEN(ifq)		((ifq)->ifq_len++)
#define	IFQ_DEC_LEN(ifq)		(--(ifq)->ifq_len)
#define	IFQ_INC_DROPS(ifq)		((ifq)->ifq_drops++)
#define	IFQ_SET_MAXLEN(ifq, len)	((ifq)->ifq_maxlen = (len))

/*
 * The IFF_DRV_OACTIVE test should really occur in the device driver, not in
 * the handoff logic, as that flag is locked by the device driver.
 */
/*
 * Enqueue 'm' on ifp->if_snd and, on success, update the interface byte
 * and multicast counters (len/mflags are sampled before the enqueue, since
 * IFQ_ENQUEUE may free 'm') and kick the driver via if_start() unless it
 * is already active.  'adj' is added to the byte count (e.g. link header).
 */
#define	IFQ_HANDOFF_ADJ(ifp, m, adj, err)			\
do {								\
	int len;						\
	short mflags;						\
								\
	len = (m)->m_pkthdr.len;				\
	mflags = (m)->m_flags;					\
	IFQ_ENQUEUE(&(ifp)->if_snd, m, err);			\
	if ((err) == 0) {					\
		(ifp)->if_obytes += len + (adj);		\
		if (mflags & M_MCAST)				\
			(ifp)->if_omcasts++;			\
		if (((ifp)->if_drv_flags & IFF_DRV_OACTIVE) == 0) \
			if_start(ifp);				\
	}							\
} while (0)

#define	IFQ_HANDOFF(ifp, m, err)				\
	IFQ_HANDOFF_ADJ(ifp, m, 0, err)

/*
 * Driver-side staging queue: ifq_drv_* is a second, unlocked packet list
 * owned by the driver.  When it runs empty, this macro takes the queue
 * lock once and bulk-refills it (up to ifq_drv_maxlen packets) from the
 * locked queue, amortizing the locking cost over many dequeues.
 */
#define	IFQ_DRV_DEQUEUE(ifq, m)					\
do {								\
	(m) = (ifq)->ifq_drv_head;				\
	if (m) {						\
		if (((ifq)->ifq_drv_head = (m)->m_nextpkt) == NULL) \
			(ifq)->ifq_drv_tail = NULL;		\
		(m)->m_nextpkt = NULL;				\
		(ifq)->ifq_drv_len--;				\
	} else {						\
		IFQ_LOCK(ifq);					\
		IFQ_DEQUEUE_NOLOCK(ifq, m);			\
		while ((ifq)->ifq_drv_len < (ifq)->ifq_drv_maxlen) { \
			struct mbuf *m0;			\
			IFQ_DEQUEUE_NOLOCK(ifq, m0);		\
			if (m0 == NULL)				\
				break;				\
			m0->m_nextpkt = NULL;			\
			if ((ifq)->ifq_drv_tail == NULL)	\
				(ifq)->ifq_drv_head = m0;	\
			else					\
				(ifq)->ifq_drv_tail->m_nextpkt = m0; \
			(ifq)->ifq_drv_tail = m0;		\
			(ifq)->ifq_drv_len++;			\
		}						\
		IFQ_UNLOCK(ifq);				\
	}							\
} while (0)

/* Push 'm' back onto the head of the driver staging queue (no locking). */
#define	IFQ_DRV_PREPEND(ifq, m)					\
do {								\
	(m)->m_nextpkt = (ifq)->ifq_drv_head;			\
	if ((ifq)->ifq_drv_tail == NULL)			\
		(ifq)->ifq_drv_tail = (m);			\
	(ifq)->ifq_drv_head = (m);				\
	(ifq)->ifq_drv_len++;					\
} while (0)

/* Empty only when both the staging queue and the locked queue are empty. */
#define	IFQ_DRV_IS_EMPTY(ifq)					\
	(((ifq)->ifq_drv_len == 0) && ((ifq)->ifq_len == 0))

/* Free everything on the staging queue, then purge the locked queue. */
#define	IFQ_DRV_PURGE(ifq)					\
do {								\
	struct mbuf *m, *n = (ifq)->ifq_drv_head;		\
	while((m = n) != NULL) {				\
		n = m->m_nextpkt;				\
		m_freem(m);					\
	}							\
	(ifq)->ifq_drv_head = (ifq)->ifq_drv_tail = NULL;	\
	(ifq)->ifq_drv_len = 0;					\
	IFQ_PURGE(ifq);						\
} while (0)

/*
 * drbr_*: driver transmit-queue helpers backed by a lock-free buf_ring.
 * When the kernel is built with ALTQ and ALTQ is enabled on ifp->if_snd,
 * each helper transparently falls back to the classic IFQ_* path instead
 * of the ring.  The *_sc ring operations are single-consumer: callers
 * must serialize dequeue-side access (typically via the driver tx lock).
 */

/*
 * Enqueue 'm' for transmit.  Returns 0 or an errno; on error 'm' has
 * been freed (both in the ring path and, via IFQ_ENQUEUE, the ALTQ path).
 */
static __inline int
drbr_enqueue(struct ifnet *ifp, struct buf_ring *br, struct mbuf *m)
{
	int error = 0;

#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		IFQ_ENQUEUE(&ifp->if_snd, m, error);
		return (error);
	}
#endif
	error = buf_ring_enqueue(br, m);
	if (error)
		m_freem(m);

	return (error);
}

/* Return a packet obtained from drbr_peek()/drbr_dequeue() to the queue head. */
static __inline void
drbr_putback(struct ifnet *ifp, struct buf_ring *br, struct mbuf *new)
{
	/*
	 * The top of the list needs to be swapped
	 * for this one.
	 */
#ifdef ALTQ
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		/*
		 * Peek in altq case dequeued it
		 * so put it back.
		 */
		IFQ_DRV_PREPEND(&ifp->if_snd, new);
		return;
	}
#endif
	buf_ring_putback_sc(br, new);
}

/*
 * Look at the next packet to transmit.  In the ring case the packet stays
 * on the ring until drbr_advance(); in the ALTQ case it is actually
 * dequeued here (see the comment below), so an unsent packet must be
 * returned with drbr_putback().
 */
static __inline struct mbuf *
drbr_peek(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	struct mbuf *m;
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		/*
		 * Pull it off like a dequeue
		 * since drbr_advance() does nothing
		 * for altq and drbr_putback() will
		 * use the old prepend function.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		return (m);
	}
#endif
	return(buf_ring_peek(br));
}

/* Free every packet queued for transmit (ALTQ queue and/or the ring). */
static __inline void
drbr_flush(struct ifnet *ifp, struct buf_ring *br)
{
	struct mbuf *m;

#ifdef ALTQ
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd))
		IFQ_PURGE(&ifp->if_snd);
#endif
	while ((m = buf_ring_dequeue_sc(br)) != NULL)
		m_freem(m);
}

/* Flush and free the ring itself; 'type' is the malloc type it came from. */
static __inline void
drbr_free(struct buf_ring *br, struct malloc_type *type)
{

	drbr_flush(NULL, br);
	buf_ring_free(br, type);
}

/* Remove and return the next packet to transmit (NULL if empty). */
static __inline struct mbuf *
drbr_dequeue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	struct mbuf *m;

	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		return (m);
	}
#endif
	return (buf_ring_dequeue_sc(br));
}

/* Commit the packet last returned by drbr_peek() (ring case only). */
static __inline void
drbr_advance(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	/* Nothing to do here since peek dequeues in altq case */
	if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd))
		return;
#endif
	return (buf_ring_advance_sc(br));
}


/*
 * Dequeue the next packet only if func(m, arg) returns non-zero for it;
 * otherwise leave it queued and return NULL.
 * NOTE(review): unlike the other drbr_* helpers this one does not guard
 * against ifp == NULL before ALTQ_IS_ENABLED — callers must pass a valid
 * ifp when ALTQ is compiled in.
 */
static __inline struct mbuf *
drbr_dequeue_cond(struct ifnet *ifp, struct buf_ring *br,
    int (*func) (struct mbuf *, void *), void *arg)
{
	struct mbuf *m;
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		IFQ_LOCK(&ifp->if_snd);
		IFQ_POLL_NOLOCK(&ifp->if_snd, m);
		if (m != NULL && func(m, arg) == 0) {
			IFQ_UNLOCK(&ifp->if_snd);
			return (NULL);
		}
		IFQ_DEQUEUE_NOLOCK(&ifp->if_snd, m);
		IFQ_UNLOCK(&ifp->if_snd);
		return (m);
	}
#endif
	m = buf_ring_peek(br);
	if (m == NULL || func(m, arg) == 0)
		return (NULL);

	return (buf_ring_dequeue_sc(br));
}

/* Non-zero when there is nothing queued for transmit. */
static __inline int
drbr_empty(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (IFQ_IS_EMPTY(&ifp->if_snd));
#endif
	return (buf_ring_empty(br));
}

/* Non-zero when the caller should take the enqueue path (ALTQ always does). */
static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (1);
#endif
	return (!buf_ring_empty(br));
}

/* Number of packets currently queued for transmit. */
static __inline int
drbr_inuse(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (ifp->if_snd.ifq_len);
#endif
	return (buf_ring_count(br));
}

extern	int ifqmaxlen;

void	if_qflush(struct ifnet *);
void	ifq_init(struct ifaltq *, struct ifnet *ifp);
void	ifq_delete(struct ifaltq *);

#ifdef DEVICE_POLLING
enum poll_cmd {	POLL_ONLY, POLL_AND_CHECK_STATUS };

typedef	int poll_handler_t(struct ifnet *ifp, enum poll_cmd cmd, int count);
int    ether_poll_register(poll_handler_t *h, struct ifnet *ifp);
int    ether_poll_deregister(struct ifnet *ifp);
/* The following should be temporary, till all drivers use the driver API */
typedef	int poll_handler_drv_t(if_t ifh, enum poll_cmd cmd, int count);
int	ether_poll_register_drv(poll_handler_drv_t *h, if_t ifh);
int	ether_poll_deregister_drv(if_t ifh);
#endif /* DEVICE_POLLING */

#endif /* _KERNEL */
#endif /* !_NET_IFQ_H_ */