kern_poll.c revision 189851
/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_poll.c 189851 2009-03-15 14:21:05Z rwatson $");

#include "opt_route.h"
#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>			/* needed by net/if.h */
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vimage.h>

#include <net/if.h>			/* for IFF_* flags */
#include <net/netisr.h>			/* for NETISR_POLL */
#include <net/route.h>
#include <net/vnet.h>

static void netisr_poll(void);		/* the two netisr handlers */
static void netisr_pollmore(void);
static int poll_switch(SYSCTL_HANDLER_ARGS);

void hardclock_device_poll(void);	/* hook from hardclock */
void ether_poll(int);			/* polling in idle loop */

static struct mtx	poll_mtx;

/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature can register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations.  This command is issued
 *	periodically, but less frequently than POLL_ONLY.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * Polling is enabled and disabled by setting the IFCAP_POLLING flag on
 * the interface.  The driver's ioctl handler should register the
 * interface with the polling code and disable interrupts if the
 * registration was successful.
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.user_frac (between 0 and 100, default 50) sets the share
 * of CPU allocated to user tasks.  CPU is allocated proportionally to
 * the shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
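/*
 * A minimal sketch of the driver side of this interface, compiled out
 * because it is illustration only: the foo(4) driver, its softc and the
 * foo_* helpers are hypothetical names, and the handler is written with
 * a void return to match the way pr[i].handler is invoked below.
 */
#if 0
static void
foo_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct foo_softc *sc = ifp->if_softc;

	foo_rxeof(sc, count);		/* receive at most "count" packets */
	foo_txeof(sc);			/* reclaim completed transmissions */
	if (cmd == POLL_AND_CHECK_STATUS)
		foo_check_status(sc);	/* rare, more expensive work */
}

static int
foo_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct foo_softc *sc = ifp->if_softc;
	int error = 0;

	switch (cmd) {
	case SIOCSIFCAP:
		if ((ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				/* Register first, then disable interrupts. */
				error = ether_poll_register(foo_poll, ifp);
				if (error == 0) {
					foo_disable_intr(sc);
					ifp->if_capenable |= IFCAP_POLLING;
				}
			} else {
				error = ether_poll_deregister(ifp);
				foo_enable_intr(sc);
				ifp->if_capenable &= ~IFCAP_POLLING;
			}
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
#endif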
#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	1000

static uint32_t poll_burst = 5;
static uint32_t poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000 */
static uint32_t poll_each_burst = 5;

SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW, 0,
	"Device polling parameters");

SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RD,
	&poll_burst, 0, "Current polling burst size");

static int poll_burst_max_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_burst_max;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < MIN_POLL_BURST_MAX || val > MAX_POLL_BURST_MAX)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	poll_burst_max = val;
	if (poll_burst > poll_burst_max)
		poll_burst = poll_burst_max;
	if (poll_each_burst > poll_burst_max)
		poll_each_burst = MIN_POLL_BURST_MAX;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, burst_max, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), poll_burst_max_sysctl, "I",
	"Max Polling burst size");

static int poll_each_burst_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_each_burst;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	if (val > poll_burst_max) {
		mtx_unlock(&poll_mtx);
		return (EINVAL);
	}
	poll_each_burst = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, each_burst, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), poll_each_burst_sysctl, "I",
	"Max size of each burst");
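/*
 * The knobs above can be exercised with sysctl(3) from userland; a
 * short standalone example (not kernel code, hence compiled out):
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	uint32_t burst, burst_max = 300;
	size_t len = sizeof(burst);

	/* Values outside MIN_POLL_BURST_MAX..MAX_POLL_BURST_MAX get EINVAL. */
	if (sysctlbyname("kern.polling.burst_max", NULL, NULL,
	    &burst_max, sizeof(burst_max)) == -1)
		perror("kern.polling.burst_max");

	/* kern.polling.burst is read-only; it adapts at run time. */
	if (sysctlbyname("kern.polling.burst", &burst, &len, NULL, 0) == 0)
		printf("current burst: %u\n", burst);
	return (0);
}
#endif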
static uint32_t poll_in_idle_loop = 0;	/* do we poll in idle loop? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
	&poll_in_idle_loop, 0, "Enable device polling in idle loop");

static uint32_t user_frac = 50;
static int user_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = user_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 99)		/* val is unsigned; 0 is always allowed */
		return (EINVAL);

	mtx_lock(&poll_mtx);
	user_frac = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, user_frac, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), user_frac_sysctl, "I",
	"Desired user fraction of cpu time");

static uint32_t reg_frac_count = 0;
static uint32_t reg_frac = 20;
static int reg_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = reg_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1 || val > hz)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	reg_frac = val;
	if (reg_frac_count >= reg_frac)
		reg_frac_count = 0;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, reg_frac, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), reg_frac_sysctl, "I",
	"Every this many cycles check registers");
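/*
 * A worked example of the two knobs above, assuming hz = 1000: with
 * reg_frac = 20, every 20th pass of netisr_poll() below uses
 * POLL_AND_CHECK_STATUS, so status registers are checked about 50
 * times per second; with user_frac = 50, netisr_pollmore() keeps
 * adjusting poll_burst until netisr processing consumes roughly half
 * of each tick.
 */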
static uint32_t short_ticks;
SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RD,
	&short_ticks, 0, "Hardclock ticks shorter than they should be");

static uint32_t lost_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RD,
	&lost_polls, 0, "How many times we would have lost a poll tick");

static uint32_t pending_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RD,
	&pending_polls, 0, "Do we need to poll again");

static int residual_burst = 0;
SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RD,
	&residual_burst, 0, "# of residual cycles in burst");

static uint32_t poll_handlers;		/* next free entry in pr[] */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
	&poll_handlers, 0, "Number of registered poll handlers");

static int polling = 0;
SYSCTL_PROC(_kern_polling, OID_AUTO, enable, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(int), poll_switch, "I", "Switch polling for all interfaces");

static uint32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RD,
	&phase, 0, "Polling phase");

static uint32_t suspect;
SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RD,
	&suspect, 0, "Suspect events");

static uint32_t stalled;
SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RD,
	&stalled, 0, "Potential stalls");

static uint32_t idlepoll_sleeping;	/* idlepoll is sleeping */
SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD,
	&idlepoll_sleeping, 0, "idlepoll is sleeping");


#define POLL_LIST_LEN	128
struct pollrec {
	poll_handler_t	*handler;
	struct ifnet	*ifp;
};

static struct pollrec pr[POLL_LIST_LEN];

static void
init_device_poll(void)
{

	mtx_init(&poll_mtx, "polling", NULL, MTX_DEF);
	netisr_register(NETISR_POLL, (netisr_t *)netisr_poll, NULL, 0);
	netisr_register(NETISR_POLLMORE, (netisr_t *)netisr_pollmore, NULL, 0);
}
SYSINIT(device_poll, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, init_device_poll, NULL);


/*
 * Hook from hardclock.  Tries to schedule a netisr, but keeps track
 * of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because polling handlers should
 * run for a short time.  However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 */
void
hardclock_device_poll(void)
{
	static struct timeval prev_t, t;
	int delta;

	if (poll_handlers == 0)
		return;

	microuptime(&t);
	delta = (t.tv_usec - prev_t.tv_usec) +
		(t.tv_sec - prev_t.tv_sec)*1000000;
	if (delta * hz < 500000)
		short_ticks++;
	else
		prev_t = t;

	if (pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true,
		 * see comment above).
		 */
		stalled++;
		pending_polls = 0;
		phase = 0;
	}

	if (phase <= 2) {
		if (phase != 0)
			suspect++;
		phase = 1;
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		phase = 2;
	}
	if (pending_polls++ > 0)
		lost_polls++;
}
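/*
 * The short-tick test above, worked through: delta is in microseconds,
 * so "delta * hz < 500000" is equivalent to "delta < half a nominal
 * tick".  With hz = 1000 a tick is 1000us, and any interval shorter
 * than 500us bumps kern.polling.short_ticks without advancing prev_t.
 */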
/*
 * ether_poll is called from the idle loop.
 */
void
ether_poll(int count)
{
	int i;

	mtx_lock(&poll_mtx);

	if (count > poll_each_burst)
		count = poll_each_burst;

	for (i = 0 ; i < poll_handlers ; i++)
		pr[i].handler(pr[i].ifp, POLL_ONLY, count);

	mtx_unlock(&poll_mtx);
}

/*
 * netisr_pollmore is called after other netisr's, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at
 * once, because the burst could take a long time to be completely
 * processed, or could saturate the intermediate queue (ipintrq or
 * similar) leading to losses or unfairness.  To reduce the problem, and
 * also to account better for time spent in network-related processing,
 * we split the burst in smaller chunks of fixed size, giving control to
 * the other netisr's between chunks.  This helps in improving the
 * fairness, reducing livelock (because we emulate more closely the
 * "process to completion" that we have with fastforwarding) and
 * accounting for the work performed in low level handling and
 * forwarding.
 */

static struct timeval poll_start_t;

static void
netisr_pollmore(void)
{
	struct timeval t;
	int kern_load;

	mtx_lock(&poll_mtx);
	phase = 5;
	if (residual_burst > 0) {
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		mtx_unlock(&poll_mtx);
		/* will run immediately on return, followed by netisrs */
		return;
	}
	/* here we can account time spent in netisr's in this tick */
	microuptime(&t);
	kern_load = (t.tv_usec - poll_start_t.tv_usec) +
		(t.tv_sec - poll_start_t.tv_sec)*1000000;	/* us */
	kern_load = (kern_load * hz) / 10000;	/* 0..100, % of a full tick */
	if (kern_load > (100 - user_frac)) {	/* try decrease ticks */
		if (poll_burst > 1)
			poll_burst--;
	} else {
		if (poll_burst < poll_burst_max)
			poll_burst++;
	}

	pending_polls--;
	if (pending_polls == 0)			/* we are done */
		phase = 0;
	else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again, but slightly
		 * reduce the burst size to prevent that this happens again.
		 */
		poll_burst -= (poll_burst / 8);
		if (poll_burst < 1)
			poll_burst = 1;
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		phase = 6;
	}
	mtx_unlock(&poll_mtx);
}

/*
 * netisr_poll is scheduled by schednetisr when appropriate, typically
 * once per tick.
 */
static void
netisr_poll(void)
{
	int i, cycles;
	enum poll_cmd arg = POLL_ONLY;

	mtx_lock(&poll_mtx);
	phase = 3;
	if (residual_burst == 0) { /* first call in this tick */
		microuptime(&poll_start_t);
		if (++reg_frac_count == reg_frac) {
			arg = POLL_AND_CHECK_STATUS;
			reg_frac_count = 0;
		}

		residual_burst = poll_burst;
	}
	cycles = (residual_burst < poll_each_burst) ?
		residual_burst : poll_each_burst;
	residual_burst -= cycles;

	for (i = 0 ; i < poll_handlers ; i++)
		pr[i].handler(pr[i].ifp, arg, cycles);

	phase = 4;
	mtx_unlock(&poll_mtx);
}

/*
 * Try to register routine for polling.  Returns 0 if successful
 * (and polling should be enabled), error code otherwise.
 * A device is not supposed to register itself multiple times.
 *
 * This is called from within the *_ioctl() functions.
 */
int
ether_poll_register(poll_handler_t *h, struct ifnet *ifp)
{
	int i;

	KASSERT(h != NULL, ("%s: handler is NULL", __func__));
	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);
	if (poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times.  Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10;
		if (verbose > 0) {
			log(LOG_ERR, "poll handlers list full, "
			    "maybe a broken driver ?\n");
			verbose--;
		}
		mtx_unlock(&poll_mtx);
		return (ENOMEM); /* no polling for you */
	}

	for (i = 0 ; i < poll_handlers ; i++)
		if (pr[i].ifp == ifp && pr[i].handler != NULL) {
			mtx_unlock(&poll_mtx);
			log(LOG_DEBUG, "ether_poll_register: %s: handler"
			    " already registered\n", ifp->if_xname);
			return (EEXIST);
		}

	pr[poll_handlers].handler = h;
	pr[poll_handlers].ifp = ifp;
	poll_handlers++;
	mtx_unlock(&poll_mtx);
	if (idlepoll_sleeping)
		wakeup(&idlepoll_sleeping);
	return (0);
}

/*
 * Remove interface from the polling list.  Called from *_ioctl(), too.
 */
int
ether_poll_deregister(struct ifnet *ifp)
{
	int i;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);

	for (i = 0 ; i < poll_handlers ; i++)
		if (pr[i].ifp == ifp) /* found it */
			break;
	if (i == poll_handlers) {
		log(LOG_DEBUG, "ether_poll_deregister: %s: not found!\n",
		    ifp->if_xname);
		mtx_unlock(&poll_mtx);
		return (ENOENT);
	}
	poll_handlers--;
	if (i < poll_handlers) { /* Last entry replaces this one. */
		pr[i].handler = pr[poll_handlers].handler;
		pr[i].ifp = pr[poll_handlers].ifp;
	}
	mtx_unlock(&poll_mtx);
	return (0);
}

/*
 * Legacy interface for turning polling on and off on all interfaces
 * at one time.
 */
static int
poll_switch(SYSCTL_HANDLER_ARGS)
{
	INIT_VNET_NET(curvnet);
	struct ifnet *ifp;
	int error;
	int val = polling;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (val == polling)
		return (0);

	if (val < 0 || val > 1)
		return (EINVAL);

	polling = val;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if (ifp->if_capabilities & IFCAP_POLLING) {
			struct ifreq ifr;

			if (val == 1)
				ifr.ifr_reqcap =
				    ifp->if_capenable | IFCAP_POLLING;
			else
				ifr.ifr_reqcap =
				    ifp->if_capenable & ~IFCAP_POLLING;
			(void) (*ifp->if_ioctl)(ifp, SIOCSIFCAP,
			    (caddr_t)&ifr);
		}
	}
	IFNET_RUNLOCK();

	log(LOG_ERR, "kern.polling.enable is deprecated. Use ifconfig(8)\n");

	return (0);
}
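/*
 * The supported way to control polling is per interface, e.g. from
 * the shell (fxp0 is just an example name):
 *
 *	ifconfig fxp0 polling		# set IFCAP_POLLING via SIOCSIFCAP
 *	ifconfig fxp0 -polling		# clear it again
 */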
static void
poll_idle(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_IDLE;
	PROC_SLOCK(td->td_proc);
	rtp_to_pri(&rtp, td);
	PROC_SUNLOCK(td->td_proc);

	for (;;) {
		if (poll_in_idle_loop && poll_handlers > 0) {
			idlepoll_sleeping = 0;
			ether_poll(poll_each_burst);
			thread_lock(td);
			mi_switch(SW_VOL, NULL);
			thread_unlock(td);
		} else {
			idlepoll_sleeping = 1;
			tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3);
		}
	}
}

static struct proc *idlepoll;
static struct kproc_desc idlepoll_kp = {
	"idlepoll",
	poll_idle,
	&idlepoll
};
SYSINIT(idlepoll, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start,
    &idlepoll_kp);