kern_poll.c revision 193744
1/*- 2 * Copyright (c) 2001-2002 Luigi Rizzo 3 * 4 * Supported by: the Xorp Project (www.xorp.org) 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28#include <sys/cdefs.h> 29__FBSDID("$FreeBSD: head/sys/kern/kern_poll.c 193744 2009-06-08 19:57:35Z bz $"); 30 31#include "opt_device_polling.h" 32 33#include <sys/param.h> 34#include <sys/systm.h> 35#include <sys/kernel.h> 36#include <sys/kthread.h> 37#include <sys/proc.h> 38#include <sys/eventhandler.h> 39#include <sys/resourcevar.h> 40#include <sys/socket.h> /* needed by net/if.h */ 41#include <sys/sockio.h> 42#include <sys/sysctl.h> 43#include <sys/syslog.h> 44#include <sys/vimage.h> 45 46#include <net/if.h> /* for IFF_* flags */ 47#include <net/netisr.h> /* for NETISR_POLL */ 48#include <net/route.h> 49#include <net/vnet.h> 50 51static int poll_switch(SYSCTL_HANDLER_ARGS); 52 53void hardclock_device_poll(void); /* hook from hardclock */ 54 55static struct mtx poll_mtx; 56 57/* 58 * Polling support for [network] device drivers. 59 * 60 * Drivers which support this feature can register with the 61 * polling code. 62 * 63 * If registration is successful, the driver must disable interrupts, 64 * and further I/O is performed through the handler, which is invoked 65 * (at least once per clock tick) with 3 arguments: the "arg" passed at 66 * register time (a struct ifnet pointer), a command, and a "count" limit. 67 * 68 * The command can be one of the following: 69 * POLL_ONLY: quick move of "count" packets from input/output queues. 70 * POLL_AND_CHECK_STATUS: as above, plus check status registers or do 71 * other more expensive operations. This command is issued periodically 72 * but less frequently than POLL_ONLY. 73 * 74 * The count limit specifies how much work the handler can do during the 75 * call -- typically this is the number of packets to be received, or 76 * transmitted, etc. (drivers are free to interpret this number, as long 77 * as the max time spent in the function grows roughly linearly with the 78 * count). 79 * 80 * Polling is enabled and disabled via setting IFCAP_POLLING flag on 81 * the interface. The driver ioctl handler should register interface 82 * with polling and disable interrupts, if registration was successful. 83 * 84 * A second variable controls the sharing of CPU between polling/kernel 85 * network processing, and other activities (typically userlevel tasks): 86 * kern.polling.user_frac (between 0 and 100, default 50) sets the share 87 * of CPU allocated to user tasks. CPU is allocated proportionally to the 88 * shares, by dynamically adjusting the "count" (poll_burst). 89 * 90 * Other parameters can should be left to their default values. 91 * The following constraints hold 92 * 93 * 1 <= poll_each_burst <= poll_burst <= poll_burst_max 94 * 0 <= poll_each_burst 95 * MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX 96 */ 97 98#define MIN_POLL_BURST_MAX 10 99#define MAX_POLL_BURST_MAX 1000 100 101static uint32_t poll_burst = 5; 102static uint32_t poll_burst_max = 150; /* good for 100Mbit net and HZ=1000 */ 103static uint32_t poll_each_burst = 5; 104 105SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW, 0, 106 "Device polling parameters"); 107 108SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RD, 109 &poll_burst, 0, "Current polling burst size"); 110 111static int netisr_poll_scheduled; 112static int netisr_pollmore_scheduled; 113static int poll_shutting_down; 114 115static int poll_burst_max_sysctl(SYSCTL_HANDLER_ARGS) 116{ 117 uint32_t val = poll_burst_max; 118 int error; 119 120 error = sysctl_handle_int(oidp, &val, 0, req); 121 if (error || !req->newptr ) 122 return (error); 123 if (val < MIN_POLL_BURST_MAX || val > MAX_POLL_BURST_MAX) 124 return (EINVAL); 125 126 mtx_lock(&poll_mtx); 127 poll_burst_max = val; 128 if (poll_burst > poll_burst_max) 129 poll_burst = poll_burst_max; 130 if (poll_each_burst > poll_burst_max) 131 poll_each_burst = MIN_POLL_BURST_MAX; 132 mtx_unlock(&poll_mtx); 133 134 return (0); 135} 136SYSCTL_PROC(_kern_polling, OID_AUTO, burst_max, CTLTYPE_UINT | CTLFLAG_RW, 137 0, sizeof(uint32_t), poll_burst_max_sysctl, "I", "Max Polling burst size"); 138 139static int poll_each_burst_sysctl(SYSCTL_HANDLER_ARGS) 140{ 141 uint32_t val = poll_each_burst; 142 int error; 143 144 error = sysctl_handle_int(oidp, &val, 0, req); 145 if (error || !req->newptr ) 146 return (error); 147 if (val < 1) 148 return (EINVAL); 149 150 mtx_lock(&poll_mtx); 151 if (val > poll_burst_max) { 152 mtx_unlock(&poll_mtx); 153 return (EINVAL); 154 } 155 poll_each_burst = val; 156 mtx_unlock(&poll_mtx); 157 158 return (0); 159} 160SYSCTL_PROC(_kern_polling, OID_AUTO, each_burst, CTLTYPE_UINT | CTLFLAG_RW, 161 0, sizeof(uint32_t), poll_each_burst_sysctl, "I", 162 "Max size of each burst"); 163 164static uint32_t poll_in_idle_loop=0; /* do we poll in idle loop ? */ 165SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW, 166 &poll_in_idle_loop, 0, "Enable device polling in idle loop"); 167 168static uint32_t user_frac = 50; 169static int user_frac_sysctl(SYSCTL_HANDLER_ARGS) 170{ 171 uint32_t val = user_frac; 172 int error; 173 174 error = sysctl_handle_int(oidp, &val, 0, req); 175 if (error || !req->newptr ) 176 return (error); 177 if (val < 0 || val > 99) 178 return (EINVAL); 179 180 mtx_lock(&poll_mtx); 181 user_frac = val; 182 mtx_unlock(&poll_mtx); 183 184 return (0); 185} 186SYSCTL_PROC(_kern_polling, OID_AUTO, user_frac, CTLTYPE_UINT | CTLFLAG_RW, 187 0, sizeof(uint32_t), user_frac_sysctl, "I", 188 "Desired user fraction of cpu time"); 189 190static uint32_t reg_frac_count = 0; 191static uint32_t reg_frac = 20 ; 192static int reg_frac_sysctl(SYSCTL_HANDLER_ARGS) 193{ 194 uint32_t val = reg_frac; 195 int error; 196 197 error = sysctl_handle_int(oidp, &val, 0, req); 198 if (error || !req->newptr ) 199 return (error); 200 if (val < 1 || val > hz) 201 return (EINVAL); 202 203 mtx_lock(&poll_mtx); 204 reg_frac = val; 205 if (reg_frac_count >= reg_frac) 206 reg_frac_count = 0; 207 mtx_unlock(&poll_mtx); 208 209 return (0); 210} 211SYSCTL_PROC(_kern_polling, OID_AUTO, reg_frac, CTLTYPE_UINT | CTLFLAG_RW, 212 0, sizeof(uint32_t), reg_frac_sysctl, "I", 213 "Every this many cycles check registers"); 214 215static uint32_t short_ticks; 216SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RD, 217 &short_ticks, 0, "Hardclock ticks shorter than they should be"); 218 219static uint32_t lost_polls; 220SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RD, 221 &lost_polls, 0, "How many times we would have lost a poll tick"); 222 223static uint32_t pending_polls; 224SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RD, 225 &pending_polls, 0, "Do we need to poll again"); 226 227static int residual_burst = 0; 228SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RD, 229 &residual_burst, 0, "# of residual cycles in burst"); 230 231static uint32_t poll_handlers; /* next free entry in pr[]. */ 232SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD, 233 &poll_handlers, 0, "Number of registered poll handlers"); 234 235static int polling = 0; 236SYSCTL_PROC(_kern_polling, OID_AUTO, enable, CTLTYPE_UINT | CTLFLAG_RW, 237 0, sizeof(int), poll_switch, "I", "Switch polling for all interfaces"); 238 239static uint32_t phase; 240SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RD, 241 &phase, 0, "Polling phase"); 242 243static uint32_t suspect; 244SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RD, 245 &suspect, 0, "suspect event"); 246 247static uint32_t stalled; 248SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RD, 249 &stalled, 0, "potential stalls"); 250 251static uint32_t idlepoll_sleeping; /* idlepoll is sleeping */ 252SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD, 253 &idlepoll_sleeping, 0, "idlepoll is sleeping"); 254 255 256#define POLL_LIST_LEN 128 257struct pollrec { 258 poll_handler_t *handler; 259 struct ifnet *ifp; 260}; 261 262static struct pollrec pr[POLL_LIST_LEN]; 263 264static void 265poll_shutdown(void *arg, int howto) 266{ 267 268 poll_shutting_down = 1; 269} 270 271static void 272init_device_poll(void) 273{ 274 275 mtx_init(&poll_mtx, "polling", NULL, MTX_DEF); 276 EVENTHANDLER_REGISTER(shutdown_post_sync, poll_shutdown, NULL, 277 SHUTDOWN_PRI_LAST); 278} 279SYSINIT(device_poll, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, init_device_poll, NULL); 280 281 282/* 283 * Hook from hardclock. Tries to schedule a netisr, but keeps track 284 * of lost ticks due to the previous handler taking too long. 285 * Normally, this should not happen, because polling handler should 286 * run for a short time. However, in some cases (e.g. when there are 287 * changes in link status etc.) the drivers take a very long time 288 * (even in the order of milliseconds) to reset and reconfigure the 289 * device, causing apparent lost polls. 290 * 291 * The first part of the code is just for debugging purposes, and tries 292 * to count how often hardclock ticks are shorter than they should, 293 * meaning either stray interrupts or delayed events. 294 */ 295void 296hardclock_device_poll(void) 297{ 298 static struct timeval prev_t, t; 299 int delta; 300 301 if (poll_handlers == 0 || poll_shutting_down) 302 return; 303 304 microuptime(&t); 305 delta = (t.tv_usec - prev_t.tv_usec) + 306 (t.tv_sec - prev_t.tv_sec)*1000000; 307 if (delta * hz < 500000) 308 short_ticks++; 309 else 310 prev_t = t; 311 312 if (pending_polls > 100) { 313 /* 314 * Too much, assume it has stalled (not always true 315 * see comment above). 316 */ 317 stalled++; 318 pending_polls = 0; 319 phase = 0; 320 } 321 322 if (phase <= 2) { 323 if (phase != 0) 324 suspect++; 325 phase = 1; 326 netisr_poll_scheduled = 1; 327 netisr_pollmore_scheduled = 1; 328 netisr_sched_poll(); 329 phase = 2; 330 } 331 if (pending_polls++ > 0) 332 lost_polls++; 333} 334 335/* 336 * ether_poll is called from the idle loop. 337 */ 338static void 339ether_poll(int count) 340{ 341 int i; 342 343 mtx_lock(&poll_mtx); 344 345 if (count > poll_each_burst) 346 count = poll_each_burst; 347 348 for (i = 0 ; i < poll_handlers ; i++) 349 pr[i].handler(pr[i].ifp, POLL_ONLY, count); 350 351 mtx_unlock(&poll_mtx); 352} 353 354/* 355 * netisr_pollmore is called after other netisr's, possibly scheduling 356 * another NETISR_POLL call, or adapting the burst size for the next cycle. 357 * 358 * It is very bad to fetch large bursts of packets from a single card at once, 359 * because the burst could take a long time to be completely processed, or 360 * could saturate the intermediate queue (ipintrq or similar) leading to 361 * losses or unfairness. To reduce the problem, and also to account better for 362 * time spent in network-related processing, we split the burst in smaller 363 * chunks of fixed size, giving control to the other netisr's between chunks. 364 * This helps in improving the fairness, reducing livelock (because we 365 * emulate more closely the "process to completion" that we have with 366 * fastforwarding) and accounting for the work performed in low level 367 * handling and forwarding. 368 */ 369 370static struct timeval poll_start_t; 371 372void 373netisr_pollmore() 374{ 375 struct timeval t; 376 int kern_load; 377 378 mtx_lock(&poll_mtx); 379 if (!netisr_pollmore_scheduled) { 380 mtx_unlock(&poll_mtx); 381 return; 382 } 383 netisr_pollmore_scheduled = 0; 384 phase = 5; 385 if (residual_burst > 0) { 386 netisr_poll_scheduled = 1; 387 netisr_pollmore_scheduled = 1; 388 netisr_sched_poll(); 389 mtx_unlock(&poll_mtx); 390 /* will run immediately on return, followed by netisrs */ 391 return; 392 } 393 /* here we can account time spent in netisr's in this tick */ 394 microuptime(&t); 395 kern_load = (t.tv_usec - poll_start_t.tv_usec) + 396 (t.tv_sec - poll_start_t.tv_sec)*1000000; /* us */ 397 kern_load = (kern_load * hz) / 10000; /* 0..100 */ 398 if (kern_load > (100 - user_frac)) { /* try decrease ticks */ 399 if (poll_burst > 1) 400 poll_burst--; 401 } else { 402 if (poll_burst < poll_burst_max) 403 poll_burst++; 404 } 405 406 pending_polls--; 407 if (pending_polls == 0) /* we are done */ 408 phase = 0; 409 else { 410 /* 411 * Last cycle was long and caused us to miss one or more 412 * hardclock ticks. Restart processing again, but slightly 413 * reduce the burst size to prevent that this happens again. 414 */ 415 poll_burst -= (poll_burst / 8); 416 if (poll_burst < 1) 417 poll_burst = 1; 418 netisr_poll_scheduled = 1; 419 netisr_pollmore_scheduled = 1; 420 netisr_sched_poll(); 421 phase = 6; 422 } 423 mtx_unlock(&poll_mtx); 424} 425 426/* 427 * netisr_poll is typically scheduled once per tick. 428 */ 429void 430netisr_poll(void) 431{ 432 int i, cycles; 433 enum poll_cmd arg = POLL_ONLY; 434 435 mtx_lock(&poll_mtx); 436 if (!netisr_poll_scheduled) { 437 mtx_unlock(&poll_mtx); 438 return; 439 } 440 netisr_poll_scheduled = 0; 441 phase = 3; 442 if (residual_burst == 0) { /* first call in this tick */ 443 microuptime(&poll_start_t); 444 if (++reg_frac_count == reg_frac) { 445 arg = POLL_AND_CHECK_STATUS; 446 reg_frac_count = 0; 447 } 448 449 residual_burst = poll_burst; 450 } 451 cycles = (residual_burst < poll_each_burst) ? 452 residual_burst : poll_each_burst; 453 residual_burst -= cycles; 454 455 for (i = 0 ; i < poll_handlers ; i++) 456 pr[i].handler(pr[i].ifp, arg, cycles); 457 458 phase = 4; 459 mtx_unlock(&poll_mtx); 460} 461 462/* 463 * Try to register routine for polling. Returns 0 if successful 464 * (and polling should be enabled), error code otherwise. 465 * A device is not supposed to register itself multiple times. 466 * 467 * This is called from within the *_ioctl() functions. 468 */ 469int 470ether_poll_register(poll_handler_t *h, struct ifnet *ifp) 471{ 472 int i; 473 474 KASSERT(h != NULL, ("%s: handler is NULL", __func__)); 475 KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__)); 476 477 mtx_lock(&poll_mtx); 478 if (poll_handlers >= POLL_LIST_LEN) { 479 /* 480 * List full, cannot register more entries. 481 * This should never happen; if it does, it is probably a 482 * broken driver trying to register multiple times. Checking 483 * this at runtime is expensive, and won't solve the problem 484 * anyways, so just report a few times and then give up. 485 */ 486 static int verbose = 10 ; 487 if (verbose >0) { 488 log(LOG_ERR, "poll handlers list full, " 489 "maybe a broken driver ?\n"); 490 verbose--; 491 } 492 mtx_unlock(&poll_mtx); 493 return (ENOMEM); /* no polling for you */ 494 } 495 496 for (i = 0 ; i < poll_handlers ; i++) 497 if (pr[i].ifp == ifp && pr[i].handler != NULL) { 498 mtx_unlock(&poll_mtx); 499 log(LOG_DEBUG, "ether_poll_register: %s: handler" 500 " already registered\n", ifp->if_xname); 501 return (EEXIST); 502 } 503 504 pr[poll_handlers].handler = h; 505 pr[poll_handlers].ifp = ifp; 506 poll_handlers++; 507 mtx_unlock(&poll_mtx); 508 if (idlepoll_sleeping) 509 wakeup(&idlepoll_sleeping); 510 return (0); 511} 512 513/* 514 * Remove interface from the polling list. Called from *_ioctl(), too. 515 */ 516int 517ether_poll_deregister(struct ifnet *ifp) 518{ 519 int i; 520 521 KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__)); 522 523 mtx_lock(&poll_mtx); 524 525 for (i = 0 ; i < poll_handlers ; i++) 526 if (pr[i].ifp == ifp) /* found it */ 527 break; 528 if (i == poll_handlers) { 529 log(LOG_DEBUG, "ether_poll_deregister: %s: not found!\n", 530 ifp->if_xname); 531 mtx_unlock(&poll_mtx); 532 return (ENOENT); 533 } 534 poll_handlers--; 535 if (i < poll_handlers) { /* Last entry replaces this one. */ 536 pr[i].handler = pr[poll_handlers].handler; 537 pr[i].ifp = pr[poll_handlers].ifp; 538 } 539 mtx_unlock(&poll_mtx); 540 return (0); 541} 542 543/* 544 * Legacy interface for turning polling on all interfaces at one time. 545 */ 546static int 547poll_switch(SYSCTL_HANDLER_ARGS) 548{ 549 INIT_VNET_NET(curvnet); 550 struct ifnet *ifp; 551 int error; 552 int val = polling; 553 554 error = sysctl_handle_int(oidp, &val, 0, req); 555 if (error || !req->newptr ) 556 return (error); 557 558 if (val == polling) 559 return (0); 560 561 if (val < 0 || val > 1) 562 return (EINVAL); 563 564 polling = val; 565 566 IFNET_RLOCK(); 567 TAILQ_FOREACH(ifp, &V_ifnet, if_link) { 568 if (ifp->if_capabilities & IFCAP_POLLING) { 569 struct ifreq ifr; 570 571 if (val == 1) 572 ifr.ifr_reqcap = 573 ifp->if_capenable | IFCAP_POLLING; 574 else 575 ifr.ifr_reqcap = 576 ifp->if_capenable & ~IFCAP_POLLING; 577 (void) (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr); 578 } 579 } 580 IFNET_RUNLOCK(); 581 582 log(LOG_ERR, "kern.polling.enable is deprecated. Use ifconfig(8)"); 583 584 return (0); 585} 586 587static void 588poll_idle(void) 589{ 590 struct thread *td = curthread; 591 struct rtprio rtp; 592 593 rtp.prio = RTP_PRIO_MAX; /* lowest priority */ 594 rtp.type = RTP_PRIO_IDLE; 595 PROC_SLOCK(td->td_proc); 596 rtp_to_pri(&rtp, td); 597 PROC_SUNLOCK(td->td_proc); 598 599 for (;;) { 600 if (poll_in_idle_loop && poll_handlers > 0) { 601 idlepoll_sleeping = 0; 602 ether_poll(poll_each_burst); 603 thread_lock(td); 604 mi_switch(SW_VOL, NULL); 605 thread_unlock(td); 606 } else { 607 idlepoll_sleeping = 1; 608 tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3); 609 } 610 } 611} 612 613static struct proc *idlepoll; 614static struct kproc_desc idlepoll_kp = { 615 "idlepoll", 616 poll_idle, 617 &idlepoll 618}; 619SYSINIT(idlepoll, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start, 620 &idlepoll_kp); 621