kern_poll.c revision 196019
/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_poll.c 196019 2009-08-01 19:26:27Z rwatson $");

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/eventhandler.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>			/* needed by net/if.h */
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>			/* for IFF_* flags */
#include <net/netisr.h>			/* for NETISR_POLL */
#include <net/vnet.h>

static int poll_switch(SYSCTL_HANDLER_ARGS);

void hardclock_device_poll(void);	/* hook from hardclock */

static struct mtx	poll_mtx;

/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature can register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations. This command is issued periodically
 *	but less frequently than POLL_ONLY.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * Polling is enabled and disabled by setting the IFCAP_POLLING flag on
 * the interface. The driver's ioctl handler should register the interface
 * with the polling code and disable interrupts if registration succeeded.
 */
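/*
 * A minimal sketch of the driver side of the interface described above,
 * for a hypothetical "xx" driver: xx_softc, xx_rxeof(), xx_txeof(),
 * xx_check_status() and xx_intr_enable()/xx_intr_disable() are invented
 * placeholders, not functions defined in this file or any real driver.
 * The block is guarded out and assumes the poll_handler_t signature used
 * by this file (return value ignored by the dispatch loops below).
 */
#if 0	/* illustrative sketch only, never compiled */
static void
xx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct xx_softc *sc = ifp->if_softc;

	/* Move at most "count" packets off the RX/TX rings. */
	xx_rxeof(sc, count);
	xx_txeof(sc);

	/* Do the more expensive housekeeping only when asked to. */
	if (cmd == POLL_AND_CHECK_STATUS)
		xx_check_status(sc);
}

static int
xx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct xx_softc *sc = ifp->if_softc;
	int error = 0;

	switch (cmd) {
	case SIOCSIFCAP:
		if (ifr->ifr_reqcap & IFCAP_POLLING) {
			error = ether_poll_register(xx_poll, ifp);
			if (error == 0) {
				/* Registered: turn interrupts off. */
				xx_intr_disable(sc);
				ifp->if_capenable |= IFCAP_POLLING;
			}
		} else {
			error = ether_poll_deregister(ifp);
			/* Back to interrupt mode. */
			xx_intr_enable(sc);
			ifp->if_capenable &= ~IFCAP_POLLING;
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
#endif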
/*
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.user_frac (between 0 and 100, default 50) sets the share
 * of CPU allocated to user tasks. CPU is allocated proportionally to the
 * shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */

#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	1000

static uint32_t poll_burst = 5;
static uint32_t poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000 */
static uint32_t poll_each_burst = 5;

SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW, 0,
	"Device polling parameters");

SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RD,
	&poll_burst, 0, "Current polling burst size");

static int	netisr_poll_scheduled;
static int	netisr_pollmore_scheduled;
static int	poll_shutting_down;

static int poll_burst_max_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_burst_max;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < MIN_POLL_BURST_MAX || val > MAX_POLL_BURST_MAX)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	poll_burst_max = val;
	if (poll_burst > poll_burst_max)
		poll_burst = poll_burst_max;
	if (poll_each_burst > poll_burst_max)
		poll_each_burst = MIN_POLL_BURST_MAX;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, burst_max, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), poll_burst_max_sysctl, "I", "Max Polling burst size");

static int poll_each_burst_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_each_burst;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	if (val > poll_burst_max) {
		mtx_unlock(&poll_mtx);
		return (EINVAL);
	}
	poll_each_burst = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, each_burst, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), poll_each_burst_sysctl, "I",
	"Max size of each burst");
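/*
 * For illustration, the knobs above are driven from userland with
 * sysctl(8); values outside the ranges enforced by the handlers are
 * rejected with EINVAL:
 *
 *	sysctl kern.polling.burst_max=300	# MIN..MAX_POLL_BURST_MAX
 *	sysctl kern.polling.each_burst=10	# must not exceed burst_max
 */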
static uint32_t poll_in_idle_loop = 0;	/* do we poll in idle loop? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
	&poll_in_idle_loop, 0, "Enable device polling in idle loop");

static uint32_t user_frac = 50;
static int user_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = user_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 99)	/* val is unsigned, so no lower-bound check needed */
		return (EINVAL);

	mtx_lock(&poll_mtx);
	user_frac = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, user_frac, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), user_frac_sysctl, "I",
	"Desired user fraction of cpu time");

static uint32_t reg_frac_count = 0;
static uint32_t reg_frac = 20;
static int reg_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = reg_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1 || val > hz)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	reg_frac = val;
	if (reg_frac_count >= reg_frac)
		reg_frac_count = 0;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, reg_frac, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), reg_frac_sysctl, "I",
	"Every this many cycles check registers");

static uint32_t short_ticks;
SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RD,
	&short_ticks, 0, "Hardclock ticks shorter than they should be");

static uint32_t lost_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RD,
	&lost_polls, 0, "How many times we would have lost a poll tick");

static uint32_t pending_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RD,
	&pending_polls, 0, "Do we need to poll again");

static int residual_burst = 0;
SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RD,
	&residual_burst, 0, "# of residual cycles in burst");
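/*
 * A worked example of the reg_frac knob above: reg_frac_count is bumped
 * once per tick (see netisr_poll() below), so with the default
 * reg_frac = 20 and hz = 1000, every 20th poll uses POLL_AND_CHECK_STATUS,
 * i.e. status registers are checked about 50 times per second.
 */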
static uint32_t poll_handlers;		/* next free entry in pr[] */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
	&poll_handlers, 0, "Number of registered poll handlers");

static int polling = 0;
SYSCTL_PROC(_kern_polling, OID_AUTO, enable, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(int), poll_switch, "I", "Switch polling for all interfaces");

static uint32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RD,
	&phase, 0, "Polling phase");

static uint32_t suspect;
SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RD,
	&suspect, 0, "suspect event");

static uint32_t stalled;
SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RD,
	&stalled, 0, "potential stalls");

static uint32_t idlepoll_sleeping;	/* idlepoll is sleeping */
SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD,
	&idlepoll_sleeping, 0, "idlepoll is sleeping");


#define POLL_LIST_LEN	128
struct pollrec {
	poll_handler_t	*handler;
	struct ifnet	*ifp;
};

static struct pollrec pr[POLL_LIST_LEN];

static void
poll_shutdown(void *arg, int howto)
{

	poll_shutting_down = 1;
}

static void
init_device_poll(void)
{

	mtx_init(&poll_mtx, "polling", NULL, MTX_DEF);
	EVENTHANDLER_REGISTER(shutdown_post_sync, poll_shutdown, NULL,
	    SHUTDOWN_PRI_LAST);
}
SYSINIT(device_poll, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, init_device_poll, NULL);


/*
 * Hook from hardclock. Tries to schedule a netisr, but keeps track
 * of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because the polling handler should
 * run for only a short time. However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even on the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 */
void
hardclock_device_poll(void)
{
	static struct timeval prev_t, t;
	int delta;

	if (poll_handlers == 0 || poll_shutting_down)
		return;

	microuptime(&t);
	delta = (t.tv_usec - prev_t.tv_usec) +
	    (t.tv_sec - prev_t.tv_sec) * 1000000;
	if (delta * hz < 500000)
		short_ticks++;
	else
		prev_t = t;

	if (pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true,
		 * see comment above).
		 */
		stalled++;
		pending_polls = 0;
		phase = 0;
	}

	if (phase <= 2) {
		if (phase != 0)
			suspect++;
		phase = 1;
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		phase = 2;
	}
	if (pending_polls++ > 0)
		lost_polls++;
}

/*
 * ether_poll is called from the idle loop.
 */
static void
ether_poll(int count)
{
	int i;

	mtx_lock(&poll_mtx);

	if (count > poll_each_burst)
		count = poll_each_burst;

	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, POLL_ONLY, count);

	mtx_unlock(&poll_mtx);
}
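/*
 * A worked example of the short-tick test in hardclock_device_poll()
 * above: "delta * hz < 500000" flags ticks shorter than half their
 * nominal length, whatever hz is.  With hz = 1000 a tick should last
 * 1000 us, and any delta below 500 us (e.g. 499 * 1000 = 499000)
 * increments short_ticks.
 */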
/*
 * netisr_pollmore is called after other netisr's, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, or
 * could saturate the intermediate queue (ipintrq or similar) leading to
 * losses or unfairness. To reduce the problem, and also to account better for
 * time spent in network-related processing, we split the burst into smaller
 * chunks of fixed size, giving control to the other netisr's between chunks.
 * This helps in improving the fairness, reducing livelock (because we
 * emulate more closely the "process to completion" that we have with
 * fastforwarding) and accounting for the work performed in low level
 * handling and forwarding.
 */

static struct timeval poll_start_t;

void
netisr_pollmore(void)
{
	struct timeval t;
	int kern_load;

	mtx_lock(&poll_mtx);
	if (!netisr_pollmore_scheduled) {
		mtx_unlock(&poll_mtx);
		return;
	}
	netisr_pollmore_scheduled = 0;
	phase = 5;
	if (residual_burst > 0) {
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		mtx_unlock(&poll_mtx);
		/* will run immediately on return, followed by netisrs */
		return;
	}
	/* here we can account time spent in netisr's in this tick */
	microuptime(&t);
	kern_load = (t.tv_usec - poll_start_t.tv_usec) +
	    (t.tv_sec - poll_start_t.tv_sec) * 1000000;	/* us */
	kern_load = (kern_load * hz) / 10000;		/* 0..100 */
	if (kern_load > (100 - user_frac)) {	/* try decrease ticks */
		if (poll_burst > 1)
			poll_burst--;
	} else {
		if (poll_burst < poll_burst_max)
			poll_burst++;
	}

	pending_polls--;
	if (pending_polls == 0)	/* we are done */
		phase = 0;
	else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks. Restart processing, but slightly
		 * reduce the burst size to prevent this from happening again.
		 */
		poll_burst -= (poll_burst / 8);
		if (poll_burst < 1)
			poll_burst = 1;
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		phase = 6;
	}
	mtx_unlock(&poll_mtx);
}

/*
 * netisr_poll is typically scheduled once per tick.
 */
void
netisr_poll(void)
{
	int i, cycles;
	enum poll_cmd arg = POLL_ONLY;

	mtx_lock(&poll_mtx);
	if (!netisr_poll_scheduled) {
		mtx_unlock(&poll_mtx);
		return;
	}
	netisr_poll_scheduled = 0;
	phase = 3;
	if (residual_burst == 0) {	/* first call in this tick */
		microuptime(&poll_start_t);
		if (++reg_frac_count == reg_frac) {
			arg = POLL_AND_CHECK_STATUS;
			reg_frac_count = 0;
		}

		residual_burst = poll_burst;
	}
	cycles = (residual_burst < poll_each_burst) ?
	    residual_burst : poll_each_burst;
	residual_burst -= cycles;

	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, arg, cycles);

	phase = 4;
	mtx_unlock(&poll_mtx);
}
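/*
 * A worked example of the load accounting in netisr_pollmore() above:
 * kern_load = elapsed_us * hz / 10000 is the time spent in the netisrs
 * expressed as a percentage of one tick.  With hz = 1000, a cycle that
 * took 300 us gives kern_load = 30; with the default user_frac = 50 this
 * is not above the 100 - user_frac = 50 threshold, so poll_burst grows
 * by one for the next tick.
 */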
/*
 * Try to register routine for polling. Returns 0 if successful
 * (and polling should be enabled), error code otherwise.
 * A device is not supposed to register itself multiple times.
 *
 * This is called from within the *_ioctl() functions.
 */
int
ether_poll_register(poll_handler_t *h, struct ifnet *ifp)
{
	int i;

	KASSERT(h != NULL, ("%s: handler is NULL", __func__));
	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);
	if (poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10;
		if (verbose > 0) {
			log(LOG_ERR, "poll handlers list full, "
			    "maybe a broken driver?\n");
			verbose--;
		}
		mtx_unlock(&poll_mtx);
		return (ENOMEM); /* no polling for you */
	}

	for (i = 0; i < poll_handlers; i++)
		if (pr[i].ifp == ifp && pr[i].handler != NULL) {
			mtx_unlock(&poll_mtx);
			log(LOG_DEBUG, "ether_poll_register: %s: handler"
			    " already registered\n", ifp->if_xname);
			return (EEXIST);
		}

	pr[poll_handlers].handler = h;
	pr[poll_handlers].ifp = ifp;
	poll_handlers++;
	mtx_unlock(&poll_mtx);
	if (idlepoll_sleeping)
		wakeup(&idlepoll_sleeping);
	return (0);
}

/*
 * Remove interface from the polling list. Called from *_ioctl(), too.
 */
int
ether_poll_deregister(struct ifnet *ifp)
{
	int i;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);

	for (i = 0; i < poll_handlers; i++)
		if (pr[i].ifp == ifp) /* found it */
			break;
	if (i == poll_handlers) {
		log(LOG_DEBUG, "ether_poll_deregister: %s: not found!\n",
		    ifp->if_xname);
		mtx_unlock(&poll_mtx);
		return (ENOENT);
	}
	poll_handlers--;
	if (i < poll_handlers) { /* Last entry replaces this one. */
		pr[i].handler = pr[poll_handlers].handler;
		pr[i].ifp = pr[poll_handlers].ifp;
	}
	mtx_unlock(&poll_mtx);
	return (0);
}

/*
 * Legacy interface for turning polling on for all interfaces at one time.
 */
static int
poll_switch(SYSCTL_HANDLER_ARGS)
{
	struct ifnet *ifp;
	int error;
	int val = polling;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (val == polling)
		return (0);

	if (val < 0 || val > 1)
		return (EINVAL);

	polling = val;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if (ifp->if_capabilities & IFCAP_POLLING) {
			struct ifreq ifr;

			if (val == 1)
				ifr.ifr_reqcap =
				    ifp->if_capenable | IFCAP_POLLING;
			else
				ifr.ifr_reqcap =
				    ifp->if_capenable & ~IFCAP_POLLING;
			(void) (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
		}
	}
	IFNET_RUNLOCK();

	log(LOG_ERR, "kern.polling.enable is deprecated. Use ifconfig(8)");

	return (0);
}

static void
poll_idle(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_IDLE;
	PROC_SLOCK(td->td_proc);
	rtp_to_pri(&rtp, td);
	PROC_SUNLOCK(td->td_proc);

	for (;;) {
		if (poll_in_idle_loop && poll_handlers > 0) {
			idlepoll_sleeping = 0;
			ether_poll(poll_each_burst);
			thread_lock(td);
			mi_switch(SW_VOL, NULL);
			thread_unlock(td);
		} else {
			idlepoll_sleeping = 1;
			tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3);
		}
	}
}

static struct proc *idlepoll;
static struct kproc_desc idlepoll_kp = {
	 "idlepoll",
	 poll_idle,
	 &idlepoll
};
SYSINIT(idlepoll, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start,
    &idlepoll_kp);
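/*
 * The per-interface replacement for the deprecated knob above is
 * ifconfig(8), which ends up in the driver's SIOCSIFCAP handler
 * (interface name hypothetical):
 *
 *	ifconfig em0 polling	# set IFCAP_POLLING
 *	ifconfig em0 -polling	# clear it
 */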
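/*
 * The idlepoll kthread above only does work when a handler is registered
 * and kern.polling.idle_poll is nonzero, e.g. after
 *
 *	sysctl kern.polling.idle_poll=1
 *
 * otherwise it sleeps on &idlepoll_sleeping and is woken up by
 * ether_poll_register().
 */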