/* kern_cpu.c revision 142114 */
1/*- 2 * Copyright (c) 2004-2005 Nate Lawson (SDG) 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: head/sys/kern/kern_cpu.c 142114 2005-02-20 00:59:15Z njl $"); 29 30#include <sys/param.h> 31#include <sys/bus.h> 32#include <sys/cpu.h> 33#include <sys/eventhandler.h> 34#include <sys/kernel.h> 35#include <sys/malloc.h> 36#include <sys/module.h> 37#include <sys/proc.h> 38#include <sys/queue.h> 39#include <sys/sched.h> 40#include <sys/sysctl.h> 41#include <sys/systm.h> 42#include <sys/sbuf.h> 43#include <sys/timetc.h> 44 45#include "cpufreq_if.h" 46 47/* 48 * Common CPU frequency glue code. 
Drivers for specific hardware can 49 * attach this interface to allow users to get/set the CPU frequency. 50 */ 51 52/* 53 * Number of levels we can handle. Levels are synthesized from settings 54 * so for N settings there may be N^2 levels. 55 */ 56#define CF_MAX_LEVELS 32 57 58struct cpufreq_softc { 59 struct cf_level curr_level; 60 int curr_priority; 61 struct cf_level saved_level; 62 int saved_priority; 63 struct cf_level_lst all_levels; 64 int all_count; 65 int max_mhz; 66 device_t dev; 67 struct sysctl_ctx_list sysctl_ctx; 68}; 69 70struct cf_setting_array { 71 struct cf_setting sets[MAX_SETTINGS]; 72 int count; 73 TAILQ_ENTRY(cf_setting_array) link; 74}; 75 76TAILQ_HEAD(cf_setting_lst, cf_setting_array); 77 78static int cpufreq_attach(device_t dev); 79static int cpufreq_detach(device_t dev); 80static void cpufreq_evaluate(void *arg); 81static int cf_set_method(device_t dev, const struct cf_level *level, 82 int priority); 83static int cf_get_method(device_t dev, struct cf_level *level); 84static int cf_levels_method(device_t dev, struct cf_level *levels, 85 int *count); 86static int cpufreq_insert_abs(struct cpufreq_softc *sc, 87 struct cf_setting *sets, int count); 88static int cpufreq_expand_set(struct cpufreq_softc *sc, 89 struct cf_setting_array *set_arr); 90static struct cf_level *cpufreq_dup_set(struct cpufreq_softc *sc, 91 struct cf_level *dup, struct cf_setting *set); 92static int cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS); 93static int cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS); 94static int cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS); 95 96static device_method_t cpufreq_methods[] = { 97 DEVMETHOD(device_probe, bus_generic_probe), 98 DEVMETHOD(device_attach, cpufreq_attach), 99 DEVMETHOD(device_detach, cpufreq_detach), 100 101 DEVMETHOD(cpufreq_set, cf_set_method), 102 DEVMETHOD(cpufreq_get, cf_get_method), 103 DEVMETHOD(cpufreq_levels, cf_levels_method), 104 {0, 0} 105}; 106static driver_t cpufreq_driver = { 107 "cpufreq", cpufreq_methods, 
sizeof(struct cpufreq_softc) 108}; 109static devclass_t cpufreq_dc; 110DRIVER_MODULE(cpufreq, cpu, cpufreq_driver, cpufreq_dc, 0, 0); 111 112static eventhandler_tag cf_ev_tag; 113 114static int 115cpufreq_attach(device_t dev) 116{ 117 struct cpufreq_softc *sc; 118 device_t parent; 119 int numdevs; 120 121 sc = device_get_softc(dev); 122 parent = device_get_parent(dev); 123 sc->dev = dev; 124 sysctl_ctx_init(&sc->sysctl_ctx); 125 TAILQ_INIT(&sc->all_levels); 126 sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN; 127 sc->saved_level.total_set.freq = CPUFREQ_VAL_UNKNOWN; 128 sc->max_mhz = CPUFREQ_VAL_UNKNOWN; 129 130 /* 131 * Only initialize one set of sysctls for all CPUs. In the future, 132 * if multiple CPUs can have different settings, we can move these 133 * sysctls to be under every CPU instead of just the first one. 134 */ 135 numdevs = devclass_get_count(cpufreq_dc); 136 if (numdevs > 1) 137 return (0); 138 139 SYSCTL_ADD_PROC(&sc->sysctl_ctx, 140 SYSCTL_CHILDREN(device_get_sysctl_tree(parent)), 141 OID_AUTO, "freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 142 cpufreq_curr_sysctl, "I", "Current CPU frequency"); 143 SYSCTL_ADD_PROC(&sc->sysctl_ctx, 144 SYSCTL_CHILDREN(device_get_sysctl_tree(parent)), 145 OID_AUTO, "freq_levels", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 146 cpufreq_levels_sysctl, "A", "CPU frequency levels"); 147 cf_ev_tag = EVENTHANDLER_REGISTER(cpufreq_changed, cpufreq_evaluate, 148 NULL, EVENTHANDLER_PRI_ANY); 149 150 return (0); 151} 152 153static int 154cpufreq_detach(device_t dev) 155{ 156 struct cpufreq_softc *sc; 157 int numdevs; 158 159 sc = device_get_softc(dev); 160 sysctl_ctx_free(&sc->sysctl_ctx); 161 162 /* Only clean up these resources when the last device is detaching. */ 163 numdevs = devclass_get_count(cpufreq_dc); 164 if (numdevs == 1) 165 EVENTHANDLER_DEREGISTER(cpufreq_changed, cf_ev_tag); 166 167 return (0); 168} 169 170static void 171cpufreq_evaluate(void *arg) 172{ 173 /* TODO: Re-evaluate when notified of changes to drivers. 
*/ 174} 175 176static int 177cf_set_method(device_t dev, const struct cf_level *level, int priority) 178{ 179 struct cpufreq_softc *sc; 180 const struct cf_setting *set; 181 struct pcpu *pc; 182 int cpu_id, error, i; 183 184 sc = device_get_softc(dev); 185 186 /* 187 * Check that the TSC isn't being used as a timecounter. 188 * If it is, then return EBUSY and refuse to change the 189 * clock speed. 190 */ 191 if (strcmp(timecounter->tc_name, "TSC") == 0) 192 return (EBUSY); 193 194 /* 195 * If the caller didn't specify a level and one is saved, prepare to 196 * restore the saved level. If none has been saved, return an error. 197 * If they did specify one, but the requested level has a lower 198 * priority, don't allow the new level right now. 199 */ 200 if (level == NULL) { 201 if (sc->saved_level.total_set.freq != CPUFREQ_VAL_UNKNOWN) { 202 level = &sc->saved_level; 203 priority = sc->saved_priority; 204 } else 205 return (ENXIO); 206 } else if (priority < sc->curr_priority) 207 return (EPERM); 208 209 /* If already at this level, just return. */ 210 if (CPUFREQ_CMP(sc->curr_level.total_set.freq, level->total_set.freq)) 211 return (0); 212 213 /* First, set the absolute frequency via its driver. */ 214 set = &level->abs_set; 215 if (set->dev) { 216 if (!device_is_attached(set->dev)) { 217 error = ENXIO; 218 goto out; 219 } 220 221 /* Bind to the target CPU before switching, if necessary. */ 222 cpu_id = PCPU_GET(cpuid); 223 pc = cpu_get_pcpu(set->dev); 224 if (cpu_id != pc->pc_cpuid) { 225 mtx_lock_spin(&sched_lock); 226 sched_bind(curthread, pc->pc_cpuid); 227 mtx_unlock_spin(&sched_lock); 228 } 229 error = CPUFREQ_DRV_SET(set->dev, set); 230 if (cpu_id != pc->pc_cpuid) { 231 mtx_lock_spin(&sched_lock); 232 sched_unbind(curthread); 233 mtx_unlock_spin(&sched_lock); 234 } 235 if (error) { 236 goto out; 237 } 238 } 239 240 /* Next, set any/all relative frequencies via their drivers. 
*/ 241 for (i = 0; i < level->rel_count; i++) { 242 set = &level->rel_set[i]; 243 if (!device_is_attached(set->dev)) { 244 error = ENXIO; 245 goto out; 246 } 247 248 /* Bind to the target CPU before switching, if necessary. */ 249 cpu_id = PCPU_GET(cpuid); 250 pc = cpu_get_pcpu(set->dev); 251 if (cpu_id != pc->pc_cpuid) { 252 mtx_lock_spin(&sched_lock); 253 sched_bind(curthread, pc->pc_cpuid); 254 mtx_unlock_spin(&sched_lock); 255 } 256 error = CPUFREQ_DRV_SET(set->dev, set); 257 if (cpu_id != pc->pc_cpuid) { 258 mtx_lock_spin(&sched_lock); 259 sched_unbind(curthread); 260 mtx_unlock_spin(&sched_lock); 261 } 262 if (error) { 263 /* XXX Back out any successful setting? */ 264 goto out; 265 } 266 } 267 268 /* If we were restoring a saved state, reset it to "unused". */ 269 if (level == &sc->saved_level) { 270 sc->saved_level.total_set.freq = CPUFREQ_VAL_UNKNOWN; 271 sc->saved_priority = 0; 272 } 273 274 /* 275 * Before recording the current level, check if we're going to a 276 * higher priority and have not saved a level yet. If so, save the 277 * previous level and priority. 278 */ 279 if (sc->curr_level.total_set.freq != CPUFREQ_VAL_UNKNOWN && 280 sc->saved_level.total_set.freq == CPUFREQ_VAL_UNKNOWN && 281 priority > sc->curr_priority) { 282 sc->saved_level = sc->curr_level; 283 sc->saved_priority = sc->curr_priority; 284 } 285 sc->curr_level = *level; 286 sc->curr_priority = priority; 287 error = 0; 288 289out: 290 if (error) 291 device_printf(set->dev, "set freq failed, err %d\n", error); 292 return (error); 293} 294 295static int 296cf_get_method(device_t dev, struct cf_level *level) 297{ 298 struct cpufreq_softc *sc; 299 struct cf_level *levels; 300 struct cf_setting *curr_set, set; 301 struct pcpu *pc; 302 device_t *devs; 303 int count, error, i, numdevs; 304 uint64_t rate; 305 306 sc = device_get_softc(dev); 307 curr_set = &sc->curr_level.total_set; 308 levels = NULL; 309 310 /* If we already know the current frequency, we're done. 
*/ 311 if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) 312 goto out; 313 314 /* 315 * We need to figure out the current level. Loop through every 316 * driver, getting the current setting. Then, attempt to get a best 317 * match of settings against each level. 318 */ 319 count = CF_MAX_LEVELS; 320 levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT); 321 if (levels == NULL) 322 return (ENOMEM); 323 error = CPUFREQ_LEVELS(sc->dev, levels, &count); 324 if (error) 325 goto out; 326 error = device_get_children(device_get_parent(dev), &devs, &numdevs); 327 if (error) 328 goto out; 329 for (i = 0; i < numdevs && curr_set->freq == CPUFREQ_VAL_UNKNOWN; i++) { 330 if (!device_is_attached(devs[i])) 331 continue; 332 error = CPUFREQ_DRV_GET(devs[i], &set); 333 if (error) 334 continue; 335 for (i = 0; i < count; i++) { 336 if (CPUFREQ_CMP(set.freq, levels[i].total_set.freq)) { 337 sc->curr_level = levels[i]; 338 break; 339 } 340 } 341 } 342 free(devs, M_TEMP); 343 if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) 344 goto out; 345 346 /* 347 * We couldn't find an exact match, so attempt to estimate and then 348 * match against a level. 
349 */ 350 pc = cpu_get_pcpu(dev); 351 if (pc == NULL) { 352 error = ENXIO; 353 goto out; 354 } 355 cpu_est_clockrate(pc->pc_cpuid, &rate); 356 rate /= 1000000; 357 for (i = 0; i < count; i++) { 358 if (CPUFREQ_CMP(rate, levels[i].total_set.freq)) { 359 sc->curr_level = levels[i]; 360 break; 361 } 362 } 363 364out: 365 if (levels) 366 free(levels, M_TEMP); 367 *level = sc->curr_level; 368 return (0); 369} 370 371static int 372cf_levels_method(device_t dev, struct cf_level *levels, int *count) 373{ 374 struct cf_setting_array *set_arr; 375 struct cf_setting_lst rel_sets; 376 struct cpufreq_softc *sc; 377 struct cf_level *lev; 378 struct cf_setting *sets; 379 struct pcpu *pc; 380 device_t *devs; 381 int error, i, numdevs, set_count, type; 382 uint64_t rate; 383 384 if (levels == NULL || count == NULL) 385 return (EINVAL); 386 387 TAILQ_INIT(&rel_sets); 388 sc = device_get_softc(dev); 389 error = device_get_children(device_get_parent(dev), &devs, &numdevs); 390 if (error) 391 return (error); 392 sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT); 393 if (sets == NULL) { 394 free(devs, M_TEMP); 395 return (ENOMEM); 396 } 397 398 /* Get settings from all cpufreq drivers. */ 399 for (i = 0; i < numdevs; i++) { 400 /* Skip devices that aren't ready. */ 401 if (!device_is_attached(devs[i])) 402 continue; 403 404 /* 405 * Get settings, skipping drivers that offer no settings or 406 * provide settings for informational purposes only. 407 */ 408 error = CPUFREQ_DRV_TYPE(devs[i], &type); 409 if (error || (type & CPUFREQ_FLAG_INFO_ONLY)) 410 continue; 411 set_count = MAX_SETTINGS; 412 error = CPUFREQ_DRV_SETTINGS(devs[i], sets, &set_count); 413 if (error || set_count == 0) 414 continue; 415 416 /* Add the settings to our absolute/relative lists. 
*/ 417 switch (type & CPUFREQ_TYPE_MASK) { 418 case CPUFREQ_TYPE_ABSOLUTE: 419 error = cpufreq_insert_abs(sc, sets, set_count); 420 break; 421 case CPUFREQ_TYPE_RELATIVE: 422 set_arr = malloc(sizeof(*set_arr), M_TEMP, M_NOWAIT); 423 if (set_arr == NULL) { 424 error = ENOMEM; 425 goto out; 426 } 427 bcopy(sets, set_arr->sets, set_count * sizeof(*sets)); 428 set_arr->count = set_count; 429 TAILQ_INSERT_TAIL(&rel_sets, set_arr, link); 430 break; 431 default: 432 error = EINVAL; 433 break; 434 } 435 if (error) 436 goto out; 437 } 438 439 /* 440 * If there are no absolute levels, create a fake one at 100%. We 441 * then cache the clockrate for later use as our base frequency. 442 * 443 * XXX This assumes that the first time through, if we only have 444 * relative drivers, the CPU is currently running at 100%. 445 */ 446 if (TAILQ_EMPTY(&sc->all_levels)) { 447 if (sc->max_mhz == CPUFREQ_VAL_UNKNOWN) { 448 pc = cpu_get_pcpu(dev); 449 cpu_est_clockrate(pc->pc_cpuid, &rate); 450 sc->max_mhz = rate / 1000000; 451 } 452 memset(&sets[0], CPUFREQ_VAL_UNKNOWN, sizeof(*sets)); 453 sets[0].freq = sc->max_mhz; 454 sets[0].dev = NULL; 455 error = cpufreq_insert_abs(sc, sets, 1); 456 if (error) 457 goto out; 458 } 459 460 /* Create a combined list of absolute + relative levels. */ 461 TAILQ_FOREACH(set_arr, &rel_sets, link) 462 cpufreq_expand_set(sc, set_arr); 463 464 /* If the caller doesn't have enough space, return the actual count. */ 465 if (sc->all_count > *count) { 466 *count = sc->all_count; 467 error = E2BIG; 468 goto out; 469 } 470 471 /* Finally, output the list of levels. */ 472 i = 0; 473 TAILQ_FOREACH(lev, &sc->all_levels, link) { 474 levels[i] = *lev; 475 i++; 476 } 477 *count = sc->all_count; 478 error = 0; 479 480out: 481 /* Clear all levels since we regenerate them each time. 
*/ 482 while ((lev = TAILQ_FIRST(&sc->all_levels)) != NULL) { 483 TAILQ_REMOVE(&sc->all_levels, lev, link); 484 free(lev, M_TEMP); 485 } 486 while ((set_arr = TAILQ_FIRST(&rel_sets)) != NULL) { 487 TAILQ_REMOVE(&rel_sets, set_arr, link); 488 free(set_arr, M_TEMP); 489 } 490 sc->all_count = 0; 491 free(devs, M_TEMP); 492 free(sets, M_TEMP); 493 return (error); 494} 495 496/* 497 * Create levels for an array of absolute settings and insert them in 498 * sorted order in the specified list. 499 */ 500static int 501cpufreq_insert_abs(struct cpufreq_softc *sc, struct cf_setting *sets, 502 int count) 503{ 504 struct cf_level_lst *list; 505 struct cf_level *level, *search; 506 int i; 507 508 list = &sc->all_levels; 509 for (i = 0; i < count; i++) { 510 level = malloc(sizeof(*level), M_TEMP, M_NOWAIT | M_ZERO); 511 if (level == NULL) 512 return (ENOMEM); 513 level->abs_set = sets[i]; 514 level->total_set = sets[i]; 515 level->total_set.dev = NULL; 516 sc->all_count++; 517 518 if (TAILQ_EMPTY(list)) { 519 TAILQ_INSERT_HEAD(list, level, link); 520 continue; 521 } 522 523 TAILQ_FOREACH_REVERSE(search, list, cf_level_lst, link) { 524 if (sets[i].freq <= search->total_set.freq) { 525 TAILQ_INSERT_AFTER(list, search, level, link); 526 break; 527 } 528 } 529 } 530 return (0); 531} 532 533/* 534 * Expand a group of relative settings, creating derived levels from them. 535 */ 536static int 537cpufreq_expand_set(struct cpufreq_softc *sc, struct cf_setting_array *set_arr) 538{ 539 struct cf_level *fill, *search; 540 struct cf_setting *set; 541 int i; 542 543 TAILQ_FOREACH(search, &sc->all_levels, link) { 544 /* Skip this level if we've already modified it. */ 545 for (i = 0; i < search->rel_count; i++) { 546 if (search->rel_set[i].dev == set_arr->sets[0].dev) 547 break; 548 } 549 if (i != search->rel_count) 550 continue; 551 552 /* Add each setting to the level, duplicating if necessary. 
*/ 553 for (i = 0; i < set_arr->count; i++) { 554 set = &set_arr->sets[i]; 555 556 /* 557 * If this setting is less than 100%, split the level 558 * into two and add this setting to the new level. 559 */ 560 fill = search; 561 if (set->freq < 10000) 562 fill = cpufreq_dup_set(sc, search, set); 563 564 /* 565 * The new level was a duplicate of an existing level 566 * so we freed it. Go to the next setting. 567 */ 568 if (fill == NULL) 569 continue; 570 571 /* Add this setting to the existing or new level. */ 572 KASSERT(fill->rel_count < MAX_SETTINGS, 573 ("cpufreq: too many relative drivers (%d)", 574 MAX_SETTINGS)); 575 fill->rel_set[fill->rel_count] = *set; 576 fill->rel_count++; 577 } 578 } 579 580 return (0); 581} 582 583static struct cf_level * 584cpufreq_dup_set(struct cpufreq_softc *sc, struct cf_level *dup, 585 struct cf_setting *set) 586{ 587 struct cf_level_lst *list; 588 struct cf_level *fill, *itr; 589 struct cf_setting *fill_set, *itr_set; 590 int i; 591 592 /* 593 * Create a new level, copy it from the old one, and update the 594 * total frequency and power by the percentage specified in the 595 * relative setting. 596 */ 597 fill = malloc(sizeof(*fill), M_TEMP, M_NOWAIT); 598 if (fill == NULL) 599 return (NULL); 600 *fill = *dup; 601 fill_set = &fill->total_set; 602 fill_set->freq = 603 ((uint64_t)fill_set->freq * set->freq) / 10000; 604 if (fill_set->power != CPUFREQ_VAL_UNKNOWN) { 605 fill_set->power = ((uint64_t)fill_set->power * set->freq) 606 / 10000; 607 } 608 if (set->lat != CPUFREQ_VAL_UNKNOWN) { 609 if (fill_set->lat != CPUFREQ_VAL_UNKNOWN) 610 fill_set->lat += set->lat; 611 else 612 fill_set->lat = set->lat; 613 } 614 615 /* 616 * If we copied an old level that we already modified (say, at 100%), 617 * we need to remove that setting before adding this one. Since we 618 * process each setting array in order, we know any settings for this 619 * driver will be found at the end. 
620 */ 621 for (i = fill->rel_count; i != 0; i--) { 622 if (fill->rel_set[i - 1].dev != set->dev) 623 break; 624 fill->rel_count--; 625 } 626 627 /* 628 * Insert the new level in sorted order. If we find a duplicate, 629 * free the new level. We can do this since any existing level will 630 * be guaranteed to have the same or less settings and thus consume 631 * less power. For example, a level with one absolute setting of 632 * 800 Mhz uses less power than one composed of an absolute setting 633 * of 1600 Mhz and a relative setting at 50%. 634 */ 635 list = &sc->all_levels; 636 if (TAILQ_EMPTY(list)) { 637 TAILQ_INSERT_HEAD(list, fill, link); 638 } else { 639 TAILQ_FOREACH_REVERSE(itr, list, cf_level_lst, link) { 640 itr_set = &itr->total_set; 641 if (CPUFREQ_CMP(fill_set->freq, itr_set->freq)) { 642 free(fill, M_TEMP); 643 fill = NULL; 644 break; 645 } else if (fill_set->freq < itr_set->freq) { 646 TAILQ_INSERT_AFTER(list, itr, fill, link); 647 sc->all_count++; 648 break; 649 } 650 } 651 } 652 653 return (fill); 654} 655 656static int 657cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS) 658{ 659 struct cpufreq_softc *sc; 660 struct cf_level *levels; 661 int count, devcount, error, freq, i, n; 662 device_t *devs; 663 664 devs = NULL; 665 sc = oidp->oid_arg1; 666 levels = malloc(CF_MAX_LEVELS * sizeof(*levels), M_TEMP, M_NOWAIT); 667 if (levels == NULL) 668 return (ENOMEM); 669 670 error = CPUFREQ_GET(sc->dev, &levels[0]); 671 if (error) 672 goto out; 673 freq = levels[0].total_set.freq; 674 error = sysctl_handle_int(oidp, &freq, 0, req); 675 if (error != 0 || req->newptr == NULL) 676 goto out; 677 678 /* 679 * While we only call cpufreq_get() on one device (assuming all 680 * CPUs have equal levels), we call cpufreq_set() on all CPUs. 681 * This is needed for some MP systems. 
682 */ 683 error = devclass_get_devices(cpufreq_dc, &devs, &devcount); 684 if (error) 685 goto out; 686 for (n = 0; n < devcount; n++) { 687 count = CF_MAX_LEVELS; 688 error = CPUFREQ_LEVELS(devs[n], levels, &count); 689 if (error) 690 break; 691 for (i = 0; i < count; i++) { 692 if (CPUFREQ_CMP(levels[i].total_set.freq, freq)) { 693 error = CPUFREQ_SET(devs[n], &levels[i], 694 CPUFREQ_PRIO_USER); 695 break; 696 } 697 } 698 if (i == count) { 699 error = EINVAL; 700 break; 701 } 702 } 703 704out: 705 if (devs) 706 free(devs, M_TEMP); 707 if (levels) 708 free(levels, M_TEMP); 709 return (error); 710} 711 712static int 713cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS) 714{ 715 struct cpufreq_softc *sc; 716 struct cf_level *levels; 717 struct cf_setting *set; 718 struct sbuf sb; 719 int count, error, i; 720 721 sc = oidp->oid_arg1; 722 sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND); 723 724 /* Get settings from the device and generate the output string. */ 725 count = CF_MAX_LEVELS; 726 levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT); 727 if (levels == NULL) 728 return (ENOMEM); 729 error = CPUFREQ_LEVELS(sc->dev, levels, &count); 730 if (error) 731 goto out; 732 if (count) { 733 for (i = 0; i < count; i++) { 734 set = &levels[i].total_set; 735 sbuf_printf(&sb, "%d/%d ", set->freq, set->power); 736 } 737 } else 738 sbuf_cpy(&sb, "0"); 739 sbuf_trim(&sb); 740 sbuf_finish(&sb); 741 error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 742 743out: 744 free(levels, M_TEMP); 745 sbuf_delete(&sb); 746 return (error); 747} 748 749static int 750cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS) 751{ 752 device_t dev; 753 struct cf_setting *sets; 754 struct sbuf sb; 755 int error, i, set_count; 756 757 dev = oidp->oid_arg1; 758 sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND); 759 760 /* Get settings from the device and generate the output string. 
*/ 761 set_count = MAX_SETTINGS; 762 sets = malloc(set_count * sizeof(*sets), M_TEMP, M_NOWAIT); 763 if (sets == NULL) 764 return (ENOMEM); 765 error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count); 766 if (error) 767 goto out; 768 if (set_count) { 769 for (i = 0; i < set_count; i++) 770 sbuf_printf(&sb, "%d/%d ", sets[i].freq, sets[i].power); 771 } else 772 sbuf_cpy(&sb, "0"); 773 sbuf_trim(&sb); 774 sbuf_finish(&sb); 775 error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 776 777out: 778 free(sets, M_TEMP); 779 sbuf_delete(&sb); 780 return (error); 781} 782 783int 784cpufreq_register(device_t dev) 785{ 786 struct cpufreq_softc *sc; 787 device_t cf_dev, cpu_dev; 788 789 /* Add a sysctl to get each driver's settings separately. */ 790 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 791 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 792 OID_AUTO, "freq_settings", CTLTYPE_STRING | CTLFLAG_RD, dev, 0, 793 cpufreq_settings_sysctl, "A", "CPU frequency driver settings"); 794 795 /* 796 * Add only one cpufreq device to each CPU. Currently, all CPUs 797 * must offer the same levels and be switched at the same time. 798 */ 799 cpu_dev = device_get_parent(dev); 800 if ((cf_dev = device_find_child(cpu_dev, "cpufreq", -1))) { 801 sc = device_get_softc(cf_dev); 802 sc->max_mhz = CPUFREQ_VAL_UNKNOWN; 803 return (0); 804 } 805 806 /* Add the child device and possibly sysctls. */ 807 cf_dev = BUS_ADD_CHILD(cpu_dev, 0, "cpufreq", -1); 808 if (cf_dev == NULL) 809 return (ENOMEM); 810 device_quiet(cf_dev); 811 812 return (device_probe_and_attach(cf_dev)); 813} 814 815int 816cpufreq_unregister(device_t dev) 817{ 818 device_t cf_dev, *devs; 819 int cfcount, devcount, error, i, type; 820 821 /* 822 * If this is the last cpufreq child device, remove the control 823 * device as well. We identify cpufreq children by calling a method 824 * they support. 
825 */ 826 error = device_get_children(device_get_parent(dev), &devs, &devcount); 827 if (error) 828 return (error); 829 cf_dev = device_find_child(device_get_parent(dev), "cpufreq", -1); 830 cfcount = 0; 831 for (i = 0; i < devcount; i++) { 832 if (!device_is_attached(devs[i])) 833 continue; 834 if (CPUFREQ_DRV_TYPE(devs[i], &type) == 0) 835 cfcount++; 836 } 837 if (cfcount <= 1) 838 device_delete_child(device_get_parent(cf_dev), cf_dev); 839 free(devs, M_TEMP); 840 841 return (0); 842} 843