kern_cpuset.c revision 191639
/*-
 * Copyright (c) 2008,  Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_cpuset.c 191639 2009-04-28 21:00:50Z bz $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/jail.h>		/* Must come after sys/proc.h */

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set, by default this is set 1.  Each
 * thread may further restrict the cpus it may run on to a subset of this
 * named set.  This creates an anonymous set which other threads and processes
 * may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid but must still
 * exist within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
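
/*
 * An illustrative sketch of the simple-application pattern described
 * above, as a userland caller might write it.  The argument lists mirror
 * the cpuset_getaffinity()/cpuset_setaffinity() handlers below and the
 * CPU_* macros come from <sys/cpuset.h>; error handling is elided:
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask);	(cpus available to this thread)
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask);	(bind curthread to cpu 0)
 */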

static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero;

cpuset_t *cpuset_root;

/*
 * Acquire a reference to a cpuset, all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}

/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *rset, *jset;
		struct prison *pr;

		rset = cpuset_refroot(set);

		pr = td->td_ucred->cr_prison;
		mtx_lock(&pr->pr_mtx);
		cpuset_ref(pr->pr_cpuset);
		jset = pr->pr_cpuset;
		mtx_unlock(&pr->pr_mtx);

		if (jset->cs_id != rset->cs_id) {
			cpuset_rel(set);
			set = NULL;
		}
		cpuset_rel(jset);
		cpuset_rel(rset);
	}

	return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number cannot be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (!CPU_OVERLAP(&set->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(&set->cs_mask, &newmask);
	CPU_AND(&newmask, mask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask)) != 0)
			break;
	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);

	return;
}

/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail
	 * we do not allow modifying the dedicated root
	 * cpuset of the jail but may still allow
	 * changing child sets.
	 */
	if (jailed(curthread->td_ucred) &&
	    set->cs_flags & CPU_SET_ROOT)
		return (EPERM);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask);
	if (error)
		goto out;
	cpuset_update(set, mask);
	CPU_COPY(mask, &set->cs_mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}
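
/*
 * A worked illustration of the checks above, assuming a parent (root) set
 * with cpus 0-3 and a child set currently holding cpus 0-1.  These
 * outcomes follow directly from cpuset_modify() and cpuset_testupdate():
 *
 *	new mask {0}	ok; descendant sets and anonymous masks are
 *			AND'd down to {0} by cpuset_update()
 *	new mask {0,7}	EINVAL; cpu 7 is outside the parent's mask
 *	new mask {2,3}	EDEADLK; the new mask must overlap the set's
 *			current mask and, recursively, each descendant's
 *	any new mask	EPERM; if the set is CPU_SET_RDONLY, or is a
 *			jail's CPU_SET_ROOT set and the caller is jailed
 */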

/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
static int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			FOREACH_THREAD_IN_PROC(p, td)
				if (td->td_tid == id)
					break;
			if (td != NULL)
				break;
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (td == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id, curthread);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_JAIL:
	{
		/* Find `set' for prison with given id. */
		struct prison *pr;

		sx_slock(&allprison_lock);
		pr = prison_find(id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ESRCH);
		if (jailed(curthread->td_ucred)) {
			if (curthread->td_ucred->cr_prison == pr) {
				cpuset_ref(pr->pr_cpuset);
				set = pr->pr_cpuset;
			}
		} else {
			cpuset_ref(pr->pr_cpuset);
			set = pr->pr_cpuset;
		}
		mtx_unlock(&pr->pr_mtx);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	}
	case CPU_WHICH_IRQ:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}
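
/*
 * A sketch of the calling convention, modeled on the consumers of
 * cpuset_which() below (error handling elided):
 *
 *	error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &set);
 *	...  use p and td; p is returned locked for PID/TID lookups  ...
 *	PROC_UNLOCK(p);
 *
 * For CPU_WHICH_CPUSET and CPU_WHICH_JAIL only *setp is filled in,
 * carrying a new reference that the caller must drop with cpuset_rel().
 */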

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous we use its parent otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EDEADLK);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}

/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}

/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}

/*
 * Creates the cpuset for thread0.  We make two sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	set->cs_mask.__bits[0] = -1;
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;
	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	/*
	 * Initialize the unit allocator.  0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}

/*
 * Create a cpuset; like cpuset_create(), but additionally marks the new
 * 'set' as root.
 *
 * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
 * for that.
 *
 * In case of no error, returns the set in *setp locked with a reference.
 */
int
cpuset_create_root(struct thread *td, struct cpuset **setp)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	KASSERT(td != NULL, ("[%s:%d] invalid td", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);

	error = cpuset_create(setp, td->td_cpuset, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);

	KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set = *setp;
	set->cs_flags |= CPU_SET_ROOT;

	return (0);
}

int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
	int error;

	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

	cpuset_ref(set);
	error = cpuset_setproc(p->p_pid, set, NULL);
	if (error)
		return (error);
	cpuset_rel(set);
	return (0);
}

/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and marks the root read-only.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
#ifdef SMP
	mask.__bits[0] = all_cpus;
#else
	mask.__bits[0] = 1;
#endif
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);
	return (error);
}
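
/*
 * Illustrative userland use of the syscall above (a sketch, error handling
 * elided): create a new base set, a child of the caller's root carrying
 * the same mask, and fetch its id.  The calling process is reassigned to
 * the new set as a side effect.
 *
 *	cpusetid_t setid;
 *
 *	cpuset(&setid);
 */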

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (uap->which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(uap->setid, td);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(uap->id, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t id;
	int error;

	if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (uap->which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
		return (EINVAL);
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	id = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&id, uap->setid, sizeof(id));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	size = uap->cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
			error = intr_getaffinity(uap->id, mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, uap->mask, size);
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	mask = malloc(uap->cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(uap->mask, mask, uap->cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (uap->cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += uap->cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}

	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
		if (error)
			break;
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(uap->id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(uap->id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(uap->which, uap->id, &p,
			    &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
			error = intr_setaffinity(uap->id, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}
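
/*
 * A sketch of the administrative reprovisioning path through
 * cpuset_setaffinity() described in the header comment (hypothetical
 * userland caller, error handling elided): remove cpu 3 from numbered
 * set 1, the default set, which restricts every process in that set and
 * all of its child sets.
 *
 *	cpuset_t mask;
 *
 *	cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_CPUSET, 1,
 *	    sizeof(mask), &mask);
 *	CPU_CLR(3, &mask);
 *	cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_CPUSET, 1,
 *	    sizeof(mask), &mask);
 */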

#ifdef DDB
DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;
	int cpu, once;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf("  mask=");
		for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
			if (CPU_ISSET(cpu, &set->cs_mask)) {
				if (once == 0) {
					db_printf("%d", cpu);
					once = 1;
				} else
					db_printf(",%d", cpu);
			}
		}
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */