/*-
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_cpuset.c 214337 2010-10-25 13:13:16Z davidxu $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set, by default this is set 1.  Each
 * thread may further restrict the cpus it may run on to a subset of this
 * named set.  This creates an anonymous set which other threads and processes
 * may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid but must still
 * exist within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
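/*
 * Illustrative userland sketch (not part of this file): the "simple
 * application" pattern described above.  The thread queries the cpus
 * permitted by its process' base set, then pins itself (id -1 with
 * CPU_WHICH_TID means the current thread) to the first available cpu,
 * which creates an anonymous mask.  err(3) error handling is assumed.
 *
 *	#include <sys/param.h>
 *	#include <sys/cpuset.h>
 *	#include <err.h>
 *
 *	cpuset_t avail, mine;
 *	int i;
 *
 *	if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(avail), &avail) != 0)
 *		err(1, "cpuset_getaffinity");
 *	CPU_ZERO(&mine);
 *	for (i = 0; i < CPU_SETSIZE; i++)
 *		if (CPU_ISSET(i, &avail)) {
 *			CPU_SET(i, &mine);
 *			break;
 *		}
 *	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mine), &mine) != 0)
 *		err(1, "cpuset_setaffinity");
 */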
static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero;

cpuset_t *cpuset_root;

/*
 * Acquire a reference to a cpuset, all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}
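/*
 * Illustrative pattern (a sketch mirroring cpuset_setproc() below): a
 * reference may have to be dropped while a thread lock is held, where
 * freeing is unsafe, so the release is deferred onto a list and completed
 * once the lock has been dropped.  'td' and 'nset' are assumed locals.
 *
 *	struct setlist droplist;
 *	struct cpuset *old;
 *
 *	LIST_INIT(&droplist);
 *	thread_lock(td);
 *	cpuset_rel_defer(&droplist, td->td_cpuset);
 *	td->td_cpuset = nset;
 *	thread_unlock(td);
 *	while ((old = LIST_FIRST(&droplist)) != NULL)
 *		cpuset_rel_complete(old);
 */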
/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *jset, *tset;

		jset = td->td_ucred->cr_prison->pr_cpuset;
		for (tset = set; tset != NULL; tset = tset->cs_parent)
			if (tset == jset)
				break;
		if (tset == NULL) {
			cpuset_rel(set);
			set = NULL;
		}
	}

	return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (!CPU_OVERLAP(&set->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(&set->cs_mask, &newmask);
	CPU_AND(&newmask, mask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask)) != 0)
			break;
	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);

	return;
}
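/*
 * Worked example of the check above (an illustration, not code): given a
 * set whose mask covers cpus 0-3 (0xf) and a child restricted to cpus 2-3
 * (0xc), an attempt to apply the mask 0x3 (cpus 0-1) fails:
 * cpuset_testupdate() recurses into the child, finds 0xc & 0x3 == 0, and
 * returns EDEADLK before any set is modified.
 */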
/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail
	 * we do not allow modifying the dedicated root
	 * cpuset of the jail but may still allow to
	 * change child sets.
	 */
	if (jailed(curthread->td_ucred) &&
	    set->cs_flags & CPU_SET_ROOT)
		return (EPERM);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask);
	if (error)
		goto out;
	cpuset_update(set, mask);
	CPU_COPY(mask, &set->cs_mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}

/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
static int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		td = tdfind(id, -1);
		if (td == NULL)
			return (ESRCH);
		p = td->td_proc;
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id, curthread);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_JAIL:
	{
		/* Find `set' for prison with given id. */
		struct prison *pr;

		sx_slock(&allprison_lock);
		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ESRCH);
		cpuset_ref(pr->pr_cpuset);
		*setp = pr->pr_cpuset;
		mtx_unlock(&pr->pr_mtx);
		return (0);
	}
	case CPU_WHICH_IRQ:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}
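/*
 * Illustrative caller pattern (a sketch): for CPU_WHICH_PID/CPU_WHICH_TID
 * the proc comes back locked and the caller must drop the lock, while for
 * CPU_WHICH_CPUSET/CPU_WHICH_JAIL the set comes back referenced and the
 * caller must release it.
 *
 *	error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &set);
 *	if (error)
 *		return (error);
 *	// ... inspect p/td while the proc lock is held ...
 *	PROC_UNLOCK(p);
 */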
/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous we use its parent otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EDEADLK);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}

/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}

/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}

/*
 * Creates the cpuset for thread0.  We make two sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	set->cs_mask.__bits[0] = -1;
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;
	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	/*
	 * Initialize the unit allocator. 0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}
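/*
 * Illustrative note (not from this file): because every process starts in
 * set 1, an administrator can reprovision the machine with the cpuset(1)
 * utility; e.g. "cpuset -l 2-7 -s 1" would restrict set 1, and with it
 * every process still in the default set, to cpus 2-7.
 */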
/*
 * Create a cpuset, which would be cpuset_create() but
 * mark the new 'set' as root.
 *
 * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
 * for that.
 *
 * In case of no error, returns the set in *setp with a reference.
 */
int
cpuset_create_root(struct prison *pr, struct cpuset **setp)
{
	struct cpuset *set;
	int error;

	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	error = cpuset_create(setp, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
	if (error)
		return (error);

	KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set = *setp;
	set->cs_flags |= CPU_SET_ROOT;

	return (0);
}

int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
	int error;

	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

	cpuset_ref(set);
	error = cpuset_setproc(p->p_pid, set, NULL);
	if (error)
		return (error);
	cpuset_rel(set);
	return (0);
}

/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and marks the root read-only.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
#ifdef SMP
	mask.__bits[0] = all_cpus;
#else
	mask.__bits[0] = 1;
#endif
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);
	return (error);
}
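/*
 * Illustrative userland sketch (not from this file): cpuset(2) creates a
 * new numbered set under the caller's root set, moves the calling process
 * into it, and returns the new id.
 *
 *	#include <sys/param.h>
 *	#include <sys/cpuset.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *
 *	cpusetid_t id;
 *
 *	if (cpuset(&id) != 0)
 *		err(1, "cpuset");
 *	printf("now running in set %d\n", (int)id);
 */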
#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (uap->which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(uap->setid, td);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(uap->id, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t id;
	int error;

	if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (uap->which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
		return (EINVAL);
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	id = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&id, uap->setid, sizeof(id));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	size = uap->cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
			error = intr_getaffinity(uap->id, mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, uap->mask, size);
out:
	free(mask, M_TEMP);
	return (error);
}
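/*
 * Illustrative userland sketch (not from this file): with CPU_LEVEL_WHICH
 * and CPU_WHICH_PID the handler above ORs together the masks of every
 * thread in the process, so the result is the union of the per-thread
 * affinities.  err(3) error handling is assumed.
 *
 *	cpuset_t mask;
 *
 *	if (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask) != 0)
 *		err(1, "cpuset_getaffinity");
 */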
#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	mask = malloc(uap->cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(uap->mask, mask, uap->cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (uap->cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += uap->cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}

	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
		if (error)
			break;
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(uap->id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(uap->id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(uap->which, uap->id, &p,
			    &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
			error = intr_setaffinity(uap->id, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifdef DDB
DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;
	int cpu, once;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf("  mask=");
		for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
			if (CPU_ISSET(cpu, &set->cs_mask)) {
				if (once == 0) {
					db_printf("%d", cpu);
					once = 1;
				} else
					db_printf(",%d", cpu);
			}
		}
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */
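/*
 * Illustrative note (not from this file): with DDB compiled in, the
 * command registered above can be run from the debugger prompt to dump
 * every numbered set with its id, reference count, flags, parent, and
 * cpu mask:
 *
 *	db> show cpusets
 */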