kern_cpuset.c revision 177597
/*-
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_cpuset.c 177597 2008-03-25 09:11:53Z ru $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/limits.h>

#include <vm/uma.h>

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set, by default this is set 1.  Each
 * thread may further restrict the cpus it may run on to a subset of this
 * named set.  This creates an anonymous set which other threads and processes
 * may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid but must still
 * exist within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
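/*
 * Illustrative sketch (not part of the kernel build) of the userland usage
 * suggested above: bind the current thread to cpu 0, then read back the
 * cpus its base set makes available.  The cpuset_setaffinity(2) and
 * cpuset_getaffinity(2) wrappers and the CPU_* macros are assumed to come
 * from <sys/cpuset.h>; error handling is elided:
 *
 *	#include <sys/param.h>
 *	#include <sys/cpuset.h>
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	(void)cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask);
 *	(void)cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask);
 */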
static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
struct cpuset *cpuset_zero;
static struct unrhdr *cpuset_unr;

/*
 * Acquire a reference to a cpuset, all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}
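/*
 * Minimal sketch of the deferred release protocol above, mirroring what
 * cpuset_setproc() does below: sets dropped while the thread lock is held
 * are parked on a local list and only truly released once no spinlocks
 * remain held.  The 'td' and 'nset' variables are hypothetical stand-ins:
 *
 *	struct setlist droplist;
 *
 *	LIST_INIT(&droplist);
 *	thread_lock(td);
 *	cpuset_rel_defer(&droplist, td->td_cpuset);
 *	td->td_cpuset = nset;
 *	thread_unlock(td);
 *	while ((set = LIST_FIRST(&droplist)) != NULL)
 *		cpuset_rel_complete(set);
 */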
/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);
	return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (!CPU_OVERLAP(&set->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(&set->cs_mask, &newmask);
	CPU_AND(&newmask, mask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask)) != 0)
			break;
	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);

	return;
}

/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask);
	if (error)
		goto out;
	cpuset_update(set, mask);
	CPU_COPY(mask, &set->cs_mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}
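/*
 * Worked example for the three routines above.  Assume a modifiable set
 * with mask 0xf (cpus 0-3), a child with mask 0xc (cpus 2-3), and an
 * anonymous grandchild with mask 0x8 (cpu 3):
 *
 *	cpuset_modify(set, 0x7) fails: cpuset_testupdate() returns EDEADLK
 *	because (0x7 & 0xc) & 0x8 == 0 would leave the grandchild empty.
 *
 *	cpuset_modify(set, 0xe) succeeds: cpuset_update() ANDs each
 *	descendant with its parent's new mask, leaving the child at 0xc
 *	and the grandchild at 0x8.
 */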
/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_root(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_base(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}

/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
static int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			FOREACH_THREAD_IN_PROC(p, td)
				if (td->td_tid == id)
					break;
			if (td != NULL)
				break;
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (td == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_base(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}
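/*
 * Sketch of the calling convention above, as the syscall handlers below
 * use it: a successful PID/TID lookup returns with the proc locked and
 * obliges the caller to PROC_UNLOCK(), while a CPUSET lookup instead
 * returns a referenced set that must be dropped with cpuset_rel():
 *
 *	error = cpuset_which(CPU_WHICH_PID, -1, &p, &td, &set);
 *	if (error)
 *		return (error);
 *	(operate on p and td)
 *	PROC_UNLOCK(p);
 */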
/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous we use its parent otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EINVAL);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}

/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EINVAL;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EINVAL;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}
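/*
 * The two cases above map onto the callers below; cpuset(2) and
 * cpuset_setid(2) rebase a whole process while cpuset_setaffinity(2)
 * masks every thread in place:
 *
 *	error = cpuset_setproc(pid, set, NULL);		(case 1)
 *	error = cpuset_setproc(pid, NULL, mask);	(case 2)
 */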
/*
 * Apply an anonymous mask to a single thread.
 */
static int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	thread_lock(td);
	set = td->td_cpuset;
	error = cpuset_shadow(set, nset, mask);
	if (error == 0) {
		cpuset_rel(td->td_cpuset);
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}

/*
 * Creates the cpuset for thread0.  We make two sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	set->cs_mask.__bits[0] = -1;
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	/*
	 * Initialize the unit allocator.  0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}
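/*
 * Illustrative sketch: because the default set created above is set 1, a
 * suitably privileged administrator on a 4-cpu machine could vacate cpu 3
 * for everything still in the default set with one modification from
 * userland (hypothetical fragment, error handling elided):
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	CPU_SET(1, &mask);
 *	CPU_SET(2, &mask);
 *	(void)cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_CPUSET, 1,
 *	    sizeof(mask), &mask);
 */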
/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and marks the root read-only.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
#ifdef SMP
	mask.__bits[0] = all_cpus;
#else
	mask.__bits[0] = 1;
#endif
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_root(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = cpuset_setproc(-1, set, NULL);
	if (error == 0)
		error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (uap->which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(uap->setid);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(uap->id, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t id;
	int error;

	if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (uap->which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_base(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
		break;
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_root(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	id = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&id, uap->setid, sizeof(id));

	return (error);
}
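/*
 * Illustrative userland sketch tying the three syscalls above together:
 * create and join a new set (its id is returned), confirm it with
 * cpuset_getid(2), then install it for a child process.  'childpid' is a
 * hypothetical variable and error handling is elided:
 *
 *	cpusetid_t id;
 *
 *	(void)cpuset(&id);
 *	(void)cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, &id);
 *	(void)cpuset_setid(CPU_WHICH_PID, childpid, id);
 */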
#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize * NBBY > CPU_MAXSIZE)
		return (ERANGE);
	size = uap->cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
			break;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_root(set);
		else
			nset = cpuset_base(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
			CPU_COPY(&set->cs_mask, mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, uap->mask, size);
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize * NBBY > CPU_MAXSIZE)
		return (ERANGE);
	mask = malloc(uap->cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(uap->mask, mask, uap->cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (uap->cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += uap->cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}

	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
		if (error)
			break;
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
			break;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_root(set);
		else
			nset = cpuset_base(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(uap->id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(uap->id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
			error = cpuset_which(CPU_WHICH_CPUSET, uap->id, &p,
			    &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}