kern_cpuset.c revision 177597
/*-
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_cpuset.c 177597 2008-03-25 09:11:53Z ru $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/limits.h>

#include <vm/uma.h>

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set, by default this is set 1.  Each
 * thread may further restrict the cpus it may run on to a subset of this
 * named set.  This creates an anonymous set which other threads and processes
 * may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid but must still
 * exist within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
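/*
 * As a rough userland sketch of that pattern (assuming the libc wrappers
 * cpuset_setaffinity(2)/cpuset_getaffinity(2) and the CPU_* macros from
 * <sys/cpuset.h>), a thread might pin itself to cpu 0 and then query the
 * cpus available to its process:
 *
 *	cpuset_t m;
 *
 *	CPU_ZERO(&m);
 *	CPU_SET(0, &m);
 *	cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, sizeof(m), &m);
 *	cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, sizeof(m), &m);
 */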
static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
struct cpuset *cpuset_zero;
static struct unrhdr *cpuset_unr;

/*
 * Acquire a reference to a cpuset, all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}

/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);
	return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (!CPU_OVERLAP(&set->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(&set->cs_mask, &newmask);
	CPU_AND(&newmask, mask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask)) != 0)
			break;
	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);

	return;
}

/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = suser(curthread);
	if (error)
		return (error);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask);
	if (error)
		goto out;
	cpuset_update(set, mask);
	CPU_COPY(mask, &set->cs_mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}

/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_root(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_base(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}

/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
static int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			FOREACH_THREAD_IN_PROC(p, td)
				if (td->td_tid == id)
					break;
			if (td != NULL)
				break;
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (td == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_base(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous we use its parent otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EINVAL);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}

/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EINVAL;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EINVAL;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}

/*
 * Apply an anonymous mask to a single thread.
 */
static int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	thread_lock(td);
	set = td->td_cpuset;
	error = cpuset_shadow(set, nset, mask);
	if (error == 0) {
		cpuset_rel(td->td_cpuset);
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}

/*
 * Creates the cpuset for thread0.  We make two sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	set->cs_mask.__bits[0] = -1;
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	/*
	 * Initialize the unit allocator. 0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}

/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and marks the root readonly.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
#ifdef SMP
	mask.__bits[0] = all_cpus;
#else
	mask.__bits[0] = 1;
#endif
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_root(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = cpuset_setproc(-1, set, NULL);
	if (error == 0)
		error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	cpuset_rel(set);
	return (error);
}

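/*
 * A rough userland sketch (assuming the libc wrappers of the same names as
 * the syscalls implemented here) of combining cpuset() above with the
 * cpuset_setid() and cpuset_setaffinity() calls below: create a new numbered
 * set, restrict it to cpus 2 and 3, and move a hypothetical process
 * 'otherpid' into it.
 *
 *	cpusetid_t setid;
 *	cpuset_t m;
 *
 *	cpuset(&setid);
 *	CPU_ZERO(&m);
 *	CPU_SET(2, &m);
 *	CPU_SET(3, &m);
 *	cpuset_setaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_CPUSET, setid,
 *	    sizeof(m), &m);
 *	cpuset_setid(CPU_WHICH_PID, otherpid, setid);
 */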

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (uap->which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(uap->setid);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(uap->id, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t id;
	int error;

	if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (uap->which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_base(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
		break;
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_root(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	id = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&id, uap->setid, sizeof(id));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize * NBBY > CPU_MAXSIZE)
		return (ERANGE);
	size = uap->cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
			break;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_root(set);
		else
			nset = cpuset_base(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
			CPU_COPY(&set->cs_mask, mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, uap->mask, size);
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize * NBBY > CPU_MAXSIZE)
		return (ERANGE);
	mask = malloc(uap->cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(uap->mask, mask, uap->cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (uap->cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += uap->cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
		if (error)
			break;
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
			break;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_root(set);
		else
			nset = cpuset_base(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(uap->id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(uap->id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
			error = cpuset_which(CPU_WHICH_CPUSET, uap->id, &p,
			    &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}